diff --git a/flo-token-explorer/bin/activate b/flo-token-explorer/bin/activate
deleted file mode 100644
index 9e470a4..0000000
--- a/flo-token-explorer/bin/activate
+++ /dev/null
@@ -1,76 +0,0 @@
-# This file must be used with "source bin/activate" *from bash*
-# you cannot run it directly
-
-deactivate () {
-    # reset old environment variables
-    if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
-        PATH="${_OLD_VIRTUAL_PATH:-}"
-        export PATH
-        unset _OLD_VIRTUAL_PATH
-    fi
-    if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
-        PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
-        export PYTHONHOME
-        unset _OLD_VIRTUAL_PYTHONHOME
-    fi
-
-    # This should detect bash and zsh, which have a hash command that must
-    # be called to get it to forget past commands. Without forgetting
-    # past commands the $PATH changes we made may not be respected
-    if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
-        hash -r
-    fi
-
-    if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
-        PS1="${_OLD_VIRTUAL_PS1:-}"
-        export PS1
-        unset _OLD_VIRTUAL_PS1
-    fi
-
-    unset VIRTUAL_ENV
-    if [ ! "$1" = "nondestructive" ] ; then
-    # Self destruct!
-        unset -f deactivate
-    fi
-}
-
-# unset irrelevant variables
-deactivate nondestructive
-
-VIRTUAL_ENV="/home/production/deployed/flo-token-explorer/flo-token-explorer"
-export VIRTUAL_ENV
-
-_OLD_VIRTUAL_PATH="$PATH"
-PATH="$VIRTUAL_ENV/bin:$PATH"
-export PATH
-
-# unset PYTHONHOME if set
-# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
-# could use `if (set -u; : $PYTHONHOME) ;` in bash
-if [ -n "${PYTHONHOME:-}" ] ; then
-    _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
-    unset PYTHONHOME
-fi
-
-if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
-    _OLD_VIRTUAL_PS1="${PS1:-}"
-    if [ "x(flo-token-explorer) " != x ] ; then
-        PS1="(flo-token-explorer) ${PS1:-}"
-    else
-        if [ "`basename \"$VIRTUAL_ENV\"`" = "__" ] ; then
-            # special case for Aspen magic directories
-            # see http://www.zetadev.com/software/aspen/
-            PS1="[`basename \`dirname \"$VIRTUAL_ENV\"\``] $PS1"
-        else
-            PS1="(`basename \"$VIRTUAL_ENV\"`)$PS1"
-        fi
-    fi
-    export PS1
-fi
-
-# This should detect bash and zsh, which have a hash command that must
-# be called to get it to forget past commands. Without forgetting
-# past commands the $PATH changes we made may not be respected
-if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
-    hash -r
-fi
diff --git a/flo-token-explorer/bin/activate.csh b/flo-token-explorer/bin/activate.csh
deleted file mode 100644
index 04bc4a7..0000000
--- a/flo-token-explorer/bin/activate.csh
+++ /dev/null
@@ -1,37 +0,0 @@
-# This file must be used with "source bin/activate.csh" *from csh*.
-# You cannot run it directly.
-# Created by Davide Di Blasi <davidedb@gmail.com>.
-# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>
-
-alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate'
-
-# Unset irrelevant variables.
-deactivate nondestructive
-
-setenv VIRTUAL_ENV "/home/production/deployed/flo-token-explorer/flo-token-explorer"
-
-set _OLD_VIRTUAL_PATH="$PATH"
-setenv PATH "$VIRTUAL_ENV/bin:$PATH"
-
-
-set _OLD_VIRTUAL_PROMPT="$prompt"
-
-if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
-    if ("flo-token-explorer" != "") then
-        set env_name = "flo-token-explorer"
-    else
-        if (`basename "VIRTUAL_ENV"` == "__") then
-            # special case for Aspen magic directories
-            # see http://www.zetadev.com/software/aspen/
-            set env_name = `basename \`dirname "$VIRTUAL_ENV"\``
-        else
-            set env_name = `basename "$VIRTUAL_ENV"`
-        endif
-    endif
-    set prompt = "[$env_name] $prompt"
-    unset env_name
-endif
-
-alias pydoc python -m pydoc
-
-rehash
diff --git a/flo-token-explorer/bin/activate.fish b/flo-token-explorer/bin/activate.fish
deleted file mode 100644
index 986365a..0000000
--- a/flo-token-explorer/bin/activate.fish
+++ /dev/null
@@ -1,75 +0,0 @@
-# This file must be used with ". bin/activate.fish" *from fish* (http://fishshell.org)
-# you cannot run it directly
-
-function deactivate -d "Exit virtualenv and return to normal shell environment"
-    # reset old environment variables
-    if test -n "$_OLD_VIRTUAL_PATH"
-        set -gx PATH $_OLD_VIRTUAL_PATH
-        set -e _OLD_VIRTUAL_PATH
-    end
-    if test -n "$_OLD_VIRTUAL_PYTHONHOME"
-        set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
-        set -e _OLD_VIRTUAL_PYTHONHOME
-    end
-
-    if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
-        functions -e fish_prompt
-        set -e _OLD_FISH_PROMPT_OVERRIDE
-        functions -c _old_fish_prompt fish_prompt
-        functions -e _old_fish_prompt
-    end
-
-    set -e VIRTUAL_ENV
-    if test "$argv[1]" != "nondestructive"
-        # Self destruct!
-        functions -e deactivate
-    end
-end
-
-# unset irrelevant variables
-deactivate nondestructive
-
-set -gx VIRTUAL_ENV "/home/production/deployed/flo-token-explorer/flo-token-explorer"
-
-set -gx _OLD_VIRTUAL_PATH $PATH
-set -gx PATH "$VIRTUAL_ENV/bin" $PATH
-
-# unset PYTHONHOME if set
-if set -q PYTHONHOME
-    set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
-    set -e PYTHONHOME
-end
-
-if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
-    # fish uses a function instead of an env var to generate the prompt.
-
-    # save the current fish_prompt function as the function _old_fish_prompt
-    functions -c fish_prompt _old_fish_prompt
-
-    # with the original prompt function renamed, we can override with our own.
-    function fish_prompt
-        # Save the return status of the last command
-        set -l old_status $status
-
-        # Prompt override?
-        if test -n "(flo-token-explorer) "
-            printf "%s%s" "(flo-token-explorer) " (set_color normal)
-        else
-            # ...Otherwise, prepend env
-            set -l _checkbase (basename "$VIRTUAL_ENV")
-            if test $_checkbase = "__"
-                # special case for Aspen magic directories
-                # see http://www.zetadev.com/software/aspen/
-                printf "%s[%s]%s " (set_color -b blue white) (basename (dirname "$VIRTUAL_ENV")) (set_color normal)
-            else
-                printf "%s(%s)%s" (set_color -b blue white) (basename "$VIRTUAL_ENV") (set_color normal)
-            end
-        end
-
-        # Restore the return status of the previous command.
-        echo "exit $old_status" | .
-        _old_fish_prompt
-    end
-
-    set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
-end
diff --git a/flo-token-explorer/bin/alembic b/flo-token-explorer/bin/alembic
deleted file mode 100755
index 051ddfa..0000000
--- a/flo-token-explorer/bin/alembic
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/home/production/deployed/flo-token-explorer/flo-token-explorer/bin/python3.6
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from alembic.config import main
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-    sys.exit(main())
diff --git a/flo-token-explorer/bin/easy_install b/flo-token-explorer/bin/easy_install
deleted file mode 100755
index 0154e28..0000000
--- a/flo-token-explorer/bin/easy_install
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/home/production/deployed/flo-token-explorer/flo-token-explorer/bin/python3.6
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from setuptools.command.easy_install import main
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-    sys.exit(main())
diff --git a/flo-token-explorer/bin/easy_install-3.6 b/flo-token-explorer/bin/easy_install-3.6
deleted file mode 100755
index 0154e28..0000000
--- a/flo-token-explorer/bin/easy_install-3.6
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/home/production/deployed/flo-token-explorer/flo-token-explorer/bin/python3.6
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from setuptools.command.easy_install import main
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-    sys.exit(main())
diff --git a/flo-token-explorer/bin/flask b/flo-token-explorer/bin/flask
deleted file mode 100755
index ad3718f..0000000
--- a/flo-token-explorer/bin/flask
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/home/production/deployed/flo-token-explorer/flo-token-explorer/bin/python3.6
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from flask.cli import main
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-    sys.exit(main())
diff --git a/flo-token-explorer/bin/gunicorn b/flo-token-explorer/bin/gunicorn
deleted file mode 100755
index 564d186..0000000
--- a/flo-token-explorer/bin/gunicorn
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/home/production/deployed/flo-token-explorer/flo-token-explorer/bin/python3.6
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from gunicorn.app.wsgiapp import run
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-    sys.exit(run())
diff --git a/flo-token-explorer/bin/gunicorn_paster b/flo-token-explorer/bin/gunicorn_paster
deleted file mode 100755
index a049a9e..0000000
--- a/flo-token-explorer/bin/gunicorn_paster
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/home/production/deployed/flo-token-explorer/flo-token-explorer/bin/python3.6
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from gunicorn.app.pasterapp import run
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-    sys.exit(run())
diff --git a/flo-token-explorer/bin/mako-render b/flo-token-explorer/bin/mako-render
deleted file mode 100755
index 60f9211..0000000
--- a/flo-token-explorer/bin/mako-render
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/home/production/deployed/flo-token-explorer/flo-token-explorer/bin/python3.6
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from mako.cmd import cmdline
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-    sys.exit(cmdline())
diff --git a/flo-token-explorer/bin/pip b/flo-token-explorer/bin/pip
deleted file mode 100755
index 3f56030..0000000
--- a/flo-token-explorer/bin/pip
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/home/production/deployed/flo-token-explorer/flo-token-explorer/bin/python3.6
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from pip import main
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-    sys.exit(main())
diff --git a/flo-token-explorer/bin/pip3 b/flo-token-explorer/bin/pip3
deleted file mode 100755
index 3f56030..0000000
--- a/flo-token-explorer/bin/pip3
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/home/production/deployed/flo-token-explorer/flo-token-explorer/bin/python3.6
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from pip import main
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-    sys.exit(main())
diff --git a/flo-token-explorer/bin/pip3.6 b/flo-token-explorer/bin/pip3.6
deleted file mode 100755
index 3f56030..0000000
--- a/flo-token-explorer/bin/pip3.6
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/home/production/deployed/flo-token-explorer/flo-token-explorer/bin/python3.6
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from pip import main
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-    sys.exit(main())
diff --git a/flo-token-explorer/bin/python b/flo-token-explorer/bin/python
deleted file mode 120000
index 039b719..0000000
--- a/flo-token-explorer/bin/python
+++ /dev/null
@@ -1 +0,0 @@
-python3.6
\ No newline at end of file
diff --git a/flo-token-explorer/bin/python3 b/flo-token-explorer/bin/python3
deleted file mode 120000
index 039b719..0000000
--- a/flo-token-explorer/bin/python3
+++ /dev/null
@@ -1 +0,0 @@
-python3.6
\ No newline at end of file
diff --git a/flo-token-explorer/bin/python3.6 b/flo-token-explorer/bin/python3.6
deleted file mode 120000
index 6270541..0000000
--- a/flo-token-explorer/bin/python3.6
+++ /dev/null
@@ -1 +0,0 @@
-/usr/bin/python3.6
\ No newline at end of file
diff --git a/flo-token-explorer/bin/wheel b/flo-token-explorer/bin/wheel
deleted file mode 100755
index e51b166..0000000
--- a/flo-token-explorer/bin/wheel
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/home/production/deployed/flo-token-explorer/flo-token-explorer/bin/python3.6
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from wheel.cli import main
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-    sys.exit(main())
diff --git a/flo-token-explorer/lib/python3.6/site-packages/Click-7.0.dist-info/INSTALLER b/flo-token-explorer/lib/python3.6/site-packages/Click-7.0.dist-info/INSTALLER
deleted file mode 100644
index a1b589e..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/Click-7.0.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/flo-token-explorer/lib/python3.6/site-packages/Click-7.0.dist-info/LICENSE.txt b/flo-token-explorer/lib/python3.6/site-packages/Click-7.0.dist-info/LICENSE.txt
deleted file mode 100644
index 87ce152..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/Click-7.0.dist-info/LICENSE.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-Copyright © 2014 by the Pallets team.
-
-Some rights reserved.
-
-Redistribution and use in source and binary forms of the software as
-well as documentation, with or without modification, are permitted
-provided that the following conditions are met:
-
-- Redistributions of source code must retain the above copyright
-  notice, this list of conditions and the following disclaimer.
-
-- Redistributions in binary form must reproduce the above copyright
-  notice, this list of conditions and the following disclaimer in the
-  documentation and/or other materials provided with the distribution.
-
-- Neither the name of the copyright holder nor the names of its
-  contributors may be used to endorse or promote products derived from
-  this software without specific prior written permission.
-
-THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
-USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGE.
-
-----
-
-Click uses parts of optparse written by Gregory P. Ward and maintained
-by the Python Software Foundation. This is limited to code in parser.py.
-
-Copyright © 2001-2006 Gregory P. Ward. All rights reserved.
-Copyright © 2002-2006 Python Software Foundation. All rights reserved.
diff --git a/flo-token-explorer/lib/python3.6/site-packages/Click-7.0.dist-info/METADATA b/flo-token-explorer/lib/python3.6/site-packages/Click-7.0.dist-info/METADATA
deleted file mode 100644
index 625bdad..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/Click-7.0.dist-info/METADATA
+++ /dev/null
@@ -1,121 +0,0 @@
-Metadata-Version: 2.1
-Name: Click
-Version: 7.0
-Summary: Composable command line interface toolkit
-Home-page: https://palletsprojects.com/p/click/
-Author: Armin Ronacher
-Author-email: armin.ronacher@active-4.com
-Maintainer: Pallets Team
-Maintainer-email: contact@palletsprojects.com
-License: BSD
-Project-URL: Documentation, https://click.palletsprojects.com/
-Project-URL: Code, https://github.com/pallets/click
-Project-URL: Issue tracker, https://github.com/pallets/click/issues
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: BSD License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
-
-\$ click\_
-==========
-
-Click is a Python package for creating beautiful command line interfaces
-in a composable way with as little code as necessary. It's the "Command
-Line Interface Creation Kit". It's highly configurable but comes with
-sensible defaults out of the box.
-
-It aims to make the process of writing command line tools quick and fun
-while also preventing any frustration caused by the inability to
-implement an intended CLI API.
-
-Click in three points:
-
-- Arbitrary nesting of commands
-- Automatic help page generation
-- Supports lazy loading of subcommands at runtime
-
-
-Installing
-----------
-
-Install and update using `pip`_:
-
-.. code-block:: text
-
-    $ pip install click
-
-Click supports Python 3.4 and newer, Python 2.7, and PyPy.
-
-.. _pip: https://pip.pypa.io/en/stable/quickstart/
-
-
-A Simple Example
-----------------
-
-What does it look like? Here is an example of a simple Click program:
-
-.. code-block:: python
-
-    import click
-
-    @click.command()
-    @click.option("--count", default=1, help="Number of greetings.")
-    @click.option("--name", prompt="Your name",
-                  help="The person to greet.")
-    def hello(count, name):
-        """Simple program that greets NAME for a total of COUNT times."""
-        for _ in range(count):
-            click.echo("Hello, %s!" % name)
-
-    if __name__ == '__main__':
-        hello()
-
-And what it looks like when run:
-
-.. code-block:: text
-
-    $ python hello.py --count=3
-    Your name: Click
-    Hello, Click!
-    Hello, Click!
-    Hello, Click!
-
-
-Donate
-------
-
-The Pallets organization develops and supports Click and other popular
-packages. In order to grow the community of contributors and users, and
-allow the maintainers to devote more time to the projects, `please
-donate today`_.
-
-.. _please donate today: https://palletsprojects.com/donate
-
-
-Links
------
-
-* Website: https://palletsprojects.com/p/click/
-* Documentation: https://click.palletsprojects.com/
-* License: `BSD <https://github.com/pallets/click/blob/master/LICENSE.rst>`_
-* Releases: https://pypi.org/project/click/
-* Code: https://github.com/pallets/click
-* Issue tracker: https://github.com/pallets/click/issues
-* Test status:
-
-  * Linux, Mac: https://travis-ci.org/pallets/click
-  * Windows: https://ci.appveyor.com/project/pallets/click
-
-* Test coverage: https://codecov.io/gh/pallets/click
-
-
diff --git a/flo-token-explorer/lib/python3.6/site-packages/Click-7.0.dist-info/RECORD b/flo-token-explorer/lib/python3.6/site-packages/Click-7.0.dist-info/RECORD
deleted file mode 100644
index e3c80c9..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/Click-7.0.dist-info/RECORD
+++ /dev/null
@@ -1,40 +0,0 @@
-Click-7.0.dist-info/LICENSE.txt,sha256=4hIxn676T0Wcisk3_chVcECjyrivKTZsoqSNI5AlIlw,1876
-Click-7.0.dist-info/METADATA,sha256=-r8jeke3Zer4diRvT1MjFZuiJ6yTT_qFP39svLqdaLI,3516
-Click-7.0.dist-info/RECORD,,
-Click-7.0.dist-info/WHEEL,sha256=gduuPyBvFJQSQ0zdyxF7k0zynDXbIbvg5ZBHoXum5uk,110
-Click-7.0.dist-info/top_level.txt,sha256=J1ZQogalYS4pphY_lPECoNMfw0HzTSrZglC4Yfwo4xA,6
-click/__init__.py,sha256=HjGThQ7tef9kkwCV371TBnrf0SAi6fKfU_jtEnbYTvQ,2789
-click/_bashcomplete.py,sha256=iaNUmtxag0YPfxba3TDYCNietiTMQIrvhRLj-H8okFU,11014
-click/_compat.py,sha256=vYmvoj4opPxo-c-2GMQQjYT_r_QkOKybkfGoeVrt0dA,23399
-click/_termui_impl.py,sha256=xHmLtOJhKUCVD6168yucJ9fknUJPAMs0eUTPgVUO-GQ,19611
-click/_textwrap.py,sha256=gwS4m7bdQiJnzaDG8osFcRb-5vn4t4l2qSCy-5csCEc,1198
-click/_unicodefun.py,sha256=QHy2_5jYlX-36O-JVrTHNnHOqg8tquUR0HmQFev7Ics,4364
-click/_winconsole.py,sha256=PPWVak8Iikm_gAPsxMrzwsVFCvHgaW3jPaDWZ1JBl3U,8965
-click/core.py,sha256=q8FLcDZsagBGSRe5Y9Hi_FGvAeZvusNfoO5EkhkSQ8Y,75305
-click/decorators.py,sha256=idKt6duLUUfAFftrHoREi8MJSd39XW36pUVHthdglwk,11226
-click/exceptions.py,sha256=CNpAjBAE7qjaV4WChxQeak95e5yUOau8AsvT-8m6wss,7663
-click/formatting.py,sha256=eh-cypTUAhpI3HD-K4ZpR3vCiURIO62xXvKkR3tNUTM,8889
-click/globals.py,sha256=oQkou3ZQ5DgrbVM6BwIBirwiqozbjfirzsLGAlLRRdg,1514
-click/parser.py,sha256=m-nGZz4VwprM42_qtFlWFGo7yRJQxkBlRcZodoH593Y,15510
-click/termui.py,sha256=o_ZXB2jyvL2Rce7P_bFGq452iyBq9ykJyRApIPMCZO0,23207
-click/testing.py,sha256=aYGqY_iWLu2p4k7lkuJ6t3fqpf6aPGqTsyLzNY_ngKg,13062
-click/types.py,sha256=2Q929p-aBP_ZYuMFJqJR-Ipucofv3fmDc5JzBDPmzJU,23287
-click/utils.py,sha256=6-D0WkAxvv9FkgHXSHwDIv0l9Gdx9Mm6Z5vuKNLIfZI,15763
-Click-7.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-click/__pycache__/_textwrap.cpython-36.pyc,,
-click/__pycache__/testing.cpython-36.pyc,,
-click/__pycache__/_unicodefun.cpython-36.pyc,,
-click/__pycache__/_bashcomplete.cpython-36.pyc,,
-click/__pycache__/_compat.cpython-36.pyc,,
-click/__pycache__/globals.cpython-36.pyc,,
-click/__pycache__/_winconsole.cpython-36.pyc,,
-click/__pycache__/parser.cpython-36.pyc,,
-click/__pycache__/_termui_impl.cpython-36.pyc,,
-click/__pycache__/types.cpython-36.pyc,,
-click/__pycache__/termui.cpython-36.pyc,,
-click/__pycache__/formatting.cpython-36.pyc,,
-click/__pycache__/decorators.cpython-36.pyc,,
-click/__pycache__/core.cpython-36.pyc,,
-click/__pycache__/__init__.cpython-36.pyc,,
-click/__pycache__/utils.cpython-36.pyc,,
-click/__pycache__/exceptions.cpython-36.pyc,,
diff --git a/flo-token-explorer/lib/python3.6/site-packages/Click-7.0.dist-info/WHEEL b/flo-token-explorer/lib/python3.6/site-packages/Click-7.0.dist-info/WHEEL
deleted file mode 100644
index 1316c41..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/Click-7.0.dist-info/WHEEL
+++ /dev/null
@@ -1,6 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.31.1)
-Root-Is-Purelib: true
-Tag: py2-none-any
-Tag: py3-none-any
-
diff --git a/flo-token-explorer/lib/python3.6/site-packages/Click-7.0.dist-info/top_level.txt b/flo-token-explorer/lib/python3.6/site-packages/Click-7.0.dist-info/top_level.txt
deleted file mode 100644
index dca9a90..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/Click-7.0.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-click
diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask-1.0.2.dist-info/INSTALLER b/flo-token-explorer/lib/python3.6/site-packages/Flask-1.0.2.dist-info/INSTALLER
deleted file mode 100644
index a1b589e..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/Flask-1.0.2.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask-1.0.2.dist-info/LICENSE.txt b/flo-token-explorer/lib/python3.6/site-packages/Flask-1.0.2.dist-info/LICENSE.txt
deleted file mode 100644
index 8f9252f..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/Flask-1.0.2.dist-info/LICENSE.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-Copyright © 2010 by the Pallets team.
-
-Some rights reserved.
-
-Redistribution and use in source and binary forms of the software as
-well as documentation, with or without modification, are permitted
-provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice,
-  this list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright
-  notice, this list of conditions and the following disclaimer in the
-  documentation and/or other materials provided with the distribution.
-
-* Neither the name of the copyright holder nor the names of its
-  contributors may be used to endorse or promote products derived from
-  this software without specific prior written permission.
-
-THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
-USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGE.
diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask-1.0.2.dist-info/METADATA b/flo-token-explorer/lib/python3.6/site-packages/Flask-1.0.2.dist-info/METADATA
deleted file mode 100644
index c600e73..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/Flask-1.0.2.dist-info/METADATA
+++ /dev/null
@@ -1,130 +0,0 @@
-Metadata-Version: 2.1
-Name: Flask
-Version: 1.0.2
-Summary: A simple framework for building complex web applications.
-Home-page: https://www.palletsprojects.com/p/flask/
-Author: Armin Ronacher
-Author-email: armin.ronacher@active-4.com
-Maintainer: Pallets team
-Maintainer-email: contact@palletsprojects.com
-License: BSD
-Project-URL: Documentation, http://flask.pocoo.org/docs/
-Project-URL: Code, https://github.com/pallets/flask
-Project-URL: Issue tracker, https://github.com/pallets/flask/issues
-Platform: any
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Environment :: Web Environment
-Classifier: Framework :: Flask
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: BSD License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
-Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Application
-Classifier: Topic :: Software Development :: Libraries :: Application Frameworks
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Provides-Extra: dev
-Provides-Extra: docs
-Provides-Extra: dotenv
-Requires-Dist: Werkzeug (>=0.14)
-Requires-Dist: Jinja2 (>=2.10)
-Requires-Dist: itsdangerous (>=0.24)
-Requires-Dist: click (>=5.1)
-Provides-Extra: dev
-Requires-Dist: pytest (>=3); extra == 'dev'
-Requires-Dist: coverage; extra == 'dev'
-Requires-Dist: tox; extra == 'dev'
-Requires-Dist: sphinx; extra == 'dev'
-Requires-Dist: pallets-sphinx-themes; extra == 'dev'
-Requires-Dist: sphinxcontrib-log-cabinet; extra == 'dev'
-Provides-Extra: docs
-Requires-Dist: sphinx; extra == 'docs'
-Requires-Dist: pallets-sphinx-themes; extra == 'docs'
-Requires-Dist: sphinxcontrib-log-cabinet; extra == 'docs'
-Provides-Extra: dotenv
-Requires-Dist: python-dotenv; extra == 'dotenv'
-
-Flask
-=====
-
-Flask is a lightweight `WSGI`_ web application framework. It is designed
-to make getting started quick and easy, with the ability to scale up to
-complex applications. It began as a simple wrapper around `Werkzeug`_
-and `Jinja`_ and has become one of the most popular Python web
-application frameworks.
-
-Flask offers suggestions, but doesn't enforce any dependencies or
-project layout. It is up to the developer to choose the tools and
-libraries they want to use. There are many extensions provided by the
-community that make adding new functionality easy.
-
-
-Installing
-----------
-
-Install and update using `pip`_:
-
-.. code-block:: text
-
-    pip install -U Flask
-
-
-A Simple Example
-----------------
-
-.. code-block:: python
-
-    from flask import Flask
-
-    app = Flask(__name__)
-
-    @app.route('/')
-    def hello():
-        return 'Hello, World!'
-
-.. code-block:: text
-
-    $ FLASK_APP=hello.py flask run
-     * Serving Flask app "hello"
-     * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
-
-
-Donate
-------
-
-The Pallets organization develops and supports Flask and the libraries
-it uses. In order to grow the community of contributors and users, and
-allow the maintainers to devote more time to the projects, `please
-donate today`_.
-
-.. _please donate today: https://psfmember.org/civicrm/contribute/transact?reset=1&id=20
-
-
-Links
------
-
-* Website: https://www.palletsprojects.com/p/flask/
-* Documentation: http://flask.pocoo.org/docs/
-* License: `BSD <https://github.com/pallets/flask/blob/master/LICENSE>`_
-* Releases: https://pypi.org/project/Flask/
-* Code: https://github.com/pallets/flask
-* Issue tracker: https://github.com/pallets/flask/issues
-* Test status:
-
-  * Linux, Mac: https://travis-ci.org/pallets/flask
-  * Windows: https://ci.appveyor.com/project/pallets/flask
-
-* Test coverage: https://codecov.io/gh/pallets/flask
-
-.. _WSGI: https://wsgi.readthedocs.io
-.. _Werkzeug: https://www.palletsprojects.com/p/werkzeug/
-.. _Jinja: https://www.palletsprojects.com/p/jinja/
-.. _pip: https://pip.pypa.io/en/stable/quickstart/
-
-
diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask-1.0.2.dist-info/RECORD b/flo-token-explorer/lib/python3.6/site-packages/Flask-1.0.2.dist-info/RECORD
deleted file mode 100644
index 5ecfe80..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/Flask-1.0.2.dist-info/RECORD
+++ /dev/null
@@ -1,48 +0,0 @@
-Flask-1.0.2.dist-info/LICENSE.txt,sha256=ziEXA3AIuaiUn1qe4cd1XxCESWTYrk4TjN7Qb06J3l8,1575
-Flask-1.0.2.dist-info/METADATA,sha256=iA5tiNWzTtgCVe80aTZGNWsckj853fJyfvHs9U-WZRk,4182
-Flask-1.0.2.dist-info/RECORD,,
-Flask-1.0.2.dist-info/WHEEL,sha256=J3CsTk7Mf2JNUyhImI-mjX-fmI4oDjyiXgWT4qgZiCE,110
-Flask-1.0.2.dist-info/entry_points.txt,sha256=gBLA1aKg0OYR8AhbAfg8lnburHtKcgJLDU52BBctN0k,42
-Flask-1.0.2.dist-info/top_level.txt,sha256=dvi65F6AeGWVU0TBpYiC04yM60-FX1gJFkK31IKQr5c,6
-flask/__init__.py,sha256=qq8lK6QQbxJALf1igz7qsvUwOTAoKvFGfdLm7jPNsso,1673
-flask/__main__.py,sha256=pgIXrHhxM5MAMvgzAqWpw_t6AXZ1zG38us4JRgJKtxk,291
-flask/_compat.py,sha256=UDFGhosh6mOdNB-4evKPuneHum1OpcAlwTNJCRm0irQ,2892
-flask/app.py,sha256=ahpe3T8w98rQd_Er5d7uDxK57S1nnqGQx3V3hirBovU,94147
-flask/blueprints.py,sha256=Cyhl_x99tgwqEZPtNDJUFneAfVJxWfEU4bQA7zWS6VU,18331
-flask/cli.py,sha256=30QYAO10Do9LbZYCLgfI_xhKjASdLopL8wKKVUGS2oA,29442
-flask/config.py,sha256=kznUhj4DLYxsTF_4kfDG8GEHto1oZG_kqblyrLFtpqQ,9951
-flask/ctx.py,sha256=leFzS9fzmo0uaLCdxpHc5_iiJZ1H0X_Ig4yPCOvT--g,16224
-flask/debughelpers.py,sha256=1ceC-UyqZTd4KsJkf0OObHPsVt5R3T6vnmYhiWBjV-w,6479
-flask/globals.py,sha256=pGg72QW_-4xUfsI33I5L_y76c21AeqfSqXDcbd8wvXU,1649
-flask/helpers.py,sha256=YCl8D1plTO1evEYP4KIgaY3H8Izww5j4EdgRJ89oHTw,40106
-flask/logging.py,sha256=qV9h0vt7NIRkKM9OHDWndzO61E5CeBMlqPJyTt-W2Wc,2231
-flask/sessions.py,sha256=2XHV4ASREhSEZ8bsPQW6pNVNuFtbR-04BzfKg0AfvHo,14452
-flask/signals.py,sha256=BGQbVyCYXnzKK2DVCzppKFyWN1qmrtW1QMAYUs-1Nr8,2211
-flask/templating.py,sha256=FDfWMbpgpC3qObW8GGXRAVrkHFF8K4CHOJymB1wvULI,4914
-flask/testing.py,sha256=XD3gWNvLUV8dqVHwKd9tZzsj81fSHtjOphQ1wTNtlMs,9379
-flask/views.py,sha256=Wy-_WkUVtCfE2zCXYeJehNgHuEtviE4v3HYfJ--MpbY,5733
-flask/wrappers.py,sha256=1Z9hF5-hXQajn_58XITQFRY8efv3Vy3uZ0avBfZu6XI,7511
-flask/json/__init__.py,sha256=Ns1Hj805XIxuBMh2z0dYnMVfb_KUgLzDmP3WoUYaPhw,10729
-flask/json/tag.py,sha256=9ehzrmt5k7hxf7ZEK0NOs3swvQyU9fWNe-pnYe69N60,8223
-../../../bin/flask,sha256=uljvjs5jUYbuYpdLAIC-GrbC0uGaux9EkKi8G8l_0Sk,273
-Flask-1.0.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-flask/__pycache__/testing.cpython-36.pyc,,
-flask/__pycache__/helpers.cpython-36.pyc,,
-flask/__pycache__/_compat.cpython-36.pyc,,
-flask/__pycache__/blueprints.cpython-36.pyc,,
-flask/__pycache__/wrappers.cpython-36.pyc,,
-flask/__pycache__/config.cpython-36.pyc,,
-flask/__pycache__/globals.cpython-36.pyc,,
-flask/__pycache__/views.cpython-36.pyc,,
-flask/__pycache__/__main__.cpython-36.pyc,,
-flask/__pycache__/ctx.cpython-36.pyc,,
-flask/__pycache__/app.cpython-36.pyc,,
-flask/__pycache__/sessions.cpython-36.pyc,,
-flask/__pycache__/__init__.cpython-36.pyc,,
-flask/__pycache__/logging.cpython-36.pyc,,
-flask/__pycache__/templating.cpython-36.pyc,,
-flask/__pycache__/signals.cpython-36.pyc,,
-flask/__pycache__/debughelpers.cpython-36.pyc,,
-flask/__pycache__/cli.cpython-36.pyc,,
-flask/json/__pycache__/tag.cpython-36.pyc,,
-flask/json/__pycache__/__init__.cpython-36.pyc,,
diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask-1.0.2.dist-info/WHEEL b/flo-token-explorer/lib/python3.6/site-packages/Flask-1.0.2.dist-info/WHEEL
deleted file mode 100644
index f21b51c..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/Flask-1.0.2.dist-info/WHEEL
+++ /dev/null
@@ -1,6 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.31.0)
-Root-Is-Purelib: true
-Tag: py2-none-any
-Tag: py3-none-any
-
diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask-1.0.2.dist-info/entry_points.txt b/flo-token-explorer/lib/python3.6/site-packages/Flask-1.0.2.dist-info/entry_points.txt
deleted file mode 100644
index 1eb0252..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/Flask-1.0.2.dist-info/entry_points.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-[console_scripts]
-flask = flask.cli:main
-
diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask-1.0.2.dist-info/top_level.txt b/flo-token-explorer/lib/python3.6/site-packages/Flask-1.0.2.dist-info/top_level.txt
deleted file mode 100644
index 7e10602..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/Flask-1.0.2.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-flask
diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_Migrate-2.4.0.dist-info/INSTALLER b/flo-token-explorer/lib/python3.6/site-packages/Flask_Migrate-2.4.0.dist-info/INSTALLER
deleted file mode 100644
index a1b589e..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/Flask_Migrate-2.4.0.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_Migrate-2.4.0.dist-info/LICENSE b/flo-token-explorer/lib/python3.6/site-packages/Flask_Migrate-2.4.0.dist-info/LICENSE
deleted file mode 100644
index 2448fd2..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/Flask_Migrate-2.4.0.dist-info/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 Miguel Grinberg
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_Migrate-2.4.0.dist-info/METADATA b/flo-token-explorer/lib/python3.6/site-packages/Flask_Migrate-2.4.0.dist-info/METADATA deleted file mode 100644 index 016e668..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Flask_Migrate-2.4.0.dist-info/METADATA +++ /dev/null @@ -1,29 +0,0 @@ -Metadata-Version: 2.1 -Name: Flask-Migrate -Version: 2.4.0 -Summary: SQLAlchemy database migrations for Flask applications using Alembic -Home-page: http://github.com/miguelgrinberg/flask-migrate/ -Author: Miguel Grinberg -Author-email: miguelgrinberg50@gmail.com -License: MIT -Platform: any -Classifier: Environment :: Web Environment -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 3 -Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Requires-Dist: Flask (>=0.9) -Requires-Dist: Flask-SQLAlchemy (>=1.0) -Requires-Dist: alembic (>=0.7) - - -Flask-Migrate --------------- - -SQLAlchemy database migrations for Flask applications using Alembic. - - diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_Migrate-2.4.0.dist-info/RECORD b/flo-token-explorer/lib/python3.6/site-packages/Flask_Migrate-2.4.0.dist-info/RECORD deleted file mode 100644 index 0e0fbfc..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Flask_Migrate-2.4.0.dist-info/RECORD +++ /dev/null @@ -1,21 +0,0 @@ -flask_migrate/__init__.py,sha256=Z8xs6oPoj7bkGQeYBWB-hwaFFfmTfMcwes77O9QmvHg,19308 -flask_migrate/cli.py,sha256=ZdLHsiedXuyqCq-Ey0fLqPSMi8fb1xJm97pWx_f_Ff4,9598 -flask_migrate/templates/flask/README,sha256=MVlc9TYmr57RbhXET6QxgyCcwWP7w-vLkEsirENqiIQ,38 -flask_migrate/templates/flask/alembic.ini.mako,sha256=zQU53x-FQXAbtuOxp3_hgtsEZK8M0Unkw9F_uMSBEDc,770 -flask_migrate/templates/flask/env.py,sha256=MPAdo_Jedak46J5hjtji39oGAOMRwVR3iCTQB5LYLTw,2908 -flask_migrate/templates/flask/script.py.mako,sha256=8_xgA-gm_OhehnO7CiIijWgnm00ZlszEHtIHrAYFJl0,494 -flask_migrate/templates/flask-multidb/README,sha256=MVlc9TYmr57RbhXET6QxgyCcwWP7w-vLkEsirENqiIQ,38 -flask_migrate/templates/flask-multidb/alembic.ini.mako,sha256=zQU53x-FQXAbtuOxp3_hgtsEZK8M0Unkw9F_uMSBEDc,770 -flask_migrate/templates/flask-multidb/env.py,sha256=RzPgH9uePePCsUo5kbQsiPVe5184Yd3qXGearIJZF5A,5580 -flask_migrate/templates/flask-multidb/script.py.mako,sha256=lVwJ36kfy6N1gRW7Lepg5EjXQ6Ouar4GTSBHcHXYHbs,965 -Flask_Migrate-2.4.0.dist-info/LICENSE,sha256=kfkXGlJQvKy3Y__6tAJ8ynIp1HQfeROXhL8jZU1d-DI,1082 -Flask_Migrate-2.4.0.dist-info/METADATA,sha256=axbSac7kVxuHE19O4BmXC0pYAiAXno6yz8i6DuBY6fg,946 -Flask_Migrate-2.4.0.dist-info/WHEEL,sha256=8T8fxefr_r-A79qbOJ9d_AaEgkpCGmEPHc-gpCq5BRg,110 -Flask_Migrate-2.4.0.dist-info/entry_points.txt,sha256=KIMh5vVHpfcQw9lq5G7y7cVhHgS-0DdbmIS8X7mnrzI,44 -Flask_Migrate-2.4.0.dist-info/top_level.txt,sha256=jLoPgiMG6oR4ugNteXn3IHskVVIyIXVStZOVq-AWLdU,14 -Flask_Migrate-2.4.0.dist-info/RECORD,, -Flask_Migrate-2.4.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -flask_migrate/templates/flask/__pycache__/env.cpython-36.pyc,, -flask_migrate/templates/flask-multidb/__pycache__/env.cpython-36.pyc,, -flask_migrate/__pycache__/__init__.cpython-36.pyc,, -flask_migrate/__pycache__/cli.cpython-36.pyc,, diff --git 
a/flo-token-explorer/lib/python3.6/site-packages/Flask_Migrate-2.4.0.dist-info/WHEEL b/flo-token-explorer/lib/python3.6/site-packages/Flask_Migrate-2.4.0.dist-info/WHEEL deleted file mode 100644 index 1001235..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Flask_Migrate-2.4.0.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.32.1) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_Migrate-2.4.0.dist-info/entry_points.txt b/flo-token-explorer/lib/python3.6/site-packages/Flask_Migrate-2.4.0.dist-info/entry_points.txt deleted file mode 100644 index f15410e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Flask_Migrate-2.4.0.dist-info/entry_points.txt +++ /dev/null @@ -1,3 +0,0 @@ -[flask.commands] -db = flask_migrate.cli:db - diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_Migrate-2.4.0.dist-info/top_level.txt b/flo-token-explorer/lib/python3.6/site-packages/Flask_Migrate-2.4.0.dist-info/top_level.txt deleted file mode 100644 index 0652762..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Flask_Migrate-2.4.0.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -flask_migrate diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/DESCRIPTION.rst b/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/DESCRIPTION.rst deleted file mode 100644 index c9db5b5..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,15 +0,0 @@ - -Flask-SQLAlchemy ----------------- - -Adds SQLAlchemy support to your Flask application. - -Links -````` - -* `documentation `_ -* `development version - `_ - - - diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/INSTALLER b/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/LICENSE.txt b/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/LICENSE.txt deleted file mode 100644 index 49fcac3..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/LICENSE.txt +++ /dev/null @@ -1,31 +0,0 @@ -Copyright (c) 2014 by Armin Ronacher. - -Some rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -* Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - -* The names of the contributors may not be used to endorse or - promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/METADATA b/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/METADATA deleted file mode 100644 index 4d94cbd..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/METADATA +++ /dev/null @@ -1,43 +0,0 @@ -Metadata-Version: 2.0 -Name: Flask-SQLAlchemy -Version: 2.3.2 -Summary: Adds SQLAlchemy support to your Flask application -Home-page: http://github.com/mitsuhiko/flask-sqlalchemy -Author: Phil Howell -Author-email: phil@quae.co.uk -License: BSD -Description-Content-Type: UNKNOWN -Platform: any -Classifier: Environment :: Web Environment -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Requires-Dist: Flask (>=0.10) -Requires-Dist: SQLAlchemy (>=0.8.0) - - -Flask-SQLAlchemy ----------------- - -Adds SQLAlchemy support to your Flask application. 
- -Links -````` - -* `documentation `_ -* `development version - `_ - - - diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/RECORD b/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/RECORD deleted file mode 100644 index b3d1349..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/RECORD +++ /dev/null @@ -1,14 +0,0 @@ -Flask_SQLAlchemy-2.3.2.dist-info/DESCRIPTION.rst,sha256=Mp4bpckSjf082xflOARFwzWLTnUszq7JxcY0dR9vD2w,273 -Flask_SQLAlchemy-2.3.2.dist-info/LICENSE.txt,sha256=2smrI3hNiP6c5TcX0fa6fqODgsdJVLC166X0kVxei9A,1457 -Flask_SQLAlchemy-2.3.2.dist-info/METADATA,sha256=iDXuOIujwz5MXBrH-I4WsW7kLKsY07feI7hggFHFfEk,1384 -Flask_SQLAlchemy-2.3.2.dist-info/RECORD,, -Flask_SQLAlchemy-2.3.2.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110 -Flask_SQLAlchemy-2.3.2.dist-info/metadata.json,sha256=VOw756wP14azHrBwNxHIfbYkK4DkEPrCaV6Kf0VO36E,1257 -Flask_SQLAlchemy-2.3.2.dist-info/top_level.txt,sha256=w2K4fNNoTh4HItoFfz2FRQShSeLcvHYrzU_sZov21QU,17 -flask_sqlalchemy/__init__.py,sha256=0ZyibSbbC_Q1x8Kemp_2s2-NCowd_-CRvLyE1dPfnvw,35991 -flask_sqlalchemy/_compat.py,sha256=6rFcZZ3kxvyeJUC_FyB62mG1saNU8iQthzWHLDcKPVE,1057 -flask_sqlalchemy/model.py,sha256=7CTvGxxKmLscwcwq9mVT5ny_w301QZvTVjSqMoMx6DI,4974 -Flask_SQLAlchemy-2.3.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -flask_sqlalchemy/__pycache__/_compat.cpython-36.pyc,, -flask_sqlalchemy/__pycache__/model.cpython-36.pyc,, -flask_sqlalchemy/__pycache__/__init__.cpython-36.pyc,, diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/WHEEL b/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/WHEEL deleted file mode 100644 index 7332a41..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.30.0) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/metadata.json b/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/metadata.json deleted file mode 100644 index 96339bd..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"classifiers": ["Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6"], "description_content_type": "UNKNOWN", "extensions": {"python.details": {"contacts": [{"email": "phil@quae.co.uk", "name": "Phil Howell", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "http://github.com/mitsuhiko/flask-sqlalchemy"}}}, "extras": [], "generator": "bdist_wheel (0.30.0)", "license": "BSD", "metadata_version": "2.0", "name": "Flask-SQLAlchemy", "platform": "any", 
"run_requires": [{"requires": ["Flask (>=0.10)", "SQLAlchemy (>=0.8.0)"]}], "summary": "Adds SQLAlchemy support to your Flask application", "version": "2.3.2"} \ No newline at end of file diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/top_level.txt b/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/top_level.txt deleted file mode 100644 index 8a5538e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Flask_SQLAlchemy-2.3.2.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -flask_sqlalchemy diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/DESCRIPTION.rst b/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/DESCRIPTION.rst deleted file mode 100644 index 7d7eef7..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,21 +0,0 @@ -Flask-WTF -========= - -.. image:: https://travis-ci.org/lepture/flask-wtf.svg?branch=master - :target: https://travis-ci.org/lepture/flask-wtf - :alt: Travis CI Status -.. image:: https://coveralls.io/repos/lepture/flask-wtf/badge.svg?branch=master - :target: https://coveralls.io/r/lepture/flask-wtf - :alt: Coverage Status - -Simple integration of Flask and WTForms, including CSRF, file upload, -and reCAPTCHA. - -Links ------ - -* `Documentation `_ -* `PyPI `_ -* `GitHub `_ - - diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/INSTALLER b/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/LICENSE.txt b/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/LICENSE.txt deleted file mode 100644 index 5cbad1a..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/LICENSE.txt +++ /dev/null @@ -1,32 +0,0 @@ -Copyright (c) 2010 by Dan Jacob. -Copyright (c) 2013 by Hsiaoming Yang. - -Some rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -* Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - -* The names of the contributors may not be used to endorse or - promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/METADATA b/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/METADATA deleted file mode 100644 index 8dd02a4..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/METADATA +++ /dev/null @@ -1,52 +0,0 @@ -Metadata-Version: 2.0 -Name: Flask-WTF -Version: 0.14.2 -Summary: Simple integration of Flask and WTForms. -Home-page: https://github.com/lepture/flask-wtf -Author: Hsiaoming Yang -Author-email: me@lepture.com -License: BSD -Platform: any -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Web Environment -Classifier: Framework :: Flask -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Requires-Dist: Flask -Requires-Dist: WTForms - -Flask-WTF -========= - -.. image:: https://travis-ci.org/lepture/flask-wtf.svg?branch=master - :target: https://travis-ci.org/lepture/flask-wtf - :alt: Travis CI Status -.. image:: https://coveralls.io/repos/lepture/flask-wtf/badge.svg?branch=master - :target: https://coveralls.io/r/lepture/flask-wtf - :alt: Coverage Status - -Simple integration of Flask and WTForms, including CSRF, file upload, -and reCAPTCHA. 
- -Links ------ - -* `Documentation `_ -* `PyPI `_ -* `GitHub `_ - - diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/RECORD b/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/RECORD deleted file mode 100644 index b0122b0..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/RECORD +++ /dev/null @@ -1,30 +0,0 @@ -Flask_WTF-0.14.2.dist-info/DESCRIPTION.rst,sha256=vyJWnOD4vgnZ6x2ERr5EH1l2uzLxXCBhr_O1L6Ell2E,584 -Flask_WTF-0.14.2.dist-info/LICENSE.txt,sha256=oHX42YrP2wXdmHFiQrniwbOrmHIpJrPEz2yRasFOg1A,1490 -Flask_WTF-0.14.2.dist-info/METADATA,sha256=M8ZfImxUciRZ5Av5r1x37JnEC3wG5sacQv346wmldHU,1846 -Flask_WTF-0.14.2.dist-info/RECORD,, -Flask_WTF-0.14.2.dist-info/WHEEL,sha256=5wvfB7GvgZAbKBSE9uX9Zbi6LCL-_KgezgHblXhCRnM,113 -Flask_WTF-0.14.2.dist-info/metadata.json,sha256=qGwhg5DSr2WilK8cvCcQsdrtDJ5NFgR1faLrO8YZCAY,1370 -Flask_WTF-0.14.2.dist-info/top_level.txt,sha256=zK3flQPSjYTkAMjB0V6Jhu3jyotC0biL1mMhzitYoog,10 -flask_wtf/__init__.py,sha256=zNLRzvfi7PLTc7jkqQT7pzgtsw9_9eN7BfO4fzwKxJc,406 -flask_wtf/_compat.py,sha256=4h1U_W5vbM9L8sJ4ZPFevuneM1TirnBTTVrsHRH3uUE,849 -flask_wtf/csrf.py,sha256=suKAZarzLIBuiJFqwP--RldEYabPj0DGfYkQA32Cc1E,11554 -flask_wtf/file.py,sha256=2UnODjSq47IjsFQMiu_z218vFA5pnQ9nL1FpX7hpK1M,2971 -flask_wtf/form.py,sha256=lpx-ItUnKjYOW8VxQpBAlbhoROJNd2PHi3v0loPPyYI,4948 -flask_wtf/html5.py,sha256=ReZHJto8DAZkO3BxUDdHnkyz5mM21KtqKYh0achJ5IM,372 -flask_wtf/i18n.py,sha256=xMB_jHCOaWfF1RXm7E6hsRHwPsUyVyKX2Rhy3tBOUgk,1790 -flask_wtf/recaptcha/__init__.py,sha256=q3TC7tZPSAZ3On3GApZKGn0EcydX4zprisbyTlhN3sQ,86 -flask_wtf/recaptcha/fields.py,sha256=kN_10iZYQcYg1EtxFp4B87BlFnnrJCktrh7bTykOVj4,453 -flask_wtf/recaptcha/validators.py,sha256=8UgjA72OxUyHVk_lm8-fGhPEvKgkMtsoFNt7yzjo0xw,2398 -flask_wtf/recaptcha/widgets.py,sha256=me-oaqMNPW2BLujNTuDHCXWcVhh6eI7wlm6_TIrIF_U,1267 -Flask_WTF-0.14.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -flask_wtf/recaptcha/__pycache__/__init__.cpython-36.pyc,, -flask_wtf/recaptcha/__pycache__/fields.cpython-36.pyc,, -flask_wtf/recaptcha/__pycache__/validators.cpython-36.pyc,, -flask_wtf/recaptcha/__pycache__/widgets.cpython-36.pyc,, -flask_wtf/__pycache__/_compat.cpython-36.pyc,, -flask_wtf/__pycache__/form.cpython-36.pyc,, -flask_wtf/__pycache__/i18n.cpython-36.pyc,, -flask_wtf/__pycache__/html5.cpython-36.pyc,, -flask_wtf/__pycache__/file.cpython-36.pyc,, -flask_wtf/__pycache__/csrf.cpython-36.pyc,, -flask_wtf/__pycache__/__init__.cpython-36.pyc,, diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/WHEEL b/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/WHEEL deleted file mode 100644 index 7bf9daa..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.30.0.a0) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/metadata.json b/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/metadata.json deleted file mode 100644 index d48bac6..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Framework :: Flask", "Intended Audience :: Developers", "License :: OSI Approved :: 
BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules"], "extensions": {"python.details": {"contacts": [{"email": "me@lepture.com", "name": "Hsiaoming Yang", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "https://github.com/lepture/flask-wtf"}}}, "extras": [], "generator": "bdist_wheel (0.30.0.a0)", "license": "BSD", "metadata_version": "2.0", "name": "Flask-WTF", "platform": "any", "run_requires": [{"requires": ["Flask", "WTForms"]}], "summary": "Simple integration of Flask and WTForms.", "version": "0.14.2"} \ No newline at end of file diff --git a/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/top_level.txt b/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/top_level.txt deleted file mode 100644 index 716f422..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Flask_WTF-0.14.2.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -flask_wtf diff --git a/flo-token-explorer/lib/python3.6/site-packages/Jinja2-2.10.1.dist-info/INSTALLER b/flo-token-explorer/lib/python3.6/site-packages/Jinja2-2.10.1.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Jinja2-2.10.1.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/flo-token-explorer/lib/python3.6/site-packages/Jinja2-2.10.1.dist-info/LICENSE b/flo-token-explorer/lib/python3.6/site-packages/Jinja2-2.10.1.dist-info/LICENSE deleted file mode 100644 index 10145a2..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Jinja2-2.10.1.dist-info/LICENSE +++ /dev/null @@ -1,31 +0,0 @@ -Copyright (c) 2009 by the Jinja Team, see AUTHORS for more details. - -Some rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * The names of the contributors may not be used to endorse or - promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/flo-token-explorer/lib/python3.6/site-packages/Jinja2-2.10.1.dist-info/METADATA b/flo-token-explorer/lib/python3.6/site-packages/Jinja2-2.10.1.dist-info/METADATA
deleted file mode 100644
index fb4a867..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/Jinja2-2.10.1.dist-info/METADATA
+++ /dev/null
@@ -1,67 +0,0 @@
-Metadata-Version: 2.1
-Name: Jinja2
-Version: 2.10.1
-Summary: A small but fast and easy to use stand-alone template engine written in pure python.
-Home-page: http://jinja.pocoo.org/
-Author: Armin Ronacher
-Author-email: armin.ronacher@active-4.com
-License: BSD
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Environment :: Web Environment
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: BSD License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Classifier: Topic :: Text Processing :: Markup :: HTML
-Requires-Dist: MarkupSafe (>=0.23)
-Provides-Extra: i18n
-Requires-Dist: Babel (>=0.8) ; extra == 'i18n'
-
-
-Jinja2
-~~~~~~
-
-Jinja2 is a template engine written in pure Python. It provides a
-`Django`_ inspired non-XML syntax but supports inline expressions and
-an optional `sandboxed`_ environment.
-
-Nutshell
---------
-
-Here is a small example of a Jinja template::
-
-    {% extends 'base.html' %}
-    {% block title %}Memberlist{% endblock %}
-    {% block content %}
-      <ul>
-      {% for user in users %}
-        <li><a href="{{ user.url }}">{{ user.username }}</a></li>
-      {% endfor %}
-      </ul>
-    {% endblock %}
-
-Philosophy
-----------
-
-Application logic is for the controller, but don't make life too hard
-for the template designer by giving them too little functionality.
-
-For more information visit the new `Jinja2 webpage`_ and `documentation`_.
-
-.. _sandboxed: https://en.wikipedia.org/wiki/Sandbox_(computer_security)
-.. _Django: https://www.djangoproject.com/
-.. _Jinja2 webpage: http://jinja.pocoo.org/
-..
_documentation: http://jinja.pocoo.org/2/documentation/ - - diff --git a/flo-token-explorer/lib/python3.6/site-packages/Jinja2-2.10.1.dist-info/RECORD b/flo-token-explorer/lib/python3.6/site-packages/Jinja2-2.10.1.dist-info/RECORD deleted file mode 100644 index 5cbdd18..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Jinja2-2.10.1.dist-info/RECORD +++ /dev/null @@ -1,61 +0,0 @@ -jinja2/__init__.py,sha256=V1D-JHQKklZseXOMA-uAW7-BeKe_TfPpOFi9-dV04ZA,2616 -jinja2/_compat.py,sha256=xP60CE5Qr8FTYcDE1f54tbZLKGvMwYml4-8T7Q4KG9k,2596 -jinja2/_identifier.py,sha256=W1QBSY-iJsyt6oR_nKSuNNCzV95vLIOYgUNPUI1d5gU,1726 -jinja2/asyncfilters.py,sha256=cTDPvrS8Hp_IkwsZ1m9af_lr5nHysw7uTa5gV0NmZVE,4144 -jinja2/asyncsupport.py,sha256=UErQ3YlTLaSjFb94P4MVn08-aVD9jJxty2JVfMRb-1M,7878 -jinja2/bccache.py,sha256=nQldx0ZRYANMyfvOihRoYFKSlUdd5vJkS7BjxNwlOZM,12794 -jinja2/compiler.py,sha256=BqC5U6JxObSRhblyT_a6Tp5GtEU5z3US1a4jLQaxxgo,65386 -jinja2/constants.py,sha256=uwwV8ZUhHhacAuz5PTwckfsbqBaqM7aKfyJL7kGX5YQ,1626 -jinja2/debug.py,sha256=WTVeUFGUa4v6ReCsYv-iVPa3pkNB75OinJt3PfxNdXs,12045 -jinja2/defaults.py,sha256=Em-95hmsJxIenDCZFB1YSvf9CNhe9rBmytN3yUrBcWA,1400 -jinja2/environment.py,sha256=VnkAkqw8JbjZct4tAyHlpBrka2vqB-Z58RAP-32P1ZY,50849 -jinja2/exceptions.py,sha256=_Rj-NVi98Q6AiEjYQOsP8dEIdu5AlmRHzcSNOPdWix4,4428 -jinja2/ext.py,sha256=atMQydEC86tN1zUsdQiHw5L5cF62nDbqGue25Yiu3N4,24500 -jinja2/filters.py,sha256=yOAJk0MsH-_gEC0i0U6NweVQhbtYaC-uE8xswHFLF4w,36528 -jinja2/idtracking.py,sha256=2GbDSzIvGArEBGLkovLkqEfmYxmWsEf8c3QZwM4uNsw,9197 -jinja2/lexer.py,sha256=ySEPoXd1g7wRjsuw23uimS6nkGN5aqrYwcOKxCaVMBQ,28559 -jinja2/loaders.py,sha256=xiTuURKAEObyym0nU8PCIXu_Qp8fn0AJ5oIADUUm-5Q,17382 -jinja2/meta.py,sha256=fmKHxkmZYAOm9QyWWy8EMd6eefAIh234rkBMW2X4ZR8,4340 -jinja2/nativetypes.py,sha256=_sJhS8f-8Q0QMIC0dm1YEdLyxEyoO-kch8qOL5xUDfE,7308 -jinja2/nodes.py,sha256=L10L_nQDfubLhO3XjpF9qz46FSh2clL-3e49ogVlMmA,30853 -jinja2/optimizer.py,sha256=MsdlFACJ0FRdPtjmCAdt7JQ9SGrXFaDNUaslsWQaG3M,1722 -jinja2/parser.py,sha256=lPzTEbcpTRBLw8ii6OYyExHeAhaZLMA05Hpv4ll3ULk,35875 -jinja2/runtime.py,sha256=DHdD38Pq8gj7uWQC5usJyWFoNWL317A9AvXOW_CLB34,27755 -jinja2/sandbox.py,sha256=UmX8hVjdaERCbA3RXBwrV1f-beA23KmijG5kzPJyU4A,17106 -jinja2/tests.py,sha256=iJQLwbapZr-EKquTG_fVOVdwHUUKf3SX9eNkjQDF8oU,4237 -jinja2/utils.py,sha256=q24VupGZotQ-uOyrJxCaXtDWhZC1RgsQG7kcdmjck2Q,20629 -jinja2/visitor.py,sha256=JD1H1cANA29JcntFfN5fPyqQxB4bI4wC00BzZa-XHks,3316 -Jinja2-2.10.1.dist-info/LICENSE,sha256=JvzUNv3Io51EiWrAPm8d_SXjhJnEjyDYvB3Tvwqqils,1554 -Jinja2-2.10.1.dist-info/METADATA,sha256=rx0eN8lX8iq8-YVppmCzV1Qx4y3Pj9IWi08mXUCrewI,2227 -Jinja2-2.10.1.dist-info/WHEEL,sha256=HX-v9-noUkyUoxyZ1PMSuS7auUxDAR4VBdoYLqD0xws,110 -Jinja2-2.10.1.dist-info/entry_points.txt,sha256=NdzVcOrqyNyKDxD09aERj__3bFx2paZhizFDsKmVhiA,72 -Jinja2-2.10.1.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7 -Jinja2-2.10.1.dist-info/RECORD,, -Jinja2-2.10.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -jinja2/__pycache__/visitor.cpython-36.pyc,, -jinja2/__pycache__/nativetypes.cpython-36.pyc,, -jinja2/__pycache__/constants.cpython-36.pyc,, -jinja2/__pycache__/defaults.cpython-36.pyc,, -jinja2/__pycache__/lexer.cpython-36.pyc,, -jinja2/__pycache__/_compat.cpython-36.pyc,, -jinja2/__pycache__/environment.cpython-36.pyc,, -jinja2/__pycache__/ext.cpython-36.pyc,, -jinja2/__pycache__/tests.cpython-36.pyc,, -jinja2/__pycache__/idtracking.cpython-36.pyc,, -jinja2/__pycache__/nodes.cpython-36.pyc,, 
-jinja2/__pycache__/asyncfilters.cpython-36.pyc,, -jinja2/__pycache__/asyncsupport.cpython-36.pyc,, -jinja2/__pycache__/optimizer.cpython-36.pyc,, -jinja2/__pycache__/runtime.cpython-36.pyc,, -jinja2/__pycache__/meta.cpython-36.pyc,, -jinja2/__pycache__/parser.cpython-36.pyc,, -jinja2/__pycache__/loaders.cpython-36.pyc,, -jinja2/__pycache__/filters.cpython-36.pyc,, -jinja2/__pycache__/compiler.cpython-36.pyc,, -jinja2/__pycache__/__init__.cpython-36.pyc,, -jinja2/__pycache__/sandbox.cpython-36.pyc,, -jinja2/__pycache__/bccache.cpython-36.pyc,, -jinja2/__pycache__/utils.cpython-36.pyc,, -jinja2/__pycache__/_identifier.cpython-36.pyc,, -jinja2/__pycache__/debug.cpython-36.pyc,, -jinja2/__pycache__/exceptions.cpython-36.pyc,, diff --git a/flo-token-explorer/lib/python3.6/site-packages/Jinja2-2.10.1.dist-info/WHEEL b/flo-token-explorer/lib/python3.6/site-packages/Jinja2-2.10.1.dist-info/WHEEL deleted file mode 100644 index c8240f0..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Jinja2-2.10.1.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.33.1) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/flo-token-explorer/lib/python3.6/site-packages/Jinja2-2.10.1.dist-info/entry_points.txt b/flo-token-explorer/lib/python3.6/site-packages/Jinja2-2.10.1.dist-info/entry_points.txt deleted file mode 100644 index 32e6b75..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Jinja2-2.10.1.dist-info/entry_points.txt +++ /dev/null @@ -1,4 +0,0 @@ - - [babel.extractors] - jinja2 = jinja2.ext:babel_extract[i18n] - \ No newline at end of file diff --git a/flo-token-explorer/lib/python3.6/site-packages/Jinja2-2.10.1.dist-info/top_level.txt b/flo-token-explorer/lib/python3.6/site-packages/Jinja2-2.10.1.dist-info/top_level.txt deleted file mode 100644 index 7f7afbf..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Jinja2-2.10.1.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -jinja2 diff --git a/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/AUTHORS b/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/AUTHORS deleted file mode 100644 index 81d16dc..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/AUTHORS +++ /dev/null @@ -1,13 +0,0 @@ -Mako was created by Michael Bayer. - -Major contributing authors include: - -- Michael Bayer -- Geoffrey T. Dairiki -- Philip Jenvey -- David Peckam -- Armin Ronacher -- Ben Bangert -- Ben Trofatter - - diff --git a/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/INSTALLER b/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/LICENSE b/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/LICENSE deleted file mode 100644 index 86543fc..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -This is the MIT license: http://www.opensource.org/licenses/mit-license.php - -Copyright (C) 2006-2016 the Mako authors and contributors . -Mako is a trademark of Michael Bayer. 
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this
-software and associated documentation files (the "Software"), to deal in the Software
-without restriction, including without limitation the rights to use, copy, modify, merge,
-publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
-to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or
-substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
-INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
-PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
-FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
diff --git a/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/METADATA b/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/METADATA
deleted file mode 100644
index 7f726ce..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/METADATA
+++ /dev/null
@@ -1,75 +0,0 @@
-Metadata-Version: 2.1
-Name: Mako
-Version: 1.0.9
-Summary: A super-fast templating language that borrows the best ideas from the existing templating languages.
-Home-page: https://www.makotemplates.org/
-Author: Mike Bayer
-Author-email: mike@zzzcomputing.com
-License: MIT
-Keywords: templates
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Environment :: Web Environment
-Classifier: Intended Audience :: Developers
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: Implementation :: CPython
-Classifier: Programming Language :: Python :: Implementation :: PyPy
-Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
-Requires-Python: >=2.6
-Requires-Dist: MarkupSafe (>=0.9.2)
-
-=========================
-Mako Templates for Python
-=========================
-
-Mako is a template library written in Python. It provides a familiar, non-XML
-syntax which compiles into Python modules for maximum performance. Mako's
-syntax and API borrows from the best ideas of many others, including Django
-templates, Cheetah, Myghty, and Genshi. Conceptually, Mako is an embedded
-Python (i.e. Python Server Page) language, which refines the familiar ideas
-of componentized layout and inheritance to produce one of the most
-straightforward and flexible models available, while also maintaining close
-ties to Python calling and scoping semantics.
-
-Nutshell
-========
-
-::
-
-    <%inherit file="base.html"/>
-    <%
-        rows = [[v for v in range(0,10)] for row in range(0,10)]
-    %>
-    <table>
-        % for row in rows:
-            ${makerow(row)}
-        % endfor
-    </table>
-
-    <%def name="makerow(row)">
-        <tr>
-        % for name in row:
-            <td>${name}</td>\
-        % endfor
-        </tr>
-    </%def>
-
-Philosophy
-===========
-
-Python is a great scripting language. Don't reinvent the wheel... your templates can handle it!
-
-Documentation
-==============
-
-See documentation for Mako at https://docs.makotemplates.org/en/latest/
-
-License
-========
-
-Mako is licensed under an MIT-style license (see LICENSE).
-Other incorporated projects may be licensed under different licenses.
-All licenses allow for non-commercial and commercial use.
-
-
diff --git a/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/RECORD b/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/RECORD
deleted file mode 100644
index 8925210..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/RECORD
+++ /dev/null
@@ -1,61 +0,0 @@
-mako/__init__.py,sha256=ZdXqNtmcK-M7ucrydMwe0hb6BNkUnKpYYVNIJWujoo0,246
-mako/_ast_util.py,sha256=bdVQXQ0QZFgVhNATupTHHUi9AjIJHKHo8NsB1bhUPp4,25798
-mako/ast.py,sha256=mZROTb_qlDaW-yr8J3hYAvvq0lhQzPOzdJfnKS7UzPw,6635
-mako/cache.py,sha256=l0zOgcktKgmXa0pw0XxpX7q4KLRHXdt6-oIs1Ewyn8k,7739
-mako/cmd.py,sha256=cfHkwm0r467JSVxynhHI3mVpcwe1EEfaCOBbPuBvlE8,2288
-mako/codegen.py,sha256=CUC5vqJybKf0nwhd3BLx1me27YVMOIByzMZmk5FnKzs,48384
-mako/compat.py,sha256=uebS5YgFIoLIQWLNvuD2QG22kiTYn_ftAvG3L_aTCQc,4927
-mako/exceptions.py,sha256=SoJLw2XYVs6Y5c0sC8v-utsaKp8n8MPbe1zW7ZH2QqA,12454
-mako/filters.py,sha256=dhW_i6AaH6n4lMtN3zOgUYDhIUsfDKs1YgVSEDt18bQ,6003
-mako/lexer.py,sha256=zVjetXTkKdn-chSZcYdTFxfl7zSZkf6JxO02OtioL-o,16495
-mako/lookup.py,sha256=yEfmYwA5gyjvtVQJ1knoEhkLKqkHm7is3VUv99cJTtI,12952
-mako/parsetree.py,sha256=p_UkUOEIot53jMGLrxpfSp1d9F85HYaSIKLGNSlhDlg,19237
-mako/pygen.py,sha256=oksVR6_GiCW2nU-xCCjs5zgbMDayzgdY518mE7BXKks,9952
-mako/pyparser.py,sha256=6ekC6AtXAf5sbllIYhreCFOURz8axqGT8LmlaEEjRpQ,7642
-mako/runtime.py,sha256=SYWQQdwhvJpsEhNxCxWA5-pXcvvwqDTp_3AfK-InVmE,27905
-mako/template.py,sha256=o83q4QpA8UO2CUBu9g6h18HIs68wYmbG_adUfPbndjc,27347
-mako/util.py,sha256=09P9hmbum_55a84tETWfVRLPxGzB0zSe6IR97qhx0lc,10927
-mako/ext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mako/ext/autohandler.py,sha256=HGRo8oBg82tgd4LurpPxLcNqNa_6oR_W_HOM50hQdIs,1829
-mako/ext/babelplugin.py,sha256=hFsOHvvM81CEbUn9zqxNAt8n5wGISB82L9m8JOPI_XE,2079
-mako/ext/beaker_cache.py,sha256=09OPj9Cpe-jPDb744W9FovmzOT1qAJIoDVe_vfZ0D-M,2365
-mako/ext/extract.py,sha256=HOPywPXuNBll7-fu_hvF-hYzsNOief_gFJIVlrxEsr8,4261
-mako/ext/linguaplugin.py,sha256=LVtznhDPq4qI22F8EbEkENNEJSZYbt7esKiQlrgEadc,1663
-mako/ext/preprocessors.py,sha256=1VsCYm6hhAZiNt8ohERHHfjgK-bLTdaYCUxHGXroaew,580
-mako/ext/pygmentplugin.py,sha256=2wU1AzyCoLq8HxZMYq0L9EgBUe5Z8jkwcM2h5AmHCBs,4530
-mako/ext/turbogears.py,sha256=o1cwfNQYZZV2kKRA0xL7OGYnRHoGz2zRXflPNbBzSxg,2132
-Mako-1.0.9.dist-info/AUTHORS,sha256=Io2Vw70mjYS7yFcUuJxhIGiMUQt8FWJuxiiwyUW1WRg,282
-Mako-1.0.9.dist-info/LICENSE,sha256=hPb4eYmQI51pe21iYx3EApYtT7OJWVXqkm3OeVa69xs,1217
-Mako-1.0.9.dist-info/METADATA,sha256=Skq83gaOWk_JFGw9X9VuBi1kCsl9v0jGCDPvA1Y89mQ,2264
-Mako-1.0.9.dist-info/WHEEL,sha256=MTT8nte0XKevE9yJl-TwoUKDw_i34GiAL_IxFfS6kr8,93
-Mako-1.0.9.dist-info/entry_points.txt,sha256=pRPyt8V0Ss7h84ZQVCX9MkK0Xo5MClQWlQ67pJ4bGdU,586
-Mako-1.0.9.dist-info/top_level.txt,sha256=LItdH8cDPetpUu8rUyBG3DObS6h9Gcpr9j_WLj2S-R0,5
-Mako-1.0.9.dist-info/RECORD,,
-../../../bin/mako-render,sha256=pCIeS5G54ve_NAs-ViE2or6p5YT3VMfyGjccYl32ttw,278
-Mako-1.0.9.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-mako/__pycache__/cmd.cpython-36.pyc,, -mako/__pycache__/pygen.cpython-36.pyc,, -mako/__pycache__/lexer.cpython-36.pyc,, -mako/__pycache__/cache.cpython-36.pyc,, -mako/__pycache__/ast.cpython-36.pyc,, -mako/__pycache__/pyparser.cpython-36.pyc,, -mako/__pycache__/lookup.cpython-36.pyc,, -mako/__pycache__/compat.cpython-36.pyc,, -mako/__pycache__/util.cpython-36.pyc,, -mako/__pycache__/runtime.cpython-36.pyc,, -mako/__pycache__/filters.cpython-36.pyc,, -mako/__pycache__/parsetree.cpython-36.pyc,, -mako/__pycache__/codegen.cpython-36.pyc,, -mako/__pycache__/__init__.cpython-36.pyc,, -mako/__pycache__/template.cpython-36.pyc,, -mako/__pycache__/_ast_util.cpython-36.pyc,, -mako/__pycache__/exceptions.cpython-36.pyc,, -mako/ext/__pycache__/linguaplugin.cpython-36.pyc,, -mako/ext/__pycache__/babelplugin.cpython-36.pyc,, -mako/ext/__pycache__/beaker_cache.cpython-36.pyc,, -mako/ext/__pycache__/turbogears.cpython-36.pyc,, -mako/ext/__pycache__/autohandler.cpython-36.pyc,, -mako/ext/__pycache__/preprocessors.cpython-36.pyc,, -mako/ext/__pycache__/__init__.cpython-36.pyc,, -mako/ext/__pycache__/extract.cpython-36.pyc,, -mako/ext/__pycache__/pygmentplugin.cpython-36.pyc,, diff --git a/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/WHEEL b/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/WHEEL deleted file mode 100644 index 5b3c6fb..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.32.3) -Root-Is-Purelib: true -Tag: cp36-none-any - diff --git a/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/entry_points.txt b/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/entry_points.txt deleted file mode 100644 index 3b15006..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/entry_points.txt +++ /dev/null @@ -1,20 +0,0 @@ - - [python.templating.engines] - mako = mako.ext.turbogears:TGPlugin - - [pygments.lexers] - mako = mako.ext.pygmentplugin:MakoLexer - html+mako = mako.ext.pygmentplugin:MakoHtmlLexer - xml+mako = mako.ext.pygmentplugin:MakoXmlLexer - js+mako = mako.ext.pygmentplugin:MakoJavascriptLexer - css+mako = mako.ext.pygmentplugin:MakoCssLexer - - [babel.extractors] - mako = mako.ext.babelplugin:extract - - [lingua.extractors] - mako = mako.ext.linguaplugin:LinguaMakoExtractor - - [console_scripts] - mako-render = mako.cmd:cmdline - \ No newline at end of file diff --git a/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/top_level.txt b/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/top_level.txt deleted file mode 100644 index 2951cdd..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Mako-1.0.9.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -mako diff --git a/flo-token-explorer/lib/python3.6/site-packages/MarkupSafe-1.1.1.dist-info/INSTALLER b/flo-token-explorer/lib/python3.6/site-packages/MarkupSafe-1.1.1.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/MarkupSafe-1.1.1.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/flo-token-explorer/lib/python3.6/site-packages/MarkupSafe-1.1.1.dist-info/LICENSE.txt b/flo-token-explorer/lib/python3.6/site-packages/MarkupSafe-1.1.1.dist-info/LICENSE.txt deleted file mode 100644 index 9d227a0..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/MarkupSafe-1.1.1.dist-info/LICENSE.txt +++ 
/dev/null @@ -1,28 +0,0 @@ -Copyright 2010 Pallets - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/flo-token-explorer/lib/python3.6/site-packages/MarkupSafe-1.1.1.dist-info/METADATA b/flo-token-explorer/lib/python3.6/site-packages/MarkupSafe-1.1.1.dist-info/METADATA deleted file mode 100644 index b208d93..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/MarkupSafe-1.1.1.dist-info/METADATA +++ /dev/null @@ -1,103 +0,0 @@ -Metadata-Version: 2.1 -Name: MarkupSafe -Version: 1.1.1 -Summary: Safely add untrusted strings to HTML/XML markup. -Home-page: https://palletsprojects.com/p/markupsafe/ -Author: Armin Ronacher -Author-email: armin.ronacher@active-4.com -Maintainer: The Pallets Team -Maintainer-email: contact@palletsprojects.com -License: BSD-3-Clause -Project-URL: Documentation, https://markupsafe.palletsprojects.com/ -Project-URL: Code, https://github.com/pallets/markupsafe -Project-URL: Issue tracker, https://github.com/pallets/markupsafe/issues -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Web Environment -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: Text Processing :: Markup :: HTML -Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.* - -MarkupSafe -========== - -MarkupSafe implements a text object that escapes characters so it is -safe to use in HTML and XML. Characters that have special meanings are -replaced so that they display as the actual characters. 
This mitigates
-injection attacks, meaning untrusted user input can safely be displayed
-on a page.
-
-
-Installing
-----------
-
-Install and update using `pip`_:
-
-.. code-block:: text
-
-    pip install -U MarkupSafe
-
-.. _pip: https://pip.pypa.io/en/stable/quickstart/
-
-
-Examples
---------
-
-.. code-block:: pycon
-
-    >>> from markupsafe import Markup, escape
-    >>> # escape replaces special characters and wraps in Markup
-    >>> escape('<script>alert(document.cookie);</script>')
-    Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
-    >>> # wrap in Markup to mark text "safe" and prevent escaping
-    >>> Markup('<strong>Hello</strong>')
-    Markup('<strong>hello</strong>')
-    >>> escape(Markup('<strong>Hello</strong>'))
-    Markup('<strong>hello</strong>')
-    >>> # Markup is a text subclass (str on Python 3, unicode on Python 2)
-    >>> # methods and operators escape their arguments
-    >>> template = Markup("Hello <em>%s</em>")
-    >>> template % '"World"'
-    Markup('Hello <em>&#34;World&#34;</em>')
-
-
-Donate
-------
-
-The Pallets organization develops and supports MarkupSafe and other
-libraries that use it. In order to grow the community of contributors
-and users, and allow the maintainers to devote more time to the
-projects, `please donate today`_.
-
-.. _please donate today: https://palletsprojects.com/donate
-
-
-Links
------
-
-* Website: https://palletsprojects.com/p/markupsafe/
-* Documentation: https://markupsafe.palletsprojects.com/
-* License: `BSD-3-Clause <https://github.com/pallets/markupsafe/blob/master/LICENSE.rst>`_
-* Releases: https://pypi.org/project/MarkupSafe/
-* Code: https://github.com/pallets/markupsafe
-* Issue tracker: https://github.com/pallets/markupsafe/issues
-* Test status:
-
-  * Linux, Mac: https://travis-ci.org/pallets/markupsafe
-  * Windows: https://ci.appveyor.com/project/pallets/markupsafe
-
-* Test coverage: https://codecov.io/gh/pallets/markupsafe
-
-
diff --git a/flo-token-explorer/lib/python3.6/site-packages/MarkupSafe-1.1.1.dist-info/RECORD b/flo-token-explorer/lib/python3.6/site-packages/MarkupSafe-1.1.1.dist-info/RECORD
deleted file mode 100644
index 57ac82c..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/MarkupSafe-1.1.1.dist-info/RECORD
+++ /dev/null
@@ -1,16 +0,0 @@
-MarkupSafe-1.1.1.dist-info/LICENSE.txt,sha256=SJqOEQhQntmKN7uYPhHg9-HTHwvY-Zp5yESOf_N9B-o,1475
-MarkupSafe-1.1.1.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11
-MarkupSafe-1.1.1.dist-info/RECORD,,
-MarkupSafe-1.1.1.dist-info/METADATA,sha256=nJHwJ4_4ka-V39QH883jPrslj6inNdyyNASBXbYgHXQ,3570
-MarkupSafe-1.1.1.dist-info/WHEEL,sha256=d2ILPScH-y2UwGxsW1PeA2TT-KW0Git4AJ6LeOK8sQo,109
-markupsafe/_constants.py,sha256=zo2ajfScG-l1Sb_52EP3MlDCqO7Y1BVHUXXKRsVDRNk,4690
-markupsafe/_compat.py,sha256=uEW1ybxEjfxIiuTbRRaJpHsPFf4yQUMMKaPgYEC5XbU,558
-markupsafe/_native.py,sha256=d-8S_zzYt2y512xYcuSxq0NeG2DUUvG80wVdTn-4KI8,1873
-markupsafe/__init__.py,sha256=oTblO5f9KFM-pvnq9bB0HgElnqkJyqHnFN1Nx2NIvnY,10126
-markupsafe/_speedups.cpython-36m-x86_64-linux-gnu.so,sha256=YAxqjdtS2XJQ043wfxMAnE1u7KpsBc49UrW9yGPiK0w,38875
-markupsafe/_speedups.c,sha256=k0fzEIK3CP6MmMqeY0ob43TP90mVN0DTyn7BAl3RqSg,9884
-MarkupSafe-1.1.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-markupsafe/__pycache__/_native.cpython-36.pyc,,
-markupsafe/__pycache__/_compat.cpython-36.pyc,,
-markupsafe/__pycache__/_constants.cpython-36.pyc,,
-markupsafe/__pycache__/__init__.cpython-36.pyc,,
diff --git a/flo-token-explorer/lib/python3.6/site-packages/MarkupSafe-1.1.1.dist-info/WHEEL b/flo-token-explorer/lib/python3.6/site-packages/MarkupSafe-1.1.1.dist-info/WHEEL
deleted file mode 100644
index 92946fe..0000000
---
a/flo-token-explorer/lib/python3.6/site-packages/MarkupSafe-1.1.1.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.31.1) -Root-Is-Purelib: false -Tag: cp36-cp36m-manylinux1_x86_64 - diff --git a/flo-token-explorer/lib/python3.6/site-packages/MarkupSafe-1.1.1.dist-info/top_level.txt b/flo-token-explorer/lib/python3.6/site-packages/MarkupSafe-1.1.1.dist-info/top_level.txt deleted file mode 100644 index 75bf729..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/MarkupSafe-1.1.1.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -markupsafe diff --git a/flo-token-explorer/lib/python3.6/site-packages/SQLAlchemy-1.3.3.dist-info/INSTALLER b/flo-token-explorer/lib/python3.6/site-packages/SQLAlchemy-1.3.3.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/SQLAlchemy-1.3.3.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/flo-token-explorer/lib/python3.6/site-packages/SQLAlchemy-1.3.3.dist-info/LICENSE b/flo-token-explorer/lib/python3.6/site-packages/SQLAlchemy-1.3.3.dist-info/LICENSE deleted file mode 100644 index 86c0615..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/SQLAlchemy-1.3.3.dist-info/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -This is the MIT license: http://www.opensource.org/licenses/mit-license.php - -Copyright (c) 2005-2019 the SQLAlchemy authors and contributors . -SQLAlchemy is a trademark of Michael Bayer. - -Permission is hereby granted, free of charge, to any person obtaining a copy of this -software and associated documentation files (the "Software"), to deal in the Software -without restriction, including without limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons -to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, -INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE -FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
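
The MarkupSafe README removed above demonstrates escaping at the interactive
prompt. The same behavior as a short, self-contained script (a minimal sketch,
assuming the MarkupSafe 1.1.1 wheel vendored in this tree)::

    from markupsafe import Markup, escape

    # escape() replaces special characters and returns a Markup instance
    unsafe = '<script>alert(document.cookie);</script>'
    print(escape(unsafe))        # &lt;script&gt;alert(document.cookie);&lt;/script&gt;

    # Markup marks a string as already safe; escaping it again is a no-op
    safe = Markup('<strong>Hello</strong>')
    assert escape(safe) == safe

    # string operations on a Markup template escape their arguments
    template = Markup('Hello <em>%s</em>')
    print(template % '"World"')  # Hello <em>&#34;World&#34;</em>
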
diff --git a/flo-token-explorer/lib/python3.6/site-packages/SQLAlchemy-1.3.3.dist-info/METADATA b/flo-token-explorer/lib/python3.6/site-packages/SQLAlchemy-1.3.3.dist-info/METADATA deleted file mode 100644 index 1116077..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/SQLAlchemy-1.3.3.dist-info/METADATA +++ /dev/null @@ -1,191 +0,0 @@ -Metadata-Version: 2.1 -Name: SQLAlchemy -Version: 1.3.3 -Summary: Database Abstraction Library -Home-page: http://www.sqlalchemy.org -Author: Mike Bayer -Author-email: mike_mp@zzzcomputing.com -License: MIT License -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Classifier: Topic :: Database :: Front-Ends -Classifier: Operating System :: OS Independent -Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* -Provides-Extra: mssql -Requires-Dist: pyodbc ; extra == 'mssql' -Provides-Extra: mssql_pymssql -Requires-Dist: pymssql ; extra == 'mssql_pymssql' -Provides-Extra: mssql_pyodbc -Requires-Dist: pyodbc ; extra == 'mssql_pyodbc' -Provides-Extra: mysql -Requires-Dist: mysqlclient ; extra == 'mysql' -Provides-Extra: oracle -Requires-Dist: cx-oracle ; extra == 'oracle' -Provides-Extra: postgresql -Requires-Dist: psycopg2 ; extra == 'postgresql' -Provides-Extra: postgresql_pg8000 -Requires-Dist: pg8000 ; extra == 'postgresql_pg8000' -Provides-Extra: postgresql_psycopg2binary -Requires-Dist: psycopg2-binary ; extra == 'postgresql_psycopg2binary' -Provides-Extra: postgresql_psycopg2cffi -Requires-Dist: psycopg2cffi ; extra == 'postgresql_psycopg2cffi' -Provides-Extra: pymysql -Requires-Dist: pymysql ; extra == 'pymysql' - -SQLAlchemy -========== - -The Python SQL Toolkit and Object Relational Mapper - -Introduction -------------- - -SQLAlchemy is the Python SQL toolkit and Object Relational Mapper -that gives application developers the full power and -flexibility of SQL. SQLAlchemy provides a full suite -of well known enterprise-level persistence patterns, -designed for efficient and high-performing database -access, adapted into a simple and Pythonic domain -language. - -Major SQLAlchemy features include: - -* An industrial strength ORM, built - from the core on the identity map, unit of work, - and data mapper patterns. These patterns - allow transparent persistence of objects - using a declarative configuration system. - Domain models - can be constructed and manipulated naturally, - and changes are synchronized with the - current transaction automatically. -* A relationally-oriented query system, exposing - the full range of SQL's capabilities - explicitly, including joins, subqueries, - correlation, and most everything else, - in terms of the object model. - Writing queries with the ORM uses the same - techniques of relational composition you use - when writing SQL. While you can drop into - literal SQL at any time, it's virtually never - needed. 
-* A comprehensive and flexible system
-  of eager loading for related collections and objects.
-  Collections are cached within a session,
-  and can be loaded on individual access, all
-  at once using joins, or by query per collection
-  across the full result set.
-* A Core SQL construction system and DBAPI
-  interaction layer. The SQLAlchemy Core is
-  separate from the ORM and is a full database
-  abstraction layer in its own right, and includes
-  an extensible Python-based SQL expression
-  language, schema metadata, connection pooling,
-  type coercion, and custom types.
-* All primary and foreign key constraints are
-  assumed to be composite and natural. Surrogate
-  integer primary keys are of course still the
-  norm, but SQLAlchemy never assumes or hardcodes
-  to this model.
-* Database introspection and generation. Database
-  schemas can be "reflected" in one step into
-  Python structures representing database metadata;
-  those same structures can then generate
-  CREATE statements right back out - all within
-  the Core, independent of the ORM.
-
-SQLAlchemy's philosophy:
-
-* SQL databases behave less and less like object
-  collections the more size and performance start to
-  matter; object collections behave less and less like
-  tables and rows the more abstraction starts to matter.
-  SQLAlchemy aims to accommodate both of these
-  principles.
-* An ORM doesn't need to hide the "R". A relational
-  database provides rich, set-based functionality
-  that should be fully exposed. SQLAlchemy's
-  ORM provides an open-ended set of patterns
-  that allow a developer to construct a custom
-  mediation layer between a domain model and
-  a relational schema, turning the so-called
-  "object relational impedance" issue into
-  a distant memory.
-* The developer, in all cases, makes all decisions
-  regarding the design, structure, and naming conventions
-  of both the object model as well as the relational
-  schema. SQLAlchemy only provides the means
-  to automate the execution of these decisions.
-* With SQLAlchemy, there's no such thing as
-  "the ORM generated a bad query" - you
-  retain full control over the structure of
-  queries, including how joins are organized,
-  how subqueries and correlation is used, what
-  columns are requested. Everything SQLAlchemy
-  does is ultimately the result of a developer-
-  initiated decision.
-* Don't use an ORM if the problem doesn't need one.
-  SQLAlchemy consists of a Core and separate ORM
-  component. The Core offers a full SQL expression
-  language that allows Pythonic construction
-  of SQL constructs that render directly to SQL
-  strings for a target database, returning
-  result sets that are essentially enhanced DBAPI
-  cursors.
-* Transactions should be the norm. With SQLAlchemy's
-  ORM, nothing goes to permanent storage until
-  commit() is called. SQLAlchemy encourages applications
-  to create a consistent means of delineating
-  the start and end of a series of operations.
-* Never render a literal value in a SQL statement.
-  Bound parameters are used to the greatest degree
-  possible, allowing query optimizers to cache
-  query plans effectively and making SQL injection
-  attacks a non-issue.
-
-Documentation
--------------
-
-Latest documentation is at:
-
-http://www.sqlalchemy.org/docs/
-
-Installation / Requirements
----------------------------
-
-Full documentation for installation is at
-`Installation <http://docs.sqlalchemy.org/en/latest/intro.html#installation>`_.
-
-Getting Help / Development / Bug reporting
-------------------------------------------
-
-Please refer to the `SQLAlchemy Community Guide <http://www.sqlalchemy.org/support.html>`_.
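
The bullets above describe a Core expression language that renders SQL strings
with bound parameters rather than inlined literals. A minimal sketch of that
behavior, assuming the SQLAlchemy 1.3 series pinned in this tree; the table and
data are illustrative::

    import sqlalchemy as sa

    metadata = sa.MetaData()
    users = sa.Table(
        'users', metadata,
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('name', sa.String(50)),
    )

    # the statement compiles with a bound parameter, never the literal 'flo'
    stmt = users.insert().values(name='flo')
    print(stmt)  # INSERT INTO users (name) VALUES (:name)

    engine = sa.create_engine('sqlite://')  # in-memory SQLite database
    metadata.create_all(engine)

    with engine.connect() as conn:
        conn.execute(stmt)
        rows = conn.execute(
            sa.select([users.c.name]).where(users.c.name == 'flo')
        ).fetchall()
    print(rows)  # [('flo',)]
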
- -Code of Conduct ---------------- - -Above all, SQLAlchemy places great emphasis on polite, thoughtful, and -constructive communication between users and developers. -Please see our current Code of Conduct at -`Code of Conduct `_. - -License -------- - -SQLAlchemy is distributed under the `MIT license -`_. - - - diff --git a/flo-token-explorer/lib/python3.6/site-packages/SQLAlchemy-1.3.3.dist-info/RECORD b/flo-token-explorer/lib/python3.6/site-packages/SQLAlchemy-1.3.3.dist-info/RECORD deleted file mode 100644 index 1b943c9..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/SQLAlchemy-1.3.3.dist-info/RECORD +++ /dev/null @@ -1,395 +0,0 @@ -sqlalchemy/__init__.py,sha256=b-0b9f1lSjccQc_hQAYsbbACOSVSlEgEmTftA7n1Pwg,4621 -sqlalchemy/cprocessors.cpython-36m-x86_64-linux-gnu.so,sha256=NOashYAvtxfT-lfK8HOlib-TawQACizm9-Csj69QNMg,52616 -sqlalchemy/cresultproxy.cpython-36m-x86_64-linux-gnu.so,sha256=bNHzvANDY0t4AUhEdUQADizgjFbeNhlboUIVa832Rqc,62544 -sqlalchemy/cutils.cpython-36m-x86_64-linux-gnu.so,sha256=GQ1DJWKo1lxKcgUJTP-Fz7z_23v9sf-v6NObjP_1zYQ,34936 -sqlalchemy/events.py,sha256=q8YUUj9ie-jqHAhpRCSob6U0AWnO97XTXNU4eFMIoqA,51669 -sqlalchemy/exc.py,sha256=dj3R5czuI1QW-csTi_QkruQgYd4tkyHQLgBktG1uJ7s,16088 -sqlalchemy/inspection.py,sha256=Ld16VK6UHHncT6OTk2EF4pb5dVy6UAluKirO_efhJe4,2990 -sqlalchemy/interfaces.py,sha256=-hmOQE7bQj97PDqwBLnFbHP8spyq_SdWAsT2jj5V_2w,12721 -sqlalchemy/log.py,sha256=p15DWPQsSEbiuM-n9fXpUUKfDcC8DZzR8iVnX9vJyPU,6693 -sqlalchemy/processors.py,sha256=JaohpclZth6hoENx14gSoJ0g6Xtw2hBNYpJB6tDMgh8,5648 -sqlalchemy/schema.py,sha256=6WropRk5UfzJ3ux_PW2XM6LF_VKChK3bVuH0CEtkxlE,2377 -sqlalchemy/types.py,sha256=5fqsxdZyh0VSUuKkLaNuvTkZ3Vd6yyXZaMLQ6-DdozU,3377 -sqlalchemy/connectors/__init__.py,sha256=PmO8JpjouRQDDgTZG6KMmkjuhjNaHKD_mVVmC7BIb3o,278 -sqlalchemy/connectors/mxodbc.py,sha256=shvfYLXjpIfWhzgmAOAmLhgzRuh1cQBviFdlmqsanpQ,5352 -sqlalchemy/connectors/pyodbc.py,sha256=7xXTtBwpiInIZY7cHQrt2kdYMWL2wt0BaBpoPrTk6TY,5586 -sqlalchemy/connectors/zxJDBC.py,sha256=q_1nkGMdgfpdTirsOFPv5zUwvcLl3eFqPWX8kLlonIU,1878 -sqlalchemy/databases/__init__.py,sha256=q48zKLOxc0QAoin17eb3uWgKj8ZKIUn3U-1yCIBxOiA,819 -sqlalchemy/dialects/__init__.py,sha256=sLp-2CliXi_1-53Ocw3RecyguSFVNcgbnpGbQ_l79CE,1349 -sqlalchemy/dialects/firebird/__init__.py,sha256=DIDoybKRq57MbVzxGAEjhlUKOnMxqS_YtqHTqKEqbA8,1152 -sqlalchemy/dialects/firebird/base.py,sha256=edEt_7JD0mjDMYZ5Trx797g3fYSWKu2z0JlSrordFHs,30346 -sqlalchemy/dialects/firebird/fdb.py,sha256=cMR5lkee9uQUW4dLrFJGQwzTqwS0jF7VmFC2-1XHBa0,4071 -sqlalchemy/dialects/firebird/kinterbasdb.py,sha256=umuKwPP3yCApLlgP9nFQnAzM33Tfz6yggXTNfL9NJxg,6372 -sqlalchemy/dialects/mssql/__init__.py,sha256=8BaqPXU2LSiLzAA0y-Dy0TNp01-Yqo9Bd0TJgHwPmkY,1769 -sqlalchemy/dialects/mssql/adodbapi.py,sha256=Iln4YdUq9MLjGQl0B3sWvLllnKWL3MVJDgrla2yyxlo,2716 -sqlalchemy/dialects/mssql/base.py,sha256=KFMLs5GrjeQ_eR-KNTqs_Fz3fesTtoyjfQOZcPo2iK0,84492 -sqlalchemy/dialects/mssql/information_schema.py,sha256=hAcPUvSr39CTKQWM7eMXAX10sRRHdCTZktQYfu8I5xs,5343 -sqlalchemy/dialects/mssql/mxodbc.py,sha256=bWJLGOj1U58hBnM494NL2bL1xm1mMMe2c5Q4mQ20Pgs,4616 -sqlalchemy/dialects/mssql/pymssql.py,sha256=rNE6rjZGFtqn9jrPwCa_H3smpnzw0mq-8QpwtDeQF9A,3848 -sqlalchemy/dialects/mssql/pyodbc.py,sha256=i5bykiOeydS3trFSHKRBX20dbi-ls38Uz6Zj3ERYxX8,12289 -sqlalchemy/dialects/mssql/zxjdbc.py,sha256=WWK58AZg_5QvzdcMLYSQiKwkX8Pz0dpIJGFGQhca-ek,2311 -sqlalchemy/dialects/mysql/__init__.py,sha256=5sqAvOrhuLe7BUFzAMg7e-3CEh0ACsf6czm-TXHDvJg,2056 
-sqlalchemy/dialects/mysql/base.py,sha256=9rFOA2_HHyA--WpGwz3HKcNmS58la6WCjIW4DIWdiWk,95958 -sqlalchemy/dialects/mysql/cymysql.py,sha256=f_ckGdJ1C2F-oD58yF12wqWwPwIoX80NI3FLZazgfto,2043 -sqlalchemy/dialects/mysql/dml.py,sha256=r0lH3nvsP-f_9-eCnR4wcAovnscPGYsXNyz27ftktYw,4559 -sqlalchemy/dialects/mysql/enumerated.py,sha256=4knKAcM5JzfgUS15j3602esjLd3op85_DRwav_4Ogok,11307 -sqlalchemy/dialects/mysql/gaerdbms.py,sha256=JFtH8MmNcVR9D6kSqCgY4bgPQk8MR_ciO9WRPOzmNAo,3368 -sqlalchemy/dialects/mysql/json.py,sha256=mvbQip8qW6b0zs-Kx6JekcWmccqJp3Pt9En1_jsbCac,2050 -sqlalchemy/dialects/mysql/mysqlconnector.py,sha256=uDp399MtwQ6TVCiulN__TqEj6glEKhddZWN2SRzxvOA,7596 -sqlalchemy/dialects/mysql/mysqldb.py,sha256=nneTYVLV1W5tlQnoVMBugz82YtYk69m2jPUqHga6qVc,7942 -sqlalchemy/dialects/mysql/oursql.py,sha256=kJ0qkuApnZ9RxAUllh4fO7zKiOTwyNdSfNG2QX7Ynew,7860 -sqlalchemy/dialects/mysql/pymysql.py,sha256=ZfTjzAWBchsP3uSzHH3gsOZAJDjEUy86CqLyHPbAfYk,2338 -sqlalchemy/dialects/mysql/pyodbc.py,sha256=Z-p_UAqzbdEFBeUYifN1LCfZn4MsQABkq__8Bek-7Sc,2691 -sqlalchemy/dialects/mysql/reflection.py,sha256=i4lgW19eafX1pjMz4RZ9Wjlvy1FPMEe5K24J4dvXAq8,17837 -sqlalchemy/dialects/mysql/types.py,sha256=vXCGrcs3ubhaGpO3ClHqJdjKlgkkFAKMdeepbgwheU4,24601 -sqlalchemy/dialects/mysql/zxjdbc.py,sha256=YwxhLeIHlxcozvo099_sNWl6bUmPK9LBivUQDTRJ6Vc,3970 -sqlalchemy/dialects/oracle/__init__.py,sha256=ISElqFGbFJyBKl00a9_eN_JXTOincncCGy0q7qC0Ia0,1257 -sqlalchemy/dialects/oracle/base.py,sha256=yPqjWLr1_K6RXFbU5VRLN7KCfDBhCbgw7Ip4ywEDFRQ,67114 -sqlalchemy/dialects/oracle/cx_oracle.py,sha256=eLKFR5uRhnbv1cxyQwu8olQtQ46FJ28CaNy5Eq4IN6c,40689 -sqlalchemy/dialects/oracle/zxjdbc.py,sha256=OOtJlVq3YPkQMrzy7QHUveuWw5fzffSx1SQ73sfh0TI,8207 -sqlalchemy/dialects/postgresql/__init__.py,sha256=ICu5C7h84FiVkO5cQp7-GT-a1-XfPagGZ3eV8vFNsws,2461 -sqlalchemy/dialects/postgresql/array.py,sha256=qVqQMUPkMTIVl0rlPV65zfTMHj4ibMAf0WohJeN6Dv8,10167 -sqlalchemy/dialects/postgresql/base.py,sha256=GOV2M3v04zheKYIHJiW9VHQKmPRn0DtwtoK-Q6EpaC0,117647 -sqlalchemy/dialects/postgresql/dml.py,sha256=OTrr1V4O2aHR-QcWMxrEvNFIpwxtd6gHG_M6tjXiQ2c,7579 -sqlalchemy/dialects/postgresql/ext.py,sha256=50vUJeDhqt9eYB-FOOXSwckkbRA-vvf-QGoSbMWL2dE,6823 -sqlalchemy/dialects/postgresql/hstore.py,sha256=ziir2ENH-jtGiHHDmqh5LsYQ9wkskYzK4mHhrCseyHs,12510 -sqlalchemy/dialects/postgresql/json.py,sha256=MLz3Jen1cKfKvtLKAHl19z02-x3mYk8j_9yFjhS1AYQ,9938 -sqlalchemy/dialects/postgresql/pg8000.py,sha256=GMJK0XZNOvhYLqiLwmVzI74-_uh7YTYTgMBSdTmW91Y,9531 -sqlalchemy/dialects/postgresql/psycopg2.py,sha256=CwtXti1K1tOPCYILPasS64TVC7IhZUY893wZjjq9GF0,29411 -sqlalchemy/dialects/postgresql/psycopg2cffi.py,sha256=CxJa1kEhcVL89CDofQu5cLCkXi0Kak_3qHtSY28m3D0,1657 -sqlalchemy/dialects/postgresql/pygresql.py,sha256=ZD_HMinuKFaZVDueKcgZqow6HjpgWW6cg1fvmPMoYW0,7937 -sqlalchemy/dialects/postgresql/pypostgresql.py,sha256=IjU3zKH9g32WyCJ0cQY5q4GPiZ5KvK_ZLB6b5gManeo,2721 -sqlalchemy/dialects/postgresql/ranges.py,sha256=5kXgQWlUUoFaqM4nybF9lXdubPyLuyQ5-Cl1MrW0HY4,4750 -sqlalchemy/dialects/postgresql/zxjdbc.py,sha256=EOBSePhEZ5ofxWxpL4pjKrBdH-J0IU7MNI6bfrNg_f8,1415 -sqlalchemy/dialects/sqlite/__init__.py,sha256=xf7Jo5xBzd3pM2FskTBvK4GEwe2uKF3615wx5igAJnY,1042 -sqlalchemy/dialects/sqlite/base.py,sha256=yWjYvLTnNETrcW3Ir0NARmqtpYMRHhlE4D8DEw6E2YA,68885 -sqlalchemy/dialects/sqlite/json.py,sha256=NlSvgukUs7BOK7HAhoHuk5Li1K2UxUgK0DpTkJ449NE,2292 -sqlalchemy/dialects/sqlite/pysqlcipher.py,sha256=xVjUM8QfJvXZWVPacptuVUrAvTELmgKk6idlK9yCg8M,4689 
-sqlalchemy/dialects/sqlite/pysqlite.py,sha256=ZwUGJh3fsxWgEGaxIjkPd4yD33of79iopQRTk7mzaQc,14878 -sqlalchemy/dialects/sybase/__init__.py,sha256=E7EalDeHIf7ol6Guvh2fxkUt6db39SMTmpr2BNcFqhA,1363 -sqlalchemy/dialects/sybase/base.py,sha256=XaQhHJkL7RcmwJAmG_MbROb0W1LcAEmSvipupRVZfMI,31906 -sqlalchemy/dialects/sybase/mxodbc.py,sha256=SAFYjwWM43w5kexxouD9k9ZixvVnjdYgrV-3cFmehtM,902 -sqlalchemy/dialects/sybase/pyodbc.py,sha256=GY1ckbvHEyAMsOATiD3l154xBFLbvyEl2DAezuKkK-o,2120 -sqlalchemy/dialects/sybase/pysybase.py,sha256=5aDa_EP4TDwD58p78MQCFdL8xEVeUfLgI9TsRL3awcI,3313 -sqlalchemy/engine/__init__.py,sha256=fQ_TNcfWTLkpczTWGYmPk8tddBwuA9U60VSBZAKsxpc,21671 -sqlalchemy/engine/base.py,sha256=0jIN2kK4B07pPH7F1xDceEOYIroY9sYzLZDc5-nS9Do,84096 -sqlalchemy/engine/default.py,sha256=jchVpHCfI9AlK_DxOSgeG7TWd6nX25Q38ZnHSj6igeM,51313 -sqlalchemy/engine/interfaces.py,sha256=r9JF1xpBi680kKA6KDGlfWInC2QIvoJWtH7J3FN-uYo,42228 -sqlalchemy/engine/reflection.py,sha256=RmBVFmXPXqDm4h5lbLW74xAHxNVj5fLYSHqlvsB8yV8,32647 -sqlalchemy/engine/result.py,sha256=hE3rVIz3KgEMNPRPn1Pv62eNYYeOySvEqSRpJeOgbPM,52683 -sqlalchemy/engine/strategies.py,sha256=JkUNBQW_1skLZA-9pM8lc76miwIWElDefD0X99_ovvI,9700 -sqlalchemy/engine/threadlocal.py,sha256=emgAmROgo6SmJkCP8gG8eFViUQHYnd8U3pN1aAt831o,4746 -sqlalchemy/engine/url.py,sha256=d25Q-jI8KUqgVD8aGrsW6A3pI3LIP2TiV1Ny8rgZEyM,9411 -sqlalchemy/engine/util.py,sha256=pdoqEAzvm1TIIYNQsHP5eeAQZVbdEzyBE69r5rjdo94,2421 -sqlalchemy/event/__init__.py,sha256=MEawrpuhgajkBpodIp2BfEmLyHYkCicSoz60dTFJk7A,596 -sqlalchemy/event/api.py,sha256=42LGaYFxvq0W0PRr7DEE0Zygn7YNPMvKuQcCbIkJEKk,6279 -sqlalchemy/event/attr.py,sha256=5TRnPHyWBYg0OgYIZmjwP29iiFfNTTGZQCXDXoIXmh4,12994 -sqlalchemy/event/base.py,sha256=1qV9UUIw3FcKc6AjFTiV7wiFRpNv4CFCPGYq2nuqANc,9760 -sqlalchemy/event/legacy.py,sha256=N5IF3sonoACbA3zusM6nejBmecdD9r7o5ZrWj7JUfyo,5904 -sqlalchemy/event/registry.py,sha256=1wpdKBp8yiQalFJemBBS0Bo0Ikoot1_Tmr_44Jdwh_Q,8066 -sqlalchemy/ext/__init__.py,sha256=FZuukFICvDXSuR142gLZ7dBWNpRHqASSL2aMtUfachE,322 -sqlalchemy/ext/associationproxy.py,sha256=-_aMD4KPIaWG0q6_uA9Bdzq6h2vM9qRP4zv9B_7vkKc,49583 -sqlalchemy/ext/automap.py,sha256=Sejl92gboBfHYXpw4xn-e1-uCr3DLud8u6MJRU395MU,41710 -sqlalchemy/ext/baked.py,sha256=jq3nLzBklE7BP8kulVGgUCbqOxVTyWVAdy6ZihkW40U,21468 -sqlalchemy/ext/compiler.py,sha256=bnwt9fjMG4_rrwPsX_5Kv96JgqEJM4TKQY4OorHEttc,16850 -sqlalchemy/ext/horizontal_shard.py,sha256=TxazGyRKBY-0q_iYGQ6omdZw96GoRb11Dh5omMH35Lk,9109 -sqlalchemy/ext/hybrid.py,sha256=l1qSV6leIR57j3dsSXbCfJMIQH3EYzoEBY7IUnn-4ME,40233 -sqlalchemy/ext/indexable.py,sha256=Zsx-ZWUQ4EJOgGIi8fkwqNZ1Z0u234Vx45jcH9zJ1GM,11169 -sqlalchemy/ext/instrumentation.py,sha256=7pxb7SDAnFvtHUxXnx8rqg4XKCadnWDGOBzFQHqeBS8,14351 -sqlalchemy/ext/mutable.py,sha256=rHTPaTAAiLda-cTAZpW4a1zl8Mqa0pgde0wWc6pGABM,31808 -sqlalchemy/ext/orderinglist.py,sha256=HEqlfaUQv5TI6kVchcNuxF_dcwvHHH_uGXY1EvP4GxQ,13888 -sqlalchemy/ext/serializer.py,sha256=D7DGYqH_tQsYSB2NFh44Eouud3aaZjclnfA-7lymCdE,5784 -sqlalchemy/ext/declarative/__init__.py,sha256=vjC-rEQ8py1ai0Gsh47UYHcYNlrhNw3774_u5ffhyws,902 -sqlalchemy/ext/declarative/api.py,sha256=KGnM7bHVyB03WDcd6ToiLCJQifIXxxx0NyDrHmb5XQM,27683 -sqlalchemy/ext/declarative/base.py,sha256=eoPCw3XK6OHBlsKlUf41JmaktvHO2wKFrzD_ZlsUxWY,31915 -sqlalchemy/ext/declarative/clsregistry.py,sha256=jH0vm14_bN7fw_hfkqNNiK0eihkRDpA1YlyS_yjkfIY,11051 -sqlalchemy/orm/__init__.py,sha256=0oVxiEjATefM7AsVfn3GwFVdpoTqClB5v2h3wkoJubs,9542 -sqlalchemy/orm/attributes.py,sha256=7tKQDyi8xpFtQoALX1sAlUU05fRMQHc6N2vJsWyez-o,65829 
-sqlalchemy/orm/base.py,sha256=buwPRxecLZ-jlLiE6iMr-Nu5dQapiAjQVhHFzqDlAS4,14864 -sqlalchemy/orm/collections.py,sha256=AtwAGy1fBr8Nb6cOE6ErkfbwB04WAMZh02OE5c1ab1M,52614 -sqlalchemy/orm/dependency.py,sha256=gSbhKIW5fSR5l3DhoHQR23XpZJlcWkAXNsha97ZHmuk,46265 -sqlalchemy/orm/deprecated_interfaces.py,sha256=Ck4e2jxE09lf0STxFPvJm7XtnrAsCKi26NG2WWgnI4s,20751 -sqlalchemy/orm/descriptor_props.py,sha256=uw_3B8zPTXJRw1JjZetl87oHLP70B92enkZTzMyIIGw,28201 -sqlalchemy/orm/dynamic.py,sha256=ewxEYuJ8DeRzFOHmthe4xnrR9ZX4svImGia6qDcnbv8,14199 -sqlalchemy/orm/evaluator.py,sha256=bKHiPgCfcGlZ48by-j1E2ui5quPjEa3dcjinTpAdaNc,5441 -sqlalchemy/orm/events.py,sha256=puZVKPStl9DMOsCvmX2n29YGxjWl1hY0z3C4sfxMa_E,92232 -sqlalchemy/orm/exc.py,sha256=mgjHeg87b0WJ8NGM1Exq39dQ5-nz_Crn61dSXKyHO6Q,6610 -sqlalchemy/orm/identity.py,sha256=zQBUIWAto8UBUKsMV74aX3AqYwqYlURjUZ2xiS0Mrj8,10299 -sqlalchemy/orm/instrumentation.py,sha256=lxa3rHi_Y7uRcQkZ7JwCWqfgN5e9mbir0XUK25R3kic,18050 -sqlalchemy/orm/interfaces.py,sha256=JzAQS8ulSBk-AUFPPxIAkBkfPV9HA5WztCL7osBIY9g,25766 -sqlalchemy/orm/loading.py,sha256=lQpLChrS32LN5x3YrZhZvyWptx3B6mDPtIUgMKxI_88,32236 -sqlalchemy/orm/mapper.py,sha256=xU7Npp0S030yNzeDfV7MhU9laxYIrKImt3-rEwkBuv4,128333 -sqlalchemy/orm/path_registry.py,sha256=WRQ22J7JGmFYtjXphdEK4uUVl33Hl0TFXPDRX_IIGhY,9262 -sqlalchemy/orm/persistence.py,sha256=7adak74kUf_2HDRWCGJD2NmoKNUgp_w4gmj0owtICyw,64710 -sqlalchemy/orm/properties.py,sha256=R_QJCCH1POO3mm6M_TP2UXGhF1tzMZ4T5S1muj1Kmc0,11043 -sqlalchemy/orm/query.py,sha256=0ISY4UChgo8-BCkbvVpF4M9HRm8vCyAxdZDhZh2X0pU,172159 -sqlalchemy/orm/relationships.py,sha256=pj0f0Vj5nJ4L3zjrU99G3JX0d4-amO8uUrrh5su67tU,124457 -sqlalchemy/orm/scoping.py,sha256=XQbBSdlbNC9kiKFt6RQ4AhR_Ds-zd2dbyEsDYQNzL_E,6393 -sqlalchemy/orm/session.py,sha256=oY8Vtg_npKKtPA5xHk4Quf7KvL_kLADSoOKwYsL1Yrg,128432 -sqlalchemy/orm/state.py,sha256=mK5CslhHSpFFsiXr8Le7OLDZW0Jmh0nc2haLvqSOAtM,31044 -sqlalchemy/orm/strategies.py,sha256=qYnIG4yLYb0CXM7c3XQaav7i1RaBQP1QgKgX9OxiyFU,80489 -sqlalchemy/orm/strategy_options.py,sha256=u-7J4jOVaoCesn3l2N5B4D-LGe3k1ptIxOw2nvAN-t8,52591 -sqlalchemy/orm/sync.py,sha256=M5lWCUhgzxI_CXh3WJvI6xEKLCeQ2F2e42DDri27vZ8,5536 -sqlalchemy/orm/unitofwork.py,sha256=qM0DWBlZfI6BkZWH9Ir7xBxUWEXgnV53XytBMHSACq0,24735 -sqlalchemy/orm/util.py,sha256=MNMdmV-OvxnqD6BqMopTR0AztK-XGehEhAHl51IltpE,44950 -sqlalchemy/pool/__init__.py,sha256=ryTuFoeMcSGBRtGWhkQLgqWBpDW72KRNU7mnEoI6zZs,1483 -sqlalchemy/pool/base.py,sha256=n6MyvajNFk7_Ng_XdoznWISYuZCHlhD3dXxUVHAU3PY,34739 -sqlalchemy/pool/dbapi_proxy.py,sha256=fXfMOhXiphQUXtdUkXyALbSSdTK1rmAl-96UWdt1yi0,4320 -sqlalchemy/pool/impl.py,sha256=IlCjSEY0e3TxLIM8Qqt_3kUp0TgQ_dUkFiB60DrRoEw,14594 -sqlalchemy/sql/__init__.py,sha256=yYsKlrXrIVLaaohxVqZDQny02FRj5uZ2KwBRtj6nDfE,3789 -sqlalchemy/sql/annotation.py,sha256=mEFt4M7DT4SRwDSRwmi5ZR4idRrCsRESITl9jDAU_HE,6651 -sqlalchemy/sql/base.py,sha256=JqxZ-yhSWV37SpYxedwKjogOYIqopJy5b6sT4oE_kkU,21506 -sqlalchemy/sql/compiler.py,sha256=JOu6TXRsEYOU7LULqtABcrf5vALuVSeiP8uxF5LsNtU,123812 -sqlalchemy/sql/crud.py,sha256=lioHSo6Ud4pEZ8H-4LHWgR6AHwPjfoB1PKxJL3EAab8,25840 -sqlalchemy/sql/ddl.py,sha256=JWrv5XZNr6NtReXV_M4a4UBg8tACGpJD7EPoRAMWiNM,39549 -sqlalchemy/sql/default_comparator.py,sha256=BhF_MnLS2ZGTlscNw8Inru4L-C9J8J0TzsK_YNxFoXM,12148 -sqlalchemy/sql/dml.py,sha256=EbCBhs-9aFk1t7aSs9AXkTwOCoc9XfXnUygnU_LPl-A,34420 -sqlalchemy/sql/elements.py,sha256=PNXozUrzaY57YRDzo9PQwWbvM5vrQB9uZoNJoS2KMz4,152316 -sqlalchemy/sql/expression.py,sha256=ZOFtmLM6gKfC_GGq7jEnCjBntiZoSwfjbIHvbqg-v2M,9037 
-sqlalchemy/sql/functions.py,sha256=7aX0ZZf1eLZ3SSTt1tNq1EqNMrWi5KC9wm2LX0GJAhg,30648 -sqlalchemy/sql/naming.py,sha256=XhFmVhT4AyXTyfr98Fm1zJXDQWXGyaDSx6pjkFzwe1w,5885 -sqlalchemy/sql/operators.py,sha256=Q3fibmpoWwB6IytLx0wb1UVVgOxee4otu5Or5_wHr1Y,42148 -sqlalchemy/sql/schema.py,sha256=V70OfyDQ8Ym-75-KZvpVWVpWBdb2uSyfQkult-vcdxY,164266 -sqlalchemy/sql/selectable.py,sha256=UhPNuWwMXGSg4jDgp9yvS7htjztnTIOAWF11pnowpFU,130930 -sqlalchemy/sql/sqltypes.py,sha256=PfQ-Nsn9U-GDl185Ez6E0tsYENfOmzXzLkVBt1Xcd8E,94583 -sqlalchemy/sql/type_api.py,sha256=zg9OvnQyw2I9nHkcqCi9YunpLkxRBipeJEAiCnPO3XM,51268 -sqlalchemy/sql/util.py,sha256=RqV2UkXR6TD-TNJp8KunVQnvs5gNVkSU7_pnHtJmA4g,29275 -sqlalchemy/sql/visitors.py,sha256=sOZmJko47vBGCu7G7u6spNCczLrWyq3ChzKdi-AK5fE,10268 -sqlalchemy/testing/__init__.py,sha256=AMAb69uUKMaRMWpk-RYade6muNOfqhq-yx0gklK-tDw,2362 -sqlalchemy/testing/assertions.py,sha256=k-n4GfA2FHRVazA0d_RgHFG-c8pQZU-ymZEbAeRcuKo,18017 -sqlalchemy/testing/assertsql.py,sha256=hex8NrwU4_yQlsd9TrzunWE93mreZq-QatZvyzGqMlc,13398 -sqlalchemy/testing/config.py,sha256=xtKFYGVj0dhSlpLupX2nuYv_dqlwSr9h3xULp6GuwwA,2680 -sqlalchemy/testing/engines.py,sha256=KLU8Uyy6mCA19c4ha4hnbfSST3OyhYiQWG03qixaeX8,10437 -sqlalchemy/testing/entities.py,sha256=v30FAovFH3pJz9Ryq05u6EDBfWjt_fBfYht2iGBURpo,3093 -sqlalchemy/testing/exclusions.py,sha256=kKLxtaE7qXrOXmn5Y_woKod2h7V7DO4vLJ0NRb9TwHM,12765 -sqlalchemy/testing/fixtures.py,sha256=6bPUb2yuO-xmvDbF5H1gpgBLnQRGsDxfuRiKXI2-vzQ,10814 -sqlalchemy/testing/mock.py,sha256=A5GADY9Iwr7Cbr3OeAZwNl32EzJx1U2OIRPl8hLV3kM,893 -sqlalchemy/testing/pickleable.py,sha256=LolLY6wnug3CwcJ701BTPYPm9A_dCNT8r4MT8F0M1N4,2693 -sqlalchemy/testing/profiling.py,sha256=lKQLwvTW0H_JnuKaA4riaQOjwdRvW4JX7X2y4Q1m74Y,8513 -sqlalchemy/testing/provision.py,sha256=8wno6MSqe2WGfoo_aiDzHmnxmQSi1CF5kIiWjankuBw,13263 -sqlalchemy/testing/replay_fixture.py,sha256=W_QZD96t7ichRNvILOjhuoQXTCYnd2usiHBQhPkzUYI,5875 -sqlalchemy/testing/requirements.py,sha256=0mU2SpPFAM1H2E6XnUk3sBKNgTMYfLG_3ucuaPyp_bw,26861 -sqlalchemy/testing/schema.py,sha256=bDySUn-cFBuly4XZrxZLrWmWF0BO7KKAcV5uSZmM16w,3712 -sqlalchemy/testing/util.py,sha256=lxFV07DVuFrTyE7i4DLxa-_ifGi2dX-mQul5gAUC36U,7738 -sqlalchemy/testing/warnings.py,sha256=uHN8jNwg7d6D5eEYkI1RcSYk7wfyk7v8zNe5ApN0yIk,1298 -sqlalchemy/testing/plugin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -sqlalchemy/testing/plugin/bootstrap.py,sha256=0rkror_9S175GPGNnbtbDmfdLEhu9v-AAv715lR8KyU,1468 -sqlalchemy/testing/plugin/plugin_base.py,sha256=0OeTsbo9vZekAmVsFa9LxwxDS_BxweV1oxeyGk8jWpo,18867 -sqlalchemy/testing/plugin/pytestplugin.py,sha256=DL2cL74zrHTeGH4nKzlubqxhZG9MAmbgghdyXJYohmg,7145 -sqlalchemy/testing/suite/__init__.py,sha256=SUWU-LR3asH2hN2YsIhlpqxeuo8fpvej3o6nct-L4xU,358 -sqlalchemy/testing/suite/test_cte.py,sha256=yRWq8d3q5YJu_PBDaV_mrTcfGhJCaFtjWnL0P6H3p_0,6788 -sqlalchemy/testing/suite/test_ddl.py,sha256=zzG8K1NrzTQowMbIqHEV2TjZwk0crEdc8d1u6oHbzwg,2870 -sqlalchemy/testing/suite/test_dialect.py,sha256=aZ6b0Jy6OKKVdM9gYs74NNGls8tUfO0oya1RPpTfIME,4572 -sqlalchemy/testing/suite/test_insert.py,sha256=Usn0cmLEfn82vOGlVHxaeaq9KH9tO6L_BZWrW-I3XYM,9586 -sqlalchemy/testing/suite/test_reflection.py,sha256=SN99s7CSu7ULdR2M0Dtq6a1elaPcBpZhvoOHZkKbpi4,38268 -sqlalchemy/testing/suite/test_results.py,sha256=cR_lb33pssyzg9PYLvoQYp2P1fuCu36scjOAIFBR8GM,11513 -sqlalchemy/testing/suite/test_select.py,sha256=7IKzlpcg18mC3HegLy1-WcZ0wlphFEGGMoj0DTHvCNM,19984 -sqlalchemy/testing/suite/test_sequence.py,sha256=oacBvtAqW3Ua3gcqyqnT1U_hpJutEW_EmEbwvf7Xq7E,4661 
-sqlalchemy/testing/suite/test_types.py,sha256=SwfV-R_CYvGBtj8KiiVkyQ6W2y1RXGWN2K7b7BfzhT8,29632 -sqlalchemy/testing/suite/test_update_delete.py,sha256=PA0kY_yeRDELIwvTljpElr8Q6sw_D0bFQqPTODzrL6w,1478 -sqlalchemy/util/__init__.py,sha256=BeuHkC0z7vyYSNPpolkFhlNcQsLAp97kz5ZqI94BeQ0,6580 -sqlalchemy/util/_collections.py,sha256=BtaTnF9T6DmcwNm_Ppn6bMC5q-DrlMDyqvdWL_5CR9s,29153 -sqlalchemy/util/compat.py,sha256=ZPgmGxtOszTbiBzjsYcvteM0zAyeKVbD5QtQh3agsFg,11264 -sqlalchemy/util/deprecations.py,sha256=GgG2fEYkvwGzgagvGyPleV8UFMyfBy9hXcEDF11CrGI,7169 -sqlalchemy/util/langhelpers.py,sha256=ImXE5T0SERxy41PJ88Cm_sOd_GUtrCIREMXcGlziIGU,47645 -sqlalchemy/util/queue.py,sha256=EBxMwtWpxO2od9YlVetESq6-ShQER2ejH1MqmeA8iss,6827 -sqlalchemy/util/topological.py,sha256=lymXt3K0HlPlJsZRQCyIyLS9VZNgRFuALXkJiB_e7Bk,2767 -SQLAlchemy-1.3.3.dist-info/LICENSE,sha256=VTaFntofQKoeltlNDAJ8zV62gi-Gn7drymVfS1h0Kmo,1229 -SQLAlchemy-1.3.3.dist-info/METADATA,sha256=JI7NckYQJyu48q4gm7b5d_TqA2hjMIkn-pnwef314zM,7122 -SQLAlchemy-1.3.3.dist-info/WHEEL,sha256=zfLI8UG8SYrkkKiIcNZV3RdKvj24wfV1YiJLFokwxiQ,104 -SQLAlchemy-1.3.3.dist-info/top_level.txt,sha256=rp-ZgB7D8G11ivXON5VGPjupT1voYmWqkciDt5Uaw_Q,11 -SQLAlchemy-1.3.3.dist-info/RECORD,, -SQLAlchemy-1.3.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -sqlalchemy/util/__pycache__/queue.cpython-36.pyc,, -sqlalchemy/util/__pycache__/topological.cpython-36.pyc,, -sqlalchemy/util/__pycache__/langhelpers.cpython-36.pyc,, -sqlalchemy/util/__pycache__/deprecations.cpython-36.pyc,, -sqlalchemy/util/__pycache__/_collections.cpython-36.pyc,, -sqlalchemy/util/__pycache__/compat.cpython-36.pyc,, -sqlalchemy/util/__pycache__/__init__.cpython-36.pyc,, -sqlalchemy/event/__pycache__/api.cpython-36.pyc,, -sqlalchemy/event/__pycache__/attr.cpython-36.pyc,, -sqlalchemy/event/__pycache__/base.cpython-36.pyc,, -sqlalchemy/event/__pycache__/registry.cpython-36.pyc,, -sqlalchemy/event/__pycache__/legacy.cpython-36.pyc,, -sqlalchemy/event/__pycache__/__init__.cpython-36.pyc,, -sqlalchemy/testing/plugin/__pycache__/bootstrap.cpython-36.pyc,, -sqlalchemy/testing/plugin/__pycache__/pytestplugin.cpython-36.pyc,, -sqlalchemy/testing/plugin/__pycache__/plugin_base.cpython-36.pyc,, -sqlalchemy/testing/plugin/__pycache__/__init__.cpython-36.pyc,, -sqlalchemy/testing/suite/__pycache__/test_sequence.cpython-36.pyc,, -sqlalchemy/testing/suite/__pycache__/test_cte.cpython-36.pyc,, -sqlalchemy/testing/suite/__pycache__/test_update_delete.cpython-36.pyc,, -sqlalchemy/testing/suite/__pycache__/test_select.cpython-36.pyc,, -sqlalchemy/testing/suite/__pycache__/test_results.cpython-36.pyc,, -sqlalchemy/testing/suite/__pycache__/test_dialect.cpython-36.pyc,, -sqlalchemy/testing/suite/__pycache__/test_ddl.cpython-36.pyc,, -sqlalchemy/testing/suite/__pycache__/test_insert.cpython-36.pyc,, -sqlalchemy/testing/suite/__pycache__/test_reflection.cpython-36.pyc,, -sqlalchemy/testing/suite/__pycache__/__init__.cpython-36.pyc,, -sqlalchemy/testing/suite/__pycache__/test_types.cpython-36.pyc,, -sqlalchemy/testing/__pycache__/mock.cpython-36.pyc,, -sqlalchemy/testing/__pycache__/provision.cpython-36.pyc,, -sqlalchemy/testing/__pycache__/engines.cpython-36.pyc,, -sqlalchemy/testing/__pycache__/replay_fixture.cpython-36.pyc,, -sqlalchemy/testing/__pycache__/assertsql.cpython-36.pyc,, -sqlalchemy/testing/__pycache__/pickleable.cpython-36.pyc,, -sqlalchemy/testing/__pycache__/config.cpython-36.pyc,, -sqlalchemy/testing/__pycache__/entities.cpython-36.pyc,, 
-sqlalchemy/testing/__pycache__/requirements.cpython-36.pyc,, -sqlalchemy/testing/__pycache__/util.cpython-36.pyc,, -sqlalchemy/testing/__pycache__/fixtures.cpython-36.pyc,, -sqlalchemy/testing/__pycache__/profiling.cpython-36.pyc,, -sqlalchemy/testing/__pycache__/schema.cpython-36.pyc,, -sqlalchemy/testing/__pycache__/warnings.cpython-36.pyc,, -sqlalchemy/testing/__pycache__/__init__.cpython-36.pyc,, -sqlalchemy/testing/__pycache__/exclusions.cpython-36.pyc,, -sqlalchemy/testing/__pycache__/assertions.cpython-36.pyc,, -sqlalchemy/connectors/__pycache__/zxJDBC.cpython-36.pyc,, -sqlalchemy/connectors/__pycache__/pyodbc.cpython-36.pyc,, -sqlalchemy/connectors/__pycache__/mxodbc.cpython-36.pyc,, -sqlalchemy/connectors/__pycache__/__init__.cpython-36.pyc,, -sqlalchemy/sql/__pycache__/naming.cpython-36.pyc,, -sqlalchemy/sql/__pycache__/crud.cpython-36.pyc,, -sqlalchemy/sql/__pycache__/functions.cpython-36.pyc,, -sqlalchemy/sql/__pycache__/sqltypes.cpython-36.pyc,, -sqlalchemy/sql/__pycache__/visitors.cpython-36.pyc,, -sqlalchemy/sql/__pycache__/default_comparator.cpython-36.pyc,, -sqlalchemy/sql/__pycache__/util.cpython-36.pyc,, -sqlalchemy/sql/__pycache__/base.cpython-36.pyc,, -sqlalchemy/sql/__pycache__/ddl.cpython-36.pyc,, -sqlalchemy/sql/__pycache__/selectable.cpython-36.pyc,, -sqlalchemy/sql/__pycache__/compiler.cpython-36.pyc,, -sqlalchemy/sql/__pycache__/type_api.cpython-36.pyc,, -sqlalchemy/sql/__pycache__/schema.cpython-36.pyc,, -sqlalchemy/sql/__pycache__/elements.cpython-36.pyc,, -sqlalchemy/sql/__pycache__/__init__.cpython-36.pyc,, -sqlalchemy/sql/__pycache__/expression.cpython-36.pyc,, -sqlalchemy/sql/__pycache__/operators.cpython-36.pyc,, -sqlalchemy/sql/__pycache__/dml.cpython-36.pyc,, -sqlalchemy/sql/__pycache__/annotation.cpython-36.pyc,, -sqlalchemy/pool/__pycache__/impl.cpython-36.pyc,, -sqlalchemy/pool/__pycache__/dbapi_proxy.cpython-36.pyc,, -sqlalchemy/pool/__pycache__/base.cpython-36.pyc,, -sqlalchemy/pool/__pycache__/__init__.cpython-36.pyc,, -sqlalchemy/__pycache__/interfaces.cpython-36.pyc,, -sqlalchemy/__pycache__/events.cpython-36.pyc,, -sqlalchemy/__pycache__/log.cpython-36.pyc,, -sqlalchemy/__pycache__/types.cpython-36.pyc,, -sqlalchemy/__pycache__/schema.cpython-36.pyc,, -sqlalchemy/__pycache__/processors.cpython-36.pyc,, -sqlalchemy/__pycache__/__init__.cpython-36.pyc,, -sqlalchemy/__pycache__/exc.cpython-36.pyc,, -sqlalchemy/__pycache__/inspection.cpython-36.pyc,, -sqlalchemy/ext/declarative/__pycache__/clsregistry.cpython-36.pyc,, -sqlalchemy/ext/declarative/__pycache__/api.cpython-36.pyc,, -sqlalchemy/ext/declarative/__pycache__/base.cpython-36.pyc,, -sqlalchemy/ext/declarative/__pycache__/__init__.cpython-36.pyc,, -sqlalchemy/ext/__pycache__/mutable.cpython-36.pyc,, -sqlalchemy/ext/__pycache__/hybrid.cpython-36.pyc,, -sqlalchemy/ext/__pycache__/orderinglist.cpython-36.pyc,, -sqlalchemy/ext/__pycache__/baked.cpython-36.pyc,, -sqlalchemy/ext/__pycache__/automap.cpython-36.pyc,, -sqlalchemy/ext/__pycache__/serializer.cpython-36.pyc,, -sqlalchemy/ext/__pycache__/compiler.cpython-36.pyc,, -sqlalchemy/ext/__pycache__/associationproxy.cpython-36.pyc,, -sqlalchemy/ext/__pycache__/instrumentation.cpython-36.pyc,, -sqlalchemy/ext/__pycache__/horizontal_shard.cpython-36.pyc,, -sqlalchemy/ext/__pycache__/indexable.cpython-36.pyc,, -sqlalchemy/ext/__pycache__/__init__.cpython-36.pyc,, -sqlalchemy/databases/__pycache__/__init__.cpython-36.pyc,, -sqlalchemy/engine/__pycache__/default.cpython-36.pyc,, -sqlalchemy/engine/__pycache__/interfaces.cpython-36.pyc,, 
-sqlalchemy/engine/__pycache__/threadlocal.cpython-36.pyc,, -sqlalchemy/engine/__pycache__/result.cpython-36.pyc,, -sqlalchemy/engine/__pycache__/util.cpython-36.pyc,, -sqlalchemy/engine/__pycache__/strategies.cpython-36.pyc,, -sqlalchemy/engine/__pycache__/base.cpython-36.pyc,, -sqlalchemy/engine/__pycache__/reflection.cpython-36.pyc,, -sqlalchemy/engine/__pycache__/url.cpython-36.pyc,, -sqlalchemy/engine/__pycache__/__init__.cpython-36.pyc,, -sqlalchemy/dialects/mysql/__pycache__/cymysql.cpython-36.pyc,, -sqlalchemy/dialects/mysql/__pycache__/oursql.cpython-36.pyc,, -sqlalchemy/dialects/mysql/__pycache__/pyodbc.cpython-36.pyc,, -sqlalchemy/dialects/mysql/__pycache__/pymysql.cpython-36.pyc,, -sqlalchemy/dialects/mysql/__pycache__/base.cpython-36.pyc,, -sqlalchemy/dialects/mysql/__pycache__/gaerdbms.cpython-36.pyc,, -sqlalchemy/dialects/mysql/__pycache__/mysqldb.cpython-36.pyc,, -sqlalchemy/dialects/mysql/__pycache__/reflection.cpython-36.pyc,, -sqlalchemy/dialects/mysql/__pycache__/json.cpython-36.pyc,, -sqlalchemy/dialects/mysql/__pycache__/types.cpython-36.pyc,, -sqlalchemy/dialects/mysql/__pycache__/enumerated.cpython-36.pyc,, -sqlalchemy/dialects/mysql/__pycache__/mysqlconnector.cpython-36.pyc,, -sqlalchemy/dialects/mysql/__pycache__/zxjdbc.cpython-36.pyc,, -sqlalchemy/dialects/mysql/__pycache__/__init__.cpython-36.pyc,, -sqlalchemy/dialects/mysql/__pycache__/dml.cpython-36.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/pypostgresql.cpython-36.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/ext.cpython-36.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/pg8000.cpython-36.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/psycopg2cffi.cpython-36.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/hstore.cpython-36.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/base.cpython-36.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/json.cpython-36.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/ranges.cpython-36.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/zxjdbc.cpython-36.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/array.cpython-36.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/__init__.cpython-36.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/psycopg2.cpython-36.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/pygresql.cpython-36.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/dml.cpython-36.pyc,, -sqlalchemy/dialects/__pycache__/__init__.cpython-36.pyc,, -sqlalchemy/dialects/mssql/__pycache__/pyodbc.cpython-36.pyc,, -sqlalchemy/dialects/mssql/__pycache__/base.cpython-36.pyc,, -sqlalchemy/dialects/mssql/__pycache__/information_schema.cpython-36.pyc,, -sqlalchemy/dialects/mssql/__pycache__/zxjdbc.cpython-36.pyc,, -sqlalchemy/dialects/mssql/__pycache__/mxodbc.cpython-36.pyc,, -sqlalchemy/dialects/mssql/__pycache__/__init__.cpython-36.pyc,, -sqlalchemy/dialects/mssql/__pycache__/adodbapi.cpython-36.pyc,, -sqlalchemy/dialects/mssql/__pycache__/pymssql.cpython-36.pyc,, -sqlalchemy/dialects/sybase/__pycache__/pysybase.cpython-36.pyc,, -sqlalchemy/dialects/sybase/__pycache__/pyodbc.cpython-36.pyc,, -sqlalchemy/dialects/sybase/__pycache__/base.cpython-36.pyc,, -sqlalchemy/dialects/sybase/__pycache__/mxodbc.cpython-36.pyc,, -sqlalchemy/dialects/sybase/__pycache__/__init__.cpython-36.pyc,, -sqlalchemy/dialects/oracle/__pycache__/base.cpython-36.pyc,, -sqlalchemy/dialects/oracle/__pycache__/cx_oracle.cpython-36.pyc,, -sqlalchemy/dialects/oracle/__pycache__/zxjdbc.cpython-36.pyc,, -sqlalchemy/dialects/oracle/__pycache__/__init__.cpython-36.pyc,, 
-sqlalchemy/dialects/sqlite/__pycache__/pysqlite.cpython-36.pyc,, -sqlalchemy/dialects/sqlite/__pycache__/base.cpython-36.pyc,, -sqlalchemy/dialects/sqlite/__pycache__/json.cpython-36.pyc,, -sqlalchemy/dialects/sqlite/__pycache__/__init__.cpython-36.pyc,, -sqlalchemy/dialects/sqlite/__pycache__/pysqlcipher.cpython-36.pyc,, -sqlalchemy/dialects/firebird/__pycache__/kinterbasdb.cpython-36.pyc,, -sqlalchemy/dialects/firebird/__pycache__/fdb.cpython-36.pyc,, -sqlalchemy/dialects/firebird/__pycache__/base.cpython-36.pyc,, -sqlalchemy/dialects/firebird/__pycache__/__init__.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/properties.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/interfaces.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/sync.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/dynamic.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/loading.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/events.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/dependency.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/collections.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/attributes.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/session.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/deprecated_interfaces.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/state.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/scoping.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/identity.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/strategy_options.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/util.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/strategies.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/base.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/relationships.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/mapper.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/evaluator.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/descriptor_props.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/instrumentation.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/persistence.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/path_registry.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/__init__.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/exc.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/query.cpython-36.pyc,, -sqlalchemy/orm/__pycache__/unitofwork.cpython-36.pyc,, diff --git a/flo-token-explorer/lib/python3.6/site-packages/SQLAlchemy-1.3.3.dist-info/WHEEL b/flo-token-explorer/lib/python3.6/site-packages/SQLAlchemy-1.3.3.dist-info/WHEEL deleted file mode 100644 index f347002..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/SQLAlchemy-1.3.3.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.32.3) -Root-Is-Purelib: false -Tag: cp36-cp36m-linux_x86_64 - diff --git a/flo-token-explorer/lib/python3.6/site-packages/SQLAlchemy-1.3.3.dist-info/top_level.txt b/flo-token-explorer/lib/python3.6/site-packages/SQLAlchemy-1.3.3.dist-info/top_level.txt deleted file mode 100644 index 39fb2be..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/SQLAlchemy-1.3.3.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -sqlalchemy diff --git a/flo-token-explorer/lib/python3.6/site-packages/WTForms-2.2.1.dist-info/INSTALLER b/flo-token-explorer/lib/python3.6/site-packages/WTForms-2.2.1.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/WTForms-2.2.1.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/flo-token-explorer/lib/python3.6/site-packages/WTForms-2.2.1.dist-info/METADATA b/flo-token-explorer/lib/python3.6/site-packages/WTForms-2.2.1.dist-info/METADATA deleted file mode 
100644 index 8a77324..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/WTForms-2.2.1.dist-info/METADATA +++ /dev/null @@ -1,101 +0,0 @@ -Metadata-Version: 2.1 -Name: WTForms -Version: 2.2.1 -Summary: A flexible forms validation and rendering library for Python web development. -Home-page: https://wtforms.readthedocs.io/ -Author: Thomas Johansson, James Crasta -Author-email: wtforms@simplecodes.com -Maintainer: WTForms team -Maintainer-email: davidism@gmail.com -License: BSD -Project-URL: Documentation, https://wtforms.readthedocs.io/ -Project-URL: Code, https://github.com/wtforms/wtforms -Project-URL: Issue tracker, https://github.com/wtforms/wtforms/issues -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Web Environment -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Provides-Extra: locale -Requires-Dist: ordereddict; python_version=="2.6" -Provides-Extra: locale -Requires-Dist: Babel (>=1.3); extra == 'locale' - -WTForms -======= - -WTForms is a flexible forms validation and rendering library for Python -web development. It is `framework agnostic`_ and can work with whatever -web framework and template engine you choose. There are various -community libraries that provide closer integration with popular -frameworks. - -To get started using WTForms, we recommend reading the `crash course`_ -in the docs. - -.. _crash course: https://wtforms.readthedocs.io/en/stable/crash_course.html -.. _framework agnostic: https://wtforms.readthedocs.io/en/stable/faq.html#does-wtforms-work-with-library-here - - -Installation ------------- - -Install and update using pip:: - - pip install -U WTForms - - -Third-Party Library Integrations -------------------------------- - -WTForms is designed to work with any web framework and template engine. -There are a number of community-provided libraries that make integrating -with frameworks even better. - -- `Flask-WTF`_ integrates with the Flask framework. It can - automatically load data from the request, uses Flask-Babel to - translate based on user-selected locale, provides full-application - CSRF, and more. -- `WTForms-Alchemy`_ provides rich support for generating forms from - SQLAlchemy models, including an expanded set of fields and - validators. -- `WTForms-SQLAlchemy`_ provides ORM-backed fields and form generation - from SQLAlchemy models. -- `WTForms-AppEngine`_ provides ORM-backed fields and form generation - from AppEngine db/ndb schema. -- `WTForms-Django`_ provides ORM-backed fields and form generation - from Django models, as well as integration with Django's I18N - support. - -.. _Flask-WTF: https://flask-wtf.readthedocs.io/ -.. _WTForms-Alchemy: https://wtforms-alchemy.readthedocs.io/ -.. _WTForms-SQLAlchemy: https://github.com/wtforms/wtforms-sqlalchemy -.. _WTForms-AppEngine: https://github.com/wtforms/wtforms-appengine -..
_WTForms-Django: https://github.com/wtforms/wtforms-django - - -Links ------ - -- Documentation: https://wtforms.readthedocs.io/ -- License: `BSD `_ -- Releases: https://pypi.org/project/WTForms/ -- Code: https://github.com/wtforms/wtforms -- Issue tracker: https://github.com/wtforms/wtforms/issues -- Test status: - - - Linux: https://travis-ci.org/wtforms/wtforms - -- Test coverage: https://coveralls.io/github/wtforms/wtforms - - diff --git a/flo-token-explorer/lib/python3.6/site-packages/WTForms-2.2.1.dist-info/RECORD b/flo-token-explorer/lib/python3.6/site-packages/WTForms-2.2.1.dist-info/RECORD deleted file mode 100644 index c7865c2..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/WTForms-2.2.1.dist-info/RECORD +++ /dev/null @@ -1,147 +0,0 @@ -WTForms-2.2.1.dist-info/METADATA,sha256=Aqv5s_FPo1o3VxjnX-nclKn2dBPIVOpTwggPPH-DJs0,3771 -WTForms-2.2.1.dist-info/RECORD,, -WTForms-2.2.1.dist-info/WHEEL,sha256=gduuPyBvFJQSQ0zdyxF7k0zynDXbIbvg5ZBHoXum5uk,110 -WTForms-2.2.1.dist-info/top_level.txt,sha256=k5K62RAEkLEN23p118t3tRgvL6I_k56NiIU7Hk8Phv8,8 -wtforms/__init__.py,sha256=h4gmUHtk1Y9cGJ-l63rhrp-nC9REGdpcRPBGoJKP9hk,380 -wtforms/compat.py,sha256=buY-q7yLNO-2OlxA5QPAcdBO8urjZTtxvFnxg_1Euuo,589 -wtforms/form.py,sha256=ahME3_8CmTuvVsatV-AKqinBkOSEnLOE_nMeQLgrQEA,11608 -wtforms/i18n.py,sha256=RuMPdvfsxHGMqKySUy4DpMfEAzruPK_7gHe6GQTrekc,2175 -wtforms/meta.py,sha256=9yLQuKP4N_OiPBsPy3tBc7auldxhFryZweySDsKL8zI,3822 -wtforms/utils.py,sha256=Zg70vKv96pnHjrkSZ6KlzSo1noh20GV5IqfPy6FrOyA,1504 -wtforms/validators.py,sha256=niMtYGGRijIiZ2ruslYfRP7CTGDul_DHiR-iYen7zRg,19430 -wtforms/csrf/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -wtforms/csrf/core.py,sha256=Ot8eOSAZ88qeDBlSUhRqiLfyWA13g3EFJ4zWZ7EGYnc,3157 -wtforms/csrf/session.py,sha256=baww8MJ5YObyYItXX0Vz5AjxZTdOfTqti3zsD3koka0,3056 -wtforms/ext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -wtforms/ext/appengine/__init__.py,sha256=xXkE1qkwzkkBw4o0YhWGZSZXcsV60DaLxX4fkxNcNe8,269 -wtforms/ext/appengine/db.py,sha256=IEJng34ztXLVSlLxneZ7M4kgGOZOPf9zR_6RTqv6Z1Q,18588 -wtforms/ext/appengine/fields.py,sha256=8Z2BJy7ft0fu_vZksneZ7xdVxdqHkWIMNjgnyfdKtho,7574 -wtforms/ext/appengine/ndb.py,sha256=szIwWA5FyD2lqZefayl__C2UsXMEAGQndqPYPhOH4Vk,17124 -wtforms/ext/csrf/__init__.py,sha256=bIQ48rbnoYrYPZkkGz04b_7PZ8leQY_CExEqYw8yitI,45 -wtforms/ext/csrf/fields.py,sha256=Ta3vLg9KQkpUTCnDF-7CP3IW11X0UqqhvL68sAopYTs,430 -wtforms/ext/csrf/form.py,sha256=ZxmvC3Um2qYeUncu6D390-W62mVQclzwPLP9_R7GedU,1785 -wtforms/ext/csrf/session.py,sha256=aKYb9_jgEmxIgvWuk0cdx9YAGTi9s3F4xy_0ibxyhbo,2627 -wtforms/ext/dateutil/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -wtforms/ext/dateutil/fields.py,sha256=RlupqB1WX_HiKJEYqi9IAxiCElxgbBDHHuXrGF4nbYs,3429 -wtforms/ext/django/__init__.py,sha256=OQ0wr3s5_cUmUU7htHXhobyxVWJS16Ve4qBK_PLs_rw,259 -wtforms/ext/django/fields.py,sha256=pEWxaAtMq5_p8QaJPOffWsX7U4LB5f8Bq8ZBw4fedxk,4580 -wtforms/ext/django/i18n.py,sha256=VLvzJ8lQOqs5Uxnhe4aOE5StGgPEvGhfBEHNrRQFtp0,626 -wtforms/ext/django/orm.py,sha256=Mme5i_o_bJTXGKkabRz03EJmGggPMejAg95XNhYtNUc,6096 -wtforms/ext/django/templatetags/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -wtforms/ext/django/templatetags/wtforms.py,sha256=iCOicSMEkixm5bcJHz35Zx0h6xVwnz1H9JglB_hU69o,2826 -wtforms/ext/i18n/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -wtforms/ext/i18n/form.py,sha256=mfsavr4LGI1GhoFLsWSuSqVPHH6QNiyqoAfY94u-XP0,1608 
-wtforms/ext/i18n/utils.py,sha256=rx9-pNYjIp8DLU-VQ9XxRSXHYZuFv4ktRejzVBPTDBg,530 -wtforms/ext/sqlalchemy/__init__.py,sha256=4U9BzeiFD_YF8pXRsTehei0ekP6jikt2bX4MN3GNT9s,431 -wtforms/ext/sqlalchemy/fields.py,sha256=XwOgJUJCcXvw-QGdF6q2w51m1CI4E_COq8GXb9blgI0,6846 -wtforms/ext/sqlalchemy/orm.py,sha256=6wJN-Zm4YB3st9xsXU5xJR5jQUsdSRqcbEZ7JvvGD9s,10671 -wtforms/fields/__init__.py,sha256=M-0pFfY9EEk-GoYzRkg3yvarM_iP_cRhPjpLEl5KgVU,219 -wtforms/fields/core.py,sha256=KevHc47k4mMJgRGe8Y07UrS_9o_nzXbn3U2HznpdMI0,34307 -wtforms/fields/html5.py,sha256=bwLHIBrEWICRcS80am_lBp6GitDCVIRvBdIWEPJeSz0,1995 -wtforms/fields/simple.py,sha256=dY7cYfb6PXMDjUefXcDeTDWpv3UGyr_BMlebJAeoRso,2218 -wtforms/locale/README.md,sha256=xL3Ain6UPZK3UdL8tMrIKwfodEsPT0IYCVDpI6do524,1062 -wtforms/locale/wtforms.pot,sha256=Sqe4LRpObVRUc30htYXgZuueKYfW7wt2lNVKtM_Jrr0,4170 -wtforms/locale/ar/LC_MESSAGES/wtforms.mo,sha256=r1DDYnBCr1hT7KwEG3NpQLR52i4j_-er5ENIVqT9Sbo,4530 -wtforms/locale/ar/LC_MESSAGES/wtforms.po,sha256=Qkhg_pS-ZEf7jEZz76mDC47UPpqWcU_8t7L88ALAPvk,6262 -wtforms/locale/bg/LC_MESSAGES/wtforms.mo,sha256=aPnglyINf0hH4FGUM3U5OJpqcJT_8XRx6GiaD4Jif3g,4297 -wtforms/locale/bg/LC_MESSAGES/wtforms.po,sha256=xflJaMOGUTNN7zbFMWL-FbMVjmj-Svmvkek84mJl5NI,6356 -wtforms/locale/ca/LC_MESSAGES/wtforms.mo,sha256=zBX48Ru44A2O82FXwC9CwzU3_FiFkUyb4KGNya4toSg,3425 -wtforms/locale/ca/LC_MESSAGES/wtforms.po,sha256=oT09ydRQNsmf0a1uwskao0wfbwQqAh2tKXjFqI_iscw,5465 -wtforms/locale/cs_CZ/LC_MESSAGES/wtforms.mo,sha256=MJQPoiMNPfdHYX5eQQ2OW7PmvQ9BFETva2qm3xmPSvo,3618 -wtforms/locale/cs_CZ/LC_MESSAGES/wtforms.po,sha256=MZ1Iv28-oX4dqzSPgGo65YU3iijeBmYBKZSGsl8YYS0,5596 -wtforms/locale/cy/LC_MESSAGES/wtforms.mo,sha256=8pJPG9dguZLej33ksWSwWmCOKIJ7VmpNVlaDMb30_lc,3371 -wtforms/locale/cy/LC_MESSAGES/wtforms.po,sha256=DTGkDUWJ1MsZqFPV8YhwHaBI1uJP6uXwiud7K3LW1yw,5415 -wtforms/locale/de/LC_MESSAGES/wtforms.mo,sha256=D4BRsJeeT_cKYagO7W1LHQ8YpwC2c7_0hbv3tDgk82E,3412 -wtforms/locale/de/LC_MESSAGES/wtforms.po,sha256=BF7F3vwQOAL_yaZTHi7x2KZnaCTzz3MNUNCtuc6e47A,5457 -wtforms/locale/de_CH/LC_MESSAGES/wtforms.mo,sha256=lBUgz2N_AlkXB4W-CxaNGuHdwhgTrYCPtwM9DWL-pP0,3418 -wtforms/locale/de_CH/LC_MESSAGES/wtforms.po,sha256=LiAqravsNbETdXHJiOi3vJD4o3hWrTRZWSHcLNvHjgc,5477 -wtforms/locale/el/LC_MESSAGES/wtforms.mo,sha256=r0_oQGB_KYBZdSmFsielQMCF0P7rgsLDCA28u37XAkw,4307 -wtforms/locale/el/LC_MESSAGES/wtforms.po,sha256=snlBcC-cjlFdpIbSG9pRGYlWFhl1EaQX72Umv2PWfp8,6345 -wtforms/locale/en/LC_MESSAGES/wtforms.mo,sha256=DCJnvT-_j_oec9za8vxn0FZSog4mm5PnaiWIpesctDE,3285 -wtforms/locale/en/LC_MESSAGES/wtforms.po,sha256=-GGpFQm9Sdz3Yg0EqltIGTEcOwnYqmepRSREkHV_UVU,5347 -wtforms/locale/es/LC_MESSAGES/wtforms.mo,sha256=U_oe-S3-i6A2VsBTVKxZ8N5QAEbpqXBlenSIaLnFupE,3394 -wtforms/locale/es/LC_MESSAGES/wtforms.po,sha256=P36kwWq3LZNjYHXTyoyMl86WziWpZYXxGFsFiqev1oU,5368 -wtforms/locale/et/LC_MESSAGES/wtforms.mo,sha256=Ugx0IpG1TJtP-DKpNZiVyo-L5F8ESrr_qCpPXR96pww,3456 -wtforms/locale/et/LC_MESSAGES/wtforms.po,sha256=doeYijsnPkyHy_JK4JRH6AQdHG8uaQTQWYwsCP6_Iuk,5497 -wtforms/locale/fa/LC_MESSAGES/wtforms.mo,sha256=exJzwjxXvOALqJhsQetN9Kcad4Lx62Exvnx2jtzja8Q,4137 -wtforms/locale/fa/LC_MESSAGES/wtforms.po,sha256=MHjVwlp-MHMV-TTUUkUYtuBdtbEjfV0jzVSgWHFv80Q,6149 -wtforms/locale/fi/LC_MESSAGES/wtforms.mo,sha256=NiodjvNOW25UkxEpuCioXdpvjbGwPoYmz0dfiMxE3S8,3416 -wtforms/locale/fi/LC_MESSAGES/wtforms.po,sha256=4uP6A6sfNoATdRR_8PlecqiiTsVzIp9qpcn9qe0jGMA,5456 -wtforms/locale/fr/LC_MESSAGES/wtforms.mo,sha256=BoZI4I1MK0-nipyLWOSG-s_55E9x9eG0WqYdz1qZ1KQ,3484 
-wtforms/locale/fr/LC_MESSAGES/wtforms.po,sha256=60tb7Uyco3tdKc1Z4sdvwta46V_RGSmvXM9SdvuBvhg,5529 -wtforms/locale/he/LC_MESSAGES/wtforms.mo,sha256=UhetGKepgOnGXa5IsjZBdOi5IbPLCufpIugkkDuXkjQ,3649 -wtforms/locale/he/LC_MESSAGES/wtforms.po,sha256=GJy7zG0ik8U0YnubNlfjjl9iPT62w3XyaAP4kNCntkQ,5657 -wtforms/locale/hu/LC_MESSAGES/wtforms.mo,sha256=Z-qEeJI422dmm7-2qJIgCuCS1eyS2pJfoavPnGK2334,3544 -wtforms/locale/hu/LC_MESSAGES/wtforms.po,sha256=eiyNXYa4_XLQWRd-j4KmAXml27cYAPjIBhjjIv9WMbE,5492 -wtforms/locale/it/LC_MESSAGES/wtforms.mo,sha256=petuqW4x1p1S69sJax15WpLQryWoDRXW0uQjr58E9Jw,3510 -wtforms/locale/it/LC_MESSAGES/wtforms.po,sha256=EuI0Plf7nLfg5NcRPqQvfg3z7fpfIdRQGBmyq1ivpGE,5556 -wtforms/locale/ja/LC_MESSAGES/wtforms.mo,sha256=thfPsxKfihz2wNvb9LA7MzYb4PnfyXT81gaE_802AlM,3736 -wtforms/locale/ja/LC_MESSAGES/wtforms.po,sha256=ydUzTwxnk8sUQcPTeS7AuU7sgArIMWgbDzxFt85mhG8,5753 -wtforms/locale/ko/LC_MESSAGES/wtforms.mo,sha256=ZRJGcizRhJifuw4rElZ6Bb-hNdH3zqCYzxhwYJisCpU,3851 -wtforms/locale/ko/LC_MESSAGES/wtforms.po,sha256=9os2sRuqxoX0fTWHr47IvBwlkY_sDoLKdn3byS7MfjQ,5842 -wtforms/locale/nb/LC_MESSAGES/wtforms.mo,sha256=0YxYTElaTGBpIurcZqZHPU2lXslt3UNF_HOw575OAKM,3337 -wtforms/locale/nb/LC_MESSAGES/wtforms.po,sha256=NXrr3nrnoOo2x2t0g8UZXT2Jm9KQnkYdnieeoB7U9Yw,5387 -wtforms/locale/nl/LC_MESSAGES/wtforms.mo,sha256=8wLTkRK82jpG5oDkqM-jLNVLYHte4fRHYF6VAN7lB6U,3350 -wtforms/locale/nl/LC_MESSAGES/wtforms.po,sha256=9xSoztymVdIgFBA2vnzaHeSK4qEGTGbiPbfwjdcHN0k,5388 -wtforms/locale/pl/LC_MESSAGES/wtforms.mo,sha256=QUs5iz_IOoo6oCVmcpWWNNkXyqYA0X01wERmQYQiXYo,3610 -wtforms/locale/pl/LC_MESSAGES/wtforms.po,sha256=XrkwltOhyLHrOOgxYVvcmR2Hcw4LUN3_sZEdJofS5Vk,5652 -wtforms/locale/pt/LC_MESSAGES/wtforms.mo,sha256=PC5HRiM-QYt4GX3eMPapzG31jLKmo3zt6nKGVb_o174,3438 -wtforms/locale/pt/LC_MESSAGES/wtforms.po,sha256=cXIZJJZ4UDDR24yrQ-XYck3klonRZd9Ajt8A7dqqJc4,5481 -wtforms/locale/ru/LC_MESSAGES/wtforms.mo,sha256=ski71qWfnwGL9GtZEQZ1fksHBeZsePxi4ZN16AlLeZE,4406 -wtforms/locale/ru/LC_MESSAGES/wtforms.po,sha256=3eeI-CxivICl6FzYpKrqfYnz2rB68hMNCicC_9aM90s,6407 -wtforms/locale/sk/LC_MESSAGES/wtforms.mo,sha256=Lo_5eGNF_LnkJsJLOde_YNWE_F3UZtScFTFlO4v-EyU,3548 -wtforms/locale/sk/LC_MESSAGES/wtforms.po,sha256=ywPpnxYnHgEkD6Ab7LJgyqgC6dIj8cBmn6hB21aS3NI,5586 -wtforms/locale/sv/LC_MESSAGES/wtforms.mo,sha256=U7noK9cso_pRgaQcvF4duRQ69joI7SHN0XcHyd0mAVg,3376 -wtforms/locale/sv/LC_MESSAGES/wtforms.po,sha256=jMtpwUlQPbi4Xiut9KNfLjGhsjqmys1Y_iGZ3lJA4NQ,5416 -wtforms/locale/tr/LC_MESSAGES/wtforms.mo,sha256=kp3J8k2FVBaXVVJJclGnUmZTEUYHS6Hg1v2baGwtReo,3391 -wtforms/locale/tr/LC_MESSAGES/wtforms.po,sha256=PFo_e3vKCMgKtkcQSaXqNOlr-YgzxvgUtg8Ju5M-8f8,5431 -wtforms/locale/uk/LC_MESSAGES/wtforms.mo,sha256=5iZS-8LmCyeteqN3TXQ15byNTGJbjpsDa8AF3zh6L1o,4451 -wtforms/locale/uk/LC_MESSAGES/wtforms.po,sha256=fIijOGm8gXO-yZkdYoX6kWMPXZE6j9yALhekfQCK5KU,6520 -wtforms/locale/zh/LC_MESSAGES/wtforms.mo,sha256=yCzjCCwAf5yu80NhllpGqlk7V6PBFyJYfoZ6IF2dQnM,3362 -wtforms/locale/zh/LC_MESSAGES/wtforms.po,sha256=ZIh59O9rnjZMRpdKFfvrk59wouOAUHyjZS0f-TMsN6U,5378 -wtforms/locale/zh_TW/LC_MESSAGES/wtforms.mo,sha256=iha5oFUQDVs7wPBpcWLLAP_Jgm42Ea9n9xIlaCsUsNE,3204 -wtforms/locale/zh_TW/LC_MESSAGES/wtforms.po,sha256=a7q2T9fdwN_xESBCD4umHMfSptN7Qt-abjO9UFRWDBo,5218 -wtforms/widgets/__init__.py,sha256=nxI0oIsofuJCNgc4Oxwzf3_q3IiCYZTSiCoEuSRZeJM,124 -wtforms/widgets/core.py,sha256=X3I5PRFbPeX1nU3DrPpsJyglsObujdN1hMxHHFTkKOk,11150 -wtforms/widgets/html5.py,sha256=LDnNegNTx-LYpw4YkbymvS2TaA2V03p2rRdYN83skYQ,2440 
-WTForms-2.2.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -wtforms/csrf/__pycache__/session.cpython-36.pyc,, -wtforms/csrf/__pycache__/core.cpython-36.pyc,, -wtforms/csrf/__pycache__/__init__.cpython-36.pyc,, -wtforms/widgets/__pycache__/html5.cpython-36.pyc,, -wtforms/widgets/__pycache__/core.cpython-36.pyc,, -wtforms/widgets/__pycache__/__init__.cpython-36.pyc,, -wtforms/__pycache__/form.cpython-36.pyc,, -wtforms/__pycache__/i18n.cpython-36.pyc,, -wtforms/__pycache__/compat.cpython-36.pyc,, -wtforms/__pycache__/meta.cpython-36.pyc,, -wtforms/__pycache__/__init__.cpython-36.pyc,, -wtforms/__pycache__/utils.cpython-36.pyc,, -wtforms/__pycache__/validators.cpython-36.pyc,, -wtforms/ext/csrf/__pycache__/form.cpython-36.pyc,, -wtforms/ext/csrf/__pycache__/session.cpython-36.pyc,, -wtforms/ext/csrf/__pycache__/__init__.cpython-36.pyc,, -wtforms/ext/csrf/__pycache__/fields.cpython-36.pyc,, -wtforms/ext/sqlalchemy/__pycache__/orm.cpython-36.pyc,, -wtforms/ext/sqlalchemy/__pycache__/__init__.cpython-36.pyc,, -wtforms/ext/sqlalchemy/__pycache__/fields.cpython-36.pyc,, -wtforms/ext/django/__pycache__/orm.cpython-36.pyc,, -wtforms/ext/django/__pycache__/i18n.cpython-36.pyc,, -wtforms/ext/django/__pycache__/__init__.cpython-36.pyc,, -wtforms/ext/django/__pycache__/fields.cpython-36.pyc,, -wtforms/ext/django/templatetags/__pycache__/wtforms.cpython-36.pyc,, -wtforms/ext/django/templatetags/__pycache__/__init__.cpython-36.pyc,, -wtforms/ext/appengine/__pycache__/db.cpython-36.pyc,, -wtforms/ext/appengine/__pycache__/ndb.cpython-36.pyc,, -wtforms/ext/appengine/__pycache__/__init__.cpython-36.pyc,, -wtforms/ext/appengine/__pycache__/fields.cpython-36.pyc,, -wtforms/ext/dateutil/__pycache__/__init__.cpython-36.pyc,, -wtforms/ext/dateutil/__pycache__/fields.cpython-36.pyc,, -wtforms/ext/__pycache__/__init__.cpython-36.pyc,, -wtforms/ext/i18n/__pycache__/form.cpython-36.pyc,, -wtforms/ext/i18n/__pycache__/__init__.cpython-36.pyc,, -wtforms/ext/i18n/__pycache__/utils.cpython-36.pyc,, -wtforms/fields/__pycache__/simple.cpython-36.pyc,, -wtforms/fields/__pycache__/html5.cpython-36.pyc,, -wtforms/fields/__pycache__/core.cpython-36.pyc,, -wtforms/fields/__pycache__/__init__.cpython-36.pyc,, diff --git a/flo-token-explorer/lib/python3.6/site-packages/WTForms-2.2.1.dist-info/WHEEL b/flo-token-explorer/lib/python3.6/site-packages/WTForms-2.2.1.dist-info/WHEEL deleted file mode 100644 index 1316c41..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/WTForms-2.2.1.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.31.1) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/flo-token-explorer/lib/python3.6/site-packages/WTForms-2.2.1.dist-info/top_level.txt b/flo-token-explorer/lib/python3.6/site-packages/WTForms-2.2.1.dist-info/top_level.txt deleted file mode 100644 index 26d80fd..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/WTForms-2.2.1.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -wtforms diff --git a/flo-token-explorer/lib/python3.6/site-packages/Werkzeug-0.15.2.dist-info/INSTALLER b/flo-token-explorer/lib/python3.6/site-packages/Werkzeug-0.15.2.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Werkzeug-0.15.2.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/flo-token-explorer/lib/python3.6/site-packages/Werkzeug-0.15.2.dist-info/LICENSE.rst 
b/flo-token-explorer/lib/python3.6/site-packages/Werkzeug-0.15.2.dist-info/LICENSE.rst deleted file mode 100644 index c37cae4..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Werkzeug-0.15.2.dist-info/LICENSE.rst +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2007 Pallets - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/flo-token-explorer/lib/python3.6/site-packages/Werkzeug-0.15.2.dist-info/METADATA b/flo-token-explorer/lib/python3.6/site-packages/Werkzeug-0.15.2.dist-info/METADATA deleted file mode 100644 index 2c07a51..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Werkzeug-0.15.2.dist-info/METADATA +++ /dev/null @@ -1,132 +0,0 @@ -Metadata-Version: 2.1 -Name: Werkzeug -Version: 0.15.2 -Summary: The comprehensive WSGI web application library. 
-Home-page: https://palletsprojects.com/p/werkzeug/ -Author: Armin Ronacher -Author-email: armin.ronacher@active-4.com -Maintainer: The Pallets Team -Maintainer-email: contact@palletsprojects.com -License: BSD-3-Clause -Project-URL: Documentation, https://werkzeug.palletsprojects.com/ -Project-URL: Code, https://github.com/pallets/werkzeug -Project-URL: Issue tracker, https://github.com/pallets/werkzeug/issues -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Web Environment -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content -Classifier: Topic :: Internet :: WWW/HTTP :: WSGI -Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Application -Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware -Classifier: Topic :: Software Development :: Libraries :: Application Frameworks -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* -Provides-Extra: dev -Requires-Dist: pytest ; extra == 'dev' -Requires-Dist: coverage ; extra == 'dev' -Requires-Dist: tox ; extra == 'dev' -Requires-Dist: sphinx ; extra == 'dev' -Requires-Dist: pallets-sphinx-themes ; extra == 'dev' -Requires-Dist: sphinx-issues ; extra == 'dev' -Provides-Extra: termcolor -Requires-Dist: termcolor ; extra == 'termcolor' -Provides-Extra: watchdog -Requires-Dist: watchdog ; extra == 'watchdog' - -Werkzeug -======== - -*werkzeug* German noun: "tool". Etymology: *werk* ("work"), *zeug* ("stuff") - -Werkzeug is a comprehensive `WSGI`_ web application library. It began as -a simple collection of various utilities for WSGI applications and has -become one of the most advanced WSGI utility libraries. - -It includes: - -- An interactive debugger that allows inspecting stack traces and - source code in the browser with an interactive interpreter for any - frame in the stack. -- A full-featured request object with objects to interact with - headers, query args, form data, files, and cookies. -- A response object that can wrap other WSGI applications and handle - streaming data. -- A routing system for matching URLs to endpoints and generating URLs - for endpoints, with an extensible system for capturing variables - from URLs. -- HTTP utilities to handle entity tags, cache control, dates, user - agents, cookies, files, and more. -- A threaded WSGI server for use while developing applications - locally. -- A test client for simulating HTTP requests during testing without - requiring running a server. - -Werkzeug is Unicode aware and doesn't enforce any dependencies. It is up -to the developer to choose a template engine, database adapter, and even -how to handle requests. It can be used to build all sorts of end user -applications such as blogs, wikis, or bulletin boards. 
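To make the routing system mentioned in the feature list above concrete, here is a minimal sketch using werkzeug.routing's Map and Rule; the host, endpoint names, and URL patterns are illustrative only, not taken from this package:

.. code-block:: python

    from werkzeug.routing import Map, Rule

    # Declare the URL patterns once; <angle brackets> capture variables.
    url_map = Map([
        Rule('/', endpoint='index'),
        Rule('/user/<username>', endpoint='user_profile'),
    ])

    # Bind to a host to get an adapter, then match paths to endpoints.
    urls = url_map.bind('example.com')
    endpoint, args = urls.match('/user/alice')
    print(endpoint, args)  # user_profile {'username': 'alice'}

    # build() goes the other way: endpoint + values -> URL path.
    print(urls.build('user_profile', {'username': 'alice'}))  # /user/alice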
- -`Flask`_ wraps Werkzeug, using it to handle the details of WSGI while -providing more structure and patterns for defining powerful -applications. - - -Installing ----------- - -Install and update using `pip`_: - -.. code-block:: text - - pip install -U Werkzeug - - -A Simple Example ----------------- - -.. code-block:: python - - from werkzeug.wrappers import Request, Response - - @Request.application - def application(request): - return Response('Hello, World!') - - if __name__ == '__main__': - from werkzeug.serving import run_simple - run_simple('localhost', 4000, application) - - -Links ------ - -- Website: https://www.palletsprojects.com/p/werkzeug/ -- Documentation: https://werkzeug.palletsprojects.com/ -- Releases: https://pypi.org/project/Werkzeug/ -- Code: https://github.com/pallets/werkzeug -- Issue tracker: https://github.com/pallets/werkzeug/issues -- Test status: - - - Linux, Mac: https://travis-ci.org/pallets/werkzeug - - Windows: https://ci.appveyor.com/project/pallets/werkzeug - -- Test coverage: https://codecov.io/gh/pallets/werkzeug - -.. _WSGI: https://wsgi.readthedocs.io/en/latest/ -.. _Flask: https://www.palletsprojects.com/p/flask/ -.. _pip: https://pip.pypa.io/en/stable/quickstart/ - - diff --git a/flo-token-explorer/lib/python3.6/site-packages/Werkzeug-0.15.2.dist-info/RECORD b/flo-token-explorer/lib/python3.6/site-packages/Werkzeug-0.15.2.dist-info/RECORD deleted file mode 100644 index 7c0b625..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Werkzeug-0.15.2.dist-info/RECORD +++ /dev/null @@ -1,119 +0,0 @@ -werkzeug/__init__.py,sha256=CxWnTwGG-pO1kcwNE3EKdb73DrCXyhY9cF5mIkp8DMs,6805 -werkzeug/_compat.py,sha256=oBEVVrJT4sqYdIZbUWmgV9T9w257RhTSDBlTjh0Zbb0,6431 -werkzeug/_internal.py,sha256=Wx7cpTRWqeBd0LAqobo0lCO4pNUW4oav6XKf7Taumgk,14590 -werkzeug/_reloader.py,sha256=8B8T1npsQT-96nGeVJjV1KXWK_ong6ZlTXOWgxfRLpg,11241 -werkzeug/datastructures.py,sha256=8HoA4Gu9i7ZWi5OBjx244OLWvDEE4JTQQUUTRoAYKog,91761 -werkzeug/exceptions.py,sha256=YQSIZDq-_xpUvGklLVlehso1mcGMzF2AJaOhWUTIs68,22933 -werkzeug/filesystem.py,sha256=HzKl-j0Hd8Jl66j778UbPTAYNnY6vUZgYLlBZ0e7uw0,2101 -werkzeug/formparser.py,sha256=tN6SO4mn6RUsxRZq4qVBWXbNWNuasn2KaBznTieMaVk,21790 -werkzeug/http.py,sha256=t0ET2tySAf9ZWdEelVWJoLaZzFViYpjoUmiYHPz10-E,43304 -werkzeug/local.py,sha256=USVEcgIg-oCiUJFPIecFIW9jkIejfw4Fjf1u5yN-Np4,14456 -werkzeug/posixemulation.py,sha256=gSSiv1SCmOyzOM_nq1ZaZCtxP__C5MeDJl_4yXJmi4Q,3541 -werkzeug/routing.py,sha256=w36bDJ4-LFo56aY6Uv_FfZ69id7qsB9AmKdQ4j8Ihj0,80733 -werkzeug/security.py,sha256=mfxfcM-D6U8LhsyDK5W_rnL1oVTZWgyt-E8E4FlSdrI,8026 -werkzeug/serving.py,sha256=xMxg69-SL7I7cIQqA9c2Znb1JvnVX12EHZCu0xm_gu8,35668 -werkzeug/test.py,sha256=jTWQtkfYpmopDGB6SKqDTe7H1X-WiSYq98AkYimyhXo,40641 -werkzeug/testapp.py,sha256=hcKBzorVlSHC-uGvGXXjCm3FzCwGWq4yjbTG3Pr7MV8,9301 -werkzeug/urls.py,sha256=8yHdYI99N__-isoTwvGqvuj9QhOh66dd1Xh1DIp0q0g,39261 -werkzeug/useragents.py,sha256=FIonyUF790Ro8OG8cJqG1zixhg5YzXdHmkZbrnK0QRo,5965 -werkzeug/utils.py,sha256=O20Y0qWk5O1IWamC_A5gkmzR5cgBd3yDIHviwBTfNB0,27387 -werkzeug/wsgi.py,sha256=h-zyAeInwE6X6ciSnHI14ImA85adV-F861PmR7UGtRk,36681 -werkzeug/contrib/__init__.py,sha256=EvNyiiCF49j5P0fZYJ3ZGe82ofXdSBvUNqWFwwBMibQ,553 -werkzeug/contrib/atom.py,sha256=KpPJcTfzNW1J0VNQckCbVtVGBe3V8s451tOUya4qByI,15415 -werkzeug/contrib/cache.py,sha256=AEh5UIw-Ui7sHZnlpvrD7ueOKUhCaAD55FXiPtXbbRs,32115 -werkzeug/contrib/fixers.py,sha256=peEtAiIWYT5bh00EWEPOGKzGZXivOzVhhzKPvvzk1RM,9193 
-werkzeug/contrib/iterio.py,sha256=KKHa_8aCF_uhoeQVyPGUwrivuB6y6nNdXYo2D2vzOA8,10928 -werkzeug/contrib/lint.py,sha256=NdIxP0E2kVt1xDIxoaIz3Rcl8ZdgmHaFbGTOaybGpN4,296 -werkzeug/contrib/profiler.py,sha256=k_oMLU-AtsVvQ9TxNdermY6FuzSTYr-WE-ZmWb_DMyU,1229 -werkzeug/contrib/securecookie.py,sha256=xbtElskGmtbiApgOJ5WhGgqGDs_68_PcWzqDIAY_QZY,13076 -werkzeug/contrib/sessions.py,sha256=oVXh_7-6_CWOMxDKqcaK05H8RpYoWqAd3al-KzMFPYs,13042 -werkzeug/contrib/wrappers.py,sha256=ZmNk0wpzD66yomPnQxapndZQs4c0kNJaRzqI-BVxeQk,13199 -werkzeug/debug/__init__.py,sha256=bpogxaNQ_0oOnkDZV7D38CeVvxlCsYCk0H2D7TYrcIM,17939 -werkzeug/debug/console.py,sha256=HoBL21bbcmtiCLqiLDJLZi1LYnWMZxjoXYH5WaZB1XY,5469 -werkzeug/debug/repr.py,sha256=lIwuhbyrMwVe3P_cFqNyqzHL7P93TLKod7lw9clydEw,9621 -werkzeug/debug/tbtools.py,sha256=HstooKsBY2a3iy6bzplU68JwmYHpn-iblnBcy5o4vA0,20236 -werkzeug/debug/shared/FONT_LICENSE,sha256=LwAVEI1oYnvXiNMT9SnCH_TaLCxCpeHziDrMg0gPkAI,4673 -werkzeug/debug/shared/console.png,sha256=bxax6RXXlvOij_KeqvSNX0ojJf83YbnZ7my-3Gx9w2A,507 -werkzeug/debug/shared/debugger.js,sha256=rOhqZMRfpZnnu6_XCGn6wMWPhtfwRAcyZKksdIxPJas,6400 -werkzeug/debug/shared/jquery.js,sha256=FgpCb_KJQlLNfOu91ta32o_NMZxltwRo8QtmkMRdAu8,86927 -werkzeug/debug/shared/less.png,sha256=-4-kNRaXJSONVLahrQKUxMwXGm9R4OnZ9SxDGpHlIR4,191 -werkzeug/debug/shared/more.png,sha256=GngN7CioHQoV58rH6ojnkYi8c_qED2Aka5FO5UXrReY,200 -werkzeug/debug/shared/source.png,sha256=RoGcBTE4CyCB85GBuDGTFlAnUqxwTBiIfDqW15EpnUQ,818 -werkzeug/debug/shared/style.css,sha256=_Y98F6dR2CBUZNKylsOdgSHjwVaVy717WqE3-xJVcmE,6581 -werkzeug/debug/shared/ubuntu.ttf,sha256=1eaHFyepmy4FyDvjLVzpITrGEBu_CZYY94jE0nED1c0,70220 -werkzeug/middleware/__init__.py,sha256=f1SFZo67IlW4k1uqKzNHxYQlsakUS-D6KK_j0e3jjwQ,549 -werkzeug/middleware/dispatcher.py,sha256=_-KoMzHtcISHS7ouWKAOraqlCLprdh83YOAn_8DjLp8,2240 -werkzeug/middleware/http_proxy.py,sha256=lRjTdMmghHiZuZrS7_UJ3gZc-vlFizhBbFZ-XZPLwIA,7117 -werkzeug/middleware/lint.py,sha256=6fmVw6-pLEKtfkF9VwgtGsZhSzoSFl6D466rqJeNL1M,12791 -werkzeug/middleware/profiler.py,sha256=2BhLPLFGwEmXQ1nVDr9F5Dijf7vvqQHXZGPLZ1vh110,4468 -werkzeug/middleware/proxy_fix.py,sha256=Y86VcU2oAQ--x0mi4iFVJyEFMzp3Ao8q0zvr_SsrpNw,8506 -werkzeug/middleware/shared_data.py,sha256=6aUzMABeOLul0Krf5S_hs-T7oUc7ZIQ3B8tAO4p8C7E,8541 -werkzeug/wrappers/__init__.py,sha256=S4VioKAmF_av9Ec9zQvG71X1EOkYfPx1TYck9jyDiyY,1384 -werkzeug/wrappers/accept.py,sha256=TIvjUc0g73fhTWX54wg_D9NNzKvpnG1X8u1w26tK1o8,1760 -werkzeug/wrappers/auth.py,sha256=Pmn6iaGHBrUyHbJpW0lZhO_q9RVoAa5QalaTqcavdAI,1158 -werkzeug/wrappers/base_request.py,sha256=k5mu1UU99X_xrPqmXj44pzJbkPRpgvwMuP2j9vl8QFU,26873 -werkzeug/wrappers/base_response.py,sha256=ZA1XlxtsbvG4SpbdOEMT5--z7aZM0w6C5y33W8wOXa4,27906 -werkzeug/wrappers/common_descriptors.py,sha256=OJ8jOwMun4L-BxCuFPkK1vaefx_-Y5IndVXvvn_ems4,12089 -werkzeug/wrappers/etag.py,sha256=TwMO1fvluXbBqnFTj2DvrCNa3mYhbHYe1UZAVzfXvuU,12533 -werkzeug/wrappers/json.py,sha256=HvK_A4NpO0sLqgb10sTJcoZydYOwyNiPCJPV7SVgcgE,4343 -werkzeug/wrappers/request.py,sha256=qPo2zmmBv4HxboywtWZb2pJL8OPXo07BUXBKw2j9Fi8,1338 -werkzeug/wrappers/response.py,sha256=vDZFEGzDOG0jjmS0uVVjeT3hqRt1hFaf15npnx7RD28,2329 -werkzeug/wrappers/user_agent.py,sha256=4bTgQKTLQmGUyxOREYOzbeiFP2VwIOE7E14AhUB5NqM,444 -Werkzeug-0.15.2.dist-info/LICENSE.rst,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475 -Werkzeug-0.15.2.dist-info/METADATA,sha256=YmEsyyRecto-hVeBAWJ6QXzxjMIjCLWXMGPKGQpnc4E,4818 -Werkzeug-0.15.2.dist-info/WHEEL,sha256=HX-v9-noUkyUoxyZ1PMSuS7auUxDAR4VBdoYLqD0xws,110 
-Werkzeug-0.15.2.dist-info/top_level.txt,sha256=QRyj2VjwJoQkrwjwFIOlB8Xg3r9un0NtqVHQF-15xaw,9 -Werkzeug-0.15.2.dist-info/RECORD,, -Werkzeug-0.15.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -werkzeug/middleware/__pycache__/shared_data.cpython-36.pyc,, -werkzeug/middleware/__pycache__/proxy_fix.cpython-36.pyc,, -werkzeug/middleware/__pycache__/dispatcher.cpython-36.pyc,, -werkzeug/middleware/__pycache__/profiler.cpython-36.pyc,, -werkzeug/middleware/__pycache__/lint.cpython-36.pyc,, -werkzeug/middleware/__pycache__/http_proxy.cpython-36.pyc,, -werkzeug/middleware/__pycache__/__init__.cpython-36.pyc,, -werkzeug/debug/__pycache__/tbtools.cpython-36.pyc,, -werkzeug/debug/__pycache__/__init__.cpython-36.pyc,, -werkzeug/debug/__pycache__/repr.cpython-36.pyc,, -werkzeug/debug/__pycache__/console.cpython-36.pyc,, -werkzeug/contrib/__pycache__/securecookie.cpython-36.pyc,, -werkzeug/contrib/__pycache__/cache.cpython-36.pyc,, -werkzeug/contrib/__pycache__/wrappers.cpython-36.pyc,, -werkzeug/contrib/__pycache__/fixers.cpython-36.pyc,, -werkzeug/contrib/__pycache__/profiler.cpython-36.pyc,, -werkzeug/contrib/__pycache__/lint.cpython-36.pyc,, -werkzeug/contrib/__pycache__/atom.cpython-36.pyc,, -werkzeug/contrib/__pycache__/sessions.cpython-36.pyc,, -werkzeug/contrib/__pycache__/__init__.cpython-36.pyc,, -werkzeug/contrib/__pycache__/iterio.cpython-36.pyc,, -werkzeug/wrappers/__pycache__/response.cpython-36.pyc,, -werkzeug/wrappers/__pycache__/base_request.cpython-36.pyc,, -werkzeug/wrappers/__pycache__/request.cpython-36.pyc,, -werkzeug/wrappers/__pycache__/user_agent.cpython-36.pyc,, -werkzeug/wrappers/__pycache__/auth.cpython-36.pyc,, -werkzeug/wrappers/__pycache__/accept.cpython-36.pyc,, -werkzeug/wrappers/__pycache__/etag.cpython-36.pyc,, -werkzeug/wrappers/__pycache__/json.cpython-36.pyc,, -werkzeug/wrappers/__pycache__/__init__.cpython-36.pyc,, -werkzeug/wrappers/__pycache__/common_descriptors.cpython-36.pyc,, -werkzeug/wrappers/__pycache__/base_response.cpython-36.pyc,, -werkzeug/__pycache__/formparser.cpython-36.pyc,, -werkzeug/__pycache__/_compat.cpython-36.pyc,, -werkzeug/__pycache__/useragents.cpython-36.pyc,, -werkzeug/__pycache__/local.cpython-36.pyc,, -werkzeug/__pycache__/posixemulation.cpython-36.pyc,, -werkzeug/__pycache__/routing.cpython-36.pyc,, -werkzeug/__pycache__/urls.cpython-36.pyc,, -werkzeug/__pycache__/serving.cpython-36.pyc,, -werkzeug/__pycache__/filesystem.cpython-36.pyc,, -werkzeug/__pycache__/_internal.cpython-36.pyc,, -werkzeug/__pycache__/testapp.cpython-36.pyc,, -werkzeug/__pycache__/test.cpython-36.pyc,, -werkzeug/__pycache__/wsgi.cpython-36.pyc,, -werkzeug/__pycache__/__init__.cpython-36.pyc,, -werkzeug/__pycache__/security.cpython-36.pyc,, -werkzeug/__pycache__/http.cpython-36.pyc,, -werkzeug/__pycache__/utils.cpython-36.pyc,, -werkzeug/__pycache__/_reloader.cpython-36.pyc,, -werkzeug/__pycache__/datastructures.cpython-36.pyc,, -werkzeug/__pycache__/exceptions.cpython-36.pyc,, diff --git a/flo-token-explorer/lib/python3.6/site-packages/Werkzeug-0.15.2.dist-info/WHEEL b/flo-token-explorer/lib/python3.6/site-packages/Werkzeug-0.15.2.dist-info/WHEEL deleted file mode 100644 index c8240f0..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Werkzeug-0.15.2.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.33.1) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/flo-token-explorer/lib/python3.6/site-packages/Werkzeug-0.15.2.dist-info/top_level.txt 
b/flo-token-explorer/lib/python3.6/site-packages/Werkzeug-0.15.2.dist-info/top_level.txt deleted file mode 100644 index 6fe8da8..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/Werkzeug-0.15.2.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -werkzeug diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic-1.0.9.dist-info/INSTALLER b/flo-token-explorer/lib/python3.6/site-packages/alembic-1.0.9.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic-1.0.9.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic-1.0.9.dist-info/LICENSE b/flo-token-explorer/lib/python3.6/site-packages/alembic-1.0.9.dist-info/LICENSE deleted file mode 100644 index 624883f..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic-1.0.9.dist-info/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -This is the MIT license: http://www.opensource.org/licenses/mit-license.php - -Copyright (C) 2009-2019 by Michael Bayer. -Alembic is a trademark of Michael Bayer. - -Permission is hereby granted, free of charge, to any person obtaining a copy of this -software and associated documentation files (the "Software"), to deal in the Software -without restriction, including without limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons -to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, -INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE -FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic-1.0.9.dist-info/METADATA b/flo-token-explorer/lib/python3.6/site-packages/alembic-1.0.9.dist-info/METADATA deleted file mode 100644 index d3fa0e6..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic-1.0.9.dist-info/METADATA +++ /dev/null @@ -1,109 +0,0 @@ -Metadata-Version: 2.1 -Name: alembic -Version: 1.0.9 -Summary: A database migration tool for SQLAlchemy. 
-Home-page: https://alembic.sqlalchemy.org -Author: Mike Bayer -Author-email: mike@zzzcomputing.com -License: MIT -Keywords: SQLAlchemy migrations -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Console -Classifier: Intended Audience :: Developers -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Classifier: Topic :: Database :: Front-Ends -Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* -Requires-Dist: SQLAlchemy (>=0.9.0) -Requires-Dist: Mako -Requires-Dist: python-editor (>=0.3) -Requires-Dist: python-dateutil - -Alembic is a database migrations tool written by the author -of `SQLAlchemy <http://www.sqlalchemy.org>`_. A migrations tool -offers the following functionality: - -* Can emit ALTER statements to a database in order to change - the structure of tables and other constructs -* Provides a system whereby "migration scripts" may be constructed; - each script indicates a particular series of steps that can "upgrade" a - target database to a new version, and optionally a series of steps that can - "downgrade" similarly, doing the same steps in reverse. -* Allows the scripts to execute in some sequential manner. - -The goals of Alembic are: - -* Very open ended and transparent configuration and operation. A new - Alembic environment is generated from a set of templates which is selected - among a set of options when setup first occurs. The templates then deposit a - series of scripts that define fully how database connectivity is established - and how migration scripts are invoked; the migration scripts themselves are - generated from a template within that series of scripts. The scripts can - then be further customized to define exactly how databases will be - interacted with and what structure new migration files should take. -* Full support for transactional DDL. The default scripts ensure that all - migrations occur within a transaction - for those databases which support - this (Postgresql, Microsoft SQL Server), migrations can be tested with no - need to manually undo changes upon failure. -* Minimalist script construction. Basic operations like renaming - tables/columns, adding/removing columns, changing column attributes can be - performed through one line commands like alter_column(), rename_table(), - add_constraint(). There is no need to recreate full SQLAlchemy Table - structures for simple operations like these - the functions themselves - generate minimalist schema structures behind the scenes to achieve the given - DDL sequence. -* "auto generation" of migrations. While real world migrations are far more - complex than what can be automatically determined, Alembic can still - eliminate the initial grunt work in generating new migration directives - from an altered schema. The ``--autogenerate`` feature will inspect the - current status of a database using SQLAlchemy's schema inspection - capabilities, compare it to the current state of the database model as - specified in Python, and generate a series of "candidate" migrations, - rendering them into a new migration script as Python directives.
The - developer then edits the new file, adding additional directives and data - migrations as needed, to produce a finished migration. Table and column - level changes can be detected, with constraints and indexes to follow as - well. -* Full support for migrations generated as SQL scripts. Those of us who - work in corporate environments know that direct access to DDL commands on a - production database is a rare privilege, and DBAs want textual SQL scripts. - Alembic's usage model and commands are oriented towards being able to run a - series of migrations into a textual output file as easily as it runs them - directly to a database. Care must be taken in this mode to not invoke other - operations that rely upon in-memory SELECTs of rows - Alembic tries to - provide helper constructs like bulk_insert() to help with data-oriented - operations that are compatible with script-based DDL. -* Non-linear, dependency-graph versioning. Scripts are given UUID - identifiers similarly to a DVCS, and the linkage of one script to the next - is achieved via human-editable markers within the scripts themselves. - The structure of a set of migration files is considered as a - directed-acyclic graph, meaning any migration file can be dependent - on any other arbitrary set of migration files, or none at - all. Through this open-ended system, migration files can be organized - into branches, multiple roots, and mergepoints, without restriction. - Commands are provided to produce new branches, roots, and merges of - branches automatically. -* Provide a library of ALTER constructs that can be used by any SQLAlchemy - application. The DDL constructs build upon SQLAlchemy's own DDLElement base - and can be used standalone by any application or script. -* At long last, bring SQLite and its inability to ALTER things into the fold, - but in such a way that SQLite's very special workflow needs are accommodated - in an explicit way that makes the most of a bad situation, through the - concept of a "batch" migration, where multiple changes to a table can - be batched together to form a series of instructions for a single, subsequent - "move-and-copy" workflow. You can even use "move-and-copy" workflow for - other databases, if you want to recreate a table in the background - on a busy system.
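As a rough illustration of the bullets above, a migration script produced from these templates typically takes the following shape; the revision identifiers, table, and column names here are hypothetical, and batch_alter_table() is the entry point for the SQLite "move-and-copy" workflow just described:

.. code-block:: python

    import sqlalchemy as sa
    from alembic import op

    # Human-editable linkage markers: this script's id and its parent.
    revision = 'abc123def456'
    down_revision = None

    def upgrade():
        # One-line directive; Alembic emits the corresponding ALTER.
        op.add_column('account', sa.Column('last_seen', sa.DateTime()))

        # Batch mode: the changes below are collected and applied via a
        # single "move-and-copy" of the table, as SQLite requires.
        with op.batch_alter_table('account') as batch_op:
            batch_op.alter_column('name', nullable=False)

    def downgrade():
        op.drop_column('account', 'last_seen')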
- -Documentation and status of Alembic is at https://alembic.sqlalchemy.org/ - - - diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic-1.0.9.dist-info/RECORD b/flo-token-explorer/lib/python3.6/site-packages/alembic-1.0.9.dist-info/RECORD deleted file mode 100644 index 0ed5156..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic-1.0.9.dist-info/RECORD +++ /dev/null @@ -1,135 +0,0 @@ -alembic/__init__.py,sha256=EVBLBgclY8qU_jQH_mF-6jsscowLQlwooFaLcYrY5Q0,321 -alembic/command.py,sha256=zLMK8VZ8sCKOJXrUJIU4nZB76OG1RAQVN-j0csaQc0I,16223 -alembic/config.py,sha256=eA7eDx6hlmi4gFVnPTndGmqR-kJgZq1JTfPpyL0r_iY,17953 -alembic/context.py,sha256=hK1AJOQXJ29Bhn276GYcosxeG7pC5aZRT5E8c4bMJ4Q,195 -alembic/op.py,sha256=flHtcsVqOD-ZgZKK2pv-CJ5Cwh-KJ7puMUNXzishxLw,167 -alembic/autogenerate/__init__.py,sha256=98WZvBQ3k-cfpa1GsVFEq0Kqmzl1-UPYCtau9urpsIQ,431 -alembic/autogenerate/api.py,sha256=BVBmYlJtHe1axk__GDe1PDEx2rN9HaA6na45fObhoZw,16763 -alembic/autogenerate/compare.py,sha256=5gCq1EU35XUJhcDULsyNOnoRfEJCHUhcxx512pz--tI,35201 -alembic/autogenerate/render.py,sha256=9NDqHHXqDoSRFmPsCDxHZLRiLboRuN8ZVjNJR9NVpI8,27456 -alembic/autogenerate/rewriter.py,sha256=Ie5y069LTMvd-3A_XTdrKDQCXWuIPbJiINvetvLbdy8,5532 -alembic/ddl/__init__.py,sha256=7cwkSz69tWKVbUxbHpE0SDOYUgbxSBkVIHHTyJ1O7V8,185 -alembic/ddl/base.py,sha256=anjLkIUdQWAQBUX-_9wj_-v-fTWvU8TSL_zd5kGPdp4,6302 -alembic/ddl/impl.py,sha256=ifYhTI26bTwwXz-BMOrlaGTF6o084yhAiB653HQIzxs,14438 -alembic/ddl/mssql.py,sha256=pMu_vdAt4tx3UzB8K5qDCHZNWSCxwh9ptRY4Y_DBGbw,8834 -alembic/ddl/mysql.py,sha256=gCPf-Y7qXJF9r9UupcCnxwHk2wFTXJVjRfNaqJyBLnY,13943 -alembic/ddl/oracle.py,sha256=n12yIFXReeLZ8AqldZ0HIrZSbWorfL0CQvrBSUYZckg,3323 -alembic/ddl/postgresql.py,sha256=yW485pn2tgU2r2vFyEPU8HChiYBLB-dhEEWgDWDfT9U,17901 -alembic/ddl/sqlite.py,sha256=StCW8FX9geLPNba1N5xD3qitt8Y6wSNypK2CgkddZCU,3536 -alembic/operations/__init__.py,sha256=nJbmMAwapU2py4fJ4GUBanBp-EMXhDyMngb717NIHM8,192 -alembic/operations/base.py,sha256=dOvNbG0KtVpOSydy881Vqj4WPArtjV8gkRCA_xWiW38,16869 -alembic/operations/batch.py,sha256=u0jho-8sWhslwV8nUb9cYpJU50gJDaMMyYn7BpXUW0k,15036 -alembic/operations/ops.py,sha256=8F7FYgTo6beDbHFlr9oUtRnNN3tKO7xGXqlDfcY-rAE,81813 -alembic/operations/schemaobj.py,sha256=hFUtUzV-kSEq9TnXqH6IygM-aCcB5VjIpUm0kAKYGE4,5736 -alembic/operations/toimpl.py,sha256=3QphnlDAokP3H7eHrxWuvANhtFH8MIzeAwazl4ZseAg,5687 -alembic/runtime/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -alembic/runtime/environment.py,sha256=r0dwROTyJwEAjJWoG9j1eQAO0Sh-4GacVuGYq2zMI0U,37663 -alembic/runtime/migration.py,sha256=GfsN1r8XnD4HqYdIGL9YBv0eQQtDO7tKTq6qwgHv7pA,35607 -alembic/script/__init__.py,sha256=SxmoPlnSDLFW_9p-OTG-yaev76Ok7TiyN6u2TAKczkU,116 -alembic/script/base.py,sha256=p-D8P04PvkgR8Gc40RHsVhYEn1x72dhskbquM6j92NI,31258 -alembic/script/revision.py,sha256=s2Lr-g8vs9-trtyK95_Ez_RJHS0qCV1yCxqraQGTQGk,33623 -alembic/templates/generic/README,sha256=MVlc9TYmr57RbhXET6QxgyCcwWP7w-vLkEsirENqiIQ,38 -alembic/templates/generic/alembic.ini.mako,sha256=ogtFTqcp30l4ycOL6alM9-VsSVlklphYLsJUR90yMdw,1681 -alembic/templates/generic/env.py,sha256=yg9rsCM8EvSQMn5yNaxTmzNAGslFTppssAFP5Y_wcYk,1977 -alembic/templates/generic/script.py.mako,sha256=8_xgA-gm_OhehnO7CiIijWgnm00ZlszEHtIHrAYFJl0,494 -alembic/templates/multidb/README,sha256=c7CNHkVvVSJsGZ75Qlcuo1nXKQITDu0W3hSULyz1pWg,41 -alembic/templates/multidb/alembic.ini.mako,sha256=alMGLKSsRGAtP9vpzWjsnSiHxu1t6uiMRCIqYm593Fs,1776 -alembic/templates/multidb/env.py,sha256=U2E5hL6IvIXWmi3szCCSsia62AUovnhnXptBqoxNDB8,4109 
-alembic/templates/multidb/script.py.mako,sha256=k09J7yYXfXFyedV6D5VgJzuPQxPnYKxID0huIabH46w,923 -alembic/templates/pylons/README,sha256=gr4MQnn_ScvV_kasPpXgo6ntAtcIWmOlga9vURbgUwI,59 -alembic/templates/pylons/alembic.ini.mako,sha256=DtmSxtJS4kuL7RQoCStss7IOCU2xtSV9CdvKoZETVDY,1160 -alembic/templates/pylons/env.py,sha256=7bGJawV2_gFTI_VgREaskfpBYMNrB7OE_OVOeDIZkJA,2199 -alembic/templates/pylons/script.py.mako,sha256=8_xgA-gm_OhehnO7CiIijWgnm00ZlszEHtIHrAYFJl0,494 -alembic/testing/__init__.py,sha256=i1OoDhTC_rnx17P9TdqDtrIlW5SA7PrssetYHwkvryE,469 -alembic/testing/assertions.py,sha256=-XpKD1bnw4dOGxIglSZBGY61axW-tix93yfeszi4KfM,6298 -alembic/testing/compat.py,sha256=XChbeZeVznPmGhjEvV_ESt58QqTrhK7kZFsf1rFt9rU,309 -alembic/testing/config.py,sha256=XDVdzH_Z3-1Q4mSdbcJ0mczPR5mG_0E6fr_5_rT2dVI,2556 -alembic/testing/engines.py,sha256=2gtRzX4olDki9-gwBa3CcJpWnBdeKdoD26Nei8Z9Z2o,765 -alembic/testing/env.py,sha256=tMqcGfSrJANAdgwTTLt3-bFfUVx-MPfpnIWalBq4Qcw,9992 -alembic/testing/exclusions.py,sha256=xSZKQI6_yvaDUqhXNMoBelrJihiBEOLkJAokSegs9fw,12634 -alembic/testing/fixtures.py,sha256=1PNSDx1faeqs_3VymfHvX0X_BgE2zFBTBOJsRBUVDHA,4896 -alembic/testing/mock.py,sha256=jES0zJTv2Iu_BFRChonsh0Zk3vkIqiiKAPJpZCyUzpY,818 -alembic/testing/provision.py,sha256=RGkfbYIThuWSv6Lk58iyXzW7T9AJLgRHQBu9f307yus,10465 -alembic/testing/requirements.py,sha256=KnPVXYPjR9RA18IyXXc7Ldqn8BNZez-XRcMmNQYVzsg,4449 -alembic/testing/runner.py,sha256=9DJkUYcWCjU9Jb9VCJeoTR0Db3FGEprZ2HNawfKF-H8,1603 -alembic/testing/util.py,sha256=LS4XN0wFGk2ci_lHrbbT9KZNoJLVdf235KYN5qFN0k8,477 -alembic/testing/warnings.py,sha256=uxEhL4nERRDhxBSrODqxOhi28HWDL7Z0q-qAxkKgr64,1432 -alembic/testing/plugin/__init__.py,sha256=P23dal0Z5q1kt-l6ddqaHypJwD8tx2draPs5_RL8W-I,147 -alembic/testing/plugin/bootstrap.py,sha256=iyyx3XudcRky5wTZVpX1LE4rjwq5U3aClqksnruZAWA,1650 -alembic/testing/plugin/noseplugin.py,sha256=7iC7938_w9Hr-1SVgJFr1OFuS2Qag5cqjRAXpL6uKmo,2746 -alembic/testing/plugin/plugin_base.py,sha256=8j8lDcMoF84UYV6ecQsrXB0he3dV8Xhl8vkinPfV8fM,18021 -alembic/testing/plugin/pytestplugin.py,sha256=NpYEY97kHo_2ufAi2cuSVzmdCPkHJUrL0UnWQZlpKZQ,7020 -alembic/util/__init__.py,sha256=Biiv3AP96Sv8xztdHjTw7YMHeOIJ5rFc_P3DSCuqxjA,1591 -alembic/util/compat.py,sha256=2J2ePk96oBgMBB-P_pzwbySS-0WJsOYDWrkV7tgw4gk,9390 -alembic/util/exc.py,sha256=GBd-Fw-pvtsUNg6wrub7yhY2venv1MD1eMuJZebJiMY,40 -alembic/util/langhelpers.py,sha256=BXkBYZQxh96Jb2B3GKlZvnhJ0bBVRujatLosBlVeZmk,9246 -alembic/util/messaging.py,sha256=34lpck_uLANjeYhPVrX6VYqVqTkxOU4c5jxUV_Bm9Do,2480 -alembic/util/pyfiles.py,sha256=9kH4P28wMtdUTnHoRbpygVDj1IAQwYkRw_3i4snBnZk,2761 -alembic/util/sqla_compat.py,sha256=OzLhN4QD7XINArOQWEvYP7NfxEpl8eFJs-yVwc0MmqA,7150 -alembic-1.0.9.dist-info/LICENSE,sha256=yaEMocr0UqnhGyH3000Oy1Iz84WW-xNu9-TKO0q3mIw,1184 -alembic-1.0.9.dist-info/METADATA,sha256=VbBXTCtlvkAkdQFAvYPfnTOfFDgRHh6So4le_bMLVhQ,6032 -alembic-1.0.9.dist-info/WHEEL,sha256=_wJFdOYk7i3xxT8ElOkUJvOdOvfNGbR9g-bf6UQT6sU,110 -alembic-1.0.9.dist-info/entry_points.txt,sha256=jOSnN_2fhU8xzDQ50rdNr425J8kf_exuY8GrAo1daz8,49 -alembic-1.0.9.dist-info/top_level.txt,sha256=FwKWd5VsPFC8iQjpu1u9Cn-JnK3-V1RhUCmWqz1cl-s,8 -alembic-1.0.9.dist-info/RECORD,, -../../../bin/alembic,sha256=LItKXTstgZgyfc8vJ7NCVKIGNMWWMItU0xv8_HZ7M5Q,278 -alembic-1.0.9.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -alembic/util/__pycache__/messaging.cpython-36.pyc,, -alembic/util/__pycache__/langhelpers.cpython-36.pyc,, -alembic/util/__pycache__/sqla_compat.cpython-36.pyc,, -alembic/util/__pycache__/compat.cpython-36.pyc,, 
-alembic/util/__pycache__/__init__.cpython-36.pyc,, -alembic/util/__pycache__/exc.cpython-36.pyc,, -alembic/util/__pycache__/pyfiles.cpython-36.pyc,, -alembic/operations/__pycache__/base.cpython-36.pyc,, -alembic/operations/__pycache__/schemaobj.cpython-36.pyc,, -alembic/operations/__pycache__/ops.cpython-36.pyc,, -alembic/operations/__pycache__/__init__.cpython-36.pyc,, -alembic/operations/__pycache__/batch.cpython-36.pyc,, -alembic/operations/__pycache__/toimpl.cpython-36.pyc,, -alembic/testing/plugin/__pycache__/bootstrap.cpython-36.pyc,, -alembic/testing/plugin/__pycache__/pytestplugin.cpython-36.pyc,, -alembic/testing/plugin/__pycache__/noseplugin.cpython-36.pyc,, -alembic/testing/plugin/__pycache__/plugin_base.cpython-36.pyc,, -alembic/testing/plugin/__pycache__/__init__.cpython-36.pyc,, -alembic/testing/__pycache__/mock.cpython-36.pyc,, -alembic/testing/__pycache__/provision.cpython-36.pyc,, -alembic/testing/__pycache__/engines.cpython-36.pyc,, -alembic/testing/__pycache__/config.cpython-36.pyc,, -alembic/testing/__pycache__/requirements.cpython-36.pyc,, -alembic/testing/__pycache__/env.cpython-36.pyc,, -alembic/testing/__pycache__/compat.cpython-36.pyc,, -alembic/testing/__pycache__/util.cpython-36.pyc,, -alembic/testing/__pycache__/fixtures.cpython-36.pyc,, -alembic/testing/__pycache__/warnings.cpython-36.pyc,, -alembic/testing/__pycache__/__init__.cpython-36.pyc,, -alembic/testing/__pycache__/exclusions.cpython-36.pyc,, -alembic/testing/__pycache__/assertions.cpython-36.pyc,, -alembic/testing/__pycache__/runner.cpython-36.pyc,, -alembic/templates/generic/__pycache__/env.cpython-36.pyc,, -alembic/templates/pylons/__pycache__/env.cpython-36.pyc,, -alembic/templates/multidb/__pycache__/env.cpython-36.pyc,, -alembic/__pycache__/op.cpython-36.pyc,, -alembic/__pycache__/config.cpython-36.pyc,, -alembic/__pycache__/context.cpython-36.pyc,, -alembic/__pycache__/__init__.cpython-36.pyc,, -alembic/__pycache__/command.cpython-36.pyc,, -alembic/script/__pycache__/base.cpython-36.pyc,, -alembic/script/__pycache__/__init__.cpython-36.pyc,, -alembic/script/__pycache__/revision.cpython-36.pyc,, -alembic/ddl/__pycache__/mysql.cpython-36.pyc,, -alembic/ddl/__pycache__/mssql.cpython-36.pyc,, -alembic/ddl/__pycache__/oracle.cpython-36.pyc,, -alembic/ddl/__pycache__/impl.cpython-36.pyc,, -alembic/ddl/__pycache__/base.cpython-36.pyc,, -alembic/ddl/__pycache__/sqlite.cpython-36.pyc,, -alembic/ddl/__pycache__/__init__.cpython-36.pyc,, -alembic/ddl/__pycache__/postgresql.cpython-36.pyc,, -alembic/autogenerate/__pycache__/api.cpython-36.pyc,, -alembic/autogenerate/__pycache__/compare.cpython-36.pyc,, -alembic/autogenerate/__pycache__/render.cpython-36.pyc,, -alembic/autogenerate/__pycache__/__init__.cpython-36.pyc,, -alembic/autogenerate/__pycache__/rewriter.cpython-36.pyc,, -alembic/runtime/__pycache__/environment.cpython-36.pyc,, -alembic/runtime/__pycache__/__init__.cpython-36.pyc,, -alembic/runtime/__pycache__/migration.cpython-36.pyc,, diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic-1.0.9.dist-info/WHEEL b/flo-token-explorer/lib/python3.6/site-packages/alembic-1.0.9.dist-info/WHEEL deleted file mode 100644 index c4bde30..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic-1.0.9.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.32.3) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic-1.0.9.dist-info/entry_points.txt 
b/flo-token-explorer/lib/python3.6/site-packages/alembic-1.0.9.dist-info/entry_points.txt deleted file mode 100644 index 27ac374..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic-1.0.9.dist-info/entry_points.txt +++ /dev/null @@ -1,3 +0,0 @@ -[console_scripts] -alembic = alembic.config:main - diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic-1.0.9.dist-info/top_level.txt b/flo-token-explorer/lib/python3.6/site-packages/alembic-1.0.9.dist-info/top_level.txt deleted file mode 100644 index b5bd98d..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic-1.0.9.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -alembic diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/__init__.py deleted file mode 100644 index b94bcb2..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -from os import path -import sys - -from . import context # noqa -from . import op # noqa -from .runtime import environment -from .runtime import migration - -__version__ = '1.0.9' - -package_dir = path.abspath(path.dirname(__file__)) - -sys.modules["alembic.migration"] = migration -sys.modules["alembic.environment"] = environment diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/autogenerate/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/autogenerate/__init__.py deleted file mode 100644 index a0f8ec2..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/autogenerate/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from .api import _render_migration_diffs # noqa -from .api import compare_metadata # noqa -from .api import produce_migrations # noqa -from .api import render_python_code # noqa -from .api import RevisionContext # noqa -from .compare import _produce_net_changes # noqa -from .compare import comparators # noqa -from .render import render_op_text # noqa -from .render import renderers # noqa -from .rewriter import Rewriter # noqa diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/autogenerate/api.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/autogenerate/api.py deleted file mode 100644 index 709e97d..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/autogenerate/api.py +++ /dev/null @@ -1,504 +0,0 @@ -"""Provide the 'autogenerate' feature which can produce migration operations -automatically.""" - -import contextlib - -from sqlalchemy.engine.reflection import Inspector - -from . import compare -from . import render -from .. import util -from ..operations import ops - - -def compare_metadata(context, metadata): - """Compare a database schema to that given in a - :class:`~sqlalchemy.schema.MetaData` instance. - - The database connection is presented in the context - of a :class:`.MigrationContext` object, which - provides database connectivity as well as optional - comparison functions to use for datatypes and - server defaults - see the "autogenerate" arguments - at :meth:`.EnvironmentContext.configure` - for details on these. 
- - The return format is a list of "diff" directives, - each representing individual differences:: - - from alembic.migration import MigrationContext - from alembic.autogenerate import compare_metadata - from sqlalchemy.schema import SchemaItem - from sqlalchemy.types import TypeEngine - from sqlalchemy import (create_engine, MetaData, Column, - Integer, String, Table) - import pprint - - engine = create_engine("sqlite://") - - engine.execute(''' - create table foo ( - id integer not null primary key, - old_data varchar, - x integer - )''') - - engine.execute(''' - create table bar ( - data varchar - )''') - - metadata = MetaData() - Table('foo', metadata, - Column('id', Integer, primary_key=True), - Column('data', Integer), - Column('x', Integer, nullable=False) - ) - Table('bat', metadata, - Column('info', String) - ) - - mc = MigrationContext.configure(engine.connect()) - - diff = compare_metadata(mc, metadata) - pprint.pprint(diff, indent=2, width=20) - - Output:: - - [ ( 'add_table', - Table('bat', MetaData(bind=None), - Column('info', String(), table=<bat>), schema=None)), - ( 'remove_table', - Table(u'bar', MetaData(bind=None), - Column(u'data', VARCHAR(), table=<bar>), schema=None)), - ( 'add_column', - None, - 'foo', - Column('data', Integer(), table=<foo>)), - ( 'remove_column', - None, - 'foo', - Column(u'old_data', VARCHAR(), table=None)), - [ ( 'modify_nullable', - None, - 'foo', - u'x', - { 'existing_server_default': None, - 'existing_type': INTEGER()}, - True, - False)]] - - - :param context: a :class:`.MigrationContext` - instance. - :param metadata: a :class:`~sqlalchemy.schema.MetaData` - instance. - - .. seealso:: - - :func:`.produce_migrations` - produces a :class:`.MigrationScript` - structure based on metadata comparison. - - """ - - migration_script = produce_migrations(context, metadata) - return migration_script.upgrade_ops.as_diffs() - - -def produce_migrations(context, metadata): - """Produce a :class:`.MigrationScript` structure based on schema - comparison. - - This function does essentially what :func:`.compare_metadata` does, - but then runs the resulting list of diffs to produce the full - :class:`.MigrationScript` object. For an example of what this looks like, - see the example in :ref:`customizing_revision`. - - .. versionadded:: 0.8.0 - - .. seealso:: - - :func:`.compare_metadata` - returns more fundamental "diff" - data from comparing a schema. - - """ - - autogen_context = AutogenContext(context, metadata=metadata) - - migration_script = ops.MigrationScript( - rev_id=None, - upgrade_ops=ops.UpgradeOps([]), - downgrade_ops=ops.DowngradeOps([]), - ) - - compare._populate_migration_script(autogen_context, migration_script) - - return migration_script - - -def render_python_code( - up_or_down_op, - sqlalchemy_module_prefix="sa.", - alembic_module_prefix="op.", - render_as_batch=False, - imports=(), - render_item=None, -): - """Render Python code given an :class:`.UpgradeOps` or - :class:`.DowngradeOps` object. - - This is a convenience function that can be used to test the - autogenerate output of a user-defined :class:`.MigrationScript` structure.
- - """ - opts = { - "sqlalchemy_module_prefix": sqlalchemy_module_prefix, - "alembic_module_prefix": alembic_module_prefix, - "render_item": render_item, - "render_as_batch": render_as_batch, - } - - autogen_context = AutogenContext(None, opts=opts) - autogen_context.imports = set(imports) - return render._indent( - render._render_cmd_body(up_or_down_op, autogen_context) - ) - - -def _render_migration_diffs(context, template_args): - """legacy, used by test_autogen_composition at the moment""" - - autogen_context = AutogenContext(context) - - upgrade_ops = ops.UpgradeOps([]) - compare._produce_net_changes(autogen_context, upgrade_ops) - - migration_script = ops.MigrationScript( - rev_id=None, - upgrade_ops=upgrade_ops, - downgrade_ops=upgrade_ops.reverse(), - ) - - render._render_python_into_templatevars( - autogen_context, migration_script, template_args - ) - - -class AutogenContext(object): - """Maintains configuration and state that's specific to an - autogenerate operation.""" - - metadata = None - """The :class:`~sqlalchemy.schema.MetaData` object - representing the destination. - - This object is the one that is passed within ``env.py`` - to the :paramref:`.EnvironmentContext.configure.target_metadata` - parameter. It represents the structure of :class:`.Table` and other - objects as stated in the current database model, and represents the - destination structure for the database being examined. - - While the :class:`~sqlalchemy.schema.MetaData` object is primarily - known as a collection of :class:`~sqlalchemy.schema.Table` objects, - it also has an :attr:`~sqlalchemy.schema.MetaData.info` dictionary - that may be used by end-user schemes to store additional schema-level - objects that are to be compared in custom autogeneration schemes. - - """ - - connection = None - """The :class:`~sqlalchemy.engine.base.Connection` object currently - connected to the database backend being compared. - - This is obtained from the :attr:`.MigrationContext.bind` and is - utimately set up in the ``env.py`` script. - - """ - - dialect = None - """The :class:`~sqlalchemy.engine.Dialect` object currently in use. - - This is normally obtained from the - :attr:`~sqlalchemy.engine.base.Connection.dialect` attribute. - - """ - - imports = None - """A ``set()`` which contains string Python import directives. - - The directives are to be rendered into the ``${imports}`` section - of a script template. The set is normally empty and can be modified - within hooks such as the - :paramref:`.EnvironmentContext.configure.render_item` hook. - - .. versionadded:: 0.8.3 - - .. seealso:: - - :ref:`autogen_render_types` - - """ - - migration_context = None - """The :class:`.MigrationContext` established by the ``env.py`` script.""" - - def __init__( - self, migration_context, metadata=None, opts=None, autogenerate=True - ): - - if ( - autogenerate - and migration_context is not None - and migration_context.as_sql - ): - raise util.CommandError( - "autogenerate can't use as_sql=True as it prevents querying " - "the database for schema information" - ) - - if opts is None: - opts = migration_context.opts - - self.metadata = metadata = ( - opts.get("target_metadata", None) if metadata is None else metadata - ) - - if ( - autogenerate - and metadata is None - and migration_context is not None - and migration_context.script is not None - ): - raise util.CommandError( - "Can't proceed with --autogenerate option; environment " - "script %s does not provide " - "a MetaData object or sequence of objects to the context." 
- % (migration_context.script.env_py_location) - ) - - include_symbol = opts.get("include_symbol", None) - include_object = opts.get("include_object", None) - - object_filters = [] - if include_symbol: - - def include_symbol_filter( - object_, name, type_, reflected, compare_to - ): - if type_ == "table": - return include_symbol(name, object_.schema) - else: - return True - - object_filters.append(include_symbol_filter) - if include_object: - object_filters.append(include_object) - - self._object_filters = object_filters - - self.migration_context = migration_context - if self.migration_context is not None: - self.connection = self.migration_context.bind - self.dialect = self.migration_context.dialect - - self.imports = set() - self.opts = opts - self._has_batch = False - - @util.memoized_property - def inspector(self): - return Inspector.from_engine(self.connection) - - @contextlib.contextmanager - def _within_batch(self): - self._has_batch = True - yield - self._has_batch = False - - def run_filters(self, object_, name, type_, reflected, compare_to): - """Run the context's object filters and return True if the targets - should be part of the autogenerate operation. - - This method should be run for every kind of object encountered within - an autogenerate operation, giving the environment the chance - to filter what objects should be included in the comparison. - The filters here are produced directly via the - :paramref:`.EnvironmentContext.configure.include_object` - and :paramref:`.EnvironmentContext.configure.include_symbol` - functions, if present. - - """ - for fn in self._object_filters: - if not fn(object_, name, type_, reflected, compare_to): - return False - else: - return True - - @util.memoized_property - def sorted_tables(self): - """Return an aggregate of the :attr:`.MetaData.sorted_tables` collection(s). - - For a sequence of :class:`.MetaData` objects, this - concatenates the :attr:`.MetaData.sorted_tables` collection - for each individual :class:`.MetaData` in the order of the - sequence. It does **not** collate the sorted tables collections. - - .. versionadded:: 0.9.0 - - """ - result = [] - for m in util.to_list(self.metadata): - result.extend(m.sorted_tables) - return result - - @util.memoized_property - def table_key_to_table(self): - """Return an aggregate of the :attr:`.MetaData.tables` dictionaries. - - The :attr:`.MetaData.tables` collection is a dictionary of table key - to :class:`.Table`; this method aggregates the dictionary across - multiple :class:`.MetaData` objects into one dictionary. - - Duplicate table keys are **not** supported; if two :class:`.MetaData` - objects contain the same table key, an exception is raised. - - .. 
versionadded:: 0.9.0 - - """ - result = {} - for m in util.to_list(self.metadata): - intersect = set(result).intersection(set(m.tables)) - if intersect: - raise ValueError( - "Duplicate table keys across multiple " - "MetaData objects: %s" - % (", ".join('"%s"' % key for key in sorted(intersect))) - ) - - result.update(m.tables) - return result - - -class RevisionContext(object): - """Maintains configuration and state that's specific to a revision - file generation operation.""" - - def __init__( - self, - config, - script_directory, - command_args, - process_revision_directives=None, - ): - self.config = config - self.script_directory = script_directory - self.command_args = command_args - self.process_revision_directives = process_revision_directives - self.template_args = { - "config": config # Let templates use config for - # e.g. multiple databases - } - self.generated_revisions = [self._default_revision()] - - def _to_script(self, migration_script): - template_args = {} - for k, v in self.template_args.items(): - template_args.setdefault(k, v) - - if getattr(migration_script, "_needs_render", False): - autogen_context = self._last_autogen_context - - # clear out existing imports if we are doing multiple - # renders - autogen_context.imports = set() - if migration_script.imports: - autogen_context.imports.update(migration_script.imports) - render._render_python_into_templatevars( - autogen_context, migration_script, template_args - ) - - return self.script_directory.generate_revision( - migration_script.rev_id, - migration_script.message, - refresh=True, - head=migration_script.head, - splice=migration_script.splice, - branch_labels=migration_script.branch_label, - version_path=migration_script.version_path, - depends_on=migration_script.depends_on, - **template_args - ) - - def run_autogenerate(self, rev, migration_context): - self._run_environment(rev, migration_context, True) - - def run_no_autogenerate(self, rev, migration_context): - self._run_environment(rev, migration_context, False) - - def _run_environment(self, rev, migration_context, autogenerate): - if autogenerate: - if self.command_args["sql"]: - raise util.CommandError( - "Using --sql with --autogenerate does not make any sense" - ) - if set(self.script_directory.get_revisions(rev)) != set( - self.script_directory.get_revisions("heads") - ): - raise util.CommandError("Target database is not up to date.") - - upgrade_token = migration_context.opts["upgrade_token"] - downgrade_token = migration_context.opts["downgrade_token"] - - migration_script = self.generated_revisions[-1] - if not getattr(migration_script, "_needs_render", False): - migration_script.upgrade_ops_list[-1].upgrade_token = upgrade_token - migration_script.downgrade_ops_list[ - -1 - ].downgrade_token = downgrade_token - migration_script._needs_render = True - else: - migration_script._upgrade_ops.append( - ops.UpgradeOps([], upgrade_token=upgrade_token) - ) - migration_script._downgrade_ops.append( - ops.DowngradeOps([], downgrade_token=downgrade_token) - ) - - self._last_autogen_context = autogen_context = AutogenContext( - migration_context, autogenerate=autogenerate - ) - - if autogenerate: - compare._populate_migration_script( - autogen_context, migration_script - ) - - if self.process_revision_directives: - self.process_revision_directives( - migration_context, rev, self.generated_revisions - ) - - hook = migration_context.opts["process_revision_directives"] - if hook: - hook(migration_context, rev, self.generated_revisions) - - for migration_script 
in self.generated_revisions: - migration_script._needs_render = True - - def _default_revision(self): - op = ops.MigrationScript( - rev_id=self.command_args["rev_id"] or util.rev_id(), - message=self.command_args["message"], - upgrade_ops=ops.UpgradeOps([]), - downgrade_ops=ops.DowngradeOps([]), - head=self.command_args["head"], - splice=self.command_args["splice"], - branch_label=self.command_args["branch_label"], - version_path=self.command_args["version_path"], - depends_on=self.command_args["depends_on"], - ) - return op - - def generate_scripts(self): - for generated_revision in self.generated_revisions: - yield self._to_script(generated_revision) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/autogenerate/compare.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/autogenerate/compare.py deleted file mode 100644 index df1e509..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/autogenerate/compare.py +++ /dev/null @@ -1,1094 +0,0 @@ -import contextlib -import logging -import re - -from sqlalchemy import event -from sqlalchemy import schema as sa_schema -from sqlalchemy import types as sqltypes -from sqlalchemy.engine.reflection import Inspector -from sqlalchemy.util import OrderedSet - -from alembic.ddl.base import _fk_spec -from .render import _user_defined_render -from .. import util -from ..operations import ops -from ..util import compat -from ..util import sqla_compat - -log = logging.getLogger(__name__) - - -def _populate_migration_script(autogen_context, migration_script): - upgrade_ops = migration_script.upgrade_ops_list[-1] - downgrade_ops = migration_script.downgrade_ops_list[-1] - - _produce_net_changes(autogen_context, upgrade_ops) - upgrade_ops.reverse_into(downgrade_ops) - - -comparators = util.Dispatcher(uselist=True) - - -def _produce_net_changes(autogen_context, upgrade_ops): - - connection = autogen_context.connection - include_schemas = autogen_context.opts.get("include_schemas", False) - - inspector = Inspector.from_engine(connection) - - default_schema = connection.dialect.default_schema_name - if include_schemas: - schemas = set(inspector.get_schema_names()) - # replace default schema name with None - schemas.discard("information_schema") - # replace the "default" schema with None - schemas.discard(default_schema) - schemas.add(None) - else: - schemas = [None] - - comparators.dispatch("schema", autogen_context.dialect.name)( - autogen_context, upgrade_ops, schemas - ) - - -@comparators.dispatch_for("schema") -def _autogen_for_tables(autogen_context, upgrade_ops, schemas): - inspector = autogen_context.inspector - - conn_table_names = set() - - version_table_schema = ( - autogen_context.migration_context.version_table_schema - ) - version_table = autogen_context.migration_context.version_table - - for s in schemas: - tables = set(inspector.get_table_names(schema=s)) - if s == version_table_schema: - tables = tables.difference( - [autogen_context.migration_context.version_table] - ) - conn_table_names.update(zip([s] * len(tables), tables)) - - metadata_table_names = OrderedSet( - [(table.schema, table.name) for table in autogen_context.sorted_tables] - ).difference([(version_table_schema, version_table)]) - - _compare_tables( - conn_table_names, - metadata_table_names, - inspector, - upgrade_ops, - autogen_context, - ) - - -def _compare_tables( - conn_table_names, - metadata_table_names, - inspector, - upgrade_ops, - autogen_context, -): - - default_schema = inspector.bind.dialect.default_schema_name - - # tables 
coming from the connection will not have "schema" - # set if it matches default_schema_name; so we need a list - # of table names from local metadata that also have "None" if schema - # == default_schema_name. Most setups will be like this anyway but - # some are not (see #170) - metadata_table_names_no_dflt_schema = OrderedSet( - [ - (schema if schema != default_schema else None, tname) - for schema, tname in metadata_table_names - ] - ) - - # to adjust for the MetaData collection storing the tables either - # as "schemaname.tablename" or just "tablename", create a new lookup - # which will match the "non-default-schema" keys to the Table object. - tname_to_table = dict( - ( - no_dflt_schema, - autogen_context.table_key_to_table[ - sa_schema._get_table_key(tname, schema) - ], - ) - for no_dflt_schema, (schema, tname) in zip( - metadata_table_names_no_dflt_schema, metadata_table_names - ) - ) - metadata_table_names = metadata_table_names_no_dflt_schema - - for s, tname in metadata_table_names.difference(conn_table_names): - name = "%s.%s" % (s, tname) if s else tname - metadata_table = tname_to_table[(s, tname)] - if autogen_context.run_filters( - metadata_table, tname, "table", False, None - ): - upgrade_ops.ops.append( - ops.CreateTableOp.from_table(metadata_table) - ) - log.info("Detected added table %r", name) - modify_table_ops = ops.ModifyTableOps(tname, [], schema=s) - - comparators.dispatch("table")( - autogen_context, - modify_table_ops, - s, - tname, - None, - metadata_table, - ) - if not modify_table_ops.is_empty(): - upgrade_ops.ops.append(modify_table_ops) - - removal_metadata = sa_schema.MetaData() - for s, tname in conn_table_names.difference(metadata_table_names): - name = sa_schema._get_table_key(tname, s) - exists = name in removal_metadata.tables - t = sa_schema.Table(tname, removal_metadata, schema=s) - - if not exists: - event.listen( - t, - "column_reflect", - # fmt: off - autogen_context.migration_context.impl. - _compat_autogen_column_reflect - (inspector), - # fmt: on - ) - inspector.reflecttable(t, None) - if autogen_context.run_filters(t, tname, "table", True, None): - - modify_table_ops = ops.ModifyTableOps(tname, [], schema=s) - - comparators.dispatch("table")( - autogen_context, modify_table_ops, s, tname, t, None - ) - if not modify_table_ops.is_empty(): - upgrade_ops.ops.append(modify_table_ops) - - upgrade_ops.ops.append(ops.DropTableOp.from_table(t)) - log.info("Detected removed table %r", name) - - existing_tables = conn_table_names.intersection(metadata_table_names) - - existing_metadata = sa_schema.MetaData() - conn_column_info = {} - for s, tname in existing_tables: - name = sa_schema._get_table_key(tname, s) - exists = name in existing_metadata.tables - t = sa_schema.Table(tname, existing_metadata, schema=s) - if not exists: - event.listen( - t, - "column_reflect", - # fmt: off - autogen_context.migration_context.impl. 
- _compat_autogen_column_reflect(inspector), - # fmt: on - ) - inspector.reflecttable(t, None) - conn_column_info[(s, tname)] = t - - for s, tname in sorted(existing_tables, key=lambda x: (x[0] or "", x[1])): - s = s or None - name = "%s.%s" % (s, tname) if s else tname - metadata_table = tname_to_table[(s, tname)] - conn_table = existing_metadata.tables[name] - - if autogen_context.run_filters( - metadata_table, tname, "table", False, conn_table - ): - - modify_table_ops = ops.ModifyTableOps(tname, [], schema=s) - with _compare_columns( - s, - tname, - conn_table, - metadata_table, - modify_table_ops, - autogen_context, - inspector, - ): - - comparators.dispatch("table")( - autogen_context, - modify_table_ops, - s, - tname, - conn_table, - metadata_table, - ) - - if not modify_table_ops.is_empty(): - upgrade_ops.ops.append(modify_table_ops) - - -def _make_index(params, conn_table): - ix = sa_schema.Index( - params["name"], - *[conn_table.c[cname] for cname in params["column_names"]], - unique=params["unique"] - ) - if "duplicates_constraint" in params: - ix.info["duplicates_constraint"] = params["duplicates_constraint"] - return ix - - -def _make_unique_constraint(params, conn_table): - uq = sa_schema.UniqueConstraint( - *[conn_table.c[cname] for cname in params["column_names"]], - name=params["name"] - ) - if "duplicates_index" in params: - uq.info["duplicates_index"] = params["duplicates_index"] - - return uq - - -def _make_foreign_key(params, conn_table): - tname = params["referred_table"] - if params["referred_schema"]: - tname = "%s.%s" % (params["referred_schema"], tname) - - options = params.get("options", {}) - - const = sa_schema.ForeignKeyConstraint( - [conn_table.c[cname] for cname in params["constrained_columns"]], - ["%s.%s" % (tname, n) for n in params["referred_columns"]], - onupdate=options.get("onupdate"), - ondelete=options.get("ondelete"), - deferrable=options.get("deferrable"), - initially=options.get("initially"), - name=params["name"], - ) - # needed by 0.7 - conn_table.append_constraint(const) - return const - - -@contextlib.contextmanager -def _compare_columns( - schema, - tname, - conn_table, - metadata_table, - modify_table_ops, - autogen_context, - inspector, -): - name = "%s.%s" % (schema, tname) if schema else tname - metadata_cols_by_name = dict( - (c.name, c) for c in metadata_table.c if not c.system - ) - conn_col_names = dict((c.name, c) for c in conn_table.c) - metadata_col_names = OrderedSet(sorted(metadata_cols_by_name)) - - for cname in metadata_col_names.difference(conn_col_names): - if autogen_context.run_filters( - metadata_cols_by_name[cname], cname, "column", False, None - ): - modify_table_ops.ops.append( - ops.AddColumnOp.from_column_and_tablename( - schema, tname, metadata_cols_by_name[cname] - ) - ) - log.info("Detected added column '%s.%s'", name, cname) - - for colname in metadata_col_names.intersection(conn_col_names): - metadata_col = metadata_cols_by_name[colname] - conn_col = conn_table.c[colname] - if not autogen_context.run_filters( - metadata_col, colname, "column", False, conn_col - ): - continue - alter_column_op = ops.AlterColumnOp(tname, colname, schema=schema) - - comparators.dispatch("column")( - autogen_context, - alter_column_op, - schema, - tname, - colname, - conn_col, - metadata_col, - ) - - if alter_column_op.has_changes(): - modify_table_ops.ops.append(alter_column_op) - - yield - - for cname in set(conn_col_names).difference(metadata_col_names): - if autogen_context.run_filters( - conn_table.c[cname], cname, "column", 
True, None - ): - modify_table_ops.ops.append( - ops.DropColumnOp.from_column_and_tablename( - schema, tname, conn_table.c[cname] - ) - ) - log.info("Detected removed column '%s.%s'", name, cname) - - -class _constraint_sig(object): - def md_name_to_sql_name(self, context): - return self.name - - def __eq__(self, other): - return self.const == other.const - - def __ne__(self, other): - return self.const != other.const - - def __hash__(self): - return hash(self.const) - - -class _uq_constraint_sig(_constraint_sig): - is_index = False - is_unique = True - - def __init__(self, const): - self.const = const - self.name = const.name - self.sig = tuple(sorted([col.name for col in const.columns])) - - @property - def column_names(self): - return [col.name for col in self.const.columns] - - -class _ix_constraint_sig(_constraint_sig): - is_index = True - - def __init__(self, const): - self.const = const - self.name = const.name - self.sig = tuple(sorted([col.name for col in const.columns])) - self.is_unique = bool(const.unique) - - def md_name_to_sql_name(self, context): - return sqla_compat._get_index_final_name(context.dialect, self.const) - - @property - def column_names(self): - return sqla_compat._get_index_column_names(self.const) - - -class _fk_constraint_sig(_constraint_sig): - def __init__(self, const, include_options=False): - self.const = const - self.name = const.name - - ( - self.source_schema, - self.source_table, - self.source_columns, - self.target_schema, - self.target_table, - self.target_columns, - onupdate, - ondelete, - deferrable, - initially, - ) = _fk_spec(const) - - self.sig = ( - self.source_schema, - self.source_table, - tuple(self.source_columns), - self.target_schema, - self.target_table, - tuple(self.target_columns), - ) - if include_options: - self.sig += ( - (None if onupdate.lower() == "no action" else onupdate.lower()) - if onupdate - else None, - (None if ondelete.lower() == "no action" else ondelete.lower()) - if ondelete - else None, - # convert initially + deferrable into one three-state value - "initially_deferrable" - if initially and initially.lower() == "deferred" - else "deferrable" - if deferrable - else "not deferrable", - ) - - -@comparators.dispatch_for("table") -def _compare_indexes_and_uniques( - autogen_context, modify_ops, schema, tname, conn_table, metadata_table -): - - inspector = autogen_context.inspector - is_create_table = conn_table is None - is_drop_table = metadata_table is None - - # 1a. get raw indexes and unique constraints from metadata ... - if metadata_table is not None: - metadata_unique_constraints = set( - uq - for uq in metadata_table.constraints - if isinstance(uq, sa_schema.UniqueConstraint) - ) - metadata_indexes = set(metadata_table.indexes) - else: - metadata_unique_constraints = set() - metadata_indexes = set() - - conn_uniques = conn_indexes = frozenset() - - supports_unique_constraints = False - - unique_constraints_duplicate_unique_indexes = False - - if conn_table is not None: - # 1b. ... 
and from connection, if the table exists - if hasattr(inspector, "get_unique_constraints"): - try: - conn_uniques = inspector.get_unique_constraints( - tname, schema=schema - ) - supports_unique_constraints = True - except NotImplementedError: - pass - except TypeError: - # number of arguments is off for the base - # method in SQLAlchemy due to the cache decorator - # not being present - pass - else: - for uq in conn_uniques: - if uq.get("duplicates_index"): - unique_constraints_duplicate_unique_indexes = True - try: - conn_indexes = inspector.get_indexes(tname, schema=schema) - except NotImplementedError: - pass - - # 2. convert conn-level objects from raw inspector records - # into schema objects - if is_drop_table: - # for DROP TABLE uniques are inline, don't need them - conn_uniques = set() - else: - conn_uniques = set( - _make_unique_constraint(uq_def, conn_table) - for uq_def in conn_uniques - ) - - conn_indexes = set(_make_index(ix, conn_table) for ix in conn_indexes) - - # 2a. if the dialect dupes unique indexes as unique constraints - # (mysql and oracle), correct for that - - if unique_constraints_duplicate_unique_indexes: - _correct_for_uq_duplicates_uix( - conn_uniques, - conn_indexes, - metadata_unique_constraints, - metadata_indexes, - ) - - # 3. give the dialect a chance to omit indexes and constraints that - # we know are either added implicitly by the DB or that the DB - # can't accurately report on - autogen_context.migration_context.impl.correct_for_autogen_constraints( - conn_uniques, - conn_indexes, - metadata_unique_constraints, - metadata_indexes, - ) - - # 4. organize the constraints into "signature" collections, the - # _constraint_sig() objects provide a consistent facade over both - # Index and UniqueConstraint so we can easily work with them - # interchangeably - metadata_unique_constraints = set( - _uq_constraint_sig(uq) for uq in metadata_unique_constraints - ) - - metadata_indexes = set(_ix_constraint_sig(ix) for ix in metadata_indexes) - - conn_unique_constraints = set( - _uq_constraint_sig(uq) for uq in conn_uniques - ) - - conn_indexes = set(_ix_constraint_sig(ix) for ix in conn_indexes) - - # 5. index things by name, for those objects that have names - metadata_names = dict( - (c.md_name_to_sql_name(autogen_context), c) - for c in metadata_unique_constraints.union(metadata_indexes) - if c.name is not None - ) - - conn_uniques_by_name = dict((c.name, c) for c in conn_unique_constraints) - conn_indexes_by_name = dict((c.name, c) for c in conn_indexes) - - conn_names = dict( - (c.name, c) - for c in conn_unique_constraints.union(conn_indexes) - if c.name is not None - ) - - doubled_constraints = dict( - (name, (conn_uniques_by_name[name], conn_indexes_by_name[name])) - for name in set(conn_uniques_by_name).intersection( - conn_indexes_by_name - ) - ) - - # 6. index things by "column signature", to help with unnamed unique - # constraints. - conn_uniques_by_sig = dict((uq.sig, uq) for uq in conn_unique_constraints) - metadata_uniques_by_sig = dict( - (uq.sig, uq) for uq in metadata_unique_constraints - ) - metadata_indexes_by_sig = dict((ix.sig, ix) for ix in metadata_indexes) - unnamed_metadata_uniques = dict( - (uq.sig, uq) for uq in metadata_unique_constraints if uq.name is None - ) - - # assumptions: - # 1. a unique constraint or an index from the connection *always* - # has a name. - # 2. an index on the metadata side *always* has a name. - # 3. a unique constraint on the metadata side *might* have a name. - # 4. 
The backend may double up indexes as unique constraints and - # vice versa (e.g. MySQL, Postgresql) - - def obj_added(obj): - if obj.is_index: - if autogen_context.run_filters( - obj.const, obj.name, "index", False, None - ): - modify_ops.ops.append(ops.CreateIndexOp.from_index(obj.const)) - log.info( - "Detected added index '%s' on %s", - obj.name, - ", ".join(["'%s'" % obj.column_names]), - ) - else: - if not supports_unique_constraints: - # can't report unique indexes as added if we don't - # detect them - return - if is_create_table or is_drop_table: - # unique constraints are created inline with table defs - return - if autogen_context.run_filters( - obj.const, obj.name, "unique_constraint", False, None - ): - modify_ops.ops.append( - ops.AddConstraintOp.from_constraint(obj.const) - ) - log.info( - "Detected added unique constraint '%s' on %s", - obj.name, - ", ".join(["'%s'" % obj.column_names]), - ) - - def obj_removed(obj): - if obj.is_index: - if obj.is_unique and not supports_unique_constraints: - # many databases double up unique constraints - # as unique indexes. without that list we can't - # be sure what we're doing here - return - - if autogen_context.run_filters( - obj.const, obj.name, "index", True, None - ): - modify_ops.ops.append(ops.DropIndexOp.from_index(obj.const)) - log.info( - "Detected removed index '%s' on '%s'", obj.name, tname - ) - else: - if is_create_table or is_drop_table: - # if the whole table is being dropped, we don't need to - # consider unique constraint separately - return - if autogen_context.run_filters( - obj.const, obj.name, "unique_constraint", True, None - ): - modify_ops.ops.append( - ops.DropConstraintOp.from_constraint(obj.const) - ) - log.info( - "Detected removed unique constraint '%s' on '%s'", - obj.name, - tname, - ) - - def obj_changed(old, new, msg): - if old.is_index: - if autogen_context.run_filters( - new.const, new.name, "index", False, old.const - ): - log.info( - "Detected changed index '%s' on '%s':%s", - old.name, - tname, - ", ".join(msg), - ) - modify_ops.ops.append(ops.DropIndexOp.from_index(old.const)) - modify_ops.ops.append(ops.CreateIndexOp.from_index(new.const)) - else: - if autogen_context.run_filters( - new.const, new.name, "unique_constraint", False, old.const - ): - log.info( - "Detected changed unique constraint '%s' on '%s':%s", - old.name, - tname, - ", ".join(msg), - ) - modify_ops.ops.append( - ops.DropConstraintOp.from_constraint(old.const) - ) - modify_ops.ops.append( - ops.AddConstraintOp.from_constraint(new.const) - ) - - for added_name in sorted(set(metadata_names).difference(conn_names)): - obj = metadata_names[added_name] - obj_added(obj) - - for existing_name in sorted(set(metadata_names).intersection(conn_names)): - metadata_obj = metadata_names[existing_name] - - if existing_name in doubled_constraints: - conn_uq, conn_idx = doubled_constraints[existing_name] - if metadata_obj.is_index: - conn_obj = conn_idx - else: - conn_obj = conn_uq - else: - conn_obj = conn_names[existing_name] - - if conn_obj.is_index != metadata_obj.is_index: - obj_removed(conn_obj) - obj_added(metadata_obj) - else: - msg = [] - if conn_obj.is_unique != metadata_obj.is_unique: - msg.append( - " unique=%r to unique=%r" - % (conn_obj.is_unique, metadata_obj.is_unique) - ) - if conn_obj.sig != metadata_obj.sig: - msg.append( - " columns %r to %r" % (conn_obj.sig, metadata_obj.sig) - ) - - if msg: - obj_changed(conn_obj, metadata_obj, msg) - - for removed_name in sorted(set(conn_names).difference(metadata_names)): - conn_obj = 
conn_names[removed_name] - if not conn_obj.is_index and conn_obj.sig in unnamed_metadata_uniques: - continue - elif removed_name in doubled_constraints: - if ( - conn_obj.sig not in metadata_indexes_by_sig - and conn_obj.sig not in metadata_uniques_by_sig - ): - conn_uq, conn_idx = doubled_constraints[removed_name] - obj_removed(conn_uq) - obj_removed(conn_idx) - else: - obj_removed(conn_obj) - - for uq_sig in unnamed_metadata_uniques: - if uq_sig not in conn_uniques_by_sig: - obj_added(unnamed_metadata_uniques[uq_sig]) - - -def _correct_for_uq_duplicates_uix( - conn_unique_constraints, - conn_indexes, - metadata_unique_constraints, - metadata_indexes, -): - # dedupe unique indexes vs. constraints, since MySQL / Oracle - # doesn't really have unique constraints as a separate construct. - # but look in the metadata and try to maintain constructs - # that already seem to be defined one way or the other - # on that side. This logic was formerly local to MySQL dialect, - # generalized to Oracle and others. See #276 - metadata_uq_names = set( - [ - cons.name - for cons in metadata_unique_constraints - if cons.name is not None - ] - ) - - unnamed_metadata_uqs = set( - [ - _uq_constraint_sig(cons).sig - for cons in metadata_unique_constraints - if cons.name is None - ] - ) - - metadata_ix_names = set( - [cons.name for cons in metadata_indexes if cons.unique] - ) - conn_ix_names = dict( - (cons.name, cons) for cons in conn_indexes if cons.unique - ) - - uqs_dupe_indexes = dict( - (cons.name, cons) - for cons in conn_unique_constraints - if cons.info["duplicates_index"] - ) - - for overlap in uqs_dupe_indexes: - if overlap not in metadata_uq_names: - if ( - _uq_constraint_sig(uqs_dupe_indexes[overlap]).sig - not in unnamed_metadata_uqs - ): - - conn_unique_constraints.discard(uqs_dupe_indexes[overlap]) - elif overlap not in metadata_ix_names: - conn_indexes.discard(conn_ix_names[overlap]) - - -@comparators.dispatch_for("column") -def _compare_nullable( - autogen_context, - alter_column_op, - schema, - tname, - cname, - conn_col, - metadata_col, -): - - # work around SQLAlchemy issue #3023 - if metadata_col.primary_key: - return - - metadata_col_nullable = metadata_col.nullable - conn_col_nullable = conn_col.nullable - alter_column_op.existing_nullable = conn_col_nullable - - if conn_col_nullable is not metadata_col_nullable: - alter_column_op.modify_nullable = metadata_col_nullable - log.info( - "Detected %s on column '%s.%s'", - "NULL" if metadata_col_nullable else "NOT NULL", - tname, - cname, - ) - - -@comparators.dispatch_for("column") -def _setup_autoincrement( - autogen_context, - alter_column_op, - schema, - tname, - cname, - conn_col, - metadata_col, -): - - if metadata_col.table._autoincrement_column is metadata_col: - alter_column_op.kw["autoincrement"] = True - elif util.sqla_110 and metadata_col.autoincrement is True: - alter_column_op.kw["autoincrement"] = True - elif metadata_col.autoincrement is False: - alter_column_op.kw["autoincrement"] = False - - -@comparators.dispatch_for("column") -def _compare_type( - autogen_context, - alter_column_op, - schema, - tname, - cname, - conn_col, - metadata_col, -): - - conn_type = conn_col.type - alter_column_op.existing_type = conn_type - metadata_type = metadata_col.type - if conn_type._type_affinity is sqltypes.NullType: - log.info( - "Couldn't determine database type " "for column '%s.%s'", - tname, - cname, - ) - return - if metadata_type._type_affinity is sqltypes.NullType: - log.info( - "Column '%s.%s' has no type within " "the model; 
can't compare", - tname, - cname, - ) - return - - isdiff = autogen_context.migration_context._compare_type( - conn_col, metadata_col - ) - - if isdiff: - alter_column_op.modify_type = metadata_type - log.info( - "Detected type change from %r to %r on '%s.%s'", - conn_type, - metadata_type, - tname, - cname, - ) - - -def _render_server_default_for_compare( - metadata_default, metadata_col, autogen_context -): - rendered = _user_defined_render( - "server_default", metadata_default, autogen_context - ) - if rendered is not False: - return rendered - - if isinstance(metadata_default, sa_schema.DefaultClause): - if isinstance(metadata_default.arg, compat.string_types): - metadata_default = metadata_default.arg - else: - metadata_default = str( - metadata_default.arg.compile(dialect=autogen_context.dialect) - ) - if isinstance(metadata_default, compat.string_types): - if metadata_col.type._type_affinity is sqltypes.String: - metadata_default = re.sub(r"^'|'$", "", metadata_default) - return repr(metadata_default) - else: - return metadata_default - else: - return None - - -@comparators.dispatch_for("column") -def _compare_server_default( - autogen_context, - alter_column_op, - schema, - tname, - cname, - conn_col, - metadata_col, -): - - metadata_default = metadata_col.server_default - conn_col_default = conn_col.server_default - if conn_col_default is None and metadata_default is None: - return False - rendered_metadata_default = _render_server_default_for_compare( - metadata_default, metadata_col, autogen_context - ) - - rendered_conn_default = ( - conn_col.server_default.arg.text if conn_col.server_default else None - ) - - alter_column_op.existing_server_default = conn_col_default - - isdiff = autogen_context.migration_context._compare_server_default( - conn_col, - metadata_col, - rendered_metadata_default, - rendered_conn_default, - ) - if isdiff: - alter_column_op.modify_server_default = metadata_default - log.info("Detected server default on column '%s.%s'", tname, cname) - - -@comparators.dispatch_for("column") -def _compare_column_comment( - autogen_context, - alter_column_op, - schema, - tname, - cname, - conn_col, - metadata_col, -): - - if not sqla_compat._dialect_supports_comments(autogen_context.dialect): - return - - metadata_comment = metadata_col.comment - conn_col_comment = conn_col.comment - if conn_col_comment is None and metadata_comment is None: - return False - - alter_column_op.existing_comment = conn_col_comment - - if conn_col_comment != metadata_comment: - alter_column_op.modify_comment = metadata_comment - log.info("Detected column comment '%s.%s'", tname, cname) - - -@comparators.dispatch_for("table") -def _compare_foreign_keys( - autogen_context, - modify_table_ops, - schema, - tname, - conn_table, - metadata_table, -): - - # if we're doing CREATE TABLE, all FKs are created - # inline within the table def - if conn_table is None or metadata_table is None: - return - - inspector = autogen_context.inspector - metadata_fks = set( - fk - for fk in metadata_table.constraints - if isinstance(fk, sa_schema.ForeignKeyConstraint) - ) - - conn_fks = inspector.get_foreign_keys(tname, schema=schema) - - backend_reflects_fk_options = conn_fks and "options" in conn_fks[0] - - conn_fks = set(_make_foreign_key(const, conn_table) for const in conn_fks) - - # give the dialect a chance to correct the FKs to match more - # closely - autogen_context.migration_context.impl.correct_for_autogen_foreignkeys( - conn_fks, metadata_fks - ) - - metadata_fks = set( - _fk_constraint_sig(fk, 
include_options=backend_reflects_fk_options) - for fk in metadata_fks - ) - - conn_fks = set( - _fk_constraint_sig(fk, include_options=backend_reflects_fk_options) - for fk in conn_fks - ) - - conn_fks_by_sig = dict((c.sig, c) for c in conn_fks) - metadata_fks_by_sig = dict((c.sig, c) for c in metadata_fks) - - metadata_fks_by_name = dict( - (c.name, c) for c in metadata_fks if c.name is not None - ) - conn_fks_by_name = dict( - (c.name, c) for c in conn_fks if c.name is not None - ) - - def _add_fk(obj, compare_to): - if autogen_context.run_filters( - obj.const, obj.name, "foreign_key_constraint", False, compare_to - ): - modify_table_ops.ops.append( - ops.CreateForeignKeyOp.from_constraint(const.const) - ) - - log.info( - "Detected added foreign key (%s)(%s) on table %s%s", - ", ".join(obj.source_columns), - ", ".join(obj.target_columns), - "%s." % obj.source_schema if obj.source_schema else "", - obj.source_table, - ) - - def _remove_fk(obj, compare_to): - if autogen_context.run_filters( - obj.const, obj.name, "foreign_key_constraint", True, compare_to - ): - modify_table_ops.ops.append( - ops.DropConstraintOp.from_constraint(obj.const) - ) - log.info( - "Detected removed foreign key (%s)(%s) on table %s%s", - ", ".join(obj.source_columns), - ", ".join(obj.target_columns), - "%s." % obj.source_schema if obj.source_schema else "", - obj.source_table, - ) - - # so far it appears we don't need to do this by name at all. - # SQLite doesn't preserve constraint names anyway - - for removed_sig in set(conn_fks_by_sig).difference(metadata_fks_by_sig): - const = conn_fks_by_sig[removed_sig] - if removed_sig not in metadata_fks_by_sig: - compare_to = ( - metadata_fks_by_name[const.name].const - if const.name in metadata_fks_by_name - else None - ) - _remove_fk(const, compare_to) - - for added_sig in set(metadata_fks_by_sig).difference(conn_fks_by_sig): - const = metadata_fks_by_sig[added_sig] - if added_sig not in conn_fks_by_sig: - compare_to = ( - conn_fks_by_name[const.name].const - if const.name in conn_fks_by_name - else None - ) - _add_fk(const, compare_to) - - -@comparators.dispatch_for("table") -def _compare_table_comment( - autogen_context, - modify_table_ops, - schema, - tname, - conn_table, - metadata_table, -): - - if not sqla_compat._dialect_supports_comments(autogen_context.dialect): - return - - # if we're doing CREATE TABLE, comments will be created inline - # with the create_table op. - if conn_table is None or metadata_table is None: - return - - if conn_table.comment is None and metadata_table.comment is None: - return - - if metadata_table.comment is None and conn_table.comment is not None: - modify_table_ops.ops.append( - ops.DropTableCommentOp( - tname, existing_comment=conn_table.comment, schema=schema - ) - ) - elif metadata_table.comment != conn_table.comment: - modify_table_ops.ops.append( - ops.CreateTableCommentOp( - tname, - metadata_table.comment, - existing_comment=conn_table.comment, - schema=schema, - ) - ) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/autogenerate/render.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/autogenerate/render.py deleted file mode 100644 index 21bbb50..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/autogenerate/render.py +++ /dev/null @@ -1,883 +0,0 @@ -import re - -from mako.pygen import PythonPrinter -from sqlalchemy import schema as sa_schema -from sqlalchemy import sql -from sqlalchemy import types as sqltypes - -from .. 
import util -from ..operations import ops -from ..util import compat -from ..util import sqla_compat -from ..util.compat import string_types -from ..util.compat import StringIO - - -MAX_PYTHON_ARGS = 255 - -try: - from sqlalchemy.sql.naming import conv - - def _render_gen_name(autogen_context, name): - if isinstance(name, conv): - return _f_name(_alembic_autogenerate_prefix(autogen_context), name) - else: - return name - - -except ImportError: - - def _render_gen_name(autogen_context, name): - return name - - -def _indent(text): - text = re.compile(r"^", re.M).sub(" ", text).strip() - text = re.compile(r" +$", re.M).sub("", text) - return text - - -def _render_python_into_templatevars( - autogen_context, migration_script, template_args -): - imports = autogen_context.imports - - for upgrade_ops, downgrade_ops in zip( - migration_script.upgrade_ops_list, migration_script.downgrade_ops_list - ): - template_args[upgrade_ops.upgrade_token] = _indent( - _render_cmd_body(upgrade_ops, autogen_context) - ) - template_args[downgrade_ops.downgrade_token] = _indent( - _render_cmd_body(downgrade_ops, autogen_context) - ) - template_args["imports"] = "\n".join(sorted(imports)) - - -default_renderers = renderers = util.Dispatcher() - - -def _render_cmd_body(op_container, autogen_context): - - buf = StringIO() - printer = PythonPrinter(buf) - - printer.writeline( - "# ### commands auto generated by Alembic - please adjust! ###" - ) - - if not op_container.ops: - printer.writeline("pass") - else: - for op in op_container.ops: - lines = render_op(autogen_context, op) - - for line in lines: - printer.writeline(line) - - printer.writeline("# ### end Alembic commands ###") - - return buf.getvalue() - - -def render_op(autogen_context, op): - renderer = renderers.dispatch(op) - lines = util.to_list(renderer(autogen_context, op)) - return lines - - -def render_op_text(autogen_context, op): - return "\n".join(render_op(autogen_context, op)) - - -@renderers.dispatch_for(ops.ModifyTableOps) -def _render_modify_table(autogen_context, op): - opts = autogen_context.opts - render_as_batch = opts.get("render_as_batch", False) - - if op.ops: - lines = [] - if render_as_batch: - with autogen_context._within_batch(): - lines.append( - "with op.batch_alter_table(%r, schema=%r) as batch_op:" - % (op.table_name, op.schema) - ) - for t_op in op.ops: - t_lines = render_op(autogen_context, t_op) - lines.extend(t_lines) - lines.append("") - else: - for t_op in op.ops: - t_lines = render_op(autogen_context, t_op) - lines.extend(t_lines) - - return lines - else: - return ["pass"] - - -@renderers.dispatch_for(ops.CreateTableCommentOp) -def _render_create_table_comment(autogen_context, op): - - templ = ( - "{prefix}create_table_comment(\n" - "{indent}'{tname}',\n" - "{indent}{comment},\n" - "{indent}existing_comment={existing},\n" - "{indent}schema={schema}\n" - ")" - ) - return templ.format( - prefix=_alembic_autogenerate_prefix(autogen_context), - tname=op.table_name, - comment="'%s'" % op.comment if op.comment is not None else None, - existing="'%s'" % op.existing_comment - if op.existing_comment is not None - else None, - schema="'%s'" % op.schema if op.schema is not None else None, - indent=" ", - ) - - -@renderers.dispatch_for(ops.DropTableCommentOp) -def _render_drop_table_comment(autogen_context, op): - - templ = ( - "{prefix}drop_table_comment(\n" - "{indent}'{tname}',\n" - "{indent}existing_comment={existing},\n" - "{indent}schema={schema}\n" - ")" - ) - return templ.format( - 
prefix=_alembic_autogenerate_prefix(autogen_context), - tname=op.table_name, - existing="'%s'" % op.existing_comment - if op.existing_comment is not None - else None, - schema="'%s'" % op.schema if op.schema is not None else None, - indent=" ", - ) - - -@renderers.dispatch_for(ops.CreateTableOp) -def _add_table(autogen_context, op): - table = op.to_table() - - args = [ - col - for col in [ - _render_column(col, autogen_context) for col in table.columns - ] - if col - ] + sorted( - [ - rcons - for rcons in [ - _render_constraint(cons, autogen_context) - for cons in table.constraints - ] - if rcons is not None - ] - ) - - if len(args) > MAX_PYTHON_ARGS: - args = "*[" + ",\n".join(args) + "]" - else: - args = ",\n".join(args) - - text = "%(prefix)screate_table(%(tablename)r,\n%(args)s" % { - "tablename": _ident(op.table_name), - "prefix": _alembic_autogenerate_prefix(autogen_context), - "args": args, - } - if op.schema: - text += ",\nschema=%r" % _ident(op.schema) - - comment = sqla_compat._comment_attribute(table) - if comment: - text += ",\ncomment=%r" % _ident(comment) - for k in sorted(op.kw): - text += ",\n%s=%r" % (k.replace(" ", "_"), op.kw[k]) - text += "\n)" - return text - - -@renderers.dispatch_for(ops.DropTableOp) -def _drop_table(autogen_context, op): - text = "%(prefix)sdrop_table(%(tname)r" % { - "prefix": _alembic_autogenerate_prefix(autogen_context), - "tname": _ident(op.table_name), - } - if op.schema: - text += ", schema=%r" % _ident(op.schema) - text += ")" - return text - - -@renderers.dispatch_for(ops.CreateIndexOp) -def _add_index(autogen_context, op): - index = op.to_index() - - has_batch = autogen_context._has_batch - - if has_batch: - tmpl = ( - "%(prefix)screate_index(%(name)r, [%(columns)s], " - "unique=%(unique)r%(kwargs)s)" - ) - else: - tmpl = ( - "%(prefix)screate_index(%(name)r, %(table)r, [%(columns)s], " - "unique=%(unique)r%(schema)s%(kwargs)s)" - ) - - text = tmpl % { - "prefix": _alembic_autogenerate_prefix(autogen_context), - "name": _render_gen_name(autogen_context, index.name), - "table": _ident(index.table.name), - "columns": ", ".join( - _get_index_rendered_expressions(index, autogen_context) - ), - "unique": index.unique or False, - "schema": (", schema=%r" % _ident(index.table.schema)) - if index.table.schema - else "", - "kwargs": ( - ", " - + ", ".join( - [ - "%s=%s" - % (key, _render_potential_expr(val, autogen_context)) - for key, val in index.kwargs.items() - ] - ) - ) - if len(index.kwargs) - else "", - } - return text - - -@renderers.dispatch_for(ops.DropIndexOp) -def _drop_index(autogen_context, op): - has_batch = autogen_context._has_batch - - if has_batch: - tmpl = "%(prefix)sdrop_index(%(name)r)" - else: - tmpl = ( - "%(prefix)sdrop_index(%(name)r, " - "table_name=%(table_name)r%(schema)s)" - ) - - text = tmpl % { - "prefix": _alembic_autogenerate_prefix(autogen_context), - "name": _render_gen_name(autogen_context, op.index_name), - "table_name": _ident(op.table_name), - "schema": ((", schema=%r" % _ident(op.schema)) if op.schema else ""), - } - return text - - -@renderers.dispatch_for(ops.CreateUniqueConstraintOp) -def _add_unique_constraint(autogen_context, op): - return [_uq_constraint(op.to_constraint(), autogen_context, True)] - - -@renderers.dispatch_for(ops.CreateForeignKeyOp) -def _add_fk_constraint(autogen_context, op): - - args = [repr(_render_gen_name(autogen_context, op.constraint_name))] - if not autogen_context._has_batch: - args.append(repr(_ident(op.source_table))) - - args.extend( - [ - repr(_ident(op.referent_table)), - 
repr([_ident(col) for col in op.local_cols]), - repr([_ident(col) for col in op.remote_cols]), - ] - ) - - kwargs = [ - "referent_schema", - "onupdate", - "ondelete", - "initially", - "deferrable", - "use_alter", - ] - if not autogen_context._has_batch: - kwargs.insert(0, "source_schema") - - for k in kwargs: - if k in op.kw: - value = op.kw[k] - if value is not None: - args.append("%s=%r" % (k, value)) - - return "%(prefix)screate_foreign_key(%(args)s)" % { - "prefix": _alembic_autogenerate_prefix(autogen_context), - "args": ", ".join(args), - } - - -@renderers.dispatch_for(ops.CreatePrimaryKeyOp) -def _add_pk_constraint(constraint, autogen_context): - raise NotImplementedError() - - -@renderers.dispatch_for(ops.CreateCheckConstraintOp) -def _add_check_constraint(constraint, autogen_context): - raise NotImplementedError() - - -@renderers.dispatch_for(ops.DropConstraintOp) -def _drop_constraint(autogen_context, op): - - if autogen_context._has_batch: - template = "%(prefix)sdrop_constraint" "(%(name)r, type_=%(type)r)" - else: - template = ( - "%(prefix)sdrop_constraint" - "(%(name)r, '%(table_name)s'%(schema)s, type_=%(type)r)" - ) - - text = template % { - "prefix": _alembic_autogenerate_prefix(autogen_context), - "name": _render_gen_name(autogen_context, op.constraint_name), - "table_name": _ident(op.table_name), - "type": op.constraint_type, - "schema": (", schema=%r" % _ident(op.schema)) if op.schema else "", - } - return text - - -@renderers.dispatch_for(ops.AddColumnOp) -def _add_column(autogen_context, op): - - schema, tname, column = op.schema, op.table_name, op.column - if autogen_context._has_batch: - template = "%(prefix)sadd_column(%(column)s)" - else: - template = "%(prefix)sadd_column(%(tname)r, %(column)s" - if schema: - template += ", schema=%(schema)r" - template += ")" - text = template % { - "prefix": _alembic_autogenerate_prefix(autogen_context), - "tname": tname, - "column": _render_column(column, autogen_context), - "schema": schema, - } - return text - - -@renderers.dispatch_for(ops.DropColumnOp) -def _drop_column(autogen_context, op): - - schema, tname, column_name = op.schema, op.table_name, op.column_name - - if autogen_context._has_batch: - template = "%(prefix)sdrop_column(%(cname)r)" - else: - template = "%(prefix)sdrop_column(%(tname)r, %(cname)r" - if schema: - template += ", schema=%(schema)r" - template += ")" - - text = template % { - "prefix": _alembic_autogenerate_prefix(autogen_context), - "tname": _ident(tname), - "cname": _ident(column_name), - "schema": _ident(schema), - } - return text - - -@renderers.dispatch_for(ops.AlterColumnOp) -def _alter_column(autogen_context, op): - - tname = op.table_name - cname = op.column_name - server_default = op.modify_server_default - type_ = op.modify_type - nullable = op.modify_nullable - comment = op.modify_comment - autoincrement = op.kw.get("autoincrement", None) - existing_type = op.existing_type - existing_nullable = op.existing_nullable - existing_comment = op.existing_comment - existing_server_default = op.existing_server_default - schema = op.schema - - indent = " " * 11 - - if autogen_context._has_batch: - template = "%(prefix)salter_column(%(cname)r" - else: - template = "%(prefix)salter_column(%(tname)r, %(cname)r" - - text = template % { - "prefix": _alembic_autogenerate_prefix(autogen_context), - "tname": tname, - "cname": cname, - } - if existing_type is not None: - text += ",\n%sexisting_type=%s" % ( - indent, - _repr_type(existing_type, autogen_context), - ) - if server_default is not False: - 
rendered = _render_server_default(server_default, autogen_context) - text += ",\n%sserver_default=%s" % (indent, rendered) - - if type_ is not None: - text += ",\n%stype_=%s" % (indent, _repr_type(type_, autogen_context)) - if nullable is not None: - text += ",\n%snullable=%r" % (indent, nullable) - if comment is not False: - text += ",\n%scomment=%r" % (indent, comment) - if existing_comment is not None: - text += ",\n%sexisting_comment=%r" % (indent, existing_comment) - if nullable is None and existing_nullable is not None: - text += ",\n%sexisting_nullable=%r" % (indent, existing_nullable) - if autoincrement is not None: - text += ",\n%sautoincrement=%r" % (indent, autoincrement) - if server_default is False and existing_server_default: - rendered = _render_server_default( - existing_server_default, autogen_context - ) - text += ",\n%sexisting_server_default=%s" % (indent, rendered) - if schema and not autogen_context._has_batch: - text += ",\n%sschema=%r" % (indent, schema) - text += ")" - return text - - -class _f_name(object): - def __init__(self, prefix, name): - self.prefix = prefix - self.name = name - - def __repr__(self): - return "%sf(%r)" % (self.prefix, _ident(self.name)) - - -def _ident(name): - """produce a __repr__() object for a string identifier that may - use quoted_name() in SQLAlchemy 0.9 and greater. - - The issue worked around here is that quoted_name() doesn't have - very good repr() behavior by itself when unicode is involved. - - """ - if name is None: - return name - elif util.sqla_09 and isinstance(name, sql.elements.quoted_name): - if compat.py2k: - # the attempt to encode to ascii here isn't super ideal, - # however we are trying to cut down on an explosion of - # u'' literals only when py2k + SQLA 0.9, in particular - # makes unit tests testing code generation very difficult - try: - return name.encode("ascii") - except UnicodeError: - return compat.text_type(name) - else: - return compat.text_type(name) - elif isinstance(name, compat.string_types): - return name - - -def _render_potential_expr(value, autogen_context, wrap_in_text=True): - if isinstance(value, sql.ClauseElement): - compile_kw = dict( - compile_kwargs={"literal_binds": True, "include_table": False} - ) - - if wrap_in_text: - template = "%(prefix)stext(%(sql)r)" - else: - template = "%(sql)r" - - return template % { - "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), - "sql": compat.text_type( - value.compile(dialect=autogen_context.dialect, **compile_kw) - ), - } - - else: - return repr(value) - - -def _get_index_rendered_expressions(idx, autogen_context): - return [ - repr(_ident(getattr(exp, "name", None))) - if isinstance(exp, sa_schema.Column) - else _render_potential_expr(exp, autogen_context) - for exp in idx.expressions - ] - - -def _uq_constraint(constraint, autogen_context, alter): - opts = [] - - has_batch = autogen_context._has_batch - - if constraint.deferrable: - opts.append(("deferrable", str(constraint.deferrable))) - if constraint.initially: - opts.append(("initially", str(constraint.initially))) - if not has_batch and alter and constraint.table.schema: - opts.append(("schema", _ident(constraint.table.schema))) - if not alter and constraint.name: - opts.append( - ("name", _render_gen_name(autogen_context, constraint.name)) - ) - - if alter: - args = [repr(_render_gen_name(autogen_context, constraint.name))] - if not has_batch: - args += [repr(_ident(constraint.table.name))] - args.append(repr([_ident(col.name) for col in constraint.columns])) - args.extend(["%s=%r" % 
(k, v) for k, v in opts]) - return "%(prefix)screate_unique_constraint(%(args)s)" % { - "prefix": _alembic_autogenerate_prefix(autogen_context), - "args": ", ".join(args), - } - else: - args = [repr(_ident(col.name)) for col in constraint.columns] - args.extend(["%s=%r" % (k, v) for k, v in opts]) - return "%(prefix)sUniqueConstraint(%(args)s)" % { - "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), - "args": ", ".join(args), - } - - -def _user_autogenerate_prefix(autogen_context, target): - prefix = autogen_context.opts["user_module_prefix"] - if prefix is None: - return "%s." % target.__module__ - else: - return prefix - - -def _sqlalchemy_autogenerate_prefix(autogen_context): - return autogen_context.opts["sqlalchemy_module_prefix"] or "" - - -def _alembic_autogenerate_prefix(autogen_context): - if autogen_context._has_batch: - return "batch_op." - else: - return autogen_context.opts["alembic_module_prefix"] or "" - - -def _user_defined_render(type_, object_, autogen_context): - if "render_item" in autogen_context.opts: - render = autogen_context.opts["render_item"] - if render: - rendered = render(type_, object_, autogen_context) - if rendered is not False: - return rendered - return False - - -def _render_column(column, autogen_context): - rendered = _user_defined_render("column", column, autogen_context) - if rendered is not False: - return rendered - - opts = [] - if column.server_default: - rendered = _render_server_default( - column.server_default, autogen_context - ) - if rendered: - opts.append(("server_default", rendered)) - - if ( - column.autoincrement is not None - and column.autoincrement != sqla_compat.AUTOINCREMENT_DEFAULT - ): - opts.append(("autoincrement", column.autoincrement)) - - if column.nullable is not None: - opts.append(("nullable", column.nullable)) - - if column.system: - opts.append(("system", column.system)) - - comment = sqla_compat._comment_attribute(column) - if comment: - opts.append(("comment", "%r" % comment)) - - # TODO: for non-ascii colname, assign a "key" - return "%(prefix)sColumn(%(name)r, %(type)s, %(kw)s)" % { - "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), - "name": _ident(column.name), - "type": _repr_type(column.type, autogen_context), - "kw": ", ".join(["%s=%s" % (kwname, val) for kwname, val in opts]), - } - - -def _render_server_default(default, autogen_context, repr_=True): - rendered = _user_defined_render("server_default", default, autogen_context) - if rendered is not False: - return rendered - - if isinstance(default, sa_schema.DefaultClause): - if isinstance(default.arg, compat.string_types): - default = default.arg - else: - return _render_potential_expr(default.arg, autogen_context) - - if isinstance(default, string_types) and repr_: - default = repr(re.sub(r"^'|'$", "", default)) - - return default - - -def _repr_type(type_, autogen_context): - rendered = _user_defined_render("type", type_, autogen_context) - if rendered is not False: - return rendered - - if hasattr(autogen_context.migration_context, "impl"): - impl_rt = autogen_context.migration_context.impl.render_type( - type_, autogen_context - ) - else: - impl_rt = None - - mod = type(type_).__module__ - imports = autogen_context.imports - if mod.startswith("sqlalchemy.dialects"): - dname = re.match(r"sqlalchemy\.dialects\.(\w+)", mod).group(1) - if imports is not None: - imports.add("from sqlalchemy.dialects import %s" % dname) - if impl_rt: - return impl_rt - else: - return "%s.%r" % (dname, type_) - elif impl_rt: - return impl_rt - elif 
mod.startswith("sqlalchemy."): - if "_render_%s_type" % type_.__visit_name__ in globals(): - fn = globals()["_render_%s_type" % type_.__visit_name__] - return fn(type_, autogen_context) - else: - prefix = _sqlalchemy_autogenerate_prefix(autogen_context) - return "%s%r" % (prefix, type_) - else: - prefix = _user_autogenerate_prefix(autogen_context, type_) - return "%s%r" % (prefix, type_) - - -def _render_ARRAY_type(type_, autogen_context): - return _render_type_w_subtype( - type_, autogen_context, "item_type", r"(.+?\()" - ) - - -def _render_type_w_subtype( - type_, autogen_context, attrname, regexp, prefix=None -): - outer_repr = repr(type_) - inner_type = getattr(type_, attrname, None) - if inner_type is None: - return False - - inner_repr = repr(inner_type) - - inner_repr = re.sub(r"([\(\)])", r"\\\1", inner_repr) - sub_type = _repr_type(getattr(type_, attrname), autogen_context) - outer_type = re.sub(regexp + inner_repr, r"\1%s" % sub_type, outer_repr) - - if prefix: - return "%s%s" % (prefix, outer_type) - - mod = type(type_).__module__ - if mod.startswith("sqlalchemy.dialects"): - dname = re.match(r"sqlalchemy\.dialects\.(\w+)", mod).group(1) - return "%s.%s" % (dname, outer_type) - elif mod.startswith("sqlalchemy"): - prefix = _sqlalchemy_autogenerate_prefix(autogen_context) - return "%s%s" % (prefix, outer_type) - else: - return None - - -_constraint_renderers = util.Dispatcher() - - -def _render_constraint(constraint, autogen_context): - try: - renderer = _constraint_renderers.dispatch(constraint) - except ValueError: - util.warn("No renderer is established for object %r" % constraint) - return "[Unknown Python object %r]" % constraint - else: - return renderer(constraint, autogen_context) - - -@_constraint_renderers.dispatch_for(sa_schema.PrimaryKeyConstraint) -def _render_primary_key(constraint, autogen_context): - rendered = _user_defined_render("primary_key", constraint, autogen_context) - if rendered is not False: - return rendered - - if not constraint.columns: - return None - - opts = [] - if constraint.name: - opts.append( - ("name", repr(_render_gen_name(autogen_context, constraint.name))) - ) - return "%(prefix)sPrimaryKeyConstraint(%(args)s)" % { - "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), - "args": ", ".join( - [repr(c.name) for c in constraint.columns] - + ["%s=%s" % (kwname, val) for kwname, val in opts] - ), - } - - -def _fk_colspec(fk, metadata_schema): - """Implement a 'safe' version of ForeignKey._get_colspec() that - won't fail if the remote table can't be resolved. - - """ - colspec = fk._get_colspec() - tokens = colspec.split(".") - tname, colname = tokens[-2:] - - if metadata_schema is not None and len(tokens) == 2: - table_fullname = "%s.%s" % (metadata_schema, tname) - else: - table_fullname = ".".join(tokens[0:-1]) - - if ( - not fk.link_to_name - and fk.parent is not None - and fk.parent.table is not None - ): - # try to resolve the remote table in order to adjust for column.key. - # the FK constraint needs to be rendered in terms of the column - # name. 
- parent_metadata = fk.parent.table.metadata - if table_fullname in parent_metadata.tables: - col = parent_metadata.tables[table_fullname].c.get(colname) - if col is not None: - colname = _ident(col.name) - - colspec = "%s.%s" % (table_fullname, colname) - - return colspec - - -def _populate_render_fk_opts(constraint, opts): - - if constraint.onupdate: - opts.append(("onupdate", repr(constraint.onupdate))) - if constraint.ondelete: - opts.append(("ondelete", repr(constraint.ondelete))) - if constraint.initially: - opts.append(("initially", repr(constraint.initially))) - if constraint.deferrable: - opts.append(("deferrable", repr(constraint.deferrable))) - if constraint.use_alter: - opts.append(("use_alter", repr(constraint.use_alter))) - - -@_constraint_renderers.dispatch_for(sa_schema.ForeignKeyConstraint) -def _render_foreign_key(constraint, autogen_context): - rendered = _user_defined_render("foreign_key", constraint, autogen_context) - if rendered is not False: - return rendered - - opts = [] - if constraint.name: - opts.append( - ("name", repr(_render_gen_name(autogen_context, constraint.name))) - ) - - _populate_render_fk_opts(constraint, opts) - - apply_metadata_schema = constraint.parent.metadata.schema - return ( - "%(prefix)sForeignKeyConstraint([%(cols)s], " - "[%(refcols)s], %(args)s)" - % { - "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), - "cols": ", ".join( - "%r" % _ident(f.parent.name) for f in constraint.elements - ), - "refcols": ", ".join( - repr(_fk_colspec(f, apply_metadata_schema)) - for f in constraint.elements - ), - "args": ", ".join( - ["%s=%s" % (kwname, val) for kwname, val in opts] - ), - } - ) - - -@_constraint_renderers.dispatch_for(sa_schema.UniqueConstraint) -def _render_unique_constraint(constraint, autogen_context): - rendered = _user_defined_render("unique", constraint, autogen_context) - if rendered is not False: - return rendered - - return _uq_constraint(constraint, autogen_context, False) - - -@_constraint_renderers.dispatch_for(sa_schema.CheckConstraint) -def _render_check_constraint(constraint, autogen_context): - rendered = _user_defined_render("check", constraint, autogen_context) - if rendered is not False: - return rendered - - # detect the constraint being part of - # a parent type which is probably in the Table already. - # ideally SQLAlchemy would give us more of a first class - # way to detect this. 
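The guard that follows exists because types such as ``Boolean`` can attach an implicit ``CheckConstraint`` to their Table, tagged with a ``_create_rule`` whose target is the type itself; rendering that check again inside ``create_table()`` would duplicate it. A hedged illustration, assuming a SQLAlchemy version where ``Boolean`` still emits its constraint by default::

    from sqlalchemy import (Boolean, CheckConstraint, Column, Integer,
                            MetaData, Table)

    m = MetaData()
    t = Table(
        "example", m,
        Column("flag", Boolean),                    # implicit CHECK (flag IN (0, 1))
        Column("qty", Integer),
        CheckConstraint("qty > 0", name="ck_qty"),  # explicit; should be rendered
    )

    for cons in t.constraints:
        if isinstance(cons, CheckConstraint):
            # the implicit check has a _create_rule targeting the Boolean type;
            # the explicit one has a _create_rule of None and passes the guard
            print(cons.name, getattr(cons._create_rule, "target", None))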
- if ( - constraint._create_rule - and hasattr(constraint._create_rule, "target") - and isinstance(constraint._create_rule.target, sqltypes.TypeEngine) - ): - return None - opts = [] - if constraint.name: - opts.append( - ("name", repr(_render_gen_name(autogen_context, constraint.name))) - ) - return "%(prefix)sCheckConstraint(%(sqltext)s%(opts)s)" % { - "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), - "opts": ", " + (", ".join("%s=%s" % (k, v) for k, v in opts)) - if opts - else "", - "sqltext": _render_potential_expr( - constraint.sqltext, autogen_context, wrap_in_text=False - ), - } - - -@renderers.dispatch_for(ops.ExecuteSQLOp) -def _execute_sql(autogen_context, op): - if not isinstance(op.sqltext, string_types): - raise NotImplementedError( - "Autogenerate rendering of SQL Expression language constructs " - "not supported here; please use a plain SQL string" - ) - return "op.execute(%r)" % op.sqltext - - -renderers = default_renderers.branch() diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/autogenerate/rewriter.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/autogenerate/rewriter.py deleted file mode 100644 index 1e9522b..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/autogenerate/rewriter.py +++ /dev/null @@ -1,154 +0,0 @@ -from alembic import util -from alembic.operations import ops - - -class Rewriter(object): - """A helper object that allows easy 'rewriting' of ops streams. - - The :class:`.Rewriter` object is intended to be passed along - to the - :paramref:`.EnvironmentContext.configure.process_revision_directives` - parameter in an ``env.py`` script. Once constructed, any number - of "rewrites" functions can be associated with it, which will be given - the opportunity to modify the structure without having to have explicit - knowledge of the overall structure. - - The function is passed the :class:`.MigrationContext` object and - ``revision`` tuple that are passed to the :paramref:`.Environment - Context.configure.process_revision_directives` function normally, - and the third argument is an individual directive of the type - noted in the decorator. The function has the choice of returning - a single op directive, which normally can be the directive that - was actually passed, or a new directive to replace it, or a list - of zero or more directives to replace it. - - .. seealso:: - - :ref:`autogen_rewriter` - usage example - - .. versionadded:: 0.8 - - """ - - _traverse = util.Dispatcher() - - _chained = None - - def __init__(self): - self.dispatch = util.Dispatcher() - - def chain(self, other): - """Produce a "chain" of this :class:`.Rewriter` to another. - - This allows two rewriters to operate serially on a stream, - e.g.:: - - writer1 = autogenerate.Rewriter() - writer2 = autogenerate.Rewriter() - - @writer1.rewrites(ops.AddColumnOp) - def add_column_nullable(context, revision, op): - op.column.nullable = True - return op - - @writer2.rewrites(ops.AddColumnOp) - def add_column_idx(context, revision, op): - idx_op = ops.CreateIndexOp( - 'ixc', op.table_name, [op.column.name]) - return [ - op, - idx_op - ] - - writer = writer1.chain(writer2) - - :param other: a :class:`.Rewriter` instance - :return: a new :class:`.Rewriter` that will run the operations - of this writer, then the "other" writer, in succession. 
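In an ``env.py`` script, the combined writer is then handed to ``process_revision_directives``; a minimal sketch of that wiring, abbreviated from the standard template (connection and metadata setup assumed)::

    # env.py (sketch)
    from alembic import context
    from alembic.autogenerate import rewriter
    from alembic.operations import ops

    writer = rewriter.Rewriter()

    @writer.rewrites(ops.AddColumnOp)
    def force_nullable(ctx, revision, op):
        op.column.nullable = True   # every autogenerated ADD COLUMN starts nullable
        return op                   # a list of zero or more directives also works

    context.configure(
        # connection=..., target_metadata=...  (as in the standard template)
        process_revision_directives=writer,
    )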
- - """ - wr = self.__class__.__new__(self.__class__) - wr.__dict__.update(self.__dict__) - wr._chained = other - return wr - - def rewrites(self, operator): - """Register a function as rewriter for a given type. - - The function should receive three arguments, which are - the :class:`.MigrationContext`, a ``revision`` tuple, and - an op directive of the type indicated. E.g.:: - - @writer1.rewrites(ops.AddColumnOp) - def add_column_nullable(context, revision, op): - op.column.nullable = True - return op - - """ - return self.dispatch.dispatch_for(operator) - - def _rewrite(self, context, revision, directive): - try: - _rewriter = self.dispatch.dispatch(directive) - except ValueError: - _rewriter = None - yield directive - else: - for r_directive in util.to_list( - _rewriter(context, revision, directive) - ): - yield r_directive - - def __call__(self, context, revision, directives): - self.process_revision_directives(context, revision, directives) - if self._chained: - self._chained(context, revision, directives) - - @_traverse.dispatch_for(ops.MigrationScript) - def _traverse_script(self, context, revision, directive): - upgrade_ops_list = [] - for upgrade_ops in directive.upgrade_ops_list: - ret = self._traverse_for(context, revision, directive.upgrade_ops) - if len(ret) != 1: - raise ValueError( - "Can only return single object for UpgradeOps traverse" - ) - upgrade_ops_list.append(ret[0]) - directive.upgrade_ops = upgrade_ops_list - - downgrade_ops_list = [] - for downgrade_ops in directive.downgrade_ops_list: - ret = self._traverse_for( - context, revision, directive.downgrade_ops - ) - if len(ret) != 1: - raise ValueError( - "Can only return single object for DowngradeOps traverse" - ) - downgrade_ops_list.append(ret[0]) - directive.downgrade_ops = downgrade_ops_list - - @_traverse.dispatch_for(ops.OpContainer) - def _traverse_op_container(self, context, revision, directive): - self._traverse_list(context, revision, directive.ops) - - @_traverse.dispatch_for(ops.MigrateOperation) - def _traverse_any_directive(self, context, revision, directive): - pass - - def _traverse_for(self, context, revision, directive): - directives = list(self._rewrite(context, revision, directive)) - for directive in directives: - traverser = self._traverse.dispatch(directive) - traverser(self, context, revision, directive) - return directives - - def _traverse_list(self, context, revision, directives): - dest = [] - for directive in directives: - dest.extend(self._traverse_for(context, revision, directive)) - - directives[:] = dest - - def process_revision_directives(self, context, revision, directives): - self._traverse_list(context, revision, directives) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/command.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/command.py deleted file mode 100644 index 5416c42..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/command.py +++ /dev/null @@ -1,564 +0,0 @@ -import os - -from . import autogenerate as autogen -from . import util -from .runtime.environment import EnvironmentContext -from .script import ScriptDirectory - - -def list_templates(config): - """List available templates - - :param config: a :class:`.Config` object. 
- - """ - - config.print_stdout("Available templates:\n") - for tempname in os.listdir(config.get_template_directory()): - with open( - os.path.join(config.get_template_directory(), tempname, "README") - ) as readme: - synopsis = next(readme) - config.print_stdout("%s - %s", tempname, synopsis) - - config.print_stdout("\nTemplates are used via the 'init' command, e.g.:") - config.print_stdout("\n alembic init --template generic ./scripts") - - -def init(config, directory, template="generic"): - """Initialize a new scripts directory. - - :param config: a :class:`.Config` object. - - :param directory: string path of the target directory - - :param template: string name of the migration environment template to - use. - - """ - - if os.access(directory, os.F_OK): - raise util.CommandError("Directory %s already exists" % directory) - - template_dir = os.path.join(config.get_template_directory(), template) - if not os.access(template_dir, os.F_OK): - raise util.CommandError("No such template %r" % template) - - util.status( - "Creating directory %s" % os.path.abspath(directory), - os.makedirs, - directory, - ) - - versions = os.path.join(directory, "versions") - util.status( - "Creating directory %s" % os.path.abspath(versions), - os.makedirs, - versions, - ) - - script = ScriptDirectory(directory) - - for file_ in os.listdir(template_dir): - file_path = os.path.join(template_dir, file_) - if file_ == "alembic.ini.mako": - config_file = os.path.abspath(config.config_file_name) - if os.access(config_file, os.F_OK): - util.msg("File %s already exists, skipping" % config_file) - else: - script._generate_template( - file_path, config_file, script_location=directory - ) - elif os.path.isfile(file_path): - output_file = os.path.join(directory, file_) - script._copy_file(file_path, output_file) - - util.msg( - "Please edit configuration/connection/logging " - "settings in %r before proceeding." % config_file - ) - - -def revision( - config, - message=None, - autogenerate=False, - sql=False, - head="head", - splice=False, - branch_label=None, - version_path=None, - rev_id=None, - depends_on=None, - process_revision_directives=None, -): - """Create a new revision file. - - :param config: a :class:`.Config` object. - - :param message: string message to apply to the revision; this is the - ``-m`` option to ``alembic revision``. - - :param autogenerate: whether or not to autogenerate the script from - the database; this is the ``--autogenerate`` option to - ``alembic revision``. - - :param sql: whether to dump the script out as a SQL string; when specified, - the script is dumped to stdout. This is the ``--sql`` option to - ``alembic revision``. - - :param head: head revision to build the new revision upon as a parent; - this is the ``--head`` option to ``alembic revision``. - - :param splice: whether or not the new revision should be made into a - new head of its own; is required when the given ``head`` is not itself - a head. This is the ``--splice`` option to ``alembic revision``. - - :param branch_label: string label to apply to the branch; this is the - ``--branch-label`` option to ``alembic revision``. - - :param version_path: string symbol identifying a specific version path - from the configuration; this is the ``--version-path`` option to - ``alembic revision``. - - :param rev_id: optional revision identifier to use instead of having - one generated; this is the ``--rev-id`` option to ``alembic revision``. 
- - :param depends_on: optional list of "depends on" identifiers; this is the - ``--depends-on`` option to ``alembic revision``. - - :param process_revision_directives: this is a callable that takes the - same form as the callable described at - :paramref:`.EnvironmentContext.configure.process_revision_directives`; - will be applied to the structure generated by the revision process - where it can be altered programmatically. Note that unlike all - the other parameters, this option is only available via programmatic - use of :func:`.command.revision` - - .. versionadded:: 0.9.0 - - """ - - script_directory = ScriptDirectory.from_config(config) - - command_args = dict( - message=message, - autogenerate=autogenerate, - sql=sql, - head=head, - splice=splice, - branch_label=branch_label, - version_path=version_path, - rev_id=rev_id, - depends_on=depends_on, - ) - revision_context = autogen.RevisionContext( - config, - script_directory, - command_args, - process_revision_directives=process_revision_directives, - ) - - environment = util.asbool(config.get_main_option("revision_environment")) - - if autogenerate: - environment = True - - if sql: - raise util.CommandError( - "Using --sql with --autogenerate does not make any sense" - ) - - def retrieve_migrations(rev, context): - revision_context.run_autogenerate(rev, context) - return [] - - elif environment: - - def retrieve_migrations(rev, context): - revision_context.run_no_autogenerate(rev, context) - return [] - - elif sql: - raise util.CommandError( - "Using --sql with the revision command when " - "revision_environment is not configured does not make any sense" - ) - - if environment: - with EnvironmentContext( - config, - script_directory, - fn=retrieve_migrations, - as_sql=sql, - template_args=revision_context.template_args, - revision_context=revision_context, - ): - script_directory.run_env() - - scripts = [script for script in revision_context.generate_scripts()] - if len(scripts) == 1: - return scripts[0] - else: - return scripts - - -def merge(config, revisions, message=None, branch_label=None, rev_id=None): - """Merge two revisions together. Creates a new migration file. - - .. versionadded:: 0.7.0 - - :param config: a :class:`.Config` instance - - :param message: string message to apply to the revision - - :param branch_label: string label name to apply to the new revision - - :param rev_id: hardcoded revision identifier instead of generating a new - one. - - .. seealso:: - - :ref:`branches` - - """ - - script = ScriptDirectory.from_config(config) - template_args = { - "config": config # Let templates use config for - # e.g. multiple databases - } - return script.generate_revision( - rev_id or util.rev_id(), - message, - refresh=True, - head=revisions, - branch_labels=branch_label, - **template_args - ) - - -def upgrade(config, revision, sql=False, tag=None): - """Upgrade to a later version. - - :param config: a :class:`.Config` instance. - - :param revision: string revision target or range for --sql mode - - :param sql: if True, use ``--sql`` mode - - :param tag: an arbitrary "tag" that can be intercepted by custom - ``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument` - method. 
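As the body below shows, a ``start:end`` range is accepted only together with ``sql=True`` (offline mode); both call styles in a sketch (revision ids hypothetical)::

    from alembic import command
    from alembic.config import Config

    cfg = Config("alembic.ini")
    command.upgrade(cfg, "head")                          # online: run against the DB
    command.upgrade(cfg, "ae1027a6acf:head", sql=True)    # offline: dump SQL to stdout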
- - """ - - script = ScriptDirectory.from_config(config) - - starting_rev = None - if ":" in revision: - if not sql: - raise util.CommandError("Range revision not allowed") - starting_rev, revision = revision.split(":", 2) - - def upgrade(rev, context): - return script._upgrade_revs(revision, rev) - - with EnvironmentContext( - config, - script, - fn=upgrade, - as_sql=sql, - starting_rev=starting_rev, - destination_rev=revision, - tag=tag, - ): - script.run_env() - - -def downgrade(config, revision, sql=False, tag=None): - """Revert to a previous version. - - :param config: a :class:`.Config` instance. - - :param revision: string revision target or range for --sql mode - - :param sql: if True, use ``--sql`` mode - - :param tag: an arbitrary "tag" that can be intercepted by custom - ``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument` - method. - - """ - - script = ScriptDirectory.from_config(config) - starting_rev = None - if ":" in revision: - if not sql: - raise util.CommandError("Range revision not allowed") - starting_rev, revision = revision.split(":", 2) - elif sql: - raise util.CommandError( - "downgrade with --sql requires :" - ) - - def downgrade(rev, context): - return script._downgrade_revs(revision, rev) - - with EnvironmentContext( - config, - script, - fn=downgrade, - as_sql=sql, - starting_rev=starting_rev, - destination_rev=revision, - tag=tag, - ): - script.run_env() - - -def show(config, rev): - """Show the revision(s) denoted by the given symbol. - - :param config: a :class:`.Config` instance. - - :param revision: string revision target - - """ - - script = ScriptDirectory.from_config(config) - - if rev == "current": - - def show_current(rev, context): - for sc in script.get_revisions(rev): - config.print_stdout(sc.log_entry) - return [] - - with EnvironmentContext(config, script, fn=show_current): - script.run_env() - else: - for sc in script.get_revisions(rev): - config.print_stdout(sc.log_entry) - - -def history(config, rev_range=None, verbose=False, indicate_current=False): - """List changeset scripts in chronological order. - - :param config: a :class:`.Config` instance. - - :param rev_range: string revision range - - :param verbose: output in verbose mode. - - :param indicate_current: indicate current revision. 
- - ..versionadded:: 0.9.9 - - """ - - script = ScriptDirectory.from_config(config) - if rev_range is not None: - if ":" not in rev_range: - raise util.CommandError( - "History range requires [start]:[end], " "[start]:, or :[end]" - ) - base, head = rev_range.strip().split(":") - else: - base = head = None - - environment = ( - util.asbool(config.get_main_option("revision_environment")) - or indicate_current - ) - - def _display_history(config, script, base, head, currents=()): - for sc in script.walk_revisions( - base=base or "base", head=head or "heads" - ): - - if indicate_current: - sc._db_current_indicator = sc.revision in currents - - config.print_stdout( - sc.cmd_format( - verbose=verbose, - include_branches=True, - include_doc=True, - include_parents=True, - ) - ) - - def _display_history_w_current(config, script, base, head): - def _display_current_history(rev, context): - if head == "current": - _display_history(config, script, base, rev, rev) - elif base == "current": - _display_history(config, script, rev, head, rev) - else: - _display_history(config, script, base, head, rev) - return [] - - with EnvironmentContext(config, script, fn=_display_current_history): - script.run_env() - - if base == "current" or head == "current" or environment: - _display_history_w_current(config, script, base, head) - else: - _display_history(config, script, base, head) - - -def heads(config, verbose=False, resolve_dependencies=False): - """Show current available heads in the script directory - - :param config: a :class:`.Config` instance. - - :param verbose: output in verbose mode. - - :param resolve_dependencies: treat dependency version as down revisions. - - """ - - script = ScriptDirectory.from_config(config) - if resolve_dependencies: - heads = script.get_revisions("heads") - else: - heads = script.get_revisions(script.get_heads()) - - for rev in heads: - config.print_stdout( - rev.cmd_format( - verbose, include_branches=True, tree_indicators=False - ) - ) - - -def branches(config, verbose=False): - """Show current branch points. - - :param config: a :class:`.Config` instance. - - :param verbose: output in verbose mode. - - """ - script = ScriptDirectory.from_config(config) - for sc in script.walk_revisions(): - if sc.is_branch_point: - config.print_stdout( - "%s\n%s\n", - sc.cmd_format(verbose, include_branches=True), - "\n".join( - "%s -> %s" - % ( - " " * len(str(sc.revision)), - rev_obj.cmd_format( - False, include_branches=True, include_doc=verbose - ), - ) - for rev_obj in ( - script.get_revision(rev) for rev in sc.nextrev - ) - ), - ) - - -def current(config, verbose=False, head_only=False): - """Display the current revision for a database. - - :param config: a :class:`.Config` instance. - - :param verbose: output in verbose mode. - - :param head_only: deprecated; use ``verbose`` for additional output. - - """ - - script = ScriptDirectory.from_config(config) - - if head_only: - util.warn("--head-only is deprecated") - - def display_version(rev, context): - if verbose: - config.print_stdout( - "Current revision(s) for %s:", - util.obfuscate_url_pw(context.connection.engine.url), - ) - for rev in script.get_all_current(rev): - config.print_stdout(rev.cmd_format(verbose)) - - return [] - - with EnvironmentContext(config, script, fn=display_version): - script.run_env() - - -def stamp(config, revision, sql=False, tag=None): - """'stamp' the revision table with the given revision; don't - run any migrations. - - :param config: a :class:`.Config` instance. 
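Stamping is the usual way to bring an existing database under Alembic control: the version table is written, but no migration scripts run. Sketch (hypothetical path)::

    from alembic import command
    from alembic.config import Config

    cfg = Config("alembic.ini")
    command.stamp(cfg, "head")   # record "head" in the version table only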
- - :param revision: target revision. - - :param sql: use ``--sql`` mode - - :param tag: an arbitrary "tag" that can be intercepted by custom - ``env.py`` scripts via the :class:`.EnvironmentContext.get_tag_argument` - method. - - """ - - script = ScriptDirectory.from_config(config) - - starting_rev = None - if ":" in revision: - if not sql: - raise util.CommandError("Range revision not allowed") - starting_rev, revision = revision.split(":", 2) - - def do_stamp(rev, context): - return script._stamp_revs(revision, rev) - - with EnvironmentContext( - config, - script, - fn=do_stamp, - as_sql=sql, - destination_rev=revision, - starting_rev=starting_rev, - tag=tag, - ): - script.run_env() - - -def edit(config, rev): - """Edit revision script(s) using $EDITOR. - - :param config: a :class:`.Config` instance. - - :param rev: target revision. - - """ - - script = ScriptDirectory.from_config(config) - - if rev == "current": - - def edit_current(rev, context): - if not rev: - raise util.CommandError("No current revisions") - for sc in script.get_revisions(rev): - util.edit(sc.path) - return [] - - with EnvironmentContext(config, script, fn=edit_current): - script.run_env() - else: - revs = script.get_revisions(rev) - if not revs: - raise util.CommandError( - "No revision files indicated by symbol '%s'" % rev - ) - for sc in revs: - util.edit(sc.path) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/config.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/config.py deleted file mode 100644 index f06a25c..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/config.py +++ /dev/null @@ -1,531 +0,0 @@ -from argparse import ArgumentParser -import inspect -import os -import sys - -from . import command -from . import package_dir -from . import util -from .util import compat -from .util.compat import SafeConfigParser - - -class Config(object): - - """Represent an Alembic configuration. - - Within an ``env.py`` script, this is available - via the :attr:`.EnvironmentContext.config` attribute, - which in turn is available at ``alembic.context``:: - - from alembic import context - - some_param = context.config.get_main_option("my option") - - When invoking Alembic programatically, a new - :class:`.Config` can be created by passing - the name of an .ini file to the constructor:: - - from alembic.config import Config - alembic_cfg = Config("/path/to/yourapp/alembic.ini") - - With a :class:`.Config` object, you can then - run Alembic commands programmatically using the directives - in :mod:`alembic.command`. - - The :class:`.Config` object can also be constructed without - a filename. Values can be set programmatically, and - new sections will be created as needed:: - - from alembic.config import Config - alembic_cfg = Config() - alembic_cfg.set_main_option("script_location", "myapp:migrations") - alembic_cfg.set_main_option("sqlalchemy.url", "postgresql://foo/bar") - alembic_cfg.set_section_option("mysection", "foo", "bar") - - .. warning:: - - When using programmatic configuration, make sure the - ``env.py`` file in use is compatible with the target configuration; - including that the call to Python ``logging.fileConfig()`` is - omitted if the programmatic configuration doesn't actually include - logging directives. 
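The usual way to honor this warning is to guard the logging call in ``env.py`` rather than configuring it unconditionally; a common sketch::

    # env.py (sketch)
    from logging.config import fileConfig
    from alembic import context

    config = context.config
    if config.config_file_name is not None:
        # skip logging setup when the Config was built programmatically
        fileConfig(config.config_file_name)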
- - For passing non-string values to environments, such as connections and - engines, use the :attr:`.Config.attributes` dictionary:: - - with engine.begin() as connection: - alembic_cfg.attributes['connection'] = connection - command.upgrade(alembic_cfg, "head") - - :param file_: name of the .ini file to open. - :param ini_section: name of the main Alembic section within the - .ini file - :param output_buffer: optional file-like input buffer which - will be passed to the :class:`.MigrationContext` - used to redirect - the output of "offline generation" when using Alembic programmatically. - :param stdout: buffer where the "print" output of commands will be sent. - Defaults to ``sys.stdout``. - - .. versionadded:: 0.4 - - :param config_args: A dictionary of keys and values that will be used - for substitution in the alembic config file. The dictionary as given - is **copied** to a new one, stored locally as the attribute - ``.config_args``. When the :attr:`.Config.file_config` attribute is - first invoked, the replacement variable ``here`` will be added to this - dictionary before the dictionary is passed to ``SafeConfigParser()`` - to parse the .ini file. - - .. versionadded:: 0.7.0 - - :param attributes: optional dictionary of arbitrary Python keys/values, - which will be populated into the :attr:`.Config.attributes` dictionary. - - .. versionadded:: 0.7.5 - - .. seealso:: - - :ref:`connection_sharing` - - """ - - def __init__( - self, - file_=None, - ini_section="alembic", - output_buffer=None, - stdout=sys.stdout, - cmd_opts=None, - config_args=util.immutabledict(), - attributes=None, - ): - """Construct a new :class:`.Config` - - """ - self.config_file_name = file_ - self.config_ini_section = ini_section - self.output_buffer = output_buffer - self.stdout = stdout - self.cmd_opts = cmd_opts - self.config_args = dict(config_args) - if attributes: - self.attributes.update(attributes) - - cmd_opts = None - """The command-line options passed to the ``alembic`` script. - - Within an ``env.py`` script this can be accessed via the - :attr:`.EnvironmentContext.config` attribute. - - .. versionadded:: 0.6.0 - - .. seealso:: - - :meth:`.EnvironmentContext.get_x_argument` - - """ - - config_file_name = None - """Filesystem path to the .ini file in use.""" - - config_ini_section = None - """Name of the config file section to read basic configuration - from. Defaults to ``alembic``, that is the ``[alembic]`` section - of the .ini file. This value is modified using the ``-n/--name`` - option to the Alembic runnier. - - """ - - @util.memoized_property - def attributes(self): - """A Python dictionary for storage of additional state. - - - This is a utility dictionary which can include not just strings but - engines, connections, schema objects, or anything else. - Use this to pass objects into an env.py script, such as passing - a :class:`sqlalchemy.engine.base.Connection` when calling - commands from :mod:`alembic.command` programmatically. - - .. versionadded:: 0.7.5 - - .. seealso:: - - :ref:`connection_sharing` - - :paramref:`.Config.attributes` - - """ - return {} - - def print_stdout(self, text, *arg): - """Render a message to standard out. - - When :meth:`.Config.print_stdout` is called with additional args - those arguments will formatted against the provided text, - otherwise we simply output the provided text verbatim. 
- - e.g.:: - - >>> config.print_stdout('Some text %s', 'arg') - Some Text arg - - """ - - if arg: - output = compat.text_type(text) % arg - else: - output = compat.text_type(text) - - util.write_outstream(self.stdout, output, "\n") - - @util.memoized_property - def file_config(self): - """Return the underlying ``ConfigParser`` object. - - Direct access to the .ini file is available here, - though the :meth:`.Config.get_section` and - :meth:`.Config.get_main_option` - methods provide a possibly simpler interface. - - """ - - if self.config_file_name: - here = os.path.abspath(os.path.dirname(self.config_file_name)) - else: - here = "" - self.config_args["here"] = here - file_config = SafeConfigParser(self.config_args) - if self.config_file_name: - file_config.read([self.config_file_name]) - else: - file_config.add_section(self.config_ini_section) - return file_config - - def get_template_directory(self): - """Return the directory where Alembic setup templates are found. - - This method is used by the alembic ``init`` and ``list_templates`` - commands. - - """ - return os.path.join(package_dir, "templates") - - def get_section(self, name): - """Return all the configuration options from a given .ini file section - as a dictionary. - - """ - return dict(self.file_config.items(name)) - - def set_main_option(self, name, value): - """Set an option programmatically within the 'main' section. - - This overrides whatever was in the .ini file. - - :param name: name of the value - - :param value: the value. Note that this value is passed to - ``ConfigParser.set``, which supports variable interpolation using - pyformat (e.g. ``%(some_value)s``). A raw percent sign not part of - an interpolation symbol must therefore be escaped, e.g. ``%%``. - The given value may refer to another value already in the file - using the interpolation format. - - """ - self.set_section_option(self.config_ini_section, name, value) - - def remove_main_option(self, name): - self.file_config.remove_option(self.config_ini_section, name) - - def set_section_option(self, section, name, value): - """Set an option programmatically within the given section. - - The section is created if it doesn't exist already. - The value here will override whatever was in the .ini - file. - - :param section: name of the section - - :param name: name of the value - - :param value: the value. Note that this value is passed to - ``ConfigParser.set``, which supports variable interpolation using - pyformat (e.g. ``%(some_value)s``). A raw percent sign not part of - an interpolation symbol must therefore be escaped, e.g. ``%%``. - The given value may refer to another value already in the file - using the interpolation format. - - """ - - if not self.file_config.has_section(section): - self.file_config.add_section(section) - self.file_config.set(section, name, value) - - def get_section_option(self, section, name, default=None): - """Return an option from the given section of the .ini file. - - """ - if not self.file_config.has_section(section): - raise util.CommandError( - "No config file %r found, or file has no " - "'[%s]' section" % (self.config_file_name, section) - ) - if self.file_config.has_option(section, name): - return self.file_config.get(section, name) - else: - return default - - def get_main_option(self, name, default=None): - """Return an option from the 'main' section of the .ini file. - - This defaults to being a key from the ``[alembic]`` - section, unless the ``-n/--name`` flag were used to - indicate a different section. 
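Because these values pass through ``ConfigParser`` interpolation, a literal percent sign must be doubled, exactly as the docstrings above describe; e.g., assuming the standard ``file_template`` option::

    from alembic.config import Config

    cfg = Config()   # no .ini file; sections are created as needed
    cfg.set_main_option("script_location", "myapp:migrations")
    # literal "%" doubled so interpolation leaves "%(rev)s" intact for Alembic
    cfg.set_main_option("file_template", "%%(rev)s_%%(slug)s")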
- - """ - return self.get_section_option(self.config_ini_section, name, default) - - -class CommandLine(object): - def __init__(self, prog=None): - self._generate_args(prog) - - def _generate_args(self, prog): - def add_options(parser, positional, kwargs): - kwargs_opts = { - "template": ( - "-t", - "--template", - dict( - default="generic", - type=str, - help="Setup template for use with 'init'", - ), - ), - "message": ( - "-m", - "--message", - dict( - type=str, help="Message string to use with 'revision'" - ), - ), - "sql": ( - "--sql", - dict( - action="store_true", - help="Don't emit SQL to database - dump to " - "standard output/file instead. See docs on " - "offline mode.", - ), - ), - "tag": ( - "--tag", - dict( - type=str, - help="Arbitrary 'tag' name - can be used by " - "custom env.py scripts.", - ), - ), - "head": ( - "--head", - dict( - type=str, - help="Specify head revision or @head " - "to base new revision on.", - ), - ), - "splice": ( - "--splice", - dict( - action="store_true", - help="Allow a non-head revision as the " - "'head' to splice onto", - ), - ), - "depends_on": ( - "--depends-on", - dict( - action="append", - help="Specify one or more revision identifiers " - "which this revision should depend on.", - ), - ), - "rev_id": ( - "--rev-id", - dict( - type=str, - help="Specify a hardcoded revision id instead of " - "generating one", - ), - ), - "version_path": ( - "--version-path", - dict( - type=str, - help="Specify specific path from config for " - "version file", - ), - ), - "branch_label": ( - "--branch-label", - dict( - type=str, - help="Specify a branch label to apply to the " - "new revision", - ), - ), - "verbose": ( - "-v", - "--verbose", - dict(action="store_true", help="Use more verbose output"), - ), - "resolve_dependencies": ( - "--resolve-dependencies", - dict( - action="store_true", - help="Treat dependency versions as down revisions", - ), - ), - "autogenerate": ( - "--autogenerate", - dict( - action="store_true", - help="Populate revision script with candidate " - "migration operations, based on comparison " - "of database to model.", - ), - ), - "head_only": ( - "--head-only", - dict( - action="store_true", - help="Deprecated. Use --verbose for " - "additional output", - ), - ), - "rev_range": ( - "-r", - "--rev-range", - dict( - action="store", - help="Specify a revision range; " - "format is [start]:[end]", - ), - ), - "indicate_current": ( - "-i", - "--indicate-current", - dict( - action="store_true", - help="Indicate the current revision", - ), - ), - } - positional_help = { - "directory": "location of scripts directory", - "revision": "revision identifier", - "revisions": "one or more revisions, or 'heads' for all heads", - } - for arg in kwargs: - if arg in kwargs_opts: - args = kwargs_opts[arg] - args, kw = args[0:-1], args[-1] - parser.add_argument(*args, **kw) - - for arg in positional: - if arg == "revisions": - subparser.add_argument( - arg, nargs="+", help=positional_help.get(arg) - ) - else: - subparser.add_argument(arg, help=positional_help.get(arg)) - - parser = ArgumentParser(prog=prog) - parser.add_argument( - "-c", - "--config", - type=str, - default="alembic.ini", - help="Alternate config file", - ) - parser.add_argument( - "-n", - "--name", - type=str, - default="alembic", - help="Name of section in .ini file to " "use for Alembic config", - ) - parser.add_argument( - "-x", - action="append", - help="Additional arguments consumed by " - "custom env.py scripts, e.g. 
-x " - "setting1=somesetting -x setting2=somesetting", - ) - parser.add_argument( - "--raiseerr", - action="store_true", - help="Raise a full stack trace on error", - ) - subparsers = parser.add_subparsers() - - for fn in [getattr(command, n) for n in dir(command)]: - if ( - inspect.isfunction(fn) - and fn.__name__[0] != "_" - and fn.__module__ == "alembic.command" - ): - - spec = compat.inspect_getargspec(fn) - if spec[3]: - positional = spec[0][1 : -len(spec[3])] - kwarg = spec[0][-len(spec[3]) :] - else: - positional = spec[0][1:] - kwarg = [] - - subparser = subparsers.add_parser(fn.__name__, help=fn.__doc__) - add_options(subparser, positional, kwarg) - subparser.set_defaults(cmd=(fn, positional, kwarg)) - self.parser = parser - - def run_cmd(self, config, options): - fn, positional, kwarg = options.cmd - - try: - fn( - config, - *[getattr(options, k, None) for k in positional], - **dict((k, getattr(options, k, None)) for k in kwarg) - ) - except util.CommandError as e: - if options.raiseerr: - raise - else: - util.err(str(e)) - - def main(self, argv=None): - options = self.parser.parse_args(argv) - if not hasattr(options, "cmd"): - # see http://bugs.python.org/issue9253, argparse - # behavior changed incompatibly in py3.3 - self.parser.error("too few arguments") - else: - cfg = Config( - file_=options.config, - ini_section=options.name, - cmd_opts=options, - ) - self.run_cmd(cfg, options) - - -def main(argv=None, prog=None, **kwargs): - """The console runner function for Alembic.""" - - CommandLine(prog=prog).main(argv=argv) - - -if __name__ == "__main__": - main() diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/context.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/context.py deleted file mode 100644 index 758fca8..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/context.py +++ /dev/null @@ -1,5 +0,0 @@ -from .runtime.environment import EnvironmentContext - -# create proxy functions for -# each method on the EnvironmentContext class. -EnvironmentContext.create_module_class_proxy(globals(), locals()) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/__init__.py deleted file mode 100644 index 7d50ba0..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from . import mssql # noqa -from . import mysql # noqa -from . import oracle # noqa -from . import postgresql # noqa -from . import sqlite # noqa -from .impl import DefaultImpl # noqa diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/base.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/base.py deleted file mode 100644 index 90573cf..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/base.py +++ /dev/null @@ -1,213 +0,0 @@ -import functools - -from sqlalchemy import Integer -from sqlalchemy import types as sqltypes -from sqlalchemy.ext.compiler import compiles -from sqlalchemy.schema import Column -from sqlalchemy.schema import DDLElement - -from .. 
import util -from ..util.sqla_compat import _columns_for_constraint # noqa -from ..util.sqla_compat import _find_columns # noqa -from ..util.sqla_compat import _fk_spec # noqa -from ..util.sqla_compat import _is_type_bound # noqa -from ..util.sqla_compat import _table_for_constraint # noqa - -# backwards compat - -if util.sqla_09: - from sqlalchemy.sql.elements import quoted_name - - -class AlterTable(DDLElement): - - """Represent an ALTER TABLE statement. - - Only the string name and optional schema name of the table - is required, not a full Table object. - - """ - - def __init__(self, table_name, schema=None): - self.table_name = table_name - self.schema = schema - - -class RenameTable(AlterTable): - def __init__(self, old_table_name, new_table_name, schema=None): - super(RenameTable, self).__init__(old_table_name, schema=schema) - self.new_table_name = new_table_name - - -class AlterColumn(AlterTable): - def __init__( - self, - name, - column_name, - schema=None, - existing_type=None, - existing_nullable=None, - existing_server_default=None, - existing_comment=None, - ): - super(AlterColumn, self).__init__(name, schema=schema) - self.column_name = column_name - self.existing_type = ( - sqltypes.to_instance(existing_type) - if existing_type is not None - else None - ) - self.existing_nullable = existing_nullable - self.existing_server_default = existing_server_default - self.existing_comment = existing_comment - - -class ColumnNullable(AlterColumn): - def __init__(self, name, column_name, nullable, **kw): - super(ColumnNullable, self).__init__(name, column_name, **kw) - self.nullable = nullable - - -class ColumnType(AlterColumn): - def __init__(self, name, column_name, type_, **kw): - super(ColumnType, self).__init__(name, column_name, **kw) - self.type_ = sqltypes.to_instance(type_) - - -class ColumnName(AlterColumn): - def __init__(self, name, column_name, newname, **kw): - super(ColumnName, self).__init__(name, column_name, **kw) - self.newname = newname - - -class ColumnDefault(AlterColumn): - def __init__(self, name, column_name, default, **kw): - super(ColumnDefault, self).__init__(name, column_name, **kw) - self.default = default - - -class AddColumn(AlterTable): - def __init__(self, name, column, schema=None): - super(AddColumn, self).__init__(name, schema=schema) - self.column = column - - -class DropColumn(AlterTable): - def __init__(self, name, column, schema=None): - super(DropColumn, self).__init__(name, schema=schema) - self.column = column - - -class ColumnComment(AlterColumn): - def __init__(self, name, column_name, comment, **kw): - super(ColumnComment, self).__init__(name, column_name, **kw) - self.comment = comment - - -@compiles(RenameTable) -def visit_rename_table(element, compiler, **kw): - return "%s RENAME TO %s" % ( - alter_table(compiler, element.table_name, element.schema), - format_table_name(compiler, element.new_table_name, element.schema), - ) - - -@compiles(AddColumn) -def visit_add_column(element, compiler, **kw): - return "%s %s" % ( - alter_table(compiler, element.table_name, element.schema), - add_column(compiler, element.column, **kw), - ) - - -@compiles(DropColumn) -def visit_drop_column(element, compiler, **kw): - return "%s %s" % ( - alter_table(compiler, element.table_name, element.schema), - drop_column(compiler, element.column.name, **kw), - ) - - -@compiles(ColumnNullable) -def visit_column_nullable(element, compiler, **kw): - return "%s %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - alter_column(compiler, 
element.column_name), - "DROP NOT NULL" if element.nullable else "SET NOT NULL", - ) - - -@compiles(ColumnType) -def visit_column_type(element, compiler, **kw): - return "%s %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - alter_column(compiler, element.column_name), - "TYPE %s" % format_type(compiler, element.type_), - ) - - -@compiles(ColumnName) -def visit_column_name(element, compiler, **kw): - return "%s RENAME %s TO %s" % ( - alter_table(compiler, element.table_name, element.schema), - format_column_name(compiler, element.column_name), - format_column_name(compiler, element.newname), - ) - - -@compiles(ColumnDefault) -def visit_column_default(element, compiler, **kw): - return "%s %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - alter_column(compiler, element.column_name), - "SET DEFAULT %s" % format_server_default(compiler, element.default) - if element.default is not None - else "DROP DEFAULT", - ) - - -def quote_dotted(name, quote): - """quote the elements of a dotted name""" - - if util.sqla_09 and isinstance(name, quoted_name): - return quote(name) - result = ".".join([quote(x) for x in name.split(".")]) - return result - - -def format_table_name(compiler, name, schema): - quote = functools.partial(compiler.preparer.quote) - if schema: - return quote_dotted(schema, quote) + "." + quote(name) - else: - return quote(name) - - -def format_column_name(compiler, name): - return compiler.preparer.quote(name) - - -def format_server_default(compiler, default): - return compiler.get_column_default_string( - Column("x", Integer, server_default=default) - ) - - -def format_type(compiler, type_): - return compiler.dialect.type_compiler.process(type_) - - -def alter_table(compiler, name, schema): - return "ALTER TABLE %s" % format_table_name(compiler, name, schema) - - -def drop_column(compiler, name): - return "DROP COLUMN %s" % format_column_name(compiler, name) - - -def alter_column(compiler, name): - return "ALTER COLUMN %s" % format_column_name(compiler, name) - - -def add_column(compiler, column, **kw): - return "ADD COLUMN %s" % compiler.get_column_specification(column, **kw) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/impl.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/impl.py deleted file mode 100644 index 5df7c04..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/impl.py +++ /dev/null @@ -1,455 +0,0 @@ -from sqlalchemy import schema -from sqlalchemy import text -from sqlalchemy import types as sqltypes - -from . import base -from .. import util -from ..util import sqla_compat -from ..util.compat import string_types -from ..util.compat import text_type -from ..util.compat import with_metaclass - - -class ImplMeta(type): - def __init__(cls, classname, bases, dict_): - newtype = type.__init__(cls, classname, bases, dict_) - if "__dialect__" in dict_: - _impls[dict_["__dialect__"]] = cls - return newtype - - -_impls = {} - - -class DefaultImpl(with_metaclass(ImplMeta)): - - """Provide the entrypoint for major migration operations, - including database-specific behavioral variances. - - While individual SQL/DDL constructs already provide - for database-specific implementations, variances here - allow for entirely different sequences of operations - to take place for a particular migration, such as - SQL Server's special 'IDENTITY INSERT' step for - bulk inserts. 
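# Editor's sketch (not part of the original file) of the registry mechanism
# defined just above: ImplMeta records each subclass of DefaultImpl under its
# __dialect__ key in the module-level _impls dict, which is how
# DefaultImpl.get_by_dialect() resolves a backend. The dialect name below is
# hypothetical.
from alembic.ddl.impl import DefaultImpl, _impls

class ExampleImpl(DefaultImpl):
    __dialect__ = "exampledb"      # hypothetical backend name
    transactional_ddl = True

assert _impls["exampledb"] is ExampleImpl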
- - """ - - __dialect__ = "default" - - transactional_ddl = False - command_terminator = ";" - - def __init__( - self, - dialect, - connection, - as_sql, - transactional_ddl, - output_buffer, - context_opts, - ): - self.dialect = dialect - self.connection = connection - self.as_sql = as_sql - self.literal_binds = context_opts.get("literal_binds", False) - - self.output_buffer = output_buffer - self.memo = {} - self.context_opts = context_opts - if transactional_ddl is not None: - self.transactional_ddl = transactional_ddl - - if self.literal_binds: - if not self.as_sql: - raise util.CommandError( - "Can't use literal_binds setting without as_sql mode" - ) - - @classmethod - def get_by_dialect(cls, dialect): - return _impls[dialect.name] - - def static_output(self, text): - self.output_buffer.write(text_type(text + "\n\n")) - self.output_buffer.flush() - - def requires_recreate_in_batch(self, batch_op): - """Return True if the given :class:`.BatchOperationsImpl` - would need the table to be recreated and copied in order to - proceed. - - Normally, only returns True on SQLite when operations other - than add_column are present. - - """ - return False - - def prep_table_for_batch(self, table): - """perform any operations needed on a table before a new - one is created to replace it in batch mode. - - the PG dialect uses this to drop constraints on the table - before the new one uses those same names. - - """ - - @property - def bind(self): - return self.connection - - def _exec( - self, - construct, - execution_options=None, - multiparams=(), - params=util.immutabledict(), - ): - if isinstance(construct, string_types): - construct = text(construct) - if self.as_sql: - if multiparams or params: - # TODO: coverage - raise Exception("Execution arguments not allowed with as_sql") - - if self.literal_binds and not isinstance( - construct, schema.DDLElement - ): - compile_kw = dict(compile_kwargs={"literal_binds": True}) - else: - compile_kw = {} - - self.static_output( - text_type( - construct.compile(dialect=self.dialect, **compile_kw) - ) - .replace("\t", " ") - .strip() - + self.command_terminator - ) - else: - conn = self.connection - if execution_options: - conn = conn.execution_options(**execution_options) - return conn.execute(construct, *multiparams, **params) - - def execute(self, sql, execution_options=None): - self._exec(sql, execution_options) - - def alter_column( - self, - table_name, - column_name, - nullable=None, - server_default=False, - name=None, - type_=None, - schema=None, - autoincrement=None, - comment=False, - existing_comment=None, - existing_type=None, - existing_server_default=None, - existing_nullable=None, - existing_autoincrement=None, - ): - if autoincrement is not None or existing_autoincrement is not None: - util.warn( - "autoincrement and existing_autoincrement " - "only make sense for MySQL" - ) - if nullable is not None: - self._exec( - base.ColumnNullable( - table_name, - column_name, - nullable, - schema=schema, - existing_type=existing_type, - existing_server_default=existing_server_default, - existing_nullable=existing_nullable, - existing_comment=existing_comment, - ) - ) - if server_default is not False: - self._exec( - base.ColumnDefault( - table_name, - column_name, - server_default, - schema=schema, - existing_type=existing_type, - existing_server_default=existing_server_default, - existing_nullable=existing_nullable, - existing_comment=existing_comment, - ) - ) - if type_ is not None: - self._exec( - base.ColumnType( - table_name, - column_name, - 
type_, - schema=schema, - existing_type=existing_type, - existing_server_default=existing_server_default, - existing_nullable=existing_nullable, - existing_comment=existing_comment, - ) - ) - - if comment is not False: - self._exec( - base.ColumnComment( - table_name, - column_name, - comment, - schema=schema, - existing_type=existing_type, - existing_server_default=existing_server_default, - existing_nullable=existing_nullable, - existing_comment=existing_comment, - ) - ) - - # do the new name last ;) - if name is not None: - self._exec( - base.ColumnName( - table_name, - column_name, - name, - schema=schema, - existing_type=existing_type, - existing_server_default=existing_server_default, - existing_nullable=existing_nullable, - ) - ) - - def add_column(self, table_name, column, schema=None): - self._exec(base.AddColumn(table_name, column, schema=schema)) - - def drop_column(self, table_name, column, schema=None, **kw): - self._exec(base.DropColumn(table_name, column, schema=schema)) - - def add_constraint(self, const): - if const._create_rule is None or const._create_rule(self): - self._exec(schema.AddConstraint(const)) - - def drop_constraint(self, const): - self._exec(schema.DropConstraint(const)) - - def rename_table(self, old_table_name, new_table_name, schema=None): - self._exec( - base.RenameTable(old_table_name, new_table_name, schema=schema) - ) - - def create_table(self, table): - table.dispatch.before_create( - table, self.connection, checkfirst=False, _ddl_runner=self - ) - self._exec(schema.CreateTable(table)) - table.dispatch.after_create( - table, self.connection, checkfirst=False, _ddl_runner=self - ) - for index in table.indexes: - self._exec(schema.CreateIndex(index)) - - with_comment = ( - sqla_compat._dialect_supports_comments(self.dialect) - and not self.dialect.inline_comments - ) - comment = sqla_compat._comment_attribute(table) - if comment and with_comment: - self.create_table_comment(table) - - for column in table.columns: - comment = sqla_compat._comment_attribute(column) - if comment and with_comment: - self.create_column_comment(column) - - def drop_table(self, table): - self._exec(schema.DropTable(table)) - - def create_index(self, index): - self._exec(schema.CreateIndex(index)) - - def create_table_comment(self, table): - self._exec(schema.SetTableComment(table)) - - def drop_table_comment(self, table): - self._exec(schema.DropTableComment(table)) - - def create_column_comment(self, column): - self._exec(schema.SetColumnComment(column)) - - def drop_index(self, index): - self._exec(schema.DropIndex(index)) - - def bulk_insert(self, table, rows, multiinsert=True): - if not isinstance(rows, list): - raise TypeError("List expected") - elif rows and not isinstance(rows[0], dict): - raise TypeError("List of dictionaries expected") - if self.as_sql: - for row in rows: - self._exec( - table.insert(inline=True).values( - **dict( - ( - k, - sqla_compat._literal_bindparam( - k, v, type_=table.c[k].type - ) - if not isinstance( - v, sqla_compat._literal_bindparam - ) - else v, - ) - for k, v in row.items() - ) - ) - ) - else: - # work around http://www.sqlalchemy.org/trac/ticket/2461 - if not hasattr(table, "_autoincrement_column"): - table._autoincrement_column = None - if rows: - if multiinsert: - self._exec(table.insert(inline=True), multiparams=rows) - else: - for row in rows: - self._exec(table.insert(inline=True).values(**row)) - - def compare_type(self, inspector_column, metadata_column): - - conn_type = inspector_column.type - metadata_type = 
metadata_column.type - - metadata_impl = metadata_type.dialect_impl(self.dialect) - if isinstance(metadata_impl, sqltypes.Variant): - metadata_impl = metadata_impl.impl.dialect_impl(self.dialect) - - # work around SQLAlchemy bug "stale value for type affinity" - # fixed in 0.7.4 - metadata_impl.__dict__.pop("_type_affinity", None) - - if hasattr(metadata_impl, "compare_against_backend"): - comparison = metadata_impl.compare_against_backend( - self.dialect, conn_type - ) - if comparison is not None: - return not comparison - - if conn_type._compare_type_affinity(metadata_impl): - comparator = _type_comparators.get(conn_type._type_affinity, None) - - return comparator and comparator(metadata_impl, conn_type) - else: - return True - - def compare_server_default( - self, - inspector_column, - metadata_column, - rendered_metadata_default, - rendered_inspector_default, - ): - return rendered_inspector_default != rendered_metadata_default - - def correct_for_autogen_constraints( - self, - conn_uniques, - conn_indexes, - metadata_unique_constraints, - metadata_indexes, - ): - pass - - def _compat_autogen_column_reflect(self, inspector): - return self.autogen_column_reflect - - def correct_for_autogen_foreignkeys(self, conn_fks, metadata_fks): - pass - - def autogen_column_reflect(self, inspector, table, column_info): - """A hook that is attached to the 'column_reflect' event for when - a Table is reflected from the database during the autogenerate - process. - - Dialects can elect to modify the information gathered here. - - """ - - def start_migrations(self): - """A hook called when :meth:`.EnvironmentContext.run_migrations` - is called. - - Implementations can set up per-migration-run state here. - - """ - - def emit_begin(self): - """Emit the string ``BEGIN``, or the backend-specific - equivalent, on the current connection context. - - This is used in offline mode and typically - via :meth:`.EnvironmentContext.begin_transaction`. - - """ - self.static_output("BEGIN" + self.command_terminator) - - def emit_commit(self): - """Emit the string ``COMMIT``, or the backend-specific - equivalent, on the current connection context. - - This is used in offline mode and typically - via :meth:`.EnvironmentContext.begin_transaction`. 
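# Sketch of where emit_begin()/emit_commit() fire: in offline ("--sql") mode
# the stock env.py runner below (per the standard alembic template; the URL
# is a placeholder) generates a script, and begin_transaction() brackets it
# with the BEGIN/COMMIT emitted by these hooks when transactional_ddl is set.
from alembic import context

def run_migrations_offline():
    context.configure(url="driver://user:pass@host/dbname", literal_binds=True)
    with context.begin_transaction():
        context.run_migrations()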
- - """ - self.static_output("COMMIT" + self.command_terminator) - - def render_type(self, type_obj, autogen_context): - return False - - -def _string_compare(t1, t2): - return t1.length is not None and t1.length != t2.length - - -def _numeric_compare(t1, t2): - return (t1.precision is not None and t1.precision != t2.precision) or ( - t1.precision is not None - and t1.scale is not None - and t1.scale != t2.scale - ) - - -def _integer_compare(t1, t2): - t1_small_or_big = ( - "S" - if isinstance(t1, sqltypes.SmallInteger) - else "B" - if isinstance(t1, sqltypes.BigInteger) - else "I" - ) - t2_small_or_big = ( - "S" - if isinstance(t2, sqltypes.SmallInteger) - else "B" - if isinstance(t2, sqltypes.BigInteger) - else "I" - ) - return t1_small_or_big != t2_small_or_big - - -def _datetime_compare(t1, t2): - return t1.timezone != t2.timezone - - -_type_comparators = { - sqltypes.String: _string_compare, - sqltypes.Numeric: _numeric_compare, - sqltypes.Integer: _integer_compare, - sqltypes.DateTime: _datetime_compare, -} diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/mssql.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/mssql.py deleted file mode 100644 index de2168b..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/mssql.py +++ /dev/null @@ -1,257 +0,0 @@ -from sqlalchemy import types as sqltypes -from sqlalchemy.ext.compiler import compiles -from sqlalchemy.schema import Column -from sqlalchemy.schema import CreateIndex -from sqlalchemy.sql.expression import ClauseElement -from sqlalchemy.sql.expression import Executable - -from .base import AddColumn -from .base import alter_column -from .base import alter_table -from .base import ColumnDefault -from .base import ColumnName -from .base import ColumnNullable -from .base import ColumnType -from .base import format_column_name -from .base import format_server_default -from .base import format_table_name -from .base import format_type -from .base import RenameTable -from .impl import DefaultImpl -from .. import util - - -class MSSQLImpl(DefaultImpl): - __dialect__ = "mssql" - transactional_ddl = True - batch_separator = "GO" - - def __init__(self, *arg, **kw): - super(MSSQLImpl, self).__init__(*arg, **kw) - self.batch_separator = self.context_opts.get( - "mssql_batch_separator", self.batch_separator - ) - - def _exec(self, construct, *args, **kw): - result = super(MSSQLImpl, self)._exec(construct, *args, **kw) - if self.as_sql and self.batch_separator: - self.static_output(self.batch_separator) - return result - - def emit_begin(self): - self.static_output("BEGIN TRANSACTION" + self.command_terminator) - - def emit_commit(self): - super(MSSQLImpl, self).emit_commit() - if self.as_sql and self.batch_separator: - self.static_output(self.batch_separator) - - def alter_column( - self, - table_name, - column_name, - nullable=None, - server_default=False, - name=None, - type_=None, - schema=None, - existing_type=None, - existing_server_default=None, - existing_nullable=None, - **kw - ): - - if nullable is not None and existing_type is None: - if type_ is not None: - existing_type = type_ - # the NULL/NOT NULL alter will handle - # the type alteration - type_ = None - else: - raise util.CommandError( - "MS-SQL ALTER COLUMN operations " - "with NULL or NOT NULL require the " - "existing_type or a new type_ be passed." 
- ) - - super(MSSQLImpl, self).alter_column( - table_name, - column_name, - nullable=nullable, - type_=type_, - schema=schema, - existing_type=existing_type, - existing_nullable=existing_nullable, - **kw - ) - - if server_default is not False: - if existing_server_default is not False or server_default is None: - self._exec( - _ExecDropConstraint( - table_name, column_name, "sys.default_constraints" - ) - ) - if server_default is not None: - super(MSSQLImpl, self).alter_column( - table_name, - column_name, - schema=schema, - server_default=server_default, - ) - - if name is not None: - super(MSSQLImpl, self).alter_column( - table_name, column_name, schema=schema, name=name - ) - - def create_index(self, index): - # this likely defaults to None if not present, so get() - # should normally not return the default value. being - # defensive in any case - mssql_include = index.kwargs.get("mssql_include", None) or () - for col in mssql_include: - if col not in index.table.c: - index.table.append_column(Column(col, sqltypes.NullType)) - self._exec(CreateIndex(index)) - - def bulk_insert(self, table, rows, **kw): - if self.as_sql: - self._exec( - "SET IDENTITY_INSERT %s ON" - % self.dialect.identifier_preparer.format_table(table) - ) - super(MSSQLImpl, self).bulk_insert(table, rows, **kw) - self._exec( - "SET IDENTITY_INSERT %s OFF" - % self.dialect.identifier_preparer.format_table(table) - ) - else: - super(MSSQLImpl, self).bulk_insert(table, rows, **kw) - - def drop_column(self, table_name, column, **kw): - drop_default = kw.pop("mssql_drop_default", False) - if drop_default: - self._exec( - _ExecDropConstraint( - table_name, column, "sys.default_constraints" - ) - ) - drop_check = kw.pop("mssql_drop_check", False) - if drop_check: - self._exec( - _ExecDropConstraint( - table_name, column, "sys.check_constraints" - ) - ) - drop_fks = kw.pop("mssql_drop_foreign_key", False) - if drop_fks: - self._exec(_ExecDropFKConstraint(table_name, column)) - super(MSSQLImpl, self).drop_column(table_name, column, **kw) - - -class _ExecDropConstraint(Executable, ClauseElement): - def __init__(self, tname, colname, type_): - self.tname = tname - self.colname = colname - self.type_ = type_ - - -class _ExecDropFKConstraint(Executable, ClauseElement): - def __init__(self, tname, colname): - self.tname = tname - self.colname = colname - - -@compiles(_ExecDropConstraint, "mssql") -def _exec_drop_col_constraint(element, compiler, **kw): - tname, colname, type_ = element.tname, element.colname, element.type_ - # from http://www.mssqltips.com/sqlservertip/1425/\ - # working-with-default-constraints-in-sql-server/ - # TODO: needs table formatting, etc. 
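# Usage sketch: the DDL returned below is emitted via _ExecDropConstraint
# when a migration drops a column together with its unnamed DEFAULT
# constraint, as handled by MSSQLImpl.drop_column above (hypothetical names):
#
#     op.drop_column("account", "timestamp", mssql_drop_default=True)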
- return """declare @const_name varchar(256) -select @const_name = [name] from %(type)s -where parent_object_id = object_id('%(tname)s') -and col_name(parent_object_id, parent_column_id) = '%(colname)s' -exec('alter table %(tname_quoted)s drop constraint ' + @const_name)""" % { - "type": type_, - "tname": tname, - "colname": colname, - "tname_quoted": format_table_name(compiler, tname, None), - } - - -@compiles(_ExecDropFKConstraint, "mssql") -def _exec_drop_col_fk_constraint(element, compiler, **kw): - tname, colname = element.tname, element.colname - - return """declare @const_name varchar(256) -select @const_name = [name] from - sys.foreign_keys fk join sys.foreign_key_columns fkc - on fk.object_id=fkc.constraint_object_id -where fkc.parent_object_id = object_id('%(tname)s') -and col_name(fkc.parent_object_id, fkc.parent_column_id) = '%(colname)s' -exec('alter table %(tname_quoted)s drop constraint ' + @const_name)""" % { - "tname": tname, - "colname": colname, - "tname_quoted": format_table_name(compiler, tname, None), - } - - -@compiles(AddColumn, "mssql") -def visit_add_column(element, compiler, **kw): - return "%s %s" % ( - alter_table(compiler, element.table_name, element.schema), - mssql_add_column(compiler, element.column, **kw), - ) - - -def mssql_add_column(compiler, column, **kw): - return "ADD %s" % compiler.get_column_specification(column, **kw) - - -@compiles(ColumnNullable, "mssql") -def visit_column_nullable(element, compiler, **kw): - return "%s %s %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - alter_column(compiler, element.column_name), - format_type(compiler, element.existing_type), - "NULL" if element.nullable else "NOT NULL", - ) - - -@compiles(ColumnDefault, "mssql") -def visit_column_default(element, compiler, **kw): - # TODO: there can also be a named constraint - # with ADD CONSTRAINT here - return "%s ADD DEFAULT %s FOR %s" % ( - alter_table(compiler, element.table_name, element.schema), - format_server_default(compiler, element.default), - format_column_name(compiler, element.column_name), - ) - - -@compiles(ColumnName, "mssql") -def visit_rename_column(element, compiler, **kw): - return "EXEC sp_rename '%s.%s', %s, 'COLUMN'" % ( - format_table_name(compiler, element.table_name, element.schema), - format_column_name(compiler, element.column_name), - format_column_name(compiler, element.newname), - ) - - -@compiles(ColumnType, "mssql") -def visit_column_type(element, compiler, **kw): - return "%s %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - alter_column(compiler, element.column_name), - format_type(compiler, element.type_), - ) - - -@compiles(RenameTable, "mssql") -def visit_rename_table(element, compiler, **kw): - return "EXEC sp_rename '%s', %s" % ( - format_table_name(compiler, element.table_name, element.schema), - format_table_name(compiler, element.new_table_name, None), - ) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/mysql.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/mysql.py deleted file mode 100644 index d20aec6..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/mysql.py +++ /dev/null @@ -1,414 +0,0 @@ -import re - -from sqlalchemy import schema -from sqlalchemy import types as sqltypes -from sqlalchemy.ext.compiler import compiles - -from .base import alter_table -from .base import AlterColumn -from .base import ColumnDefault -from .base import ColumnName -from .base import ColumnNullable -from .base import ColumnType -from 
.base import format_column_name -from .base import format_server_default -from .impl import DefaultImpl -from .. import util -from ..autogenerate import compare -from ..util.compat import string_types -from ..util.sqla_compat import _is_type_bound -from ..util.sqla_compat import sqla_100 - - -class MySQLImpl(DefaultImpl): - __dialect__ = "mysql" - - transactional_ddl = False - - def alter_column( - self, - table_name, - column_name, - nullable=None, - server_default=False, - name=None, - type_=None, - schema=None, - existing_type=None, - existing_server_default=None, - existing_nullable=None, - autoincrement=None, - existing_autoincrement=None, - comment=False, - existing_comment=None, - **kw - ): - if name is not None: - self._exec( - MySQLChangeColumn( - table_name, - column_name, - schema=schema, - newname=name, - nullable=nullable - if nullable is not None - else existing_nullable - if existing_nullable is not None - else True, - type_=type_ if type_ is not None else existing_type, - default=server_default - if server_default is not False - else existing_server_default, - autoincrement=autoincrement - if autoincrement is not None - else existing_autoincrement, - ) - ) - elif ( - nullable is not None - or type_ is not None - or autoincrement is not None - or comment is not False - ): - self._exec( - MySQLModifyColumn( - table_name, - column_name, - schema=schema, - newname=name if name is not None else column_name, - nullable=nullable - if nullable is not None - else existing_nullable - if existing_nullable is not None - else True, - type_=type_ if type_ is not None else existing_type, - default=server_default - if server_default is not False - else existing_server_default, - autoincrement=autoincrement - if autoincrement is not None - else existing_autoincrement, - comment=comment - if comment is not False - else existing_comment, - ) - ) - elif server_default is not False: - self._exec( - MySQLAlterDefault( - table_name, column_name, server_default, schema=schema - ) - ) - - def drop_constraint(self, const): - if isinstance(const, schema.CheckConstraint) and _is_type_bound(const): - return - - super(MySQLImpl, self).drop_constraint(const) - - def compare_server_default( - self, - inspector_column, - metadata_column, - rendered_metadata_default, - rendered_inspector_default, - ): - # partially a workaround for SQLAlchemy issue #3023; if the - # column were created without "NOT NULL", MySQL may have added - # an implicit default of '0' which we need to skip - # TODO: this is not really covered anymore ? - if ( - metadata_column.type._type_affinity is sqltypes.Integer - and inspector_column.primary_key - and not inspector_column.autoincrement - and not rendered_metadata_default - and rendered_inspector_default == "'0'" - ): - return False - elif inspector_column.type._type_affinity is sqltypes.Integer: - rendered_inspector_default = re.sub( - r"^'|'$", "", rendered_inspector_default - ) - return rendered_inspector_default != rendered_metadata_default - elif rendered_inspector_default and rendered_metadata_default: - # adjust for "function()" vs. 
"FUNCTION" - return re.sub( - r"(.*?)(?:\(\))?$", r"\1", rendered_inspector_default.lower() - ) != re.sub( - r"(.*?)(?:\(\))?$", r"\1", rendered_metadata_default.lower() - ) - else: - return rendered_inspector_default != rendered_metadata_default - - def correct_for_autogen_constraints( - self, - conn_unique_constraints, - conn_indexes, - metadata_unique_constraints, - metadata_indexes, - ): - - # TODO: if SQLA 1.0, make use of "duplicates_index" - # metadata - removed = set() - for idx in list(conn_indexes): - if idx.unique: - continue - # MySQL puts implicit indexes on FK columns, even if - # composite and even if MyISAM, so can't check this too easily. - # the name of the index may be the column name or it may - # be the name of the FK constraint. - for col in idx.columns: - if idx.name == col.name: - conn_indexes.remove(idx) - removed.add(idx.name) - break - for fk in col.foreign_keys: - if fk.name == idx.name: - conn_indexes.remove(idx) - removed.add(idx.name) - break - if idx.name in removed: - break - - # then remove indexes from the "metadata_indexes" - # that we've removed from reflected, otherwise they come out - # as adds (see #202) - for idx in list(metadata_indexes): - if idx.name in removed: - metadata_indexes.remove(idx) - - if not sqla_100: - self._legacy_correct_for_dupe_uq_uix( - conn_unique_constraints, - conn_indexes, - metadata_unique_constraints, - metadata_indexes, - ) - - def _legacy_correct_for_dupe_uq_uix( - self, - conn_unique_constraints, - conn_indexes, - metadata_unique_constraints, - metadata_indexes, - ): - - # then dedupe unique indexes vs. constraints, since MySQL - # doesn't really have unique constraints as a separate construct. - # but look in the metadata and try to maintain constructs - # that already seem to be defined one way or the other - # on that side. See #276 - metadata_uq_names = set( - [ - cons.name - for cons in metadata_unique_constraints - if cons.name is not None - ] - ) - - unnamed_metadata_uqs = set( - [ - compare._uq_constraint_sig(cons).sig - for cons in metadata_unique_constraints - if cons.name is None - ] - ) - - metadata_ix_names = set( - [cons.name for cons in metadata_indexes if cons.unique] - ) - conn_uq_names = dict( - (cons.name, cons) for cons in conn_unique_constraints - ) - conn_ix_names = dict( - (cons.name, cons) for cons in conn_indexes if cons.unique - ) - - for overlap in set(conn_uq_names).intersection(conn_ix_names): - if overlap not in metadata_uq_names: - if ( - compare._uq_constraint_sig(conn_uq_names[overlap]).sig - not in unnamed_metadata_uqs - ): - - conn_unique_constraints.discard(conn_uq_names[overlap]) - elif overlap not in metadata_ix_names: - conn_indexes.discard(conn_ix_names[overlap]) - - def correct_for_autogen_foreignkeys(self, conn_fks, metadata_fks): - conn_fk_by_sig = dict( - (compare._fk_constraint_sig(fk).sig, fk) for fk in conn_fks - ) - metadata_fk_by_sig = dict( - (compare._fk_constraint_sig(fk).sig, fk) for fk in metadata_fks - ) - - for sig in set(conn_fk_by_sig).intersection(metadata_fk_by_sig): - mdfk = metadata_fk_by_sig[sig] - cnfk = conn_fk_by_sig[sig] - # MySQL considers RESTRICT to be the default and doesn't - # report on it. 
if the model has explicit RESTRICT and - # the conn FK has None, set it to RESTRICT - if ( - mdfk.ondelete is not None - and mdfk.ondelete.lower() == "restrict" - and cnfk.ondelete is None - ): - cnfk.ondelete = "RESTRICT" - if ( - mdfk.onupdate is not None - and mdfk.onupdate.lower() == "restrict" - and cnfk.onupdate is None - ): - cnfk.onupdate = "RESTRICT" - - -class MySQLAlterDefault(AlterColumn): - def __init__(self, name, column_name, default, schema=None): - super(AlterColumn, self).__init__(name, schema=schema) - self.column_name = column_name - self.default = default - - -class MySQLChangeColumn(AlterColumn): - def __init__( - self, - name, - column_name, - schema=None, - newname=None, - type_=None, - nullable=None, - default=False, - autoincrement=None, - comment=False, - ): - super(AlterColumn, self).__init__(name, schema=schema) - self.column_name = column_name - self.nullable = nullable - self.newname = newname - self.default = default - self.autoincrement = autoincrement - self.comment = comment - if type_ is None: - raise util.CommandError( - "All MySQL CHANGE/MODIFY COLUMN operations " - "require the existing type." - ) - - self.type_ = sqltypes.to_instance(type_) - - -class MySQLModifyColumn(MySQLChangeColumn): - pass - - -@compiles(ColumnNullable, "mysql") -@compiles(ColumnName, "mysql") -@compiles(ColumnDefault, "mysql") -@compiles(ColumnType, "mysql") -def _mysql_doesnt_support_individual(element, compiler, **kw): - raise NotImplementedError( - "Individual alter column constructs not supported by MySQL" - ) - - -@compiles(MySQLAlterDefault, "mysql") -def _mysql_alter_default(element, compiler, **kw): - return "%s ALTER COLUMN %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - format_column_name(compiler, element.column_name), - "SET DEFAULT %s" % format_server_default(compiler, element.default) - if element.default is not None - else "DROP DEFAULT", - ) - - -@compiles(MySQLModifyColumn, "mysql") -def _mysql_modify_column(element, compiler, **kw): - return "%s MODIFY %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - format_column_name(compiler, element.column_name), - _mysql_colspec( - compiler, - nullable=element.nullable, - server_default=element.default, - type_=element.type_, - autoincrement=element.autoincrement, - comment=element.comment, - ), - ) - - -@compiles(MySQLChangeColumn, "mysql") -def _mysql_change_column(element, compiler, **kw): - return "%s CHANGE %s %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - format_column_name(compiler, element.column_name), - format_column_name(compiler, element.newname), - _mysql_colspec( - compiler, - nullable=element.nullable, - server_default=element.default, - type_=element.type_, - autoincrement=element.autoincrement, - comment=element.comment, - ), - ) - - -def _render_value(compiler, expr): - if isinstance(expr, string_types): - return "'%s'" % expr - else: - return compiler.sql_compiler.process(expr) - - -def _mysql_colspec( - compiler, nullable, server_default, type_, autoincrement, comment -): - spec = "%s %s" % ( - compiler.dialect.type_compiler.process(type_), - "NULL" if nullable else "NOT NULL", - ) - if autoincrement: - spec += " AUTO_INCREMENT" - if server_default is not False and server_default is not None: - spec += " DEFAULT %s" % _render_value(compiler, server_default) - if comment: - spec += " COMMENT %s" % compiler.sql_compiler.render_literal_value( - comment, sqltypes.String() - ) - - return spec - - -@compiles(schema.DropConstraint, 
"mysql") -def _mysql_drop_constraint(element, compiler, **kw): - """Redefine SQLAlchemy's drop constraint to - raise errors for invalid constraint type.""" - - constraint = element.element - if isinstance( - constraint, - ( - schema.ForeignKeyConstraint, - schema.PrimaryKeyConstraint, - schema.UniqueConstraint, - ), - ): - return compiler.visit_drop_constraint(element, **kw) - elif isinstance(constraint, schema.CheckConstraint): - # note that SQLAlchemy as of 1.2 does not yet support - # DROP CONSTRAINT for MySQL/MariaDB, so we implement fully - # here. - return "ALTER TABLE %s DROP CONSTRAINT %s" % ( - compiler.preparer.format_table(constraint.table), - compiler.preparer.format_constraint(constraint), - ) - else: - raise NotImplementedError( - "No generic 'DROP CONSTRAINT' in MySQL - " - "please specify constraint type" - ) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/oracle.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/oracle.py deleted file mode 100644 index 76cf7c5..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/oracle.py +++ /dev/null @@ -1,109 +0,0 @@ -from sqlalchemy.ext.compiler import compiles -from sqlalchemy.sql import sqltypes - -from .base import AddColumn -from .base import alter_table -from .base import ColumnComment -from .base import ColumnDefault -from .base import ColumnName -from .base import ColumnNullable -from .base import ColumnType -from .base import format_column_name -from .base import format_server_default -from .base import format_type -from .impl import DefaultImpl - - -class OracleImpl(DefaultImpl): - __dialect__ = "oracle" - transactional_ddl = False - batch_separator = "/" - command_terminator = "" - - def __init__(self, *arg, **kw): - super(OracleImpl, self).__init__(*arg, **kw) - self.batch_separator = self.context_opts.get( - "oracle_batch_separator", self.batch_separator - ) - - def _exec(self, construct, *args, **kw): - result = super(OracleImpl, self)._exec(construct, *args, **kw) - if self.as_sql and self.batch_separator: - self.static_output(self.batch_separator) - return result - - def emit_begin(self): - self._exec("SET TRANSACTION READ WRITE") - - def emit_commit(self): - self._exec("COMMIT") - - -@compiles(AddColumn, "oracle") -def visit_add_column(element, compiler, **kw): - return "%s %s" % ( - alter_table(compiler, element.table_name, element.schema), - add_column(compiler, element.column, **kw), - ) - - -@compiles(ColumnNullable, "oracle") -def visit_column_nullable(element, compiler, **kw): - return "%s %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - alter_column(compiler, element.column_name), - "NULL" if element.nullable else "NOT NULL", - ) - - -@compiles(ColumnType, "oracle") -def visit_column_type(element, compiler, **kw): - return "%s %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - alter_column(compiler, element.column_name), - "%s" % format_type(compiler, element.type_), - ) - - -@compiles(ColumnName, "oracle") -def visit_column_name(element, compiler, **kw): - return "%s RENAME COLUMN %s TO %s" % ( - alter_table(compiler, element.table_name, element.schema), - format_column_name(compiler, element.column_name), - format_column_name(compiler, element.newname), - ) - - -@compiles(ColumnDefault, "oracle") -def visit_column_default(element, compiler, **kw): - return "%s %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - alter_column(compiler, element.column_name), - "DEFAULT %s" % 
format_server_default(compiler, element.default) - if element.default is not None - else "DEFAULT NULL", - ) - - -@compiles(ColumnComment, "oracle") -def visit_column_comment(element, compiler, **kw): - ddl = "COMMENT ON COLUMN {table_name}.{column_name} IS {comment}" - - comment = compiler.sql_compiler.render_literal_value( - (element.comment if element.comment is not None else ""), - sqltypes.String(), - ) - - return ddl.format( - table_name=element.table_name, - column_name=element.column_name, - comment=comment, - ) - - -def alter_column(compiler, name): - return "MODIFY %s" % format_column_name(compiler, name) - - -def add_column(compiler, column, **kw): - return "ADD %s" % compiler.get_column_specification(column, **kw) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/postgresql.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/postgresql.py deleted file mode 100644 index 255c7e6..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/postgresql.py +++ /dev/null @@ -1,537 +0,0 @@ -import logging -import re - -from sqlalchemy import Column -from sqlalchemy import Numeric -from sqlalchemy import text -from sqlalchemy import types as sqltypes -from sqlalchemy.dialects.postgresql import BIGINT -from sqlalchemy.dialects.postgresql import INTEGER -from sqlalchemy.sql.expression import ColumnClause -from sqlalchemy.sql.expression import UnaryExpression -from sqlalchemy.types import NULLTYPE - -from .base import alter_column -from .base import alter_table -from .base import AlterColumn -from .base import ColumnComment -from .base import compiles -from .base import format_table_name -from .base import format_type -from .base import RenameTable -from .impl import DefaultImpl -from .. import util -from ..autogenerate import render -from ..operations import ops -from ..operations import schemaobj -from ..operations.base import BatchOperations -from ..operations.base import Operations -from ..util import compat -from ..util import sqla_compat - -if util.sqla_100: - from sqlalchemy.dialects.postgresql import ExcludeConstraint - - -log = logging.getLogger(__name__) - - -class PostgresqlImpl(DefaultImpl): - __dialect__ = "postgresql" - transactional_ddl = True - - def prep_table_for_batch(self, table): - for constraint in table.constraints: - if constraint.name is not None: - self.drop_constraint(constraint) - - def compare_server_default( - self, - inspector_column, - metadata_column, - rendered_metadata_default, - rendered_inspector_default, - ): - # don't do defaults for SERIAL columns - if ( - metadata_column.primary_key - and metadata_column is metadata_column.table._autoincrement_column - ): - return False - - conn_col_default = rendered_inspector_default - - defaults_equal = conn_col_default == rendered_metadata_default - if defaults_equal: - return False - - if None in (conn_col_default, rendered_metadata_default): - return not defaults_equal - - if compat.py2k: - # look for a python 2 "u''" string and filter - m = re.match(r"^u'(.*)'$", rendered_metadata_default) - if m: - rendered_metadata_default = "'%s'" % m.group(1) - - # check for unquoted string and quote for PG String types - if ( - not isinstance(inspector_column.type, Numeric) and - metadata_column.server_default is not None - and isinstance( - metadata_column.server_default.arg, compat.string_types - ) - and not re.match(r"^'.*'$", rendered_metadata_default) - ): - rendered_metadata_default = "'%s'" % rendered_metadata_default - - return not self.connection.scalar( - "SELECT 
%s = %s" % (conn_col_default, rendered_metadata_default) - ) - - def alter_column( - self, - table_name, - column_name, - nullable=None, - server_default=False, - name=None, - type_=None, - schema=None, - autoincrement=None, - existing_type=None, - existing_server_default=None, - existing_nullable=None, - existing_autoincrement=None, - **kw - ): - - using = kw.pop("postgresql_using", None) - - if using is not None and type_ is None: - raise util.CommandError( - "postgresql_using must be used with the type_ parameter" - ) - - if type_ is not None: - self._exec( - PostgresqlColumnType( - table_name, - column_name, - type_, - schema=schema, - using=using, - existing_type=existing_type, - existing_server_default=existing_server_default, - existing_nullable=existing_nullable, - ) - ) - - super(PostgresqlImpl, self).alter_column( - table_name, - column_name, - nullable=nullable, - server_default=server_default, - name=name, - schema=schema, - autoincrement=autoincrement, - existing_type=existing_type, - existing_server_default=existing_server_default, - existing_nullable=existing_nullable, - existing_autoincrement=existing_autoincrement, - **kw - ) - - def autogen_column_reflect(self, inspector, table, column_info): - if column_info.get("default") and isinstance( - column_info["type"], (INTEGER, BIGINT) - ): - seq_match = re.match( - r"nextval\('(.+?)'::regclass\)", column_info["default"] - ) - if seq_match: - info = inspector.bind.execute( - text( - "select c.relname, a.attname " - "from pg_class as c join " - "pg_depend d on d.objid=c.oid and " - "d.classid='pg_class'::regclass and " - "d.refclassid='pg_class'::regclass " - "join pg_class t on t.oid=d.refobjid " - "join pg_attribute a on a.attrelid=t.oid and " - "a.attnum=d.refobjsubid " - "where c.relkind='S' and c.relname=:seqname" - ), - seqname=seq_match.group(1), - ).first() - if info: - seqname, colname = info - if colname == column_info["name"]: - log.info( - "Detected sequence named '%s' as " - "owned by integer column '%s(%s)', " - "assuming SERIAL and omitting", - seqname, - table.name, - colname, - ) - # sequence, and the owner is this column, - # its a SERIAL - whack it! 
- del column_info["default"] - - def correct_for_autogen_constraints( - self, - conn_unique_constraints, - conn_indexes, - metadata_unique_constraints, - metadata_indexes, - ): - - conn_uniques_by_name = dict( - (c.name, c) for c in conn_unique_constraints - ) - conn_indexes_by_name = dict((c.name, c) for c in conn_indexes) - - if not util.sqla_100: - doubled_constraints = set( - conn_indexes_by_name[name] - for name in set(conn_uniques_by_name).intersection( - conn_indexes_by_name - ) - ) - else: - doubled_constraints = set( - index - for index in conn_indexes - if index.info.get("duplicates_constraint") - ) - - for ix in doubled_constraints: - conn_indexes.remove(ix) - - for idx in list(metadata_indexes): - if idx.name in conn_indexes_by_name: - continue - exprs = idx.expressions - for expr in exprs: - while isinstance(expr, UnaryExpression): - expr = expr.element - if not isinstance(expr, Column): - util.warn( - "autogenerate skipping functional index %s; " - "not supported by SQLAlchemy reflection" % idx.name - ) - metadata_indexes.discard(idx) - - def render_type(self, type_, autogen_context): - mod = type(type_).__module__ - if not mod.startswith("sqlalchemy.dialects.postgresql"): - return False - - if hasattr(self, "_render_%s_type" % type_.__visit_name__): - meth = getattr(self, "_render_%s_type" % type_.__visit_name__) - return meth(type_, autogen_context) - - return False - - def _render_HSTORE_type(self, type_, autogen_context): - return render._render_type_w_subtype( - type_, autogen_context, "text_type", r"(.+?\(.*text_type=)" - ) - - def _render_ARRAY_type(self, type_, autogen_context): - return render._render_type_w_subtype( - type_, autogen_context, "item_type", r"(.+?\()" - ) - - def _render_JSON_type(self, type_, autogen_context): - return render._render_type_w_subtype( - type_, autogen_context, "astext_type", r"(.+?\(.*astext_type=)" - ) - - def _render_JSONB_type(self, type_, autogen_context): - return render._render_type_w_subtype( - type_, autogen_context, "astext_type", r"(.+?\(.*astext_type=)" - ) - - -class PostgresqlColumnType(AlterColumn): - def __init__(self, name, column_name, type_, **kw): - using = kw.pop("using", None) - super(PostgresqlColumnType, self).__init__(name, column_name, **kw) - self.type_ = sqltypes.to_instance(type_) - self.using = using - - -@compiles(RenameTable, "postgresql") -def visit_rename_table(element, compiler, **kw): - return "%s RENAME TO %s" % ( - alter_table(compiler, element.table_name, element.schema), - format_table_name(compiler, element.new_table_name, None), - ) - - -@compiles(PostgresqlColumnType, "postgresql") -def visit_column_type(element, compiler, **kw): - return "%s %s %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - alter_column(compiler, element.column_name), - "TYPE %s" % format_type(compiler, element.type_), - "USING %s" % element.using if element.using else "", - ) - - -@compiles(ColumnComment, "postgresql") -def visit_column_comment(element, compiler, **kw): - ddl = "COMMENT ON COLUMN {table_name}.{column_name} IS {comment}" - comment = ( - compiler.sql_compiler.render_literal_value( - element.comment, sqltypes.String() - ) - if element.comment is not None - else "NULL" - ) - - return ddl.format( - table_name=element.table_name, - column_name=element.column_name, - comment=comment, - ) - - -@Operations.register_operation("create_exclude_constraint") -@BatchOperations.register_operation( - "create_exclude_constraint", "batch_create_exclude_constraint" -) 
-@ops.AddConstraintOp.register_add_constraint("exclude_constraint") -class CreateExcludeConstraintOp(ops.AddConstraintOp): - """Represent a create exclude constraint operation.""" - - constraint_type = "exclude" - - def __init__( - self, - constraint_name, - table_name, - elements, - where=None, - schema=None, - _orig_constraint=None, - **kw - ): - self.constraint_name = constraint_name - self.table_name = table_name - self.elements = elements - self.where = where - self.schema = schema - self._orig_constraint = _orig_constraint - self.kw = kw - - @classmethod - def from_constraint(cls, constraint): - constraint_table = sqla_compat._table_for_constraint(constraint) - - return cls( - constraint.name, - constraint_table.name, - [(expr, op) for expr, name, op in constraint._render_exprs], - where=constraint.where, - schema=constraint_table.schema, - _orig_constraint=constraint, - deferrable=constraint.deferrable, - initially=constraint.initially, - using=constraint.using, - ) - - def to_constraint(self, migration_context=None): - if not util.sqla_100: - raise NotImplementedError( - "ExcludeConstraint not supported until SQLAlchemy 1.0" - ) - if self._orig_constraint is not None: - return self._orig_constraint - schema_obj = schemaobj.SchemaObjects(migration_context) - t = schema_obj.table(self.table_name, schema=self.schema) - excl = ExcludeConstraint( - *self.elements, - name=self.constraint_name, - where=self.where, - **self.kw - ) - for expr, name, oper in excl._render_exprs: - t.append_column(Column(name, NULLTYPE)) - t.append_constraint(excl) - return excl - - @classmethod - def create_exclude_constraint( - cls, operations, constraint_name, table_name, *elements, **kw - ): - """Issue an alter to create an EXCLUDE constraint using the - current migration context. - - .. note:: This method is Postgresql specific, and additionally - requires at least SQLAlchemy 1.0. - - e.g.:: - - from alembic import op - - op.create_exclude_constraint( - "user_excl", - "user", - - ("period", '&&'), - ("group", '='), - where=("group != 'some group'") - - ) - - Note that the expressions work the same way as that of - the ``ExcludeConstraint`` object itself; if plain strings are - passed, quoting rules must be applied manually. - - :param name: Name of the constraint. - :param table_name: String name of the source table. - :param elements: exclude conditions. - :param where: SQL expression or SQL string with optional WHERE - clause. - :param deferrable: optional bool. If set, emit DEFERRABLE or - NOT DEFERRABLE when issuing DDL for this constraint. - :param initially: optional string. If set, emit INITIALLY - when issuing DDL for this constraint. - :param schema: Optional schema name to operate within. - - .. versionadded:: 0.9.0 - - """ - op = cls(constraint_name, table_name, elements, **kw) - return operations.invoke(op) - - @classmethod - def batch_create_exclude_constraint( - cls, operations, constraint_name, *elements, **kw - ): - """Issue a "create exclude constraint" instruction using the - current batch migration context. - - .. note:: This method is Postgresql specific, and additionally - requires at least SQLAlchemy 1.0. - - .. versionadded:: 0.9.0 - - .. 
seealso:: - - :meth:`.Operations.create_exclude_constraint` - - """ - kw["schema"] = operations.impl.schema - op = cls(constraint_name, operations.impl.table_name, elements, **kw) - return operations.invoke(op) - - -@render.renderers.dispatch_for(CreateExcludeConstraintOp) -def _add_exclude_constraint(autogen_context, op): - return _exclude_constraint(op.to_constraint(), autogen_context, alter=True) - - -if util.sqla_100: - - @render._constraint_renderers.dispatch_for(ExcludeConstraint) - def _render_inline_exclude_constraint(constraint, autogen_context): - rendered = render._user_defined_render( - "exclude", constraint, autogen_context - ) - if rendered is not False: - return rendered - - return _exclude_constraint(constraint, autogen_context, False) - - -def _postgresql_autogenerate_prefix(autogen_context): - - imports = autogen_context.imports - if imports is not None: - imports.add("from sqlalchemy.dialects import postgresql") - return "postgresql." - - -def _exclude_constraint(constraint, autogen_context, alter): - opts = [] - - has_batch = autogen_context._has_batch - - if constraint.deferrable: - opts.append(("deferrable", str(constraint.deferrable))) - if constraint.initially: - opts.append(("initially", str(constraint.initially))) - if constraint.using: - opts.append(("using", str(constraint.using))) - if not has_batch and alter and constraint.table.schema: - opts.append(("schema", render._ident(constraint.table.schema))) - if not alter and constraint.name: - opts.append( - ("name", render._render_gen_name(autogen_context, constraint.name)) - ) - - if alter: - args = [ - repr(render._render_gen_name(autogen_context, constraint.name)) - ] - if not has_batch: - args += [repr(render._ident(constraint.table.name))] - args.extend( - [ - "(%s, %r)" - % ( - _render_potential_column(sqltext, autogen_context), - opstring, - ) - for sqltext, name, opstring in constraint._render_exprs - ] - ) - if constraint.where is not None: - args.append( - "where=%s" - % render._render_potential_expr( - constraint.where, autogen_context - ) - ) - args.extend(["%s=%r" % (k, v) for k, v in opts]) - return "%(prefix)screate_exclude_constraint(%(args)s)" % { - "prefix": render._alembic_autogenerate_prefix(autogen_context), - "args": ", ".join(args), - } - else: - args = [ - "(%s, %r)" - % (_render_potential_column(sqltext, autogen_context), opstring) - for sqltext, name, opstring in constraint._render_exprs - ] - if constraint.where is not None: - args.append( - "where=%s" - % render._render_potential_expr( - constraint.where, autogen_context - ) - ) - args.extend(["%s=%r" % (k, v) for k, v in opts]) - return "%(prefix)sExcludeConstraint(%(args)s)" % { - "prefix": _postgresql_autogenerate_prefix(autogen_context), - "args": ", ".join(args), - } - - -def _render_potential_column(value, autogen_context): - if isinstance(value, ColumnClause): - template = "%(prefix)scolumn(%(name)r)" - - return template % { - "prefix": render._sqlalchemy_autogenerate_prefix(autogen_context), - "name": value.name, - } - - else: - return render._render_potential_expr( - value, autogen_context, wrap_in_text=False - ) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/sqlite.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/sqlite.py deleted file mode 100644 index c0385e1..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/ddl/sqlite.py +++ /dev/null @@ -1,110 +0,0 @@ -import re - -from .impl import DefaultImpl -from .. 
import util - - -class SQLiteImpl(DefaultImpl): - __dialect__ = "sqlite" - - transactional_ddl = False - """SQLite supports transactional DDL, but pysqlite does not: - see: http://bugs.python.org/issue10740 - """ - - def requires_recreate_in_batch(self, batch_op): - """Return True if the given :class:`.BatchOperationsImpl` - would need the table to be recreated and copied in order to - proceed. - - Normally, only returns True on SQLite when operations other - than add_column are present. - - """ - for op in batch_op.batch: - if op[0] not in ("add_column", "create_index", "drop_index"): - return True - else: - return False - - def add_constraint(self, const): - # attempt to distinguish between an - # auto-gen constraint and an explicit one - if const._create_rule is None: - raise NotImplementedError( - "No support for ALTER of constraints in SQLite dialect" - ) - elif const._create_rule(self): - util.warn( - "Skipping unsupported ALTER for " - "creation of implicit constraint" - ) - - def drop_constraint(self, const): - if const._create_rule is None: - raise NotImplementedError( - "No support for ALTER of constraints in SQLite dialect" - ) - - def compare_server_default( - self, - inspector_column, - metadata_column, - rendered_metadata_default, - rendered_inspector_default, - ): - - if rendered_metadata_default is not None: - rendered_metadata_default = re.sub( - r"^\"'|\"'$", "", rendered_metadata_default - ) - if rendered_inspector_default is not None: - rendered_inspector_default = re.sub( - r"^\"'|\"'$", "", rendered_inspector_default - ) - - return rendered_inspector_default != rendered_metadata_default - - def correct_for_autogen_constraints( - self, - conn_unique_constraints, - conn_indexes, - metadata_unique_constraints, - metadata_indexes, - ): - - if util.sqla_100: - return - - # adjustments to accommodate for SQLite unnamed unique constraints - # not being reported from the backend; this was updated in - # SQLA 1.0. - - def uq_sig(uq): - return tuple(sorted(uq.columns.keys())) - - conn_unique_sigs = set(uq_sig(uq) for uq in conn_unique_constraints) - - for idx in list(metadata_unique_constraints): - # SQLite backend can't report on unnamed UNIQUE constraints, - # so remove these, unless we see an exact signature match - if idx.name is None and uq_sig(idx) not in conn_unique_sigs: - metadata_unique_constraints.remove(idx) - - -# @compiles(AddColumn, 'sqlite') -# def visit_add_column(element, compiler, **kw): -# return "%s %s" % ( -# alter_table(compiler, element.table_name, element.schema), -# add_column(compiler, element.column, **kw) -# ) - - -# def add_column(compiler, column, **kw): -# text = "ADD COLUMN %s" % compiler.get_column_specification(column, **kw) -# need to modify SQLAlchemy so that the CHECK associated with a Boolean -# or Enum gets placed as part of the column constraints, not the Table -# see ticket 98 -# for const in column.constraints: -# text += compiler.process(AddConstraint(const)) -# return text diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/op.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/op.py deleted file mode 100644 index f3f5fac..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/op.py +++ /dev/null @@ -1,5 +0,0 @@ -from .operations.base import Operations - -# create proxy functions for -# each method on the Operations class. 
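# Effect of the proxy set up below: every method of the Operations class
# becomes a module-level function on "alembic.op", which is how migration
# scripts call it. A typical (hypothetical) migration:
import sqlalchemy as sa
from alembic import op

def upgrade():
    op.add_column("account", sa.Column("last_seen", sa.DateTime()))

def downgrade():
    op.drop_column("account", "last_seen")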
-Operations.create_module_class_proxy(globals(), locals()) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/operations/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/operations/__init__.py deleted file mode 100644 index dc2d3a4..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/operations/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from . import toimpl # noqa -from .base import BatchOperations -from .base import Operations -from .ops import MigrateOperation - - -__all__ = ["Operations", "BatchOperations", "MigrateOperation"] diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/operations/base.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/operations/base.py deleted file mode 100644 index 90b3500..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/operations/base.py +++ /dev/null @@ -1,473 +0,0 @@ -from contextlib import contextmanager -import textwrap - -from . import batch -from . import schemaobj -from .. import util -from ..util import sqla_compat -from ..util.compat import exec_ -from ..util.compat import inspect_formatargspec -from ..util.compat import inspect_getargspec - -__all__ = ("Operations", "BatchOperations") - -try: - from sqlalchemy.sql.naming import conv -except: - conv = None - - -class Operations(util.ModuleClsProxy): - - """Define high level migration operations. - - Each operation corresponds to some schema migration operation, - executed against a particular :class:`.MigrationContext` - which in turn represents connectivity to a database, - or a file output stream. - - While :class:`.Operations` is normally configured as - part of the :meth:`.EnvironmentContext.run_migrations` - method called from an ``env.py`` script, a standalone - :class:`.Operations` instance can be - made for use cases external to regular Alembic - migrations by passing in a :class:`.MigrationContext`:: - - from alembic.migration import MigrationContext - from alembic.operations import Operations - - conn = myengine.connect() - ctx = MigrationContext.configure(conn) - op = Operations(ctx) - - op.alter_column("t", "c", nullable=True) - - Note that as of 0.8, most of the methods on this class are produced - dynamically using the :meth:`.Operations.register_operation` - method. - - """ - - _to_impl = util.Dispatcher() - - def __init__(self, migration_context, impl=None): - """Construct a new :class:`.Operations` - - :param migration_context: a :class:`.MigrationContext` - instance. - - """ - self.migration_context = migration_context - if impl is None: - self.impl = migration_context.impl - else: - self.impl = impl - - self.schema_obj = schemaobj.SchemaObjects(migration_context) - - @classmethod - def register_operation(cls, name, sourcename=None): - """Register a new operation for this class. - - This method is normally used to add new operations - to the :class:`.Operations` class, and possibly the - :class:`.BatchOperations` class as well. All Alembic migration - operations are implemented via this system, however the system - is also available as a public API to facilitate adding custom - operations. - - .. versionadded:: 0.8.0 - - .. 
seealso:: - - :ref:`operation_plugins` - - - """ - - def register(op_cls): - if sourcename is None: - fn = getattr(op_cls, name) - source_name = fn.__name__ - else: - fn = getattr(op_cls, sourcename) - source_name = fn.__name__ - - spec = inspect_getargspec(fn) - - name_args = spec[0] - assert name_args[0:2] == ["cls", "operations"] - - name_args[0:2] = ["self"] - - args = inspect_formatargspec(*spec) - num_defaults = len(spec[3]) if spec[3] else 0 - if num_defaults: - defaulted_vals = name_args[0 - num_defaults :] - else: - defaulted_vals = () - - apply_kw = inspect_formatargspec( - name_args, - spec[1], - spec[2], - defaulted_vals, - formatvalue=lambda x: "=" + x, - ) - - func_text = textwrap.dedent( - """\ - def %(name)s%(args)s: - %(doc)r - return op_cls.%(source_name)s%(apply_kw)s - """ - % { - "name": name, - "source_name": source_name, - "args": args, - "apply_kw": apply_kw, - "doc": fn.__doc__, - "meth": fn.__name__, - } - ) - globals_ = {"op_cls": op_cls} - lcl = {} - exec_(func_text, globals_, lcl) - setattr(cls, name, lcl[name]) - fn.__func__.__doc__ = ( - "This method is proxied on " - "the :class:`.%s` class, via the :meth:`.%s.%s` method." - % (cls.__name__, cls.__name__, name) - ) - if hasattr(fn, "_legacy_translations"): - lcl[name]._legacy_translations = fn._legacy_translations - return op_cls - - return register - - @classmethod - def implementation_for(cls, op_cls): - """Register an implementation for a given :class:`.MigrateOperation`. - - This is part of the operation extensibility API. - - .. seealso:: - - :ref:`operation_plugins` - example of use - - """ - - def decorate(fn): - cls._to_impl.dispatch_for(op_cls)(fn) - return fn - - return decorate - - @classmethod - @contextmanager - def context(cls, migration_context): - op = Operations(migration_context) - op._install_proxy() - yield op - op._remove_proxy() - - @contextmanager - def batch_alter_table( - self, - table_name, - schema=None, - recreate="auto", - copy_from=None, - table_args=(), - table_kwargs=util.immutabledict(), - reflect_args=(), - reflect_kwargs=util.immutabledict(), - naming_convention=None, - ): - """Invoke a series of per-table migrations in batch. - - Batch mode allows a series of operations specific to a table - to be syntactically grouped together, and allows for alternate - modes of table migration, in particular the "recreate" style of - migration required by SQLite. - - "recreate" style is as follows: - - 1. A new table is created with the new specification, based on the - migration directives within the batch, using a temporary name. - - 2. the data copied from the existing table to the new table. - - 3. the existing table is dropped. - - 4. the new table is renamed to the existing table name. - - The directive by default will only use "recreate" style on the - SQLite backend, and only if directives are present which require - this form, e.g. anything other than ``add_column()``. The batch - operation on other backends will proceed using standard ALTER TABLE - operations. - - The method is used as a context manager, which returns an instance - of :class:`.BatchOperations`; this object is the same as - :class:`.Operations` except that table names and schema names - are omitted. E.g.:: - - with op.batch_alter_table("some_table") as batch_op: - batch_op.add_column(Column('foo', Integer)) - batch_op.drop_column('bar') - - The operations within the context manager are invoked at once - when the context is ended. 
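The ``register_operation`` machinery above is easiest to see with a concrete plugin; a minimal sketch in the spirit of the :ref:`operation_plugins` documentation (``CreateSequenceOp`` and its emitted SQL are illustrative)::

    from alembic.operations import Operations, MigrateOperation

    @Operations.register_operation("create_sequence")
    class CreateSequenceOp(MigrateOperation):
        def __init__(self, sequence_name, schema=None):
            self.sequence_name = sequence_name
            self.schema = schema

        @classmethod
        def create_sequence(cls, operations, sequence_name, **kw):
            """Issue a "CREATE SEQUENCE" instruction."""
            return operations.invoke(cls(sequence_name, **kw))

    @Operations.implementation_for(CreateSequenceOp)
    def create_sequence(operations, operation):
        operations.execute("CREATE SEQUENCE %s" % operation.sequence_name)

Note the classmethod's argument list must begin ``(cls, operations, ...)``, matching the assertion in ``register()``; the generated proxy strips those off, so a migration script simply calls ``op.create_sequence("my_seq")``.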
When run against SQLite, if the - migrations include operations not supported by SQLite's ALTER TABLE, - the entire table will be copied to a new one with the new - specification, moving all data across as well. - - The copy operation by default uses reflection to retrieve the current - structure of the table, and therefore :meth:`.batch_alter_table` - in this mode requires that the migration is run in "online" mode. - The ``copy_from`` parameter may be passed which refers to an existing - :class:`.Table` object, which will bypass this reflection step. - - .. note:: The table copy operation will currently not copy - CHECK constraints, and may not copy UNIQUE constraints that are - unnamed, as is possible on SQLite. See the section - :ref:`sqlite_batch_constraints` for workarounds. - - :param table_name: name of table - :param schema: optional schema name. - :param recreate: under what circumstances the table should be - recreated. At its default of ``"auto"``, the SQLite dialect will - recreate the table if any operations other than ``add_column()``, - ``create_index()``, or ``drop_index()`` are - present. Other options include ``"always"`` and ``"never"``. - :param copy_from: optional :class:`~sqlalchemy.schema.Table` object - that will act as the structure of the table being copied. If omitted, - table reflection is used to retrieve the structure of the table. - - .. versionadded:: 0.7.6 Fully implemented the - :paramref:`~.Operations.batch_alter_table.copy_from` - parameter. - - .. seealso:: - - :ref:`batch_offline_mode` - - :paramref:`~.Operations.batch_alter_table.reflect_args` - - :paramref:`~.Operations.batch_alter_table.reflect_kwargs` - - :param reflect_args: a sequence of additional positional arguments that - will be applied to the table structure being reflected / copied; - this may be used to pass column and constraint overrides to the - table that will be reflected, in lieu of passing the whole - :class:`~sqlalchemy.schema.Table` using - :paramref:`~.Operations.batch_alter_table.copy_from`. - - .. versionadded:: 0.7.1 - - :param reflect_kwargs: a dictionary of additional keyword arguments - that will be applied to the table structure being copied; this may be - used to pass additional table and reflection options to the table that - will be reflected, in lieu of passing the whole - :class:`~sqlalchemy.schema.Table` using - :paramref:`~.Operations.batch_alter_table.copy_from`. - - .. versionadded:: 0.7.1 - - :param table_args: a sequence of additional positional arguments that - will be applied to the new :class:`~sqlalchemy.schema.Table` when - created, in addition to those copied from the source table. - This may be used to provide additional constraints such as CHECK - constraints that may not be reflected. - :param table_kwargs: a dictionary of additional keyword arguments - that will be applied to the new :class:`~sqlalchemy.schema.Table` - when created, in addition to those copied from the source table. - This may be used to provide for additional table options that may - not be reflected. - - .. versionadded:: 0.7.0 - - :param naming_convention: a naming convention dictionary of the form - described at :ref:`autogen_naming_conventions` which will be applied - to the :class:`~sqlalchemy.schema.MetaData` during the reflection - process. This is typically required if one wants to drop SQLite - constraints, as these constraints will not have names when - reflected on this backend. Requires SQLAlchemy **0.9.4** or greater. - - .. 
seealso:: - - :ref:`dropping_sqlite_foreign_keys` - - .. versionadded:: 0.7.1 - - .. note:: batch mode requires SQLAlchemy 0.8 or above. - - .. seealso:: - - :ref:`batch_migrations` - - """ - impl = batch.BatchOperationsImpl( - self, - table_name, - schema, - recreate, - copy_from, - table_args, - table_kwargs, - reflect_args, - reflect_kwargs, - naming_convention, - ) - batch_op = BatchOperations(self.migration_context, impl=impl) - yield batch_op - impl.flush() - - def get_context(self): - """Return the :class:`.MigrationContext` object that's - currently in use. - - """ - - return self.migration_context - - def invoke(self, operation): - """Given a :class:`.MigrateOperation`, invoke it in terms of - this :class:`.Operations` instance. - - .. versionadded:: 0.8.0 - - """ - fn = self._to_impl.dispatch( - operation, self.migration_context.impl.__dialect__ - ) - return fn(self, operation) - - def f(self, name): - """Indicate a string name that has already had a naming convention - applied to it. - - This feature combines with the SQLAlchemy ``naming_convention`` feature - to disambiguate constraint names that have already had naming - conventions applied to them, versus those that have not. This is - necessary in the case that the ``"%(constraint_name)s"`` token - is used within a naming convention, so that it can be identified - that this particular name should remain fixed. - - If the :meth:`.Operations.f` is used on a constraint, the naming - convention will not take effect:: - - op.add_column('t', 'x', Boolean(name=op.f('ck_bool_t_x'))) - - Above, the CHECK constraint generated will have the name - ``ck_bool_t_x`` regardless of whether or not a naming convention is - in use. - - Alternatively, if a naming convention is in use, and 'f' is not used, - names will be converted along conventions. If the ``target_metadata`` - contains the naming convention - ``{"ck": "ck_bool_%(table_name)s_%(constraint_name)s"}``, then the - output of the following: - - op.add_column('t', 'x', Boolean(name='x')) - - will be:: - - CONSTRAINT ck_bool_t_x CHECK (x in (1, 0))) - - The function is rendered in the output of autogenerate when - a particular constraint name is already converted, for SQLAlchemy - version **0.9.4 and greater only**. Even though ``naming_convention`` - was introduced in 0.9.2, the string disambiguation service is new - as of 0.9.4. - - .. versionadded:: 0.6.4 - - """ - if conv: - return conv(name) - else: - raise NotImplementedError( - "op.f() feature requires SQLAlchemy 0.9.4 or greater." - ) - - def inline_literal(self, value, type_=None): - """Produce an 'inline literal' expression, suitable for - using in an INSERT, UPDATE, or DELETE statement. - - When using Alembic in "offline" mode, CRUD operations - aren't compatible with SQLAlchemy's default behavior surrounding - literal values, - which is that they are converted into bound values and passed - separately into the ``execute()`` method of the DBAPI cursor. - An offline SQL - script needs to have these rendered inline. While it should - always be noted that inline literal values are an **enormous** - security hole in an application that handles untrusted input, - a schema migration is not run in this context, so - literals are safe to render inline, with the caveat that - advanced types like dates may not be supported directly - by SQLAlchemy. - - See :meth:`.execute` for an example usage of - :meth:`.inline_literal`. 
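In brief (mirroring that example, with an ad-hoc table construct)::

    from sqlalchemy.sql import table, column
    from sqlalchemy import String
    from alembic import op

    account = table("account", column("name", String))
    op.execute(
        account.update()
        .where(account.c.name == op.inline_literal("account 1"))
        .values({"name": op.inline_literal("account 2")})
    )

Both string values are rendered directly into the UPDATE statement of the offline SQL script, rather than as bound parameters.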
- - The environment can also be configured to attempt to render - "literal" values inline automatically, for those simple types - that are supported by the dialect; see - :paramref:`.EnvironmentContext.configure.literal_binds` for this - more recently added feature. - - :param value: The value to render. Strings, integers, and simple - numerics should be supported. Other types like boolean, - dates, etc. may or may not be supported yet by various - backends. - :param type_: optional - a :class:`sqlalchemy.types.TypeEngine` - subclass stating the type of this value. In SQLAlchemy - expressions, this is usually derived automatically - from the Python type of the value itself, as well as - based on the context in which the value is used. - - .. seealso:: - - :paramref:`.EnvironmentContext.configure.literal_binds` - - """ - return sqla_compat._literal_bindparam(None, value, type_=type_) - - def get_bind(self): - """Return the current 'bind'. - - Under normal circumstances, this is the - :class:`~sqlalchemy.engine.Connection` currently being used - to emit SQL to the database. - - In a SQL script context, this value is ``None``. - - """ - return self.migration_context.impl.bind - - -class BatchOperations(Operations): - """Modifies the interface :class:`.Operations` for batch mode. - - This basically omits the ``table_name`` and ``schema`` parameters - from associated methods, as these are a given when running under batch - mode. - - .. seealso:: - - :meth:`.Operations.batch_alter_table` - - Note that as of 0.8, most of the methods on this class are produced - dynamically using the :meth:`.Operations.register_operation` - method. - - """ - - def _noop(self, operation): - raise NotImplementedError( - "The %s method does not apply to a batch table alter operation." - % operation - ) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/operations/batch.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/operations/batch.py deleted file mode 100644 index 9e829b3..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/operations/batch.py +++ /dev/null @@ -1,419 +0,0 @@ -from sqlalchemy import cast -from sqlalchemy import CheckConstraint -from sqlalchemy import Column -from sqlalchemy import ForeignKeyConstraint -from sqlalchemy import Index -from sqlalchemy import MetaData -from sqlalchemy import PrimaryKeyConstraint -from sqlalchemy import schema as sql_schema -from sqlalchemy import select -from sqlalchemy import Table -from sqlalchemy import types as sqltypes -from sqlalchemy.events import SchemaEventTarget -from sqlalchemy.util import OrderedDict - -from ..util.sqla_compat import _columns_for_constraint -from ..util.sqla_compat import _fk_is_self_referential -from ..util.sqla_compat import _is_type_bound -from ..util.sqla_compat import _remove_column_from_collection - - -class BatchOperationsImpl(object): - def __init__( - self, - operations, - table_name, - schema, - recreate, - copy_from, - table_args, - table_kwargs, - reflect_args, - reflect_kwargs, - naming_convention, - ): - self.operations = operations - self.table_name = table_name - self.schema = schema - if recreate not in ("auto", "always", "never"): - raise ValueError( - "recreate may be one of 'auto', 'always', or 'never'."
- ) - self.recreate = recreate - self.copy_from = copy_from - self.table_args = table_args - self.table_kwargs = dict(table_kwargs) - self.reflect_args = reflect_args - self.reflect_kwargs = reflect_kwargs - self.naming_convention = naming_convention - self.batch = [] - - @property - def dialect(self): - return self.operations.impl.dialect - - @property - def impl(self): - return self.operations.impl - - def _should_recreate(self): - if self.recreate == "auto": - return self.operations.impl.requires_recreate_in_batch(self) - elif self.recreate == "always": - return True - else: - return False - - def flush(self): - should_recreate = self._should_recreate() - - if not should_recreate: - for opname, arg, kw in self.batch: - fn = getattr(self.operations.impl, opname) - fn(*arg, **kw) - else: - if self.naming_convention: - m1 = MetaData(naming_convention=self.naming_convention) - else: - m1 = MetaData() - - if self.copy_from is not None: - existing_table = self.copy_from - reflected = False - else: - existing_table = Table( - self.table_name, - m1, - schema=self.schema, - autoload=True, - autoload_with=self.operations.get_bind(), - *self.reflect_args, - **self.reflect_kwargs - ) - reflected = True - - batch_impl = ApplyBatchImpl( - existing_table, self.table_args, self.table_kwargs, reflected - ) - for opname, arg, kw in self.batch: - fn = getattr(batch_impl, opname) - fn(*arg, **kw) - - batch_impl._create(self.impl) - - def alter_column(self, *arg, **kw): - self.batch.append(("alter_column", arg, kw)) - - def add_column(self, *arg, **kw): - self.batch.append(("add_column", arg, kw)) - - def drop_column(self, *arg, **kw): - self.batch.append(("drop_column", arg, kw)) - - def add_constraint(self, const): - self.batch.append(("add_constraint", (const,), {})) - - def drop_constraint(self, const): - self.batch.append(("drop_constraint", (const,), {})) - - def rename_table(self, *arg, **kw): - self.batch.append(("rename_table", arg, kw)) - - def create_index(self, idx): - self.batch.append(("create_index", (idx,), {})) - - def drop_index(self, idx): - self.batch.append(("drop_index", (idx,), {})) - - def create_table(self, table): - raise NotImplementedError("Can't create table in batch mode") - - def drop_table(self, table): - raise NotImplementedError("Can't drop table in batch mode") - - -class ApplyBatchImpl(object): - def __init__(self, table, table_args, table_kwargs, reflected): - self.table = table # this is a Table object - self.table_args = table_args - self.table_kwargs = table_kwargs - self.temp_table_name = self._calc_temp_name(table.name) - self.new_table = None - self.column_transfers = OrderedDict( - (c.name, {"expr": c}) for c in self.table.c - ) - self.reflected = reflected - self._grab_table_elements() - - @classmethod - def _calc_temp_name(cls, tablename): - return ("_alembic_tmp_%s" % tablename)[0:50] - - def _grab_table_elements(self): - schema = self.table.schema - self.columns = OrderedDict() - for c in self.table.c: - c_copy = c.copy(schema=schema) - c_copy.unique = c_copy.index = False - # ensure that the type object was copied, - # as we may need to modify it in-place - if isinstance(c.type, SchemaEventTarget): - assert c_copy.type is not c.type - self.columns[c.name] = c_copy - self.named_constraints = {} - self.unnamed_constraints = [] - self.indexes = {} - self.new_indexes = {} - for const in self.table.constraints: - if _is_type_bound(const): - continue - elif self.reflected and isinstance(const, CheckConstraint): - # TODO: we are skipping reflected CheckConstraint 
because - # we have no way to determine _is_type_bound() for these. - pass - elif const.name: - self.named_constraints[const.name] = const - else: - self.unnamed_constraints.append(const) - - for idx in self.table.indexes: - self.indexes[idx.name] = idx - - for k in self.table.kwargs: - self.table_kwargs.setdefault(k, self.table.kwargs[k]) - - def _transfer_elements_to_new_table(self): - assert self.new_table is None, "Can only create new table once" - - m = MetaData() - schema = self.table.schema - - self.new_table = new_table = Table( - self.temp_table_name, - m, - *(list(self.columns.values()) + list(self.table_args)), - schema=schema, - **self.table_kwargs - ) - - for const in ( - list(self.named_constraints.values()) + self.unnamed_constraints - ): - - const_columns = set( - [c.key for c in _columns_for_constraint(const)] - ) - - if not const_columns.issubset(self.column_transfers): - continue - - if isinstance(const, ForeignKeyConstraint): - if _fk_is_self_referential(const): - # for self-referential constraint, refer to the - # *original* table name, and not _alembic_batch_temp. - # This is consistent with how we're handling - # FK constraints from other tables; we assume SQLite - # no foreign keys just keeps the names unchanged, so - # when we rename back, they match again. - const_copy = const.copy( - schema=schema, target_table=self.table - ) - else: - # "target_table" for ForeignKeyConstraint.copy() is - # only used if the FK is detected as being - # self-referential, which we are handling above. - const_copy = const.copy(schema=schema) - else: - const_copy = const.copy(schema=schema, target_table=new_table) - if isinstance(const, ForeignKeyConstraint): - self._setup_referent(m, const) - new_table.append_constraint(const_copy) - - def _gather_indexes_from_both_tables(self): - idx = [] - idx.extend(self.indexes.values()) - for index in self.new_indexes.values(): - idx.append( - Index( - index.name, - unique=index.unique, - *[self.new_table.c[col] for col in index.columns.keys()], - **index.kwargs - ) - ) - return idx - - def _setup_referent(self, metadata, constraint): - spec = constraint.elements[0]._get_colspec() - parts = spec.split(".") - tname = parts[-2] - if len(parts) == 3: - referent_schema = parts[0] - else: - referent_schema = None - - if tname != self.temp_table_name: - key = sql_schema._get_table_key(tname, referent_schema) - if key in metadata.tables: - t = metadata.tables[key] - for elem in constraint.elements: - colname = elem._get_colspec().split(".")[-1] - if not t.c.contains_column(colname): - t.append_column(Column(colname, sqltypes.NULLTYPE)) - else: - Table( - tname, - metadata, - *[ - Column(n, sqltypes.NULLTYPE) - for n in [ - elem._get_colspec().split(".")[-1] - for elem in constraint.elements - ] - ], - schema=referent_schema - ) - - def _create(self, op_impl): - self._transfer_elements_to_new_table() - - op_impl.prep_table_for_batch(self.table) - op_impl.create_table(self.new_table) - - try: - op_impl._exec( - self.new_table.insert(inline=True).from_select( - list( - k - for k, transfer in self.column_transfers.items() - if "expr" in transfer - ), - select( - [ - transfer["expr"] - for transfer in self.column_transfers.values() - if "expr" in transfer - ] - ), - ) - ) - op_impl.drop_table(self.table) - except: - op_impl.drop_table(self.new_table) - raise - else: - op_impl.rename_table( - self.temp_table_name, self.table.name, schema=self.table.schema - ) - self.new_table.name = self.table.name - try: - for idx in self._gather_indexes_from_both_tables(): 
- op_impl.create_index(idx) - finally: - self.new_table.name = self.temp_table_name - - def alter_column( - self, - table_name, - column_name, - nullable=None, - server_default=False, - name=None, - type_=None, - autoincrement=None, - **kw - ): - existing = self.columns[column_name] - existing_transfer = self.column_transfers[column_name] - if name is not None and name != column_name: - # note that we don't change '.key' - we keep referring - # to the renamed column by its old key in _create(). neat! - existing.name = name - existing_transfer["name"] = name - - if type_ is not None: - type_ = sqltypes.to_instance(type_) - # old type is being discarded so turn off eventing - # rules. Alternatively we can - # erase the events set up by this type, but this is simpler. - # we also ignore the drop_constraint that will come here from - # Operations.implementation_for(alter_column) - if isinstance(existing.type, SchemaEventTarget): - existing.type._create_events = ( - existing.type.create_constraint - ) = False - - if existing.type._type_affinity is not type_._type_affinity: - existing_transfer["expr"] = cast( - existing_transfer["expr"], type_ - ) - - existing.type = type_ - - # we *dont* however set events for the new type, because - # alter_column is invoked from - # Operations.implementation_for(alter_column) which already - # will emit an add_constraint() - - if nullable is not None: - existing.nullable = nullable - if server_default is not False: - if server_default is None: - existing.server_default = None - else: - sql_schema.DefaultClause(server_default)._set_parent(existing) - if autoincrement is not None: - existing.autoincrement = bool(autoincrement) - - def add_column(self, table_name, column, **kw): - # we copy the column because operations.add_column() - # gives us a Column that is part of a Table already. 
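# (a Column may belong to only one Table; the copy detaches it so it can
# be attached to the recreated table without a double-ownership error)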
- self.columns[column.name] = column.copy(schema=self.table.schema) - self.column_transfers[column.name] = {} - - def drop_column(self, table_name, column, **kw): - if column.name in self.table.primary_key.columns: - _remove_column_from_collection( - self.table.primary_key.columns, column - ) - del self.columns[column.name] - del self.column_transfers[column.name] - - def add_constraint(self, const): - if not const.name: - raise ValueError("Constraint must have a name") - if isinstance(const, sql_schema.PrimaryKeyConstraint): - if self.table.primary_key in self.unnamed_constraints: - self.unnamed_constraints.remove(self.table.primary_key) - - self.named_constraints[const.name] = const - - def drop_constraint(self, const): - if not const.name: - raise ValueError("Constraint must have a name") - try: - const = self.named_constraints.pop(const.name) - except KeyError: - if _is_type_bound(const): - # type-bound constraints are only included in the new - # table via their type object in any case, so ignore the - # drop_constraint() that comes here via the - # Operations.implementation_for(alter_column) - return - raise ValueError("No such constraint: '%s'" % const.name) - else: - if isinstance(const, PrimaryKeyConstraint): - for col in const.columns: - self.columns[col.name].primary_key = False - - def create_index(self, idx): - self.new_indexes[idx.name] = idx - - def drop_index(self, idx): - try: - del self.indexes[idx.name] - except KeyError: - raise ValueError("No such index: '%s'" % idx.name) - - def rename_table(self, *arg, **kw): - raise NotImplementedError("TODO") diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/operations/ops.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/operations/ops.py deleted file mode 100644 index 90f35cd..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/operations/ops.py +++ /dev/null @@ -1,2435 +0,0 @@ -import re - -from sqlalchemy.types import NULLTYPE - -from . import schemaobj -from .base import BatchOperations -from .base import Operations -from .. import util -from ..util import sqla_compat - - -class MigrateOperation(object): - """base class for migration command and organization objects. - - This system is part of the operation extensibility API. - - .. versionadded:: 0.8.0 - - .. seealso:: - - :ref:`operation_objects` - - :ref:`operation_plugins` - - :ref:`customizing_revision` - - """ - - @util.memoized_property - def info(self): - """A dictionary that may be used to store arbitrary information - along with this :class:`.MigrateOperation` object. 
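For example (``audit`` is a hypothetical key, consumed here by some custom autogenerate hook)::

    from alembic.operations import ops

    op = ops.CreateTableOp.from_table(some_table)
    op.info["audit"] = True  # arbitrary metadata travels with the op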
- - """ - return {} - - -class AddConstraintOp(MigrateOperation): - """Represent an add constraint operation.""" - - add_constraint_ops = util.Dispatcher() - - @property - def constraint_type(self): - raise NotImplementedError() - - @classmethod - def register_add_constraint(cls, type_): - def go(klass): - cls.add_constraint_ops.dispatch_for(type_)(klass.from_constraint) - return klass - - return go - - @classmethod - def from_constraint(cls, constraint): - return cls.add_constraint_ops.dispatch(constraint.__visit_name__)( - constraint - ) - - def reverse(self): - return DropConstraintOp.from_constraint(self.to_constraint()) - - def to_diff_tuple(self): - return ("add_constraint", self.to_constraint()) - - -@Operations.register_operation("drop_constraint") -@BatchOperations.register_operation("drop_constraint", "batch_drop_constraint") -class DropConstraintOp(MigrateOperation): - """Represent a drop constraint operation.""" - - def __init__( - self, - constraint_name, - table_name, - type_=None, - schema=None, - _orig_constraint=None, - ): - self.constraint_name = constraint_name - self.table_name = table_name - self.constraint_type = type_ - self.schema = schema - self._orig_constraint = _orig_constraint - - def reverse(self): - if self._orig_constraint is None: - raise ValueError( - "operation is not reversible; " - "original constraint is not present" - ) - return AddConstraintOp.from_constraint(self._orig_constraint) - - def to_diff_tuple(self): - if self.constraint_type == "foreignkey": - return ("remove_fk", self.to_constraint()) - else: - return ("remove_constraint", self.to_constraint()) - - @classmethod - def from_constraint(cls, constraint): - types = { - "unique_constraint": "unique", - "foreign_key_constraint": "foreignkey", - "primary_key_constraint": "primary", - "check_constraint": "check", - "column_check_constraint": "check", - } - - constraint_table = sqla_compat._table_for_constraint(constraint) - return cls( - constraint.name, - constraint_table.name, - schema=constraint_table.schema, - type_=types[constraint.__visit_name__], - _orig_constraint=constraint, - ) - - def to_constraint(self): - if self._orig_constraint is not None: - return self._orig_constraint - else: - raise ValueError( - "constraint cannot be produced; " - "original constraint is not present" - ) - - @classmethod - @util._with_legacy_names([("type", "type_"), ("name", "constraint_name")]) - def drop_constraint( - cls, operations, constraint_name, table_name, type_=None, schema=None - ): - """Drop a constraint of the given name, typically via DROP CONSTRAINT. - - :param constraint_name: name of the constraint. - :param table_name: table name. - :param type_: optional, required on MySQL. can be - 'foreignkey', 'primary', 'unique', or 'check'. - :param schema: Optional schema name to operate within. To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> constraint_name - - """ - - op = cls(constraint_name, table_name, type_=type_, schema=schema) - return operations.invoke(op) - - @classmethod - def batch_drop_constraint(cls, operations, constraint_name, type_=None): - """Issue a "drop constraint" instruction using the - current batch migration context. 
- - The batch form of this call omits the ``table_name`` and ``schema`` - arguments from the call. - - .. seealso:: - - :meth:`.Operations.drop_constraint` - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> constraint_name - - """ - op = cls( - constraint_name, - operations.impl.table_name, - type_=type_, - schema=operations.impl.schema, - ) - return operations.invoke(op) - - -@Operations.register_operation("create_primary_key") -@BatchOperations.register_operation( - "create_primary_key", "batch_create_primary_key" -) -@AddConstraintOp.register_add_constraint("primary_key_constraint") -class CreatePrimaryKeyOp(AddConstraintOp): - """Represent a create primary key operation.""" - - constraint_type = "primarykey" - - def __init__( - self, - constraint_name, - table_name, - columns, - schema=None, - _orig_constraint=None, - **kw - ): - self.constraint_name = constraint_name - self.table_name = table_name - self.columns = columns - self.schema = schema - self._orig_constraint = _orig_constraint - self.kw = kw - - @classmethod - def from_constraint(cls, constraint): - constraint_table = sqla_compat._table_for_constraint(constraint) - - return cls( - constraint.name, - constraint_table.name, - constraint.columns, - schema=constraint_table.schema, - _orig_constraint=constraint, - ) - - def to_constraint(self, migration_context=None): - if self._orig_constraint is not None: - return self._orig_constraint - - schema_obj = schemaobj.SchemaObjects(migration_context) - return schema_obj.primary_key_constraint( - self.constraint_name, - self.table_name, - self.columns, - schema=self.schema, - ) - - @classmethod - @util._with_legacy_names( - [("name", "constraint_name"), ("cols", "columns")] - ) - def create_primary_key( - cls, operations, constraint_name, table_name, columns, schema=None - ): - """Issue a "create primary key" instruction using the current - migration context. - - e.g.:: - - from alembic import op - op.create_primary_key( - "pk_my_table", "my_table", - ["id", "version"] - ) - - This internally generates a :class:`~sqlalchemy.schema.Table` object - containing the necessary columns, then generates a new - :class:`~sqlalchemy.schema.PrimaryKeyConstraint` - object which it then associates with the - :class:`~sqlalchemy.schema.Table`. - Any event listeners associated with this action will be fired - off normally. The :class:`~sqlalchemy.schema.AddConstraint` - construct is ultimately used to generate the ALTER statement. - - :param name: Name of the primary key constraint. The name is necessary - so that an ALTER statement can be emitted. For setups that - use an automated naming scheme such as that described at - :ref:`sqla:constraint_naming_conventions` - ``name`` here can be ``None``, as the event listener will - apply the name to the constraint object when it is associated - with the table. - :param table_name: String name of the target table. - :param columns: a list of string column names to be applied to the - primary key constraint. - :param schema: Optional schema name to operate within. To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - .. 
versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> constraint_name - * cols -> columns - - """ - op = cls(constraint_name, table_name, columns, schema) - return operations.invoke(op) - - @classmethod - def batch_create_primary_key(cls, operations, constraint_name, columns): - """Issue a "create primary key" instruction using the - current batch migration context. - - The batch form of this call omits the ``table_name`` and ``schema`` - arguments from the call. - - .. seealso:: - - :meth:`.Operations.create_primary_key` - - """ - op = cls( - constraint_name, - operations.impl.table_name, - columns, - schema=operations.impl.schema, - ) - return operations.invoke(op) - - -@Operations.register_operation("create_unique_constraint") -@BatchOperations.register_operation( - "create_unique_constraint", "batch_create_unique_constraint" -) -@AddConstraintOp.register_add_constraint("unique_constraint") -class CreateUniqueConstraintOp(AddConstraintOp): - """Represent a create unique constraint operation.""" - - constraint_type = "unique" - - def __init__( - self, - constraint_name, - table_name, - columns, - schema=None, - _orig_constraint=None, - **kw - ): - self.constraint_name = constraint_name - self.table_name = table_name - self.columns = columns - self.schema = schema - self._orig_constraint = _orig_constraint - self.kw = kw - - @classmethod - def from_constraint(cls, constraint): - constraint_table = sqla_compat._table_for_constraint(constraint) - - kw = {} - if constraint.deferrable: - kw["deferrable"] = constraint.deferrable - if constraint.initially: - kw["initially"] = constraint.initially - - return cls( - constraint.name, - constraint_table.name, - [c.name for c in constraint.columns], - schema=constraint_table.schema, - _orig_constraint=constraint, - **kw - ) - - def to_constraint(self, migration_context=None): - if self._orig_constraint is not None: - return self._orig_constraint - - schema_obj = schemaobj.SchemaObjects(migration_context) - return schema_obj.unique_constraint( - self.constraint_name, - self.table_name, - self.columns, - schema=self.schema, - **self.kw - ) - - @classmethod - @util._with_legacy_names( - [ - ("name", "constraint_name"), - ("source", "table_name"), - ("local_cols", "columns"), - ] - ) - def create_unique_constraint( - cls, - operations, - constraint_name, - table_name, - columns, - schema=None, - **kw - ): - """Issue a "create unique constraint" instruction using the - current migration context. - - e.g.:: - - from alembic import op - op.create_unique_constraint("uq_user_name", "user", ["name"]) - - This internally generates a :class:`~sqlalchemy.schema.Table` object - containing the necessary columns, then generates a new - :class:`~sqlalchemy.schema.UniqueConstraint` - object which it then associates with the - :class:`~sqlalchemy.schema.Table`. - Any event listeners associated with this action will be fired - off normally. The :class:`~sqlalchemy.schema.AddConstraint` - construct is ultimately used to generate the ALTER statement. - - :param name: Name of the unique constraint. The name is necessary - so that an ALTER statement can be emitted. For setups that - use an automated naming scheme such as that described at - :ref:`sqla:constraint_naming_conventions`, - ``name`` here can be ``None``, as the event listener will - apply the name to the constraint object when it is associated - with the table. - :param table_name: String name of the source table. 
- :param columns: a list of string column names in the - source table. - :param deferrable: optional bool. If set, emit DEFERRABLE or - NOT DEFERRABLE when issuing DDL for this constraint. - :param initially: optional string. If set, emit INITIALLY - when issuing DDL for this constraint. - :param schema: Optional schema name to operate within. To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> constraint_name - * source -> table_name - * local_cols -> columns - - """ - - op = cls(constraint_name, table_name, columns, schema=schema, **kw) - return operations.invoke(op) - - @classmethod - @util._with_legacy_names([("name", "constraint_name")]) - def batch_create_unique_constraint( - cls, operations, constraint_name, columns, **kw - ): - """Issue a "create unique constraint" instruction using the - current batch migration context. - - The batch form of this call omits the ``source`` and ``schema`` - arguments from the call. - - .. seealso:: - - :meth:`.Operations.create_unique_constraint` - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> constraint_name - - """ - kw["schema"] = operations.impl.schema - op = cls(constraint_name, operations.impl.table_name, columns, **kw) - return operations.invoke(op) - - -@Operations.register_operation("create_foreign_key") -@BatchOperations.register_operation( - "create_foreign_key", "batch_create_foreign_key" -) -@AddConstraintOp.register_add_constraint("foreign_key_constraint") -class CreateForeignKeyOp(AddConstraintOp): - """Represent a create foreign key constraint operation.""" - - constraint_type = "foreignkey" - - def __init__( - self, - constraint_name, - source_table, - referent_table, - local_cols, - remote_cols, - _orig_constraint=None, - **kw - ): - self.constraint_name = constraint_name - self.source_table = source_table - self.referent_table = referent_table - self.local_cols = local_cols - self.remote_cols = remote_cols - self._orig_constraint = _orig_constraint - self.kw = kw - - def to_diff_tuple(self): - return ("add_fk", self.to_constraint()) - - @classmethod - def from_constraint(cls, constraint): - kw = {} - if constraint.onupdate: - kw["onupdate"] = constraint.onupdate - if constraint.ondelete: - kw["ondelete"] = constraint.ondelete - if constraint.initially: - kw["initially"] = constraint.initially - if constraint.deferrable: - kw["deferrable"] = constraint.deferrable - if constraint.use_alter: - kw["use_alter"] = constraint.use_alter - - ( - source_schema, - source_table, - source_columns, - target_schema, - target_table, - target_columns, - onupdate, - ondelete, - deferrable, - initially, - ) = sqla_compat._fk_spec(constraint) - - kw["source_schema"] = source_schema - kw["referent_schema"] = target_schema - - return cls( - constraint.name, - source_table, - target_table, - source_columns, - target_columns, - _orig_constraint=constraint, - **kw - ) - - def to_constraint(self, migration_context=None): - if self._orig_constraint is not None: - return self._orig_constraint - schema_obj = schemaobj.SchemaObjects(migration_context) - return schema_obj.foreign_key_constraint( - self.constraint_name, - self.source_table, - self.referent_table, - self.local_cols, - 
self.remote_cols, - **self.kw - ) - - @classmethod - @util._with_legacy_names( - [ - ("name", "constraint_name"), - ("source", "source_table"), - ("referent", "referent_table"), - ] - ) - def create_foreign_key( - cls, - operations, - constraint_name, - source_table, - referent_table, - local_cols, - remote_cols, - onupdate=None, - ondelete=None, - deferrable=None, - initially=None, - match=None, - source_schema=None, - referent_schema=None, - **dialect_kw - ): - """Issue a "create foreign key" instruction using the - current migration context. - - e.g.:: - - from alembic import op - op.create_foreign_key( - "fk_user_address", "address", - "user", ["user_id"], ["id"]) - - This internally generates a :class:`~sqlalchemy.schema.Table` object - containing the necessary columns, then generates a new - :class:`~sqlalchemy.schema.ForeignKeyConstraint` - object which it then associates with the - :class:`~sqlalchemy.schema.Table`. - Any event listeners associated with this action will be fired - off normally. The :class:`~sqlalchemy.schema.AddConstraint` - construct is ultimately used to generate the ALTER statement. - - :param name: Name of the foreign key constraint. The name is necessary - so that an ALTER statement can be emitted. For setups that - use an automated naming scheme such as that described at - :ref:`sqla:constraint_naming_conventions`, - ``name`` here can be ``None``, as the event listener will - apply the name to the constraint object when it is associated - with the table. - :param source_table: String name of the source table. - :param referent_table: String name of the destination table. - :param local_cols: a list of string column names in the - source table. - :param remote_cols: a list of string column names in the - remote table. - :param onupdate: Optional string. If set, emit ON UPDATE when - issuing DDL for this constraint. Typical values include CASCADE, - DELETE and RESTRICT. - :param ondelete: Optional string. If set, emit ON DELETE when - issuing DDL for this constraint. Typical values include CASCADE, - DELETE and RESTRICT. - :param deferrable: optional bool. If set, emit DEFERRABLE or NOT - DEFERRABLE when issuing DDL for this constraint. - :param source_schema: Optional schema name of the source table. - :param referent_schema: Optional schema name of the destination table. - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> constraint_name - * source -> source_table - * referent -> referent_table - - """ - - op = cls( - constraint_name, - source_table, - referent_table, - local_cols, - remote_cols, - onupdate=onupdate, - ondelete=ondelete, - deferrable=deferrable, - source_schema=source_schema, - referent_schema=referent_schema, - initially=initially, - match=match, - **dialect_kw - ) - return operations.invoke(op) - - @classmethod - @util._with_legacy_names( - [("name", "constraint_name"), ("referent", "referent_table")] - ) - def batch_create_foreign_key( - cls, - operations, - constraint_name, - referent_table, - local_cols, - remote_cols, - referent_schema=None, - onupdate=None, - ondelete=None, - deferrable=None, - initially=None, - match=None, - **dialect_kw - ): - """Issue a "create foreign key" instruction using the - current batch migration context. - - The batch form of this call omits the ``source`` and ``source_schema`` - arguments from the call. - - e.g.:: - - with batch_alter_table("address") as batch_op: - batch_op.create_foreign_key( - "fk_user_address", - "user", ["user_id"], ["id"]) - - .. 
seealso:: - - :meth:`.Operations.create_foreign_key` - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> constraint_name - * referent -> referent_table - - """ - op = cls( - constraint_name, - operations.impl.table_name, - referent_table, - local_cols, - remote_cols, - onupdate=onupdate, - ondelete=ondelete, - deferrable=deferrable, - source_schema=operations.impl.schema, - referent_schema=referent_schema, - initially=initially, - match=match, - **dialect_kw - ) - return operations.invoke(op) - - -@Operations.register_operation("create_check_constraint") -@BatchOperations.register_operation( - "create_check_constraint", "batch_create_check_constraint" -) -@AddConstraintOp.register_add_constraint("check_constraint") -@AddConstraintOp.register_add_constraint("column_check_constraint") -class CreateCheckConstraintOp(AddConstraintOp): - """Represent a create check constraint operation.""" - - constraint_type = "check" - - def __init__( - self, - constraint_name, - table_name, - condition, - schema=None, - _orig_constraint=None, - **kw - ): - self.constraint_name = constraint_name - self.table_name = table_name - self.condition = condition - self.schema = schema - self._orig_constraint = _orig_constraint - self.kw = kw - - @classmethod - def from_constraint(cls, constraint): - constraint_table = sqla_compat._table_for_constraint(constraint) - - return cls( - constraint.name, - constraint_table.name, - constraint.sqltext, - schema=constraint_table.schema, - _orig_constraint=constraint, - ) - - def to_constraint(self, migration_context=None): - if self._orig_constraint is not None: - return self._orig_constraint - schema_obj = schemaobj.SchemaObjects(migration_context) - return schema_obj.check_constraint( - self.constraint_name, - self.table_name, - self.condition, - schema=self.schema, - **self.kw - ) - - @classmethod - @util._with_legacy_names( - [("name", "constraint_name"), ("source", "table_name")] - ) - def create_check_constraint( - cls, - operations, - constraint_name, - table_name, - condition, - schema=None, - **kw - ): - """Issue a "create check constraint" instruction using the - current migration context. - - e.g.:: - - from alembic import op - from sqlalchemy.sql import column, func - - op.create_check_constraint( - "ck_user_name_len", - "user", - func.len(column('name')) > 5 - ) - - CHECK constraints are usually against a SQL expression, so ad-hoc - table metadata is usually needed. The function will convert the given - arguments into a :class:`sqlalchemy.schema.CheckConstraint` bound - to an anonymous table in order to emit the CREATE statement. - - :param name: Name of the check constraint. The name is necessary - so that an ALTER statement can be emitted. For setups that - use an automated naming scheme such as that described at - :ref:`sqla:constraint_naming_conventions`, - ``name`` here can be ``None``, as the event listener will - apply the name to the constraint object when it is associated - with the table. - :param table_name: String name of the source table. - :param condition: SQL expression that's the condition of the - constraint. Can be a string or SQLAlchemy expression language - structure. - :param deferrable: optional bool. If set, emit DEFERRABLE or - NOT DEFERRABLE when issuing DDL for this constraint. - :param initially: optional string. If set, emit INITIALLY - when issuing DDL for this constraint. - :param schema: Optional schema name to operate within. 
To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> constraint_name - * source -> table_name - - """ - op = cls(constraint_name, table_name, condition, schema=schema, **kw) - return operations.invoke(op) - - @classmethod - @util._with_legacy_names([("name", "constraint_name")]) - def batch_create_check_constraint( - cls, operations, constraint_name, condition, **kw - ): - """Issue a "create check constraint" instruction using the - current batch migration context. - - The batch form of this call omits the ``source`` and ``schema`` - arguments from the call. - - .. seealso:: - - :meth:`.Operations.create_check_constraint` - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> constraint_name - - """ - op = cls( - constraint_name, - operations.impl.table_name, - condition, - schema=operations.impl.schema, - **kw - ) - return operations.invoke(op) - - -@Operations.register_operation("create_index") -@BatchOperations.register_operation("create_index", "batch_create_index") -class CreateIndexOp(MigrateOperation): - """Represent a create index operation.""" - - def __init__( - self, - index_name, - table_name, - columns, - schema=None, - unique=False, - _orig_index=None, - **kw - ): - self.index_name = index_name - self.table_name = table_name - self.columns = columns - self.schema = schema - self.unique = unique - self.kw = kw - self._orig_index = _orig_index - - def reverse(self): - return DropIndexOp.from_index(self.to_index()) - - def to_diff_tuple(self): - return ("add_index", self.to_index()) - - @classmethod - def from_index(cls, index): - return cls( - index.name, - index.table.name, - sqla_compat._get_index_expressions(index), - schema=index.table.schema, - unique=index.unique, - _orig_index=index, - **index.kwargs - ) - - def to_index(self, migration_context=None): - if self._orig_index: - return self._orig_index - schema_obj = schemaobj.SchemaObjects(migration_context) - return schema_obj.index( - self.index_name, - self.table_name, - self.columns, - schema=self.schema, - unique=self.unique, - **self.kw - ) - - @classmethod - @util._with_legacy_names([("name", "index_name")]) - def create_index( - cls, - operations, - index_name, - table_name, - columns, - schema=None, - unique=False, - **kw - ): - r"""Issue a "create index" instruction using the current - migration context. - - e.g.:: - - from alembic import op - op.create_index('ik_test', 't1', ['foo', 'bar']) - - Functional indexes can be produced by using the - :func:`sqlalchemy.sql.expression.text` construct:: - - from alembic import op - from sqlalchemy import text - op.create_index('ik_test', 't1', [text('lower(foo)')]) - - .. versionadded:: 0.6.7 support for making use of the - :func:`~sqlalchemy.sql.expression.text` construct in - conjunction with - :meth:`.Operations.create_index` in - order to produce functional expressions within CREATE INDEX. - - :param index_name: name of the index. - :param table_name: name of the owning table. - :param columns: a list consisting of string column names and/or - :func:`~sqlalchemy.sql.expression.text` constructs. - :param schema: Optional schema name to operate within. 
To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - :param unique: If True, create a unique index. - - :param quote: - Force quoting of this column's name on or off, corresponding - to ``True`` or ``False``. When left at its default - of ``None``, the column identifier will be quoted according to - whether the name is case sensitive (identifiers with at least one - upper case character are treated as case sensitive), or if it's a - reserved word. This flag is only needed to force quoting of a - reserved word which is not known by the SQLAlchemy dialect. - - :param \**kw: Additional keyword arguments not mentioned above are - dialect specific, and passed in the form - ``_``. - See the documentation regarding an individual dialect at - :ref:`dialect_toplevel` for detail on documented arguments. - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> index_name - - """ - op = cls( - index_name, table_name, columns, schema=schema, unique=unique, **kw - ) - return operations.invoke(op) - - @classmethod - def batch_create_index(cls, operations, index_name, columns, **kw): - """Issue a "create index" instruction using the - current batch migration context. - - .. seealso:: - - :meth:`.Operations.create_index` - - """ - - op = cls( - index_name, - operations.impl.table_name, - columns, - schema=operations.impl.schema, - **kw - ) - return operations.invoke(op) - - -@Operations.register_operation("drop_index") -@BatchOperations.register_operation("drop_index", "batch_drop_index") -class DropIndexOp(MigrateOperation): - """Represent a drop index operation.""" - - def __init__( - self, index_name, table_name=None, schema=None, _orig_index=None, **kw - ): - self.index_name = index_name - self.table_name = table_name - self.schema = schema - self._orig_index = _orig_index - self.kw = kw - - def to_diff_tuple(self): - return ("remove_index", self.to_index()) - - def reverse(self): - if self._orig_index is None: - raise ValueError( - "operation is not reversible; " "original index is not present" - ) - return CreateIndexOp.from_index(self._orig_index) - - @classmethod - def from_index(cls, index): - return cls( - index.name, - index.table.name, - schema=index.table.schema, - _orig_index=index, - **index.kwargs - ) - - def to_index(self, migration_context=None): - if self._orig_index is not None: - return self._orig_index - - schema_obj = schemaobj.SchemaObjects(migration_context) - - # need a dummy column name here since SQLAlchemy - # 0.7.6 and further raises on Index with no columns - return schema_obj.index( - self.index_name, - self.table_name, - ["x"], - schema=self.schema, - **self.kw - ) - - @classmethod - @util._with_legacy_names( - [("name", "index_name"), ("tablename", "table_name")] - ) - def drop_index( - cls, operations, index_name, table_name=None, schema=None, **kw - ): - r"""Issue a "drop index" instruction using the current - migration context. - - e.g.:: - - drop_index("accounts") - - :param index_name: name of the index. - :param table_name: name of the owning table. Some - backends such as Microsoft SQL Server require this. - :param schema: Optional schema name to operate within. To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. 
- - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - :param \**kw: Additional keyword arguments not mentioned above are - dialect specific, and passed in the form - ``_``. - See the documentation regarding an individual dialect at - :ref:`dialect_toplevel` for detail on documented arguments. - - .. versionadded:: 0.9.5 Support for dialect-specific keyword - arguments for DROP INDEX - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> index_name - - """ - op = cls(index_name, table_name=table_name, schema=schema, **kw) - return operations.invoke(op) - - @classmethod - @util._with_legacy_names([("name", "index_name")]) - def batch_drop_index(cls, operations, index_name, **kw): - """Issue a "drop index" instruction using the - current batch migration context. - - .. seealso:: - - :meth:`.Operations.drop_index` - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> index_name - - """ - - op = cls( - index_name, - table_name=operations.impl.table_name, - schema=operations.impl.schema, - **kw - ) - return operations.invoke(op) - - -@Operations.register_operation("create_table") -class CreateTableOp(MigrateOperation): - """Represent a create table operation.""" - - def __init__( - self, table_name, columns, schema=None, _orig_table=None, **kw - ): - self.table_name = table_name - self.columns = columns - self.schema = schema - self.kw = kw - self._orig_table = _orig_table - - def reverse(self): - return DropTableOp.from_table(self.to_table()) - - def to_diff_tuple(self): - return ("add_table", self.to_table()) - - @classmethod - def from_table(cls, table): - return cls( - table.name, - list(table.c) + list(table.constraints), - schema=table.schema, - _orig_table=table, - **table.kwargs - ) - - def to_table(self, migration_context=None): - if self._orig_table is not None: - return self._orig_table - schema_obj = schemaobj.SchemaObjects(migration_context) - - return schema_obj.table( - self.table_name, *self.columns, schema=self.schema, **self.kw - ) - - @classmethod - @util._with_legacy_names([("name", "table_name")]) - def create_table(cls, operations, table_name, *columns, **kw): - r"""Issue a "create table" instruction using the current migration - context. - - This directive receives an argument list similar to that of the - traditional :class:`sqlalchemy.schema.Table` construct, but without the - metadata:: - - from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column - from alembic import op - - op.create_table( - 'account', - Column('id', INTEGER, primary_key=True), - Column('name', VARCHAR(50), nullable=False), - Column('description', NVARCHAR(200)), - Column('timestamp', TIMESTAMP, server_default=func.now()) - ) - - Note that :meth:`.create_table` accepts - :class:`~sqlalchemy.schema.Column` - constructs directly from the SQLAlchemy library. 
In particular, - default values to be created on the database side are - specified using the ``server_default`` parameter, and not - ``default`` which only specifies Python-side defaults:: - - from alembic import op - from sqlalchemy import Column, TIMESTAMP, func - - # specify "DEFAULT NOW" along with the "timestamp" column - op.create_table('account', - Column('id', INTEGER, primary_key=True), - Column('timestamp', TIMESTAMP, server_default=func.now()) - ) - - The function also returns a newly created - :class:`~sqlalchemy.schema.Table` object, corresponding to the table - specification given, which is suitable for - immediate SQL operations, in particular - :meth:`.Operations.bulk_insert`:: - - from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column - from alembic import op - - account_table = op.create_table( - 'account', - Column('id', INTEGER, primary_key=True), - Column('name', VARCHAR(50), nullable=False), - Column('description', NVARCHAR(200)), - Column('timestamp', TIMESTAMP, server_default=func.now()) - ) - - op.bulk_insert( - account_table, - [ - {"name": "A1", "description": "account 1"}, - {"name": "A2", "description": "account 2"}, - ] - ) - - .. versionadded:: 0.7.0 - - :param table_name: Name of the table - :param \*columns: collection of :class:`~sqlalchemy.schema.Column` - objects within - the table, as well as optional :class:`~sqlalchemy.schema.Constraint` - objects - and :class:`~.sqlalchemy.schema.Index` objects. - :param schema: Optional schema name to operate within. To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - :param \**kw: Other keyword arguments are passed to the underlying - :class:`sqlalchemy.schema.Table` object created for the command. - - :return: the :class:`~sqlalchemy.schema.Table` object corresponding - to the parameters given. - - .. versionadded:: 0.7.0 - the :class:`~sqlalchemy.schema.Table` - object is returned. - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> table_name - - """ - op = cls(table_name, columns, **kw) - return operations.invoke(op) - - -@Operations.register_operation("drop_table") -class DropTableOp(MigrateOperation): - """Represent a drop table operation.""" - - def __init__( - self, table_name, schema=None, table_kw=None, _orig_table=None - ): - self.table_name = table_name - self.schema = schema - self.table_kw = table_kw or {} - self._orig_table = _orig_table - - def to_diff_tuple(self): - return ("remove_table", self.to_table()) - - def reverse(self): - if self._orig_table is None: - raise ValueError( - "operation is not reversible; " "original table is not present" - ) - return CreateTableOp.from_table(self._orig_table) - - @classmethod - def from_table(cls, table): - return cls(table.name, schema=table.schema, _orig_table=table) - - def to_table(self, migration_context=None): - if self._orig_table is not None: - return self._orig_table - schema_obj = schemaobj.SchemaObjects(migration_context) - return schema_obj.table( - self.table_name, schema=self.schema, **self.table_kw - ) - - @classmethod - @util._with_legacy_names([("name", "table_name")]) - def drop_table(cls, operations, table_name, schema=None, **kw): - r"""Issue a "drop table" instruction using the current - migration context. 
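(As a brief sketch of the ``reverse()`` round trip implemented by the two table ops above; names are illustrative)::

    from sqlalchemy import MetaData, Table, Column, Integer
    from alembic.operations import ops

    t = Table("account", MetaData(), Column("id", Integer, primary_key=True))
    create = ops.CreateTableOp.from_table(t)
    drop = create.reverse()  # a DropTableOp carrying the original Table
    assert drop.reverse().to_table() is t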
- - - e.g.:: - - drop_table("accounts") - - :param table_name: Name of the table - :param schema: Optional schema name to operate within. To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - :param \**kw: Other keyword arguments are passed to the underlying - :class:`sqlalchemy.schema.Table` object created for the command. - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> table_name - - """ - op = cls(table_name, schema=schema, table_kw=kw) - operations.invoke(op) - - -class AlterTableOp(MigrateOperation): - """Represent an alter table operation.""" - - def __init__(self, table_name, schema=None): - self.table_name = table_name - self.schema = schema - - -@Operations.register_operation("rename_table") -class RenameTableOp(AlterTableOp): - """Represent a rename table operation.""" - - def __init__(self, old_table_name, new_table_name, schema=None): - super(RenameTableOp, self).__init__(old_table_name, schema=schema) - self.new_table_name = new_table_name - - @classmethod - def rename_table( - cls, operations, old_table_name, new_table_name, schema=None - ): - """Emit an ALTER TABLE to rename a table. - - :param old_table_name: old name. - :param new_table_name: new name. - :param schema: Optional schema name to operate within. To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - """ - op = cls(old_table_name, new_table_name, schema=schema) - return operations.invoke(op) - - -@Operations.register_operation("create_table_comment") -class CreateTableCommentOp(AlterTableOp): - """Represent a COMMENT ON `table` operation. - """ - - def __init__( - self, table_name, comment, schema=None, existing_comment=None - ): - self.table_name = table_name - self.comment = comment - self.existing_comment = existing_comment - self.schema = schema - - @classmethod - def create_table_comment( - cls, - operations, - table_name, - comment, - existing_comment=None, - schema=None, - ): - """Emit a COMMENT ON operation to set the comment for a table. - - .. versionadded:: 1.0.6 - - :param table_name: string name of the target table. - :param comment: string value of the comment being registered against - the specified table. - :param existing_comment: String value of a comment - already registered on the specified table, used within autogenerate - so that the operation is reversible, but not required for direct - use. - - .. seealso:: - - :meth:`.Operations.drop_table_comment` - - :paramref:`.Operations.alter_column.comment` - - """ - - op = cls( - table_name, - comment, - existing_comment=existing_comment, - schema=schema, - ) - return operations.invoke(op) - - def reverse(self): - """Reverses the COMMENT ON operation against a table. 
- """ - if self.existing_comment is None: - return DropTableCommentOp( - self.table_name, - existing_comment=self.comment, - schema=self.schema, - ) - else: - return CreateTableCommentOp( - self.table_name, - self.existing_comment, - existing_comment=self.comment, - schema=self.schema, - ) - - def to_table(self, migration_context=None): - schema_obj = schemaobj.SchemaObjects(migration_context) - - return schema_obj.table( - self.table_name, schema=self.schema, comment=self.comment - ) - - def to_diff_tuple(self): - return ("add_table_comment", self.to_table(), self.existing_comment) - - -@Operations.register_operation("drop_table_comment") -class DropTableCommentOp(AlterTableOp): - """Represent an operation to remove the comment from a table. - """ - - def __init__(self, table_name, schema=None, existing_comment=None): - self.table_name = table_name - self.existing_comment = existing_comment - self.schema = schema - - @classmethod - def drop_table_comment( - cls, operations, table_name, existing_comment=None, schema=None - ): - """Issue a "drop table comment" operation to - remove an existing comment set on a table. - - .. versionadded:: 1.0.6 - - :param table_name: string name of the target table. - :param existing_comment: An optional string value of a comment already - registered on the specified table. - - .. seealso:: - - :meth:`.Operations.create_table_comment` - - :paramref:`.Operations.alter_column.comment` - - """ - - op = cls(table_name, existing_comment=existing_comment, schema=schema) - return operations.invoke(op) - - def reverse(self): - """Reverses the COMMENT ON operation against a table. - """ - return CreateTableCommentOp( - self.table_name, self.existing_comment, schema=self.schema - ) - - def to_table(self, migration_context=None): - schema_obj = schemaobj.SchemaObjects(migration_context) - - return schema_obj.table(self.table_name, schema=self.schema) - - def to_diff_tuple(self): - return ("remove_table_comment", self.to_table()) - - -@Operations.register_operation("alter_column") -@BatchOperations.register_operation("alter_column", "batch_alter_column") -class AlterColumnOp(AlterTableOp): - """Represent an alter column operation.""" - - def __init__( - self, - table_name, - column_name, - schema=None, - existing_type=None, - existing_server_default=False, - existing_nullable=None, - existing_comment=None, - modify_nullable=None, - modify_comment=False, - modify_server_default=False, - modify_name=None, - modify_type=None, - **kw - ): - super(AlterColumnOp, self).__init__(table_name, schema=schema) - self.column_name = column_name - self.existing_type = existing_type - self.existing_server_default = existing_server_default - self.existing_nullable = existing_nullable - self.existing_comment = existing_comment - self.modify_nullable = modify_nullable - self.modify_comment = modify_comment - self.modify_server_default = modify_server_default - self.modify_name = modify_name - self.modify_type = modify_type - self.kw = kw - - def to_diff_tuple(self): - col_diff = [] - schema, tname, cname = self.schema, self.table_name, self.column_name - - if self.modify_type is not None: - col_diff.append( - ( - "modify_type", - schema, - tname, - cname, - { - "existing_nullable": self.existing_nullable, - "existing_server_default": ( - self.existing_server_default - ), - "existing_comment": self.existing_comment, - }, - self.existing_type, - self.modify_type, - ) - ) - - if self.modify_nullable is not None: - col_diff.append( - ( - "modify_nullable", - schema, - tname, - cname, - { - 
"existing_type": self.existing_type, - "existing_server_default": ( - self.existing_server_default - ), - "existing_comment": self.existing_comment, - }, - self.existing_nullable, - self.modify_nullable, - ) - ) - - if self.modify_server_default is not False: - col_diff.append( - ( - "modify_default", - schema, - tname, - cname, - { - "existing_nullable": self.existing_nullable, - "existing_type": self.existing_type, - "existing_comment": self.existing_comment, - }, - self.existing_server_default, - self.modify_server_default, - ) - ) - - if self.modify_comment is not False: - col_diff.append( - ( - "modify_comment", - schema, - tname, - cname, - { - "existing_nullable": self.existing_nullable, - "existing_type": self.existing_type, - "existing_server_default": ( - self.existing_server_default - ), - }, - self.existing_comment, - self.modify_comment, - ) - ) - - return col_diff - - def has_changes(self): - hc1 = ( - self.modify_nullable is not None - or self.modify_server_default is not False - or self.modify_type is not None - or self.modify_comment is not False - ) - if hc1: - return True - for kw in self.kw: - if kw.startswith("modify_"): - return True - else: - return False - - def reverse(self): - - kw = self.kw.copy() - kw["existing_type"] = self.existing_type - kw["existing_nullable"] = self.existing_nullable - kw["existing_server_default"] = self.existing_server_default - kw["existing_comment"] = self.existing_comment - if self.modify_type is not None: - kw["modify_type"] = self.modify_type - if self.modify_nullable is not None: - kw["modify_nullable"] = self.modify_nullable - if self.modify_server_default is not False: - kw["modify_server_default"] = self.modify_server_default - if self.modify_comment is not False: - kw["modify_comment"] = self.modify_comment - - # TODO: make this a little simpler - all_keys = set( - m.group(1) - for m in [re.match(r"^(?:existing_|modify_)(.+)$", k) for k in kw] - if m - ) - - for k in all_keys: - if "modify_%s" % k in kw: - swap = kw["existing_%s" % k] - kw["existing_%s" % k] = kw["modify_%s" % k] - kw["modify_%s" % k] = swap - - return self.__class__( - self.table_name, self.column_name, schema=self.schema, **kw - ) - - @classmethod - @util._with_legacy_names([("name", "new_column_name")]) - def alter_column( - cls, - operations, - table_name, - column_name, - nullable=None, - comment=False, - server_default=False, - new_column_name=None, - type_=None, - existing_type=None, - existing_server_default=False, - existing_nullable=None, - existing_comment=None, - schema=None, - **kw - ): - """Issue an "alter column" instruction using the - current migration context. - - Generally, only that aspect of the column which - is being changed, i.e. name, type, nullability, - default, needs to be specified. Multiple changes - can also be specified at once and the backend should - "do the right thing", emitting each change either - separately or together as the backend allows. - - MySQL has special requirements here, since MySQL - cannot ALTER a column without a full specification. - When producing MySQL-compatible migration files, - it is recommended that the ``existing_type``, - ``existing_server_default``, and ``existing_nullable`` - parameters be present, if not being altered. - - Type changes which are against the SQLAlchemy - "schema" types :class:`~sqlalchemy.types.Boolean` - and :class:`~sqlalchemy.types.Enum` may also - add or drop constraints which accompany those - types on backends that don't support them natively. 
- The ``existing_type`` argument is - used in this case to identify and remove a previous - constraint that was bound to the type object. - - :param table_name: string name of the target table. - :param column_name: string name of the target column, - as it exists before the operation begins. - :param nullable: Optional; specify ``True`` or ``False`` - to alter the column's nullability. - :param server_default: Optional; specify a string - SQL expression, :func:`~sqlalchemy.sql.expression.text`, - or :class:`~sqlalchemy.schema.DefaultClause` to indicate - an alteration to the column's default value. - Set to ``None`` to have the default removed. - :param comment: optional string text of a new comment to add to the - column. - - .. versionadded:: 1.0.6 - - :param new_column_name: Optional; specify a string name here to - indicate the new name within a column rename operation. - :param type_: Optional; a :class:`~sqlalchemy.types.TypeEngine` - type object to specify a change to the column's type. - For SQLAlchemy types that also indicate a constraint (i.e. - :class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`), - the constraint is also generated. - :param autoincrement: set the ``AUTO_INCREMENT`` flag of the column; - currently understood by the MySQL dialect. - :param existing_type: Optional; a - :class:`~sqlalchemy.types.TypeEngine` - type object to specify the previous type. This - is required for all MySQL column alter operations that - don't otherwise specify a new type, as well as for - when nullability is being changed on a SQL Server - column. It is also used if the type is a so-called - SQLAlchemy "schema" type which may define a constraint (i.e. - :class:`~sqlalchemy.types.Boolean`, - :class:`~sqlalchemy.types.Enum`), - so that the constraint can be dropped. - :param existing_server_default: Optional; the existing - default value of the column. Required on MySQL if - an existing default is not being changed; else MySQL - removes the default. - :param existing_nullable: Optional; the existing nullability - of the column. Required on MySQL if the existing nullability - is not being changed; else MySQL sets this to NULL. - :param existing_autoincrement: Optional; the existing autoincrement - of the column. Used for MySQL's system of altering a column - that specifies ``AUTO_INCREMENT``. - :param existing_comment: string text of the existing comment on the - column to be maintained. Required on MySQL if the existing comment - on the column is not being changed. - - .. versionadded:: 1.0.6 - - :param schema: Optional schema name to operate within. To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - :param postgresql_using: String argument which will indicate a - SQL expression to render within the Postgresql-specific USING clause - within ALTER COLUMN. This string is taken directly as raw SQL which - must explicitly include any necessary quoting or escaping of tokens - within the expression. - - .. 
versionadded:: 0.8.8 - - """ - - alt = cls( - table_name, - column_name, - schema=schema, - existing_type=existing_type, - existing_server_default=existing_server_default, - existing_nullable=existing_nullable, - existing_comment=existing_comment, - modify_name=new_column_name, - modify_type=type_, - modify_server_default=server_default, - modify_nullable=nullable, - modify_comment=comment, - **kw - ) - - return operations.invoke(alt) - - @classmethod - def batch_alter_column( - cls, - operations, - column_name, - nullable=None, - comment=False, - server_default=False, - new_column_name=None, - type_=None, - existing_type=None, - existing_server_default=False, - existing_nullable=None, - existing_comment=None, - **kw - ): - """Issue an "alter column" instruction using the current - batch migration context. - - .. seealso:: - - :meth:`.Operations.alter_column` - - """ - alt = cls( - operations.impl.table_name, - column_name, - schema=operations.impl.schema, - existing_type=existing_type, - existing_server_default=existing_server_default, - existing_nullable=existing_nullable, - existing_comment=existing_comment, - modify_name=new_column_name, - modify_type=type_, - modify_server_default=server_default, - modify_nullable=nullable, - modify_comment=comment, - **kw - ) - - return operations.invoke(alt) - - -@Operations.register_operation("add_column") -@BatchOperations.register_operation("add_column", "batch_add_column") -class AddColumnOp(AlterTableOp): - """Represent an add column operation.""" - - def __init__(self, table_name, column, schema=None): - super(AddColumnOp, self).__init__(table_name, schema=schema) - self.column = column - - def reverse(self): - return DropColumnOp.from_column_and_tablename( - self.schema, self.table_name, self.column - ) - - def to_diff_tuple(self): - return ("add_column", self.schema, self.table_name, self.column) - - def to_column(self): - return self.column - - @classmethod - def from_column(cls, col): - return cls(col.table.name, col, schema=col.table.schema) - - @classmethod - def from_column_and_tablename(cls, schema, tname, col): - return cls(tname, col, schema=schema) - - @classmethod - def add_column(cls, operations, table_name, column, schema=None): - """Issue an "add column" instruction using the current - migration context. - - e.g.:: - - from alembic import op - from sqlalchemy import Column, String - - op.add_column('organization', - Column('name', String()) - ) - - The provided :class:`~sqlalchemy.schema.Column` object can also - specify a :class:`~sqlalchemy.schema.ForeignKey`, referencing - a remote table name. Alembic will automatically generate a stub - "referenced" table and emit a second ALTER statement in order - to add the constraint separately:: - - from alembic import op - from sqlalchemy import Column, INTEGER, ForeignKey - - op.add_column('organization', - Column('account_id', INTEGER, ForeignKey('accounts.id')) - ) - - Note that this statement uses the :class:`~sqlalchemy.schema.Column` - construct as is from the SQLAlchemy library. In particular, - default values to be created on the database side are - specified using the ``server_default`` parameter, and not - ``default`` which only specifies Python-side defaults:: - - from alembic import op - from sqlalchemy import Column, TIMESTAMP, func - - # specify "DEFAULT NOW" along with the column add - op.add_column('account', - Column('timestamp', TIMESTAMP, server_default=func.now()) - ) - - :param table_name: String name of the parent table. 
- :param column: a :class:`sqlalchemy.schema.Column` object - representing the new column. - :param schema: Optional schema name to operate within. To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - - """ - - op = cls(table_name, column, schema=schema) - return operations.invoke(op) - - @classmethod - def batch_add_column(cls, operations, column): - """Issue an "add column" instruction using the current - batch migration context. - - .. seealso:: - - :meth:`.Operations.add_column` - - """ - op = cls( - operations.impl.table_name, column, schema=operations.impl.schema - ) - return operations.invoke(op) - - -@Operations.register_operation("drop_column") -@BatchOperations.register_operation("drop_column", "batch_drop_column") -class DropColumnOp(AlterTableOp): - """Represent a drop column operation.""" - - def __init__( - self, table_name, column_name, schema=None, _orig_column=None, **kw - ): - super(DropColumnOp, self).__init__(table_name, schema=schema) - self.column_name = column_name - self.kw = kw - self._orig_column = _orig_column - - def to_diff_tuple(self): - return ( - "remove_column", - self.schema, - self.table_name, - self.to_column(), - ) - - def reverse(self): - if self._orig_column is None: - raise ValueError( - "operation is not reversible; " - "original column is not present" - ) - - return AddColumnOp.from_column_and_tablename( - self.schema, self.table_name, self._orig_column - ) - - @classmethod - def from_column_and_tablename(cls, schema, tname, col): - return cls(tname, col.name, schema=schema, _orig_column=col) - - def to_column(self, migration_context=None): - if self._orig_column is not None: - return self._orig_column - schema_obj = schemaobj.SchemaObjects(migration_context) - return schema_obj.column(self.column_name, NULLTYPE) - - @classmethod - def drop_column( - cls, operations, table_name, column_name, schema=None, **kw - ): - """Issue a "drop column" instruction using the current - migration context. - - e.g.:: - - drop_column('organization', 'account_id') - - :param table_name: name of table - :param column_name: name of column - :param schema: Optional schema name to operate within. To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - :param mssql_drop_check: Optional boolean. When ``True``, on - Microsoft SQL Server only, first - drop the CHECK constraint on the column using a - SQL-script-compatible - block that selects into a @variable from sys.check_constraints, - then exec's a separate DROP CONSTRAINT for that constraint. - :param mssql_drop_default: Optional boolean. When ``True``, on - Microsoft SQL Server only, first - drop the DEFAULT constraint on the column using a - SQL-script-compatible - block that selects into a @variable from sys.default_constraints, - then exec's a separate DROP CONSTRAINT for that default. - :param mssql_drop_foreign_key: Optional boolean. 
When ``True``, on - Microsoft SQL Server only, first - drop a single FOREIGN KEY constraint on the column using a - SQL-script-compatible - block that selects into a @variable from - sys.foreign_keys/sys.foreign_key_columns, - then exec's a separate DROP CONSTRAINT for that constraint. This - currently only works if the column has exactly one FK constraint - which refers to it. - - .. versionadded:: 0.6.2 - - """ - - op = cls(table_name, column_name, schema=schema, **kw) - return operations.invoke(op) - - @classmethod - def batch_drop_column(cls, operations, column_name, **kw): - """Issue a "drop column" instruction using the current - batch migration context. - - .. seealso:: - - :meth:`.Operations.drop_column` - - """ - op = cls( - operations.impl.table_name, - column_name, - schema=operations.impl.schema, - **kw - ) - return operations.invoke(op) - - -@Operations.register_operation("bulk_insert") -class BulkInsertOp(MigrateOperation): - """Represent a bulk insert operation.""" - - def __init__(self, table, rows, multiinsert=True): - self.table = table - self.rows = rows - self.multiinsert = multiinsert - - @classmethod - def bulk_insert(cls, operations, table, rows, multiinsert=True): - """Issue a "bulk insert" operation using the current - migration context. - - This provides a means of representing an INSERT of multiple rows - which works equally well in the context of executing on a live - connection as well as that of generating a SQL script. In the - case of a SQL script, the values are rendered inline into the - statement. - - e.g.:: - - from alembic import op - from datetime import date - from sqlalchemy.sql import table, column - from sqlalchemy import String, Integer, Date - - # Create an ad-hoc table to use for the insert statement. - accounts_table = table('account', - column('id', Integer), - column('name', String), - column('create_date', Date) - ) - - op.bulk_insert(accounts_table, - [ - {'id':1, 'name':'John Smith', - 'create_date':date(2010, 10, 5)}, - {'id':2, 'name':'Ed Williams', - 'create_date':date(2007, 5, 27)}, - {'id':3, 'name':'Wendy Jones', - 'create_date':date(2008, 8, 15)}, - ] - ) - - When using --sql mode, some datatypes may not render inline - automatically, such as dates and other special types. When this - issue is present, :meth:`.Operations.inline_literal` may be used:: - - op.bulk_insert(accounts_table, - [ - {'id':1, 'name':'John Smith', - 'create_date':op.inline_literal("2010-10-05")}, - {'id':2, 'name':'Ed Williams', - 'create_date':op.inline_literal("2007-05-27")}, - {'id':3, 'name':'Wendy Jones', - 'create_date':op.inline_literal("2008-08-15")}, - ], - multiinsert=False - ) - - When using :meth:`.Operations.inline_literal` in conjunction with - :meth:`.Operations.bulk_insert`, in order for the statement to work - in "online" (e.g. non --sql) mode, the - :paramref:`~.Operations.bulk_insert.multiinsert` - flag should be set to ``False``, which will have the effect of - individual INSERT statements being emitted to the database, each - with a distinct VALUES clause, so that the "inline" values can - still be rendered, rather than attempting to pass the values - as bound parameters. - - .. versionadded:: 0.6.4 :meth:`.Operations.inline_literal` can now - be used with :meth:`.Operations.bulk_insert`, and the - :paramref:`~.Operations.bulk_insert.multiinsert` flag has - been added to assist in this usage when running in "online" - mode. - - :param table: a table object which represents the target of the INSERT.
- - :param rows: a list of dictionaries indicating rows. - - :param multiinsert: when at its default of True and --sql mode is not - enabled, the INSERT statement will be executed using - "executemany()" style, where all elements in the list of - dictionaries are passed as bound parameters in a single - list. Setting this to False results in individual INSERT - statements being emitted per parameter set, and is needed - in those cases where non-literal values are present in the - parameter sets. - - .. versionadded:: 0.6.4 - - """ - - op = cls(table, rows, multiinsert=multiinsert) - operations.invoke(op) - - -@Operations.register_operation("execute") -class ExecuteSQLOp(MigrateOperation): - """Represent an execute SQL operation.""" - - def __init__(self, sqltext, execution_options=None): - self.sqltext = sqltext - self.execution_options = execution_options - - @classmethod - def execute(cls, operations, sqltext, execution_options=None): - r"""Execute the given SQL using the current migration context. - - The given SQL can be a plain string, e.g.:: - - op.execute("INSERT INTO table (foo) VALUES ('some value')") - - Or it can be any kind of Core SQL Expression construct, such as - below where we use an update construct:: - - from sqlalchemy.sql import table, column - from sqlalchemy import String - from alembic import op - - account = table('account', - column('name', String) - ) - op.execute( - account.update().\\ - where(account.c.name==op.inline_literal('account 1')).\\ - values({'name':op.inline_literal('account 2')}) - ) - - Above, we made use of the SQLAlchemy - :func:`sqlalchemy.sql.expression.table` and - :func:`sqlalchemy.sql.expression.column` constructs to make a brief, - ad-hoc table construct just for our UPDATE statement. A full - :class:`~sqlalchemy.schema.Table` construct of course works perfectly - fine as well, though note it's a recommended practice to at least - ensure the definition of a table is self-contained within the migration - script, rather than imported from a module that may break compatibility - with older migrations. - - In a SQL script context, the statement is emitted directly to the - output stream. There is *no* return result, however, as this - function is oriented towards generating a change script - that can run in "offline" mode. Additionally, parameterized - statements are discouraged here, as they *will not work* in offline - mode. Above, we use :meth:`.inline_literal` where parameters are - to be used. - - For full interaction with a connected database where parameters can - also be used normally, use the "bind" available from the context:: - - from alembic import op - connection = op.get_bind() - - connection.execute( - account.update().where(account.c.name=='account 1'). - values({"name": "account 2"}) - ) - - Additionally, when passing the statement as a plain string, it is first - coerced into a :func:`sqlalchemy.sql.expression.text` construct - before being passed along. In the less likely case that the - literal SQL string contains a colon, it must be escaped with a - backslash, as:: - - op.execute("INSERT INTO table (foo) VALUES ('\:colon_value')") - - - :param sqltext: Any legal SQLAlchemy expression, including: - - * a string - * a :func:`sqlalchemy.sql.expression.text` construct. - * a :func:`sqlalchemy.sql.expression.update`, - :func:`sqlalchemy.sql.expression.insert`, - or :func:`sqlalchemy.sql.expression.delete` construct.
- * Pretty much anything that's "executable" as described - in :ref:`sqlexpression_toplevel`. - - .. note:: when passing a plain string, the statement is coerced into - a :func:`sqlalchemy.sql.expression.text` construct. This construct - considers symbols with colons, e.g. ``:foo``, to be bound parameters. - To avoid this, ensure that colon symbols are escaped, e.g. - ``\:foo``. - - :param execution_options: Optional dictionary of - execution options, will be passed to - :meth:`sqlalchemy.engine.Connection.execution_options`. - """ - op = cls(sqltext, execution_options=execution_options) - return operations.invoke(op) - - -class OpContainer(MigrateOperation): - """Represent a sequence of operations.""" - - def __init__(self, ops=()): - self.ops = ops - - def is_empty(self): - return not self.ops - - def as_diffs(self): - return list(OpContainer._ops_as_diffs(self)) - - @classmethod - def _ops_as_diffs(cls, migrations): - for op in migrations.ops: - if hasattr(op, "ops"): - for sub_op in cls._ops_as_diffs(op): - yield sub_op - else: - yield op.to_diff_tuple() - - -class ModifyTableOps(OpContainer): - """Contains a sequence of operations that all apply to a single Table.""" - - def __init__(self, table_name, ops, schema=None): - super(ModifyTableOps, self).__init__(ops) - self.table_name = table_name - self.schema = schema - - def reverse(self): - return ModifyTableOps( - self.table_name, - ops=list(reversed([op.reverse() for op in self.ops])), - schema=self.schema, - ) - - -class UpgradeOps(OpContainer): - """contains a sequence of operations that would apply to the - 'upgrade' stream of a script. - - .. seealso:: - - :ref:`customizing_revision` - - """ - - def __init__(self, ops=(), upgrade_token="upgrades"): - super(UpgradeOps, self).__init__(ops=ops) - self.upgrade_token = upgrade_token - - def reverse_into(self, downgrade_ops): - downgrade_ops.ops[:] = list( - reversed([op.reverse() for op in self.ops]) - ) - return downgrade_ops - - def reverse(self): - return self.reverse_into(DowngradeOps(ops=[])) - - -class DowngradeOps(OpContainer): - """contains a sequence of operations that would apply to the - 'downgrade' stream of a script. - - .. seealso:: - - :ref:`customizing_revision` - - """ - - def __init__(self, ops=(), downgrade_token="downgrades"): - super(DowngradeOps, self).__init__(ops=ops) - self.downgrade_token = downgrade_token - - def reverse(self): - return UpgradeOps( - ops=list(reversed([op.reverse() for op in self.ops])) - ) - - -class MigrationScript(MigrateOperation): - """represents a migration script. - - E.g. when autogenerate encounters this object, this corresponds to the - production of an actual script file. - - A normal :class:`.MigrationScript` object would contain a single - :class:`.UpgradeOps` and a single :class:`.DowngradeOps` directive. - These are accessible via the ``.upgrade_ops`` and ``.downgrade_ops`` - attributes. - - In the case of an autogenerate operation that runs multiple times, - such as the multiple database example in the "multidb" template, - the ``.upgrade_ops`` and ``.downgrade_ops`` attributes are disabled, - and instead these objects should be accessed via the ``.upgrade_ops_list`` - and ``.downgrade_ops_list`` list-based attributes. These latter - attributes are always available at the very least as single-element lists. - - .. 
versionchanged:: 0.8.1 the ``.upgrade_ops`` and ``.downgrade_ops`` - attributes should be accessed via the ``.upgrade_ops_list`` - and ``.downgrade_ops_list`` attributes if multiple autogenerate - passes proceed on the same :class:`.MigrationScript` object. - - .. seealso:: - - :ref:`customizing_revision` - - """ - - def __init__( - self, - rev_id, - upgrade_ops, - downgrade_ops, - message=None, - imports=set(), - head=None, - splice=None, - branch_label=None, - version_path=None, - depends_on=None, - ): - self.rev_id = rev_id - self.message = message - self.imports = imports - self.head = head - self.splice = splice - self.branch_label = branch_label - self.version_path = version_path - self.depends_on = depends_on - self.upgrade_ops = upgrade_ops - self.downgrade_ops = downgrade_ops - - @property - def upgrade_ops(self): - """An instance of :class:`.UpgradeOps`. - - .. seealso:: - - :attr:`.MigrationScript.upgrade_ops_list` - """ - if len(self._upgrade_ops) > 1: - raise ValueError( - "This MigrationScript instance has a multiple-entry " - "list for UpgradeOps; please use the " - "upgrade_ops_list attribute." - ) - elif not self._upgrade_ops: - return None - else: - return self._upgrade_ops[0] - - @upgrade_ops.setter - def upgrade_ops(self, upgrade_ops): - self._upgrade_ops = util.to_list(upgrade_ops) - for elem in self._upgrade_ops: - assert isinstance(elem, UpgradeOps) - - @property - def downgrade_ops(self): - """An instance of :class:`.DowngradeOps`. - - .. seealso:: - - :attr:`.MigrationScript.downgrade_ops_list` - """ - if len(self._downgrade_ops) > 1: - raise ValueError( - "This MigrationScript instance has a multiple-entry " - "list for DowngradeOps; please use the " - "downgrade_ops_list attribute." - ) - elif not self._downgrade_ops: - return None - else: - return self._downgrade_ops[0] - - @downgrade_ops.setter - def downgrade_ops(self, downgrade_ops): - self._downgrade_ops = util.to_list(downgrade_ops) - for elem in self._downgrade_ops: - assert isinstance(elem, DowngradeOps) - - @property - def upgrade_ops_list(self): - """A list of :class:`.UpgradeOps` instances. - - This is used in place of the :attr:`.MigrationScript.upgrade_ops` - attribute when dealing with a revision operation that does - multiple autogenerate passes. - - .. versionadded:: 0.8.1 - - """ - return self._upgrade_ops - - @property - def downgrade_ops_list(self): - """A list of :class:`.DowngradeOps` instances. - - This is used in place of the :attr:`.MigrationScript.downgrade_ops` - attribute when dealing with a revision operation that does - multiple autogenerate passes. - - .. versionadded:: 0.8.1 - - """ - return self._downgrade_ops diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/operations/schemaobj.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/operations/schemaobj.py deleted file mode 100644 index d90b5e6..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/operations/schemaobj.py +++ /dev/null @@ -1,181 +0,0 @@ -from sqlalchemy import schema as sa_schema -from sqlalchemy.types import Integer -from sqlalchemy.types import NULLTYPE - -from .. 
import util -from ..util.compat import string_types - - -class SchemaObjects(object): - def __init__(self, migration_context=None): - self.migration_context = migration_context - - def primary_key_constraint(self, name, table_name, cols, schema=None): - m = self.metadata() - columns = [sa_schema.Column(n, NULLTYPE) for n in cols] - t = sa_schema.Table(table_name, m, *columns, schema=schema) - p = sa_schema.PrimaryKeyConstraint(*[t.c[n] for n in cols], name=name) - t.append_constraint(p) - return p - - def foreign_key_constraint( - self, - name, - source, - referent, - local_cols, - remote_cols, - onupdate=None, - ondelete=None, - deferrable=None, - source_schema=None, - referent_schema=None, - initially=None, - match=None, - **dialect_kw - ): - m = self.metadata() - if source == referent and source_schema == referent_schema: - t1_cols = local_cols + remote_cols - else: - t1_cols = local_cols - sa_schema.Table( - referent, - m, - *[sa_schema.Column(n, NULLTYPE) for n in remote_cols], - schema=referent_schema - ) - - t1 = sa_schema.Table( - source, - m, - *[sa_schema.Column(n, NULLTYPE) for n in t1_cols], - schema=source_schema - ) - - tname = ( - "%s.%s" % (referent_schema, referent) - if referent_schema - else referent - ) - - dialect_kw["match"] = match - - f = sa_schema.ForeignKeyConstraint( - local_cols, - ["%s.%s" % (tname, n) for n in remote_cols], - name=name, - onupdate=onupdate, - ondelete=ondelete, - deferrable=deferrable, - initially=initially, - **dialect_kw - ) - t1.append_constraint(f) - - return f - - def unique_constraint(self, name, source, local_cols, schema=None, **kw): - t = sa_schema.Table( - source, - self.metadata(), - *[sa_schema.Column(n, NULLTYPE) for n in local_cols], - schema=schema - ) - kw["name"] = name - uq = sa_schema.UniqueConstraint(*[t.c[n] for n in local_cols], **kw) - # TODO: need event tests to ensure the event - # is fired off here - t.append_constraint(uq) - return uq - - def check_constraint(self, name, source, condition, schema=None, **kw): - t = sa_schema.Table( - source, - self.metadata(), - sa_schema.Column("x", Integer), - schema=schema, - ) - ck = sa_schema.CheckConstraint(condition, name=name, **kw) - t.append_constraint(ck) - return ck - - def generic_constraint(self, name, table_name, type_, schema=None, **kw): - t = self.table(table_name, schema=schema) - types = { - "foreignkey": lambda name: sa_schema.ForeignKeyConstraint( - [], [], name=name - ), - "primary": sa_schema.PrimaryKeyConstraint, - "unique": sa_schema.UniqueConstraint, - "check": lambda name: sa_schema.CheckConstraint("", name=name), - None: sa_schema.Constraint, - } - try: - const = types[type_] - except KeyError: - raise TypeError( - "'type' can be one of %s" - % ", ".join(sorted(repr(x) for x in types)) - ) - else: - const = const(name=name) - t.append_constraint(const) - return const - - def metadata(self): - kw = {} - if ( - self.migration_context is not None - and "target_metadata" in self.migration_context.opts - ): - mt = self.migration_context.opts["target_metadata"] - if hasattr(mt, "naming_convention"): - kw["naming_convention"] = mt.naming_convention - return sa_schema.MetaData(**kw) - - def table(self, name, *columns, **kw): - m = self.metadata() - t = sa_schema.Table(name, m, *columns, **kw) - for f in t.foreign_keys: - self._ensure_table_for_fk(m, f) - return t - - def column(self, name, type_, **kw): - return sa_schema.Column(name, type_, **kw) - - def index(self, name, tablename, columns, schema=None, **kw): - t = sa_schema.Table( - tablename or "no_table", 
self.metadata(), schema=schema - ) - idx = sa_schema.Index( - name, - *[util.sqla_compat._textual_index_column(t, n) for n in columns], - **kw - ) - return idx - - def _parse_table_key(self, table_key): - if "." in table_key: - tokens = table_key.split(".") - sname = ".".join(tokens[0:-1]) - tname = tokens[-1] - else: - tname = table_key - sname = None - return (sname, tname) - - def _ensure_table_for_fk(self, metadata, fk): - """create a placeholder Table object for the referent of a - ForeignKey. - - """ - if isinstance(fk._colspec, string_types): - table_key, cname = fk._colspec.rsplit(".", 1) - sname, tname = self._parse_table_key(table_key) - if table_key not in metadata.tables: - rel_t = sa_schema.Table(tname, metadata, schema=sname) - else: - rel_t = metadata.tables[table_key] - if cname not in rel_t.c: - rel_t.append_column(sa_schema.Column(cname, NULLTYPE)) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/operations/toimpl.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/operations/toimpl.py deleted file mode 100644 index 5699423..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/operations/toimpl.py +++ /dev/null @@ -1,177 +0,0 @@ -from sqlalchemy import schema as sa_schema - -from . import ops -from .base import Operations -from ..util import sqla_compat - - -@Operations.implementation_for(ops.AlterColumnOp) -def alter_column(operations, operation): - - compiler = operations.impl.dialect.statement_compiler( - operations.impl.dialect, None - ) - - existing_type = operation.existing_type - existing_nullable = operation.existing_nullable - existing_server_default = operation.existing_server_default - type_ = operation.modify_type - column_name = operation.column_name - table_name = operation.table_name - schema = operation.schema - server_default = operation.modify_server_default - new_column_name = operation.modify_name - nullable = operation.modify_nullable - comment = operation.modify_comment - existing_comment = operation.existing_comment - - def _count_constraint(constraint): - return not isinstance(constraint, sa_schema.PrimaryKeyConstraint) and ( - not constraint._create_rule or constraint._create_rule(compiler) - ) - - if existing_type and type_: - t = operations.schema_obj.table( - table_name, - sa_schema.Column(column_name, existing_type), - schema=schema, - ) - for constraint in t.constraints: - if _count_constraint(constraint): - operations.impl.drop_constraint(constraint) - - operations.impl.alter_column( - table_name, - column_name, - nullable=nullable, - server_default=server_default, - name=new_column_name, - type_=type_, - schema=schema, - existing_type=existing_type, - existing_server_default=existing_server_default, - existing_nullable=existing_nullable, - comment=comment, - existing_comment=existing_comment, - **operation.kw - ) - - if type_: - t = operations.schema_obj.table( - table_name, - operations.schema_obj.column(column_name, type_), - schema=schema, - ) - for constraint in t.constraints: - if _count_constraint(constraint): - operations.impl.add_constraint(constraint) - - -@Operations.implementation_for(ops.DropTableOp) -def drop_table(operations, operation): - operations.impl.drop_table( - operation.to_table(operations.migration_context) - ) - - -@Operations.implementation_for(ops.DropColumnOp) -def drop_column(operations, operation): - column = operation.to_column(operations.migration_context) - operations.impl.drop_column( - operation.table_name, column, schema=operation.schema, **operation.kw - ) - - 
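A note on the pattern visible throughout this module: each ``*Op`` class is paired with its executable implementation through ``Operations.implementation_for``, which amounts to a type-keyed registry consulted by ``operations.invoke()``. The following is a minimal, self-contained sketch of that dispatch pattern; the names ``Registry``, ``DemoOp`` and ``demo_impl`` are illustrative placeholders, not Alembic API::

    class Registry(object):
        _impls = {}

        @classmethod
        def implementation_for(cls, op_cls):
            # decorator: map an operation class to its implementation
            def decorate(fn):
                cls._impls[op_cls] = fn
                return fn
            return decorate

        @classmethod
        def invoke(cls, operation):
            # dispatch on the concrete type of the operation object
            return cls._impls[type(operation)](operation)

    class DemoOp(object):
        def __init__(self, table_name):
            self.table_name = table_name

    @Registry.implementation_for(DemoOp)
    def demo_impl(operation):
        return "DDL for %s" % operation.table_name

    print(Registry.invoke(DemoOp("account")))  # -> DDL for account

Keeping the declarative ``*Op`` objects separate from the functions that emit DDL is what lets the same operation object serve autogenerate diffing, reversal, and execution.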
-@Operations.implementation_for(ops.CreateIndexOp) -def create_index(operations, operation): - idx = operation.to_index(operations.migration_context) - operations.impl.create_index(idx) - - -@Operations.implementation_for(ops.DropIndexOp) -def drop_index(operations, operation): - operations.impl.drop_index( - operation.to_index(operations.migration_context) - ) - - -@Operations.implementation_for(ops.CreateTableOp) -def create_table(operations, operation): - table = operation.to_table(operations.migration_context) - operations.impl.create_table(table) - return table - - -@Operations.implementation_for(ops.RenameTableOp) -def rename_table(operations, operation): - operations.impl.rename_table( - operation.table_name, operation.new_table_name, schema=operation.schema - ) - - -@Operations.implementation_for(ops.CreateTableCommentOp) -def create_table_comment(operations, operation): - table = operation.to_table(operations.migration_context) - operations.impl.create_table_comment(table) - - -@Operations.implementation_for(ops.DropTableCommentOp) -def drop_table_comment(operations, operation): - table = operation.to_table(operations.migration_context) - operations.impl.drop_table_comment(table) - - -@Operations.implementation_for(ops.AddColumnOp) -def add_column(operations, operation): - table_name = operation.table_name - column = operation.column - schema = operation.schema - - t = operations.schema_obj.table(table_name, column, schema=schema) - operations.impl.add_column(table_name, column, schema=schema) - for constraint in t.constraints: - if not isinstance(constraint, sa_schema.PrimaryKeyConstraint): - operations.impl.add_constraint(constraint) - for index in t.indexes: - operations.impl.create_index(index) - - with_comment = ( - sqla_compat._dialect_supports_comments(operations.impl.dialect) - and not operations.impl.dialect.inline_comments - ) - comment = sqla_compat._comment_attribute(column) - if comment and with_comment: - operations.impl.create_column_comment(column) - - -@Operations.implementation_for(ops.AddConstraintOp) -def create_constraint(operations, operation): - operations.impl.add_constraint( - operation.to_constraint(operations.migration_context) - ) - - -@Operations.implementation_for(ops.DropConstraintOp) -def drop_constraint(operations, operation): - operations.impl.drop_constraint( - operations.schema_obj.generic_constraint( - operation.constraint_name, - operation.table_name, - operation.constraint_type, - schema=operation.schema, - ) - ) - - -@Operations.implementation_for(ops.BulkInsertOp) -def bulk_insert(operations, operation): - operations.impl.bulk_insert( - operation.table, operation.rows, multiinsert=operation.multiinsert - ) - - -@Operations.implementation_for(ops.ExecuteSQLOp) -def execute_sql(operations, operation): - operations.migration_context.impl.execute( - operation.sqltext, execution_options=operation.execution_options - ) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/runtime/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/runtime/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/runtime/environment.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/runtime/environment.py deleted file mode 100644 index c4d6586..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/runtime/environment.py +++ /dev/null @@ -1,938 +0,0 @@ -from .migration import MigrationContext -from .. 
import util -from ..operations import Operations - - -class EnvironmentContext(util.ModuleClsProxy): - - """A configurational facade made available in an ``env.py`` script. - - The :class:`.EnvironmentContext` acts as a *facade* to the more - nuts-and-bolts objects of :class:`.MigrationContext` as well as certain - aspects of :class:`.Config`, - within the context of the ``env.py`` script that is invoked by - most Alembic commands. - - :class:`.EnvironmentContext` is normally instantiated - when a command in :mod:`alembic.command` is run. It then makes - itself available in the ``alembic.context`` module for the scope - of the command. From within an ``env.py`` script, the current - :class:`.EnvironmentContext` is available by importing this module. - - :class:`.EnvironmentContext` also supports programmatic usage. - At this level, it acts as a Python context manager; that is, it is - intended to be used with the - ``with:`` statement. A typical use of :class:`.EnvironmentContext`:: - - from alembic.config import Config - from alembic.script import ScriptDirectory - - config = Config() - config.set_main_option("script_location", "myapp:migrations") - script = ScriptDirectory.from_config(config) - - def my_function(rev, context): - '''do something with revision "rev", which - will be the current database revision, - and "context", which is the MigrationContext - that the env.py will create''' - - with EnvironmentContext( - config, - script, - fn = my_function, - as_sql = False, - starting_rev = 'base', - destination_rev = 'head', - tag = "sometag" - ): - script.run_env() - - The above script will invoke the ``env.py`` script - within the migration environment. If and when ``env.py`` - calls :meth:`.MigrationContext.run_migrations`, the - ``my_function()`` function above will be called - by the :class:`.MigrationContext`, given the context - itself as well as the current revision in the database. - - .. note:: - - For most API usages other than full-blown - invocation of migration scripts, the :class:`.MigrationContext` - and :class:`.ScriptDirectory` objects can be created and - used directly. The :class:`.EnvironmentContext` object - is *only* needed when you need to actually invoke the - ``env.py`` module present in the migration environment. - - """ - - _migration_context = None - - config = None - """An instance of :class:`.Config` representing the - configuration file contents as well as other variables - set programmatically within it.""" - - script = None - """An instance of :class:`.ScriptDirectory` which provides - programmatic access to version files within the ``versions/`` - directory. - - """ - - def __init__(self, config, script, **kw): - r"""Construct a new :class:`.EnvironmentContext`. - - :param config: a :class:`.Config` instance. - :param script: a :class:`.ScriptDirectory` instance. - :param \**kw: keyword options that will be ultimately - passed along to the :class:`.MigrationContext` when - :meth:`.EnvironmentContext.configure` is called. - - """ - self.config = config - self.script = script - self.context_opts = kw - - def __enter__(self): - """Establish a context which provides a - :class:`.EnvironmentContext` object to - env.py scripts. - - The :class:`.EnvironmentContext` will - be made available as ``from alembic import context``. - - """ - self._install_proxy() - return self - - def __exit__(self, *arg, **kw): - self._remove_proxy() - - def is_offline_mode(self): - """Return True if the current migrations environment - is running in "offline mode".
- - This is ``True`` or ``False`` depending - on whether the ``--sql`` flag was passed. - - This function does not require that the :class:`.MigrationContext` - has been configured. - - """ - return self.context_opts.get("as_sql", False) - - def is_transactional_ddl(self): - """Return True if the context is configured to expect a - transactional DDL capable backend. - - This defaults to the type of database in use, and - can be overridden by the ``transactional_ddl`` argument - to :meth:`.configure`. - - This function requires that a :class:`.MigrationContext` - has first been made available via :meth:`.configure`. - - """ - return self.get_context().impl.transactional_ddl - - def requires_connection(self): - return not self.is_offline_mode() - - def get_head_revision(self): - """Return the hex identifier of the 'head' script revision. - - If the script directory has multiple heads, this - method raises a :class:`.CommandError`; - :meth:`.EnvironmentContext.get_head_revisions` should be preferred. - - This function does not require that the :class:`.MigrationContext` - has been configured. - - .. seealso:: :meth:`.EnvironmentContext.get_head_revisions` - - """ - return self.script.as_revision_number("head") - - def get_head_revisions(self): - """Return the hex identifier of the 'heads' script revision(s). - - This returns a tuple containing the version number of all - heads in the script directory. - - This function does not require that the :class:`.MigrationContext` - has been configured. - - .. versionadded:: 0.7.0 - - """ - return self.script.as_revision_number("heads") - - def get_starting_revision_argument(self): - """Return the 'starting revision' argument, - if the revision was passed using ``start:end``. - - This is only meaningful in "offline" mode. - Returns ``None`` if no value is available - or was configured. - - This function does not require that the :class:`.MigrationContext` - has been configured. - - """ - if self._migration_context is not None: - return self.script.as_revision_number( - self.get_context()._start_from_rev - ) - elif "starting_rev" in self.context_opts: - return self.script.as_revision_number( - self.context_opts["starting_rev"] - ) - else: - # this should raise only in the case that a command - # is being run where the "starting rev" is never applicable; - # this is to catch scripts which rely upon this in - # non-sql mode or similar - raise util.CommandError( - "No starting revision argument is available." - ) - - def get_revision_argument(self): - """Get the 'destination' revision argument. - - This is typically the argument passed to the - ``upgrade`` or ``downgrade`` command. - - If it was specified as ``head``, the actual - version number is returned; if specified - as ``base``, ``None`` is returned. - - This function does not require that the :class:`.MigrationContext` - has been configured. - - """ - return self.script.as_revision_number( - self.context_opts["destination_rev"] - ) - - def get_tag_argument(self): - """Return the value passed for the ``--tag`` argument, if any. - - The ``--tag`` argument is not used directly by Alembic, - but is available for custom ``env.py`` configurations that - wish to use it; particularly for offline generation scripts - that wish to generate tagged filenames. - - This function does not require that the :class:`.MigrationContext` - has been configured. - - .. seealso:: - - :meth:`.EnvironmentContext.get_x_argument` - a newer and more - open-ended system of extending ``env.py`` scripts via the command - line.
- - """ - return self.context_opts.get("tag", None) - - def get_x_argument(self, as_dictionary=False): - """Return the value(s) passed for the ``-x`` argument, if any. - - The ``-x`` argument is an open ended flag that allows any user-defined - value or values to be passed on the command line, then available - here for consumption by a custom ``env.py`` script. - - The return value is a list, returned directly from the ``argparse`` - structure. If ``as_dictionary=True`` is passed, the ``x`` arguments - are parsed using ``key=value`` format into a dictionary that is - then returned. - - For example, to support passing a database URL on the command line, - the standard ``env.py`` script can be modified like this:: - - cmd_line_url = context.get_x_argument( - as_dictionary=True).get('dbname') - if cmd_line_url: - engine = create_engine(cmd_line_url) - else: - engine = engine_from_config( - config.get_section(config.config_ini_section), - prefix='sqlalchemy.', - poolclass=pool.NullPool) - - This then takes effect by running the ``alembic`` script as:: - - alembic -x dbname=postgresql://user:pass@host/dbname upgrade head - - This function does not require that the :class:`.MigrationContext` - has been configured. - - .. versionadded:: 0.6.0 - - .. seealso:: - - :meth:`.EnvironmentContext.get_tag_argument` - - :attr:`.Config.cmd_opts` - - """ - if self.config.cmd_opts is not None: - value = self.config.cmd_opts.x or [] - else: - value = [] - if as_dictionary: - value = dict(arg.split("=", 1) for arg in value) - return value - - def configure( - self, - connection=None, - url=None, - dialect_name=None, - transactional_ddl=None, - transaction_per_migration=False, - output_buffer=None, - starting_rev=None, - tag=None, - template_args=None, - render_as_batch=False, - target_metadata=None, - include_symbol=None, - include_object=None, - include_schemas=False, - process_revision_directives=None, - compare_type=False, - compare_server_default=False, - render_item=None, - literal_binds=False, - upgrade_token="upgrades", - downgrade_token="downgrades", - alembic_module_prefix="op.", - sqlalchemy_module_prefix="sa.", - user_module_prefix=None, - on_version_apply=None, - **kw - ): - """Configure a :class:`.MigrationContext` within this - :class:`.EnvironmentContext` which will provide database - connectivity and other configuration to a series of - migration scripts. - - Many methods on :class:`.EnvironmentContext` require that - this method has been called in order to function, as they - ultimately need to have database access or at least access - to the dialect in use. Those which do are documented as such. - - The important thing needed by :meth:`.configure` is a - means to determine what kind of database dialect is in use. - An actual connection to that database is needed only if - the :class:`.MigrationContext` is to be used in - "online" mode. - - If the :meth:`.is_offline_mode` function returns ``True``, - then no connection is needed here. Otherwise, the - ``connection`` parameter should be present as an - instance of :class:`sqlalchemy.engine.Connection`. - - This function is typically called from the ``env.py`` - script within a migration environment. It can be called - multiple times for an invocation. The most recent - :class:`~sqlalchemy.engine.Connection` - for which it was called is the one that will be operated upon - by the next call to :meth:`.run_migrations`. 
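For instance, a typical "online mode" ``env.py`` obtains a connection and passes it here along with the autogenerate target. The following is a minimal sketch in which the database URL and the ``target_metadata`` object are placeholders standing in for application-specific values::

    from sqlalchemy import create_engine, MetaData
    from alembic import context

    target_metadata = MetaData()  # normally imported from the application's models
    engine = create_engine("postgresql://scott:tiger@localhost/test")

    with engine.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
        )
        with context.begin_transaction():
            context.run_migrations()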
- - General parameters: - - :param connection: a :class:`~sqlalchemy.engine.Connection` - to use - for SQL execution in "online" mode. When present, is also - used to determine the type of dialect in use. - :param url: a string database url, or a - :class:`sqlalchemy.engine.url.URL` object. - The type of dialect to be used will be derived from this if - ``connection`` is not passed. - :param dialect_name: string name of a dialect, such as - "postgresql", "mssql", etc. - The type of dialect to be used will be derived from this if - ``connection`` and ``url`` are not passed. - :param transactional_ddl: Force the usage of "transactional" - DDL on or off; - this otherwise defaults to whether or not the dialect in - use supports it. - :param transaction_per_migration: if True, nest each migration script - in a transaction rather than the full series of migrations to - run. - - .. versionadded:: 0.6.5 - - :param output_buffer: a file-like object that will be used - for textual output - when the ``--sql`` option is used to generate SQL scripts. - Defaults to - ``sys.stdout`` if not passed here and also not present on - the :class:`.Config` - object. The value here overrides that of the :class:`.Config` - object. - :param output_encoding: when using ``--sql`` to generate SQL - scripts, apply this encoding to the string output. - :param literal_binds: when using ``--sql`` to generate SQL - scripts, pass through the ``literal_binds`` flag to the compiler - so that any literal values that would ordinarily be bound - parameters are converted to plain strings. - - .. warning:: Dialects can typically only handle simple datatypes - like strings and numbers for auto-literal generation. Datatypes - like dates, intervals, and others may still require manual - formatting, typically using :meth:`.Operations.inline_literal`. - - .. note:: the ``literal_binds`` flag is ignored on SQLAlchemy - versions prior to 0.8 where this feature is not supported. - - .. versionadded:: 0.7.6 - - .. seealso:: - - :meth:`.Operations.inline_literal` - - :param starting_rev: Override the "starting revision" argument - when using ``--sql`` mode. - :param tag: a string tag for usage by custom ``env.py`` scripts. - Set via the ``--tag`` option, can be overridden here. - :param template_args: dictionary of template arguments which - will be added to the template argument environment when - running the "revision" command. Note that the script environment - is only run within the "revision" command if the --autogenerate - option is used, or if the option "revision_environment=true" - is present in the alembic.ini file. - - :param version_table: The name of the Alembic version table. - The default is ``'alembic_version'``. - :param version_table_schema: Optional schema to place version - table within. - :param version_table_pk: boolean, whether the Alembic version table - should use a primary key constraint for the "value" column; this - only takes effect when the table is first created. - Defaults to True; setting to False should not be necessary and is - here for backwards compatibility reasons. - - .. versionadded:: 0.8.10 Added the - :paramref:`.EnvironmentContext.configure.version_table_pk` - flag and additionally established that the Alembic version table - has a primary key constraint by default. - - :param on_version_apply: a callable or collection of callables to be - run for each migration step. 
- The callables will be run in the order they are given, once for
- each migration step, after the respective operation has been
- applied but before its transaction is finalized.
- Each callable accepts no positional arguments and the following
- keyword arguments:
-
- * ``ctx``: the :class:`.MigrationContext` running the migration,
- * ``step``: a :class:`.MigrationInfo` representing the
- step currently being applied,
- * ``heads``: a collection of version strings representing the
- current heads,
- * ``run_args``: the ``**kwargs`` passed to :meth:`.run_migrations`.
-
- .. versionadded:: 0.9.3
-
-
- Parameters specific to the autogenerate feature, when
- ``alembic revision`` is run with the ``--autogenerate`` flag:
-
- :param target_metadata: a :class:`sqlalchemy.schema.MetaData`
- object, or a sequence of :class:`~sqlalchemy.schema.MetaData`
- objects, that will be consulted during autogeneration.
- The tables present in each :class:`~sqlalchemy.schema.MetaData`
- will be compared against
- what is locally available on the target
- :class:`~sqlalchemy.engine.Connection`
- to produce candidate upgrade/downgrade operations.
-
- .. versionchanged:: 0.9.0 the
- :paramref:`.EnvironmentContext.configure.target_metadata`
- parameter may now be passed a sequence of
- :class:`~sqlalchemy.schema.MetaData` objects to support
- autogeneration of multiple :class:`~sqlalchemy.schema.MetaData`
- collections.
-
- :param compare_type: Indicates type comparison behavior during
- an autogenerate
- operation. Defaults to ``False`` which disables type
- comparison. Set to
- ``True`` to turn on default type comparison, which has varied
- accuracy depending on backend. See :ref:`compare_types`
- for an example as well as information on other type
- comparison options.
-
- .. seealso::
-
- :ref:`compare_types`
-
- :paramref:`.EnvironmentContext.configure.compare_server_default`
-
- :param compare_server_default: Indicates server default comparison
- behavior during
- an autogenerate operation. Defaults to ``False`` which disables
- server default
- comparison. Set to ``True`` to turn on server default comparison,
- which has
- varied accuracy depending on backend.
-
- To customize server default comparison behavior, a callable may
- be specified
- which can filter server default comparisons during an
- autogenerate operation. The format of this
- callable is::
-
- def my_compare_server_default(context, inspected_column,
- metadata_column, inspected_default, metadata_default,
- rendered_metadata_default):
- # return True if the defaults are different,
- # False if not, or None to allow the default implementation
- # to compare these defaults
- return None
-
- context.configure(
- # ...
- compare_server_default = my_compare_server_default
- )
-
- ``inspected_column`` is a dictionary structure as returned by
- :meth:`sqlalchemy.engine.reflection.Inspector.get_columns`, whereas
- ``metadata_column`` is a :class:`sqlalchemy.schema.Column` from
- the local model environment.
-
- A return value of ``None`` indicates to allow default server default
- comparison
- to proceed. Note that some backends such as Postgresql actually
- execute
- the two defaults on the database side to compare for equivalence.
-
- .. seealso::
-
- :paramref:`.EnvironmentContext.configure.compare_type`
-
- :param include_object: A callable function which is given
- the chance to return ``True`` or ``False`` for any object,
- indicating if the given object should be considered in the
- autogenerate sweep.
-
- The function accepts the following positional arguments:
-
- * ``object``: a :class:`~sqlalchemy.schema.SchemaItem` object such
- as a :class:`~sqlalchemy.schema.Table`,
- :class:`~sqlalchemy.schema.Column`,
- :class:`~sqlalchemy.schema.Index`,
- :class:`~sqlalchemy.schema.UniqueConstraint`,
- or :class:`~sqlalchemy.schema.ForeignKeyConstraint` object
- * ``name``: the name of the object. This is typically available
- via ``object.name``.
- * ``type``: a string describing the type of object; currently
- ``"table"``, ``"column"``, ``"index"``, ``"unique_constraint"``,
- or ``"foreign_key_constraint"``
-
- .. versionadded:: 0.7.0 Support for indexes and unique constraints
- within the
- :paramref:`~.EnvironmentContext.configure.include_object` hook.
-
- .. versionadded:: 0.7.1 Support for foreign keys within the
- :paramref:`~.EnvironmentContext.configure.include_object` hook.
-
- * ``reflected``: ``True`` if the given object was produced based on
- table reflection, ``False`` if it's from a local :class:`.MetaData`
- object.
- * ``compare_to``: the object being compared against, if available,
- else ``None``.
-
- E.g.::
-
- def include_object(object, name, type_, reflected, compare_to):
- if (type_ == "column" and
- not reflected and
- object.info.get("skip_autogenerate", False)):
- return False
- else:
- return True
-
- context.configure(
- # ...
- include_object = include_object
- )
-
- :paramref:`.EnvironmentContext.configure.include_object` can also
- be used to filter on specific schemas to include or omit, when
- the :paramref:`.EnvironmentContext.configure.include_schemas`
- flag is set to ``True``. The :attr:`.Table.schema` attribute
- on each :class:`.Table` object reflected will indicate the name of the
- schema from which the :class:`.Table` originates.
-
- .. versionadded:: 0.6.0
-
- .. seealso::
-
- :paramref:`.EnvironmentContext.configure.include_schemas`
-
- :param include_symbol: A callable function which, given a table name
- and schema name (may be ``None``), returns ``True`` or ``False``,
- indicating if the given table should be considered in the
- autogenerate sweep.
-
- .. deprecated:: 0.6.0
- :paramref:`.EnvironmentContext.configure.include_symbol`
- is superseded by the more generic
- :paramref:`.EnvironmentContext.configure.include_object`
- parameter.
-
- E.g.::
-
- def include_symbol(tablename, schema):
- return tablename not in ("skip_table_one", "skip_table_two")
-
- context.configure(
- # ...
- include_symbol = include_symbol
- )
-
- .. seealso::
-
- :paramref:`.EnvironmentContext.configure.include_schemas`
-
- :paramref:`.EnvironmentContext.configure.include_object`
-
- :param render_as_batch: if True, commands which alter elements
- within a table will be placed under a ``with batch_alter_table():``
- directive, so that batch migrations will take place.
-
- .. versionadded:: 0.7.0
-
- .. seealso::
-
- :ref:`batch_migrations`
-
- :param include_schemas: If True, autogenerate will scan across
- all schemas located by the SQLAlchemy
- :meth:`~sqlalchemy.engine.reflection.Inspector.get_schema_names`
- method, and include all differences in tables found across all
- those schemas.
When using this option, you may want to also - use the :paramref:`.EnvironmentContext.configure.include_object` - option to specify a callable which - can filter the tables/schemas that get included. - - .. seealso:: - - :paramref:`.EnvironmentContext.configure.include_object` - - :param render_item: Callable that can be used to override how - any schema item, i.e. column, constraint, type, - etc., is rendered for autogenerate. The callable receives a - string describing the type of object, the object, and - the autogen context. If it returns False, the - default rendering method will be used. If it returns None, - the item will not be rendered in the context of a Table - construct, that is, can be used to skip columns or constraints - within op.create_table():: - - def my_render_column(type_, col, autogen_context): - if type_ == "column" and isinstance(col, MySpecialCol): - return repr(col) - else: - return False - - context.configure( - # ... - render_item = my_render_column - ) - - Available values for the type string include: ``"column"``, - ``"primary_key"``, ``"foreign_key"``, ``"unique"``, ``"check"``, - ``"type"``, ``"server_default"``. - - .. seealso:: - - :ref:`autogen_render_types` - - :param upgrade_token: When autogenerate completes, the text of the - candidate upgrade operations will be present in this template - variable when ``script.py.mako`` is rendered. Defaults to - ``upgrades``. - :param downgrade_token: When autogenerate completes, the text of the - candidate downgrade operations will be present in this - template variable when ``script.py.mako`` is rendered. Defaults to - ``downgrades``. - - :param alembic_module_prefix: When autogenerate refers to Alembic - :mod:`alembic.operations` constructs, this prefix will be used - (i.e. ``op.create_table``) Defaults to "``op.``". - Can be ``None`` to indicate no prefix. - - :param sqlalchemy_module_prefix: When autogenerate refers to - SQLAlchemy - :class:`~sqlalchemy.schema.Column` or type classes, this prefix - will be used - (i.e. ``sa.Column("somename", sa.Integer)``) Defaults to "``sa.``". - Can be ``None`` to indicate no prefix. - Note that when dialect-specific types are rendered, autogenerate - will render them using the dialect module name, i.e. ``mssql.BIT()``, - ``postgresql.UUID()``. - - :param user_module_prefix: When autogenerate refers to a SQLAlchemy - type (e.g. :class:`.TypeEngine`) where the module name is not - under the ``sqlalchemy`` namespace, this prefix will be used - within autogenerate. If left at its default of - ``None``, the ``__module__`` attribute of the type is used to - render the import module. It's a good practice to set this - and to have all custom types be available from a fixed module space, - in order to future-proof migration files against reorganizations - in modules. - - .. versionchanged:: 0.7.0 - :paramref:`.EnvironmentContext.configure.user_module_prefix` - no longer defaults to the value of - :paramref:`.EnvironmentContext.configure.sqlalchemy_module_prefix` - when left at ``None``; the ``__module__`` attribute is now used. - - .. versionadded:: 0.6.3 added - :paramref:`.EnvironmentContext.configure.user_module_prefix` - - .. seealso:: - - :ref:`autogen_module_prefix` - - :param process_revision_directives: a callable function that will - be passed a structure representing the end result of an autogenerate - or plain "revision" operation, which can be manipulated to affect - how the ``alembic revision`` command ultimately outputs new - revision scripts. 
The structure of the callable is:: - - def process_revision_directives(context, revision, directives): - pass - - The ``directives`` parameter is a Python list containing - a single :class:`.MigrationScript` directive, which represents - the revision file to be generated. This list as well as its - contents may be freely modified to produce any set of commands. - The section :ref:`customizing_revision` shows an example of - doing this. The ``context`` parameter is the - :class:`.MigrationContext` in use, - and ``revision`` is a tuple of revision identifiers representing the - current revision of the database. - - The callable is invoked at all times when the ``--autogenerate`` - option is passed to ``alembic revision``. If ``--autogenerate`` - is not passed, the callable is invoked only if the - ``revision_environment`` variable is set to True in the Alembic - configuration, in which case the given ``directives`` collection - will contain empty :class:`.UpgradeOps` and :class:`.DowngradeOps` - collections for ``.upgrade_ops`` and ``.downgrade_ops``. The - ``--autogenerate`` option itself can be inferred by inspecting - ``context.config.cmd_opts.autogenerate``. - - The callable function may optionally be an instance of - a :class:`.Rewriter` object. This is a helper object that - assists in the production of autogenerate-stream rewriter functions. - - - .. versionadded:: 0.8.0 - - .. versionchanged:: 0.8.1 - The - :paramref:`.EnvironmentContext.configure.process_revision_directives` - hook can append op directives into :class:`.UpgradeOps` and - :class:`.DowngradeOps` which will be rendered in Python regardless - of whether the ``--autogenerate`` option is in use or not; - the ``revision_environment`` configuration variable should be - set to "true" in the config to enable this. - - - .. seealso:: - - :ref:`customizing_revision` - - :ref:`autogen_rewriter` - - :paramref:`.command.revision.process_revision_directives` - - Parameters specific to individual backends: - - :param mssql_batch_separator: The "batch separator" which will - be placed between each statement when generating offline SQL Server - migrations. Defaults to ``GO``. Note this is in addition to the - customary semicolon ``;`` at the end of each statement; SQL Server - considers the "batch separator" to denote the end of an - individual statement execution, and cannot group certain - dependent operations in one step. - :param oracle_batch_separator: The "batch separator" which will - be placed between each statement when generating offline - Oracle migrations. Defaults to ``/``. Oracle doesn't add a - semicolon between statements like most other backends. 
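A hedged sketch tying several of these parameters together in ``env.py`` (``Base.metadata`` and the ``include_object`` filter are illustrative, not part of any stock template; ``connection`` is assumed to have been acquired earlier in the script)::

    from alembic import context
    from myapp.models import Base  # hypothetical declarative base

    def include_object(object, name, type_, reflected, compare_to):
        # skip any table whose info dict opts out of autogenerate
        if type_ == "table" and object.info.get("skip_autogenerate", False):
            return False
        return True

    context.configure(
        connection=connection,
        target_metadata=Base.metadata,
        compare_type=True,
        compare_server_default=True,
        include_object=include_object,
        render_as_batch=True,  # helpful when targeting SQLite
    )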
- - """ - opts = self.context_opts - if transactional_ddl is not None: - opts["transactional_ddl"] = transactional_ddl - if output_buffer is not None: - opts["output_buffer"] = output_buffer - elif self.config.output_buffer is not None: - opts["output_buffer"] = self.config.output_buffer - if starting_rev: - opts["starting_rev"] = starting_rev - if tag: - opts["tag"] = tag - if template_args and "template_args" in opts: - opts["template_args"].update(template_args) - opts["transaction_per_migration"] = transaction_per_migration - opts["target_metadata"] = target_metadata - opts["include_symbol"] = include_symbol - opts["include_object"] = include_object - opts["include_schemas"] = include_schemas - opts["render_as_batch"] = render_as_batch - opts["upgrade_token"] = upgrade_token - opts["downgrade_token"] = downgrade_token - opts["sqlalchemy_module_prefix"] = sqlalchemy_module_prefix - opts["alembic_module_prefix"] = alembic_module_prefix - opts["user_module_prefix"] = user_module_prefix - opts["literal_binds"] = literal_binds - opts["process_revision_directives"] = process_revision_directives - opts["on_version_apply"] = util.to_tuple(on_version_apply, default=()) - - if render_item is not None: - opts["render_item"] = render_item - if compare_type is not None: - opts["compare_type"] = compare_type - if compare_server_default is not None: - opts["compare_server_default"] = compare_server_default - opts["script"] = self.script - - opts.update(kw) - - self._migration_context = MigrationContext.configure( - connection=connection, - url=url, - dialect_name=dialect_name, - environment_context=self, - opts=opts, - ) - - def run_migrations(self, **kw): - """Run migrations as determined by the current command line - configuration - as well as versioning information present (or not) in the current - database connection (if one is present). - - The function accepts optional ``**kw`` arguments. If these are - passed, they are sent directly to the ``upgrade()`` and - ``downgrade()`` - functions within each target revision file. By modifying the - ``script.py.mako`` file so that the ``upgrade()`` and ``downgrade()`` - functions accept arguments, parameters can be passed here so that - contextual information, usually information to identify a particular - database in use, can be passed from a custom ``env.py`` script - to the migration functions. - - This function requires that a :class:`.MigrationContext` has - first been made available via :meth:`.configure`. - - """ - with Operations.context(self._migration_context): - self.get_context().run_migrations(**kw) - - def execute(self, sql, execution_options=None): - """Execute the given SQL using the current change context. - - The behavior of :meth:`.execute` is the same - as that of :meth:`.Operations.execute`. Please see that - function's documentation for full detail including - caveats and limitations. - - This function requires that a :class:`.MigrationContext` has - first been made available via :meth:`.configure`. - - """ - self.get_context().execute(sql, execution_options=execution_options) - - def static_output(self, text): - """Emit text directly to the "offline" SQL stream. - - Typically this is for emitting comments that - start with --. The statement is not treated - as a SQL execution, no ; or batch separator - is added, etc. 
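For example (a minimal sketch; the comment text is arbitrary), an ``env.py`` script can annotate the generated script when running in ``--sql`` mode::

    from alembic import context

    if context.is_offline_mode():
        # emitted verbatim to the SQL stream; not treated as a statement
        context.static_output("-- generated by env.py in offline mode")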
- - """ - self.get_context().impl.static_output(text) - - def begin_transaction(self): - """Return a context manager that will - enclose an operation within a "transaction", - as defined by the environment's offline - and transactional DDL settings. - - e.g.:: - - with context.begin_transaction(): - context.run_migrations() - - :meth:`.begin_transaction` is intended to - "do the right thing" regardless of - calling context: - - * If :meth:`.is_transactional_ddl` is ``False``, - returns a "do nothing" context manager - which otherwise produces no transactional - state or directives. - * If :meth:`.is_offline_mode` is ``True``, - returns a context manager that will - invoke the :meth:`.DefaultImpl.emit_begin` - and :meth:`.DefaultImpl.emit_commit` - methods, which will produce the string - directives ``BEGIN`` and ``COMMIT`` on - the output stream, as rendered by the - target backend (e.g. SQL Server would - emit ``BEGIN TRANSACTION``). - * Otherwise, calls :meth:`sqlalchemy.engine.Connection.begin` - on the current online connection, which - returns a :class:`sqlalchemy.engine.Transaction` - object. This object demarcates a real - transaction and is itself a context manager, - which will roll back if an exception - is raised. - - Note that a custom ``env.py`` script which - has more specific transactional needs can of course - manipulate the :class:`~sqlalchemy.engine.Connection` - directly to produce transactional state in "online" - mode. - - """ - - return self.get_context().begin_transaction() - - def get_context(self): - """Return the current :class:`.MigrationContext` object. - - If :meth:`.EnvironmentContext.configure` has not been - called yet, raises an exception. - - """ - - if self._migration_context is None: - raise Exception("No context has been configured yet.") - return self._migration_context - - def get_bind(self): - """Return the current 'bind'. - - In "online" mode, this is the - :class:`sqlalchemy.engine.Connection` currently being used - to emit SQL to the database. - - This function requires that a :class:`.MigrationContext` - has first been made available via :meth:`.configure`. - - """ - return self.get_context().bind - - def get_impl(self): - return self.get_context().impl diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/runtime/migration.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/runtime/migration.py deleted file mode 100644 index a4ad740..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/runtime/migration.py +++ /dev/null @@ -1,1065 +0,0 @@ -from contextlib import contextmanager -import logging -import sys - -from sqlalchemy import Column -from sqlalchemy import literal_column -from sqlalchemy import MetaData -from sqlalchemy import PrimaryKeyConstraint -from sqlalchemy import String -from sqlalchemy import Table -from sqlalchemy.engine import Connection -from sqlalchemy.engine import url as sqla_url -from sqlalchemy.engine.strategies import MockEngineStrategy - -from .. import ddl -from .. import util -from ..util.compat import callable -from ..util.compat import EncodedIO - -log = logging.getLogger(__name__) - - -class MigrationContext(object): - - """Represent the database state made available to a migration - script. - - :class:`.MigrationContext` is the front end to an actual - database connection, or alternatively a string output - stream given a particular database dialect, - from an Alembic perspective. 
-
- When inside the ``env.py`` script, the :class:`.MigrationContext`
- is available via the
- :meth:`.EnvironmentContext.get_context` method,
- which is available at ``alembic.context``::
-
- # from within env.py script
- from alembic import context
- migration_context = context.get_context()
-
- For usage outside of an ``env.py`` script, such as for
- utility routines that want to check the current version
- in the database, the :meth:`.MigrationContext.configure`
- method may be used to create new :class:`.MigrationContext` objects.
- For example, to get at the current revision in the
- database using :meth:`.MigrationContext.get_current_revision`::
-
- # in any application, outside of an env.py script
- from alembic.migration import MigrationContext
- from sqlalchemy import create_engine
-
- engine = create_engine("postgresql://mydatabase")
- conn = engine.connect()
-
- context = MigrationContext.configure(conn)
- current_rev = context.get_current_revision()
-
- The above context can also be used to produce
- Alembic migration operations with an :class:`.Operations`
- instance::
-
- # in any application, outside of the normal Alembic environment
- from alembic.operations import Operations
- op = Operations(context)
- op.alter_column("mytable", "somecolumn", nullable=True)
-
- """
-
- def __init__(self, dialect, connection, opts, environment_context=None):
- self.environment_context = environment_context
- self.opts = opts
- self.dialect = dialect
- self.script = opts.get("script")
- as_sql = opts.get("as_sql", False)
- transactional_ddl = opts.get("transactional_ddl")
- self._transaction_per_migration = opts.get(
- "transaction_per_migration", False
- )
- self.on_version_apply_callbacks = opts.get("on_version_apply", ())
-
- if as_sql:
- self.connection = self._stdout_connection(connection)
- assert self.connection is not None
- else:
- self.connection = connection
- self._migrations_fn = opts.get("fn")
- self.as_sql = as_sql
-
- if "output_encoding" in opts:
- self.output_buffer = EncodedIO(
- opts.get("output_buffer") or sys.stdout,
- opts["output_encoding"],
- )
- else:
- self.output_buffer = opts.get("output_buffer", sys.stdout)
-
- self._user_compare_type = opts.get("compare_type", False)
- self._user_compare_server_default = opts.get(
- "compare_server_default", False
- )
- self.version_table = version_table = opts.get(
- "version_table", "alembic_version"
- )
- self.version_table_schema = version_table_schema = opts.get(
- "version_table_schema", None
- )
- self._version = Table(
- version_table,
- MetaData(),
- Column("version_num", String(32), nullable=False),
- schema=version_table_schema,
- )
- if opts.get("version_table_pk", True):
- self._version.append_constraint(
- PrimaryKeyConstraint(
- "version_num", name="%s_pkc" % version_table
- )
- )
-
- self._start_from_rev = opts.get("starting_rev")
- self.impl = ddl.DefaultImpl.get_by_dialect(dialect)(
- dialect,
- self.connection,
- self.as_sql,
- transactional_ddl,
- self.output_buffer,
- opts,
- )
- log.info("Context impl %s.", self.impl.__class__.__name__)
- if self.as_sql:
- log.info("Generating static SQL")
- log.info(
- "Will assume %s DDL.",
- "transactional"
- if self.impl.transactional_ddl
- else "non-transactional",
- )
-
- @classmethod
- def configure(
- cls,
- connection=None,
- url=None,
- dialect_name=None,
- dialect=None,
- environment_context=None,
- opts=None,
- ):
- """Create a new :class:`.MigrationContext`.
-
- This is a factory method usually called
- by :meth:`.EnvironmentContext.configure`.
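A dialect alone also suffices when no database is at hand, for example to render SQL rather than execute it; a hedged sketch grounded in the factory logic shown here::

    from alembic.migration import MigrationContext

    # offline context: the dialect is derived from its name, and
    # statements are written to stdout instead of being executed
    ctx = MigrationContext.configure(
        dialect_name="postgresql",
        opts={"as_sql": True},
    )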
- - :param connection: a :class:`~sqlalchemy.engine.Connection` - to use for SQL execution in "online" mode. When present, - is also used to determine the type of dialect in use. - :param url: a string database url, or a - :class:`sqlalchemy.engine.url.URL` object. - The type of dialect to be used will be derived from this if - ``connection`` is not passed. - :param dialect_name: string name of a dialect, such as - "postgresql", "mssql", etc. The type of dialect to be used will be - derived from this if ``connection`` and ``url`` are not passed. - :param opts: dictionary of options. Most other options - accepted by :meth:`.EnvironmentContext.configure` are passed via - this dictionary. - - """ - if opts is None: - opts = {} - - if connection: - if not isinstance(connection, Connection): - util.warn( - "'connection' argument to configure() is expected " - "to be a sqlalchemy.engine.Connection instance, " - "got %r" % connection - ) - dialect = connection.dialect - elif url: - url = sqla_url.make_url(url) - dialect = url.get_dialect()() - elif dialect_name: - url = sqla_url.make_url("%s://" % dialect_name) - dialect = url.get_dialect()() - elif not dialect: - raise Exception("Connection, url, or dialect_name is required.") - - return MigrationContext(dialect, connection, opts, environment_context) - - def begin_transaction(self, _per_migration=False): - transaction_now = _per_migration == self._transaction_per_migration - - if not transaction_now: - - @contextmanager - def do_nothing(): - yield - - return do_nothing() - - elif not self.impl.transactional_ddl: - - @contextmanager - def do_nothing(): - yield - - return do_nothing() - elif self.as_sql: - - @contextmanager - def begin_commit(): - self.impl.emit_begin() - yield - self.impl.emit_commit() - - return begin_commit() - else: - return self.bind.begin() - - def get_current_revision(self): - """Return the current revision, usually that which is present - in the ``alembic_version`` table in the database. - - This method intends to be used only for a migration stream that - does not contain unmerged branches in the target database; - if there are multiple branches present, an exception is raised. - The :meth:`.MigrationContext.get_current_heads` should be preferred - over this method going forward in order to be compatible with - branch migration support. - - If this :class:`.MigrationContext` was configured in "offline" - mode, that is with ``as_sql=True``, the ``starting_rev`` - parameter is returned instead, if any. - - """ - heads = self.get_current_heads() - if len(heads) == 0: - return None - elif len(heads) > 1: - raise util.CommandError( - "Version table '%s' has more than one head present; " - "please use get_current_heads()" % self.version_table - ) - else: - return heads[0] - - def get_current_heads(self): - """Return a tuple of the current 'head versions' that are represented - in the target database. - - For a migration stream without branches, this will be a single - value, synonymous with that of - :meth:`.MigrationContext.get_current_revision`. However when multiple - unmerged branches exist within the target database, the returned tuple - will contain a value for each head. - - If this :class:`.MigrationContext` was configured in "offline" - mode, that is with ``as_sql=True``, the ``starting_rev`` - parameter is returned in a one-length tuple. - - If no version table is present, or if there are no revisions - present, an empty tuple is returned. - - .. 
versionadded:: 0.7.0 - - """ - if self.as_sql: - start_from_rev = self._start_from_rev - if start_from_rev == "base": - start_from_rev = None - elif start_from_rev is not None and self.script: - start_from_rev = self.script.get_revision( - start_from_rev - ).revision - - return util.to_tuple(start_from_rev, default=()) - else: - if self._start_from_rev: - raise util.CommandError( - "Can't specify current_rev to context " - "when using a database connection" - ) - if not self._has_version_table(): - return () - return tuple( - row[0] for row in self.connection.execute(self._version.select()) - ) - - def _ensure_version_table(self): - self._version.create(self.connection, checkfirst=True) - - def _has_version_table(self): - return self.connection.dialect.has_table( - self.connection, self.version_table, self.version_table_schema - ) - - def stamp(self, script_directory, revision): - """Stamp the version table with a specific revision. - - This method calculates those branches to which the given revision - can apply, and updates those branches as though they were migrated - towards that revision (either up or down). If no current branches - include the revision, it is added as a new branch head. - - .. versionadded:: 0.7.0 - - """ - heads = self.get_current_heads() - if not self.as_sql and not heads: - self._ensure_version_table() - head_maintainer = HeadMaintainer(self, heads) - for step in script_directory._stamp_revs(revision, heads): - head_maintainer.update_to_step(step) - - def run_migrations(self, **kw): - r"""Run the migration scripts established for this - :class:`.MigrationContext`, if any. - - The commands in :mod:`alembic.command` will set up a function - that is ultimately passed to the :class:`.MigrationContext` - as the ``fn`` argument. This function represents the "work" - that will be done when :meth:`.MigrationContext.run_migrations` - is called, typically from within the ``env.py`` script of the - migration environment. The "work function" then provides an iterable - of version callables and other version information which - in the case of the ``upgrade`` or ``downgrade`` commands are the - list of version scripts to invoke. Other commands yield nothing, - in the case that a command wants to run some other operation - against the database such as the ``current`` or ``stamp`` commands. - - :param \**kw: keyword arguments here will be passed to each - migration callable, that is the ``upgrade()`` or ``downgrade()`` - method within revision scripts. 
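To illustrate the ``**kw`` pass-through (``engine_name`` is an invented keyword; it requires a matching edit to ``script.py.mako`` so that ``upgrade()``/``downgrade()`` accept it)::

    # env.py: every upgrade()/downgrade() receives engine_name
    with context.begin_transaction():
        context.run_migrations(engine_name="analytics")

    # a revision file generated from the modified template
    from alembic import op

    def upgrade(engine_name):
        if engine_name == "analytics":
            op.create_index("ix_events_ts", "events", ["ts"])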
- - """ - self.impl.start_migrations() - - heads = self.get_current_heads() - if not self.as_sql and not heads: - self._ensure_version_table() - - head_maintainer = HeadMaintainer(self, heads) - - starting_in_transaction = ( - not self.as_sql and self._in_connection_transaction() - ) - - for step in self._migrations_fn(heads, self): - with self.begin_transaction(_per_migration=True): - if self.as_sql and not head_maintainer.heads: - # for offline mode, include a CREATE TABLE from - # the base - self._version.create(self.connection) - log.info("Running %s", step) - if self.as_sql: - self.impl.static_output( - "-- Running %s" % (step.short_log,) - ) - step.migration_fn(**kw) - - # previously, we wouldn't stamp per migration - # if we were in a transaction, however given the more - # complex model that involves any number of inserts - # and row-targeted updates and deletes, it's simpler for now - # just to run the operations on every version - head_maintainer.update_to_step(step) - for callback in self.on_version_apply_callbacks: - callback( - ctx=self, - step=step.info, - heads=set(head_maintainer.heads), - run_args=kw, - ) - - if ( - not starting_in_transaction - and not self.as_sql - and not self.impl.transactional_ddl - and self._in_connection_transaction() - ): - raise util.CommandError( - 'Migration "%s" has left an uncommitted ' - "transaction opened; transactional_ddl is False so " - "Alembic is not committing transactions" % step - ) - - if self.as_sql and not head_maintainer.heads: - self._version.drop(self.connection) - - def _in_connection_transaction(self): - try: - meth = self.connection.in_transaction - except AttributeError: - return False - else: - return meth() - - def execute(self, sql, execution_options=None): - """Execute a SQL construct or string statement. - - The underlying execution mechanics are used, that is - if this is "offline mode" the SQL is written to the - output buffer, otherwise the SQL is emitted on - the current SQLAlchemy connection. - - """ - self.impl._exec(sql, execution_options) - - def _stdout_connection(self, connection): - def dump(construct, *multiparams, **params): - self.impl._exec(construct) - - return MockEngineStrategy.MockConnection(self.dialect, dump) - - @property - def bind(self): - """Return the current "bind". - - In online mode, this is an instance of - :class:`sqlalchemy.engine.Connection`, and is suitable - for ad-hoc execution of any kind of usage described - in :ref:`sqlexpression_toplevel` as well as - for usage with the :meth:`sqlalchemy.schema.Table.create` - and :meth:`sqlalchemy.schema.MetaData.create_all` methods - of :class:`~sqlalchemy.schema.Table`, - :class:`~sqlalchemy.schema.MetaData`. - - Note that when "standard output" mode is enabled, - this bind will be a "mock" connection handler that cannot - return results and is only appropriate for a very limited - subset of commands. - - """ - return self.connection - - @property - def config(self): - """Return the :class:`.Config` used by the current environment, if any. - - .. 
versionadded:: 0.6.6 - - """ - if self.environment_context: - return self.environment_context.config - else: - return None - - def _compare_type(self, inspector_column, metadata_column): - if self._user_compare_type is False: - return False - - if callable(self._user_compare_type): - user_value = self._user_compare_type( - self, - inspector_column, - metadata_column, - inspector_column.type, - metadata_column.type, - ) - if user_value is not None: - return user_value - - return self.impl.compare_type(inspector_column, metadata_column) - - def _compare_server_default( - self, - inspector_column, - metadata_column, - rendered_metadata_default, - rendered_column_default, - ): - - if self._user_compare_server_default is False: - return False - - if callable(self._user_compare_server_default): - user_value = self._user_compare_server_default( - self, - inspector_column, - metadata_column, - rendered_column_default, - metadata_column.server_default, - rendered_metadata_default, - ) - if user_value is not None: - return user_value - - return self.impl.compare_server_default( - inspector_column, - metadata_column, - rendered_metadata_default, - rendered_column_default, - ) - - -class HeadMaintainer(object): - def __init__(self, context, heads): - self.context = context - self.heads = set(heads) - - def _insert_version(self, version): - assert version not in self.heads - self.heads.add(version) - - self.context.impl._exec( - self.context._version.insert().values( - version_num=literal_column("'%s'" % version) - ) - ) - - def _delete_version(self, version): - self.heads.remove(version) - - ret = self.context.impl._exec( - self.context._version.delete().where( - self.context._version.c.version_num - == literal_column("'%s'" % version) - ) - ) - if not self.context.as_sql and ret.rowcount != 1: - raise util.CommandError( - "Online migration expected to match one " - "row when deleting '%s' in '%s'; " - "%d found" - % (version, self.context.version_table, ret.rowcount) - ) - - def _update_version(self, from_, to_): - assert to_ not in self.heads - self.heads.remove(from_) - self.heads.add(to_) - - ret = self.context.impl._exec( - self.context._version.update() - .values(version_num=literal_column("'%s'" % to_)) - .where( - self.context._version.c.version_num - == literal_column("'%s'" % from_) - ) - ) - if not self.context.as_sql and ret.rowcount != 1: - raise util.CommandError( - "Online migration expected to match one " - "row when updating '%s' to '%s' in '%s'; " - "%d found" - % (from_, to_, self.context.version_table, ret.rowcount) - ) - - def update_to_step(self, step): - if step.should_delete_branch(self.heads): - vers = step.delete_version_num - log.debug("branch delete %s", vers) - self._delete_version(vers) - elif step.should_create_branch(self.heads): - vers = step.insert_version_num - log.debug("new branch insert %s", vers) - self._insert_version(vers) - elif step.should_merge_branches(self.heads): - # delete revs, update from rev, update to rev - ( - delete_revs, - update_from_rev, - update_to_rev, - ) = step.merge_branch_idents(self.heads) - log.debug( - "merge, delete %s, update %s to %s", - delete_revs, - update_from_rev, - update_to_rev, - ) - for delrev in delete_revs: - self._delete_version(delrev) - self._update_version(update_from_rev, update_to_rev) - elif step.should_unmerge_branches(self.heads): - ( - update_from_rev, - update_to_rev, - insert_revs, - ) = step.unmerge_branch_idents(self.heads) - log.debug( - "unmerge, insert %s, update %s to %s", - insert_revs, - 
update_from_rev,
- update_to_rev,
- )
- for insrev in insert_revs:
- self._insert_version(insrev)
- self._update_version(update_from_rev, update_to_rev)
- else:
- from_, to_ = step.update_version_num(self.heads)
- log.debug("update %s to %s", from_, to_)
- self._update_version(from_, to_)
-
-
-class MigrationInfo(object):
- """Exposes information about a migration step to a callback listener.
-
- The :class:`.MigrationInfo` object is available exclusively for the
- benefit of the :paramref:`.EnvironmentContext.on_version_apply`
- callback hook.
-
- .. versionadded:: 0.9.3
-
- """
-
- is_upgrade = None
- """True/False: indicates whether this operation ascends or descends the
- version tree."""
-
- is_stamp = None
- """True/False: indicates whether this operation is a stamp (i.e. one that
- updates the version table only, without running any actual migration
- operations)."""
-
- up_revision_id = None
- """Version string corresponding to :attr:`.Revision.revision`.
-
- In the case of a stamp operation, it is advised to use the
- :attr:`.MigrationInfo.up_revision_ids` tuple as a stamp operation can
- make a single movement from one or more branches down to a single
- branchpoint, in which case there will be multiple "up" revisions.
-
- .. seealso::
-
- :attr:`.MigrationInfo.up_revision_ids`
-
- """
-
- up_revision_ids = None
- """Tuple of version strings corresponding to :attr:`.Revision.revision`.
-
- In the majority of cases, this tuple will contain a single value,
- synonymous with the scalar value of
- :attr:`.MigrationInfo.up_revision_id`.
- It can be multiple revision identifiers only in the case of an
- ``alembic stamp`` operation which is moving downwards from multiple
- branches down to their common branch point.
-
- .. versionadded:: 0.9.4
-
- """
-
- down_revision_ids = None
- """Tuple of strings representing the base revisions of this migration step.
-
- If empty, this represents a root revision; otherwise, the first item
- corresponds to :attr:`.Revision.down_revision`, and the rest are inferred
- from dependencies.
- """
-
- revision_map = None
- """The revision map inside of which this operation occurs."""
-
- def __init__(
- self, revision_map, is_upgrade, is_stamp, up_revisions, down_revisions
- ):
- self.revision_map = revision_map
- self.is_upgrade = is_upgrade
- self.is_stamp = is_stamp
- self.up_revision_ids = util.to_tuple(up_revisions, default=())
- if self.up_revision_ids:
- self.up_revision_id = self.up_revision_ids[0]
- else:
- # this should never be the case with
- # "upgrade", "downgrade", or "stamp" as we are always
- # measuring movement in terms of at least one upgrade version
- self.up_revision_id = None
- self.down_revision_ids = util.to_tuple(down_revisions, default=())
-
- @property
- def is_migration(self):
- """True/False: indicates whether this operation is a migration.
-
- At present this is true if and only if the migration is not a stamp.
- If other operation types are added in the future, both this attribute
- and :attr:`~.MigrationInfo.is_stamp` will be false.
- """
- return not self.is_stamp
-
- @property
- def source_revision_ids(self):
- """Active revisions before this migration step is applied."""
- return (
- self.down_revision_ids if self.is_upgrade else self.up_revision_ids
- )
-
- @property
- def destination_revision_ids(self):
- """Active revisions after this migration step is applied."""
- return (
- self.up_revision_ids if self.is_upgrade else self.down_revision_ids
- )
-
- @property
- def up_revision(self):
- """Get :attr:`~.MigrationInfo.up_revision_id` as
- a :class:`.Revision`.
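A hedged sketch of an ``on_version_apply`` callback consuming the :class:`.MigrationInfo` attributes documented above (the logger name is arbitrary)::

    import logging

    audit_log = logging.getLogger("migration.audit")

    def log_step(ctx, step, heads, run_args, **kw):
        direction = "upgrade" if step.is_upgrade else "downgrade"
        audit_log.info(
            "%s %s -> %s (heads now: %s)",
            direction,
            ", ".join(step.source_revision_ids) or "<base>",
            ", ".join(step.destination_revision_ids),
            ", ".join(sorted(heads)),
        )

    # in env.py:
    # context.configure(..., on_version_apply=log_step)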
- - """ - return self.revision_map.get_revision(self.up_revision_id) - - @property - def up_revisions(self): - """Get :attr:`~.MigrationInfo.up_revision_ids` as a :class:`.Revision`. - - .. versionadded:: 0.9.4 - - """ - return self.revision_map.get_revisions(self.up_revision_ids) - - @property - def down_revisions(self): - """Get :attr:`~.MigrationInfo.down_revision_ids` as a tuple of - :class:`Revisions <.Revision>`.""" - return self.revision_map.get_revisions(self.down_revision_ids) - - @property - def source_revisions(self): - """Get :attr:`~MigrationInfo.source_revision_ids` as a tuple of - :class:`Revisions <.Revision>`.""" - return self.revision_map.get_revisions(self.source_revision_ids) - - @property - def destination_revisions(self): - """Get :attr:`~MigrationInfo.destination_revision_ids` as a tuple of - :class:`Revisions <.Revision>`.""" - return self.revision_map.get_revisions(self.destination_revision_ids) - - -class MigrationStep(object): - @property - def name(self): - return self.migration_fn.__name__ - - @classmethod - def upgrade_from_script(cls, revision_map, script): - return RevisionStep(revision_map, script, True) - - @classmethod - def downgrade_from_script(cls, revision_map, script): - return RevisionStep(revision_map, script, False) - - @property - def is_downgrade(self): - return not self.is_upgrade - - @property - def short_log(self): - return "%s %s -> %s" % ( - self.name, - util.format_as_comma(self.from_revisions_no_deps), - util.format_as_comma(self.to_revisions_no_deps), - ) - - def __str__(self): - if self.doc: - return "%s %s -> %s, %s" % ( - self.name, - util.format_as_comma(self.from_revisions_no_deps), - util.format_as_comma(self.to_revisions_no_deps), - self.doc, - ) - else: - return self.short_log - - -class RevisionStep(MigrationStep): - def __init__(self, revision_map, revision, is_upgrade): - self.revision_map = revision_map - self.revision = revision - self.is_upgrade = is_upgrade - if is_upgrade: - self.migration_fn = revision.module.upgrade - else: - self.migration_fn = revision.module.downgrade - - def __repr__(self): - return "RevisionStep(%r, is_upgrade=%r)" % ( - self.revision.revision, - self.is_upgrade, - ) - - def __eq__(self, other): - return ( - isinstance(other, RevisionStep) - and other.revision == self.revision - and self.is_upgrade == other.is_upgrade - ) - - @property - def doc(self): - return self.revision.doc - - @property - def from_revisions(self): - if self.is_upgrade: - return self.revision._all_down_revisions - else: - return (self.revision.revision,) - - @property - def from_revisions_no_deps(self): - if self.is_upgrade: - return self.revision._versioned_down_revisions - else: - return (self.revision.revision,) - - @property - def to_revisions(self): - if self.is_upgrade: - return (self.revision.revision,) - else: - return self.revision._all_down_revisions - - @property - def to_revisions_no_deps(self): - if self.is_upgrade: - return (self.revision.revision,) - else: - return self.revision._versioned_down_revisions - - @property - def _has_scalar_down_revision(self): - return len(self.revision._all_down_revisions) == 1 - - def should_delete_branch(self, heads): - """A delete is when we are a. in a downgrade and b. - we are going to the "base" or we are going to a version that - is implied as a dependency on another version that is remaining. 
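To make the ``from_revisions``/``to_revisions`` symmetry concrete, a hedged illustration of how a linear step maps onto the version-table UPDATE performed by :class:`.HeadMaintainer` (revision ids "a1" and "b2" are invented)::

    # upgrade step for revision "b2" (down_revision "a1"):
    #     from_revisions == ("a1",)   to_revisions == ("b2",)
    #     -> UPDATE alembic_version SET version_num='b2'
    #        WHERE version_num='a1'
    # downgrade of the same revision swaps the pair:
    #     from_revisions == ("b2",)   to_revisions == ("a1",)
    #     -> UPDATE alembic_version SET version_num='a1'
    #        WHERE version_num='b2'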
- - """ - if not self.is_downgrade: - return False - - if self.revision.revision not in heads: - return False - - downrevs = self.revision._all_down_revisions - - if not downrevs: - # is a base - return True - else: - # determine what the ultimate "to_revisions" for an - # unmerge would be. If there are none, then we're a delete. - to_revisions = self._unmerge_to_revisions(heads) - return not to_revisions - - def merge_branch_idents(self, heads): - other_heads = set(heads).difference(self.from_revisions) - - if other_heads: - ancestors = set( - r.revision - for r in self.revision_map._get_ancestor_nodes( - self.revision_map.get_revisions(other_heads), check=False - ) - ) - from_revisions = list( - set(self.from_revisions).difference(ancestors) - ) - else: - from_revisions = list(self.from_revisions) - - return ( - # delete revs, update from rev, update to rev - list(from_revisions[0:-1]), - from_revisions[-1], - self.to_revisions[0], - ) - - def _unmerge_to_revisions(self, heads): - other_heads = set(heads).difference([self.revision.revision]) - if other_heads: - ancestors = set( - r.revision - for r in self.revision_map._get_ancestor_nodes( - self.revision_map.get_revisions(other_heads), check=False - ) - ) - return list(set(self.to_revisions).difference(ancestors)) - else: - return self.to_revisions - - def unmerge_branch_idents(self, heads): - to_revisions = self._unmerge_to_revisions(heads) - - return ( - # update from rev, update to rev, insert revs - self.from_revisions[0], - to_revisions[-1], - to_revisions[0:-1], - ) - - def should_create_branch(self, heads): - if not self.is_upgrade: - return False - - downrevs = self.revision._all_down_revisions - - if not downrevs: - # is a base - return True - else: - # none of our downrevs are present, so... - # we have to insert our version. This is true whether - # or not there is only one downrev, or multiple (in the latter - # case, we're a merge point.) 
- if not heads.intersection(downrevs): - return True - else: - return False - - def should_merge_branches(self, heads): - if not self.is_upgrade: - return False - - downrevs = self.revision._all_down_revisions - - if len(downrevs) > 1 and len(heads.intersection(downrevs)) > 1: - return True - - return False - - def should_unmerge_branches(self, heads): - if not self.is_downgrade: - return False - - downrevs = self.revision._all_down_revisions - - if self.revision.revision in heads and len(downrevs) > 1: - return True - - return False - - def update_version_num(self, heads): - if not self._has_scalar_down_revision: - downrev = heads.intersection(self.revision._all_down_revisions) - assert ( - len(downrev) == 1 - ), "Can't do an UPDATE because downrevision is ambiguous" - down_revision = list(downrev)[0] - else: - down_revision = self.revision._all_down_revisions[0] - - if self.is_upgrade: - return down_revision, self.revision.revision - else: - return self.revision.revision, down_revision - - @property - def delete_version_num(self): - return self.revision.revision - - @property - def insert_version_num(self): - return self.revision.revision - - @property - def info(self): - return MigrationInfo( - revision_map=self.revision_map, - up_revisions=self.revision.revision, - down_revisions=self.revision._all_down_revisions, - is_upgrade=self.is_upgrade, - is_stamp=False, - ) - - -class StampStep(MigrationStep): - def __init__(self, from_, to_, is_upgrade, branch_move, revision_map=None): - self.from_ = util.to_tuple(from_, default=()) - self.to_ = util.to_tuple(to_, default=()) - self.is_upgrade = is_upgrade - self.branch_move = branch_move - self.migration_fn = self.stamp_revision - self.revision_map = revision_map - - doc = None - - def stamp_revision(self, **kw): - return None - - def __eq__(self, other): - return ( - isinstance(other, StampStep) - and other.from_revisions == self.revisions - and other.to_revisions == self.to_revisions - and other.branch_move == self.branch_move - and self.is_upgrade == other.is_upgrade - ) - - @property - def from_revisions(self): - return self.from_ - - @property - def to_revisions(self): - return self.to_ - - @property - def from_revisions_no_deps(self): - return self.from_ - - @property - def to_revisions_no_deps(self): - return self.to_ - - @property - def delete_version_num(self): - assert len(self.from_) == 1 - return self.from_[0] - - @property - def insert_version_num(self): - assert len(self.to_) == 1 - return self.to_[0] - - def update_version_num(self, heads): - assert len(self.from_) == 1 - assert len(self.to_) == 1 - return self.from_[0], self.to_[0] - - def merge_branch_idents(self, heads): - return ( - # delete revs, update from rev, update to rev - list(self.from_[0:-1]), - self.from_[-1], - self.to_[0], - ) - - def unmerge_branch_idents(self, heads): - return ( - # update from rev, update to rev, insert revs - self.from_[0], - self.to_[-1], - list(self.to_[0:-1]), - ) - - def should_delete_branch(self, heads): - return self.is_downgrade and self.branch_move - - def should_create_branch(self, heads): - return self.is_upgrade and self.branch_move - - def should_merge_branches(self, heads): - return len(self.from_) > 1 - - def should_unmerge_branches(self, heads): - return len(self.to_) > 1 - - @property - def info(self): - up, down = ( - (self.to_, self.from_) - if self.is_upgrade - else (self.from_, self.to_) - ) - return MigrationInfo( - revision_map=self.revision_map, - up_revisions=up, - down_revisions=down, - is_upgrade=self.is_upgrade, - 
is_stamp=True, - ) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/script/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/script/__init__.py deleted file mode 100644 index 540d627..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/script/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .base import Script # noqa -from .base import ScriptDirectory # noqa - -__all__ = ["ScriptDirectory", "Script"] diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/script/base.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/script/base.py deleted file mode 100644 index b386dea..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/script/base.py +++ /dev/null @@ -1,905 +0,0 @@ -from contextlib import contextmanager -import datetime -import os -import re -import shutil - -from dateutil import tz - -from . import revision -from .. import util -from ..runtime import migration -from ..util import compat - -_sourceless_rev_file = re.compile(r"(?!\.\#|__init__)(.*\.py)(c|o)?$") -_only_source_rev_file = re.compile(r"(?!\.\#|__init__)(.*\.py)$") -_legacy_rev = re.compile(r"([a-f0-9]+)\.py$") -_mod_def_re = re.compile(r"(upgrade|downgrade)_([a-z0-9]+)") -_slug_re = re.compile(r"\w+") -_default_file_template = "%(rev)s_%(slug)s" -_split_on_space_comma = re.compile(r",|(?: +)") - - -class ScriptDirectory(object): - - """Provides operations upon an Alembic script directory. - - This object is useful to get information as to current revisions, - most notably being able to get at the "head" revision, for schemes - that want to test if the current revision in the database is the most - recent:: - - from alembic.script import ScriptDirectory - from alembic.config import Config - config = Config() - config.set_main_option("script_location", "myapp:migrations") - script = ScriptDirectory.from_config(config) - - head_revision = script.get_current_head() - - - - """ - - def __init__( - self, - dir, # noqa - file_template=_default_file_template, - truncate_slug_length=40, - version_locations=None, - sourceless=False, - output_encoding="utf-8", - timezone=None, - ): - self.dir = dir - self.file_template = file_template - self.version_locations = version_locations - self.truncate_slug_length = truncate_slug_length or 40 - self.sourceless = sourceless - self.output_encoding = output_encoding - self.revision_map = revision.RevisionMap(self._load_revisions) - self.timezone = timezone - - if not os.access(dir, os.F_OK): - raise util.CommandError( - "Path doesn't exist: %r. Please use " - "the 'init' command to create a new " - "scripts folder." % dir - ) - - @property - def versions(self): - loc = self._version_locations - if len(loc) > 1: - raise util.CommandError("Multiple version_locations present") - else: - return loc[0] - - @util.memoized_property - def _version_locations(self): - if self.version_locations: - return [ - os.path.abspath(util.coerce_resource_to_filename(location)) - for location in self.version_locations - ] - else: - return (os.path.abspath(os.path.join(self.dir, "versions")),) - - def _load_revisions(self): - if self.version_locations: - paths = [ - vers - for vers in self._version_locations - if os.path.exists(vers) - ] - else: - paths = [self.versions] - - dupes = set() - for vers in paths: - for file_ in Script._list_py_dir(self, vers): - path = os.path.realpath(os.path.join(vers, file_)) - if path in dupes: - util.warn( - "File %s loaded twice! ignoring. Please ensure " - "version_locations is unique." 
% path - ) - continue - dupes.add(path) - script = Script._from_filename(self, vers, file_) - if script is None: - continue - yield script - - @classmethod - def from_config(cls, config): - """Produce a new :class:`.ScriptDirectory` given a :class:`.Config` - instance. - - The :class:`.Config` need only have the ``script_location`` key - present. - - """ - script_location = config.get_main_option("script_location") - if script_location is None: - raise util.CommandError( - "No 'script_location' key " "found in configuration." - ) - truncate_slug_length = config.get_main_option("truncate_slug_length") - if truncate_slug_length is not None: - truncate_slug_length = int(truncate_slug_length) - - version_locations = config.get_main_option("version_locations") - if version_locations: - version_locations = _split_on_space_comma.split(version_locations) - - return ScriptDirectory( - util.coerce_resource_to_filename(script_location), - file_template=config.get_main_option( - "file_template", _default_file_template - ), - truncate_slug_length=truncate_slug_length, - sourceless=config.get_main_option("sourceless") == "true", - output_encoding=config.get_main_option("output_encoding", "utf-8"), - version_locations=version_locations, - timezone=config.get_main_option("timezone"), - ) - - @contextmanager - def _catch_revision_errors( - self, - ancestor=None, - multiple_heads=None, - start=None, - end=None, - resolution=None, - ): - try: - yield - except revision.RangeNotAncestorError as rna: - if start is None: - start = rna.lower - if end is None: - end = rna.upper - if not ancestor: - ancestor = ( - "Requested range %(start)s:%(end)s does not refer to " - "ancestor/descendant revisions along the same branch" - ) - ancestor = ancestor % {"start": start, "end": end} - compat.raise_from_cause(util.CommandError(ancestor)) - except revision.MultipleHeads as mh: - if not multiple_heads: - multiple_heads = ( - "Multiple head revisions are present for given " - "argument '%(head_arg)s'; please " - "specify a specific target revision, " - "'@%(head_arg)s' to " - "narrow to a specific head, or 'heads' for all heads" - ) - multiple_heads = multiple_heads % { - "head_arg": end or mh.argument, - "heads": util.format_as_comma(mh.heads), - } - compat.raise_from_cause(util.CommandError(multiple_heads)) - except revision.ResolutionError as re: - if resolution is None: - resolution = "Can't locate revision identified by '%s'" % ( - re.argument - ) - compat.raise_from_cause(util.CommandError(resolution)) - except revision.RevisionError as err: - compat.raise_from_cause(util.CommandError(err.args[0])) - - def walk_revisions(self, base="base", head="heads"): - """Iterate through all revisions. - - :param base: the base revision, or "base" to start from the - empty revision. - - :param head: the head revision; defaults to "heads" to indicate - all head revisions. May also be "head" to indicate a single - head revision. - - .. versionchanged:: 0.7.0 the "head" identifier now refers to - the head of a non-branched repository only; use "heads" to - refer to the set of all head branches simultaneously. - - """ - with self._catch_revision_errors(start=base, end=head): - for rev in self.revision_map.iterate_revisions( - head, base, inclusive=True, assert_relative_length=False - ): - yield rev - - def get_revisions(self, id_): - """Return the :class:`.Script` instance with the given rev identifier, - symbolic name, or sequence of identifiers. - - .. 
versionadded:: 0.7.0 - - """ - with self._catch_revision_errors(): - return self.revision_map.get_revisions(id_) - - def get_all_current(self, id_): - with self._catch_revision_errors(): - top_revs = set(self.revision_map.get_revisions(id_)) - top_revs.update( - self.revision_map._get_ancestor_nodes( - list(top_revs), include_dependencies=True - ) - ) - top_revs = self.revision_map._filter_into_branch_heads(top_revs) - return top_revs - - def get_revision(self, id_): - """Return the :class:`.Script` instance with the given rev id. - - .. seealso:: - - :meth:`.ScriptDirectory.get_revisions` - - """ - - with self._catch_revision_errors(): - return self.revision_map.get_revision(id_) - - def as_revision_number(self, id_): - """Convert a symbolic revision, i.e. 'head' or 'base', into - an actual revision number.""" - - with self._catch_revision_errors(): - rev, branch_name = self.revision_map._resolve_revision_number(id_) - - if not rev: - # convert () to None - return None - elif id_ == "heads": - return rev - else: - return rev[0] - - def iterate_revisions(self, upper, lower): - """Iterate through script revisions, starting at the given - upper revision identifier and ending at the lower. - - The traversal uses strictly the `down_revision` - marker inside each migration script, so - it is a requirement that upper >= lower, - else you'll get nothing back. - - The iterator yields :class:`.Script` objects. - - .. seealso:: - - :meth:`.RevisionMap.iterate_revisions` - - """ - return self.revision_map.iterate_revisions(upper, lower) - - def get_current_head(self): - """Return the current head revision. - - If the script directory has multiple heads - due to branching, an error is raised; - :meth:`.ScriptDirectory.get_heads` should be - preferred. - - :return: a string revision number. - - .. seealso:: - - :meth:`.ScriptDirectory.get_heads` - - """ - with self._catch_revision_errors( - multiple_heads=( - "The script directory has multiple heads (due to branching)." - "Please use get_heads(), or merge the branches using " - "alembic merge." - ) - ): - return self.revision_map.get_current_head() - - def get_heads(self): - """Return all "versioned head" revisions as strings. - - This is normally a list of length one, - unless branches are present. The - :meth:`.ScriptDirectory.get_current_head()` method - can be used normally when a script directory - has only one head. - - :return: a tuple of string revision numbers. - """ - return list(self.revision_map.heads) - - def get_base(self): - """Return the "base" revision as a string. - - This is the revision number of the script that - has a ``down_revision`` of None. - - If the script directory has multiple bases, an error is raised; - :meth:`.ScriptDirectory.get_bases` should be - preferred. - - """ - bases = self.get_bases() - if len(bases) > 1: - raise util.CommandError( - "The script directory has multiple bases. " - "Please use get_bases()." - ) - elif bases: - return bases[0] - else: - return None - - def get_bases(self): - """return all "base" revisions as strings. - - This is the revision number of all scripts that - have a ``down_revision`` of None. - - .. 
versionadded:: 0.7.0 - - """ - return list(self.revision_map.bases) - - def _upgrade_revs(self, destination, current_rev): - with self._catch_revision_errors( - ancestor="Destination %(end)s is not a valid upgrade " - "target from current head(s)", - end=destination, - ): - revs = self.revision_map.iterate_revisions( - destination, current_rev, implicit_base=True - ) - revs = list(revs) - return [ - migration.MigrationStep.upgrade_from_script( - self.revision_map, script - ) - for script in reversed(list(revs)) - ] - - def _downgrade_revs(self, destination, current_rev): - with self._catch_revision_errors( - ancestor="Destination %(end)s is not a valid downgrade " - "target from current head(s)", - end=destination, - ): - revs = self.revision_map.iterate_revisions( - current_rev, destination, select_for_downgrade=True - ) - return [ - migration.MigrationStep.downgrade_from_script( - self.revision_map, script - ) - for script in revs - ] - - def _stamp_revs(self, revision, heads): - with self._catch_revision_errors( - multiple_heads="Multiple heads are present; please specify a " - "single target revision" - ): - - heads = self.get_revisions(heads) - - # filter for lineage will resolve things like - # branchname@base, version@base, etc. - filtered_heads = self.revision_map.filter_for_lineage( - heads, revision, include_dependencies=True - ) - - steps = [] - - dests = self.get_revisions(revision) or [None] - for dest in dests: - if dest is None: - # dest is 'base'. Return a "delete branch" migration - # for all applicable heads. - steps.extend( - [ - migration.StampStep( - head.revision, - None, - False, - True, - self.revision_map, - ) - for head in filtered_heads - ] - ) - continue - elif dest in filtered_heads: - # the dest is already in the version table, do nothing. - continue - - # figure out if the dest is a descendant or an - # ancestor of the selected nodes - descendants = set( - self.revision_map._get_descendant_nodes([dest]) - ) - ancestors = set(self.revision_map._get_ancestor_nodes([dest])) - - if descendants.intersection(filtered_heads): - # heads are above the target, so this is a downgrade. - # we can treat them as a "merge", single step. - assert not ancestors.intersection(filtered_heads) - todo_heads = [head.revision for head in filtered_heads] - step = migration.StampStep( - todo_heads, - dest.revision, - False, - False, - self.revision_map, - ) - steps.append(step) - continue - elif ancestors.intersection(filtered_heads): - # heads are below the target, so this is an upgrade. - # we can treat them as a "merge", single step. - todo_heads = [head.revision for head in filtered_heads] - step = migration.StampStep( - todo_heads, - dest.revision, - True, - False, - self.revision_map, - ) - steps.append(step) - continue - else: - # destination is in a branch not represented, - # treat it as new branch - step = migration.StampStep( - (), dest.revision, True, True, self.revision_map - ) - steps.append(step) - continue - return steps - - def run_env(self): - """Run the script environment. - - This basically runs the ``env.py`` script present - in the migration environment. It is called exclusively - by the command functions in :mod:`alembic.command`. 
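As a hedged sketch (not from the original docs): the command functions
normally reach this via the public API, e.g.::

    from alembic.config import Config
    from alembic import command

    cfg = Config("alembic.ini")   # assumed ini path
    command.upgrade(cfg, "head")  # ends up invoking run_env() on env.py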
- - - """ - util.load_python_file(self.dir, "env.py") - - @property - def env_py_location(self): - return os.path.abspath(os.path.join(self.dir, "env.py")) - - def _generate_template(self, src, dest, **kw): - util.status( - "Generating %s" % os.path.abspath(dest), - util.template_to_file, - src, - dest, - self.output_encoding, - **kw - ) - - def _copy_file(self, src, dest): - util.status( - "Generating %s" % os.path.abspath(dest), shutil.copy, src, dest - ) - - def _ensure_directory(self, path): - path = os.path.abspath(path) - if not os.path.exists(path): - util.status("Creating directory %s" % path, os.makedirs, path) - - def _generate_create_date(self): - if self.timezone is not None: - # First, assume correct capitalization - tzinfo = tz.gettz(self.timezone) - if tzinfo is None: - # Fall back to uppercase - tzinfo = tz.gettz(self.timezone.upper()) - if tzinfo is None: - raise util.CommandError( - "Can't locate timezone: %s" % self.timezone - ) - create_date = ( - datetime.datetime.utcnow() - .replace(tzinfo=tz.tzutc()) - .astimezone(tzinfo) - ) - else: - create_date = datetime.datetime.now() - return create_date - - def generate_revision( - self, - revid, - message, - head=None, - refresh=False, - splice=False, - branch_labels=None, - version_path=None, - depends_on=None, - **kw - ): - """Generate a new revision file. - - This runs the ``script.py.mako`` template, given - template arguments, and creates a new file. - - :param revid: String revision id. Typically this - comes from ``alembic.util.rev_id()``. - :param message: the revision message, the one passed - by the -m argument to the ``revision`` command. - :param head: the head revision to generate against. Defaults - to the current "head" if no branches are present, else raises - an exception. - - .. versionadded:: 0.7.0 - - :param splice: if True, allow the "head" version to not be an - actual head; otherwise, the selected head must be a head - (e.g. endpoint) revision. - :param refresh: deprecated. - - """ - if head is None: - head = "head" - - try: - Script.verify_rev_id(revid) - except revision.RevisionError as err: - compat.raise_from_cause(util.CommandError(err.args[0])) - - with self._catch_revision_errors( - multiple_heads=( - "Multiple heads are present; please specify the head " - "revision on which the new revision should be based, " - "or perform a merge." 
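                # this message is rendered by _catch_revision_errors when
                # resolving "head" below yields more than one branch head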
- ) - ): - heads = self.revision_map.get_revisions(head) - - if len(set(heads)) != len(heads): - raise util.CommandError("Duplicate head revisions specified") - - create_date = self._generate_create_date() - - if version_path is None: - if len(self._version_locations) > 1: - for head in heads: - if head is not None: - version_path = os.path.dirname(head.path) - break - else: - raise util.CommandError( - "Multiple version locations present, " - "please specify --version-path" - ) - else: - version_path = self.versions - - norm_path = os.path.normpath(os.path.abspath(version_path)) - for vers_path in self._version_locations: - if os.path.normpath(vers_path) == norm_path: - break - else: - raise util.CommandError( - "Path %s is not represented in current " - "version locations" % version_path - ) - - if self.version_locations: - self._ensure_directory(version_path) - - path = self._rev_path(version_path, revid, message, create_date) - - if not splice: - for head in heads: - if head is not None and not head.is_head: - raise util.CommandError( - "Revision %s is not a head revision; please specify " - "--splice to create a new branch from this revision" - % head.revision - ) - - if depends_on: - with self._catch_revision_errors(): - depends_on = [ - dep - if dep in rev.branch_labels # maintain branch labels - else rev.revision # resolve partial revision identifiers - for rev, dep in [ - (self.revision_map.get_revision(dep), dep) - for dep in util.to_list(depends_on) - ] - ] - - self._generate_template( - os.path.join(self.dir, "script.py.mako"), - path, - up_revision=str(revid), - down_revision=revision.tuple_rev_as_scalar( - tuple(h.revision if h is not None else None for h in heads) - ), - branch_labels=util.to_tuple(branch_labels), - depends_on=revision.tuple_rev_as_scalar(depends_on), - create_date=create_date, - comma=util.format_as_comma, - message=message if message is not None else ("empty message"), - **kw - ) - try: - script = Script._from_path(self, path) - except revision.RevisionError as err: - compat.raise_from_cause(util.CommandError(err.args[0])) - if branch_labels and not script.branch_labels: - raise util.CommandError( - "Version %s specified branch_labels %s, however the " - "migration file %s does not have them; have you upgraded " - "your script.py.mako to include the " - "'branch_labels' section?" - % (script.revision, branch_labels, script.path) - ) - - self.revision_map.add_revision(script) - return script - - def _rev_path(self, path, rev_id, message, create_date): - slug = "_".join(_slug_re.findall(message or "")).lower() - if len(slug) > self.truncate_slug_length: - slug = slug[: self.truncate_slug_length].rsplit("_", 1)[0] + "_" - filename = "%s.py" % ( - self.file_template - % { - "rev": rev_id, - "slug": slug, - "year": create_date.year, - "month": create_date.month, - "day": create_date.day, - "hour": create_date.hour, - "minute": create_date.minute, - "second": create_date.second, - } - ) - return os.path.join(path, filename) - - -class Script(revision.Revision): - - """Represent a single revision file in a ``versions/`` directory. - - The :class:`.Script` instance is returned by methods - such as :meth:`.ScriptDirectory.iterate_revisions`. 
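    A hedged usage sketch, assuming an ``alembic.ini`` whose
    ``script_location`` points at a migration environment::

        from alembic.config import Config
        from alembic.script import ScriptDirectory

        script_dir = ScriptDirectory.from_config(Config("alembic.ini"))
        for sc in script_dir.walk_revisions(base="base", head="heads"):
            print(sc.revision, sc.doc)  # each sc is a Script instance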
- - """ - - def __init__(self, module, rev_id, path): - self.module = module - self.path = path - super(Script, self).__init__( - rev_id, - module.down_revision, - branch_labels=util.to_tuple( - getattr(module, "branch_labels", None), default=() - ), - dependencies=util.to_tuple( - getattr(module, "depends_on", None), default=() - ), - ) - - module = None - """The Python module representing the actual script itself.""" - - path = None - """Filesystem path of the script.""" - - _db_current_indicator = None - """Utility variable which when set will cause string output to indicate - this is a "current" version in some database""" - - @property - def doc(self): - """Return the docstring given in the script.""" - - return re.split("\n\n", self.longdoc)[0] - - @property - def longdoc(self): - """Return the docstring given in the script.""" - - doc = self.module.__doc__ - if doc: - if hasattr(self.module, "_alembic_source_encoding"): - doc = doc.decode(self.module._alembic_source_encoding) - return doc.strip() - else: - return "" - - @property - def log_entry(self): - entry = "Rev: %s%s%s%s%s\n" % ( - self.revision, - " (head)" if self.is_head else "", - " (branchpoint)" if self.is_branch_point else "", - " (mergepoint)" if self.is_merge_point else "", - " (current)" if self._db_current_indicator else "", - ) - if self.is_merge_point: - entry += "Merges: %s\n" % (self._format_down_revision(),) - else: - entry += "Parent: %s\n" % (self._format_down_revision(),) - - if self.dependencies: - entry += "Also depends on: %s\n" % ( - util.format_as_comma(self.dependencies) - ) - - if self.is_branch_point: - entry += "Branches into: %s\n" % ( - util.format_as_comma(self.nextrev) - ) - - if self.branch_labels: - entry += "Branch names: %s\n" % ( - util.format_as_comma(self.branch_labels), - ) - - entry += "Path: %s\n" % (self.path,) - - entry += "\n%s\n" % ( - "\n".join(" %s" % para for para in self.longdoc.splitlines()) - ) - return entry - - def __str__(self): - return "%s -> %s%s%s%s, %s" % ( - self._format_down_revision(), - self.revision, - " (head)" if self.is_head else "", - " (branchpoint)" if self.is_branch_point else "", - " (mergepoint)" if self.is_merge_point else "", - self.doc, - ) - - def _head_only( - self, - include_branches=False, - include_doc=False, - include_parents=False, - tree_indicators=True, - head_indicators=True, - ): - text = self.revision - if include_parents: - if self.dependencies: - text = "%s (%s) -> %s" % ( - self._format_down_revision(), - util.format_as_comma(self.dependencies), - text, - ) - else: - text = "%s -> %s" % (self._format_down_revision(), text) - if include_branches and self.branch_labels: - text += " (%s)" % util.format_as_comma(self.branch_labels) - if head_indicators or tree_indicators: - text += "%s%s%s" % ( - " (head)" if self._is_real_head else "", - " (effective head)" - if self.is_head and not self._is_real_head - else "", - " (current)" if self._db_current_indicator else "", - ) - if tree_indicators: - text += "%s%s" % ( - " (branchpoint)" if self.is_branch_point else "", - " (mergepoint)" if self.is_merge_point else "", - ) - if include_doc: - text += ", %s" % self.doc - return text - - def cmd_format( - self, - verbose, - include_branches=False, - include_doc=False, - include_parents=False, - tree_indicators=True, - ): - if verbose: - return self.log_entry - else: - return self._head_only( - include_branches, include_doc, include_parents, tree_indicators - ) - - def _format_down_revision(self): - if not self.down_revision: - return "" - else: - 
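            # one or more real down revisions; join them for display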
return util.format_as_comma(self._versioned_down_revisions) - - @classmethod - def _from_path(cls, scriptdir, path): - dir_, filename = os.path.split(path) - return cls._from_filename(scriptdir, dir_, filename) - - @classmethod - def _list_py_dir(cls, scriptdir, path): - if scriptdir.sourceless: - # read files in version path, e.g. pyc or pyo files - # in the immediate path - paths = os.listdir(path) - - names = set(fname.split(".")[0] for fname in paths) - - # look for __pycache__ - if os.path.exists(os.path.join(path, "__pycache__")): - # add all files from __pycache__ whose filename is not - # already in the names we got from the version directory. - # add as relative paths including __pycache__ token - paths.extend( - os.path.join("__pycache__", pyc) - for pyc in os.listdir(os.path.join(path, "__pycache__")) - if pyc.split(".")[0] not in names - ) - return paths - else: - return os.listdir(path) - - @classmethod - def _from_filename(cls, scriptdir, dir_, filename): - if scriptdir.sourceless: - py_match = _sourceless_rev_file.match(filename) - else: - py_match = _only_source_rev_file.match(filename) - - if not py_match: - return None - - py_filename = py_match.group(1) - - if scriptdir.sourceless: - is_c = py_match.group(2) == "c" - is_o = py_match.group(2) == "o" - else: - is_c = is_o = False - - if is_o or is_c: - py_exists = os.path.exists(os.path.join(dir_, py_filename)) - pyc_exists = os.path.exists(os.path.join(dir_, py_filename + "c")) - - # prefer .py over .pyc because we'd like to get the - # source encoding; prefer .pyc over .pyo because we'd like to - # have the docstrings which a -OO file would not have - if py_exists or is_o and pyc_exists: - return None - - module = util.load_python_file(dir_, filename) - - if not hasattr(module, "revision"): - # attempt to get the revision id from the script name, - # this for legacy only - m = _legacy_rev.match(filename) - if not m: - raise util.CommandError( - "Could not determine revision id from filename %s. " - "Be sure the 'revision' variable is " - "declared inside the script (please see 'Upgrading " - "from Alembic 0.1 to 0.2' in the documentation)." - % filename - ) - else: - revision = m.group(1) - else: - revision = module.revision - return Script(module, revision, os.path.join(dir_, filename)) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/script/revision.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/script/revision.py deleted file mode 100644 index af08688..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/script/revision.py +++ /dev/null @@ -1,1038 +0,0 @@ -import collections -import re - -from sqlalchemy import util as sqlautil - -from .. 
import util -from ..util import compat - -_relative_destination = re.compile(r"(?:(.+?)@)?(\w+)?((?:\+|-)\d+)") -_revision_illegal_chars = ["@", "-", "+"] - - -class RevisionError(Exception): - pass - - -class RangeNotAncestorError(RevisionError): - def __init__(self, lower, upper): - self.lower = lower - self.upper = upper - super(RangeNotAncestorError, self).__init__( - "Revision %s is not an ancestor of revision %s" - % (lower or "base", upper or "base") - ) - - -class MultipleHeads(RevisionError): - def __init__(self, heads, argument): - self.heads = heads - self.argument = argument - super(MultipleHeads, self).__init__( - "Multiple heads are present for given argument '%s'; " - "%s" % (argument, ", ".join(heads)) - ) - - -class ResolutionError(RevisionError): - def __init__(self, message, argument): - super(ResolutionError, self).__init__(message) - self.argument = argument - - -class RevisionMap(object): - """Maintains a map of :class:`.Revision` objects. - - :class:`.RevisionMap` is used by :class:`.ScriptDirectory` to maintain - and traverse the collection of :class:`.Script` objects, which are - themselves instances of :class:`.Revision`. - - """ - - def __init__(self, generator): - """Construct a new :class:`.RevisionMap`. - - :param generator: a zero-arg callable that will generate an iterable - of :class:`.Revision` instances to be used. These are typically - :class:`.Script` subclasses within regular Alembic use. - - """ - self._generator = generator - - @util.memoized_property - def heads(self): - """All "head" revisions as strings. - - This is normally a tuple of length one, - unless unmerged branches are present. - - :return: a tuple of string revision numbers. - - """ - self._revision_map - return self.heads - - @util.memoized_property - def bases(self): - """All "base" revisions as strings. - - These are revisions that have a ``down_revision`` of None, - or empty tuple. - - :return: a tuple of string revision numbers. - - """ - self._revision_map - return self.bases - - @util.memoized_property - def _real_heads(self): - """All "real" head revisions as strings. - - :return: a tuple of string revision numbers. - - """ - self._revision_map - return self._real_heads - - @util.memoized_property - def _real_bases(self): - """All "real" base revisions as strings. - - :return: a tuple of string revision numbers. - - """ - self._revision_map - return self._real_bases - - @util.memoized_property - def _revision_map(self): - """memoized attribute, initializes the revision map from the - initial collection. - - """ - map_ = {} - - heads = sqlautil.OrderedSet() - _real_heads = sqlautil.OrderedSet() - self.bases = () - self._real_bases = () - - has_branch_labels = set() - has_depends_on = set() - for revision in self._generator(): - - if revision.revision in map_: - util.warn( - "Revision %s is present more than once" % revision.revision - ) - map_[revision.revision] = revision - if revision.branch_labels: - has_branch_labels.add(revision) - if revision.dependencies: - has_depends_on.add(revision) - heads.add(revision.revision) - _real_heads.add(revision.revision) - if revision.is_base: - self.bases += (revision.revision,) - if revision._is_real_base: - self._real_bases += (revision.revision,) - - # add the branch_labels to the map_. We'll need these - # to resolve the dependencies. 
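        # Note: labels must be in map_ before _add_depends_on runs below,
        # since a dependency may be spelled as a branch label rather than
        # a raw revision id and is resolved through map_.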
- for revision in has_branch_labels: - self._map_branch_labels(revision, map_) - - for revision in has_depends_on: - self._add_depends_on(revision, map_) - - for rev in map_.values(): - for downrev in rev._all_down_revisions: - if downrev not in map_: - util.warn( - "Revision %s referenced from %s is not present" - % (downrev, rev) - ) - down_revision = map_[downrev] - down_revision.add_nextrev(rev) - if downrev in rev._versioned_down_revisions: - heads.discard(downrev) - _real_heads.discard(downrev) - - map_[None] = map_[()] = None - self.heads = tuple(heads) - self._real_heads = tuple(_real_heads) - - for revision in has_branch_labels: - self._add_branches(revision, map_, map_branch_labels=False) - return map_ - - def _map_branch_labels(self, revision, map_): - if revision.branch_labels: - for branch_label in revision._orig_branch_labels: - if branch_label in map_: - raise RevisionError( - "Branch name '%s' in revision %s already " - "used by revision %s" - % ( - branch_label, - revision.revision, - map_[branch_label].revision, - ) - ) - map_[branch_label] = revision - - def _add_branches(self, revision, map_, map_branch_labels=True): - if map_branch_labels: - self._map_branch_labels(revision, map_) - - if revision.branch_labels: - revision.branch_labels.update(revision.branch_labels) - for node in self._get_descendant_nodes( - [revision], map_, include_dependencies=False - ): - node.branch_labels.update(revision.branch_labels) - - parent = node - while ( - parent - and not parent._is_real_branch_point - and not parent.is_merge_point - ): - - parent.branch_labels.update(revision.branch_labels) - if parent.down_revision: - parent = map_[parent.down_revision] - else: - break - - def _add_depends_on(self, revision, map_): - if revision.dependencies: - deps = [map_[dep] for dep in util.to_tuple(revision.dependencies)] - revision._resolved_dependencies = tuple([d.revision for d in deps]) - - def add_revision(self, revision, _replace=False): - """add a single revision to an existing map. - - This method is for single-revision use cases, it's not - appropriate for fully populating an entire revision map. - - """ - map_ = self._revision_map - if not _replace and revision.revision in map_: - util.warn( - "Revision %s is present more than once" % revision.revision - ) - elif _replace and revision.revision not in map_: - raise Exception("revision %s not in map" % revision.revision) - - map_[revision.revision] = revision - self._add_branches(revision, map_) - self._add_depends_on(revision, map_) - - if revision.is_base: - self.bases += (revision.revision,) - if revision._is_real_base: - self._real_bases += (revision.revision,) - for downrev in revision._all_down_revisions: - if downrev not in map_: - util.warn( - "Revision %s referenced from %s is not present" - % (downrev, revision) - ) - map_[downrev].add_nextrev(revision) - if revision._is_real_head: - self._real_heads = tuple( - head - for head in self._real_heads - if head - not in set(revision._all_down_revisions).union( - [revision.revision] - ) - ) + (revision.revision,) - if revision.is_head: - self.heads = tuple( - head - for head in self.heads - if head - not in set(revision._versioned_down_revisions).union( - [revision.revision] - ) - ) + (revision.revision,) - - def get_current_head(self, branch_label=None): - """Return the current head revision. - - If the script directory has multiple heads - due to branching, an error is raised; - :meth:`.ScriptDirectory.get_heads` should be - preferred. 
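        A hedged sketch: with two unmerged branch heads present, an
        unqualified call raises :class:`.MultipleHeads`, while a
        branch-qualified call narrows the candidates::

            head = revision_map.get_current_head(branch_label="feature_x")
            # "feature_x" is a hypothetical branch label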
- - :param branch_label: optional branch name which will limit the - heads considered to those which include that branch_label. - - :return: a string revision number. - - .. seealso:: - - :meth:`.ScriptDirectory.get_heads` - - """ - current_heads = self.heads - if branch_label: - current_heads = self.filter_for_lineage( - current_heads, branch_label - ) - if len(current_heads) > 1: - raise MultipleHeads( - current_heads, - "%s@head" % branch_label if branch_label else "head", - ) - - if current_heads: - return current_heads[0] - else: - return None - - def _get_base_revisions(self, identifier): - return self.filter_for_lineage(self.bases, identifier) - - def get_revisions(self, id_): - """Return the :class:`.Revision` instances with the given rev id - or identifiers. - - May be given a single identifier, a sequence of identifiers, or the - special symbols "head" or "base". The result is a tuple of one - or more identifiers, or an empty tuple in the case of "base". - - In the cases where 'head', 'heads' is requested and the - revision map is empty, returns an empty tuple. - - Supports partial identifiers, where the given identifier - is matched against all identifiers that start with the given - characters; if there is exactly one match, that determines the - full revision. - - """ - if isinstance(id_, (list, tuple, set, frozenset)): - return sum([self.get_revisions(id_elem) for id_elem in id_], ()) - else: - resolved_id, branch_label = self._resolve_revision_number(id_) - return tuple( - self._revision_for_ident(rev_id, branch_label) - for rev_id in resolved_id - ) - - def get_revision(self, id_): - """Return the :class:`.Revision` instance with the given rev id. - - If a symbolic name such as "head" or "base" is given, resolves - the identifier into the current head or base revision. If the symbolic - name refers to multiples, :class:`.MultipleHeads` is raised. - - Supports partial identifiers, where the given identifier - is matched against all identifiers that start with the given - characters; if there is exactly one match, that determines the - full revision. - - """ - - resolved_id, branch_label = self._resolve_revision_number(id_) - if len(resolved_id) > 1: - raise MultipleHeads(resolved_id, id_) - elif resolved_id: - resolved_id = resolved_id[0] - - return self._revision_for_ident(resolved_id, branch_label) - - def _resolve_branch(self, branch_label): - try: - branch_rev = self._revision_map[branch_label] - except KeyError: - try: - nonbranch_rev = self._revision_for_ident(branch_label) - except ResolutionError: - raise ResolutionError( - "No such branch: '%s'" % branch_label, branch_label - ) - else: - return nonbranch_rev - else: - return branch_rev - - def _revision_for_ident(self, resolved_id, check_branch=None): - if check_branch: - branch_rev = self._resolve_branch(check_branch) - else: - branch_rev = None - - try: - revision = self._revision_map[resolved_id] - except KeyError: - # break out to avoid misleading py3k stack traces - revision = False - if revision is False: - # do a partial lookup - revs = [ - x - for x in self._revision_map - if x and x.startswith(resolved_id) - ] - if branch_rev: - revs = self.filter_for_lineage(revs, check_branch) - if not revs: - raise ResolutionError( - "No such revision or branch '%s'" % resolved_id, - resolved_id, - ) - elif len(revs) > 1: - raise ResolutionError( - "Multiple revisions start " - "with '%s': %s..." 
- % (resolved_id, ", ".join("'%s'" % r for r in revs[0:3])), - resolved_id, - ) - else: - revision = self._revision_map[revs[0]] - - if check_branch and revision is not None: - if not self._shares_lineage( - revision.revision, branch_rev.revision - ): - raise ResolutionError( - "Revision %s is not a member of branch '%s'" - % (revision.revision, check_branch), - resolved_id, - ) - return revision - - def _filter_into_branch_heads(self, targets): - targets = set(targets) - - for rev in list(targets): - if targets.intersection( - self._get_descendant_nodes([rev], include_dependencies=False) - ).difference([rev]): - targets.discard(rev) - return targets - - def filter_for_lineage( - self, targets, check_against, include_dependencies=False - ): - id_, branch_label = self._resolve_revision_number(check_against) - - shares = [] - if branch_label: - shares.append(branch_label) - if id_: - shares.extend(id_) - - return [ - tg - for tg in targets - if self._shares_lineage( - tg, shares, include_dependencies=include_dependencies - ) - ] - - def _shares_lineage( - self, target, test_against_revs, include_dependencies=False - ): - if not test_against_revs: - return True - if not isinstance(target, Revision): - target = self._revision_for_ident(target) - - test_against_revs = [ - self._revision_for_ident(test_against_rev) - if not isinstance(test_against_rev, Revision) - else test_against_rev - for test_against_rev in util.to_tuple( - test_against_revs, default=() - ) - ] - - return bool( - set( - self._get_descendant_nodes( - [target], include_dependencies=include_dependencies - ) - ) - .union( - self._get_ancestor_nodes( - [target], include_dependencies=include_dependencies - ) - ) - .intersection(test_against_revs) - ) - - def _resolve_revision_number(self, id_): - if isinstance(id_, compat.string_types) and "@" in id_: - branch_label, id_ = id_.split("@", 1) - else: - branch_label = None - - # ensure map is loaded - self._revision_map - if id_ == "heads": - if branch_label: - return ( - self.filter_for_lineage(self.heads, branch_label), - branch_label, - ) - else: - return self._real_heads, branch_label - elif id_ == "head": - current_head = self.get_current_head(branch_label) - if current_head: - return (current_head,), branch_label - else: - return (), branch_label - elif id_ == "base" or id_ is None: - return (), branch_label - else: - return util.to_tuple(id_, default=None), branch_label - - def _relative_iterate( - self, - destination, - source, - is_upwards, - implicit_base, - inclusive, - assert_relative_length, - ): - if isinstance(destination, compat.string_types): - match = _relative_destination.match(destination) - if not match: - return None - else: - return None - - relative = int(match.group(3)) - symbol = match.group(2) - branch_label = match.group(1) - - reldelta = 1 if inclusive and not symbol else 0 - - if is_upwards: - if branch_label: - from_ = "%s@head" % branch_label - elif symbol: - if symbol.startswith("head"): - from_ = symbol - else: - from_ = "%s@head" % symbol - else: - from_ = "head" - to_ = source - else: - if branch_label: - to_ = "%s@base" % branch_label - elif symbol: - to_ = "%s@base" % symbol - else: - to_ = "base" - from_ = source - - revs = list( - self._iterate_revisions( - from_, to_, inclusive=inclusive, implicit_base=implicit_base - ) - ) - - if symbol: - if branch_label: - symbol_rev = self.get_revision( - "%s@%s" % (branch_label, symbol) - ) - else: - symbol_rev = self.get_revision(symbol) - if symbol.startswith("head"): - index = 0 - elif symbol == 
"base": - index = len(revs) - 1 - else: - range_ = compat.range(len(revs) - 1, 0, -1) - for index in range_: - if symbol_rev.revision == revs[index].revision: - break - else: - index = 0 - else: - index = 0 - if is_upwards: - revs = revs[index - relative - reldelta :] - if ( - not index - and assert_relative_length - and len(revs) < abs(relative - reldelta) - ): - raise RevisionError( - "Relative revision %s didn't " - "produce %d migrations" % (destination, abs(relative)) - ) - else: - revs = revs[0 : index - relative + reldelta] - if ( - not index - and assert_relative_length - and len(revs) != abs(relative) + reldelta - ): - raise RevisionError( - "Relative revision %s didn't " - "produce %d migrations" % (destination, abs(relative)) - ) - - return iter(revs) - - def iterate_revisions( - self, - upper, - lower, - implicit_base=False, - inclusive=False, - assert_relative_length=True, - select_for_downgrade=False, - ): - """Iterate through script revisions, starting at the given - upper revision identifier and ending at the lower. - - The traversal uses strictly the `down_revision` - marker inside each migration script, so - it is a requirement that upper >= lower, - else you'll get nothing back. - - The iterator yields :class:`.Revision` objects. - - """ - - relative_upper = self._relative_iterate( - upper, - lower, - True, - implicit_base, - inclusive, - assert_relative_length, - ) - if relative_upper: - return relative_upper - - relative_lower = self._relative_iterate( - lower, - upper, - False, - implicit_base, - inclusive, - assert_relative_length, - ) - if relative_lower: - return relative_lower - - return self._iterate_revisions( - upper, - lower, - inclusive=inclusive, - implicit_base=implicit_base, - select_for_downgrade=select_for_downgrade, - ) - - def _get_descendant_nodes( - self, - targets, - map_=None, - check=False, - omit_immediate_dependencies=False, - include_dependencies=True, - ): - - if omit_immediate_dependencies: - - def fn(rev): - if rev not in targets: - return rev._all_nextrev - else: - return rev.nextrev - - elif include_dependencies: - - def fn(rev): - return rev._all_nextrev - - else: - - def fn(rev): - return rev.nextrev - - return self._iterate_related_revisions( - fn, targets, map_=map_, check=check - ) - - def _get_ancestor_nodes( - self, targets, map_=None, check=False, include_dependencies=True - ): - - if include_dependencies: - - def fn(rev): - return rev._all_down_revisions - - else: - - def fn(rev): - return rev._versioned_down_revisions - - return self._iterate_related_revisions( - fn, targets, map_=map_, check=check - ) - - def _iterate_related_revisions(self, fn, targets, map_, check=False): - if map_ is None: - map_ = self._revision_map - - seen = set() - todo = collections.deque() - for target in targets: - - todo.append(target) - if check: - per_target = set() - - while todo: - rev = todo.pop() - if check: - per_target.add(rev) - - if rev in seen: - continue - seen.add(rev) - todo.extend(map_[rev_id] for rev_id in fn(rev)) - yield rev - if check: - overlaps = per_target.intersection(targets).difference( - [target] - ) - if overlaps: - raise RevisionError( - "Requested revision %s overlaps with " - "other requested revisions %s" - % ( - target.revision, - ", ".join(r.revision for r in overlaps), - ) - ) - - def _iterate_revisions( - self, - upper, - lower, - inclusive=True, - implicit_base=False, - select_for_downgrade=False, - ): - """iterate revisions from upper to lower. 
- - The traversal is depth-first within branches, and breadth-first - across branches as a whole. - - """ - - requested_lowers = self.get_revisions(lower) - - # some complexity to accommodate an iteration where some - # branches are starting from nothing, and others are starting - # from a given point. Additionally, if the bottom branch - # is specified using a branch identifier, then we limit operations - # to just that branch. - - limit_to_lower_branch = isinstance( - lower, compat.string_types - ) and lower.endswith("@base") - - uppers = util.dedupe_tuple(self.get_revisions(upper)) - - if not uppers and not requested_lowers: - return - - upper_ancestors = set(self._get_ancestor_nodes(uppers, check=True)) - - if limit_to_lower_branch: - lowers = self.get_revisions(self._get_base_revisions(lower)) - elif implicit_base and requested_lowers: - lower_ancestors = set(self._get_ancestor_nodes(requested_lowers)) - lower_descendants = set( - self._get_descendant_nodes(requested_lowers) - ) - base_lowers = set() - candidate_lowers = upper_ancestors.difference( - lower_ancestors - ).difference(lower_descendants) - for rev in candidate_lowers: - for downrev in rev._all_down_revisions: - if self._revision_map[downrev] in candidate_lowers: - break - else: - base_lowers.add(rev) - lowers = base_lowers.union(requested_lowers) - elif implicit_base: - base_lowers = set(self.get_revisions(self._real_bases)) - lowers = base_lowers.union(requested_lowers) - elif not requested_lowers: - lowers = set(self.get_revisions(self._real_bases)) - else: - lowers = requested_lowers - - # represents all nodes we will produce - total_space = set( - rev.revision for rev in upper_ancestors - ).intersection( - rev.revision - for rev in self._get_descendant_nodes( - lowers, - check=True, - omit_immediate_dependencies=( - select_for_downgrade and requested_lowers - ), - ) - ) - - if not total_space: - # no nodes. determine if this is an invalid range - # or not. 
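            # Gather the lowers plus all of their ancestors, reduce that
            # set to its branch heads, and if any such head is also an
            # ancestor of the uppers the requested window is legitimately
            # empty; otherwise the upper is not a descendant of the lower
            # and the range is invalid.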
- start_from = set(requested_lowers) - start_from.update( - self._get_ancestor_nodes( - list(start_from), include_dependencies=True - ) - ) - - # determine all the current branch points represented - # by requested_lowers - start_from = self._filter_into_branch_heads(start_from) - - # if the requested start is one of those branch points, - # then just return empty set - if start_from.intersection(upper_ancestors): - return - else: - # otherwise, they requested nodes out of - # order - raise RangeNotAncestorError(lower, upper) - - # organize branch points to be consumed separately from - # member nodes - branch_todo = set( - rev - for rev in (self._revision_map[rev] for rev in total_space) - if rev._is_real_branch_point - and len(total_space.intersection(rev._all_nextrev)) > 1 - ) - - # it's not possible for any "uppers" to be in branch_todo, - # because the ._all_nextrev of those nodes is not in total_space - # assert not branch_todo.intersection(uppers) - - todo = collections.deque( - r for r in uppers if r.revision in total_space - ) - - # iterate for total_space being emptied out - total_space_modified = True - while total_space: - - if not total_space_modified: - raise RevisionError( - "Dependency resolution failed; iteration can't proceed" - ) - total_space_modified = False - # when everything non-branch pending is consumed, - # add to the todo any branch nodes that have no - # descendants left in the queue - if not todo: - todo.extendleft( - sorted( - ( - rev - for rev in branch_todo - if not rev._all_nextrev.intersection(total_space) - ), - # favor "revisioned" branch points before - # dependent ones - key=lambda rev: 0 if rev.is_branch_point else 1, - ) - ) - branch_todo.difference_update(todo) - # iterate nodes that are in the immediate todo - while todo: - rev = todo.popleft() - total_space.remove(rev.revision) - total_space_modified = True - - # do depth first for elements within branches, - # don't consume any actual branch nodes - todo.extendleft( - [ - self._revision_map[downrev] - for downrev in reversed(rev._all_down_revisions) - if self._revision_map[downrev] not in branch_todo - and downrev in total_space - ] - ) - - if not inclusive and rev in requested_lowers: - continue - yield rev - - assert not branch_todo - - -class Revision(object): - """Base class for revisioned objects. - - The :class:`.Revision` class is the base of the more public-facing - :class:`.Script` object, which represents a migration script. - The mechanics of revision management and traversal are encapsulated - within :class:`.Revision`, while :class:`.Script` applies this logic - to Python files in a version directory. - - """ - - nextrev = frozenset() - """following revisions, based on down_revision only.""" - - _all_nextrev = frozenset() - - revision = None - """The string revision number.""" - - down_revision = None - """The ``down_revision`` identifier(s) within the migration script. - - Note that the total set of "down" revisions is - down_revision + dependencies. - - """ - - dependencies = None - """Additional revisions which this revision is dependent on. - - From a migration standpoint, these dependencies are added to the - down_revision to form the full iteration. However, the separation - of down_revision from "dependencies" is to assist in navigating - a history that contains many branches, typically a multi-root scenario. 
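    A hedged sketch of a migration file that produces these values::

        # versions/ae10_add_account_table.py (hypothetical file)
        revision = "ae10"
        down_revision = "27c6"
        depends_on = ("9fe2",)   # populates Revision.dependencies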
- - """ - - branch_labels = None - """Optional string/tuple of symbolic names to apply to this - revision's branch""" - - @classmethod - def verify_rev_id(cls, revision): - illegal_chars = set(revision).intersection(_revision_illegal_chars) - if illegal_chars: - raise RevisionError( - "Character(s) '%s' not allowed in revision identifier '%s'" - % (", ".join(sorted(illegal_chars)), revision) - ) - - def __init__( - self, revision, down_revision, dependencies=None, branch_labels=None - ): - self.verify_rev_id(revision) - self.revision = revision - self.down_revision = tuple_rev_as_scalar(down_revision) - self.dependencies = tuple_rev_as_scalar(dependencies) - self._resolved_dependencies = () - self._orig_branch_labels = util.to_tuple(branch_labels, default=()) - self.branch_labels = set(self._orig_branch_labels) - - def __repr__(self): - args = [repr(self.revision), repr(self.down_revision)] - if self.dependencies: - args.append("dependencies=%r" % (self.dependencies,)) - if self.branch_labels: - args.append("branch_labels=%r" % (self.branch_labels,)) - return "%s(%s)" % (self.__class__.__name__, ", ".join(args)) - - def add_nextrev(self, revision): - self._all_nextrev = self._all_nextrev.union([revision.revision]) - if self.revision in revision._versioned_down_revisions: - self.nextrev = self.nextrev.union([revision.revision]) - - @property - def _all_down_revisions(self): - return ( - util.to_tuple(self.down_revision, default=()) - + self._resolved_dependencies - ) - - @property - def _versioned_down_revisions(self): - return util.to_tuple(self.down_revision, default=()) - - @property - def is_head(self): - """Return True if this :class:`.Revision` is a 'head' revision. - - This is determined based on whether any other :class:`.Script` - within the :class:`.ScriptDirectory` refers to this - :class:`.Script`. Multiple heads can be present. - - """ - return not bool(self.nextrev) - - @property - def _is_real_head(self): - return not bool(self._all_nextrev) - - @property - def is_base(self): - """Return True if this :class:`.Revision` is a 'base' revision.""" - - return self.down_revision is None - - @property - def _is_real_base(self): - """Return True if this :class:`.Revision` is a "real" base revision, - e.g. that it has no dependencies either.""" - - # we use self.dependencies here because this is called up - # in initialization where _real_dependencies isn't set up - # yet - return self.down_revision is None and self.dependencies is None - - @property - def is_branch_point(self): - """Return True if this :class:`.Script` is a branch point. - - A branchpoint is defined as a :class:`.Script` which is referred - to by more than one succeeding :class:`.Script`, that is more - than one :class:`.Script` has a `down_revision` identifier pointing - here. - - """ - return len(self.nextrev) > 1 - - @property - def _is_real_branch_point(self): - """Return True if this :class:`.Script` is a 'real' branch point, - taking into account dependencies as well. 
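        For instance, a revision with one versioned child plus one other
        revision naming it in ``depends_on`` has two entries in
        ``_all_nextrev`` and is a 'real' branch point, even though
        ``is_branch_point`` (which counts ``nextrev`` only) is False.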
- - """ - return len(self._all_nextrev) > 1 - - @property - def is_merge_point(self): - """Return True if this :class:`.Script` is a merge point.""" - - return len(self._versioned_down_revisions) > 1 - - -def tuple_rev_as_scalar(rev): - if not rev: - return None - elif len(rev) == 1: - return rev[0] - else: - return rev diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/generic/README b/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/generic/README deleted file mode 100644 index 98e4f9c..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/generic/README +++ /dev/null @@ -1 +0,0 @@ -Generic single-database configuration. \ No newline at end of file diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/generic/alembic.ini.mako b/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/generic/alembic.ini.mako deleted file mode 100644 index 1b7d6ea..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/generic/alembic.ini.mako +++ /dev/null @@ -1,74 +0,0 @@ -# A generic, single database configuration. - -[alembic] -# path to migration scripts -script_location = ${script_location} - -# template used to generate migration files -# file_template = %%(rev)s_%%(slug)s - -# timezone to use when rendering the date -# within the migration file as well as the filename. -# string value is passed to dateutil.tz.gettz() -# leave blank for localtime -# timezone = - -# max length of characters to apply to the -# "slug" field -# truncate_slug_length = 40 - -# set to 'true' to run the environment during -# the 'revision' command, regardless of autogenerate -# revision_environment = false - -# set to 'true' to allow .pyc and .pyo files without -# a source .py file to be detected as revisions in the -# versions/ directory -# sourceless = false - -# version location specification; this defaults -# to ${script_location}/versions. When using multiple version -# directories, initial revisions must be specified with --version-path -# version_locations = %(here)s/bar %(here)s/bat ${script_location}/versions - -# the output encoding used when revision files -# are written from script.py.mako -# output_encoding = utf-8 - -sqlalchemy.url = driver://user:pass@localhost/dbname - - -# Logging configuration -[loggers] -keys = root,sqlalchemy,alembic - -[handlers] -keys = console - -[formatters] -keys = generic - -[logger_root] -level = WARN -handlers = console -qualname = - -[logger_sqlalchemy] -level = WARN -handlers = -qualname = sqlalchemy.engine - -[logger_alembic] -level = INFO -handlers = -qualname = alembic - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatter_generic] -format = %(levelname)-5.5s [%(name)s] %(message)s -datefmt = %H:%M:%S diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/generic/env.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/generic/env.py deleted file mode 100644 index 15cb472..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/generic/env.py +++ /dev/null @@ -1,75 +0,0 @@ - -from logging.config import fileConfig - -from sqlalchemy import engine_from_config -from sqlalchemy import pool - -from alembic import context - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. -config = context.config - -# Interpret the config file for Python logging. 
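# (i.e. the [loggers], [handlers] and [formatters] sections of the ini file)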
-# This line sets up loggers basically. -fileConfig(config.config_file_name) - -# add your model's MetaData object here -# for 'autogenerate' support -# from myapp import mymodel -# target_metadata = mymodel.Base.metadata -target_metadata = None - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_offline(): - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. - - """ - url = config.get_main_option("sqlalchemy.url") - context.configure( - url=url, target_metadata=target_metadata, literal_binds=True - ) - - with context.begin_transaction(): - context.run_migrations() - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - connectable = engine_from_config( - config.get_section(config.config_ini_section), - prefix="sqlalchemy.", - poolclass=pool.NullPool, - ) - - with connectable.connect() as connection: - context.configure( - connection=connection, target_metadata=target_metadata - ) - - with context.begin_transaction(): - context.run_migrations() - - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/generic/script.py.mako b/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/generic/script.py.mako deleted file mode 100644 index 2c01563..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/generic/script.py.mako +++ /dev/null @@ -1,24 +0,0 @@ -"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision | comma,n} -Create Date: ${create_date} - -""" -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - -# revision identifiers, used by Alembic. -revision = ${repr(up_revision)} -down_revision = ${repr(down_revision)} -branch_labels = ${repr(branch_labels)} -depends_on = ${repr(depends_on)} - - -def upgrade(): - ${upgrades if upgrades else "pass"} - - -def downgrade(): - ${downgrades if downgrades else "pass"} diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/multidb/README b/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/multidb/README deleted file mode 100644 index 5db219f..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/multidb/README +++ /dev/null @@ -1 +0,0 @@ -Rudimentary multi-database configuration. \ No newline at end of file diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/multidb/alembic.ini.mako b/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/multidb/alembic.ini.mako deleted file mode 100644 index 79fcb79..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/multidb/alembic.ini.mako +++ /dev/null @@ -1,80 +0,0 @@ -# a multi-database configuration. - -[alembic] -# path to migration scripts -script_location = ${script_location} - -# template used to generate migration files -# file_template = %%(rev)s_%%(slug)s - -# timezone to use when rendering the date -# within the migration file as well as the filename. 
-# string value is passed to dateutil.tz.gettz() -# leave blank for localtime -# timezone = - -# max length of characters to apply to the -# "slug" field -# truncate_slug_length = 40 - -# set to 'true' to run the environment during -# the 'revision' command, regardless of autogenerate -# revision_environment = false - -# set to 'true' to allow .pyc and .pyo files without -# a source .py file to be detected as revisions in the -# versions/ directory -# sourceless = false - -# version location specification; this defaults -# to ${script_location}/versions. When using multiple version -# directories, initial revisions must be specified with --version-path -# version_locations = %(here)s/bar %(here)s/bat ${script_location}/versions - -# the output encoding used when revision files -# are written from script.py.mako -# output_encoding = utf-8 - -databases = engine1, engine2 - -[engine1] -sqlalchemy.url = driver://user:pass@localhost/dbname - -[engine2] -sqlalchemy.url = driver://user:pass@localhost/dbname2 - - -# Logging configuration -[loggers] -keys = root,sqlalchemy,alembic - -[handlers] -keys = console - -[formatters] -keys = generic - -[logger_root] -level = WARN -handlers = console -qualname = - -[logger_sqlalchemy] -level = WARN -handlers = -qualname = sqlalchemy.engine - -[logger_alembic] -level = INFO -handlers = -qualname = alembic - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatter_generic] -format = %(levelname)-5.5s [%(name)s] %(message)s -datefmt = %H:%M:%S diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/multidb/env.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/multidb/env.py deleted file mode 100644 index 607efaa..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/multidb/env.py +++ /dev/null @@ -1,139 +0,0 @@ - -import logging -from logging.config import fileConfig -import re - -from sqlalchemy import engine_from_config -from sqlalchemy import pool - -from alembic import context - -USE_TWOPHASE = False - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. -config = context.config - -# Interpret the config file for Python logging. -# This line sets up loggers basically. -fileConfig(config.config_file_name) -logger = logging.getLogger("alembic.env") - -# gather section names referring to different -# databases. These are named "engine1", "engine2" -# in the sample .ini file. -db_names = config.get_main_option("databases") - -# add your model's MetaData objects here -# for 'autogenerate' support. These must be set -# up to hold just those tables targeting a -# particular database. table.tometadata() may be -# helpful here in case a "copy" of -# a MetaData is needed. -# from myapp import mymodel -# target_metadata = { -# 'engine1':mymodel.metadata1, -# 'engine2':mymodel.metadata2 -# } -target_metadata = {} - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_offline(): - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. 
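    With ``databases = engine1, engine2`` configured in the ini file,
    the loop below writes ``engine1.sql`` and ``engine2.sql`` to the
    current directory, one SQL stream per engine.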
- - """ - # for the --sql use case, run migrations for each URL into - # individual files. - - engines = {} - for name in re.split(r",\s*", db_names): - engines[name] = rec = {} - rec["url"] = context.config.get_section_option(name, "sqlalchemy.url") - - for name, rec in engines.items(): - logger.info("Migrating database %s" % name) - file_ = "%s.sql" % name - logger.info("Writing output to %s" % file_) - with open(file_, "w") as buffer: - context.configure( - url=rec["url"], - output_buffer=buffer, - target_metadata=target_metadata.get(name), - literal_binds=True, - ) - with context.begin_transaction(): - context.run_migrations(engine_name=name) - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - - # for the direct-to-DB use case, start a transaction on all - # engines, then run all migrations, then commit all transactions. - - engines = {} - for name in re.split(r",\s*", db_names): - engines[name] = rec = {} - rec["engine"] = engine_from_config( - context.config.get_section(name), - prefix="sqlalchemy.", - poolclass=pool.NullPool, - ) - - for name, rec in engines.items(): - engine = rec["engine"] - rec["connection"] = conn = engine.connect() - - if USE_TWOPHASE: - rec["transaction"] = conn.begin_twophase() - else: - rec["transaction"] = conn.begin() - - try: - for name, rec in engines.items(): - logger.info("Migrating database %s" % name) - context.configure( - connection=rec["connection"], - upgrade_token="%s_upgrades" % name, - downgrade_token="%s_downgrades" % name, - target_metadata=target_metadata.get(name), - ) - context.run_migrations(engine_name=name) - - if USE_TWOPHASE: - for rec in engines.values(): - rec["transaction"].prepare() - - for rec in engines.values(): - rec["transaction"].commit() - except: - for rec in engines.values(): - rec["transaction"].rollback() - raise - finally: - for rec in engines.values(): - rec["connection"].close() - - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/multidb/script.py.mako b/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/multidb/script.py.mako deleted file mode 100644 index c3970a5..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/multidb/script.py.mako +++ /dev/null @@ -1,45 +0,0 @@ -<%! -import re - -%>"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision | comma,n} -Create Date: ${create_date} - -""" -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - -# revision identifiers, used by Alembic. -revision = ${repr(up_revision)} -down_revision = ${repr(down_revision)} -branch_labels = ${repr(branch_labels)} -depends_on = ${repr(depends_on)} - - -def upgrade(engine_name): - globals()["upgrade_%s" % engine_name]() - - -def downgrade(engine_name): - globals()["downgrade_%s" % engine_name]() - -<% - db_names = config.get_main_option("databases") -%> - -## generate an "upgrade_() / downgrade_()" function -## for each database name in the ini file. 
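## e.g. with "databases = engine1, engine2" this renders
## upgrade_engine1()/downgrade_engine1() and upgrade_engine2()/downgrade_engine2()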
- -% for db_name in re.split(r',\s*', db_names): - -def upgrade_${db_name}(): - ${context.get("%s_upgrades" % db_name, "pass")} - - -def downgrade_${db_name}(): - ${context.get("%s_downgrades" % db_name, "pass")} - -% endfor diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/pylons/README b/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/pylons/README deleted file mode 100644 index ed3c28e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/pylons/README +++ /dev/null @@ -1 +0,0 @@ -Configuration that reads from a Pylons project environment. \ No newline at end of file diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/pylons/alembic.ini.mako b/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/pylons/alembic.ini.mako deleted file mode 100644 index 6f6511b..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/pylons/alembic.ini.mako +++ /dev/null @@ -1,40 +0,0 @@ -# a Pylons configuration. - -[alembic] -# path to migration scripts -script_location = ${script_location} - -# template used to generate migration files -# file_template = %%(rev)s_%%(slug)s - -# timezone to use when rendering the date -# within the migration file as well as the filename. -# string value is passed to dateutil.tz.gettz() -# leave blank for localtime -# timezone = - -# max length of characters to apply to the -# "slug" field -# truncate_slug_length = 40 - -# set to 'true' to run the environment during -# the 'revision' command, regardless of autogenerate -# revision_environment = false - -# set to 'true' to allow .pyc and .pyo files without -# a source .py file to be detected as revisions in the -# versions/ directory -# sourceless = false - -# version location specification; this defaults -# to ${script_location}/versions. When using multiple version -# directories, initial revisions must be specified with --version-path -# version_locations = %(here)s/bar %(here)s/bat ${script_location}/versions - -# the output encoding used when revision files -# are written from script.py.mako -# output_encoding = utf-8 - -pylons_config_file = ./development.ini - -# that's it ! \ No newline at end of file diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/pylons/env.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/pylons/env.py deleted file mode 100644 index f8abf44..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/pylons/env.py +++ /dev/null @@ -1,84 +0,0 @@ -"""Pylons bootstrap environment. - -Place 'pylons_config_file' into alembic.ini, and the application will -be loaded from there. - -""" -from logging.config import fileConfig - -from paste.deploy import loadapp - -from alembic import context - - -try: - # if pylons app already in, don't create a new app - from pylons import config as pylons_config - - pylons_config["__file__"] -except: - config = context.config - # can use config['__file__'] here, i.e. the Pylons - # ini file, instead of alembic.ini - config_file = config.get_main_option("pylons_config_file") - fileConfig(config_file) - wsgi_app = loadapp("config:%s" % config_file, relative_to=".") - - -# customize this section for non-standard engine configurations. 
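# __import__("pkg.model.meta") returns the top-level package, so the
# trailing ".model.meta" attribute access below descends to the
# project's metadata module.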
-meta = __import__( - "%s.model.meta" % wsgi_app.config["pylons.package"] -).model.meta - -# add your model's MetaData object here -# for 'autogenerate' support -# from myapp import mymodel -# target_metadata = mymodel.Base.metadata -target_metadata = None - - -def run_migrations_offline(): - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. - - """ - context.configure( - url=meta.engine.url, - target_metadata=target_metadata, - literal_binds=True, - ) - with context.begin_transaction(): - context.run_migrations() - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - # specify here how the engine is acquired - # engine = meta.engine - raise NotImplementedError("Please specify engine connectivity here") - - with engine.connect() as connection: # noqa - context.configure( - connection=connection, target_metadata=target_metadata - ) - - with context.begin_transaction(): - context.run_migrations() - - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/pylons/script.py.mako b/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/pylons/script.py.mako deleted file mode 100644 index 2c01563..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/templates/pylons/script.py.mako +++ /dev/null @@ -1,24 +0,0 @@ -"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision | comma,n} -Create Date: ${create_date} - -""" -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - -# revision identifiers, used by Alembic. 
-revision = ${repr(up_revision)} -down_revision = ${repr(down_revision)} -branch_labels = ${repr(branch_labels)} -depends_on = ${repr(depends_on)} - - -def upgrade(): - ${upgrades if upgrades else "pass"} - - -def downgrade(): - ${downgrades if downgrades else "pass"} diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/__init__.py deleted file mode 100644 index 765dd7b..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from alembic import util # noqa -from .assertions import assert_raises # noqa -from .assertions import assert_raises_message # noqa -from .assertions import eq_ # noqa -from .assertions import eq_ignore_whitespace # noqa -from .assertions import is_ # noqa -from .assertions import is_not_ # noqa -from .assertions import ne_ # noqa -from .config import requirements as requires # noqa -from .fixtures import TestBase # noqa -from .util import provide_metadata # noqa diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/assertions.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/assertions.py deleted file mode 100644 index 196e791..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/assertions.py +++ /dev/null @@ -1,222 +0,0 @@ -from __future__ import absolute_import - -import contextlib -import re -import warnings - -from sqlalchemy import exc as sa_exc -from sqlalchemy.engine import default -from sqlalchemy.util import decorator - -from . import config -from . import mock -from .exclusions import db_spec -from .. import util -from ..util.compat import py3k -from ..util.compat import text_type - - -if not util.sqla_094: - - def eq_(a, b, msg=None): - """Assert a == b, with repr messaging on failure.""" - assert a == b, msg or "%r != %r" % (a, b) - - def ne_(a, b, msg=None): - """Assert a != b, with repr messaging on failure.""" - assert a != b, msg or "%r == %r" % (a, b) - - def is_(a, b, msg=None): - """Assert a is b, with repr messaging on failure.""" - assert a is b, msg or "%r is not %r" % (a, b) - - def is_not_(a, b, msg=None): - """Assert a is not b, with repr messaging on failure.""" - assert a is not b, msg or "%r is %r" % (a, b) - - def assert_raises(except_cls, callable_, *args, **kw): - try: - callable_(*args, **kw) - success = False - except except_cls: - success = True - - # assert outside the block so it works for AssertionError too ! 
- assert success, "Callable did not raise an exception" - - def assert_raises_message(except_cls, msg, callable_, *args, **kwargs): - try: - callable_(*args, **kwargs) - assert False, "Callable did not raise an exception" - except except_cls as e: - assert re.search(msg, text_type(e), re.UNICODE), "%r !~ %s" % ( - msg, - e, - ) - print(text_type(e).encode("utf-8")) - - -else: - from sqlalchemy.testing.assertions import assert_raises # noqa - from sqlalchemy.testing.assertions import assert_raises_message # noqa - from sqlalchemy.testing.assertions import eq_ # noqa - from sqlalchemy.testing.assertions import is_ # noqa - from sqlalchemy.testing.assertions import is_not_ # noqa - from sqlalchemy.testing.assertions import ne_ # noqa - - -def eq_ignore_whitespace(a, b, msg=None): - a = re.sub(r"^\s+?|\n", "", a) - a = re.sub(r" {2,}", " ", a) - b = re.sub(r"^\s+?|\n", "", b) - b = re.sub(r" {2,}", " ", b) - - # convert for unicode string rendering, - # using special escape character "!U" - if py3k: - b = re.sub(r"!U", "", b) - else: - b = re.sub(r"!U", "u", b) - - assert a == b, msg or "%r != %r" % (a, b) - - -def assert_compiled(element, assert_string, dialect=None): - dialect = _get_dialect(dialect) - eq_( - text_type(element.compile(dialect=dialect)) - .replace("\n", "") - .replace("\t", ""), - assert_string.replace("\n", "").replace("\t", ""), - ) - - -_dialect_mods = {} - - -def _get_dialect(name): - if name is None or name == "default": - return default.DefaultDialect() - else: - try: - dialect_mod = _dialect_mods[name] - except KeyError: - dialect_mod = getattr( - __import__("sqlalchemy.dialects.%s" % name).dialects, name - ) - _dialect_mods[name] = dialect_mod - d = dialect_mod.dialect() - if name == "postgresql": - d.implicit_returning = True - elif name == "mssql": - d.legacy_schema_aliasing = False - return d - - -def expect_warnings(*messages, **kw): - """Context manager which expects one or more warnings. - - With no arguments, squelches all SAWarnings emitted via - sqlalchemy.util.warn and sqlalchemy.util.warn_limited. Otherwise - pass string expressions that will match selected warnings via regex; - all non-matching warnings are sent through. - - The expect version **asserts** that the warnings were in fact seen. - - Note that the test suite sets SAWarning warnings to raise exceptions. - - """ - return _expect_warnings(sa_exc.SAWarning, messages, **kw) - - -@contextlib.contextmanager -def expect_warnings_on(db, *messages, **kw): - """Context manager which expects one or more warnings on specific - dialects. - - The expect version **asserts** that the warnings were in fact seen. - - """ - spec = db_spec(db) - - if isinstance(db, util.string_types) and not spec(config._current): - yield - else: - with expect_warnings(*messages, **kw): - yield - - -def emits_warning(*messages): - """Decorator form of expect_warnings(). - - Note that emits_warning does **not** assert that the warnings - were in fact seen. - - """ - - @decorator - def decorate(fn, *args, **kw): - with expect_warnings(assert_=False, *messages): - return fn(*args, **kw) - - return decorate - - -def emits_warning_on(db, *messages): - """Mark a test as emitting a warning on a specific dialect. - - With no arguments, squelches all SAWarning failures. Or pass one or more - strings; these will be matched to the root of the warning description by - warnings.filterwarnings(). - - Note that emits_warning_on does **not** assert that the warnings - were in fact seen. 
- - """ - - @decorator - def decorate(fn, *args, **kw): - with expect_warnings_on(db, *messages): - return fn(*args, **kw) - - return decorate - - -@contextlib.contextmanager -def _expect_warnings(exc_cls, messages, regex=True, assert_=True): - - if regex: - filters = [re.compile(msg, re.I) for msg in messages] - else: - filters = messages - - seen = set(filters) - - real_warn = warnings.warn - - def our_warn(msg, exception=None, *arg, **kw): - if exception and not issubclass(exception, exc_cls): - return real_warn(msg, exception, *arg, **kw) - - if not filters: - return - - for filter_ in filters: - if (regex and filter_.match(msg)) or ( - not regex and filter_ == msg - ): - seen.discard(filter_) - break - else: - if exception is None: - real_warn(msg, *arg, **kw) - else: - real_warn(msg, exception, *arg, **kw) - - with mock.patch("warnings.warn", our_warn): - yield - - if assert_: - assert not seen, "Warnings were not seen: %s" % ", ".join( - "%r" % (s.pattern if regex else s) for s in seen - ) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/compat.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/compat.py deleted file mode 100644 index 9fbd50f..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/compat.py +++ /dev/null @@ -1,12 +0,0 @@ -def get_url_driver_name(url): - if "+" not in url.drivername: - return url.get_dialect().driver - else: - return url.drivername.split("+")[1] - - -def get_url_backend_name(url): - if "+" not in url.drivername: - return url.drivername - else: - return url.drivername.split("+")[0] diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/config.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/config.py deleted file mode 100644 index 7d7009e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/config.py +++ /dev/null @@ -1,91 +0,0 @@ -# testing/config.py -# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php -"""NOTE: copied/adapted from SQLAlchemy master for backwards compatibility; - this should be removable when Alembic targets SQLAlchemy 1.0.0 -""" - -import collections - -requirements = None -db = None -db_url = None -db_opts = None -file_config = None -test_schema = None -test_schema_2 = None -_current = None - - -class Config(object): - def __init__(self, db, db_opts, options, file_config): - self._set_name(db) - self.db = db - self.db_opts = db_opts - self.options = options - self.file_config = file_config - self.test_schema = "test_schema" - self.test_schema_2 = "test_schema_2" - - _stack = collections.deque() - _configs = set() - - def _set_name(self, db): - if db.dialect.server_version_info: - svi = ".".join(str(tok) for tok in db.dialect.server_version_info) - self.name = "%s+%s_[%s]" % (db.name, db.driver, svi) - else: - self.name = "%s+%s" % (db.name, db.driver) - - @classmethod - def register(cls, db, db_opts, options, file_config): - """add a config as one of the global configs. - - If there are no configs set up yet, this config also - gets set as the "_current". 
- """ - cfg = Config(db, db_opts, options, file_config) - cls._configs.add(cfg) - return cfg - - @classmethod - def set_as_current(cls, config): - global db, _current, db_url, test_schema, test_schema_2, db_opts - _current = config - db_url = config.db.url - db_opts = config.db_opts - test_schema = config.test_schema - test_schema_2 = config.test_schema_2 - db = config.db - - @classmethod - def push_engine(cls, db): - assert _current, "Can't push without a default Config set up" - cls.push( - Config( - db, _current.db_opts, _current.options, _current.file_config - ) - ) - - @classmethod - def push(cls, config): - cls._stack.append(_current) - cls.set_as_current(config) - - @classmethod - def reset(cls): - if cls._stack: - cls.set_as_current(cls._stack[0]) - cls._stack.clear() - - @classmethod - def all_configs(cls): - return cls._configs - - @classmethod - def all_dbs(cls): - for cfg in cls.all_configs(): - yield cfg.db diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/engines.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/engines.py deleted file mode 100644 index 68d0068..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/engines.py +++ /dev/null @@ -1,27 +0,0 @@ -# testing/engines.py -# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php -"""NOTE: copied/adapted from SQLAlchemy master for backwards compatibility; - this should be removable when Alembic targets SQLAlchemy 1.0.0. -""" - -from __future__ import absolute_import - -from . import config - - -def testing_engine(url=None, options=None): - """Produce an engine configured by --options with optional overrides.""" - - from sqlalchemy import create_engine - - url = url or config.db.url - if options is None: - options = config.db_opts - - engine = create_engine(url, **options) - - return engine diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/env.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/env.py deleted file mode 100644 index 04ebfe5..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/env.py +++ /dev/null @@ -1,496 +0,0 @@ -#!coding: utf-8 - -import os -import shutil -import textwrap - -from . import engines -from . import provision -from .. import util -from ..script import Script -from ..script import ScriptDirectory -from ..util.compat import get_current_bytecode_suffixes -from ..util.compat import has_pep3147 -from ..util.compat import u - - -def _get_staging_directory(): - if provision.FOLLOWER_IDENT: - return "scratch_%s" % provision.FOLLOWER_IDENT - else: - return "scratch" - - -def staging_env(create=True, template="generic", sourceless=False): - from alembic import command, script - - cfg = _testing_config() - if create: - path = os.path.join(_get_staging_directory(), "scripts") - if os.path.exists(path): - shutil.rmtree(path) - command.init(cfg, path, template=template) - if sourceless: - try: - # do an import so that a .pyc/.pyo is generated. - util.load_python_file(path, "env.py") - except AttributeError: - # we don't have the migration context set up yet - # so running the .env py throws this exception. - # theoretically we could be using py_compiler here to - # generate .pyc/.pyo without importing but not really - # worth it. 
- pass - assert sourceless in ( - "pep3147_envonly", - "simple", - "pep3147_everything", - ), sourceless - make_sourceless( - os.path.join(path, "env.py"), - "pep3147" if "pep3147" in sourceless else "simple", - ) - - sc = script.ScriptDirectory.from_config(cfg) - return sc - - -def clear_staging_env(): - shutil.rmtree(_get_staging_directory(), True) - - -def script_file_fixture(txt): - dir_ = os.path.join(_get_staging_directory(), "scripts") - path = os.path.join(dir_, "script.py.mako") - with open(path, "w") as f: - f.write(txt) - - -def env_file_fixture(txt): - dir_ = os.path.join(_get_staging_directory(), "scripts") - txt = ( - """ -from alembic import context - -config = context.config -""" - + txt - ) - - path = os.path.join(dir_, "env.py") - pyc_path = util.pyc_file_from_path(path) - if pyc_path: - os.unlink(pyc_path) - - with open(path, "w") as f: - f.write(txt) - - -def _sqlite_file_db(tempname="foo.db"): - dir_ = os.path.join(_get_staging_directory(), "scripts") - url = "sqlite:///%s/%s" % (dir_, tempname) - return engines.testing_engine(url=url) - - -def _sqlite_testing_config(sourceless=False): - dir_ = os.path.join(_get_staging_directory(), "scripts") - url = "sqlite:///%s/foo.db" % dir_ - - return _write_config_file( - """ -[alembic] -script_location = %s -sqlalchemy.url = %s -sourceless = %s - -[loggers] -keys = root - -[handlers] -keys = console - -[logger_root] -level = WARN -handlers = console -qualname = - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatters] -keys = generic - -[formatter_generic] -format = %%(levelname)-5.5s [%%(name)s] %%(message)s -datefmt = %%H:%%M:%%S - """ - % (dir_, url, "true" if sourceless else "false") - ) - - -def _multi_dir_testing_config(sourceless=False, extra_version_location=""): - dir_ = os.path.join(_get_staging_directory(), "scripts") - url = "sqlite:///%s/foo.db" % dir_ - - return _write_config_file( - """ -[alembic] -script_location = %s -sqlalchemy.url = %s -sourceless = %s -version_locations = %%(here)s/model1/ %%(here)s/model2/ %%(here)s/model3/ %s - -[loggers] -keys = root - -[handlers] -keys = console - -[logger_root] -level = WARN -handlers = console -qualname = - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatters] -keys = generic - -[formatter_generic] -format = %%(levelname)-5.5s [%%(name)s] %%(message)s -datefmt = %%H:%%M:%%S - """ - % ( - dir_, - url, - "true" if sourceless else "false", - extra_version_location, - ) - ) - - -def _no_sql_testing_config(dialect="postgresql", directives=""): - """use a postgresql url with no host so that - connections guaranteed to fail""" - dir_ = os.path.join(_get_staging_directory(), "scripts") - return _write_config_file( - """ -[alembic] -script_location = %s -sqlalchemy.url = %s:// -%s - -[loggers] -keys = root - -[handlers] -keys = console - -[logger_root] -level = WARN -handlers = console -qualname = - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatters] -keys = generic - -[formatter_generic] -format = %%(levelname)-5.5s [%%(name)s] %%(message)s -datefmt = %%H:%%M:%%S - -""" - % (dir_, dialect, directives) - ) - - -def _write_config_file(text): - cfg = _testing_config() - with open(cfg.config_file_name, "w") as f: - f.write(text) - return cfg - - -def _testing_config(): - from alembic.config import Config - - if not os.access(_get_staging_directory(), os.F_OK): - 
os.mkdir(_get_staging_directory()) - return Config(os.path.join(_get_staging_directory(), "test_alembic.ini")) - - -def write_script( - scriptdir, rev_id, content, encoding="ascii", sourceless=False -): - old = scriptdir.revision_map.get_revision(rev_id) - path = old.path - - content = textwrap.dedent(content) - if encoding: - content = content.encode(encoding) - with open(path, "wb") as fp: - fp.write(content) - pyc_path = util.pyc_file_from_path(path) - if pyc_path: - os.unlink(pyc_path) - script = Script._from_path(scriptdir, path) - old = scriptdir.revision_map.get_revision(script.revision) - if old.down_revision != script.down_revision: - raise Exception( - "Can't change down_revision " "on a refresh operation." - ) - scriptdir.revision_map.add_revision(script, _replace=True) - - if sourceless: - make_sourceless( - path, "pep3147" if sourceless == "pep3147_everything" else "simple" - ) - - -def make_sourceless(path, style): - - import py_compile - - py_compile.compile(path) - - if style == "simple" and has_pep3147(): - pyc_path = util.pyc_file_from_path(path) - suffix = get_current_bytecode_suffixes()[0] - filepath, ext = os.path.splitext(path) - simple_pyc_path = filepath + suffix - shutil.move(pyc_path, simple_pyc_path) - pyc_path = simple_pyc_path - elif style == "pep3147" and not has_pep3147(): - raise NotImplementedError() - else: - assert style in ("pep3147", "simple") - pyc_path = util.pyc_file_from_path(path) - - assert os.access(pyc_path, os.F_OK) - - os.unlink(path) - - -def three_rev_fixture(cfg): - a = util.rev_id() - b = util.rev_id() - c = util.rev_id() - - script = ScriptDirectory.from_config(cfg) - script.generate_revision(a, "revision a", refresh=True) - write_script( - script, - a, - """\ -"Rev A" -revision = '%s' -down_revision = None - -from alembic import op - - -def upgrade(): - op.execute("CREATE STEP 1") - - -def downgrade(): - op.execute("DROP STEP 1") - -""" - % a, - ) - - script.generate_revision(b, "revision b", refresh=True) - write_script( - script, - b, - u( - """# coding: utf-8 -"Rev B, méil, %3" -revision = '{}' -down_revision = '{}' - -from alembic import op - - -def upgrade(): - op.execute("CREATE STEP 2") - - -def downgrade(): - op.execute("DROP STEP 2") - -""" - ).format(b, a), - encoding="utf-8", - ) - - script.generate_revision(c, "revision c", refresh=True) - write_script( - script, - c, - """\ -"Rev C" -revision = '%s' -down_revision = '%s' - -from alembic import op - - -def upgrade(): - op.execute("CREATE STEP 3") - - -def downgrade(): - op.execute("DROP STEP 3") - -""" - % (c, b), - ) - return a, b, c - - -def multi_heads_fixture(cfg, a, b, c): - """Create a multiple head fixture from the three-revs fixture""" - - d = util.rev_id() - e = util.rev_id() - f = util.rev_id() - - script = ScriptDirectory.from_config(cfg) - script.generate_revision( - d, "revision d from b", head=b, splice=True, refresh=True - ) - write_script( - script, - d, - """\ -"Rev D" -revision = '%s' -down_revision = '%s' - -from alembic import op - - -def upgrade(): - op.execute("CREATE STEP 4") - - -def downgrade(): - op.execute("DROP STEP 4") - -""" - % (d, b), - ) - - script.generate_revision( - e, "revision e from d", head=d, splice=True, refresh=True - ) - write_script( - script, - e, - """\ -"Rev E" -revision = '%s' -down_revision = '%s' - -from alembic import op - - -def upgrade(): - op.execute("CREATE STEP 5") - - -def downgrade(): - op.execute("DROP STEP 5") - -""" - % (e, d), - ) - - script.generate_revision( - f, "revision f from b", head=b, splice=True, 
refresh=True - ) - write_script( - script, - f, - """\ -"Rev F" -revision = '%s' -down_revision = '%s' - -from alembic import op - - -def upgrade(): - op.execute("CREATE STEP 6") - - -def downgrade(): - op.execute("DROP STEP 6") - -""" - % (f, b), - ) - - return d, e, f - - -def _multidb_testing_config(engines): - """alembic.ini fixture to work exactly with the 'multidb' template""" - - dir_ = os.path.join(_get_staging_directory(), "scripts") - - databases = ", ".join(engines.keys()) - engines = "\n\n".join( - "[%s]\n" "sqlalchemy.url = %s" % (key, value.url) - for key, value in engines.items() - ) - - return _write_config_file( - """ -[alembic] -script_location = %s -sourceless = false - -databases = %s - -%s -[loggers] -keys = root - -[handlers] -keys = console - -[logger_root] -level = WARN -handlers = console -qualname = - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatters] -keys = generic - -[formatter_generic] -format = %%(levelname)-5.5s [%%(name)s] %%(message)s -datefmt = %%H:%%M:%%S - """ - % (dir_, databases, engines) - ) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/exclusions.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/exclusions.py deleted file mode 100644 index af18593..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/exclusions.py +++ /dev/null @@ -1,437 +0,0 @@ -# testing/exclusions.py -# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php -"""NOTE: copied/adapted from SQLAlchemy master for backwards compatibility; - this should be removable when Alembic targets SQLAlchemy 1.0.0 -""" - - -import contextlib -import operator - -from sqlalchemy import util -from sqlalchemy.util import decorator - -from . 
import config -from .compat import get_url_backend_name -from .compat import get_url_driver_name -from .plugin.plugin_base import SkipTest -from ..util import compat - - -def skip_if(predicate, reason=None): - rule = compound() - pred = _as_predicate(predicate, reason) - rule.skips.add(pred) - return rule - - -def fails_if(predicate, reason=None): - rule = compound() - pred = _as_predicate(predicate, reason) - rule.fails.add(pred) - return rule - - -class compound(object): - def __init__(self): - self.fails = set() - self.skips = set() - self.tags = set() - - def __add__(self, other): - return self.add(other) - - def add(self, *others): - copy = compound() - copy.fails.update(self.fails) - copy.skips.update(self.skips) - copy.tags.update(self.tags) - for other in others: - copy.fails.update(other.fails) - copy.skips.update(other.skips) - copy.tags.update(other.tags) - return copy - - def not_(self): - copy = compound() - copy.fails.update(NotPredicate(fail) for fail in self.fails) - copy.skips.update(NotPredicate(skip) for skip in self.skips) - copy.tags.update(self.tags) - return copy - - @property - def enabled(self): - return self.enabled_for_config(config._current) - - def enabled_for_config(self, config): - for predicate in self.skips.union(self.fails): - if predicate(config): - return False - else: - return True - - def matching_config_reasons(self, config): - return [ - predicate._as_string(config) - for predicate in self.skips.union(self.fails) - if predicate(config) - ] - - def include_test(self, include_tags, exclude_tags): - return bool( - not self.tags.intersection(exclude_tags) - and (not include_tags or self.tags.intersection(include_tags)) - ) - - def _extend(self, other): - self.skips.update(other.skips) - self.fails.update(other.fails) - self.tags.update(other.tags) - - def __call__(self, fn): - if hasattr(fn, "_sa_exclusion_extend"): - fn._sa_exclusion_extend._extend(self) - return fn - - @decorator - def decorate(fn, *args, **kw): - return self._do(config._current, fn, *args, **kw) - - decorated = decorate(fn) - decorated._sa_exclusion_extend = self - return decorated - - @contextlib.contextmanager - def fail_if(self): - all_fails = compound() - all_fails.fails.update(self.skips.union(self.fails)) - - try: - yield - except Exception as ex: - all_fails._expect_failure(config._current, ex) - else: - all_fails._expect_success(config._current) - - def _do(self, config, fn, *args, **kw): - for skip in self.skips: - if skip(config): - msg = "'%s' : %s" % (fn.__name__, skip._as_string(config)) - raise SkipTest(msg) - - try: - return_value = fn(*args, **kw) - except Exception as ex: - self._expect_failure(config, ex, name=fn.__name__) - else: - self._expect_success(config, name=fn.__name__) - return return_value - - def _expect_failure(self, config, ex, name="block"): - for fail in self.fails: - if fail(config): - print( - ( - "%s failed as expected (%s): %s " - % (name, fail._as_string(config), str(ex)) - ) - ) - break - else: - compat.raise_from_cause(ex) - - def _expect_success(self, config, name="block"): - if not self.fails: - return - for fail in self.fails: - if not fail(config): - break - else: - raise AssertionError( - "Unexpected success for '%s' (%s)" - % ( - name, - " and ".join( - fail._as_string(config) for fail in self.fails - ), - ) - ) - - -def requires_tag(tagname): - return tags([tagname]) - - -def tags(tagnames): - comp = compound() - comp.tags.update(tagnames) - return comp - - -def only_if(predicate, reason=None): - predicate = _as_predicate(predicate) - 
return skip_if(NotPredicate(predicate), reason) - - -def succeeds_if(predicate, reason=None): - predicate = _as_predicate(predicate) - return fails_if(NotPredicate(predicate), reason) - - -class Predicate(object): - @classmethod - def as_predicate(cls, predicate, description=None): - if isinstance(predicate, compound): - return cls.as_predicate(predicate.fails.union(predicate.skips)) - - elif isinstance(predicate, Predicate): - if description and predicate.description is None: - predicate.description = description - return predicate - elif isinstance(predicate, (list, set)): - return OrPredicate( - [cls.as_predicate(pred) for pred in predicate], description - ) - elif isinstance(predicate, tuple): - return SpecPredicate(*predicate) - elif isinstance(predicate, compat.string_types): - tokens = predicate.split(" ", 2) - op = spec = None - db = tokens.pop(0) - if tokens: - op = tokens.pop(0) - if tokens: - spec = tuple(int(d) for d in tokens.pop(0).split(".")) - return SpecPredicate(db, op, spec, description=description) - elif util.callable(predicate): - return LambdaPredicate(predicate, description) - else: - assert False, "unknown predicate type: %s" % predicate - - def _format_description(self, config, negate=False): - bool_ = self(config) - if negate: - bool_ = not negate - return self.description % { - "driver": get_url_driver_name(config.db.url), - "database": get_url_backend_name(config.db.url), - "doesnt_support": "doesn't support" if bool_ else "does support", - "does_support": "does support" if bool_ else "doesn't support", - } - - def _as_string(self, config=None, negate=False): - raise NotImplementedError() - - -class BooleanPredicate(Predicate): - def __init__(self, value, description=None): - self.value = value - self.description = description or "boolean %s" % value - - def __call__(self, config): - return self.value - - def _as_string(self, config, negate=False): - return self._format_description(config, negate=negate) - - -class SpecPredicate(Predicate): - def __init__(self, db, op=None, spec=None, description=None): - self.db = db - self.op = op - self.spec = spec - self.description = description - - _ops = { - "<": operator.lt, - ">": operator.gt, - "==": operator.eq, - "!=": operator.ne, - "<=": operator.le, - ">=": operator.ge, - "in": operator.contains, - "between": lambda val, pair: val >= pair[0] and val <= pair[1], - } - - def __call__(self, config): - engine = config.db - - if "+" in self.db: - dialect, driver = self.db.split("+") - else: - dialect, driver = self.db, None - - if dialect and engine.name != dialect: - return False - if driver is not None and engine.driver != driver: - return False - - if self.op is not None: - assert driver is None, "DBAPI version specs not supported yet" - - version = _server_version(engine) - oper = ( - hasattr(self.op, "__call__") and self.op or self._ops[self.op] - ) - return oper(version, self.spec) - else: - return True - - def _as_string(self, config, negate=False): - if self.description is not None: - return self._format_description(config) - elif self.op is None: - if negate: - return "not %s" % self.db - else: - return "%s" % self.db - else: - if negate: - return "not %s %s %s" % (self.db, self.op, self.spec) - else: - return "%s %s %s" % (self.db, self.op, self.spec) - - -class LambdaPredicate(Predicate): - def __init__(self, lambda_, description=None, args=None, kw=None): - spec = compat.inspect_getargspec(lambda_) - if not spec[0]: - self.lambda_ = lambda db: lambda_() - else: - self.lambda_ = lambda_ - self.args = args 
or () - self.kw = kw or {} - if description: - self.description = description - elif lambda_.__doc__: - self.description = lambda_.__doc__ - else: - self.description = "custom function" - - def __call__(self, config): - return self.lambda_(config) - - def _as_string(self, config, negate=False): - return self._format_description(config) - - -class NotPredicate(Predicate): - def __init__(self, predicate, description=None): - self.predicate = predicate - self.description = description - - def __call__(self, config): - return not self.predicate(config) - - def _as_string(self, config, negate=False): - if self.description: - return self._format_description(config, not negate) - else: - return self.predicate._as_string(config, not negate) - - -class OrPredicate(Predicate): - def __init__(self, predicates, description=None): - self.predicates = predicates - self.description = description - - def __call__(self, config): - for pred in self.predicates: - if pred(config): - return True - return False - - def _eval_str(self, config, negate=False): - if negate: - conjunction = " and " - else: - conjunction = " or " - return conjunction.join( - p._as_string(config, negate=negate) for p in self.predicates - ) - - def _negation_str(self, config): - if self.description is not None: - return "Not " + self._format_description(config) - else: - return self._eval_str(config, negate=True) - - def _as_string(self, config, negate=False): - if negate: - return self._negation_str(config) - else: - if self.description is not None: - return self._format_description(config) - else: - return self._eval_str(config) - - -_as_predicate = Predicate.as_predicate - - -def _is_excluded(db, op, spec): - return SpecPredicate(db, op, spec)(config._current) - - -def _server_version(engine): - """Return a server_version_info tuple.""" - - # force metadata to be retrieved - conn = engine.connect() - version = getattr(engine.dialect, "server_version_info", ()) - conn.close() - return version - - -def db_spec(*dbs): - return OrPredicate([Predicate.as_predicate(db) for db in dbs]) - - -def open(): # noqa - return skip_if(BooleanPredicate(False, "mark as execute")) - - -def closed(): - return skip_if(BooleanPredicate(True, "marked as skip")) - - -def fails(msg=None): - return fails_if(BooleanPredicate(True, msg or "expected to fail")) - - -@decorator -def future(fn, *arg): - return fails_if(LambdaPredicate(fn), "Future feature") - - -def fails_on(db, reason=None): - return fails_if(SpecPredicate(db), reason) - - -def fails_on_everything_except(*dbs): - return succeeds_if(OrPredicate([Predicate.as_predicate(db) for db in dbs])) - - -def skip(db, reason=None): - return skip_if(SpecPredicate(db), reason) - - -def only_on(dbs, reason=None): - return only_if( - OrPredicate([Predicate.as_predicate(db) for db in util.to_list(dbs)]) - ) - - -def exclude(db, op, spec, reason=None): - return skip_if(SpecPredicate(db, op, spec), reason) - - -def against(config, *queries): - assert queries, "no queries sent!" 
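As a usage sketch, the predicate helpers above compose directly into test decorators. The class below is hypothetical; note that string predicates may carry an inline "db op version" expression, which Predicate.as_predicate parses into a SpecPredicate:

    from alembic.testing import exclusions, TestBase


    class RoundTripTest(TestBase):
        @exclusions.skip_if("sqlite", "sqlite has no schema support")
        def test_schema_round_trip(self):
            ...

        # exclude(db, op, spec) builds a SpecPredicate, here mysql < (5, 6)
        @exclusions.exclude("mysql", "<", (5, 6), "needs newer mysql")
        def test_timestamps(self):
            ...

        # the operator/version may also be given inline in the string form
        @exclusions.fails_if("postgresql < 9.2", "no range types")
        def test_ranges(self):
            ...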
- return OrPredicate([Predicate.as_predicate(query) for query in queries])( - config - ) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/fixtures.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/fixtures.py deleted file mode 100644 index e0b8fdf..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/fixtures.py +++ /dev/null @@ -1,178 +0,0 @@ -# coding: utf-8 -from contextlib import contextmanager -import io -import re - -from sqlalchemy import create_engine -from sqlalchemy import MetaData -from sqlalchemy import text - -import alembic -from . import mock -from .assertions import _get_dialect -from .assertions import eq_ -from .plugin.plugin_base import SkipTest -from .. import util -from ..environment import EnvironmentContext -from ..migration import MigrationContext -from ..operations import Operations -from ..util.compat import configparser -from ..util.compat import string_types -from ..util.compat import text_type - -testing_config = configparser.ConfigParser() -testing_config.read(["test.cfg"]) - - -if not util.sqla_094: - - class TestBase(object): - # A sequence of database names to always run, regardless of the - # constraints below. - __whitelist__ = () - - # A sequence of requirement names matching testing.requires decorators - __requires__ = () - - # A sequence of dialect names to exclude from the test class. - __unsupported_on__ = () - - # If present, test class is only runnable for the *single* specified - # dialect. If you need multiple, use __unsupported_on__ and invert. - __only_on__ = None - - # A sequence of no-arg callables. If any are True, the entire - # testcase is skipped. - __skip_if__ = None - - def assert_(self, val, msg=None): - assert val, msg - - # apparently a handful of tests are doing this....OK - def setup(self): - if hasattr(self, "setUp"): - self.setUp() - - def teardown(self): - if hasattr(self, "tearDown"): - self.tearDown() - - -else: - from sqlalchemy.testing.fixtures import TestBase # noqa - - -def capture_db(): - buf = [] - - def dump(sql, *multiparams, **params): - buf.append(str(sql.compile(dialect=engine.dialect))) - - engine = create_engine("postgresql://", strategy="mock", executor=dump) - return engine, buf - - -_engs = {} - - -@contextmanager -def capture_context_buffer(**kw): - if kw.pop("bytes_io", False): - buf = io.BytesIO() - else: - buf = io.StringIO() - - kw.update({"dialect_name": "sqlite", "output_buffer": buf}) - conf = EnvironmentContext.configure - - def configure(*arg, **opt): - opt.update(**kw) - return conf(*arg, **opt) - - with mock.patch.object(EnvironmentContext, "configure", configure): - yield buf - - -def op_fixture( - dialect="default", - as_sql=False, - naming_convention=None, - literal_binds=False, - native_boolean=None, -): - - opts = {} - if naming_convention: - if not util.sqla_092: - raise SkipTest( - "naming_convention feature requires " "sqla 0.9.2 or greater" - ) - opts["target_metadata"] = MetaData(naming_convention=naming_convention) - - class buffer_(object): - def __init__(self): - self.lines = [] - - def write(self, msg): - msg = msg.strip() - msg = re.sub(r"[\n\t]", "", msg) - if as_sql: - # the impl produces soft tabs, - # so search for blocks of 4 spaces - msg = re.sub(r" ", "", msg) - msg = re.sub(r"\;\n*$", "", msg) - - self.lines.append(msg) - - def flush(self): - pass - - buf = buffer_() - - class ctx(MigrationContext): - def clear_assertions(self): - buf.lines[:] = [] - - def assert_(self, *sql): - # TODO: make this more 
flexible about - # whitespace and such - eq_(buf.lines, list(sql)) - - def assert_contains(self, sql): - for stmt in buf.lines: - if sql in stmt: - return - else: - assert False, "Could not locate fragment %r in %r" % ( - sql, - buf.lines, - ) - - if as_sql: - opts["as_sql"] = as_sql - if literal_binds: - opts["literal_binds"] = literal_binds - ctx_dialect = _get_dialect(dialect) - if native_boolean is not None: - ctx_dialect.supports_native_boolean = native_boolean - # this is new as of SQLAlchemy 1.2.7 and is used by SQL Server, - # which breaks assumptions in the alembic test suite - ctx_dialect.non_native_boolean_check_constraint = True - if not as_sql: - - def execute(stmt, *multiparam, **param): - if isinstance(stmt, string_types): - stmt = text(stmt) - assert stmt.supports_execution - sql = text_type(stmt.compile(dialect=ctx_dialect)) - - buf.write(sql) - - connection = mock.Mock(dialect=ctx_dialect, execute=execute) - else: - opts["output_buffer"] = buf - connection = None - context = ctx(ctx_dialect, connection, opts) - - alembic.op._proxy = Operations(context) - return context diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/mock.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/mock.py deleted file mode 100644 index c0c38a0..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/mock.py +++ /dev/null @@ -1,29 +0,0 @@ -# testing/mock.py -# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Import stub for mock library. - - .. note:: - - copied/adapted from SQLAlchemy master for backwards compatibility; - this should be removable when Alembic targets SQLAlchemy 1.0.0 - -""" -from __future__ import absolute_import - -from ..util.compat import py3k - -if py3k: - from unittest.mock import MagicMock, Mock, call, patch, ANY -else: - try: - from mock import MagicMock, Mock, call, patch, ANY # noqa - except ImportError: - raise ImportError( - "SQLAlchemy's test suite requires the " - "'mock' library as of 0.8.2." - ) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/plugin/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/plugin/__init__.py deleted file mode 100644 index 98616f4..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/plugin/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -"""NOTE: copied/adapted from SQLAlchemy master for backwards compatibility; - this should be removable when Alembic targets SQLAlchemy 1.0.0 -""" diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/plugin/bootstrap.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/plugin/bootstrap.py deleted file mode 100644 index 4bd415d..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/plugin/bootstrap.py +++ /dev/null @@ -1,47 +0,0 @@ -""" -Bootstrapper for nose/pytest plugins. - -The entire rationale for this system is to get the modules in plugin/ -imported without importing all of the supporting library, so that we can -set up things for testing before coverage starts. 
- -The rationale for all of plugin/ being *in* the supporting library in the -first place is so that the testing and plugin suite is available to other -libraries, mainly external SQLAlchemy and Alembic dialects, to make use -of the same test environment and standard suites available to -SQLAlchemy/Alembic themselves without the need to ship/install a separate -package outside of SQLAlchemy. - -NOTE: copied/adapted from SQLAlchemy master for backwards compatibility; -this should be removable when Alembic targets SQLAlchemy 1.0.0. - -""" - -import os -import sys - -bootstrap_file = locals()["bootstrap_file"] -to_bootstrap = locals()["to_bootstrap"] - - -def load_file_as_module(name): - path = os.path.join(os.path.dirname(bootstrap_file), "%s.py" % name) - if sys.version_info.major >= 3: - from importlib import machinery - - mod = machinery.SourceFileLoader(name, path).load_module() - else: - import imp - - mod = imp.load_source(name, path) - return mod - - -if to_bootstrap == "pytest": - sys.modules["alembic_plugin_base"] = load_file_as_module("plugin_base") - sys.modules["alembic_pytestplugin"] = load_file_as_module("pytestplugin") -elif to_bootstrap == "nose": - sys.modules["alembic_plugin_base"] = load_file_as_module("plugin_base") - sys.modules["alembic_noseplugin"] = load_file_as_module("noseplugin") -else: - raise Exception("unknown bootstrap: %s" % to_bootstrap) # noqa diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/plugin/noseplugin.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/plugin/noseplugin.py deleted file mode 100644 index fafb9e1..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/plugin/noseplugin.py +++ /dev/null @@ -1,106 +0,0 @@ -# plugin/noseplugin.py -# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -Enhance nose with extra options and behaviors for running SQLAlchemy tests. - - -NOTE: copied/adapted from SQLAlchemy master for backwards compatibility; -this should be removable when Alembic targets SQLAlchemy 1.0.0. - -""" - -try: - # installed by bootstrap.py - import alembic_plugin_base as plugin_base -except ImportError: - # assume we're a package, use traditional import - from . 
import plugin_base - -import os -import sys - -from nose.plugins import Plugin - -fixtures = None - -py3k = sys.version_info.major >= 3 - - -class NoseSQLAlchemy(Plugin): - enabled = True - - name = "sqla_testing" - score = 100 - - def options(self, parser, env=os.environ): - Plugin.options(self, parser, env) - opt = parser.add_option - - def make_option(name, **kw): - callback_ = kw.pop("callback", None) - if callback_: - - def wrap_(option, opt_str, value, parser): - callback_(opt_str, value, parser) - - kw["callback"] = wrap_ - opt(name, **kw) - - plugin_base.setup_options(make_option) - plugin_base.read_config() - - def configure(self, options, conf): - super(NoseSQLAlchemy, self).configure(options, conf) - plugin_base.pre_begin(options) - - plugin_base.set_coverage_flag(options.enable_plugin_coverage) - - def begin(self): - global fixtures - from alembic.testing import fixtures # noqa - - plugin_base.post_begin() - - def describeTest(self, test): - return "" - - def wantFunction(self, fn): - return False - - def wantMethod(self, fn): - if py3k: - if not hasattr(fn.__self__, "cls"): - return False - cls = fn.__self__.cls - else: - cls = fn.im_class - return plugin_base.want_method(cls, fn) - - def wantClass(self, cls): - return plugin_base.want_class(cls) - - def beforeTest(self, test): - plugin_base.before_test( - test, - test.test.cls.__module__, - test.test.cls, - test.test.method.__name__, - ) - - def afterTest(self, test): - plugin_base.after_test(test) - - def startContext(self, ctx): - if not isinstance(ctx, type) or not issubclass(ctx, fixtures.TestBase): - return - plugin_base.start_test_class(ctx) - - def stopContext(self, ctx): - if not isinstance(ctx, type) or not issubclass(ctx, fixtures.TestBase): - return - plugin_base.stop_test_class(ctx) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/plugin/plugin_base.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/plugin/plugin_base.py deleted file mode 100644 index 44930c8..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/plugin/plugin_base.py +++ /dev/null @@ -1,645 +0,0 @@ -# plugin/plugin_base.py -# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php -"""Testing extensions. - -this module is designed to work as a testing-framework-agnostic library, -so that we can continue to support nose and also begin adding new -functionality via py.test. - -NOTE: copied/adapted from SQLAlchemy master for backwards compatibility; -this should be removable when Alembic targets SQLAlchemy 1.0.0 - - -""" - -from __future__ import absolute_import - -import re -import sys - -try: - # unitttest has a SkipTest also but pytest doesn't - # honor it unless nose is imported too... 
- from nose import SkipTest -except ImportError: - from pytest import skip - - SkipTest = skip.Exception - -py3k = sys.version_info.major >= 3 - -if py3k: - import configparser -else: - import ConfigParser as configparser - -# late imports -fixtures = None -engines = None -provision = None -exclusions = None -warnings = None -assertions = None -requirements = None -config = None -util = None -file_config = None - - -logging = None -include_tags = set() -exclude_tags = set() -options = None - - -def setup_options(make_option): - make_option( - "--log-info", - action="callback", - type="string", - callback=_log, - help="turn on info logging for (multiple OK)", - ) - make_option( - "--log-debug", - action="callback", - type="string", - callback=_log, - help="turn on debug logging for (multiple OK)", - ) - make_option( - "--db", - action="append", - type="string", - dest="db", - help="Use prefab database uri. Multiple OK, " - "first one is run by default.", - ) - make_option( - "--dbs", - action="callback", - zeroarg_callback=_list_dbs, - help="List available prefab dbs", - ) - make_option( - "--dburi", - action="append", - type="string", - dest="dburi", - help="Database uri. Multiple OK, " "first one is run by default.", - ) - make_option( - "--dropfirst", - action="store_true", - dest="dropfirst", - help="Drop all tables in the target database first", - ) - make_option( - "--backend-only", - action="store_true", - dest="backend_only", - help="Run only tests marked with __backend__", - ) - make_option( - "--postgresql-templatedb", - type="string", - help="name of template database to use for Postgresql " - "CREATE DATABASE (defaults to current database)", - ) - make_option( - "--low-connections", - action="store_true", - dest="low_connections", - help="Use a low number of distinct connections - " - "i.e. for Oracle TNS", - ) - make_option( - "--write-idents", - type="string", - dest="write_idents", - help="write out generated follower idents to , " - "when -n is used", - ) - make_option( - "--reversetop", - action="store_true", - dest="reversetop", - default=False, - help="Use a random-ordering set implementation in the ORM " - "(helps reveal dependency issues)", - ) - make_option( - "--requirements", - action="callback", - type="string", - callback=_requirements_opt, - help="requirements class for testing, overrides setup.cfg", - ) - make_option( - "--with-cdecimal", - action="store_true", - dest="cdecimal", - default=False, - help="Monkeypatch the cdecimal library into Python 'decimal' " - "for all tests", - ) - make_option( - "--include-tag", - action="callback", - callback=_include_tag, - type="string", - help="Include tests with tag ", - ) - make_option( - "--exclude-tag", - action="callback", - callback=_exclude_tag, - type="string", - help="Exclude tests with tag ", - ) - make_option( - "--mysql-engine", - action="store", - dest="mysql_engine", - default=None, - help="Use the specified MySQL storage engine for all tables, " - "default is a db-default/InnoDB combo.", - ) - - -def configure_follower(follower_ident): - """Configure required state for a follower. - - This invokes in the parent process and typically includes - database creation. - - """ - from alembic.testing import provision - - provision.FOLLOWER_IDENT = follower_ident - - -def memoize_important_follower_config(dict_): - """Store important configuration we will need to send to a follower. - - This invokes in the parent process after normal config is set up. 
- - This is necessary as py.test seems to not be using forking, so we - start with nothing in memory, *but* it isn't running our argparse - callables, so we have to just copy all of that over. - - """ - dict_["memoized_config"] = { - "include_tags": include_tags, - "exclude_tags": exclude_tags, - } - - -def restore_important_follower_config(dict_): - """Restore important configuration needed by a follower. - - This invokes in the follower process. - - """ - include_tags.update(dict_["memoized_config"]["include_tags"]) - exclude_tags.update(dict_["memoized_config"]["exclude_tags"]) - - -def read_config(): - global file_config - file_config = configparser.ConfigParser() - file_config.read(["setup.cfg", "test.cfg"]) - - -def pre_begin(opt): - """things to set up early, before coverage might be setup.""" - global options - options = opt - for fn in pre_configure: - fn(options, file_config) - - -def set_coverage_flag(value): - options.has_coverage = value - - -def post_begin(): - """things to set up later, once we know coverage is running.""" - - # Lazy setup of other options (post coverage) - for fn in post_configure: - fn(options, file_config) - - # late imports, has to happen after config as well - # as nose plugins like coverage - global util, fixtures, engines, exclusions, assertions - global warnings, profiling, config, testing - from alembic.testing import config, warnings, exclusions # noqa - from alembic.testing import engines, fixtures # noqa - from sqlalchemy import util # noqa - - warnings.setup_filters() - - -def _log(opt_str, value, parser): - global logging - if not logging: - import logging - - logging.basicConfig() - - if opt_str.endswith("-info"): - logging.getLogger(value).setLevel(logging.INFO) - elif opt_str.endswith("-debug"): - logging.getLogger(value).setLevel(logging.DEBUG) - - -def _list_dbs(*args): - print("Available --db options (use --dburi to override)") - for macro in sorted(file_config.options("db")): - print("%20s\t%s" % (macro, file_config.get("db", macro))) - sys.exit(0) - - -def _requirements_opt(opt_str, value, parser): - _setup_requirements(value) - - -def _exclude_tag(opt_str, value, parser): - exclude_tags.add(value.replace("-", "_")) - - -def _include_tag(opt_str, value, parser): - include_tags.add(value.replace("-", "_")) - - -pre_configure = [] -post_configure = [] - - -def pre(fn): - pre_configure.append(fn) - return fn - - -def post(fn): - post_configure.append(fn) - return fn - - -@pre -def _setup_options(opt, file_config): - global options - options = opt - - -@pre -def _monkeypatch_cdecimal(options, file_config): - if options.cdecimal: - import cdecimal - - sys.modules["decimal"] = cdecimal - - -@post -def _engine_uri(options, file_config): - from alembic.testing import config - from alembic.testing import provision - - if options.dburi: - db_urls = list(options.dburi) - else: - db_urls = [] - - if options.db: - for db_token in options.db: - for db in re.split(r"[,\s]+", db_token): - if db not in file_config.options("db"): - raise RuntimeError( - "Unknown URI specifier '%s'. " - "Specify --dbs for known uris." 
% db - ) - else: - db_urls.append(file_config.get("db", db)) - - if not db_urls: - db_urls.append(file_config.get("db", "default")) - - for db_url in db_urls: - - if options.write_idents and provision.FOLLOWER_IDENT: # != 'master': - with open(options.write_idents, "a") as file_: - file_.write(provision.FOLLOWER_IDENT + " " + db_url + "\n") - - cfg = provision.setup_config( - db_url, options, file_config, provision.FOLLOWER_IDENT - ) - - if not config._current: - cfg.set_as_current(cfg) - - -@post -def _requirements(options, file_config): - - requirement_cls = file_config.get("sqla_testing", "requirement_cls") - _setup_requirements(requirement_cls) - - -def _setup_requirements(argument): - from alembic.testing import config - - if config.requirements is not None: - return - - modname, clsname = argument.split(":") - - # importlib.import_module() only introduced in 2.7, a little - # late - mod = __import__(modname) - for component in modname.split(".")[1:]: - mod = getattr(mod, component) - req_cls = getattr(mod, clsname) - - config.requirements = req_cls() - - -@post -def _prep_testing_database(options, file_config): - from alembic.testing import config - from alembic.testing.exclusions import against - from sqlalchemy import schema - from alembic import util - - from sqlalchemy import inspect - - if options.dropfirst: - for cfg in config.Config.all_configs(): - e = cfg.db - inspector = inspect(e) - try: - view_names = inspector.get_view_names() - except NotImplementedError: - pass - else: - for vname in view_names: - e.execute( - schema._DropView( - schema.Table(vname, schema.MetaData()) - ) - ) - - if config.requirements.schemas.enabled_for_config(cfg): - try: - view_names = inspector.get_view_names(schema="test_schema") - except NotImplementedError: - pass - else: - for vname in view_names: - e.execute( - schema._DropView( - schema.Table( - vname, - schema.MetaData(), - schema="test_schema", - ) - ) - ) - - for tname in reversed( - inspector.get_table_names(order_by="foreign_key") - ): - e.execute( - schema.DropTable(schema.Table(tname, schema.MetaData())) - ) - - if config.requirements.schemas.enabled_for_config(cfg): - for tname in reversed( - inspector.get_table_names( - order_by="foreign_key", schema="test_schema" - ) - ): - e.execute( - schema.DropTable( - schema.Table( - tname, schema.MetaData(), schema="test_schema" - ) - ) - ) - - if against(cfg, "postgresql") and util.sqla_100: - from sqlalchemy.dialects import postgresql - - for enum in inspector.get_enums("*"): - e.execute( - postgresql.DropEnumType( - postgresql.ENUM( - name=enum["name"], schema=enum["schema"] - ) - ) - ) - - -@post -def _reverse_topological(options, file_config): - if options.reversetop: - from sqlalchemy.orm.util import randomize_unitofwork - - randomize_unitofwork() - - -@post -def _post_setup_options(opt, file_config): - from alembic.testing import config - - config.options = options - config.file_config = file_config - - -def want_class(cls): - if not issubclass(cls, fixtures.TestBase): - return False - elif cls.__name__.startswith("_"): - return False - elif config.options.backend_only and not getattr( - cls, "__backend__", False - ): - return False - else: - return True - - -def want_method(cls, fn): - if not fn.__name__.startswith("test_"): - return False - elif fn.__module__ is None: - return False - elif include_tags: - return ( - hasattr(cls, "__tags__") - and exclusions.tags(cls.__tags__).include_test( - include_tags, exclude_tags - ) - ) or ( - hasattr(fn, "_sa_exclusion_extend") - and 
fn._sa_exclusion_extend.include_test( - include_tags, exclude_tags - ) - ) - elif exclude_tags and hasattr(cls, "__tags__"): - return exclusions.tags(cls.__tags__).include_test( - include_tags, exclude_tags - ) - elif exclude_tags and hasattr(fn, "_sa_exclusion_extend"): - return fn._sa_exclusion_extend.include_test(include_tags, exclude_tags) - else: - return True - - -def generate_sub_tests(cls, module): - if getattr(cls, "__backend__", False): - for cfg in _possible_configs_for_cls(cls): - orig_name = cls.__name__ - - # we can have special chars in these names except for the - # pytest junit plugin, which is tripped up by the brackets - # and periods, so sanitize - - alpha_name = re.sub(r"[_\[\]\.]+", "_", cfg.name) - alpha_name = re.sub("_+$", "", alpha_name) - name = "%s_%s" % (cls.__name__, alpha_name) - - subcls = type( - name, - (cls,), - {"_sa_orig_cls_name": orig_name, "__only_on_config__": cfg}, - ) - setattr(module, name, subcls) - yield subcls - else: - yield cls - - -def start_test_class(cls): - _do_skips(cls) - _setup_engine(cls) - - -def stop_test_class(cls): - # from sqlalchemy import inspect - # assert not inspect(testing.db).get_table_names() - _restore_engine() - - -def _restore_engine(): - config._current.reset() - - -def _setup_engine(cls): - if getattr(cls, "__engine_options__", None): - eng = engines.testing_engine(options=cls.__engine_options__) - config._current.push_engine(eng) - - -def before_test(test, test_module_name, test_class, test_name): - pass - - -def after_test(test): - pass - - -def _possible_configs_for_cls(cls, reasons=None): - all_configs = set(config.Config.all_configs()) - - if cls.__unsupported_on__: - spec = exclusions.db_spec(*cls.__unsupported_on__) - for config_obj in list(all_configs): - if spec(config_obj): - all_configs.remove(config_obj) - - if getattr(cls, "__only_on__", None): - spec = exclusions.db_spec(*util.to_list(cls.__only_on__)) - for config_obj in list(all_configs): - if not spec(config_obj): - all_configs.remove(config_obj) - - if getattr(cls, "__only_on_config__", None): - all_configs.intersection_update([cls.__only_on_config__]) - - if hasattr(cls, "__requires__"): - requirements = config.requirements - for config_obj in list(all_configs): - for requirement in cls.__requires__: - check = getattr(requirements, requirement) - - skip_reasons = check.matching_config_reasons(config_obj) - if skip_reasons: - all_configs.remove(config_obj) - if reasons is not None: - reasons.extend(skip_reasons) - break - - if hasattr(cls, "__prefer_requires__"): - non_preferred = set() - requirements = config.requirements - for config_obj in list(all_configs): - for requirement in cls.__prefer_requires__: - check = getattr(requirements, requirement) - - if not check.enabled_for_config(config_obj): - non_preferred.add(config_obj) - if all_configs.difference(non_preferred): - all_configs.difference_update(non_preferred) - - return all_configs - - -def _do_skips(cls): - reasons = [] - all_configs = _possible_configs_for_cls(cls, reasons) - - if getattr(cls, "__skip_if__", False): - for c in getattr(cls, "__skip_if__"): - if c(): - raise SkipTest( - "'%s' skipped by %s" % (cls.__name__, c.__name__) - ) - - if not all_configs: - msg = "'%s' unsupported on any DB implementation %s%s" % ( - cls.__name__, - ", ".join( - "'%s(%s)+%s'" - % ( - config_obj.db.name, - ".".join( - str(dig) - for dig in config_obj.db.dialect.server_version_info - ), - config_obj.db.driver, - ) - for config_obj in config.Config.all_configs() - ), - ", ".join(reasons), - ) - raise 
SkipTest(msg) - elif hasattr(cls, "__prefer_backends__"): - non_preferred = set() - spec = exclusions.db_spec(*util.to_list(cls.__prefer_backends__)) - for config_obj in all_configs: - if not spec(config_obj): - non_preferred.add(config_obj) - if all_configs.difference(non_preferred): - all_configs.difference_update(non_preferred) - - if config._current not in all_configs: - _setup_config(all_configs.pop(), cls) - - -def _setup_config(config_obj, ctx): - config._current.push(config_obj) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/plugin/pytestplugin.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/plugin/pytestplugin.py deleted file mode 100644 index 5a3bb73..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/plugin/pytestplugin.py +++ /dev/null @@ -1,235 +0,0 @@ -"""NOTE: copied/adapted from SQLAlchemy master for backwards compatibility; - this should be removable when Alembic targets SQLAlchemy 1.0.0. -""" - -try: - # installed by bootstrap.py - import alembic_plugin_base as plugin_base -except ImportError: - # assume we're a package, use traditional import - from . import plugin_base - -import argparse -import collections -import inspect -import os -import sys - -import pytest - -py3k = sys.version_info.major >= 3 - -try: - import xdist # noqa - - has_xdist = True -except ImportError: - has_xdist = False - - -def pytest_addoption(parser): - group = parser.getgroup("sqlalchemy") - - def make_option(name, **kw): - callback_ = kw.pop("callback", None) - if callback_: - - class CallableAction(argparse.Action): - def __call__( - self, parser, namespace, values, option_string=None - ): - callback_(option_string, values, parser) - - kw["action"] = CallableAction - - zeroarg_callback = kw.pop("zeroarg_callback", None) - if zeroarg_callback: - - class CallableAction(argparse.Action): - def __init__( - self, - option_strings, - dest, - default=False, - required=False, - help=None, # noqa - ): - super(CallableAction, self).__init__( - option_strings=option_strings, - dest=dest, - nargs=0, - const=True, - default=default, - required=required, - help=help, - ) - - def __call__( - self, parser, namespace, values, option_string=None - ): - zeroarg_callback(option_string, values, parser) - - kw["action"] = CallableAction - - group.addoption(name, **kw) - - plugin_base.setup_options(make_option) - plugin_base.read_config() - - -def pytest_configure(config): - if hasattr(config, "slaveinput"): - plugin_base.restore_important_follower_config(config.slaveinput) - plugin_base.configure_follower(config.slaveinput["follower_ident"]) - else: - if config.option.write_idents and os.path.exists( - config.option.write_idents - ): - os.remove(config.option.write_idents) - - plugin_base.pre_begin(config.option) - - plugin_base.set_coverage_flag( - bool(getattr(config.option, "cov_source", False)) - ) - - -def pytest_sessionstart(session): - plugin_base.post_begin() - - -if has_xdist: - import uuid - - def pytest_configure_node(node): - # the master for each node fills slaveinput dictionary - # which pytest-xdist will transfer to the subprocess - - plugin_base.memoize_important_follower_config(node.slaveinput) - - node.slaveinput["follower_ident"] = "test_%s" % uuid.uuid4().hex[0:12] - from alembic.testing import provision - - provision.create_follower_db(node.slaveinput["follower_ident"]) - - def pytest_testnodedown(node, error): - from alembic.testing import provision - - provision.drop_follower_db(node.slaveinput["follower_ident"]) - - 
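The `CallableAction` wrappers above exist because argparse, unlike optparse, has no callback-style action: plugin_base hands over plain callables, and the wrapper classes adapt them. A minimal, self-contained sketch of the zero-argument variant (the option name and callback here are hypothetical, not part of the plugin):

    import argparse

    def on_flag(option_string, values, parser):
        # stand-in for a zeroarg_callback: runs as a side effect when
        # the flag is seen on the command line
        print("saw %s" % option_string)

    class CallableAction(argparse.Action):
        def __init__(self, option_strings, dest, default=False,
                     required=False, help=None):
            # nargs=0 turns the option into a flag that consumes no value
            super(CallableAction, self).__init__(
                option_strings=option_strings, dest=dest, nargs=0,
                const=True, default=default, required=required, help=help)

        def __call__(self, parser, namespace, values, option_string=None):
            on_flag(option_string, values, parser)

    parser = argparse.ArgumentParser()
    parser.add_argument("--require-backend", action=CallableAction)
    parser.parse_args(["--require-backend"])  # prints: saw --require-backend

pytest's `group.addoption()` accepts the same `action=` classes as argparse, which is why the plugin can inject these wrappers unchanged.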
-def pytest_collection_modifyitems(session, config, items): - # look for all those classes that specify __backend__ and - # expand them out into per-database test cases. - - # this is much easier to do within pytest_pycollect_makeitem, however - # pytest is iterating through cls.__dict__ as makeitem is - # called which causes a "dictionary changed size" error on py3k. - # I'd submit a pullreq for them to turn it into a list first, but - # it's to suit the rather odd use case here which is that we are adding - # new classes to a module on the fly. - - rebuilt_items = collections.defaultdict(list) - items[:] = [ - item for item in items if isinstance(item.parent, pytest.Instance) - ] - test_classes = set(item.parent for item in items) - for test_class in test_classes: - for sub_cls in plugin_base.generate_sub_tests( - test_class.cls, test_class.parent.module - ): - if sub_cls is not test_class.cls: - list_ = rebuilt_items[test_class.cls] - - for inst in pytest.Class( - sub_cls.__name__, parent=test_class.parent.parent - ).collect(): - list_.extend(inst.collect()) - - newitems = [] - for item in items: - if item.parent.cls in rebuilt_items: - newitems.extend(rebuilt_items[item.parent.cls]) - rebuilt_items[item.parent.cls][:] = [] - else: - newitems.append(item) - - # seems like the functions attached to a test class aren't sorted already? - # is that true and why's that? (when using unittest, they're sorted) - items[:] = sorted( - newitems, - key=lambda item: ( - item.parent.parent.parent.name, - item.parent.parent.name, - item.name, - ), - ) - - -def pytest_pycollect_makeitem(collector, name, obj): - if inspect.isclass(obj) and plugin_base.want_class(obj): - return pytest.Class(name, parent=collector) - elif ( - inspect.isfunction(obj) - and isinstance(collector, pytest.Instance) - and plugin_base.want_method(collector.cls, obj) - ): - return pytest.Function(name, parent=collector) - else: - return [] - - -_current_class = None - - -def pytest_runtest_setup(item): - # here we seem to get called only based on what we collected - # in pytest_collection_modifyitems. So to do class-based stuff - # we have to tear that out. - global _current_class - - if not isinstance(item, pytest.Function): - return - - # ... so we're doing a little dance here to figure it out... - if _current_class is None: - class_setup(item.parent.parent) - _current_class = item.parent.parent - - # this is needed for the class-level, to ensure that the - # teardown runs after the class is completed with its own - # class-level teardown... 
- def finalize(): - global _current_class - class_teardown(item.parent.parent) - _current_class = None - - item.parent.parent.addfinalizer(finalize) - - test_setup(item) - - -def pytest_runtest_teardown(item): - # ...but this works better as the hook here rather than - # using a finalizer, as the finalizer seems to get in the way - # of the test reporting failures correctly (you get a bunch of - # py.test assertion stuff instead) - test_teardown(item) - - -def test_setup(item): - plugin_base.before_test( - item, item.parent.module.__name__, item.parent.cls, item.name - ) - - -def test_teardown(item): - plugin_base.after_test(item) - - -def class_setup(item): - plugin_base.start_test_class(item.cls) - - -def class_teardown(item): - plugin_base.stop_test_class(item.cls) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/provision.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/provision.py deleted file mode 100644 index 9687b75..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/provision.py +++ /dev/null @@ -1,351 +0,0 @@ -"""NOTE: copied/adapted from SQLAlchemy master for backwards compatibility; - this should be removable when Alembic targets SQLAlchemy 1.0.0 -""" -import collections -import logging -import os -import time - -from sqlalchemy import create_engine -from sqlalchemy import exc -from sqlalchemy import text -from sqlalchemy.engine import url as sa_url - -from . import config -from . import engines -from .compat import get_url_backend_name -from ..util import compat - -log = logging.getLogger(__name__) - -FOLLOWER_IDENT = None - - -class register(object): - def __init__(self): - self.fns = {} - - @classmethod - def init(cls, fn): - return register().for_db("*")(fn) - - def for_db(self, dbname): - def decorate(fn): - self.fns[dbname] = fn - return self - - return decorate - - def __call__(self, cfg, *arg): - if isinstance(cfg, compat.string_types): - url = sa_url.make_url(cfg) - elif isinstance(cfg, sa_url.URL): - url = cfg - else: - url = cfg.db.url - backend = get_url_backend_name(url) - if backend in self.fns: - return self.fns[backend](cfg, *arg) - else: - return self.fns["*"](cfg, *arg) - - -def create_follower_db(follower_ident): - - for cfg in _configs_for_db_operation(): - _create_db(cfg, cfg.db, follower_ident) - - -def configure_follower(follower_ident): - for cfg in config.Config.all_configs(): - _configure_follower(cfg, follower_ident) - - -def setup_config(db_url, options, file_config, follower_ident): - if follower_ident: - db_url = _follower_url_from_main(db_url, follower_ident) - db_opts = {} - _update_db_opts(db_url, db_opts) - eng = engines.testing_engine(db_url, db_opts) - _post_configure_engine(db_url, eng, follower_ident) - eng.connect().close() - - cfg = config.Config.register(eng, db_opts, options, file_config) - if follower_ident: - _configure_follower(cfg, follower_ident) - return cfg - - -def drop_follower_db(follower_ident): - for cfg in _configs_for_db_operation(): - _drop_db(cfg, cfg.db, follower_ident) - - -def _configs_for_db_operation(): - hosts = set() - - for cfg in config.Config.all_configs(): - cfg.db.dispose() - - for cfg in config.Config.all_configs(): - url = cfg.db.url - backend = get_url_backend_name(url) - host_conf = (backend, url.username, url.host, url.database) - - if host_conf not in hosts: - yield cfg - hosts.add(host_conf) - - for cfg in config.Config.all_configs(): - cfg.db.dispose() - - -@register.init -def _create_db(cfg, eng, ident): - raise 
NotImplementedError("no DB creation routine for cfg: %s" % eng.url) - - -@register.init -def _drop_db(cfg, eng, ident): - raise NotImplementedError("no DB drop routine for cfg: %s" % eng.url) - - -@register.init -def _update_db_opts(db_url, db_opts): - pass - - -@register.init -def _configure_follower(cfg, ident): - pass - - -@register.init -def _post_configure_engine(url, engine, follower_ident): - pass - - -@register.init -def _follower_url_from_main(url, ident): - url = sa_url.make_url(url) - url.database = ident - return url - - -@_update_db_opts.for_db("mssql") -def _mssql_update_db_opts(db_url, db_opts): - db_opts["legacy_schema_aliasing"] = False - - -@_follower_url_from_main.for_db("sqlite") -def _sqlite_follower_url_from_main(url, ident): - url = sa_url.make_url(url) - if not url.database or url.database == ":memory:": - return url - else: - return sa_url.make_url("sqlite:///%s.db" % ident) - - -@_post_configure_engine.for_db("sqlite") -def _sqlite_post_configure_engine(url, engine, follower_ident): - from sqlalchemy import event - - @event.listens_for(engine, "connect") - def connect(dbapi_connection, connection_record): - # use file DBs in all cases, memory acts kind of strangely - # as an attached - if not follower_ident: - dbapi_connection.execute( - 'ATTACH DATABASE "test_schema.db" AS test_schema' - ) - else: - dbapi_connection.execute( - 'ATTACH DATABASE "%s_test_schema.db" AS test_schema' - % follower_ident - ) - - -@_create_db.for_db("postgresql") -def _pg_create_db(cfg, eng, ident): - template_db = cfg.options.postgresql_templatedb - - with eng.connect().execution_options(isolation_level="AUTOCOMMIT") as conn: - try: - _pg_drop_db(cfg, conn, ident) - except Exception: - pass - if not template_db: - template_db = conn.scalar("select current_database()") - - attempt = 0 - while True: - try: - conn.execute( - "CREATE DATABASE %s TEMPLATE %s" % (ident, template_db) - ) - except exc.OperationalError as err: - attempt += 1 - if attempt >= 3: - raise - if "accessed by other users" in str(err): - log.info( - "Waiting to create %s, URI %r, " - "template DB %s is in use sleeping for .5", - ident, - eng.url, - template_db, - ) - time.sleep(0.5) - else: - break - - -@_create_db.for_db("mysql") -def _mysql_create_db(cfg, eng, ident): - with eng.connect() as conn: - try: - _mysql_drop_db(cfg, conn, ident) - except Exception: - pass - conn.execute("CREATE DATABASE %s" % ident) - conn.execute("CREATE DATABASE %s_test_schema" % ident) - conn.execute("CREATE DATABASE %s_test_schema_2" % ident) - - -@_configure_follower.for_db("mysql") -def _mysql_configure_follower(config, ident): - config.test_schema = "%s_test_schema" % ident - config.test_schema_2 = "%s_test_schema_2" % ident - - -@_create_db.for_db("sqlite") -def _sqlite_create_db(cfg, eng, ident): - pass - - -@_drop_db.for_db("postgresql") -def _pg_drop_db(cfg, eng, ident): - with eng.connect().execution_options(isolation_level="AUTOCOMMIT") as conn: - conn.execute( - text( - "select pg_terminate_backend(pid) from pg_stat_activity " - "where usename=current_user and pid != pg_backend_pid() " - "and datname=:dname" - ), - dname=ident, - ) - conn.execute("DROP DATABASE %s" % ident) - - -@_drop_db.for_db("sqlite") -def _sqlite_drop_db(cfg, eng, ident): - if ident: - os.remove("%s_test_schema.db" % ident) - else: - os.remove("%s.db" % ident) - - -@_drop_db.for_db("mysql") -def _mysql_drop_db(cfg, eng, ident): - with eng.connect() as conn: - conn.execute("DROP DATABASE %s_test_schema" % ident) - conn.execute("DROP DATABASE 
%s_test_schema_2" % ident) - conn.execute("DROP DATABASE %s" % ident) - - -@_create_db.for_db("oracle") -def _oracle_create_db(cfg, eng, ident): - # NOTE: make sure you've run "ALTER DATABASE default tablespace users" or - # similar, so that the default tablespace is not "system"; reflection will - # fail otherwise - with eng.connect() as conn: - conn.execute("create user %s identified by xe" % ident) - conn.execute("create user %s_ts1 identified by xe" % ident) - conn.execute("create user %s_ts2 identified by xe" % ident) - conn.execute("grant dba to %s" % (ident,)) - conn.execute("grant unlimited tablespace to %s" % ident) - conn.execute("grant unlimited tablespace to %s_ts1" % ident) - conn.execute("grant unlimited tablespace to %s_ts2" % ident) - - -@_configure_follower.for_db("oracle") -def _oracle_configure_follower(config, ident): - config.test_schema = "%s_ts1" % ident - config.test_schema_2 = "%s_ts2" % ident - - -def _ora_drop_ignore(conn, dbname): - try: - conn.execute("drop user %s cascade" % dbname) - log.info("Reaped db: %s" % dbname) - return True - except exc.DatabaseError as err: - log.warning("couldn't drop db: %s" % err) - return False - - -@_drop_db.for_db("oracle") -def _oracle_drop_db(cfg, eng, ident): - with eng.connect() as conn: - # cx_Oracle seems to occasionally leak open connections when a large - # suite it run, even if we confirm we have zero references to - # connection objects. - # while there is a "kill session" command in Oracle, - # it unfortunately does not release the connection sufficiently. - _ora_drop_ignore(conn, ident) - _ora_drop_ignore(conn, "%s_ts1" % ident) - _ora_drop_ignore(conn, "%s_ts2" % ident) - - -def reap_oracle_dbs(idents_file): - log.info("Reaping Oracle dbs...") - - urls = collections.defaultdict(list) - with open(idents_file) as file_: - for line in file_: - line = line.strip() - db_name, db_url = line.split(" ") - urls[db_url].append(db_name) - - for url in urls: - if not url.startswith("oracle"): - continue - idents = urls[url] - log.info("db reaper connecting to %r", url) - eng = create_engine(url) - with eng.connect() as conn: - - log.info("identifiers in file: %s", ", ".join(idents)) - - to_reap = conn.execute( - "select u.username from all_users u where username " - "like 'TEST_%' and not exists (select username " - "from v$session where username=u.username)" - ) - all_names = set(username.lower() for (username,) in to_reap) - to_drop = set() - for name in all_names: - if name.endswith("_ts1") or name.endswith("_ts2"): - continue - elif name in idents: - to_drop.add(name) - if "%s_ts1" % name in all_names: - to_drop.add("%s_ts1" % name) - if "%s_ts2" % name in all_names: - to_drop.add("%s_ts2" % name) - - dropped = total = 0 - for total, username in enumerate(to_drop, 1): - if _ora_drop_ignore(conn, username): - dropped += 1 - log.info( - "Dropped %d out of %d stale databases detected", dropped, total - ) - - -@_follower_url_from_main.for_db("oracle") -def _oracle_follower_url_from_main(url, ident): - url = sa_url.make_url(url) - url.username = ident - url.password = "xe" - return url diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/requirements.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/requirements.py deleted file mode 100644 index 24f8667..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/requirements.py +++ /dev/null @@ -1,171 +0,0 @@ -from alembic import util -from alembic.util import sqla_compat -from . 
import exclusions - -if util.sqla_094: - from sqlalchemy.testing.requirements import Requirements -else: - - class Requirements(object): - pass - - -class SuiteRequirements(Requirements): - @property - def schemas(self): - """Target database must support external schemas, and have one - named 'test_schema'.""" - - return exclusions.open() - - @property - def unique_constraint_reflection(self): - def doesnt_have_check_uq_constraints(config): - from sqlalchemy import inspect - - # temporary - if config.db.name == "oracle": - return True - - insp = inspect(config.db) - try: - insp.get_unique_constraints("x") - except NotImplementedError: - return True - except TypeError: - return True - except Exception: - pass - return False - - return exclusions.skip_if(doesnt_have_check_uq_constraints) - - @property - def foreign_key_match(self): - return exclusions.open() - - @property - def check_constraints_w_enforcement(self): - """Target database must support check constraints - and also enforce them.""" - - return exclusions.open() - - @property - def reflects_pk_names(self): - return exclusions.closed() - - @property - def reflects_fk_options(self): - return exclusions.closed() - - @property - def fail_before_sqla_100(self): - return exclusions.fails_if( - lambda config: not util.sqla_100, - "SQLAlchemy 1.0.0 or greater required", - ) - - @property - def fail_before_sqla_1010(self): - return exclusions.fails_if( - lambda config: not util.sqla_1010, - "SQLAlchemy 1.0.10 or greater required", - ) - - @property - def fail_before_sqla_099(self): - return exclusions.fails_if( - lambda config: not util.sqla_099, - "SQLAlchemy 0.9.9 or greater required", - ) - - @property - def fail_before_sqla_110(self): - return exclusions.fails_if( - lambda config: not util.sqla_110, - "SQLAlchemy 1.1.0 or greater required", - ) - - @property - def sqlalchemy_092(self): - return exclusions.skip_if( - lambda config: not util.sqla_092, - "SQLAlchemy 0.9.2 or greater required", - ) - - @property - def sqlalchemy_094(self): - return exclusions.skip_if( - lambda config: not util.sqla_094, - "SQLAlchemy 0.9.4 or greater required", - ) - - @property - def sqlalchemy_099(self): - return exclusions.skip_if( - lambda config: not util.sqla_099, - "SQLAlchemy 0.9.9 or greater required", - ) - - @property - def sqlalchemy_100(self): - return exclusions.skip_if( - lambda config: not util.sqla_100, - "SQLAlchemy 1.0.0 or greater required", - ) - - @property - def sqlalchemy_1014(self): - return exclusions.skip_if( - lambda config: not util.sqla_1014, - "SQLAlchemy 1.0.14 or greater required", - ) - - @property - def sqlalchemy_1115(self): - return exclusions.skip_if( - lambda config: not util.sqla_1115, - "SQLAlchemy 1.1.15 or greater required", - ) - - @property - def sqlalchemy_110(self): - return exclusions.skip_if( - lambda config: not util.sqla_110, - "SQLAlchemy 1.1.0 or greater required", - ) - - @property - def sqlalchemy_issue_4436(self): - def check(config): - vers = sqla_compat._vers - - if vers == (1, 3, 0, 'b1'): - return True - elif vers >= (1, 2, 16): - return False - else: - return True - - return exclusions.skip_if( - check, - "SQLAlchemy 1.2.16, 1.3.0b2 or greater required", - ) - - @property - def pep3147(self): - - return exclusions.only_if(lambda config: util.compat.has_pep3147()) - - @property - def comments(self): - return exclusions.only_if( - lambda config: sqla_compat._dialect_supports_comments( - config.db.dialect - ) - ) - - @property - def comments_api(self): - return exclusions.only_if(lambda config: 
util.sqla_120) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/runner.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/runner.py deleted file mode 100644 index da5e0f4..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/runner.py +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env python -# testing/runner.py -# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php -""" -Nose test runner module. - -This script is a front-end to "nosetests" which -installs SQLAlchemy's testing plugin into the local environment. - -The script is intended to be used by third-party dialects and extensions -that run within SQLAlchemy's testing framework. The runner can -be invoked via:: - - python -m alembic.testing.runner - -The script is then essentially the same as the "nosetests" script, including -all of the usual Nose options. The test environment requires that a -setup.cfg is locally present including various required options. - -Note that when using this runner, Nose's "coverage" plugin will not be -able to provide coverage for SQLAlchemy itself, since SQLAlchemy is -imported into sys.modules before coverage is started. The special -script sqla_nose.py is provided as a top-level script which loads the -plugin in a special (somewhat hacky) way so that coverage against -SQLAlchemy itself is possible. - -""" -import nose - -from .plugin.noseplugin import NoseSQLAlchemy - - -def main(): - nose.main(addplugins=[NoseSQLAlchemy()]) - - -def setup_py_test(): - """Runner to use for the 'test_suite' entry of your setup.py. - - Prevents any name clash shenanigans from the command line - argument "test" that the "setup.py test" command sends - to nose. - - """ - nose.main(addplugins=[NoseSQLAlchemy()], argv=["runner"]) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/util.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/util.py deleted file mode 100644 index b2b3476..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/util.py +++ /dev/null @@ -1,19 +0,0 @@ -from sqlalchemy.util import decorator - - -@decorator -def provide_metadata(fn, *args, **kw): - """Provide bound MetaData for a single test, dropping afterwards.""" - - from . import config - from sqlalchemy import schema - - metadata = schema.MetaData(config.db) - self = args[0] - prev_meta = getattr(self, "metadata", None) - self.metadata = metadata - try: - return fn(*args, **kw) - finally: - metadata.drop_all() - self.metadata = prev_meta diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/warnings.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/warnings.py deleted file mode 100644 index 27ba706..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/testing/warnings.py +++ /dev/null @@ -1,45 +0,0 @@ -# testing/warnings.py -# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php -"""NOTE: copied/adapted from SQLAlchemy master for backwards compatibility; - this should be removable when Alembic targets SQLAlchemy 0.9.4. 
-""" - -from __future__ import absolute_import - -import re -import warnings - -from sqlalchemy import exc as sa_exc - - -def setup_filters(): - """Set global warning behavior for the test suite.""" - warnings.filterwarnings( - "ignore", category=sa_exc.SAPendingDeprecationWarning - ) - warnings.filterwarnings("error", category=sa_exc.SADeprecationWarning) - warnings.filterwarnings("error", category=sa_exc.SAWarning) - warnings.filterwarnings("error", category=DeprecationWarning) - - -def assert_warnings(fn, warning_msgs, regex=False): - """Assert that each of the given warnings are emitted by fn.""" - - from .assertions import eq_ - - with warnings.catch_warnings(record=True) as log: - # ensure that nothing is going into __warningregistry__ - warnings.filterwarnings("always") - - result = fn() - for warning in log: - popwarn = warning_msgs.pop(0) - if regex: - assert re.match(popwarn, str(warning.message)) - else: - eq_(popwarn, str(warning.message)) - return result diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/util/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/util/__init__.py deleted file mode 100644 index 88b7431..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/util/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -from .exc import CommandError -from .langhelpers import _with_legacy_names # noqa -from .langhelpers import asbool # noqa -from .langhelpers import dedupe_tuple # noqa -from .langhelpers import Dispatcher # noqa -from .langhelpers import immutabledict # noqa -from .langhelpers import memoized_property # noqa -from .langhelpers import ModuleClsProxy # noqa -from .langhelpers import rev_id # noqa -from .langhelpers import to_list # noqa -from .langhelpers import to_tuple # noqa -from .messaging import err # noqa -from .messaging import format_as_comma # noqa -from .messaging import msg # noqa -from .messaging import obfuscate_url_pw # noqa -from .messaging import status # noqa -from .messaging import warn # noqa -from .messaging import write_outstream # noqa -from .pyfiles import coerce_resource_to_filename # noqa -from .pyfiles import edit # noqa -from .pyfiles import load_python_file # noqa -from .pyfiles import pyc_file_from_path # noqa -from .pyfiles import template_to_file # noqa -from .sqla_compat import sqla_09 # noqa -from .sqla_compat import sqla_092 # noqa -from .sqla_compat import sqla_094 # noqa -from .sqla_compat import sqla_099 # noqa -from .sqla_compat import sqla_100 # noqa -from .sqla_compat import sqla_1010 # noqa -from .sqla_compat import sqla_1014 # noqa -from .sqla_compat import sqla_105 # noqa -from .sqla_compat import sqla_110 # noqa -from .sqla_compat import sqla_1115 # noqa -from .sqla_compat import sqla_120 # noqa -from .sqla_compat import sqla_1216 # noqa - - -if not sqla_09: - raise CommandError("SQLAlchemy 0.9.0 or greater is required. 
") diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/util/compat.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/util/compat.py deleted file mode 100644 index 994cbc9..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/util/compat.py +++ /dev/null @@ -1,337 +0,0 @@ -import io -import sys - -if sys.version_info < (2, 7): - raise NotImplementedError("Python 2.7 or greater is required.") - -py27 = sys.version_info >= (2, 7) -py2k = sys.version_info.major < 3 -py3k = sys.version_info.major >= 3 -py33 = sys.version_info >= (3, 3) -py35 = sys.version_info >= (3, 5) -py36 = sys.version_info >= (3, 6) - -if py3k: - from io import StringIO -else: - # accepts strings - from StringIO import StringIO # noqa - -if py3k: - import builtins as compat_builtins - - string_types = (str,) - binary_type = bytes - text_type = str - - def callable(fn): # noqa - return hasattr(fn, "__call__") - - def u(s): - return s - - def ue(s): - return s - - range = range # noqa -else: - import __builtin__ as compat_builtins - - string_types = (basestring,) # noqa - binary_type = str - text_type = unicode # noqa - callable = callable # noqa - - def u(s): - return unicode(s, "utf-8") # noqa - - def ue(s): - return unicode(s, "unicode_escape") # noqa - - range = xrange # noqa - -if py33: - import collections.abc as collections_abc -else: - import collections as collections_abc # noqa - -if py3k: - import collections - - ArgSpec = collections.namedtuple( - "ArgSpec", ["args", "varargs", "keywords", "defaults"] - ) - - from inspect import getfullargspec as inspect_getfullargspec - - def inspect_getargspec(func): - return ArgSpec(*inspect_getfullargspec(func)[0:4]) - - -else: - from inspect import getargspec as inspect_getargspec # noqa - -if py35: - from inspect import formatannotation - - def inspect_formatargspec( - args, - varargs=None, - varkw=None, - defaults=None, - kwonlyargs=(), - kwonlydefaults={}, - annotations={}, - formatarg=str, - formatvarargs=lambda name: "*" + name, - formatvarkw=lambda name: "**" + name, - formatvalue=lambda value: "=" + repr(value), - formatreturns=lambda text: " -> " + text, - formatannotation=formatannotation, - ): - """Copy formatargspec from python 3.7 standard library. - - Python 3 has deprecated formatargspec and requested that Signature - be used instead, however this requires a full reimplementation - of formatargspec() in terms of creating Parameter objects and such. - Instead of introducing all the object-creation overhead and having - to reinvent from scratch, just copy their compatibility routine. 
- - """ - - def formatargandannotation(arg): - result = formatarg(arg) - if arg in annotations: - result += ": " + formatannotation(annotations[arg]) - return result - - specs = [] - if defaults: - firstdefault = len(args) - len(defaults) - for i, arg in enumerate(args): - spec = formatargandannotation(arg) - if defaults and i >= firstdefault: - spec = spec + formatvalue(defaults[i - firstdefault]) - specs.append(spec) - if varargs is not None: - specs.append(formatvarargs(formatargandannotation(varargs))) - else: - if kwonlyargs: - specs.append("*") - if kwonlyargs: - for kwonlyarg in kwonlyargs: - spec = formatargandannotation(kwonlyarg) - if kwonlydefaults and kwonlyarg in kwonlydefaults: - spec += formatvalue(kwonlydefaults[kwonlyarg]) - specs.append(spec) - if varkw is not None: - specs.append(formatvarkw(formatargandannotation(varkw))) - result = "(" + ", ".join(specs) + ")" - if "return" in annotations: - result += formatreturns(formatannotation(annotations["return"])) - return result - - -else: - from inspect import formatargspec as inspect_formatargspec # noqa - - -if py3k: - from configparser import ConfigParser as SafeConfigParser - import configparser -else: - from ConfigParser import SafeConfigParser # noqa - import ConfigParser as configparser # noqa - -if py2k: - from mako.util import parse_encoding - -if py35: - import importlib.util - import importlib.machinery - - def load_module_py(module_id, path): - spec = importlib.util.spec_from_file_location(module_id, path) - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) - return module - - def load_module_pyc(module_id, path): - spec = importlib.util.spec_from_file_location(module_id, path) - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) - return module - - -elif py3k: - import importlib.machinery - - def load_module_py(module_id, path): - module = importlib.machinery.SourceFileLoader( - module_id, path - ).load_module(module_id) - del sys.modules[module_id] - return module - - def load_module_pyc(module_id, path): - module = importlib.machinery.SourcelessFileLoader( - module_id, path - ).load_module(module_id) - del sys.modules[module_id] - return module - - -if py3k: - - def get_bytecode_suffixes(): - try: - return importlib.machinery.BYTECODE_SUFFIXES - except AttributeError: - return importlib.machinery.DEBUG_BYTECODE_SUFFIXES - - def get_current_bytecode_suffixes(): - if py35: - suffixes = importlib.machinery.BYTECODE_SUFFIXES - else: - if sys.flags.optimize: - suffixes = importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES - else: - suffixes = importlib.machinery.BYTECODE_SUFFIXES - - return suffixes - - def has_pep3147(): - # http://www.python.org/dev/peps/pep-3147/#detecting-pep-3147-availability - - import imp - - return hasattr(imp, "get_tag") - - -else: - import imp - - def load_module_py(module_id, path): # noqa - with open(path, "rb") as fp: - mod = imp.load_source(module_id, path, fp) - if py2k: - source_encoding = parse_encoding(fp) - if source_encoding: - mod._alembic_source_encoding = source_encoding - del sys.modules[module_id] - return mod - - def load_module_pyc(module_id, path): # noqa - with open(path, "rb") as fp: - mod = imp.load_compiled(module_id, path, fp) - # no source encoding here - del sys.modules[module_id] - return mod - - def get_current_bytecode_suffixes(): - if sys.flags.optimize: - return [".pyo"] # e.g. .pyo - else: - return [".pyc"] # e.g. 
.pyc - - def has_pep3147(): - return False - - -try: - exec_ = getattr(compat_builtins, "exec") -except AttributeError: - # Python 2 - def exec_(func_text, globals_, lcl): - exec("exec func_text in globals_, lcl") - - -################################################ -# cross-compatible metaclass implementation -# Copyright (c) 2010-2012 Benjamin Peterson - - -def with_metaclass(meta, base=object): - """Create a base class with a metaclass.""" - return meta("%sBase" % meta.__name__, (base,), {}) - - -################################################ - -if py3k: - - def reraise(tp, value, tb=None, cause=None): - if cause is not None: - value.__cause__ = cause - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - - def raise_from_cause(exception, exc_info=None): - if exc_info is None: - exc_info = sys.exc_info() - exc_type, exc_value, exc_tb = exc_info - reraise(type(exception), exception, tb=exc_tb, cause=exc_value) - - -else: - exec( - "def reraise(tp, value, tb=None, cause=None):\n" - " raise tp, value, tb\n" - ) - - def raise_from_cause(exception, exc_info=None): - # not as nice as that of Py3K, but at least preserves - # the code line where the issue occurred - if exc_info is None: - exc_info = sys.exc_info() - exc_type, exc_value, exc_tb = exc_info - reraise(type(exception), exception, tb=exc_tb) - - -# produce a wrapper that allows encoded text to stream -# into a given buffer, but doesn't close it. -# not sure of a more idiomatic approach to this. -class EncodedIO(io.TextIOWrapper): - def close(self): - pass - - -if py2k: - # in Py2K, the io.* package is awkward because it does not - # easily wrap the file type (e.g. sys.stdout) and I can't - # figure out at all how to wrap StringIO.StringIO (used by nosetests) - # and also might be user specified too. So create a full - # adapter. - - class ActLikePy3kIO(object): - - """Produce an object capable of wrapping either - sys.stdout (e.g. file) *or* StringIO.StringIO(). 
- - """ - - def _false(self): - return False - - def _true(self): - return True - - readable = seekable = _false - writable = _true - closed = False - - def __init__(self, file_): - self.file_ = file_ - - def write(self, text): - return self.file_.write(text) - - def flush(self): - return self.file_.flush() - - class EncodedIO(EncodedIO): - def __init__(self, file_, encoding): - super(EncodedIO, self).__init__( - ActLikePy3kIO(file_), encoding=encoding - ) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/util/exc.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/util/exc.py deleted file mode 100644 index f7ad021..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/util/exc.py +++ /dev/null @@ -1,2 +0,0 @@ -class CommandError(Exception): - pass diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/util/langhelpers.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/util/langhelpers.py deleted file mode 100644 index bb9c8f5..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/util/langhelpers.py +++ /dev/null @@ -1,320 +0,0 @@ -import collections -import textwrap -import uuid -import warnings - -from .compat import callable -from .compat import collections_abc -from .compat import exec_ -from .compat import inspect_getargspec -from .compat import string_types -from .compat import with_metaclass - - -class _ModuleClsMeta(type): - def __setattr__(cls, key, value): - super(_ModuleClsMeta, cls).__setattr__(key, value) - cls._update_module_proxies(key) - - -class ModuleClsProxy(with_metaclass(_ModuleClsMeta)): - """Create module level proxy functions for the - methods on a given class. - - The functions will have a compatible signature - as the methods. - - """ - - _setups = collections.defaultdict(lambda: (set(), [])) - - @classmethod - def _update_module_proxies(cls, name): - attr_names, modules = cls._setups[cls] - for globals_, locals_ in modules: - cls._add_proxied_attribute(name, globals_, locals_, attr_names) - - def _install_proxy(self): - attr_names, modules = self._setups[self.__class__] - for globals_, locals_ in modules: - globals_["_proxy"] = self - for attr_name in attr_names: - globals_[attr_name] = getattr(self, attr_name) - - def _remove_proxy(self): - attr_names, modules = self._setups[self.__class__] - for globals_, locals_ in modules: - globals_["_proxy"] = None - for attr_name in attr_names: - del globals_[attr_name] - - @classmethod - def create_module_class_proxy(cls, globals_, locals_): - attr_names, modules = cls._setups[cls] - modules.append((globals_, locals_)) - cls._setup_proxy(globals_, locals_, attr_names) - - @classmethod - def _setup_proxy(cls, globals_, locals_, attr_names): - for methname in dir(cls): - cls._add_proxied_attribute(methname, globals_, locals_, attr_names) - - @classmethod - def _add_proxied_attribute(cls, methname, globals_, locals_, attr_names): - if not methname.startswith("_"): - meth = getattr(cls, methname) - if callable(meth): - locals_[methname] = cls._create_method_proxy( - methname, globals_, locals_ - ) - else: - attr_names.add(methname) - - @classmethod - def _create_method_proxy(cls, name, globals_, locals_): - fn = getattr(cls, name) - - def _name_error(name): - raise NameError( - "Can't invoke function '%s', as the proxy object has " - "not yet been " - "established for the Alembic '%s' class. " - "Try placing this code inside a callable." 
- % (name, cls.__name__) - ) - - globals_["_name_error"] = _name_error - - translations = getattr(fn, "_legacy_translations", []) - if translations: - spec = inspect_getargspec(fn) - if spec[0] and spec[0][0] == "self": - spec[0].pop(0) - - outer_args = inner_args = "*args, **kw" - translate_str = "args, kw = _translate(%r, %r, %r, args, kw)" % ( - fn.__name__, - tuple(spec), - translations, - ) - - def translate(fn_name, spec, translations, args, kw): - return_kw = {} - return_args = [] - - for oldname, newname in translations: - if oldname in kw: - warnings.warn( - "Argument %r is now named %r " - "for method %s()." % (oldname, newname, fn_name) - ) - return_kw[newname] = kw.pop(oldname) - return_kw.update(kw) - - args = list(args) - if spec[3]: - pos_only = spec[0][: -len(spec[3])] - else: - pos_only = spec[0] - for arg in pos_only: - if arg not in return_kw: - try: - return_args.append(args.pop(0)) - except IndexError: - raise TypeError( - "missing required positional argument: %s" - % arg - ) - return_args.extend(args) - - return return_args, return_kw - - globals_["_translate"] = translate - else: - outer_args = "*args, **kw" - inner_args = "*args, **kw" - translate_str = "" - - func_text = textwrap.dedent( - """\ - def %(name)s(%(args)s): - %(doc)r - %(translate)s - try: - p = _proxy - except NameError: - _name_error('%(name)s') - return _proxy.%(name)s(%(apply_kw)s) - e - """ - % { - "name": name, - "translate": translate_str, - "args": outer_args, - "apply_kw": inner_args, - "doc": fn.__doc__, - } - ) - lcl = {} - exec_(func_text, globals_, lcl) - return lcl[name] - - -def _with_legacy_names(translations): - def decorate(fn): - fn._legacy_translations = translations - return fn - - return decorate - - -def asbool(value): - return value is not None and value.lower() == "true" - - -def rev_id(): - return uuid.uuid4().hex[-12:] - - -def to_list(x, default=None): - if x is None: - return default - elif isinstance(x, string_types): - return [x] - elif isinstance(x, collections_abc.Iterable): - return list(x) - else: - return [x] - - -def to_tuple(x, default=None): - if x is None: - return default - elif isinstance(x, string_types): - return (x,) - elif isinstance(x, collections_abc.Iterable): - return tuple(x) - else: - return (x,) - - -def unique_list(seq, hashfunc=None): - seen = set() - seen_add = seen.add - if not hashfunc: - return [x for x in seq if x not in seen and not seen_add(x)] - else: - return [ - x - for x in seq - if hashfunc(x) not in seen and not seen_add(hashfunc(x)) - ] - - -def dedupe_tuple(tup): - return tuple(unique_list(tup)) - - -class memoized_property(object): - - """A read-only @property that is only evaluated once.""" - - def __init__(self, fget, doc=None): - self.fget = fget - self.__doc__ = doc or fget.__doc__ - self.__name__ = fget.__name__ - - def __get__(self, obj, cls): - if obj is None: - return self - obj.__dict__[self.__name__] = result = self.fget(obj) - return result - - -class immutabledict(dict): - def _immutable(self, *arg, **kw): - raise TypeError("%s object is immutable" % self.__class__.__name__) - - __delitem__ = ( - __setitem__ - ) = __setattr__ = clear = pop = popitem = setdefault = update = _immutable - - def __new__(cls, *args): - new = dict.__new__(cls) - dict.__init__(new, *args) - return new - - def __init__(self, *args): - pass - - def __reduce__(self): - return immutabledict, (dict(self),) - - def union(self, d): - if not self: - return immutabledict(d) - else: - d2 = immutabledict(self) - dict.update(d2, d) - return d2 - - def 
__repr__(self): - return "immutabledict(%s)" % dict.__repr__(self) - - -class Dispatcher(object): - def __init__(self, uselist=False): - self._registry = {} - self.uselist = uselist - - def dispatch_for(self, target, qualifier="default"): - def decorate(fn): - if self.uselist: - self._registry.setdefault((target, qualifier), []).append(fn) - else: - assert (target, qualifier) not in self._registry - self._registry[(target, qualifier)] = fn - return fn - - return decorate - - def dispatch(self, obj, qualifier="default"): - - if isinstance(obj, string_types): - targets = [obj] - elif isinstance(obj, type): - targets = obj.__mro__ - else: - targets = type(obj).__mro__ - - for spcls in targets: - if qualifier != "default" and (spcls, qualifier) in self._registry: - return self._fn_or_list(self._registry[(spcls, qualifier)]) - elif (spcls, "default") in self._registry: - return self._fn_or_list(self._registry[(spcls, "default")]) - else: - raise ValueError("no dispatch function for object: %s" % obj) - - def _fn_or_list(self, fn_or_list): - if self.uselist: - - def go(*arg, **kw): - for fn in fn_or_list: - fn(*arg, **kw) - - return go - else: - return fn_or_list - - def branch(self): - """Return a copy of this dispatcher that is independently - writable.""" - - d = Dispatcher() - if self.uselist: - d._registry.update( - (k, [fn for fn in self._registry[k]]) for k in self._registry - ) - else: - d._registry.update(self._registry) - return d diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/util/messaging.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/util/messaging.py deleted file mode 100644 index 1e72c19..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/util/messaging.py +++ /dev/null @@ -1,98 +0,0 @@ -import logging -import sys -import textwrap -import warnings - -from sqlalchemy.engine import url - -from .compat import binary_type -from .compat import collections_abc -from .compat import py27 -from .compat import string_types - -log = logging.getLogger(__name__) - -if py27: - # disable "no handler found" errors - logging.getLogger("alembic").addHandler(logging.NullHandler()) - - -try: - import fcntl - import termios - import struct - - ioctl = fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack("HHHH", 0, 0, 0, 0)) - _h, TERMWIDTH, _hp, _wp = struct.unpack("HHHH", ioctl) - if TERMWIDTH <= 0: # can occur if running in emacs pseudo-tty - TERMWIDTH = None -except (ImportError, IOError): - TERMWIDTH = None - - -def write_outstream(stream, *text): - encoding = getattr(stream, "encoding", "ascii") or "ascii" - for t in text: - if not isinstance(t, binary_type): - t = t.encode(encoding, "replace") - t = t.decode(encoding) - try: - stream.write(t) - except IOError: - # suppress "broken pipe" errors. - # no known way to handle this on Python 3 however - # as the exception is "ignored" (noisily) in TextIOWrapper. 
- break - - -def status(_statmsg, fn, *arg, **kw): - msg(_statmsg + " ...", False) - try: - ret = fn(*arg, **kw) - write_outstream(sys.stdout, " done\n") - return ret - except: - write_outstream(sys.stdout, " FAILED\n") - raise - - -def err(message): - log.error(message) - msg("FAILED: %s" % message) - sys.exit(-1) - - -def obfuscate_url_pw(u): - u = url.make_url(u) - if u.password: - u.password = "XXXXX" - return str(u) - - -def warn(msg): - warnings.warn(msg) - - -def msg(msg, newline=True): - if TERMWIDTH is None: - write_outstream(sys.stdout, msg) - if newline: - write_outstream(sys.stdout, "\n") - else: - # left indent output lines - lines = textwrap.wrap(msg, TERMWIDTH) - if len(lines) > 1: - for line in lines[0:-1]: - write_outstream(sys.stdout, " ", line, "\n") - write_outstream(sys.stdout, " ", lines[-1], ("\n" if newline else "")) - - -def format_as_comma(value): - if value is None: - return "" - elif isinstance(value, string_types): - return value - elif isinstance(value, collections_abc.Iterable): - return ", ".join(value) - else: - raise ValueError("Don't know how to comma-format %r" % value) diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/util/pyfiles.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/util/pyfiles.py deleted file mode 100644 index 6e9b542..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/util/pyfiles.py +++ /dev/null @@ -1,99 +0,0 @@ -import os -import re -import tempfile - -from mako import exceptions -from mako.template import Template - -from .compat import get_current_bytecode_suffixes -from .compat import has_pep3147 -from .compat import load_module_py -from .compat import load_module_pyc -from .exc import CommandError - - -def template_to_file(template_file, dest, output_encoding, **kw): - template = Template(filename=template_file) - try: - output = template.render_unicode(**kw).encode(output_encoding) - except: - with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as ntf: - ntf.write( - exceptions.text_error_template() - .render_unicode() - .encode(output_encoding) - ) - fname = ntf.name - raise CommandError( - "Template rendering failed; see %s for a " - "template-oriented traceback." % fname - ) - else: - with open(dest, "wb") as f: - f.write(output) - - -def coerce_resource_to_filename(fname): - """Interpret a filename as either a filesystem location or as a package - resource. - - Names that are non absolute paths and contain a colon - are interpreted as resources and coerced to a file location. - - """ - if not os.path.isabs(fname) and ":" in fname: - import pkg_resources - - fname = pkg_resources.resource_filename(*fname.split(":")) - return fname - - -def pyc_file_from_path(path): - """Given a python source path, locate the .pyc. 
- - """ - - if has_pep3147(): - import imp - - candidate = imp.cache_from_source(path) - if os.path.exists(candidate): - return candidate - - filepath, ext = os.path.splitext(path) - for ext in get_current_bytecode_suffixes(): - if os.path.exists(filepath + ext): - return filepath + ext - else: - return None - - -def edit(path): - """Given a source path, run the EDITOR for it""" - - import editor - - try: - editor.edit(path) - except Exception as exc: - raise CommandError("Error executing editor (%s)" % (exc,)) - - -def load_python_file(dir_, filename): - """Load a file from the given path as a Python module.""" - - module_id = re.sub(r"\W", "_", filename) - path = os.path.join(dir_, filename) - _, ext = os.path.splitext(filename) - if ext == ".py": - if os.path.exists(path): - module = load_module_py(module_id, path) - else: - pyc_path = pyc_file_from_path(path) - if pyc_path is None: - raise ImportError("Can't find Python file %s" % path) - else: - module = load_module_pyc(module_id, pyc_path) - elif ext in (".pyc", ".pyo"): - module = load_module_pyc(module_id, path) - return module diff --git a/flo-token-explorer/lib/python3.6/site-packages/alembic/util/sqla_compat.py b/flo-token-explorer/lib/python3.6/site-packages/alembic/util/sqla_compat.py deleted file mode 100644 index 82250d0..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/alembic/util/sqla_compat.py +++ /dev/null @@ -1,243 +0,0 @@ -import re - -from sqlalchemy import __version__ -from sqlalchemy import schema -from sqlalchemy import sql -from sqlalchemy import types as sqltypes -from sqlalchemy.ext.compiler import compiles -from sqlalchemy.schema import CheckConstraint -from sqlalchemy.schema import Column -from sqlalchemy.schema import ForeignKeyConstraint -from sqlalchemy.sql.expression import _BindParamClause -from sqlalchemy.sql.expression import _TextClause as TextClause -from sqlalchemy.sql.visitors import traverse - -from . 
import compat - - -def _safe_int(value): - try: - return int(value) - except: - return value - - -_vers = tuple( - [_safe_int(x) for x in re.findall(r"(\d+|[abc]\d)", __version__)] -) -sqla_09 = _vers >= (0, 9, 0) -sqla_092 = _vers >= (0, 9, 2) -sqla_094 = _vers >= (0, 9, 4) -sqla_094 = _vers >= (0, 9, 4) -sqla_099 = _vers >= (0, 9, 9) -sqla_100 = _vers >= (1, 0, 0) -sqla_105 = _vers >= (1, 0, 5) -sqla_1010 = _vers >= (1, 0, 10) -sqla_110 = _vers >= (1, 1, 0) -sqla_1014 = _vers >= (1, 0, 14) -sqla_1115 = _vers >= (1, 1, 15) -sqla_120 = _vers >= (1, 2, 0) -sqla_1216 = _vers >= (1, 2, 16) - - -if sqla_110: - AUTOINCREMENT_DEFAULT = "auto" -else: - AUTOINCREMENT_DEFAULT = True - - -def _table_for_constraint(constraint): - if isinstance(constraint, ForeignKeyConstraint): - return constraint.parent - else: - return constraint.table - - -def _columns_for_constraint(constraint): - if isinstance(constraint, ForeignKeyConstraint): - return [fk.parent for fk in constraint.elements] - elif isinstance(constraint, CheckConstraint): - return _find_columns(constraint.sqltext) - else: - return list(constraint.columns) - - -def _fk_spec(constraint): - if sqla_100: - source_columns = [ - constraint.columns[key].name for key in constraint.column_keys - ] - else: - source_columns = [ - element.parent.name for element in constraint.elements - ] - - source_table = constraint.parent.name - source_schema = constraint.parent.schema - target_schema = constraint.elements[0].column.table.schema - target_table = constraint.elements[0].column.table.name - target_columns = [element.column.name for element in constraint.elements] - ondelete = constraint.ondelete - onupdate = constraint.onupdate - deferrable = constraint.deferrable - initially = constraint.initially - return ( - source_schema, - source_table, - source_columns, - target_schema, - target_table, - target_columns, - onupdate, - ondelete, - deferrable, - initially, - ) - - -def _fk_is_self_referential(constraint): - spec = constraint.elements[0]._get_colspec() - tokens = spec.split(".") - tokens.pop(-1) # colname - tablekey = ".".join(tokens) - return tablekey == constraint.parent.key - - -def _is_type_bound(constraint): - # this deals with SQLAlchemy #3260, don't copy CHECK constraints - # that will be generated by the type. 
- if sqla_100: - # new feature added for #3260 - return constraint._type_bound - else: - # old way, look at what we know Boolean/Enum to use - return constraint._create_rule is not None and isinstance( - getattr(constraint._create_rule, "target", None), - sqltypes.SchemaType, - ) - - -def _find_columns(clause): - """locate Column objects within the given expression.""" - - cols = set() - traverse(clause, {}, {"column": cols.add}) - return cols - - -def _remove_column_from_collection(collection, column): - """remove a column from a ColumnCollection.""" - - # workaround for older SQLAlchemy, remove the - # same object that's present - to_remove = collection[column.key] - collection.remove(to_remove) - - -def _textual_index_column(table, text_): - """a workaround for the Index construct's severe lack of flexibility""" - if isinstance(text_, compat.string_types): - c = Column(text_, sqltypes.NULLTYPE) - table.append_column(c) - return c - elif isinstance(text_, TextClause): - return _textual_index_element(table, text_) - else: - raise ValueError("String or text() construct expected") - - -class _textual_index_element(sql.ColumnElement): - """Wrap around a sqlalchemy text() construct in such a way that - we appear like a column-oriented SQL expression to an Index - construct. - - The issue here is that currently the Postgresql dialect, the biggest - recipient of functional indexes, keys all the index expressions to - the corresponding column expressions when rendering CREATE INDEX, - so the Index we create here needs to have a .columns collection that - is the same length as the .expressions collection. Ultimately - SQLAlchemy should support text() expressions in indexes. - - See SQLAlchemy issue 3174. - - """ - - __visit_name__ = "_textual_idx_element" - - def __init__(self, table, text): - self.table = table - self.text = text - self.key = text.text - self.fake_column = schema.Column(self.text.text, sqltypes.NULLTYPE) - table.append_column(self.fake_column) - - def get_children(self): - return [self.fake_column] - - -@compiles(_textual_index_element) -def _render_textual_index_column(element, compiler, **kw): - return compiler.process(element.text, **kw) - - -class _literal_bindparam(_BindParamClause): - pass - - -@compiles(_literal_bindparam) -def _render_literal_bindparam(element, compiler, **kw): - return compiler.render_literal_bindparam(element, **kw) - - -def _get_index_expressions(idx): - return list(idx.expressions) - - -def _get_index_column_names(idx): - return [getattr(exp, "name", None) for exp in _get_index_expressions(idx)] - - -def _get_index_final_name(dialect, idx): - # trying to keep the truncation rules totally localized on the - # SQLA side while also stepping around the quoting issue. Ideally - # the _prepared_index_name() method on the SQLA side would have - # a quoting option or the truncation routine would be broken out. - # - # test for SQLA quoted_name construct, introduced in - # 0.9 or thereabouts. - # this doesn't work in 0.8 and the "quote" option on Index doesn't - # seem to work in 0.8 either. 
- if hasattr(idx.name, "quote"): - # might be quoted_name, might be truncated_name, keep it the - # same - quoted_name_cls = type(idx.name) - new_name = quoted_name_cls(str(idx.name), quote=False) - idx = schema.Index(name=new_name) - return dialect.ddl_compiler(dialect, None)._prepared_index_name(idx) - - -def _dialect_supports_comments(dialect): - if sqla_120: - return dialect.supports_comments - else: - return False - - -def _comment_attribute(obj): - """return the .comment attribute from a Table or Column""" - - if sqla_120: - return obj.comment - else: - return None - - -def _is_mariadb(mysql_dialect): - return "MariaDB" in mysql_dialect.server_version_info - - -def _mariadb_normalized_version_info(mysql_dialect): - if len(mysql_dialect.server_version_info) > 5: - return mysql_dialect.server_version_info[3:] - else: - return mysql_dialect.server_version_info diff --git a/flo-token-explorer/lib/python3.6/site-packages/click/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/click/__init__.py deleted file mode 100644 index d3c3366..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/click/__init__.py +++ /dev/null @@ -1,97 +0,0 @@ -# -*- coding: utf-8 -*- -""" -click -~~~~~ - -Click is a simple Python module inspired by the stdlib optparse to make -writing command line scripts fun. Unlike other modules, it's based -around a simple API that does not come with too much magic and is -composable. - -:copyright: © 2014 by the Pallets team. -:license: BSD, see LICENSE.rst for more details. -""" - -# Core classes -from .core import Context, BaseCommand, Command, MultiCommand, Group, \ - CommandCollection, Parameter, Option, Argument - -# Globals -from .globals import get_current_context - -# Decorators -from .decorators import pass_context, pass_obj, make_pass_decorator, \ - command, group, argument, option, confirmation_option, \ - password_option, version_option, help_option - -# Types -from .types import ParamType, File, Path, Choice, IntRange, Tuple, \ - DateTime, STRING, INT, FLOAT, BOOL, UUID, UNPROCESSED, FloatRange - -# Utilities -from .utils import echo, get_binary_stream, get_text_stream, open_file, \ - format_filename, get_app_dir, get_os_args - -# Terminal functions -from .termui import prompt, confirm, get_terminal_size, echo_via_pager, \ - progressbar, clear, style, unstyle, secho, edit, launch, getchar, \ - pause - -# Exceptions -from .exceptions import ClickException, UsageError, BadParameter, \ - FileError, Abort, NoSuchOption, BadOptionUsage, BadArgumentUsage, \ - MissingParameter - -# Formatting -from .formatting import HelpFormatter, wrap_text - -# Parsing -from .parser import OptionParser - - -__all__ = [ - # Core classes - 'Context', 'BaseCommand', 'Command', 'MultiCommand', 'Group', - 'CommandCollection', 'Parameter', 'Option', 'Argument', - - # Globals - 'get_current_context', - - # Decorators - 'pass_context', 'pass_obj', 'make_pass_decorator', 'command', 'group', - 'argument', 'option', 'confirmation_option', 'password_option', - 'version_option', 'help_option', - - # Types - 'ParamType', 'File', 'Path', 'Choice', 'IntRange', 'Tuple', - 'DateTime', 'STRING', 'INT', 'FLOAT', 'BOOL', 'UUID', 'UNPROCESSED', - 'FloatRange', - - # Utilities - 'echo', 'get_binary_stream', 'get_text_stream', 'open_file', - 'format_filename', 'get_app_dir', 'get_os_args', - - # Terminal functions - 'prompt', 'confirm', 'get_terminal_size', 'echo_via_pager', - 'progressbar', 'clear', 'style', 'unstyle', 'secho', 'edit', 'launch', - 'getchar', 'pause', - - # Exceptions - 
'ClickException', 'UsageError', 'BadParameter', 'FileError', - 'Abort', 'NoSuchOption', 'BadOptionUsage', 'BadArgumentUsage', - 'MissingParameter', - - # Formatting - 'HelpFormatter', 'wrap_text', - - # Parsing - 'OptionParser', -] - - -# Controls if click should emit the warning about the use of unicode -# literals. -disable_unicode_literals_warning = False - - -__version__ = '7.0' diff --git a/flo-token-explorer/lib/python3.6/site-packages/click/_bashcomplete.py b/flo-token-explorer/lib/python3.6/site-packages/click/_bashcomplete.py deleted file mode 100644 index a5f1084..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/click/_bashcomplete.py +++ /dev/null @@ -1,293 +0,0 @@ -import copy -import os -import re - -from .utils import echo -from .parser import split_arg_string -from .core import MultiCommand, Option, Argument -from .types import Choice - -try: - from collections import abc -except ImportError: - import collections as abc - -WORDBREAK = '=' - -# Note, only BASH version 4.4 and later have the nosort option. -COMPLETION_SCRIPT_BASH = ''' -%(complete_func)s() { - local IFS=$'\n' - COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\ - COMP_CWORD=$COMP_CWORD \\ - %(autocomplete_var)s=complete $1 ) ) - return 0 -} - -%(complete_func)setup() { - local COMPLETION_OPTIONS="" - local BASH_VERSION_ARR=(${BASH_VERSION//./ }) - # Only BASH version 4.4 and later have the nosort option. - if [ ${BASH_VERSION_ARR[0]} -gt 4 ] || ([ ${BASH_VERSION_ARR[0]} -eq 4 ] && [ ${BASH_VERSION_ARR[1]} -ge 4 ]); then - COMPLETION_OPTIONS="-o nosort" - fi - - complete $COMPLETION_OPTIONS -F %(complete_func)s %(script_names)s -} - -%(complete_func)setup -''' - -COMPLETION_SCRIPT_ZSH = ''' -%(complete_func)s() { - local -a completions - local -a completions_with_descriptions - local -a response - response=("${(@f)$( env COMP_WORDS=\"${words[*]}\" \\ - COMP_CWORD=$((CURRENT-1)) \\ - %(autocomplete_var)s=\"complete_zsh\" \\ - %(script_names)s )}") - - for key descr in ${(kv)response}; do - if [[ "$descr" == "_" ]]; then - completions+=("$key") - else - completions_with_descriptions+=("$key":"$descr") - fi - done - - if [ -n "$completions_with_descriptions" ]; then - _describe -V unsorted completions_with_descriptions -U -Q - fi - - if [ -n "$completions" ]; then - compadd -U -V unsorted -Q -a completions - fi - compstate[insert]="automenu" -} - -compdef %(complete_func)s %(script_names)s -''' - -_invalid_ident_char_re = re.compile(r'[^a-zA-Z0-9_]') - - -def get_completion_script(prog_name, complete_var, shell): - cf_name = _invalid_ident_char_re.sub('', prog_name.replace('-', '_')) - script = COMPLETION_SCRIPT_ZSH if shell == 'zsh' else COMPLETION_SCRIPT_BASH - return (script % { - 'complete_func': '_%s_completion' % cf_name, - 'script_names': prog_name, - 'autocomplete_var': complete_var, - }).strip() + ';' - - -def resolve_ctx(cli, prog_name, args): - """ - Parse into a hierarchy of contexts. Contexts are connected through the parent variable. 
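    (Illustrative example, not in the original docstring: for an
    invocation like ``cli db upgrade --sql``, the returned context
    belongs to ``upgrade``, and following its ``parent`` attribute
    walks back through ``db`` to the top-level ``cli`` context.)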
-    :param cli: command definition
-    :param prog_name: the program that is running
-    :param args: full list of args
-    :return: the final context/command parsed
-    """
-    ctx = cli.make_context(prog_name, args, resilient_parsing=True)
-    args = ctx.protected_args + ctx.args
-    while args:
-        if isinstance(ctx.command, MultiCommand):
-            if not ctx.command.chain:
-                cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)
-                if cmd is None:
-                    return ctx
-                ctx = cmd.make_context(cmd_name, args, parent=ctx,
-                                       resilient_parsing=True)
-                args = ctx.protected_args + ctx.args
-            else:
-                # Walk chained subcommand contexts saving the last one.
-                while args:
-                    cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)
-                    if cmd is None:
-                        return ctx
-                    sub_ctx = cmd.make_context(cmd_name, args, parent=ctx,
-                                               allow_extra_args=True,
-                                               allow_interspersed_args=False,
-                                               resilient_parsing=True)
-                    args = sub_ctx.args
-                ctx = sub_ctx
-                args = sub_ctx.protected_args + sub_ctx.args
-        else:
-            break
-    return ctx
-
-
-def start_of_option(param_str):
-    """
-    :param param_str: param_str to check
-    :return: whether or not this is the start of an option declaration (i.e. starts "-" or "--")
-    """
-    return param_str and param_str[:1] == '-'
-
-
-def is_incomplete_option(all_args, cmd_param):
-    """
-    :param all_args: the full original list of args supplied
-    :param cmd_param: the current command parameter
-    :return: whether or not the last option declaration (i.e. starts "-" or "--") is incomplete and
-        corresponds to this cmd_param. In other words whether this cmd_param option can still accept
-        values
-    """
-    if not isinstance(cmd_param, Option):
-        return False
-    if cmd_param.is_flag:
-        return False
-    last_option = None
-    for index, arg_str in enumerate(reversed([arg for arg in all_args if arg != WORDBREAK])):
-        if index + 1 > cmd_param.nargs:
-            break
-        if start_of_option(arg_str):
-            last_option = arg_str
-
-    return True if last_option and last_option in cmd_param.opts else False
-
-
-def is_incomplete_argument(current_params, cmd_param):
-    """
-    :param current_params: the current params and values for this argument as already entered
-    :param cmd_param: the current command parameter
-    :return: whether or not the last argument is incomplete and corresponds to this cmd_param. In
-        other words whether or not this cmd_param argument can still accept values
-    """
-    if not isinstance(cmd_param, Argument):
-        return False
-    current_param_values = current_params[cmd_param.name]
-    if current_param_values is None:
-        return True
-    if cmd_param.nargs == -1:
-        return True
-    if isinstance(current_param_values, abc.Iterable) \
-            and cmd_param.nargs > 1 and len(current_param_values) < cmd_param.nargs:
-        return True
-    return False
-
-
-def get_user_autocompletions(ctx, args, incomplete, cmd_param):
-    """
-    :param ctx: context associated with the parsed command
-    :param args: full list of args
-    :param incomplete: the incomplete text to autocomplete
-    :param cmd_param: command definition
-    :return: all the possible user-specified completions for the param
-    """
-    results = []
-    if isinstance(cmd_param.type, Choice):
-        # Choices don't support descriptions.
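-        # (Illustrative aside, not part of the original file: for a
-        #  hypothetical parameter declared as
-        #      click.Choice(['json', 'yaml', 'table'])
-        #  and incomplete text 'j', the branch below yields
-        #  [('json', None)]; the None is the "no description"
-        #  placeholder that do_complete() later prints as '_' for zsh.)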
-        results = [(c, None)
-                   for c in cmd_param.type.choices if str(c).startswith(incomplete)]
-    elif cmd_param.autocompletion is not None:
-        dynamic_completions = cmd_param.autocompletion(ctx=ctx,
-                                                       args=args,
-                                                       incomplete=incomplete)
-        results = [c if isinstance(c, tuple) else (c, None)
-                   for c in dynamic_completions]
-    return results
-
-
-def get_visible_commands_starting_with(ctx, starts_with):
-    """
-    :param ctx: context associated with the parsed command
-    :param starts_with: string that visible commands must start with.
-    :return: all visible (not hidden) commands that start with starts_with.
-    """
-    for c in ctx.command.list_commands(ctx):
-        if c.startswith(starts_with):
-            command = ctx.command.get_command(ctx, c)
-            if not command.hidden:
-                yield command
-
-
-def add_subcommand_completions(ctx, incomplete, completions_out):
-    # Add subcommand completions.
-    if isinstance(ctx.command, MultiCommand):
-        completions_out.extend(
-            [(c.name, c.get_short_help_str()) for c in get_visible_commands_starting_with(ctx, incomplete)])
-
-    # Walk up the context list and add any other completion possibilities from chained commands
-    while ctx.parent is not None:
-        ctx = ctx.parent
-        if isinstance(ctx.command, MultiCommand) and ctx.command.chain:
-            remaining_commands = [c for c in get_visible_commands_starting_with(ctx, incomplete)
-                                  if c.name not in ctx.protected_args]
-            completions_out.extend([(c.name, c.get_short_help_str()) for c in remaining_commands])
-
-
-def get_choices(cli, prog_name, args, incomplete):
-    """
-    :param cli: command definition
-    :param prog_name: the program that is running
-    :param args: full list of args
-    :param incomplete: the incomplete text to autocomplete
-    :return: all the possible completions for the incomplete
-    """
-    all_args = copy.deepcopy(args)
-
-    ctx = resolve_ctx(cli, prog_name, args)
-    if ctx is None:
-        return []
-
-    # In newer versions of bash long opts with '='s are partitioned, but it's easier to parse
-    # without the '='
-    if start_of_option(incomplete) and WORDBREAK in incomplete:
-        partition_incomplete = incomplete.partition(WORDBREAK)
-        all_args.append(partition_incomplete[0])
-        incomplete = partition_incomplete[2]
-    elif incomplete == WORDBREAK:
-        incomplete = ''
-
-    completions = []
-    if start_of_option(incomplete):
-        # completions for partial options
-        for param in ctx.command.params:
-            if isinstance(param, Option) and not param.hidden:
-                param_opts = [param_opt for param_opt in param.opts +
-                              param.secondary_opts if param_opt not in all_args or param.multiple]
-                completions.extend([(o, param.help) for o in param_opts if o.startswith(incomplete)])
-        return completions
-    # completion for option values from user supplied values
-    for param in ctx.command.params:
-        if is_incomplete_option(all_args, param):
-            return get_user_autocompletions(ctx, all_args, incomplete, param)
-    # completion for argument values from user supplied values
-    for param in ctx.command.params:
-        if is_incomplete_argument(ctx.params, param):
-            return get_user_autocompletions(ctx, all_args, incomplete, param)
-
-    add_subcommand_completions(ctx, incomplete, completions)
-    # Sort before returning so that proper ordering can be enforced in custom types.
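-    # (Illustrative aside, not part of the original file: at this point
-    #  `completions` holds (value, help) pairs such as
-    #      [('--count', 'number of greetings'), ('sub-cmd', 'short help')]
-    #  (hypothetical names), so sorted() orders them by the completion
-    #  text itself, with the help string only breaking exact ties.)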
- return sorted(completions) - - -def do_complete(cli, prog_name, include_descriptions): - cwords = split_arg_string(os.environ['COMP_WORDS']) - cword = int(os.environ['COMP_CWORD']) - args = cwords[1:cword] - try: - incomplete = cwords[cword] - except IndexError: - incomplete = '' - - for item in get_choices(cli, prog_name, args, incomplete): - echo(item[0]) - if include_descriptions: - # ZSH has trouble dealing with empty array parameters when returned from commands, so use a well defined character '_' to indicate no description is present. - echo(item[1] if item[1] else '_') - - return True - - -def bashcomplete(cli, prog_name, complete_var, complete_instr): - if complete_instr.startswith('source'): - shell = 'zsh' if complete_instr == 'source_zsh' else 'bash' - echo(get_completion_script(prog_name, complete_var, shell)) - return True - elif complete_instr == 'complete' or complete_instr == 'complete_zsh': - return do_complete(cli, prog_name, complete_instr == 'complete_zsh') - return False diff --git a/flo-token-explorer/lib/python3.6/site-packages/click/_compat.py b/flo-token-explorer/lib/python3.6/site-packages/click/_compat.py deleted file mode 100644 index 937e230..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/click/_compat.py +++ /dev/null @@ -1,703 +0,0 @@ -import re -import io -import os -import sys -import codecs -from weakref import WeakKeyDictionary - - -PY2 = sys.version_info[0] == 2 -CYGWIN = sys.platform.startswith('cygwin') -# Determine local App Engine environment, per Google's own suggestion -APP_ENGINE = ('APPENGINE_RUNTIME' in os.environ and - 'Development/' in os.environ['SERVER_SOFTWARE']) -WIN = sys.platform.startswith('win') and not APP_ENGINE -DEFAULT_COLUMNS = 80 - - -_ansi_re = re.compile(r'\033\[((?:\d|;)*)([a-zA-Z])') - - -def get_filesystem_encoding(): - return sys.getfilesystemencoding() or sys.getdefaultencoding() - - -def _make_text_stream(stream, encoding, errors, - force_readable=False, force_writable=False): - if encoding is None: - encoding = get_best_encoding(stream) - if errors is None: - errors = 'replace' - return _NonClosingTextIOWrapper(stream, encoding, errors, - line_buffering=True, - force_readable=force_readable, - force_writable=force_writable) - - -def is_ascii_encoding(encoding): - """Checks if a given encoding is ascii.""" - try: - return codecs.lookup(encoding).name == 'ascii' - except LookupError: - return False - - -def get_best_encoding(stream): - """Returns the default stream encoding if not found.""" - rv = getattr(stream, 'encoding', None) or sys.getdefaultencoding() - if is_ascii_encoding(rv): - return 'utf-8' - return rv - - -class _NonClosingTextIOWrapper(io.TextIOWrapper): - - def __init__(self, stream, encoding, errors, - force_readable=False, force_writable=False, **extra): - self._stream = stream = _FixupStream(stream, force_readable, - force_writable) - io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra) - - # The io module is a place where the Python 3 text behavior - # was forced upon Python 2, so we need to unbreak - # it to look like Python 2. 
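-    # (Illustrative aside, not part of the original file: the Python 2
-    #  override below means wrapper.write(b'\xff\xfe') is routed to the
-    #  underlying binary buffer instead of raising UnicodeDecodeError,
-    #  mirroring how Python 2 file objects accepted both str and unicode.)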
- if PY2: - def write(self, x): - if isinstance(x, str) or is_bytes(x): - try: - self.flush() - except Exception: - pass - return self.buffer.write(str(x)) - return io.TextIOWrapper.write(self, x) - - def writelines(self, lines): - for line in lines: - self.write(line) - - def __del__(self): - try: - self.detach() - except Exception: - pass - - def isatty(self): - # https://bitbucket.org/pypy/pypy/issue/1803 - return self._stream.isatty() - - -class _FixupStream(object): - """The new io interface needs more from streams than streams - traditionally implement. As such, this fix-up code is necessary in - some circumstances. - - The forcing of readable and writable flags are there because some tools - put badly patched objects on sys (one such offender are certain version - of jupyter notebook). - """ - - def __init__(self, stream, force_readable=False, force_writable=False): - self._stream = stream - self._force_readable = force_readable - self._force_writable = force_writable - - def __getattr__(self, name): - return getattr(self._stream, name) - - def read1(self, size): - f = getattr(self._stream, 'read1', None) - if f is not None: - return f(size) - # We only dispatch to readline instead of read in Python 2 as we - # do not want cause problems with the different implementation - # of line buffering. - if PY2: - return self._stream.readline(size) - return self._stream.read(size) - - def readable(self): - if self._force_readable: - return True - x = getattr(self._stream, 'readable', None) - if x is not None: - return x() - try: - self._stream.read(0) - except Exception: - return False - return True - - def writable(self): - if self._force_writable: - return True - x = getattr(self._stream, 'writable', None) - if x is not None: - return x() - try: - self._stream.write('') - except Exception: - try: - self._stream.write(b'') - except Exception: - return False - return True - - def seekable(self): - x = getattr(self._stream, 'seekable', None) - if x is not None: - return x() - try: - self._stream.seek(self._stream.tell()) - except Exception: - return False - return True - - -if PY2: - text_type = unicode - bytes = str - raw_input = raw_input - string_types = (str, unicode) - int_types = (int, long) - iteritems = lambda x: x.iteritems() - range_type = xrange - - def is_bytes(x): - return isinstance(x, (buffer, bytearray)) - - _identifier_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$') - - # For Windows, we need to force stdout/stdin/stderr to binary if it's - # fetched for that. This obviously is not the most correct way to do - # it as it changes global state. Unfortunately, there does not seem to - # be a clear better way to do it as just reopening the file in binary - # mode does not change anything. - # - # An option would be to do what Python 3 does and to open the file as - # binary only, patch it back to the system, and then use a wrapper - # stream that converts newlines. It's not quite clear what's the - # correct option here. - # - # This code also lives in _winconsole for the fallback to the console - # emulation stream. - # - # There are also Windows environments where the `msvcrt` module is not - # available (which is why we use try-catch instead of the WIN variable - # here), such as the Google App Engine development server on Windows. In - # those cases there is just nothing we can do. 
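-    # (Illustrative aside, not part of the original file: without
-    #  O_BINARY the Windows C runtime translates b'\n' to b'\r\n' on
-    #  write, so piping binary data such as a PNG through stdout would
-    #  be silently corrupted; msvcrt.setmode(fd, os.O_BINARY) below
-    #  disables that translation.)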
- def set_binary_mode(f): - return f - - try: - import msvcrt - except ImportError: - pass - else: - def set_binary_mode(f): - try: - fileno = f.fileno() - except Exception: - pass - else: - msvcrt.setmode(fileno, os.O_BINARY) - return f - - try: - import fcntl - except ImportError: - pass - else: - def set_binary_mode(f): - try: - fileno = f.fileno() - except Exception: - pass - else: - flags = fcntl.fcntl(fileno, fcntl.F_GETFL) - fcntl.fcntl(fileno, fcntl.F_SETFL, flags & ~os.O_NONBLOCK) - return f - - def isidentifier(x): - return _identifier_re.search(x) is not None - - def get_binary_stdin(): - return set_binary_mode(sys.stdin) - - def get_binary_stdout(): - _wrap_std_stream('stdout') - return set_binary_mode(sys.stdout) - - def get_binary_stderr(): - _wrap_std_stream('stderr') - return set_binary_mode(sys.stderr) - - def get_text_stdin(encoding=None, errors=None): - rv = _get_windows_console_stream(sys.stdin, encoding, errors) - if rv is not None: - return rv - return _make_text_stream(sys.stdin, encoding, errors, - force_readable=True) - - def get_text_stdout(encoding=None, errors=None): - _wrap_std_stream('stdout') - rv = _get_windows_console_stream(sys.stdout, encoding, errors) - if rv is not None: - return rv - return _make_text_stream(sys.stdout, encoding, errors, - force_writable=True) - - def get_text_stderr(encoding=None, errors=None): - _wrap_std_stream('stderr') - rv = _get_windows_console_stream(sys.stderr, encoding, errors) - if rv is not None: - return rv - return _make_text_stream(sys.stderr, encoding, errors, - force_writable=True) - - def filename_to_ui(value): - if isinstance(value, bytes): - value = value.decode(get_filesystem_encoding(), 'replace') - return value -else: - import io - text_type = str - raw_input = input - string_types = (str,) - int_types = (int,) - range_type = range - isidentifier = lambda x: x.isidentifier() - iteritems = lambda x: iter(x.items()) - - def is_bytes(x): - return isinstance(x, (bytes, memoryview, bytearray)) - - def _is_binary_reader(stream, default=False): - try: - return isinstance(stream.read(0), bytes) - except Exception: - return default - # This happens in some cases where the stream was already - # closed. In this case, we assume the default. - - def _is_binary_writer(stream, default=False): - try: - stream.write(b'') - except Exception: - try: - stream.write('') - return False - except Exception: - pass - return default - return True - - def _find_binary_reader(stream): - # We need to figure out if the given stream is already binary. - # This can happen because the official docs recommend detaching - # the streams to get binary streams. Some code might do this, so - # we need to deal with this case explicitly. - if _is_binary_reader(stream, False): - return stream - - buf = getattr(stream, 'buffer', None) - - # Same situation here; this time we assume that the buffer is - # actually binary in case it's closed. - if buf is not None and _is_binary_reader(buf, True): - return buf - - def _find_binary_writer(stream): - # We need to figure out if the given stream is already binary. - # This can happen because the official docs recommend detatching - # the streams to get binary streams. Some code might do this, so - # we need to deal with this case explicitly. - if _is_binary_writer(stream, False): - return stream - - buf = getattr(stream, 'buffer', None) - - # Same situation here; this time we assume that the buffer is - # actually binary in case it's closed. 
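-        # (Illustrative aside, not part of the original file: for an
-        #  ordinary text stream the `buffer` attribute is the underlying
-        #  binary layer, e.g. sys.stdout.buffer.write(b'raw bytes')
-        #  works where sys.stdout.write(b'raw bytes') raises TypeError
-        #  on Python 3.)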
- if buf is not None and _is_binary_writer(buf, True): - return buf - - def _stream_is_misconfigured(stream): - """A stream is misconfigured if its encoding is ASCII.""" - # If the stream does not have an encoding set, we assume it's set - # to ASCII. This appears to happen in certain unittest - # environments. It's not quite clear what the correct behavior is - # but this at least will force Click to recover somehow. - return is_ascii_encoding(getattr(stream, 'encoding', None) or 'ascii') - - def _is_compatible_text_stream(stream, encoding, errors): - stream_encoding = getattr(stream, 'encoding', None) - stream_errors = getattr(stream, 'errors', None) - - # Perfect match. - if stream_encoding == encoding and stream_errors == errors: - return True - - # Otherwise, it's only a compatible stream if we did not ask for - # an encoding. - if encoding is None: - return stream_encoding is not None - - return False - - def _force_correct_text_reader(text_reader, encoding, errors, - force_readable=False): - if _is_binary_reader(text_reader, False): - binary_reader = text_reader - else: - # If there is no target encoding set, we need to verify that the - # reader is not actually misconfigured. - if encoding is None and not _stream_is_misconfigured(text_reader): - return text_reader - - if _is_compatible_text_stream(text_reader, encoding, errors): - return text_reader - - # If the reader has no encoding, we try to find the underlying - # binary reader for it. If that fails because the environment is - # misconfigured, we silently go with the same reader because this - # is too common to happen. In that case, mojibake is better than - # exceptions. - binary_reader = _find_binary_reader(text_reader) - if binary_reader is None: - return text_reader - - # At this point, we default the errors to replace instead of strict - # because nobody handles those errors anyways and at this point - # we're so fundamentally fucked that nothing can repair it. - if errors is None: - errors = 'replace' - return _make_text_stream(binary_reader, encoding, errors, - force_readable=force_readable) - - def _force_correct_text_writer(text_writer, encoding, errors, - force_writable=False): - if _is_binary_writer(text_writer, False): - binary_writer = text_writer - else: - # If there is no target encoding set, we need to verify that the - # writer is not actually misconfigured. - if encoding is None and not _stream_is_misconfigured(text_writer): - return text_writer - - if _is_compatible_text_stream(text_writer, encoding, errors): - return text_writer - - # If the writer has no encoding, we try to find the underlying - # binary writer for it. If that fails because the environment is - # misconfigured, we silently go with the same writer because this - # is too common to happen. In that case, mojibake is better than - # exceptions. - binary_writer = _find_binary_writer(text_writer) - if binary_writer is None: - return text_writer - - # At this point, we default the errors to replace instead of strict - # because nobody handles those errors anyways and at this point - # we're so fundamentally fucked that nothing can repair it. 
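-        # (Illustrative aside, not part of the original file:
-        #  errors='replace' substitutes characters the target encoding
-        #  cannot represent instead of raising, e.g.
-        #      u'caf\xe9'.encode('ascii', 'replace') == b'caf?'.)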
- if errors is None: - errors = 'replace' - return _make_text_stream(binary_writer, encoding, errors, - force_writable=force_writable) - - def get_binary_stdin(): - reader = _find_binary_reader(sys.stdin) - if reader is None: - raise RuntimeError('Was not able to determine binary ' - 'stream for sys.stdin.') - return reader - - def get_binary_stdout(): - writer = _find_binary_writer(sys.stdout) - if writer is None: - raise RuntimeError('Was not able to determine binary ' - 'stream for sys.stdout.') - return writer - - def get_binary_stderr(): - writer = _find_binary_writer(sys.stderr) - if writer is None: - raise RuntimeError('Was not able to determine binary ' - 'stream for sys.stderr.') - return writer - - def get_text_stdin(encoding=None, errors=None): - rv = _get_windows_console_stream(sys.stdin, encoding, errors) - if rv is not None: - return rv - return _force_correct_text_reader(sys.stdin, encoding, errors, - force_readable=True) - - def get_text_stdout(encoding=None, errors=None): - rv = _get_windows_console_stream(sys.stdout, encoding, errors) - if rv is not None: - return rv - return _force_correct_text_writer(sys.stdout, encoding, errors, - force_writable=True) - - def get_text_stderr(encoding=None, errors=None): - rv = _get_windows_console_stream(sys.stderr, encoding, errors) - if rv is not None: - return rv - return _force_correct_text_writer(sys.stderr, encoding, errors, - force_writable=True) - - def filename_to_ui(value): - if isinstance(value, bytes): - value = value.decode(get_filesystem_encoding(), 'replace') - else: - value = value.encode('utf-8', 'surrogateescape') \ - .decode('utf-8', 'replace') - return value - - -def get_streerror(e, default=None): - if hasattr(e, 'strerror'): - msg = e.strerror - else: - if default is not None: - msg = default - else: - msg = str(e) - if isinstance(msg, bytes): - msg = msg.decode('utf-8', 'replace') - return msg - - -def open_stream(filename, mode='r', encoding=None, errors='strict', - atomic=False): - # Standard streams first. These are simple because they don't need - # special handling for the atomic flag. It's entirely ignored. - if filename == '-': - if any(m in mode for m in ['w', 'a', 'x']): - if 'b' in mode: - return get_binary_stdout(), False - return get_text_stdout(encoding=encoding, errors=errors), False - if 'b' in mode: - return get_binary_stdin(), False - return get_text_stdin(encoding=encoding, errors=errors), False - - # Non-atomic writes directly go out through the regular open functions. - if not atomic: - if encoding is None: - return open(filename, mode), True - return io.open(filename, mode, encoding=encoding, errors=errors), True - - # Some usability stuff for atomic writes - if 'a' in mode: - raise ValueError( - 'Appending to an existing file is not supported, because that ' - 'would involve an expensive `copy`-operation to a temporary ' - 'file. Open the file in normal `w`-mode and copy explicitly ' - 'if that\'s what you\'re after.' - ) - if 'x' in mode: - raise ValueError('Use the `overwrite`-parameter instead.') - if 'w' not in mode: - raise ValueError('Atomic writes only make sense with `w`-mode.') - - # Atomic writes are more complicated. They work by opening a file - # as a proxy in the same folder and then using the fdopen - # functionality to wrap it in a Python file. Then we wrap it in an - # atomic file that moves the file over on close. 
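-    # (Illustrative aside, not part of the original file: an atomic
-    #  write to 'data.json' first creates a sibling temp file named
-    #  something like '.__atomic-write1a2b', writes everything there,
-    #  and on close renames it over 'data.json' via os.replace() or
-    #  os.rename(), so readers never observe a half-written file.)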
- import tempfile - fd, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename), - prefix='.__atomic-write') - - if encoding is not None: - f = io.open(fd, mode, encoding=encoding, errors=errors) - else: - f = os.fdopen(fd, mode) - - return _AtomicFile(f, tmp_filename, os.path.realpath(filename)), True - - -# Used in a destructor call, needs extra protection from interpreter cleanup. -if hasattr(os, 'replace'): - _replace = os.replace - _can_replace = True -else: - _replace = os.rename - _can_replace = not WIN - - -class _AtomicFile(object): - - def __init__(self, f, tmp_filename, real_filename): - self._f = f - self._tmp_filename = tmp_filename - self._real_filename = real_filename - self.closed = False - - @property - def name(self): - return self._real_filename - - def close(self, delete=False): - if self.closed: - return - self._f.close() - if not _can_replace: - try: - os.remove(self._real_filename) - except OSError: - pass - _replace(self._tmp_filename, self._real_filename) - self.closed = True - - def __getattr__(self, name): - return getattr(self._f, name) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, tb): - self.close(delete=exc_type is not None) - - def __repr__(self): - return repr(self._f) - - -auto_wrap_for_ansi = None -colorama = None -get_winterm_size = None - - -def strip_ansi(value): - return _ansi_re.sub('', value) - - -def should_strip_ansi(stream=None, color=None): - if color is None: - if stream is None: - stream = sys.stdin - return not isatty(stream) - return not color - - -# If we're on Windows, we provide transparent integration through -# colorama. This will make ANSI colors through the echo function -# work automatically. -if WIN: - # Windows has a smaller terminal - DEFAULT_COLUMNS = 79 - - from ._winconsole import _get_windows_console_stream, _wrap_std_stream - - def _get_argv_encoding(): - import locale - return locale.getpreferredencoding() - - if PY2: - def raw_input(prompt=''): - sys.stderr.flush() - if prompt: - stdout = _default_text_stdout() - stdout.write(prompt) - stdin = _default_text_stdin() - return stdin.readline().rstrip('\r\n') - - try: - import colorama - except ImportError: - pass - else: - _ansi_stream_wrappers = WeakKeyDictionary() - - def auto_wrap_for_ansi(stream, color=None): - """This function wraps a stream so that calls through colorama - are issued to the win32 console API to recolor on demand. It - also ensures to reset the colors if a write call is interrupted - to not destroy the console afterwards. 
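        (Illustrative note, not in the original docstring: on a Windows
        console, echo(style('error', fg='red')) ends up writing through
        the colorama AnsiToWin32 wrapper built here, so the ANSI escape
        codes become win32 color API calls instead of literal
        '\x1b[31m' text on screen.)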
- """ - try: - cached = _ansi_stream_wrappers.get(stream) - except Exception: - cached = None - if cached is not None: - return cached - strip = should_strip_ansi(stream, color) - ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip) - rv = ansi_wrapper.stream - _write = rv.write - - def _safe_write(s): - try: - return _write(s) - except: - ansi_wrapper.reset_all() - raise - - rv.write = _safe_write - try: - _ansi_stream_wrappers[stream] = rv - except Exception: - pass - return rv - - def get_winterm_size(): - win = colorama.win32.GetConsoleScreenBufferInfo( - colorama.win32.STDOUT).srWindow - return win.Right - win.Left, win.Bottom - win.Top -else: - def _get_argv_encoding(): - return getattr(sys.stdin, 'encoding', None) or get_filesystem_encoding() - - _get_windows_console_stream = lambda *x: None - _wrap_std_stream = lambda *x: None - - -def term_len(x): - return len(strip_ansi(x)) - - -def isatty(stream): - try: - return stream.isatty() - except Exception: - return False - - -def _make_cached_stream_func(src_func, wrapper_func): - cache = WeakKeyDictionary() - def func(): - stream = src_func() - try: - rv = cache.get(stream) - except Exception: - rv = None - if rv is not None: - return rv - rv = wrapper_func() - try: - stream = src_func() # In case wrapper_func() modified the stream - cache[stream] = rv - except Exception: - pass - return rv - return func - - -_default_text_stdin = _make_cached_stream_func( - lambda: sys.stdin, get_text_stdin) -_default_text_stdout = _make_cached_stream_func( - lambda: sys.stdout, get_text_stdout) -_default_text_stderr = _make_cached_stream_func( - lambda: sys.stderr, get_text_stderr) - - -binary_streams = { - 'stdin': get_binary_stdin, - 'stdout': get_binary_stdout, - 'stderr': get_binary_stderr, -} - -text_streams = { - 'stdin': get_text_stdin, - 'stdout': get_text_stdout, - 'stderr': get_text_stderr, -} diff --git a/flo-token-explorer/lib/python3.6/site-packages/click/_termui_impl.py b/flo-token-explorer/lib/python3.6/site-packages/click/_termui_impl.py deleted file mode 100644 index 00a8e5e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/click/_termui_impl.py +++ /dev/null @@ -1,621 +0,0 @@ -# -*- coding: utf-8 -*- -""" -click._termui_impl -~~~~~~~~~~~~~~~~~~ - -This module contains implementations for the termui module. To keep the -import time of Click down, some infrequently used functionality is -placed in this module and only imported as needed. - -:copyright: © 2014 by the Pallets team. -:license: BSD, see LICENSE.rst for more details. 
-""" - -import os -import sys -import time -import math -import contextlib -from ._compat import _default_text_stdout, range_type, PY2, isatty, \ - open_stream, strip_ansi, term_len, get_best_encoding, WIN, int_types, \ - CYGWIN -from .utils import echo -from .exceptions import ClickException - - -if os.name == 'nt': - BEFORE_BAR = '\r' - AFTER_BAR = '\n' -else: - BEFORE_BAR = '\r\033[?25l' - AFTER_BAR = '\033[?25h\n' - - -def _length_hint(obj): - """Returns the length hint of an object.""" - try: - return len(obj) - except (AttributeError, TypeError): - try: - get_hint = type(obj).__length_hint__ - except AttributeError: - return None - try: - hint = get_hint(obj) - except TypeError: - return None - if hint is NotImplemented or \ - not isinstance(hint, int_types) or \ - hint < 0: - return None - return hint - - -class ProgressBar(object): - - def __init__(self, iterable, length=None, fill_char='#', empty_char=' ', - bar_template='%(bar)s', info_sep=' ', show_eta=True, - show_percent=None, show_pos=False, item_show_func=None, - label=None, file=None, color=None, width=30): - self.fill_char = fill_char - self.empty_char = empty_char - self.bar_template = bar_template - self.info_sep = info_sep - self.show_eta = show_eta - self.show_percent = show_percent - self.show_pos = show_pos - self.item_show_func = item_show_func - self.label = label or '' - if file is None: - file = _default_text_stdout() - self.file = file - self.color = color - self.width = width - self.autowidth = width == 0 - - if length is None: - length = _length_hint(iterable) - if iterable is None: - if length is None: - raise TypeError('iterable or length is required') - iterable = range_type(length) - self.iter = iter(iterable) - self.length = length - self.length_known = length is not None - self.pos = 0 - self.avg = [] - self.start = self.last_eta = time.time() - self.eta_known = False - self.finished = False - self.max_width = None - self.entered = False - self.current_item = None - self.is_hidden = not isatty(self.file) - self._last_line = None - self.short_limit = 0.5 - - def __enter__(self): - self.entered = True - self.render_progress() - return self - - def __exit__(self, exc_type, exc_value, tb): - self.render_finish() - - def __iter__(self): - if not self.entered: - raise RuntimeError('You need to use progress bars in a with block.') - self.render_progress() - return self.generator() - - def is_fast(self): - return time.time() - self.start <= self.short_limit - - def render_finish(self): - if self.is_hidden or self.is_fast(): - return - self.file.write(AFTER_BAR) - self.file.flush() - - @property - def pct(self): - if self.finished: - return 1.0 - return min(self.pos / (float(self.length) or 1), 1.0) - - @property - def time_per_iteration(self): - if not self.avg: - return 0.0 - return sum(self.avg) / float(len(self.avg)) - - @property - def eta(self): - if self.length_known and not self.finished: - return self.time_per_iteration * (self.length - self.pos) - return 0.0 - - def format_eta(self): - if self.eta_known: - t = int(self.eta) - seconds = t % 60 - t //= 60 - minutes = t % 60 - t //= 60 - hours = t % 24 - t //= 24 - if t > 0: - days = t - return '%dd %02d:%02d:%02d' % (days, hours, minutes, seconds) - else: - return '%02d:%02d:%02d' % (hours, minutes, seconds) - return '' - - def format_pos(self): - pos = str(self.pos) - if self.length_known: - pos += '/%s' % self.length - return pos - - def format_pct(self): - return ('% 4d%%' % int(self.pct * 100))[1:] - - def format_bar(self): - if self.length_known: - 
bar_length = int(self.pct * self.width) - bar = self.fill_char * bar_length - bar += self.empty_char * (self.width - bar_length) - elif self.finished: - bar = self.fill_char * self.width - else: - bar = list(self.empty_char * (self.width or 1)) - if self.time_per_iteration != 0: - bar[int((math.cos(self.pos * self.time_per_iteration) - / 2.0 + 0.5) * self.width)] = self.fill_char - bar = ''.join(bar) - return bar - - def format_progress_line(self): - show_percent = self.show_percent - - info_bits = [] - if self.length_known and show_percent is None: - show_percent = not self.show_pos - - if self.show_pos: - info_bits.append(self.format_pos()) - if show_percent: - info_bits.append(self.format_pct()) - if self.show_eta and self.eta_known and not self.finished: - info_bits.append(self.format_eta()) - if self.item_show_func is not None: - item_info = self.item_show_func(self.current_item) - if item_info is not None: - info_bits.append(item_info) - - return (self.bar_template % { - 'label': self.label, - 'bar': self.format_bar(), - 'info': self.info_sep.join(info_bits) - }).rstrip() - - def render_progress(self): - from .termui import get_terminal_size - - if self.is_hidden: - return - - buf = [] - # Update width in case the terminal has been resized - if self.autowidth: - old_width = self.width - self.width = 0 - clutter_length = term_len(self.format_progress_line()) - new_width = max(0, get_terminal_size()[0] - clutter_length) - if new_width < old_width: - buf.append(BEFORE_BAR) - buf.append(' ' * self.max_width) - self.max_width = new_width - self.width = new_width - - clear_width = self.width - if self.max_width is not None: - clear_width = self.max_width - - buf.append(BEFORE_BAR) - line = self.format_progress_line() - line_len = term_len(line) - if self.max_width is None or self.max_width < line_len: - self.max_width = line_len - - buf.append(line) - buf.append(' ' * (clear_width - line_len)) - line = ''.join(buf) - # Render the line only if it changed. - - if line != self._last_line and not self.is_fast(): - self._last_line = line - echo(line, file=self.file, color=self.color, nl=False) - self.file.flush() - - def make_step(self, n_steps): - self.pos += n_steps - if self.length_known and self.pos >= self.length: - self.finished = True - - if (time.time() - self.last_eta) < 1.0: - return - - self.last_eta = time.time() - - # self.avg is a rolling list of length <= 7 of steps where steps are - # defined as time elapsed divided by the total progress through - # self.length. - if self.pos: - step = (time.time() - self.start) / self.pos - else: - step = time.time() - self.start - - self.avg = self.avg[-6:] + [step] - - self.eta_known = self.length_known - - def update(self, n_steps): - self.make_step(n_steps) - self.render_progress() - - def finish(self): - self.eta_known = 0 - self.current_item = None - self.finished = True - - def generator(self): - """ - Returns a generator which yields the items added to the bar during - construction, and updates the progress bar *after* the yielded block - returns. 
- """ - if not self.entered: - raise RuntimeError('You need to use progress bars in a with block.') - - if self.is_hidden: - for rv in self.iter: - yield rv - else: - for rv in self.iter: - self.current_item = rv - yield rv - self.update(1) - self.finish() - self.render_progress() - - -def pager(generator, color=None): - """Decide what method to use for paging through text.""" - stdout = _default_text_stdout() - if not isatty(sys.stdin) or not isatty(stdout): - return _nullpager(stdout, generator, color) - pager_cmd = (os.environ.get('PAGER', None) or '').strip() - if pager_cmd: - if WIN: - return _tempfilepager(generator, pager_cmd, color) - return _pipepager(generator, pager_cmd, color) - if os.environ.get('TERM') in ('dumb', 'emacs'): - return _nullpager(stdout, generator, color) - if WIN or sys.platform.startswith('os2'): - return _tempfilepager(generator, 'more <', color) - if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0: - return _pipepager(generator, 'less', color) - - import tempfile - fd, filename = tempfile.mkstemp() - os.close(fd) - try: - if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0: - return _pipepager(generator, 'more', color) - return _nullpager(stdout, generator, color) - finally: - os.unlink(filename) - - -def _pipepager(generator, cmd, color): - """Page through text by feeding it to another program. Invoking a - pager through this might support colors. - """ - import subprocess - env = dict(os.environ) - - # If we're piping to less we might support colors under the - # condition that - cmd_detail = cmd.rsplit('/', 1)[-1].split() - if color is None and cmd_detail[0] == 'less': - less_flags = os.environ.get('LESS', '') + ' '.join(cmd_detail[1:]) - if not less_flags: - env['LESS'] = '-R' - color = True - elif 'r' in less_flags or 'R' in less_flags: - color = True - - c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, - env=env) - encoding = get_best_encoding(c.stdin) - try: - for text in generator: - if not color: - text = strip_ansi(text) - - c.stdin.write(text.encode(encoding, 'replace')) - except (IOError, KeyboardInterrupt): - pass - else: - c.stdin.close() - - # Less doesn't respect ^C, but catches it for its own UI purposes (aborting - # search or other commands inside less). - # - # That means when the user hits ^C, the parent process (click) terminates, - # but less is still alive, paging the output and messing up the terminal. - # - # If the user wants to make the pager exit on ^C, they should set - # `LESS='-K'`. It's not our decision to make. - while True: - try: - c.wait() - except KeyboardInterrupt: - pass - else: - break - - -def _tempfilepager(generator, cmd, color): - """Page through text by invoking a program on a temporary file.""" - import tempfile - filename = tempfile.mktemp() - # TODO: This never terminates if the passed generator never terminates. - text = "".join(generator) - if not color: - text = strip_ansi(text) - encoding = get_best_encoding(sys.stdout) - with open_stream(filename, 'wb')[0] as f: - f.write(text.encode(encoding)) - try: - os.system(cmd + ' "' + filename + '"') - finally: - os.unlink(filename) - - -def _nullpager(stream, generator, color): - """Simply print unformatted text. 
This is the ultimate fallback.""" - for text in generator: - if not color: - text = strip_ansi(text) - stream.write(text) - - -class Editor(object): - - def __init__(self, editor=None, env=None, require_save=True, - extension='.txt'): - self.editor = editor - self.env = env - self.require_save = require_save - self.extension = extension - - def get_editor(self): - if self.editor is not None: - return self.editor - for key in 'VISUAL', 'EDITOR': - rv = os.environ.get(key) - if rv: - return rv - if WIN: - return 'notepad' - for editor in 'vim', 'nano': - if os.system('which %s >/dev/null 2>&1' % editor) == 0: - return editor - return 'vi' - - def edit_file(self, filename): - import subprocess - editor = self.get_editor() - if self.env: - environ = os.environ.copy() - environ.update(self.env) - else: - environ = None - try: - c = subprocess.Popen('%s "%s"' % (editor, filename), - env=environ, shell=True) - exit_code = c.wait() - if exit_code != 0: - raise ClickException('%s: Editing failed!' % editor) - except OSError as e: - raise ClickException('%s: Editing failed: %s' % (editor, e)) - - def edit(self, text): - import tempfile - - text = text or '' - if text and not text.endswith('\n'): - text += '\n' - - fd, name = tempfile.mkstemp(prefix='editor-', suffix=self.extension) - try: - if WIN: - encoding = 'utf-8-sig' - text = text.replace('\n', '\r\n') - else: - encoding = 'utf-8' - text = text.encode(encoding) - - f = os.fdopen(fd, 'wb') - f.write(text) - f.close() - timestamp = os.path.getmtime(name) - - self.edit_file(name) - - if self.require_save \ - and os.path.getmtime(name) == timestamp: - return None - - f = open(name, 'rb') - try: - rv = f.read() - finally: - f.close() - return rv.decode('utf-8-sig').replace('\r\n', '\n') - finally: - os.unlink(name) - - -def open_url(url, wait=False, locate=False): - import subprocess - - def _unquote_file(url): - try: - import urllib - except ImportError: - import urllib - if url.startswith('file://'): - url = urllib.unquote(url[7:]) - return url - - if sys.platform == 'darwin': - args = ['open'] - if wait: - args.append('-W') - if locate: - args.append('-R') - args.append(_unquote_file(url)) - null = open('/dev/null', 'w') - try: - return subprocess.Popen(args, stderr=null).wait() - finally: - null.close() - elif WIN: - if locate: - url = _unquote_file(url) - args = 'explorer /select,"%s"' % _unquote_file( - url.replace('"', '')) - else: - args = 'start %s "" "%s"' % ( - wait and '/WAIT' or '', url.replace('"', '')) - return os.system(args) - elif CYGWIN: - if locate: - url = _unquote_file(url) - args = 'cygstart "%s"' % (os.path.dirname(url).replace('"', '')) - else: - args = 'cygstart %s "%s"' % ( - wait and '-w' or '', url.replace('"', '')) - return os.system(args) - - try: - if locate: - url = os.path.dirname(_unquote_file(url)) or '.' - else: - url = _unquote_file(url) - c = subprocess.Popen(['xdg-open', url]) - if wait: - return c.wait() - return 0 - except OSError: - if url.startswith(('http://', 'https://')) and not locate and not wait: - import webbrowser - webbrowser.open(url) - return 0 - return 1 - - -def _translate_ch_to_exc(ch): - if ch == u'\x03': - raise KeyboardInterrupt() - if ch == u'\x04' and not WIN: # Unix-like, Ctrl+D - raise EOFError() - if ch == u'\x1a' and WIN: # Windows, Ctrl+Z - raise EOFError() - - -if WIN: - import msvcrt - - @contextlib.contextmanager - def raw_terminal(): - yield - - def getchar(echo): - # The function `getch` will return a bytes object corresponding to - # the pressed character. 
Since Windows 10 build 1803, it will also - # return \x00 when called a second time after pressing a regular key. - # - # `getwch` does not share this probably-bugged behavior. Moreover, it - # returns a Unicode object by default, which is what we want. - # - # Either of these functions will return \x00 or \xe0 to indicate - # a special key, and you need to call the same function again to get - # the "rest" of the code. The fun part is that \u00e0 is - # "latin small letter a with grave", so if you type that on a French - # keyboard, you _also_ get a \xe0. - # E.g., consider the Up arrow. This returns \xe0 and then \x48. The - # resulting Unicode string reads as "a with grave" + "capital H". - # This is indistinguishable from when the user actually types - # "a with grave" and then "capital H". - # - # When \xe0 is returned, we assume it's part of a special-key sequence - # and call `getwch` again, but that means that when the user types - # the \u00e0 character, `getchar` doesn't return until a second - # character is typed. - # The alternative is returning immediately, but that would mess up - # cross-platform handling of arrow keys and others that start with - # \xe0. Another option is using `getch`, but then we can't reliably - # read non-ASCII characters, because return values of `getch` are - # limited to the current 8-bit codepage. - # - # Anyway, Click doesn't claim to do this Right(tm), and using `getwch` - # is doing the right thing in more situations than with `getch`. - if echo: - func = msvcrt.getwche - else: - func = msvcrt.getwch - - rv = func() - if rv in (u'\x00', u'\xe0'): - # \x00 and \xe0 are control characters that indicate special key, - # see above. - rv += func() - _translate_ch_to_exc(rv) - return rv -else: - import tty - import termios - - @contextlib.contextmanager - def raw_terminal(): - if not isatty(sys.stdin): - f = open('/dev/tty') - fd = f.fileno() - else: - fd = sys.stdin.fileno() - f = None - try: - old_settings = termios.tcgetattr(fd) - try: - tty.setraw(fd) - yield fd - finally: - termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) - sys.stdout.flush() - if f is not None: - f.close() - except termios.error: - pass - - def getchar(echo): - with raw_terminal() as fd: - ch = os.read(fd, 32) - ch = ch.decode(get_best_encoding(sys.stdin), 'replace') - if echo and isatty(sys.stdout): - sys.stdout.write(ch) - _translate_ch_to_exc(ch) - return ch diff --git a/flo-token-explorer/lib/python3.6/site-packages/click/_textwrap.py b/flo-token-explorer/lib/python3.6/site-packages/click/_textwrap.py deleted file mode 100644 index 7e77603..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/click/_textwrap.py +++ /dev/null @@ -1,38 +0,0 @@ -import textwrap -from contextlib import contextmanager - - -class TextWrapper(textwrap.TextWrapper): - - def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): - space_left = max(width - cur_len, 1) - - if self.break_long_words: - last = reversed_chunks[-1] - cut = last[:space_left] - res = last[space_left:] - cur_line.append(cut) - reversed_chunks[-1] = res - elif not cur_line: - cur_line.append(reversed_chunks.pop()) - - @contextmanager - def extra_indent(self, indent): - old_initial_indent = self.initial_indent - old_subsequent_indent = self.subsequent_indent - self.initial_indent += indent - self.subsequent_indent += indent - try: - yield - finally: - self.initial_indent = old_initial_indent - self.subsequent_indent = old_subsequent_indent - - def indent_only(self, text): - rv = [] - for idx, line in 
enumerate(text.splitlines()): - indent = self.initial_indent - if idx > 0: - indent = self.subsequent_indent - rv.append(indent + line) - return '\n'.join(rv) diff --git a/flo-token-explorer/lib/python3.6/site-packages/click/_unicodefun.py b/flo-token-explorer/lib/python3.6/site-packages/click/_unicodefun.py deleted file mode 100644 index 620edff..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/click/_unicodefun.py +++ /dev/null @@ -1,125 +0,0 @@ -import os -import sys -import codecs - -from ._compat import PY2 - - -# If someone wants to vendor click, we want to ensure the -# correct package is discovered. Ideally we could use a -# relative import here but unfortunately Python does not -# support that. -click = sys.modules[__name__.rsplit('.', 1)[0]] - - -def _find_unicode_literals_frame(): - import __future__ - if not hasattr(sys, '_getframe'): # not all Python implementations have it - return 0 - frm = sys._getframe(1) - idx = 1 - while frm is not None: - if frm.f_globals.get('__name__', '').startswith('click.'): - frm = frm.f_back - idx += 1 - elif frm.f_code.co_flags & __future__.unicode_literals.compiler_flag: - return idx - else: - break - return 0 - - -def _check_for_unicode_literals(): - if not __debug__: - return - if not PY2 or click.disable_unicode_literals_warning: - return - bad_frame = _find_unicode_literals_frame() - if bad_frame <= 0: - return - from warnings import warn - warn(Warning('Click detected the use of the unicode_literals ' - '__future__ import. This is heavily discouraged ' - 'because it can introduce subtle bugs in your ' - 'code. You should instead use explicit u"" literals ' - 'for your unicode strings. For more information see ' - 'https://click.palletsprojects.com/python3/'), - stacklevel=bad_frame) - - -def _verify_python3_env(): - """Ensures that the environment is good for unicode on Python 3.""" - if PY2: - return - try: - import locale - fs_enc = codecs.lookup(locale.getpreferredencoding()).name - except Exception: - fs_enc = 'ascii' - if fs_enc != 'ascii': - return - - extra = '' - if os.name == 'posix': - import subprocess - try: - rv = subprocess.Popen(['locale', '-a'], stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate()[0] - except OSError: - rv = b'' - good_locales = set() - has_c_utf8 = False - - # Make sure we're operating on text here. - if isinstance(rv, bytes): - rv = rv.decode('ascii', 'replace') - - for line in rv.splitlines(): - locale = line.strip() - if locale.lower().endswith(('.utf-8', '.utf8')): - good_locales.add(locale) - if locale.lower() in ('c.utf8', 'c.utf-8'): - has_c_utf8 = True - - extra += '\n\n' - if not good_locales: - extra += ( - 'Additional information: on this system no suitable UTF-8\n' - 'locales were discovered. This most likely requires resolving\n' - 'by reconfiguring the locale system.' - ) - elif has_c_utf8: - extra += ( - 'This system supports the C.UTF-8 locale which is recommended.\n' - 'You might be able to resolve your issue by exporting the\n' - 'following environment variables:\n\n' - ' export LC_ALL=C.UTF-8\n' - ' export LANG=C.UTF-8' - ) - else: - extra += ( - 'This system lists a couple of UTF-8 supporting locales that\n' - 'you can pick from. 
The following suitable locales were\n' - 'discovered: %s' - ) % ', '.join(sorted(good_locales)) - - bad_locale = None - for locale in os.environ.get('LC_ALL'), os.environ.get('LANG'): - if locale and locale.lower().endswith(('.utf-8', '.utf8')): - bad_locale = locale - if locale is not None: - break - if bad_locale is not None: - extra += ( - '\n\nClick discovered that you exported a UTF-8 locale\n' - 'but the locale system could not pick up from it because\n' - 'it does not exist. The exported locale is "%s" but it\n' - 'is not supported' - ) % bad_locale - - raise RuntimeError( - 'Click will abort further execution because Python 3 was' - ' configured to use ASCII as encoding for the environment.' - ' Consult https://click.palletsprojects.com/en/7.x/python3/ for' - ' mitigation steps.' + extra - ) diff --git a/flo-token-explorer/lib/python3.6/site-packages/click/_winconsole.py b/flo-token-explorer/lib/python3.6/site-packages/click/_winconsole.py deleted file mode 100644 index bbb080d..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/click/_winconsole.py +++ /dev/null @@ -1,307 +0,0 @@ -# -*- coding: utf-8 -*- -# This module is based on the excellent work by Adam Bartoš who -# provided a lot of what went into the implementation here in -# the discussion to issue1602 in the Python bug tracker. -# -# There are some general differences in regards to how this works -# compared to the original patches as we do not need to patch -# the entire interpreter but just work in our little world of -# echo and prmopt. - -import io -import os -import sys -import zlib -import time -import ctypes -import msvcrt -from ._compat import _NonClosingTextIOWrapper, text_type, PY2 -from ctypes import byref, POINTER, c_int, c_char, c_char_p, \ - c_void_p, py_object, c_ssize_t, c_ulong, windll, WINFUNCTYPE -try: - from ctypes import pythonapi - PyObject_GetBuffer = pythonapi.PyObject_GetBuffer - PyBuffer_Release = pythonapi.PyBuffer_Release -except ImportError: - pythonapi = None -from ctypes.wintypes import LPWSTR, LPCWSTR - - -c_ssize_p = POINTER(c_ssize_t) - -kernel32 = windll.kernel32 -GetStdHandle = kernel32.GetStdHandle -ReadConsoleW = kernel32.ReadConsoleW -WriteConsoleW = kernel32.WriteConsoleW -GetLastError = kernel32.GetLastError -GetCommandLineW = WINFUNCTYPE(LPWSTR)( - ('GetCommandLineW', windll.kernel32)) -CommandLineToArgvW = WINFUNCTYPE( - POINTER(LPWSTR), LPCWSTR, POINTER(c_int))( - ('CommandLineToArgvW', windll.shell32)) - - -STDIN_HANDLE = GetStdHandle(-10) -STDOUT_HANDLE = GetStdHandle(-11) -STDERR_HANDLE = GetStdHandle(-12) - - -PyBUF_SIMPLE = 0 -PyBUF_WRITABLE = 1 - -ERROR_SUCCESS = 0 -ERROR_NOT_ENOUGH_MEMORY = 8 -ERROR_OPERATION_ABORTED = 995 - -STDIN_FILENO = 0 -STDOUT_FILENO = 1 -STDERR_FILENO = 2 - -EOF = b'\x1a' -MAX_BYTES_WRITTEN = 32767 - - -class Py_buffer(ctypes.Structure): - _fields_ = [ - ('buf', c_void_p), - ('obj', py_object), - ('len', c_ssize_t), - ('itemsize', c_ssize_t), - ('readonly', c_int), - ('ndim', c_int), - ('format', c_char_p), - ('shape', c_ssize_p), - ('strides', c_ssize_p), - ('suboffsets', c_ssize_p), - ('internal', c_void_p) - ] - - if PY2: - _fields_.insert(-1, ('smalltable', c_ssize_t * 2)) - - -# On PyPy we cannot get buffers so our ability to operate here is -# serverly limited. 
-if pythonapi is None:
-    get_buffer = None
-else:
-    def get_buffer(obj, writable=False):
-        buf = Py_buffer()
-        flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE
-        PyObject_GetBuffer(py_object(obj), byref(buf), flags)
-        try:
-            buffer_type = c_char * buf.len
-            return buffer_type.from_address(buf.buf)
-        finally:
-            PyBuffer_Release(byref(buf))
-
-
-class _WindowsConsoleRawIOBase(io.RawIOBase):
-
-    def __init__(self, handle):
-        self.handle = handle
-
-    def isatty(self):
-        io.RawIOBase.isatty(self)
-        return True
-
-
-class _WindowsConsoleReader(_WindowsConsoleRawIOBase):
-
-    def readable(self):
-        return True
-
-    def readinto(self, b):
-        bytes_to_be_read = len(b)
-        if not bytes_to_be_read:
-            return 0
-        elif bytes_to_be_read % 2:
-            raise ValueError('cannot read odd number of bytes from '
-                             'UTF-16-LE encoded console')
-
-        buffer = get_buffer(b, writable=True)
-        code_units_to_be_read = bytes_to_be_read // 2
-        code_units_read = c_ulong()
-
-        rv = ReadConsoleW(self.handle, buffer, code_units_to_be_read,
-                          byref(code_units_read), None)
-        if GetLastError() == ERROR_OPERATION_ABORTED:
-            # wait for KeyboardInterrupt
-            time.sleep(0.1)
-        if not rv:
-            raise OSError('Windows error: %s' % GetLastError())
-
-        if buffer[0] == EOF:
-            return 0
-        return 2 * code_units_read.value
-
-
-class _WindowsConsoleWriter(_WindowsConsoleRawIOBase):
-
-    def writable(self):
-        return True
-
-    @staticmethod
-    def _get_error_message(errno):
-        if errno == ERROR_SUCCESS:
-            return 'ERROR_SUCCESS'
-        elif errno == ERROR_NOT_ENOUGH_MEMORY:
-            return 'ERROR_NOT_ENOUGH_MEMORY'
-        return 'Windows error %s' % errno
-
-    def write(self, b):
-        bytes_to_be_written = len(b)
-        buf = get_buffer(b)
-        code_units_to_be_written = min(bytes_to_be_written,
-                                       MAX_BYTES_WRITTEN) // 2
-        code_units_written = c_ulong()
-
-        WriteConsoleW(self.handle, buf, code_units_to_be_written,
-                      byref(code_units_written), None)
-        bytes_written = 2 * code_units_written.value
-
-        if bytes_written == 0 and bytes_to_be_written > 0:
-            raise OSError(self._get_error_message(GetLastError()))
-        return bytes_written
-
-
-class ConsoleStream(object):
-
-    def __init__(self, text_stream, byte_stream):
-        self._text_stream = text_stream
-        self.buffer = byte_stream
-
-    @property
-    def name(self):
-        return self.buffer.name
-
-    def write(self, x):
-        if isinstance(x, text_type):
-            return self._text_stream.write(x)
-        try:
-            self.flush()
-        except Exception:
-            pass
-        return self.buffer.write(x)
-
-    def writelines(self, lines):
-        for line in lines:
-            self.write(line)
-
-    def __getattr__(self, name):
-        return getattr(self._text_stream, name)
-
-    def isatty(self):
-        return self.buffer.isatty()
-
-    def __repr__(self):
-        return '<ConsoleStream name=%r encoding=%r>' % (
-            self.name,
-            self.encoding,
-        )
-
-
-class WindowsChunkedWriter(object):
-    """
-    Wraps a stream (such as stdout), acting as a transparent proxy for all
-    attribute access apart from method 'write()' which we wrap to write in
-    limited chunks due to a Windows limitation on binary console streams.
-    """
-    def __init__(self, wrapped):
-        # double-underscore everything to prevent clashes with names of
-        # attributes on the wrapped stream object.
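-        # (Illustrative aside, not part of the original file: Python
-        #  name mangling stores the attribute as
-        #  _WindowsChunkedWriter__wrapped, so even a wrapped stream that
-        #  exposes its own __wrapped attribute cannot collide with ours
-        #  through __getattr__.)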
- self.__wrapped = wrapped - - def __getattr__(self, name): - return getattr(self.__wrapped, name) - - def write(self, text): - total_to_write = len(text) - written = 0 - - while written < total_to_write: - to_write = min(total_to_write - written, MAX_BYTES_WRITTEN) - self.__wrapped.write(text[written:written+to_write]) - written += to_write - - -_wrapped_std_streams = set() - - -def _wrap_std_stream(name): - # Python 2 & Windows 7 and below - if PY2 and sys.getwindowsversion()[:2] <= (6, 1) and name not in _wrapped_std_streams: - setattr(sys, name, WindowsChunkedWriter(getattr(sys, name))) - _wrapped_std_streams.add(name) - - -def _get_text_stdin(buffer_stream): - text_stream = _NonClosingTextIOWrapper( - io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)), - 'utf-16-le', 'strict', line_buffering=True) - return ConsoleStream(text_stream, buffer_stream) - - -def _get_text_stdout(buffer_stream): - text_stream = _NonClosingTextIOWrapper( - io.BufferedWriter(_WindowsConsoleWriter(STDOUT_HANDLE)), - 'utf-16-le', 'strict', line_buffering=True) - return ConsoleStream(text_stream, buffer_stream) - - -def _get_text_stderr(buffer_stream): - text_stream = _NonClosingTextIOWrapper( - io.BufferedWriter(_WindowsConsoleWriter(STDERR_HANDLE)), - 'utf-16-le', 'strict', line_buffering=True) - return ConsoleStream(text_stream, buffer_stream) - - -if PY2: - def _hash_py_argv(): - return zlib.crc32('\x00'.join(sys.argv[1:])) - - _initial_argv_hash = _hash_py_argv() - - def _get_windows_argv(): - argc = c_int(0) - argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc)) - argv = [argv_unicode[i] for i in range(0, argc.value)] - - if not hasattr(sys, 'frozen'): - argv = argv[1:] - while len(argv) > 0: - arg = argv[0] - if not arg.startswith('-') or arg == '-': - break - argv = argv[1:] - if arg.startswith(('-c', '-m')): - break - - return argv[1:] - - -_stream_factories = { - 0: _get_text_stdin, - 1: _get_text_stdout, - 2: _get_text_stderr, -} - - -def _get_windows_console_stream(f, encoding, errors): - if get_buffer is not None and \ - encoding in ('utf-16-le', None) \ - and errors in ('strict', None) and \ - hasattr(f, 'isatty') and f.isatty(): - func = _stream_factories.get(f.fileno()) - if func is not None: - if not PY2: - f = getattr(f, 'buffer', None) - if f is None: - return None - else: - # If we are on Python 2 we need to set the stream that we - # deal with to binary mode as otherwise the exercise is a - # bit moot. The same problems apply as for - # get_binary_stdin and friends from _compat.
- msvcrt.setmode(f.fileno(), os.O_BINARY) - return func(f) diff --git a/flo-token-explorer/lib/python3.6/site-packages/click/core.py b/flo-token-explorer/lib/python3.6/site-packages/click/core.py deleted file mode 100644 index 7a1e342..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/click/core.py +++ /dev/null @@ -1,1856 +0,0 @@ -import errno -import inspect -import os -import sys -from contextlib import contextmanager -from itertools import repeat -from functools import update_wrapper - -from .types import convert_type, IntRange, BOOL -from .utils import PacifyFlushWrapper, make_str, make_default_short_help, \ - echo, get_os_args -from .exceptions import ClickException, UsageError, BadParameter, Abort, \ - MissingParameter, Exit -from .termui import prompt, confirm, style -from .formatting import HelpFormatter, join_options -from .parser import OptionParser, split_opt -from .globals import push_context, pop_context - -from ._compat import PY2, isidentifier, iteritems, string_types -from ._unicodefun import _check_for_unicode_literals, _verify_python3_env - - -_missing = object() - - -SUBCOMMAND_METAVAR = 'COMMAND [ARGS]...' -SUBCOMMANDS_METAVAR = 'COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]...' - -DEPRECATED_HELP_NOTICE = ' (DEPRECATED)' -DEPRECATED_INVOKE_NOTICE = 'DeprecationWarning: ' + \ - 'The command %(name)s is deprecated.' - - -def _maybe_show_deprecated_notice(cmd): - if cmd.deprecated: - echo(style(DEPRECATED_INVOKE_NOTICE % {'name': cmd.name}, fg='red'), err=True) - - -def fast_exit(code): - """Exit without garbage collection, this speeds up exit by about 10ms for - things like bash completion. - """ - sys.stdout.flush() - sys.stderr.flush() - os._exit(code) - - -def _bashcomplete(cmd, prog_name, complete_var=None): - """Internal handler for the bash completion support.""" - if complete_var is None: - complete_var = '_%s_COMPLETE' % (prog_name.replace('-', '_')).upper() - complete_instr = os.environ.get(complete_var) - if not complete_instr: - return - - from ._bashcomplete import bashcomplete - if bashcomplete(cmd, prog_name, complete_var, complete_instr): - fast_exit(1) - - -def _check_multicommand(base_command, cmd_name, cmd, register=False): - if not base_command.chain or not isinstance(cmd, MultiCommand): - return - if register: - hint = 'It is not possible to add multi commands as children to ' \ - 'another multi command that is in chain mode' - else: - hint = 'Found a multi command as subcommand to a multi command ' \ - 'that is in chain mode. This is not supported' - raise RuntimeError('%s. Command "%s" is set to chain and "%s" was ' - 'added as subcommand but it in itself is a ' - 'multi command. ("%s" is a %s within a chained ' - '%s named "%s").' % ( - hint, base_command.name, cmd_name, - cmd_name, cmd.__class__.__name__, - base_command.__class__.__name__, - base_command.name)) - - -def batch(iterable, batch_size): - return list(zip(*repeat(iter(iterable), batch_size))) - - -def invoke_param_callback(callback, ctx, param, value): - code = getattr(callback, '__code__', None) - args = getattr(code, 'co_argcount', 3) - - if args < 3: - # This will become a warning in Click 3.0: - from warnings import warn - warn(Warning('Invoked legacy parameter callback "%s". The new ' - 'signature for such callbacks starting with ' - 'click 2.0 is (ctx, param, value).' 
- % callback), stacklevel=3) - return callback(ctx, value) - return callback(ctx, param, value) - - -@contextmanager -def augment_usage_errors(ctx, param=None): - """Context manager that attaches extra information to exceptions that - fly. - """ - try: - yield - except BadParameter as e: - if e.ctx is None: - e.ctx = ctx - if param is not None and e.param is None: - e.param = param - raise - except UsageError as e: - if e.ctx is None: - e.ctx = ctx - raise - - -def iter_params_for_processing(invocation_order, declaration_order): - """Given a sequence of parameters in the order as should be considered - for processing and an iterable of parameters that exist, this returns - a list in the correct order as they should be processed. - """ - def sort_key(item): - try: - idx = invocation_order.index(item) - except ValueError: - idx = float('inf') - return (not item.is_eager, idx) - - return sorted(declaration_order, key=sort_key) - - -class Context(object): - """The context is a special internal object that holds state relevant - for the script execution at every single level. It's normally invisible - to commands unless they opt-in to getting access to it. - - The context is useful as it can pass internal objects around and can - control special execution features such as reading data from - environment variables. - - A context can be used as context manager in which case it will call - :meth:`close` on teardown. - - .. versionadded:: 2.0 - Added the `resilient_parsing`, `help_option_names`, - `token_normalize_func` parameters. - - .. versionadded:: 3.0 - Added the `allow_extra_args` and `allow_interspersed_args` - parameters. - - .. versionadded:: 4.0 - Added the `color`, `ignore_unknown_options`, and - `max_content_width` parameters. - - :param command: the command class for this context. - :param parent: the parent context. - :param info_name: the info name for this invocation. Generally this - is the most descriptive name for the script or - command. For the toplevel script it is usually - the name of the script, for commands below it it's - the name of the script. - :param obj: an arbitrary object of user data. - :param auto_envvar_prefix: the prefix to use for automatic environment - variables. If this is `None` then reading - from environment variables is disabled. This - does not affect manually set environment - variables which are always read. - :param default_map: a dictionary (like object) with default values - for parameters. - :param terminal_width: the width of the terminal. The default is - inherit from parent context. If no context - defines the terminal width then auto - detection will be applied. - :param max_content_width: the maximum width for content rendered by - Click (this currently only affects help - pages). This defaults to 80 characters if - not overridden. In other words: even if the - terminal is larger than that, Click will not - format things wider than 80 characters by - default. In addition to that, formatters might - add some safety mapping on the right. - :param resilient_parsing: if this flag is enabled then Click will - parse without any interactivity or callback - invocation. Default values will also be - ignored. This is useful for implementing - things such as completion support. - :param allow_extra_args: if this is set to `True` then extra arguments - at the end will not raise an error and will be - kept on the context. The default is to inherit - from the command. 
- :param allow_interspersed_args: if this is set to `False` then options - and arguments cannot be mixed. The - default is to inherit from the command. - :param ignore_unknown_options: instructs click to ignore options it does - not know and keeps them for later - processing. - :param help_option_names: optionally a list of strings that define how - the default help parameter is named. The - default is ``['--help']``. - :param token_normalize_func: an optional function that is used to - normalize tokens (options, choices, - etc.). This for instance can be used to - implement case insensitive behavior. - :param color: controls if the terminal supports ANSI colors or not. The - default is autodetection. This is only needed if ANSI - codes are used in texts that Click prints which is by - default not the case. This for instance would affect - help output. - """ - - def __init__(self, command, parent=None, info_name=None, obj=None, - auto_envvar_prefix=None, default_map=None, - terminal_width=None, max_content_width=None, - resilient_parsing=False, allow_extra_args=None, - allow_interspersed_args=None, - ignore_unknown_options=None, help_option_names=None, - token_normalize_func=None, color=None): - #: the parent context or `None` if none exists. - self.parent = parent - #: the :class:`Command` for this context. - self.command = command - #: the descriptive information name - self.info_name = info_name - #: the parsed parameters except if the value is hidden in which - #: case it's not remembered. - self.params = {} - #: the leftover arguments. - self.args = [] - #: protected arguments. These are arguments that are prepended - #: to `args` when certain parsing scenarios are encountered but - #: must be never propagated to another arguments. This is used - #: to implement nested parsing. - self.protected_args = [] - if obj is None and parent is not None: - obj = parent.obj - #: the user object stored. - self.obj = obj - self._meta = getattr(parent, 'meta', {}) - - #: A dictionary (-like object) with defaults for parameters. - if default_map is None \ - and parent is not None \ - and parent.default_map is not None: - default_map = parent.default_map.get(info_name) - self.default_map = default_map - - #: This flag indicates if a subcommand is going to be executed. A - #: group callback can use this information to figure out if it's - #: being executed directly or because the execution flow passes - #: onwards to a subcommand. By default it's None, but it can be - #: the name of the subcommand to execute. - #: - #: If chaining is enabled this will be set to ``'*'`` in case - #: any commands are executed. It is however not possible to - #: figure out which ones. If you require this knowledge you - #: should use a :func:`resultcallback`. - self.invoked_subcommand = None - - if terminal_width is None and parent is not None: - terminal_width = parent.terminal_width - #: The width of the terminal (None is autodetection). - self.terminal_width = terminal_width - - if max_content_width is None and parent is not None: - max_content_width = parent.max_content_width - #: The maximum width of formatted content (None implies a sensible - #: default which is 80 for most things). - self.max_content_width = max_content_width - - if allow_extra_args is None: - allow_extra_args = command.allow_extra_args - #: Indicates if the context allows extra args or if it should - #: fail on parsing. - #: - #: .. 
versionadded:: 3.0 - self.allow_extra_args = allow_extra_args - - if allow_interspersed_args is None: - allow_interspersed_args = command.allow_interspersed_args - #: Indicates if the context allows mixing of arguments and - #: options or not. - #: - #: .. versionadded:: 3.0 - self.allow_interspersed_args = allow_interspersed_args - - if ignore_unknown_options is None: - ignore_unknown_options = command.ignore_unknown_options - #: Instructs click to ignore options that a command does not - #: understand and will store it on the context for later - #: processing. This is primarily useful for situations where you - #: want to call into external programs. Generally this pattern is - #: strongly discouraged because it's not possibly to losslessly - #: forward all arguments. - #: - #: .. versionadded:: 4.0 - self.ignore_unknown_options = ignore_unknown_options - - if help_option_names is None: - if parent is not None: - help_option_names = parent.help_option_names - else: - help_option_names = ['--help'] - - #: The names for the help options. - self.help_option_names = help_option_names - - if token_normalize_func is None and parent is not None: - token_normalize_func = parent.token_normalize_func - - #: An optional normalization function for tokens. This is - #: options, choices, commands etc. - self.token_normalize_func = token_normalize_func - - #: Indicates if resilient parsing is enabled. In that case Click - #: will do its best to not cause any failures and default values - #: will be ignored. Useful for completion. - self.resilient_parsing = resilient_parsing - - # If there is no envvar prefix yet, but the parent has one and - # the command on this level has a name, we can expand the envvar - # prefix automatically. - if auto_envvar_prefix is None: - if parent is not None \ - and parent.auto_envvar_prefix is not None and \ - self.info_name is not None: - auto_envvar_prefix = '%s_%s' % (parent.auto_envvar_prefix, - self.info_name.upper()) - else: - auto_envvar_prefix = auto_envvar_prefix.upper() - self.auto_envvar_prefix = auto_envvar_prefix - - if color is None and parent is not None: - color = parent.color - - #: Controls if styling output is wanted or not. - self.color = color - - self._close_callbacks = [] - self._depth = 0 - - def __enter__(self): - self._depth += 1 - push_context(self) - return self - - def __exit__(self, exc_type, exc_value, tb): - self._depth -= 1 - if self._depth == 0: - self.close() - pop_context() - - @contextmanager - def scope(self, cleanup=True): - """This helper method can be used with the context object to promote - it to the current thread local (see :func:`get_current_context`). - The default behavior of this is to invoke the cleanup functions which - can be disabled by setting `cleanup` to `False`. The cleanup - functions are typically used for things such as closing file handles. - - If the cleanup is intended the context object can also be directly - used as a context manager. - - Example usage:: - - with ctx.scope(): - assert get_current_context() is ctx - - This is equivalent:: - - with ctx: - assert get_current_context() is ctx - - .. versionadded:: 5.0 - - :param cleanup: controls if the cleanup functions should be run or - not. The default is to run these functions. In - some situations the context only wants to be - temporarily pushed in which case this can be disabled. - Nested pushes automatically defer the cleanup. 
- """ - if not cleanup: - self._depth += 1 - try: - with self as rv: - yield rv - finally: - if not cleanup: - self._depth -= 1 - - @property - def meta(self): - """This is a dictionary which is shared with all the contexts - that are nested. It exists so that click utilities can store some - state here if they need to. It is however the responsibility of - that code to manage this dictionary well. - - The keys are supposed to be unique dotted strings. For instance - module paths are a good choice for it. What is stored in there is - irrelevant for the operation of click. However what is important is - that code that places data here adheres to the general semantics of - the system. - - Example usage:: - - LANG_KEY = __name__ + '.lang' - - def set_language(value): - ctx = get_current_context() - ctx.meta[LANG_KEY] = value - - def get_language(): - return get_current_context().meta.get(LANG_KEY, 'en_US') - - .. versionadded:: 5.0 - """ - return self._meta - - def make_formatter(self): - """Creates the formatter for the help and usage output.""" - return HelpFormatter(width=self.terminal_width, - max_width=self.max_content_width) - - def call_on_close(self, f): - """This decorator remembers a function as callback that should be - executed when the context tears down. This is most useful to bind - resource handling to the script execution. For instance, file objects - opened by the :class:`File` type will register their close callbacks - here. - - :param f: the function to execute on teardown. - """ - self._close_callbacks.append(f) - return f - - def close(self): - """Invokes all close callbacks.""" - for cb in self._close_callbacks: - cb() - self._close_callbacks = [] - - @property - def command_path(self): - """The computed command path. This is used for the ``usage`` - information on the help page. It's automatically created by - combining the info names of the chain of contexts to the root. - """ - rv = '' - if self.info_name is not None: - rv = self.info_name - if self.parent is not None: - rv = self.parent.command_path + ' ' + rv - return rv.lstrip() - - def find_root(self): - """Finds the outermost context.""" - node = self - while node.parent is not None: - node = node.parent - return node - - def find_object(self, object_type): - """Finds the closest object of a given type.""" - node = self - while node is not None: - if isinstance(node.obj, object_type): - return node.obj - node = node.parent - - def ensure_object(self, object_type): - """Like :meth:`find_object` but sets the innermost object to a - new instance of `object_type` if it does not exist. - """ - rv = self.find_object(object_type) - if rv is None: - self.obj = rv = object_type() - return rv - - def lookup_default(self, name): - """Looks up the default for a parameter name. This by default - looks into the :attr:`default_map` if available. - """ - if self.default_map is not None: - rv = self.default_map.get(name) - if callable(rv): - rv = rv() - return rv - - def fail(self, message): - """Aborts the execution of the program with a specific error - message. - - :param message: the error message to fail with. - """ - raise UsageError(message, self) - - def abort(self): - """Aborts the script.""" - raise Abort() - - def exit(self, code=0): - """Exits the application with a given exit code.""" - raise Exit(code) - - def get_usage(self): - """Helper method to get formatted usage string for the current - context and command. 
- """ - return self.command.get_usage(self) - - def get_help(self): - """Helper method to get formatted help page for the current - context and command. - """ - return self.command.get_help(self) - - def invoke(*args, **kwargs): - """Invokes a command callback in exactly the way it expects. There - are two ways to invoke this method: - - 1. the first argument can be a callback and all other arguments and - keyword arguments are forwarded directly to the function. - 2. the first argument is a click command object. In that case all - arguments are forwarded as well but proper click parameters - (options and click arguments) must be keyword arguments and Click - will fill in defaults. - - Note that before Click 3.2 keyword arguments were not properly filled - in against the intention of this code and no context was created. For - more information about this change and why it was done in a bugfix - release see :ref:`upgrade-to-3.2`. - """ - self, callback = args[:2] - ctx = self - - # It's also possible to invoke another command which might or - # might not have a callback. In that case we also fill - # in defaults and make a new context for this command. - if isinstance(callback, Command): - other_cmd = callback - callback = other_cmd.callback - ctx = Context(other_cmd, info_name=other_cmd.name, parent=self) - if callback is None: - raise TypeError('The given command does not have a ' - 'callback that can be invoked.') - - for param in other_cmd.params: - if param.name not in kwargs and param.expose_value: - kwargs[param.name] = param.get_default(ctx) - - args = args[2:] - with augment_usage_errors(self): - with ctx: - return callback(*args, **kwargs) - - def forward(*args, **kwargs): - """Similar to :meth:`invoke` but fills in default keyword - arguments from the current context if the other command expects - it. This cannot invoke callbacks directly, only other commands. - """ - self, cmd = args[:2] - - # It's also possible to invoke another command which might or - # might not have a callback. - if not isinstance(cmd, Command): - raise TypeError('Callback is not a command.') - - for param in self.params: - if param not in kwargs: - kwargs[param] = self.params[param] - - return self.invoke(cmd, **kwargs) - - -class BaseCommand(object): - """The base command implements the minimal API contract of commands. - Most code will never use this as it does not implement a lot of useful - functionality but it can act as the direct subclass of alternative - parsing methods that do not depend on the Click parser. - - For instance, this can be used to bridge Click and other systems like - argparse or docopt. - - Because base commands do not implement a lot of the API that other - parts of Click take for granted, they are not supported for all - operations. For instance, they cannot be used with the decorators - usually and they have no built-in callback system. - - .. versionchanged:: 2.0 - Added the `context_settings` parameter. - - :param name: the name of the command to use unless a group overrides it. - :param context_settings: an optional dictionary with defaults that are - passed to the context object. - """ - #: the default for the :attr:`Context.allow_extra_args` flag. - allow_extra_args = False - #: the default for the :attr:`Context.allow_interspersed_args` flag. - allow_interspersed_args = True - #: the default for the :attr:`Context.ignore_unknown_options` flag. - ignore_unknown_options = False - - def __init__(self, name, context_settings=None): - #: the name the command thinks it has. 
Upon registering a command - #: on a :class:`Group` the group will default the command name - #: with this information. You should instead use the - #: :class:`Context`\'s :attr:`~Context.info_name` attribute. - self.name = name - if context_settings is None: - context_settings = {} - #: an optional dictionary with defaults passed to the context. - self.context_settings = context_settings - - def get_usage(self, ctx): - raise NotImplementedError('Base commands cannot get usage') - - def get_help(self, ctx): - raise NotImplementedError('Base commands cannot get help') - - def make_context(self, info_name, args, parent=None, **extra): - """This function when given an info name and arguments will kick - off the parsing and create a new :class:`Context`. It does not - invoke the actual command callback though. - - :param info_name: the info name for this invocation. Generally this - is the most descriptive name for the script or - command. For the toplevel script it's usually - the name of the script, for commands below it it's - the name of the script. - :param args: the arguments to parse as list of strings. - :param parent: the parent context if available. - :param extra: extra keyword arguments forwarded to the context - constructor. - """ - for key, value in iteritems(self.context_settings): - if key not in extra: - extra[key] = value - ctx = Context(self, info_name=info_name, parent=parent, **extra) - with ctx.scope(cleanup=False): - self.parse_args(ctx, args) - return ctx - - def parse_args(self, ctx, args): - """Given a context and a list of arguments this creates the parser - and parses the arguments, then modifies the context as necessary. - This is automatically invoked by :meth:`make_context`. - """ - raise NotImplementedError('Base commands do not know how to parse ' - 'arguments.') - - def invoke(self, ctx): - """Given a context, this invokes the command. The default - implementation is raising a not implemented error. - """ - raise NotImplementedError('Base commands are not invokable by default') - - def main(self, args=None, prog_name=None, complete_var=None, - standalone_mode=True, **extra): - """This is the way to invoke a script with all the bells and - whistles as a command line application. This will always terminate - the application after a call. If this is not wanted, ``SystemExit`` - needs to be caught. - - This method is also available by directly calling the instance of - a :class:`Command`. - - .. versionadded:: 3.0 - Added the `standalone_mode` flag to control the standalone mode. - - :param args: the arguments that should be used for parsing. If not - provided, ``sys.argv[1:]`` is used. - :param prog_name: the program name that should be used. By default - the program name is constructed by taking the file - name from ``sys.argv[0]``. - :param complete_var: the environment variable that controls the - bash completion support. The default is - ``"_<prog_name>_COMPLETE"`` with prog_name in - uppercase. - :param standalone_mode: the default behavior is to invoke the script - in standalone mode. Click will then - handle exceptions and convert them into - error messages and the function will never - return but shut down the interpreter. If - this is set to `False` they will be - propagated to the caller and the return - value of this function is the return value - of :meth:`invoke`. - :param extra: extra keyword arguments are forwarded to the context - constructor. See :class:`Context` for more information.
- """ - # If we are in Python 3, we will verify that the environment is - # sane at this point or reject further execution to avoid a - # broken script. - if not PY2: - _verify_python3_env() - else: - _check_for_unicode_literals() - - if args is None: - args = get_os_args() - else: - args = list(args) - - if prog_name is None: - prog_name = make_str(os.path.basename( - sys.argv and sys.argv[0] or __file__)) - - # Hook for the Bash completion. This only activates if the Bash - # completion is actually enabled, otherwise this is quite a fast - # noop. - _bashcomplete(self, prog_name, complete_var) - - try: - try: - with self.make_context(prog_name, args, **extra) as ctx: - rv = self.invoke(ctx) - if not standalone_mode: - return rv - # it's not safe to `ctx.exit(rv)` here! - # note that `rv` may actually contain data like "1" which - # has obvious effects - # more subtle case: `rv=[None, None]` can come out of - # chained commands which all returned `None` -- so it's not - # even always obvious that `rv` indicates success/failure - # by its truthiness/falsiness - ctx.exit() - except (EOFError, KeyboardInterrupt): - echo(file=sys.stderr) - raise Abort() - except ClickException as e: - if not standalone_mode: - raise - e.show() - sys.exit(e.exit_code) - except IOError as e: - if e.errno == errno.EPIPE: - sys.stdout = PacifyFlushWrapper(sys.stdout) - sys.stderr = PacifyFlushWrapper(sys.stderr) - sys.exit(1) - else: - raise - except Exit as e: - if standalone_mode: - sys.exit(e.exit_code) - else: - # in non-standalone mode, return the exit code - # note that this is only reached if `self.invoke` above raises - # an Exit explicitly -- thus bypassing the check there which - # would return its result - # the results of non-standalone execution may therefore be - # somewhat ambiguous: if there are codepaths which lead to - # `ctx.exit(1)` and to `return 1`, the caller won't be able to - # tell the difference between the two - return e.exit_code - except Abort: - if not standalone_mode: - raise - echo('Aborted!', file=sys.stderr) - sys.exit(1) - - def __call__(self, *args, **kwargs): - """Alias for :meth:`main`.""" - return self.main(*args, **kwargs) - - -class Command(BaseCommand): - """Commands are the basic building block of command line interfaces in - Click. A basic command handles command line parsing and might dispatch - more parsing to commands nested below it. - - .. versionchanged:: 2.0 - Added the `context_settings` parameter. - - :param name: the name of the command to use unless a group overrides it. - :param context_settings: an optional dictionary with defaults that are - passed to the context object. - :param callback: the callback to invoke. This is optional. - :param params: the parameters to register with this command. This can - be either :class:`Option` or :class:`Argument` objects. - :param help: the help string to use for this command. - :param epilog: like the help string but it's printed at the end of the - help page after everything else. - :param short_help: the short help to use for this command. This is - shown on the command listing of the parent command. - :param add_help_option: by default each command registers a ``--help`` - option. This can be disabled by this parameter. - :param hidden: hide this command from help outputs. - - :param deprecated: issues a message indicating that - the command is deprecated. 
- """ - - def __init__(self, name, context_settings=None, callback=None, - params=None, help=None, epilog=None, short_help=None, - options_metavar='[OPTIONS]', add_help_option=True, - hidden=False, deprecated=False): - BaseCommand.__init__(self, name, context_settings) - #: the callback to execute when the command fires. This might be - #: `None` in which case nothing happens. - self.callback = callback - #: the list of parameters for this command in the order they - #: should show up in the help page and execute. Eager parameters - #: will automatically be handled before non eager ones. - self.params = params or [] - # if a form feed (page break) is found in the help text, truncate help - # text to the content preceding the first form feed - if help and '\f' in help: - help = help.split('\f', 1)[0] - self.help = help - self.epilog = epilog - self.options_metavar = options_metavar - self.short_help = short_help - self.add_help_option = add_help_option - self.hidden = hidden - self.deprecated = deprecated - - def get_usage(self, ctx): - formatter = ctx.make_formatter() - self.format_usage(ctx, formatter) - return formatter.getvalue().rstrip('\n') - - def get_params(self, ctx): - rv = self.params - help_option = self.get_help_option(ctx) - if help_option is not None: - rv = rv + [help_option] - return rv - - def format_usage(self, ctx, formatter): - """Writes the usage line into the formatter.""" - pieces = self.collect_usage_pieces(ctx) - formatter.write_usage(ctx.command_path, ' '.join(pieces)) - - def collect_usage_pieces(self, ctx): - """Returns all the pieces that go into the usage line and returns - it as a list of strings. - """ - rv = [self.options_metavar] - for param in self.get_params(ctx): - rv.extend(param.get_usage_pieces(ctx)) - return rv - - def get_help_option_names(self, ctx): - """Returns the names for the help option.""" - all_names = set(ctx.help_option_names) - for param in self.params: - all_names.difference_update(param.opts) - all_names.difference_update(param.secondary_opts) - return all_names - - def get_help_option(self, ctx): - """Returns the help option object.""" - help_options = self.get_help_option_names(ctx) - if not help_options or not self.add_help_option: - return - - def show_help(ctx, param, value): - if value and not ctx.resilient_parsing: - echo(ctx.get_help(), color=ctx.color) - ctx.exit() - return Option(help_options, is_flag=True, - is_eager=True, expose_value=False, - callback=show_help, - help='Show this message and exit.') - - def make_parser(self, ctx): - """Creates the underlying option parser for this command.""" - parser = OptionParser(ctx) - for param in self.get_params(ctx): - param.add_to_parser(parser, ctx) - return parser - - def get_help(self, ctx): - """Formats the help into a string and returns it. This creates a - formatter and will call into the following formatting methods: - """ - formatter = ctx.make_formatter() - self.format_help(ctx, formatter) - return formatter.getvalue().rstrip('\n') - - def get_short_help_str(self, limit=45): - """Gets short help for the command or makes it by shortening the long help string.""" - return self.short_help or self.help and make_default_short_help(self.help, limit) or '' - - def format_help(self, ctx, formatter): - """Writes the help into the formatter if it exists. 
- - This calls into the following methods: - - - :meth:`format_usage` - - :meth:`format_help_text` - - :meth:`format_options` - - :meth:`format_epilog` - """ - self.format_usage(ctx, formatter) - self.format_help_text(ctx, formatter) - self.format_options(ctx, formatter) - self.format_epilog(ctx, formatter) - - def format_help_text(self, ctx, formatter): - """Writes the help text to the formatter if it exists.""" - if self.help: - formatter.write_paragraph() - with formatter.indentation(): - help_text = self.help - if self.deprecated: - help_text += DEPRECATED_HELP_NOTICE - formatter.write_text(help_text) - elif self.deprecated: - formatter.write_paragraph() - with formatter.indentation(): - formatter.write_text(DEPRECATED_HELP_NOTICE) - - def format_options(self, ctx, formatter): - """Writes all the options into the formatter if they exist.""" - opts = [] - for param in self.get_params(ctx): - rv = param.get_help_record(ctx) - if rv is not None: - opts.append(rv) - - if opts: - with formatter.section('Options'): - formatter.write_dl(opts) - - def format_epilog(self, ctx, formatter): - """Writes the epilog into the formatter if it exists.""" - if self.epilog: - formatter.write_paragraph() - with formatter.indentation(): - formatter.write_text(self.epilog) - - def parse_args(self, ctx, args): - parser = self.make_parser(ctx) - opts, args, param_order = parser.parse_args(args=args) - - for param in iter_params_for_processing( - param_order, self.get_params(ctx)): - value, args = param.handle_parse_result(ctx, opts, args) - - if args and not ctx.allow_extra_args and not ctx.resilient_parsing: - ctx.fail('Got unexpected extra argument%s (%s)' - % (len(args) != 1 and 's' or '', - ' '.join(map(make_str, args)))) - - ctx.args = args - return args - - def invoke(self, ctx): - """Given a context, this invokes the attached callback (if it exists) - in the right way. - """ - _maybe_show_deprecated_notice(self) - if self.callback is not None: - return ctx.invoke(self.callback, **ctx.params) - - -class MultiCommand(Command): - """A multi command is the basic implementation of a command that - dispatches to subcommands. The most common version is the - :class:`Group`. - - :param invoke_without_command: this controls how the multi command itself - is invoked. By default it's only invoked - if a subcommand is provided. - :param no_args_is_help: this controls what happens if no arguments are - provided. This option is enabled by default if - `invoke_without_command` is disabled or disabled - if it's enabled. If enabled this will add - ``--help`` as argument if no arguments are - passed. - :param subcommand_metavar: the string that is used in the documentation - to indicate the subcommand place. - :param chain: if this is set to `True` chaining of multiple subcommands - is enabled. This restricts the form of commands in that - they cannot have optional arguments but it allows - multiple commands to be chained together. - :param result_callback: the result callback to attach to this multi - command. 
- """ - allow_extra_args = True - allow_interspersed_args = False - - def __init__(self, name=None, invoke_without_command=False, - no_args_is_help=None, subcommand_metavar=None, - chain=False, result_callback=None, **attrs): - Command.__init__(self, name, **attrs) - if no_args_is_help is None: - no_args_is_help = not invoke_without_command - self.no_args_is_help = no_args_is_help - self.invoke_without_command = invoke_without_command - if subcommand_metavar is None: - if chain: - subcommand_metavar = SUBCOMMANDS_METAVAR - else: - subcommand_metavar = SUBCOMMAND_METAVAR - self.subcommand_metavar = subcommand_metavar - self.chain = chain - #: The result callback that is stored. This can be set or - #: overridden with the :func:`resultcallback` decorator. - self.result_callback = result_callback - - if self.chain: - for param in self.params: - if isinstance(param, Argument) and not param.required: - raise RuntimeError('Multi commands in chain mode cannot ' - 'have optional arguments.') - - def collect_usage_pieces(self, ctx): - rv = Command.collect_usage_pieces(self, ctx) - rv.append(self.subcommand_metavar) - return rv - - def format_options(self, ctx, formatter): - Command.format_options(self, ctx, formatter) - self.format_commands(ctx, formatter) - - def resultcallback(self, replace=False): - """Adds a result callback to the chain command. By default if a - result callback is already registered this will chain them but - this can be disabled with the `replace` parameter. The result - callback is invoked with the return value of the subcommand - (or the list of return values from all subcommands if chaining - is enabled) as well as the parameters as they would be passed - to the main callback. - - Example:: - - @click.group() - @click.option('-i', '--input', default=23) - def cli(input): - return 42 - - @cli.resultcallback() - def process_result(result, input): - return result + input - - .. versionadded:: 3.0 - - :param replace: if set to `True` an already existing result - callback will be removed. - """ - def decorator(f): - old_callback = self.result_callback - if old_callback is None or replace: - self.result_callback = f - return f - def function(__value, *args, **kwargs): - return f(old_callback(__value, *args, **kwargs), - *args, **kwargs) - self.result_callback = rv = update_wrapper(function, f) - return rv - return decorator - - def format_commands(self, ctx, formatter): - """Extra format methods for multi methods that adds all the commands - after the options. - """ - commands = [] - for subcommand in self.list_commands(ctx): - cmd = self.get_command(ctx, subcommand) - # What is this, the tool lied about a command. 
Ignore it - if cmd is None: - continue - if cmd.hidden: - continue - - commands.append((subcommand, cmd)) - - # allow for 3 times the default spacing - if len(commands): - limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands) - - rows = [] - for subcommand, cmd in commands: - help = cmd.get_short_help_str(limit) - rows.append((subcommand, help)) - - if rows: - with formatter.section('Commands'): - formatter.write_dl(rows) - - def parse_args(self, ctx, args): - if not args and self.no_args_is_help and not ctx.resilient_parsing: - echo(ctx.get_help(), color=ctx.color) - ctx.exit() - - rest = Command.parse_args(self, ctx, args) - if self.chain: - ctx.protected_args = rest - ctx.args = [] - elif rest: - ctx.protected_args, ctx.args = rest[:1], rest[1:] - - return ctx.args - - def invoke(self, ctx): - def _process_result(value): - if self.result_callback is not None: - value = ctx.invoke(self.result_callback, value, - **ctx.params) - return value - - if not ctx.protected_args: - # If we are invoked without command the chain flag controls - # how this happens. If we are not in chain mode, the return - # value here is the return value of the command. - # If however we are in chain mode, the return value is the - # return value of the result processor invoked with an empty - # list (which means that no subcommand actually was executed). - if self.invoke_without_command: - if not self.chain: - return Command.invoke(self, ctx) - with ctx: - Command.invoke(self, ctx) - return _process_result([]) - ctx.fail('Missing command.') - - # Fetch args back out - args = ctx.protected_args + ctx.args - ctx.args = [] - ctx.protected_args = [] - - # If we're not in chain mode, we only allow the invocation of a - # single command but we also inform the current context about the - # name of the command to invoke. - if not self.chain: - # Make sure the context is entered so we do not clean up - # resources until the result processor has worked. - with ctx: - cmd_name, cmd, args = self.resolve_command(ctx, args) - ctx.invoked_subcommand = cmd_name - Command.invoke(self, ctx) - sub_ctx = cmd.make_context(cmd_name, args, parent=ctx) - with sub_ctx: - return _process_result(sub_ctx.command.invoke(sub_ctx)) - - # In chain mode we create the contexts step by step, but after the - # base command has been invoked. Because at that point we do not - # know the subcommands yet, the invoked subcommand attribute is - # set to ``*`` to inform the command that subcommands are executed - # but nothing else. - with ctx: - ctx.invoked_subcommand = args and '*' or None - Command.invoke(self, ctx) - - # Otherwise we make every single context and invoke them in a - # chain. In that case the return value to the result processor - # is the list of all invoked subcommand's results. - contexts = [] - while args: - cmd_name, cmd, args = self.resolve_command(ctx, args) - sub_ctx = cmd.make_context(cmd_name, args, parent=ctx, - allow_extra_args=True, - allow_interspersed_args=False) - contexts.append(sub_ctx) - args, sub_ctx.args = sub_ctx.args, [] - - rv = [] - for sub_ctx in contexts: - with sub_ctx: - rv.append(sub_ctx.command.invoke(sub_ctx)) - return _process_result(rv) - - def resolve_command(self, ctx, args): - cmd_name = make_str(args[0]) - original_cmd_name = cmd_name - - # Get the command - cmd = self.get_command(ctx, cmd_name) - - # If we can't find the command but there is a normalization - # function available, we try with that one. 
- if cmd is None and ctx.token_normalize_func is not None: - cmd_name = ctx.token_normalize_func(cmd_name) - cmd = self.get_command(ctx, cmd_name) - - # If we don't find the command we want to show an error message - # to the user that it was not provided. However, there is - # something else we should do: if the first argument looks like - # an option we want to kick off parsing again for arguments to - # resolve things like --help which now should go to the main - # place. - if cmd is None and not ctx.resilient_parsing: - if split_opt(cmd_name)[0]: - self.parse_args(ctx, ctx.args) - ctx.fail('No such command "%s".' % original_cmd_name) - - return cmd_name, cmd, args[1:] - - def get_command(self, ctx, cmd_name): - """Given a context and a command name, this returns a - :class:`Command` object if it exists or returns `None`. - """ - raise NotImplementedError() - - def list_commands(self, ctx): - """Returns a list of subcommand names in the order they should - appear. - """ - return [] - - -class Group(MultiCommand): - """A group allows a command to have subcommands attached. This is the - most common way to implement nesting in Click. - - :param commands: a dictionary of commands. - """ - - def __init__(self, name=None, commands=None, **attrs): - MultiCommand.__init__(self, name, **attrs) - #: the registered subcommands by their exported names. - self.commands = commands or {} - - def add_command(self, cmd, name=None): - """Registers another :class:`Command` with this group. If the name - is not provided, the name of the command is used. - """ - name = name or cmd.name - if name is None: - raise TypeError('Command has no name.') - _check_multicommand(self, name, cmd, register=True) - self.commands[name] = cmd - - def command(self, *args, **kwargs): - """A shortcut decorator for declaring and attaching a command to - the group. This takes the same arguments as :func:`command` but - immediately registers the created command with this instance by - calling into :meth:`add_command`. - """ - def decorator(f): - cmd = command(*args, **kwargs)(f) - self.add_command(cmd) - return cmd - return decorator - - def group(self, *args, **kwargs): - """A shortcut decorator for declaring and attaching a group to - the group. This takes the same arguments as :func:`group` but - immediately registers the created command with this instance by - calling into :meth:`add_command`. - """ - def decorator(f): - cmd = group(*args, **kwargs)(f) - self.add_command(cmd) - return cmd - return decorator - - def get_command(self, ctx, cmd_name): - return self.commands.get(cmd_name) - - def list_commands(self, ctx): - return sorted(self.commands) - - -class CommandCollection(MultiCommand): - """A command collection is a multi command that merges multiple multi - commands together into one. This is a straightforward implementation - that accepts a list of different multi commands as sources and - provides all the commands for each of them. - """ - - def __init__(self, name=None, sources=None, **attrs): - MultiCommand.__init__(self, name, **attrs) - #: The list of registered multi commands. 
- self.sources = sources or [] - - def add_source(self, multi_cmd): - """Adds a new multi command to the chain dispatcher.""" - self.sources.append(multi_cmd) - - def get_command(self, ctx, cmd_name): - for source in self.sources: - rv = source.get_command(ctx, cmd_name) - if rv is not None: - if self.chain: - _check_multicommand(self, cmd_name, rv) - return rv - - def list_commands(self, ctx): - rv = set() - for source in self.sources: - rv.update(source.list_commands(ctx)) - return sorted(rv) - - -class Parameter(object): - r"""A parameter to a command comes in two versions: they are either - :class:`Option`\s or :class:`Argument`\s. Other subclasses are currently - not supported by design as some of the internals for parsing are - intentionally not finalized. - - Some settings are supported by both options and arguments. - - .. versionchanged:: 2.0 - Changed signature for parameter callback to also be passed the - parameter. In Click 2.0, the old callback format will still work, - but it will raise a warning to give you a chance to migrate the - code more easily. - - :param param_decls: the parameter declarations for this option or - argument. This is a list of flags or argument - names. - :param type: the type that should be used. Either a :class:`ParamType` - or a Python type. The latter is converted into the former - automatically if supported. - :param required: controls if this is optional or not. - :param default: the default value if omitted. This can also be a callable, - in which case it's invoked when the default is needed - without any arguments. - :param callback: a callback that should be executed after the parameter - was matched. This is called as ``fn(ctx, param, - value)`` and needs to return the value. Before Click - 2.0, the signature was ``(ctx, value)``. - :param nargs: the number of arguments to match. If not ``1`` the return - value is a tuple instead of single value. The default for - nargs is ``1`` (except if the type is a tuple, then it's - the arity of the tuple). - :param metavar: how the value is represented in the help page. - :param expose_value: if this is `True` then the value is passed onwards - to the command callback and stored on the context, - otherwise it's skipped. - :param is_eager: eager values are processed before non eager ones. This - should not be set for arguments or it will invert the - order of processing. - :param envvar: a string or list of strings that are environment variables - that should be checked. - """ - param_type_name = 'parameter' - - def __init__(self, param_decls=None, type=None, required=False, - default=None, callback=None, nargs=None, metavar=None, - expose_value=True, is_eager=False, envvar=None, - autocompletion=None): - self.name, self.opts, self.secondary_opts = \ - self._parse_decls(param_decls or (), expose_value) - - self.type = convert_type(type, default) - - # Default nargs to what the type tells us if we have that - # information available. - if nargs is None: - if self.type.is_composite: - nargs = self.type.arity - else: - nargs = 1 - - self.required = required - self.callback = callback - self.nargs = nargs - self.multiple = False - self.expose_value = expose_value - self.default = default - self.is_eager = is_eager - self.metavar = metavar - self.envvar = envvar - self.autocompletion = autocompletion - - @property - def human_readable_name(self): - """Returns the human readable name of this parameter. This is the - same as the name for options, but the metavar for arguments.
- """ - return self.name - - def make_metavar(self): - if self.metavar is not None: - return self.metavar - metavar = self.type.get_metavar(self) - if metavar is None: - metavar = self.type.name.upper() - if self.nargs != 1: - metavar += '...' - return metavar - - def get_default(self, ctx): - """Given a context variable this calculates the default value.""" - # Otherwise go with the regular default. - if callable(self.default): - rv = self.default() - else: - rv = self.default - return self.type_cast_value(ctx, rv) - - def add_to_parser(self, parser, ctx): - pass - - def consume_value(self, ctx, opts): - value = opts.get(self.name) - if value is None: - value = self.value_from_envvar(ctx) - if value is None: - value = ctx.lookup_default(self.name) - return value - - def type_cast_value(self, ctx, value): - """Given a value this runs it properly through the type system. - This automatically handles things like `nargs` and `multiple` as - well as composite types. - """ - if self.type.is_composite: - if self.nargs <= 1: - raise TypeError('Attempted to invoke composite type ' - 'but nargs has been set to %s. This is ' - 'not supported; nargs needs to be set to ' - 'a fixed value > 1.' % self.nargs) - if self.multiple: - return tuple(self.type(x or (), self, ctx) for x in value or ()) - return self.type(value or (), self, ctx) - - def _convert(value, level): - if level == 0: - return self.type(value, self, ctx) - return tuple(_convert(x, level - 1) for x in value or ()) - return _convert(value, (self.nargs != 1) + bool(self.multiple)) - - def process_value(self, ctx, value): - """Given a value and context this runs the logic to convert the - value as necessary. - """ - # If the value we were given is None we do nothing. This way - # code that calls this can easily figure out if something was - # not provided. Otherwise it would be converted into an empty - # tuple for multiple invocations which is inconvenient. 
- if value is not None: - return self.type_cast_value(ctx, value) - - def value_is_missing(self, value): - if value is None: - return True - if (self.nargs != 1 or self.multiple) and value == (): - return True - return False - - def full_process_value(self, ctx, value): - value = self.process_value(ctx, value) - - if value is None and not ctx.resilient_parsing: - value = self.get_default(ctx) - - if self.required and self.value_is_missing(value): - raise MissingParameter(ctx=ctx, param=self) - - return value - - def resolve_envvar_value(self, ctx): - if self.envvar is None: - return - if isinstance(self.envvar, (tuple, list)): - for envvar in self.envvar: - rv = os.environ.get(envvar) - if rv is not None: - return rv - else: - return os.environ.get(self.envvar) - - def value_from_envvar(self, ctx): - rv = self.resolve_envvar_value(ctx) - if rv is not None and self.nargs != 1: - rv = self.type.split_envvar_value(rv) - return rv - - def handle_parse_result(self, ctx, opts, args): - with augment_usage_errors(ctx, param=self): - value = self.consume_value(ctx, opts) - try: - value = self.full_process_value(ctx, value) - except Exception: - if not ctx.resilient_parsing: - raise - value = None - if self.callback is not None: - try: - value = invoke_param_callback( - self.callback, ctx, self, value) - except Exception: - if not ctx.resilient_parsing: - raise - - if self.expose_value: - ctx.params[self.name] = value - return value, args - - def get_help_record(self, ctx): - pass - - def get_usage_pieces(self, ctx): - return [] - - def get_error_hint(self, ctx): - """Get a stringified version of the param for use in error messages to - indicate which param caused the error. - """ - hint_list = self.opts or [self.human_readable_name] - return ' / '.join('"%s"' % x for x in hint_list) - - -class Option(Parameter): - """Options are usually optional values on the command line and - have some extra features that arguments don't have. - - All other parameters are passed onwards to the parameter constructor. - - :param show_default: controls if the default value should be shown on the - help page. Normally, defaults are not shown. If this - value is a string, it shows the string instead of the - value. This is particularly useful for dynamic options. - :param show_envvar: controls if an environment variable should be shown on - the help page. Normally, environment variables - are not shown. - :param prompt: if set to `True` or a non empty string then the user will be - prompted for input. If set to `True` the prompt will be the - option name capitalized. - :param confirmation_prompt: if set then the value will need to be confirmed - if it was prompted for. - :param hide_input: if this is `True` then the input on the prompt will be - hidden from the user. This is useful for password - input. - :param is_flag: forces this option to act as a flag. The default is - auto detection. - :param flag_value: which value should be used for this flag if it's - enabled. This is set to a boolean automatically if - the option string contains a slash to mark two options. - :param multiple: if this is set to `True` then the argument is accepted - multiple times and recorded. This is similar to ``nargs`` - in how it works but supports arbitrary number of - arguments. - :param count: this flag makes an option increment an integer. - :param allow_from_autoenv: if this is enabled then the value of this - parameter will be pulled from an environment - variable in case a prefix is defined on the - context. 
- :param help: the help string. - :param hidden: hide this option from help outputs. - """ - param_type_name = 'option' - - def __init__(self, param_decls=None, show_default=False, - prompt=False, confirmation_prompt=False, - hide_input=False, is_flag=None, flag_value=None, - multiple=False, count=False, allow_from_autoenv=True, - type=None, help=None, hidden=False, show_choices=True, - show_envvar=False, **attrs): - default_is_missing = attrs.get('default', _missing) is _missing - Parameter.__init__(self, param_decls, type=type, **attrs) - - if prompt is True: - prompt_text = self.name.replace('_', ' ').capitalize() - elif prompt is False: - prompt_text = None - else: - prompt_text = prompt - self.prompt = prompt_text - self.confirmation_prompt = confirmation_prompt - self.hide_input = hide_input - self.hidden = hidden - - # Flags - if is_flag is None: - if flag_value is not None: - is_flag = True - else: - is_flag = bool(self.secondary_opts) - if is_flag and default_is_missing: - self.default = False - if flag_value is None: - flag_value = not self.default - self.is_flag = is_flag - self.flag_value = flag_value - if self.is_flag and isinstance(self.flag_value, bool) \ - and type is None: - self.type = BOOL - self.is_bool_flag = True - else: - self.is_bool_flag = False - - # Counting - self.count = count - if count: - if type is None: - self.type = IntRange(min=0) - if default_is_missing: - self.default = 0 - - self.multiple = multiple - self.allow_from_autoenv = allow_from_autoenv - self.help = help - self.show_default = show_default - self.show_choices = show_choices - self.show_envvar = show_envvar - - # Sanity check for stuff we don't support - if __debug__: - if self.nargs < 0: - raise TypeError('Options cannot have nargs < 0') - if self.prompt and self.is_flag and not self.is_bool_flag: - raise TypeError('Cannot prompt for flags that are not bools.') - if not self.is_bool_flag and self.secondary_opts: - raise TypeError('Got secondary option for non boolean flag.') - if self.is_bool_flag and self.hide_input \ - and self.prompt is not None: - raise TypeError('Hidden input does not work with boolean ' - 'flag prompts.') - if self.count: - if self.multiple: - raise TypeError('Options cannot be multiple and count ' - 'at the same time.') - elif self.is_flag: - raise TypeError('Options cannot be count and flags at ' - 'the same time.') - - def _parse_decls(self, decls, expose_value): - opts = [] - secondary_opts = [] - name = None - possible_names = [] - - for decl in decls: - if isidentifier(decl): - if name is not None: - raise TypeError('Name defined twice') - name = decl - else: - split_char = decl[:1] == '/' and ';' or '/' - if split_char in decl: - first, second = decl.split(split_char, 1) - first = first.rstrip() - if first: - possible_names.append(split_opt(first)) - opts.append(first) - second = second.lstrip() - if second: - secondary_opts.append(second.lstrip()) - else: - possible_names.append(split_opt(decl)) - opts.append(decl) - - if name is None and possible_names: - possible_names.sort(key=lambda x: -len(x[0])) # group long options first - name = possible_names[0][1].replace('-', '_').lower() - if not isidentifier(name): - name = None - - if name is None: - if not expose_value: - return None, opts, secondary_opts - raise TypeError('Could not determine name for option') - - if not opts and not secondary_opts: - raise TypeError('No options defined but a name was passed (%s). ' - 'Did you mean to declare an argument instead ' - 'of an option?' 
% name) - - return name, opts, secondary_opts - - def add_to_parser(self, parser, ctx): - kwargs = { - 'dest': self.name, - 'nargs': self.nargs, - 'obj': self, - } - - if self.multiple: - action = 'append' - elif self.count: - action = 'count' - else: - action = 'store' - - if self.is_flag: - kwargs.pop('nargs', None) - if self.is_bool_flag and self.secondary_opts: - parser.add_option(self.opts, action=action + '_const', - const=True, **kwargs) - parser.add_option(self.secondary_opts, action=action + - '_const', const=False, **kwargs) - else: - parser.add_option(self.opts, action=action + '_const', - const=self.flag_value, - **kwargs) - else: - kwargs['action'] = action - parser.add_option(self.opts, **kwargs) - - def get_help_record(self, ctx): - if self.hidden: - return - any_prefix_is_slash = [] - - def _write_opts(opts): - rv, any_slashes = join_options(opts) - if any_slashes: - any_prefix_is_slash[:] = [True] - if not self.is_flag and not self.count: - rv += ' ' + self.make_metavar() - return rv - - rv = [_write_opts(self.opts)] - if self.secondary_opts: - rv.append(_write_opts(self.secondary_opts)) - - help = self.help or '' - extra = [] - if self.show_envvar: - envvar = self.envvar - if envvar is None: - if self.allow_from_autoenv and \ - ctx.auto_envvar_prefix is not None: - envvar = '%s_%s' % (ctx.auto_envvar_prefix, self.name.upper()) - if envvar is not None: - extra.append('env var: %s' % ( - ', '.join('%s' % d for d in envvar) - if isinstance(envvar, (list, tuple)) - else envvar, )) - if self.default is not None and self.show_default: - if isinstance(self.show_default, string_types): - default_string = '({})'.format(self.show_default) - elif isinstance(self.default, (list, tuple)): - default_string = ', '.join('%s' % d for d in self.default) - elif inspect.isfunction(self.default): - default_string = "(dynamic)" - else: - default_string = self.default - extra.append('default: {}'.format(default_string)) - - if self.required: - extra.append('required') - if extra: - help = '%s[%s]' % (help and help + ' ' or '', '; '.join(extra)) - - return ((any_prefix_is_slash and '; ' or ' / ').join(rv), help) - - def get_default(self, ctx): - # If we're a non-boolean flag, our default is more complex because - # we need to look at all flags in the same group to figure out - # if we're the default one, in which case we return the flag - # value as default. - if self.is_flag and not self.is_bool_flag: - for param in ctx.command.params: - if param.name == self.name and param.default: - return param.flag_value - return None - return Parameter.get_default(self, ctx) - - def prompt_for_value(self, ctx): - """This is an alternative flow that can be activated in the full - value processing if a value does not exist. It will prompt the - user until a valid value exists and then returns the processed - value as result. - """ - # Calculate the default before prompting anything to be stable. - default = self.get_default(ctx) - - # If this is a prompt for a flag we need to handle this - # differently.
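[Editor's note: not part of the deleted sources. A minimal, hedged sketch of how the `Option` machinery deleted above is typically driven from user code; the command and option names here are illustrative, not taken from this repository.]

import click

@click.command()
@click.option('--shout/--no-shout', default=False)  # bool flag pair -> secondary_opts
@click.option('-v', '--verbose', count=True)        # count option -> IntRange(min=0), default 0
@click.option('--name', prompt=True, default='world', show_default=True)
def hello(shout, verbose, name):
    greeting = 'Hello, %s!' % name
    click.echo(greeting.upper() if shout else greeting)

if __name__ == '__main__':
    hello()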
- if self.is_bool_flag: - return confirm(self.prompt, default) - - return prompt(self.prompt, default=default, type=self.type, - hide_input=self.hide_input, show_choices=self.show_choices, - confirmation_prompt=self.confirmation_prompt, - value_proc=lambda x: self.process_value(ctx, x)) - - def resolve_envvar_value(self, ctx): - rv = Parameter.resolve_envvar_value(self, ctx) - if rv is not None: - return rv - if self.allow_from_autoenv and \ - ctx.auto_envvar_prefix is not None: - envvar = '%s_%s' % (ctx.auto_envvar_prefix, self.name.upper()) - return os.environ.get(envvar) - - def value_from_envvar(self, ctx): - rv = self.resolve_envvar_value(ctx) - if rv is None: - return None - value_depth = (self.nargs != 1) + bool(self.multiple) - if value_depth > 0 and rv is not None: - rv = self.type.split_envvar_value(rv) - if self.multiple and self.nargs != 1: - rv = batch(rv, self.nargs) - return rv - - def full_process_value(self, ctx, value): - if value is None and self.prompt is not None \ - and not ctx.resilient_parsing: - return self.prompt_for_value(ctx) - return Parameter.full_process_value(self, ctx, value) - - -class Argument(Parameter): - """Arguments are positional parameters to a command. They generally - provide fewer features than options but can have infinite ``nargs`` - and are required by default. - - All parameters are passed onwards to the parameter constructor. - """ - param_type_name = 'argument' - - def __init__(self, param_decls, required=None, **attrs): - if required is None: - if attrs.get('default') is not None: - required = False - else: - required = attrs.get('nargs', 1) > 0 - Parameter.__init__(self, param_decls, required=required, **attrs) - if self.default is not None and self.nargs < 0: - raise TypeError('nargs=-1 in combination with a default value ' - 'is not supported.') - - @property - def human_readable_name(self): - if self.metavar is not None: - return self.metavar - return self.name.upper() - - def make_metavar(self): - if self.metavar is not None: - return self.metavar - var = self.type.get_metavar(self) - if not var: - var = self.name.upper() - if not self.required: - var = '[%s]' % var - if self.nargs != 1: - var += '...' 
- return var - - def _parse_decls(self, decls, expose_value): - if not decls: - if not expose_value: - return None, [], [] - raise TypeError('Could not determine name for argument') - if len(decls) == 1: - name = arg = decls[0] - name = name.replace('-', '_').lower() - else: - raise TypeError('Arguments take exactly one ' - 'parameter declaration, got %d' % len(decls)) - return name, [arg], [] - - def get_usage_pieces(self, ctx): - return [self.make_metavar()] - - def get_error_hint(self, ctx): - return '"%s"' % self.make_metavar() - - def add_to_parser(self, parser, ctx): - parser.add_argument(dest=self.name, nargs=self.nargs, - obj=self) - - -# Circular dependency between decorators and core -from .decorators import command, group diff --git a/flo-token-explorer/lib/python3.6/site-packages/click/decorators.py b/flo-token-explorer/lib/python3.6/site-packages/click/decorators.py deleted file mode 100644 index c57c530..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/click/decorators.py +++ /dev/null @@ -1,311 +0,0 @@ -import sys -import inspect - -from functools import update_wrapper - -from ._compat import iteritems -from ._unicodefun import _check_for_unicode_literals -from .utils import echo -from .globals import get_current_context - - -def pass_context(f): - """Marks a callback as wanting to receive the current context - object as first argument. - """ - def new_func(*args, **kwargs): - return f(get_current_context(), *args, **kwargs) - return update_wrapper(new_func, f) - - -def pass_obj(f): - """Similar to :func:`pass_context`, but only pass the object on the - context onwards (:attr:`Context.obj`). This is useful if that object - represents the state of a nested system. - """ - def new_func(*args, **kwargs): - return f(get_current_context().obj, *args, **kwargs) - return update_wrapper(new_func, f) - - -def make_pass_decorator(object_type, ensure=False): - """Given an object type this creates a decorator that will work - similar to :func:`pass_obj` but instead of passing the object of the - current context, it will find the innermost context of type - :func:`object_type`. - - This generates a decorator that works roughly like this:: - - from functools import update_wrapper - - def decorator(f): - @pass_context - def new_func(ctx, *args, **kwargs): - obj = ctx.find_object(object_type) - return ctx.invoke(f, obj, *args, **kwargs) - return update_wrapper(new_func, f) - return decorator - - :param object_type: the type of the object to pass. - :param ensure: if set to `True`, a new object will be created and - remembered on the context if it's not there yet. 
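[Editor's note: for orientation only, a hedged usage sketch of the `make_pass_decorator` helper being deleted here, mirroring the pattern from Click's documentation; the `Repo` class and option names are hypothetical.]

import click

class Repo(object):
    def __init__(self, home):
        self.home = home

# Finds (or, with ensure=True, creates) the innermost Repo on the context.
pass_repo = click.make_pass_decorator(Repo, ensure=True)

@click.group()
@click.option('--home', default='.')
@click.pass_context
def cli(ctx, home):
    ctx.obj = Repo(home)

@cli.command()
@pass_repo
def status(repo):
    click.echo('repo home: %s' % repo.home)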
- """ - def decorator(f): - def new_func(*args, **kwargs): - ctx = get_current_context() - if ensure: - obj = ctx.ensure_object(object_type) - else: - obj = ctx.find_object(object_type) - if obj is None: - raise RuntimeError('Managed to invoke callback without a ' - 'context object of type %r existing' - % object_type.__name__) - return ctx.invoke(f, obj, *args, **kwargs) - return update_wrapper(new_func, f) - return decorator - - -def _make_command(f, name, attrs, cls): - if isinstance(f, Command): - raise TypeError('Attempted to convert a callback into a ' - 'command twice.') - try: - params = f.__click_params__ - params.reverse() - del f.__click_params__ - except AttributeError: - params = [] - help = attrs.get('help') - if help is None: - help = inspect.getdoc(f) - if isinstance(help, bytes): - help = help.decode('utf-8') - else: - help = inspect.cleandoc(help) - attrs['help'] = help - _check_for_unicode_literals() - return cls(name=name or f.__name__.lower().replace('_', '-'), - callback=f, params=params, **attrs) - - -def command(name=None, cls=None, **attrs): - r"""Creates a new :class:`Command` and uses the decorated function as - callback. This will also automatically attach all decorated - :func:`option`\s and :func:`argument`\s as parameters to the command. - - The name of the command defaults to the name of the function. If you - want to change that, you can pass the intended name as the first - argument. - - All keyword arguments are forwarded to the underlying command class. - - Once decorated the function turns into a :class:`Command` instance - that can be invoked as a command line utility or be attached to a - command :class:`Group`. - - :param name: the name of the command. This defaults to the function - name with underscores replaced by dashes. - :param cls: the command class to instantiate. This defaults to - :class:`Command`. - """ - if cls is None: - cls = Command - def decorator(f): - cmd = _make_command(f, name, attrs, cls) - cmd.__doc__ = f.__doc__ - return cmd - return decorator - - -def group(name=None, **attrs): - """Creates a new :class:`Group` with a function as callback. This - works otherwise the same as :func:`command` just that the `cls` - parameter is set to :class:`Group`. - """ - attrs.setdefault('cls', Group) - return command(name, **attrs) - - -def _param_memo(f, param): - if isinstance(f, Command): - f.params.append(param) - else: - if not hasattr(f, '__click_params__'): - f.__click_params__ = [] - f.__click_params__.append(param) - - -def argument(*param_decls, **attrs): - """Attaches an argument to the command. All positional arguments are - passed as parameter declarations to :class:`Argument`; all keyword - arguments are forwarded unchanged (except ``cls``). - This is equivalent to creating an :class:`Argument` instance manually - and attaching it to the :attr:`Command.params` list. - - :param cls: the argument class to instantiate. This defaults to - :class:`Argument`. - """ - def decorator(f): - ArgumentClass = attrs.pop('cls', Argument) - _param_memo(f, ArgumentClass(param_decls, **attrs)) - return f - return decorator - - -def option(*param_decls, **attrs): - """Attaches an option to the command. All positional arguments are - passed as parameter declarations to :class:`Option`; all keyword - arguments are forwarded unchanged (except ``cls``). - This is equivalent to creating an :class:`Option` instance manually - and attaching it to the :attr:`Command.params` list. - - :param cls: the option class to instantiate. 
This defaults to - :class:`Option`. - """ - def decorator(f): - # Issue 926, copy attrs, so pre-defined options can re-use the same cls= - option_attrs = attrs.copy() - - if 'help' in option_attrs: - option_attrs['help'] = inspect.cleandoc(option_attrs['help']) - OptionClass = option_attrs.pop('cls', Option) - _param_memo(f, OptionClass(param_decls, **option_attrs)) - return f - return decorator - - -def confirmation_option(*param_decls, **attrs): - """Shortcut for confirmation prompts that can be ignored by passing - ``--yes`` as parameter. - - This is equivalent to decorating a function with :func:`option` with - the following parameters:: - - def callback(ctx, param, value): - if not value: - ctx.abort() - - @click.command() - @click.option('--yes', is_flag=True, callback=callback, - expose_value=False, prompt='Do you want to continue?') - def dropdb(): - pass - """ - def decorator(f): - def callback(ctx, param, value): - if not value: - ctx.abort() - attrs.setdefault('is_flag', True) - attrs.setdefault('callback', callback) - attrs.setdefault('expose_value', False) - attrs.setdefault('prompt', 'Do you want to continue?') - attrs.setdefault('help', 'Confirm the action without prompting.') - return option(*(param_decls or ('--yes',)), **attrs)(f) - return decorator - - -def password_option(*param_decls, **attrs): - """Shortcut for password prompts. - - This is equivalent to decorating a function with :func:`option` with - the following parameters:: - - @click.command() - @click.option('--password', prompt=True, confirmation_prompt=True, - hide_input=True) - def changeadmin(password): - pass - """ - def decorator(f): - attrs.setdefault('prompt', True) - attrs.setdefault('confirmation_prompt', True) - attrs.setdefault('hide_input', True) - return option(*(param_decls or ('--password',)), **attrs)(f) - return decorator - - -def version_option(version=None, *param_decls, **attrs): - """Adds a ``--version`` option which immediately ends the program - printing out the version number. This is implemented as an eager - option that prints the version and exits the program in the callback. - - :param version: the version number to show. If not provided Click - attempts an auto discovery via setuptools. - :param prog_name: the name of the program (defaults to autodetection) - :param message: custom message to show instead of the default - (``'%(prog)s, version %(version)s'``) - :param others: everything else is forwarded to :func:`option`. 
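[Editor's note: a brief, hedged sketch of the shortcut decorators deleted in this hunk; the command, prompt text, and version string are illustrative assumptions.]

import click

@click.command()
@click.confirmation_option(prompt='Drop all tables?')  # adds --yes, aborts otherwise
@click.password_option()                               # adds --password with hidden, confirmed prompt
@click.version_option('1.0', prog_name='demo')         # adds --version, prints and exits
def dropdb(password):
    click.echo('dropping (password length: %d)' % len(password))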
- """ - if version is None: - if hasattr(sys, '_getframe'): - module = sys._getframe(1).f_globals.get('__name__') - else: - module = '' - - def decorator(f): - prog_name = attrs.pop('prog_name', None) - message = attrs.pop('message', '%(prog)s, version %(version)s') - - def callback(ctx, param, value): - if not value or ctx.resilient_parsing: - return - prog = prog_name - if prog is None: - prog = ctx.find_root().info_name - ver = version - if ver is None: - try: - import pkg_resources - except ImportError: - pass - else: - for dist in pkg_resources.working_set: - scripts = dist.get_entry_map().get('console_scripts') or {} - for script_name, entry_point in iteritems(scripts): - if entry_point.module_name == module: - ver = dist.version - break - if ver is None: - raise RuntimeError('Could not determine version') - echo(message % { - 'prog': prog, - 'version': ver, - }, color=ctx.color) - ctx.exit() - - attrs.setdefault('is_flag', True) - attrs.setdefault('expose_value', False) - attrs.setdefault('is_eager', True) - attrs.setdefault('help', 'Show the version and exit.') - attrs['callback'] = callback - return option(*(param_decls or ('--version',)), **attrs)(f) - return decorator - - -def help_option(*param_decls, **attrs): - """Adds a ``--help`` option which immediately ends the program - printing out the help page. This is usually unnecessary to add as - this is added by default to all commands unless suppressed. - - Like :func:`version_option`, this is implemented as eager option that - prints in the callback and exits. - - All arguments are forwarded to :func:`option`. - """ - def decorator(f): - def callback(ctx, param, value): - if value and not ctx.resilient_parsing: - echo(ctx.get_help(), color=ctx.color) - ctx.exit() - attrs.setdefault('is_flag', True) - attrs.setdefault('expose_value', False) - attrs.setdefault('help', 'Show this message and exit.') - attrs.setdefault('is_eager', True) - attrs['callback'] = callback - return option(*(param_decls or ('--help',)), **attrs)(f) - return decorator - - -# Circular dependencies between core and decorators -from .core import Command, Group, Argument, Option diff --git a/flo-token-explorer/lib/python3.6/site-packages/click/exceptions.py b/flo-token-explorer/lib/python3.6/site-packages/click/exceptions.py deleted file mode 100644 index 6fa1765..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/click/exceptions.py +++ /dev/null @@ -1,235 +0,0 @@ -from ._compat import PY2, filename_to_ui, get_text_stderr -from .utils import echo - - -def _join_param_hints(param_hint): - if isinstance(param_hint, (tuple, list)): - return ' / '.join('"%s"' % x for x in param_hint) - return param_hint - - -class ClickException(Exception): - """An exception that Click can handle and show to the user.""" - - #: The exit code for this exception - exit_code = 1 - - def __init__(self, message): - ctor_msg = message - if PY2: - if ctor_msg is not None: - ctor_msg = ctor_msg.encode('utf-8') - Exception.__init__(self, ctor_msg) - self.message = message - - def format_message(self): - return self.message - - def __str__(self): - return self.message - - if PY2: - __unicode__ = __str__ - - def __str__(self): - return self.message.encode('utf-8') - - def show(self, file=None): - if file is None: - file = get_text_stderr() - echo('Error: %s' % self.format_message(), file=file) - - -class UsageError(ClickException): - """An internal exception that signals a usage error. This typically - aborts any further handling. - - :param message: the error message to display. 
- :param ctx: optionally the context that caused this error. Click will - fill in the context automatically in some situations. - """ - exit_code = 2 - - def __init__(self, message, ctx=None): - ClickException.__init__(self, message) - self.ctx = ctx - self.cmd = self.ctx and self.ctx.command or None - - def show(self, file=None): - if file is None: - file = get_text_stderr() - color = None - hint = '' - if (self.cmd is not None and - self.cmd.get_help_option(self.ctx) is not None): - hint = ('Try "%s %s" for help.\n' - % (self.ctx.command_path, self.ctx.help_option_names[0])) - if self.ctx is not None: - color = self.ctx.color - echo(self.ctx.get_usage() + '\n%s' % hint, file=file, color=color) - echo('Error: %s' % self.format_message(), file=file, color=color) - - -class BadParameter(UsageError): - """An exception that formats out a standardized error message for a - bad parameter. This is useful when thrown from a callback or type as - Click will attach contextual information to it (for instance, which - parameter it is). - - .. versionadded:: 2.0 - - :param param: the parameter object that caused this error. This can - be left out, and Click will attach this info itself - if possible. - :param param_hint: a string that shows up as parameter name. This - can be used as alternative to `param` in cases - where custom validation should happen. If it is - a string it's used as such, if it's a list then - each item is quoted and separated. - """ - - def __init__(self, message, ctx=None, param=None, - param_hint=None): - UsageError.__init__(self, message, ctx) - self.param = param - self.param_hint = param_hint - - def format_message(self): - if self.param_hint is not None: - param_hint = self.param_hint - elif self.param is not None: - param_hint = self.param.get_error_hint(self.ctx) - else: - return 'Invalid value: %s' % self.message - param_hint = _join_param_hints(param_hint) - - return 'Invalid value for %s: %s' % (param_hint, self.message) - - -class MissingParameter(BadParameter): - """Raised if click required an option or argument but it was not - provided when invoking the script. - - .. versionadded:: 4.0 - - :param param_type: a string that indicates the type of the parameter. - The default is to inherit the parameter type from - the given `param`. Valid values are ``'parameter'``, - ``'option'`` or ``'argument'``. - """ - - def __init__(self, message=None, ctx=None, param=None, - param_hint=None, param_type=None): - BadParameter.__init__(self, message, ctx, param, param_hint) - self.param_type = param_type - - def format_message(self): - if self.param_hint is not None: - param_hint = self.param_hint - elif self.param is not None: - param_hint = self.param.get_error_hint(self.ctx) - else: - param_hint = None - param_hint = _join_param_hints(param_hint) - - param_type = self.param_type - if param_type is None and self.param is not None: - param_type = self.param.param_type_name - - msg = self.message - if self.param is not None: - msg_extra = self.param.type.get_missing_message(self.param) - if msg_extra: - if msg: - msg += '. ' + msg_extra - else: - msg = msg_extra - - return 'Missing %s%s%s%s' % ( - param_type, - param_hint and ' %s' % param_hint or '', - msg and '. ' or '.', - msg or '', - ) - - -class NoSuchOption(UsageError): - """Raised if click attempted to handle an option that does not - exist. - - .. 
versionadded:: 4.0 - """ - - def __init__(self, option_name, message=None, possibilities=None, - ctx=None): - if message is None: - message = 'no such option: %s' % option_name - UsageError.__init__(self, message, ctx) - self.option_name = option_name - self.possibilities = possibilities - - def format_message(self): - bits = [self.message] - if self.possibilities: - if len(self.possibilities) == 1: - bits.append('Did you mean %s?' % self.possibilities[0]) - else: - possibilities = sorted(self.possibilities) - bits.append('(Possible options: %s)' % ', '.join(possibilities)) - return ' '.join(bits) - - -class BadOptionUsage(UsageError): - """Raised if an option is generally supplied but the use of the option - was incorrect. This is for instance raised if the number of arguments - for an option is not correct. - - .. versionadded:: 4.0 - - :param option_name: the name of the option being used incorrectly. - """ - - def __init__(self, option_name, message, ctx=None): - UsageError.__init__(self, message, ctx) - self.option_name = option_name - - -class BadArgumentUsage(UsageError): - """Raised if an argument is generally supplied but the use of the argument - was incorrect. This is for instance raised if the number of values - for an argument is not correct. - - .. versionadded:: 6.0 - """ - - def __init__(self, message, ctx=None): - UsageError.__init__(self, message, ctx) - - -class FileError(ClickException): - """Raised if a file cannot be opened.""" - - def __init__(self, filename, hint=None): - ui_filename = filename_to_ui(filename) - if hint is None: - hint = 'unknown error' - ClickException.__init__(self, hint) - self.ui_filename = ui_filename - self.filename = filename - - def format_message(self): - return 'Could not open file %s: %s' % (self.ui_filename, self.message) - - -class Abort(RuntimeError): - """An internal signalling exception that signals Click to abort.""" - - -class Exit(RuntimeError): - """An exception that indicates that the application should exit with some - status code. - - :param code: the status code to exit with. - """ - def __init__(self, code=0): - self.exit_code = code diff --git a/flo-token-explorer/lib/python3.6/site-packages/click/formatting.py b/flo-token-explorer/lib/python3.6/site-packages/click/formatting.py deleted file mode 100644 index a3d6a4d..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/click/formatting.py +++ /dev/null @@ -1,256 +0,0 @@ -from contextlib import contextmanager -from .termui import get_terminal_size -from .parser import split_opt -from ._compat import term_len - - -# Can force a width. This is used by the test system -FORCED_WIDTH = None - - -def measure_table(rows): - widths = {} - for row in rows: - for idx, col in enumerate(row): - widths[idx] = max(widths.get(idx, 0), term_len(col)) - return tuple(y for x, y in sorted(widths.items())) - - -def iter_rows(rows, col_count): - for row in rows: - row = tuple(row) - yield row + ('',) * (col_count - len(row)) - - -def wrap_text(text, width=78, initial_indent='', subsequent_indent='', - preserve_paragraphs=False): - """A helper function that intelligently wraps text. By default, it - assumes that it operates on a single paragraph of text but if the - `preserve_paragraphs` parameter is provided it will intelligently - handle paragraphs (defined by two empty lines). - - If paragraphs are handled, a paragraph can be prefixed with an empty - line containing the ``\\b`` character (``\\x08``) to indicate that - no rewrapping should happen in that block. 
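[Editor's note: a hedged illustration of the ``\b`` no-rewrap marker described in the `wrap_text` docstring above; the command and help text are invented for the example. In a Python docstring, writing ``\b`` produces the ``\x08`` character that `wrap_text` checks for.]

import click

@click.command()
def cli():
    """Summary line, rewrapped as usual by the help formatter.

    \b
    lines in this paragraph
      keep their manual breaks
    """

if __name__ == '__main__':
    cli()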
- - :param text: the text that should be rewrapped. - :param width: the maximum width for the text. - :param initial_indent: the initial indent that should be placed on the - first line as a string. - :param subsequent_indent: the indent string that should be placed on - each consecutive line. - :param preserve_paragraphs: if this flag is set then the wrapping will - intelligently handle paragraphs. - """ - from ._textwrap import TextWrapper - text = text.expandtabs() - wrapper = TextWrapper(width, initial_indent=initial_indent, - subsequent_indent=subsequent_indent, - replace_whitespace=False) - if not preserve_paragraphs: - return wrapper.fill(text) - - p = [] - buf = [] - indent = None - - def _flush_par(): - if not buf: - return - if buf[0].strip() == '\b': - p.append((indent or 0, True, '\n'.join(buf[1:]))) - else: - p.append((indent or 0, False, ' '.join(buf))) - del buf[:] - - for line in text.splitlines(): - if not line: - _flush_par() - indent = None - else: - if indent is None: - orig_len = term_len(line) - line = line.lstrip() - indent = orig_len - term_len(line) - buf.append(line) - _flush_par() - - rv = [] - for indent, raw, text in p: - with wrapper.extra_indent(' ' * indent): - if raw: - rv.append(wrapper.indent_only(text)) - else: - rv.append(wrapper.fill(text)) - - return '\n\n'.join(rv) - - -class HelpFormatter(object): - """This class helps with formatting text-based help pages. It's - usually just needed for very special internal cases, but it's also - exposed so that developers can write their own fancy outputs. - - At present, it always writes into memory. - - :param indent_increment: the additional increment for each level. - :param width: the width for the text. This defaults to the terminal - width clamped to a maximum of 78. - """ - - def __init__(self, indent_increment=2, width=None, max_width=None): - self.indent_increment = indent_increment - if max_width is None: - max_width = 80 - if width is None: - width = FORCED_WIDTH - if width is None: - width = max(min(get_terminal_size()[0], max_width) - 2, 50) - self.width = width - self.current_indent = 0 - self.buffer = [] - - def write(self, string): - """Writes a unicode string into the internal buffer.""" - self.buffer.append(string) - - def indent(self): - """Increases the indentation.""" - self.current_indent += self.indent_increment - - def dedent(self): - """Decreases the indentation.""" - self.current_indent -= self.indent_increment - - def write_usage(self, prog, args='', prefix='Usage: '): - """Writes a usage line into the buffer. - - :param prog: the program name. - :param args: whitespace separated list of arguments. - :param prefix: the prefix for the first line. - """ - usage_prefix = '%*s%s ' % (self.current_indent, prefix, prog) - text_width = self.width - self.current_indent - - if text_width >= (term_len(usage_prefix) + 20): - # The arguments will fit to the right of the prefix. - indent = ' ' * term_len(usage_prefix) - self.write(wrap_text(args, text_width, - initial_indent=usage_prefix, - subsequent_indent=indent)) - else: - # The prefix is too long, put the arguments on the next line. 
- self.write(usage_prefix) - self.write('\n') - indent = ' ' * (max(self.current_indent, term_len(prefix)) + 4) - self.write(wrap_text(args, text_width, - initial_indent=indent, - subsequent_indent=indent)) - - self.write('\n') - - def write_heading(self, heading): - """Writes a heading into the buffer.""" - self.write('%*s%s:\n' % (self.current_indent, '', heading)) - - def write_paragraph(self): - """Writes a paragraph into the buffer.""" - if self.buffer: - self.write('\n') - - def write_text(self, text): - """Writes re-indented text into the buffer. This rewraps and - preserves paragraphs. - """ - text_width = max(self.width - self.current_indent, 11) - indent = ' ' * self.current_indent - self.write(wrap_text(text, text_width, - initial_indent=indent, - subsequent_indent=indent, - preserve_paragraphs=True)) - self.write('\n') - - def write_dl(self, rows, col_max=30, col_spacing=2): - """Writes a definition list into the buffer. This is how options - and commands are usually formatted. - - :param rows: a list of two item tuples for the terms and values. - :param col_max: the maximum width of the first column. - :param col_spacing: the number of spaces between the first and - second column. - """ - rows = list(rows) - widths = measure_table(rows) - if len(widths) != 2: - raise TypeError('Expected two columns for definition list') - - first_col = min(widths[0], col_max) + col_spacing - - for first, second in iter_rows(rows, len(widths)): - self.write('%*s%s' % (self.current_indent, '', first)) - if not second: - self.write('\n') - continue - if term_len(first) <= first_col - col_spacing: - self.write(' ' * (first_col - term_len(first))) - else: - self.write('\n') - self.write(' ' * (first_col + self.current_indent)) - - text_width = max(self.width - first_col - 2, 10) - lines = iter(wrap_text(second, text_width).splitlines()) - if lines: - self.write(next(lines) + '\n') - for line in lines: - self.write('%*s%s\n' % ( - first_col + self.current_indent, '', line)) - else: - self.write('\n') - - @contextmanager - def section(self, name): - """Helpful context manager that writes a paragraph, a heading, - and the indents. - - :param name: the section name that is written as heading. - """ - self.write_paragraph() - self.write_heading(name) - self.indent() - try: - yield - finally: - self.dedent() - - @contextmanager - def indentation(self): - """A context manager that increases the indentation.""" - self.indent() - try: - yield - finally: - self.dedent() - - def getvalue(self): - """Returns the buffer contents.""" - return ''.join(self.buffer) - - -def join_options(options): - """Given a list of option strings this joins them in the most appropriate - way and returns them in the form ``(formatted_string, - any_prefix_is_slash)`` where the second item in the tuple is a flag that - indicates if any of the option prefixes was a slash. 
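[Editor's note: a minimal, hedged sketch of driving the `HelpFormatter` deleted above directly; program name, options, and width are illustrative assumptions.]

from click.formatting import HelpFormatter

formatter = HelpFormatter(width=60)
formatter.write_usage('tool', '[OPTIONS] SRC DST')
with formatter.section('Options'):
    # write_dl renders the two-column option/description layout.
    formatter.write_dl([
        ('-f, --force', 'Overwrite existing files.'),
        ('--help', 'Show this message and exit.'),
    ])
print(formatter.getvalue())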
- """ - rv = [] - any_prefix_is_slash = False - for opt in options: - prefix = split_opt(opt)[0] - if prefix == '/': - any_prefix_is_slash = True - rv.append((len(prefix), opt)) - - rv.sort(key=lambda x: x[0]) - - rv = ', '.join(x[1] for x in rv) - return rv, any_prefix_is_slash diff --git a/flo-token-explorer/lib/python3.6/site-packages/click/globals.py b/flo-token-explorer/lib/python3.6/site-packages/click/globals.py deleted file mode 100644 index 843b594..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/click/globals.py +++ /dev/null @@ -1,48 +0,0 @@ -from threading import local - - -_local = local() - - -def get_current_context(silent=False): - """Returns the current click context. This can be used as a way to - access the current context object from anywhere. This is a more implicit - alternative to the :func:`pass_context` decorator. This function is - primarily useful for helpers such as :func:`echo` which might be - interested in changing its behavior based on the current context. - - To push the current context, :meth:`Context.scope` can be used. - - .. versionadded:: 5.0 - - :param silent: is set to `True` the return value is `None` if no context - is available. The default behavior is to raise a - :exc:`RuntimeError`. - """ - try: - return getattr(_local, 'stack')[-1] - except (AttributeError, IndexError): - if not silent: - raise RuntimeError('There is no active click context.') - - -def push_context(ctx): - """Pushes a new context to the current stack.""" - _local.__dict__.setdefault('stack', []).append(ctx) - - -def pop_context(): - """Removes the top level from the stack.""" - _local.stack.pop() - - -def resolve_color_default(color=None): - """"Internal helper to get the default value of the color flag. If a - value is passed it's returned unchanged, otherwise it's looked up from - the current context. - """ - if color is not None: - return color - ctx = get_current_context(silent=True) - if ctx is not None: - return ctx.color diff --git a/flo-token-explorer/lib/python3.6/site-packages/click/parser.py b/flo-token-explorer/lib/python3.6/site-packages/click/parser.py deleted file mode 100644 index 1c3ae9c..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/click/parser.py +++ /dev/null @@ -1,427 +0,0 @@ -# -*- coding: utf-8 -*- -""" -click.parser -~~~~~~~~~~~~ - -This module started out as largely a copy paste from the stdlib's -optparse module with the features removed that we do not need from -optparse because we implement them in Click on a higher level (for -instance type handling, help formatting and a lot more). - -The plan is to remove more and more from here over time. - -The reason this is a different module and not optparse from the stdlib -is that there are differences in 2.x and 3.x about the error messages -generated and optparse in the stdlib uses gettext for no good reason -and might cause us issues. -""" - -import re -from collections import deque -from .exceptions import UsageError, NoSuchOption, BadOptionUsage, \ - BadArgumentUsage - - -def _unpack_args(args, nargs_spec): - """Given an iterable of arguments and an iterable of nargs specifications, - it returns a tuple with all the unpacked arguments at the first index - and all remaining arguments as the second. - - The nargs specification is the number of arguments that should be consumed - or `-1` to indicate that this position should eat up all the remainders. - - Missing items are filled with `None`. 
- """ - args = deque(args) - nargs_spec = deque(nargs_spec) - rv = [] - spos = None - - def _fetch(c): - try: - if spos is None: - return c.popleft() - else: - return c.pop() - except IndexError: - return None - - while nargs_spec: - nargs = _fetch(nargs_spec) - if nargs == 1: - rv.append(_fetch(args)) - elif nargs > 1: - x = [_fetch(args) for _ in range(nargs)] - # If we're reversed, we're pulling in the arguments in reverse, - # so we need to turn them around. - if spos is not None: - x.reverse() - rv.append(tuple(x)) - elif nargs < 0: - if spos is not None: - raise TypeError('Cannot have two nargs < 0') - spos = len(rv) - rv.append(None) - - # spos is the position of the wildcard (star). If it's not `None`, - # we fill it with the remainder. - if spos is not None: - rv[spos] = tuple(args) - args = [] - rv[spos + 1:] = reversed(rv[spos + 1:]) - - return tuple(rv), list(args) - - -def _error_opt_args(nargs, opt): - if nargs == 1: - raise BadOptionUsage(opt, '%s option requires an argument' % opt) - raise BadOptionUsage(opt, '%s option requires %d arguments' % (opt, nargs)) - - -def split_opt(opt): - first = opt[:1] - if first.isalnum(): - return '', opt - if opt[1:2] == first: - return opt[:2], opt[2:] - return first, opt[1:] - - -def normalize_opt(opt, ctx): - if ctx is None or ctx.token_normalize_func is None: - return opt - prefix, opt = split_opt(opt) - return prefix + ctx.token_normalize_func(opt) - - -def split_arg_string(string): - """Given an argument string this attempts to split it into small parts.""" - rv = [] - for match in re.finditer(r"('([^'\\]*(?:\\.[^'\\]*)*)'" - r'|"([^"\\]*(?:\\.[^"\\]*)*)"' - r'|\S+)\s*', string, re.S): - arg = match.group().strip() - if arg[:1] == arg[-1:] and arg[:1] in '"\'': - arg = arg[1:-1].encode('ascii', 'backslashreplace') \ - .decode('unicode-escape') - try: - arg = type(string)(arg) - except UnicodeError: - pass - rv.append(arg) - return rv - - -class Option(object): - - def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None): - self._short_opts = [] - self._long_opts = [] - self.prefixes = set() - - for opt in opts: - prefix, value = split_opt(opt) - if not prefix: - raise ValueError('Invalid start character for option (%s)' - % opt) - self.prefixes.add(prefix[0]) - if len(prefix) == 1 and len(value) == 1: - self._short_opts.append(opt) - else: - self._long_opts.append(opt) - self.prefixes.add(prefix) - - if action is None: - action = 'store' - - self.dest = dest - self.action = action - self.nargs = nargs - self.const = const - self.obj = obj - - @property - def takes_value(self): - return self.action in ('store', 'append') - - def process(self, value, state): - if self.action == 'store': - state.opts[self.dest] = value - elif self.action == 'store_const': - state.opts[self.dest] = self.const - elif self.action == 'append': - state.opts.setdefault(self.dest, []).append(value) - elif self.action == 'append_const': - state.opts.setdefault(self.dest, []).append(self.const) - elif self.action == 'count': - state.opts[self.dest] = state.opts.get(self.dest, 0) + 1 - else: - raise ValueError('unknown action %r' % self.action) - state.order.append(self.obj) - - -class Argument(object): - - def __init__(self, dest, nargs=1, obj=None): - self.dest = dest - self.nargs = nargs - self.obj = obj - - def process(self, value, state): - if self.nargs > 1: - holes = sum(1 for x in value if x is None) - if holes == len(value): - value = None - elif holes != 0: - raise BadArgumentUsage('argument %s takes %d values' - % (self.dest, 
self.nargs)) - state.opts[self.dest] = value - state.order.append(self.obj) - - -class ParsingState(object): - - def __init__(self, rargs): - self.opts = {} - self.largs = [] - self.rargs = rargs - self.order = [] - - -class OptionParser(object): - """The option parser is an internal class that is ultimately used to - parse options and arguments. It's modelled after optparse and brings - a similar but vastly simplified API. It should generally not be used - directly as the high level Click classes wrap it for you. - - It's not nearly as extensible as optparse or argparse as it does not - implement features that are implemented on a higher level (such as - types or defaults). - - :param ctx: optionally the :class:`~click.Context` that this parser - should be used with. - """ - - def __init__(self, ctx=None): - #: The :class:`~click.Context` for this parser. This might be - #: `None` for some advanced use cases. - self.ctx = ctx - #: This controls how the parser deals with interspersed arguments. - #: If this is set to `False`, the parser will stop on the first - #: non-option. Click uses this to implement nested subcommands - #: safely. - self.allow_interspersed_args = True - #: This tells the parser how to deal with unknown options. By - #: default it will error out (which is sensible), but there is a - #: second mode where it will ignore it and continue processing - #: after shifting all the unknown options into the resulting args. - self.ignore_unknown_options = False - if ctx is not None: - self.allow_interspersed_args = ctx.allow_interspersed_args - self.ignore_unknown_options = ctx.ignore_unknown_options - self._short_opt = {} - self._long_opt = {} - self._opt_prefixes = set(['-', '--']) - self._args = [] - - def add_option(self, opts, dest, action=None, nargs=1, const=None, - obj=None): - """Adds a new option named `dest` to the parser. The destination - is not inferred (unlike with optparse) and needs to be explicitly - provided. Action can be any of ``store``, ``store_const``, - ``append``, ``append_const`` or ``count``. - - The `obj` can be used to identify the option in the order list - that is returned from the parser. - """ - if obj is None: - obj = dest - opts = [normalize_opt(opt, self.ctx) for opt in opts] - option = Option(opts, dest, action=action, nargs=nargs, - const=const, obj=obj) - self._opt_prefixes.update(option.prefixes) - for opt in option._short_opts: - self._short_opt[opt] = option - for opt in option._long_opts: - self._long_opt[opt] = option - - def add_argument(self, dest, nargs=1, obj=None): - """Adds a positional argument named `dest` to the parser. - - The `obj` can be used to identify the option in the order list - that is returned from the parser. - """ - if obj is None: - obj = dest - self._args.append(Argument(dest=dest, nargs=nargs, obj=obj)) - - def parse_args(self, args): - """Parses positional arguments and returns ``(values, args, order)`` - for the parsed options and arguments as well as the leftover - arguments if there are any. The order is a list of objects as they - appear on the command line. If arguments appear multiple times they - will be memorized multiple times as well.
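[Editor's note: a hedged sketch of driving this internal `OptionParser` directly, using only the `add_option`/`add_argument`/`parse_args` API shown in this hunk; the option names and argv are illustrative.]

from click.parser import OptionParser

parser = OptionParser()
parser.add_option(['-n', '--name'], dest='name')
parser.add_option(['-v'], dest='verbose', action='count')
parser.add_argument(dest='src', nargs=1)
opts, largs, order = parser.parse_args(['-vv', '--name', 'demo', 'input.txt'])
# opts == {'verbose': 2, 'name': 'demo', 'src': 'input.txt'}, largs == []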
- """ - state = ParsingState(args) - try: - self._process_args_for_options(state) - self._process_args_for_args(state) - except UsageError: - if self.ctx is None or not self.ctx.resilient_parsing: - raise - return state.opts, state.largs, state.order - - def _process_args_for_args(self, state): - pargs, args = _unpack_args(state.largs + state.rargs, - [x.nargs for x in self._args]) - - for idx, arg in enumerate(self._args): - arg.process(pargs[idx], state) - - state.largs = args - state.rargs = [] - - def _process_args_for_options(self, state): - while state.rargs: - arg = state.rargs.pop(0) - arglen = len(arg) - # Double dashes always handled explicitly regardless of what - # prefixes are valid. - if arg == '--': - return - elif arg[:1] in self._opt_prefixes and arglen > 1: - self._process_opts(arg, state) - elif self.allow_interspersed_args: - state.largs.append(arg) - else: - state.rargs.insert(0, arg) - return - - # Say this is the original argument list: - # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)] - # ^ - # (we are about to process arg(i)). - # - # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of - # [arg0, ..., arg(i-1)] (any options and their arguments will have - # been removed from largs). - # - # The while loop will usually consume 1 or more arguments per pass. - # If it consumes 1 (eg. arg is an option that takes no arguments), - # then after _process_arg() is done the situation is: - # - # largs = subset of [arg0, ..., arg(i)] - # rargs = [arg(i+1), ..., arg(N-1)] - # - # If allow_interspersed_args is false, largs will always be - # *empty* -- still a subset of [arg0, ..., arg(i-1)], but - # not a very interesting subset! - - def _match_long_opt(self, opt, explicit_value, state): - if opt not in self._long_opt: - possibilities = [word for word in self._long_opt - if word.startswith(opt)] - raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx) - - option = self._long_opt[opt] - if option.takes_value: - # At this point it's safe to modify rargs by injecting the - # explicit value, because no exception is raised in this - # branch. This means that the inserted value will be fully - # consumed. - if explicit_value is not None: - state.rargs.insert(0, explicit_value) - - nargs = option.nargs - if len(state.rargs) < nargs: - _error_opt_args(nargs, opt) - elif nargs == 1: - value = state.rargs.pop(0) - else: - value = tuple(state.rargs[:nargs]) - del state.rargs[:nargs] - - elif explicit_value is not None: - raise BadOptionUsage(opt, '%s option does not take a value' % opt) - - else: - value = None - - option.process(value, state) - - def _match_short_opt(self, arg, state): - stop = False - i = 1 - prefix = arg[0] - unknown_options = [] - - for ch in arg[1:]: - opt = normalize_opt(prefix + ch, self.ctx) - option = self._short_opt.get(opt) - i += 1 - - if not option: - if self.ignore_unknown_options: - unknown_options.append(ch) - continue - raise NoSuchOption(opt, ctx=self.ctx) - if option.takes_value: - # Any characters left in arg? Pretend they're the - # next arg, and stop consuming characters of arg. 
- if i < len(arg): - state.rargs.insert(0, arg[i:]) - stop = True - - nargs = option.nargs - if len(state.rargs) < nargs: - _error_opt_args(nargs, opt) - elif nargs == 1: - value = state.rargs.pop(0) - else: - value = tuple(state.rargs[:nargs]) - del state.rargs[:nargs] - - else: - value = None - - option.process(value, state) - - if stop: - break - - # If we got any unknown options we recombine the string of the - # remaining options and re-attach the prefix, then report that - # to the state as a new larg. This way basic option combinatorics - # still work while unknown arguments are ignored. - if self.ignore_unknown_options and unknown_options: - state.largs.append(prefix + ''.join(unknown_options)) - - def _process_opts(self, arg, state): - explicit_value = None - # Long option handling happens in two parts. The first part is - # supporting explicitly attached values. In any case, we will try - # to long match the option first. - if '=' in arg: - long_opt, explicit_value = arg.split('=', 1) - else: - long_opt = arg - norm_long_opt = normalize_opt(long_opt, self.ctx) - - # At this point we will match the (assumed) long option through - # the long option matching code. Note that this allows options - # like "-foo" to be matched as long options. - try: - self._match_long_opt(norm_long_opt, explicit_value, state) - except NoSuchOption: - # At this point the long option matching failed, and we need - # to try with short options. However there is a special rule - # which says that if we have a two character options prefix - # (applies to "--foo" for instance), we do not dispatch to the - # short option code and will instead raise the no option - # error. - if arg[:2] not in self._opt_prefixes: - return self._match_short_opt(arg, state) - if not self.ignore_unknown_options: - raise - state.largs.append(arg) diff --git a/flo-token-explorer/lib/python3.6/site-packages/click/termui.py b/flo-token-explorer/lib/python3.6/site-packages/click/termui.py deleted file mode 100644 index bf9a3aa..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/click/termui.py +++ /dev/null @@ -1,606 +0,0 @@ -import os -import sys -import struct -import inspect -import itertools - -from ._compat import raw_input, text_type, string_types, \ - isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN -from .utils import echo -from .exceptions import Abort, UsageError -from .types import convert_type, Choice, Path -from .globals import resolve_color_default - - -# The prompt functions to use. The doc tools currently override these -# functions to customize how they work.
-visible_prompt_func = raw_input - -_ansi_colors = { - 'black': 30, - 'red': 31, - 'green': 32, - 'yellow': 33, - 'blue': 34, - 'magenta': 35, - 'cyan': 36, - 'white': 37, - 'reset': 39, - 'bright_black': 90, - 'bright_red': 91, - 'bright_green': 92, - 'bright_yellow': 93, - 'bright_blue': 94, - 'bright_magenta': 95, - 'bright_cyan': 96, - 'bright_white': 97, -} -_ansi_reset_all = '\033[0m' - - -def hidden_prompt_func(prompt): - import getpass - return getpass.getpass(prompt) - - -def _build_prompt(text, suffix, show_default=False, default=None, show_choices=True, type=None): - prompt = text - if type is not None and show_choices and isinstance(type, Choice): - prompt += ' (' + ", ".join(map(str, type.choices)) + ')' - if default is not None and show_default: - prompt = '%s [%s]' % (prompt, default) - return prompt + suffix - - -def prompt(text, default=None, hide_input=False, confirmation_prompt=False, - type=None, value_proc=None, prompt_suffix=': ', show_default=True, - err=False, show_choices=True): - """Prompts a user for input. This is a convenience function that can - be used to prompt a user for input. - - If the user aborts the input by sending an interrupt signal, this - function will catch it and raise an :exc:`Abort` exception. - - .. versionadded:: 7.0 - Added the show_choices parameter. - - .. versionadded:: 6.0 - Added unicode support for cmd.exe on Windows. - - .. versionadded:: 4.0 - Added the `err` parameter. - - :param text: the text to show for the prompt. - :param default: the default value to use if no input happens. If this - is not given it will prompt until it's aborted. - :param hide_input: if this is set to true then the input value will - be hidden. - :param confirmation_prompt: asks for confirmation for the value. - :param type: the type to use to check the value against. - :param value_proc: if this parameter is provided it's a function that - is invoked instead of the type conversion to - convert a value. - :param prompt_suffix: a suffix that should be added to the prompt. - :param show_default: shows or hides the default value in the prompt. - :param err: if set to true the file defaults to ``stderr`` instead of - ``stdout``, the same as with echo. - :param show_choices: Show or hide choices if the passed type is a Choice. - For example if type is a Choice of either day or week, - show_choices is true and text is "Group by" then the - prompt will be "Group by (day, week): ". - """ - result = None - - def prompt_func(text): - f = hide_input and hidden_prompt_func or visible_prompt_func - try: - # Write the prompt separately so that we get nice - # coloring through colorama on Windows - echo(text, nl=False, err=err) - return f('') - except (KeyboardInterrupt, EOFError): - # getpass doesn't print a newline if the user aborts input with ^C. - # Allegedly this behavior is inherited from getpass(3). - # A doc bug has been filed at https://bugs.python.org/issue24711 - if hide_input: - echo(None, err=err) - raise Abort() - - if value_proc is None: - value_proc = convert_type(type, default) - - prompt = _build_prompt(text, prompt_suffix, show_default, default, show_choices, type) - - while 1: - while 1: - value = prompt_func(prompt) - if value: - break - elif default is not None: - if isinstance(value_proc, Path): - # validate Path default value (exists, dir_okay etc.)
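[Editor's note: a brief, hedged usage sketch of the `prompt()` API whose implementation is deleted above; the prompt labels, defaults, and ranges are illustrative assumptions.]

import click

port = click.prompt('Port', default=8080, type=click.IntRange(1, 65535))
color = click.prompt('Color', type=click.Choice(['red', 'green']),
                     show_choices=True)
token = click.prompt('Token', hide_input=True, confirmation_prompt=True)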
- value = default - break - return default - try: - result = value_proc(value) - except UsageError as e: - echo('Error: %s' % e.message, err=err) - continue - if not confirmation_prompt: - return result - while 1: - value2 = prompt_func('Repeat for confirmation: ') - if value2: - break - if value == value2: - return result - echo('Error: the two entered values do not match', err=err) - - -def confirm(text, default=False, abort=False, prompt_suffix=': ', - show_default=True, err=False): - """Prompts for confirmation (yes/no question). - - If the user aborts the input by sending an interrupt signal this - function will catch it and raise an :exc:`Abort` exception. - - .. versionadded:: 4.0 - Added the `err` parameter. - - :param text: the question to ask. - :param default: the default for the prompt. - :param abort: if this is set to `True` a negative answer aborts by - raising :exc:`Abort`. - :param prompt_suffix: a suffix that should be added to the prompt. - :param show_default: shows or hides the default value in the prompt. - :param err: if set to true the file defaults to ``stderr`` instead of - ``stdout``, the same as with echo. - """ - prompt = _build_prompt(text, prompt_suffix, show_default, - default and 'Y/n' or 'y/N') - while 1: - try: - # Write the prompt separately so that we get nice - # coloring through colorama on Windows - echo(prompt, nl=False, err=err) - value = visible_prompt_func('').lower().strip() - except (KeyboardInterrupt, EOFError): - raise Abort() - if value in ('y', 'yes'): - rv = True - elif value in ('n', 'no'): - rv = False - elif value == '': - rv = default - else: - echo('Error: invalid input', err=err) - continue - break - if abort and not rv: - raise Abort() - return rv - - -def get_terminal_size(): - """Returns the current size of the terminal as a tuple in the form - ``(width, height)`` in columns and rows. - """ - # If shutil has get_terminal_size() (Python 3.3 and later) use that - if sys.version_info >= (3, 3): - import shutil - shutil_get_terminal_size = getattr(shutil, 'get_terminal_size', None) - if shutil_get_terminal_size: - sz = shutil_get_terminal_size() - return sz.columns, sz.lines - - # We provide a sensible default for get_winterm_size() when being invoked - # inside a subprocess. Without this, it would not provide a useful input. - if get_winterm_size is not None: - size = get_winterm_size() - if size == (0, 0): - return (79, 24) - else: - return size - - def ioctl_gwinsz(fd): - try: - import fcntl - import termios - cr = struct.unpack( - 'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')) - except Exception: - return - return cr - - cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2) - if not cr: - try: - fd = os.open(os.ctermid(), os.O_RDONLY) - try: - cr = ioctl_gwinsz(fd) - finally: - os.close(fd) - except Exception: - pass - if not cr or not cr[0] or not cr[1]: - cr = (os.environ.get('LINES', 25), - os.environ.get('COLUMNS', DEFAULT_COLUMNS)) - return int(cr[1]), int(cr[0]) - - -def echo_via_pager(text_or_generator, color=None): - """This function takes a text and shows it via an environment specific - pager on stdout. - - .. versionchanged:: 3.0 - Added the `color` flag. - - :param text_or_generator: the text to page, or alternatively, a - generator emitting the text to page. - :param color: controls if the pager supports ANSI colors or not. The - default is autodetection.
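[Editor's note: a hedged sketch combining `confirm()` and `get_terminal_size()` from this hunk; the question text and output are illustrative.]

import click

click.confirm('Deploy to production?', abort=True)  # raises Abort on "no"
width, height = click.get_terminal_size()
click.echo('deploying at terminal width %d' % width)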
- """ - color = resolve_color_default(color) - - if inspect.isgeneratorfunction(text_or_generator): - i = text_or_generator() - elif isinstance(text_or_generator, string_types): - i = [text_or_generator] - else: - i = iter(text_or_generator) - - # convert every element of i to a text type if necessary - text_generator = (el if isinstance(el, string_types) else text_type(el) - for el in i) - - from ._termui_impl import pager - return pager(itertools.chain(text_generator, "\n"), color) - - -def progressbar(iterable=None, length=None, label=None, show_eta=True, - show_percent=None, show_pos=False, - item_show_func=None, fill_char='#', empty_char='-', - bar_template='%(label)s [%(bar)s] %(info)s', - info_sep=' ', width=36, file=None, color=None): - """This function creates an iterable context manager that can be used - to iterate over something while showing a progress bar. It will - either iterate over the `iterable` or `length` items (that are counted - up). While iteration happens, this function will print a rendered - progress bar to the given `file` (defaults to stdout) and will attempt - to calculate remaining time and more. By default, this progress bar - will not be rendered if the file is not a terminal. - - The context manager creates the progress bar. When the context - manager is entered the progress bar is already displayed. With every - iteration over the progress bar, the iterable passed to the bar is - advanced and the bar is updated. When the context manager exits, - a newline is printed and the progress bar is finalized on screen. - - No printing must happen or the progress bar will be unintentionally - destroyed. - - Example usage:: - - with progressbar(items) as bar: - for item in bar: - do_something_with(item) - - Alternatively, if no iterable is specified, one can manually update the - progress bar through the `update()` method instead of directly - iterating over the progress bar. The update method accepts the number - of steps to increment the bar with:: - - with progressbar(length=chunks.total_bytes) as bar: - for chunk in chunks: - process_chunk(chunk) - bar.update(chunks.bytes) - - .. versionadded:: 2.0 - - .. versionadded:: 4.0 - Added the `color` parameter. Added a `update` method to the - progressbar object. - - :param iterable: an iterable to iterate over. If not provided the length - is required. - :param length: the number of items to iterate over. By default the - progressbar will attempt to ask the iterator about its - length, which might or might not work. If an iterable is - also provided this parameter can be used to override the - length. If an iterable is not provided the progress bar - will iterate over a range of that length. - :param label: the label to show next to the progress bar. - :param show_eta: enables or disables the estimated time display. This is - automatically disabled if the length cannot be - determined. - :param show_percent: enables or disables the percentage display. The - default is `True` if the iterable has a length or - `False` if not. - :param show_pos: enables or disables the absolute position display. The - default is `False`. - :param item_show_func: a function called with the current item which - can return a string to show the current item - next to the progress bar. Note that the current - item can be `None`! - :param fill_char: the character to use to show the filled part of the - progress bar. - :param empty_char: the character to use to show the non-filled part of - the progress bar. 
- :param bar_template: the format string to use as template for the bar. - The parameters in it are ``label`` for the label, - ``bar`` for the progress bar and ``info`` for the - info section. - :param info_sep: the separator between multiple info items (eta etc.) - :param width: the width of the progress bar in characters, 0 means full - terminal width - :param file: the file to write to. If this is not a terminal then - only the label is printed. - :param color: controls if the terminal supports ANSI colors or not. The - default is autodetection. This is only needed if ANSI - codes are included anywhere in the progress bar output - which is not the case by default. - """ - from ._termui_impl import ProgressBar - color = resolve_color_default(color) - return ProgressBar(iterable=iterable, length=length, show_eta=show_eta, - show_percent=show_percent, show_pos=show_pos, - item_show_func=item_show_func, fill_char=fill_char, - empty_char=empty_char, bar_template=bar_template, - info_sep=info_sep, file=file, label=label, - width=width, color=color) - - -def clear(): - """Clears the terminal screen. This will have the effect of clearing - the whole visible space of the terminal and moving the cursor to the - top left. This does not do anything if not connected to a terminal. - - .. versionadded:: 2.0 - """ - if not isatty(sys.stdout): - return - # If we're on Windows and we don't have colorama available, then we - # clear the screen by shelling out. Otherwise we can use an escape - # sequence. - if WIN: - os.system('cls') - else: - sys.stdout.write('\033[2J\033[1;1H') - - -def style(text, fg=None, bg=None, bold=None, dim=None, underline=None, - blink=None, reverse=None, reset=True): - """Styles a text with ANSI styles and returns the new string. By - default the styling is self contained which means that at the end - of the string a reset code is issued. This can be prevented by - passing ``reset=False``. - - Examples:: - - click.echo(click.style('Hello World!', fg='green')) - click.echo(click.style('ATTENTION!', blink=True)) - click.echo(click.style('Some things', reverse=True, fg='cyan')) - - Supported color names: - - * ``black`` (might be a gray) - * ``red`` - * ``green`` - * ``yellow`` (might be an orange) - * ``blue`` - * ``magenta`` - * ``cyan`` - * ``white`` (might be light gray) - * ``bright_black`` - * ``bright_red`` - * ``bright_green`` - * ``bright_yellow`` - * ``bright_blue`` - * ``bright_magenta`` - * ``bright_cyan`` - * ``bright_white`` - * ``reset`` (reset the color code only) - - .. versionadded:: 2.0 - - .. versionadded:: 7.0 - Added support for bright colors. - - :param text: the string to style with ansi codes. - :param fg: if provided this will become the foreground color. - :param bg: if provided this will become the background color. - :param bold: if provided this will enable or disable bold mode. - :param dim: if provided this will enable or disable dim mode. This is - badly supported. - :param underline: if provided this will enable or disable underline. - :param blink: if provided this will enable or disable blinking. - :param reverse: if provided this will enable or disable inverse - rendering (foreground becomes background and the - other way round). - :param reset: by default a reset-all code is added at the end of the - string which means that styles do not carry over. This - can be disabled to compose styles. 
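[Editor's note: a short, hedged sketch of the `progressbar()`, `style()`, and `secho()` APIs documented above; the iterable, label, and colors are illustrative assumptions.]

import time
import click

with click.progressbar(range(100), label='Processing') as bar:
    for _ in bar:
        time.sleep(0.01)

click.echo(click.style('Done!', fg='green', bold=True))
click.secho('With warnings', fg='yellow')  # echo + style in one call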
- """ - bits = [] - if fg: - try: - bits.append('\033[%dm' % (_ansi_colors[fg])) - except KeyError: - raise TypeError('Unknown color %r' % fg) - if bg: - try: - bits.append('\033[%dm' % (_ansi_colors[bg] + 10)) - except KeyError: - raise TypeError('Unknown color %r' % bg) - if bold is not None: - bits.append('\033[%dm' % (1 if bold else 22)) - if dim is not None: - bits.append('\033[%dm' % (2 if dim else 22)) - if underline is not None: - bits.append('\033[%dm' % (4 if underline else 24)) - if blink is not None: - bits.append('\033[%dm' % (5 if blink else 25)) - if reverse is not None: - bits.append('\033[%dm' % (7 if reverse else 27)) - bits.append(text) - if reset: - bits.append(_ansi_reset_all) - return ''.join(bits) - - -def unstyle(text): - """Removes ANSI styling information from a string. Usually it's not - necessary to use this function as Click's echo function will - automatically remove styling if necessary. - - .. versionadded:: 2.0 - - :param text: the text to remove style information from. - """ - return strip_ansi(text) - - -def secho(message=None, file=None, nl=True, err=False, color=None, **styles): - """This function combines :func:`echo` and :func:`style` into one - call. As such the following two calls are the same:: - - click.secho('Hello World!', fg='green') - click.echo(click.style('Hello World!', fg='green')) - - All keyword arguments are forwarded to the underlying functions - depending on which one they go with. - - .. versionadded:: 2.0 - """ - if message is not None: - message = style(message, **styles) - return echo(message, file=file, nl=nl, err=err, color=color) - - -def edit(text=None, editor=None, env=None, require_save=True, - extension='.txt', filename=None): - r"""Edits the given text in the defined editor. If an editor is given - (should be the full path to the executable but the regular operating - system search path is used for finding the executable) it overrides - the detected editor. Optionally, some environment variables can be - used. If the editor is closed without changes, `None` is returned. In - case a file is edited directly the return value is always `None` and - `require_save` and `extension` are ignored. - - If the editor cannot be opened a :exc:`UsageError` is raised. - - Note for Windows: to simplify cross-platform usage, the newlines are - automatically converted from POSIX to Windows and vice versa. As such, - the message here will have ``\n`` as newline markers. - - :param text: the text to edit. - :param editor: optionally the editor to use. Defaults to automatic - detection. - :param env: environment variables to forward to the editor. - :param require_save: if this is true, then not saving in the editor - will make the return value become `None`. - :param extension: the extension to tell the editor about. This defaults - to `.txt` but changing this might change syntax - highlighting. - :param filename: if provided it will edit this file instead of the - provided text contents. It will not use a temporary - file as an indirection in that case. - """ - from ._termui_impl import Editor - editor = Editor(editor=editor, env=env, require_save=require_save, - extension=extension) - if filename is None: - return editor.edit(text) - editor.edit_file(filename) - - -def launch(url, wait=False, locate=False): - """This function launches the given URL (or filename) in the default - viewer application for this file type. If this is an executable, it - might launch the executable in a new session. 
The return value is - the exit code of the launched application. Usually, ``0`` indicates - success. - - Examples:: - - click.launch('https://click.palletsprojects.com/') - click.launch('/my/downloaded/file', locate=True) - - .. versionadded:: 2.0 - - :param url: URL or filename of the thing to launch. - :param wait: waits for the program to stop. - :param locate: if this is set to `True` then instead of launching the - application associated with the URL it will attempt to - launch a file manager with the file located. This - might have weird effects if the URL does not point to - the filesystem. - """ - from ._termui_impl import open_url - return open_url(url, wait=wait, locate=locate) - - -# If this is provided, getchar() calls into this instead. This is used -# for unittesting purposes. -_getchar = None - - -def getchar(echo=False): - """Fetches a single character from the terminal and returns it. This - will always return a unicode character and under certain rare - circumstances this might return more than one character. The - situation in which more than one character is returned is when, for - whatever reason, multiple characters end up in the terminal buffer or - standard input was not actually a terminal. - - Note that this will always read from the terminal, even if something - is piped into the standard input. - - Note for Windows: in rare cases when typing non-ASCII characters, this - function might wait for a second character and then return both at once. - This is because certain Unicode characters look like special-key markers. - - .. versionadded:: 2.0 - - :param echo: if set to `True`, the character read will also show up on - the terminal. The default is to not show it. - """ - f = _getchar - if f is None: - from ._termui_impl import getchar as f - return f(echo) - - -def raw_terminal(): - from ._termui_impl import raw_terminal as f - return f() - - -def pause(info='Press any key to continue ...', err=False): - """This command stops execution and waits for the user to press any - key to continue. This is similar to the Windows batch "pause" - command. If the program is not run through a terminal, this command - will instead do nothing. - - .. versionadded:: 2.0 - - .. versionadded:: 4.0 - Added the `err` parameter. - - :param info: the info string to print before pausing. - :param err: if set to `True` the message goes to ``stderr`` instead of - ``stdout``, the same as with echo. - """ - if not isatty(sys.stdin) or not isatty(sys.stdout): - return - try: - if info: - echo(info, nl=False, err=err) - try: - getchar() - except (KeyboardInterrupt, EOFError): - pass - finally: - if info: - echo(err=err) diff --git a/flo-token-explorer/lib/python3.6/site-packages/click/testing.py b/flo-token-explorer/lib/python3.6/site-packages/click/testing.py deleted file mode 100644 index 1b2924e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/click/testing.py +++ /dev/null @@ -1,374 +0,0 @@ -import os -import sys -import shutil -import tempfile -import contextlib -import shlex - -from ._compat import iteritems, PY2, string_types - - -# If someone wants to vendor click, we want to ensure the -# correct package is discovered. Ideally we could use a -# relative import here but unfortunately Python does not -# support that.
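Before the testing module continues, a quick sketch of how the interactive helpers above (``launch`` and ``pause``) combine in practice; the URL is just an example::

    import click

    click.echo('Opening the documentation in your browser...')
    click.launch('https://click.palletsprojects.com/')  # returns the viewer's exit code

    # pause() is a no-op when stdin/stdout are not terminals, so it is
    # safe to leave in scripts that may also run non-interactively.
    click.pause(info='Press any key to exit ...')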
-clickpkg = sys.modules[__name__.rsplit('.', 1)[0]] - - -if PY2: - from cStringIO import StringIO -else: - import io - from ._compat import _find_binary_reader - - -class EchoingStdin(object): - - def __init__(self, input, output): - self._input = input - self._output = output - - def __getattr__(self, x): - return getattr(self._input, x) - - def _echo(self, rv): - self._output.write(rv) - return rv - - def read(self, n=-1): - return self._echo(self._input.read(n)) - - def readline(self, n=-1): - return self._echo(self._input.readline(n)) - - def readlines(self): - return [self._echo(x) for x in self._input.readlines()] - - def __iter__(self): - return iter(self._echo(x) for x in self._input) - - def __repr__(self): - return repr(self._input) - - -def make_input_stream(input, charset): - # Is already an input stream. - if hasattr(input, 'read'): - if PY2: - return input - rv = _find_binary_reader(input) - if rv is not None: - return rv - raise TypeError('Could not find binary reader for input stream.') - - if input is None: - input = b'' - elif not isinstance(input, bytes): - input = input.encode(charset) - if PY2: - return StringIO(input) - return io.BytesIO(input) - - -class Result(object): - """Holds the captured result of an invoked CLI script.""" - - def __init__(self, runner, stdout_bytes, stderr_bytes, exit_code, - exception, exc_info=None): - #: The runner that created the result - self.runner = runner - #: The standard output as bytes. - self.stdout_bytes = stdout_bytes - #: The standard error as bytes, or False(y) if not available - self.stderr_bytes = stderr_bytes - #: The exit code as integer. - self.exit_code = exit_code - #: The exception that happened if one did. - self.exception = exception - #: The traceback - self.exc_info = exc_info - - @property - def output(self): - """The (standard) output as unicode string.""" - return self.stdout - - @property - def stdout(self): - """The standard output as unicode string.""" - return self.stdout_bytes.decode(self.runner.charset, 'replace') \ - .replace('\r\n', '\n') - - @property - def stderr(self): - """The standard error as unicode string.""" - if not self.stderr_bytes: - raise ValueError("stderr not separately captured") - return self.stderr_bytes.decode(self.runner.charset, 'replace') \ - .replace('\r\n', '\n') - - - def __repr__(self): - return '<%s %s>' % ( - type(self).__name__, - self.exception and repr(self.exception) or 'okay', - ) - - -class CliRunner(object): - """The CLI runner provides functionality to invoke a Click command line - script for unittesting purposes in a isolated environment. This only - works in single-threaded systems without any concurrency as it changes the - global interpreter state. - - :param charset: the character set for the input and output data. This is - UTF-8 by default and should not be changed currently as - the reporting to Click only works in Python 2 properly. - :param env: a dictionary with environment variables for overriding. - :param echo_stdin: if this is set to `True`, then reading from stdin writes - to stdout. This is useful for showing examples in - some circumstances. Note that regular prompts - will automatically echo the input. - :param mix_stderr: if this is set to `False`, then stdout and stderr are - preserved as independent streams. 
This is useful for - Unix-philosophy apps that have predictable stdout and - noisy stderr, such that each may be measured - independently - """ - - def __init__(self, charset=None, env=None, echo_stdin=False, - mix_stderr=True): - if charset is None: - charset = 'utf-8' - self.charset = charset - self.env = env or {} - self.echo_stdin = echo_stdin - self.mix_stderr = mix_stderr - - def get_default_prog_name(self, cli): - """Given a command object it will return the default program name - for it. The default is the `name` attribute or ``"root"`` if not - set. - """ - return cli.name or 'root' - - def make_env(self, overrides=None): - """Returns the environment overrides for invoking a script.""" - rv = dict(self.env) - if overrides: - rv.update(overrides) - return rv - - @contextlib.contextmanager - def isolation(self, input=None, env=None, color=False): - """A context manager that sets up the isolation for invoking of a - command line tool. This sets up stdin with the given input data - and `os.environ` with the overrides from the given dictionary. - This also rebinds some internals in Click to be mocked (like the - prompt functionality). - - This is automatically done in the :meth:`invoke` method. - - .. versionadded:: 4.0 - The ``color`` parameter was added. - - :param input: the input stream to put into sys.stdin. - :param env: the environment overrides as dictionary. - :param color: whether the output should contain color codes. The - application can still override this explicitly. - """ - input = make_input_stream(input, self.charset) - - old_stdin = sys.stdin - old_stdout = sys.stdout - old_stderr = sys.stderr - old_forced_width = clickpkg.formatting.FORCED_WIDTH - clickpkg.formatting.FORCED_WIDTH = 80 - - env = self.make_env(env) - - if PY2: - bytes_output = StringIO() - if self.echo_stdin: - input = EchoingStdin(input, bytes_output) - sys.stdout = bytes_output - if not self.mix_stderr: - bytes_error = StringIO() - sys.stderr = bytes_error - else: - bytes_output = io.BytesIO() - if self.echo_stdin: - input = EchoingStdin(input, bytes_output) - input = io.TextIOWrapper(input, encoding=self.charset) - sys.stdout = io.TextIOWrapper( - bytes_output, encoding=self.charset) - if not self.mix_stderr: - bytes_error = io.BytesIO() - sys.stderr = io.TextIOWrapper( - bytes_error, encoding=self.charset) - - if self.mix_stderr: - sys.stderr = sys.stdout - - sys.stdin = input - - def visible_input(prompt=None): - sys.stdout.write(prompt or '') - val = input.readline().rstrip('\r\n') - sys.stdout.write(val + '\n') - sys.stdout.flush() - return val - - def hidden_input(prompt=None): - sys.stdout.write((prompt or '') + '\n') - sys.stdout.flush() - return input.readline().rstrip('\r\n') - - def _getchar(echo): - char = sys.stdin.read(1) - if echo: - sys.stdout.write(char) - sys.stdout.flush() - return char - - default_color = color - - def should_strip_ansi(stream=None, color=None): - if color is None: - return not default_color - return not color - - old_visible_prompt_func = clickpkg.termui.visible_prompt_func - old_hidden_prompt_func = clickpkg.termui.hidden_prompt_func - old__getchar_func = clickpkg.termui._getchar - old_should_strip_ansi = clickpkg.utils.should_strip_ansi - clickpkg.termui.visible_prompt_func = visible_input - clickpkg.termui.hidden_prompt_func = hidden_input - clickpkg.termui._getchar = _getchar - clickpkg.utils.should_strip_ansi = should_strip_ansi - - old_env = {} - try: - for key, value in iteritems(env): - old_env[key] = os.environ.get(key) - if value is None: - try: - del 
os.environ[key] - except Exception: - pass - else: - os.environ[key] = value - yield (bytes_output, not self.mix_stderr and bytes_error) - finally: - for key, value in iteritems(old_env): - if value is None: - try: - del os.environ[key] - except Exception: - pass - else: - os.environ[key] = value - sys.stdout = old_stdout - sys.stderr = old_stderr - sys.stdin = old_stdin - clickpkg.termui.visible_prompt_func = old_visible_prompt_func - clickpkg.termui.hidden_prompt_func = old_hidden_prompt_func - clickpkg.termui._getchar = old__getchar_func - clickpkg.utils.should_strip_ansi = old_should_strip_ansi - clickpkg.formatting.FORCED_WIDTH = old_forced_width - - def invoke(self, cli, args=None, input=None, env=None, - catch_exceptions=True, color=False, mix_stderr=False, **extra): - """Invokes a command in an isolated environment. The arguments are - forwarded directly to the command line script, the `extra` keyword - arguments are passed to the :meth:`~clickpkg.Command.main` function of - the command. - - This returns a :class:`Result` object. - - .. versionadded:: 3.0 - The ``catch_exceptions`` parameter was added. - - .. versionchanged:: 3.0 - The result object now has an `exc_info` attribute with the - traceback if available. - - .. versionadded:: 4.0 - The ``color`` parameter was added. - - :param cli: the command to invoke - :param args: the arguments to invoke. It may be given as an iterable - or a string. When given as string it will be interpreted - as a Unix shell command. More details at - :func:`shlex.split`. - :param input: the input data for `sys.stdin`. - :param env: the environment overrides. - :param catch_exceptions: Whether to catch any other exceptions than - ``SystemExit``. - :param extra: the keyword arguments to pass to :meth:`main`. - :param color: whether the output should contain color codes. The - application can still override this explicitly. - """ - exc_info = None - with self.isolation(input=input, env=env, color=color) as outstreams: - exception = None - exit_code = 0 - - if isinstance(args, string_types): - args = shlex.split(args) - - try: - prog_name = extra.pop("prog_name") - except KeyError: - prog_name = self.get_default_prog_name(cli) - - try: - cli.main(args=args or (), prog_name=prog_name, **extra) - except SystemExit as e: - exc_info = sys.exc_info() - exit_code = e.code - if exit_code is None: - exit_code = 0 - - if exit_code != 0: - exception = e - - if not isinstance(exit_code, int): - sys.stdout.write(str(exit_code)) - sys.stdout.write('\n') - exit_code = 1 - - except Exception as e: - if not catch_exceptions: - raise - exception = e - exit_code = 1 - exc_info = sys.exc_info() - finally: - sys.stdout.flush() - stdout = outstreams[0].getvalue() - stderr = outstreams[1] and outstreams[1].getvalue() - - return Result(runner=self, - stdout_bytes=stdout, - stderr_bytes=stderr, - exit_code=exit_code, - exception=exception, - exc_info=exc_info) - - @contextlib.contextmanager - def isolated_filesystem(self): - """A context manager that creates a temporary folder and changes - the current working directory to it for isolated filesystem tests. 
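A sketch of the ``invoke`` and ``isolated_filesystem`` workflow above, using only the public ``click.testing`` API; the ``hello`` command is a made-up example::

    import click
    from click.testing import CliRunner

    @click.command()
    @click.argument('name')
    def hello(name):
        click.echo('Hello %s!' % name)

    runner = CliRunner()
    with runner.isolated_filesystem():  # cwd is a throwaway temp dir here
        result = runner.invoke(hello, ['World'])

    assert result.exit_code == 0
    assert result.output == 'Hello World!\n'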
- """ - cwd = os.getcwd() - t = tempfile.mkdtemp() - os.chdir(t) - try: - yield t - finally: - os.chdir(cwd) - try: - shutil.rmtree(t) - except (OSError, IOError): - pass diff --git a/flo-token-explorer/lib/python3.6/site-packages/click/types.py b/flo-token-explorer/lib/python3.6/site-packages/click/types.py deleted file mode 100644 index 1f88032..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/click/types.py +++ /dev/null @@ -1,668 +0,0 @@ -import os -import stat -from datetime import datetime - -from ._compat import open_stream, text_type, filename_to_ui, \ - get_filesystem_encoding, get_streerror, _get_argv_encoding, PY2 -from .exceptions import BadParameter -from .utils import safecall, LazyFile - - -class ParamType(object): - """Helper for converting values through types. The following is - necessary for a valid type: - - * it needs a name - * it needs to pass through None unchanged - * it needs to convert from a string - * it needs to convert its result type through unchanged - (eg: needs to be idempotent) - * it needs to be able to deal with param and context being `None`. - This can be the case when the object is used with prompt - inputs. - """ - is_composite = False - - #: the descriptive name of this type - name = None - - #: if a list of this type is expected and the value is pulled from a - #: string environment variable, this is what splits it up. `None` - #: means any whitespace. For all parameters the general rule is that - #: whitespace splits them up. The exception are paths and files which - #: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on - #: Windows). - envvar_list_splitter = None - - def __call__(self, value, param=None, ctx=None): - if value is not None: - return self.convert(value, param, ctx) - - def get_metavar(self, param): - """Returns the metavar default for this param if it provides one.""" - - def get_missing_message(self, param): - """Optionally might return extra information about a missing - parameter. - - .. versionadded:: 2.0 - """ - - def convert(self, value, param, ctx): - """Converts the value. This is not invoked for values that are - `None` (the missing value). - """ - return value - - def split_envvar_value(self, rv): - """Given a value from an environment variable this splits it up - into small chunks depending on the defined envvar list splitter. - - If the splitter is set to `None`, which means that whitespace splits, - then leading and trailing whitespace is ignored. Otherwise, leading - and trailing splitters usually lead to empty items being included. 
- """ - return (rv or '').split(self.envvar_list_splitter) - - def fail(self, message, param=None, ctx=None): - """Helper method to fail with an invalid value message.""" - raise BadParameter(message, ctx=ctx, param=param) - - -class CompositeParamType(ParamType): - is_composite = True - - @property - def arity(self): - raise NotImplementedError() - - -class FuncParamType(ParamType): - - def __init__(self, func): - self.name = func.__name__ - self.func = func - - def convert(self, value, param, ctx): - try: - return self.func(value) - except ValueError: - try: - value = text_type(value) - except UnicodeError: - value = str(value).decode('utf-8', 'replace') - self.fail(value, param, ctx) - - -class UnprocessedParamType(ParamType): - name = 'text' - - def convert(self, value, param, ctx): - return value - - def __repr__(self): - return 'UNPROCESSED' - - -class StringParamType(ParamType): - name = 'text' - - def convert(self, value, param, ctx): - if isinstance(value, bytes): - enc = _get_argv_encoding() - try: - value = value.decode(enc) - except UnicodeError: - fs_enc = get_filesystem_encoding() - if fs_enc != enc: - try: - value = value.decode(fs_enc) - except UnicodeError: - value = value.decode('utf-8', 'replace') - return value - return value - - def __repr__(self): - return 'STRING' - - -class Choice(ParamType): - """The choice type allows a value to be checked against a fixed set - of supported values. All of these values have to be strings. - - You should only pass a list or tuple of choices. Other iterables - (like generators) may lead to surprising results. - - See :ref:`choice-opts` for an example. - - :param case_sensitive: Set to false to make choices case - insensitive. Defaults to true. - """ - - name = 'choice' - - def __init__(self, choices, case_sensitive=True): - self.choices = choices - self.case_sensitive = case_sensitive - - def get_metavar(self, param): - return '[%s]' % '|'.join(self.choices) - - def get_missing_message(self, param): - return 'Choose from:\n\t%s.' % ',\n\t'.join(self.choices) - - def convert(self, value, param, ctx): - # Exact match - if value in self.choices: - return value - - # Match through normalization and case sensitivity - # first do token_normalize_func, then lowercase - # preserve original `value` to produce an accurate message in - # `self.fail` - normed_value = value - normed_choices = self.choices - - if ctx is not None and \ - ctx.token_normalize_func is not None: - normed_value = ctx.token_normalize_func(value) - normed_choices = [ctx.token_normalize_func(choice) for choice in - self.choices] - - if not self.case_sensitive: - normed_value = normed_value.lower() - normed_choices = [choice.lower() for choice in normed_choices] - - if normed_value in normed_choices: - return normed_value - - self.fail('invalid choice: %s. (choose from %s)' % - (value, ', '.join(self.choices)), param, ctx) - - def __repr__(self): - return 'Choice(%r)' % list(self.choices) - - -class DateTime(ParamType): - """The DateTime type converts date strings into `datetime` objects. - - The format strings which are checked are configurable, but default to some - common (non-timezone aware) ISO 8601 formats. - - When specifying *DateTime* formats, you should only pass a list or a tuple. - Other iterables, like generators, may lead to surprising results. - - The format strings are processed using ``datetime.strptime``, and this - consequently defines the format strings which are allowed. 
- - Parsing is tried using each format, in order, and the first format which - parses successfully is used. - - :param formats: A list or tuple of date format strings, in the order in - which they should be tried. Defaults to - ``'%Y-%m-%d'``, ``'%Y-%m-%dT%H:%M:%S'``, - ``'%Y-%m-%d %H:%M:%S'``. - """ - name = 'datetime' - - def __init__(self, formats=None): - self.formats = formats or [ - '%Y-%m-%d', - '%Y-%m-%dT%H:%M:%S', - '%Y-%m-%d %H:%M:%S' - ] - - def get_metavar(self, param): - return '[{}]'.format('|'.join(self.formats)) - - def _try_to_convert_date(self, value, format): - try: - return datetime.strptime(value, format) - except ValueError: - return None - - def convert(self, value, param, ctx): - # Exact match - for format in self.formats: - dtime = self._try_to_convert_date(value, format) - if dtime: - return dtime - - self.fail( - 'invalid datetime format: {}. (choose from {})'.format( - value, ', '.join(self.formats))) - - def __repr__(self): - return 'DateTime' - - -class IntParamType(ParamType): - name = 'integer' - - def convert(self, value, param, ctx): - try: - return int(value) - except (ValueError, UnicodeError): - self.fail('%s is not a valid integer' % value, param, ctx) - - def __repr__(self): - return 'INT' - - -class IntRange(IntParamType): - """A parameter that works similar to :data:`click.INT` but restricts - the value to fit into a range. The default behavior is to fail if the - value falls outside the range, but it can also be silently clamped - between the two edges. - - See :ref:`ranges` for an example. - """ - name = 'integer range' - - def __init__(self, min=None, max=None, clamp=False): - self.min = min - self.max = max - self.clamp = clamp - - def convert(self, value, param, ctx): - rv = IntParamType.convert(self, value, param, ctx) - if self.clamp: - if self.min is not None and rv < self.min: - return self.min - if self.max is not None and rv > self.max: - return self.max - if self.min is not None and rv < self.min or \ - self.max is not None and rv > self.max: - if self.min is None: - self.fail('%s is bigger than the maximum valid value ' - '%s.' % (rv, self.max), param, ctx) - elif self.max is None: - self.fail('%s is smaller than the minimum valid value ' - '%s.' % (rv, self.min), param, ctx) - else: - self.fail('%s is not in the valid range of %s to %s.' - % (rv, self.min, self.max), param, ctx) - return rv - - def __repr__(self): - return 'IntRange(%r, %r)' % (self.min, self.max) - - -class FloatParamType(ParamType): - name = 'float' - - def convert(self, value, param, ctx): - try: - return float(value) - except (UnicodeError, ValueError): - self.fail('%s is not a valid floating point value' % - value, param, ctx) - - def __repr__(self): - return 'FLOAT' - - -class FloatRange(FloatParamType): - """A parameter that works similar to :data:`click.FLOAT` but restricts - the value to fit into a range. The default behavior is to fail if the - value falls outside the range, but it can also be silently clamped - between the two edges. - - See :ref:`ranges` for an example. 
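The clamping branch above is easiest to see in isolation; a sketch exercising ``IntRange`` directly, outside of any command::

    from click import BadParameter, IntRange

    clamped = IntRange(0, 100, clamp=True)
    assert clamped.convert('250', None, None) == 100  # silently clamped
    assert clamped.convert('-5', None, None) == 0

    strict = IntRange(0, 100)
    try:
        strict.convert('250', None, None)
    except BadParameter as exc:
        print(exc.message)  # 250 is not in the valid range of 0 to 100.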
- """ - name = 'float range' - - def __init__(self, min=None, max=None, clamp=False): - self.min = min - self.max = max - self.clamp = clamp - - def convert(self, value, param, ctx): - rv = FloatParamType.convert(self, value, param, ctx) - if self.clamp: - if self.min is not None and rv < self.min: - return self.min - if self.max is not None and rv > self.max: - return self.max - if self.min is not None and rv < self.min or \ - self.max is not None and rv > self.max: - if self.min is None: - self.fail('%s is bigger than the maximum valid value ' - '%s.' % (rv, self.max), param, ctx) - elif self.max is None: - self.fail('%s is smaller than the minimum valid value ' - '%s.' % (rv, self.min), param, ctx) - else: - self.fail('%s is not in the valid range of %s to %s.' - % (rv, self.min, self.max), param, ctx) - return rv - - def __repr__(self): - return 'FloatRange(%r, %r)' % (self.min, self.max) - - -class BoolParamType(ParamType): - name = 'boolean' - - def convert(self, value, param, ctx): - if isinstance(value, bool): - return bool(value) - value = value.lower() - if value in ('true', 't', '1', 'yes', 'y'): - return True - elif value in ('false', 'f', '0', 'no', 'n'): - return False - self.fail('%s is not a valid boolean' % value, param, ctx) - - def __repr__(self): - return 'BOOL' - - -class UUIDParameterType(ParamType): - name = 'uuid' - - def convert(self, value, param, ctx): - import uuid - try: - if PY2 and isinstance(value, text_type): - value = value.encode('ascii') - return uuid.UUID(value) - except (UnicodeError, ValueError): - self.fail('%s is not a valid UUID value' % value, param, ctx) - - def __repr__(self): - return 'UUID' - - -class File(ParamType): - """Declares a parameter to be a file for reading or writing. The file - is automatically closed once the context tears down (after the command - finished working). - - Files can be opened for reading or writing. The special value ``-`` - indicates stdin or stdout depending on the mode. - - By default, the file is opened for reading text data, but it can also be - opened in binary mode or for writing. The encoding parameter can be used - to force a specific encoding. - - The `lazy` flag controls if the file should be opened immediately or upon - first IO. The default is to be non-lazy for standard input and output - streams as well as files opened for reading, `lazy` otherwise. When opening a - file lazily for reading, it is still opened temporarily for validation, but - will not be held open until first IO. lazy is mainly useful when opening - for writing to avoid creating the file until it is needed. - - Starting with Click 2.0, files can also be opened atomically in which - case all writes go into a separate file in the same folder and upon - completion the file will be moved over to the original location. This - is useful if a file regularly read by other users is modified. - - See :ref:`file-args` for more information. 
- """ - name = 'filename' - envvar_list_splitter = os.path.pathsep - - def __init__(self, mode='r', encoding=None, errors='strict', lazy=None, - atomic=False): - self.mode = mode - self.encoding = encoding - self.errors = errors - self.lazy = lazy - self.atomic = atomic - - def resolve_lazy_flag(self, value): - if self.lazy is not None: - return self.lazy - if value == '-': - return False - elif 'w' in self.mode: - return True - return False - - def convert(self, value, param, ctx): - try: - if hasattr(value, 'read') or hasattr(value, 'write'): - return value - - lazy = self.resolve_lazy_flag(value) - - if lazy: - f = LazyFile(value, self.mode, self.encoding, self.errors, - atomic=self.atomic) - if ctx is not None: - ctx.call_on_close(f.close_intelligently) - return f - - f, should_close = open_stream(value, self.mode, - self.encoding, self.errors, - atomic=self.atomic) - # If a context is provided, we automatically close the file - # at the end of the context execution (or flush out). If a - # context does not exist, it's the caller's responsibility to - # properly close the file. This for instance happens when the - # type is used with prompts. - if ctx is not None: - if should_close: - ctx.call_on_close(safecall(f.close)) - else: - ctx.call_on_close(safecall(f.flush)) - return f - except (IOError, OSError) as e: - self.fail('Could not open file: %s: %s' % ( - filename_to_ui(value), - get_streerror(e), - ), param, ctx) - - -class Path(ParamType): - """The path type is similar to the :class:`File` type but it performs - different checks. First of all, instead of returning an open file - handle it returns just the filename. Secondly, it can perform various - basic checks about what the file or directory should be. - - .. versionchanged:: 6.0 - `allow_dash` was added. - - :param exists: if set to true, the file or directory needs to exist for - this value to be valid. If this is not required and a - file does indeed not exist, then all further checks are - silently skipped. - :param file_okay: controls if a file is a possible value. - :param dir_okay: controls if a directory is a possible value. - :param writable: if true, a writable check is performed. - :param readable: if true, a readable check is performed. - :param resolve_path: if this is true, then the path is fully resolved - before the value is passed onwards. This means - that it's absolute and symlinks are resolved. It - will not expand a tilde-prefix, as this is - supposed to be done by the shell only. - :param allow_dash: If this is set to `True`, a single dash to indicate - standard streams is permitted. - :param path_type: optionally a string type that should be used to - represent the path. The default is `None` which - means the return value will be either bytes or - unicode depending on what makes most sense given the - input data Click deals with. 
- """ - envvar_list_splitter = os.path.pathsep - - def __init__(self, exists=False, file_okay=True, dir_okay=True, - writable=False, readable=True, resolve_path=False, - allow_dash=False, path_type=None): - self.exists = exists - self.file_okay = file_okay - self.dir_okay = dir_okay - self.writable = writable - self.readable = readable - self.resolve_path = resolve_path - self.allow_dash = allow_dash - self.type = path_type - - if self.file_okay and not self.dir_okay: - self.name = 'file' - self.path_type = 'File' - elif self.dir_okay and not self.file_okay: - self.name = 'directory' - self.path_type = 'Directory' - else: - self.name = 'path' - self.path_type = 'Path' - - def coerce_path_result(self, rv): - if self.type is not None and not isinstance(rv, self.type): - if self.type is text_type: - rv = rv.decode(get_filesystem_encoding()) - else: - rv = rv.encode(get_filesystem_encoding()) - return rv - - def convert(self, value, param, ctx): - rv = value - - is_dash = self.file_okay and self.allow_dash and rv in (b'-', '-') - - if not is_dash: - if self.resolve_path: - rv = os.path.realpath(rv) - - try: - st = os.stat(rv) - except OSError: - if not self.exists: - return self.coerce_path_result(rv) - self.fail('%s "%s" does not exist.' % ( - self.path_type, - filename_to_ui(value) - ), param, ctx) - - if not self.file_okay and stat.S_ISREG(st.st_mode): - self.fail('%s "%s" is a file.' % ( - self.path_type, - filename_to_ui(value) - ), param, ctx) - if not self.dir_okay and stat.S_ISDIR(st.st_mode): - self.fail('%s "%s" is a directory.' % ( - self.path_type, - filename_to_ui(value) - ), param, ctx) - if self.writable and not os.access(value, os.W_OK): - self.fail('%s "%s" is not writable.' % ( - self.path_type, - filename_to_ui(value) - ), param, ctx) - if self.readable and not os.access(value, os.R_OK): - self.fail('%s "%s" is not readable.' % ( - self.path_type, - filename_to_ui(value) - ), param, ctx) - - return self.coerce_path_result(rv) - - -class Tuple(CompositeParamType): - """The default behavior of Click is to apply a type on a value directly. - This works well in most cases, except for when `nargs` is set to a fixed - count and different types should be used for different items. In this - case the :class:`Tuple` type can be used. This type can only be used - if `nargs` is set to a fixed number. - - For more information see :ref:`tuple-type`. - - This can be selected by using a Python tuple literal as a type. - - :param types: a list of types that should be used for the tuple items. - """ - - def __init__(self, types): - self.types = [convert_type(ty) for ty in types] - - @property - def name(self): - return "<" + " ".join(ty.name for ty in self.types) + ">" - - @property - def arity(self): - return len(self.types) - - def convert(self, value, param, ctx): - if len(value) != len(self.types): - raise TypeError('It would appear that nargs is set to conflict ' - 'with the composite type arity.') - return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value)) - - -def convert_type(ty, default=None): - """Converts a callable or python ty into the most appropriate param - ty. 
- """ - guessed_type = False - if ty is None and default is not None: - if isinstance(default, tuple): - ty = tuple(map(type, default)) - else: - ty = type(default) - guessed_type = True - - if isinstance(ty, tuple): - return Tuple(ty) - if isinstance(ty, ParamType): - return ty - if ty is text_type or ty is str or ty is None: - return STRING - if ty is int: - return INT - # Booleans are only okay if not guessed. This is done because for - # flags the default value is actually a bit of a lie in that it - # indicates which of the flags is the one we want. See get_default() - # for more information. - if ty is bool and not guessed_type: - return BOOL - if ty is float: - return FLOAT - if guessed_type: - return STRING - - # Catch a common mistake - if __debug__: - try: - if issubclass(ty, ParamType): - raise AssertionError('Attempted to use an uninstantiated ' - 'parameter type (%s).' % ty) - except TypeError: - pass - return FuncParamType(ty) - - -#: A dummy parameter type that just does nothing. From a user's -#: perspective this appears to just be the same as `STRING` but internally -#: no string conversion takes place. This is necessary to achieve the -#: same bytes/unicode behavior on Python 2/3 in situations where you want -#: to not convert argument types. This is usually useful when working -#: with file paths as they can appear in bytes and unicode. -#: -#: For path related uses the :class:`Path` type is a better choice but -#: there are situations where an unprocessed type is useful which is why -#: it is is provided. -#: -#: .. versionadded:: 4.0 -UNPROCESSED = UnprocessedParamType() - -#: A unicode string parameter type which is the implicit default. This -#: can also be selected by using ``str`` as type. -STRING = StringParamType() - -#: An integer parameter. This can also be selected by using ``int`` as -#: type. -INT = IntParamType() - -#: A floating point value parameter. This can also be selected by using -#: ``float`` as type. -FLOAT = FloatParamType() - -#: A boolean parameter. This is the default for boolean flags. This can -#: also be selected by using ``bool`` as a type. -BOOL = BoolParamType() - -#: A UUID parameter. 
-UUID = UUIDParameterType() diff --git a/flo-token-explorer/lib/python3.6/site-packages/click/utils.py b/flo-token-explorer/lib/python3.6/site-packages/click/utils.py deleted file mode 100644 index fc84369..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/click/utils.py +++ /dev/null @@ -1,440 +0,0 @@ -import os -import sys - -from .globals import resolve_color_default - -from ._compat import text_type, open_stream, get_filesystem_encoding, \ - get_streerror, string_types, PY2, binary_streams, text_streams, \ - filename_to_ui, auto_wrap_for_ansi, strip_ansi, should_strip_ansi, \ - _default_text_stdout, _default_text_stderr, is_bytes, WIN - -if not PY2: - from ._compat import _find_binary_writer -elif WIN: - from ._winconsole import _get_windows_argv, \ - _hash_py_argv, _initial_argv_hash - - -echo_native_types = string_types + (bytes, bytearray) - - -def _posixify(name): - return '-'.join(name.split()).lower() - - -def safecall(func): - """Wraps a function so that it swallows exceptions.""" - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except Exception: - pass - return wrapper - - -def make_str(value): - """Converts a value into a valid string.""" - if isinstance(value, bytes): - try: - return value.decode(get_filesystem_encoding()) - except UnicodeError: - return value.decode('utf-8', 'replace') - return text_type(value) - - -def make_default_short_help(help, max_length=45): - """Return a condensed version of help string.""" - words = help.split() - total_length = 0 - result = [] - done = False - - for word in words: - if word[-1:] == '.': - done = True - new_length = result and 1 + len(word) or len(word) - if total_length + new_length > max_length: - result.append('...') - done = True - else: - if result: - result.append(' ') - result.append(word) - if done: - break - total_length += new_length - - return ''.join(result) - - -class LazyFile(object): - """A lazy file works like a regular file but it does not fully open - the file but it does perform some basic checks early to see if the - filename parameter does make sense. This is useful for safely opening - files for writing. - """ - - def __init__(self, filename, mode='r', encoding=None, errors='strict', - atomic=False): - self.name = filename - self.mode = mode - self.encoding = encoding - self.errors = errors - self.atomic = atomic - - if filename == '-': - self._f, self.should_close = open_stream(filename, mode, - encoding, errors) - else: - if 'r' in mode: - # Open and close the file in case we're opening it for - # reading so that we can catch at least some errors in - # some cases early. - open(filename, mode).close() - self._f = None - self.should_close = True - - def __getattr__(self, name): - return getattr(self.open(), name) - - def __repr__(self): - if self._f is not None: - return repr(self._f) - return '' % (self.name, self.mode) - - def open(self): - """Opens the file if it's not yet open. This call might fail with - a :exc:`FileError`. Not handling this error will produce an error - that Click shows. 
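A sketch of the ``LazyFile`` behavior described above; file names are placeholders::

    from click.utils import LazyFile

    lf = LazyFile('report.txt', mode='w')
    # Nothing exists on disk yet; __getattr__ opens the file on first use.
    lf.write('hello\n')
    lf.close_intelligently()  # closes only files the wrapper itself opened

    out = LazyFile('-', mode='w')  # '-' maps to stdout...
    out.write('to stdout\n')
    out.close_intelligently()      # ...which is therefore left open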
- """ - if self._f is not None: - return self._f - try: - rv, self.should_close = open_stream(self.name, self.mode, - self.encoding, - self.errors, - atomic=self.atomic) - except (IOError, OSError) as e: - from .exceptions import FileError - raise FileError(self.name, hint=get_streerror(e)) - self._f = rv - return rv - - def close(self): - """Closes the underlying file, no matter what.""" - if self._f is not None: - self._f.close() - - def close_intelligently(self): - """This function only closes the file if it was opened by the lazy - file wrapper. For instance this will never close stdin. - """ - if self.should_close: - self.close() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, tb): - self.close_intelligently() - - def __iter__(self): - self.open() - return iter(self._f) - - -class KeepOpenFile(object): - - def __init__(self, file): - self._file = file - - def __getattr__(self, name): - return getattr(self._file, name) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, tb): - pass - - def __repr__(self): - return repr(self._file) - - def __iter__(self): - return iter(self._file) - - -def echo(message=None, file=None, nl=True, err=False, color=None): - """Prints a message plus a newline to the given file or stdout. On - first sight, this looks like the print function, but it has improved - support for handling Unicode and binary data that does not fail no - matter how badly configured the system is. - - Primarily it means that you can print binary data as well as Unicode - data on both 2.x and 3.x to the given file in the most appropriate way - possible. This is a very carefree function in that it will try its - best to not fail. As of Click 6.0 this includes support for unicode - output on the Windows console. - - In addition to that, if `colorama`_ is installed, the echo function will - also support clever handling of ANSI codes. Essentially it will then - do the following: - - - add transparent handling of ANSI color codes on Windows. - - hide ANSI codes automatically if the destination file is not a - terminal. - - .. _colorama: https://pypi.org/project/colorama/ - - .. versionchanged:: 6.0 - As of Click 6.0 the echo function will properly support unicode - output on the windows console. Not that click does not modify - the interpreter in any way which means that `sys.stdout` or the - print statement or function will still not provide unicode support. - - .. versionchanged:: 2.0 - Starting with version 2.0 of Click, the echo function will work - with colorama if it's installed. - - .. versionadded:: 3.0 - The `err` parameter was added. - - .. versionchanged:: 4.0 - Added the `color` flag. - - :param message: the message to print - :param file: the file to write to (defaults to ``stdout``) - :param err: if set to true the file defaults to ``stderr`` instead of - ``stdout``. This is faster and easier than calling - :func:`get_text_stderr` yourself. - :param nl: if set to `True` (the default) a newline is printed afterwards. - :param color: controls if the terminal supports ANSI colors or not. The - default is autodetection. - """ - if file is None: - if err: - file = _default_text_stderr() - else: - file = _default_text_stdout() - - # Convert non bytes/text into the native string type. 
- if message is not None and not isinstance(message, echo_native_types): - message = text_type(message) - - if nl: - message = message or u'' - if isinstance(message, text_type): - message += u'\n' - else: - message += b'\n' - - # If there is a message, and we're in Python 3, and the value looks - # like bytes, we manually need to find the binary stream and write the - # message in there. This is done separately so that most stream - # types will work as you would expect. Eg: you can write to StringIO - # for other cases. - if message and not PY2 and is_bytes(message): - binary_file = _find_binary_writer(file) - if binary_file is not None: - file.flush() - binary_file.write(message) - binary_file.flush() - return - - # ANSI-style support. If there is no message or we are dealing with - # bytes nothing is happening. If we are connected to a file we want - # to strip colors. If we are on windows we either wrap the stream - # to strip the color or we use the colorama support to translate the - # ansi codes to API calls. - if message and not is_bytes(message): - color = resolve_color_default(color) - if should_strip_ansi(file, color): - message = strip_ansi(message) - elif WIN: - if auto_wrap_for_ansi is not None: - file = auto_wrap_for_ansi(file) - elif not color: - message = strip_ansi(message) - - if message: - file.write(message) - file.flush() - - -def get_binary_stream(name): - """Returns a system stream for byte processing. This essentially - returns the stream from the sys module with the given name but it - solves some compatibility issues between different Python versions. - Primarily this function is necessary for getting binary streams on - Python 3. - - :param name: the name of the stream to open. Valid names are ``'stdin'``, - ``'stdout'`` and ``'stderr'`` - """ - opener = binary_streams.get(name) - if opener is None: - raise TypeError('Unknown standard stream %r' % name) - return opener() - - -def get_text_stream(name, encoding=None, errors='strict'): - """Returns a system stream for text processing. This usually returns - a wrapped stream around a binary stream returned from - :func:`get_binary_stream` but it also can take shortcuts on Python 3 - for already correctly configured streams. - - :param name: the name of the stream to open. Valid names are ``'stdin'``, - ``'stdout'`` and ``'stderr'`` - :param encoding: overrides the detected default encoding. - :param errors: overrides the default error mode. - """ - opener = text_streams.get(name) - if opener is None: - raise TypeError('Unknown standard stream %r' % name) - return opener(encoding, errors) - - -def open_file(filename, mode='r', encoding=None, errors='strict', - lazy=False, atomic=False): - """This is similar to how the :class:`File` works but for manual - usage. Files are opened non lazy by default. This can open regular - files as well as stdin/stdout if ``'-'`` is passed. - - If stdin/stdout is returned the stream is wrapped so that the context - manager will not close the stream accidentally. This makes it possible - to always use the function like this without having to worry to - accidentally close a standard stream:: - - with open_file(filename) as f: - ... - - .. versionadded:: 3.0 - - :param filename: the name of the file to open (or ``'-'`` for stdin/stdout). - :param mode: the mode in which to open the file. - :param encoding: the encoding to use. - :param errors: the error handling for this file. - :param lazy: can be flipped to true to open the file lazily. 
- :param atomic: in atomic mode writes go into a temporary file and it's - moved on close. - """ - if lazy: - return LazyFile(filename, mode, encoding, errors, atomic=atomic) - f, should_close = open_stream(filename, mode, encoding, errors, - atomic=atomic) - if not should_close: - f = KeepOpenFile(f) - return f - - -def get_os_args(): - """This returns the argument part of sys.argv in the most appropriate - form for processing. What this means is that this return value is in - a format that works for Click to process but does not necessarily - correspond well to what's actually standard for the interpreter. - - On most environments the return value is ``sys.argv[:1]`` unchanged. - However if you are on Windows and running Python 2 the return value - will actually be a list of unicode strings instead because the - default behavior on that platform otherwise will not be able to - carry all possible values that sys.argv can have. - - .. versionadded:: 6.0 - """ - # We can only extract the unicode argv if sys.argv has not been - # changed since the startup of the application. - if PY2 and WIN and _initial_argv_hash == _hash_py_argv(): - return _get_windows_argv() - return sys.argv[1:] - - -def format_filename(filename, shorten=False): - """Formats a filename for user display. The main purpose of this - function is to ensure that the filename can be displayed at all. This - will decode the filename to unicode if necessary in a way that it will - not fail. Optionally, it can shorten the filename to not include the - full path to the filename. - - :param filename: formats a filename for UI display. This will also convert - the filename into unicode without failing. - :param shorten: this optionally shortens the filename to strip of the - path that leads up to it. - """ - if shorten: - filename = os.path.basename(filename) - return filename_to_ui(filename) - - -def get_app_dir(app_name, roaming=True, force_posix=False): - r"""Returns the config folder for the application. The default behavior - is to return whatever is most appropriate for the operating system. - - To give you an idea, for an app called ``"Foo Bar"``, something like - the following folders could be returned: - - Mac OS X: - ``~/Library/Application Support/Foo Bar`` - Mac OS X (POSIX): - ``~/.foo-bar`` - Unix: - ``~/.config/foo-bar`` - Unix (POSIX): - ``~/.foo-bar`` - Win XP (roaming): - ``C:\Documents and Settings\\Local Settings\Application Data\Foo Bar`` - Win XP (not roaming): - ``C:\Documents and Settings\\Application Data\Foo Bar`` - Win 7 (roaming): - ``C:\Users\\AppData\Roaming\Foo Bar`` - Win 7 (not roaming): - ``C:\Users\\AppData\Local\Foo Bar`` - - .. versionadded:: 2.0 - - :param app_name: the application name. This should be properly capitalized - and can contain whitespace. - :param roaming: controls if the folder should be roaming or not on Windows. - Has no affect otherwise. - :param force_posix: if this is set to `True` then on any POSIX system the - folder will be stored in the home folder with a leading - dot instead of the XDG config home or darwin's - application support folder. - """ - if WIN: - key = roaming and 'APPDATA' or 'LOCALAPPDATA' - folder = os.environ.get(key) - if folder is None: - folder = os.path.expanduser('~') - return os.path.join(folder, app_name) - if force_posix: - return os.path.join(os.path.expanduser('~/.' 
+ _posixify(app_name))) - if sys.platform == 'darwin': - return os.path.join(os.path.expanduser( - '~/Library/Application Support'), app_name) - return os.path.join( - os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config')), - _posixify(app_name)) - - -class PacifyFlushWrapper(object): - """This wrapper is used to catch and suppress BrokenPipeErrors resulting - from ``.flush()`` being called on broken pipe during the shutdown/final-GC - of the Python interpreter. Notably ``.flush()`` is always called on - ``sys.stdout`` and ``sys.stderr``. So as to have minimal impact on any - other cleanup code, and the case where the underlying file is not a broken - pipe, all calls and attributes are proxied. - """ - - def __init__(self, wrapped): - self.wrapped = wrapped - - def flush(self): - try: - self.wrapped.flush() - except IOError as e: - import errno - if e.errno != errno.EPIPE: - raise - - def __getattr__(self, attr): - return getattr(self.wrapped, attr) diff --git a/flo-token-explorer/lib/python3.6/site-packages/dateutil/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/dateutil/__init__.py deleted file mode 100644 index 0defb82..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/dateutil/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# -*- coding: utf-8 -*- -try: - from ._version import version as __version__ -except ImportError: - __version__ = 'unknown' - -__all__ = ['easter', 'parser', 'relativedelta', 'rrule', 'tz', - 'utils', 'zoneinfo'] diff --git a/flo-token-explorer/lib/python3.6/site-packages/dateutil/_common.py b/flo-token-explorer/lib/python3.6/site-packages/dateutil/_common.py deleted file mode 100644 index 4eb2659..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/dateutil/_common.py +++ /dev/null @@ -1,43 +0,0 @@ -""" -Common code used in multiple modules. -""" - - -class weekday(object): - __slots__ = ["weekday", "n"] - - def __init__(self, weekday, n=None): - self.weekday = weekday - self.n = n - - def __call__(self, n): - if n == self.n: - return self - else: - return self.__class__(self.weekday, n) - - def __eq__(self, other): - try: - if self.weekday != other.weekday or self.n != other.n: - return False - except AttributeError: - return False - return True - - def __hash__(self): - return hash(( - self.weekday, - self.n, - )) - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] - if not self.n: - return s - else: - return "%s(%+d)" % (s, self.n) - -# vim:ts=4:sw=4:et diff --git a/flo-token-explorer/lib/python3.6/site-packages/dateutil/_version.py b/flo-token-explorer/lib/python3.6/site-packages/dateutil/_version.py deleted file mode 100644 index 670d7ab..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/dateutil/_version.py +++ /dev/null @@ -1,4 +0,0 @@ -# coding: utf-8 -# file generated by setuptools_scm -# don't change, don't track in version control -version = '2.8.0' diff --git a/flo-token-explorer/lib/python3.6/site-packages/dateutil/easter.py b/flo-token-explorer/lib/python3.6/site-packages/dateutil/easter.py deleted file mode 100644 index 53b7c78..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/dateutil/easter.py +++ /dev/null @@ -1,89 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module offers a generic easter computing method for any given year, using -Western, Orthodox or Julian algorithms. 
-""" - -import datetime - -__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"] - -EASTER_JULIAN = 1 -EASTER_ORTHODOX = 2 -EASTER_WESTERN = 3 - - -def easter(year, method=EASTER_WESTERN): - """ - This method was ported from the work done by GM Arts, - on top of the algorithm by Claus Tondering, which was - based in part on the algorithm of Ouding (1940), as - quoted in "Explanatory Supplement to the Astronomical - Almanac", P. Kenneth Seidelmann, editor. - - This algorithm implements three different easter - calculation methods: - - 1 - Original calculation in Julian calendar, valid in - dates after 326 AD - 2 - Original method, with date converted to Gregorian - calendar, valid in years 1583 to 4099 - 3 - Revised method, in Gregorian calendar, valid in - years 1583 to 4099 as well - - These methods are represented by the constants: - - * ``EASTER_JULIAN = 1`` - * ``EASTER_ORTHODOX = 2`` - * ``EASTER_WESTERN = 3`` - - The default method is method 3. - - More about the algorithm may be found at: - - `GM Arts: Easter Algorithms `_ - - and - - `The Calendar FAQ: Easter `_ - - """ - - if not (1 <= method <= 3): - raise ValueError("invalid method") - - # g - Golden year - 1 - # c - Century - # h - (23 - Epact) mod 30 - # i - Number of days from March 21 to Paschal Full Moon - # j - Weekday for PFM (0=Sunday, etc) - # p - Number of days from March 21 to Sunday on or before PFM - # (-6 to 28 methods 1 & 3, to 56 for method 2) - # e - Extra days to add for method 2 (converting Julian - # date to Gregorian date) - - y = year - g = y % 19 - e = 0 - if method < 3: - # Old method - i = (19*g + 15) % 30 - j = (y + y//4 + i) % 7 - if method == 2: - # Extra dates to convert Julian to Gregorian date - e = 10 - if y > 1600: - e = e + y//100 - 16 - (y//100 - 16)//4 - else: - # New method - c = y//100 - h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30 - i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11)) - j = (y + y//4 + i + 2 - c + c//4) % 7 - - # p can be from -6 to 56 corresponding to dates 22 March to 23 May - # (later dates apply to method 2, although 23 May never actually occurs) - p = i - j + e - d = 1 + (p + 27 + (p + 6)//40) % 31 - m = 3 + (p + 26)//30 - return datetime.date(int(y), int(m), int(d)) diff --git a/flo-token-explorer/lib/python3.6/site-packages/dateutil/parser/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/dateutil/parser/__init__.py deleted file mode 100644 index 216762c..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/dateutil/parser/__init__.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- coding: utf-8 -*- -from ._parser import parse, parser, parserinfo -from ._parser import DEFAULTPARSER, DEFAULTTZPARSER -from ._parser import UnknownTimezoneWarning - -from ._parser import __doc__ - -from .isoparser import isoparser, isoparse - -__all__ = ['parse', 'parser', 'parserinfo', - 'isoparse', 'isoparser', - 'UnknownTimezoneWarning'] - - -### -# Deprecate portions of the private interface so that downstream code that -# is improperly relying on it is given *some* notice. 
- - -def __deprecated_private_func(f): - from functools import wraps - import warnings - - msg = ('{name} is a private function and may break without warning, ' - 'it will be moved and or renamed in future versions.') - msg = msg.format(name=f.__name__) - - @wraps(f) - def deprecated_func(*args, **kwargs): - warnings.warn(msg, DeprecationWarning) - return f(*args, **kwargs) - - return deprecated_func - -def __deprecate_private_class(c): - import warnings - - msg = ('{name} is a private class and may break without warning, ' - 'it will be moved and or renamed in future versions.') - msg = msg.format(name=c.__name__) - - class private_class(c): - __doc__ = c.__doc__ - - def __init__(self, *args, **kwargs): - warnings.warn(msg, DeprecationWarning) - super(private_class, self).__init__(*args, **kwargs) - - private_class.__name__ = c.__name__ - - return private_class - - -from ._parser import _timelex, _resultbase -from ._parser import _tzparser, _parsetz - -_timelex = __deprecate_private_class(_timelex) -_tzparser = __deprecate_private_class(_tzparser) -_resultbase = __deprecate_private_class(_resultbase) -_parsetz = __deprecated_private_func(_parsetz) diff --git a/flo-token-explorer/lib/python3.6/site-packages/dateutil/parser/_parser.py b/flo-token-explorer/lib/python3.6/site-packages/dateutil/parser/_parser.py deleted file mode 100644 index 0da0f3e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/dateutil/parser/_parser.py +++ /dev/null @@ -1,1580 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module offers a generic date/time string parser which is able to parse -most known formats to represent a date and/or time. - -This module attempts to be forgiving with regards to unlikely input formats, -returning a datetime object even for dates which are ambiguous. If an element -of a date/time stamp is omitted, the following rules are applied: - -- If AM or PM is left unspecified, a 24-hour clock is assumed, however, an hour - on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is - specified. -- If a time zone is omitted, a timezone-naive datetime is returned. - -If any other elements are missing, they are taken from the -:class:`datetime.datetime` object passed to the parameter ``default``. If this -results in a day number exceeding the valid number of days per month, the -value falls back to the end of the month. - -Additional resources about date/time string formats can be found below: - -- `A summary of the international standard date and time notation - `_ -- `W3C Date and Time Formats `_ -- `Time Formats (Planetary Rings Node) `_ -- `CPAN ParseDate module - `_ -- `Java SimpleDateFormat Class - `_ -""" -from __future__ import unicode_literals - -import datetime -import re -import string -import time -import warnings - -from calendar import monthrange -from io import StringIO - -import six -from six import integer_types, text_type - -from decimal import Decimal - -from warnings import warn - -from .. import relativedelta -from .. import tz - -__all__ = ["parse", "parserinfo"] - - -# TODO: pandas.core.tools.datetimes imports this explicitly. Might be worth -# making public and/or figuring out if there is something we can -# take off their plate. 
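A sketch of the defaulting rules from the module docstring above; the ``default`` anchor date is arbitrary::

    from datetime import datetime
    from dateutil.parser import parse

    anchor = datetime(2003, 9, 25)

    # Missing elements are taken from ``default``:
    assert parse('10:36', default=anchor) == datetime(2003, 9, 25, 10, 36)

    # With no time zone in the input, the result is naive:
    assert parse('2003-09-25T10:49:41').tzinfo is None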
-class _timelex(object): - # Fractional seconds are sometimes split by a comma - _split_decimal = re.compile("([.,])") - - def __init__(self, instream): - if six.PY2: - # In Python 2, we can't duck type properly because unicode has - # a 'decode' function, and we'd be double-decoding - if isinstance(instream, (bytes, bytearray)): - instream = instream.decode() - else: - if getattr(instream, 'decode', None) is not None: - instream = instream.decode() - - if isinstance(instream, text_type): - instream = StringIO(instream) - elif getattr(instream, 'read', None) is None: - raise TypeError('Parser must be a string or character stream, not ' - '{itype}'.format(itype=instream.__class__.__name__)) - - self.instream = instream - self.charstack = [] - self.tokenstack = [] - self.eof = False - - def get_token(self): - """ - This function breaks the time string into lexical units (tokens), which - can be parsed by the parser. Lexical units are demarcated by changes in - the character set, so any continuous string of letters is considered - one unit, any continuous string of numbers is considered one unit. - - The main complication arises from the fact that dots ('.') can be used - both as separators (e.g. "Sep.20.2009") or decimal points (e.g. - "4:30:21.447"). As such, it is necessary to read the full context of - any dot-separated strings before breaking it into tokens; as such, this - function maintains a "token stack", for when the ambiguous context - demands that multiple tokens be parsed at once. - """ - if self.tokenstack: - return self.tokenstack.pop(0) - - seenletters = False - token = None - state = None - - while not self.eof: - # We only realize that we've reached the end of a token when we - # find a character that's not part of the current token - since - # that character may be part of the next token, it's stored in the - # charstack. - if self.charstack: - nextchar = self.charstack.pop(0) - else: - nextchar = self.instream.read(1) - while nextchar == '\x00': - nextchar = self.instream.read(1) - - if not nextchar: - self.eof = True - break - elif not state: - # First character of the token - determines if we're starting - # to parse a word, a number or something else. - token = nextchar - if self.isword(nextchar): - state = 'a' - elif self.isnum(nextchar): - state = '0' - elif self.isspace(nextchar): - token = ' ' - break # emit token - else: - break # emit token - elif state == 'a': - # If we've already started reading a word, we keep reading - # letters until we find something that's not part of a word. - seenletters = True - if self.isword(nextchar): - token += nextchar - elif nextchar == '.': - token += nextchar - state = 'a.' - else: - self.charstack.append(nextchar) - break # emit token - elif state == '0': - # If we've already started reading a number, we keep reading - # numbers until we find something that doesn't fit. - if self.isnum(nextchar): - token += nextchar - elif nextchar == '.' or (nextchar == ',' and len(token) >= 2): - token += nextchar - state = '0.' - else: - self.charstack.append(nextchar) - break # emit token - elif state == 'a.': - # If we've seen some letters and a dot separator, continue - # parsing, and the tokens will be broken up later. - seenletters = True - if nextchar == '.' or self.isword(nextchar): - token += nextchar - elif self.isnum(nextchar) and token[-1] == '.': - token += nextchar - state = '0.' 
- else: - self.charstack.append(nextchar) - break # emit token - elif state == '0.': - # If we've seen at least one dot separator, keep going, we'll - # break up the tokens later. - if nextchar == '.' or self.isnum(nextchar): - token += nextchar - elif self.isword(nextchar) and token[-1] == '.': - token += nextchar - state = 'a.' - else: - self.charstack.append(nextchar) - break # emit token - - if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or - token[-1] in '.,')): - l = self._split_decimal.split(token) - token = l[0] - for tok in l[1:]: - if tok: - self.tokenstack.append(tok) - - if state == '0.' and token.count('.') == 0: - token = token.replace(',', '.') - - return token - - def __iter__(self): - return self - - def __next__(self): - token = self.get_token() - if token is None: - raise StopIteration - - return token - - def next(self): - return self.__next__() # Python 2.x support - - @classmethod - def split(cls, s): - return list(cls(s)) - - @classmethod - def isword(cls, nextchar): - """ Whether or not the next character is part of a word """ - return nextchar.isalpha() - - @classmethod - def isnum(cls, nextchar): - """ Whether the next character is part of a number """ - return nextchar.isdigit() - - @classmethod - def isspace(cls, nextchar): - """ Whether the next character is whitespace """ - return nextchar.isspace() - - -class _resultbase(object): - - def __init__(self): - for attr in self.__slots__: - setattr(self, attr, None) - - def _repr(self, classname): - l = [] - for attr in self.__slots__: - value = getattr(self, attr) - if value is not None: - l.append("%s=%s" % (attr, repr(value))) - return "%s(%s)" % (classname, ", ".join(l)) - - def __len__(self): - return (sum(getattr(self, attr) is not None - for attr in self.__slots__)) - - def __repr__(self): - return self._repr(self.__class__.__name__) - - -class parserinfo(object): - """ - Class which handles what inputs are accepted. Subclass this to customize - the language and acceptable values for each parameter. - - :param dayfirst: - Whether to interpret the first value in an ambiguous 3-integer date - (e.g. 01/05/09) as the day (``True``) or month (``False``). If - ``yearfirst`` is set to ``True``, this distinguishes between YDM - and YMD. Default is ``False``. - - :param yearfirst: - Whether to interpret the first value in an ambiguous 3-integer date - (e.g. 01/05/09) as the year. If ``True``, the first number is taken - to be the year, otherwise the last number is taken to be the year. - Default is ``False``. 
- """ - - # m from a.m/p.m, t from ISO T separator - JUMP = [" ", ".", ",", ";", "-", "/", "'", - "at", "on", "and", "ad", "m", "t", "of", - "st", "nd", "rd", "th"] - - WEEKDAYS = [("Mon", "Monday"), - ("Tue", "Tuesday"), # TODO: "Tues" - ("Wed", "Wednesday"), - ("Thu", "Thursday"), # TODO: "Thurs" - ("Fri", "Friday"), - ("Sat", "Saturday"), - ("Sun", "Sunday")] - MONTHS = [("Jan", "January"), - ("Feb", "February"), # TODO: "Febr" - ("Mar", "March"), - ("Apr", "April"), - ("May", "May"), - ("Jun", "June"), - ("Jul", "July"), - ("Aug", "August"), - ("Sep", "Sept", "September"), - ("Oct", "October"), - ("Nov", "November"), - ("Dec", "December")] - HMS = [("h", "hour", "hours"), - ("m", "minute", "minutes"), - ("s", "second", "seconds")] - AMPM = [("am", "a"), - ("pm", "p")] - UTCZONE = ["UTC", "GMT", "Z", "z"] - PERTAIN = ["of"] - TZOFFSET = {} - # TODO: ERA = ["AD", "BC", "CE", "BCE", "Stardate", - # "Anno Domini", "Year of Our Lord"] - - def __init__(self, dayfirst=False, yearfirst=False): - self._jump = self._convert(self.JUMP) - self._weekdays = self._convert(self.WEEKDAYS) - self._months = self._convert(self.MONTHS) - self._hms = self._convert(self.HMS) - self._ampm = self._convert(self.AMPM) - self._utczone = self._convert(self.UTCZONE) - self._pertain = self._convert(self.PERTAIN) - - self.dayfirst = dayfirst - self.yearfirst = yearfirst - - self._year = time.localtime().tm_year - self._century = self._year // 100 * 100 - - def _convert(self, lst): - dct = {} - for i, v in enumerate(lst): - if isinstance(v, tuple): - for v in v: - dct[v.lower()] = i - else: - dct[v.lower()] = i - return dct - - def jump(self, name): - return name.lower() in self._jump - - def weekday(self, name): - try: - return self._weekdays[name.lower()] - except KeyError: - pass - return None - - def month(self, name): - try: - return self._months[name.lower()] + 1 - except KeyError: - pass - return None - - def hms(self, name): - try: - return self._hms[name.lower()] - except KeyError: - return None - - def ampm(self, name): - try: - return self._ampm[name.lower()] - except KeyError: - return None - - def pertain(self, name): - return name.lower() in self._pertain - - def utczone(self, name): - return name.lower() in self._utczone - - def tzoffset(self, name): - if name in self._utczone: - return 0 - - return self.TZOFFSET.get(name) - - def convertyear(self, year, century_specified=False): - """ - Converts two-digit years to year within [-50, 49] - range of self._year (current local time) - """ - - # Function contract is that the year is always positive - assert year >= 0 - - if year < 100 and not century_specified: - # assume current century to start - year += self._century - - if year >= self._year + 50: # if too far in future - year -= 100 - elif year < self._year - 50: # if too far in past - year += 100 - - return year - - def validate(self, res): - # move to info - if res.year is not None: - res.year = self.convertyear(res.year, res.century_specified) - - if ((res.tzoffset == 0 and not res.tzname) or - (res.tzname == 'Z' or res.tzname == 'z')): - res.tzname = "UTC" - res.tzoffset = 0 - elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname): - res.tzoffset = 0 - return True - - -class _ymd(list): - def __init__(self, *args, **kwargs): - super(self.__class__, self).__init__(*args, **kwargs) - self.century_specified = False - self.dstridx = None - self.mstridx = None - self.ystridx = None - - @property - def has_year(self): - return self.ystridx is not None - - @property - def has_month(self): - 
return self.mstridx is not None - - @property - def has_day(self): - return self.dstridx is not None - - def could_be_day(self, value): - if self.has_day: - return False - elif not self.has_month: - return 1 <= value <= 31 - elif not self.has_year: - # Be permissive, assume leapyear - month = self[self.mstridx] - return 1 <= value <= monthrange(2000, month)[1] - else: - month = self[self.mstridx] - year = self[self.ystridx] - return 1 <= value <= monthrange(year, month)[1] - - def append(self, val, label=None): - if hasattr(val, '__len__'): - if val.isdigit() and len(val) > 2: - self.century_specified = True - if label not in [None, 'Y']: # pragma: no cover - raise ValueError(label) - label = 'Y' - elif val > 100: - self.century_specified = True - if label not in [None, 'Y']: # pragma: no cover - raise ValueError(label) - label = 'Y' - - super(self.__class__, self).append(int(val)) - - if label == 'M': - if self.has_month: - raise ValueError('Month is already set') - self.mstridx = len(self) - 1 - elif label == 'D': - if self.has_day: - raise ValueError('Day is already set') - self.dstridx = len(self) - 1 - elif label == 'Y': - if self.has_year: - raise ValueError('Year is already set') - self.ystridx = len(self) - 1 - - def _resolve_from_stridxs(self, strids): - """ - Try to resolve the identities of year/month/day elements using - ystridx, mstridx, and dstridx, if enough of these are specified. - """ - if len(self) == 3 and len(strids) == 2: - # we can back out the remaining stridx value - missing = [x for x in range(3) if x not in strids.values()] - key = [x for x in ['y', 'm', 'd'] if x not in strids] - assert len(missing) == len(key) == 1 - key = key[0] - val = missing[0] - strids[key] = val - - assert len(self) == len(strids) # otherwise this should not be called - out = {key: self[strids[key]] for key in strids} - return (out.get('y'), out.get('m'), out.get('d')) - - def resolve_ymd(self, yearfirst, dayfirst): - len_ymd = len(self) - year, month, day = (None, None, None) - - strids = (('y', self.ystridx), - ('m', self.mstridx), - ('d', self.dstridx)) - - strids = {key: val for key, val in strids if val is not None} - if (len(self) == len(strids) > 0 or - (len(self) == 3 and len(strids) == 2)): - return self._resolve_from_stridxs(strids) - - mstridx = self.mstridx - - if len_ymd > 3: - raise ValueError("More than three YMD values") - elif len_ymd == 1 or (mstridx is not None and len_ymd == 2): - # One member, or two members with a month string - if mstridx is not None: - month = self[mstridx] - # since mstridx is 0 or 1, self[mstridx-1] always - # looks up the other element - other = self[mstridx - 1] - else: - other = self[0] - - if len_ymd > 1 or mstridx is None: - if other > 31: - year = other - else: - day = other - - elif len_ymd == 2: - # Two members with numbers - if self[0] > 31: - # 99-01 - year, month = self - elif self[1] > 31: - # 01-99 - month, year = self - elif dayfirst and self[1] <= 12: - # 13-01 - day, month = self - else: - # 01-13 - month, day = self - - elif len_ymd == 3: - # Three members - if mstridx == 0: - if self[1] > 31: - # Apr-2003-25 - month, year, day = self - else: - month, day, year = self - elif mstridx == 1: - if self[0] > 31 or (yearfirst and self[2] <= 31): - # 99-Jan-01 - year, month, day = self - else: - # 01-Jan-01 - # Give precendence to day-first, since - # two-digit years is usually hand-written. - day, month, year = self - - elif mstridx == 2: - # WTF!? 
- if self[1] > 31: - # 01-99-Jan - day, year, month = self - else: - # 99-01-Jan - year, day, month = self - - else: - if (self[0] > 31 or - self.ystridx == 0 or - (yearfirst and self[1] <= 12 and self[2] <= 31)): - # 99-01-01 - if dayfirst and self[2] <= 12: - year, day, month = self - else: - year, month, day = self - elif self[0] > 12 or (dayfirst and self[1] <= 12): - # 13-01-01 - day, month, year = self - else: - # 01-13-01 - month, day, year = self - - return year, month, day - - -class parser(object): - def __init__(self, info=None): - self.info = info or parserinfo() - - def parse(self, timestr, default=None, - ignoretz=False, tzinfos=None, **kwargs): - """ - Parse the date/time string into a :class:`datetime.datetime` object. - - :param timestr: - Any date/time string using the supported formats. - - :param default: - The default datetime object, if this is a datetime object and not - ``None``, elements specified in ``timestr`` replace elements in the - default object. - - :param ignoretz: - If set ``True``, time zones in parsed strings are ignored and a - naive :class:`datetime.datetime` object is returned. - - :param tzinfos: - Additional time zone names / aliases which may be present in the - string. This argument maps time zone names (and optionally offsets - from those time zones) to time zones. This parameter can be a - dictionary with timezone aliases mapping time zone names to time - zones or a function taking two parameters (``tzname`` and - ``tzoffset``) and returning a time zone. - - The timezones to which the names are mapped can be an integer - offset from UTC in seconds or a :class:`tzinfo` object. - - .. doctest:: - :options: +NORMALIZE_WHITESPACE - - >>> from dateutil.parser import parse - >>> from dateutil.tz import gettz - >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")} - >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos) - datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200)) - >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos) - datetime.datetime(2012, 1, 19, 17, 21, - tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago')) - - This parameter is ignored if ``ignoretz`` is set. - - :param \\*\\*kwargs: - Keyword arguments as passed to ``_parse()``. - - :return: - Returns a :class:`datetime.datetime` object or, if the - ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the - first element being a :class:`datetime.datetime` object, the second - a tuple containing the fuzzy tokens. - - :raises ValueError: - Raised for invalid or unknown string format, if the provided - :class:`tzinfo` is not in a valid format, or if an invalid date - would be created. - - :raises TypeError: - Raised for non-string or character stream input. - - :raises OverflowError: - Raised if the parsed date exceeds the largest valid C integer on - your system. 
- """ - - if default is None: - default = datetime.datetime.now().replace(hour=0, minute=0, - second=0, microsecond=0) - - res, skipped_tokens = self._parse(timestr, **kwargs) - - if res is None: - raise ValueError("Unknown string format:", timestr) - - if len(res) == 0: - raise ValueError("String does not contain a date:", timestr) - - ret = self._build_naive(res, default) - - if not ignoretz: - ret = self._build_tzaware(ret, res, tzinfos) - - if kwargs.get('fuzzy_with_tokens', False): - return ret, skipped_tokens - else: - return ret - - class _result(_resultbase): - __slots__ = ["year", "month", "day", "weekday", - "hour", "minute", "second", "microsecond", - "tzname", "tzoffset", "ampm","any_unused_tokens"] - - def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False, - fuzzy_with_tokens=False): - """ - Private method which performs the heavy lifting of parsing, called from - ``parse()``, which passes on its ``kwargs`` to this function. - - :param timestr: - The string to parse. - - :param dayfirst: - Whether to interpret the first value in an ambiguous 3-integer date - (e.g. 01/05/09) as the day (``True``) or month (``False``). If - ``yearfirst`` is set to ``True``, this distinguishes between YDM - and YMD. If set to ``None``, this value is retrieved from the - current :class:`parserinfo` object (which itself defaults to - ``False``). - - :param yearfirst: - Whether to interpret the first value in an ambiguous 3-integer date - (e.g. 01/05/09) as the year. If ``True``, the first number is taken - to be the year, otherwise the last number is taken to be the year. - If this is set to ``None``, the value is retrieved from the current - :class:`parserinfo` object (which itself defaults to ``False``). - - :param fuzzy: - Whether to allow fuzzy parsing, allowing for string like "Today is - January 1, 2047 at 8:21:00AM". - - :param fuzzy_with_tokens: - If ``True``, ``fuzzy`` is automatically set to True, and the parser - will return a tuple where the first element is the parsed - :class:`datetime.datetime` datetimestamp and the second element is - a tuple containing the portions of the string which were ignored: - - .. 
doctest:: - - >>> from dateutil.parser import parse - >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True) - (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) - - """ - if fuzzy_with_tokens: - fuzzy = True - - info = self.info - - if dayfirst is None: - dayfirst = info.dayfirst - - if yearfirst is None: - yearfirst = info.yearfirst - - res = self._result() - l = _timelex.split(timestr) # Splits the timestr into tokens - - skipped_idxs = [] - - # year/month/day list - ymd = _ymd() - - len_l = len(l) - i = 0 - try: - while i < len_l: - - # Check if it's a number - value_repr = l[i] - try: - value = float(value_repr) - except ValueError: - value = None - - if value is not None: - # Numeric token - i = self._parse_numeric_token(l, i, info, ymd, res, fuzzy) - - # Check weekday - elif info.weekday(l[i]) is not None: - value = info.weekday(l[i]) - res.weekday = value - - # Check month name - elif info.month(l[i]) is not None: - value = info.month(l[i]) - ymd.append(value, 'M') - - if i + 1 < len_l: - if l[i + 1] in ('-', '/'): - # Jan-01[-99] - sep = l[i + 1] - ymd.append(l[i + 2]) - - if i + 3 < len_l and l[i + 3] == sep: - # Jan-01-99 - ymd.append(l[i + 4]) - i += 2 - - i += 2 - - elif (i + 4 < len_l and l[i + 1] == l[i + 3] == ' ' and - info.pertain(l[i + 2])): - # Jan of 01 - # In this case, 01 is clearly year - if l[i + 4].isdigit(): - # Convert it here to become unambiguous - value = int(l[i + 4]) - year = str(info.convertyear(value)) - ymd.append(year, 'Y') - else: - # Wrong guess - pass - # TODO: not hit in tests - i += 4 - - # Check am/pm - elif info.ampm(l[i]) is not None: - value = info.ampm(l[i]) - val_is_ampm = self._ampm_valid(res.hour, res.ampm, fuzzy) - - if val_is_ampm: - res.hour = self._adjust_ampm(res.hour, value) - res.ampm = value - - elif fuzzy: - skipped_idxs.append(i) - - # Check for a timezone name - elif self._could_be_tzname(res.hour, res.tzname, res.tzoffset, l[i]): - res.tzname = l[i] - res.tzoffset = info.tzoffset(res.tzname) - - # Check for something like GMT+3, or BRST+3. Notice - # that it doesn't mean "I am 3 hours after GMT", but - # "my time +3 is GMT". If found, we reverse the - # logic so that timezone parsing code will get it - # right. - if i + 1 < len_l and l[i + 1] in ('+', '-'): - l[i + 1] = ('+', '-')[l[i + 1] == '+'] - res.tzoffset = None - if info.utczone(res.tzname): - # With something like GMT+3, the timezone - # is *not* GMT. - res.tzname = None - - # Check for a numbered timezone - elif res.hour is not None and l[i] in ('+', '-'): - signal = (-1, 1)[l[i] == '+'] - len_li = len(l[i + 1]) - - # TODO: check that l[i + 1] is integer? - if len_li == 4: - # -0300 - hour_offset = int(l[i + 1][:2]) - min_offset = int(l[i + 1][2:]) - elif i + 2 < len_l and l[i + 2] == ':': - # -03:00 - hour_offset = int(l[i + 1]) - min_offset = int(l[i + 3]) # TODO: Check that l[i+3] is minute-like? 
- i += 2 - elif len_li <= 2: - # -[0]3 - hour_offset = int(l[i + 1][:2]) - min_offset = 0 - else: - raise ValueError(timestr) - - res.tzoffset = signal * (hour_offset * 3600 + min_offset * 60) - - # Look for a timezone name between parenthesis - if (i + 5 < len_l and - info.jump(l[i + 2]) and l[i + 3] == '(' and - l[i + 5] == ')' and - 3 <= len(l[i + 4]) and - self._could_be_tzname(res.hour, res.tzname, - None, l[i + 4])): - # -0300 (BRST) - res.tzname = l[i + 4] - i += 4 - - i += 1 - - # Check jumps - elif not (info.jump(l[i]) or fuzzy): - raise ValueError(timestr) - - else: - skipped_idxs.append(i) - i += 1 - - # Process year/month/day - year, month, day = ymd.resolve_ymd(yearfirst, dayfirst) - - res.century_specified = ymd.century_specified - res.year = year - res.month = month - res.day = day - - except (IndexError, ValueError): - return None, None - - if not info.validate(res): - return None, None - - if fuzzy_with_tokens: - skipped_tokens = self._recombine_skipped(l, skipped_idxs) - return res, tuple(skipped_tokens) - else: - return res, None - - def _parse_numeric_token(self, tokens, idx, info, ymd, res, fuzzy): - # Token is a number - value_repr = tokens[idx] - try: - value = self._to_decimal(value_repr) - except Exception as e: - six.raise_from(ValueError('Unknown numeric token'), e) - - len_li = len(value_repr) - - len_l = len(tokens) - - if (len(ymd) == 3 and len_li in (2, 4) and - res.hour is None and - (idx + 1 >= len_l or - (tokens[idx + 1] != ':' and - info.hms(tokens[idx + 1]) is None))): - # 19990101T23[59] - s = tokens[idx] - res.hour = int(s[:2]) - - if len_li == 4: - res.minute = int(s[2:]) - - elif len_li == 6 or (len_li > 6 and tokens[idx].find('.') == 6): - # YYMMDD or HHMMSS[.ss] - s = tokens[idx] - - if not ymd and '.' not in tokens[idx]: - ymd.append(s[:2]) - ymd.append(s[2:4]) - ymd.append(s[4:]) - else: - # 19990101T235959[.59] - - # TODO: Check if res attributes already set. - res.hour = int(s[:2]) - res.minute = int(s[2:4]) - res.second, res.microsecond = self._parsems(s[4:]) - - elif len_li in (8, 12, 14): - # YYYYMMDD - s = tokens[idx] - ymd.append(s[:4], 'Y') - ymd.append(s[4:6]) - ymd.append(s[6:8]) - - if len_li > 8: - res.hour = int(s[8:10]) - res.minute = int(s[10:12]) - - if len_li > 12: - res.second = int(s[12:]) - - elif self._find_hms_idx(idx, tokens, info, allow_jump=True) is not None: - # HH[ ]h or MM[ ]m or SS[.ss][ ]s - hms_idx = self._find_hms_idx(idx, tokens, info, allow_jump=True) - (idx, hms) = self._parse_hms(idx, tokens, info, hms_idx) - if hms is not None: - # TODO: checking that hour/minute/second are not - # already set? - self._assign_hms(res, value_repr, hms) - - elif idx + 2 < len_l and tokens[idx + 1] == ':': - # HH:MM[:SS[.ss]] - res.hour = int(value) - value = self._to_decimal(tokens[idx + 2]) # TODO: try/except for this? 
- (res.minute, res.second) = self._parse_min_sec(value) - - if idx + 4 < len_l and tokens[idx + 3] == ':': - res.second, res.microsecond = self._parsems(tokens[idx + 4]) - - idx += 2 - - idx += 2 - - elif idx + 1 < len_l and tokens[idx + 1] in ('-', '/', '.'): - sep = tokens[idx + 1] - ymd.append(value_repr) - - if idx + 2 < len_l and not info.jump(tokens[idx + 2]): - if tokens[idx + 2].isdigit(): - # 01-01[-01] - ymd.append(tokens[idx + 2]) - else: - # 01-Jan[-01] - value = info.month(tokens[idx + 2]) - - if value is not None: - ymd.append(value, 'M') - else: - raise ValueError() - - if idx + 3 < len_l and tokens[idx + 3] == sep: - # We have three members - value = info.month(tokens[idx + 4]) - - if value is not None: - ymd.append(value, 'M') - else: - ymd.append(tokens[idx + 4]) - idx += 2 - - idx += 1 - idx += 1 - - elif idx + 1 >= len_l or info.jump(tokens[idx + 1]): - if idx + 2 < len_l and info.ampm(tokens[idx + 2]) is not None: - # 12 am - hour = int(value) - res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 2])) - idx += 1 - else: - # Year, month or day - ymd.append(value) - idx += 1 - - elif info.ampm(tokens[idx + 1]) is not None and (0 <= value < 24): - # 12am - hour = int(value) - res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 1])) - idx += 1 - - elif ymd.could_be_day(value): - ymd.append(value) - - elif not fuzzy: - raise ValueError() - - return idx - - def _find_hms_idx(self, idx, tokens, info, allow_jump): - len_l = len(tokens) - - if idx+1 < len_l and info.hms(tokens[idx+1]) is not None: - # There is an "h", "m", or "s" label following this token. We take - # assign the upcoming label to the current token. - # e.g. the "12" in 12h" - hms_idx = idx + 1 - - elif (allow_jump and idx+2 < len_l and tokens[idx+1] == ' ' and - info.hms(tokens[idx+2]) is not None): - # There is a space and then an "h", "m", or "s" label. - # e.g. the "12" in "12 h" - hms_idx = idx + 2 - - elif idx > 0 and info.hms(tokens[idx-1]) is not None: - # There is a "h", "m", or "s" preceeding this token. Since neither - # of the previous cases was hit, there is no label following this - # token, so we use the previous label. - # e.g. the "04" in "12h04" - hms_idx = idx-1 - - elif (1 < idx == len_l-1 and tokens[idx-1] == ' ' and - info.hms(tokens[idx-2]) is not None): - # If we are looking at the final token, we allow for a - # backward-looking check to skip over a space. - # TODO: Are we sure this is the right condition here? - hms_idx = idx - 2 - - else: - hms_idx = None - - return hms_idx - - def _assign_hms(self, res, value_repr, hms): - # See GH issue #427, fixing float rounding - value = self._to_decimal(value_repr) - - if hms == 0: - # Hour - res.hour = int(value) - if value % 1: - res.minute = int(60*(value % 1)) - - elif hms == 1: - (res.minute, res.second) = self._parse_min_sec(value) - - elif hms == 2: - (res.second, res.microsecond) = self._parsems(value_repr) - - def _could_be_tzname(self, hour, tzname, tzoffset, token): - return (hour is not None and - tzname is None and - tzoffset is None and - len(token) <= 5 and - (all(x in string.ascii_uppercase for x in token) - or token in self.info.UTCZONE)) - - def _ampm_valid(self, hour, ampm, fuzzy): - """ - For fuzzy parsing, 'a' or 'am' (both valid English words) - may erroneously trigger the AM/PM flag. Deal with that - here. - """ - val_is_ampm = True - - # If there's already an AM/PM flag, this one isn't one. 
- if fuzzy and ampm is not None: - val_is_ampm = False - - # If AM/PM is found and hour is not, raise a ValueError - if hour is None: - if fuzzy: - val_is_ampm = False - else: - raise ValueError('No hour specified with AM or PM flag.') - elif not 0 <= hour <= 12: - # If AM/PM is found, it's a 12 hour clock, so raise - # an error for invalid range - if fuzzy: - val_is_ampm = False - else: - raise ValueError('Invalid hour specified for 12-hour clock.') - - return val_is_ampm - - def _adjust_ampm(self, hour, ampm): - if hour < 12 and ampm == 1: - hour += 12 - elif hour == 12 and ampm == 0: - hour = 0 - return hour - - def _parse_min_sec(self, value): - # TODO: Every usage of this function sets res.second to the return - # value. Are there any cases where second will be returned as None and - # we *dont* want to set res.second = None? - minute = int(value) - second = None - - sec_remainder = value % 1 - if sec_remainder: - second = int(60 * sec_remainder) - return (minute, second) - - def _parsems(self, value): - """Parse a I[.F] seconds value into (seconds, microseconds).""" - if "." not in value: - return int(value), 0 - else: - i, f = value.split(".") - return int(i), int(f.ljust(6, "0")[:6]) - - def _parse_hms(self, idx, tokens, info, hms_idx): - # TODO: Is this going to admit a lot of false-positives for when we - # just happen to have digits and "h", "m" or "s" characters in non-date - # text? I guess hex hashes won't have that problem, but there's plenty - # of random junk out there. - if hms_idx is None: - hms = None - new_idx = idx - elif hms_idx > idx: - hms = info.hms(tokens[hms_idx]) - new_idx = hms_idx - else: - # Looking backwards, increment one. - hms = info.hms(tokens[hms_idx]) + 1 - new_idx = idx - - return (new_idx, hms) - - def _recombine_skipped(self, tokens, skipped_idxs): - """ - >>> tokens = ["foo", " ", "bar", " ", "19June2000", "baz"] - >>> skipped_idxs = [0, 1, 2, 5] - >>> _recombine_skipped(tokens, skipped_idxs) - ["foo bar", "baz"] - """ - skipped_tokens = [] - for i, idx in enumerate(sorted(skipped_idxs)): - if i > 0 and idx - 1 == skipped_idxs[i - 1]: - skipped_tokens[-1] = skipped_tokens[-1] + tokens[idx] - else: - skipped_tokens.append(tokens[idx]) - - return skipped_tokens - - def _build_tzinfo(self, tzinfos, tzname, tzoffset): - if callable(tzinfos): - tzdata = tzinfos(tzname, tzoffset) - else: - tzdata = tzinfos.get(tzname) - # handle case where tzinfo is paased an options that returns None - # eg tzinfos = {'BRST' : None} - if isinstance(tzdata, datetime.tzinfo) or tzdata is None: - tzinfo = tzdata - elif isinstance(tzdata, text_type): - tzinfo = tz.tzstr(tzdata) - elif isinstance(tzdata, integer_types): - tzinfo = tz.tzoffset(tzname, tzdata) - return tzinfo - - def _build_tzaware(self, naive, res, tzinfos): - if (callable(tzinfos) or (tzinfos and res.tzname in tzinfos)): - tzinfo = self._build_tzinfo(tzinfos, res.tzname, res.tzoffset) - aware = naive.replace(tzinfo=tzinfo) - aware = self._assign_tzname(aware, res.tzname) - - elif res.tzname and res.tzname in time.tzname: - aware = naive.replace(tzinfo=tz.tzlocal()) - - # Handle ambiguous local datetime - aware = self._assign_tzname(aware, res.tzname) - - # This is mostly relevant for winter GMT zones parsed in the UK - if (aware.tzname() != res.tzname and - res.tzname in self.info.UTCZONE): - aware = aware.replace(tzinfo=tz.tzutc()) - - elif res.tzoffset == 0: - aware = naive.replace(tzinfo=tz.tzutc()) - - elif res.tzoffset: - aware = naive.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset)) - - elif not 
res.tzname and not res.tzoffset: - # i.e. no timezone information was found. - aware = naive - - elif res.tzname: - # tz-like string was parsed but we don't know what to do - # with it - warnings.warn("tzname {tzname} identified but not understood. " - "Pass `tzinfos` argument in order to correctly " - "return a timezone-aware datetime. In a future " - "version, this will raise an " - "exception.".format(tzname=res.tzname), - category=UnknownTimezoneWarning) - aware = naive - - return aware - - def _build_naive(self, res, default): - repl = {} - for attr in ("year", "month", "day", "hour", - "minute", "second", "microsecond"): - value = getattr(res, attr) - if value is not None: - repl[attr] = value - - if 'day' not in repl: - # If the default day exceeds the last day of the month, fall back - # to the end of the month. - cyear = default.year if res.year is None else res.year - cmonth = default.month if res.month is None else res.month - cday = default.day if res.day is None else res.day - - if cday > monthrange(cyear, cmonth)[1]: - repl['day'] = monthrange(cyear, cmonth)[1] - - naive = default.replace(**repl) - - if res.weekday is not None and not res.day: - naive = naive + relativedelta.relativedelta(weekday=res.weekday) - - return naive - - def _assign_tzname(self, dt, tzname): - if dt.tzname() != tzname: - new_dt = tz.enfold(dt, fold=1) - if new_dt.tzname() == tzname: - return new_dt - - return dt - - def _to_decimal(self, val): - try: - decimal_value = Decimal(val) - # See GH 662, edge case, infinite value should not be converted via `_to_decimal` - if not decimal_value.is_finite(): - raise ValueError("Converted decimal value is infinite or NaN") - except Exception as e: - msg = "Could not convert %s to decimal" % val - six.raise_from(ValueError(msg), e) - else: - return decimal_value - - -DEFAULTPARSER = parser() - - -def parse(timestr, parserinfo=None, **kwargs): - """ - - Parse a string in one of the supported formats, using the - ``parserinfo`` parameters. - - :param timestr: - A string containing a date/time stamp. - - :param parserinfo: - A :class:`parserinfo` object containing parameters for the parser. - If ``None``, the default arguments to the :class:`parserinfo` - constructor are used. - - The ``**kwargs`` parameter takes the following keyword arguments: - - :param default: - The default datetime object, if this is a datetime object and not - ``None``, elements specified in ``timestr`` replace elements in the - default object. - - :param ignoretz: - If set ``True``, time zones in parsed strings are ignored and a naive - :class:`datetime` object is returned. - - :param tzinfos: - Additional time zone names / aliases which may be present in the - string. This argument maps time zone names (and optionally offsets - from those time zones) to time zones. This parameter can be a - dictionary with timezone aliases mapping time zone names to time - zones or a function taking two parameters (``tzname`` and - ``tzoffset``) and returning a time zone. - - The timezones to which the names are mapped can be an integer - offset from UTC in seconds or a :class:`tzinfo` object. - - .. 
doctest:: - :options: +NORMALIZE_WHITESPACE - - >>> from dateutil.parser import parse - >>> from dateutil.tz import gettz - >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")} - >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos) - datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200)) - >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos) - datetime.datetime(2012, 1, 19, 17, 21, - tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago')) - - This parameter is ignored if ``ignoretz`` is set. - - :param dayfirst: - Whether to interpret the first value in an ambiguous 3-integer date - (e.g. 01/05/09) as the day (``True``) or month (``False``). If - ``yearfirst`` is set to ``True``, this distinguishes between YDM and - YMD. If set to ``None``, this value is retrieved from the current - :class:`parserinfo` object (which itself defaults to ``False``). - - :param yearfirst: - Whether to interpret the first value in an ambiguous 3-integer date - (e.g. 01/05/09) as the year. If ``True``, the first number is taken to - be the year, otherwise the last number is taken to be the year. If - this is set to ``None``, the value is retrieved from the current - :class:`parserinfo` object (which itself defaults to ``False``). - - :param fuzzy: - Whether to allow fuzzy parsing, allowing for string like "Today is - January 1, 2047 at 8:21:00AM". - - :param fuzzy_with_tokens: - If ``True``, ``fuzzy`` is automatically set to True, and the parser - will return a tuple where the first element is the parsed - :class:`datetime.datetime` datetimestamp and the second element is - a tuple containing the portions of the string which were ignored: - - .. doctest:: - - >>> from dateutil.parser import parse - >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True) - (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) - - :return: - Returns a :class:`datetime.datetime` object or, if the - ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the - first element being a :class:`datetime.datetime` object, the second - a tuple containing the fuzzy tokens. - - :raises ValueError: - Raised for invalid or unknown string format, if the provided - :class:`tzinfo` is not in a valid format, or if an invalid date - would be created. - - :raises OverflowError: - Raised if the parsed date exceeds the largest valid C integer on - your system. 
- """ - if parserinfo: - return parser(parserinfo).parse(timestr, **kwargs) - else: - return DEFAULTPARSER.parse(timestr, **kwargs) - - -class _tzparser(object): - - class _result(_resultbase): - - __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset", - "start", "end"] - - class _attr(_resultbase): - __slots__ = ["month", "week", "weekday", - "yday", "jyday", "day", "time"] - - def __repr__(self): - return self._repr("") - - def __init__(self): - _resultbase.__init__(self) - self.start = self._attr() - self.end = self._attr() - - def parse(self, tzstr): - res = self._result() - l = [x for x in re.split(r'([,:.]|[a-zA-Z]+|[0-9]+)',tzstr) if x] - used_idxs = list() - try: - - len_l = len(l) - - i = 0 - while i < len_l: - # BRST+3[BRDT[+2]] - j = i - while j < len_l and not [x for x in l[j] - if x in "0123456789:,-+"]: - j += 1 - if j != i: - if not res.stdabbr: - offattr = "stdoffset" - res.stdabbr = "".join(l[i:j]) - else: - offattr = "dstoffset" - res.dstabbr = "".join(l[i:j]) - - for ii in range(j): - used_idxs.append(ii) - i = j - if (i < len_l and (l[i] in ('+', '-') or l[i][0] in - "0123456789")): - if l[i] in ('+', '-'): - # Yes, that's right. See the TZ variable - # documentation. - signal = (1, -1)[l[i] == '+'] - used_idxs.append(i) - i += 1 - else: - signal = -1 - len_li = len(l[i]) - if len_li == 4: - # -0300 - setattr(res, offattr, (int(l[i][:2]) * 3600 + - int(l[i][2:]) * 60) * signal) - elif i + 1 < len_l and l[i + 1] == ':': - # -03:00 - setattr(res, offattr, - (int(l[i]) * 3600 + - int(l[i + 2]) * 60) * signal) - used_idxs.append(i) - i += 2 - elif len_li <= 2: - # -[0]3 - setattr(res, offattr, - int(l[i][:2]) * 3600 * signal) - else: - return None - used_idxs.append(i) - i += 1 - if res.dstabbr: - break - else: - break - - - if i < len_l: - for j in range(i, len_l): - if l[j] == ';': - l[j] = ',' - - assert l[i] == ',' - - i += 1 - - if i >= len_l: - pass - elif (8 <= l.count(',') <= 9 and - not [y for x in l[i:] if x != ',' - for y in x if y not in "0123456789+-"]): - # GMT0BST,3,0,30,3600,10,0,26,7200[,3600] - for x in (res.start, res.end): - x.month = int(l[i]) - used_idxs.append(i) - i += 2 - if l[i] == '-': - value = int(l[i + 1]) * -1 - used_idxs.append(i) - i += 1 - else: - value = int(l[i]) - used_idxs.append(i) - i += 2 - if value: - x.week = value - x.weekday = (int(l[i]) - 1) % 7 - else: - x.day = int(l[i]) - used_idxs.append(i) - i += 2 - x.time = int(l[i]) - used_idxs.append(i) - i += 2 - if i < len_l: - if l[i] in ('-', '+'): - signal = (-1, 1)[l[i] == "+"] - used_idxs.append(i) - i += 1 - else: - signal = 1 - used_idxs.append(i) - res.dstoffset = (res.stdoffset + int(l[i]) * signal) - - # This was a made-up format that is not in normal use - warn(('Parsed time zone "%s"' % tzstr) + - 'is in a non-standard dateutil-specific format, which ' + - 'is now deprecated; support for parsing this format ' + - 'will be removed in future versions. 
It is recommended ' + - 'that you switch to a standard format like the GNU ' + - 'TZ variable format.', tz.DeprecatedTzFormatWarning) - elif (l.count(',') == 2 and l[i:].count('/') <= 2 and - not [y for x in l[i:] if x not in (',', '/', 'J', 'M', - '.', '-', ':') - for y in x if y not in "0123456789"]): - for x in (res.start, res.end): - if l[i] == 'J': - # non-leap year day (1 based) - used_idxs.append(i) - i += 1 - x.jyday = int(l[i]) - elif l[i] == 'M': - # month[-.]week[-.]weekday - used_idxs.append(i) - i += 1 - x.month = int(l[i]) - used_idxs.append(i) - i += 1 - assert l[i] in ('-', '.') - used_idxs.append(i) - i += 1 - x.week = int(l[i]) - if x.week == 5: - x.week = -1 - used_idxs.append(i) - i += 1 - assert l[i] in ('-', '.') - used_idxs.append(i) - i += 1 - x.weekday = (int(l[i]) - 1) % 7 - else: - # year day (zero based) - x.yday = int(l[i]) + 1 - - used_idxs.append(i) - i += 1 - - if i < len_l and l[i] == '/': - used_idxs.append(i) - i += 1 - # start time - len_li = len(l[i]) - if len_li == 4: - # -0300 - x.time = (int(l[i][:2]) * 3600 + - int(l[i][2:]) * 60) - elif i + 1 < len_l and l[i + 1] == ':': - # -03:00 - x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60 - used_idxs.append(i) - i += 2 - if i + 1 < len_l and l[i + 1] == ':': - used_idxs.append(i) - i += 2 - x.time += int(l[i]) - elif len_li <= 2: - # -[0]3 - x.time = (int(l[i][:2]) * 3600) - else: - return None - used_idxs.append(i) - i += 1 - - assert i == len_l or l[i] == ',' - - i += 1 - - assert i >= len_l - - except (IndexError, ValueError, AssertionError): - return None - - unused_idxs = set(range(len_l)).difference(used_idxs) - res.any_unused_tokens = not {l[n] for n in unused_idxs}.issubset({",",":"}) - return res - - -DEFAULTTZPARSER = _tzparser() - - -def _parsetz(tzstr): - return DEFAULTTZPARSER.parse(tzstr) - -class UnknownTimezoneWarning(RuntimeWarning): - """Raised when the parser finds a timezone it cannot parse into a tzinfo""" -# vim:ts=4:sw=4:et diff --git a/flo-token-explorer/lib/python3.6/site-packages/dateutil/parser/isoparser.py b/flo-token-explorer/lib/python3.6/site-packages/dateutil/parser/isoparser.py deleted file mode 100644 index e3cf6d8..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/dateutil/parser/isoparser.py +++ /dev/null @@ -1,411 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module offers a parser for ISO-8601 strings - -It is intended to support all valid date, time and datetime formats per the -ISO-8601 specification. - -..versionadded:: 2.7.0 -""" -from datetime import datetime, timedelta, time, date -import calendar -from dateutil import tz - -from functools import wraps - -import re -import six - -__all__ = ["isoparse", "isoparser"] - - -def _takes_ascii(f): - @wraps(f) - def func(self, str_in, *args, **kwargs): - # If it's a stream, read the whole thing - str_in = getattr(str_in, 'read', lambda: str_in)() - - # If it's unicode, turn it into bytes, since ISO-8601 only covers ASCII - if isinstance(str_in, six.text_type): - # ASCII is the same in UTF-8 - try: - str_in = str_in.encode('ascii') - except UnicodeEncodeError as e: - msg = 'ISO-8601 strings should contain only ASCII characters' - six.raise_from(ValueError(msg), e) - - return f(self, str_in, *args, **kwargs) - - return func - - -class isoparser(object): - def __init__(self, sep=None): - """ - :param sep: - A single character that separates date and time portions. If - ``None``, the parser will accept any single character. - For strict ISO-8601 adherence, pass ``'T'``. 
- """ - if sep is not None: - if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'): - raise ValueError('Separator must be a single, non-numeric ' + - 'ASCII character') - - sep = sep.encode('ascii') - - self._sep = sep - - @_takes_ascii - def isoparse(self, dt_str): - """ - Parse an ISO-8601 datetime string into a :class:`datetime.datetime`. - - An ISO-8601 datetime string consists of a date portion, followed - optionally by a time portion - the date and time portions are separated - by a single character separator, which is ``T`` in the official - standard. Incomplete date formats (such as ``YYYY-MM``) may *not* be - combined with a time portion. - - Supported date formats are: - - Common: - - - ``YYYY`` - - ``YYYY-MM`` or ``YYYYMM`` - - ``YYYY-MM-DD`` or ``YYYYMMDD`` - - Uncommon: - - - ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 0) - - ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day - - The ISO week and day numbering follows the same logic as - :func:`datetime.date.isocalendar`. - - Supported time formats are: - - - ``hh`` - - ``hh:mm`` or ``hhmm`` - - ``hh:mm:ss`` or ``hhmmss`` - - ``hh:mm:ss.ssssss`` (Up to 6 sub-second digits) - - Midnight is a special case for `hh`, as the standard supports both - 00:00 and 24:00 as a representation. The decimal separator can be - either a dot or a comma. - - - .. caution:: - - Support for fractional components other than seconds is part of the - ISO-8601 standard, but is not currently implemented in this parser. - - Supported time zone offset formats are: - - - `Z` (UTC) - - `±HH:MM` - - `±HHMM` - - `±HH` - - Offsets will be represented as :class:`dateutil.tz.tzoffset` objects, - with the exception of UTC, which will be represented as - :class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such - as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`. - - :param dt_str: - A string or stream containing only an ISO-8601 datetime string - - :return: - Returns a :class:`datetime.datetime` representing the string. - Unspecified components default to their lowest value. - - .. warning:: - - As of version 2.7.0, the strictness of the parser should not be - considered a stable part of the contract. Any valid ISO-8601 string - that parses correctly with the default settings will continue to - parse correctly in future versions, but invalid strings that - currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not - guaranteed to continue failing in future versions if they encode - a valid date. - - .. versionadded:: 2.7.0 - """ - components, pos = self._parse_isodate(dt_str) - - if len(dt_str) > pos: - if self._sep is None or dt_str[pos:pos + 1] == self._sep: - components += self._parse_isotime(dt_str[pos + 1:]) - else: - raise ValueError('String contains unknown ISO components') - - if len(components) > 3 and components[3] == 24: - components[3] = 0 - return datetime(*components) + timedelta(days=1) - - return datetime(*components) - - @_takes_ascii - def parse_isodate(self, datestr): - """ - Parse the date portion of an ISO string. - - :param datestr: - The string portion of an ISO string, without a separator - - :return: - Returns a :class:`datetime.date` object - """ - components, pos = self._parse_isodate(datestr) - if pos < len(datestr): - raise ValueError('String contains unknown ISO ' + - 'components: {}'.format(datestr)) - return date(*components) - - @_takes_ascii - def parse_isotime(self, timestr): - """ - Parse the time portion of an ISO string. 
- - :param timestr: - The time portion of an ISO string, without a separator - - :return: - Returns a :class:`datetime.time` object - """ - components = self._parse_isotime(timestr) - if components[0] == 24: - components[0] = 0 - return time(*components) - - @_takes_ascii - def parse_tzstr(self, tzstr, zero_as_utc=True): - """ - Parse a valid ISO time zone string. - - See :func:`isoparser.isoparse` for details on supported formats. - - :param tzstr: - A string representing an ISO time zone offset - - :param zero_as_utc: - Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones - - :return: - Returns :class:`dateutil.tz.tzoffset` for offsets and - :class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is - specified) offsets equivalent to UTC. - """ - return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc) - - # Constants - _DATE_SEP = b'-' - _TIME_SEP = b':' - _FRACTION_REGEX = re.compile(b'[\\.,]([0-9]+)') - - def _parse_isodate(self, dt_str): - try: - return self._parse_isodate_common(dt_str) - except ValueError: - return self._parse_isodate_uncommon(dt_str) - - def _parse_isodate_common(self, dt_str): - len_str = len(dt_str) - components = [1, 1, 1] - - if len_str < 4: - raise ValueError('ISO string too short') - - # Year - components[0] = int(dt_str[0:4]) - pos = 4 - if pos >= len_str: - return components, pos - - has_sep = dt_str[pos:pos + 1] == self._DATE_SEP - if has_sep: - pos += 1 - - # Month - if len_str - pos < 2: - raise ValueError('Invalid common month') - - components[1] = int(dt_str[pos:pos + 2]) - pos += 2 - - if pos >= len_str: - if has_sep: - return components, pos - else: - raise ValueError('Invalid ISO format') - - if has_sep: - if dt_str[pos:pos + 1] != self._DATE_SEP: - raise ValueError('Invalid separator in ISO string') - pos += 1 - - # Day - if len_str - pos < 2: - raise ValueError('Invalid common day') - components[2] = int(dt_str[pos:pos + 2]) - return components, pos + 2 - - def _parse_isodate_uncommon(self, dt_str): - if len(dt_str) < 4: - raise ValueError('ISO string too short') - - # All ISO formats start with the year - year = int(dt_str[0:4]) - - has_sep = dt_str[4:5] == self._DATE_SEP - - pos = 4 + has_sep # Skip '-' if it's there - if dt_str[pos:pos + 1] == b'W': - # YYYY-?Www-?D? - pos += 1 - weekno = int(dt_str[pos:pos + 2]) - pos += 2 - - dayno = 1 - if len(dt_str) > pos: - if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep: - raise ValueError('Inconsistent use of dash separator') - - pos += has_sep - - dayno = int(dt_str[pos:pos + 1]) - pos += 1 - - base_date = self._calculate_weekdate(year, weekno, dayno) - else: - # YYYYDDD or YYYY-DDD - if len(dt_str) - pos < 3: - raise ValueError('Invalid ordinal day') - - ordinal_day = int(dt_str[pos:pos + 3]) - pos += 3 - - if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)): - raise ValueError('Invalid ordinal day' + - ' {} for year {}'.format(ordinal_day, year)) - - base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1) - - components = [base_date.year, base_date.month, base_date.day] - return components, pos - - def _calculate_weekdate(self, year, week, day): - """ - Calculate the day of corresponding to the ISO year-week-day calendar. - - This function is effectively the inverse of - :func:`datetime.date.isocalendar`. 
- - :param year: - The year in the ISO calendar - - :param week: - The week in the ISO calendar - range is [1, 53] - - :param day: - The day in the ISO calendar - range is [1 (MON), 7 (SUN)] - - :return: - Returns a :class:`datetime.date` - """ - if not 0 < week < 54: - raise ValueError('Invalid week: {}'.format(week)) - - if not 0 < day < 8: # Range is 1-7 - raise ValueError('Invalid weekday: {}'.format(day)) - - # Get week 1 for the specific year: - jan_4 = date(year, 1, 4) # Week 1 always has January 4th in it - week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1) - - # Now add the specific number of weeks and days to get what we want - week_offset = (week - 1) * 7 + (day - 1) - return week_1 + timedelta(days=week_offset) - - def _parse_isotime(self, timestr): - len_str = len(timestr) - components = [0, 0, 0, 0, None] - pos = 0 - comp = -1 - - if len(timestr) < 2: - raise ValueError('ISO time too short') - - has_sep = len_str >= 3 and timestr[2:3] == self._TIME_SEP - - while pos < len_str and comp < 5: - comp += 1 - - if timestr[pos:pos + 1] in b'-+Zz': - # Detect time zone boundary - components[-1] = self._parse_tzstr(timestr[pos:]) - pos = len_str - break - - if comp < 3: - # Hour, minute, second - components[comp] = int(timestr[pos:pos + 2]) - pos += 2 - if (has_sep and pos < len_str and - timestr[pos:pos + 1] == self._TIME_SEP): - pos += 1 - - if comp == 3: - # Fraction of a second - frac = self._FRACTION_REGEX.match(timestr[pos:]) - if not frac: - continue - - us_str = frac.group(1)[:6] # Truncate to microseconds - components[comp] = int(us_str) * 10**(6 - len(us_str)) - pos += len(frac.group()) - - if pos < len_str: - raise ValueError('Unused components in ISO string') - - if components[0] == 24: - # Standard supports 00:00 and 24:00 as representations of midnight - if any(component != 0 for component in components[1:4]): - raise ValueError('Hour may only be 24 at 24:00:00.000') - - return components - - def _parse_tzstr(self, tzstr, zero_as_utc=True): - if tzstr == b'Z' or tzstr == b'z': - return tz.tzutc() - - if len(tzstr) not in {3, 5, 6}: - raise ValueError('Time zone offset must be 1, 3, 5 or 6 characters') - - if tzstr[0:1] == b'-': - mult = -1 - elif tzstr[0:1] == b'+': - mult = 1 - else: - raise ValueError('Time zone offset requires sign') - - hours = int(tzstr[1:3]) - if len(tzstr) == 3: - minutes = 0 - else: - minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP else 3):]) - - if zero_as_utc and hours == 0 and minutes == 0: - return tz.tzutc() - else: - if minutes > 59: - raise ValueError('Invalid minutes in time zone offset') - - if hours > 23: - raise ValueError('Invalid hours in time zone offset') - - return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60) - - -DEFAULT_ISOPARSER = isoparser() -isoparse = DEFAULT_ISOPARSER.isoparse diff --git a/flo-token-explorer/lib/python3.6/site-packages/dateutil/relativedelta.py b/flo-token-explorer/lib/python3.6/site-packages/dateutil/relativedelta.py deleted file mode 100644 index c65c66e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/dateutil/relativedelta.py +++ /dev/null @@ -1,599 +0,0 @@ -# -*- coding: utf-8 -*- -import datetime -import calendar - -import operator -from math import copysign - -from six import integer_types -from warnings import warn - -from ._common import weekday - -MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) - -__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"] - - -class relativedelta(object): - """ - The relativedelta type is 
designed to be applied to an existing datetime and - can replace specific components of that datetime, or represents an interval - of time. - - It is based on the specification of the excellent work done by M.-A. Lemburg - in his - `mx.DateTime `_ extension. - However, notice that this type does *NOT* implement the same algorithm as - his work. Do *NOT* expect it to behave like mx.DateTime's counterpart. - - There are two different ways to build a relativedelta instance. The - first one is passing it two date/datetime classes:: - - relativedelta(datetime1, datetime2) - - The second one is passing it any number of the following keyword arguments:: - - relativedelta(arg1=x,arg2=y,arg3=z...) - - year, month, day, hour, minute, second, microsecond: - Absolute information (argument is singular); adding or subtracting a - relativedelta with absolute information does not perform an arithmetic - operation, but rather REPLACES the corresponding value in the - original datetime with the value(s) in relativedelta. - - years, months, weeks, days, hours, minutes, seconds, microseconds: - Relative information, may be negative (argument is plural); adding - or subtracting a relativedelta with relative information performs - the corresponding aritmetic operation on the original datetime value - with the information in the relativedelta. - - weekday: - One of the weekday instances (MO, TU, etc) available in the - relativedelta module. These instances may receive a parameter N, - specifying the Nth weekday, which could be positive or negative - (like MO(+1) or MO(-2)). Not specifying it is the same as specifying - +1. You can also use an integer, where 0=MO. This argument is always - relative e.g. if the calculated date is already Monday, using MO(1) - or MO(-1) won't change the day. To effectively make it absolute, use - it in combination with the day argument (e.g. day=1, MO(1) for first - Monday of the month). - - leapdays: - Will add given days to the date found, if year is a leap - year, and the date found is post 28 of february. - - yearday, nlyearday: - Set the yearday or the non-leap year day (jump leap days). - These are converted to day/month/leapdays information. - - There are relative and absolute forms of the keyword - arguments. The plural is relative, and the singular is - absolute. For each argument in the order below, the absolute form - is applied first (by setting each attribute to that value) and - then the relative form (by adding the value to the attribute). - - The order of attributes considered when this relativedelta is - added to a datetime is: - - 1. Year - 2. Month - 3. Day - 4. Hours - 5. Minutes - 6. Seconds - 7. Microseconds - - Finally, weekday is applied, using the rule described above. - - For example - - >>> from datetime import datetime - >>> from dateutil.relativedelta import relativedelta, MO - >>> dt = datetime(2018, 4, 9, 13, 37, 0) - >>> delta = relativedelta(hours=25, day=1, weekday=MO(1)) - >>> dt + delta - datetime.datetime(2018, 4, 2, 14, 37) - - First, the day is set to 1 (the first of the month), then 25 hours - are added, to get to the 2nd day and 14th hour, finally the - weekday is applied, but since the 2nd is already a Monday there is - no effect. 
- - """ - - def __init__(self, dt1=None, dt2=None, - years=0, months=0, days=0, leapdays=0, weeks=0, - hours=0, minutes=0, seconds=0, microseconds=0, - year=None, month=None, day=None, weekday=None, - yearday=None, nlyearday=None, - hour=None, minute=None, second=None, microsecond=None): - - if dt1 and dt2: - # datetime is a subclass of date. So both must be date - if not (isinstance(dt1, datetime.date) and - isinstance(dt2, datetime.date)): - raise TypeError("relativedelta only diffs datetime/date") - - # We allow two dates, or two datetimes, so we coerce them to be - # of the same type - if (isinstance(dt1, datetime.datetime) != - isinstance(dt2, datetime.datetime)): - if not isinstance(dt1, datetime.datetime): - dt1 = datetime.datetime.fromordinal(dt1.toordinal()) - elif not isinstance(dt2, datetime.datetime): - dt2 = datetime.datetime.fromordinal(dt2.toordinal()) - - self.years = 0 - self.months = 0 - self.days = 0 - self.leapdays = 0 - self.hours = 0 - self.minutes = 0 - self.seconds = 0 - self.microseconds = 0 - self.year = None - self.month = None - self.day = None - self.weekday = None - self.hour = None - self.minute = None - self.second = None - self.microsecond = None - self._has_time = 0 - - # Get year / month delta between the two - months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month) - self._set_months(months) - - # Remove the year/month delta so the timedelta is just well-defined - # time units (seconds, days and microseconds) - dtm = self.__radd__(dt2) - - # If we've overshot our target, make an adjustment - if dt1 < dt2: - compare = operator.gt - increment = 1 - else: - compare = operator.lt - increment = -1 - - while compare(dt1, dtm): - months += increment - self._set_months(months) - dtm = self.__radd__(dt2) - - # Get the timedelta between the "months-adjusted" date and dt1 - delta = dt1 - dtm - self.seconds = delta.seconds + delta.days * 86400 - self.microseconds = delta.microseconds - else: - # Check for non-integer values in integer-only quantities - if any(x is not None and x != int(x) for x in (years, months)): - raise ValueError("Non-integer years and months are " - "ambiguous and not currently supported.") - - # Relative information - self.years = int(years) - self.months = int(months) - self.days = days + weeks * 7 - self.leapdays = leapdays - self.hours = hours - self.minutes = minutes - self.seconds = seconds - self.microseconds = microseconds - - # Absolute information - self.year = year - self.month = month - self.day = day - self.hour = hour - self.minute = minute - self.second = second - self.microsecond = microsecond - - if any(x is not None and int(x) != x - for x in (year, month, day, hour, - minute, second, microsecond)): - # For now we'll deprecate floats - later it'll be an error. - warn("Non-integer value passed as absolute information. 
" + - "This is not a well-defined condition and will raise " + - "errors in future versions.", DeprecationWarning) - - if isinstance(weekday, integer_types): - self.weekday = weekdays[weekday] - else: - self.weekday = weekday - - yday = 0 - if nlyearday: - yday = nlyearday - elif yearday: - yday = yearday - if yearday > 59: - self.leapdays = -1 - if yday: - ydayidx = [31, 59, 90, 120, 151, 181, 212, - 243, 273, 304, 334, 366] - for idx, ydays in enumerate(ydayidx): - if yday <= ydays: - self.month = idx+1 - if idx == 0: - self.day = yday - else: - self.day = yday-ydayidx[idx-1] - break - else: - raise ValueError("invalid year day (%d)" % yday) - - self._fix() - - def _fix(self): - if abs(self.microseconds) > 999999: - s = _sign(self.microseconds) - div, mod = divmod(self.microseconds * s, 1000000) - self.microseconds = mod * s - self.seconds += div * s - if abs(self.seconds) > 59: - s = _sign(self.seconds) - div, mod = divmod(self.seconds * s, 60) - self.seconds = mod * s - self.minutes += div * s - if abs(self.minutes) > 59: - s = _sign(self.minutes) - div, mod = divmod(self.minutes * s, 60) - self.minutes = mod * s - self.hours += div * s - if abs(self.hours) > 23: - s = _sign(self.hours) - div, mod = divmod(self.hours * s, 24) - self.hours = mod * s - self.days += div * s - if abs(self.months) > 11: - s = _sign(self.months) - div, mod = divmod(self.months * s, 12) - self.months = mod * s - self.years += div * s - if (self.hours or self.minutes or self.seconds or self.microseconds - or self.hour is not None or self.minute is not None or - self.second is not None or self.microsecond is not None): - self._has_time = 1 - else: - self._has_time = 0 - - @property - def weeks(self): - return int(self.days / 7.0) - - @weeks.setter - def weeks(self, value): - self.days = self.days - (self.weeks * 7) + value * 7 - - def _set_months(self, months): - self.months = months - if abs(self.months) > 11: - s = _sign(self.months) - div, mod = divmod(self.months * s, 12) - self.months = mod * s - self.years = div * s - else: - self.years = 0 - - def normalized(self): - """ - Return a version of this object represented entirely using integer - values for the relative attributes. - - >>> relativedelta(days=1.5, hours=2).normalized() - relativedelta(days=+1, hours=+14) - - :return: - Returns a :class:`dateutil.relativedelta.relativedelta` object. 
- """ - # Cascade remainders down (rounding each to roughly nearest microsecond) - days = int(self.days) - - hours_f = round(self.hours + 24 * (self.days - days), 11) - hours = int(hours_f) - - minutes_f = round(self.minutes + 60 * (hours_f - hours), 10) - minutes = int(minutes_f) - - seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8) - seconds = int(seconds_f) - - microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds)) - - # Constructor carries overflow back up with call to _fix() - return self.__class__(years=self.years, months=self.months, - days=days, hours=hours, minutes=minutes, - seconds=seconds, microseconds=microseconds, - leapdays=self.leapdays, year=self.year, - month=self.month, day=self.day, - weekday=self.weekday, hour=self.hour, - minute=self.minute, second=self.second, - microsecond=self.microsecond) - - def __add__(self, other): - if isinstance(other, relativedelta): - return self.__class__(years=other.years + self.years, - months=other.months + self.months, - days=other.days + self.days, - hours=other.hours + self.hours, - minutes=other.minutes + self.minutes, - seconds=other.seconds + self.seconds, - microseconds=(other.microseconds + - self.microseconds), - leapdays=other.leapdays or self.leapdays, - year=(other.year if other.year is not None - else self.year), - month=(other.month if other.month is not None - else self.month), - day=(other.day if other.day is not None - else self.day), - weekday=(other.weekday if other.weekday is not None - else self.weekday), - hour=(other.hour if other.hour is not None - else self.hour), - minute=(other.minute if other.minute is not None - else self.minute), - second=(other.second if other.second is not None - else self.second), - microsecond=(other.microsecond if other.microsecond - is not None else - self.microsecond)) - if isinstance(other, datetime.timedelta): - return self.__class__(years=self.years, - months=self.months, - days=self.days + other.days, - hours=self.hours, - minutes=self.minutes, - seconds=self.seconds + other.seconds, - microseconds=self.microseconds + other.microseconds, - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - if not isinstance(other, datetime.date): - return NotImplemented - elif self._has_time and not isinstance(other, datetime.datetime): - other = datetime.datetime.fromordinal(other.toordinal()) - year = (self.year or other.year)+self.years - month = self.month or other.month - if self.months: - assert 1 <= abs(self.months) <= 12 - month += self.months - if month > 12: - year += 1 - month -= 12 - elif month < 1: - year -= 1 - month += 12 - day = min(calendar.monthrange(year, month)[1], - self.day or other.day) - repl = {"year": year, "month": month, "day": day} - for attr in ["hour", "minute", "second", "microsecond"]: - value = getattr(self, attr) - if value is not None: - repl[attr] = value - days = self.days - if self.leapdays and month > 2 and calendar.isleap(year): - days += self.leapdays - ret = (other.replace(**repl) - + datetime.timedelta(days=days, - hours=self.hours, - minutes=self.minutes, - seconds=self.seconds, - microseconds=self.microseconds)) - if self.weekday: - weekday, nth = self.weekday.weekday, self.weekday.n or 1 - jumpdays = (abs(nth) - 1) * 7 - if nth > 0: - jumpdays += (7 - ret.weekday() + weekday) % 7 - else: - jumpdays += (ret.weekday() - weekday) % 7 - jumpdays *= -1 - ret += 
datetime.timedelta(days=jumpdays) - return ret - - def __radd__(self, other): - return self.__add__(other) - - def __rsub__(self, other): - return self.__neg__().__radd__(other) - - def __sub__(self, other): - if not isinstance(other, relativedelta): - return NotImplemented # In case the other object defines __rsub__ - return self.__class__(years=self.years - other.years, - months=self.months - other.months, - days=self.days - other.days, - hours=self.hours - other.hours, - minutes=self.minutes - other.minutes, - seconds=self.seconds - other.seconds, - microseconds=self.microseconds - other.microseconds, - leapdays=self.leapdays or other.leapdays, - year=(self.year if self.year is not None - else other.year), - month=(self.month if self.month is not None else - other.month), - day=(self.day if self.day is not None else - other.day), - weekday=(self.weekday if self.weekday is not None else - other.weekday), - hour=(self.hour if self.hour is not None else - other.hour), - minute=(self.minute if self.minute is not None else - other.minute), - second=(self.second if self.second is not None else - other.second), - microsecond=(self.microsecond if self.microsecond - is not None else - other.microsecond)) - - def __abs__(self): - return self.__class__(years=abs(self.years), - months=abs(self.months), - days=abs(self.days), - hours=abs(self.hours), - minutes=abs(self.minutes), - seconds=abs(self.seconds), - microseconds=abs(self.microseconds), - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - - def __neg__(self): - return self.__class__(years=-self.years, - months=-self.months, - days=-self.days, - hours=-self.hours, - minutes=-self.minutes, - seconds=-self.seconds, - microseconds=-self.microseconds, - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - - def __bool__(self): - return not (not self.years and - not self.months and - not self.days and - not self.hours and - not self.minutes and - not self.seconds and - not self.microseconds and - not self.leapdays and - self.year is None and - self.month is None and - self.day is None and - self.weekday is None and - self.hour is None and - self.minute is None and - self.second is None and - self.microsecond is None) - # Compatibility with Python 2.x - __nonzero__ = __bool__ - - def __mul__(self, other): - try: - f = float(other) - except TypeError: - return NotImplemented - - return self.__class__(years=int(self.years * f), - months=int(self.months * f), - days=int(self.days * f), - hours=int(self.hours * f), - minutes=int(self.minutes * f), - seconds=int(self.seconds * f), - microseconds=int(self.microseconds * f), - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - - __rmul__ = __mul__ - - def __eq__(self, other): - if not isinstance(other, relativedelta): - return NotImplemented - if self.weekday or other.weekday: - if not self.weekday or not other.weekday: - return False - if self.weekday.weekday != other.weekday.weekday: - return False - n1, n2 = self.weekday.n, other.weekday.n - if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)): - return False - return (self.years == other.years and - 
self.months == other.months and - self.days == other.days and - self.hours == other.hours and - self.minutes == other.minutes and - self.seconds == other.seconds and - self.microseconds == other.microseconds and - self.leapdays == other.leapdays and - self.year == other.year and - self.month == other.month and - self.day == other.day and - self.hour == other.hour and - self.minute == other.minute and - self.second == other.second and - self.microsecond == other.microsecond) - - def __hash__(self): - return hash(( - self.weekday, - self.years, - self.months, - self.days, - self.hours, - self.minutes, - self.seconds, - self.microseconds, - self.leapdays, - self.year, - self.month, - self.day, - self.hour, - self.minute, - self.second, - self.microsecond, - )) - - def __ne__(self, other): - return not self.__eq__(other) - - def __div__(self, other): - try: - reciprocal = 1 / float(other) - except TypeError: - return NotImplemented - - return self.__mul__(reciprocal) - - __truediv__ = __div__ - - def __repr__(self): - l = [] - for attr in ["years", "months", "days", "leapdays", - "hours", "minutes", "seconds", "microseconds"]: - value = getattr(self, attr) - if value: - l.append("{attr}={value:+g}".format(attr=attr, value=value)) - for attr in ["year", "month", "day", "weekday", - "hour", "minute", "second", "microsecond"]: - value = getattr(self, attr) - if value is not None: - l.append("{attr}={value}".format(attr=attr, value=repr(value))) - return "{classname}({attrs})".format(classname=self.__class__.__name__, - attrs=", ".join(l)) - - -def _sign(x): - return int(copysign(1, x)) - -# vim:ts=4:sw=4:et diff --git a/flo-token-explorer/lib/python3.6/site-packages/dateutil/rrule.py b/flo-token-explorer/lib/python3.6/site-packages/dateutil/rrule.py deleted file mode 100644 index 20a0c4a..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/dateutil/rrule.py +++ /dev/null @@ -1,1736 +0,0 @@ -# -*- coding: utf-8 -*- -""" -The rrule module offers a small, complete, and very fast, implementation of -the recurrence rules documented in the -`iCalendar RFC `_, -including support for caching of results. -""" -import itertools -import datetime -import calendar -import re -import sys - -try: - from math import gcd -except ImportError: - from fractions import gcd - -from six import advance_iterator, integer_types -from six.moves import _thread, range -import heapq - -from ._common import weekday as weekdaybase -from .tz import tzutc, tzlocal - -# For warning about deprecation of until and count -from warnings import warn - -__all__ = ["rrule", "rruleset", "rrulestr", - "YEARLY", "MONTHLY", "WEEKLY", "DAILY", - "HOURLY", "MINUTELY", "SECONDLY", - "MO", "TU", "WE", "TH", "FR", "SA", "SU"] - -# Every mask is 7 days longer to handle cross-year weekly periods. 
-M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 +
-                 [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
-M365MASK = list(M366MASK)
-M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32))
-MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
-MDAY365MASK = list(MDAY366MASK)
-M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0))
-NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
-NMDAY365MASK = list(NMDAY366MASK)
-M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366)
-M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365)
-WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55
-del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
-MDAY365MASK = tuple(MDAY365MASK)
-M365MASK = tuple(M365MASK)
-
-FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECONDLY']
-
-(YEARLY,
- MONTHLY,
- WEEKLY,
- DAILY,
- HOURLY,
- MINUTELY,
- SECONDLY) = list(range(7))
-
-# Imported on demand.
-easter = None
-parser = None
-
-
-class weekday(weekdaybase):
-    """
-    This version of weekday does not allow n = 0.
-    """
-    def __init__(self, wkday, n=None):
-        if n == 0:
-            raise ValueError("Can't create weekday with n==0")
-
-        super(weekday, self).__init__(wkday, n)
-
-
-MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))
-
-
-def _invalidates_cache(f):
-    """
-    Decorator for rruleset methods which may invalidate the
-    cached length.
-    """
-    def inner_func(self, *args, **kwargs):
-        rv = f(self, *args, **kwargs)
-        self._invalidate_cache()
-        return rv
-
-    return inner_func
-
-
-class rrulebase(object):
-    def __init__(self, cache=False):
-        if cache:
-            self._cache = []
-            self._cache_lock = _thread.allocate_lock()
-            self._invalidate_cache()
-        else:
-            self._cache = None
-            self._cache_complete = False
-        self._len = None
-
-    def __iter__(self):
-        if self._cache_complete:
-            return iter(self._cache)
-        elif self._cache is None:
-            return self._iter()
-        else:
-            return self._iter_cached()
-
-    def _invalidate_cache(self):
-        if self._cache is not None:
-            self._cache = []
-            self._cache_complete = False
-            self._cache_gen = self._iter()
-
-            if self._cache_lock.locked():
-                self._cache_lock.release()
-
-        self._len = None
-
-    def _iter_cached(self):
-        i = 0
-        gen = self._cache_gen
-        cache = self._cache
-        acquire = self._cache_lock.acquire
-        release = self._cache_lock.release
-        while gen:
-            if i == len(cache):
-                acquire()
-                if self._cache_complete:
-                    break
-                try:
-                    for j in range(10):
-                        cache.append(advance_iterator(gen))
-                except StopIteration:
-                    self._cache_gen = gen = None
-                    self._cache_complete = True
-                    break
-                release()
-            yield cache[i]
-            i += 1
-        while i < self._len:
-            yield cache[i]
-            i += 1
-
-    def __getitem__(self, item):
-        if self._cache_complete:
-            return self._cache[item]
-        elif isinstance(item, slice):
-            if item.step and item.step < 0:
-                return list(iter(self))[item]
-            else:
-                return list(itertools.islice(self,
-                                             item.start or 0,
-                                             item.stop or sys.maxsize,
-                                             item.step or 1))
-        elif item >= 0:
-            gen = iter(self)
-            try:
-                for i in range(item+1):
-                    res = advance_iterator(gen)
-            except StopIteration:
-                raise IndexError
-            return res
-        else:
-            return list(iter(self))[item]
-
-    def __contains__(self, item):
-        if self._cache_complete:
-            return item in self._cache
-        else:
-            for i in self:
-                if i == item:
-                    return True
-                elif i > item:
-                    return False
-        return False
-
-    # __len__() introduces a large performance penalty.
-    def count(self):
-        """ Returns the number of recurrences in this set. It will have to go
-        through the whole recurrence, if this hasn't been done before. """
-        if self._len is None:
-            for x in self:
-                pass
-        return self._len
-
-    def before(self, dt, inc=False):
-        """ Returns the last recurrence before the given datetime instance. The
-        inc keyword defines what happens if dt is an occurrence. With
-        inc=True, if dt itself is an occurrence, it will be returned. """
-        if self._cache_complete:
-            gen = self._cache
-        else:
-            gen = self
-        last = None
-        if inc:
-            for i in gen:
-                if i > dt:
-                    break
-                last = i
-        else:
-            for i in gen:
-                if i >= dt:
-                    break
-                last = i
-        return last
-
-    def after(self, dt, inc=False):
-        """ Returns the first recurrence after the given datetime instance. The
-        inc keyword defines what happens if dt is an occurrence. With
-        inc=True, if dt itself is an occurrence, it will be returned. """
-        if self._cache_complete:
-            gen = self._cache
-        else:
-            gen = self
-        if inc:
-            for i in gen:
-                if i >= dt:
-                    return i
-        else:
-            for i in gen:
-                if i > dt:
-                    return i
-        return None
-
-    def xafter(self, dt, count=None, inc=False):
-        """
-        Generator which yields up to `count` recurrences after the given
-        datetime instance, equivalent to `after`.
-
-        :param dt:
-            The datetime at which to start generating recurrences.
-
-        :param count:
-            The maximum number of recurrences to generate. If `None` (default),
-            dates are generated until the recurrence rule is exhausted.
-
-        :param inc:
-            If `dt` is an instance of the rule and `inc` is `True`, it is
-            included in the output.
-
-        :yields: Yields a sequence of `datetime` objects.
-        """
-
-        if self._cache_complete:
-            gen = self._cache
-        else:
-            gen = self
-
-        # Select the comparison function
-        if inc:
-            comp = lambda dc, dtc: dc >= dtc
-        else:
-            comp = lambda dc, dtc: dc > dtc
-
-        # Generate dates
-        n = 0
-        for d in gen:
-            if comp(d, dt):
-                if count is not None:
-                    n += 1
-                    if n > count:
-                        break
-
-                yield d
-
-    def between(self, after, before, inc=False, count=1):
-        """ Returns all the occurrences of the rrule between after and before.
-        The inc keyword defines what happens if after and/or before are
-        themselves occurrences. With inc=True, they will be included in the
-        list, if they are found in the recurrence set. """
-        if self._cache_complete:
-            gen = self._cache
-        else:
-            gen = self
-        started = False
-        l = []
-        if inc:
-            for i in gen:
-                if i > before:
-                    break
-                elif not started:
-                    if i >= after:
-                        started = True
-                        l.append(i)
-                else:
-                    l.append(i)
-        else:
-            for i in gen:
-                if i >= before:
-                    break
-                elif not started:
-                    if i > after:
-                        started = True
-                        l.append(i)
-                else:
-                    l.append(i)
-        return l
-
-
-class rrule(rrulebase):
-    """
-    That's the base of the rrule operation. It accepts all the keywords
-    defined in the RFC as its constructor parameters (except byday,
-    which was renamed to byweekday) and more. The constructor prototype is::
-
-        rrule(freq)
-
-    Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
-    or SECONDLY.
-
-    .. note::
-        Per RFC section 3.3.10, recurrence instances falling on invalid dates
-        and times are ignored rather than coerced:
-
-            Recurrence rules may generate recurrence instances with an invalid
-            date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM
-            on a day where the local time is moved forward by an hour at 1:00
-            AM). Such recurrence instances MUST be ignored and MUST NOT be
-            counted as part of the recurrence set.
-
-        This can lead to possibly surprising behavior when, for example, the
-        start date occurs at the end of the month:
-
-        >>> from dateutil.rrule import rrule, MONTHLY
-        >>> from datetime import datetime
-        >>> start_date = datetime(2014, 12, 31)
-        >>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date))
-        ... # doctest: +NORMALIZE_WHITESPACE
-        [datetime.datetime(2014, 12, 31, 0, 0),
-         datetime.datetime(2015, 1, 31, 0, 0),
-         datetime.datetime(2015, 3, 31, 0, 0),
-         datetime.datetime(2015, 5, 31, 0, 0)]
-
-    Additionally, it supports the following keyword arguments:
-
-    :param dtstart:
-        The recurrence start. Besides being the base for the recurrence,
-        missing parameters in the final recurrence instances will also be
-        extracted from this date. If not given, datetime.now() will be used
-        instead.
-    :param interval:
-        The interval between each freq iteration. For example, when using
-        YEARLY, an interval of 2 means once every two years, but with HOURLY,
-        it means once every two hours. The default interval is 1.
-    :param wkst:
-        The week start day. Must be one of the MO, TU, WE constants, or an
-        integer, specifying the first day of the week. This will affect
-        recurrences based on weekly periods. The default week start is
-        retrieved from calendar.firstweekday(), and may be modified by
-        calendar.setfirstweekday().
-    :param count:
-        If given, this determines how many occurrences will be generated.
-
-        .. note::
-            As of version 2.5.0, the use of the keyword ``until`` in conjunction
-            with ``count`` is deprecated, to make sure ``dateutil`` is fully
-            compliant with `RFC-5545 Sec. 3.3.10 `_. Therefore, ``until`` and ``count``
-            **must not** occur in the same call to ``rrule``.
-    :param until:
-        If given, this must be a datetime instance specifying the upper-bound
-        limit of the recurrence. The last recurrence in the rule is the greatest
-        datetime that is less than or equal to the value specified in the
-        ``until`` parameter.
-
-        .. note::
-            As of version 2.5.0, the use of the keyword ``until`` in conjunction
-            with ``count`` is deprecated, to make sure ``dateutil`` is fully
-            compliant with `RFC-5545 Sec. 3.3.10 `_. Therefore, ``until`` and ``count``
-            **must not** occur in the same call to ``rrule``.
-    :param bysetpos:
-        If given, it must be either an integer, or a sequence of integers,
-        positive or negative. Each given integer will specify an occurrence
-        number, corresponding to the nth occurrence of the rule inside the
-        frequency period. For example, a bysetpos of -1 combined with a
-        MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will
-        result in the last work day of every month.
-    :param bymonth:
-        If given, it must be either an integer, or a sequence of integers,
-        meaning the months to apply the recurrence to.
-    :param bymonthday:
-        If given, it must be either an integer, or a sequence of integers,
-        meaning the month days to apply the recurrence to.
-    :param byyearday:
-        If given, it must be either an integer, or a sequence of integers,
-        meaning the year days to apply the recurrence to.
-    :param byeaster:
-        If given, it must be either an integer, or a sequence of integers,
-        positive or negative. Each integer will define an offset from the
-        Easter Sunday. Passing the offset 0 to byeaster will yield the Easter
-        Sunday itself. This is an extension to the RFC specification.
-    :param byweekno:
-        If given, it must be either an integer, or a sequence of integers,
-        meaning the week numbers to apply the recurrence to.
Week numbers - have the meaning described in ISO8601, that is, the first week of - the year is that containing at least four days of the new year. - :param byweekday: - If given, it must be either an integer (0 == MO), a sequence of - integers, one of the weekday constants (MO, TU, etc), or a sequence - of these constants. When given, these variables will define the - weekdays where the recurrence will be applied. It's also possible to - use an argument n for the weekday instances, which will mean the nth - occurrence of this weekday in the period. For example, with MONTHLY, - or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the - first friday of the month where the recurrence happens. Notice that in - the RFC documentation, this is specified as BYDAY, but was renamed to - avoid the ambiguity of that keyword. - :param byhour: - If given, it must be either an integer, or a sequence of integers, - meaning the hours to apply the recurrence to. - :param byminute: - If given, it must be either an integer, or a sequence of integers, - meaning the minutes to apply the recurrence to. - :param bysecond: - If given, it must be either an integer, or a sequence of integers, - meaning the seconds to apply the recurrence to. - :param cache: - If given, it must be a boolean value specifying to enable or disable - caching of results. If you will use the same rrule instance multiple - times, enabling caching will improve the performance considerably. - """ - def __init__(self, freq, dtstart=None, - interval=1, wkst=None, count=None, until=None, bysetpos=None, - bymonth=None, bymonthday=None, byyearday=None, byeaster=None, - byweekno=None, byweekday=None, - byhour=None, byminute=None, bysecond=None, - cache=False): - super(rrule, self).__init__(cache) - global easter - if not dtstart: - if until and until.tzinfo: - dtstart = datetime.datetime.now(tz=until.tzinfo).replace(microsecond=0) - else: - dtstart = datetime.datetime.now().replace(microsecond=0) - elif not isinstance(dtstart, datetime.datetime): - dtstart = datetime.datetime.fromordinal(dtstart.toordinal()) - else: - dtstart = dtstart.replace(microsecond=0) - self._dtstart = dtstart - self._tzinfo = dtstart.tzinfo - self._freq = freq - self._interval = interval - self._count = count - - # Cache the original byxxx rules, if they are provided, as the _byxxx - # attributes do not necessarily map to the inputs, and this can be - # a problem in generating the strings. Only store things if they've - # been supplied (the string retrieval will just use .get()) - self._original_rule = {} - - if until and not isinstance(until, datetime.datetime): - until = datetime.datetime.fromordinal(until.toordinal()) - self._until = until - - if self._dtstart and self._until: - if (self._dtstart.tzinfo is not None) != (self._until.tzinfo is not None): - # According to RFC5545 Section 3.3.10: - # https://tools.ietf.org/html/rfc5545#section-3.3.10 - # - # > If the "DTSTART" property is specified as a date with UTC - # > time or a date with local time and time zone reference, - # > then the UNTIL rule part MUST be specified as a date with - # > UTC time. - raise ValueError( - 'RRULE UNTIL values must be specified in UTC when DTSTART ' - 'is timezone-aware' - ) - - if count is not None and until: - warn("Using both 'count' and 'until' is inconsistent with RFC 5545" - " and has been deprecated in dateutil. 
Future versions will " - "raise an error.", DeprecationWarning) - - if wkst is None: - self._wkst = calendar.firstweekday() - elif isinstance(wkst, integer_types): - self._wkst = wkst - else: - self._wkst = wkst.weekday - - if bysetpos is None: - self._bysetpos = None - elif isinstance(bysetpos, integer_types): - if bysetpos == 0 or not (-366 <= bysetpos <= 366): - raise ValueError("bysetpos must be between 1 and 366, " - "or between -366 and -1") - self._bysetpos = (bysetpos,) - else: - self._bysetpos = tuple(bysetpos) - for pos in self._bysetpos: - if pos == 0 or not (-366 <= pos <= 366): - raise ValueError("bysetpos must be between 1 and 366, " - "or between -366 and -1") - - if self._bysetpos: - self._original_rule['bysetpos'] = self._bysetpos - - if (byweekno is None and byyearday is None and bymonthday is None and - byweekday is None and byeaster is None): - if freq == YEARLY: - if bymonth is None: - bymonth = dtstart.month - self._original_rule['bymonth'] = None - bymonthday = dtstart.day - self._original_rule['bymonthday'] = None - elif freq == MONTHLY: - bymonthday = dtstart.day - self._original_rule['bymonthday'] = None - elif freq == WEEKLY: - byweekday = dtstart.weekday() - self._original_rule['byweekday'] = None - - # bymonth - if bymonth is None: - self._bymonth = None - else: - if isinstance(bymonth, integer_types): - bymonth = (bymonth,) - - self._bymonth = tuple(sorted(set(bymonth))) - - if 'bymonth' not in self._original_rule: - self._original_rule['bymonth'] = self._bymonth - - # byyearday - if byyearday is None: - self._byyearday = None - else: - if isinstance(byyearday, integer_types): - byyearday = (byyearday,) - - self._byyearday = tuple(sorted(set(byyearday))) - self._original_rule['byyearday'] = self._byyearday - - # byeaster - if byeaster is not None: - if not easter: - from dateutil import easter - if isinstance(byeaster, integer_types): - self._byeaster = (byeaster,) - else: - self._byeaster = tuple(sorted(byeaster)) - - self._original_rule['byeaster'] = self._byeaster - else: - self._byeaster = None - - # bymonthday - if bymonthday is None: - self._bymonthday = () - self._bynmonthday = () - else: - if isinstance(bymonthday, integer_types): - bymonthday = (bymonthday,) - - bymonthday = set(bymonthday) # Ensure it's unique - - self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0)) - self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0)) - - # Storing positive numbers first, then negative numbers - if 'bymonthday' not in self._original_rule: - self._original_rule['bymonthday'] = tuple( - itertools.chain(self._bymonthday, self._bynmonthday)) - - # byweekno - if byweekno is None: - self._byweekno = None - else: - if isinstance(byweekno, integer_types): - byweekno = (byweekno,) - - self._byweekno = tuple(sorted(set(byweekno))) - - self._original_rule['byweekno'] = self._byweekno - - # byweekday / bynweekday - if byweekday is None: - self._byweekday = None - self._bynweekday = None - else: - # If it's one of the valid non-sequence types, convert to a - # single-element sequence before the iterator that builds the - # byweekday set. 
- if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"): - byweekday = (byweekday,) - - self._byweekday = set() - self._bynweekday = set() - for wday in byweekday: - if isinstance(wday, integer_types): - self._byweekday.add(wday) - elif not wday.n or freq > MONTHLY: - self._byweekday.add(wday.weekday) - else: - self._bynweekday.add((wday.weekday, wday.n)) - - if not self._byweekday: - self._byweekday = None - elif not self._bynweekday: - self._bynweekday = None - - if self._byweekday is not None: - self._byweekday = tuple(sorted(self._byweekday)) - orig_byweekday = [weekday(x) for x in self._byweekday] - else: - orig_byweekday = () - - if self._bynweekday is not None: - self._bynweekday = tuple(sorted(self._bynweekday)) - orig_bynweekday = [weekday(*x) for x in self._bynweekday] - else: - orig_bynweekday = () - - if 'byweekday' not in self._original_rule: - self._original_rule['byweekday'] = tuple(itertools.chain( - orig_byweekday, orig_bynweekday)) - - # byhour - if byhour is None: - if freq < HOURLY: - self._byhour = {dtstart.hour} - else: - self._byhour = None - else: - if isinstance(byhour, integer_types): - byhour = (byhour,) - - if freq == HOURLY: - self._byhour = self.__construct_byset(start=dtstart.hour, - byxxx=byhour, - base=24) - else: - self._byhour = set(byhour) - - self._byhour = tuple(sorted(self._byhour)) - self._original_rule['byhour'] = self._byhour - - # byminute - if byminute is None: - if freq < MINUTELY: - self._byminute = {dtstart.minute} - else: - self._byminute = None - else: - if isinstance(byminute, integer_types): - byminute = (byminute,) - - if freq == MINUTELY: - self._byminute = self.__construct_byset(start=dtstart.minute, - byxxx=byminute, - base=60) - else: - self._byminute = set(byminute) - - self._byminute = tuple(sorted(self._byminute)) - self._original_rule['byminute'] = self._byminute - - # bysecond - if bysecond is None: - if freq < SECONDLY: - self._bysecond = ((dtstart.second,)) - else: - self._bysecond = None - else: - if isinstance(bysecond, integer_types): - bysecond = (bysecond,) - - self._bysecond = set(bysecond) - - if freq == SECONDLY: - self._bysecond = self.__construct_byset(start=dtstart.second, - byxxx=bysecond, - base=60) - else: - self._bysecond = set(bysecond) - - self._bysecond = tuple(sorted(self._bysecond)) - self._original_rule['bysecond'] = self._bysecond - - if self._freq >= HOURLY: - self._timeset = None - else: - self._timeset = [] - for hour in self._byhour: - for minute in self._byminute: - for second in self._bysecond: - self._timeset.append( - datetime.time(hour, minute, second, - tzinfo=self._tzinfo)) - self._timeset.sort() - self._timeset = tuple(self._timeset) - - def __str__(self): - """ - Output a string that would generate this RRULE if passed to rrulestr. - This is mostly compatible with RFC5545, except for the - dateutil-specific extension BYEASTER. 
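
As an illustration of the string output described here, a minimal doctest-style sketch (a plain daily rule with a hypothetical start date and no by* filters, so only FREQ and COUNT appear):

    >>> from dateutil.rrule import rrule, DAILY
    >>> from datetime import datetime
    >>> print(rrule(DAILY, count=10, dtstart=datetime(1997, 9, 2, 9, 0)))
    DTSTART:19970902T090000
    RRULE:FREQ=DAILY;COUNT=10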
- """ - - output = [] - h, m, s = [None] * 3 - if self._dtstart: - output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S')) - h, m, s = self._dtstart.timetuple()[3:6] - - parts = ['FREQ=' + FREQNAMES[self._freq]] - if self._interval != 1: - parts.append('INTERVAL=' + str(self._interval)) - - if self._wkst: - parts.append('WKST=' + repr(weekday(self._wkst))[0:2]) - - if self._count is not None: - parts.append('COUNT=' + str(self._count)) - - if self._until: - parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S')) - - if self._original_rule.get('byweekday') is not None: - # The str() method on weekday objects doesn't generate - # RFC5545-compliant strings, so we should modify that. - original_rule = dict(self._original_rule) - wday_strings = [] - for wday in original_rule['byweekday']: - if wday.n: - wday_strings.append('{n:+d}{wday}'.format( - n=wday.n, - wday=repr(wday)[0:2])) - else: - wday_strings.append(repr(wday)) - - original_rule['byweekday'] = wday_strings - else: - original_rule = self._original_rule - - partfmt = '{name}={vals}' - for name, key in [('BYSETPOS', 'bysetpos'), - ('BYMONTH', 'bymonth'), - ('BYMONTHDAY', 'bymonthday'), - ('BYYEARDAY', 'byyearday'), - ('BYWEEKNO', 'byweekno'), - ('BYDAY', 'byweekday'), - ('BYHOUR', 'byhour'), - ('BYMINUTE', 'byminute'), - ('BYSECOND', 'bysecond'), - ('BYEASTER', 'byeaster')]: - value = original_rule.get(key) - if value: - parts.append(partfmt.format(name=name, vals=(','.join(str(v) - for v in value)))) - - output.append('RRULE:' + ';'.join(parts)) - return '\n'.join(output) - - def replace(self, **kwargs): - """Return new rrule with same attributes except for those attributes given new - values by whichever keyword arguments are specified.""" - new_kwargs = {"interval": self._interval, - "count": self._count, - "dtstart": self._dtstart, - "freq": self._freq, - "until": self._until, - "wkst": self._wkst, - "cache": False if self._cache is None else True } - new_kwargs.update(self._original_rule) - new_kwargs.update(kwargs) - return rrule(**new_kwargs) - - def _iter(self): - year, month, day, hour, minute, second, weekday, yearday, _ = \ - self._dtstart.timetuple() - - # Some local variables to speed things up a bit - freq = self._freq - interval = self._interval - wkst = self._wkst - until = self._until - bymonth = self._bymonth - byweekno = self._byweekno - byyearday = self._byyearday - byweekday = self._byweekday - byeaster = self._byeaster - bymonthday = self._bymonthday - bynmonthday = self._bynmonthday - bysetpos = self._bysetpos - byhour = self._byhour - byminute = self._byminute - bysecond = self._bysecond - - ii = _iterinfo(self) - ii.rebuild(year, month) - - getdayset = {YEARLY: ii.ydayset, - MONTHLY: ii.mdayset, - WEEKLY: ii.wdayset, - DAILY: ii.ddayset, - HOURLY: ii.ddayset, - MINUTELY: ii.ddayset, - SECONDLY: ii.ddayset}[freq] - - if freq < HOURLY: - timeset = self._timeset - else: - gettimeset = {HOURLY: ii.htimeset, - MINUTELY: ii.mtimeset, - SECONDLY: ii.stimeset}[freq] - if ((freq >= HOURLY and - self._byhour and hour not in self._byhour) or - (freq >= MINUTELY and - self._byminute and minute not in self._byminute) or - (freq >= SECONDLY and - self._bysecond and second not in self._bysecond)): - timeset = () - else: - timeset = gettimeset(hour, minute, second) - - total = 0 - count = self._count - while True: - # Get dayset with the right frequency - dayset, start, end = getdayset(year, month, day) - - # Do the "hard" work ;-) - filtered = False - for i in dayset[start:end]: - if ((bymonth and ii.mmask[i] not in 
bymonth) or - (byweekno and not ii.wnomask[i]) or - (byweekday and ii.wdaymask[i] not in byweekday) or - (ii.nwdaymask and not ii.nwdaymask[i]) or - (byeaster and not ii.eastermask[i]) or - ((bymonthday or bynmonthday) and - ii.mdaymask[i] not in bymonthday and - ii.nmdaymask[i] not in bynmonthday) or - (byyearday and - ((i < ii.yearlen and i+1 not in byyearday and - -ii.yearlen+i not in byyearday) or - (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and - -ii.nextyearlen+i-ii.yearlen not in byyearday)))): - dayset[i] = None - filtered = True - - # Output results - if bysetpos and timeset: - poslist = [] - for pos in bysetpos: - if pos < 0: - daypos, timepos = divmod(pos, len(timeset)) - else: - daypos, timepos = divmod(pos-1, len(timeset)) - try: - i = [x for x in dayset[start:end] - if x is not None][daypos] - time = timeset[timepos] - except IndexError: - pass - else: - date = datetime.date.fromordinal(ii.yearordinal+i) - res = datetime.datetime.combine(date, time) - if res not in poslist: - poslist.append(res) - poslist.sort() - for res in poslist: - if until and res > until: - self._len = total - return - elif res >= self._dtstart: - if count is not None: - count -= 1 - if count < 0: - self._len = total - return - total += 1 - yield res - else: - for i in dayset[start:end]: - if i is not None: - date = datetime.date.fromordinal(ii.yearordinal + i) - for time in timeset: - res = datetime.datetime.combine(date, time) - if until and res > until: - self._len = total - return - elif res >= self._dtstart: - if count is not None: - count -= 1 - if count < 0: - self._len = total - return - - total += 1 - yield res - - # Handle frequency and interval - fixday = False - if freq == YEARLY: - year += interval - if year > datetime.MAXYEAR: - self._len = total - return - ii.rebuild(year, month) - elif freq == MONTHLY: - month += interval - if month > 12: - div, mod = divmod(month, 12) - month = mod - year += div - if month == 0: - month = 12 - year -= 1 - if year > datetime.MAXYEAR: - self._len = total - return - ii.rebuild(year, month) - elif freq == WEEKLY: - if wkst > weekday: - day += -(weekday+1+(6-wkst))+self._interval*7 - else: - day += -(weekday-wkst)+self._interval*7 - weekday = wkst - fixday = True - elif freq == DAILY: - day += interval - fixday = True - elif freq == HOURLY: - if filtered: - # Jump to one iteration before next day - hour += ((23-hour)//interval)*interval - - if byhour: - ndays, hour = self.__mod_distance(value=hour, - byxxx=self._byhour, - base=24) - else: - ndays, hour = divmod(hour+interval, 24) - - if ndays: - day += ndays - fixday = True - - timeset = gettimeset(hour, minute, second) - elif freq == MINUTELY: - if filtered: - # Jump to one iteration before next day - minute += ((1439-(hour*60+minute))//interval)*interval - - valid = False - rep_rate = (24*60) - for j in range(rep_rate // gcd(interval, rep_rate)): - if byminute: - nhours, minute = \ - self.__mod_distance(value=minute, - byxxx=self._byminute, - base=60) - else: - nhours, minute = divmod(minute+interval, 60) - - div, hour = divmod(hour+nhours, 24) - if div: - day += div - fixday = True - filtered = False - - if not byhour or hour in byhour: - valid = True - break - - if not valid: - raise ValueError('Invalid combination of interval and ' + - 'byhour resulting in empty rule.') - - timeset = gettimeset(hour, minute, second) - elif freq == SECONDLY: - if filtered: - # Jump to one iteration before next day - second += (((86399 - (hour * 3600 + minute * 60 + second)) - // interval) * interval) - - 
rep_rate = (24 * 3600) - valid = False - for j in range(0, rep_rate // gcd(interval, rep_rate)): - if bysecond: - nminutes, second = \ - self.__mod_distance(value=second, - byxxx=self._bysecond, - base=60) - else: - nminutes, second = divmod(second+interval, 60) - - div, minute = divmod(minute+nminutes, 60) - if div: - hour += div - div, hour = divmod(hour, 24) - if div: - day += div - fixday = True - - if ((not byhour or hour in byhour) and - (not byminute or minute in byminute) and - (not bysecond or second in bysecond)): - valid = True - break - - if not valid: - raise ValueError('Invalid combination of interval, ' + - 'byhour and byminute resulting in empty' + - ' rule.') - - timeset = gettimeset(hour, minute, second) - - if fixday and day > 28: - daysinmonth = calendar.monthrange(year, month)[1] - if day > daysinmonth: - while day > daysinmonth: - day -= daysinmonth - month += 1 - if month == 13: - month = 1 - year += 1 - if year > datetime.MAXYEAR: - self._len = total - return - daysinmonth = calendar.monthrange(year, month)[1] - ii.rebuild(year, month) - - def __construct_byset(self, start, byxxx, base): - """ - If a `BYXXX` sequence is passed to the constructor at the same level as - `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some - specifications which cannot be reached given some starting conditions. - - This occurs whenever the interval is not coprime with the base of a - given unit and the difference between the starting position and the - ending position is not coprime with the greatest common denominator - between the interval and the base. For example, with a FREQ of hourly - starting at 17:00 and an interval of 4, the only valid values for - BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not - coprime. - - :param start: - Specifies the starting position. - :param byxxx: - An iterable containing the list of allowed values. - :param base: - The largest allowable value for the specified frequency (e.g. - 24 hours, 60 minutes). - - This does not preserve the type of the iterable, returning a set, since - the values should be unique and the order is irrelevant, this will - speed up later lookups. - - In the event of an empty set, raises a :exception:`ValueError`, as this - results in an empty rrule. - """ - - cset = set() - - # Support a single byxxx value. - if isinstance(byxxx, integer_types): - byxxx = (byxxx, ) - - for num in byxxx: - i_gcd = gcd(self._interval, base) - # Use divmod rather than % because we need to wrap negative nums. - if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0: - cset.add(num) - - if len(cset) == 0: - raise ValueError("Invalid rrule byxxx generates an empty set.") - - return cset - - def __mod_distance(self, value, byxxx, base): - """ - Calculates the next value in a sequence where the `FREQ` parameter is - specified along with a `BYXXX` parameter at the same "level" - (e.g. `HOURLY` specified with `BYHOUR`). - - :param value: - The old value of the component. - :param byxxx: - The `BYXXX` set, which should have been generated by - `rrule._construct_byset`, or something else which checks that a - valid rule is present. - :param base: - The largest allowable value for the specified frequency (e.g. - 24 hours, 60 minutes). - - If a valid value is not found after `base` iterations (the maximum - number before the sequence would start to repeat), this raises a - :exception:`ValueError`, as no valid values were found. 
- - This returns a tuple of `divmod(n*interval, base)`, where `n` is the - smallest number of `interval` repetitions until the next specified - value in `byxxx` is found. - """ - accumulator = 0 - for ii in range(1, base + 1): - # Using divmod() over % to account for negative intervals - div, value = divmod(value + self._interval, base) - accumulator += div - if value in byxxx: - return (accumulator, value) - - -class _iterinfo(object): - __slots__ = ["rrule", "lastyear", "lastmonth", - "yearlen", "nextyearlen", "yearordinal", "yearweekday", - "mmask", "mrange", "mdaymask", "nmdaymask", - "wdaymask", "wnomask", "nwdaymask", "eastermask"] - - def __init__(self, rrule): - for attr in self.__slots__: - setattr(self, attr, None) - self.rrule = rrule - - def rebuild(self, year, month): - # Every mask is 7 days longer to handle cross-year weekly periods. - rr = self.rrule - if year != self.lastyear: - self.yearlen = 365 + calendar.isleap(year) - self.nextyearlen = 365 + calendar.isleap(year + 1) - firstyday = datetime.date(year, 1, 1) - self.yearordinal = firstyday.toordinal() - self.yearweekday = firstyday.weekday() - - wday = datetime.date(year, 1, 1).weekday() - if self.yearlen == 365: - self.mmask = M365MASK - self.mdaymask = MDAY365MASK - self.nmdaymask = NMDAY365MASK - self.wdaymask = WDAYMASK[wday:] - self.mrange = M365RANGE - else: - self.mmask = M366MASK - self.mdaymask = MDAY366MASK - self.nmdaymask = NMDAY366MASK - self.wdaymask = WDAYMASK[wday:] - self.mrange = M366RANGE - - if not rr._byweekno: - self.wnomask = None - else: - self.wnomask = [0]*(self.yearlen+7) - # no1wkst = firstwkst = self.wdaymask.index(rr._wkst) - no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7 - if no1wkst >= 4: - no1wkst = 0 - # Number of days in the year, plus the days we got - # from last year. - wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7 - else: - # Number of days in the year, minus the days we - # left in last year. - wyearlen = self.yearlen-no1wkst - div, mod = divmod(wyearlen, 7) - numweeks = div+mod//4 - for n in rr._byweekno: - if n < 0: - n += numweeks+1 - if not (0 < n <= numweeks): - continue - if n > 1: - i = no1wkst+(n-1)*7 - if no1wkst != firstwkst: - i -= 7-firstwkst - else: - i = no1wkst - for j in range(7): - self.wnomask[i] = 1 - i += 1 - if self.wdaymask[i] == rr._wkst: - break - if 1 in rr._byweekno: - # Check week number 1 of next year as well - # TODO: Check -numweeks for next year. - i = no1wkst+numweeks*7 - if no1wkst != firstwkst: - i -= 7-firstwkst - if i < self.yearlen: - # If week starts in next year, we - # don't care about it. - for j in range(7): - self.wnomask[i] = 1 - i += 1 - if self.wdaymask[i] == rr._wkst: - break - if no1wkst: - # Check last week number of last year as - # well. If no1wkst is 0, either the year - # started on week start, or week number 1 - # got days from last year, so there are no - # days from last year's last week number in - # this year. 
- if -1 not in rr._byweekno: - lyearweekday = datetime.date(year-1, 1, 1).weekday() - lno1wkst = (7-lyearweekday+rr._wkst) % 7 - lyearlen = 365+calendar.isleap(year-1) - if lno1wkst >= 4: - lno1wkst = 0 - lnumweeks = 52+(lyearlen + - (lyearweekday-rr._wkst) % 7) % 7//4 - else: - lnumweeks = 52+(self.yearlen-no1wkst) % 7//4 - else: - lnumweeks = -1 - if lnumweeks in rr._byweekno: - for i in range(no1wkst): - self.wnomask[i] = 1 - - if (rr._bynweekday and (month != self.lastmonth or - year != self.lastyear)): - ranges = [] - if rr._freq == YEARLY: - if rr._bymonth: - for month in rr._bymonth: - ranges.append(self.mrange[month-1:month+1]) - else: - ranges = [(0, self.yearlen)] - elif rr._freq == MONTHLY: - ranges = [self.mrange[month-1:month+1]] - if ranges: - # Weekly frequency won't get here, so we may not - # care about cross-year weekly periods. - self.nwdaymask = [0]*self.yearlen - for first, last in ranges: - last -= 1 - for wday, n in rr._bynweekday: - if n < 0: - i = last+(n+1)*7 - i -= (self.wdaymask[i]-wday) % 7 - else: - i = first+(n-1)*7 - i += (7-self.wdaymask[i]+wday) % 7 - if first <= i <= last: - self.nwdaymask[i] = 1 - - if rr._byeaster: - self.eastermask = [0]*(self.yearlen+7) - eyday = easter.easter(year).toordinal()-self.yearordinal - for offset in rr._byeaster: - self.eastermask[eyday+offset] = 1 - - self.lastyear = year - self.lastmonth = month - - def ydayset(self, year, month, day): - return list(range(self.yearlen)), 0, self.yearlen - - def mdayset(self, year, month, day): - dset = [None]*self.yearlen - start, end = self.mrange[month-1:month+1] - for i in range(start, end): - dset[i] = i - return dset, start, end - - def wdayset(self, year, month, day): - # We need to handle cross-year weeks here. - dset = [None]*(self.yearlen+7) - i = datetime.date(year, month, day).toordinal()-self.yearordinal - start = i - for j in range(7): - dset[i] = i - i += 1 - # if (not (0 <= i < self.yearlen) or - # self.wdaymask[i] == self.rrule._wkst): - # This will cross the year boundary, if necessary. - if self.wdaymask[i] == self.rrule._wkst: - break - return dset, start, i - - def ddayset(self, year, month, day): - dset = [None] * self.yearlen - i = datetime.date(year, month, day).toordinal() - self.yearordinal - dset[i] = i - return dset, i, i + 1 - - def htimeset(self, hour, minute, second): - tset = [] - rr = self.rrule - for minute in rr._byminute: - for second in rr._bysecond: - tset.append(datetime.time(hour, minute, second, - tzinfo=rr._tzinfo)) - tset.sort() - return tset - - def mtimeset(self, hour, minute, second): - tset = [] - rr = self.rrule - for second in rr._bysecond: - tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo)) - tset.sort() - return tset - - def stimeset(self, hour, minute, second): - return (datetime.time(hour, minute, second, - tzinfo=self.rrule._tzinfo),) - - -class rruleset(rrulebase): - """ The rruleset type allows more complex recurrence setups, mixing - multiple rules, dates, exclusion rules, and exclusion dates. The type - constructor takes the following keyword arguments: - - :param cache: If True, caching of results will be enabled, improving - performance of multiple queries considerably. 
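
A short doctest-style sketch of typical rruleset usage (hypothetical dates; a daily inclusion rule with one excluded date):

    >>> from dateutil.rrule import rruleset, rrule, DAILY
    >>> from datetime import datetime
    >>> rs = rruleset()
    >>> rs.rrule(rrule(DAILY, count=3, dtstart=datetime(2020, 1, 1)))
    >>> rs.exdate(datetime(2020, 1, 2))
    >>> list(rs)
    [datetime.datetime(2020, 1, 1, 0, 0), datetime.datetime(2020, 1, 3, 0, 0)]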
""" - - class _genitem(object): - def __init__(self, genlist, gen): - try: - self.dt = advance_iterator(gen) - genlist.append(self) - except StopIteration: - pass - self.genlist = genlist - self.gen = gen - - def __next__(self): - try: - self.dt = advance_iterator(self.gen) - except StopIteration: - if self.genlist[0] is self: - heapq.heappop(self.genlist) - else: - self.genlist.remove(self) - heapq.heapify(self.genlist) - - next = __next__ - - def __lt__(self, other): - return self.dt < other.dt - - def __gt__(self, other): - return self.dt > other.dt - - def __eq__(self, other): - return self.dt == other.dt - - def __ne__(self, other): - return self.dt != other.dt - - def __init__(self, cache=False): - super(rruleset, self).__init__(cache) - self._rrule = [] - self._rdate = [] - self._exrule = [] - self._exdate = [] - - @_invalidates_cache - def rrule(self, rrule): - """ Include the given :py:class:`rrule` instance in the recurrence set - generation. """ - self._rrule.append(rrule) - - @_invalidates_cache - def rdate(self, rdate): - """ Include the given :py:class:`datetime` instance in the recurrence - set generation. """ - self._rdate.append(rdate) - - @_invalidates_cache - def exrule(self, exrule): - """ Include the given rrule instance in the recurrence set exclusion - list. Dates which are part of the given recurrence rules will not - be generated, even if some inclusive rrule or rdate matches them. - """ - self._exrule.append(exrule) - - @_invalidates_cache - def exdate(self, exdate): - """ Include the given datetime instance in the recurrence set - exclusion list. Dates included that way will not be generated, - even if some inclusive rrule or rdate matches them. """ - self._exdate.append(exdate) - - def _iter(self): - rlist = [] - self._rdate.sort() - self._genitem(rlist, iter(self._rdate)) - for gen in [iter(x) for x in self._rrule]: - self._genitem(rlist, gen) - exlist = [] - self._exdate.sort() - self._genitem(exlist, iter(self._exdate)) - for gen in [iter(x) for x in self._exrule]: - self._genitem(exlist, gen) - lastdt = None - total = 0 - heapq.heapify(rlist) - heapq.heapify(exlist) - while rlist: - ritem = rlist[0] - if not lastdt or lastdt != ritem.dt: - while exlist and exlist[0] < ritem: - exitem = exlist[0] - advance_iterator(exitem) - if exlist and exlist[0] is exitem: - heapq.heapreplace(exlist, exitem) - if not exlist or ritem != exlist[0]: - total += 1 - yield ritem.dt - lastdt = ritem.dt - advance_iterator(ritem) - if rlist and rlist[0] is ritem: - heapq.heapreplace(rlist, ritem) - self._len = total - - - - -class _rrulestr(object): - """ Parses a string representation of a recurrence rule or set of - recurrence rules. - - :param s: - Required, a string defining one or more recurrence rules. - - :param dtstart: - If given, used as the default recurrence start if not specified in the - rule string. - - :param cache: - If set ``True`` caching of results will be enabled, improving - performance of multiple queries considerably. - - :param unfold: - If set ``True`` indicates that a rule string is split over more - than one line and should be joined before processing. - - :param forceset: - If set ``True`` forces a :class:`dateutil.rrule.rruleset` to - be returned. - - :param compatible: - If set ``True`` forces ``unfold`` and ``forceset`` to be ``True``. - - :param ignoretz: - If set ``True``, time zones in parsed strings are ignored and a naive - :class:`datetime.datetime` object is returned. 
- - :param tzids: - If given, a callable or mapping used to retrieve a - :class:`datetime.tzinfo` from a string representation. - Defaults to :func:`dateutil.tz.gettz`. - - :param tzinfos: - Additional time zone names / aliases which may be present in a string - representation. See :func:`dateutil.parser.parse` for more - information. - - :return: - Returns a :class:`dateutil.rrule.rruleset` or - :class:`dateutil.rrule.rrule` - """ - - _freq_map = {"YEARLY": YEARLY, - "MONTHLY": MONTHLY, - "WEEKLY": WEEKLY, - "DAILY": DAILY, - "HOURLY": HOURLY, - "MINUTELY": MINUTELY, - "SECONDLY": SECONDLY} - - _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3, - "FR": 4, "SA": 5, "SU": 6} - - def _handle_int(self, rrkwargs, name, value, **kwargs): - rrkwargs[name.lower()] = int(value) - - def _handle_int_list(self, rrkwargs, name, value, **kwargs): - rrkwargs[name.lower()] = [int(x) for x in value.split(',')] - - _handle_INTERVAL = _handle_int - _handle_COUNT = _handle_int - _handle_BYSETPOS = _handle_int_list - _handle_BYMONTH = _handle_int_list - _handle_BYMONTHDAY = _handle_int_list - _handle_BYYEARDAY = _handle_int_list - _handle_BYEASTER = _handle_int_list - _handle_BYWEEKNO = _handle_int_list - _handle_BYHOUR = _handle_int_list - _handle_BYMINUTE = _handle_int_list - _handle_BYSECOND = _handle_int_list - - def _handle_FREQ(self, rrkwargs, name, value, **kwargs): - rrkwargs["freq"] = self._freq_map[value] - - def _handle_UNTIL(self, rrkwargs, name, value, **kwargs): - global parser - if not parser: - from dateutil import parser - try: - rrkwargs["until"] = parser.parse(value, - ignoretz=kwargs.get("ignoretz"), - tzinfos=kwargs.get("tzinfos")) - except ValueError: - raise ValueError("invalid until date") - - def _handle_WKST(self, rrkwargs, name, value, **kwargs): - rrkwargs["wkst"] = self._weekday_map[value] - - def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs): - """ - Two ways to specify this: +1MO or MO(+1) - """ - l = [] - for wday in value.split(','): - if '(' in wday: - # If it's of the form TH(+1), etc. - splt = wday.split('(') - w = splt[0] - n = int(splt[1][:-1]) - elif len(wday): - # If it's of the form +1MO - for i in range(len(wday)): - if wday[i] not in '+-0123456789': - break - n = wday[:i] or None - w = wday[i:] - if n: - n = int(n) - else: - raise ValueError("Invalid (empty) BYDAY specification.") - - l.append(weekdays[self._weekday_map[w]](n)) - rrkwargs["byweekday"] = l - - _handle_BYDAY = _handle_BYWEEKDAY - - def _parse_rfc_rrule(self, line, - dtstart=None, - cache=False, - ignoretz=False, - tzinfos=None): - if line.find(':') != -1: - name, value = line.split(':') - if name != "RRULE": - raise ValueError("unknown parameter name") - else: - value = line - rrkwargs = {} - for pair in value.split(';'): - name, value = pair.split('=') - name = name.upper() - value = value.upper() - try: - getattr(self, "_handle_"+name)(rrkwargs, name, value, - ignoretz=ignoretz, - tzinfos=tzinfos) - except AttributeError: - raise ValueError("unknown parameter '%s'" % name) - except (KeyError, ValueError): - raise ValueError("invalid '%s': %s" % (name, value)) - return rrule(dtstart=dtstart, cache=cache, **rrkwargs) - - def _parse_date_value(self, date_value, parms, rule_tzids, - ignoretz, tzids, tzinfos): - global parser - if not parser: - from dateutil import parser - - datevals = [] - value_found = False - TZID = None - - for parm in parms: - if parm.startswith("TZID="): - try: - tzkey = rule_tzids[parm.split('TZID=')[-1]] - except KeyError: - continue - if tzids is None: - from . 
import tz - tzlookup = tz.gettz - elif callable(tzids): - tzlookup = tzids - else: - tzlookup = getattr(tzids, 'get', None) - if tzlookup is None: - msg = ('tzids must be a callable, mapping, or None, ' - 'not %s' % tzids) - raise ValueError(msg) - - TZID = tzlookup(tzkey) - continue - - # RFC 5445 3.8.2.4: The VALUE parameter is optional, but may be found - # only once. - if parm not in {"VALUE=DATE-TIME", "VALUE=DATE"}: - raise ValueError("unsupported parm: " + parm) - else: - if value_found: - msg = ("Duplicate value parameter found in: " + parm) - raise ValueError(msg) - value_found = True - - for datestr in date_value.split(','): - date = parser.parse(datestr, ignoretz=ignoretz, tzinfos=tzinfos) - if TZID is not None: - if date.tzinfo is None: - date = date.replace(tzinfo=TZID) - else: - raise ValueError('DTSTART/EXDATE specifies multiple timezone') - datevals.append(date) - - return datevals - - def _parse_rfc(self, s, - dtstart=None, - cache=False, - unfold=False, - forceset=False, - compatible=False, - ignoretz=False, - tzids=None, - tzinfos=None): - global parser - if compatible: - forceset = True - unfold = True - - TZID_NAMES = dict(map( - lambda x: (x.upper(), x), - re.findall('TZID=(?P[^:]+):', s) - )) - s = s.upper() - if not s.strip(): - raise ValueError("empty string") - if unfold: - lines = s.splitlines() - i = 0 - while i < len(lines): - line = lines[i].rstrip() - if not line: - del lines[i] - elif i > 0 and line[0] == " ": - lines[i-1] += line[1:] - del lines[i] - else: - i += 1 - else: - lines = s.split() - if (not forceset and len(lines) == 1 and (s.find(':') == -1 or - s.startswith('RRULE:'))): - return self._parse_rfc_rrule(lines[0], cache=cache, - dtstart=dtstart, ignoretz=ignoretz, - tzinfos=tzinfos) - else: - rrulevals = [] - rdatevals = [] - exrulevals = [] - exdatevals = [] - for line in lines: - if not line: - continue - if line.find(':') == -1: - name = "RRULE" - value = line - else: - name, value = line.split(':', 1) - parms = name.split(';') - if not parms: - raise ValueError("empty property name") - name = parms[0] - parms = parms[1:] - if name == "RRULE": - for parm in parms: - raise ValueError("unsupported RRULE parm: "+parm) - rrulevals.append(value) - elif name == "RDATE": - for parm in parms: - if parm != "VALUE=DATE-TIME": - raise ValueError("unsupported RDATE parm: "+parm) - rdatevals.append(value) - elif name == "EXRULE": - for parm in parms: - raise ValueError("unsupported EXRULE parm: "+parm) - exrulevals.append(value) - elif name == "EXDATE": - exdatevals.extend( - self._parse_date_value(value, parms, - TZID_NAMES, ignoretz, - tzids, tzinfos) - ) - elif name == "DTSTART": - dtvals = self._parse_date_value(value, parms, TZID_NAMES, - ignoretz, tzids, tzinfos) - if len(dtvals) != 1: - raise ValueError("Multiple DTSTART values specified:" + - value) - dtstart = dtvals[0] - else: - raise ValueError("unsupported property: "+name) - if (forceset or len(rrulevals) > 1 or rdatevals - or exrulevals or exdatevals): - if not parser and (rdatevals or exdatevals): - from dateutil import parser - rset = rruleset(cache=cache) - for value in rrulevals: - rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart, - ignoretz=ignoretz, - tzinfos=tzinfos)) - for value in rdatevals: - for datestr in value.split(','): - rset.rdate(parser.parse(datestr, - ignoretz=ignoretz, - tzinfos=tzinfos)) - for value in exrulevals: - rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart, - ignoretz=ignoretz, - tzinfos=tzinfos)) - for value in exdatevals: - rset.exdate(value) - if 
compatible and dtstart: - rset.rdate(dtstart) - return rset - else: - return self._parse_rfc_rrule(rrulevals[0], - dtstart=dtstart, - cache=cache, - ignoretz=ignoretz, - tzinfos=tzinfos) - - def __call__(self, s, **kwargs): - return self._parse_rfc(s, **kwargs) - - -rrulestr = _rrulestr() - -# vim:ts=4:sw=4:et diff --git a/flo-token-explorer/lib/python3.6/site-packages/dateutil/tz/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/dateutil/tz/__init__.py deleted file mode 100644 index 5a2d9cd..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/dateutil/tz/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# -*- coding: utf-8 -*- -from .tz import * -from .tz import __doc__ - -#: Convenience constant providing a :class:`tzutc()` instance -#: -#: .. versionadded:: 2.7.0 -UTC = tzutc() - -__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange", - "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz", - "enfold", "datetime_ambiguous", "datetime_exists", - "resolve_imaginary", "UTC", "DeprecatedTzFormatWarning"] - - -class DeprecatedTzFormatWarning(Warning): - """Warning raised when time zones are parsed from deprecated formats.""" diff --git a/flo-token-explorer/lib/python3.6/site-packages/dateutil/tz/_common.py b/flo-token-explorer/lib/python3.6/site-packages/dateutil/tz/_common.py deleted file mode 100644 index 594e082..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/dateutil/tz/_common.py +++ /dev/null @@ -1,419 +0,0 @@ -from six import PY2 - -from functools import wraps - -from datetime import datetime, timedelta, tzinfo - - -ZERO = timedelta(0) - -__all__ = ['tzname_in_python2', 'enfold'] - - -def tzname_in_python2(namefunc): - """Change unicode output into bytestrings in Python 2 - - tzname() API changed in Python 3. It used to return bytes, but was changed - to unicode strings - """ - if PY2: - @wraps(namefunc) - def adjust_encoding(*args, **kwargs): - name = namefunc(*args, **kwargs) - if name is not None: - name = name.encode() - - return name - - return adjust_encoding - else: - return namefunc - - -# The following is adapted from Alexander Belopolsky's tz library -# https://github.com/abalkin/tz -if hasattr(datetime, 'fold'): - # Python 3.6+: datetime supports the ``fold`` attribute natively - def enfold(dt, fold=1): - """ - Provides a unified interface for assigning the ``fold`` attribute to - datetimes both before and after the implementation of PEP-495. - - :param fold: - The value for the ``fold`` attribute in the returned datetime. This - should be either 0 or 1. - - :return: - Returns an object for which ``getattr(dt, 'fold', 0)`` returns - ``fold`` for all versions of Python. In versions prior to - Python 3.6, this is a ``_DatetimeWithFold`` object, which is a - subclass of :py:class:`datetime.datetime` with the ``fold`` - attribute added, if ``fold`` is 1. - - .. versionadded:: 2.6.0 - """ - return dt.replace(fold=fold) - -else: - class _DatetimeWithFold(datetime): - """ - This is a class designed to provide a PEP 495-compliant interface for - Python versions before 3.6. It is used only for dates in a fold, so - the ``fold`` attribute is fixed at ``1``. - - .. versionadded:: 2.6.0 - """ - __slots__ = () - - def replace(self, *args, **kwargs): - """ - Return a datetime with the same attributes, except for those - attributes given new values by whichever keyword arguments are - specified. Note that tzinfo=None can be specified to create a naive - datetime from an aware datetime with no conversion of date and time - data.
- - This is reimplemented in ``_DatetimeWithFold`` because pypy3 will - return a ``datetime.datetime`` even if ``fold`` is unchanged. - """ - argnames = ( - 'year', 'month', 'day', 'hour', 'minute', 'second', - 'microsecond', 'tzinfo' - ) - - for arg, argname in zip(args, argnames): - if argname in kwargs: - raise TypeError('Duplicate argument: {}'.format(argname)) - - kwargs[argname] = arg - - for argname in argnames: - if argname not in kwargs: - kwargs[argname] = getattr(self, argname) - - dt_class = self.__class__ if kwargs.get('fold', 1) else datetime - - return dt_class(**kwargs) - - @property - def fold(self): - return 1 - - def enfold(dt, fold=1): - """ - Provides a unified interface for assigning the ``fold`` attribute to - datetimes both before and after the implementation of PEP-495. - - :param fold: - The value for the ``fold`` attribute in the returned datetime. This - should be either 0 or 1. - - :return: - Returns an object for which ``getattr(dt, 'fold', 0)`` returns - ``fold`` for all versions of Python. In versions prior to - Python 3.6, this is a ``_DatetimeWithFold`` object, which is a - subclass of :py:class:`datetime.datetime` with the ``fold`` - attribute added, if ``fold`` is 1. - - .. versionadded:: 2.6.0 - """ - if getattr(dt, 'fold', 0) == fold: - return dt - - args = dt.timetuple()[:6] - args += (dt.microsecond, dt.tzinfo) - - if fold: - return _DatetimeWithFold(*args) - else: - return datetime(*args) - - -def _validate_fromutc_inputs(f): - """ - The CPython version of ``fromutc`` checks that the input is a ``datetime`` - object and that ``self`` is attached as its ``tzinfo``. - """ - @wraps(f) - def fromutc(self, dt): - if not isinstance(dt, datetime): - raise TypeError("fromutc() requires a datetime argument") - if dt.tzinfo is not self: - raise ValueError("dt.tzinfo is not self") - - return f(self, dt) - - return fromutc - - -class _tzinfo(tzinfo): - """ - Base class for all ``dateutil`` ``tzinfo`` objects. - """ - - def is_ambiguous(self, dt): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - - - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. versionadded:: 2.6.0 - """ - - dt = dt.replace(tzinfo=self) - - wall_0 = enfold(dt, fold=0) - wall_1 = enfold(dt, fold=1) - - same_offset = wall_0.utcoffset() == wall_1.utcoffset() - same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None) - - return same_dt and not same_offset - - def _fold_status(self, dt_utc, dt_wall): - """ - Determine the fold status of a "wall" datetime, given a representation - of the same datetime as a (naive) UTC datetime. This is calculated based - on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all - datetimes, and that this offset is the actual number of hours separating - ``dt_utc`` and ``dt_wall``. - - :param dt_utc: - Representation of the datetime as UTC - - :param dt_wall: - Representation of the datetime as "wall time". This parameter must - either have a `fold` attribute or have a fold-naive - :class:`datetime.tzinfo` attached, otherwise the calculation may - fail. 
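The ``enfold`` interface above is easiest to see against an ambiguous wall time. A minimal illustrative sketch (not part of the deleted file; it assumes ``dateutil`` with its bundled IANA zone data is importable):

    from datetime import datetime
    from dateutil.tz import enfold, gettz

    NYC = gettz("America/New_York")
    # 2018-11-04 01:30 occurs twice in New York (clocks fall back at 02:00),
    # so the fold value selects which of the two instants is meant.
    ambiguous = datetime(2018, 11, 4, 1, 30, tzinfo=NYC)
    print(enfold(ambiguous, fold=0).utcoffset())  # -1 day, 20:00:00 (EDT, UTC-4)
    print(enfold(ambiguous, fold=1).utcoffset())  # -1 day, 19:00:00 (EST, UTC-5)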
- """ - if self.is_ambiguous(dt_wall): - delta_wall = dt_wall - dt_utc - _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst())) - else: - _fold = 0 - - return _fold - - def _fold(self, dt): - return getattr(dt, 'fold', 0) - - def _fromutc(self, dt): - """ - Given a timezone-aware datetime in a given timezone, calculates a - timezone-aware datetime in a new timezone. - - Since this is the one time that we *know* we have an unambiguous - datetime object, we take this opportunity to determine whether the - datetime is ambiguous and in a "fold" state (e.g. if it's the first - occurence, chronologically, of the ambiguous datetime). - - :param dt: - A timezone-aware :class:`datetime.datetime` object. - """ - - # Re-implement the algorithm from Python's datetime.py - dtoff = dt.utcoffset() - if dtoff is None: - raise ValueError("fromutc() requires a non-None utcoffset() " - "result") - - # The original datetime.py code assumes that `dst()` defaults to - # zero during ambiguous times. PEP 495 inverts this presumption, so - # for pre-PEP 495 versions of python, we need to tweak the algorithm. - dtdst = dt.dst() - if dtdst is None: - raise ValueError("fromutc() requires a non-None dst() result") - delta = dtoff - dtdst - - dt += delta - # Set fold=1 so we can default to being in the fold for - # ambiguous dates. - dtdst = enfold(dt, fold=1).dst() - if dtdst is None: - raise ValueError("fromutc(): dt.dst gave inconsistent " - "results; cannot convert") - return dt + dtdst - - @_validate_fromutc_inputs - def fromutc(self, dt): - """ - Given a timezone-aware datetime in a given timezone, calculates a - timezone-aware datetime in a new timezone. - - Since this is the one time that we *know* we have an unambiguous - datetime object, we take this opportunity to determine whether the - datetime is ambiguous and in a "fold" state (e.g. if it's the first - occurance, chronologically, of the ambiguous datetime). - - :param dt: - A timezone-aware :class:`datetime.datetime` object. - """ - dt_wall = self._fromutc(dt) - - # Calculate the fold status given the two datetimes. - _fold = self._fold_status(dt, dt_wall) - - # Set the default fold value for ambiguous dates - return enfold(dt_wall, fold=_fold) - - -class tzrangebase(_tzinfo): - """ - This is an abstract base class for time zones represented by an annual - transition into and out of DST. Child classes should implement the following - methods: - - * ``__init__(self, *args, **kwargs)`` - * ``transitions(self, year)`` - this is expected to return a tuple of - datetimes representing the DST on and off transitions in standard - time. - - A fully initialized ``tzrangebase`` subclass should also provide the - following attributes: - * ``hasdst``: Boolean whether or not the zone uses DST. - * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects - representing the respective UTC offsets. - * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short - abbreviations in DST and STD, respectively. - * ``_hasdst``: Whether or not the zone has DST. - - .. 
versionadded:: 2.6.0 - """ - def __init__(self): - raise NotImplementedError('tzrangebase is an abstract base class') - - def utcoffset(self, dt): - isdst = self._isdst(dt) - - if isdst is None: - return None - elif isdst: - return self._dst_offset - else: - return self._std_offset - - def dst(self, dt): - isdst = self._isdst(dt) - - if isdst is None: - return None - elif isdst: - return self._dst_base_offset - else: - return ZERO - - @tzname_in_python2 - def tzname(self, dt): - if self._isdst(dt): - return self._dst_abbr - else: - return self._std_abbr - - def fromutc(self, dt): - """ Given a datetime in UTC, return local time """ - if not isinstance(dt, datetime): - raise TypeError("fromutc() requires a datetime argument") - - if dt.tzinfo is not self: - raise ValueError("dt.tzinfo is not self") - - # Get transitions - if there are none, fixed offset - transitions = self.transitions(dt.year) - if transitions is None: - return dt + self.utcoffset(dt) - - # Get the transition times in UTC - dston, dstoff = transitions - - dston -= self._std_offset - dstoff -= self._std_offset - - utc_transitions = (dston, dstoff) - dt_utc = dt.replace(tzinfo=None) - - isdst = self._naive_isdst(dt_utc, utc_transitions) - - if isdst: - dt_wall = dt + self._dst_offset - else: - dt_wall = dt + self._std_offset - - _fold = int(not isdst and self.is_ambiguous(dt_wall)) - - return enfold(dt_wall, fold=_fold) - - def is_ambiguous(self, dt): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - - - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. versionadded:: 2.6.0 - """ - if not self.hasdst: - return False - - start, end = self.transitions(dt.year) - - dt = dt.replace(tzinfo=None) - return (end <= dt < end + self._dst_base_offset) - - def _isdst(self, dt): - if not self.hasdst: - return False - elif dt is None: - return None - - transitions = self.transitions(dt.year) - - if transitions is None: - return False - - dt = dt.replace(tzinfo=None) - - isdst = self._naive_isdst(dt, transitions) - - # Handle ambiguous dates - if not isdst and self.is_ambiguous(dt): - return not self._fold(dt) - else: - return isdst - - def _naive_isdst(self, dt, transitions): - dston, dstoff = transitions - - dt = dt.replace(tzinfo=None) - - if dston < dstoff: - isdst = dston <= dt < dstoff - else: - isdst = not dstoff <= dt < dston - - return isdst - - @property - def _dst_base_offset(self): - return self._dst_offset - self._std_offset - - __hash__ = None - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - return "%s(...)" % self.__class__.__name__ - - __reduce__ = object.__reduce__ diff --git a/flo-token-explorer/lib/python3.6/site-packages/dateutil/tz/_factories.py b/flo-token-explorer/lib/python3.6/site-packages/dateutil/tz/_factories.py deleted file mode 100644 index d2560eb..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/dateutil/tz/_factories.py +++ /dev/null @@ -1,73 +0,0 @@ -from datetime import timedelta -import weakref -from collections import OrderedDict - - -class _TzSingleton(type): - def __init__(cls, *args, **kwargs): - cls.__instance = None - super(_TzSingleton, cls).__init__(*args, **kwargs) - - def __call__(cls): - if cls.__instance is None: - cls.__instance = super(_TzSingleton, cls).__call__() - return cls.__instance - - -class _TzFactory(type): - def instance(cls, *args, **kwargs): - """Alternate constructor that returns a fresh 
instance""" - return type.__call__(cls, *args, **kwargs) - - -class _TzOffsetFactory(_TzFactory): - def __init__(cls, *args, **kwargs): - cls.__instances = weakref.WeakValueDictionary() - cls.__strong_cache = OrderedDict() - cls.__strong_cache_size = 8 - - def __call__(cls, name, offset): - if isinstance(offset, timedelta): - key = (name, offset.total_seconds()) - else: - key = (name, offset) - - instance = cls.__instances.get(key, None) - if instance is None: - instance = cls.__instances.setdefault(key, - cls.instance(name, offset)) - - cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance) - - # Remove an item if the strong cache is overpopulated - # TODO: Maybe this should be under a lock? - if len(cls.__strong_cache) > cls.__strong_cache_size: - cls.__strong_cache.popitem(last=False) - - return instance - - -class _TzStrFactory(_TzFactory): - def __init__(cls, *args, **kwargs): - cls.__instances = weakref.WeakValueDictionary() - cls.__strong_cache = OrderedDict() - cls.__strong_cache_size = 8 - - def __call__(cls, s, posix_offset=False): - key = (s, posix_offset) - instance = cls.__instances.get(key, None) - - if instance is None: - instance = cls.__instances.setdefault(key, - cls.instance(s, posix_offset)) - - cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance) - - - # Remove an item if the strong cache is overpopulated - # TODO: Maybe this should be under a lock? - if len(cls.__strong_cache) > cls.__strong_cache_size: - cls.__strong_cache.popitem(last=False) - - return instance - diff --git a/flo-token-explorer/lib/python3.6/site-packages/dateutil/tz/tz.py b/flo-token-explorer/lib/python3.6/site-packages/dateutil/tz/tz.py deleted file mode 100644 index d05414e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/dateutil/tz/tz.py +++ /dev/null @@ -1,1836 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module offers timezone implementations subclassing the abstract -:py:class:`datetime.tzinfo` type. There are classes to handle tzfile format -files (usually are in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`, -etc), TZ environment string (in all known formats), given ranges (with help -from relative deltas), local machine timezone, fixed offset timezone, and UTC -timezone. -""" -import datetime -import struct -import time -import sys -import os -import bisect -import weakref -from collections import OrderedDict - -import six -from six import string_types -from six.moves import _thread -from ._common import tzname_in_python2, _tzinfo -from ._common import tzrangebase, enfold -from ._common import _validate_fromutc_inputs - -from ._factories import _TzSingleton, _TzOffsetFactory -from ._factories import _TzStrFactory -try: - from .win import tzwin, tzwinlocal -except ImportError: - tzwin = tzwinlocal = None - -# For warning about rounding tzinfo -from warnings import warn - -ZERO = datetime.timedelta(0) -EPOCH = datetime.datetime.utcfromtimestamp(0) -EPOCHORDINAL = EPOCH.toordinal() - - -@six.add_metaclass(_TzSingleton) -class tzutc(datetime.tzinfo): - """ - This is a tzinfo object that represents the UTC time zone. - - **Examples:** - - .. doctest:: - - >>> from datetime import * - >>> from dateutil.tz import * - - >>> datetime.now() - datetime.datetime(2003, 9, 27, 9, 40, 1, 521290) - - >>> datetime.now(tzutc()) - datetime.datetime(2003, 9, 27, 12, 40, 12, 156379, tzinfo=tzutc()) - - >>> datetime.now(tzutc()).tzname() - 'UTC' - - .. 
versionchanged:: 2.7.0 - ``tzutc()`` is now a singleton, so repeated calls to ``tzutc()`` will - always return the same object. - - .. doctest:: - - >>> from dateutil.tz import tzutc, UTC - >>> tzutc() is tzutc() - True - >>> tzutc() is UTC - True - """ - def utcoffset(self, dt): - return ZERO - - def dst(self, dt): - return ZERO - - @tzname_in_python2 - def tzname(self, dt): - return "UTC" - - def is_ambiguous(self, dt): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - - - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. versionadded:: 2.6.0 - """ - return False - - @_validate_fromutc_inputs - def fromutc(self, dt): - """ - Fast track version of fromutc() that returns the original ``dt`` object for - any valid :py:class:`datetime.datetime` object. - """ - return dt - - def __eq__(self, other): - if not isinstance(other, (tzutc, tzoffset)): - return NotImplemented - - return (isinstance(other, tzutc) or - (isinstance(other, tzoffset) and other._offset == ZERO)) - - __hash__ = None - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - return "%s()" % self.__class__.__name__ - - __reduce__ = object.__reduce__ - - -@six.add_metaclass(_TzOffsetFactory) -class tzoffset(datetime.tzinfo): - """ - A simple class for representing a fixed offset from UTC. - - :param name: - The timezone name, to be returned when ``tzname()`` is called. - :param offset: - The time zone offset in seconds, or (since version 2.6.0) a - :py:class:`datetime.timedelta` object. - """ - def __init__(self, name, offset): - self._name = name - - try: - # Allow a timedelta - offset = offset.total_seconds() - except (TypeError, AttributeError): - pass - - self._offset = datetime.timedelta(seconds=_get_supported_offset(offset)) - - def utcoffset(self, dt): - return self._offset - - def dst(self, dt): - return ZERO - - @tzname_in_python2 - def tzname(self, dt): - return self._name - - @_validate_fromutc_inputs - def fromutc(self, dt): - return dt + self._offset - - def is_ambiguous(self, dt): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. versionadded:: 2.6.0 - """ - return False - - def __eq__(self, other): - if not isinstance(other, tzoffset): - return NotImplemented - - return self._offset == other._offset - - __hash__ = None - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - return "%s(%s, %s)" % (self.__class__.__name__, - repr(self._name), - int(self._offset.total_seconds())) - - __reduce__ = object.__reduce__ - - -class tzlocal(_tzinfo): - """ - A :class:`tzinfo` subclass built around the ``time`` timezone functions.
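A short usage sketch for the two classes above (illustrative, not from the deleted file; the ``IST`` name is arbitrary):

    from datetime import datetime, timedelta
    from dateutil.tz import tzutc, tzoffset

    # tzutc() is a singleton; tzoffset accepts seconds or a timedelta.
    IST = tzoffset("IST", timedelta(hours=5, minutes=30))
    dt = datetime(2018, 1, 1, 12, 0, tzinfo=tzutc())
    print(dt.astimezone(IST))  # 2018-01-01 17:30:00+05:30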
- """ - def __init__(self): - super(tzlocal, self).__init__() - - self._std_offset = datetime.timedelta(seconds=-time.timezone) - if time.daylight: - self._dst_offset = datetime.timedelta(seconds=-time.altzone) - else: - self._dst_offset = self._std_offset - - self._dst_saved = self._dst_offset - self._std_offset - self._hasdst = bool(self._dst_saved) - self._tznames = tuple(time.tzname) - - def utcoffset(self, dt): - if dt is None and self._hasdst: - return None - - if self._isdst(dt): - return self._dst_offset - else: - return self._std_offset - - def dst(self, dt): - if dt is None and self._hasdst: - return None - - if self._isdst(dt): - return self._dst_offset - self._std_offset - else: - return ZERO - - @tzname_in_python2 - def tzname(self, dt): - return self._tznames[self._isdst(dt)] - - def is_ambiguous(self, dt): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - - - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. versionadded:: 2.6.0 - """ - naive_dst = self._naive_is_dst(dt) - return (not naive_dst and - (naive_dst != self._naive_is_dst(dt - self._dst_saved))) - - def _naive_is_dst(self, dt): - timestamp = _datetime_to_timestamp(dt) - return time.localtime(timestamp + time.timezone).tm_isdst - - def _isdst(self, dt, fold_naive=True): - # We can't use mktime here. It is unstable when deciding if - # the hour near to a change is DST or not. - # - # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour, - # dt.minute, dt.second, dt.weekday(), 0, -1)) - # return time.localtime(timestamp).tm_isdst - # - # The code above yields the following result: - # - # >>> import tz, datetime - # >>> t = tz.tzlocal() - # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() - # 'BRDT' - # >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname() - # 'BRST' - # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() - # 'BRST' - # >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname() - # 'BRDT' - # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() - # 'BRDT' - # - # Here is a more stable implementation: - # - if not self._hasdst: - return False - - # Check for ambiguous times: - dstval = self._naive_is_dst(dt) - fold = getattr(dt, 'fold', None) - - if self.is_ambiguous(dt): - if fold is not None: - return not self._fold(dt) - else: - return True - - return dstval - - def __eq__(self, other): - if isinstance(other, tzlocal): - return (self._std_offset == other._std_offset and - self._dst_offset == other._dst_offset) - elif isinstance(other, tzutc): - return (not self._hasdst and - self._tznames[0] in {'UTC', 'GMT'} and - self._std_offset == ZERO) - elif isinstance(other, tzoffset): - return (not self._hasdst and - self._tznames[0] == other._name and - self._std_offset == other._offset) - else: - return NotImplemented - - __hash__ = None - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - return "%s()" % self.__class__.__name__ - - __reduce__ = object.__reduce__ - - -class _ttinfo(object): - __slots__ = ["offset", "delta", "isdst", "abbr", - "isstd", "isgmt", "dstoffset"] - - def __init__(self): - for attr in self.__slots__: - setattr(self, attr, None) - - def __repr__(self): - l = [] - for attr in self.__slots__: - value = getattr(self, attr) - if value is not None: - l.append("%s=%s" % (attr, repr(value))) - return "%s(%s)" % (self.__class__.__name__, ", ".join(l)) - - def __eq__(self, other): - if not isinstance(other, _ttinfo): 
- return NotImplemented - - return (self.offset == other.offset and - self.delta == other.delta and - self.isdst == other.isdst and - self.abbr == other.abbr and - self.isstd == other.isstd and - self.isgmt == other.isgmt and - self.dstoffset == other.dstoffset) - - __hash__ = None - - def __ne__(self, other): - return not (self == other) - - def __getstate__(self): - state = {} - for name in self.__slots__: - state[name] = getattr(self, name, None) - return state - - def __setstate__(self, state): - for name in self.__slots__: - if name in state: - setattr(self, name, state[name]) - - -class _tzfile(object): - """ - Lightweight class for holding the relevant transition and time zone - information read from binary tzfiles. - """ - attrs = ['trans_list', 'trans_list_utc', 'trans_idx', 'ttinfo_list', - 'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first'] - - def __init__(self, **kwargs): - for attr in self.attrs: - setattr(self, attr, kwargs.get(attr, None)) - - -class tzfile(_tzinfo): - """ - This is a ``tzinfo`` subclass that allows one to use the ``tzfile(5)`` - format timezone files to extract current and historical zone information. - - :param fileobj: - This can be an opened file stream or a file name that the time zone - information can be read from. - - :param filename: - This is an optional parameter specifying the source of the time zone - information in the event that ``fileobj`` is a file object. If omitted - and ``fileobj`` is a file stream, this parameter will be set either to - ``fileobj``'s ``name`` attribute or to ``repr(fileobj)``. - - See `Sources for Time Zone and Daylight Saving Time Data - <https://data.iana.org/time-zones/tz-link.html>`_ for more information. - Time zone files can be compiled from the `IANA Time Zone database files - <https://www.iana.org/time-zones>`_ with the `zic time zone compiler - <https://www.freebsd.org/cgi/man.cgi?query=zic&sektion=8>`_ - - .. note:: - - Only construct a ``tzfile`` directly if you have a specific timezone - file on disk that you want to read into a Python ``tzinfo`` object. - If you want to get a ``tzfile`` representing a specific IANA zone - (e.g. ``'America/New_York'``), you should call - :func:`dateutil.tz.gettz` with the zone identifier. - - - **Examples:** - - Using the US Eastern time zone as an example, we can see that a ``tzfile`` - provides time zone information for the standard and Daylight Saving offsets: - - .. testsetup:: tzfile - - from dateutil.tz import gettz - from datetime import datetime - - .. doctest:: tzfile - - >>> NYC = gettz('America/New_York') - >>> NYC - tzfile('/usr/share/zoneinfo/America/New_York') - - >>> print(datetime(2016, 1, 3, tzinfo=NYC)) # EST - 2016-01-03 00:00:00-05:00 - - >>> print(datetime(2016, 7, 7, tzinfo=NYC)) # EDT - 2016-07-07 00:00:00-04:00 - - - The ``tzfile`` structure contains a full history of the time zone, - so historical dates will also have the right offsets. For example, before - the adoption of the UTC standards, New York used local solar mean time: - - .. doctest:: tzfile - - >>> print(datetime(1901, 4, 12, tzinfo=NYC)) # LMT - 1901-04-12 00:00:00-04:56 - - And during World War II, New York was on "Eastern War Time", which was a - state of permanent daylight saving time: - - ..
doctest:: tzfile - - >>> print(datetime(1944, 2, 7, tzinfo=NYC)) # EWT - 1944-02-07 00:00:00-04:00 - - """ - - def __init__(self, fileobj, filename=None): - super(tzfile, self).__init__() - - file_opened_here = False - if isinstance(fileobj, string_types): - self._filename = fileobj - fileobj = open(fileobj, 'rb') - file_opened_here = True - elif filename is not None: - self._filename = filename - elif hasattr(fileobj, "name"): - self._filename = fileobj.name - else: - self._filename = repr(fileobj) - - if fileobj is not None: - if not file_opened_here: - fileobj = _nullcontext(fileobj) - - with fileobj as file_stream: - tzobj = self._read_tzfile(file_stream) - - self._set_tzdata(tzobj) - - def _set_tzdata(self, tzobj): - """ Set the time zone data of this object from a _tzfile object """ - # Copy the relevant attributes over as private attributes - for attr in _tzfile.attrs: - setattr(self, '_' + attr, getattr(tzobj, attr)) - - def _read_tzfile(self, fileobj): - out = _tzfile() - - # From tzfile(5): - # - # The time zone information files used by tzset(3) - # begin with the magic characters "TZif" to identify - # them as time zone information files, followed by - # sixteen bytes reserved for future use, followed by - # six four-byte values of type long, written in a - # ``standard'' byte order (the high-order byte - # of the value is written first). - if fileobj.read(4).decode() != "TZif": - raise ValueError("magic not found") - - fileobj.read(16) - - ( - # The number of UTC/local indicators stored in the file. - ttisgmtcnt, - - # The number of standard/wall indicators stored in the file. - ttisstdcnt, - - # The number of leap seconds for which data is - # stored in the file. - leapcnt, - - # The number of "transition times" for which data - # is stored in the file. - timecnt, - - # The number of "local time types" for which data - # is stored in the file (must not be zero). - typecnt, - - # The number of characters of "time zone - # abbreviation strings" stored in the file. - charcnt, - - ) = struct.unpack(">6l", fileobj.read(24)) - - # The above header is followed by tzh_timecnt four-byte - # values of type long, sorted in ascending order. - # These values are written in ``standard'' byte order. - # Each is used as a transition time (as returned by - # time(2)) at which the rules for computing local time - # change. - - if timecnt: - out.trans_list_utc = list(struct.unpack(">%dl" % timecnt, - fileobj.read(timecnt*4))) - else: - out.trans_list_utc = [] - - # Next come tzh_timecnt one-byte values of type unsigned - # char; each one tells which of the different types of - # ``local time'' types described in the file is associated - # with the same-indexed transition time. These values - # serve as indices into an array of ttinfo structures that - # appears next in the file. - - if timecnt: - out.trans_idx = struct.unpack(">%dB" % timecnt, - fileobj.read(timecnt)) - else: - out.trans_idx = [] - - # Each ttinfo structure is written as a four-byte value - # for tt_gmtoff of type long, in a standard byte - # order, followed by a one-byte value for tt_isdst - # and a one-byte value for tt_abbrind. In each - # structure, tt_gmtoff gives the number of - # seconds to be added to UTC, tt_isdst tells whether - # tm_isdst should be set by localtime(3), and - # tt_abbrind serves as an index into the array of - # time zone abbreviation characters that follow the - # ttinfo structure(s) in the file. 
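The header layout the comments above describe can be checked independently of ``tzfile``. A minimal sketch, assuming a zoneinfo file exists at the usual Linux path:

    import struct

    # Mirror the header parse in _read_tzfile: 4-byte magic, 16 reserved
    # bytes (including the version byte), then six big-endian 32-bit counts.
    with open("/usr/share/zoneinfo/UTC", "rb") as f:
        if f.read(4) != b"TZif":
            raise ValueError("magic not found")
        f.read(16)
        (ttisgmtcnt, ttisstdcnt, leapcnt,
         timecnt, typecnt, charcnt) = struct.unpack(">6l", f.read(24))
    print(timecnt, typecnt, charcnt)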
- - ttinfo = [] - - for i in range(typecnt): - ttinfo.append(struct.unpack(">lbb", fileobj.read(6))) - - abbr = fileobj.read(charcnt).decode() - - # Then there are tzh_leapcnt pairs of four-byte - # values, written in standard byte order; the - # first value of each pair gives the time (as - # returned by time(2)) at which a leap second - # occurs; the second gives the total number of - # leap seconds to be applied after the given time. - # The pairs of values are sorted in ascending order - # by time. - - # Not used, for now (but seek for correct file position) - if leapcnt: - fileobj.seek(leapcnt * 8, os.SEEK_CUR) - - # Then there are tzh_ttisstdcnt standard/wall - # indicators, each stored as a one-byte value; - # they tell whether the transition times associated - # with local time types were specified as standard - # time or wall clock time, and are used when - # a time zone file is used in handling POSIX-style - # time zone environment variables. - - if ttisstdcnt: - isstd = struct.unpack(">%db" % ttisstdcnt, - fileobj.read(ttisstdcnt)) - - # Finally, there are tzh_ttisgmtcnt UTC/local - # indicators, each stored as a one-byte value; - # they tell whether the transition times associated - # with local time types were specified as UTC or - # local time, and are used when a time zone file - # is used in handling POSIX-style time zone envi- - # ronment variables. - - if ttisgmtcnt: - isgmt = struct.unpack(">%db" % ttisgmtcnt, - fileobj.read(ttisgmtcnt)) - - # Build ttinfo list - out.ttinfo_list = [] - for i in range(typecnt): - gmtoff, isdst, abbrind = ttinfo[i] - gmtoff = _get_supported_offset(gmtoff) - tti = _ttinfo() - tti.offset = gmtoff - tti.dstoffset = datetime.timedelta(0) - tti.delta = datetime.timedelta(seconds=gmtoff) - tti.isdst = isdst - tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)] - tti.isstd = (ttisstdcnt > i and isstd[i] != 0) - tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0) - out.ttinfo_list.append(tti) - - # Replace ttinfo indexes for ttinfo objects. - out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx] - - # Set standard, dst, and before ttinfos. before will be - # used when a given time is before any transitions, - # and will be set to the first non-dst ttinfo, or to - # the first dst, if all of them are dst. - out.ttinfo_std = None - out.ttinfo_dst = None - out.ttinfo_before = None - if out.ttinfo_list: - if not out.trans_list_utc: - out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0] - else: - for i in range(timecnt-1, -1, -1): - tti = out.trans_idx[i] - if not out.ttinfo_std and not tti.isdst: - out.ttinfo_std = tti - elif not out.ttinfo_dst and tti.isdst: - out.ttinfo_dst = tti - - if out.ttinfo_std and out.ttinfo_dst: - break - else: - if out.ttinfo_dst and not out.ttinfo_std: - out.ttinfo_std = out.ttinfo_dst - - for tti in out.ttinfo_list: - if not tti.isdst: - out.ttinfo_before = tti - break - else: - out.ttinfo_before = out.ttinfo_list[0] - - # Now fix transition times to become relative to wall time. - # - # I'm not sure about this. In my tests, the tz source file - # is setup to wall time, and in the binary file isstd and - # isgmt are off, so it should be in wall time. OTOH, it's - # always in gmt time. Let me know if you have comments - # about this. 
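Once the wall-time transition list built below exists, the lookup performed later by ``_find_last_transition`` reduces to a binary search; schematically (an illustrative sketch with made-up transition timestamps):

    import bisect

    # Hypothetical, already-sorted transition timestamps (seconds since epoch).
    trans_list = [1520733600, 1541293200]

    def last_transition_index(timestamp):
        # A timestamp equal to a transition time belongs to the "after"
        # period, hence bisect_right; -1 means "before the first transition".
        return bisect.bisect_right(trans_list, timestamp) - 1

    print(last_transition_index(1520733599))  # -1
    print(last_transition_index(1530000000))  # 0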
- lastdst = None - lastoffset = None - lastdstoffset = None - lastbaseoffset = None - out.trans_list = [] - - for i, tti in enumerate(out.trans_idx): - offset = tti.offset - dstoffset = 0 - - if lastdst is not None: - if tti.isdst: - if not lastdst: - dstoffset = offset - lastoffset - - if not dstoffset and lastdstoffset: - dstoffset = lastdstoffset - - tti.dstoffset = datetime.timedelta(seconds=dstoffset) - lastdstoffset = dstoffset - - # If a time zone changes its base offset during a DST transition, - # then you need to adjust by the previous base offset to get the - # transition time in local time. Otherwise you use the current - # base offset. Ideally, I would have some mathematical proof of - # why this is true, but I haven't really thought about it enough. - baseoffset = offset - dstoffset - adjustment = baseoffset - if (lastbaseoffset is not None and baseoffset != lastbaseoffset - and tti.isdst != lastdst): - # The base DST has changed - adjustment = lastbaseoffset - - lastdst = tti.isdst - lastoffset = offset - lastbaseoffset = baseoffset - - out.trans_list.append(out.trans_list_utc[i] + adjustment) - - out.trans_idx = tuple(out.trans_idx) - out.trans_list = tuple(out.trans_list) - out.trans_list_utc = tuple(out.trans_list_utc) - - return out - - def _find_last_transition(self, dt, in_utc=False): - # If there's no list, there are no transitions to find - if not self._trans_list: - return None - - timestamp = _datetime_to_timestamp(dt) - - # Find where the timestamp fits in the transition list - if the - # timestamp is a transition time, it's part of the "after" period. - trans_list = self._trans_list_utc if in_utc else self._trans_list - idx = bisect.bisect_right(trans_list, timestamp) - - # We want to know when the previous transition was, so subtract off 1 - return idx - 1 - - def _get_ttinfo(self, idx): - # For no list or after the last transition, default to _ttinfo_std - if idx is None or (idx + 1) >= len(self._trans_list): - return self._ttinfo_std - - # If there is a list and the time is before it, return _ttinfo_before - if idx < 0: - return self._ttinfo_before - - return self._trans_idx[idx] - - def _find_ttinfo(self, dt): - idx = self._resolve_ambiguous_time(dt) - - return self._get_ttinfo(idx) - - def fromutc(self, dt): - """ - The ``tzfile`` implementation of :py:func:`datetime.tzinfo.fromutc`. - - :param dt: - A :py:class:`datetime.datetime` object. - - :raises TypeError: - Raised if ``dt`` is not a :py:class:`datetime.datetime` object. - - :raises ValueError: - Raised if this is called with a ``dt`` which does not have this - ``tzinfo`` attached. - - :return: - Returns a :py:class:`datetime.datetime` object representing the - wall time in ``self``'s time zone. - """ - # These isinstance checks are in datetime.tzinfo, so we'll preserve - # them, even if we don't care about duck typing. - if not isinstance(dt, datetime.datetime): - raise TypeError("fromutc() requires a datetime argument") - - if dt.tzinfo is not self: - raise ValueError("dt.tzinfo is not self") - - # First treat UTC as wall time and get the transition we're in. - idx = self._find_last_transition(dt, in_utc=True) - tti = self._get_ttinfo(idx) - - dt_out = dt + datetime.timedelta(seconds=tti.offset) - - fold = self.is_ambiguous(dt_out, idx=idx) - - return enfold(dt_out, fold=int(fold)) - - def is_ambiguous(self, dt, idx=None): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. 
- - - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. versionadded:: 2.6.0 - """ - if idx is None: - idx = self._find_last_transition(dt) - - # Calculate the difference in offsets from current to previous - timestamp = _datetime_to_timestamp(dt) - tti = self._get_ttinfo(idx) - - if idx is None or idx <= 0: - return False - - od = self._get_ttinfo(idx - 1).offset - tti.offset - tt = self._trans_list[idx] # Transition time - - return timestamp < tt + od - - def _resolve_ambiguous_time(self, dt): - idx = self._find_last_transition(dt) - - # If we have no transitions, return the index - _fold = self._fold(dt) - if idx is None or idx == 0: - return idx - - # If it's ambiguous and we're in a fold, shift to a different index. - idx_offset = int(not _fold and self.is_ambiguous(dt, idx)) - - return idx - idx_offset - - def utcoffset(self, dt): - if dt is None: - return None - - if not self._ttinfo_std: - return ZERO - - return self._find_ttinfo(dt).delta - - def dst(self, dt): - if dt is None: - return None - - if not self._ttinfo_dst: - return ZERO - - tti = self._find_ttinfo(dt) - - if not tti.isdst: - return ZERO - - # The documentation says that utcoffset()-dst() must - # be constant for every dt. - return tti.dstoffset - - @tzname_in_python2 - def tzname(self, dt): - if not self._ttinfo_std or dt is None: - return None - return self._find_ttinfo(dt).abbr - - def __eq__(self, other): - if not isinstance(other, tzfile): - return NotImplemented - return (self._trans_list == other._trans_list and - self._trans_idx == other._trans_idx and - self._ttinfo_list == other._ttinfo_list) - - __hash__ = None - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, repr(self._filename)) - - def __reduce__(self): - return self.__reduce_ex__(None) - - def __reduce_ex__(self, protocol): - return (self.__class__, (None, self._filename), self.__dict__) - - -class tzrange(tzrangebase): - """ - The ``tzrange`` object is a time zone specified by a set of offsets and - abbreviations, equivalent to the way the ``TZ`` variable can be specified - in POSIX-like systems, but using Python delta objects to specify DST - start, end and offsets. - - :param stdabbr: - The abbreviation for standard time (e.g. ``'EST'``). - - :param stdoffset: - An integer or :class:`datetime.timedelta` object or equivalent - specifying the base offset from UTC. - - If unspecified, +00:00 is used. - - :param dstabbr: - The abbreviation for DST / "Summer" time (e.g. ``'EDT'``). - - If specified, with no other DST information, DST is assumed to occur - and the default behavior of ``dstoffset``, ``start`` and ``end`` is - used. If unspecified and no other DST information is specified, it - is assumed that this zone has no DST. - - If this is unspecified and other DST information *is* specified, - DST occurs in the zone but the time zone abbreviation is left - unchanged. - - :param dstoffset: - An integer or :class:`datetime.timedelta` object or equivalent - specifying the UTC offset during DST. If unspecified and any other DST - information is specified, it is assumed to be the STD offset +1 hour. - - :param start: - A :class:`relativedelta.relativedelta` object or equivalent specifying - the time and time of year that daylight savings time starts.
To - specify, for example, that DST starts at 2AM on the 2nd Sunday in - March, pass: - - ``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))`` - - If unspecified and any other DST information is specified, the default - value is 2 AM on the first Sunday in April. - - :param end: - A :class:`relativedelta.relativedelta` object or equivalent - representing the time and time of year that daylight savings time - ends, with the same specification method as in ``start``. One note is - that this should point to the first time in the *standard* zone, so if - a transition occurs at 2AM in the DST zone and the clocks are set back - 1 hour to 1AM, set the ``hours`` parameter to +1. - - - **Examples:** - - .. testsetup:: tzrange - - from dateutil.tz import tzrange, tzstr - - .. doctest:: tzrange - - >>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT") - True - - >>> from dateutil.relativedelta import * - >>> range1 = tzrange("EST", -18000, "EDT") - >>> range2 = tzrange("EST", -18000, "EDT", -14400, - ... relativedelta(hours=+2, month=4, day=1, - ... weekday=SU(+1)), - ... relativedelta(hours=+1, month=10, day=31, - ... weekday=SU(-1))) - >>> tzstr('EST5EDT') == range1 == range2 - True - - """ - def __init__(self, stdabbr, stdoffset=None, - dstabbr=None, dstoffset=None, - start=None, end=None): - - global relativedelta - from dateutil import relativedelta - - self._std_abbr = stdabbr - self._dst_abbr = dstabbr - - try: - stdoffset = stdoffset.total_seconds() - except (TypeError, AttributeError): - pass - - try: - dstoffset = dstoffset.total_seconds() - except (TypeError, AttributeError): - pass - - if stdoffset is not None: - self._std_offset = datetime.timedelta(seconds=stdoffset) - else: - self._std_offset = ZERO - - if dstoffset is not None: - self._dst_offset = datetime.timedelta(seconds=dstoffset) - elif dstabbr and stdoffset is not None: - self._dst_offset = self._std_offset + datetime.timedelta(hours=+1) - else: - self._dst_offset = ZERO - - if dstabbr and start is None: - self._start_delta = relativedelta.relativedelta( - hours=+2, month=4, day=1, weekday=relativedelta.SU(+1)) - else: - self._start_delta = start - - if dstabbr and end is None: - self._end_delta = relativedelta.relativedelta( - hours=+1, month=10, day=31, weekday=relativedelta.SU(-1)) - else: - self._end_delta = end - - self._dst_base_offset_ = self._dst_offset - self._std_offset - self.hasdst = bool(self._start_delta) - - def transitions(self, year): - """ - For a given year, get the DST on and off transition times, expressed - always on the standard time side. For zones with no transitions, this - function returns ``None``. - - :param year: - The year whose transitions you would like to query. - - :return: - Returns a :class:`tuple` of :class:`datetime.datetime` objects, - ``(dston, dstoff)`` for zones with an annual DST transition, or - ``None`` for fixed offset zones. 
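As a concrete illustration of ``transitions`` (a sketch, not from the deleted file, using the post-2007 US Eastern rules):

    from dateutil.relativedelta import relativedelta, SU
    from dateutil.tz import tzrange

    # DST starts the 2nd Sunday in March at 02:00 standard time and ends
    # the 1st Sunday in November at 01:00 standard time.
    EASTERN = tzrange("EST", -18000, "EDT", -14400,
                      start=relativedelta(month=3, day=1,
                                          weekday=SU(+2), hours=2),
                      end=relativedelta(month=11, day=1,
                                        weekday=SU(+1), hours=1))
    print(EASTERN.transitions(2018))
    # (datetime.datetime(2018, 3, 11, 2, 0), datetime.datetime(2018, 11, 4, 1, 0))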
- """ - if not self.hasdst: - return None - - base_year = datetime.datetime(year, 1, 1) - - start = base_year + self._start_delta - end = base_year + self._end_delta - - return (start, end) - - def __eq__(self, other): - if not isinstance(other, tzrange): - return NotImplemented - - return (self._std_abbr == other._std_abbr and - self._dst_abbr == other._dst_abbr and - self._std_offset == other._std_offset and - self._dst_offset == other._dst_offset and - self._start_delta == other._start_delta and - self._end_delta == other._end_delta) - - @property - def _dst_base_offset(self): - return self._dst_base_offset_ - - -@six.add_metaclass(_TzStrFactory) -class tzstr(tzrange): - """ - ``tzstr`` objects are time zone objects specified by a time-zone string as - it would be passed to a ``TZ`` variable on POSIX-style systems (see - the `GNU C Library: TZ Variable`_ for more details). - - There is one notable exception, which is that POSIX-style time zones use an - inverted offset format, so normally ``GMT+3`` would be parsed as an offset - 3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an - offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX - behavior, pass a ``True`` value to ``posix_offset``. - - The :class:`tzrange` object provides the same functionality, but is - specified using :class:`relativedelta.relativedelta` objects. rather than - strings. - - :param s: - A time zone string in ``TZ`` variable format. This can be a - :class:`bytes` (2.x: :class:`str`), :class:`str` (2.x: - :class:`unicode`) or a stream emitting unicode characters - (e.g. :class:`StringIO`). - - :param posix_offset: - Optional. If set to ``True``, interpret strings such as ``GMT+3`` or - ``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the - POSIX standard. - - .. caution:: - - Prior to version 2.7.0, this function also supported time zones - in the format: - - * ``EST5EDT,4,0,6,7200,10,0,26,7200,3600`` - * ``EST5EDT,4,1,0,7200,10,-1,0,7200,3600`` - - This format is non-standard and has been deprecated; this function - will raise a :class:`DeprecatedTZFormatWarning` until - support is removed in a future version. - - .. _`GNU C Library: TZ Variable`: - https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html - """ - def __init__(self, s, posix_offset=False): - global parser - from dateutil.parser import _parser as parser - - self._s = s - - res = parser._parsetz(s) - if res is None or res.any_unused_tokens: - raise ValueError("unknown string format") - - # Here we break the compatibility with the TZ variable handling. - # GMT-3 actually *means* the timezone -3. - if res.stdabbr in ("GMT", "UTC") and not posix_offset: - res.stdoffset *= -1 - - # We must initialize it first, since _delta() needs - # _std_offset and _dst_offset set. Use False in start/end - # to avoid building it two times. 
- tzrange.__init__(self, res.stdabbr, res.stdoffset, - res.dstabbr, res.dstoffset, - start=False, end=False) - - if not res.dstabbr: - self._start_delta = None - self._end_delta = None - else: - self._start_delta = self._delta(res.start) - if self._start_delta: - self._end_delta = self._delta(res.end, isend=1) - - self.hasdst = bool(self._start_delta) - - def _delta(self, x, isend=0): - from dateutil import relativedelta - kwargs = {} - if x.month is not None: - kwargs["month"] = x.month - if x.weekday is not None: - kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week) - if x.week > 0: - kwargs["day"] = 1 - else: - kwargs["day"] = 31 - elif x.day: - kwargs["day"] = x.day - elif x.yday is not None: - kwargs["yearday"] = x.yday - elif x.jyday is not None: - kwargs["nlyearday"] = x.jyday - if not kwargs: - # Default is to start on first sunday of april, and end - # on last sunday of october. - if not isend: - kwargs["month"] = 4 - kwargs["day"] = 1 - kwargs["weekday"] = relativedelta.SU(+1) - else: - kwargs["month"] = 10 - kwargs["day"] = 31 - kwargs["weekday"] = relativedelta.SU(-1) - if x.time is not None: - kwargs["seconds"] = x.time - else: - # Default is 2AM. - kwargs["seconds"] = 7200 - if isend: - # Convert to standard time, to follow the documented way - # of working with the extra hour. See the documentation - # of the tzinfo class. - delta = self._dst_offset - self._std_offset - kwargs["seconds"] -= delta.seconds + delta.days * 86400 - return relativedelta.relativedelta(**kwargs) - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, repr(self._s)) - - -class _tzicalvtzcomp(object): - def __init__(self, tzoffsetfrom, tzoffsetto, isdst, - tzname=None, rrule=None): - self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom) - self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto) - self.tzoffsetdiff = self.tzoffsetto - self.tzoffsetfrom - self.isdst = isdst - self.tzname = tzname - self.rrule = rrule - - -class _tzicalvtz(_tzinfo): - def __init__(self, tzid, comps=[]): - super(_tzicalvtz, self).__init__() - - self._tzid = tzid - self._comps = comps - self._cachedate = [] - self._cachecomp = [] - self._cache_lock = _thread.allocate_lock() - - def _find_comp(self, dt): - if len(self._comps) == 1: - return self._comps[0] - - dt = dt.replace(tzinfo=None) - - try: - with self._cache_lock: - return self._cachecomp[self._cachedate.index( - (dt, self._fold(dt)))] - except ValueError: - pass - - lastcompdt = None - lastcomp = None - - for comp in self._comps: - compdt = self._find_compdt(comp, dt) - - if compdt and (not lastcompdt or lastcompdt < compdt): - lastcompdt = compdt - lastcomp = comp - - if not lastcomp: - # RFC says nothing about what to do when a given - # time is before the first onset date. We'll look for the - # first standard component, or the first component, if - # none is found. 
- for comp in self._comps: - if not comp.isdst: - lastcomp = comp - break - else: - lastcomp = self._comps[0] - - with self._cache_lock: - self._cachedate.insert(0, (dt, self._fold(dt))) - self._cachecomp.insert(0, lastcomp) - - if len(self._cachedate) > 10: - self._cachedate.pop() - self._cachecomp.pop() - - return lastcomp - - def _find_compdt(self, comp, dt): - if comp.tzoffsetdiff < ZERO and self._fold(dt): - dt -= comp.tzoffsetdiff - - compdt = comp.rrule.before(dt, inc=True) - - return compdt - - def utcoffset(self, dt): - if dt is None: - return None - - return self._find_comp(dt).tzoffsetto - - def dst(self, dt): - comp = self._find_comp(dt) - if comp.isdst: - return comp.tzoffsetdiff - else: - return ZERO - - @tzname_in_python2 - def tzname(self, dt): - return self._find_comp(dt).tzname - - def __repr__(self): - return "<tzicalvtz %s>" % repr(self._tzid) - - __reduce__ = object.__reduce__ - - -class tzical(object): - """ - This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure - as set out in `RFC 5545`_ Section 3.6.5 into one or more `tzinfo` objects. - - :param `fileobj`: - A file or stream in iCalendar format, which should be UTF-8 encoded - with CRLF endings. - - .. _`RFC 5545`: https://tools.ietf.org/html/rfc5545 - """ - def __init__(self, fileobj): - global rrule - from dateutil import rrule - - if isinstance(fileobj, string_types): - self._s = fileobj - # ical should be encoded in UTF-8 with CRLF - fileobj = open(fileobj, 'r') - else: - self._s = getattr(fileobj, 'name', repr(fileobj)) - fileobj = _nullcontext(fileobj) - - self._vtz = {} - - with fileobj as fobj: - self._parse_rfc(fobj.read()) - - def keys(self): - """ - Retrieves the available time zones as a list. - """ - return list(self._vtz.keys()) - - def get(self, tzid=None): - """ - Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``. - - :param tzid: - If there is exactly one time zone available, omitting ``tzid`` - or passing a :py:const:`None` value returns it. Otherwise a valid - key (which can be retrieved from :func:`keys`) is required. - - :raises ValueError: - Raised if ``tzid`` is not specified but there are either more - or fewer than one zone defined. - - :returns: - Returns either a :py:class:`datetime.tzinfo` object representing - the relevant time zone or :py:const:`None` if the ``tzid`` was - not found.
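A minimal round trip through ``tzical`` (an illustrative sketch; the single fixed-offset ``VTIMEZONE`` below is made up):

    from datetime import datetime
    from io import StringIO
    from dateutil.tz import tzical

    VTZ = StringIO("\r\n".join([
        "BEGIN:VTIMEZONE",
        "TZID:Example/Fixed",
        "BEGIN:STANDARD",
        "DTSTART:19700101T000000",
        "TZOFFSETFROM:+0100",
        "TZOFFSETTO:+0100",
        "TZNAME:FIX",
        "END:STANDARD",
        "END:VTIMEZONE",
    ]))
    ical = tzical(VTZ)
    print(ical.keys())                                 # ['Example/Fixed']
    print(ical.get().utcoffset(datetime(2020, 1, 1)))  # 1:00:00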
- """ - if tzid is None: - if len(self._vtz) == 0: - raise ValueError("no timezones defined") - elif len(self._vtz) > 1: - raise ValueError("more than one timezone available") - tzid = next(iter(self._vtz)) - - return self._vtz.get(tzid) - - def _parse_offset(self, s): - s = s.strip() - if not s: - raise ValueError("empty offset") - if s[0] in ('+', '-'): - signal = (-1, +1)[s[0] == '+'] - s = s[1:] - else: - signal = +1 - if len(s) == 4: - return (int(s[:2]) * 3600 + int(s[2:]) * 60) * signal - elif len(s) == 6: - return (int(s[:2]) * 3600 + int(s[2:4]) * 60 + int(s[4:])) * signal - else: - raise ValueError("invalid offset: " + s) - - def _parse_rfc(self, s): - lines = s.splitlines() - if not lines: - raise ValueError("empty string") - - # Unfold - i = 0 - while i < len(lines): - line = lines[i].rstrip() - if not line: - del lines[i] - elif i > 0 and line[0] == " ": - lines[i-1] += line[1:] - del lines[i] - else: - i += 1 - - tzid = None - comps = [] - invtz = False - comptype = None - for line in lines: - if not line: - continue - name, value = line.split(':', 1) - parms = name.split(';') - if not parms: - raise ValueError("empty property name") - name = parms[0].upper() - parms = parms[1:] - if invtz: - if name == "BEGIN": - if value in ("STANDARD", "DAYLIGHT"): - # Process component - pass - else: - raise ValueError("unknown component: "+value) - comptype = value - founddtstart = False - tzoffsetfrom = None - tzoffsetto = None - rrulelines = [] - tzname = None - elif name == "END": - if value == "VTIMEZONE": - if comptype: - raise ValueError("component not closed: "+comptype) - if not tzid: - raise ValueError("mandatory TZID not found") - if not comps: - raise ValueError( - "at least one component is needed") - # Process vtimezone - self._vtz[tzid] = _tzicalvtz(tzid, comps) - invtz = False - elif value == comptype: - if not founddtstart: - raise ValueError("mandatory DTSTART not found") - if tzoffsetfrom is None: - raise ValueError( - "mandatory TZOFFSETFROM not found") - if tzoffsetto is None: - raise ValueError( - "mandatory TZOFFSETFROM not found") - # Process component - rr = None - if rrulelines: - rr = rrule.rrulestr("\n".join(rrulelines), - compatible=True, - ignoretz=True, - cache=True) - comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto, - (comptype == "DAYLIGHT"), - tzname, rr) - comps.append(comp) - comptype = None - else: - raise ValueError("invalid component end: "+value) - elif comptype: - if name == "DTSTART": - # DTSTART in VTIMEZONE takes a subset of valid RRULE - # values under RFC 5545. 
- for parm in parms: - if parm != 'VALUE=DATE-TIME': - msg = ('Unsupported DTSTART param in ' + - 'VTIMEZONE: ' + parm) - raise ValueError(msg) - rrulelines.append(line) - founddtstart = True - elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"): - rrulelines.append(line) - elif name == "TZOFFSETFROM": - if parms: - raise ValueError( - "unsupported %s parm: %s " % (name, parms[0])) - tzoffsetfrom = self._parse_offset(value) - elif name == "TZOFFSETTO": - if parms: - raise ValueError( - "unsupported TZOFFSETTO parm: "+parms[0]) - tzoffsetto = self._parse_offset(value) - elif name == "TZNAME": - if parms: - raise ValueError( - "unsupported TZNAME parm: "+parms[0]) - tzname = value - elif name == "COMMENT": - pass - else: - raise ValueError("unsupported property: "+name) - else: - if name == "TZID": - if parms: - raise ValueError( - "unsupported TZID parm: "+parms[0]) - tzid = value - elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"): - pass - else: - raise ValueError("unsupported property: "+name) - elif name == "BEGIN" and value == "VTIMEZONE": - tzid = None - comps = [] - invtz = True - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, repr(self._s)) - - -if sys.platform != "win32": - TZFILES = ["/etc/localtime", "localtime"] - TZPATHS = ["/usr/share/zoneinfo", - "/usr/lib/zoneinfo", - "/usr/share/lib/zoneinfo", - "/etc/zoneinfo"] -else: - TZFILES = [] - TZPATHS = [] - - -def __get_gettz(): - tzlocal_classes = (tzlocal,) - if tzwinlocal is not None: - tzlocal_classes += (tzwinlocal,) - - class GettzFunc(object): - """ - Retrieve a time zone object from a string representation - - This function is intended to retrieve the :py:class:`tzinfo` subclass - that best represents the time zone that would be used if a POSIX - `TZ variable`_ were set to the same value. - - If no argument or an empty string is passed to ``gettz``, local time - is returned: - - .. code-block:: python3 - - >>> gettz() - tzfile('/etc/localtime') - - This function is also the preferred way to map IANA tz database keys - to :class:`tzfile` objects: - - .. code-block:: python3 - - >>> gettz('Pacific/Kiritimati') - tzfile('/usr/share/zoneinfo/Pacific/Kiritimati') - - On Windows, the standard is extended to include the Windows-specific - zone names provided by the operating system: - - .. code-block:: python3 - - >>> gettz('Egypt Standard Time') - tzwin('Egypt Standard Time') - - Passing a GNU ``TZ`` style string time zone specification returns a - :class:`tzstr` object: - - .. code-block:: python3 - - >>> gettz('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3') - tzstr('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3') - - :param name: - A time zone name (IANA, or, on Windows, Windows keys), location of - a ``tzfile(5)`` zoneinfo file or ``TZ`` variable style time zone - specifier. An empty string, no argument or ``None`` is interpreted - as local time. - - :return: - Returns an instance of one of ``dateutil``'s :py:class:`tzinfo` - subclasses. - - .. versionchanged:: 2.7.0 - - After version 2.7.0, any two calls to ``gettz`` using the same - input strings will return the same object: - - .. code-block:: python3 - - >>> tz.gettz('America/Chicago') is tz.gettz('America/Chicago') - True - - In addition to improving performance, this ensures that - `"same zone" semantics`_ are used for datetimes in the same zone. - - - .. _`TZ variable`: - https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html - - .. 
_`"same zone" semantics`: - https://blog.ganssle.io/articles/2018/02/aware-datetime-arithmetic.html - """ - def __init__(self): - - self.__instances = weakref.WeakValueDictionary() - self.__strong_cache_size = 8 - self.__strong_cache = OrderedDict() - self._cache_lock = _thread.allocate_lock() - - def __call__(self, name=None): - with self._cache_lock: - rv = self.__instances.get(name, None) - - if rv is None: - rv = self.nocache(name=name) - if not (name is None - or isinstance(rv, tzlocal_classes) - or rv is None): - # tzlocal is slightly more complicated than the other - # time zone providers because it depends on environment - # at construction time, so don't cache that. - # - # We also cannot store weak references to None, so we - # will also not store that. - self.__instances[name] = rv - else: - # No need for strong caching, return immediately - return rv - - self.__strong_cache[name] = self.__strong_cache.pop(name, rv) - - if len(self.__strong_cache) > self.__strong_cache_size: - self.__strong_cache.popitem(last=False) - - return rv - - def set_cache_size(self, size): - with self._cache_lock: - self.__strong_cache_size = size - while len(self.__strong_cache) > size: - self.__strong_cache.popitem(last=False) - - def cache_clear(self): - with self._cache_lock: - self.__instances = weakref.WeakValueDictionary() - self.__strong_cache.clear() - - @staticmethod - def nocache(name=None): - """A non-cached version of gettz""" - tz = None - if not name: - try: - name = os.environ["TZ"] - except KeyError: - pass - if name is None or name == ":": - for filepath in TZFILES: - if not os.path.isabs(filepath): - filename = filepath - for path in TZPATHS: - filepath = os.path.join(path, filename) - if os.path.isfile(filepath): - break - else: - continue - if os.path.isfile(filepath): - try: - tz = tzfile(filepath) - break - except (IOError, OSError, ValueError): - pass - else: - tz = tzlocal() - else: - if name.startswith(":"): - name = name[1:] - if os.path.isabs(name): - if os.path.isfile(name): - tz = tzfile(name) - else: - tz = None - else: - for path in TZPATHS: - filepath = os.path.join(path, name) - if not os.path.isfile(filepath): - filepath = filepath.replace(' ', '_') - if not os.path.isfile(filepath): - continue - try: - tz = tzfile(filepath) - break - except (IOError, OSError, ValueError): - pass - else: - tz = None - if tzwin is not None: - try: - tz = tzwin(name) - except (WindowsError, UnicodeEncodeError): - # UnicodeEncodeError is for Python 2.7 compat - tz = None - - if not tz: - from dateutil.zoneinfo import get_zonefile_instance - tz = get_zonefile_instance().get(name) - - if not tz: - for c in name: - # name is not a tzstr unless it has at least - # one offset. For short values of "name", an - # explicit for loop seems to be the fastest way - # To determine if a string contains a digit - if c in "0123456789": - try: - tz = tzstr(name) - except ValueError: - pass - break - else: - if name in ("GMT", "UTC"): - tz = tzutc() - elif name in time.tzname: - tz = tzlocal() - return tz - - return GettzFunc() - - -gettz = __get_gettz() -del __get_gettz - - -def datetime_exists(dt, tz=None): - """ - Given a datetime and a time zone, determine whether or not a given datetime - would fall in a gap. - - :param dt: - A :class:`datetime.datetime` (whose time zone will be ignored if ``tz`` - is provided.) - - :param tz: - A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If - ``None`` or not provided, the datetime's own time zone will be used. 
- - :return: - Returns a boolean value whether or not the "wall time" exists in - ``tz``. - - .. versionadded:: 2.7.0 - """ - if tz is None: - if dt.tzinfo is None: - raise ValueError('Datetime is naive and no time zone provided.') - tz = dt.tzinfo - - dt = dt.replace(tzinfo=None) - - # This is essentially a test of whether or not the datetime can survive - # a round trip to UTC. - dt_rt = dt.replace(tzinfo=tz).astimezone(tzutc()).astimezone(tz) - dt_rt = dt_rt.replace(tzinfo=None) - - return dt == dt_rt - - -def datetime_ambiguous(dt, tz=None): - """ - Given a datetime and a time zone, determine whether or not a given datetime - is ambiguous (i.e if there are two times differentiated only by their DST - status). - - :param dt: - A :class:`datetime.datetime` (whose time zone will be ignored if ``tz`` - is provided.) - - :param tz: - A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If - ``None`` or not provided, the datetime's own time zone will be used. - - :return: - Returns a boolean value whether or not the "wall time" is ambiguous in - ``tz``. - - .. versionadded:: 2.6.0 - """ - if tz is None: - if dt.tzinfo is None: - raise ValueError('Datetime is naive and no time zone provided.') - - tz = dt.tzinfo - - # If a time zone defines its own "is_ambiguous" function, we'll use that. - is_ambiguous_fn = getattr(tz, 'is_ambiguous', None) - if is_ambiguous_fn is not None: - try: - return tz.is_ambiguous(dt) - except Exception: - pass - - # If it doesn't come out and tell us it's ambiguous, we'll just check if - # the fold attribute has any effect on this particular date and time. - dt = dt.replace(tzinfo=tz) - wall_0 = enfold(dt, fold=0) - wall_1 = enfold(dt, fold=1) - - same_offset = wall_0.utcoffset() == wall_1.utcoffset() - same_dst = wall_0.dst() == wall_1.dst() - - return not (same_offset and same_dst) - - -def resolve_imaginary(dt): - """ - Given a datetime that may be imaginary, return an existing datetime. - - This function assumes that an imaginary datetime represents what the - wall time would be in a zone had the offset transition not occurred, so - it will always fall forward by the transition's change in offset. - - .. doctest:: - - >>> from dateutil import tz - >>> from datetime import datetime - >>> NYC = tz.gettz('America/New_York') - >>> print(tz.resolve_imaginary(datetime(2017, 3, 12, 2, 30, tzinfo=NYC))) - 2017-03-12 03:30:00-04:00 - - >>> KIR = tz.gettz('Pacific/Kiritimati') - >>> print(tz.resolve_imaginary(datetime(1995, 1, 1, 12, 30, tzinfo=KIR))) - 1995-01-02 12:30:00+14:00 - - As a note, :func:`datetime.astimezone` is guaranteed to produce a valid, - existing datetime, so a round-trip to and from UTC is sufficient to get - an extant datetime, however, this generally "falls back" to an earlier time - rather than falling forward to the STD side (though no guarantees are made - about this behavior). - - :param dt: - A :class:`datetime.datetime` which may or may not exist. - - :return: - Returns an existing :class:`datetime.datetime`. If ``dt`` was not - imaginary, the datetime returned is guaranteed to be the same object - passed to the function. - - .. 
versionadded:: 2.7.0
-    """
-    if dt.tzinfo is not None and not datetime_exists(dt):
-
-        curr_offset = (dt + datetime.timedelta(hours=24)).utcoffset()
-        old_offset = (dt - datetime.timedelta(hours=24)).utcoffset()
-
-        dt += curr_offset - old_offset
-
-    return dt
-
-
-def _datetime_to_timestamp(dt):
-    """
-    Convert a :class:`datetime.datetime` object to an epoch timestamp in
-    seconds since January 1, 1970, ignoring the time zone.
-    """
-    return (dt.replace(tzinfo=None) - EPOCH).total_seconds()
-
-
-if sys.version_info >= (3, 6):
-    def _get_supported_offset(second_offset):
-        return second_offset
-else:
-    def _get_supported_offset(second_offset):
-        # For python pre-3.6, round to full-minutes if that's not the case.
-        # Python's datetime doesn't accept sub-minute timezones. Check
-        # http://python.org/sf/1447945 or https://bugs.python.org/issue5288
-        # for some information.
-        old_offset = second_offset
-        calculated_offset = 60 * ((second_offset + 30) // 60)
-        return calculated_offset
-
-
-try:
-    # Python 3.7 feature
-    from contextlib import nullcontext as _nullcontext
-except ImportError:
-    class _nullcontext(object):
-        """
-        Class for wrapping contexts so that they are passed through in a
-        with statement.
-        """
-        def __init__(self, context):
-            self.context = context
-
-        def __enter__(self):
-            return self.context
-
-        def __exit__(*args, **kwargs):
-            pass
-
-# vim:ts=4:sw=4:et
diff --git a/flo-token-explorer/lib/python3.6/site-packages/dateutil/tz/win.py b/flo-token-explorer/lib/python3.6/site-packages/dateutil/tz/win.py
deleted file mode 100644
index cde07ba..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/dateutil/tz/win.py
+++ /dev/null
@@ -1,370 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-This module provides an interface to the native time zone data on Windows,
-including :py:class:`datetime.tzinfo` implementations.
-
-Attempting to import this module on a non-Windows platform will raise an
-:py:obj:`ImportError`.
-"""
-# This code was originally contributed by Jeffrey Harris.
-import datetime
-import struct
-
-from six.moves import winreg
-from six import text_type
-
-try:
-    import ctypes
-    from ctypes import wintypes
-except ValueError:
-    # ValueError is raised on non-Windows systems for some horrible reason.
-    raise ImportError("Running tzwin on non-Windows system")
-
-from ._common import tzrangebase
-
-__all__ = ["tzwin", "tzwinlocal", "tzres"]
-
-ONEWEEK = datetime.timedelta(7)
-
-TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
-TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
-TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
-
-
-def _settzkeyname():
-    handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
-    try:
-        winreg.OpenKey(handle, TZKEYNAMENT).Close()
-        TZKEYNAME = TZKEYNAMENT
-    except WindowsError:
-        TZKEYNAME = TZKEYNAME9X
-    handle.Close()
-    return TZKEYNAME
-
-
-TZKEYNAME = _settzkeyname()
-
-
-class tzres(object):
-    """
-    Class for accessing ``tzres.dll``, which contains timezone name related
-    resources.
-
-    ..
versionadded:: 2.5.0 - """ - p_wchar = ctypes.POINTER(wintypes.WCHAR) # Pointer to a wide char - - def __init__(self, tzres_loc='tzres.dll'): - # Load the user32 DLL so we can load strings from tzres - user32 = ctypes.WinDLL('user32') - - # Specify the LoadStringW function - user32.LoadStringW.argtypes = (wintypes.HINSTANCE, - wintypes.UINT, - wintypes.LPWSTR, - ctypes.c_int) - - self.LoadStringW = user32.LoadStringW - self._tzres = ctypes.WinDLL(tzres_loc) - self.tzres_loc = tzres_loc - - def load_name(self, offset): - """ - Load a timezone name from a DLL offset (integer). - - >>> from dateutil.tzwin import tzres - >>> tzr = tzres() - >>> print(tzr.load_name(112)) - 'Eastern Standard Time' - - :param offset: - A positive integer value referring to a string from the tzres dll. - - .. note:: - - Offsets found in the registry are generally of the form - ``@tzres.dll,-114``. The offset in this case is 114, not -114. - - """ - resource = self.p_wchar() - lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR) - nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0) - return resource[:nchar] - - def name_from_string(self, tzname_str): - """ - Parse strings as returned from the Windows registry into the time zone - name as defined in the registry. - - >>> from dateutil.tzwin import tzres - >>> tzr = tzres() - >>> print(tzr.name_from_string('@tzres.dll,-251')) - 'Dateline Daylight Time' - >>> print(tzr.name_from_string('Eastern Standard Time')) - 'Eastern Standard Time' - - :param tzname_str: - A timezone name string as returned from a Windows registry key. - - :return: - Returns the localized timezone string from tzres.dll if the string - is of the form `@tzres.dll,-offset`, else returns the input string. - """ - if not tzname_str.startswith('@'): - return tzname_str - - name_splt = tzname_str.split(',-') - try: - offset = int(name_splt[1]) - except: - raise ValueError("Malformed timezone string.") - - return self.load_name(offset) - - -class tzwinbase(tzrangebase): - """tzinfo class based on win32's timezones available in the registry.""" - def __init__(self): - raise NotImplementedError('tzwinbase is an abstract base class') - - def __eq__(self, other): - # Compare on all relevant dimensions, including name. - if not isinstance(other, tzwinbase): - return NotImplemented - - return (self._std_offset == other._std_offset and - self._dst_offset == other._dst_offset and - self._stddayofweek == other._stddayofweek and - self._dstdayofweek == other._dstdayofweek and - self._stdweeknumber == other._stdweeknumber and - self._dstweeknumber == other._dstweeknumber and - self._stdhour == other._stdhour and - self._dsthour == other._dsthour and - self._stdminute == other._stdminute and - self._dstminute == other._dstminute and - self._std_abbr == other._std_abbr and - self._dst_abbr == other._dst_abbr) - - @staticmethod - def list(): - """Return a list of all time zones known to the system.""" - with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: - with winreg.OpenKey(handle, TZKEYNAME) as tzkey: - result = [winreg.EnumKey(tzkey, i) - for i in range(winreg.QueryInfoKey(tzkey)[0])] - return result - - def display(self): - """ - Return the display name of the time zone. - """ - return self._display - - def transitions(self, year): - """ - For a given year, get the DST on and off transition times, expressed - always on the standard time side. For zones with no transitions, this - function returns ``None``. 
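
        For example (illustrative only; this requires Windows and assumes
        the "Eastern Standard Time" registry key is present):

        .. code-block:: python3

            >>> from dateutil.tzwin import tzwin
            >>> tzwin('Eastern Standard Time').transitions(2016)
            (datetime.datetime(2016, 3, 13, 2, 0), datetime.datetime(2016, 11, 6, 1, 0))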
- - :param year: - The year whose transitions you would like to query. - - :return: - Returns a :class:`tuple` of :class:`datetime.datetime` objects, - ``(dston, dstoff)`` for zones with an annual DST transition, or - ``None`` for fixed offset zones. - """ - - if not self.hasdst: - return None - - dston = picknthweekday(year, self._dstmonth, self._dstdayofweek, - self._dsthour, self._dstminute, - self._dstweeknumber) - - dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek, - self._stdhour, self._stdminute, - self._stdweeknumber) - - # Ambiguous dates default to the STD side - dstoff -= self._dst_base_offset - - return dston, dstoff - - def _get_hasdst(self): - return self._dstmonth != 0 - - @property - def _dst_base_offset(self): - return self._dst_base_offset_ - - -class tzwin(tzwinbase): - """ - Time zone object created from the zone info in the Windows registry - - These are similar to :py:class:`dateutil.tz.tzrange` objects in that - the time zone data is provided in the format of a single offset rule - for either 0 or 2 time zone transitions per year. - - :param: name - The name of a Windows time zone key, e.g. "Eastern Standard Time". - The full list of keys can be retrieved with :func:`tzwin.list`. - """ - - def __init__(self, name): - self._name = name - - with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: - tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name) - with winreg.OpenKey(handle, tzkeyname) as tzkey: - keydict = valuestodict(tzkey) - - self._std_abbr = keydict["Std"] - self._dst_abbr = keydict["Dlt"] - - self._display = keydict["Display"] - - # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm - tup = struct.unpack("=3l16h", keydict["TZI"]) - stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1 - dstoffset = stdoffset-tup[2] # + DaylightBias * -1 - self._std_offset = datetime.timedelta(minutes=stdoffset) - self._dst_offset = datetime.timedelta(minutes=dstoffset) - - # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs - # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx - (self._stdmonth, - self._stddayofweek, # Sunday = 0 - self._stdweeknumber, # Last = 5 - self._stdhour, - self._stdminute) = tup[4:9] - - (self._dstmonth, - self._dstdayofweek, # Sunday = 0 - self._dstweeknumber, # Last = 5 - self._dsthour, - self._dstminute) = tup[12:17] - - self._dst_base_offset_ = self._dst_offset - self._std_offset - self.hasdst = self._get_hasdst() - - def __repr__(self): - return "tzwin(%s)" % repr(self._name) - - def __reduce__(self): - return (self.__class__, (self._name,)) - - -class tzwinlocal(tzwinbase): - """ - Class representing the local time zone information in the Windows registry - - While :class:`dateutil.tz.tzlocal` makes system calls (via the :mod:`time` - module) to retrieve time zone information, ``tzwinlocal`` retrieves the - rules directly from the Windows registry and creates an object like - :class:`dateutil.tz.tzwin`. - - Because Windows does not have an equivalent of :func:`time.tzset`, on - Windows, :class:`dateutil.tz.tzlocal` instances will always reflect the - time zone settings *at the time that the process was started*, meaning - changes to the machine's time zone settings during the run of a program - on Windows will **not** be reflected by :class:`dateutil.tz.tzlocal`. - Because ``tzwinlocal`` reads the registry directly, it is unaffected by - this issue. 
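
    A minimal sketch (Windows only; the output depends on the machine's
    configured zone, shown here under an assumed US Eastern setting):

    .. code-block:: python3

        >>> from dateutil.tzwin import tzwinlocal
        >>> str(tzwinlocal())
        "tzwinlocal('Eastern Standard Time')"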
- """ - def __init__(self): - with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: - with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey: - keydict = valuestodict(tzlocalkey) - - self._std_abbr = keydict["StandardName"] - self._dst_abbr = keydict["DaylightName"] - - try: - tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME, - sn=self._std_abbr) - with winreg.OpenKey(handle, tzkeyname) as tzkey: - _keydict = valuestodict(tzkey) - self._display = _keydict["Display"] - except OSError: - self._display = None - - stdoffset = -keydict["Bias"]-keydict["StandardBias"] - dstoffset = stdoffset-keydict["DaylightBias"] - - self._std_offset = datetime.timedelta(minutes=stdoffset) - self._dst_offset = datetime.timedelta(minutes=dstoffset) - - # For reasons unclear, in this particular key, the day of week has been - # moved to the END of the SYSTEMTIME structure. - tup = struct.unpack("=8h", keydict["StandardStart"]) - - (self._stdmonth, - self._stdweeknumber, # Last = 5 - self._stdhour, - self._stdminute) = tup[1:5] - - self._stddayofweek = tup[7] - - tup = struct.unpack("=8h", keydict["DaylightStart"]) - - (self._dstmonth, - self._dstweeknumber, # Last = 5 - self._dsthour, - self._dstminute) = tup[1:5] - - self._dstdayofweek = tup[7] - - self._dst_base_offset_ = self._dst_offset - self._std_offset - self.hasdst = self._get_hasdst() - - def __repr__(self): - return "tzwinlocal()" - - def __str__(self): - # str will return the standard name, not the daylight name. - return "tzwinlocal(%s)" % repr(self._std_abbr) - - def __reduce__(self): - return (self.__class__, ()) - - -def picknthweekday(year, month, dayofweek, hour, minute, whichweek): - """ dayofweek == 0 means Sunday, whichweek 5 means last instance """ - first = datetime.datetime(year, month, 1, hour, minute) - - # This will work if dayofweek is ISO weekday (1-7) or Microsoft-style (0-6), - # Because 7 % 7 = 0 - weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1) - wd = weekdayone + ((whichweek - 1) * ONEWEEK) - if (wd.month != month): - wd -= ONEWEEK - - return wd - - -def valuestodict(key): - """Convert a registry key's values to a dictionary.""" - dout = {} - size = winreg.QueryInfoKey(key)[1] - tz_res = None - - for i in range(size): - key_name, value, dtype = winreg.EnumValue(key, i) - if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN: - # If it's a DWORD (32-bit integer), it's stored as unsigned - convert - # that to a proper signed integer - if value & (1 << 31): - value = value - (1 << 32) - elif dtype == winreg.REG_SZ: - # If it's a reference to the tzres DLL, load the actual string - if value.startswith('@tzres'): - tz_res = tz_res or tzres() - value = tz_res.name_from_string(value) - - value = value.rstrip('\x00') # Remove trailing nulls - - dout[key_name] = value - - return dout diff --git a/flo-token-explorer/lib/python3.6/site-packages/dateutil/tzwin.py b/flo-token-explorer/lib/python3.6/site-packages/dateutil/tzwin.py deleted file mode 100644 index cebc673..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/dateutil/tzwin.py +++ /dev/null @@ -1,2 +0,0 @@ -# tzwin has moved to dateutil.tz.win -from .tz.win import * diff --git a/flo-token-explorer/lib/python3.6/site-packages/dateutil/utils.py b/flo-token-explorer/lib/python3.6/site-packages/dateutil/utils.py deleted file mode 100644 index ebcce6a..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/dateutil/utils.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module 
offers general convenience and utility functions for dealing with
-datetimes.
-
-.. versionadded:: 2.7.0
-"""
-from __future__ import unicode_literals
-
-from datetime import datetime, time
-
-
-def today(tzinfo=None):
-    """
-    Returns a :py:class:`datetime` representing the current day at midnight
-
-    :param tzinfo:
-        The time zone to attach (also used to determine the current day).
-
-    :return:
-        A :py:class:`datetime.datetime` object representing the current day
-        at midnight.
-    """
-
-    dt = datetime.now(tzinfo)
-    return datetime.combine(dt.date(), time(0, tzinfo=tzinfo))
-
-
-def default_tzinfo(dt, tzinfo):
-    """
-    Sets the ``tzinfo`` parameter on naive datetimes only
-
-    This is useful for example when you are provided a datetime that may have
-    either an implicit or explicit time zone, such as when parsing a time zone
-    string.
-
-    .. doctest::
-
-        >>> from dateutil.tz import tzoffset
-        >>> from dateutil.parser import parse
-        >>> from dateutil.utils import default_tzinfo
-        >>> dflt_tz = tzoffset("EST", -18000)
-        >>> print(default_tzinfo(parse('2014-01-01 12:30 UTC'), dflt_tz))
-        2014-01-01 12:30:00+00:00
-        >>> print(default_tzinfo(parse('2014-01-01 12:30'), dflt_tz))
-        2014-01-01 12:30:00-05:00
-
-    :param dt:
-        The datetime on which to replace the time zone
-
-    :param tzinfo:
-        The :py:class:`datetime.tzinfo` subclass instance to assign to
-        ``dt`` if (and only if) it is naive.
-
-    :return:
-        Returns an aware :py:class:`datetime.datetime`.
-    """
-    if dt.tzinfo is not None:
-        return dt
-    else:
-        return dt.replace(tzinfo=tzinfo)
-
-
-def within_delta(dt1, dt2, delta):
-    """
-    Useful for comparing two datetimes that may have a negligible difference
-    to be considered equal.
-    """
-    delta = abs(delta)
-    difference = dt1 - dt2
-    return -delta <= difference <= delta
diff --git a/flo-token-explorer/lib/python3.6/site-packages/dateutil/zoneinfo/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/dateutil/zoneinfo/__init__.py
deleted file mode 100644
index 34f11ad..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/dateutil/zoneinfo/__init__.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# -*- coding: utf-8 -*-
-import warnings
-import json
-
-from tarfile import TarFile
-from pkgutil import get_data
-from io import BytesIO
-
-from dateutil.tz import tzfile as _tzfile
-
-__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata"]
-
-ZONEFILENAME = "dateutil-zoneinfo.tar.gz"
-METADATA_FN = 'METADATA'
-
-
-class tzfile(_tzfile):
-    def __reduce__(self):
-        return (gettz, (self._filename,))
-
-
-def getzoneinfofile_stream():
-    try:
-        return BytesIO(get_data(__name__, ZONEFILENAME))
-    except IOError as e:  # TODO switch to FileNotFoundError?
-        warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror))
-        return None
-
-
-class ZoneInfoFile(object):
-    def __init__(self, zonefile_stream=None):
-        if zonefile_stream is not None:
-            with TarFile.open(fileobj=zonefile_stream) as tf:
-                self.zones = {zf.name: tzfile(tf.extractfile(zf), filename=zf.name)
-                              for zf in tf.getmembers()
-                              if zf.isfile() and zf.name != METADATA_FN}
-                # deal with links: They'll point to their parent object.
Less - # waste of memory - links = {zl.name: self.zones[zl.linkname] - for zl in tf.getmembers() if - zl.islnk() or zl.issym()} - self.zones.update(links) - try: - metadata_json = tf.extractfile(tf.getmember(METADATA_FN)) - metadata_str = metadata_json.read().decode('UTF-8') - self.metadata = json.loads(metadata_str) - except KeyError: - # no metadata in tar file - self.metadata = None - else: - self.zones = {} - self.metadata = None - - def get(self, name, default=None): - """ - Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method - for retrieving zones from the zone dictionary. - - :param name: - The name of the zone to retrieve. (Generally IANA zone names) - - :param default: - The value to return in the event of a missing key. - - .. versionadded:: 2.6.0 - - """ - return self.zones.get(name, default) - - -# The current API has gettz as a module function, although in fact it taps into -# a stateful class. So as a workaround for now, without changing the API, we -# will create a new "global" class instance the first time a user requests a -# timezone. Ugly, but adheres to the api. -# -# TODO: Remove after deprecation period. -_CLASS_ZONE_INSTANCE = [] - - -def get_zonefile_instance(new_instance=False): - """ - This is a convenience function which provides a :class:`ZoneInfoFile` - instance using the data provided by the ``dateutil`` package. By default, it - caches a single instance of the ZoneInfoFile object and returns that. - - :param new_instance: - If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated and - used as the cached instance for the next call. Otherwise, new instances - are created only as necessary. - - :return: - Returns a :class:`ZoneInfoFile` object. - - .. versionadded:: 2.6 - """ - if new_instance: - zif = None - else: - zif = getattr(get_zonefile_instance, '_cached_instance', None) - - if zif is None: - zif = ZoneInfoFile(getzoneinfofile_stream()) - - get_zonefile_instance._cached_instance = zif - - return zif - - -def gettz(name): - """ - This retrieves a time zone from the local zoneinfo tarball that is packaged - with dateutil. - - :param name: - An IANA-style time zone name, as found in the zoneinfo file. - - :return: - Returns a :class:`dateutil.tz.tzfile` time zone object. - - .. warning:: - It is generally inadvisable to use this function, and it is only - provided for API compatibility with earlier versions. This is *not* - equivalent to ``dateutil.tz.gettz()``, which selects an appropriate - time zone based on the inputs, favoring system zoneinfo. This is ONLY - for accessing the dateutil-specific zoneinfo (which may be out of - date compared to the system zoneinfo). - - .. deprecated:: 2.6 - If you need to use a specific zoneinfofile over the system zoneinfo, - instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call - :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead. - - Use :func:`get_zonefile_instance` to retrieve an instance of the - dateutil-provided zoneinfo. - """ - warnings.warn("zoneinfo.gettz() will be removed in future versions, " - "to use the dateutil-provided zoneinfo files, instantiate a " - "ZoneInfoFile object and use ZoneInfoFile.zones.get() " - "instead. 
See the documentation for details.", - DeprecationWarning) - - if len(_CLASS_ZONE_INSTANCE) == 0: - _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) - return _CLASS_ZONE_INSTANCE[0].zones.get(name) - - -def gettz_db_metadata(): - """ Get the zonefile metadata - - See `zonefile_metadata`_ - - :returns: - A dictionary with the database metadata - - .. deprecated:: 2.6 - See deprecation warning in :func:`zoneinfo.gettz`. To get metadata, - query the attribute ``zoneinfo.ZoneInfoFile.metadata``. - """ - warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future " - "versions, to use the dateutil-provided zoneinfo files, " - "ZoneInfoFile object and query the 'metadata' attribute " - "instead. See the documentation for details.", - DeprecationWarning) - - if len(_CLASS_ZONE_INSTANCE) == 0: - _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) - return _CLASS_ZONE_INSTANCE[0].metadata diff --git a/flo-token-explorer/lib/python3.6/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz b/flo-token-explorer/lib/python3.6/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz deleted file mode 100644 index 124f3e1..0000000 Binary files a/flo-token-explorer/lib/python3.6/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz and /dev/null differ diff --git a/flo-token-explorer/lib/python3.6/site-packages/dateutil/zoneinfo/rebuild.py b/flo-token-explorer/lib/python3.6/site-packages/dateutil/zoneinfo/rebuild.py deleted file mode 100644 index 78f0d1a..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/dateutil/zoneinfo/rebuild.py +++ /dev/null @@ -1,53 +0,0 @@ -import logging -import os -import tempfile -import shutil -import json -from subprocess import check_call -from tarfile import TarFile - -from dateutil.zoneinfo import METADATA_FN, ZONEFILENAME - - -def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None): - """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar* - - filename is the timezone tarball from ``ftp.iana.org/tz``. - - """ - tmpdir = tempfile.mkdtemp() - zonedir = os.path.join(tmpdir, "zoneinfo") - moduledir = os.path.dirname(__file__) - try: - with TarFile.open(filename) as tf: - for name in zonegroups: - tf.extract(name, tmpdir) - filepaths = [os.path.join(tmpdir, n) for n in zonegroups] - try: - check_call(["zic", "-d", zonedir] + filepaths) - except OSError as e: - _print_on_nosuchfile(e) - raise - # write metadata file - with open(os.path.join(zonedir, METADATA_FN), 'w') as f: - json.dump(metadata, f, indent=4, sort_keys=True) - target = os.path.join(moduledir, ZONEFILENAME) - with TarFile.open(target, "w:%s" % format) as tf: - for entry in os.listdir(zonedir): - entrypath = os.path.join(zonedir, entry) - tf.add(entrypath, entry) - finally: - shutil.rmtree(tmpdir) - - -def _print_on_nosuchfile(e): - """Print helpful troubleshooting message - - e is an exception raised by subprocess.check_call() - - """ - if e.errno == 2: - logging.error( - "Could not find zic. 
Perhaps you need to install " - "libc-bin or some other package that provides it, " - "or it's not in your PATH?") diff --git a/flo-token-explorer/lib/python3.6/site-packages/easy_install.py b/flo-token-explorer/lib/python3.6/site-packages/easy_install.py deleted file mode 100644 index d87e984..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/easy_install.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Run the EasyInstall command""" - -if __name__ == '__main__': - from setuptools.command.easy_install import main - main() diff --git a/flo-token-explorer/lib/python3.6/site-packages/editor.py b/flo-token-explorer/lib/python3.6/site-packages/editor.py deleted file mode 100755 index 6fc73f1..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/editor.py +++ /dev/null @@ -1,133 +0,0 @@ -#!/usr/bin/env python -"""Tools for invoking editors programmatically.""" - -from __future__ import print_function - -import sys -import locale -import os.path -import subprocess -import tempfile -from distutils.spawn import find_executable - - -__all__ = [ - 'edit', - 'get_editor', - 'EditorError', -] - -__version__ = '1.0.4' - - -class EditorError(RuntimeError): - pass - - -def get_default_editors(): - # TODO: Make platform-specific - return [ - 'editor', - 'vim', - 'emacs', - 'nano', - ] - - -def get_editor_args(editor): - if editor in ['vim', 'gvim', 'vim.basic', 'vim.tiny']: - return ['-f', '-o'] - - elif editor == 'emacs': - return ['-nw'] - - elif editor == 'gedit': - return ['-w', '--new-window'] - - elif editor == 'nano': - return ['-R'] - - else: - return [] - - -def get_editor(): - # Get the editor from the environment. Prefer VISUAL to EDITOR - editor = os.environ.get('VISUAL') or os.environ.get('EDITOR') - if editor: - return editor - - # None found in the environment. Fallback to platform-specific defaults. - for ed in get_default_editors(): - path = find_executable(ed) - if path is not None: - return path - - raise EditorError("Unable to find a viable editor on this system." - "Please consider setting your $EDITOR variable") - - -def get_tty_filename(): - if sys.platform == 'win32': - return 'CON:' - return '/dev/tty' - - -def edit(filename=None, contents=None, use_tty=None, suffix=''): - editor = get_editor() - args = [editor] + get_editor_args(os.path.basename(os.path.realpath(editor))) - - if use_tty is None: - use_tty = sys.stdin.isatty() and not sys.stdout.isatty() - - if filename is None: - tmp = tempfile.NamedTemporaryFile(suffix=suffix) - filename = tmp.name - - if contents is not None: - # For python3 only. 
If str is passed instead of bytes, encode default - if hasattr(contents, 'encode'): - contents = contents.encode() - - with open(filename, mode='wb') as f: - f.write(contents) - - args += [filename] - - stdout = None - if use_tty: - stdout = open(get_tty_filename(), 'wb') - - proc = subprocess.Popen(args, close_fds=True, stdout=stdout) - proc.communicate() - - with open(filename, mode='rb') as f: - return f.read() - - -def _get_editor(ns): - print(get_editor()) - - -def _edit(ns): - contents = ns.contents - if contents is not None: - contents = contents.encode(locale.getpreferredencoding()) - print(edit(filename=ns.path, contents=contents)) - - -if __name__ == '__main__': - import argparse - ap = argparse.ArgumentParser() - sp = ap.add_subparsers() - - cmd = sp.add_parser('get-editor') - cmd.set_defaults(cmd=_get_editor) - - cmd = sp.add_parser('edit') - cmd.set_defaults(cmd=_edit) - cmd.add_argument('path', type=str, nargs='?') - cmd.add_argument('--contents', type=str) - - ns = ap.parse_args() - ns.cmd(ns) diff --git a/flo-token-explorer/lib/python3.6/site-packages/flask/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/flask/__init__.py deleted file mode 100644 index ded1982..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/flask/__init__.py +++ /dev/null @@ -1,49 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask - ~~~~~ - - A microframework based on Werkzeug. It's extensively documented - and follows best practice patterns. - - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. -""" - -__version__ = '1.0.2' - -# utilities we import from Werkzeug and Jinja2 that are unused -# in the module but are exported as public interface. -from werkzeug.exceptions import abort -from werkzeug.utils import redirect -from jinja2 import Markup, escape - -from .app import Flask, Request, Response -from .config import Config -from .helpers import url_for, flash, send_file, send_from_directory, \ - get_flashed_messages, get_template_attribute, make_response, safe_join, \ - stream_with_context -from .globals import current_app, g, request, session, _request_ctx_stack, \ - _app_ctx_stack -from .ctx import has_request_context, has_app_context, \ - after_this_request, copy_current_request_context -from .blueprints import Blueprint -from .templating import render_template, render_template_string - -# the signals -from .signals import signals_available, template_rendered, request_started, \ - request_finished, got_request_exception, request_tearing_down, \ - appcontext_tearing_down, appcontext_pushed, \ - appcontext_popped, message_flashed, before_render_template - -# We're not exposing the actual json module but a convenient wrapper around -# it. -from . import json - -# This was the only thing that Flask used to export at one point and it had -# a more generic name. -jsonify = json.jsonify - -# backwards compat, goes away in 1.0 -from .sessions import SecureCookieSession as Session -json_available = True diff --git a/flo-token-explorer/lib/python3.6/site-packages/flask/__main__.py b/flo-token-explorer/lib/python3.6/site-packages/flask/__main__.py deleted file mode 100644 index 4aee654..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/flask/__main__.py +++ /dev/null @@ -1,14 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.__main__ - ~~~~~~~~~~~~~~ - - Alias for flask.run for the command line. - - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. 
-""" - -if __name__ == '__main__': - from .cli import main - main(as_module=True) diff --git a/flo-token-explorer/lib/python3.6/site-packages/flask/_compat.py b/flo-token-explorer/lib/python3.6/site-packages/flask/_compat.py deleted file mode 100644 index a3b5b9c..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/flask/_compat.py +++ /dev/null @@ -1,99 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask._compat - ~~~~~~~~~~~~~ - - Some py2/py3 compatibility support based on a stripped down - version of six so we don't have to depend on a specific version - of it. - - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. -""" - -import sys - -PY2 = sys.version_info[0] == 2 -_identity = lambda x: x - - -if not PY2: - text_type = str - string_types = (str,) - integer_types = (int,) - - iterkeys = lambda d: iter(d.keys()) - itervalues = lambda d: iter(d.values()) - iteritems = lambda d: iter(d.items()) - - from inspect import getfullargspec as getargspec - from io import StringIO - - def reraise(tp, value, tb=None): - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - - implements_to_string = _identity - -else: - text_type = unicode - string_types = (str, unicode) - integer_types = (int, long) - - iterkeys = lambda d: d.iterkeys() - itervalues = lambda d: d.itervalues() - iteritems = lambda d: d.iteritems() - - from inspect import getargspec - from cStringIO import StringIO - - exec('def reraise(tp, value, tb=None):\n raise tp, value, tb') - - def implements_to_string(cls): - cls.__unicode__ = cls.__str__ - cls.__str__ = lambda x: x.__unicode__().encode('utf-8') - return cls - - -def with_metaclass(meta, *bases): - """Create a base class with a metaclass.""" - # This requires a bit of explanation: the basic idea is to make a - # dummy metaclass for one level of class instantiation that replaces - # itself with the actual metaclass. - class metaclass(type): - def __new__(cls, name, this_bases, d): - return meta(name, bases, d) - return type.__new__(metaclass, 'temporary_class', (), {}) - - -# Certain versions of pypy have a bug where clearing the exception stack -# breaks the __exit__ function in a very peculiar way. The second level of -# exception blocks is necessary because pypy seems to forget to check if an -# exception happened until the next bytecode instruction? -# -# Relevant PyPy bugfix commit: -# https://bitbucket.org/pypy/pypy/commits/77ecf91c635a287e88e60d8ddb0f4e9df4003301 -# According to ronan on #pypy IRC, it is released in PyPy2 2.3 and later -# versions. -# -# Ubuntu 14.04 has PyPy 2.2.1, which does exhibit this bug. -BROKEN_PYPY_CTXMGR_EXIT = False -if hasattr(sys, 'pypy_version_info'): - class _Mgr(object): - def __enter__(self): - return self - def __exit__(self, *args): - if hasattr(sys, 'exc_clear'): - # Python 3 (PyPy3) doesn't have exc_clear - sys.exc_clear() - try: - try: - with _Mgr(): - raise AssertionError() - except: - raise - except TypeError: - BROKEN_PYPY_CTXMGR_EXIT = True - except AssertionError: - pass diff --git a/flo-token-explorer/lib/python3.6/site-packages/flask/app.py b/flo-token-explorer/lib/python3.6/site-packages/flask/app.py deleted file mode 100644 index 87c5900..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/flask/app.py +++ /dev/null @@ -1,2315 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.app - ~~~~~~~~~ - - This module implements the central WSGI application object. - - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. 
-""" - -import os -import sys -import warnings -from datetime import timedelta -from functools import update_wrapper -from itertools import chain -from threading import Lock - -from werkzeug.datastructures import Headers, ImmutableDict -from werkzeug.exceptions import BadRequest, BadRequestKeyError, HTTPException, \ - InternalServerError, MethodNotAllowed, default_exceptions -from werkzeug.routing import BuildError, Map, RequestRedirect, Rule - -from . import cli, json -from ._compat import integer_types, reraise, string_types, text_type -from .config import Config, ConfigAttribute -from .ctx import AppContext, RequestContext, _AppCtxGlobals -from .globals import _request_ctx_stack, g, request, session -from .helpers import ( - _PackageBoundObject, - _endpoint_from_view_func, find_package, get_env, get_debug_flag, - get_flashed_messages, locked_cached_property, url_for, get_load_dotenv -) -from .logging import create_logger -from .sessions import SecureCookieSessionInterface -from .signals import appcontext_tearing_down, got_request_exception, \ - request_finished, request_started, request_tearing_down -from .templating import DispatchingJinjaLoader, Environment, \ - _default_template_ctx_processor -from .wrappers import Request, Response - -# a singleton sentinel value for parameter defaults -_sentinel = object() - - -def _make_timedelta(value): - if not isinstance(value, timedelta): - return timedelta(seconds=value) - return value - - -def setupmethod(f): - """Wraps a method so that it performs a check in debug mode if the - first request was already handled. - """ - def wrapper_func(self, *args, **kwargs): - if self.debug and self._got_first_request: - raise AssertionError('A setup function was called after the ' - 'first request was handled. This usually indicates a bug ' - 'in the application where a module was not imported ' - 'and decorators or other functionality was called too late.\n' - 'To fix this make sure to import all your view modules, ' - 'database models and everything related at a central place ' - 'before the application starts serving requests.') - return f(self, *args, **kwargs) - return update_wrapper(wrapper_func, f) - - -class Flask(_PackageBoundObject): - """The flask object implements a WSGI application and acts as the central - object. It is passed the name of the module or package of the - application. Once it is created it will act as a central registry for - the view functions, the URL rules, template configuration and much more. - - The name of the package is used to resolve resources from inside the - package or the folder the module is contained in depending on if the - package parameter resolves to an actual python package (a folder with - an :file:`__init__.py` file inside) or a standard module (just a ``.py`` file). - - For more information about resource loading, see :func:`open_resource`. - - Usually you create a :class:`Flask` instance in your main module or - in the :file:`__init__.py` file of your package like this:: - - from flask import Flask - app = Flask(__name__) - - .. admonition:: About the First Parameter - - The idea of the first parameter is to give Flask an idea of what - belongs to your application. This name is used to find resources - on the filesystem, can be used by extensions to improve debugging - information and a lot more. - - So it's important what you provide there. If you are using a single - module, `__name__` is always the correct value. 
If you however are - using a package, it's usually recommended to hardcode the name of - your package there. - - For example if your application is defined in :file:`yourapplication/app.py` - you should create it with one of the two versions below:: - - app = Flask('yourapplication') - app = Flask(__name__.split('.')[0]) - - Why is that? The application will work even with `__name__`, thanks - to how resources are looked up. However it will make debugging more - painful. Certain extensions can make assumptions based on the - import name of your application. For example the Flask-SQLAlchemy - extension will look for the code in your application that triggered - an SQL query in debug mode. If the import name is not properly set - up, that debugging information is lost. (For example it would only - pick up SQL queries in `yourapplication.app` and not - `yourapplication.views.frontend`) - - .. versionadded:: 0.7 - The `static_url_path`, `static_folder`, and `template_folder` - parameters were added. - - .. versionadded:: 0.8 - The `instance_path` and `instance_relative_config` parameters were - added. - - .. versionadded:: 0.11 - The `root_path` parameter was added. - - .. versionadded:: 1.0 - The ``host_matching`` and ``static_host`` parameters were added. - - .. versionadded:: 1.0 - The ``subdomain_matching`` parameter was added. Subdomain - matching needs to be enabled manually now. Setting - :data:`SERVER_NAME` does not implicitly enable it. - - :param import_name: the name of the application package - :param static_url_path: can be used to specify a different path for the - static files on the web. Defaults to the name - of the `static_folder` folder. - :param static_folder: the folder with static files that should be served - at `static_url_path`. Defaults to the ``'static'`` - folder in the root path of the application. - :param static_host: the host to use when adding the static route. - Defaults to None. Required when using ``host_matching=True`` - with a ``static_folder`` configured. - :param host_matching: set ``url_map.host_matching`` attribute. - Defaults to False. - :param subdomain_matching: consider the subdomain relative to - :data:`SERVER_NAME` when matching routes. Defaults to False. - :param template_folder: the folder that contains the templates that should - be used by the application. Defaults to - ``'templates'`` folder in the root path of the - application. - :param instance_path: An alternative instance path for the application. - By default the folder ``'instance'`` next to the - package or module is assumed to be the instance - path. - :param instance_relative_config: if set to ``True`` relative filenames - for loading the config are assumed to - be relative to the instance path instead - of the application root. - :param root_path: Flask by default will automatically calculate the path - to the root of the application. In certain situations - this cannot be achieved (for instance if the package - is a Python 3 namespace package) and needs to be - manually defined. - """ - - #: The class that is used for request objects. See :class:`~flask.Request` - #: for more information. - request_class = Request - - #: The class that is used for response objects. See - #: :class:`~flask.Response` for more information. - response_class = Response - - #: The class that is used for the Jinja environment. - #: - #: .. versionadded:: 0.11 - jinja_environment = Environment - - #: The class that is used for the :data:`~flask.g` instance. 
- #: - #: Example use cases for a custom class: - #: - #: 1. Store arbitrary attributes on flask.g. - #: 2. Add a property for lazy per-request database connectors. - #: 3. Return None instead of AttributeError on unexpected attributes. - #: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g. - #: - #: In Flask 0.9 this property was called `request_globals_class` but it - #: was changed in 0.10 to :attr:`app_ctx_globals_class` because the - #: flask.g object is now application context scoped. - #: - #: .. versionadded:: 0.10 - app_ctx_globals_class = _AppCtxGlobals - - #: The class that is used for the ``config`` attribute of this app. - #: Defaults to :class:`~flask.Config`. - #: - #: Example use cases for a custom class: - #: - #: 1. Default values for certain config options. - #: 2. Access to config values through attributes in addition to keys. - #: - #: .. versionadded:: 0.11 - config_class = Config - - #: The testing flag. Set this to ``True`` to enable the test mode of - #: Flask extensions (and in the future probably also Flask itself). - #: For example this might activate test helpers that have an - #: additional runtime cost which should not be enabled by default. - #: - #: If this is enabled and PROPAGATE_EXCEPTIONS is not changed from the - #: default it's implicitly enabled. - #: - #: This attribute can also be configured from the config with the - #: ``TESTING`` configuration key. Defaults to ``False``. - testing = ConfigAttribute('TESTING') - - #: If a secret key is set, cryptographic components can use this to - #: sign cookies and other things. Set this to a complex random value - #: when you want to use the secure cookie for instance. - #: - #: This attribute can also be configured from the config with the - #: :data:`SECRET_KEY` configuration key. Defaults to ``None``. - secret_key = ConfigAttribute('SECRET_KEY') - - #: The secure cookie uses this for the name of the session cookie. - #: - #: This attribute can also be configured from the config with the - #: ``SESSION_COOKIE_NAME`` configuration key. Defaults to ``'session'`` - session_cookie_name = ConfigAttribute('SESSION_COOKIE_NAME') - - #: A :class:`~datetime.timedelta` which is used to set the expiration - #: date of a permanent session. The default is 31 days which makes a - #: permanent session survive for roughly one month. - #: - #: This attribute can also be configured from the config with the - #: ``PERMANENT_SESSION_LIFETIME`` configuration key. Defaults to - #: ``timedelta(days=31)`` - permanent_session_lifetime = ConfigAttribute('PERMANENT_SESSION_LIFETIME', - get_converter=_make_timedelta) - - #: A :class:`~datetime.timedelta` which is used as default cache_timeout - #: for the :func:`send_file` functions. The default is 12 hours. - #: - #: This attribute can also be configured from the config with the - #: ``SEND_FILE_MAX_AGE_DEFAULT`` configuration key. This configuration - #: variable can also be set with an integer value used as seconds. - #: Defaults to ``timedelta(hours=12)`` - send_file_max_age_default = ConfigAttribute('SEND_FILE_MAX_AGE_DEFAULT', - get_converter=_make_timedelta) - - #: Enable this if you want to use the X-Sendfile feature. Keep in - #: mind that the server has to support this. This only affects files - #: sent with the :func:`send_file` method. - #: - #: .. versionadded:: 0.2 - #: - #: This attribute can also be configured from the config with the - #: ``USE_X_SENDFILE`` configuration key. Defaults to ``False``. 
- use_x_sendfile = ConfigAttribute('USE_X_SENDFILE') - - #: The JSON encoder class to use. Defaults to :class:`~flask.json.JSONEncoder`. - #: - #: .. versionadded:: 0.10 - json_encoder = json.JSONEncoder - - #: The JSON decoder class to use. Defaults to :class:`~flask.json.JSONDecoder`. - #: - #: .. versionadded:: 0.10 - json_decoder = json.JSONDecoder - - #: Options that are passed directly to the Jinja2 environment. - jinja_options = ImmutableDict( - extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_'] - ) - - #: Default configuration parameters. - default_config = ImmutableDict({ - 'ENV': None, - 'DEBUG': None, - 'TESTING': False, - 'PROPAGATE_EXCEPTIONS': None, - 'PRESERVE_CONTEXT_ON_EXCEPTION': None, - 'SECRET_KEY': None, - 'PERMANENT_SESSION_LIFETIME': timedelta(days=31), - 'USE_X_SENDFILE': False, - 'SERVER_NAME': None, - 'APPLICATION_ROOT': '/', - 'SESSION_COOKIE_NAME': 'session', - 'SESSION_COOKIE_DOMAIN': None, - 'SESSION_COOKIE_PATH': None, - 'SESSION_COOKIE_HTTPONLY': True, - 'SESSION_COOKIE_SECURE': False, - 'SESSION_COOKIE_SAMESITE': None, - 'SESSION_REFRESH_EACH_REQUEST': True, - 'MAX_CONTENT_LENGTH': None, - 'SEND_FILE_MAX_AGE_DEFAULT': timedelta(hours=12), - 'TRAP_BAD_REQUEST_ERRORS': None, - 'TRAP_HTTP_EXCEPTIONS': False, - 'EXPLAIN_TEMPLATE_LOADING': False, - 'PREFERRED_URL_SCHEME': 'http', - 'JSON_AS_ASCII': True, - 'JSON_SORT_KEYS': True, - 'JSONIFY_PRETTYPRINT_REGULAR': False, - 'JSONIFY_MIMETYPE': 'application/json', - 'TEMPLATES_AUTO_RELOAD': None, - 'MAX_COOKIE_SIZE': 4093, - }) - - #: The rule object to use for URL rules created. This is used by - #: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`. - #: - #: .. versionadded:: 0.7 - url_rule_class = Rule - - #: the test client that is used with when `test_client` is used. - #: - #: .. versionadded:: 0.7 - test_client_class = None - - #: The :class:`~click.testing.CliRunner` subclass, by default - #: :class:`~flask.testing.FlaskCliRunner` that is used by - #: :meth:`test_cli_runner`. Its ``__init__`` method should take a - #: Flask app object as the first argument. - #: - #: .. versionadded:: 1.0 - test_cli_runner_class = None - - #: the session interface to use. By default an instance of - #: :class:`~flask.sessions.SecureCookieSessionInterface` is used here. - #: - #: .. versionadded:: 0.8 - session_interface = SecureCookieSessionInterface() - - # TODO remove the next three attrs when Sphinx :inherited-members: works - # https://github.com/sphinx-doc/sphinx/issues/741 - - #: The name of the package or module that this app belongs to. Do not - #: change this once it is set by the constructor. - import_name = None - - #: Location of the template files to be added to the template lookup. - #: ``None`` if templates should not be added. - template_folder = None - - #: Absolute path to the package on the filesystem. Used to look up - #: resources contained in the package. 
- root_path = None - - def __init__( - self, - import_name, - static_url_path=None, - static_folder='static', - static_host=None, - host_matching=False, - subdomain_matching=False, - template_folder='templates', - instance_path=None, - instance_relative_config=False, - root_path=None - ): - _PackageBoundObject.__init__( - self, - import_name, - template_folder=template_folder, - root_path=root_path - ) - - if static_url_path is not None: - self.static_url_path = static_url_path - - if static_folder is not None: - self.static_folder = static_folder - - if instance_path is None: - instance_path = self.auto_find_instance_path() - elif not os.path.isabs(instance_path): - raise ValueError( - 'If an instance path is provided it must be absolute.' - ' A relative path was given instead.' - ) - - #: Holds the path to the instance folder. - #: - #: .. versionadded:: 0.8 - self.instance_path = instance_path - - #: The configuration dictionary as :class:`Config`. This behaves - #: exactly like a regular dictionary but supports additional methods - #: to load a config from files. - self.config = self.make_config(instance_relative_config) - - #: A dictionary of all view functions registered. The keys will - #: be function names which are also used to generate URLs and - #: the values are the function objects themselves. - #: To register a view function, use the :meth:`route` decorator. - self.view_functions = {} - - #: A dictionary of all registered error handlers. The key is ``None`` - #: for error handlers active on the application, otherwise the key is - #: the name of the blueprint. Each key points to another dictionary - #: where the key is the status code of the http exception. The - #: special key ``None`` points to a list of tuples where the first item - #: is the class for the instance check and the second the error handler - #: function. - #: - #: To register an error handler, use the :meth:`errorhandler` - #: decorator. - self.error_handler_spec = {} - - #: A list of functions that are called when :meth:`url_for` raises a - #: :exc:`~werkzeug.routing.BuildError`. Each function registered here - #: is called with `error`, `endpoint` and `values`. If a function - #: returns ``None`` or raises a :exc:`BuildError` the next function is - #: tried. - #: - #: .. versionadded:: 0.9 - self.url_build_error_handlers = [] - - #: A dictionary with lists of functions that will be called at the - #: beginning of each request. The key of the dictionary is the name of - #: the blueprint this function is active for, or ``None`` for all - #: requests. To register a function, use the :meth:`before_request` - #: decorator. - self.before_request_funcs = {} - - #: A list of functions that will be called at the beginning of the - #: first request to this instance. To register a function, use the - #: :meth:`before_first_request` decorator. - #: - #: .. versionadded:: 0.8 - self.before_first_request_funcs = [] - - #: A dictionary with lists of functions that should be called after - #: each request. The key of the dictionary is the name of the blueprint - #: this function is active for, ``None`` for all requests. This can for - #: example be used to close database connections. To register a function - #: here, use the :meth:`after_request` decorator. - self.after_request_funcs = {} - - #: A dictionary with lists of functions that are called after - #: each request, even if an exception has occurred. 
The key of the - #: dictionary is the name of the blueprint this function is active for, - #: ``None`` for all requests. These functions are not allowed to modify - #: the request, and their return values are ignored. If an exception - #: occurred while processing the request, it gets passed to each - #: teardown_request function. To register a function here, use the - #: :meth:`teardown_request` decorator. - #: - #: .. versionadded:: 0.7 - self.teardown_request_funcs = {} - - #: A list of functions that are called when the application context - #: is destroyed. Since the application context is also torn down - #: if the request ends this is the place to store code that disconnects - #: from databases. - #: - #: .. versionadded:: 0.9 - self.teardown_appcontext_funcs = [] - - #: A dictionary with lists of functions that are called before the - #: :attr:`before_request_funcs` functions. The key of the dictionary is - #: the name of the blueprint this function is active for, or ``None`` - #: for all requests. To register a function, use - #: :meth:`url_value_preprocessor`. - #: - #: .. versionadded:: 0.7 - self.url_value_preprocessors = {} - - #: A dictionary with lists of functions that can be used as URL value - #: preprocessors. The key ``None`` here is used for application wide - #: callbacks, otherwise the key is the name of the blueprint. - #: Each of these functions has the chance to modify the dictionary - #: of URL values before they are used as the keyword arguments of the - #: view function. For each function registered this one should also - #: provide a :meth:`url_defaults` function that adds the parameters - #: automatically again that were removed that way. - #: - #: .. versionadded:: 0.7 - self.url_default_functions = {} - - #: A dictionary with list of functions that are called without argument - #: to populate the template context. The key of the dictionary is the - #: name of the blueprint this function is active for, ``None`` for all - #: requests. Each returns a dictionary that the template context is - #: updated with. To register a function here, use the - #: :meth:`context_processor` decorator. - self.template_context_processors = { - None: [_default_template_ctx_processor] - } - - #: A list of shell context processor functions that should be run - #: when a shell context is created. - #: - #: .. versionadded:: 0.11 - self.shell_context_processors = [] - - #: all the attached blueprints in a dictionary by name. Blueprints - #: can be attached multiple times so this dictionary does not tell - #: you how often they got attached. - #: - #: .. versionadded:: 0.7 - self.blueprints = {} - self._blueprint_order = [] - - #: a place where extensions can store application specific state. For - #: example this is where an extension could store database engines and - #: similar things. For backwards compatibility extensions should register - #: themselves like this:: - #: - #: if not hasattr(app, 'extensions'): - #: app.extensions = {} - #: app.extensions['extensionname'] = SomeObject() - #: - #: The key must match the name of the extension module. For example in - #: case of a "Flask-Foo" extension in `flask_foo`, the key would be - #: ``'foo'``. - #: - #: .. versionadded:: 0.7 - self.extensions = {} - - #: The :class:`~werkzeug.routing.Map` for this instance. You can use - #: this to change the routing converters after the class was created - #: but before any routes are connected. 
Example:: - #: - #: from werkzeug.routing import BaseConverter - #: - #: class ListConverter(BaseConverter): - #: def to_python(self, value): - #: return value.split(',') - #: def to_url(self, values): - #: return ','.join(super(ListConverter, self).to_url(value) - #: for value in values) - #: - #: app = Flask(__name__) - #: app.url_map.converters['list'] = ListConverter - self.url_map = Map() - - self.url_map.host_matching = host_matching - self.subdomain_matching = subdomain_matching - - # tracks internally if the application already handled at least one - # request. - self._got_first_request = False - self._before_request_lock = Lock() - - # Add a static route using the provided static_url_path, static_host, - # and static_folder if there is a configured static_folder. - # Note we do this without checking if static_folder exists. - # For one, it might be created while the server is running (e.g. during - # development). Also, Google App Engine stores static files somewhere - if self.has_static_folder: - assert bool(static_host) == host_matching, 'Invalid static_host/host_matching combination' - self.add_url_rule( - self.static_url_path + '/', - endpoint='static', - host=static_host, - view_func=self.send_static_file - ) - - #: The click command line context for this application. Commands - #: registered here show up in the :command:`flask` command once the - #: application has been discovered. The default commands are - #: provided by Flask itself and can be overridden. - #: - #: This is an instance of a :class:`click.Group` object. - self.cli = cli.AppGroup(self.name) - - @locked_cached_property - def name(self): - """The name of the application. This is usually the import name - with the difference that it's guessed from the run file if the - import name is main. This name is used as a display name when - Flask needs the name of the application. It can be set and overridden - to change the value. - - .. versionadded:: 0.8 - """ - if self.import_name == '__main__': - fn = getattr(sys.modules['__main__'], '__file__', None) - if fn is None: - return '__main__' - return os.path.splitext(os.path.basename(fn))[0] - return self.import_name - - @property - def propagate_exceptions(self): - """Returns the value of the ``PROPAGATE_EXCEPTIONS`` configuration - value in case it's set, otherwise a sensible default is returned. - - .. versionadded:: 0.7 - """ - rv = self.config['PROPAGATE_EXCEPTIONS'] - if rv is not None: - return rv - return self.testing or self.debug - - @property - def preserve_context_on_exception(self): - """Returns the value of the ``PRESERVE_CONTEXT_ON_EXCEPTION`` - configuration value in case it's set, otherwise a sensible default - is returned. - - .. versionadded:: 0.7 - """ - rv = self.config['PRESERVE_CONTEXT_ON_EXCEPTION'] - if rv is not None: - return rv - return self.debug - - @locked_cached_property - def logger(self): - """The ``'flask.app'`` logger, a standard Python - :class:`~logging.Logger`. - - In debug mode, the logger's :attr:`~logging.Logger.level` will be set - to :data:`~logging.DEBUG`. - - If there are no handlers configured, a default handler will be added. - See :ref:`logging` for more information. - - .. versionchanged:: 1.0 - Behavior was simplified. The logger is always named - ``flask.app``. The level is only set during configuration, it - doesn't check ``app.debug`` each time. Only one format is used, - not different ones depending on ``app.debug``. No handlers are - removed, and a handler is only added if no handlers are already - configured. 
- - .. versionadded:: 0.3 - """ - return create_logger(self) - - @locked_cached_property - def jinja_env(self): - """The Jinja2 environment used to load templates.""" - return self.create_jinja_environment() - - @property - def got_first_request(self): - """This attribute is set to ``True`` if the application started - handling the first request. - - .. versionadded:: 0.8 - """ - return self._got_first_request - - def make_config(self, instance_relative=False): - """Used to create the config attribute by the Flask constructor. - The `instance_relative` parameter is passed in from the constructor - of Flask (there named `instance_relative_config`) and indicates if - the config should be relative to the instance path or the root path - of the application. - - .. versionadded:: 0.8 - """ - root_path = self.root_path - if instance_relative: - root_path = self.instance_path - defaults = dict(self.default_config) - defaults['ENV'] = get_env() - defaults['DEBUG'] = get_debug_flag() - return self.config_class(root_path, defaults) - - def auto_find_instance_path(self): - """Tries to locate the instance path if it was not provided to the - constructor of the application class. It will basically calculate - the path to a folder named ``instance`` next to your main file or - the package. - - .. versionadded:: 0.8 - """ - prefix, package_path = find_package(self.import_name) - if prefix is None: - return os.path.join(package_path, 'instance') - return os.path.join(prefix, 'var', self.name + '-instance') - - def open_instance_resource(self, resource, mode='rb'): - """Opens a resource from the application's instance folder - (:attr:`instance_path`). Otherwise works like - :meth:`open_resource`. Instance resources can also be opened for - writing. - - :param resource: the name of the resource. To access resources within - subfolders use forward slashes as separator. - :param mode: resource file opening mode, default is 'rb'. - """ - return open(os.path.join(self.instance_path, resource), mode) - - def _get_templates_auto_reload(self): - """Reload templates when they are changed. Used by - :meth:`create_jinja_environment`. - - This attribute can be configured with :data:`TEMPLATES_AUTO_RELOAD`. If - not set, it will be enabled in debug mode. - - .. versionadded:: 1.0 - This property was added but the underlying config and behavior - already existed. - """ - rv = self.config['TEMPLATES_AUTO_RELOAD'] - return rv if rv is not None else self.debug - - def _set_templates_auto_reload(self, value): - self.config['TEMPLATES_AUTO_RELOAD'] = value - - templates_auto_reload = property( - _get_templates_auto_reload, _set_templates_auto_reload - ) - del _get_templates_auto_reload, _set_templates_auto_reload - - def create_jinja_environment(self): - """Creates the Jinja2 environment based on :attr:`jinja_options` - and :meth:`select_jinja_autoescape`. Since 0.7 this also adds - the Jinja2 globals and filters after initialization. Override - this function to customize the behavior. - - .. versionadded:: 0.5 - .. versionchanged:: 0.11 - ``Environment.auto_reload`` set in accordance with - ``TEMPLATES_AUTO_RELOAD`` configuration option. 
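-
-        A minimal override, as a sketch (``do_title`` is a hypothetical
-        helper, not part of Flask), could add a filter on top of the
-        default environment::
-
-            class MyFlask(Flask):
-                def create_jinja_environment(self):
-                    env = super(MyFlask, self).create_jinja_environment()
-                    env.filters['title_case'] = do_title
-                    return env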
- """ - options = dict(self.jinja_options) - - if 'autoescape' not in options: - options['autoescape'] = self.select_jinja_autoescape - - if 'auto_reload' not in options: - options['auto_reload'] = self.templates_auto_reload - - rv = self.jinja_environment(self, **options) - rv.globals.update( - url_for=url_for, - get_flashed_messages=get_flashed_messages, - config=self.config, - # request, session and g are normally added with the - # context processor for efficiency reasons but for imported - # templates we also want the proxies in there. - request=request, - session=session, - g=g - ) - rv.filters['tojson'] = json.tojson_filter - return rv - - def create_global_jinja_loader(self): - """Creates the loader for the Jinja2 environment. Can be used to - override just the loader and keeping the rest unchanged. It's - discouraged to override this function. Instead one should override - the :meth:`jinja_loader` function instead. - - The global loader dispatches between the loaders of the application - and the individual blueprints. - - .. versionadded:: 0.7 - """ - return DispatchingJinjaLoader(self) - - def select_jinja_autoescape(self, filename): - """Returns ``True`` if autoescaping should be active for the given - template name. If no template name is given, returns `True`. - - .. versionadded:: 0.5 - """ - if filename is None: - return True - return filename.endswith(('.html', '.htm', '.xml', '.xhtml')) - - def update_template_context(self, context): - """Update the template context with some commonly used variables. - This injects request, session, config and g into the template - context as well as everything template context processors want - to inject. Note that the as of Flask 0.6, the original values - in the context will not be overridden if a context processor - decides to return a value with the same key. - - :param context: the context as a dictionary that is updated in place - to add extra variables. - """ - funcs = self.template_context_processors[None] - reqctx = _request_ctx_stack.top - if reqctx is not None: - bp = reqctx.request.blueprint - if bp is not None and bp in self.template_context_processors: - funcs = chain(funcs, self.template_context_processors[bp]) - orig_ctx = context.copy() - for func in funcs: - context.update(func()) - # make sure the original values win. This makes it possible to - # easier add new variables in context processors without breaking - # existing views. - context.update(orig_ctx) - - def make_shell_context(self): - """Returns the shell context for an interactive shell for this - application. This runs all the registered shell context - processors. - - .. versionadded:: 0.11 - """ - rv = {'app': self, 'g': g} - for processor in self.shell_context_processors: - rv.update(processor()) - return rv - - #: What environment the app is running in. Flask and extensions may - #: enable behaviors based on the environment, such as enabling debug - #: mode. This maps to the :data:`ENV` config key. This is set by the - #: :envvar:`FLASK_ENV` environment variable and may not behave as - #: expected if set in code. - #: - #: **Do not enable development when deploying in production.** - #: - #: Default: ``'production'`` - env = ConfigAttribute('ENV') - - def _get_debug(self): - return self.config['DEBUG'] - - def _set_debug(self, value): - self.config['DEBUG'] = value - self.jinja_env.auto_reload = self.templates_auto_reload - - #: Whether debug mode is enabled. 
When using ``flask run`` to start - #: the development server, an interactive debugger will be shown for - #: unhandled exceptions, and the server will be reloaded when code - #: changes. This maps to the :data:`DEBUG` config key. This is - #: enabled when :attr:`env` is ``'development'`` and is overridden - #: by the ``FLASK_DEBUG`` environment variable. It may not behave as - #: expected if set in code. - #: - #: **Do not enable debug mode when deploying in production.** - #: - #: Default: ``True`` if :attr:`env` is ``'development'``, or - #: ``False`` otherwise. - debug = property(_get_debug, _set_debug) - del _get_debug, _set_debug - - def run(self, host=None, port=None, debug=None, - load_dotenv=True, **options): - """Runs the application on a local development server. - - Do not use ``run()`` in a production setting. It is not intended to - meet security and performance requirements for a production server. - Instead, see :ref:`deployment` for WSGI server recommendations. - - If the :attr:`debug` flag is set the server will automatically reload - for code changes and show a debugger in case an exception happened. - - If you want to run the application in debug mode, but disable the - code execution on the interactive debugger, you can pass - ``use_evalex=False`` as parameter. This will keep the debugger's - traceback screen active, but disable code execution. - - It is not recommended to use this function for development with - automatic reloading as this is badly supported. Instead you should - be using the :command:`flask` command line script's ``run`` support. - - .. admonition:: Keep in Mind - - Flask will suppress any server error with a generic error page - unless it is in debug mode. As such to enable just the - interactive debugger without the code reloading, you have to - invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``. - Setting ``use_debugger`` to ``True`` without being in debug mode - won't catch any exceptions because there won't be any to - catch. - - :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to - have the server available externally as well. Defaults to - ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable - if present. - :param port: the port of the webserver. Defaults to ``5000`` or the - port defined in the ``SERVER_NAME`` config variable if present. - :param debug: if given, enable or disable debug mode. See - :attr:`debug`. - :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv` - files to set environment variables. Will also change the working - directory to the directory containing the first file found. - :param options: the options to be forwarded to the underlying Werkzeug - server. See :func:`werkzeug.serving.run_simple` for more - information. - - .. versionchanged:: 1.0 - If installed, python-dotenv will be used to load environment - variables from :file:`.env` and :file:`.flaskenv` files. - - If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG` - environment variables will override :attr:`env` and - :attr:`debug`. - - Threaded mode is enabled by default. - - .. versionchanged:: 0.10 - The default port is now picked from the ``SERVER_NAME`` - variable. - """ - # Change this into a no-op if the server is invoked from the - # command line. Have a look at cli.py for more information. 
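-        # The flask CLI sets FLASK_RUN_FROM_CLI, so a stray app.run() in
-        # module code does not start a second server next to the one the
-        # CLI already manages.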
- if os.environ.get('FLASK_RUN_FROM_CLI') == 'true': - from .debughelpers import explain_ignored_app_run - explain_ignored_app_run() - return - - if get_load_dotenv(load_dotenv): - cli.load_dotenv() - - # if set, let env vars override previous values - if 'FLASK_ENV' in os.environ: - self.env = get_env() - self.debug = get_debug_flag() - elif 'FLASK_DEBUG' in os.environ: - self.debug = get_debug_flag() - - # debug passed to method overrides all other sources - if debug is not None: - self.debug = bool(debug) - - _host = '127.0.0.1' - _port = 5000 - server_name = self.config.get('SERVER_NAME') - sn_host, sn_port = None, None - - if server_name: - sn_host, _, sn_port = server_name.partition(':') - - host = host or sn_host or _host - port = int(port or sn_port or _port) - - options.setdefault('use_reloader', self.debug) - options.setdefault('use_debugger', self.debug) - options.setdefault('threaded', True) - - cli.show_server_banner(self.env, self.debug, self.name, False) - - from werkzeug.serving import run_simple - - try: - run_simple(host, port, self, **options) - finally: - # reset the first request information if the development server - # reset normally. This makes it possible to restart the server - # without reloader and that stuff from an interactive shell. - self._got_first_request = False - - def test_client(self, use_cookies=True, **kwargs): - """Creates a test client for this application. For information - about unit testing head over to :ref:`testing`. - - Note that if you are testing for assertions or exceptions in your - application code, you must set ``app.testing = True`` in order for the - exceptions to propagate to the test client. Otherwise, the exception - will be handled by the application (not visible to the test client) and - the only indication of an AssertionError or other exception will be a - 500 status code response to the test client. See the :attr:`testing` - attribute. For example:: - - app.testing = True - client = app.test_client() - - The test client can be used in a ``with`` block to defer the closing down - of the context until the end of the ``with`` block. This is useful if - you want to access the context locals for testing:: - - with app.test_client() as c: - rv = c.get('/?vodka=42') - assert request.args['vodka'] == '42' - - Additionally, you may pass optional keyword arguments that will then - be passed to the application's :attr:`test_client_class` constructor. - For example:: - - from flask.testing import FlaskClient - - class CustomClient(FlaskClient): - def __init__(self, *args, **kwargs): - self._authentication = kwargs.pop("authentication") - super(CustomClient,self).__init__( *args, **kwargs) - - app.test_client_class = CustomClient - client = app.test_client(authentication='Basic ....') - - See :class:`~flask.testing.FlaskClient` for more information. - - .. versionchanged:: 0.4 - added support for ``with`` block usage for the client. - - .. versionadded:: 0.7 - The `use_cookies` parameter was added as well as the ability - to override the client to be used by setting the - :attr:`test_client_class` attribute. - - .. versionchanged:: 0.11 - Added `**kwargs` to support passing additional keyword arguments to - the constructor of :attr:`test_client_class`. - """ - cls = self.test_client_class - if cls is None: - from flask.testing import FlaskClient as cls - return cls(self, self.response_class, use_cookies=use_cookies, **kwargs) - - def test_cli_runner(self, **kwargs): - """Create a CLI runner for testing CLI commands. 
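-
-        A short sketch of typical use, invoking the built-in ``routes``
-        command::
-
-            runner = app.test_cli_runner()
-            result = runner.invoke(args=['routes'])
-            assert result.exit_code == 0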
- See :ref:`testing-cli`. - - Returns an instance of :attr:`test_cli_runner_class`, by default - :class:`~flask.testing.FlaskCliRunner`. The Flask app object is - passed as the first argument. - - .. versionadded:: 1.0 - """ - cls = self.test_cli_runner_class - - if cls is None: - from flask.testing import FlaskCliRunner as cls - - return cls(self, **kwargs) - - def open_session(self, request): - """Creates or opens a new session. Default implementation stores all - session data in a signed cookie. This requires that the - :attr:`secret_key` is set. Instead of overriding this method - we recommend replacing the :class:`session_interface`. - - .. deprecated: 1.0 - Will be removed in 1.1. Use ``session_interface.open_session`` - instead. - - :param request: an instance of :attr:`request_class`. - """ - - warnings.warn(DeprecationWarning( - '"open_session" is deprecated and will be removed in 1.1. Use' - ' "session_interface.open_session" instead.' - )) - return self.session_interface.open_session(self, request) - - def save_session(self, session, response): - """Saves the session if it needs updates. For the default - implementation, check :meth:`open_session`. Instead of overriding this - method we recommend replacing the :class:`session_interface`. - - .. deprecated: 1.0 - Will be removed in 1.1. Use ``session_interface.save_session`` - instead. - - :param session: the session to be saved (a - :class:`~werkzeug.contrib.securecookie.SecureCookie` - object) - :param response: an instance of :attr:`response_class` - """ - - warnings.warn(DeprecationWarning( - '"save_session" is deprecated and will be removed in 1.1. Use' - ' "session_interface.save_session" instead.' - )) - return self.session_interface.save_session(self, session, response) - - def make_null_session(self): - """Creates a new instance of a missing session. Instead of overriding - this method we recommend replacing the :class:`session_interface`. - - .. deprecated: 1.0 - Will be removed in 1.1. Use ``session_interface.make_null_session`` - instead. - - .. versionadded:: 0.7 - """ - - warnings.warn(DeprecationWarning( - '"make_null_session" is deprecated and will be removed in 1.1. Use' - ' "session_interface.make_null_session" instead.' - )) - return self.session_interface.make_null_session(self) - - @setupmethod - def register_blueprint(self, blueprint, **options): - """Register a :class:`~flask.Blueprint` on the application. Keyword - arguments passed to this method will override the defaults set on the - blueprint. - - Calls the blueprint's :meth:`~flask.Blueprint.register` method after - recording the blueprint in the application's :attr:`blueprints`. - - :param blueprint: The blueprint to register. - :param url_prefix: Blueprint routes will be prefixed with this. - :param subdomain: Blueprint routes will match on this subdomain. - :param url_defaults: Blueprint routes will use these default values for - view arguments. - :param options: Additional keyword arguments are passed to - :class:`~flask.blueprints.BlueprintSetupState`. They can be - accessed in :meth:`~flask.Blueprint.record` callbacks. - - .. versionadded:: 0.7 - """ - first_registration = False - - if blueprint.name in self.blueprints: - assert self.blueprints[blueprint.name] is blueprint, ( - 'A name collision occurred between blueprints %r and %r. Both' - ' share the same name "%s". Blueprints that are created on the' - ' fly need unique names.' 
% ( - blueprint, self.blueprints[blueprint.name], blueprint.name - ) - ) - else: - self.blueprints[blueprint.name] = blueprint - self._blueprint_order.append(blueprint) - first_registration = True - - blueprint.register(self, options, first_registration) - - def iter_blueprints(self): - """Iterates over all blueprints by the order they were registered. - - .. versionadded:: 0.11 - """ - return iter(self._blueprint_order) - - @setupmethod - def add_url_rule(self, rule, endpoint=None, view_func=None, - provide_automatic_options=None, **options): - """Connects a URL rule. Works exactly like the :meth:`route` - decorator. If a view_func is provided it will be registered with the - endpoint. - - Basically this example:: - - @app.route('/') - def index(): - pass - - Is equivalent to the following:: - - def index(): - pass - app.add_url_rule('/', 'index', index) - - If the view_func is not provided you will need to connect the endpoint - to a view function like so:: - - app.view_functions['index'] = index - - Internally :meth:`route` invokes :meth:`add_url_rule` so if you want - to customize the behavior via subclassing you only need to change - this method. - - For more information refer to :ref:`url-route-registrations`. - - .. versionchanged:: 0.2 - `view_func` parameter added. - - .. versionchanged:: 0.6 - ``OPTIONS`` is added automatically as method. - - :param rule: the URL rule as string - :param endpoint: the endpoint for the registered URL rule. Flask - itself assumes the name of the view function as - endpoint - :param view_func: the function to call when serving a request to the - provided endpoint - :param provide_automatic_options: controls whether the ``OPTIONS`` - method should be added automatically. This can also be controlled - by setting the ``view_func.provide_automatic_options = False`` - before adding the rule. - :param options: the options to be forwarded to the underlying - :class:`~werkzeug.routing.Rule` object. A change - to Werkzeug is handling of method options. methods - is a list of methods this rule should be limited - to (``GET``, ``POST`` etc.). By default a rule - just listens for ``GET`` (and implicitly ``HEAD``). - Starting with Flask 0.6, ``OPTIONS`` is implicitly - added and handled by the standard request handling. - """ - if endpoint is None: - endpoint = _endpoint_from_view_func(view_func) - options['endpoint'] = endpoint - methods = options.pop('methods', None) - - # if the methods are not given and the view_func object knows its - # methods we can use that instead. If neither exists, we go with - # a tuple of only ``GET`` as default. - if methods is None: - methods = getattr(view_func, 'methods', None) or ('GET',) - if isinstance(methods, string_types): - raise TypeError('Allowed methods have to be iterables of strings, ' - 'for example: @app.route(..., methods=["POST"])') - methods = set(item.upper() for item in methods) - - # Methods that should always be added - required_methods = set(getattr(view_func, 'required_methods', ())) - - # starting with Flask 0.8 the view_func object can disable and - # force-enable the automatic options handling. - if provide_automatic_options is None: - provide_automatic_options = getattr(view_func, - 'provide_automatic_options', None) - - if provide_automatic_options is None: - if 'OPTIONS' not in methods: - provide_automatic_options = True - required_methods.add('OPTIONS') - else: - provide_automatic_options = False - - # Add the required methods now. 
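-        # (view_func.required_methods, plus OPTIONS when the automatic
-        # handling above decided to provide it)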
- methods |= required_methods - - rule = self.url_rule_class(rule, methods=methods, **options) - rule.provide_automatic_options = provide_automatic_options - - self.url_map.add(rule) - if view_func is not None: - old_func = self.view_functions.get(endpoint) - if old_func is not None and old_func != view_func: - raise AssertionError('View function mapping is overwriting an ' - 'existing endpoint function: %s' % endpoint) - self.view_functions[endpoint] = view_func - - def route(self, rule, **options): - """A decorator that is used to register a view function for a - given URL rule. This does the same thing as :meth:`add_url_rule` - but is intended for decorator usage:: - - @app.route('/') - def index(): - return 'Hello World' - - For more information refer to :ref:`url-route-registrations`. - - :param rule: the URL rule as string - :param endpoint: the endpoint for the registered URL rule. Flask - itself assumes the name of the view function as - endpoint - :param options: the options to be forwarded to the underlying - :class:`~werkzeug.routing.Rule` object. A change - to Werkzeug is handling of method options. methods - is a list of methods this rule should be limited - to (``GET``, ``POST`` etc.). By default a rule - just listens for ``GET`` (and implicitly ``HEAD``). - Starting with Flask 0.6, ``OPTIONS`` is implicitly - added and handled by the standard request handling. - """ - def decorator(f): - endpoint = options.pop('endpoint', None) - self.add_url_rule(rule, endpoint, f, **options) - return f - return decorator - - @setupmethod - def endpoint(self, endpoint): - """A decorator to register a function as an endpoint. - Example:: - - @app.endpoint('example.endpoint') - def example(): - return "example" - - :param endpoint: the name of the endpoint - """ - def decorator(f): - self.view_functions[endpoint] = f - return f - return decorator - - @staticmethod - def _get_exc_class_and_code(exc_class_or_code): - """Ensure that we register only exceptions as handler keys""" - if isinstance(exc_class_or_code, integer_types): - exc_class = default_exceptions[exc_class_or_code] - else: - exc_class = exc_class_or_code - - assert issubclass(exc_class, Exception) - - if issubclass(exc_class, HTTPException): - return exc_class, exc_class.code - else: - return exc_class, None - - @setupmethod - def errorhandler(self, code_or_exception): - """Register a function to handle errors by code or exception class. - - A decorator that is used to register a function given an - error code. Example:: - - @app.errorhandler(404) - def page_not_found(error): - return 'This page does not exist', 404 - - You can also register handlers for arbitrary exceptions:: - - @app.errorhandler(DatabaseError) - def special_exception_handler(error): - return 'Database connection failed', 500 - - .. versionadded:: 0.7 - Use :meth:`register_error_handler` instead of modifying - :attr:`error_handler_spec` directly, for application wide error - handlers. - - .. versionadded:: 0.7 - One can now additionally also register custom exception types - that do not necessarily have to be a subclass of the - :class:`~werkzeug.exceptions.HTTPException` class. 
- - :param code_or_exception: the code as integer for the handler, or - an arbitrary exception - """ - def decorator(f): - self._register_error_handler(None, code_or_exception, f) - return f - return decorator - - @setupmethod - def register_error_handler(self, code_or_exception, f): - """Alternative error attach function to the :meth:`errorhandler` - decorator that is more straightforward to use for non decorator - usage. - - .. versionadded:: 0.7 - """ - self._register_error_handler(None, code_or_exception, f) - - @setupmethod - def _register_error_handler(self, key, code_or_exception, f): - """ - :type key: None|str - :type code_or_exception: int|T<=Exception - :type f: callable - """ - if isinstance(code_or_exception, HTTPException): # old broken behavior - raise ValueError( - 'Tried to register a handler for an exception instance {0!r}.' - ' Handlers can only be registered for exception classes or' - ' HTTP error codes.'.format(code_or_exception) - ) - - try: - exc_class, code = self._get_exc_class_and_code(code_or_exception) - except KeyError: - raise KeyError( - "'{0}' is not a recognized HTTP error code. Use a subclass of" - " HTTPException with that code instead.".format(code_or_exception) - ) - - handlers = self.error_handler_spec.setdefault(key, {}).setdefault(code, {}) - handlers[exc_class] = f - - @setupmethod - def template_filter(self, name=None): - """A decorator that is used to register custom template filter. - You can specify a name for the filter, otherwise the function - name will be used. Example:: - - @app.template_filter() - def reverse(s): - return s[::-1] - - :param name: the optional name of the filter, otherwise the - function name will be used. - """ - def decorator(f): - self.add_template_filter(f, name=name) - return f - return decorator - - @setupmethod - def add_template_filter(self, f, name=None): - """Register a custom template filter. Works exactly like the - :meth:`template_filter` decorator. - - :param name: the optional name of the filter, otherwise the - function name will be used. - """ - self.jinja_env.filters[name or f.__name__] = f - - @setupmethod - def template_test(self, name=None): - """A decorator that is used to register custom template test. - You can specify a name for the test, otherwise the function - name will be used. Example:: - - @app.template_test() - def is_prime(n): - if n == 2: - return True - for i in range(2, int(math.ceil(math.sqrt(n))) + 1): - if n % i == 0: - return False - return True - - .. versionadded:: 0.10 - - :param name: the optional name of the test, otherwise the - function name will be used. - """ - def decorator(f): - self.add_template_test(f, name=name) - return f - return decorator - - @setupmethod - def add_template_test(self, f, name=None): - """Register a custom template test. Works exactly like the - :meth:`template_test` decorator. - - .. versionadded:: 0.10 - - :param name: the optional name of the test, otherwise the - function name will be used. - """ - self.jinja_env.tests[name or f.__name__] = f - - @setupmethod - def template_global(self, name=None): - """A decorator that is used to register a custom template global function. - You can specify a name for the global function, otherwise the function - name will be used. Example:: - - @app.template_global() - def double(n): - return 2 * n - - .. versionadded:: 0.10 - - :param name: the optional name of the global function, otherwise the - function name will be used. 
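-
-        Once registered, the function is available in templates like any
-        other global, e.g. ``{{ double(10) }}`` for the example above.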
- """ - def decorator(f): - self.add_template_global(f, name=name) - return f - return decorator - - @setupmethod - def add_template_global(self, f, name=None): - """Register a custom template global function. Works exactly like the - :meth:`template_global` decorator. - - .. versionadded:: 0.10 - - :param name: the optional name of the global function, otherwise the - function name will be used. - """ - self.jinja_env.globals[name or f.__name__] = f - - @setupmethod - def before_request(self, f): - """Registers a function to run before each request. - - For example, this can be used to open a database connection, or to load - the logged in user from the session. - - The function will be called without any arguments. If it returns a - non-None value, the value is handled as if it was the return value from - the view, and further request handling is stopped. - """ - self.before_request_funcs.setdefault(None, []).append(f) - return f - - @setupmethod - def before_first_request(self, f): - """Registers a function to be run before the first request to this - instance of the application. - - The function will be called without any arguments and its return - value is ignored. - - .. versionadded:: 0.8 - """ - self.before_first_request_funcs.append(f) - return f - - @setupmethod - def after_request(self, f): - """Register a function to be run after each request. - - Your function must take one parameter, an instance of - :attr:`response_class` and return a new response object or the - same (see :meth:`process_response`). - - As of Flask 0.7 this function might not be executed at the end of the - request in case an unhandled exception occurred. - """ - self.after_request_funcs.setdefault(None, []).append(f) - return f - - @setupmethod - def teardown_request(self, f): - """Register a function to be run at the end of each request, - regardless of whether there was an exception or not. These functions - are executed when the request context is popped, even if not an - actual request was performed. - - Example:: - - ctx = app.test_request_context() - ctx.push() - ... - ctx.pop() - - When ``ctx.pop()`` is executed in the above example, the teardown - functions are called just before the request context moves from the - stack of active contexts. This becomes relevant if you are using - such constructs in tests. - - Generally teardown functions must take every necessary step to avoid - that they will fail. If they do execute code that might fail they - will have to surround the execution of these code by try/except - statements and log occurring errors. - - When a teardown function was called because of an exception it will - be passed an error object. - - The return values of teardown functions are ignored. - - .. admonition:: Debug Note - - In debug mode Flask will not tear down a request on an exception - immediately. Instead it will keep it alive so that the interactive - debugger can still access it. This behavior can be controlled - by the ``PRESERVE_CONTEXT_ON_EXCEPTION`` configuration variable. - """ - self.teardown_request_funcs.setdefault(None, []).append(f) - return f - - @setupmethod - def teardown_appcontext(self, f): - """Registers a function to be called when the application context - ends. These functions are typically also called when the request - context is popped. - - Example:: - - ctx = app.app_context() - ctx.push() - ... 
- ctx.pop() - - When ``ctx.pop()`` is executed in the above example, the teardown - functions are called just before the app context moves from the - stack of active contexts. This becomes relevant if you are using - such constructs in tests. - - Since a request context typically also manages an application - context it would also be called when you pop a request context. - - When a teardown function was called because of an unhandled exception - it will be passed an error object. If an :meth:`errorhandler` is - registered, it will handle the exception and the teardown will not - receive it. - - The return values of teardown functions are ignored. - - .. versionadded:: 0.9 - """ - self.teardown_appcontext_funcs.append(f) - return f - - @setupmethod - def context_processor(self, f): - """Registers a template context processor function.""" - self.template_context_processors[None].append(f) - return f - - @setupmethod - def shell_context_processor(self, f): - """Registers a shell context processor function. - - .. versionadded:: 0.11 - """ - self.shell_context_processors.append(f) - return f - - @setupmethod - def url_value_preprocessor(self, f): - """Register a URL value preprocessor function for all view - functions in the application. These functions will be called before the - :meth:`before_request` functions. - - The function can modify the values captured from the matched url before - they are passed to the view. For example, this can be used to pop a - common language code value and place it in ``g`` rather than pass it to - every view. - - The function is passed the endpoint name and values dict. The return - value is ignored. - """ - self.url_value_preprocessors.setdefault(None, []).append(f) - return f - - @setupmethod - def url_defaults(self, f): - """Callback function for URL defaults for all view functions of the - application. It's called with the endpoint and values and should - update the values passed in place. - """ - self.url_default_functions.setdefault(None, []).append(f) - return f - - def _find_error_handler(self, e): - """Return a registered error handler for an exception in this order: - blueprint handler for a specific code, app handler for a specific code, - blueprint handler for an exception class, app handler for an exception - class, or ``None`` if a suitable handler is not found. - """ - exc_class, code = self._get_exc_class_and_code(type(e)) - - for name, c in ( - (request.blueprint, code), (None, code), - (request.blueprint, None), (None, None) - ): - handler_map = self.error_handler_spec.setdefault(name, {}).get(c) - - if not handler_map: - continue - - for cls in exc_class.__mro__: - handler = handler_map.get(cls) - - if handler is not None: - return handler - - def handle_http_exception(self, e): - """Handles an HTTP exception. By default this will invoke the - registered error handlers and fall back to returning the - exception as response. - - .. versionadded:: 0.3 - """ - # Proxy exceptions don't have error codes. We want to always return - # those unchanged as errors - if e.code is None: - return e - - handler = self._find_error_handler(e) - if handler is None: - return e - return handler(e) - - def trap_http_exception(self, e): - """Checks if an HTTP exception should be trapped or not. By default - this will return ``False`` for all exceptions except for a bad request - key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It - also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``. 
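-
-        For example, to surface every HTTP exception in the interactive
-        debugger (a debugging aid, not a production setting)::
-
-            app.config['TRAP_HTTP_EXCEPTIONS'] = True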
- - This is called for all HTTP exceptions raised by a view function. - If it returns ``True`` for any exception the error handler for this - exception is not called and it shows up as regular exception in the - traceback. This is helpful for debugging implicitly raised HTTP - exceptions. - - .. versionchanged:: 1.0 - Bad request errors are not trapped by default in debug mode. - - .. versionadded:: 0.8 - """ - if self.config['TRAP_HTTP_EXCEPTIONS']: - return True - - trap_bad_request = self.config['TRAP_BAD_REQUEST_ERRORS'] - - # if unset, trap key errors in debug mode - if ( - trap_bad_request is None and self.debug - and isinstance(e, BadRequestKeyError) - ): - return True - - if trap_bad_request: - return isinstance(e, BadRequest) - - return False - - def handle_user_exception(self, e): - """This method is called whenever an exception occurs that should be - handled. A special case are - :class:`~werkzeug.exception.HTTPException`\s which are forwarded by - this function to the :meth:`handle_http_exception` method. This - function will either return a response value or reraise the - exception with the same traceback. - - .. versionchanged:: 1.0 - Key errors raised from request data like ``form`` show the the bad - key in debug mode rather than a generic bad request message. - - .. versionadded:: 0.7 - """ - exc_type, exc_value, tb = sys.exc_info() - assert exc_value is e - # ensure not to trash sys.exc_info() at that point in case someone - # wants the traceback preserved in handle_http_exception. Of course - # we cannot prevent users from trashing it themselves in a custom - # trap_http_exception method so that's their fault then. - - # MultiDict passes the key to the exception, but that's ignored - # when generating the response message. Set an informative - # description for key errors in debug mode or when trapping errors. - if ( - (self.debug or self.config['TRAP_BAD_REQUEST_ERRORS']) - and isinstance(e, BadRequestKeyError) - # only set it if it's still the default description - and e.description is BadRequestKeyError.description - ): - e.description = "KeyError: '{0}'".format(*e.args) - - if isinstance(e, HTTPException) and not self.trap_http_exception(e): - return self.handle_http_exception(e) - - handler = self._find_error_handler(e) - - if handler is None: - reraise(exc_type, exc_value, tb) - return handler(e) - - def handle_exception(self, e): - """Default exception handling that kicks in when an exception - occurs that is not caught. In debug mode the exception will - be re-raised immediately, otherwise it is logged and the handler - for a 500 internal server error is used. If no such handler - exists, a default 500 internal server error message is displayed. - - .. versionadded:: 0.3 - """ - exc_type, exc_value, tb = sys.exc_info() - - got_request_exception.send(self, exception=e) - handler = self._find_error_handler(InternalServerError()) - - if self.propagate_exceptions: - # if we want to repropagate the exception, we can attempt to - # raise it with the whole traceback in case we can do that - # (the function was actually called from the except part) - # otherwise, we just raise the error again - if exc_value is e: - reraise(exc_type, exc_value, tb) - else: - raise e - - self.log_exception((exc_type, exc_value, tb)) - if handler is None: - return InternalServerError() - return self.finalize_request(handler(e), from_error_handler=True) - - def log_exception(self, exc_info): - """Logs an exception. 
This is called by :meth:`handle_exception` - if debugging is disabled and right before the handler is called. - The default implementation logs the exception as error on the - :attr:`logger`. - - .. versionadded:: 0.8 - """ - self.logger.error('Exception on %s [%s]' % ( - request.path, - request.method - ), exc_info=exc_info) - - def raise_routing_exception(self, request): - """Exceptions that are recording during routing are reraised with - this method. During debug we are not reraising redirect requests - for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising - a different error instead to help debug situations. - - :internal: - """ - if not self.debug \ - or not isinstance(request.routing_exception, RequestRedirect) \ - or request.method in ('GET', 'HEAD', 'OPTIONS'): - raise request.routing_exception - - from .debughelpers import FormDataRoutingRedirect - raise FormDataRoutingRedirect(request) - - def dispatch_request(self): - """Does the request dispatching. Matches the URL and returns the - return value of the view or error handler. This does not have to - be a response object. In order to convert the return value to a - proper response object, call :func:`make_response`. - - .. versionchanged:: 0.7 - This no longer does the exception handling, this code was - moved to the new :meth:`full_dispatch_request`. - """ - req = _request_ctx_stack.top.request - if req.routing_exception is not None: - self.raise_routing_exception(req) - rule = req.url_rule - # if we provide automatic options for this URL and the - # request came with the OPTIONS method, reply automatically - if getattr(rule, 'provide_automatic_options', False) \ - and req.method == 'OPTIONS': - return self.make_default_options_response() - # otherwise dispatch to the handler for that endpoint - return self.view_functions[rule.endpoint](**req.view_args) - - def full_dispatch_request(self): - """Dispatches the request and on top of that performs request - pre and postprocessing as well as HTTP exception catching and - error handling. - - .. versionadded:: 0.7 - """ - self.try_trigger_before_first_request_functions() - try: - request_started.send(self) - rv = self.preprocess_request() - if rv is None: - rv = self.dispatch_request() - except Exception as e: - rv = self.handle_user_exception(e) - return self.finalize_request(rv) - - def finalize_request(self, rv, from_error_handler=False): - """Given the return value from a view function this finalizes - the request by converting it into a response and invoking the - postprocessing functions. This is invoked for both normal - request dispatching as well as error handlers. - - Because this means that it might be called as a result of a - failure a special safe mode is available which can be enabled - with the `from_error_handler` flag. If enabled, failures in - response processing will be logged and otherwise ignored. - - :internal: - """ - response = self.make_response(rv) - try: - response = self.process_response(response) - request_finished.send(self, response=response) - except Exception: - if not from_error_handler: - raise - self.logger.exception('Request finalizing failed with an ' - 'error while handling an error') - return response - - def try_trigger_before_first_request_functions(self): - """Called before each request and will ensure that it triggers - the :attr:`before_first_request_funcs` and only exactly once per - application instance (which means process usually). 
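-        A lock guards the trigger, so concurrent first requests still run
-        the registered functions only once.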
- - :internal: - """ - if self._got_first_request: - return - with self._before_request_lock: - if self._got_first_request: - return - for func in self.before_first_request_funcs: - func() - self._got_first_request = True - - def make_default_options_response(self): - """This method is called to create the default ``OPTIONS`` response. - This can be changed through subclassing to change the default - behavior of ``OPTIONS`` responses. - - .. versionadded:: 0.7 - """ - adapter = _request_ctx_stack.top.url_adapter - if hasattr(adapter, 'allowed_methods'): - methods = adapter.allowed_methods() - else: - # fallback for Werkzeug < 0.7 - methods = [] - try: - adapter.match(method='--') - except MethodNotAllowed as e: - methods = e.valid_methods - except HTTPException as e: - pass - rv = self.response_class() - rv.allow.update(methods) - return rv - - def should_ignore_error(self, error): - """This is called to figure out if an error should be ignored - or not as far as the teardown system is concerned. If this - function returns ``True`` then the teardown handlers will not be - passed the error. - - .. versionadded:: 0.10 - """ - return False - - def make_response(self, rv): - """Convert the return value from a view function to an instance of - :attr:`response_class`. - - :param rv: the return value from the view function. The view function - must return a response. Returning ``None``, or the view ending - without returning, is not allowed. The following types are allowed - for ``view_rv``: - - ``str`` (``unicode`` in Python 2) - A response object is created with the string encoded to UTF-8 - as the body. - - ``bytes`` (``str`` in Python 2) - A response object is created with the bytes as the body. - - ``tuple`` - Either ``(body, status, headers)``, ``(body, status)``, or - ``(body, headers)``, where ``body`` is any of the other types - allowed here, ``status`` is a string or an integer, and - ``headers`` is a dictionary or a list of ``(key, value)`` - tuples. If ``body`` is a :attr:`response_class` instance, - ``status`` overwrites the exiting value and ``headers`` are - extended. - - :attr:`response_class` - The object is returned unchanged. - - other :class:`~werkzeug.wrappers.Response` class - The object is coerced to :attr:`response_class`. - - :func:`callable` - The function is called as a WSGI application. The result is - used to create a response object. - - .. versionchanged:: 0.9 - Previously a tuple was interpreted as the arguments for the - response object. - """ - - status = headers = None - - # unpack tuple returns - if isinstance(rv, tuple): - len_rv = len(rv) - - # a 3-tuple is unpacked directly - if len_rv == 3: - rv, status, headers = rv - # decide if a 2-tuple has status or headers - elif len_rv == 2: - if isinstance(rv[1], (Headers, dict, tuple, list)): - rv, headers = rv - else: - rv, status = rv - # other sized tuples are not allowed - else: - raise TypeError( - 'The view function did not return a valid response tuple.' - ' The tuple must have the form (body, status, headers),' - ' (body, status), or (body, headers).' - ) - - # the body must not be None - if rv is None: - raise TypeError( - 'The view function did not return a valid response. The' - ' function either returned None or ended without a return' - ' statement.' 
- ) - - # make sure the body is an instance of the response class - if not isinstance(rv, self.response_class): - if isinstance(rv, (text_type, bytes, bytearray)): - # let the response class set the status and headers instead of - # waiting to do it manually, so that the class can handle any - # special logic - rv = self.response_class(rv, status=status, headers=headers) - status = headers = None - else: - # evaluate a WSGI callable, or coerce a different response - # class to the correct type - try: - rv = self.response_class.force_type(rv, request.environ) - except TypeError as e: - new_error = TypeError( - '{e}\nThe view function did not return a valid' - ' response. The return type must be a string, tuple,' - ' Response instance, or WSGI callable, but it was a' - ' {rv.__class__.__name__}.'.format(e=e, rv=rv) - ) - reraise(TypeError, new_error, sys.exc_info()[2]) - - # prefer the status if it was provided - if status is not None: - if isinstance(status, (text_type, bytes, bytearray)): - rv.status = status - else: - rv.status_code = status - - # extend existing headers with provided headers - if headers: - rv.headers.extend(headers) - - return rv - - def create_url_adapter(self, request): - """Creates a URL adapter for the given request. The URL adapter - is created at a point where the request context is not yet set - up so the request is passed explicitly. - - .. versionadded:: 0.6 - - .. versionchanged:: 0.9 - This can now also be called without a request object when the - URL adapter is created for the application context. - - .. versionchanged:: 1.0 - :data:`SERVER_NAME` no longer implicitly enables subdomain - matching. Use :attr:`subdomain_matching` instead. - """ - if request is not None: - # If subdomain matching is disabled (the default), use the - # default subdomain in all cases. This should be the default - # in Werkzeug but it currently does not have that feature. - subdomain = ((self.url_map.default_subdomain or None) - if not self.subdomain_matching else None) - return self.url_map.bind_to_environ( - request.environ, - server_name=self.config['SERVER_NAME'], - subdomain=subdomain) - # We need at the very least the server name to be set for this - # to work. - if self.config['SERVER_NAME'] is not None: - return self.url_map.bind( - self.config['SERVER_NAME'], - script_name=self.config['APPLICATION_ROOT'], - url_scheme=self.config['PREFERRED_URL_SCHEME']) - - def inject_url_defaults(self, endpoint, values): - """Injects the URL defaults for the given endpoint directly into - the values dictionary passed. This is used internally and - automatically called on URL building. - - .. versionadded:: 0.7 - """ - funcs = self.url_default_functions.get(None, ()) - if '.' in endpoint: - bp = endpoint.rsplit('.', 1)[0] - funcs = chain(funcs, self.url_default_functions.get(bp, ())) - for func in funcs: - func(endpoint, values) - - def handle_url_build_error(self, error, endpoint, values): - """Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`. - """ - exc_type, exc_value, tb = sys.exc_info() - for handler in self.url_build_error_handlers: - try: - rv = handler(error, endpoint, values) - if rv is not None: - return rv - except BuildError as e: - # make error available outside except block (py3) - error = e - - # At this point we want to reraise the exception. If the error is - # still the same one we can reraise it with the original traceback, - # otherwise we raise it from here. 
- if error is exc_value: - reraise(exc_type, exc_value, tb) - raise error - - def preprocess_request(self): - """Called before the request is dispatched. Calls - :attr:`url_value_preprocessors` registered with the app and the - current blueprint (if any). Then calls :attr:`before_request_funcs` - registered with the app and the blueprint. - - If any :meth:`before_request` handler returns a non-None value, the - value is handled as if it was the return value from the view, and - further request handling is stopped. - """ - - bp = _request_ctx_stack.top.request.blueprint - - funcs = self.url_value_preprocessors.get(None, ()) - if bp is not None and bp in self.url_value_preprocessors: - funcs = chain(funcs, self.url_value_preprocessors[bp]) - for func in funcs: - func(request.endpoint, request.view_args) - - funcs = self.before_request_funcs.get(None, ()) - if bp is not None and bp in self.before_request_funcs: - funcs = chain(funcs, self.before_request_funcs[bp]) - for func in funcs: - rv = func() - if rv is not None: - return rv - - def process_response(self, response): - """Can be overridden in order to modify the response object - before it's sent to the WSGI server. By default this will - call all the :meth:`after_request` decorated functions. - - .. versionchanged:: 0.5 - As of Flask 0.5 the functions registered for after request - execution are called in reverse order of registration. - - :param response: a :attr:`response_class` object. - :return: a new response object or the same, has to be an - instance of :attr:`response_class`. - """ - ctx = _request_ctx_stack.top - bp = ctx.request.blueprint - funcs = ctx._after_request_functions - if bp is not None and bp in self.after_request_funcs: - funcs = chain(funcs, reversed(self.after_request_funcs[bp])) - if None in self.after_request_funcs: - funcs = chain(funcs, reversed(self.after_request_funcs[None])) - for handler in funcs: - response = handler(response) - if not self.session_interface.is_null_session(ctx.session): - self.session_interface.save_session(self, ctx.session, response) - return response - - def do_teardown_request(self, exc=_sentinel): - """Called after the request is dispatched and the response is - returned, right before the request context is popped. - - This calls all functions decorated with - :meth:`teardown_request`, and :meth:`Blueprint.teardown_request` - if a blueprint handled the request. Finally, the - :data:`request_tearing_down` signal is sent. - - This is called by - :meth:`RequestContext.pop() `, - which may be delayed during testing to maintain access to - resources. - - :param exc: An unhandled exception raised while dispatching the - request. Detected from the current exception information if - not passed. Passed to each teardown function. - - .. versionchanged:: 0.9 - Added the ``exc`` argument. - """ - if exc is _sentinel: - exc = sys.exc_info()[1] - funcs = reversed(self.teardown_request_funcs.get(None, ())) - bp = _request_ctx_stack.top.request.blueprint - if bp is not None and bp in self.teardown_request_funcs: - funcs = chain(funcs, reversed(self.teardown_request_funcs[bp])) - for func in funcs: - func(exc) - request_tearing_down.send(self, exc=exc) - - def do_teardown_appcontext(self, exc=_sentinel): - """Called right before the application context is popped. - - When handling a request, the application context is popped - after the request context. See :meth:`do_teardown_request`. - - This calls all functions decorated with - :meth:`teardown_appcontext`. 
Then the - :data:`appcontext_tearing_down` signal is sent. - - This is called by - :meth:`AppContext.pop() `. - - .. versionadded:: 0.9 - """ - if exc is _sentinel: - exc = sys.exc_info()[1] - for func in reversed(self.teardown_appcontext_funcs): - func(exc) - appcontext_tearing_down.send(self, exc=exc) - - def app_context(self): - """Create an :class:`~flask.ctx.AppContext`. Use as a ``with`` - block to push the context, which will make :data:`current_app` - point at this application. - - An application context is automatically pushed by - :meth:`RequestContext.push() ` - when handling a request, and when running a CLI command. Use - this to manually create a context outside of these situations. - - :: - - with app.app_context(): - init_db() - - See :doc:`/appcontext`. - - .. versionadded:: 0.9 - """ - return AppContext(self) - - def request_context(self, environ): - """Create a :class:`~flask.ctx.RequestContext` representing a - WSGI environment. Use a ``with`` block to push the context, - which will make :data:`request` point at this request. - - See :doc:`/reqcontext`. - - Typically you should not call this from your own code. A request - context is automatically pushed by the :meth:`wsgi_app` when - handling a request. Use :meth:`test_request_context` to create - an environment and context instead of this method. - - :param environ: a WSGI environment - """ - return RequestContext(self, environ) - - def test_request_context(self, *args, **kwargs): - """Create a :class:`~flask.ctx.RequestContext` for a WSGI - environment created from the given values. This is mostly useful - during testing, where you may want to run a function that uses - request data without dispatching a full request. - - See :doc:`/reqcontext`. - - Use a ``with`` block to push the context, which will make - :data:`request` point at the request for the created - environment. :: - - with test_request_context(...): - generate_report() - - When using the shell, it may be easier to push and pop the - context manually to avoid indentation. :: - - ctx = app.test_request_context(...) - ctx.push() - ... - ctx.pop() - - Takes the same arguments as Werkzeug's - :class:`~werkzeug.test.EnvironBuilder`, with some defaults from - the application. See the linked Werkzeug docs for most of the - available arguments. Flask-specific behavior is listed here. - - :param path: URL path being requested. - :param base_url: Base URL where the app is being served, which - ``path`` is relative to. If not given, built from - :data:`PREFERRED_URL_SCHEME`, ``subdomain``, - :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`. - :param subdomain: Subdomain name to append to - :data:`SERVER_NAME`. - :param url_scheme: Scheme to use instead of - :data:`PREFERRED_URL_SCHEME`. - :param data: The request body, either as a string or a dict of - form keys and values. - :param json: If given, this is serialized as JSON and passed as - ``data``. Also defaults ``content_type`` to - ``application/json``. - :param args: other positional arguments passed to - :class:`~werkzeug.test.EnvironBuilder`. - :param kwargs: other keyword arguments passed to - :class:`~werkzeug.test.EnvironBuilder`. - """ - from flask.testing import make_test_environ_builder - - builder = make_test_environ_builder(self, *args, **kwargs) - - try: - return self.request_context(builder.get_environ()) - finally: - builder.close() - - def wsgi_app(self, environ, start_response): - """The actual WSGI application. 
This is not implemented in - :meth:`__call__` so that middlewares can be applied without - losing a reference to the app object. Instead of doing this:: - - app = MyMiddleware(app) - - It's a better idea to do this instead:: - - app.wsgi_app = MyMiddleware(app.wsgi_app) - - Then you still have the original application object around and - can continue to call methods on it. - - .. versionchanged:: 0.7 - Teardown events for the request and app contexts are called - even if an unhandled error occurs. Other events may not be - called depending on when an error occurs during dispatch. - See :ref:`callbacks-and-errors`. - - :param environ: A WSGI environment. - :param start_response: A callable accepting a status code, - a list of headers, and an optional exception context to - start the response. - """ - ctx = self.request_context(environ) - error = None - try: - try: - ctx.push() - response = self.full_dispatch_request() - except Exception as e: - error = e - response = self.handle_exception(e) - except: - error = sys.exc_info()[1] - raise - return response(environ, start_response) - finally: - if self.should_ignore_error(error): - error = None - ctx.auto_pop(error) - - def __call__(self, environ, start_response): - """The WSGI server calls the Flask application object as the - WSGI application. This calls :meth:`wsgi_app` which can be - wrapped to applying middleware.""" - return self.wsgi_app(environ, start_response) - - def __repr__(self): - return '<%s %r>' % ( - self.__class__.__name__, - self.name, - ) diff --git a/flo-token-explorer/lib/python3.6/site-packages/flask/blueprints.py b/flo-token-explorer/lib/python3.6/site-packages/flask/blueprints.py deleted file mode 100644 index 5ce5561..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/flask/blueprints.py +++ /dev/null @@ -1,448 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.blueprints - ~~~~~~~~~~~~~~~~ - - Blueprints are the recommended way to implement larger or more - pluggable applications in Flask 0.7 and later. - - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. -""" -from functools import update_wrapper -from werkzeug.urls import url_join - -from .helpers import _PackageBoundObject, _endpoint_from_view_func - - -class BlueprintSetupState(object): - """Temporary holder object for registering a blueprint with the - application. An instance of this class is created by the - :meth:`~flask.Blueprint.make_setup_state` method and later passed - to all register callback functions. - """ - - def __init__(self, blueprint, app, options, first_registration): - #: a reference to the current application - self.app = app - - #: a reference to the blueprint that created this setup state. - self.blueprint = blueprint - - #: a dictionary with all options that were passed to the - #: :meth:`~flask.Flask.register_blueprint` method. - self.options = options - - #: as blueprints can be registered multiple times with the - #: application and not everything wants to be registered - #: multiple times on it, this attribute can be used to figure - #: out if the blueprint was registered in the past already. - self.first_registration = first_registration - - subdomain = self.options.get('subdomain') - if subdomain is None: - subdomain = self.blueprint.subdomain - - #: The subdomain that the blueprint should be active for, ``None`` - #: otherwise. 
- self.subdomain = subdomain - - url_prefix = self.options.get('url_prefix') - if url_prefix is None: - url_prefix = self.blueprint.url_prefix - #: The prefix that should be used for all URLs defined on the - #: blueprint. - self.url_prefix = url_prefix - - #: A dictionary with URL defaults that is added to each and every - #: URL that was defined with the blueprint. - self.url_defaults = dict(self.blueprint.url_values_defaults) - self.url_defaults.update(self.options.get('url_defaults', ())) - - def add_url_rule(self, rule, endpoint=None, view_func=None, **options): - """A helper method to register a rule (and optionally a view function) - to the application. The endpoint is automatically prefixed with the - blueprint's name. - """ - if self.url_prefix is not None: - if rule: - rule = '/'.join(( - self.url_prefix.rstrip('/'), rule.lstrip('/'))) - else: - rule = self.url_prefix - options.setdefault('subdomain', self.subdomain) - if endpoint is None: - endpoint = _endpoint_from_view_func(view_func) - defaults = self.url_defaults - if 'defaults' in options: - defaults = dict(defaults, **options.pop('defaults')) - self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint), - view_func, defaults=defaults, **options) - - -class Blueprint(_PackageBoundObject): - """Represents a blueprint. A blueprint is an object that records - functions that will be called with the - :class:`~flask.blueprints.BlueprintSetupState` later to register functions - or other things on the main application. See :ref:`blueprints` for more - information. - - .. versionadded:: 0.7 - """ - - warn_on_modifications = False - _got_registered_once = False - - #: Blueprint local JSON decoder class to use. - #: Set to ``None`` to use the app's :class:`~flask.app.Flask.json_encoder`. - json_encoder = None - #: Blueprint local JSON decoder class to use. - #: Set to ``None`` to use the app's :class:`~flask.app.Flask.json_decoder`. - json_decoder = None - - # TODO remove the next three attrs when Sphinx :inherited-members: works - # https://github.com/sphinx-doc/sphinx/issues/741 - - #: The name of the package or module that this app belongs to. Do not - #: change this once it is set by the constructor. - import_name = None - - #: Location of the template files to be added to the template lookup. - #: ``None`` if templates should not be added. - template_folder = None - - #: Absolute path to the package on the filesystem. Used to look up - #: resources contained in the package. - root_path = None - - def __init__(self, name, import_name, static_folder=None, - static_url_path=None, template_folder=None, - url_prefix=None, subdomain=None, url_defaults=None, - root_path=None): - _PackageBoundObject.__init__(self, import_name, template_folder, - root_path=root_path) - self.name = name - self.url_prefix = url_prefix - self.subdomain = subdomain - self.static_folder = static_folder - self.static_url_path = static_url_path - self.deferred_functions = [] - if url_defaults is None: - url_defaults = {} - self.url_values_defaults = url_defaults - - def record(self, func): - """Registers a function that is called when the blueprint is - registered on the application. This function is called with the - state as argument as returned by the :meth:`make_setup_state` - method. - """ - if self._got_registered_once and self.warn_on_modifications: - from warnings import warn - warn(Warning('The blueprint was already registered once ' - 'but is getting modified now. 
These changes ' - 'will not show up.')) - self.deferred_functions.append(func) - - def record_once(self, func): - """Works like :meth:`record` but wraps the function in another - function that will ensure the function is only called once. If the - blueprint is registered a second time on the application, the - function passed is not called. - """ - def wrapper(state): - if state.first_registration: - func(state) - return self.record(update_wrapper(wrapper, func)) - - def make_setup_state(self, app, options, first_registration=False): - """Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState` - object that is later passed to the register callback functions. - Subclasses can override this to return a subclass of the setup state. - """ - return BlueprintSetupState(self, app, options, first_registration) - - def register(self, app, options, first_registration=False): - """Called by :meth:`Flask.register_blueprint` to register all views - and callbacks registered on the blueprint with the application. Creates - a :class:`.BlueprintSetupState` and calls each :meth:`record` callback - with it. - - :param app: The application this blueprint is being registered with. - :param options: Keyword arguments forwarded from - :meth:`~Flask.register_blueprint`. - :param first_registration: Whether this is the first time this - blueprint has been registered on the application. - """ - self._got_registered_once = True - state = self.make_setup_state(app, options, first_registration) - - if self.has_static_folder: - state.add_url_rule( - self.static_url_path + '/', - view_func=self.send_static_file, endpoint='static' - ) - - for deferred in self.deferred_functions: - deferred(state) - - def route(self, rule, **options): - """Like :meth:`Flask.route` but for a blueprint. The endpoint for the - :func:`url_for` function is prefixed with the name of the blueprint. - """ - def decorator(f): - endpoint = options.pop("endpoint", f.__name__) - self.add_url_rule(rule, endpoint, f, **options) - return f - return decorator - - def add_url_rule(self, rule, endpoint=None, view_func=None, **options): - """Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for - the :func:`url_for` function is prefixed with the name of the blueprint. - """ - if endpoint: - assert '.' not in endpoint, "Blueprint endpoints should not contain dots" - if view_func and hasattr(view_func, '__name__'): - assert '.' not in view_func.__name__, "Blueprint view function name should not contain dots" - self.record(lambda s: - s.add_url_rule(rule, endpoint, view_func, **options)) - - def endpoint(self, endpoint): - """Like :meth:`Flask.endpoint` but for a blueprint. This does not - prefix the endpoint with the blueprint name, this has to be done - explicitly by the user of this method. If the endpoint is prefixed - with a `.` it will be registered to the current blueprint, otherwise - it's an application independent endpoint. - """ - def decorator(f): - def register_endpoint(state): - state.app.view_functions[endpoint] = f - self.record_once(register_endpoint) - return f - return decorator - - def app_template_filter(self, name=None): - """Register a custom template filter, available application wide. Like - :meth:`Flask.template_filter` but for a blueprint. - - :param name: the optional name of the filter, otherwise the - function name will be used. 
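A short sketch of the decorator documented above; the blueprint and
filter names are invented for illustration::

    from flask import Blueprint

    bp = Blueprint('admin', __name__)

    @bp.app_template_filter('reverse')
    def reverse_filter(s):
        # Becomes available application wide once ``bp`` is registered.
        return s[::-1]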
- """ - def decorator(f): - self.add_app_template_filter(f, name=name) - return f - return decorator - - def add_app_template_filter(self, f, name=None): - """Register a custom template filter, available application wide. Like - :meth:`Flask.add_template_filter` but for a blueprint. Works exactly - like the :meth:`app_template_filter` decorator. - - :param name: the optional name of the filter, otherwise the - function name will be used. - """ - def register_template(state): - state.app.jinja_env.filters[name or f.__name__] = f - self.record_once(register_template) - - def app_template_test(self, name=None): - """Register a custom template test, available application wide. Like - :meth:`Flask.template_test` but for a blueprint. - - .. versionadded:: 0.10 - - :param name: the optional name of the test, otherwise the - function name will be used. - """ - def decorator(f): - self.add_app_template_test(f, name=name) - return f - return decorator - - def add_app_template_test(self, f, name=None): - """Register a custom template test, available application wide. Like - :meth:`Flask.add_template_test` but for a blueprint. Works exactly - like the :meth:`app_template_test` decorator. - - .. versionadded:: 0.10 - - :param name: the optional name of the test, otherwise the - function name will be used. - """ - def register_template(state): - state.app.jinja_env.tests[name or f.__name__] = f - self.record_once(register_template) - - def app_template_global(self, name=None): - """Register a custom template global, available application wide. Like - :meth:`Flask.template_global` but for a blueprint. - - .. versionadded:: 0.10 - - :param name: the optional name of the global, otherwise the - function name will be used. - """ - def decorator(f): - self.add_app_template_global(f, name=name) - return f - return decorator - - def add_app_template_global(self, f, name=None): - """Register a custom template global, available application wide. Like - :meth:`Flask.add_template_global` but for a blueprint. Works exactly - like the :meth:`app_template_global` decorator. - - .. versionadded:: 0.10 - - :param name: the optional name of the global, otherwise the - function name will be used. - """ - def register_template(state): - state.app.jinja_env.globals[name or f.__name__] = f - self.record_once(register_template) - - def before_request(self, f): - """Like :meth:`Flask.before_request` but for a blueprint. This function - is only executed before each request that is handled by a function of - that blueprint. - """ - self.record_once(lambda s: s.app.before_request_funcs - .setdefault(self.name, []).append(f)) - return f - - def before_app_request(self, f): - """Like :meth:`Flask.before_request`. Such a function is executed - before each request, even if outside of a blueprint. - """ - self.record_once(lambda s: s.app.before_request_funcs - .setdefault(None, []).append(f)) - return f - - def before_app_first_request(self, f): - """Like :meth:`Flask.before_first_request`. Such a function is - executed before the first request to the application. - """ - self.record_once(lambda s: s.app.before_first_request_funcs.append(f)) - return f - - def after_request(self, f): - """Like :meth:`Flask.after_request` but for a blueprint. This function - is only executed after each request that is handled by a function of - that blueprint. 
- """ - self.record_once(lambda s: s.app.after_request_funcs - .setdefault(self.name, []).append(f)) - return f - - def after_app_request(self, f): - """Like :meth:`Flask.after_request` but for a blueprint. Such a function - is executed after each request, even if outside of the blueprint. - """ - self.record_once(lambda s: s.app.after_request_funcs - .setdefault(None, []).append(f)) - return f - - def teardown_request(self, f): - """Like :meth:`Flask.teardown_request` but for a blueprint. This - function is only executed when tearing down requests handled by a - function of that blueprint. Teardown request functions are executed - when the request context is popped, even when no actual request was - performed. - """ - self.record_once(lambda s: s.app.teardown_request_funcs - .setdefault(self.name, []).append(f)) - return f - - def teardown_app_request(self, f): - """Like :meth:`Flask.teardown_request` but for a blueprint. Such a - function is executed when tearing down each request, even if outside of - the blueprint. - """ - self.record_once(lambda s: s.app.teardown_request_funcs - .setdefault(None, []).append(f)) - return f - - def context_processor(self, f): - """Like :meth:`Flask.context_processor` but for a blueprint. This - function is only executed for requests handled by a blueprint. - """ - self.record_once(lambda s: s.app.template_context_processors - .setdefault(self.name, []).append(f)) - return f - - def app_context_processor(self, f): - """Like :meth:`Flask.context_processor` but for a blueprint. Such a - function is executed each request, even if outside of the blueprint. - """ - self.record_once(lambda s: s.app.template_context_processors - .setdefault(None, []).append(f)) - return f - - def app_errorhandler(self, code): - """Like :meth:`Flask.errorhandler` but for a blueprint. This - handler is used for all requests, even if outside of the blueprint. - """ - def decorator(f): - self.record_once(lambda s: s.app.errorhandler(code)(f)) - return f - return decorator - - def url_value_preprocessor(self, f): - """Registers a function as URL value preprocessor for this - blueprint. It's called before the view functions are called and - can modify the url values provided. - """ - self.record_once(lambda s: s.app.url_value_preprocessors - .setdefault(self.name, []).append(f)) - return f - - def url_defaults(self, f): - """Callback function for URL defaults for this blueprint. It's called - with the endpoint and values and should update the values passed - in place. - """ - self.record_once(lambda s: s.app.url_default_functions - .setdefault(self.name, []).append(f)) - return f - - def app_url_value_preprocessor(self, f): - """Same as :meth:`url_value_preprocessor` but application wide. - """ - self.record_once(lambda s: s.app.url_value_preprocessors - .setdefault(None, []).append(f)) - return f - - def app_url_defaults(self, f): - """Same as :meth:`url_defaults` but application wide. - """ - self.record_once(lambda s: s.app.url_default_functions - .setdefault(None, []).append(f)) - return f - - def errorhandler(self, code_or_exception): - """Registers an error handler that becomes active for this blueprint - only. Please be aware that routing does not happen local to a - blueprint so an error handler for 404 usually is not handled by - a blueprint unless it is caused inside a view function. Another - special case is the 500 internal server error which is always looked - up from the application. 
- - Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator - of the :class:`~flask.Flask` object. - """ - def decorator(f): - self.record_once(lambda s: s.app._register_error_handler( - self.name, code_or_exception, f)) - return f - return decorator - - def register_error_handler(self, code_or_exception, f): - """Non-decorator version of the :meth:`errorhandler` error attach - function, akin to the :meth:`~flask.Flask.register_error_handler` - application-wide function of the :class:`~flask.Flask` object but - for error handlers limited to this blueprint. - - .. versionadded:: 0.11 - """ - self.record_once(lambda s: s.app._register_error_handler( - self.name, code_or_exception, f)) diff --git a/flo-token-explorer/lib/python3.6/site-packages/flask/cli.py b/flo-token-explorer/lib/python3.6/site-packages/flask/cli.py deleted file mode 100644 index efc1733..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/flask/cli.py +++ /dev/null @@ -1,898 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.cli - ~~~~~~~~~ - - A simple command line application to run flask apps. - - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. -""" - -from __future__ import print_function - -import ast -import inspect -import os -import re -import ssl -import sys -import traceback -from functools import update_wrapper -from operator import attrgetter -from threading import Lock, Thread - -import click -from werkzeug.utils import import_string - -from . import __version__ -from ._compat import getargspec, iteritems, reraise, text_type -from .globals import current_app -from .helpers import get_debug_flag, get_env, get_load_dotenv - -try: - import dotenv -except ImportError: - dotenv = None - - -class NoAppException(click.UsageError): - """Raised if an application cannot be found or loaded.""" - - -def find_best_app(script_info, module): - """Given a module instance this tries to find the best possible - application in the module or raises an exception. - """ - from . import Flask - - # Search for the most common names first. - for attr_name in ('app', 'application'): - app = getattr(module, attr_name, None) - - if isinstance(app, Flask): - return app - - # Otherwise find the only object that is a Flask instance. - matches = [ - v for k, v in iteritems(module.__dict__) if isinstance(v, Flask) - ] - - if len(matches) == 1: - return matches[0] - elif len(matches) > 1: - raise NoAppException( - 'Detected multiple Flask applications in module "{module}". Use ' - '"FLASK_APP={module}:name" to specify the correct ' - 'one.'.format(module=module.__name__) - ) - - # Search for app factory functions. - for attr_name in ('create_app', 'make_app'): - app_factory = getattr(module, attr_name, None) - - if inspect.isfunction(app_factory): - try: - app = call_factory(script_info, app_factory) - - if isinstance(app, Flask): - return app - except TypeError: - if not _called_with_wrong_args(app_factory): - raise - raise NoAppException( - 'Detected factory "{factory}" in module "{module}", but ' - 'could not call it without arguments. Use ' - '"FLASK_APP=\'{module}:{factory}(args)\'" to specify ' - 'arguments.'.format( - factory=attr_name, module=module.__name__ - ) - ) - - raise NoAppException( - 'Failed to find Flask application or factory in module "{module}". 
' - 'Use "FLASK_APP={module}:name to specify one.'.format( - module=module.__name__ - ) - ) - - -def call_factory(script_info, app_factory, arguments=()): - """Takes an app factory, a ``script_info` object and optionally a tuple - of arguments. Checks for the existence of a script_info argument and calls - the app_factory depending on that and the arguments provided. - """ - args_spec = getargspec(app_factory) - arg_names = args_spec.args - arg_defaults = args_spec.defaults - - if 'script_info' in arg_names: - return app_factory(*arguments, script_info=script_info) - elif arguments: - return app_factory(*arguments) - elif not arguments and len(arg_names) == 1 and arg_defaults is None: - return app_factory(script_info) - - return app_factory() - - -def _called_with_wrong_args(factory): - """Check whether calling a function raised a ``TypeError`` because - the call failed or because something in the factory raised the - error. - - :param factory: the factory function that was called - :return: true if the call failed - """ - tb = sys.exc_info()[2] - - try: - while tb is not None: - if tb.tb_frame.f_code is factory.__code__: - # in the factory, it was called successfully - return False - - tb = tb.tb_next - - # didn't reach the factory - return True - finally: - del tb - - -def find_app_by_string(script_info, module, app_name): - """Checks if the given string is a variable name or a function. If it is a - function, it checks for specified arguments and whether it takes a - ``script_info`` argument and calls the function with the appropriate - arguments. - """ - from flask import Flask - match = re.match(r'^ *([^ ()]+) *(?:\((.*?) *,? *\))? *$', app_name) - - if not match: - raise NoAppException( - '"{name}" is not a valid variable name or function ' - 'expression.'.format(name=app_name) - ) - - name, args = match.groups() - - try: - attr = getattr(module, name) - except AttributeError as e: - raise NoAppException(e.args[0]) - - if inspect.isfunction(attr): - if args: - try: - args = ast.literal_eval('({args},)'.format(args=args)) - except (ValueError, SyntaxError)as e: - raise NoAppException( - 'Could not parse the arguments in ' - '"{app_name}".'.format(e=e, app_name=app_name) - ) - else: - args = () - - try: - app = call_factory(script_info, attr, args) - except TypeError as e: - if not _called_with_wrong_args(attr): - raise - - raise NoAppException( - '{e}\nThe factory "{app_name}" in module "{module}" could not ' - 'be called with the specified arguments.'.format( - e=e, app_name=app_name, module=module.__name__ - ) - ) - else: - app = attr - - if isinstance(app, Flask): - return app - - raise NoAppException( - 'A valid Flask application was not obtained from ' - '"{module}:{app_name}".'.format( - module=module.__name__, app_name=app_name - ) - ) - - -def prepare_import(path): - """Given a filename this will try to calculate the python path, add it - to the search path and return the actual module name that is expected. 
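A module that this discovery logic would accept might look as follows;
``wsgi.py`` and ``create_app`` are the conventional names the loader
searches for::

    # wsgi.py, found via FLASK_APP=wsgi or FLASK_APP="wsgi:create_app()"
    from flask import Flask

    def create_app():
        app = Flask(__name__)
        return app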
- """ - path = os.path.realpath(path) - - if os.path.splitext(path)[1] == '.py': - path = os.path.splitext(path)[0] - - if os.path.basename(path) == '__init__': - path = os.path.dirname(path) - - module_name = [] - - # move up until outside package structure (no __init__.py) - while True: - path, name = os.path.split(path) - module_name.append(name) - - if not os.path.exists(os.path.join(path, '__init__.py')): - break - - if sys.path[0] != path: - sys.path.insert(0, path) - - return '.'.join(module_name[::-1]) - - -def locate_app(script_info, module_name, app_name, raise_if_not_found=True): - __traceback_hide__ = True - - try: - __import__(module_name) - except ImportError: - # Reraise the ImportError if it occurred within the imported module. - # Determine this by checking whether the trace has a depth > 1. - if sys.exc_info()[-1].tb_next: - raise NoAppException( - 'While importing "{name}", an ImportError was raised:' - '\n\n{tb}'.format(name=module_name, tb=traceback.format_exc()) - ) - elif raise_if_not_found: - raise NoAppException( - 'Could not import "{name}".'.format(name=module_name) - ) - else: - return - - module = sys.modules[module_name] - - if app_name is None: - return find_best_app(script_info, module) - else: - return find_app_by_string(script_info, module, app_name) - - -def get_version(ctx, param, value): - if not value or ctx.resilient_parsing: - return - message = 'Flask %(version)s\nPython %(python_version)s' - click.echo(message % { - 'version': __version__, - 'python_version': sys.version, - }, color=ctx.color) - ctx.exit() - - -version_option = click.Option( - ['--version'], - help='Show the flask version', - expose_value=False, - callback=get_version, - is_flag=True, - is_eager=True -) - - -class DispatchingApp(object): - """Special application that dispatches to a Flask application which - is imported by name in a background thread. If an error happens - it is recorded and shown as part of the WSGI handling which in case - of the Werkzeug debugger means that it shows up in the browser. - """ - - def __init__(self, loader, use_eager_loading=False): - self.loader = loader - self._app = None - self._lock = Lock() - self._bg_loading_exc_info = None - if use_eager_loading: - self._load_unlocked() - else: - self._load_in_background() - - def _load_in_background(self): - def _load_app(): - __traceback_hide__ = True - with self._lock: - try: - self._load_unlocked() - except Exception: - self._bg_loading_exc_info = sys.exc_info() - t = Thread(target=_load_app, args=()) - t.start() - - def _flush_bg_loading_exception(self): - __traceback_hide__ = True - exc_info = self._bg_loading_exc_info - if exc_info is not None: - self._bg_loading_exc_info = None - reraise(*exc_info) - - def _load_unlocked(self): - __traceback_hide__ = True - self._app = rv = self.loader() - self._bg_loading_exc_info = None - return rv - - def __call__(self, environ, start_response): - __traceback_hide__ = True - if self._app is not None: - return self._app(environ, start_response) - self._flush_bg_loading_exception() - with self._lock: - if self._app is not None: - rv = self._app - else: - rv = self._load_unlocked() - return rv(environ, start_response) - - -class ScriptInfo(object): - """Help object to deal with Flask applications. This is usually not - necessary to interface with as it's used internally in the dispatching - to click. In future versions of Flask this object will most likely play - a bigger role. 
Typically it's created automatically by the - :class:`FlaskGroup` but you can also manually create it and pass it - onwards as click object. - """ - - def __init__(self, app_import_path=None, create_app=None): - #: Optionally the import path for the Flask application. - self.app_import_path = app_import_path or os.environ.get('FLASK_APP') - #: Optionally a function that is passed the script info to create - #: the instance of the application. - self.create_app = create_app - #: A dictionary with arbitrary data that can be associated with - #: this script info. - self.data = {} - self._loaded_app = None - - def load_app(self): - """Loads the Flask app (if not yet loaded) and returns it. Calling - this multiple times will just result in the already loaded app to - be returned. - """ - __traceback_hide__ = True - - if self._loaded_app is not None: - return self._loaded_app - - app = None - - if self.create_app is not None: - app = call_factory(self, self.create_app) - else: - if self.app_import_path: - path, name = (self.app_import_path.split(':', 1) + [None])[:2] - import_name = prepare_import(path) - app = locate_app(self, import_name, name) - else: - for path in ('wsgi.py', 'app.py'): - import_name = prepare_import(path) - app = locate_app(self, import_name, None, - raise_if_not_found=False) - - if app: - break - - if not app: - raise NoAppException( - 'Could not locate a Flask application. You did not provide ' - 'the "FLASK_APP" environment variable, and a "wsgi.py" or ' - '"app.py" module was not found in the current directory.' - ) - - debug = get_debug_flag() - - # Update the app's debug flag through the descriptor so that other - # values repopulate as well. - if debug is not None: - app.debug = debug - - self._loaded_app = app - return app - - -pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True) - - -def with_appcontext(f): - """Wraps a callback so that it's guaranteed to be executed with the - script's application context. If callbacks are registered directly - to the ``app.cli`` object then they are wrapped with this function - by default unless it's disabled. - """ - @click.pass_context - def decorator(__ctx, *args, **kwargs): - with __ctx.ensure_object(ScriptInfo).load_app().app_context(): - return __ctx.invoke(f, *args, **kwargs) - return update_wrapper(decorator, f) - - -class AppGroup(click.Group): - """This works similar to a regular click :class:`~click.Group` but it - changes the behavior of the :meth:`command` decorator so that it - automatically wraps the functions in :func:`with_appcontext`. - - Not to be confused with :class:`FlaskGroup`. - """ - - def command(self, *args, **kwargs): - """This works exactly like the method of the same name on a regular - :class:`click.Group` but it wraps callbacks in :func:`with_appcontext` - unless it's disabled by passing ``with_appcontext=False``. - """ - wrap_for_ctx = kwargs.pop('with_appcontext', True) - def decorator(f): - if wrap_for_ctx: - f = with_appcontext(f) - return click.Group.command(self, *args, **kwargs)(f) - return decorator - - def group(self, *args, **kwargs): - """This works exactly like the method of the same name on a regular - :class:`click.Group` but it defaults the group class to - :class:`AppGroup`. - """ - kwargs.setdefault('cls', AppGroup) - return click.Group.group(self, *args, **kwargs) - - -class FlaskGroup(AppGroup): - """Special subclass of the :class:`AppGroup` group that supports - loading more commands from the configured Flask app. 
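A hedged sketch of such a custom script, assuming a trivial factory::

    import click
    from flask import Flask
    from flask.cli import FlaskGroup

    def create_app():
        return Flask(__name__)

    @click.group(cls=FlaskGroup, create_app=create_app)
    def cli():
        """Management script wrapping the default Flask commands."""

    if __name__ == '__main__':
        cli()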
Normally a - developer does not have to interface with this class but there are - some very advanced use cases for which it makes sense to create an - instance of this. - - For information as of why this is useful see :ref:`custom-scripts`. - - :param add_default_commands: if this is True then the default run and - shell commands wil be added. - :param add_version_option: adds the ``--version`` option. - :param create_app: an optional callback that is passed the script info and - returns the loaded app. - :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv` - files to set environment variables. Will also change the working - directory to the directory containing the first file found. - - .. versionchanged:: 1.0 - If installed, python-dotenv will be used to load environment variables - from :file:`.env` and :file:`.flaskenv` files. - """ - - def __init__(self, add_default_commands=True, create_app=None, - add_version_option=True, load_dotenv=True, **extra): - params = list(extra.pop('params', None) or ()) - - if add_version_option: - params.append(version_option) - - AppGroup.__init__(self, params=params, **extra) - self.create_app = create_app - self.load_dotenv = load_dotenv - - if add_default_commands: - self.add_command(run_command) - self.add_command(shell_command) - self.add_command(routes_command) - - self._loaded_plugin_commands = False - - def _load_plugin_commands(self): - if self._loaded_plugin_commands: - return - try: - import pkg_resources - except ImportError: - self._loaded_plugin_commands = True - return - - for ep in pkg_resources.iter_entry_points('flask.commands'): - self.add_command(ep.load(), ep.name) - self._loaded_plugin_commands = True - - def get_command(self, ctx, name): - self._load_plugin_commands() - - # We load built-in commands first as these should always be the - # same no matter what the app does. If the app does want to - # override this it needs to make a custom instance of this group - # and not attach the default commands. - # - # This also means that the script stays functional in case the - # application completely fails. - rv = AppGroup.get_command(self, ctx, name) - if rv is not None: - return rv - - info = ctx.ensure_object(ScriptInfo) - try: - rv = info.load_app().cli.get_command(ctx, name) - if rv is not None: - return rv - except NoAppException: - pass - - def list_commands(self, ctx): - self._load_plugin_commands() - - # The commands available is the list of both the application (if - # available) plus the builtin commands. - rv = set(click.Group.list_commands(self, ctx)) - info = ctx.ensure_object(ScriptInfo) - try: - rv.update(info.load_app().cli.list_commands(ctx)) - except Exception: - # Here we intentionally swallow all exceptions as we don't - # want the help page to break if the app does not exist. - # If someone attempts to use the command we try to create - # the app again and this will give us the error. - # However, we will not do so silently because that would confuse - # users. - traceback.print_exc() - return sorted(rv) - - def main(self, *args, **kwargs): - # Set a global flag that indicates that we were invoked from the - # command line interface. This is detected by Flask.run to make the - # call into a no-op. This is necessary to avoid ugly errors when the - # script that is loaded here also attempts to start a server. 
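The plugin loading above is driven by setuptools entry points; a
hypothetical extension would register one like this::

    # setup.py of a made-up extension exposing a ``flask.commands``
    # entry point that ``_load_plugin_commands`` picks up
    from setuptools import setup

    setup(
        name='flask-myext',
        py_modules=['flask_myext'],
        entry_points={
            'flask.commands': [
                'myext=flask_myext:cli',
            ],
        },
    )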
- os.environ['FLASK_RUN_FROM_CLI'] = 'true' - - if get_load_dotenv(self.load_dotenv): - load_dotenv() - - obj = kwargs.get('obj') - - if obj is None: - obj = ScriptInfo(create_app=self.create_app) - - kwargs['obj'] = obj - kwargs.setdefault('auto_envvar_prefix', 'FLASK') - return super(FlaskGroup, self).main(*args, **kwargs) - - -def _path_is_ancestor(path, other): - """Take ``other`` and remove the length of ``path`` from it. Then join it - to ``path``. If it is the original value, ``path`` is an ancestor of - ``other``.""" - return os.path.join(path, other[len(path):].lstrip(os.sep)) == other - - -def load_dotenv(path=None): - """Load "dotenv" files in order of precedence to set environment variables. - - If an env var is already set it is not overwritten, so earlier files in the - list are preferred over later files. - - Changes the current working directory to the location of the first file - found, with the assumption that it is in the top level project directory - and will be where the Python path should import local packages from. - - This is a no-op if `python-dotenv`_ is not installed. - - .. _python-dotenv: https://github.com/theskumar/python-dotenv#readme - - :param path: Load the file at this location instead of searching. - :return: ``True`` if a file was loaded. - - .. versionadded:: 1.0 - """ - if dotenv is None: - if path or os.path.exists('.env') or os.path.exists('.flaskenv'): - click.secho( - ' * Tip: There are .env files present.' - ' Do "pip install python-dotenv" to use them.', - fg='yellow') - return - - if path is not None: - return dotenv.load_dotenv(path) - - new_dir = None - - for name in ('.env', '.flaskenv'): - path = dotenv.find_dotenv(name, usecwd=True) - - if not path: - continue - - if new_dir is None: - new_dir = os.path.dirname(path) - - dotenv.load_dotenv(path) - - if new_dir and os.getcwd() != new_dir: - os.chdir(new_dir) - - return new_dir is not None # at least one file was located and loaded - - -def show_server_banner(env, debug, app_import_path, eager_loading): - """Show extra startup messages the first time the server is run, - ignoring the reloader. - """ - if os.environ.get('WERKZEUG_RUN_MAIN') == 'true': - return - - if app_import_path is not None: - message = ' * Serving Flask app "{0}"'.format(app_import_path) - - if not eager_loading: - message += ' (lazy loading)' - - click.echo(message) - - click.echo(' * Environment: {0}'.format(env)) - - if env == 'production': - click.secho( - ' WARNING: Do not use the development server in a production' - ' environment.', fg='red') - click.secho(' Use a production WSGI server instead.', dim=True) - - if debug is not None: - click.echo(' * Debug mode: {0}'.format('on' if debug else 'off')) - - -class CertParamType(click.ParamType): - """Click option type for the ``--cert`` option. Allows either an - existing file, the string ``'adhoc'``, or an import for a - :class:`~ssl.SSLContext` object. 
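The import-string branch means a command like ``flask run --cert
conf.ctx`` could resolve an object such as the following; the module
name and file paths are hypothetical::

    # conf.py
    import ssl

    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    ctx.load_cert_chain('server.crt', 'server.key')  # illustrative paths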
- """ - - name = 'path' - - def __init__(self): - self.path_type = click.Path( - exists=True, dir_okay=False, resolve_path=True) - - def convert(self, value, param, ctx): - try: - return self.path_type(value, param, ctx) - except click.BadParameter: - value = click.STRING(value, param, ctx).lower() - - if value == 'adhoc': - try: - import OpenSSL - except ImportError: - raise click.BadParameter( - 'Using ad-hoc certificates requires pyOpenSSL.', - ctx, param) - - return value - - obj = import_string(value, silent=True) - - if sys.version_info < (2, 7): - if obj: - return obj - else: - if isinstance(obj, ssl.SSLContext): - return obj - - raise - - -def _validate_key(ctx, param, value): - """The ``--key`` option must be specified when ``--cert`` is a file. - Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed. - """ - cert = ctx.params.get('cert') - is_adhoc = cert == 'adhoc' - - if sys.version_info < (2, 7): - is_context = cert and not isinstance(cert, (text_type, bytes)) - else: - is_context = isinstance(cert, ssl.SSLContext) - - if value is not None: - if is_adhoc: - raise click.BadParameter( - 'When "--cert" is "adhoc", "--key" is not used.', - ctx, param) - - if is_context: - raise click.BadParameter( - 'When "--cert" is an SSLContext object, "--key is not used.', - ctx, param) - - if not cert: - raise click.BadParameter( - '"--cert" must also be specified.', - ctx, param) - - ctx.params['cert'] = cert, value - - else: - if cert and not (is_adhoc or is_context): - raise click.BadParameter( - 'Required when using "--cert".', - ctx, param) - - return value - - -@click.command('run', short_help='Runs a development server.') -@click.option('--host', '-h', default='127.0.0.1', - help='The interface to bind to.') -@click.option('--port', '-p', default=5000, - help='The port to bind to.') -@click.option('--cert', type=CertParamType(), - help='Specify a certificate file to use HTTPS.') -@click.option('--key', - type=click.Path(exists=True, dir_okay=False, resolve_path=True), - callback=_validate_key, expose_value=False, - help='The key file to use when specifying a certificate.') -@click.option('--reload/--no-reload', default=None, - help='Enable or disable the reloader. By default the reloader ' - 'is active if debug is enabled.') -@click.option('--debugger/--no-debugger', default=None, - help='Enable or disable the debugger. By default the debugger ' - 'is active if debug is enabled.') -@click.option('--eager-loading/--lazy-loader', default=None, - help='Enable or disable eager loading. By default eager ' - 'loading is enabled if the reloader is disabled.') -@click.option('--with-threads/--without-threads', default=True, - help='Enable or disable multithreading.') -@pass_script_info -def run_command(info, host, port, reload, debugger, eager_loading, - with_threads, cert): - """Run a local development server. - - This server is for development purposes only. It does not provide - the stability, security, or performance of production WSGI servers. - - The reloader and debugger are enabled by default if - FLASK_ENV=development or FLASK_DEBUG=1. 
- """ - debug = get_debug_flag() - - if reload is None: - reload = debug - - if debugger is None: - debugger = debug - - if eager_loading is None: - eager_loading = not reload - - show_server_banner(get_env(), debug, info.app_import_path, eager_loading) - app = DispatchingApp(info.load_app, use_eager_loading=eager_loading) - - from werkzeug.serving import run_simple - run_simple(host, port, app, use_reloader=reload, use_debugger=debugger, - threaded=with_threads, ssl_context=cert) - - -@click.command('shell', short_help='Runs a shell in the app context.') -@with_appcontext -def shell_command(): - """Runs an interactive Python shell in the context of a given - Flask application. The application will populate the default - namespace of this shell according to it's configuration. - - This is useful for executing small snippets of management code - without having to manually configure the application. - """ - import code - from flask.globals import _app_ctx_stack - app = _app_ctx_stack.top.app - banner = 'Python %s on %s\nApp: %s [%s]\nInstance: %s' % ( - sys.version, - sys.platform, - app.import_name, - app.env, - app.instance_path, - ) - ctx = {} - - # Support the regular Python interpreter startup script if someone - # is using it. - startup = os.environ.get('PYTHONSTARTUP') - if startup and os.path.isfile(startup): - with open(startup, 'r') as f: - eval(compile(f.read(), startup, 'exec'), ctx) - - ctx.update(app.make_shell_context()) - - code.interact(banner=banner, local=ctx) - - -@click.command('routes', short_help='Show the routes for the app.') -@click.option( - '--sort', '-s', - type=click.Choice(('endpoint', 'methods', 'rule', 'match')), - default='endpoint', - help=( - 'Method to sort routes by. "match" is the order that Flask will match ' - 'routes when dispatching a request.' - ) -) -@click.option( - '--all-methods', - is_flag=True, - help="Show HEAD and OPTIONS methods." -) -@with_appcontext -def routes_command(sort, all_methods): - """Show all registered routes with endpoints and methods.""" - - rules = list(current_app.url_map.iter_rules()) - if not rules: - click.echo('No routes were registered.') - return - - ignored_methods = set(() if all_methods else ('HEAD', 'OPTIONS')) - - if sort in ('endpoint', 'rule'): - rules = sorted(rules, key=attrgetter(sort)) - elif sort == 'methods': - rules = sorted(rules, key=lambda rule: sorted(rule.methods)) - - rule_methods = [ - ', '.join(sorted(rule.methods - ignored_methods)) for rule in rules - ] - - headers = ('Endpoint', 'Methods', 'Rule') - widths = ( - max(len(rule.endpoint) for rule in rules), - max(len(methods) for methods in rule_methods), - max(len(rule.rule) for rule in rules), - ) - widths = [max(len(h), w) for h, w in zip(headers, widths)] - row = '{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}'.format(*widths) - - click.echo(row.format(*headers).strip()) - click.echo(row.format(*('-' * width for width in widths))) - - for rule, methods in zip(rules, rule_methods): - click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip()) - - -cli = FlaskGroup(help="""\ -A general utility script for Flask applications. - -Provides commands from Flask, extensions, and the application. Loads the -application defined in the FLASK_APP environment variable, or from a wsgi.py -file. Setting the FLASK_ENV environment variable to 'development' will enable -debug mode. 
- -\b - {prefix}{cmd} FLASK_APP=hello.py - {prefix}{cmd} FLASK_ENV=development - {prefix}flask run -""".format( - cmd='export' if os.name == 'posix' else 'set', - prefix='$ ' if os.name == 'posix' else '> ' -)) - - -def main(as_module=False): - args = sys.argv[1:] - - if as_module: - this_module = 'flask' - - if sys.version_info < (2, 7): - this_module += '.cli' - - name = 'python -m ' + this_module - - # Python rewrites "python -m flask" to the path to the file in argv. - # Restore the original command so that the reloader works. - sys.argv = ['-m', this_module] + args - else: - name = None - - cli.main(args=args, prog_name=name) - - -if __name__ == '__main__': - main(as_module=True) diff --git a/flo-token-explorer/lib/python3.6/site-packages/flask/config.py b/flo-token-explorer/lib/python3.6/site-packages/flask/config.py deleted file mode 100644 index d6074ba..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/flask/config.py +++ /dev/null @@ -1,265 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.config - ~~~~~~~~~~~~ - - Implements the configuration related objects. - - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. -""" - -import os -import types -import errno - -from werkzeug.utils import import_string -from ._compat import string_types, iteritems -from . import json - - -class ConfigAttribute(object): - """Makes an attribute forward to the config""" - - def __init__(self, name, get_converter=None): - self.__name__ = name - self.get_converter = get_converter - - def __get__(self, obj, type=None): - if obj is None: - return self - rv = obj.config[self.__name__] - if self.get_converter is not None: - rv = self.get_converter(rv) - return rv - - def __set__(self, obj, value): - obj.config[self.__name__] = value - - -class Config(dict): - """Works exactly like a dict but provides ways to fill it from files - or special dictionaries. There are two common patterns to populate the - config. - - Either you can fill the config from a config file:: - - app.config.from_pyfile('yourconfig.cfg') - - Or alternatively you can define the configuration options in the - module that calls :meth:`from_object` or provide an import path to - a module that should be loaded. It is also possible to tell it to - use the same module and with that provide the configuration values - just before the call:: - - DEBUG = True - SECRET_KEY = 'development key' - app.config.from_object(__name__) - - In both cases (loading from any Python file or loading from modules), - only uppercase keys are added to the config. This makes it possible to use - lowercase values in the config file for temporary values that are not added - to the config or to define the config keys in the same file that implements - the application. - - Probably the most interesting way to load configurations is from an - environment variable pointing to a file:: - - app.config.from_envvar('YOURAPPLICATION_SETTINGS') - - In this case before launching the application you have to set this - environment variable to the file you want to use. On Linux and OS X - use the export statement:: - - export YOURAPPLICATION_SETTINGS='/path/to/config/file' - - On windows use `set` instead. - - :param root_path: path to which files are read relative from. When the - config object is created by the application, this is - the application's :attr:`~flask.Flask.root_path`. 
- :param defaults: an optional dictionary of default values - """ - - def __init__(self, root_path, defaults=None): - dict.__init__(self, defaults or {}) - self.root_path = root_path - - def from_envvar(self, variable_name, silent=False): - """Loads a configuration from an environment variable pointing to - a configuration file. This is basically just a shortcut with nicer - error messages for this line of code:: - - app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS']) - - :param variable_name: name of the environment variable - :param silent: set to ``True`` if you want silent failure for missing - files. - :return: bool. ``True`` if able to load config, ``False`` otherwise. - """ - rv = os.environ.get(variable_name) - if not rv: - if silent: - return False - raise RuntimeError('The environment variable %r is not set ' - 'and as such configuration could not be ' - 'loaded. Set this variable and make it ' - 'point to a configuration file' % - variable_name) - return self.from_pyfile(rv, silent=silent) - - def from_pyfile(self, filename, silent=False): - """Updates the values in the config from a Python file. This function - behaves as if the file was imported as module with the - :meth:`from_object` function. - - :param filename: the filename of the config. This can either be an - absolute filename or a filename relative to the - root path. - :param silent: set to ``True`` if you want silent failure for missing - files. - - .. versionadded:: 0.7 - `silent` parameter. - """ - filename = os.path.join(self.root_path, filename) - d = types.ModuleType('config') - d.__file__ = filename - try: - with open(filename, mode='rb') as config_file: - exec(compile(config_file.read(), filename, 'exec'), d.__dict__) - except IOError as e: - if silent and e.errno in ( - errno.ENOENT, errno.EISDIR, errno.ENOTDIR - ): - return False - e.strerror = 'Unable to load configuration file (%s)' % e.strerror - raise - self.from_object(d) - return True - - def from_object(self, obj): - """Updates the values from the given object. An object can be of one - of the following two types: - - - a string: in this case the object with that name will be imported - - an actual object reference: that object is used directly - - Objects are usually either modules or classes. :meth:`from_object` - loads only the uppercase attributes of the module/class. A ``dict`` - object will not work with :meth:`from_object` because the keys of a - ``dict`` are not attributes of the ``dict`` class. - - Example of module-based configuration:: - - app.config.from_object('yourapplication.default_config') - from yourapplication import default_config - app.config.from_object(default_config) - - You should not use this function to load the actual configuration but - rather configuration defaults. The actual config should be loaded - with :meth:`from_pyfile` and ideally from a location not within the - package because the package might be installed system wide. - - See :ref:`config-dev-prod` for an example of class-based configuration - using :meth:`from_object`. - - :param obj: an import name or object - """ - if isinstance(obj, string_types): - obj = import_string(obj) - for key in dir(obj): - if key.isupper(): - self[key] = getattr(obj, key) - - def from_json(self, filename, silent=False): - """Updates the values in the config from a JSON file. This function - behaves as if the JSON object was a dictionary and passed to the - :meth:`from_mapping` function. - - :param filename: the filename of the JSON file. 
This can either be an - absolute filename or a filename relative to the - root path. - :param silent: set to ``True`` if you want silent failure for missing - files. - - .. versionadded:: 0.11 - """ - filename = os.path.join(self.root_path, filename) - - try: - with open(filename) as json_file: - obj = json.loads(json_file.read()) - except IOError as e: - if silent and e.errno in (errno.ENOENT, errno.EISDIR): - return False - e.strerror = 'Unable to load configuration file (%s)' % e.strerror - raise - return self.from_mapping(obj) - - def from_mapping(self, *mapping, **kwargs): - """Updates the config like :meth:`update` ignoring items with non-upper - keys. - - .. versionadded:: 0.11 - """ - mappings = [] - if len(mapping) == 1: - if hasattr(mapping[0], 'items'): - mappings.append(mapping[0].items()) - else: - mappings.append(mapping[0]) - elif len(mapping) > 1: - raise TypeError( - 'expected at most 1 positional argument, got %d' % len(mapping) - ) - mappings.append(kwargs.items()) - for mapping in mappings: - for (key, value) in mapping: - if key.isupper(): - self[key] = value - return True - - def get_namespace(self, namespace, lowercase=True, trim_namespace=True): - """Returns a dictionary containing a subset of configuration options - that match the specified namespace/prefix. Example usage:: - - app.config['IMAGE_STORE_TYPE'] = 'fs' - app.config['IMAGE_STORE_PATH'] = '/var/app/images' - app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com' - image_store_config = app.config.get_namespace('IMAGE_STORE_') - - The resulting dictionary `image_store_config` would look like:: - - { - 'type': 'fs', - 'path': '/var/app/images', - 'base_url': 'http://img.website.com' - } - - This is often useful when configuration options map directly to - keyword arguments in functions or class constructors. - - :param namespace: a configuration namespace - :param lowercase: a flag indicating if the keys of the resulting - dictionary should be lowercase - :param trim_namespace: a flag indicating if the keys of the resulting - dictionary should not include the namespace - - .. versionadded:: 0.11 - """ - rv = {} - for k, v in iteritems(self): - if not k.startswith(namespace): - continue - if trim_namespace: - key = k[len(namespace):] - else: - key = k - if lowercase: - key = key.lower() - rv[key] = v - return rv - - def __repr__(self): - return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self)) diff --git a/flo-token-explorer/lib/python3.6/site-packages/flask/ctx.py b/flo-token-explorer/lib/python3.6/site-packages/flask/ctx.py deleted file mode 100644 index 8472c92..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/flask/ctx.py +++ /dev/null @@ -1,457 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.ctx - ~~~~~~~~~ - - Implements the objects required to keep the context. - - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. -""" - -import sys -from functools import update_wrapper - -from werkzeug.exceptions import HTTPException - -from .globals import _request_ctx_stack, _app_ctx_stack -from .signals import appcontext_pushed, appcontext_popped -from ._compat import BROKEN_PYPY_CTXMGR_EXIT, reraise - - -# a singleton sentinel value for parameter defaults -_sentinel = object() - - -class _AppCtxGlobals(object): - """A plain object. Used as a namespace for storing data during an - application context. - - Creating an app context automatically creates this object, which is - made available as the :data:`g` proxy. - - .. 
describe:: 'key' in g - - Check whether an attribute is present. - - .. versionadded:: 0.10 - - .. describe:: iter(g) - - Return an iterator over the attribute names. - - .. versionadded:: 0.10 - """ - - def get(self, name, default=None): - """Get an attribute by name, or a default value. Like - :meth:`dict.get`. - - :param name: Name of attribute to get. - :param default: Value to return if the attribute is not present. - - .. versionadded:: 0.10 - """ - return self.__dict__.get(name, default) - - def pop(self, name, default=_sentinel): - """Get and remove an attribute by name. Like :meth:`dict.pop`. - - :param name: Name of attribute to pop. - :param default: Value to return if the attribute is not present, - instead of raise a ``KeyError``. - - .. versionadded:: 0.11 - """ - if default is _sentinel: - return self.__dict__.pop(name) - else: - return self.__dict__.pop(name, default) - - def setdefault(self, name, default=None): - """Get the value of an attribute if it is present, otherwise - set and return a default value. Like :meth:`dict.setdefault`. - - :param name: Name of attribute to get. - :param: default: Value to set and return if the attribute is not - present. - - .. versionadded:: 0.11 - """ - return self.__dict__.setdefault(name, default) - - def __contains__(self, item): - return item in self.__dict__ - - def __iter__(self): - return iter(self.__dict__) - - def __repr__(self): - top = _app_ctx_stack.top - if top is not None: - return '' % top.app.name - return object.__repr__(self) - - -def after_this_request(f): - """Executes a function after this request. This is useful to modify - response objects. The function is passed the response object and has - to return the same or a new one. - - Example:: - - @app.route('/') - def index(): - @after_this_request - def add_header(response): - response.headers['X-Foo'] = 'Parachute' - return response - return 'Hello World!' - - This is more useful if a function other than the view function wants to - modify a response. For instance think of a decorator that wants to add - some headers without converting the return value into a response object. - - .. versionadded:: 0.9 - """ - _request_ctx_stack.top._after_request_functions.append(f) - return f - - -def copy_current_request_context(f): - """A helper function that decorates a function to retain the current - request context. This is useful when working with greenlets. The moment - the function is decorated a copy of the request context is created and - then pushed when the function is called. - - Example:: - - import gevent - from flask import copy_current_request_context - - @app.route('/') - def index(): - @copy_current_request_context - def do_some_work(): - # do some work here, it can access flask.request like you - # would otherwise in the view function. - ... - gevent.spawn(do_some_work) - return 'Regular response' - - .. versionadded:: 0.10 - """ - top = _request_ctx_stack.top - if top is None: - raise RuntimeError('This decorator can only be used at local scopes ' - 'when a request context is on the stack. For instance within ' - 'view functions.') - reqctx = top.copy() - def wrapper(*args, **kwargs): - with reqctx: - return f(*args, **kwargs) - return update_wrapper(wrapper, f) - - -def has_request_context(): - """If you have code that wants to test if a request context is there or - not this function can be used. For instance, you may want to take advantage - of request information if the request object is available, but fail - silently if it is unavailable. 
- - :: - - class User(db.Model): - - def __init__(self, username, remote_addr=None): - self.username = username - if remote_addr is None and has_request_context(): - remote_addr = request.remote_addr - self.remote_addr = remote_addr - - Alternatively you can also just test any of the context bound objects - (such as :class:`request` or :class:`g` for truthness):: - - class User(db.Model): - - def __init__(self, username, remote_addr=None): - self.username = username - if remote_addr is None and request: - remote_addr = request.remote_addr - self.remote_addr = remote_addr - - .. versionadded:: 0.7 - """ - return _request_ctx_stack.top is not None - - -def has_app_context(): - """Works like :func:`has_request_context` but for the application - context. You can also just do a boolean check on the - :data:`current_app` object instead. - - .. versionadded:: 0.9 - """ - return _app_ctx_stack.top is not None - - -class AppContext(object): - """The application context binds an application object implicitly - to the current thread or greenlet, similar to how the - :class:`RequestContext` binds request information. The application - context is also implicitly created if a request context is created - but the application is not on top of the individual application - context. - """ - - def __init__(self, app): - self.app = app - self.url_adapter = app.create_url_adapter(None) - self.g = app.app_ctx_globals_class() - - # Like request context, app contexts can be pushed multiple times - # but there a basic "refcount" is enough to track them. - self._refcnt = 0 - - def push(self): - """Binds the app context to the current context.""" - self._refcnt += 1 - if hasattr(sys, 'exc_clear'): - sys.exc_clear() - _app_ctx_stack.push(self) - appcontext_pushed.send(self.app) - - def pop(self, exc=_sentinel): - """Pops the app context.""" - try: - self._refcnt -= 1 - if self._refcnt <= 0: - if exc is _sentinel: - exc = sys.exc_info()[1] - self.app.do_teardown_appcontext(exc) - finally: - rv = _app_ctx_stack.pop() - assert rv is self, 'Popped wrong app context. (%r instead of %r)' \ - % (rv, self) - appcontext_popped.send(self.app) - - def __enter__(self): - self.push() - return self - - def __exit__(self, exc_type, exc_value, tb): - self.pop(exc_value) - - if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None: - reraise(exc_type, exc_value, tb) - - -class RequestContext(object): - """The request context contains all request relevant information. It is - created at the beginning of the request and pushed to the - `_request_ctx_stack` and removed at the end of it. It will create the - URL adapter and request object for the WSGI environment provided. - - Do not attempt to use this class directly, instead use - :meth:`~flask.Flask.test_request_context` and - :meth:`~flask.Flask.request_context` to create this object. - - When the request context is popped, it will evaluate all the - functions registered on the application for teardown execution - (:meth:`~flask.Flask.teardown_request`). - - The request context is automatically popped at the end of the request - for you. In debug mode the request context is kept around if - exceptions happen so that interactive debuggers have a chance to - introspect the data. With 0.4 this can also be forced for requests - that did not fail and outside of ``DEBUG`` mode. By setting - ``'flask._preserve_context'`` to ``True`` on the WSGI environment the - context will not pop itself at the end of the request. 
This is used by - the :meth:`~flask.Flask.test_client` for example to implement the - deferred cleanup functionality. - - You might find this helpful for unittests where you need the - information from the context local around for a little longer. Make - sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in - that situation, otherwise your unittests will leak memory. - """ - - def __init__(self, app, environ, request=None): - self.app = app - if request is None: - request = app.request_class(environ) - self.request = request - self.url_adapter = app.create_url_adapter(self.request) - self.flashes = None - self.session = None - - # Request contexts can be pushed multiple times and interleaved with - # other request contexts. Now only if the last level is popped we - # get rid of them. Additionally if an application context is missing - # one is created implicitly so for each level we add this information - self._implicit_app_ctx_stack = [] - - # indicator if the context was preserved. Next time another context - # is pushed the preserved context is popped. - self.preserved = False - - # remembers the exception for pop if there is one in case the context - # preservation kicks in. - self._preserved_exc = None - - # Functions that should be executed after the request on the response - # object. These will be called before the regular "after_request" - # functions. - self._after_request_functions = [] - - self.match_request() - - def _get_g(self): - return _app_ctx_stack.top.g - def _set_g(self, value): - _app_ctx_stack.top.g = value - g = property(_get_g, _set_g) - del _get_g, _set_g - - def copy(self): - """Creates a copy of this request context with the same request object. - This can be used to move a request context to a different greenlet. - Because the actual request object is the same this cannot be used to - move a request context to a different thread unless access to the - request object is locked. - - .. versionadded:: 0.10 - """ - return self.__class__(self.app, - environ=self.request.environ, - request=self.request - ) - - def match_request(self): - """Can be overridden by a subclass to hook into the matching - of the request. - """ - try: - url_rule, self.request.view_args = \ - self.url_adapter.match(return_rule=True) - self.request.url_rule = url_rule - except HTTPException as e: - self.request.routing_exception = e - - def push(self): - """Binds the request context to the current context.""" - # If an exception occurs in debug mode or if context preservation is - # activated under exception situations exactly one context stays - # on the stack. The rationale is that you want to access that - # information under debug situations. However if someone forgets to - # pop that context again we want to make sure that on the next push - # it's invalidated, otherwise we run at risk that something leaks - # memory. This is usually only a problem in test suite since this - # functionality is not active in production environments. - top = _request_ctx_stack.top - if top is not None and top.preserved: - top.pop(top._preserved_exc) - - # Before we push the request context we have to ensure that there - # is an application context. 
- app_ctx = _app_ctx_stack.top - if app_ctx is None or app_ctx.app != self.app: - app_ctx = self.app.app_context() - app_ctx.push() - self._implicit_app_ctx_stack.append(app_ctx) - else: - self._implicit_app_ctx_stack.append(None) - - if hasattr(sys, 'exc_clear'): - sys.exc_clear() - - _request_ctx_stack.push(self) - - # Open the session at the moment that the request context is available. - # This allows a custom open_session method to use the request context. - # Only open a new session if this is the first time the request was - # pushed, otherwise stream_with_context loses the session. - if self.session is None: - session_interface = self.app.session_interface - self.session = session_interface.open_session( - self.app, self.request - ) - - if self.session is None: - self.session = session_interface.make_null_session(self.app) - - def pop(self, exc=_sentinel): - """Pops the request context and unbinds it by doing that. This will - also trigger the execution of functions registered by the - :meth:`~flask.Flask.teardown_request` decorator. - - .. versionchanged:: 0.9 - Added the `exc` argument. - """ - app_ctx = self._implicit_app_ctx_stack.pop() - - try: - clear_request = False - if not self._implicit_app_ctx_stack: - self.preserved = False - self._preserved_exc = None - if exc is _sentinel: - exc = sys.exc_info()[1] - self.app.do_teardown_request(exc) - - # If this interpreter supports clearing the exception information - # we do that now. This will only go into effect on Python 2.x, - # on 3.x it disappears automatically at the end of the exception - # stack. - if hasattr(sys, 'exc_clear'): - sys.exc_clear() - - request_close = getattr(self.request, 'close', None) - if request_close is not None: - request_close() - clear_request = True - finally: - rv = _request_ctx_stack.pop() - - # get rid of circular dependencies at the end of the request - # so that we don't require the GC to be active. - if clear_request: - rv.request.environ['werkzeug.request'] = None - - # Get rid of the app as well if necessary. - if app_ctx is not None: - app_ctx.pop(exc) - - assert rv is self, 'Popped wrong request context. ' \ - '(%r instead of %r)' % (rv, self) - - def auto_pop(self, exc): - if self.request.environ.get('flask._preserve_context') or \ - (exc is not None and self.app.preserve_context_on_exception): - self.preserved = True - self._preserved_exc = exc - else: - self.pop(exc) - - def __enter__(self): - self.push() - return self - - def __exit__(self, exc_type, exc_value, tb): - # do not pop the request stack if we are in debug mode and an - # exception happened. This will allow the debugger to still - # access the request object in the interactive shell. Furthermore - # the context can be force kept alive for the test client. - # See flask.testing for how this works. - self.auto_pop(exc_value) - - if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None: - reraise(exc_type, exc_value, tb) - - def __repr__(self): - return '<%s \'%s\' [%s] of %s>' % ( - self.__class__.__name__, - self.request.url, - self.request.method, - self.app.name, - ) diff --git a/flo-token-explorer/lib/python3.6/site-packages/flask/debughelpers.py b/flo-token-explorer/lib/python3.6/site-packages/flask/debughelpers.py deleted file mode 100644 index e9765f2..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/flask/debughelpers.py +++ /dev/null @@ -1,168 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.debughelpers - ~~~~~~~~~~~~~~~~~~ - - Various helpers to make the development experience better. 
- - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. -""" - -import os -from warnings import warn - -from ._compat import implements_to_string, text_type -from .app import Flask -from .blueprints import Blueprint -from .globals import _request_ctx_stack - - -class UnexpectedUnicodeError(AssertionError, UnicodeError): - """Raised in places where we want some better error reporting for - unexpected unicode or binary data. - """ - - -@implements_to_string -class DebugFilesKeyError(KeyError, AssertionError): - """Raised from request.files during debugging. The idea is that it can - provide a better error message than just a generic KeyError/BadRequest. - """ - - def __init__(self, request, key): - form_matches = request.form.getlist(key) - buf = ['You tried to access the file "%s" in the request.files ' - 'dictionary but it does not exist. The mimetype for the request ' - 'is "%s" instead of "multipart/form-data" which means that no ' - 'file contents were transmitted. To fix this error you should ' - 'provide enctype="multipart/form-data" in your form.' % - (key, request.mimetype)] - if form_matches: - buf.append('\n\nThe browser instead transmitted some file names. ' - 'This was submitted: %s' % ', '.join('"%s"' % x - for x in form_matches)) - self.msg = ''.join(buf) - - def __str__(self): - return self.msg - - -class FormDataRoutingRedirect(AssertionError): - """This exception is raised by Flask in debug mode if it detects a - redirect caused by the routing system when the request method is not - GET, HEAD or OPTIONS. Reasoning: form data will be dropped. - """ - - def __init__(self, request): - exc = request.routing_exception - buf = ['A request was sent to this URL (%s) but a redirect was ' - 'issued automatically by the routing system to "%s".' - % (request.url, exc.new_url)] - - # In case just a slash was appended we can be extra helpful - if request.base_url + '/' == exc.new_url.split('?')[0]: - buf.append(' The URL was defined with a trailing slash so ' - 'Flask will automatically redirect to the URL ' - 'with the trailing slash if it was accessed ' - 'without one.') - - buf.append(' Make sure to directly send your %s-request to this URL ' - 'since we can\'t make browsers or HTTP clients redirect ' - 'with form data reliably or without user interaction.' % - request.method) - buf.append('\n\nNote: this exception is only raised in debug mode') - AssertionError.__init__(self, ''.join(buf).encode('utf-8')) - - -def attach_enctype_error_multidict(request): - """Since Flask 0.8 we're monkeypatching the files object in case a - request is detected that does not use multipart form data but the files - object is accessed. 
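For context, a minimal sketch of the failure mode this helper improves on. The route, field name, and client call here are hypothetical; the point is that the form data is sent without ``enctype="multipart/form-data"``, so ``request.files`` is empty and, in debug mode, the patched lookup raises the more descriptive ``DebugFilesKeyError`` instead of a bare ``KeyError``::

    from flask import Flask, request

    app = Flask(__name__)
    app.debug = True

    @app.route('/upload', methods=['POST'])
    def upload():
        # Empty, because the data below arrives as
        # application/x-www-form-urlencoded, not multipart/form-data.
        return request.files['avatar'].filename

    client = app.test_client()
    # Only the file *name* is transmitted, so the access above fails
    # and debug mode explains the missing enctype attribute.
    client.post('/upload', data={'avatar': 'me.png'})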
- """ - oldcls = request.files.__class__ - class newcls(oldcls): - def __getitem__(self, key): - try: - return oldcls.__getitem__(self, key) - except KeyError: - if key not in request.form: - raise - raise DebugFilesKeyError(request, key) - newcls.__name__ = oldcls.__name__ - newcls.__module__ = oldcls.__module__ - request.files.__class__ = newcls - - -def _dump_loader_info(loader): - yield 'class: %s.%s' % (type(loader).__module__, type(loader).__name__) - for key, value in sorted(loader.__dict__.items()): - if key.startswith('_'): - continue - if isinstance(value, (tuple, list)): - if not all(isinstance(x, (str, text_type)) for x in value): - continue - yield '%s:' % key - for item in value: - yield ' - %s' % item - continue - elif not isinstance(value, (str, text_type, int, float, bool)): - continue - yield '%s: %r' % (key, value) - - -def explain_template_loading_attempts(app, template, attempts): - """This should help developers understand what failed""" - info = ['Locating template "%s":' % template] - total_found = 0 - blueprint = None - reqctx = _request_ctx_stack.top - if reqctx is not None and reqctx.request.blueprint is not None: - blueprint = reqctx.request.blueprint - - for idx, (loader, srcobj, triple) in enumerate(attempts): - if isinstance(srcobj, Flask): - src_info = 'application "%s"' % srcobj.import_name - elif isinstance(srcobj, Blueprint): - src_info = 'blueprint "%s" (%s)' % (srcobj.name, - srcobj.import_name) - else: - src_info = repr(srcobj) - - info.append('% 5d: trying loader of %s' % ( - idx + 1, src_info)) - - for line in _dump_loader_info(loader): - info.append(' %s' % line) - - if triple is None: - detail = 'no match' - else: - detail = 'found (%r)' % (triple[1] or '') - total_found += 1 - info.append(' -> %s' % detail) - - seems_fishy = False - if total_found == 0: - info.append('Error: the template could not be found.') - seems_fishy = True - elif total_found > 1: - info.append('Warning: multiple loaders returned a match for the template.') - seems_fishy = True - - if blueprint is not None and seems_fishy: - info.append(' The template was looked up from an endpoint that ' - 'belongs to the blueprint "%s".' % blueprint) - info.append(' Maybe you did not place a template in the right folder?') - info.append(' See http://flask.pocoo.org/docs/blueprints/#templates') - - app.logger.info('\n'.join(info)) - - -def explain_ignored_app_run(): - if os.environ.get('WERKZEUG_RUN_MAIN') != 'true': - warn(Warning('Silently ignoring app.run() because the ' - 'application is run from the flask command line ' - 'executable. Consider putting app.run() behind an ' - 'if __name__ == "__main__" guard to silence this ' - 'warning.'), stacklevel=3) diff --git a/flo-token-explorer/lib/python3.6/site-packages/flask/globals.py b/flo-token-explorer/lib/python3.6/site-packages/flask/globals.py deleted file mode 100644 index 7d50a6f..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/flask/globals.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.globals - ~~~~~~~~~~~~~ - - Defines all the global objects that are proxies to the current - active context. - - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. -""" - -from functools import partial -from werkzeug.local import LocalStack, LocalProxy - - -_request_ctx_err_msg = '''\ -Working outside of request context. - -This typically means that you attempted to use functionality that needed -an active HTTP request. 
Consult the documentation on testing for -information about how to avoid this problem.\ -''' -_app_ctx_err_msg = '''\ -Working outside of application context. - -This typically means that you attempted to use functionality that needed -to interface with the current application object in some way. To solve -this, set up an application context with app.app_context(). See the -documentation for more information.\ -''' - - -def _lookup_req_object(name): - top = _request_ctx_stack.top - if top is None: - raise RuntimeError(_request_ctx_err_msg) - return getattr(top, name) - - -def _lookup_app_object(name): - top = _app_ctx_stack.top - if top is None: - raise RuntimeError(_app_ctx_err_msg) - return getattr(top, name) - - -def _find_app(): - top = _app_ctx_stack.top - if top is None: - raise RuntimeError(_app_ctx_err_msg) - return top.app - - -# context locals -_request_ctx_stack = LocalStack() -_app_ctx_stack = LocalStack() -current_app = LocalProxy(_find_app) -request = LocalProxy(partial(_lookup_req_object, 'request')) -session = LocalProxy(partial(_lookup_req_object, 'session')) -g = LocalProxy(partial(_lookup_app_object, 'g')) diff --git a/flo-token-explorer/lib/python3.6/site-packages/flask/helpers.py b/flo-token-explorer/lib/python3.6/site-packages/flask/helpers.py deleted file mode 100644 index df0b91f..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/flask/helpers.py +++ /dev/null @@ -1,1044 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.helpers - ~~~~~~~~~~~~~ - - Implements various helpers. - - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. -""" - -import os -import socket -import sys -import pkgutil -import posixpath -import mimetypes -from time import time -from zlib import adler32 -from threading import RLock -import unicodedata -from werkzeug.routing import BuildError -from functools import update_wrapper - -from werkzeug.urls import url_quote -from werkzeug.datastructures import Headers, Range -from werkzeug.exceptions import BadRequest, NotFound, \ - RequestedRangeNotSatisfiable - -from werkzeug.wsgi import wrap_file -from jinja2 import FileSystemLoader - -from .signals import message_flashed -from .globals import session, _request_ctx_stack, _app_ctx_stack, \ - current_app, request -from ._compat import string_types, text_type, PY2 - -# sentinel -_missing = object() - - -# what separators does this operating system provide that are not a slash? -# this is used by the send_from_directory function to ensure that nobody is -# able to access files from outside the filesystem. -_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep] - if sep not in (None, '/')) - - -def get_env(): - """Get the environment the app is running in, indicated by the - :envvar:`FLASK_ENV` environment variable. The default is - ``'production'``. - """ - return os.environ.get('FLASK_ENV') or 'production' - - -def get_debug_flag(): - """Get whether debug mode should be enabled for the app, indicated - by the :envvar:`FLASK_DEBUG` environment variable. The default is - ``True`` if :func:`.get_env` returns ``'development'``, or ``False`` - otherwise. - """ - val = os.environ.get('FLASK_DEBUG') - - if not val: - return get_env() == 'development' - - return val.lower() not in ('0', 'false', 'no') - - -def get_load_dotenv(default=True): - """Get whether the user has disabled loading dotenv files by setting - :envvar:`FLASK_SKIP_DOTENV`. The default is ``True``, load the - files. - - :param default: What to return if the env var isn't set. 
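Taken together, a small sketch of how these environment lookups behave, assuming ``get_env`` and ``get_debug_flag`` are imported from the helpers module deleted above and that the process environment can be mutated freely::

    import os
    from flask.helpers import get_env, get_debug_flag

    os.environ.pop('FLASK_ENV', None)
    os.environ.pop('FLASK_DEBUG', None)
    assert get_env() == 'production'        # FLASK_ENV unset -> default
    assert get_debug_flag() is False        # follows get_env()

    os.environ['FLASK_ENV'] = 'development'
    assert get_debug_flag() is True         # implied by the development env

    os.environ['FLASK_DEBUG'] = 'false'
    assert get_debug_flag() is False        # an explicit flag wins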
- """ - val = os.environ.get('FLASK_SKIP_DOTENV') - - if not val: - return default - - return val.lower() in ('0', 'false', 'no') - - -def _endpoint_from_view_func(view_func): - """Internal helper that returns the default endpoint for a given - function. This always is the function name. - """ - assert view_func is not None, 'expected view func if endpoint ' \ - 'is not provided.' - return view_func.__name__ - - -def stream_with_context(generator_or_function): - """Request contexts disappear when the response is started on the server. - This is done for efficiency reasons and to make it less likely to encounter - memory leaks with badly written WSGI middlewares. The downside is that if - you are using streamed responses, the generator cannot access request bound - information any more. - - This function however can help you keep the context around for longer:: - - from flask import stream_with_context, request, Response - - @app.route('/stream') - def streamed_response(): - @stream_with_context - def generate(): - yield 'Hello ' - yield request.args['name'] - yield '!' - return Response(generate()) - - Alternatively it can also be used around a specific generator:: - - from flask import stream_with_context, request, Response - - @app.route('/stream') - def streamed_response(): - def generate(): - yield 'Hello ' - yield request.args['name'] - yield '!' - return Response(stream_with_context(generate())) - - .. versionadded:: 0.9 - """ - try: - gen = iter(generator_or_function) - except TypeError: - def decorator(*args, **kwargs): - gen = generator_or_function(*args, **kwargs) - return stream_with_context(gen) - return update_wrapper(decorator, generator_or_function) - - def generator(): - ctx = _request_ctx_stack.top - if ctx is None: - raise RuntimeError('Attempted to stream with context but ' - 'there was no context in the first place to keep around.') - with ctx: - # Dummy sentinel. Has to be inside the context block or we're - # not actually keeping the context around. - yield None - - # The try/finally is here so that if someone passes a WSGI level - # iterator in we're still running the cleanup logic. Generators - # don't need that because they are closed on their destruction - # automatically. - try: - for item in gen: - yield item - finally: - if hasattr(gen, 'close'): - gen.close() - - # The trick is to start the generator. Then the code execution runs until - # the first dummy None is yielded at which point the context was already - # pushed. This item is discarded. Then when the iteration continues the - # real generator is executed. - wrapped_g = generator() - next(wrapped_g) - return wrapped_g - - -def make_response(*args): - """Sometimes it is necessary to set additional headers in a view. Because - views do not have to return response objects but can return a value that - is converted into a response object by Flask itself, it becomes tricky to - add headers to it. This function can be called instead of using a return - and you will get a response object which you can use to attach headers. - - If view looked like this and you want to add a new header:: - - def index(): - return render_template('index.html', foo=42) - - You can now do something like this:: - - def index(): - response = make_response(render_template('index.html', foo=42)) - response.headers['X-Parachutes'] = 'parachutes are cool' - return response - - This function accepts the very same arguments you can return from a - view function. 
This for example creates a response with a 404 error - code:: - - response = make_response(render_template('not_found.html'), 404) - - The other use case of this function is to force the return value of a - view function into a response which is helpful with view - decorators:: - - response = make_response(view_function()) - response.headers['X-Parachutes'] = 'parachutes are cool' - - Internally this function does the following things: - - - if no arguments are passed, it creates a new response argument - - if one argument is passed, :meth:`flask.Flask.make_response` - is invoked with it. - - if more than one argument is passed, the arguments are passed - to the :meth:`flask.Flask.make_response` function as tuple. - - .. versionadded:: 0.6 - """ - if not args: - return current_app.response_class() - if len(args) == 1: - args = args[0] - return current_app.make_response(args) - - -def url_for(endpoint, **values): - """Generates a URL to the given endpoint with the method provided. - - Variable arguments that are unknown to the target endpoint are appended - to the generated URL as query arguments. If the value of a query argument - is ``None``, the whole pair is skipped. In case blueprints are active - you can shortcut references to the same blueprint by prefixing the - local endpoint with a dot (``.``). - - This will reference the index function local to the current blueprint:: - - url_for('.index') - - For more information, head over to the :ref:`Quickstart `. - - To integrate applications, :class:`Flask` has a hook to intercept URL build - errors through :attr:`Flask.url_build_error_handlers`. The `url_for` - function results in a :exc:`~werkzeug.routing.BuildError` when the current - app does not have a URL for the given endpoint and values. When it does, the - :data:`~flask.current_app` calls its :attr:`~Flask.url_build_error_handlers` if - it is not ``None``, which can return a string to use as the result of - `url_for` (instead of `url_for`'s default to raise the - :exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception. - An example:: - - def external_url_handler(error, endpoint, values): - "Looks up an external URL when `url_for` cannot build a URL." - # This is an example of hooking the build_error_handler. - # Here, lookup_url is some utility function you've built - # which looks up the endpoint in some external URL registry. - url = lookup_url(endpoint, **values) - if url is None: - # External lookup did not have a URL. - # Re-raise the BuildError, in context of original traceback. - exc_type, exc_value, tb = sys.exc_info() - if exc_value is error: - raise exc_type, exc_value, tb - else: - raise error - # url_for will use this result, instead of raising BuildError. - return url - - app.url_build_error_handlers.append(external_url_handler) - - Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and - `endpoint` and `values` are the arguments passed into `url_for`. Note - that this is for building URLs outside the current application, and not for - handling 404 NotFound errors. - - .. versionadded:: 0.10 - The `_scheme` parameter was added. - - .. versionadded:: 0.9 - The `_anchor` and `_method` parameters were added. - - .. versionadded:: 0.9 - Calls :meth:`Flask.handle_build_error` on - :exc:`~werkzeug.routing.BuildError`. - - :param endpoint: the endpoint of the URL (name of the function) - :param values: the variable arguments of the URL rule - :param _external: if set to ``True``, an absolute URL is generated. 
Server - address can be changed via ``SERVER_NAME`` configuration variable which - defaults to `localhost`. - :param _scheme: a string specifying the desired URL scheme. The `_external` - parameter must be set to ``True`` or a :exc:`ValueError` is raised. The default - behavior uses the same scheme as the current request, or - ``PREFERRED_URL_SCHEME`` from the :ref:`app configuration ` if no - request context is available. As of Werkzeug 0.10, this also can be set - to an empty string to build protocol-relative URLs. - :param _anchor: if provided this is added as anchor to the URL. - :param _method: if provided this explicitly specifies an HTTP method. - """ - appctx = _app_ctx_stack.top - reqctx = _request_ctx_stack.top - - if appctx is None: - raise RuntimeError( - 'Attempted to generate a URL without the application context being' - ' pushed. This has to be executed when application context is' - ' available.' - ) - - # If request specific information is available we have some extra - # features that support "relative" URLs. - if reqctx is not None: - url_adapter = reqctx.url_adapter - blueprint_name = request.blueprint - - if endpoint[:1] == '.': - if blueprint_name is not None: - endpoint = blueprint_name + endpoint - else: - endpoint = endpoint[1:] - - external = values.pop('_external', False) - - # Otherwise go with the url adapter from the appctx and make - # the URLs external by default. - else: - url_adapter = appctx.url_adapter - - if url_adapter is None: - raise RuntimeError( - 'Application was not able to create a URL adapter for request' - ' independent URL generation. You might be able to fix this by' - ' setting the SERVER_NAME config variable.' - ) - - external = values.pop('_external', True) - - anchor = values.pop('_anchor', None) - method = values.pop('_method', None) - scheme = values.pop('_scheme', None) - appctx.app.inject_url_defaults(endpoint, values) - - # This is not the best way to deal with this but currently the - # underlying Werkzeug router does not support overriding the scheme on - # a per build call basis. - old_scheme = None - if scheme is not None: - if not external: - raise ValueError('When specifying _scheme, _external must be True') - old_scheme = url_adapter.url_scheme - url_adapter.url_scheme = scheme - - try: - try: - rv = url_adapter.build(endpoint, values, method=method, - force_external=external) - finally: - if old_scheme is not None: - url_adapter.url_scheme = old_scheme - except BuildError as error: - # We need to inject the values again so that the app callback can - # deal with that sort of stuff. - values['_external'] = external - values['_anchor'] = anchor - values['_method'] = method - values['_scheme'] = scheme - return appctx.app.handle_url_build_error(error, endpoint, values) - - if anchor is not None: - rv += '#' + url_quote(anchor) - return rv - - -def get_template_attribute(template_name, attribute): - """Loads a macro (or variable) a template exports. This can be used to - invoke a macro from within Python code. If you for example have a - template named :file:`_cider.html` with the following contents: - - .. sourcecode:: html+jinja - - {% macro hello(name) %}Hello {{ name }}!{% endmacro %} - - You can access this from Python code like this:: - - hello = get_template_attribute('_cider.html', 'hello') - return hello('World') - - .. 
versionadded:: 0.2 - - :param template_name: the name of the template - :param attribute: the name of the variable of macro to access - """ - return getattr(current_app.jinja_env.get_template(template_name).module, - attribute) - - -def flash(message, category='message'): - """Flashes a message to the next request. In order to remove the - flashed message from the session and to display it to the user, - the template has to call :func:`get_flashed_messages`. - - .. versionchanged:: 0.3 - `category` parameter added. - - :param message: the message to be flashed. - :param category: the category for the message. The following values - are recommended: ``'message'`` for any kind of message, - ``'error'`` for errors, ``'info'`` for information - messages and ``'warning'`` for warnings. However any - kind of string can be used as category. - """ - # Original implementation: - # - # session.setdefault('_flashes', []).append((category, message)) - # - # This assumed that changes made to mutable structures in the session are - # always in sync with the session object, which is not true for session - # implementations that use external storage for keeping their keys/values. - flashes = session.get('_flashes', []) - flashes.append((category, message)) - session['_flashes'] = flashes - message_flashed.send(current_app._get_current_object(), - message=message, category=category) - - -def get_flashed_messages(with_categories=False, category_filter=[]): - """Pulls all flashed messages from the session and returns them. - Further calls in the same request to the function will return - the same messages. By default just the messages are returned, - but when `with_categories` is set to ``True``, the return value will - be a list of tuples in the form ``(category, message)`` instead. - - Filter the flashed messages to one or more categories by providing those - categories in `category_filter`. This allows rendering categories in - separate html blocks. The `with_categories` and `category_filter` - arguments are distinct: - - * `with_categories` controls whether categories are returned with message - text (``True`` gives a tuple, where ``False`` gives just the message text). - * `category_filter` filters the messages down to only those matching the - provided categories. - - See :ref:`message-flashing-pattern` for examples. - - .. versionchanged:: 0.3 - `with_categories` parameter added. - - .. versionchanged:: 0.9 - `category_filter` parameter added. - - :param with_categories: set to ``True`` to also receive categories. - :param category_filter: whitelist of categories to limit return values - """ - flashes = _request_ctx_stack.top.flashes - if flashes is None: - _request_ctx_stack.top.flashes = flashes = session.pop('_flashes') \ - if '_flashes' in session else [] - if category_filter: - flashes = list(filter(lambda f: f[0] in category_filter, flashes)) - if not with_categories: - return [x[1] for x in flashes] - return flashes - - -def send_file(filename_or_fp, mimetype=None, as_attachment=False, - attachment_filename=None, add_etags=True, - cache_timeout=None, conditional=False, last_modified=None): - """Sends the contents of a file to the client. This will use the - most efficient method available and configured. By default it will - try to use the WSGI server's file_wrapper support. Alternatively - you can set the application's :attr:`~Flask.use_x_sendfile` attribute - to ``True`` to directly emit an ``X-Sendfile`` header. This however - requires support of the underlying webserver for ``X-Sendfile``. 
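As a quick illustration, a hypothetical route using the simplest form of the call. The file path is an assumption; a relative path is resolved against the application's ``root_path``, as documented below::

    from flask import Flask, send_file

    app = Flask(__name__)

    @app.route('/report')
    def report():
        # A filename (not a file object) is passed, so the mimetype is
        # guessed from the name and an ETag is attached automatically.
        return send_file('static/report.pdf', as_attachment=True)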
- - By default it will try to guess the mimetype for you, but you can - also explicitly provide one. For extra security you probably want - to send certain files as attachment (HTML for instance). The mimetype - guessing requires a `filename` or an `attachment_filename` to be - provided. - - ETags will also be attached automatically if a `filename` is provided. You - can turn this off by setting `add_etags=False`. - - If `conditional=True` and `filename` is provided, this method will try to - upgrade the response stream to support range requests. This will allow - the request to be answered with partial content response. - - Please never pass filenames to this function from user sources; - you should use :func:`send_from_directory` instead. - - .. versionadded:: 0.2 - - .. versionadded:: 0.5 - The `add_etags`, `cache_timeout` and `conditional` parameters were - added. The default behavior is now to attach etags. - - .. versionchanged:: 0.7 - mimetype guessing and etag support for file objects was - deprecated because it was unreliable. Pass a filename if you are - able to, otherwise attach an etag yourself. This functionality - will be removed in Flask 1.0 - - .. versionchanged:: 0.9 - cache_timeout pulls its default from application config, when None. - - .. versionchanged:: 0.12 - The filename is no longer automatically inferred from file objects. If - you want to use automatic mimetype and etag support, pass a filepath via - `filename_or_fp` or `attachment_filename`. - - .. versionchanged:: 0.12 - The `attachment_filename` is preferred over `filename` for MIME-type - detection. - - .. versionchanged:: 1.0 - UTF-8 filenames, as specified in `RFC 2231`_, are supported. - - .. _RFC 2231: https://tools.ietf.org/html/rfc2231#section-4 - - :param filename_or_fp: the filename of the file to send. - This is relative to the :attr:`~Flask.root_path` - if a relative path is specified. - Alternatively a file object might be provided in - which case ``X-Sendfile`` might not work and fall - back to the traditional method. Make sure that the - file pointer is positioned at the start of data to - send before calling :func:`send_file`. - :param mimetype: the mimetype of the file if provided. If a file path is - given, auto detection happens as fallback, otherwise an - error will be raised. - :param as_attachment: set to ``True`` if you want to send this file with - a ``Content-Disposition: attachment`` header. - :param attachment_filename: the filename for the attachment if it - differs from the file's filename. - :param add_etags: set to ``False`` to disable attaching of etags. - :param conditional: set to ``True`` to enable conditional responses. - - :param cache_timeout: the timeout in seconds for the headers. When ``None`` - (default), this value is set by - :meth:`~Flask.get_send_file_max_age` of - :data:`~flask.current_app`. - :param last_modified: set the ``Last-Modified`` header to this value, - a :class:`~datetime.datetime` or timestamp. - If a file was passed, this overrides its mtime. 
- """ - mtime = None - fsize = None - if isinstance(filename_or_fp, string_types): - filename = filename_or_fp - if not os.path.isabs(filename): - filename = os.path.join(current_app.root_path, filename) - file = None - if attachment_filename is None: - attachment_filename = os.path.basename(filename) - else: - file = filename_or_fp - filename = None - - if mimetype is None: - if attachment_filename is not None: - mimetype = mimetypes.guess_type(attachment_filename)[0] \ - or 'application/octet-stream' - - if mimetype is None: - raise ValueError( - 'Unable to infer MIME-type because no filename is available. ' - 'Please set either `attachment_filename`, pass a filepath to ' - '`filename_or_fp` or set your own MIME-type via `mimetype`.' - ) - - headers = Headers() - if as_attachment: - if attachment_filename is None: - raise TypeError('filename unavailable, required for ' - 'sending as attachment') - - try: - attachment_filename = attachment_filename.encode('latin-1') - except UnicodeEncodeError: - filenames = { - 'filename': unicodedata.normalize( - 'NFKD', attachment_filename).encode('latin-1', 'ignore'), - 'filename*': "UTF-8''%s" % url_quote(attachment_filename), - } - else: - filenames = {'filename': attachment_filename} - - headers.add('Content-Disposition', 'attachment', **filenames) - - if current_app.use_x_sendfile and filename: - if file is not None: - file.close() - headers['X-Sendfile'] = filename - fsize = os.path.getsize(filename) - headers['Content-Length'] = fsize - data = None - else: - if file is None: - file = open(filename, 'rb') - mtime = os.path.getmtime(filename) - fsize = os.path.getsize(filename) - headers['Content-Length'] = fsize - data = wrap_file(request.environ, file) - - rv = current_app.response_class(data, mimetype=mimetype, headers=headers, - direct_passthrough=True) - - if last_modified is not None: - rv.last_modified = last_modified - elif mtime is not None: - rv.last_modified = mtime - - rv.cache_control.public = True - if cache_timeout is None: - cache_timeout = current_app.get_send_file_max_age(filename) - if cache_timeout is not None: - rv.cache_control.max_age = cache_timeout - rv.expires = int(time() + cache_timeout) - - if add_etags and filename is not None: - from warnings import warn - - try: - rv.set_etag('%s-%s-%s' % ( - os.path.getmtime(filename), - os.path.getsize(filename), - adler32( - filename.encode('utf-8') if isinstance(filename, text_type) - else filename - ) & 0xffffffff - )) - except OSError: - warn('Access %s failed, maybe it does not exist, so ignore etags in ' - 'headers' % filename, stacklevel=2) - - if conditional: - try: - rv = rv.make_conditional(request, accept_ranges=True, - complete_length=fsize) - except RequestedRangeNotSatisfiable: - if file is not None: - file.close() - raise - # make sure we don't send x-sendfile for servers that - # ignore the 304 status code for x-sendfile. - if rv.status_code == 304: - rv.headers.pop('x-sendfile', None) - return rv - - -def safe_join(directory, *pathnames): - """Safely join `directory` and zero or more untrusted `pathnames` - components. - - Example usage:: - - @app.route('/wiki/') - def wiki_page(filename): - filename = safe_join(app.config['WIKI_FOLDER'], filename) - with open(filename, 'rb') as fd: - content = fd.read() # Read and process the file content... - - :param directory: the trusted base directory. - :param pathnames: the untrusted pathnames relative to that directory. 
- :raises: :class:`~werkzeug.exceptions.NotFound` if one or more passed - paths fall out of its boundaries. - """ - - parts = [directory] - - for filename in pathnames: - if filename != '': - filename = posixpath.normpath(filename) - - if ( - any(sep in filename for sep in _os_alt_seps) - or os.path.isabs(filename) - or filename == '..' - or filename.startswith('../') - ): - raise NotFound() - - parts.append(filename) - - return posixpath.join(*parts) - - -def send_from_directory(directory, filename, **options): - """Send a file from a given directory with :func:`send_file`. This - is a secure way to quickly expose static files from an upload folder - or something similar. - - Example usage:: - - @app.route('/uploads/') - def download_file(filename): - return send_from_directory(app.config['UPLOAD_FOLDER'], - filename, as_attachment=True) - - .. admonition:: Sending files and Performance - - It is strongly recommended to activate either ``X-Sendfile`` support in - your webserver or (if no authentication happens) to tell the webserver - to serve files for the given path on its own without calling into the - web application for improved performance. - - .. versionadded:: 0.5 - - :param directory: the directory where all the files are stored. - :param filename: the filename relative to that directory to - download. - :param options: optional keyword arguments that are directly - forwarded to :func:`send_file`. - """ - filename = safe_join(directory, filename) - if not os.path.isabs(filename): - filename = os.path.join(current_app.root_path, filename) - try: - if not os.path.isfile(filename): - raise NotFound() - except (TypeError, ValueError): - raise BadRequest() - options.setdefault('conditional', True) - return send_file(filename, **options) - - -def get_root_path(import_name): - """Returns the path to a package or cwd if that cannot be found. This - returns the path of a package or the folder that contains a module. - - Not to be confused with the package path returned by :func:`find_package`. - """ - # Module already imported and has a file attribute. Use that first. - mod = sys.modules.get(import_name) - if mod is not None and hasattr(mod, '__file__'): - return os.path.dirname(os.path.abspath(mod.__file__)) - - # Next attempt: check the loader. - loader = pkgutil.get_loader(import_name) - - # Loader does not exist or we're referring to an unloaded main module - # or a main module without path (interactive sessions), go with the - # current working directory. - if loader is None or import_name == '__main__': - return os.getcwd() - - # For .egg, zipimporter does not have get_filename until Python 2.7. - # Some other loaders might exhibit the same behavior. - if hasattr(loader, 'get_filename'): - filepath = loader.get_filename(import_name) - else: - # Fall back to imports. - __import__(import_name) - mod = sys.modules[import_name] - filepath = getattr(mod, '__file__', None) - - # If we don't have a filepath it might be because we are a - # namespace package. In this case we pick the root path from the - # first module that is contained in our package. - if filepath is None: - raise RuntimeError('No root path can be found for the provided ' - 'module "%s". This can happen because the ' - 'module came from an import hook that does ' - 'not provide file name information or because ' - 'it\'s a namespace package. In this case ' - 'the root path needs to be explicitly ' - 'provided.' % import_name) - - # filepath is import_name.py for a module, or __init__.py for a package. 
- return os.path.dirname(os.path.abspath(filepath)) - - -def _matching_loader_thinks_module_is_package(loader, mod_name): - """Given the loader that loaded a module and the module this function - attempts to figure out if the given module is actually a package. - """ - # If the loader can tell us if something is a package, we can - # directly ask the loader. - if hasattr(loader, 'is_package'): - return loader.is_package(mod_name) - # importlib's namespace loaders do not have this functionality but - # all the modules it loads are packages, so we can take advantage of - # this information. - elif (loader.__class__.__module__ == '_frozen_importlib' and - loader.__class__.__name__ == 'NamespaceLoader'): - return True - # Otherwise we need to fail with an error that explains what went - # wrong. - raise AttributeError( - ('%s.is_package() method is missing but is required by Flask of ' - 'PEP 302 import hooks. If you do not use import hooks and ' - 'you encounter this error please file a bug against Flask.') % - loader.__class__.__name__) - - -def find_package(import_name): - """Finds a package and returns the prefix (or None if the package is - not installed) as well as the folder that contains the package or - module as a tuple. The package path returned is the module that would - have to be added to the pythonpath in order to make it possible to - import the module. The prefix is the path below which a UNIX like - folder structure exists (lib, share etc.). - """ - root_mod_name = import_name.split('.')[0] - loader = pkgutil.get_loader(root_mod_name) - if loader is None or import_name == '__main__': - # import name is not found, or interactive/main module - package_path = os.getcwd() - else: - # For .egg, zipimporter does not have get_filename until Python 2.7. - if hasattr(loader, 'get_filename'): - filename = loader.get_filename(root_mod_name) - elif hasattr(loader, 'archive'): - # zipimporter's loader.archive points to the .egg or .zip - # archive filename is dropped in call to dirname below. - filename = loader.archive - else: - # At least one loader is missing both get_filename and archive: - # Google App Engine's HardenedModulesHook - # - # Fall back to imports. - __import__(import_name) - filename = sys.modules[import_name].__file__ - package_path = os.path.abspath(os.path.dirname(filename)) - - # In case the root module is a package we need to chop of the - # rightmost part. This needs to go through a helper function - # because of python 3.3 namespace packages. - if _matching_loader_thinks_module_is_package( - loader, root_mod_name): - package_path = os.path.dirname(package_path) - - site_parent, site_folder = os.path.split(package_path) - py_prefix = os.path.abspath(sys.prefix) - if package_path.startswith(py_prefix): - return py_prefix, package_path - elif site_folder.lower() == 'site-packages': - parent, folder = os.path.split(site_parent) - # Windows like installations - if folder.lower() == 'lib': - base_dir = parent - # UNIX like installations - elif os.path.basename(parent).lower() == 'lib': - base_dir = os.path.dirname(parent) - else: - base_dir = site_parent - return base_dir, package_path - return None, package_path - - -class locked_cached_property(object): - """A decorator that converts a function into a lazy property. The - function wrapped is called the first time to retrieve the result - and then that calculated result is used the next time you access - the value. Works like the one in Werkzeug but has a lock for - thread safety. 
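A brief sketch of the descriptor in use; the ``Database`` class and its counter are hypothetical stand-ins for an expensive, compute-once attribute::

    from flask.helpers import locked_cached_property

    class Database(object):
        calls = 0

        @locked_cached_property
        def connection(self):
            # Runs at most once per instance, under the property's RLock;
            # the result lands in the instance __dict__ and is returned
            # directly on every later access.
            Database.calls += 1
            return object()  # stand-in for an expensive handle

    db = Database()
    first = db.connection
    second = db.connection
    assert first is second and Database.calls == 1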
- """ - - def __init__(self, func, name=None, doc=None): - self.__name__ = name or func.__name__ - self.__module__ = func.__module__ - self.__doc__ = doc or func.__doc__ - self.func = func - self.lock = RLock() - - def __get__(self, obj, type=None): - if obj is None: - return self - with self.lock: - value = obj.__dict__.get(self.__name__, _missing) - if value is _missing: - value = self.func(obj) - obj.__dict__[self.__name__] = value - return value - - -class _PackageBoundObject(object): - #: The name of the package or module that this app belongs to. Do not - #: change this once it is set by the constructor. - import_name = None - - #: Location of the template files to be added to the template lookup. - #: ``None`` if templates should not be added. - template_folder = None - - #: Absolute path to the package on the filesystem. Used to look up - #: resources contained in the package. - root_path = None - - def __init__(self, import_name, template_folder=None, root_path=None): - self.import_name = import_name - self.template_folder = template_folder - - if root_path is None: - root_path = get_root_path(self.import_name) - - self.root_path = root_path - self._static_folder = None - self._static_url_path = None - - def _get_static_folder(self): - if self._static_folder is not None: - return os.path.join(self.root_path, self._static_folder) - - def _set_static_folder(self, value): - self._static_folder = value - - static_folder = property( - _get_static_folder, _set_static_folder, - doc='The absolute path to the configured static folder.' - ) - del _get_static_folder, _set_static_folder - - def _get_static_url_path(self): - if self._static_url_path is not None: - return self._static_url_path - - if self.static_folder is not None: - return '/' + os.path.basename(self.static_folder) - - def _set_static_url_path(self, value): - self._static_url_path = value - - static_url_path = property( - _get_static_url_path, _set_static_url_path, - doc='The URL prefix that the static route will be registered for.' - ) - del _get_static_url_path, _set_static_url_path - - @property - def has_static_folder(self): - """This is ``True`` if the package bound object's container has a - folder for static files. - - .. versionadded:: 0.5 - """ - return self.static_folder is not None - - @locked_cached_property - def jinja_loader(self): - """The Jinja loader for this package bound object. - - .. versionadded:: 0.5 - """ - if self.template_folder is not None: - return FileSystemLoader(os.path.join(self.root_path, - self.template_folder)) - - def get_send_file_max_age(self, filename): - """Provides default cache_timeout for the :func:`send_file` functions. - - By default, this function returns ``SEND_FILE_MAX_AGE_DEFAULT`` from - the configuration of :data:`~flask.current_app`. - - Static file functions such as :func:`send_from_directory` use this - function, and :func:`send_file` calls this function on - :data:`~flask.current_app` when the given cache_timeout is ``None``. If a - cache_timeout is given in :func:`send_file`, that timeout is used; - otherwise, this method is called. - - This allows subclasses to change the behavior when sending files based - on the filename. For example, to set the cache timeout for .js files - to 60 seconds:: - - class MyFlask(flask.Flask): - def get_send_file_max_age(self, name): - if name.lower().endswith('.js'): - return 60 - return flask.Flask.get_send_file_max_age(self, name) - - .. 
versionadded:: 0.9 - """ - return total_seconds(current_app.send_file_max_age_default) - - def send_static_file(self, filename): - """Function used internally to send static files from the static - folder to the browser. - - .. versionadded:: 0.5 - """ - if not self.has_static_folder: - raise RuntimeError('No static folder for this object') - # Ensure get_send_file_max_age is called in all cases. - # Here, we ensure get_send_file_max_age is called for Blueprints. - cache_timeout = self.get_send_file_max_age(filename) - return send_from_directory(self.static_folder, filename, - cache_timeout=cache_timeout) - - def open_resource(self, resource, mode='rb'): - """Opens a resource from the application's resource folder. To see - how this works, consider the following folder structure:: - - /myapplication.py - /schema.sql - /static - /style.css - /templates - /layout.html - /index.html - - If you want to open the :file:`schema.sql` file you would do the - following:: - - with app.open_resource('schema.sql') as f: - contents = f.read() - do_something_with(contents) - - :param resource: the name of the resource. To access resources within - subfolders use forward slashes as separator. - :param mode: resource file opening mode, default is 'rb'. - """ - if mode not in ('r', 'rb'): - raise ValueError('Resources can only be opened for reading') - return open(os.path.join(self.root_path, resource), mode) - - -def total_seconds(td): - """Returns the total seconds from a timedelta object. - - :param timedelta td: the timedelta to be converted in seconds - - :returns: number of seconds - :rtype: int - """ - return td.days * 60 * 60 * 24 + td.seconds - - -def is_ip(value): - """Determine if the given string is an IP address. - - Python 2 on Windows doesn't provide ``inet_pton``, so this only - checks IPv4 addresses in that environment. - - :param value: value to check - :type value: str - - :return: True if string is an IP address - :rtype: bool - """ - if PY2 and os.name == 'nt': - try: - socket.inet_aton(value) - return True - except socket.error: - return False - - for family in (socket.AF_INET, socket.AF_INET6): - try: - socket.inet_pton(family, value) - except socket.error: - pass - else: - return True - - return False diff --git a/flo-token-explorer/lib/python3.6/site-packages/flask/json/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/flask/json/__init__.py deleted file mode 100644 index fbe6b92..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/flask/json/__init__.py +++ /dev/null @@ -1,327 +0,0 @@ -# -*- coding: utf-8 -*- -""" -flask.json -~~~~~~~~~~ - -:copyright: © 2010 by the Pallets team. -:license: BSD, see LICENSE for more details. -""" -import codecs -import io -import uuid -from datetime import date, datetime -from flask.globals import current_app, request -from flask._compat import text_type, PY2 - -from werkzeug.http import http_date -from jinja2 import Markup - -# Use the same json implementation as itsdangerous on which we -# depend anyways. -from itsdangerous import json as _json - - -# Figure out if simplejson escapes slashes. This behavior was changed -# from one version to another without reason. 
-_slash_escape = '\\/' not in _json.dumps('/') - - -__all__ = ['dump', 'dumps', 'load', 'loads', 'htmlsafe_dump', - 'htmlsafe_dumps', 'JSONDecoder', 'JSONEncoder', - 'jsonify'] - - -def _wrap_reader_for_text(fp, encoding): - if isinstance(fp.read(0), bytes): - fp = io.TextIOWrapper(io.BufferedReader(fp), encoding) - return fp - - -def _wrap_writer_for_text(fp, encoding): - try: - fp.write('') - except TypeError: - fp = io.TextIOWrapper(fp, encoding) - return fp - - -class JSONEncoder(_json.JSONEncoder): - """The default Flask JSON encoder. This one extends the default simplejson - encoder by also supporting ``datetime`` objects, ``UUID`` as well as - ``Markup`` objects which are serialized as RFC 822 datetime strings (same - as the HTTP date format). In order to support more data types override the - :meth:`default` method. - """ - - def default(self, o): - """Implement this method in a subclass such that it returns a - serializable object for ``o``, or calls the base implementation (to - raise a :exc:`TypeError`). - - For example, to support arbitrary iterators, you could implement - default like this:: - - def default(self, o): - try: - iterable = iter(o) - except TypeError: - pass - else: - return list(iterable) - return JSONEncoder.default(self, o) - """ - if isinstance(o, datetime): - return http_date(o.utctimetuple()) - if isinstance(o, date): - return http_date(o.timetuple()) - if isinstance(o, uuid.UUID): - return str(o) - if hasattr(o, '__html__'): - return text_type(o.__html__()) - return _json.JSONEncoder.default(self, o) - - -class JSONDecoder(_json.JSONDecoder): - """The default JSON decoder. This one does not change the behavior from - the default simplejson decoder. Consult the :mod:`json` documentation - for more information. This decoder is not only used for the load - functions of this module but also :attr:`~flask.Request`. - """ - - -def _dump_arg_defaults(kwargs): - """Inject default arguments for dump functions.""" - if current_app: - bp = current_app.blueprints.get(request.blueprint) if request else None - kwargs.setdefault( - 'cls', - bp.json_encoder if bp and bp.json_encoder - else current_app.json_encoder - ) - - if not current_app.config['JSON_AS_ASCII']: - kwargs.setdefault('ensure_ascii', False) - - kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS']) - else: - kwargs.setdefault('sort_keys', True) - kwargs.setdefault('cls', JSONEncoder) - - -def _load_arg_defaults(kwargs): - """Inject default arguments for load functions.""" - if current_app: - bp = current_app.blueprints.get(request.blueprint) if request else None - kwargs.setdefault( - 'cls', - bp.json_decoder if bp and bp.json_decoder - else current_app.json_decoder - ) - else: - kwargs.setdefault('cls', JSONDecoder) - - -def detect_encoding(data): - """Detect which UTF codec was used to encode the given bytes. - - The latest JSON standard (:rfc:`8259`) suggests that only UTF-8 is - accepted. Older documents allowed 8, 16, or 32. 16 and 32 can be big - or little endian. Some editors or libraries may prepend a BOM. - - :param data: Bytes in unknown UTF encoding. 
- :return: UTF encoding name - """ - head = data[:4] - - if head[:3] == codecs.BOM_UTF8: - return 'utf-8-sig' - - if b'\x00' not in head: - return 'utf-8' - - if head in (codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE): - return 'utf-32' - - if head[:2] in (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE): - return 'utf-16' - - if len(head) == 4: - if head[:3] == b'\x00\x00\x00': - return 'utf-32-be' - - if head[::2] == b'\x00\x00': - return 'utf-16-be' - - if head[1:] == b'\x00\x00\x00': - return 'utf-32-le' - - if head[1::2] == b'\x00\x00': - return 'utf-16-le' - - if len(head) == 2: - return 'utf-16-be' if head.startswith(b'\x00') else 'utf-16-le' - - return 'utf-8' - - -def dumps(obj, **kwargs): - """Serialize ``obj`` to a JSON formatted ``str`` by using the application's - configured encoder (:attr:`~flask.Flask.json_encoder`) if there is an - application on the stack. - - This function can return ``unicode`` strings or ascii-only bytestrings by - default which coerce into unicode strings automatically. That behavior by - default is controlled by the ``JSON_AS_ASCII`` configuration variable - and can be overridden by the simplejson ``ensure_ascii`` parameter. - """ - _dump_arg_defaults(kwargs) - encoding = kwargs.pop('encoding', None) - rv = _json.dumps(obj, **kwargs) - if encoding is not None and isinstance(rv, text_type): - rv = rv.encode(encoding) - return rv - - -def dump(obj, fp, **kwargs): - """Like :func:`dumps` but writes into a file object.""" - _dump_arg_defaults(kwargs) - encoding = kwargs.pop('encoding', None) - if encoding is not None: - fp = _wrap_writer_for_text(fp, encoding) - _json.dump(obj, fp, **kwargs) - - -def loads(s, **kwargs): - """Unserialize a JSON object from a string ``s`` by using the application's - configured decoder (:attr:`~flask.Flask.json_decoder`) if there is an - application on the stack. - """ - _load_arg_defaults(kwargs) - if isinstance(s, bytes): - encoding = kwargs.pop('encoding', None) - if encoding is None: - encoding = detect_encoding(s) - s = s.decode(encoding) - return _json.loads(s, **kwargs) - - -def load(fp, **kwargs): - """Like :func:`loads` but reads from a file object. - """ - _load_arg_defaults(kwargs) - if not PY2: - fp = _wrap_reader_for_text(fp, kwargs.pop('encoding', None) or 'utf-8') - return _json.load(fp, **kwargs) - - -def htmlsafe_dumps(obj, **kwargs): - """Works exactly like :func:`dumps` but is safe for use in `` -
-'''
-
-__all__ = ["RecaptchaWidget"]
-
-
-class RecaptchaWidget(object):
-
-    def recaptcha_html(self, public_key):
-        html = current_app.config.get('RECAPTCHA_HTML')
-        if html:
-            return Markup(html)
-        params = current_app.config.get('RECAPTCHA_PARAMETERS')
-        script = RECAPTCHA_SCRIPT
-        if params:
-            script += u'?' + url_encode(params)
-
-        attrs = current_app.config.get('RECAPTCHA_DATA_ATTRS', {})
-        attrs['sitekey'] = public_key
-        snippet = u' '.join([u'data-%s="%s"' % (k, attrs[k]) for k in attrs])
-        return Markup(RECAPTCHA_TEMPLATE % (script, snippet))
-
-    def __call__(self, field, error=None, **kwargs):
-        """Returns the recaptcha input HTML."""
-
-        try:
-            public_key = current_app.config['RECAPTCHA_PUBLIC_KEY']
-        except KeyError:
-            raise RuntimeError("RECAPTCHA_PUBLIC_KEY config not set")
-
-        return self.recaptcha_html(public_key)
diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/INSTALLER b/flo-token-explorer/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/INSTALLER
deleted file mode 100644
index a1b589e..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/LICENSE.txt b/flo-token-explorer/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/LICENSE.txt
deleted file mode 100644
index 65865a9..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/LICENSE.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-2009-2018 (c) Benoît Chesneau
-2009-2015 (c) Paul J. Davis
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
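A quick sketch of how the RecaptchaWidget deleted above is driven: everything
comes out of the Flask app config (the ``RECAPTCHA_*`` keys referenced in the
code), and the widget only needs an application context to render. The app,
key values and the ``flask_wtf.recaptcha.widgets`` module path below are
assumptions based on Flask-WTF's usual layout, not something this diff
confirms::

    from flask import Flask
    from flask_wtf.recaptcha.widgets import RecaptchaWidget

    app = Flask(__name__)
    app.config['RECAPTCHA_PUBLIC_KEY'] = 'your-site-key'    # placeholder value
    app.config['RECAPTCHA_DATA_ATTRS'] = {'theme': 'dark'}  # extra data-* attrs

    with app.app_context():
        # Flask-WTF normally calls the widget through a RecaptchaField;
        # invoked directly it just returns the Markup snippet assembled
        # from RECAPTCHA_SCRIPT, the data attributes and the site key.
        print(RecaptchaWidget()(field=None))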
diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/METADATA b/flo-token-explorer/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/METADATA deleted file mode 100644 index 148e500..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/METADATA +++ /dev/null @@ -1,111 +0,0 @@ -Metadata-Version: 2.1 -Name: gunicorn -Version: 19.9.0 -Summary: WSGI HTTP Server for UNIX -Home-page: http://gunicorn.org -Author: Benoit Chesneau -Author-email: benoitc@e-engura.com -License: MIT -Platform: UNKNOWN -Classifier: Development Status :: 4 - Beta -Classifier: Environment :: Other Environment -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: MacOS :: MacOS X -Classifier: Operating System :: POSIX -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.2 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Topic :: Internet -Classifier: Topic :: Utilities -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: Internet :: WWW/HTTP -Classifier: Topic :: Internet :: WWW/HTTP :: WSGI -Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Server -Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content -Requires-Python: >=2.6, !=3.0.*, !=3.1.* -Provides-Extra: tornado -Provides-Extra: gthread -Provides-Extra: eventlet -Provides-Extra: gevent -Provides-Extra: eventlet -Requires-Dist: eventlet (>=0.9.7); extra == 'eventlet' -Provides-Extra: gevent -Requires-Dist: gevent (>=0.13); extra == 'gevent' -Provides-Extra: gthread -Provides-Extra: tornado -Requires-Dist: tornado (>=0.2); extra == 'tornado' - -Gunicorn --------- - -.. image:: https://img.shields.io/pypi/v/gunicorn.svg?style=flat - :alt: PyPI version - :target: https://pypi.python.org/pypi/gunicorn - -.. image:: https://img.shields.io/pypi/pyversions/gunicorn.svg - :alt: Supported Python versions - :target: https://pypi.python.org/pypi/gunicorn - -.. image:: https://travis-ci.org/benoitc/gunicorn.svg?branch=master - :alt: Build Status - :target: https://travis-ci.org/benoitc/gunicorn - -Gunicorn 'Green Unicorn' is a Python WSGI HTTP Server for UNIX. It's a pre-fork -worker model ported from Ruby's Unicorn_ project. The Gunicorn server is broadly -compatible with various web frameworks, simply implemented, light on server -resource usage, and fairly speedy. - -Feel free to join us in `#gunicorn`_ on Freenode_. - -Documentation -------------- - -The documentation is hosted at http://docs.gunicorn.org. - -Installation ------------- - -Gunicorn requires **Python 2.x >= 2.6** or **Python 3.x >= 3.2**. - -Install from PyPI:: - - $ pip install gunicorn - - -Usage ------ - -Basic usage:: - - $ gunicorn [OPTIONS] APP_MODULE - -Where ``APP_MODULE`` is of the pattern ``$(MODULE_NAME):$(VARIABLE_NAME)``. The -module name can be a full dotted path. The variable name refers to a WSGI -callable that should be found in the specified module. - -Example with test app:: - - $ cd examples - $ gunicorn --workers=2 test:app - - -License -------- - -Gunicorn is released under the MIT License. 
See the LICENSE_ file for more -details. - -.. _Unicorn: https://bogomips.org/unicorn/ -.. _`#gunicorn`: https://webchat.freenode.net/?channels=gunicorn -.. _Freenode: https://freenode.net/ -.. _LICENSE: https://github.com/benoitc/gunicorn/blob/master/LICENSE - - diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/RECORD b/flo-token-explorer/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/RECORD deleted file mode 100644 index 580aa4e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/RECORD +++ /dev/null @@ -1,89 +0,0 @@ -gunicorn/__init__.py,sha256=kRm2HQJytwQi-xmyUfM0cOMyls23DfFDgGFbGI2Gj68,255 -gunicorn/_compat.py,sha256=5cXb6vMfVzInDq-AHNyZfsK-UG5NetDn62nPfqylHSU,9355 -gunicorn/arbiter.py,sha256=AbJNSFnTmx9Qd-vZAqEH3y5fz8ydPmyli_BERNIwdyE,20158 -gunicorn/argparse_compat.py,sha256=gsHDGwo4BSJWHdiaEXy0Emr96NKC0LDYmK5nB7PE8Qc,87791 -gunicorn/config.py,sha256=wYeAJFMweU3FXNF4BdfgZzPC94vUXUnuYgI6lNk-5_U,53420 -gunicorn/debug.py,sha256=UUw-eteLEm_OQ98D6K3XtDjx4Dya2H35zdiu8z7F7uc,2289 -gunicorn/errors.py,sha256=JlDBjag90gMiRwLHG3xzEJzDOntSl1iM32R277-U6j0,919 -gunicorn/glogging.py,sha256=bvnX-sky6HgqJor2JZ9VKZZzT4uh_yOgknkYegB7D7Y,15581 -gunicorn/pidfile.py,sha256=_69tsfF1aHklrMrJe2sHERovMduRByVTv99my7yQ874,2357 -gunicorn/reloader.py,sha256=CPNfYAAvJHazX3NAM7qysSRt0fpiHBGPqBlB0tYKhxs,3839 -gunicorn/selectors.py,sha256=14_UESrpE3AQKXWKeeAUG9vBTzJ0yTYDGtEo6xOtlDY,18997 -gunicorn/six.py,sha256=6N-6RCENPfBtMpN5UmgDfDKmJebbbuPu_Dk3Zf8ngww,27344 -gunicorn/sock.py,sha256=gX2FsdsOGMCtSHbDXn7lsiYYYRc3roQklIJLip1oZQo,6019 -gunicorn/systemd.py,sha256=ffhv17cdv-hDeFAJi1eAVtJskkVciV6cQU75Q2oplqg,1362 -gunicorn/util.py,sha256=Ns_a8Pf7MkaEi0KbV3GsP9aVQ2a_S45EjSE6Iyg2tYU,16229 -gunicorn/app/__init__.py,sha256=GuqstqdkizeV4HRbd8aGMBn0Q8IDOyRU1wMMNqNe5GY,127 -gunicorn/app/base.py,sha256=LKxyziLMPNlK3qm6dPMieELBqfLfmwBFnn9SB-KBogE,6652 -gunicorn/app/pasterapp.py,sha256=AGzZnUpcpw8O8KrizxTgdJBZ4lQdrHgsV0gdx7FVTs8,6046 -gunicorn/app/wsgiapp.py,sha256=ny71qjegQHl_bGMjNfq_aemPrmGEpH2bMRIdph6bj4Q,1870 -gunicorn/http/__init__.py,sha256=b4TF3x5F0VYOPTOeNYwRGR1EYHBaPMhZRMoNeuD5-n0,277 -gunicorn/http/_sendfile.py,sha256=Eqd-s3HlvLuyfGjqaH_Jk72cAtEV8hQv5tb1M1AqcBU,2217 -gunicorn/http/body.py,sha256=MmlZpj_6oRPj3oPVSMQZr0X3KH6ikntxDnVcLgfekZs,7345 -gunicorn/http/errors.py,sha256=sNjF2lm4m2qyZ9l95_U33FRxPXpxXzjnZyYqWS-hxd4,2850 -gunicorn/http/message.py,sha256=G5po0upwbrTyIggb_IEAItIjSi_aDoWYLPQ62o8pOI4,12257 -gunicorn/http/parser.py,sha256=IRMvp0veP4wL8Z4vgNV72CPydCNPdNNIy9u-DlDvvSo,1294 -gunicorn/http/unreader.py,sha256=s4kDW5euiJPsDuHzCqFXUtHCApqIxpShb9dtAyjJw9Y,2019 -gunicorn/http/wsgi.py,sha256=SETzcFoLggub2aMuGduTVELBwJGg9YvvDbkiFbugkwU,12856 -gunicorn/instrument/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -gunicorn/instrument/statsd.py,sha256=5xueDuTZMFtmS8ayGT4sU_OyB9qkEv4Agk-eJwAmhJM,4434 -gunicorn/workers/__init__.py,sha256=arPaAM8HxcK39L2dmDzmMhpK9bsyLOJymuCcBz_qqw0,774 -gunicorn/workers/_gaiohttp.py,sha256=llho90CjwpeAB9ehrYeGmD9VZZAPdcNpVwnrBA3GEZA,5079 -gunicorn/workers/base.py,sha256=nzo4KfCQkO3Y2HKuKVk-xInZUiYay_A5B9e_9NVXU28,9121 -gunicorn/workers/base_async.py,sha256=54VkS3S_wrFD7v3jInhFfkeBhaPnV5UN-cu-i5MoXkc,5575 -gunicorn/workers/gaiohttp.py,sha256=3rhXky6APkhI0D9nwXlogLo_Jd9v98CiEuCy9inzCU4,823 -gunicorn/workers/geventlet.py,sha256=mE-Zw3zh8lOZVaprXcfaoBMmwKeDj6sZzdjmgIsvHXw,4258 -gunicorn/workers/ggevent.py,sha256=OV5KCJ3qlJP5E46sjyWQKGbQ5xGR2SOrZlEtLhIB89s,7412 
-gunicorn/workers/gthread.py,sha256=HIoWuylHZfH1wlSh4eZ8wxo1kQ5abvdUaFfKfIsgQvI,12009 -gunicorn/workers/gtornado.py,sha256=LtBWnEX7MNpeGX-YmlBoV1_OOhjkdytFmt1pzOlRPZk,5044 -gunicorn/workers/sync.py,sha256=_vd1JATNLG4MgJppNJG5KWBIzLGYqRzhEAQVz9H11LI,7153 -gunicorn/workers/workertmp.py,sha256=6QINPBrriLvezgkC_hclOOeXLi_owMt_SOA5KPEIN-A,1459 -gunicorn-19.9.0.dist-info/LICENSE.txt,sha256=eJ_hG5Lhyr-890S1_MOSyb1cZ5hgOk6J-SW2M3mE0d8,1136 -gunicorn-19.9.0.dist-info/METADATA,sha256=SBjzTcJcbKUR9ev_rvypyWJYU0qgHvm8KzgfG6FtniE,3388 -gunicorn-19.9.0.dist-info/RECORD,, -gunicorn-19.9.0.dist-info/WHEEL,sha256=gduuPyBvFJQSQ0zdyxF7k0zynDXbIbvg5ZBHoXum5uk,110 -gunicorn-19.9.0.dist-info/entry_points.txt,sha256=XeFINKRdSUKwJwaVSolO24PuV_YeO71IMF-rOra5JO8,184 -gunicorn-19.9.0.dist-info/top_level.txt,sha256=cdMaa2yhxb8do-WioY9qRHUCfwf55YztjwQCncaInoE,9 -../../../bin/gunicorn,sha256=r5HxclbMx8GM6sIJV-sKWsxxWTyvn1iX9YwShzsli58,282 -../../../bin/gunicorn_paster,sha256=0DkyIkBt6c62kdEjvcm1pqmxTfz4O7d9m1bNigmlppM,284 -gunicorn-19.9.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -gunicorn/workers/__pycache__/gtornado.cpython-36.pyc,, -gunicorn/workers/__pycache__/sync.cpython-36.pyc,, -gunicorn/workers/__pycache__/ggevent.cpython-36.pyc,, -gunicorn/workers/__pycache__/gthread.cpython-36.pyc,, -gunicorn/workers/__pycache__/gaiohttp.cpython-36.pyc,, -gunicorn/workers/__pycache__/base.cpython-36.pyc,, -gunicorn/workers/__pycache__/_gaiohttp.cpython-36.pyc,, -gunicorn/workers/__pycache__/base_async.cpython-36.pyc,, -gunicorn/workers/__pycache__/workertmp.cpython-36.pyc,, -gunicorn/workers/__pycache__/__init__.cpython-36.pyc,, -gunicorn/workers/__pycache__/geventlet.cpython-36.pyc,, -gunicorn/http/__pycache__/errors.cpython-36.pyc,, -gunicorn/http/__pycache__/parser.cpython-36.pyc,, -gunicorn/http/__pycache__/body.cpython-36.pyc,, -gunicorn/http/__pycache__/message.cpython-36.pyc,, -gunicorn/http/__pycache__/wsgi.cpython-36.pyc,, -gunicorn/http/__pycache__/__init__.cpython-36.pyc,, -gunicorn/http/__pycache__/unreader.cpython-36.pyc,, -gunicorn/http/__pycache__/_sendfile.cpython-36.pyc,, -gunicorn/__pycache__/selectors.cpython-36.pyc,, -gunicorn/__pycache__/arbiter.cpython-36.pyc,, -gunicorn/__pycache__/_compat.cpython-36.pyc,, -gunicorn/__pycache__/glogging.cpython-36.pyc,, -gunicorn/__pycache__/systemd.cpython-36.pyc,, -gunicorn/__pycache__/config.cpython-36.pyc,, -gunicorn/__pycache__/sock.cpython-36.pyc,, -gunicorn/__pycache__/util.cpython-36.pyc,, -gunicorn/__pycache__/six.cpython-36.pyc,, -gunicorn/__pycache__/errors.cpython-36.pyc,, -gunicorn/__pycache__/argparse_compat.cpython-36.pyc,, -gunicorn/__pycache__/reloader.cpython-36.pyc,, -gunicorn/__pycache__/__init__.cpython-36.pyc,, -gunicorn/__pycache__/pidfile.cpython-36.pyc,, -gunicorn/__pycache__/debug.cpython-36.pyc,, -gunicorn/instrument/__pycache__/statsd.cpython-36.pyc,, -gunicorn/instrument/__pycache__/__init__.cpython-36.pyc,, -gunicorn/app/__pycache__/wsgiapp.cpython-36.pyc,, -gunicorn/app/__pycache__/base.cpython-36.pyc,, -gunicorn/app/__pycache__/pasterapp.cpython-36.pyc,, -gunicorn/app/__pycache__/__init__.cpython-36.pyc,, diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/WHEEL b/flo-token-explorer/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/WHEEL deleted file mode 100644 index 1316c41..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.31.1) -Root-Is-Purelib: 
true
-Tag: py2-none-any
-Tag: py3-none-any
-
diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/entry_points.txt b/flo-token-explorer/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/entry_points.txt
deleted file mode 100644
index d5b5aa1..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/entry_points.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-
-    [console_scripts]
-    gunicorn=gunicorn.app.wsgiapp:run
-    gunicorn_paster=gunicorn.app.pasterapp:run
-
-    [paste.server_runner]
-    main=gunicorn.app.pasterapp:paste_server
-    
\ No newline at end of file
diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/top_level.txt b/flo-token-explorer/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/top_level.txt
deleted file mode 100644
index 8f22dcc..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn-19.9.0.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-gunicorn
diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/__init__.py
deleted file mode 100644
index 7820479..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# -*- coding: utf-8 -
-#
-# This file is part of gunicorn released under the MIT license.
-# See the NOTICE for more information.
-
-version_info = (19, 9, 0)
-__version__ = ".".join([str(v) for v in version_info])
-SERVER_SOFTWARE = "gunicorn/%s" % __version__
diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/_compat.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/_compat.py
deleted file mode 100644
index 39dbfdf..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/_compat.py
+++ /dev/null
@@ -1,298 +0,0 @@
-import sys
-
-from gunicorn import six
-
-PY26 = (sys.version_info[:2] == (2, 6))
-PY33 = (sys.version_info >= (3, 3))
-
-
-def _check_if_pyc(fname):
-    """Return a ``(pytype, fileobj, fullpath)`` tuple for the given file,
-    where ``pytype`` is ``PY_COMPILED`` for a .pyc file and ``PY_SOURCE``
-    for a .py file"""
-    from imp import find_module
-    from os.path import realpath, dirname, basename, splitext
-
-    # Normalize the file-path for the find_module()
-    filepath = realpath(fname)
-    dirpath = dirname(filepath)
-    module_name = splitext(basename(filepath))[0]
-
-    # Validate and fetch
-    try:
-        fileobj, fullpath, (_, _, pytype) = find_module(module_name, [dirpath])
-    except ImportError:
-        raise IOError("Cannot find config file. "
-                      "Path maybe incorrect! : {0}".format(filepath))
-    return pytype, fileobj, fullpath
-
-
-def _get_codeobj(pyfile):
-    """ Returns the code object, given a python file """
-    from imp import PY_COMPILED, PY_SOURCE
-
-    result, fileobj, fullpath = _check_if_pyc(pyfile)
-
-    # WARNING:
-    # fp.read() can blow up if the module is an extremely large file.
-    # Look out for overflow errors.
-    try:
-        data = fileobj.read()
-    finally:
-        fileobj.close()
-
-    # This is a .pyc file. Treat accordingly.
-    if result is PY_COMPILED:
-        # .pyc format is as follows:
-        # 0 - 4 bytes: Magic number, which changes with each create of .pyc file.
-        # First 2 bytes change with each marshal of .pyc file. Last 2 bytes is "\r\n".
-        # 4 - 8 bytes: Datetime value, when the .py was last changed.
-        # 8 - EOF: Marshalled code object data.
-        # So to get code object, just read the 8th byte onwards till EOF, and
-        # UN-marshal it.
-        import marshal
-        code_obj = marshal.loads(data[8:])
-
-    elif result is PY_SOURCE:
-        # This is a .py file.
- code_obj = compile(data, fullpath, 'exec') - - else: - # Unsupported extension - raise Exception("Input file is unknown format: {0}".format(fullpath)) - - # Return code object - return code_obj - -if six.PY3: - def execfile_(fname, *args): - if fname.endswith(".pyc"): - code = _get_codeobj(fname) - else: - code = compile(open(fname, 'rb').read(), fname, 'exec') - return six.exec_(code, *args) - - def bytes_to_str(b): - if isinstance(b, six.text_type): - return b - return str(b, 'latin1') - - import urllib.parse - - def unquote_to_wsgi_str(string): - return _unquote_to_bytes(string).decode('latin-1') - - _unquote_to_bytes = urllib.parse.unquote_to_bytes - -else: - def execfile_(fname, *args): - """ Overriding PY2 execfile() implementation to support .pyc files """ - if fname.endswith(".pyc"): - return six.exec_(_get_codeobj(fname), *args) - return execfile(fname, *args) - - def bytes_to_str(s): - if isinstance(s, unicode): - return s.encode('utf-8') - return s - - import urllib - unquote_to_wsgi_str = urllib.unquote - - -# The following code adapted from trollius.py33_exceptions -def _wrap_error(exc, mapping, key): - if key not in mapping: - return - new_err_cls = mapping[key] - new_err = new_err_cls(*exc.args) - - # raise a new exception with the original traceback - six.reraise(new_err_cls, new_err, - exc.__traceback__ if hasattr(exc, '__traceback__') else sys.exc_info()[2]) - -if PY33: - import builtins - - BlockingIOError = builtins.BlockingIOError - BrokenPipeError = builtins.BrokenPipeError - ChildProcessError = builtins.ChildProcessError - ConnectionRefusedError = builtins.ConnectionRefusedError - ConnectionResetError = builtins.ConnectionResetError - InterruptedError = builtins.InterruptedError - ConnectionAbortedError = builtins.ConnectionAbortedError - PermissionError = builtins.PermissionError - FileNotFoundError = builtins.FileNotFoundError - ProcessLookupError = builtins.ProcessLookupError - - def wrap_error(func, *args, **kw): - return func(*args, **kw) -else: - import errno - import select - import socket - - class BlockingIOError(OSError): - pass - - class BrokenPipeError(OSError): - pass - - class ChildProcessError(OSError): - pass - - class ConnectionRefusedError(OSError): - pass - - class InterruptedError(OSError): - pass - - class ConnectionResetError(OSError): - pass - - class ConnectionAbortedError(OSError): - pass - - class PermissionError(OSError): - pass - - class FileNotFoundError(OSError): - pass - - class ProcessLookupError(OSError): - pass - - _MAP_ERRNO = { - errno.EACCES: PermissionError, - errno.EAGAIN: BlockingIOError, - errno.EALREADY: BlockingIOError, - errno.ECHILD: ChildProcessError, - errno.ECONNABORTED: ConnectionAbortedError, - errno.ECONNREFUSED: ConnectionRefusedError, - errno.ECONNRESET: ConnectionResetError, - errno.EINPROGRESS: BlockingIOError, - errno.EINTR: InterruptedError, - errno.ENOENT: FileNotFoundError, - errno.EPERM: PermissionError, - errno.EPIPE: BrokenPipeError, - errno.ESHUTDOWN: BrokenPipeError, - errno.EWOULDBLOCK: BlockingIOError, - errno.ESRCH: ProcessLookupError, - } - - def wrap_error(func, *args, **kw): - """ - Wrap socket.error, IOError, OSError, select.error to raise new specialized - exceptions of Python 3.3 like InterruptedError (PEP 3151). - """ - try: - return func(*args, **kw) - except (socket.error, IOError, OSError) as exc: - if hasattr(exc, 'winerror'): - _wrap_error(exc, _MAP_ERRNO, exc.winerror) - # _MAP_ERRNO does not contain all Windows errors. 
- # For some errors like "file not found", exc.errno should - # be used (ex: ENOENT). - _wrap_error(exc, _MAP_ERRNO, exc.errno) - raise - except select.error as exc: - if exc.args: - _wrap_error(exc, _MAP_ERRNO, exc.args[0]) - raise - -if PY26: - from urlparse import ( - _parse_cache, MAX_CACHE_SIZE, clear_cache, _splitnetloc, SplitResult, - scheme_chars, - ) - - def urlsplit(url, scheme='', allow_fragments=True): - """Parse a URL into 5 components: - :///?# - Return a 5-tuple: (scheme, netloc, path, query, fragment). - Note that we don't break the components up in smaller bits - (e.g. netloc is a single string) and we don't expand % escapes.""" - allow_fragments = bool(allow_fragments) - key = url, scheme, allow_fragments, type(url), type(scheme) - cached = _parse_cache.get(key, None) - if cached: - return cached - if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth - clear_cache() - netloc = query = fragment = '' - i = url.find(':') - if i > 0: - if url[:i] == 'http': # optimize the common case - scheme = url[:i].lower() - url = url[i+1:] - if url[:2] == '//': - netloc, url = _splitnetloc(url, 2) - if (('[' in netloc and ']' not in netloc) or - (']' in netloc and '[' not in netloc)): - raise ValueError("Invalid IPv6 URL") - if allow_fragments and '#' in url: - url, fragment = url.split('#', 1) - if '?' in url: - url, query = url.split('?', 1) - v = SplitResult(scheme, netloc, url, query, fragment) - _parse_cache[key] = v - return v - for c in url[:i]: - if c not in scheme_chars: - break - else: - # make sure "url" is not actually a port number (in which case - # "scheme" is really part of the path) - rest = url[i+1:] - if not rest or any(c not in '0123456789' for c in rest): - # not a port number - scheme, url = url[:i].lower(), rest - - if url[:2] == '//': - netloc, url = _splitnetloc(url, 2) - if (('[' in netloc and ']' not in netloc) or - (']' in netloc and '[' not in netloc)): - raise ValueError("Invalid IPv6 URL") - if allow_fragments and '#' in url: - url, fragment = url.split('#', 1) - if '?' in url: - url, query = url.split('?', 1) - v = SplitResult(scheme, netloc, url, query, fragment) - _parse_cache[key] = v - return v - -else: - from gunicorn.six.moves.urllib.parse import urlsplit - - -import inspect - -if hasattr(inspect, 'signature'): - positionals = ( - inspect.Parameter.POSITIONAL_ONLY, - inspect.Parameter.POSITIONAL_OR_KEYWORD, - ) - - def get_arity(f): - sig = inspect.signature(f) - arity = 0 - - for param in sig.parameters.values(): - if param.kind in positionals: - arity += 1 - - return arity -else: - def get_arity(f): - return len(inspect.getargspec(f)[0]) - - -try: - import html - - def html_escape(s): - return html.escape(s) -except ImportError: - import cgi - - def html_escape(s): - return cgi.escape(s, quote=True) diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/app/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/app/__init__.py deleted file mode 100644 index 87f0611..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/app/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. 
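Two asides on the code around this point. First, the signature-based branch of
``get_arity`` deleted above counts only parameters that can be filled
positionally; keyword-only parameters and ``*args`` are ignored on Python 3. A
standalone restatement of that logic (an editorial sketch with hypothetical
names, not code from the package)::

    import inspect

    POSITIONAL = (
        inspect.Parameter.POSITIONAL_ONLY,
        inspect.Parameter.POSITIONAL_OR_KEYWORD,
    )

    def get_arity(f):
        # count the parameters a caller could pass positionally
        return sum(1 for p in inspect.signature(f).parameters.values()
                   if p.kind in POSITIONAL)

    def hook(worker, req, environ, *, verbose=False):
        pass

    print(get_arity(hook))  # 3 -- 'verbose' is keyword-only, so not counted

Second, the next hunk removes ``gunicorn/app/base.py``, whose
``BaseApplication`` is gunicorn's embedding hook: subclass it, override
``load_config`` and ``load``, and gunicorn can serve a WSGI callable from
inside a script. A minimal sketch along the lines of gunicorn's own
custom-application example (class and handler names here are illustrative)::

    from gunicorn.app.base import BaseApplication

    class StandaloneApplication(BaseApplication):
        def __init__(self, app, options=None):
            self.options = options or {}
            self.application = app
            super(StandaloneApplication, self).__init__()

        def load_config(self):
            # copy recognized settings into the parsed Config object
            for key, value in self.options.items():
                if key in self.cfg.settings and value is not None:
                    self.cfg.set(key.lower(), value)

        def load(self):
            return self.application

    def handler(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'Hello, world!\n']

    if __name__ == '__main__':
        StandaloneApplication(handler, {'bind': '127.0.0.1:8080',
                                        'workers': 2}).run()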
diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/app/base.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/app/base.py deleted file mode 100644 index e468c95..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/app/base.py +++ /dev/null @@ -1,223 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. -from __future__ import print_function - -import os -import sys -import traceback - -from gunicorn._compat import execfile_ -from gunicorn import util -from gunicorn.arbiter import Arbiter -from gunicorn.config import Config, get_default_config_file -from gunicorn import debug - -class BaseApplication(object): - """ - An application interface for configuring and loading - the various necessities for any given web framework. - """ - def __init__(self, usage=None, prog=None): - self.usage = usage - self.cfg = None - self.callable = None - self.prog = prog - self.logger = None - self.do_load_config() - - def do_load_config(self): - """ - Loads the configuration - """ - try: - self.load_default_config() - self.load_config() - except Exception as e: - print("\nError: %s" % str(e), file=sys.stderr) - sys.stderr.flush() - sys.exit(1) - - def load_default_config(self): - # init configuration - self.cfg = Config(self.usage, prog=self.prog) - - def init(self, parser, opts, args): - raise NotImplementedError - - def load(self): - raise NotImplementedError - - def load_config(self): - """ - This method is used to load the configuration from one or several input(s). - Custom Command line, configuration file. - You have to override this method in your class. - """ - raise NotImplementedError - - def reload(self): - self.do_load_config() - if self.cfg.spew: - debug.spew() - - def wsgi(self): - if self.callable is None: - self.callable = self.load() - return self.callable - - def run(self): - try: - Arbiter(self).run() - except RuntimeError as e: - print("\nError: %s\n" % e, file=sys.stderr) - sys.stderr.flush() - sys.exit(1) - - -class Application(BaseApplication): - - # 'init' and 'load' methods are implemented by WSGIApplication. - # pylint: disable=abstract-method - - def chdir(self): - # chdir to the configured path before loading, - # default is the current dir - os.chdir(self.cfg.chdir) - - # add the path to sys.path - if self.cfg.chdir not in sys.path: - sys.path.insert(0, self.cfg.chdir) - - def get_config_from_filename(self, filename): - - if not os.path.exists(filename): - raise RuntimeError("%r doesn't exist" % filename) - - cfg = { - "__builtins__": __builtins__, - "__name__": "__config__", - "__file__": filename, - "__doc__": None, - "__package__": None - } - try: - execfile_(filename, cfg, cfg) - except Exception: - print("Failed to read config file: %s" % filename, file=sys.stderr) - traceback.print_exc() - sys.stderr.flush() - sys.exit(1) - - return cfg - - def get_config_from_module_name(self, module_name): - return vars(util.import_module(module_name)) - - def load_config_from_module_name_or_filename(self, location): - """ - Loads the configuration file: the file is a python file, otherwise raise an RuntimeError - Exception or stop the process if the configuration file contains a syntax error. 
- """ - - if location.startswith("python:"): - module_name = location[len("python:"):] - cfg = self.get_config_from_module_name(module_name) - else: - if location.startswith("file:"): - filename = location[len("file:"):] - else: - filename = location - cfg = self.get_config_from_filename(filename) - - for k, v in cfg.items(): - # Ignore unknown names - if k not in self.cfg.settings: - continue - try: - self.cfg.set(k.lower(), v) - except: - print("Invalid value for %s: %s\n" % (k, v), file=sys.stderr) - sys.stderr.flush() - raise - - return cfg - - def load_config_from_file(self, filename): - return self.load_config_from_module_name_or_filename(location=filename) - - def load_config(self): - # parse console args - parser = self.cfg.parser() - args = parser.parse_args() - - # optional settings from apps - cfg = self.init(parser, args, args.args) - - # set up import paths and follow symlinks - self.chdir() - - # Load up the any app specific configuration - if cfg: - for k, v in cfg.items(): - self.cfg.set(k.lower(), v) - - env_args = parser.parse_args(self.cfg.get_cmd_args_from_env()) - - if args.config: - self.load_config_from_file(args.config) - elif env_args.config: - self.load_config_from_file(env_args.config) - else: - default_config = get_default_config_file() - if default_config is not None: - self.load_config_from_file(default_config) - - # Load up environment configuration - for k, v in vars(env_args).items(): - if v is None: - continue - if k == "args": - continue - self.cfg.set(k.lower(), v) - - # Lastly, update the configuration with any command line settings. - for k, v in vars(args).items(): - if v is None: - continue - if k == "args": - continue - self.cfg.set(k.lower(), v) - - # current directory might be changed by the config now - # set up import paths and follow symlinks - self.chdir() - - def run(self): - if self.cfg.check_config: - try: - self.load() - except: - msg = "\nError while loading the application:\n" - print(msg, file=sys.stderr) - traceback.print_exc() - sys.stderr.flush() - sys.exit(1) - sys.exit(0) - - if self.cfg.spew: - debug.spew() - - if self.cfg.daemon: - util.daemonize(self.cfg.enable_stdio_inheritance) - - # set python paths - if self.cfg.pythonpath: - paths = self.cfg.pythonpath.split(",") - for path in paths: - pythonpath = os.path.abspath(path) - if pythonpath not in sys.path: - sys.path.insert(0, pythonpath) - - super(Application, self).run() diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/app/pasterapp.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/app/pasterapp.py deleted file mode 100644 index dbcd339..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/app/pasterapp.py +++ /dev/null @@ -1,209 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. 
-from __future__ import print_function - -# pylint: skip-file - -import os -import pkg_resources -import sys - -try: - import configparser as ConfigParser -except ImportError: - import ConfigParser - -from paste.deploy import loadapp, loadwsgi -SERVER = loadwsgi.SERVER - -from gunicorn.app.base import Application -from gunicorn.config import Config, get_default_config_file -from gunicorn import util - - -def _has_logging_config(paste_file): - cfg_parser = ConfigParser.ConfigParser() - cfg_parser.read([paste_file]) - return cfg_parser.has_section('loggers') - - -def paste_config(gconfig, config_url, relative_to, global_conf=None): - # add entry to pkg_resources - sys.path.insert(0, relative_to) - pkg_resources.working_set.add_entry(relative_to) - - config_url = config_url.split('#')[0] - cx = loadwsgi.loadcontext(SERVER, config_url, relative_to=relative_to, - global_conf=global_conf) - gc, lc = cx.global_conf.copy(), cx.local_conf.copy() - cfg = {} - - host, port = lc.pop('host', ''), lc.pop('port', '') - if host and port: - cfg['bind'] = '%s:%s' % (host, port) - elif host: - cfg['bind'] = host.split(',') - - cfg['default_proc_name'] = gc.get('__file__') - - # init logging configuration - config_file = config_url.split(':')[1] - if _has_logging_config(config_file): - cfg.setdefault('logconfig', config_file) - - for k, v in gc.items(): - if k not in gconfig.settings: - continue - cfg[k] = v - - for k, v in lc.items(): - if k not in gconfig.settings: - continue - cfg[k] = v - - return cfg - - -def load_pasteapp(config_url, relative_to, global_conf=None): - return loadapp(config_url, relative_to=relative_to, - global_conf=global_conf) - -class PasterBaseApplication(Application): - gcfg = None - - def app_config(self): - return paste_config(self.cfg, self.cfgurl, self.relpath, - global_conf=self.gcfg) - - def load_config(self): - super(PasterBaseApplication, self).load_config() - - # reload logging conf - if hasattr(self, "cfgfname"): - parser = ConfigParser.ConfigParser() - parser.read([self.cfgfname]) - if parser.has_section('loggers'): - from logging.config import fileConfig - config_file = os.path.abspath(self.cfgfname) - fileConfig(config_file, dict(__file__=config_file, - here=os.path.dirname(config_file))) - - -class PasterApplication(PasterBaseApplication): - - def init(self, parser, opts, args): - if len(args) != 1: - parser.error("No application name specified.") - - cwd = util.getcwd() - cfgfname = os.path.normpath(os.path.join(cwd, args[0])) - cfgfname = os.path.abspath(cfgfname) - if not os.path.exists(cfgfname): - parser.error("Config file not found: %s" % cfgfname) - - self.cfgurl = 'config:%s' % cfgfname - self.relpath = os.path.dirname(cfgfname) - self.cfgfname = cfgfname - - sys.path.insert(0, self.relpath) - pkg_resources.working_set.add_entry(self.relpath) - - return self.app_config() - - def load(self): - # chdir to the configured path before loading, - # default is the current dir - os.chdir(self.cfg.chdir) - - return load_pasteapp(self.cfgurl, self.relpath, global_conf=self.gcfg) - - -class PasterServerApplication(PasterBaseApplication): - - def __init__(self, app, gcfg=None, host="127.0.0.1", port=None, **kwargs): - # pylint: disable=super-init-not-called - self.cfg = Config() - self.gcfg = gcfg # need to hold this for app_config - self.app = app - self.callable = None - - gcfg = gcfg or {} - cfgfname = gcfg.get("__file__") - if cfgfname is not None: - self.cfgurl = 'config:%s' % cfgfname - self.relpath = os.path.dirname(cfgfname) - self.cfgfname = cfgfname - - cfg = 
kwargs.copy() - - if port and not host.startswith("unix:"): - bind = "%s:%s" % (host, port) - else: - bind = host - cfg["bind"] = bind.split(',') - - if gcfg: - for k, v in gcfg.items(): - cfg[k] = v - cfg["default_proc_name"] = cfg['__file__'] - - try: - for k, v in cfg.items(): - if k.lower() in self.cfg.settings and v is not None: - self.cfg.set(k.lower(), v) - except Exception as e: - print("\nConfig error: %s" % str(e), file=sys.stderr) - sys.stderr.flush() - sys.exit(1) - - if cfg.get("config"): - self.load_config_from_file(cfg["config"]) - else: - default_config = get_default_config_file() - if default_config is not None: - self.load_config_from_file(default_config) - - def load(self): - return self.app - - -def run(): - """\ - The ``gunicorn_paster`` command for launching Paster compatible - applications like Pylons or Turbogears2 - """ - util.warn("""This command is deprecated. - - You should now use the `--paste` option. Ex.: - - gunicorn --paste development.ini - """) - - from gunicorn.app.pasterapp import PasterApplication - PasterApplication("%(prog)s [OPTIONS] pasteconfig.ini").run() - - -def paste_server(app, gcfg=None, host="127.0.0.1", port=None, **kwargs): - """\ - A paster server. - - Then entry point in your paster ini file should looks like this: - - [server:main] - use = egg:gunicorn#main - host = 127.0.0.1 - port = 5000 - - """ - - util.warn("""This command is deprecated. - - You should now use the `--paste` option. Ex.: - - gunicorn --paste development.ini - """) - - from gunicorn.app.pasterapp import PasterServerApplication - PasterServerApplication(app, gcfg=gcfg, host=host, port=port, **kwargs).run() diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/app/wsgiapp.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/app/wsgiapp.py deleted file mode 100644 index 2205944..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/app/wsgiapp.py +++ /dev/null @@ -1,65 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. - -import os - -from gunicorn.errors import ConfigError -from gunicorn.app.base import Application -from gunicorn import util - - -class WSGIApplication(Application): - def init(self, parser, opts, args): - if opts.paste: - app_name = 'main' - path = opts.paste - if '#' in path: - path, app_name = path.split('#') - path = os.path.abspath(os.path.normpath( - os.path.join(util.getcwd(), path))) - - if not os.path.exists(path): - raise ConfigError("%r not found" % path) - - # paste application, load the config - self.cfgurl = 'config:%s#%s' % (path, app_name) - self.relpath = os.path.dirname(path) - - from .pasterapp import paste_config - return paste_config(self.cfg, self.cfgurl, self.relpath) - - if len(args) < 1: - parser.error("No application module specified.") - - self.cfg.set("default_proc_name", args[0]) - self.app_uri = args[0] - - def load_wsgiapp(self): - # load the app - return util.import_app(self.app_uri) - - def load_pasteapp(self): - # load the paste app - from .pasterapp import load_pasteapp - return load_pasteapp(self.cfgurl, self.relpath, global_conf=self.cfg.paste_global_conf) - - def load(self): - if self.cfg.paste is not None: - return self.load_pasteapp() - else: - return self.load_wsgiapp() - - -def run(): - """\ - The ``gunicorn`` command line runner for launching Gunicorn with - generic WSGI applications. 
-    """
-    from gunicorn.app.wsgiapp import WSGIApplication
-    WSGIApplication("%(prog)s [OPTIONS] [APP_MODULE]").run()
-
-
-if __name__ == '__main__':
-    run()
diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/arbiter.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/arbiter.py
deleted file mode 100644
index 083ee6a..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/arbiter.py
+++ /dev/null
@@ -1,646 +0,0 @@
-# -*- coding: utf-8 -
-#
-# This file is part of gunicorn released under the MIT license.
-# See the NOTICE for more information.
-from __future__ import print_function

-import errno
-import os
-import random
-import select
-import signal
-import sys
-import time
-import traceback
-
-from gunicorn.errors import HaltServer, AppImportError
-from gunicorn.pidfile import Pidfile
-from gunicorn import sock, systemd, util
-
-from gunicorn import __version__, SERVER_SOFTWARE
-
-
-class Arbiter(object):
-    """
-    The arbiter keeps the worker processes alive. It launches or
-    kills them if needed. It also manages application reloading
-    via SIGHUP/USR2.
-    """
-
-    # A flag indicating if a worker failed to
-    # boot. If a worker process exits with
-    # this error code, the arbiter will terminate.
-    WORKER_BOOT_ERROR = 3
-
-    # A flag indicating if an application failed to be loaded
-    APP_LOAD_ERROR = 4
-
-    START_CTX = {}
-
-    LISTENERS = []
-    WORKERS = {}
-    PIPE = []
-
-    # I love dynamic languages
-    SIG_QUEUE = []
-    SIGNALS = [getattr(signal, "SIG%s" % x)
-               for x in "HUP QUIT INT TERM TTIN TTOU USR1 USR2 WINCH".split()]
-    SIG_NAMES = dict(
-        (getattr(signal, name), name[3:].lower()) for name in dir(signal)
-        if name[:3] == "SIG" and name[3] != "_"
-    )
-
-    def __init__(self, app):
-        os.environ["SERVER_SOFTWARE"] = SERVER_SOFTWARE
-
-        self._num_workers = None
-        self._last_logged_active_worker_count = None
-        self.log = None
-
-        self.setup(app)
-
-        self.pidfile = None
-        self.systemd = False
-        self.worker_age = 0
-        self.reexec_pid = 0
-        self.master_pid = 0
-        self.master_name = "Master"
-
-        cwd = util.getcwd()
-
-        args = sys.argv[:]
-        args.insert(0, sys.executable)
-
-        # init start context
-        self.START_CTX = {
-            "args": args,
-            "cwd": cwd,
-            0: sys.executable
-        }
-
-    def _get_num_workers(self):
-        return self._num_workers
-
-    def _set_num_workers(self, value):
-        old_value = self._num_workers
-        self._num_workers = value
-        self.cfg.nworkers_changed(self, value, old_value)
-    num_workers = property(_get_num_workers, _set_num_workers)
-
-    def setup(self, app):
-        self.app = app
-        self.cfg = app.cfg
-
-        if self.log is None:
-            self.log = self.cfg.logger_class(app.cfg)
-
-        # reopen files
-        if 'GUNICORN_FD' in os.environ:
-            self.log.reopen_files()
-
-        self.worker_class = self.cfg.worker_class
-        self.address = self.cfg.address
-        self.num_workers = self.cfg.workers
-        self.timeout = self.cfg.timeout
-        self.proc_name = self.cfg.proc_name
-
-        self.log.debug('Current configuration:\n{0}'.format(
-            '\n'.join(
-                '  {0}: {1}'.format(config, value.value)
-                for config, value
-                in sorted(self.cfg.settings.items(),
-                          key=lambda setting: setting[1]))))
-
-        # set environment variables
-        if self.cfg.env:
-            for k, v in self.cfg.env.items():
-                os.environ[k] = v
-
-        if self.cfg.preload_app:
-            self.app.wsgi()
-
-    def start(self):
-        """\
-        Initialize the arbiter. Start listening and set pidfile if needed.
- """ - self.log.info("Starting gunicorn %s", __version__) - - if 'GUNICORN_PID' in os.environ: - self.master_pid = int(os.environ.get('GUNICORN_PID')) - self.proc_name = self.proc_name + ".2" - self.master_name = "Master.2" - - self.pid = os.getpid() - if self.cfg.pidfile is not None: - pidname = self.cfg.pidfile - if self.master_pid != 0: - pidname += ".2" - self.pidfile = Pidfile(pidname) - self.pidfile.create(self.pid) - self.cfg.on_starting(self) - - self.init_signals() - - if not self.LISTENERS: - fds = None - listen_fds = systemd.listen_fds() - if listen_fds: - self.systemd = True - fds = range(systemd.SD_LISTEN_FDS_START, - systemd.SD_LISTEN_FDS_START + listen_fds) - - elif self.master_pid: - fds = [] - for fd in os.environ.pop('GUNICORN_FD').split(','): - fds.append(int(fd)) - - self.LISTENERS = sock.create_sockets(self.cfg, self.log, fds) - - listeners_str = ",".join([str(l) for l in self.LISTENERS]) - self.log.debug("Arbiter booted") - self.log.info("Listening at: %s (%s)", listeners_str, self.pid) - self.log.info("Using worker: %s", self.cfg.worker_class_str) - - # check worker class requirements - if hasattr(self.worker_class, "check_config"): - self.worker_class.check_config(self.cfg, self.log) - - self.cfg.when_ready(self) - - def init_signals(self): - """\ - Initialize master signal handling. Most of the signals - are queued. Child signals only wake up the master. - """ - # close old PIPE - for p in self.PIPE: - os.close(p) - - # initialize the pipe - self.PIPE = pair = os.pipe() - for p in pair: - util.set_non_blocking(p) - util.close_on_exec(p) - - self.log.close_on_exec() - - # initialize all signals - for s in self.SIGNALS: - signal.signal(s, self.signal) - signal.signal(signal.SIGCHLD, self.handle_chld) - - def signal(self, sig, frame): - if len(self.SIG_QUEUE) < 5: - self.SIG_QUEUE.append(sig) - self.wakeup() - - def run(self): - "Main master loop." - self.start() - util._setproctitle("master [%s]" % self.proc_name) - - try: - self.manage_workers() - - while True: - self.maybe_promote_master() - - sig = self.SIG_QUEUE.pop(0) if self.SIG_QUEUE else None - if sig is None: - self.sleep() - self.murder_workers() - self.manage_workers() - continue - - if sig not in self.SIG_NAMES: - self.log.info("Ignoring unknown signal: %s", sig) - continue - - signame = self.SIG_NAMES.get(sig) - handler = getattr(self, "handle_%s" % signame, None) - if not handler: - self.log.error("Unhandled signal: %s", signame) - continue - self.log.info("Handling signal: %s", signame) - handler() - self.wakeup() - except StopIteration: - self.halt() - except KeyboardInterrupt: - self.halt() - except HaltServer as inst: - self.halt(reason=inst.reason, exit_status=inst.exit_status) - except SystemExit: - raise - except Exception: - self.log.info("Unhandled exception in main loop", - exc_info=True) - self.stop(False) - if self.pidfile is not None: - self.pidfile.unlink() - sys.exit(-1) - - def handle_chld(self, sig, frame): - "SIGCHLD handling" - self.reap_workers() - self.wakeup() - - def handle_hup(self): - """\ - HUP handling. 
- - Reload configuration - - Start the new worker processes with a new configuration - - Gracefully shutdown the old worker processes - """ - self.log.info("Hang up: %s", self.master_name) - self.reload() - - def handle_term(self): - "SIGTERM handling" - raise StopIteration - - def handle_int(self): - "SIGINT handling" - self.stop(False) - raise StopIteration - - def handle_quit(self): - "SIGQUIT handling" - self.stop(False) - raise StopIteration - - def handle_ttin(self): - """\ - SIGTTIN handling. - Increases the number of workers by one. - """ - self.num_workers += 1 - self.manage_workers() - - def handle_ttou(self): - """\ - SIGTTOU handling. - Decreases the number of workers by one. - """ - if self.num_workers <= 1: - return - self.num_workers -= 1 - self.manage_workers() - - def handle_usr1(self): - """\ - SIGUSR1 handling. - Kill all workers by sending them a SIGUSR1 - """ - self.log.reopen_files() - self.kill_workers(signal.SIGUSR1) - - def handle_usr2(self): - """\ - SIGUSR2 handling. - Creates a new master/worker set as a slave of the current - master without affecting old workers. Use this to do live - deployment with the ability to backout a change. - """ - self.reexec() - - def handle_winch(self): - """SIGWINCH handling""" - if self.cfg.daemon: - self.log.info("graceful stop of workers") - self.num_workers = 0 - self.kill_workers(signal.SIGTERM) - else: - self.log.debug("SIGWINCH ignored. Not daemonized") - - def maybe_promote_master(self): - if self.master_pid == 0: - return - - if self.master_pid != os.getppid(): - self.log.info("Master has been promoted.") - # reset master infos - self.master_name = "Master" - self.master_pid = 0 - self.proc_name = self.cfg.proc_name - del os.environ['GUNICORN_PID'] - # rename the pidfile - if self.pidfile is not None: - self.pidfile.rename(self.cfg.pidfile) - # reset proctitle - util._setproctitle("master [%s]" % self.proc_name) - - def wakeup(self): - """\ - Wake up the arbiter by writing to the PIPE - """ - try: - os.write(self.PIPE[1], b'.') - except IOError as e: - if e.errno not in [errno.EAGAIN, errno.EINTR]: - raise - - def halt(self, reason=None, exit_status=0): - """ halt arbiter """ - self.stop() - self.log.info("Shutting down: %s", self.master_name) - if reason is not None: - self.log.info("Reason: %s", reason) - if self.pidfile is not None: - self.pidfile.unlink() - self.cfg.on_exit(self) - sys.exit(exit_status) - - def sleep(self): - """\ - Sleep until PIPE is readable or we timeout. - A readable PIPE means a signal occurred. - """ - try: - ready = select.select([self.PIPE[0]], [], [], 1.0) - if not ready[0]: - return - while os.read(self.PIPE[0], 1): - pass - except (select.error, OSError) as e: - # TODO: select.error is a subclass of OSError since Python 3.3. - error_number = getattr(e, 'errno', e.args[0]) - if error_number not in [errno.EAGAIN, errno.EINTR]: - raise - except KeyboardInterrupt: - sys.exit() - - def stop(self, graceful=True): - """\ - Stop workers - - :attr graceful: boolean, If True (the default) workers will be - killed gracefully (ie. 
trying to wait for the current connection) - """ - - unlink = self.reexec_pid == self.master_pid == 0 and not self.systemd - sock.close_sockets(self.LISTENERS, unlink) - - self.LISTENERS = [] - sig = signal.SIGTERM - if not graceful: - sig = signal.SIGQUIT - limit = time.time() + self.cfg.graceful_timeout - # instruct the workers to exit - self.kill_workers(sig) - # wait until the graceful timeout - while self.WORKERS and time.time() < limit: - time.sleep(0.1) - - self.kill_workers(signal.SIGKILL) - - def reexec(self): - """\ - Relaunch the master and workers. - """ - if self.reexec_pid != 0: - self.log.warning("USR2 signal ignored. Child exists.") - return - - if self.master_pid != 0: - self.log.warning("USR2 signal ignored. Parent exists.") - return - - master_pid = os.getpid() - self.reexec_pid = os.fork() - if self.reexec_pid != 0: - return - - self.cfg.pre_exec(self) - - environ = self.cfg.env_orig.copy() - environ['GUNICORN_PID'] = str(master_pid) - - if self.systemd: - environ['LISTEN_PID'] = str(os.getpid()) - environ['LISTEN_FDS'] = str(len(self.LISTENERS)) - else: - environ['GUNICORN_FD'] = ','.join( - str(l.fileno()) for l in self.LISTENERS) - - os.chdir(self.START_CTX['cwd']) - - # exec the process using the original environment - os.execvpe(self.START_CTX[0], self.START_CTX['args'], environ) - - def reload(self): - old_address = self.cfg.address - - # reset old environment - for k in self.cfg.env: - if k in self.cfg.env_orig: - # reset the key to the value it had before - # we launched gunicorn - os.environ[k] = self.cfg.env_orig[k] - else: - # delete the value set by gunicorn - try: - del os.environ[k] - except KeyError: - pass - - # reload conf - self.app.reload() - self.setup(self.app) - - # reopen log files - self.log.reopen_files() - - # do we need to change listener ? - if old_address != self.cfg.address: - # close all listeners - for l in self.LISTENERS: - l.close() - # init new listeners - self.LISTENERS = sock.create_sockets(self.cfg, self.log) - listeners_str = ",".join([str(l) for l in self.LISTENERS]) - self.log.info("Listening at: %s", listeners_str) - - # do some actions on reload - self.cfg.on_reload(self) - - # unlink pidfile - if self.pidfile is not None: - self.pidfile.unlink() - - # create new pidfile - if self.cfg.pidfile is not None: - self.pidfile = Pidfile(self.cfg.pidfile) - self.pidfile.create(self.pid) - - # set new proc_name - util._setproctitle("master [%s]" % self.proc_name) - - # spawn new workers - for _ in range(self.cfg.workers): - self.spawn_worker() - - # manage workers - self.manage_workers() - - def murder_workers(self): - """\ - Kill unused/idle workers - """ - if not self.timeout: - return - workers = list(self.WORKERS.items()) - for (pid, worker) in workers: - try: - if time.time() - worker.tmp.last_update() <= self.timeout: - continue - except (OSError, ValueError): - continue - - if not worker.aborted: - self.log.critical("WORKER TIMEOUT (pid:%s)", pid) - worker.aborted = True - self.kill_worker(pid, signal.SIGABRT) - else: - self.kill_worker(pid, signal.SIGKILL) - - def reap_workers(self): - """\ - Reap workers to avoid zombie processes - """ - try: - while True: - wpid, status = os.waitpid(-1, os.WNOHANG) - if not wpid: - break - if self.reexec_pid == wpid: - self.reexec_pid = 0 - else: - # A worker was terminated. If the termination reason was - # that it could not boot, we'll shut it down to avoid - # infinite start/stop cycles. - exitcode = status >> 8 - if exitcode == self.WORKER_BOOT_ERROR: - reason = "Worker failed to boot." 
- raise HaltServer(reason, self.WORKER_BOOT_ERROR) - if exitcode == self.APP_LOAD_ERROR: - reason = "App failed to load." - raise HaltServer(reason, self.APP_LOAD_ERROR) - - worker = self.WORKERS.pop(wpid, None) - if not worker: - continue - worker.tmp.close() - self.cfg.child_exit(self, worker) - except OSError as e: - if e.errno != errno.ECHILD: - raise - - def manage_workers(self): - """\ - Maintain the number of workers by spawning or killing - as required. - """ - if len(self.WORKERS.keys()) < self.num_workers: - self.spawn_workers() - - workers = self.WORKERS.items() - workers = sorted(workers, key=lambda w: w[1].age) - while len(workers) > self.num_workers: - (pid, _) = workers.pop(0) - self.kill_worker(pid, signal.SIGTERM) - - active_worker_count = len(workers) - if self._last_logged_active_worker_count != active_worker_count: - self._last_logged_active_worker_count = active_worker_count - self.log.debug("{0} workers".format(active_worker_count), - extra={"metric": "gunicorn.workers", - "value": active_worker_count, - "mtype": "gauge"}) - - def spawn_worker(self): - self.worker_age += 1 - worker = self.worker_class(self.worker_age, self.pid, self.LISTENERS, - self.app, self.timeout / 2.0, - self.cfg, self.log) - self.cfg.pre_fork(self, worker) - pid = os.fork() - if pid != 0: - worker.pid = pid - self.WORKERS[pid] = worker - return pid - - # Do not inherit the temporary files of other workers - for sibling in self.WORKERS.values(): - sibling.tmp.close() - - # Process Child - worker.pid = os.getpid() - try: - util._setproctitle("worker [%s]" % self.proc_name) - self.log.info("Booting worker with pid: %s", worker.pid) - self.cfg.post_fork(self, worker) - worker.init_process() - sys.exit(0) - except SystemExit: - raise - except AppImportError as e: - self.log.debug("Exception while loading the application", - exc_info=True) - print("%s" % e, file=sys.stderr) - sys.stderr.flush() - sys.exit(self.APP_LOAD_ERROR) - except: - self.log.exception("Exception in worker process") - if not worker.booted: - sys.exit(self.WORKER_BOOT_ERROR) - sys.exit(-1) - finally: - self.log.info("Worker exiting (pid: %s)", worker.pid) - try: - worker.tmp.close() - self.cfg.worker_exit(self, worker) - except: - self.log.warning("Exception during worker exit:\n%s", - traceback.format_exc()) - - def spawn_workers(self): - """\ - Spawn new workers as needed. - - This is where a worker process leaves the main loop - of the master process. - """ - - for _ in range(self.num_workers - len(self.WORKERS.keys())): - self.spawn_worker() - time.sleep(0.1 * random.random()) - - def kill_workers(self, sig): - """\ - Kill all workers with the signal `sig` - :attr sig: `signal.SIG*` value - """ - worker_pids = list(self.WORKERS.keys()) - for pid in worker_pids: - self.kill_worker(pid, sig) - - def kill_worker(self, pid, sig): - """\ - Kill a worker - - :attr pid: int, worker pid - :attr sig: `signal.SIG*` value - """ - try: - os.kill(pid, sig) - except OSError as e: - if e.errno == errno.ESRCH: - try: - worker = self.WORKERS.pop(pid) - worker.tmp.close() - self.cfg.worker_exit(self, worker) - return - except (KeyError, OSError): - return - raise diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/argparse_compat.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/argparse_compat.py deleted file mode 100644 index 32d948c..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/argparse_compat.py +++ /dev/null @@ -1,2362 +0,0 @@ -# Author: Steven J. Bethard . 
-
-"""Command-line parsing library
-
-This module is an optparse-inspired command-line parsing library that:
-
-    - handles both optional and positional arguments
-    - produces highly informative usage messages
-    - supports parsers that dispatch to sub-parsers
-
-The following is a simple usage example that sums integers from the
-command-line and writes the result to a file::
-
-    parser = argparse.ArgumentParser(
-        description='sum the integers at the command line')
-    parser.add_argument(
-        'integers', metavar='int', nargs='+', type=int,
-        help='an integer to be summed')
-    parser.add_argument(
-        '--log', default=sys.stdout, type=argparse.FileType('w'),
-        help='the file where the sum should be written')
-    args = parser.parse_args()
-    args.log.write('%s' % sum(args.integers))
-    args.log.close()
-
-The module contains the following public classes:
-
-    - ArgumentParser -- The main entry point for command-line parsing. As the
-        example above shows, the add_argument() method is used to populate
-        the parser with actions for optional and positional arguments. Then
-        the parse_args() method is invoked to convert the args at the
-        command-line into an object with attributes.
-
-    - ArgumentError -- The exception raised by ArgumentParser objects when
-        there are errors with the parser's actions. Errors raised while
-        parsing the command-line are caught by ArgumentParser and emitted
-        as command-line messages.
-
-    - FileType -- A factory for defining types of files to be created. As the
-        example above shows, instances of FileType are typically passed as
-        the type= argument of add_argument() calls.
-
-    - Action -- The base class for parser actions. Typically actions are
-        selected by passing strings like 'store_true' or 'append_const' to
-        the action= argument of add_argument(). However, for greater
-        customization of ArgumentParser actions, subclasses of Action may
-        be defined and passed as the action= argument.
-
-    - HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
-        ArgumentDefaultsHelpFormatter -- Formatter classes which
-        may be passed as the formatter_class= argument to the
-        ArgumentParser constructor. HelpFormatter is the default,
-        RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
-        not to change the formatting for help text, and
-        ArgumentDefaultsHelpFormatter adds information about argument defaults
-        to the help.
-
-All other classes in this module are considered implementation details.
-(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
-considered public as object names -- the API of the formatter objects is
-still considered an implementation detail.)
-""" - -__version__ = '1.2.1' -__all__ = [ - 'ArgumentParser', - 'ArgumentError', - 'ArgumentTypeError', - 'FileType', - 'HelpFormatter', - 'ArgumentDefaultsHelpFormatter', - 'RawDescriptionHelpFormatter', - 'RawTextHelpFormatter', - 'Namespace', - 'Action', - 'ONE_OR_MORE', - 'OPTIONAL', - 'PARSER', - 'REMAINDER', - 'SUPPRESS', - 'ZERO_OR_MORE', -] - - -import copy as _copy -import os as _os -import re as _re -import sys as _sys -import textwrap as _textwrap - -from gettext import gettext as _ - -try: - set -except NameError: - # for python < 2.4 compatibility (sets module is there since 2.3): - from sets import Set as set - -try: - basestring -except NameError: - basestring = str - -try: - sorted -except NameError: - # for python < 2.4 compatibility: - def sorted(iterable, reverse=False): - result = list(iterable) - result.sort() - if reverse: - result.reverse() - return result - - -def _callable(obj): - return hasattr(obj, '__call__') or hasattr(obj, '__bases__') - - -SUPPRESS = '==SUPPRESS==' - -OPTIONAL = '?' -ZERO_OR_MORE = '*' -ONE_OR_MORE = '+' -PARSER = 'A...' -REMAINDER = '...' -_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args' - -# ============================= -# Utility functions and classes -# ============================= - -class _AttributeHolder(object): - """Abstract base class that provides __repr__. - - The __repr__ method returns a string in the format:: - ClassName(attr=name, attr=name, ...) - The attributes are determined either by a class-level attribute, - '_kwarg_names', or by inspecting the instance __dict__. - """ - - def __repr__(self): - type_name = type(self).__name__ - arg_strings = [] - for arg in self._get_args(): - arg_strings.append(repr(arg)) - for name, value in self._get_kwargs(): - arg_strings.append('%s=%r' % (name, value)) - return '%s(%s)' % (type_name, ', '.join(arg_strings)) - - def _get_kwargs(self): - return sorted(self.__dict__.items()) - - def _get_args(self): - return [] - - -def _ensure_value(namespace, name, value): - if getattr(namespace, name, None) is None: - setattr(namespace, name, value) - return getattr(namespace, name) - - -# =============== -# Formatting Help -# =============== - -class HelpFormatter(object): - """Formatter for generating usage messages and argument help strings. - - Only the name of this class is considered a public API. All the methods - provided by the class are considered an implementation detail. - """ - - def __init__(self, - prog, - indent_increment=2, - max_help_position=24, - width=None): - - # default setting for width - if width is None: - try: - width = int(_os.environ['COLUMNS']) - except (KeyError, ValueError): - width = 80 - width -= 2 - - self._prog = prog - self._indent_increment = indent_increment - self._max_help_position = max_help_position - self._width = width - - self._current_indent = 0 - self._level = 0 - self._action_max_length = 0 - - self._root_section = self._Section(self, None) - self._current_section = self._root_section - - self._whitespace_matcher = _re.compile(r'\s+') - self._long_break_matcher = _re.compile(r'\n\n\n+') - - # =============================== - # Section and indentation methods - # =============================== - def _indent(self): - self._current_indent += self._indent_increment - self._level += 1 - - def _dedent(self): - self._current_indent -= self._indent_increment - assert self._current_indent >= 0, 'Indent decreased below 0.' 
- self._level -= 1 - - class _Section(object): - - def __init__(self, formatter, parent, heading=None): - self.formatter = formatter - self.parent = parent - self.heading = heading - self.items = [] - - def format_help(self): - # format the indented section - if self.parent is not None: - self.formatter._indent() - join = self.formatter._join_parts - for func, args in self.items: - func(*args) - item_help = join([func(*args) for func, args in self.items]) - if self.parent is not None: - self.formatter._dedent() - - # return nothing if the section was empty - if not item_help: - return '' - - # add the heading if the section was non-empty - if self.heading is not SUPPRESS and self.heading is not None: - current_indent = self.formatter._current_indent - heading = '%*s%s:\n' % (current_indent, '', self.heading) - else: - heading = '' - - # join the section-initial newline, the heading and the help - return join(['\n', heading, item_help, '\n']) - - def _add_item(self, func, args): - self._current_section.items.append((func, args)) - - # ======================== - # Message building methods - # ======================== - def start_section(self, heading): - self._indent() - section = self._Section(self, self._current_section, heading) - self._add_item(section.format_help, []) - self._current_section = section - - def end_section(self): - self._current_section = self._current_section.parent - self._dedent() - - def add_text(self, text): - if text is not SUPPRESS and text is not None: - self._add_item(self._format_text, [text]) - - def add_usage(self, usage, actions, groups, prefix=None): - if usage is not SUPPRESS: - args = usage, actions, groups, prefix - self._add_item(self._format_usage, args) - - def add_argument(self, action): - if action.help is not SUPPRESS: - - # find all invocations - get_invocation = self._format_action_invocation - invocations = [get_invocation(action)] - for subaction in self._iter_indented_subactions(action): - invocations.append(get_invocation(subaction)) - - # update the maximum item length - invocation_length = max([len(s) for s in invocations]) - action_length = invocation_length + self._current_indent - self._action_max_length = max(self._action_max_length, - action_length) - - # add the item to the list - self._add_item(self._format_action, [action]) - - def add_arguments(self, actions): - for action in actions: - self.add_argument(action) - - # ======================= - # Help-formatting methods - # ======================= - def format_help(self): - help = self._root_section.format_help() - if help: - help = self._long_break_matcher.sub('\n\n', help) - help = help.strip('\n') + '\n' - return help - - def _join_parts(self, part_strings): - return ''.join([part - for part in part_strings - if part and part is not SUPPRESS]) - - def _format_usage(self, usage, actions, groups, prefix): - if prefix is None: - prefix = _('usage: ') - - # if usage is specified, use that - if usage is not None: - usage = usage % dict(prog=self._prog) - - # if no optionals or positionals are available, usage is just prog - elif usage is None and not actions: - usage = '%(prog)s' % dict(prog=self._prog) - - # if optionals and positionals are available, calculate usage - elif usage is None: - prog = '%(prog)s' % dict(prog=self._prog) - - # split optionals from positionals - optionals = [] - positionals = [] - for action in actions: - if action.option_strings: - optionals.append(action) - else: - positionals.append(action) - - # build full usage string - format = 
self._format_actions_usage - action_usage = format(optionals + positionals, groups) - usage = ' '.join([s for s in [prog, action_usage] if s]) - - # wrap the usage parts if it's too long - text_width = self._width - self._current_indent - if len(prefix) + len(usage) > text_width: - - # break usage into wrappable parts - part_regexp = r'\(.*?\)+|\[.*?\]+|\S+' - opt_usage = format(optionals, groups) - pos_usage = format(positionals, groups) - opt_parts = _re.findall(part_regexp, opt_usage) - pos_parts = _re.findall(part_regexp, pos_usage) - assert ' '.join(opt_parts) == opt_usage - assert ' '.join(pos_parts) == pos_usage - - # helper for wrapping lines - def get_lines(parts, indent, prefix=None): - lines = [] - line = [] - if prefix is not None: - line_len = len(prefix) - 1 - else: - line_len = len(indent) - 1 - for part in parts: - if line_len + 1 + len(part) > text_width: - lines.append(indent + ' '.join(line)) - line = [] - line_len = len(indent) - 1 - line.append(part) - line_len += len(part) + 1 - if line: - lines.append(indent + ' '.join(line)) - if prefix is not None: - lines[0] = lines[0][len(indent):] - return lines - - # if prog is short, follow it with optionals or positionals - if len(prefix) + len(prog) <= 0.75 * text_width: - indent = ' ' * (len(prefix) + len(prog) + 1) - if opt_parts: - lines = get_lines([prog] + opt_parts, indent, prefix) - lines.extend(get_lines(pos_parts, indent)) - elif pos_parts: - lines = get_lines([prog] + pos_parts, indent, prefix) - else: - lines = [prog] - - # if prog is long, put it on its own line - else: - indent = ' ' * len(prefix) - parts = opt_parts + pos_parts - lines = get_lines(parts, indent) - if len(lines) > 1: - lines = [] - lines.extend(get_lines(opt_parts, indent)) - lines.extend(get_lines(pos_parts, indent)) - lines = [prog] + lines - - # join lines into usage - usage = '\n'.join(lines) - - # prefix with 'usage:' - return '%s%s\n\n' % (prefix, usage) - - def _format_actions_usage(self, actions, groups): - # find group indices and identify actions in groups - group_actions = set() - inserts = {} - for group in groups: - try: - start = actions.index(group._group_actions[0]) - except ValueError: - continue - else: - end = start + len(group._group_actions) - if actions[start:end] == group._group_actions: - for action in group._group_actions: - group_actions.add(action) - if not group.required: - if start in inserts: - inserts[start] += ' [' - else: - inserts[start] = '[' - inserts[end] = ']' - else: - if start in inserts: - inserts[start] += ' (' - else: - inserts[start] = '(' - inserts[end] = ')' - for i in range(start + 1, end): - inserts[i] = '|' - - # collect all actions format strings - parts = [] - for i, action in enumerate(actions): - - # suppressed arguments are marked with None - # remove | separators for suppressed arguments - if action.help is SUPPRESS: - parts.append(None) - if inserts.get(i) == '|': - inserts.pop(i) - elif inserts.get(i + 1) == '|': - inserts.pop(i + 1) - - # produce all arg strings - elif not action.option_strings: - part = self._format_args(action, action.dest) - - # if it's in a group, strip the outer [] - if action in group_actions: - if part[0] == '[' and part[-1] == ']': - part = part[1:-1] - - # add the action string to the list - parts.append(part) - - # produce the first way to invoke the option in brackets - else: - option_string = action.option_strings[0] - - # if the Optional doesn't take a value, format is: - # -s or --long - if action.nargs == 0: - part = '%s' % option_string - - # if the 
Optional takes a value, format is:
-                #    -s ARGS or --long ARGS
-                else:
-                    default = action.dest.upper()
-                    args_string = self._format_args(action, default)
-                    part = '%s %s' % (option_string, args_string)
-
-                # make it look optional if it's not required or in a group
-                if not action.required and action not in group_actions:
-                    part = '[%s]' % part
-
-                # add the action string to the list
-                parts.append(part)
-
-        # insert things at the necessary indices
-        for i in sorted(inserts, reverse=True):
-            parts[i:i] = [inserts[i]]
-
-        # join all the action items with spaces
-        text = ' '.join([item for item in parts if item is not None])
-
-        # clean up separators for mutually exclusive groups
-        open = r'[\[(]'
-        close = r'[\])]'
-        text = _re.sub(r'(%s) ' % open, r'\1', text)
-        text = _re.sub(r' (%s)' % close, r'\1', text)
-        text = _re.sub(r'%s *%s' % (open, close), r'', text)
-        text = _re.sub(r'\(([^|]*)\)', r'\1', text)
-        text = text.strip()
-
-        # return the text
-        return text
-
-    def _format_text(self, text):
-        if '%(prog)' in text:
-            text = text % dict(prog=self._prog)
-        text_width = self._width - self._current_indent
-        indent = ' ' * self._current_indent
-        return self._fill_text(text, text_width, indent) + '\n\n'
-
-    def _format_action(self, action):
-        # determine the required width and the entry label
-        help_position = min(self._action_max_length + 2,
-                            self._max_help_position)
-        help_width = self._width - help_position
-        action_width = help_position - self._current_indent - 2
-        action_header = self._format_action_invocation(action)
-
-        # no help; start on same line and add a final newline
-        if not action.help:
-            tup = self._current_indent, '', action_header
-            action_header = '%*s%s\n' % tup
-
-        # short action name; start on the same line and pad two spaces
-        elif len(action_header) <= action_width:
-            tup = self._current_indent, '', action_width, action_header
-            action_header = '%*s%-*s  ' % tup
-            indent_first = 0
-
-        # long action name; start on the next line
-        else:
-            tup = self._current_indent, '', action_header
-            action_header = '%*s%s\n' % tup
-            indent_first = help_position
-
-        # collect the pieces of the action help
-        parts = [action_header]
-
-        # if there was help for the action, add lines of help text
-        if action.help:
-            help_text = self._expand_help(action)
-            help_lines = self._split_lines(help_text, help_width)
-            parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
-            for line in help_lines[1:]:
-                parts.append('%*s%s\n' % (help_position, '', line))
-
-        # or add a newline if the description doesn't end with one
-        elif not action_header.endswith('\n'):
-            parts.append('\n')
-
-        # if there are any sub-actions, add their help as well
-        for subaction in self._iter_indented_subactions(action):
-            parts.append(self._format_action(subaction))
-
-        # return a single string
-        return self._join_parts(parts)
-
-    def _format_action_invocation(self, action):
-        if not action.option_strings:
-            metavar, = self._metavar_formatter(action, action.dest)(1)
-            return metavar
-
-        else:
-            parts = []
-
-            # if the Optional doesn't take a value, format is:
-            #    -s, --long
-            if action.nargs == 0:
-                parts.extend(action.option_strings)
-
-            # if the Optional takes a value, format is:
-            #    -s ARGS, --long ARGS
-            else:
-                default = action.dest.upper()
-                args_string = self._format_args(action, default)
-                for option_string in action.option_strings:
-                    parts.append('%s %s' % (option_string, args_string))
-
-            return ', '.join(parts)
-
-    def _metavar_formatter(self, action, default_metavar):
-        if action.metavar is not None:
-
result = action.metavar - elif action.choices is not None: - choice_strs = [str(choice) for choice in action.choices] - result = '{%s}' % ','.join(choice_strs) - else: - result = default_metavar - - def format(tuple_size): - if isinstance(result, tuple): - return result - else: - return (result, ) * tuple_size - return format - - def _format_args(self, action, default_metavar): - get_metavar = self._metavar_formatter(action, default_metavar) - if action.nargs is None: - result = '%s' % get_metavar(1) - elif action.nargs == OPTIONAL: - result = '[%s]' % get_metavar(1) - elif action.nargs == ZERO_OR_MORE: - result = '[%s [%s ...]]' % get_metavar(2) - elif action.nargs == ONE_OR_MORE: - result = '%s [%s ...]' % get_metavar(2) - elif action.nargs == REMAINDER: - result = '...' - elif action.nargs == PARSER: - result = '%s ...' % get_metavar(1) - else: - formats = ['%s' for _ in range(action.nargs)] - result = ' '.join(formats) % get_metavar(action.nargs) - return result - - def _expand_help(self, action): - params = dict(vars(action), prog=self._prog) - for name in list(params): - if params[name] is SUPPRESS: - del params[name] - for name in list(params): - if hasattr(params[name], '__name__'): - params[name] = params[name].__name__ - if params.get('choices') is not None: - choices_str = ', '.join([str(c) for c in params['choices']]) - params['choices'] = choices_str - return self._get_help_string(action) % params - - def _iter_indented_subactions(self, action): - try: - get_subactions = action._get_subactions - except AttributeError: - pass - else: - self._indent() - for subaction in get_subactions(): - yield subaction - self._dedent() - - def _split_lines(self, text, width): - text = self._whitespace_matcher.sub(' ', text).strip() - return _textwrap.wrap(text, width) - - def _fill_text(self, text, width, indent): - text = self._whitespace_matcher.sub(' ', text).strip() - return _textwrap.fill(text, width, initial_indent=indent, - subsequent_indent=indent) - - def _get_help_string(self, action): - return action.help - - -class RawDescriptionHelpFormatter(HelpFormatter): - """Help message formatter which retains any formatting in descriptions. - - Only the name of this class is considered a public API. All the methods - provided by the class are considered an implementation detail. - """ - - def _fill_text(self, text, width, indent): - return ''.join([indent + line for line in text.splitlines(True)]) - - -class RawTextHelpFormatter(RawDescriptionHelpFormatter): - """Help message formatter which retains formatting of all help text. - - Only the name of this class is considered a public API. All the methods - provided by the class are considered an implementation detail. - """ - - def _split_lines(self, text, width): - return text.splitlines() - - -class ArgumentDefaultsHelpFormatter(HelpFormatter): - """Help message formatter which adds default values to argument help. - - Only the name of this class is considered a public API. All the methods - provided by the class are considered an implementation detail. 
- """ - - def _get_help_string(self, action): - help = action.help - if '%(default)' not in action.help: - if action.default is not SUPPRESS: - defaulting_nargs = [OPTIONAL, ZERO_OR_MORE] - if action.option_strings or action.nargs in defaulting_nargs: - help += ' (default: %(default)s)' - return help - - -# ===================== -# Options and Arguments -# ===================== - -def _get_action_name(argument): - if argument is None: - return None - elif argument.option_strings: - return '/'.join(argument.option_strings) - elif argument.metavar not in (None, SUPPRESS): - return argument.metavar - elif argument.dest not in (None, SUPPRESS): - return argument.dest - else: - return None - - -class ArgumentError(Exception): - """An error from creating or using an argument (optional or positional). - - The string value of this exception is the message, augmented with - information about the argument that caused it. - """ - - def __init__(self, argument, message): - self.argument_name = _get_action_name(argument) - self.message = message - - def __str__(self): - if self.argument_name is None: - format = '%(message)s' - else: - format = 'argument %(argument_name)s: %(message)s' - return format % dict(message=self.message, - argument_name=self.argument_name) - - -class ArgumentTypeError(Exception): - """An error from trying to convert a command line string to a type.""" - pass - - -# ============== -# Action classes -# ============== - -class Action(_AttributeHolder): - """Information about how to convert command line strings to Python objects. - - Action objects are used by an ArgumentParser to represent the information - needed to parse a single argument from one or more strings from the - command line. The keyword arguments to the Action constructor are also - all attributes of Action instances. - - Keyword Arguments: - - - option_strings -- A list of command-line option strings which - should be associated with this action. - - - dest -- The name of the attribute to hold the created object(s) - - - nargs -- The number of command-line arguments that should be - consumed. By default, one argument will be consumed and a single - value will be produced. Other values include: - - N (an integer) consumes N arguments (and produces a list) - - '?' consumes zero or one arguments - - '*' consumes zero or more arguments (and produces a list) - - '+' consumes one or more arguments (and produces a list) - Note that the difference between the default and nargs=1 is that - with the default, a single value will be produced, while with - nargs=1, a list containing a single value will be produced. - - - const -- The value to be produced if the option is specified and the - option uses an action that takes no values. - - - default -- The value to be produced if the option is not specified. - - - type -- The type which the command-line arguments should be converted - to, should be one of 'string', 'int', 'float', 'complex' or a - callable object that accepts a single string argument. If None, - 'string' is assumed. - - - choices -- A container of values that should be allowed. If not None, - after a command-line argument has been converted to the appropriate - type, an exception will be raised if it is not a member of this - collection. - - - required -- True if the action must always be specified at the - command line. This is only meaningful for optional command-line - arguments. - - - help -- The help string describing the argument. 
- - - metavar -- The name to be used for the option's argument with the - help string. If None, the 'dest' value will be used as the name. - """ - - def __init__(self, - option_strings, - dest, - nargs=None, - const=None, - default=None, - type=None, - choices=None, - required=False, - help=None, - metavar=None): - self.option_strings = option_strings - self.dest = dest - self.nargs = nargs - self.const = const - self.default = default - self.type = type - self.choices = choices - self.required = required - self.help = help - self.metavar = metavar - - def _get_kwargs(self): - names = [ - 'option_strings', - 'dest', - 'nargs', - 'const', - 'default', - 'type', - 'choices', - 'help', - 'metavar', - ] - return [(name, getattr(self, name)) for name in names] - - def __call__(self, parser, namespace, values, option_string=None): - raise NotImplementedError(_('.__call__() not defined')) - - -class _StoreAction(Action): - - def __init__(self, - option_strings, - dest, - nargs=None, - const=None, - default=None, - type=None, - choices=None, - required=False, - help=None, - metavar=None): - if nargs == 0: - raise ValueError('nargs for store actions must be > 0; if you ' - 'have nothing to store, actions such as store ' - 'true or store const may be more appropriate') - if const is not None and nargs != OPTIONAL: - raise ValueError('nargs must be %r to supply const' % OPTIONAL) - super(_StoreAction, self).__init__( - option_strings=option_strings, - dest=dest, - nargs=nargs, - const=const, - default=default, - type=type, - choices=choices, - required=required, - help=help, - metavar=metavar) - - def __call__(self, parser, namespace, values, option_string=None): - setattr(namespace, self.dest, values) - - -class _StoreConstAction(Action): - - def __init__(self, - option_strings, - dest, - const, - default=None, - required=False, - help=None, - metavar=None): - super(_StoreConstAction, self).__init__( - option_strings=option_strings, - dest=dest, - nargs=0, - const=const, - default=default, - required=required, - help=help) - - def __call__(self, parser, namespace, values, option_string=None): - setattr(namespace, self.dest, self.const) - - -class _StoreTrueAction(_StoreConstAction): - - def __init__(self, - option_strings, - dest, - default=False, - required=False, - help=None): - super(_StoreTrueAction, self).__init__( - option_strings=option_strings, - dest=dest, - const=True, - default=default, - required=required, - help=help) - - -class _StoreFalseAction(_StoreConstAction): - - def __init__(self, - option_strings, - dest, - default=True, - required=False, - help=None): - super(_StoreFalseAction, self).__init__( - option_strings=option_strings, - dest=dest, - const=False, - default=default, - required=required, - help=help) - - -class _AppendAction(Action): - - def __init__(self, - option_strings, - dest, - nargs=None, - const=None, - default=None, - type=None, - choices=None, - required=False, - help=None, - metavar=None): - if nargs == 0: - raise ValueError('nargs for append actions must be > 0; if arg ' - 'strings are not supplying the value to append, ' - 'the append const action may be more appropriate') - if const is not None and nargs != OPTIONAL: - raise ValueError('nargs must be %r to supply const' % OPTIONAL) - super(_AppendAction, self).__init__( - option_strings=option_strings, - dest=dest, - nargs=nargs, - const=const, - default=default, - type=type, - choices=choices, - required=required, - help=help, - metavar=metavar) - - def __call__(self, parser, namespace, values, 
option_string=None): - items = _copy.copy(_ensure_value(namespace, self.dest, [])) - items.append(values) - setattr(namespace, self.dest, items) - - -class _AppendConstAction(Action): - - def __init__(self, - option_strings, - dest, - const, - default=None, - required=False, - help=None, - metavar=None): - super(_AppendConstAction, self).__init__( - option_strings=option_strings, - dest=dest, - nargs=0, - const=const, - default=default, - required=required, - help=help, - metavar=metavar) - - def __call__(self, parser, namespace, values, option_string=None): - items = _copy.copy(_ensure_value(namespace, self.dest, [])) - items.append(self.const) - setattr(namespace, self.dest, items) - - -class _CountAction(Action): - - def __init__(self, - option_strings, - dest, - default=None, - required=False, - help=None): - super(_CountAction, self).__init__( - option_strings=option_strings, - dest=dest, - nargs=0, - default=default, - required=required, - help=help) - - def __call__(self, parser, namespace, values, option_string=None): - new_count = _ensure_value(namespace, self.dest, 0) + 1 - setattr(namespace, self.dest, new_count) - - -class _HelpAction(Action): - - def __init__(self, - option_strings, - dest=SUPPRESS, - default=SUPPRESS, - help=None): - super(_HelpAction, self).__init__( - option_strings=option_strings, - dest=dest, - default=default, - nargs=0, - help=help) - - def __call__(self, parser, namespace, values, option_string=None): - parser.print_help() - parser.exit() - - -class _VersionAction(Action): - - def __init__(self, - option_strings, - version=None, - dest=SUPPRESS, - default=SUPPRESS, - help="show program's version number and exit"): - super(_VersionAction, self).__init__( - option_strings=option_strings, - dest=dest, - default=default, - nargs=0, - help=help) - self.version = version - - def __call__(self, parser, namespace, values, option_string=None): - version = self.version - if version is None: - version = parser.version - formatter = parser._get_formatter() - formatter.add_text(version) - parser.exit(message=formatter.format_help()) - - -class _SubParsersAction(Action): - - class _ChoicesPseudoAction(Action): - - def __init__(self, name, help): - sup = super(_SubParsersAction._ChoicesPseudoAction, self) - sup.__init__(option_strings=[], dest=name, help=help) - - def __init__(self, - option_strings, - prog, - parser_class, - dest=SUPPRESS, - help=None, - metavar=None): - - self._prog_prefix = prog - self._parser_class = parser_class - self._name_parser_map = {} - self._choices_actions = [] - - super(_SubParsersAction, self).__init__( - option_strings=option_strings, - dest=dest, - nargs=PARSER, - choices=self._name_parser_map, - help=help, - metavar=metavar) - - def add_parser(self, name, **kwargs): - # set prog from the existing prefix - if kwargs.get('prog') is None: - kwargs['prog'] = '%s %s' % (self._prog_prefix, name) - - # create a pseudo-action to hold the choice help - if 'help' in kwargs: - help = kwargs.pop('help') - choice_action = self._ChoicesPseudoAction(name, help) - self._choices_actions.append(choice_action) - - # create the parser and add it to the map - parser = self._parser_class(**kwargs) - self._name_parser_map[name] = parser - return parser - - def _get_subactions(self): - return self._choices_actions - - def __call__(self, parser, namespace, values, option_string=None): - parser_name = values[0] - arg_strings = values[1:] - - # set the parser name if requested - if self.dest is not SUPPRESS: - setattr(namespace, self.dest, parser_name) - - # 
select the parser - try: - parser = self._name_parser_map[parser_name] - except KeyError: - tup = parser_name, ', '.join(self._name_parser_map) - msg = _('unknown parser %r (choices: %s)' % tup) - raise ArgumentError(self, msg) - - # parse all the remaining options into the namespace - # store any unrecognized options on the object, so that the top - # level parser can decide what to do with them - namespace, arg_strings = parser.parse_known_args(arg_strings, namespace) - if arg_strings: - vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, []) - getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings) - - -# ============== -# Type classes -# ============== - -class FileType(object): - """Factory for creating file object types - - Instances of FileType are typically passed as type= arguments to the - ArgumentParser add_argument() method. - - Keyword Arguments: - - mode -- A string indicating how the file is to be opened. Accepts the - same values as the builtin open() function. - - bufsize -- The file's desired buffer size. Accepts the same values as - the builtin open() function. - """ - - def __init__(self, mode='r', bufsize=None): - self._mode = mode - self._bufsize = bufsize - - def __call__(self, string): - # the special argument "-" means sys.std{in,out} - if string == '-': - if 'r' in self._mode: - return _sys.stdin - elif 'w' in self._mode: - return _sys.stdout - else: - msg = _('argument "-" with mode %r' % self._mode) - raise ValueError(msg) - - # all other arguments are used as file names - if self._bufsize: - return open(string, self._mode, self._bufsize) - else: - return open(string, self._mode) - - def __repr__(self): - args = [self._mode, self._bufsize] - args_str = ', '.join([repr(arg) for arg in args if arg is not None]) - return '%s(%s)' % (type(self).__name__, args_str) - -# =========================== -# Optional and Positional Parsing -# =========================== - -class Namespace(_AttributeHolder): - """Simple object for storing attributes. - - Implements equality by attribute names and values, and provides a simple - string representation. 
- """ - - def __init__(self, **kwargs): - for name in kwargs: - setattr(self, name, kwargs[name]) - - __hash__ = None - - def __eq__(self, other): - return vars(self) == vars(other) - - def __ne__(self, other): - return not (self == other) - - def __contains__(self, key): - return key in self.__dict__ - - -class _ActionsContainer(object): - - def __init__(self, - description, - prefix_chars, - argument_default, - conflict_handler): - super(_ActionsContainer, self).__init__() - - self.description = description - self.argument_default = argument_default - self.prefix_chars = prefix_chars - self.conflict_handler = conflict_handler - - # set up registries - self._registries = {} - - # register actions - self.register('action', None, _StoreAction) - self.register('action', 'store', _StoreAction) - self.register('action', 'store_const', _StoreConstAction) - self.register('action', 'store_true', _StoreTrueAction) - self.register('action', 'store_false', _StoreFalseAction) - self.register('action', 'append', _AppendAction) - self.register('action', 'append_const', _AppendConstAction) - self.register('action', 'count', _CountAction) - self.register('action', 'help', _HelpAction) - self.register('action', 'version', _VersionAction) - self.register('action', 'parsers', _SubParsersAction) - - # raise an exception if the conflict handler is invalid - self._get_handler() - - # action storage - self._actions = [] - self._option_string_actions = {} - - # groups - self._action_groups = [] - self._mutually_exclusive_groups = [] - - # defaults storage - self._defaults = {} - - # determines whether an "option" looks like a negative number - self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$') - - # whether or not there are any optionals that look like negative - # numbers -- uses a list so it can be shared and edited - self._has_negative_number_optionals = [] - - # ==================== - # Registration methods - # ==================== - def register(self, registry_name, value, object): - registry = self._registries.setdefault(registry_name, {}) - registry[value] = object - - def _registry_get(self, registry_name, value, default=None): - return self._registries[registry_name].get(value, default) - - # ================================== - # Namespace default accessor methods - # ================================== - def set_defaults(self, **kwargs): - self._defaults.update(kwargs) - - # if these defaults match any existing arguments, replace - # the previous default on the object with the new one - for action in self._actions: - if action.dest in kwargs: - action.default = kwargs[action.dest] - - def get_default(self, dest): - for action in self._actions: - if action.dest == dest and action.default is not None: - return action.default - return self._defaults.get(dest, None) - - - # ======================= - # Adding argument actions - # ======================= - def add_argument(self, *args, **kwargs): - """ - add_argument(dest, ..., name=value, ...) - add_argument(option_string, option_string, ..., name=value, ...) 
- """ - - # if no positional args are supplied or only one is supplied and - # it doesn't look like an option string, parse a positional - # argument - chars = self.prefix_chars - if not args or len(args) == 1 and args[0][0] not in chars: - if args and 'dest' in kwargs: - raise ValueError('dest supplied twice for positional argument') - kwargs = self._get_positional_kwargs(*args, **kwargs) - - # otherwise, we're adding an optional argument - else: - kwargs = self._get_optional_kwargs(*args, **kwargs) - - # if no default was supplied, use the parser-level default - if 'default' not in kwargs: - dest = kwargs['dest'] - if dest in self._defaults: - kwargs['default'] = self._defaults[dest] - elif self.argument_default is not None: - kwargs['default'] = self.argument_default - - # create the action object, and add it to the parser - action_class = self._pop_action_class(kwargs) - if not _callable(action_class): - raise ValueError('unknown action "%s"' % action_class) - action = action_class(**kwargs) - - # raise an error if the action type is not callable - type_func = self._registry_get('type', action.type, action.type) - if not _callable(type_func): - raise ValueError('%r is not callable' % type_func) - - return self._add_action(action) - - def add_argument_group(self, *args, **kwargs): - group = _ArgumentGroup(self, *args, **kwargs) - self._action_groups.append(group) - return group - - def add_mutually_exclusive_group(self, **kwargs): - group = _MutuallyExclusiveGroup(self, **kwargs) - self._mutually_exclusive_groups.append(group) - return group - - def _add_action(self, action): - # resolve any conflicts - self._check_conflict(action) - - # add to actions list - self._actions.append(action) - action.container = self - - # index the action by any option strings it has - for option_string in action.option_strings: - self._option_string_actions[option_string] = action - - # set the flag if any option strings look like negative numbers - for option_string in action.option_strings: - if self._negative_number_matcher.match(option_string): - if not self._has_negative_number_optionals: - self._has_negative_number_optionals.append(True) - - # return the created action - return action - - def _remove_action(self, action): - self._actions.remove(action) - - def _add_container_actions(self, container): - # collect groups by titles - title_group_map = {} - for group in self._action_groups: - if group.title in title_group_map: - msg = _('cannot merge actions - two groups are named %r') - raise ValueError(msg % (group.title)) - title_group_map[group.title] = group - - # map each action to its group - group_map = {} - for group in container._action_groups: - - # if a group with the title exists, use that, otherwise - # create a new group matching the container's group - if group.title not in title_group_map: - title_group_map[group.title] = self.add_argument_group( - title=group.title, - description=group.description, - conflict_handler=group.conflict_handler) - - # map the actions to their new group - for action in group._group_actions: - group_map[action] = title_group_map[group.title] - - # add container's mutually exclusive groups - # NOTE: if add_mutually_exclusive_group ever gains title= and - # description= then this code will need to be expanded as above - for group in container._mutually_exclusive_groups: - mutex_group = self.add_mutually_exclusive_group( - required=group.required) - - # map the actions to their new mutex group - for action in group._group_actions: - group_map[action] = 
mutex_group - - # add all actions to this container or their group - for action in container._actions: - group_map.get(action, self)._add_action(action) - - def _get_positional_kwargs(self, dest, **kwargs): - # make sure required is not specified - if 'required' in kwargs: - msg = _("'required' is an invalid argument for positionals") - raise TypeError(msg) - - # mark positional arguments as required if at least one is - # always required - if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]: - kwargs['required'] = True - if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs: - kwargs['required'] = True - - # return the keyword arguments with no option strings - return dict(kwargs, dest=dest, option_strings=[]) - - def _get_optional_kwargs(self, *args, **kwargs): - # determine short and long option strings - option_strings = [] - long_option_strings = [] - for option_string in args: - # error on strings that don't start with an appropriate prefix - if not option_string[0] in self.prefix_chars: - msg = _('invalid option string %r: ' - 'must start with a character %r') - tup = option_string, self.prefix_chars - raise ValueError(msg % tup) - - # strings starting with two prefix characters are long options - option_strings.append(option_string) - if option_string[0] in self.prefix_chars: - if len(option_string) > 1: - if option_string[1] in self.prefix_chars: - long_option_strings.append(option_string) - - # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x' - dest = kwargs.pop('dest', None) - if dest is None: - if long_option_strings: - dest_option_string = long_option_strings[0] - else: - dest_option_string = option_strings[0] - dest = dest_option_string.lstrip(self.prefix_chars) - if not dest: - msg = _('dest= is required for options like %r') - raise ValueError(msg % option_string) - dest = dest.replace('-', '_') - - # return the updated keyword arguments - return dict(kwargs, dest=dest, option_strings=option_strings) - - def _pop_action_class(self, kwargs, default=None): - action = kwargs.pop('action', default) - return self._registry_get('action', action, action) - - def _get_handler(self): - # determine function from conflict handler string - handler_func_name = '_handle_conflict_%s' % self.conflict_handler - try: - return getattr(self, handler_func_name) - except AttributeError: - msg = _('invalid conflict_resolution value: %r') - raise ValueError(msg % self.conflict_handler) - - def _check_conflict(self, action): - - # find all options that conflict with this option - confl_optionals = [] - for option_string in action.option_strings: - if option_string in self._option_string_actions: - confl_optional = self._option_string_actions[option_string] - confl_optionals.append((option_string, confl_optional)) - - # resolve any conflicts - if confl_optionals: - conflict_handler = self._get_handler() - conflict_handler(action, confl_optionals) - - def _handle_conflict_error(self, action, conflicting_actions): - message = _('conflicting option string(s): %s') - conflict_string = ', '.join([option_string - for option_string, action - in conflicting_actions]) - raise ArgumentError(action, message % conflict_string) - - def _handle_conflict_resolve(self, action, conflicting_actions): - - # remove all conflicting options - for option_string, action in conflicting_actions: - - # remove the conflicting option - action.option_strings.remove(option_string) - self._option_string_actions.pop(option_string, None) - - # if the option now has no option string, remove it from the - # 
container holding it - if not action.option_strings: - action.container._remove_action(action) - - -class _ArgumentGroup(_ActionsContainer): - - def __init__(self, container, title=None, description=None, **kwargs): - # add any missing keyword arguments by checking the container - update = kwargs.setdefault - update('conflict_handler', container.conflict_handler) - update('prefix_chars', container.prefix_chars) - update('argument_default', container.argument_default) - super_init = super(_ArgumentGroup, self).__init__ - super_init(description=description, **kwargs) - - # group attributes - self.title = title - self._group_actions = [] - - # share most attributes with the container - self._registries = container._registries - self._actions = container._actions - self._option_string_actions = container._option_string_actions - self._defaults = container._defaults - self._has_negative_number_optionals = \ - container._has_negative_number_optionals - - def _add_action(self, action): - action = super(_ArgumentGroup, self)._add_action(action) - self._group_actions.append(action) - return action - - def _remove_action(self, action): - super(_ArgumentGroup, self)._remove_action(action) - self._group_actions.remove(action) - - -class _MutuallyExclusiveGroup(_ArgumentGroup): - - def __init__(self, container, required=False): - super(_MutuallyExclusiveGroup, self).__init__(container) - self.required = required - self._container = container - - def _add_action(self, action): - if action.required: - msg = _('mutually exclusive arguments must be optional') - raise ValueError(msg) - action = self._container._add_action(action) - self._group_actions.append(action) - return action - - def _remove_action(self, action): - self._container._remove_action(action) - self._group_actions.remove(action) - - -class ArgumentParser(_AttributeHolder, _ActionsContainer): - """Object for parsing command line strings into Python objects. - - Keyword Arguments: - - prog -- The name of the program (default: sys.argv[0]) - - usage -- A usage message (default: auto-generated from arguments) - - description -- A description of what the program does - - epilog -- Text following the argument descriptions - - parents -- Parsers whose arguments should be copied into this one - - formatter_class -- HelpFormatter class for printing help messages - - prefix_chars -- Characters that prefix optional arguments - - fromfile_prefix_chars -- Characters that prefix files containing - additional arguments - - argument_default -- The default value for all arguments - - conflict_handler -- String indicating how to handle conflicts - - add_help -- Add a -h/-help option - """ - - def __init__(self, - prog=None, - usage=None, - description=None, - epilog=None, - version=None, - parents=[], - formatter_class=HelpFormatter, - prefix_chars='-', - fromfile_prefix_chars=None, - argument_default=None, - conflict_handler='error', - add_help=True): - - if version is not None: - import warnings - warnings.warn( - """The "version" argument to ArgumentParser is deprecated. 
""" - """Please use """ - """"add_argument(..., action='version', version="N", ...)" """ - """instead""", DeprecationWarning) - - superinit = super(ArgumentParser, self).__init__ - superinit(description=description, - prefix_chars=prefix_chars, - argument_default=argument_default, - conflict_handler=conflict_handler) - - # default setting for prog - if prog is None: - prog = _os.path.basename(_sys.argv[0]) - - self.prog = prog - self.usage = usage - self.epilog = epilog - self.version = version - self.formatter_class = formatter_class - self.fromfile_prefix_chars = fromfile_prefix_chars - self.add_help = add_help - - add_group = self.add_argument_group - self._positionals = add_group(_('positional arguments')) - self._optionals = add_group(_('optional arguments')) - self._subparsers = None - - # register types - def identity(string): - return string - self.register('type', None, identity) - - # add help and version arguments if necessary - # (using explicit default to override global argument_default) - if '-' in prefix_chars: - default_prefix = '-' - else: - default_prefix = prefix_chars[0] - if self.add_help: - self.add_argument( - default_prefix+'h', default_prefix*2+'help', - action='help', default=SUPPRESS, - help=_('show this help message and exit')) - if self.version: - self.add_argument( - default_prefix+'v', default_prefix*2+'version', - action='version', default=SUPPRESS, - version=self.version, - help=_("show program's version number and exit")) - - # add parent arguments and defaults - for parent in parents: - self._add_container_actions(parent) - try: - defaults = parent._defaults - except AttributeError: - pass - else: - self._defaults.update(defaults) - - # ======================= - # Pretty __repr__ methods - # ======================= - def _get_kwargs(self): - names = [ - 'prog', - 'usage', - 'description', - 'version', - 'formatter_class', - 'conflict_handler', - 'add_help', - ] - return [(name, getattr(self, name)) for name in names] - - # ================================== - # Optional/Positional adding methods - # ================================== - def add_subparsers(self, **kwargs): - if self._subparsers is not None: - self.error(_('cannot have multiple subparser arguments')) - - # add the parser class to the arguments if it's not present - kwargs.setdefault('parser_class', type(self)) - - if 'title' in kwargs or 'description' in kwargs: - title = _(kwargs.pop('title', 'subcommands')) - description = _(kwargs.pop('description', None)) - self._subparsers = self.add_argument_group(title, description) - else: - self._subparsers = self._positionals - - # prog defaults to the usage message of this parser, skipping - # optional arguments and with no "usage:" prefix - if kwargs.get('prog') is None: - formatter = self._get_formatter() - positionals = self._get_positional_actions() - groups = self._mutually_exclusive_groups - formatter.add_usage(self.usage, positionals, groups, '') - kwargs['prog'] = formatter.format_help().strip() - - # create the parsers action and add it to the positionals list - parsers_class = self._pop_action_class(kwargs, 'parsers') - action = parsers_class(option_strings=[], **kwargs) - self._subparsers._add_action(action) - - # return the created parsers action - return action - - def _add_action(self, action): - if action.option_strings: - self._optionals._add_action(action) - else: - self._positionals._add_action(action) - return action - - def _get_optional_actions(self): - return [action - for action in self._actions - if action.option_strings] 
-
-    def _get_positional_actions(self):
-        return [action
-                for action in self._actions
-                if not action.option_strings]
-
-    # =====================================
-    # Command line argument parsing methods
-    # =====================================
-    def parse_args(self, args=None, namespace=None):
-        args, argv = self.parse_known_args(args, namespace)
-        if argv:
-            msg = _('unrecognized arguments: %s')
-            self.error(msg % ' '.join(argv))
-        return args
-
-    def parse_known_args(self, args=None, namespace=None):
-        # args default to the system args
-        if args is None:
-            args = _sys.argv[1:]
-
-        # default Namespace built from parser defaults
-        if namespace is None:
-            namespace = Namespace()
-
-        # add any action defaults that aren't present
-        for action in self._actions:
-            if action.dest is not SUPPRESS:
-                if not hasattr(namespace, action.dest):
-                    if action.default is not SUPPRESS:
-                        default = action.default
-                        if isinstance(action.default, basestring):
-                            default = self._get_value(action, default)
-                        setattr(namespace, action.dest, default)
-
-        # add any parser defaults that aren't present
-        for dest in self._defaults:
-            if not hasattr(namespace, dest):
-                setattr(namespace, dest, self._defaults[dest])
-
-        # parse the arguments and exit if there are any errors
-        try:
-            namespace, args = self._parse_known_args(args, namespace)
-            if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
-                args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))
-                delattr(namespace, _UNRECOGNIZED_ARGS_ATTR)
-            return namespace, args
-        except ArgumentError:
-            err = _sys.exc_info()[1]
-            self.error(str(err))
-
-    def _parse_known_args(self, arg_strings, namespace):
-        # replace arg strings that are file references
-        if self.fromfile_prefix_chars is not None:
-            arg_strings = self._read_args_from_files(arg_strings)
-
-        # map all mutually exclusive arguments to the other arguments
-        # they can't occur with
-        action_conflicts = {}
-        for mutex_group in self._mutually_exclusive_groups:
-            group_actions = mutex_group._group_actions
-            for i, mutex_action in enumerate(mutex_group._group_actions):
-                conflicts = action_conflicts.setdefault(mutex_action, [])
-                conflicts.extend(group_actions[:i])
-                conflicts.extend(group_actions[i + 1:])
-
-        # find all option indices, and determine the arg_string_pattern
-        # which has an 'O' if there is an option at an index,
-        # an 'A' if there is an argument, or a '-' if there is a '--'
-        option_string_indices = {}
-        arg_string_pattern_parts = []
-        arg_strings_iter = iter(arg_strings)
-        for i, arg_string in enumerate(arg_strings_iter):
-
-            # all args after -- are non-options
-            if arg_string == '--':
-                arg_string_pattern_parts.append('-')
-                for arg_string in arg_strings_iter:
-                    arg_string_pattern_parts.append('A')
-
-            # otherwise, add the arg to the arg strings
-            # and note the index if it was an option
-            else:
-                option_tuple = self._parse_optional(arg_string)
-                if option_tuple is None:
-                    pattern = 'A'
-                else:
-                    option_string_indices[i] = option_tuple
-                    pattern = 'O'
-                arg_string_pattern_parts.append(pattern)
-
-        # join the pieces together to form the pattern
-        arg_strings_pattern = ''.join(arg_string_pattern_parts)
-
-        # converts arg strings to the appropriate type and then takes the action
-        seen_actions = set()
-        seen_non_default_actions = set()
-
-        def take_action(action, argument_strings, option_string=None):
-            seen_actions.add(action)
-            argument_values = self._get_values(action, argument_strings)
-
-            # error if this argument is not allowed with other previously
-            # seen arguments, assuming that actions that use the default
-            # value don't really count as "present"
-            if argument_values is not action.default:
-                seen_non_default_actions.add(action)
-                for conflict_action in action_conflicts.get(action, []):
-                    if conflict_action in seen_non_default_actions:
-                        msg = _('not allowed with argument %s')
-                        action_name = _get_action_name(conflict_action)
-                        raise ArgumentError(action, msg % action_name)
-
-            # take the action if we didn't receive a SUPPRESS value
-            # (e.g. from a default)
-            if argument_values is not SUPPRESS:
-                action(self, namespace, argument_values, option_string)
-
-        # function to convert arg_strings into an optional action
-        def consume_optional(start_index):
-
-            # get the optional identified at this index
-            option_tuple = option_string_indices[start_index]
-            action, option_string, explicit_arg = option_tuple
-
-            # identify additional optionals in the same arg string
-            # (e.g. -xyz is the same as -x -y -z if no args are required)
-            match_argument = self._match_argument
-            action_tuples = []
-            while True:
-
-                # if we found no optional action, skip it
-                if action is None:
-                    extras.append(arg_strings[start_index])
-                    return start_index + 1
-
-                # if there is an explicit argument, try to match the
-                # optional's string arguments to only this
-                if explicit_arg is not None:
-                    arg_count = match_argument(action, 'A')
-
-                    # if the action is a single-dash option and takes no
-                    # arguments, try to parse more single-dash options out
-                    # of the tail of the option string
-                    chars = self.prefix_chars
-                    if arg_count == 0 and option_string[1] not in chars:
-                        action_tuples.append((action, [], option_string))
-                        char = option_string[0]
-                        option_string = char + explicit_arg[0]
-                        new_explicit_arg = explicit_arg[1:] or None
-                        optionals_map = self._option_string_actions
-                        if option_string in optionals_map:
-                            action = optionals_map[option_string]
-                            explicit_arg = new_explicit_arg
-                        else:
-                            msg = _('ignored explicit argument %r')
-                            raise ArgumentError(action, msg % explicit_arg)
-
-                    # if the action expects exactly one argument, we've
-                    # successfully matched the option; exit the loop
-                    elif arg_count == 1:
-                        stop = start_index + 1
-                        args = [explicit_arg]
-                        action_tuples.append((action, args, option_string))
-                        break
-
-                    # error if a double-dash option did not use the
-                    # explicit argument
-                    else:
-                        msg = _('ignored explicit argument %r')
-                        raise ArgumentError(action, msg % explicit_arg)
-
-                # if there is no explicit argument, try to match the
-                # optional's string arguments with the following strings
-                # if successful, exit the loop
-                else:
-                    start = start_index + 1
-                    selected_patterns = arg_strings_pattern[start:]
-                    arg_count = match_argument(action, selected_patterns)
-                    stop = start + arg_count
-                    args = arg_strings[start:stop]
-                    action_tuples.append((action, args, option_string))
-                    break
-
-            # add the Optional to the list and return the index at which
-            # the Optional's string args stopped
-            assert action_tuples
-            for action, args, option_string in action_tuples:
-                take_action(action, args, option_string)
-            return stop
-
-        # the list of Positionals left to be parsed; this is modified
-        # by consume_positionals()
-        positionals = self._get_positional_actions()
-
-        # function to convert arg_strings into positional actions
-        def consume_positionals(start_index):
-            # match as many Positionals as possible
-            match_partial = self._match_arguments_partial
-            selected_pattern = arg_strings_pattern[start_index:]
-            arg_counts = match_partial(positionals, selected_pattern)
-
-            # slice off the appropriate arg strings for each Positional
- # and add the Positional and its args to the list - for action, arg_count in zip(positionals, arg_counts): - args = arg_strings[start_index: start_index + arg_count] - start_index += arg_count - take_action(action, args) - - # slice off the Positionals that we just parsed and return the - # index at which the Positionals' string args stopped - positionals[:] = positionals[len(arg_counts):] - return start_index - - # consume Positionals and Optionals alternately, until we have - # passed the last option string - extras = [] - start_index = 0 - if option_string_indices: - max_option_string_index = max(option_string_indices) - else: - max_option_string_index = -1 - while start_index <= max_option_string_index: - - # consume any Positionals preceding the next option - next_option_string_index = min([ - index - for index in option_string_indices - if index >= start_index]) - if start_index != next_option_string_index: - positionals_end_index = consume_positionals(start_index) - - # only try to parse the next optional if we didn't consume - # the option string during the positionals parsing - if positionals_end_index > start_index: - start_index = positionals_end_index - continue - else: - start_index = positionals_end_index - - # if we consumed all the positionals we could and we're not - # at the index of an option string, there were extra arguments - if start_index not in option_string_indices: - strings = arg_strings[start_index:next_option_string_index] - extras.extend(strings) - start_index = next_option_string_index - - # consume the next optional and any arguments for it - start_index = consume_optional(start_index) - - # consume any positionals following the last Optional - stop_index = consume_positionals(start_index) - - # if we didn't consume all the argument strings, there were extras - extras.extend(arg_strings[stop_index:]) - - # if we didn't use all the Positional objects, there were too few - # arg strings supplied. 
- if positionals: - self.error(_('too few arguments')) - - # make sure all required actions were present - for action in self._actions: - if action.required: - if action not in seen_actions: - name = _get_action_name(action) - self.error(_('argument %s is required') % name) - - # make sure all required groups had one option present - for group in self._mutually_exclusive_groups: - if group.required: - for action in group._group_actions: - if action in seen_non_default_actions: - break - - # if no actions were used, report the error - else: - names = [_get_action_name(action) - for action in group._group_actions - if action.help is not SUPPRESS] - msg = _('one of the arguments %s is required') - self.error(msg % ' '.join(names)) - - # return the updated namespace and the extra arguments - return namespace, extras - - def _read_args_from_files(self, arg_strings): - # expand arguments referencing files - new_arg_strings = [] - for arg_string in arg_strings: - - # for regular arguments, just add them back into the list - if arg_string[0] not in self.fromfile_prefix_chars: - new_arg_strings.append(arg_string) - - # replace arguments referencing files with the file content - else: - try: - args_file = open(arg_string[1:]) - try: - arg_strings = [] - for arg_line in args_file.read().splitlines(): - for arg in self.convert_arg_line_to_args(arg_line): - arg_strings.append(arg) - arg_strings = self._read_args_from_files(arg_strings) - new_arg_strings.extend(arg_strings) - finally: - args_file.close() - except IOError: - err = _sys.exc_info()[1] - self.error(str(err)) - - # return the modified argument list - return new_arg_strings - - def convert_arg_line_to_args(self, arg_line): - return [arg_line] - - def _match_argument(self, action, arg_strings_pattern): - # match the pattern for this action to the arg strings - nargs_pattern = self._get_nargs_pattern(action) - match = _re.match(nargs_pattern, arg_strings_pattern) - - # raise an exception if we weren't able to find a match - if match is None: - nargs_errors = { - None: _('expected one argument'), - OPTIONAL: _('expected at most one argument'), - ONE_OR_MORE: _('expected at least one argument'), - } - default = _('expected %s argument(s)') % action.nargs - msg = nargs_errors.get(action.nargs, default) - raise ArgumentError(action, msg) - - # return the number of arguments matched - return len(match.group(1)) - - def _match_arguments_partial(self, actions, arg_strings_pattern): - # progressively shorten the actions list by slicing off the - # final actions until we find a match - result = [] - for i in range(len(actions), 0, -1): - actions_slice = actions[:i] - pattern = ''.join([self._get_nargs_pattern(action) - for action in actions_slice]) - match = _re.match(pattern, arg_strings_pattern) - if match is not None: - result.extend([len(string) for string in match.groups()]) - break - - # return the list of arg string counts - return result - - def _parse_optional(self, arg_string): - # if it's an empty string, it was meant to be a positional - if not arg_string: - return None - - # if it doesn't start with a prefix, it was meant to be positional - if not arg_string[0] in self.prefix_chars: - return None - - # if the option string is present in the parser, return the action - if arg_string in self._option_string_actions: - action = self._option_string_actions[arg_string] - return action, arg_string, None - - # if it's just a single character, it was meant to be positional - if len(arg_string) == 1: - return None - - # if the option string before 
the "=" is present, return the action - if '=' in arg_string: - option_string, explicit_arg = arg_string.split('=', 1) - if option_string in self._option_string_actions: - action = self._option_string_actions[option_string] - return action, option_string, explicit_arg - - # search through all possible prefixes of the option string - # and all actions in the parser for possible interpretations - option_tuples = self._get_option_tuples(arg_string) - - # if multiple actions match, the option string was ambiguous - if len(option_tuples) > 1: - options = ', '.join([option_string - for action, option_string, explicit_arg in option_tuples]) - tup = arg_string, options - self.error(_('ambiguous option: %s could match %s') % tup) - - # if exactly one action matched, this segmentation is good, - # so return the parsed action - elif len(option_tuples) == 1: - option_tuple, = option_tuples - return option_tuple - - # if it was not found as an option, but it looks like a negative - # number, it was meant to be positional - # unless there are negative-number-like options - if self._negative_number_matcher.match(arg_string): - if not self._has_negative_number_optionals: - return None - - # if it contains a space, it was meant to be a positional - if ' ' in arg_string: - return None - - # it was meant to be an optional but there is no such option - # in this parser (though it might be a valid option in a subparser) - return None, arg_string, None - - def _get_option_tuples(self, option_string): - result = [] - - # option strings starting with two prefix characters are only - # split at the '=' - chars = self.prefix_chars - if option_string[0] in chars and option_string[1] in chars: - if '=' in option_string: - option_prefix, explicit_arg = option_string.split('=', 1) - else: - option_prefix = option_string - explicit_arg = None - for option_string in self._option_string_actions: - if option_string.startswith(option_prefix): - action = self._option_string_actions[option_string] - tup = action, option_string, explicit_arg - result.append(tup) - - # single character options can be concatenated with their arguments - # but multiple character options always have to have their argument - # separate - elif option_string[0] in chars and option_string[1] not in chars: - option_prefix = option_string - explicit_arg = None - short_option_prefix = option_string[:2] - short_explicit_arg = option_string[2:] - - for option_string in self._option_string_actions: - if option_string == short_option_prefix: - action = self._option_string_actions[option_string] - tup = action, option_string, short_explicit_arg - result.append(tup) - elif option_string.startswith(option_prefix): - action = self._option_string_actions[option_string] - tup = action, option_string, explicit_arg - result.append(tup) - - # shouldn't ever get here - else: - self.error(_('unexpected option string: %s') % option_string) - - # return the collected option tuples - return result - - def _get_nargs_pattern(self, action): - # in all examples below, we have to allow for '--' args - # which are represented as '-' in the pattern - nargs = action.nargs - - # the default (None) is assumed to be a single argument - if nargs is None: - nargs_pattern = '(-*A-*)' - - # allow zero or one arguments - elif nargs == OPTIONAL: - nargs_pattern = '(-*A?-*)' - - # allow zero or more arguments - elif nargs == ZERO_OR_MORE: - nargs_pattern = '(-*[A-]*)' - - # allow one or more arguments - elif nargs == ONE_OR_MORE: - nargs_pattern = '(-*A[A-]*)' - - # allow any number of 
options or arguments - elif nargs == REMAINDER: - nargs_pattern = '([-AO]*)' - - # allow one argument followed by any number of options or arguments - elif nargs == PARSER: - nargs_pattern = '(-*A[-AO]*)' - - # all others should be integers - else: - nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs) - - # if this is an optional action, -- is not allowed - if action.option_strings: - nargs_pattern = nargs_pattern.replace('-*', '') - nargs_pattern = nargs_pattern.replace('-', '') - - # return the pattern - return nargs_pattern - - # ======================== - # Value conversion methods - # ======================== - def _get_values(self, action, arg_strings): - # for everything but PARSER args, strip out '--' - if action.nargs not in [PARSER, REMAINDER]: - arg_strings = [s for s in arg_strings if s != '--'] - - # optional argument produces a default when not present - if not arg_strings and action.nargs == OPTIONAL: - if action.option_strings: - value = action.const - else: - value = action.default - if isinstance(value, basestring): - value = self._get_value(action, value) - self._check_value(action, value) - - # when nargs='*' on a positional, if there were no command-line - # args, use the default if it is anything other than None - elif (not arg_strings and action.nargs == ZERO_OR_MORE and - not action.option_strings): - if action.default is not None: - value = action.default - else: - value = arg_strings - self._check_value(action, value) - - # single argument or optional argument produces a single value - elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]: - arg_string, = arg_strings - value = self._get_value(action, arg_string) - self._check_value(action, value) - - # REMAINDER arguments convert all values, checking none - elif action.nargs == REMAINDER: - value = [self._get_value(action, v) for v in arg_strings] - - # PARSER arguments convert all values, but check only the first - elif action.nargs == PARSER: - value = [self._get_value(action, v) for v in arg_strings] - self._check_value(action, value[0]) - - # all other types of nargs produce a list - else: - value = [self._get_value(action, v) for v in arg_strings] - for v in value: - self._check_value(action, v) - - # return the converted value - return value - - def _get_value(self, action, arg_string): - type_func = self._registry_get('type', action.type, action.type) - if not _callable(type_func): - msg = _('%r is not callable') - raise ArgumentError(action, msg % type_func) - - # convert the value to the appropriate type - try: - result = type_func(arg_string) - - # ArgumentTypeErrors indicate errors - except ArgumentTypeError: - name = getattr(action.type, '__name__', repr(action.type)) - msg = str(_sys.exc_info()[1]) - raise ArgumentError(action, msg) - - # TypeErrors or ValueErrors also indicate errors - except (TypeError, ValueError): - name = getattr(action.type, '__name__', repr(action.type)) - msg = _('invalid %s value: %r') - raise ArgumentError(action, msg % (name, arg_string)) - - # return the converted value - return result - - def _check_value(self, action, value): - # converted value must be one of the choices (if specified) - if action.choices is not None and value not in action.choices: - tup = value, ', '.join(map(repr, action.choices)) - msg = _('invalid choice: %r (choose from %s)') % tup - raise ArgumentError(action, msg) - - # ======================= - # Help-formatting methods - # ======================= - def format_usage(self): - formatter = self._get_formatter() - 
formatter.add_usage(self.usage, self._actions, - self._mutually_exclusive_groups) - return formatter.format_help() - - def format_help(self): - formatter = self._get_formatter() - - # usage - formatter.add_usage(self.usage, self._actions, - self._mutually_exclusive_groups) - - # description - formatter.add_text(self.description) - - # positionals, optionals and user-defined groups - for action_group in self._action_groups: - formatter.start_section(action_group.title) - formatter.add_text(action_group.description) - formatter.add_arguments(action_group._group_actions) - formatter.end_section() - - # epilog - formatter.add_text(self.epilog) - - # determine help from format above - return formatter.format_help() - - def format_version(self): - import warnings - warnings.warn( - 'The format_version method is deprecated -- the "version" ' - 'argument to ArgumentParser is no longer supported.', - DeprecationWarning) - formatter = self._get_formatter() - formatter.add_text(self.version) - return formatter.format_help() - - def _get_formatter(self): - return self.formatter_class(prog=self.prog) - - # ===================== - # Help-printing methods - # ===================== - def print_usage(self, file=None): - if file is None: - file = _sys.stdout - self._print_message(self.format_usage(), file) - - def print_help(self, file=None): - if file is None: - file = _sys.stdout - self._print_message(self.format_help(), file) - - def print_version(self, file=None): - import warnings - warnings.warn( - 'The print_version method is deprecated -- the "version" ' - 'argument to ArgumentParser is no longer supported.', - DeprecationWarning) - self._print_message(self.format_version(), file) - - def _print_message(self, message, file=None): - if message: - if file is None: - file = _sys.stderr - file.write(message) - - # =============== - # Exiting methods - # =============== - def exit(self, status=0, message=None): - if message: - self._print_message(message, _sys.stderr) - _sys.exit(status) - - def error(self, message): - """error(message: string) - - Prints a usage message incorporating the message to stderr and - exits. - - If you override this in a subclass, it should not return -- it - should either exit or raise an exception. - """ - self.print_usage(_sys.stderr) - self.exit(2, _('%s: error: %s\n') % (self.prog, message)) diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/config.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/config.py deleted file mode 100644 index aa97894..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/config.py +++ /dev/null @@ -1,1950 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. - -# Please remember to run "make -C docs html" after update "desc" attributes. - -import copy -import grp -import inspect -try: - import argparse -except ImportError: # python 2.6 - from . 
import argparse_compat as argparse -import os -import pwd -import re -import ssl -import sys -import textwrap -import shlex - -from gunicorn import __version__ -from gunicorn import _compat -from gunicorn.errors import ConfigError -from gunicorn.reloader import reloader_engines -from gunicorn import six -from gunicorn import util - -KNOWN_SETTINGS = [] -PLATFORM = sys.platform - - -def make_settings(ignore=None): - settings = {} - ignore = ignore or () - for s in KNOWN_SETTINGS: - setting = s() - if setting.name in ignore: - continue - settings[setting.name] = setting.copy() - return settings - - -def auto_int(_, x): - # for compatible with octal numbers in python3 - if re.match(r'0(\d)', x, re.IGNORECASE): - x = x.replace('0', '0o', 1) - return int(x, 0) - - -class Config(object): - - def __init__(self, usage=None, prog=None): - self.settings = make_settings() - self.usage = usage - self.prog = prog or os.path.basename(sys.argv[0]) - self.env_orig = os.environ.copy() - - def __getattr__(self, name): - if name not in self.settings: - raise AttributeError("No configuration setting for: %s" % name) - return self.settings[name].get() - - def __setattr__(self, name, value): - if name != "settings" and name in self.settings: - raise AttributeError("Invalid access!") - super(Config, self).__setattr__(name, value) - - def set(self, name, value): - if name not in self.settings: - raise AttributeError("No configuration setting for: %s" % name) - self.settings[name].set(value) - - def get_cmd_args_from_env(self): - if 'GUNICORN_CMD_ARGS' in self.env_orig: - return shlex.split(self.env_orig['GUNICORN_CMD_ARGS']) - return [] - - def parser(self): - kwargs = { - "usage": self.usage, - "prog": self.prog - } - parser = argparse.ArgumentParser(**kwargs) - parser.add_argument("-v", "--version", - action="version", default=argparse.SUPPRESS, - version="%(prog)s (version " + __version__ + ")\n", - help="show program's version number and exit") - parser.add_argument("args", nargs="*", help=argparse.SUPPRESS) - - keys = sorted(self.settings, key=self.settings.__getitem__) - for k in keys: - self.settings[k].add_option(parser) - - return parser - - @property - def worker_class_str(self): - uri = self.settings['worker_class'].get() - - ## are we using a threaded worker? - is_sync = uri.endswith('SyncWorker') or uri == 'sync' - if is_sync and self.threads > 1: - return "threads" - return uri - - @property - def worker_class(self): - uri = self.settings['worker_class'].get() - - ## are we using a threaded worker? 
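[Editor's note: `Config.get_cmd_args_from_env` above tokenizes the `GUNICORN_CMD_ARGS` environment variable with `shlex`, so shell-style quoting survives. A small runnable sketch of that behavior:]

```python
import os
import shlex

# Quoting works as it would in a shell, so values containing spaces or
# special characters come through as single tokens.
os.environ["GUNICORN_CMD_ARGS"] = '--workers 4 --bind "127.0.0.1:8000"'
print(shlex.split(os.environ["GUNICORN_CMD_ARGS"]))
# ['--workers', '4', '--bind', '127.0.0.1:8000']
```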
- is_sync = uri.endswith('SyncWorker') or uri == 'sync' - if is_sync and self.threads > 1: - uri = "gunicorn.workers.gthread.ThreadWorker" - - worker_class = util.load_class(uri) - if hasattr(worker_class, "setup"): - worker_class.setup() - return worker_class - - @property - def address(self): - s = self.settings['bind'].get() - return [util.parse_address(_compat.bytes_to_str(bind)) for bind in s] - - @property - def uid(self): - return self.settings['user'].get() - - @property - def gid(self): - return self.settings['group'].get() - - @property - def proc_name(self): - pn = self.settings['proc_name'].get() - if pn is not None: - return pn - else: - return self.settings['default_proc_name'].get() - - @property - def logger_class(self): - uri = self.settings['logger_class'].get() - if uri == "simple": - # support the default - uri = LoggerClass.default - - # if default logger is in use, and statsd is on, automagically switch - # to the statsd logger - if uri == LoggerClass.default: - if 'statsd_host' in self.settings and self.settings['statsd_host'].value is not None: - uri = "gunicorn.instrument.statsd.Statsd" - - logger_class = util.load_class( - uri, - default="gunicorn.glogging.Logger", - section="gunicorn.loggers") - - if hasattr(logger_class, "install"): - logger_class.install() - return logger_class - - @property - def is_ssl(self): - return self.certfile or self.keyfile - - @property - def ssl_options(self): - opts = {} - for name, value in self.settings.items(): - if value.section == 'SSL': - opts[name] = value.get() - return opts - - @property - def env(self): - raw_env = self.settings['raw_env'].get() - env = {} - - if not raw_env: - return env - - for e in raw_env: - s = _compat.bytes_to_str(e) - try: - k, v = s.split('=', 1) - except ValueError: - raise RuntimeError("environment setting %r invalid" % s) - - env[k] = v - - return env - - @property - def sendfile(self): - if self.settings['sendfile'].get() is not None: - return False - - if 'SENDFILE' in os.environ: - sendfile = os.environ['SENDFILE'].lower() - return sendfile in ['y', '1', 'yes', 'true'] - - return True - - @property - def reuse_port(self): - return self.settings['reuse_port'].get() - - @property - def paste_global_conf(self): - raw_global_conf = self.settings['raw_paste_global_conf'].get() - if raw_global_conf is None: - return None - - global_conf = {} - for e in raw_global_conf: - s = _compat.bytes_to_str(e) - try: - k, v = re.split(r'(?<!\\)=', s, 1) - except ValueError: - raise RuntimeError("environment setting %r invalid" % s) - else: - v = v.replace('\\=', '=') - k = k.replace('\\=', '=') - global_conf[k] = v - - return global_conf - - -class WorkerClass(Setting): - name = "worker_class" - section = "Worker Processes" - cli = ["-k", "--worker-class"] - meta = "STRING" - validator = validate_class - default = "sync" - desc = """\ - The type of workers to use. - - The default class (``sync``) should handle most "normal" types of - workloads. You'll want to read :doc:`design` for information on when - you might want to choose one of the other worker classes. Required - libraries may be installed using setuptools' ``extras_require`` feature. - - A string referring to one of the following bundled classes: - - * ``sync`` - * ``eventlet`` - Requires eventlet >= 0.9.7 (or install it via - ``pip install gunicorn[eventlet]``) - * ``gevent`` - Requires gevent >= 0.13 (or install it via - ``pip install gunicorn[gevent]``) - * ``tornado`` - Requires tornado >= 0.2 (or install it via - ``pip install gunicorn[tornado]``) - * ``gthread`` - Python 2 requires the futures package to be installed - (or install it via ``pip install gunicorn[gthread]``) - * ``gaiohttp`` - Deprecated. - - Optionally, you can provide your own worker by giving Gunicorn a - Python path to a subclass of ``gunicorn.workers.base.Worker``. - This alternative syntax will load the gevent class: - ``gunicorn.workers.ggevent.GeventWorker``. - - .. deprecated:: 19.8 - The ``gaiohttp`` worker is deprecated. Please use - ``aiohttp.worker.GunicornWebWorker`` instead. See - :ref:`asyncio-workers` for more information on how to use it. 
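[Editor's note: the `worker_class` property above resolves a dotted-path string to a class via `util.load_class`. A simplified stand-in using `importlib`; gunicorn's real helper also understands entry-point style URIs, which this sketch omits:]

```python
import importlib

def load_class(uri):
    # Split "package.module.ClassName" into module path and attribute,
    # import the module, and fetch the class from it.
    module_name, _, class_name = uri.rpartition(".")
    module = importlib.import_module(module_name)
    return getattr(module, class_name)

# e.g. load_class("gunicorn.workers.ggevent.GeventWorker"), assuming
# gunicorn and gevent are importable in the environment.
```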
- """ - -class WorkerThreads(Setting): - name = "threads" - section = "Worker Processes" - cli = ["--threads"] - meta = "INT" - validator = validate_pos_int - type = int - default = 1 - desc = """\ - The number of worker threads for handling requests. - - Run each worker with the specified number of threads. - - A positive integer generally in the ``2-4 x $(NUM_CORES)`` range. - You'll want to vary this a bit to find the best for your particular - application's work load. - - If it is not defined, the default is ``1``. - - This setting only affects the Gthread worker type. - - .. note:: - If you try to use the ``sync`` worker type and set the ``threads`` - setting to more than 1, the ``gthread`` worker type will be used - instead. - """ - - -class WorkerConnections(Setting): - name = "worker_connections" - section = "Worker Processes" - cli = ["--worker-connections"] - meta = "INT" - validator = validate_pos_int - type = int - default = 1000 - desc = """\ - The maximum number of simultaneous clients. - - This setting only affects the Eventlet and Gevent worker types. - """ - - -class MaxRequests(Setting): - name = "max_requests" - section = "Worker Processes" - cli = ["--max-requests"] - meta = "INT" - validator = validate_pos_int - type = int - default = 0 - desc = """\ - The maximum number of requests a worker will process before restarting. - - Any value greater than zero will limit the number of requests a work - will process before automatically restarting. This is a simple method - to help limit the damage of memory leaks. - - If this is set to zero (the default) then the automatic worker - restarts are disabled. - """ - - -class MaxRequestsJitter(Setting): - name = "max_requests_jitter" - section = "Worker Processes" - cli = ["--max-requests-jitter"] - meta = "INT" - validator = validate_pos_int - type = int - default = 0 - desc = """\ - The maximum jitter to add to the *max_requests* setting. - - The jitter causes the restart per worker to be randomized by - ``randint(0, max_requests_jitter)``. This is intended to stagger worker - restarts to avoid all workers restarting at the same time. - - .. versionadded:: 19.2 - """ - - -class Timeout(Setting): - name = "timeout" - section = "Worker Processes" - cli = ["-t", "--timeout"] - meta = "INT" - validator = validate_pos_int - type = int - default = 30 - desc = """\ - Workers silent for more than this many seconds are killed and restarted. - - Generally set to thirty seconds. Only set this noticeably higher if - you're sure of the repercussions for sync workers. For the non sync - workers it just means that the worker process is still communicating and - is not tied to the length of time required to handle a single request. - """ - - -class GracefulTimeout(Setting): - name = "graceful_timeout" - section = "Worker Processes" - cli = ["--graceful-timeout"] - meta = "INT" - validator = validate_pos_int - type = int - default = 30 - desc = """\ - Timeout for graceful workers restart. - - After receiving a restart signal, workers have this much time to finish - serving requests. Workers still alive after the timeout (starting from - the receipt of the restart signal) are force killed. - """ - - -class Keepalive(Setting): - name = "keepalive" - section = "Worker Processes" - cli = ["--keep-alive"] - meta = "INT" - validator = validate_pos_int - type = int - default = 2 - desc = """\ - The number of seconds to wait for requests on a Keep-Alive connection. 
- - Generally set in the 1-5 seconds range for servers with direct connection - to the client (e.g. when you don't have separate load balancer). When - Gunicorn is deployed behind a load balancer, it often makes sense to - set this to a higher value. - - .. note:: - ``sync`` worker does not support persistent connections and will - ignore this option. - """ - - -class LimitRequestLine(Setting): - name = "limit_request_line" - section = "Security" - cli = ["--limit-request-line"] - meta = "INT" - validator = validate_pos_int - type = int - default = 4094 - desc = """\ - The maximum size of HTTP request line in bytes. - - This parameter is used to limit the allowed size of a client's - HTTP request-line. Since the request-line consists of the HTTP - method, URI, and protocol version, this directive places a - restriction on the length of a request-URI allowed for a request - on the server. A server needs this value to be large enough to - hold any of its resource names, including any information that - might be passed in the query part of a GET request. Value is a number - from 0 (unlimited) to 8190. - - This parameter can be used to prevent any DDOS attack. - """ - - -class LimitRequestFields(Setting): - name = "limit_request_fields" - section = "Security" - cli = ["--limit-request-fields"] - meta = "INT" - validator = validate_pos_int - type = int - default = 100 - desc = """\ - Limit the number of HTTP headers fields in a request. - - This parameter is used to limit the number of headers in a request to - prevent DDOS attack. Used with the *limit_request_field_size* it allows - more safety. By default this value is 100 and can't be larger than - 32768. - """ - - -class LimitRequestFieldSize(Setting): - name = "limit_request_field_size" - section = "Security" - cli = ["--limit-request-field_size"] - meta = "INT" - validator = validate_pos_int - type = int - default = 8190 - desc = """\ - Limit the allowed size of an HTTP request header field. - - Value is a positive number or 0. Setting it to 0 will allow unlimited - header field sizes. - - .. warning:: - Setting this parameter to a very high or unlimited value can open - up for DDOS attacks. - """ - - -class Reload(Setting): - name = "reload" - section = 'Debugging' - cli = ['--reload'] - validator = validate_bool - action = 'store_true' - default = False - - desc = '''\ - Restart workers when code changes. - - This setting is intended for development. It will cause workers to be - restarted whenever application code changes. - - The reloader is incompatible with application preloading. When using a - paste configuration be sure that the server block does not import any - application code or the reload will not work as designed. - - The default behavior is to attempt inotify with a fallback to file - system polling. Generally, inotify should be preferred if available - because it consumes less system resources. - - .. note:: - In order to use the inotify reloader, you must have the ``inotify`` - package installed. - ''' - - -class ReloadEngine(Setting): - name = "reload_engine" - section = "Debugging" - cli = ["--reload-engine"] - meta = "STRING" - validator = validate_reload_engine - default = "auto" - desc = """\ - The implementation that should be used to power :ref:`reload`. - - Valid engines are: - - * 'auto' - * 'poll' - * 'inotify' (requires inotify) - - .. 
versionadded:: 19.7 - """ - - -class ReloadExtraFiles(Setting): - name = "reload_extra_files" - action = "append" - section = "Debugging" - cli = ["--reload-extra-file"] - meta = "FILES" - validator = validate_list_of_existing_files - default = [] - desc = """\ - Extends :ref:`reload` option to also watch and reload on additional files - (e.g., templates, configurations, specifications, etc.). - - .. versionadded:: 19.8 - """ - - -class Spew(Setting): - name = "spew" - section = "Debugging" - cli = ["--spew"] - validator = validate_bool - action = "store_true" - default = False - desc = """\ - Install a trace function that spews every line executed by the server. - - This is the nuclear option. - """ - - -class ConfigCheck(Setting): - name = "check_config" - section = "Debugging" - cli = ["--check-config"] - validator = validate_bool - action = "store_true" - default = False - desc = """\ - Check the configuration. - """ - - -class PreloadApp(Setting): - name = "preload_app" - section = "Server Mechanics" - cli = ["--preload"] - validator = validate_bool - action = "store_true" - default = False - desc = """\ - Load application code before the worker processes are forked. - - By preloading an application you can save some RAM resources as well as - speed up server boot times. Although, if you defer application loading - to each worker process, you can reload your application code easily by - restarting workers. - """ - - -class Sendfile(Setting): - name = "sendfile" - section = "Server Mechanics" - cli = ["--no-sendfile"] - validator = validate_bool - action = "store_const" - const = False - - desc = """\ - Disables the use of ``sendfile()``. - - If not set, the value of the ``SENDFILE`` environment variable is used - to enable or disable its usage. - - .. versionadded:: 19.2 - .. versionchanged:: 19.4 - Swapped ``--sendfile`` with ``--no-sendfile`` to actually allow - disabling. - .. versionchanged:: 19.6 - added support for the ``SENDFILE`` environment variable - """ - - -class ReusePort(Setting): - name = "reuse_port" - section = "Server Mechanics" - cli = ["--reuse-port"] - validator = validate_bool - action = "store_true" - default = False - - desc = """\ - Set the ``SO_REUSEPORT`` flag on the listening socket. - - .. versionadded:: 19.8 - """ - - -class Chdir(Setting): - name = "chdir" - section = "Server Mechanics" - cli = ["--chdir"] - validator = validate_chdir - default = util.getcwd() - desc = """\ - Chdir to specified directory before apps loading. - """ - - -class Daemon(Setting): - name = "daemon" - section = "Server Mechanics" - cli = ["-D", "--daemon"] - validator = validate_bool - action = "store_true" - default = False - desc = """\ - Daemonize the Gunicorn process. - - Detaches the server from the controlling terminal and enters the - background. - """ - -class Env(Setting): - name = "raw_env" - action = "append" - section = "Server Mechanics" - cli = ["-e", "--env"] - meta = "ENV" - validator = validate_list_string - default = [] - - desc = """\ - Set environment variable (key=value). - - Pass variables to the execution environment. Ex.:: - - $ gunicorn -b 127.0.0.1:8000 --env FOO=1 test:app - - and test for the foo variable environment in your application. - """ - - -class Pidfile(Setting): - name = "pidfile" - section = "Server Mechanics" - cli = ["-p", "--pid"] - meta = "FILE" - validator = validate_string - default = None - desc = """\ - A filename to use for the PID file. - - If not set, no PID file will be written. 
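[Editor's note: the `raw_env` setting above collects repeated `key=value` strings; `Config.env` (shown earlier in config.py) turns them into a dict by splitting on the first `=` only. A sketch of that conversion:]

```python
raw_env = ["FOO=1", "DSN=postgres://user:pw@host/db?opt=x"]

env = {}
for item in raw_env:
    # Split on the first '=' only, so values may themselves contain '='.
    key, value = item.split("=", 1)
    env[key] = value

print(env["DSN"])  # postgres://user:pw@host/db?opt=x
```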
- """ - -class WorkerTmpDir(Setting): - name = "worker_tmp_dir" - section = "Server Mechanics" - cli = ["--worker-tmp-dir"] - meta = "DIR" - validator = validate_string - default = None - desc = """\ - A directory to use for the worker heartbeat temporary file. - - If not set, the default temporary directory will be used. - - .. note:: - The current heartbeat system involves calling ``os.fchmod`` on - temporary file handlers and may block a worker for arbitrary time - if the directory is on a disk-backed filesystem. - - See :ref:`blocking-os-fchmod` for more detailed information - and a solution for avoiding this problem. - """ - - -class User(Setting): - name = "user" - section = "Server Mechanics" - cli = ["-u", "--user"] - meta = "USER" - validator = validate_user - default = os.geteuid() - desc = """\ - Switch worker processes to run as this user. - - A valid user id (as an integer) or the name of a user that can be - retrieved with a call to ``pwd.getpwnam(value)`` or ``None`` to not - change the worker process user. - """ - - -class Group(Setting): - name = "group" - section = "Server Mechanics" - cli = ["-g", "--group"] - meta = "GROUP" - validator = validate_group - default = os.getegid() - desc = """\ - Switch worker process to run as this group. - - A valid group id (as an integer) or the name of a user that can be - retrieved with a call to ``pwd.getgrnam(value)`` or ``None`` to not - change the worker processes group. - """ - -class Umask(Setting): - name = "umask" - section = "Server Mechanics" - cli = ["-m", "--umask"] - meta = "INT" - validator = validate_pos_int - type = auto_int - default = 0 - desc = """\ - A bit mask for the file mode on files written by Gunicorn. - - Note that this affects unix socket permissions. - - A valid value for the ``os.umask(mode)`` call or a string compatible - with ``int(value, 0)`` (``0`` means Python guesses the base, so values - like ``0``, ``0xFF``, ``0022`` are valid for decimal, hex, and octal - representations) - """ - - -class Initgroups(Setting): - name = "initgroups" - section = "Server Mechanics" - cli = ["--initgroups"] - validator = validate_bool - action = 'store_true' - default = False - - desc = """\ - If true, set the worker process's group access list with all of the - groups of which the specified username is a member, plus the specified - group id. - - .. versionadded:: 19.7 - """ - - -class TmpUploadDir(Setting): - name = "tmp_upload_dir" - section = "Server Mechanics" - meta = "DIR" - validator = validate_string - default = None - desc = """\ - Directory to store temporary request data as they are read. - - This may disappear in the near future. - - This path should be writable by the process permissions set for Gunicorn - workers. If not specified, Gunicorn will choose a system generated - temporary directory. - """ - - -class SecureSchemeHeader(Setting): - name = "secure_scheme_headers" - section = "Server Mechanics" - validator = validate_dict - default = { - "X-FORWARDED-PROTOCOL": "ssl", - "X-FORWARDED-PROTO": "https", - "X-FORWARDED-SSL": "on" - } - desc = """\ - - A dictionary containing headers and values that the front-end proxy - uses to indicate HTTPS requests. These tell Gunicorn to set - ``wsgi.url_scheme`` to ``https``, so your application can tell that the - request is secure. - - The dictionary should map upper-case header names to exact string - values. 
The value comparisons are case-sensitive, unlike the header - names, so make sure they're exactly what your front-end proxy sends - when handling HTTPS requests. - - It is important that your front-end proxy configuration ensures that - the headers defined here can not be passed directly from the client. - """ - - -class ForwardedAllowIPS(Setting): - name = "forwarded_allow_ips" - section = "Server Mechanics" - cli = ["--forwarded-allow-ips"] - meta = "STRING" - validator = validate_string_to_list - default = os.environ.get("FORWARDED_ALLOW_IPS", "127.0.0.1") - desc = """\ - Front-end's IPs from which allowed to handle set secure headers. - (comma separate). - - Set to ``*`` to disable checking of Front-end IPs (useful for setups - where you don't know in advance the IP address of Front-end, but - you still trust the environment). - - By default, the value of the ``FORWARDED_ALLOW_IPS`` environment - variable. If it is not defined, the default is ``"127.0.0.1"``. - """ - - -class AccessLog(Setting): - name = "accesslog" - section = "Logging" - cli = ["--access-logfile"] - meta = "FILE" - validator = validate_string - default = None - desc = """\ - The Access log file to write to. - - ``'-'`` means log to stdout. - """ - -class DisableRedirectAccessToSyslog(Setting): - name = "disable_redirect_access_to_syslog" - section = "Logging" - cli = ["--disable-redirect-access-to-syslog"] - validator = validate_bool - action = 'store_true' - default = False - desc = """\ - Disable redirect access logs to syslog. - - .. versionadded:: 19.8 - """ - - -class AccessLogFormat(Setting): - name = "access_log_format" - section = "Logging" - cli = ["--access-logformat"] - meta = "STRING" - validator = validate_string - default = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"' - desc = """\ - The access log format. - - =========== =========== - Identifier Description - =========== =========== - h remote address - l ``'-'`` - u user name - t date of the request - r status line (e.g. ``GET / HTTP/1.1``) - m request method - U URL path without query string - q query string - H protocol - s status - B response length - b response length or ``'-'`` (CLF format) - f referer - a user agent - T request time in seconds - D request time in microseconds - L request time in decimal seconds - p process ID - {Header}i request header - {Header}o response header - {Variable}e environment variable - =========== =========== - """ - - -class ErrorLog(Setting): - name = "errorlog" - section = "Logging" - cli = ["--error-logfile", "--log-file"] - meta = "FILE" - validator = validate_string - default = '-' - desc = """\ - The Error log file to write to. - - Using ``'-'`` for FILE makes gunicorn log to stderr. - - .. versionchanged:: 19.2 - Log to stderr by default. - - """ - - -class Loglevel(Setting): - name = "loglevel" - section = "Logging" - cli = ["--log-level"] - meta = "LEVEL" - validator = validate_string - default = "info" - desc = """\ - The granularity of Error log outputs. - - Valid level names are: - - * debug - * info - * warning - * error - * critical - """ - - -class CaptureOutput(Setting): - name = "capture_output" - section = "Logging" - cli = ["--capture-output"] - validator = validate_bool - action = 'store_true' - default = False - desc = """\ - Redirect stdout/stderr to specified file in :ref:`errorlog`. - - .. 
versionadded:: 19.6 - """ - - -class LoggerClass(Setting): - name = "logger_class" - section = "Logging" - cli = ["--logger-class"] - meta = "STRING" - validator = validate_class - default = "gunicorn.glogging.Logger" - desc = """\ - The logger you want to use to log events in Gunicorn. - - The default class (``gunicorn.glogging.Logger``) handle most of - normal usages in logging. It provides error and access logging. - - You can provide your own logger by giving Gunicorn a - Python path to a subclass like ``gunicorn.glogging.Logger``. - """ - - -class LogConfig(Setting): - name = "logconfig" - section = "Logging" - cli = ["--log-config"] - meta = "FILE" - validator = validate_string - default = None - desc = """\ - The log config file to use. - Gunicorn uses the standard Python logging module's Configuration - file format. - """ - - -class LogConfigDict(Setting): - name = "logconfig_dict" - section = "Logging" - cli = ["--log-config-dict"] - validator = validate_dict - default = {} - desc = """\ - The log config dictionary to use, using the standard Python - logging module's dictionary configuration format. This option - takes precedence over the :ref:`logconfig` option, which uses the - older file configuration format. - - Format: https://docs.python.org/3/library/logging.config.html#logging.config.dictConfig - - .. versionadded:: 19.8 - """ - - -class SyslogTo(Setting): - name = "syslog_addr" - section = "Logging" - cli = ["--log-syslog-to"] - meta = "SYSLOG_ADDR" - validator = validate_string - - if PLATFORM == "darwin": - default = "unix:///var/run/syslog" - elif PLATFORM in ('freebsd', 'dragonfly', ): - default = "unix:///var/run/log" - elif PLATFORM == "openbsd": - default = "unix:///dev/log" - else: - default = "udp://localhost:514" - - desc = """\ - Address to send syslog messages. - - Address is a string of the form: - - * ``unix://PATH#TYPE`` : for unix domain socket. ``TYPE`` can be ``stream`` - for the stream driver or ``dgram`` for the dgram driver. - ``stream`` is the default. - * ``udp://HOST:PORT`` : for UDP sockets - * ``tcp://HOST:PORT`` : for TCP sockets - - """ - - -class Syslog(Setting): - name = "syslog" - section = "Logging" - cli = ["--log-syslog"] - validator = validate_bool - action = 'store_true' - default = False - desc = """\ - Send *Gunicorn* logs to syslog. - - .. versionchanged:: 19.8 - You can now disable sending access logs by using the - :ref:`disable-redirect-access-to-syslog` setting. - """ - - -class SyslogPrefix(Setting): - name = "syslog_prefix" - section = "Logging" - cli = ["--log-syslog-prefix"] - meta = "SYSLOG_PREFIX" - validator = validate_string - default = None - desc = """\ - Makes Gunicorn use the parameter as program-name in the syslog entries. - - All entries will be prefixed by ``gunicorn.``. By default the - program name is the name of the process. - """ - - -class SyslogFacility(Setting): - name = "syslog_facility" - section = "Logging" - cli = ["--log-syslog-facility"] - meta = "SYSLOG_FACILITY" - validator = validate_string - default = "user" - desc = """\ - Syslog facility name - """ - - -class EnableStdioInheritance(Setting): - name = "enable_stdio_inheritance" - section = "Logging" - cli = ["-R", "--enable-stdio-inheritance"] - validator = validate_bool - default = False - action = "store_true" - desc = """\ - Enable stdio inheritance. - - Enable inheritance for stdio file descriptors in daemon mode. - - Note: To disable the Python stdout buffering, you can to set the user - environment variable ``PYTHONUNBUFFERED`` . 
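[Editor's note: a minimal `logconfig_dict`-style dictionary as described above; gunicorn merges the supplied dict over its built-in defaults before handing it to `logging.config.dictConfig`. Keys and levels here are illustrative:]

```python
import logging.config

logconfig_dict = {
    "version": 1,
    "disable_existing_loggers": False,
    "loggers": {
        # quiet gunicorn's error logger down to warnings and above
        "gunicorn.error": {"level": "WARNING", "propagate": True},
    },
}
logging.config.dictConfig(logconfig_dict)
```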
- """ - - -# statsD monitoring -class StatsdHost(Setting): - name = "statsd_host" - section = "Logging" - cli = ["--statsd-host"] - meta = "STATSD_ADDR" - default = None - validator = validate_hostport - desc = """\ - ``host:port`` of the statsd server to log to. - - .. versionadded:: 19.1 - """ - -class StatsdPrefix(Setting): - name = "statsd_prefix" - section = "Logging" - cli = ["--statsd-prefix"] - meta = "STATSD_PREFIX" - default = "" - validator = validate_string - desc = """\ - Prefix to use when emitting statsd metrics (a trailing ``.`` is added, - if not provided). - - .. versionadded:: 19.2 - """ - - -class Procname(Setting): - name = "proc_name" - section = "Process Naming" - cli = ["-n", "--name"] - meta = "STRING" - validator = validate_string - default = None - desc = """\ - A base to use with setproctitle for process naming. - - This affects things like ``ps`` and ``top``. If you're going to be - running more than one instance of Gunicorn you'll probably want to set a - name to tell them apart. This requires that you install the setproctitle - module. - - If not set, the *default_proc_name* setting will be used. - """ - - -class DefaultProcName(Setting): - name = "default_proc_name" - section = "Process Naming" - validator = validate_string - default = "gunicorn" - desc = """\ - Internal setting that is adjusted for each type of application. - """ - - -class PythonPath(Setting): - name = "pythonpath" - section = "Server Mechanics" - cli = ["--pythonpath"] - meta = "STRING" - validator = validate_string - default = None - desc = """\ - A comma-separated list of directories to add to the Python path. - - e.g. - ``'/home/djangoprojects/myproject,/home/python/mylibrary'``. - """ - - -class Paste(Setting): - name = "paste" - section = "Server Mechanics" - cli = ["--paste", "--paster"] - meta = "STRING" - validator = validate_string - default = None - desc = """\ - Load a PasteDeploy config file. The argument may contain a ``#`` - symbol followed by the name of an app section from the config file, - e.g. ``production.ini#admin``. - - At this time, using alternate server blocks is not supported. Use the - command line arguments to control server configuration instead. - """ - - -class OnStarting(Setting): - name = "on_starting" - section = "Server Hooks" - validator = validate_callable(1) - type = six.callable - - def on_starting(server): - pass - default = staticmethod(on_starting) - desc = """\ - Called just before the master process is initialized. - - The callable needs to accept a single instance variable for the Arbiter. - """ - - -class OnReload(Setting): - name = "on_reload" - section = "Server Hooks" - validator = validate_callable(1) - type = six.callable - - def on_reload(server): - pass - default = staticmethod(on_reload) - desc = """\ - Called to recycle workers during a reload via SIGHUP. - - The callable needs to accept a single instance variable for the Arbiter. - """ - - -class WhenReady(Setting): - name = "when_ready" - section = "Server Hooks" - validator = validate_callable(1) - type = six.callable - - def when_ready(server): - pass - default = staticmethod(when_ready) - desc = """\ - Called just after the server is started. - - The callable needs to accept a single instance variable for the Arbiter. 
- """ - - -class Prefork(Setting): - name = "pre_fork" - section = "Server Hooks" - validator = validate_callable(2) - type = six.callable - - def pre_fork(server, worker): - pass - default = staticmethod(pre_fork) - desc = """\ - Called just before a worker is forked. - - The callable needs to accept two instance variables for the Arbiter and - new Worker. - """ - - -class Postfork(Setting): - name = "post_fork" - section = "Server Hooks" - validator = validate_callable(2) - type = six.callable - - def post_fork(server, worker): - pass - default = staticmethod(post_fork) - desc = """\ - Called just after a worker has been forked. - - The callable needs to accept two instance variables for the Arbiter and - new Worker. - """ - - -class PostWorkerInit(Setting): - name = "post_worker_init" - section = "Server Hooks" - validator = validate_callable(1) - type = six.callable - - def post_worker_init(worker): - pass - - default = staticmethod(post_worker_init) - desc = """\ - Called just after a worker has initialized the application. - - The callable needs to accept one instance variable for the initialized - Worker. - """ - -class WorkerInt(Setting): - name = "worker_int" - section = "Server Hooks" - validator = validate_callable(1) - type = six.callable - - def worker_int(worker): - pass - - default = staticmethod(worker_int) - desc = """\ - Called just after a worker exited on SIGINT or SIGQUIT. - - The callable needs to accept one instance variable for the initialized - Worker. - """ - - -class WorkerAbort(Setting): - name = "worker_abort" - section = "Server Hooks" - validator = validate_callable(1) - type = six.callable - - def worker_abort(worker): - pass - - default = staticmethod(worker_abort) - desc = """\ - Called when a worker received the SIGABRT signal. - - This call generally happens on timeout. - - The callable needs to accept one instance variable for the initialized - Worker. - """ - - -class PreExec(Setting): - name = "pre_exec" - section = "Server Hooks" - validator = validate_callable(1) - type = six.callable - - def pre_exec(server): - pass - default = staticmethod(pre_exec) - desc = """\ - Called just before a new master process is forked. - - The callable needs to accept a single instance variable for the Arbiter. - """ - - -class PreRequest(Setting): - name = "pre_request" - section = "Server Hooks" - validator = validate_callable(2) - type = six.callable - - def pre_request(worker, req): - worker.log.debug("%s %s" % (req.method, req.path)) - default = staticmethod(pre_request) - desc = """\ - Called just before a worker processes the request. - - The callable needs to accept two instance variables for the Worker and - the Request. - """ - - -class PostRequest(Setting): - name = "post_request" - section = "Server Hooks" - validator = validate_post_request - type = six.callable - - def post_request(worker, req, environ, resp): - pass - default = staticmethod(post_request) - desc = """\ - Called after a worker processes the request. - - The callable needs to accept two instance variables for the Worker and - the Request. - """ - - -class ChildExit(Setting): - name = "child_exit" - section = "Server Hooks" - validator = validate_callable(2) - type = six.callable - - def child_exit(server, worker): - pass - default = staticmethod(child_exit) - desc = """\ - Called just after a worker has been exited, in the master process. - - The callable needs to accept two instance variables for the Arbiter and - the just-exited Worker. - - .. 
versionadded:: 19.7 - """ - - -class WorkerExit(Setting): - name = "worker_exit" - section = "Server Hooks" - validator = validate_callable(2) - type = six.callable - - def worker_exit(server, worker): - pass - default = staticmethod(worker_exit) - desc = """\ - Called just after a worker has been exited, in the worker process. - - The callable needs to accept two instance variables for the Arbiter and - the just-exited Worker. - """ - - -class NumWorkersChanged(Setting): - name = "nworkers_changed" - section = "Server Hooks" - validator = validate_callable(3) - type = six.callable - - def nworkers_changed(server, new_value, old_value): - pass - default = staticmethod(nworkers_changed) - desc = """\ - Called just after *num_workers* has been changed. - - The callable needs to accept an instance variable of the Arbiter and - two integers of number of workers after and before change. - - If the number of workers is set for the first time, *old_value* would - be ``None``. - """ - -class OnExit(Setting): - name = "on_exit" - section = "Server Hooks" - validator = validate_callable(1) - - def on_exit(server): - pass - - default = staticmethod(on_exit) - desc = """\ - Called just before exiting Gunicorn. - - The callable needs to accept a single instance variable for the Arbiter. - """ - - -class ProxyProtocol(Setting): - name = "proxy_protocol" - section = "Server Mechanics" - cli = ["--proxy-protocol"] - validator = validate_bool - default = False - action = "store_true" - desc = """\ - Enable detect PROXY protocol (PROXY mode). - - Allow using HTTP and Proxy together. It may be useful for work with - stunnel as HTTPS frontend and Gunicorn as HTTP server. - - PROXY protocol: http://haproxy.1wt.eu/download/1.5/doc/proxy-protocol.txt - - Example for stunnel config:: - - [https] - protocol = proxy - accept = 443 - connect = 80 - cert = /etc/ssl/certs/stunnel.pem - key = /etc/ssl/certs/stunnel.key - """ - - -class ProxyAllowFrom(Setting): - name = "proxy_allow_ips" - section = "Server Mechanics" - cli = ["--proxy-allow-from"] - validator = validate_string_to_list - default = "127.0.0.1" - desc = """\ - Front-end's IPs from which allowed accept proxy requests (comma separate). - - Set to ``*`` to disable checking of Front-end IPs (useful for setups - where you don't know in advance the IP address of Front-end, but - you still trust the environment) - """ - - -class KeyFile(Setting): - name = "keyfile" - section = "SSL" - cli = ["--keyfile"] - meta = "FILE" - validator = validate_string - default = None - desc = """\ - SSL key file - """ - - -class CertFile(Setting): - name = "certfile" - section = "SSL" - cli = ["--certfile"] - meta = "FILE" - validator = validate_string - default = None - desc = """\ - SSL certificate file - """ - -class SSLVersion(Setting): - name = "ssl_version" - section = "SSL" - cli = ["--ssl-version"] - validator = validate_pos_int - default = ssl.PROTOCOL_SSLv23 - desc = """\ - SSL version to use (see stdlib ssl module's) - - .. versionchanged:: 19.7 - The default value has been changed from ``ssl.PROTOCOL_TLSv1`` to - ``ssl.PROTOCOL_SSLv23``. 
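[Editor's note: the SSL settings in this section take stdlib `ssl` module constants directly; for example `cert_reqs` (the next setting below) expects one of the `CERT_*` values:]

```python
import ssl

# Values accepted by the cert_reqs setting, straight from the stdlib:
print(int(ssl.CERT_NONE), int(ssl.CERT_OPTIONAL), int(ssl.CERT_REQUIRED))  # 0 1 2
```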
- """ - -class CertReqs(Setting): - name = "cert_reqs" - section = "SSL" - cli = ["--cert-reqs"] - validator = validate_pos_int - default = ssl.CERT_NONE - desc = """\ - Whether client certificate is required (see stdlib ssl module's) - """ - -class CACerts(Setting): - name = "ca_certs" - section = "SSL" - cli = ["--ca-certs"] - meta = "FILE" - validator = validate_string - default = None - desc = """\ - CA certificates file - """ - -class SuppressRaggedEOFs(Setting): - name = "suppress_ragged_eofs" - section = "SSL" - cli = ["--suppress-ragged-eofs"] - action = "store_true" - default = True - validator = validate_bool - desc = """\ - Suppress ragged EOFs (see stdlib ssl module's) - """ - -class DoHandshakeOnConnect(Setting): - name = "do_handshake_on_connect" - section = "SSL" - cli = ["--do-handshake-on-connect"] - validator = validate_bool - action = "store_true" - default = False - desc = """\ - Whether to perform SSL handshake on socket connect (see stdlib ssl module's) - """ - - -if sys.version_info >= (2, 7): - class Ciphers(Setting): - name = "ciphers" - section = "SSL" - cli = ["--ciphers"] - validator = validate_string - default = 'TLSv1' - desc = """\ - Ciphers to use (see stdlib ssl module's) - """ - - -class PasteGlobalConf(Setting): - name = "raw_paste_global_conf" - action = "append" - section = "Server Mechanics" - cli = ["--paste-global"] - meta = "CONF" - validator = validate_list_string - default = [] - - desc = """\ - Set a PasteDeploy global config variable in ``key=value`` form. - - The option can be specified multiple times. - - The variables are passed to the the PasteDeploy entrypoint. Example:: - - $ gunicorn -b 127.0.0.1:8000 --paste development.ini --paste-global FOO=1 --paste-global BAR=2 - - .. versionadded:: 19.7 - """ diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/debug.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/debug.py deleted file mode 100644 index 996fe1b..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/debug.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. - -"""The debug module contains utilities and functions for better -debugging Gunicorn.""" - -import sys -import linecache -import re -import inspect - -__all__ = ['spew', 'unspew'] - -_token_spliter = re.compile(r'\W+') - - -class Spew(object): - - def __init__(self, trace_names=None, show_values=True): - self.trace_names = trace_names - self.show_values = show_values - - def __call__(self, frame, event, arg): - if event == 'line': - lineno = frame.f_lineno - if '__file__' in frame.f_globals: - filename = frame.f_globals['__file__'] - if (filename.endswith('.pyc') or - filename.endswith('.pyo')): - filename = filename[:-1] - name = frame.f_globals['__name__'] - line = linecache.getline(filename, lineno) - else: - name = '[unknown]' - try: - src = inspect.getsourcelines(frame) - line = src[lineno] - except IOError: - line = 'Unknown code named [%s]. 
VM instruction #%d' % ( - frame.f_code.co_name, frame.f_lasti) - if self.trace_names is None or name in self.trace_names: - print('%s:%s: %s' % (name, lineno, line.rstrip())) - if not self.show_values: - return self - details = [] - tokens = _token_spliter.split(line) - for tok in tokens: - if tok in frame.f_globals: - details.append('%s=%r' % (tok, frame.f_globals[tok])) - if tok in frame.f_locals: - details.append('%s=%r' % (tok, frame.f_locals[tok])) - if details: - print("\t%s" % ' '.join(details)) - return self - - -def spew(trace_names=None, show_values=False): - """Install a trace hook which writes incredibly detailed logs - about what code is being executed to stdout. - """ - sys.settrace(Spew(trace_names, show_values)) - - -def unspew(): - """Remove the trace hook installed by spew. - """ - sys.settrace(None) diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/errors.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/errors.py deleted file mode 100644 index 727d336..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/errors.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. - -# We don't need to call super() in __init__ methods of our -# BaseException and Exception classes because we also define -# our own __str__ methods so there is no need to pass 'message' -# to the base class to get a meaningful output from 'str(exc)'. -# pylint: disable=super-init-not-called - - -# we inherit from BaseException here to make sure to not be caught -# at application level -class HaltServer(BaseException): - def __init__(self, reason, exit_status=1): - self.reason = reason - self.exit_status = exit_status - - def __str__(self): - return "<HaltServer %r %d>" % (self.reason, self.exit_status) - - -class ConfigError(Exception): - """ Exception raised on config error """ - - -class AppImportError(Exception): - """ Exception raised when loading an application """ diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/glogging.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/glogging.py deleted file mode 100644 index 041a74d..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/glogging.py +++ /dev/null @@ -1,478 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information.
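[Editor's note: the `Spew` class in debug.py above is a standard `sys.settrace` hook. A stripped-down version showing the same mechanism; it prints only locations, where `spew` also resolves and prints the source line:]

```python
import sys

def tracer(frame, event, arg):
    # Called by the interpreter for every traced event; report 'line'
    # events and return the tracer itself to keep tracing this frame.
    if event == "line":
        print("%s:%d" % (frame.f_globals.get("__name__", "?"), frame.f_lineno))
    return tracer

def work():
    total = 0
    for i in range(3):
        total += i
    return total

sys.settrace(tracer)   # what spew() does with a Spew instance
work()
sys.settrace(None)     # what unspew() does
```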
- -import base64 -import binascii -import time -import logging -logging.Logger.manager.emittedNoHandlerWarning = 1 -from logging.config import fileConfig -try: - from logging.config import dictConfig -except ImportError: - # python 2.6 - dictConfig = None -import os -import socket -import sys -import threading -import traceback - -from gunicorn import util -from gunicorn.six import PY3, string_types - - -# syslog facility codes -SYSLOG_FACILITIES = { - "auth": 4, - "authpriv": 10, - "cron": 9, - "daemon": 3, - "ftp": 11, - "kern": 0, - "lpr": 6, - "mail": 2, - "news": 7, - "security": 4, # DEPRECATED - "syslog": 5, - "user": 1, - "uucp": 8, - "local0": 16, - "local1": 17, - "local2": 18, - "local3": 19, - "local4": 20, - "local5": 21, - "local6": 22, - "local7": 23 - } - - -CONFIG_DEFAULTS = dict( - version=1, - disable_existing_loggers=False, - - loggers={ - "root": {"level": "INFO", "handlers": ["console"]}, - "gunicorn.error": { - "level": "INFO", - "handlers": ["error_console"], - "propagate": True, - "qualname": "gunicorn.error" - }, - - "gunicorn.access": { - "level": "INFO", - "handlers": ["console"], - "propagate": True, - "qualname": "gunicorn.access" - } - }, - handlers={ - "console": { - "class": "logging.StreamHandler", - "formatter": "generic", - "stream": "ext://sys.stdout" - }, - "error_console": { - "class": "logging.StreamHandler", - "formatter": "generic", - "stream": "ext://sys.stderr" - }, - }, - formatters={ - "generic": { - "format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s", - "datefmt": "[%Y-%m-%d %H:%M:%S %z]", - "class": "logging.Formatter" - } - } -) - - -def loggers(): - """ get list of all loggers """ - root = logging.root - existing = root.manager.loggerDict.keys() - return [logging.getLogger(name) for name in existing] - - -class SafeAtoms(dict): - - def __init__(self, atoms): - dict.__init__(self) - for key, value in atoms.items(): - if isinstance(value, string_types): - self[key] = value.replace('"', '\\"') - else: - self[key] = value - - def __getitem__(self, k): - if k.startswith("{"): - kl = k.lower() - if kl in self: - return super(SafeAtoms, self).__getitem__(kl) - else: - return "-" - if k in self: - return super(SafeAtoms, self).__getitem__(k) - else: - return '-' - - -def parse_syslog_address(addr): - - # unix domain socket type depends on backend - # SysLogHandler will try both when given None - if addr.startswith("unix://"): - sock_type = None - - # set socket type only if explicitly requested - parts = addr.split("#", 1) - if len(parts) == 2: - addr = parts[0] - if parts[1] == "dgram": - sock_type = socket.SOCK_DGRAM - - return (sock_type, addr.split("unix://")[1]) - - if addr.startswith("udp://"): - addr = addr.split("udp://")[1] - socktype = socket.SOCK_DGRAM - elif addr.startswith("tcp://"): - addr = addr.split("tcp://")[1] - socktype = socket.SOCK_STREAM - else: - raise RuntimeError("invalid syslog address") - - if '[' in addr and ']' in addr: - host = addr.split(']')[0][1:].lower() - elif ':' in addr: - host = addr.split(':')[0].lower() - elif addr == "": - host = "localhost" - else: - host = addr.lower() - - addr = addr.split(']')[-1] - if ":" in addr: - port = addr.split(':', 1)[1] - if not port.isdigit(): - raise RuntimeError("%r is not a valid port number." 
% port) - port = int(port) - else: - port = 514 - - return (socktype, (host, port)) - - -class Logger(object): - - LOG_LEVELS = { - "critical": logging.CRITICAL, - "error": logging.ERROR, - "warning": logging.WARNING, - "info": logging.INFO, - "debug": logging.DEBUG - } - loglevel = logging.INFO - - error_fmt = r"%(asctime)s [%(process)d] [%(levelname)s] %(message)s" - datefmt = r"[%Y-%m-%d %H:%M:%S %z]" - - access_fmt = "%(message)s" - syslog_fmt = "[%(process)d] %(message)s" - - atoms_wrapper_class = SafeAtoms - - def __init__(self, cfg): - self.error_log = logging.getLogger("gunicorn.error") - self.error_log.propagate = False - self.access_log = logging.getLogger("gunicorn.access") - self.access_log.propagate = False - self.error_handlers = [] - self.access_handlers = [] - self.logfile = None - self.lock = threading.Lock() - self.cfg = cfg - self.setup(cfg) - - def setup(self, cfg): - self.loglevel = self.LOG_LEVELS.get(cfg.loglevel.lower(), logging.INFO) - self.error_log.setLevel(self.loglevel) - self.access_log.setLevel(logging.INFO) - - # set gunicorn.error handler - if self.cfg.capture_output and cfg.errorlog != "-": - for stream in sys.stdout, sys.stderr: - stream.flush() - - self.logfile = open(cfg.errorlog, 'a+') - os.dup2(self.logfile.fileno(), sys.stdout.fileno()) - os.dup2(self.logfile.fileno(), sys.stderr.fileno()) - - self._set_handler(self.error_log, cfg.errorlog, - logging.Formatter(self.error_fmt, self.datefmt)) - - # set gunicorn.access handler - if cfg.accesslog is not None: - self._set_handler(self.access_log, cfg.accesslog, - fmt=logging.Formatter(self.access_fmt), stream=sys.stdout) - - # set syslog handler - if cfg.syslog: - self._set_syslog_handler( - self.error_log, cfg, self.syslog_fmt, "error" - ) - if not cfg.disable_redirect_access_to_syslog: - self._set_syslog_handler( - self.access_log, cfg, self.syslog_fmt, "access" - ) - - if dictConfig is None and cfg.logconfig_dict: - util.warn("Dictionary-based log configuration requires " - "Python 2.7 or above.") - - if dictConfig and cfg.logconfig_dict: - config = CONFIG_DEFAULTS.copy() - config.update(cfg.logconfig_dict) - try: - dictConfig(config) - except ( - AttributeError, - ImportError, - ValueError, - TypeError - ) as exc: - raise RuntimeError(str(exc)) - elif cfg.logconfig: - if os.path.exists(cfg.logconfig): - defaults = CONFIG_DEFAULTS.copy() - defaults['__file__'] = cfg.logconfig - defaults['here'] = os.path.dirname(cfg.logconfig) - fileConfig(cfg.logconfig, defaults=defaults, - disable_existing_loggers=False) - else: - msg = "Error: log config '%s' not found" - raise RuntimeError(msg % cfg.logconfig) - - def critical(self, msg, *args, **kwargs): - self.error_log.critical(msg, *args, **kwargs) - - def error(self, msg, *args, **kwargs): - self.error_log.error(msg, *args, **kwargs) - - def warning(self, msg, *args, **kwargs): - self.error_log.warning(msg, *args, **kwargs) - - def info(self, msg, *args, **kwargs): - self.error_log.info(msg, *args, **kwargs) - - def debug(self, msg, *args, **kwargs): - self.error_log.debug(msg, *args, **kwargs) - - def exception(self, msg, *args, **kwargs): - self.error_log.exception(msg, *args, **kwargs) - - def log(self, lvl, msg, *args, **kwargs): - if isinstance(lvl, string_types): - lvl = self.LOG_LEVELS.get(lvl.lower(), logging.INFO) - self.error_log.log(lvl, msg, *args, **kwargs) - - def atoms(self, resp, req, environ, request_time): - """ Gets atoms for log formating. 
-        """
-        status = resp.status
-        if isinstance(status, str):
-            status = status.split(None, 1)[0]
-        atoms = {
-            'h': environ.get('REMOTE_ADDR', '-'),
-            'l': '-',
-            'u': self._get_user(environ) or '-',
-            't': self.now(),
-            'r': "%s %s %s" % (environ['REQUEST_METHOD'],
-                               environ['RAW_URI'], environ["SERVER_PROTOCOL"]),
-            's': status,
-            'm': environ.get('REQUEST_METHOD'),
-            'U': environ.get('PATH_INFO'),
-            'q': environ.get('QUERY_STRING'),
-            'H': environ.get('SERVER_PROTOCOL'),
-            'b': getattr(resp, 'sent', None) is not None and str(resp.sent) or '-',
-            'B': getattr(resp, 'sent', None),
-            'f': environ.get('HTTP_REFERER', '-'),
-            'a': environ.get('HTTP_USER_AGENT', '-'),
-            'T': request_time.seconds,
-            'D': (request_time.seconds*1000000) + request_time.microseconds,
-            'L': "%d.%06d" % (request_time.seconds, request_time.microseconds),
-            'p': "<%s>" % os.getpid()
-        }
-
-        # add request headers
-        if hasattr(req, 'headers'):
-            req_headers = req.headers
-        else:
-            req_headers = req
-
-        if hasattr(req_headers, "items"):
-            req_headers = req_headers.items()
-
-        atoms.update(dict([("{%s}i" % k.lower(), v) for k, v in req_headers]))
-
-        resp_headers = resp.headers
-        if hasattr(resp_headers, "items"):
-            resp_headers = resp_headers.items()
-
-        # add response headers
-        atoms.update(dict([("{%s}o" % k.lower(), v) for k, v in resp_headers]))
-
-        # add environ variables
-        environ_variables = environ.items()
-        atoms.update(dict([("{%s}e" % k.lower(), v) for k, v in environ_variables]))
-
-        return atoms
-
-    def access(self, resp, req, environ, request_time):
-        """ See http://httpd.apache.org/docs/2.0/logs.html#combined
-        for format details
-        """
-
-        if not (self.cfg.accesslog or self.cfg.logconfig or
-                self.cfg.logconfig_dict or
-                (self.cfg.syslog and not self.cfg.disable_redirect_access_to_syslog)):
-            return
-
-        # wrap atoms:
-        # - make sure atoms will be tested case-insensitively
-        # - if an atom doesn't exist, replace it with '-'
-        safe_atoms = self.atoms_wrapper_class(self.atoms(resp, req, environ,
-                                                         request_time))
-
-        try:
-            self.access_log.info(self.cfg.access_log_format, safe_atoms)
-        except:
-            self.error(traceback.format_exc())
-
-    def now(self):
-        """ return date in Apache Common Log Format """
-        return time.strftime('[%d/%b/%Y:%H:%M:%S %z]')
-
-    def reopen_files(self):
-        if self.cfg.capture_output and self.cfg.errorlog != "-":
-            for stream in sys.stdout, sys.stderr:
-                stream.flush()
-
-            with self.lock:
-                if self.logfile is not None:
-                    self.logfile.close()
-                self.logfile = open(self.cfg.errorlog, 'a+')
-                os.dup2(self.logfile.fileno(), sys.stdout.fileno())
-                os.dup2(self.logfile.fileno(), sys.stderr.fileno())
-
-
-        for log in loggers():
-            for handler in log.handlers:
-                if isinstance(handler, logging.FileHandler):
-                    handler.acquire()
-                    try:
-                        if handler.stream:
-                            handler.close()
-                            handler.stream = handler._open()
-                    finally:
-                        handler.release()
-
-    def close_on_exec(self):
-        for log in loggers():
-            for handler in log.handlers:
-                if isinstance(handler, logging.FileHandler):
-                    handler.acquire()
-                    try:
-                        if handler.stream:
-                            util.close_on_exec(handler.stream.fileno())
-                    finally:
-                        handler.release()
-
-    def _get_gunicorn_handler(self, log):
-        for h in log.handlers:
-            if getattr(h, "_gunicorn", False):
-                return h
-
-    def _set_handler(self, log, output, fmt, stream=None):
-        # remove previous gunicorn log handler
-        h = self._get_gunicorn_handler(log)
-        if h:
-            log.handlers.remove(h)
-
-        if output is not None:
-            if output == "-":
-                h = logging.StreamHandler(stream)
-            else:
-                util.check_is_writeable(output)
-                h = 
logging.FileHandler(output)
-                # make sure the user can reopen the file
-                try:
-                    os.chown(h.baseFilename, self.cfg.user, self.cfg.group)
-                except OSError:
-                    # it's probably OK here; we assume the user has given
-                    # /dev/null as a parameter.
-                    pass
-
-            h.setFormatter(fmt)
-            h._gunicorn = True
-            log.addHandler(h)
-
-    def _set_syslog_handler(self, log, cfg, fmt, name):
-        # setup format
-        if not cfg.syslog_prefix:
-            prefix = cfg.proc_name.replace(":", ".")
-        else:
-            prefix = cfg.syslog_prefix
-
-        prefix = "gunicorn.%s.%s" % (prefix, name)
-
-        # set format
-        fmt = logging.Formatter(r"%s: %s" % (prefix, fmt))
-
-        # syslog facility
-        try:
-            facility = SYSLOG_FACILITIES[cfg.syslog_facility.lower()]
-        except KeyError:
-            raise RuntimeError("unknown facility name")
-
-        # parse syslog address
-        socktype, addr = parse_syslog_address(cfg.syslog_addr)
-
-        # finally setup the syslog handler
-        if sys.version_info >= (2, 7):
-            h = logging.handlers.SysLogHandler(address=addr,
-                                               facility=facility, socktype=socktype)
-        else:
-            # socktype is only supported in 2.7 and up
-            # fix issue #541
-            h = logging.handlers.SysLogHandler(address=addr,
-                                               facility=facility)
-
-        h.setFormatter(fmt)
-        h._gunicorn = True
-        log.addHandler(h)
-
-    def _get_user(self, environ):
-        user = None
-        http_auth = environ.get("HTTP_AUTHORIZATION")
-        if http_auth and http_auth.startswith('Basic'):
-            auth = http_auth.split(" ", 1)
-            if len(auth) == 2:
-                try:
-                    # b64decode doesn't accept unicode in Python < 3.3
-                    # so we need to convert it to a byte string
-                    auth = base64.b64decode(auth[1].strip().encode('utf-8'))
-                    if PY3:  # b64decode returns a byte string in Python 3
-                        auth = auth.decode('utf-8')
-                    auth = auth.split(":", 1)
-                except (TypeError, binascii.Error, UnicodeDecodeError) as exc:
-                    self.debug("Couldn't get username: %s", exc)
-                    return user
-                if len(auth) == 2:
-                    user = auth[0]
-        return user
diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/__init__.py
deleted file mode 100644
index 1da6f3e..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# -*- coding: utf-8 -
-#
-# This file is part of gunicorn released under the MIT license.
-# See the NOTICE for more information.
-
-from gunicorn.http.message import Message, Request
-from gunicorn.http.parser import RequestParser
-
-__all__ = ['Message', 'Request', 'RequestParser']
diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/_sendfile.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/_sendfile.py
deleted file mode 100644
index 1764cb3..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/_sendfile.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# -*- coding: utf-8 -
-#
-# This file is part of gunicorn released under the MIT license.
-# See the NOTICE for more information.
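For reference, parse_syslog_address() in glogging.py above accepts three address
forms. A minimal sketch of the mapping (return values traced by hand from the code
above; the import assumes the vendored package is on sys.path):

    import socket
    from gunicorn.glogging import parse_syslog_address

    parse_syslog_address("udp://localhost:514")    # (socket.SOCK_DGRAM, ("localhost", 514))
    parse_syslog_address("tcp://[::1]:514")        # (socket.SOCK_STREAM, ("::1", 514))
    parse_syslog_address("unix:///dev/log#dgram")  # (socket.SOCK_DGRAM, "/dev/log")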
- -import errno -import os -import sys - -try: - import ctypes - import ctypes.util -except MemoryError: - # selinux execmem denial - # https://bugzilla.redhat.com/show_bug.cgi?id=488396 - raise ImportError - -SUPPORTED_PLATFORMS = ( - 'darwin', - 'freebsd', - 'dragonfly', - 'linux2') - -if sys.platform not in SUPPORTED_PLATFORMS: - raise ImportError("sendfile isn't supported on this platform") - -_libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True) -_sendfile = _libc.sendfile - - -def sendfile(fdout, fdin, offset, nbytes): - if sys.platform == 'darwin': - _sendfile.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_uint64, - ctypes.POINTER(ctypes.c_uint64), ctypes.c_voidp, - ctypes.c_int] - _nbytes = ctypes.c_uint64(nbytes) - result = _sendfile(fdin, fdout, offset, _nbytes, None, 0) - - if result == -1: - e = ctypes.get_errno() - if e == errno.EAGAIN and _nbytes.value is not None: - return _nbytes.value - raise OSError(e, os.strerror(e)) - return _nbytes.value - elif sys.platform in ('freebsd', 'dragonfly',): - _sendfile.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_uint64, - ctypes.c_uint64, ctypes.c_voidp, - ctypes.POINTER(ctypes.c_uint64), ctypes.c_int] - _sbytes = ctypes.c_uint64() - result = _sendfile(fdin, fdout, offset, nbytes, None, _sbytes, 0) - if result == -1: - e = ctypes.get_errno() - if e == errno.EAGAIN and _sbytes.value is not None: - return _sbytes.value - raise OSError(e, os.strerror(e)) - return _sbytes.value - - else: - _sendfile.argtypes = [ctypes.c_int, ctypes.c_int, - ctypes.POINTER(ctypes.c_uint64), ctypes.c_size_t] - - _offset = ctypes.c_uint64(offset) - sent = _sendfile(fdout, fdin, _offset, nbytes) - if sent == -1: - e = ctypes.get_errno() - raise OSError(e, os.strerror(e)) - return sent diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/body.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/body.py deleted file mode 100644 index fb8633e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/body.py +++ /dev/null @@ -1,259 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. 
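The ctypes-backed sendfile() above hides the macOS/BSD/Linux calling-convention
differences behind one signature, sendfile(fdout, fdin, offset, nbytes). A usage
sketch (conn is an assumed connected socket; retry handling for short writes and
EAGAIN is omitted):

    import os

    from gunicorn.http._sendfile import sendfile

    with open("payload.bin", "rb") as f:
        nbytes = os.fstat(f.fileno()).st_size
        sent = sendfile(conn.fileno(), f.fileno(), 0, nbytes)  # bytes transmitted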
- -from gunicorn.http.errors import (NoMoreData, ChunkMissingTerminator, - InvalidChunkSize) -from gunicorn import six - - -class ChunkedReader(object): - def __init__(self, req, unreader): - self.req = req - self.parser = self.parse_chunked(unreader) - self.buf = six.BytesIO() - - def read(self, size): - if not isinstance(size, six.integer_types): - raise TypeError("size must be an integral type") - if size < 0: - raise ValueError("Size must be positive.") - if size == 0: - return b"" - - if self.parser: - while self.buf.tell() < size: - try: - self.buf.write(six.next(self.parser)) - except StopIteration: - self.parser = None - break - - data = self.buf.getvalue() - ret, rest = data[:size], data[size:] - self.buf = six.BytesIO() - self.buf.write(rest) - return ret - - def parse_trailers(self, unreader, data): - buf = six.BytesIO() - buf.write(data) - - idx = buf.getvalue().find(b"\r\n\r\n") - done = buf.getvalue()[:2] == b"\r\n" - while idx < 0 and not done: - self.get_data(unreader, buf) - idx = buf.getvalue().find(b"\r\n\r\n") - done = buf.getvalue()[:2] == b"\r\n" - if done: - unreader.unread(buf.getvalue()[2:]) - return b"" - self.req.trailers = self.req.parse_headers(buf.getvalue()[:idx]) - unreader.unread(buf.getvalue()[idx + 4:]) - - def parse_chunked(self, unreader): - (size, rest) = self.parse_chunk_size(unreader) - while size > 0: - while size > len(rest): - size -= len(rest) - yield rest - rest = unreader.read() - if not rest: - raise NoMoreData() - yield rest[:size] - # Remove \r\n after chunk - rest = rest[size:] - while len(rest) < 2: - rest += unreader.read() - if rest[:2] != b'\r\n': - raise ChunkMissingTerminator(rest[:2]) - (size, rest) = self.parse_chunk_size(unreader, data=rest[2:]) - - def parse_chunk_size(self, unreader, data=None): - buf = six.BytesIO() - if data is not None: - buf.write(data) - - idx = buf.getvalue().find(b"\r\n") - while idx < 0: - self.get_data(unreader, buf) - idx = buf.getvalue().find(b"\r\n") - - data = buf.getvalue() - line, rest_chunk = data[:idx], data[idx + 2:] - - chunk_size = line.split(b";", 1)[0].strip() - try: - chunk_size = int(chunk_size, 16) - except ValueError: - raise InvalidChunkSize(chunk_size) - - if chunk_size == 0: - try: - self.parse_trailers(unreader, rest_chunk) - except NoMoreData: - pass - return (0, None) - return (chunk_size, rest_chunk) - - def get_data(self, unreader, buf): - data = unreader.read() - if not data: - raise NoMoreData() - buf.write(data) - - -class LengthReader(object): - def __init__(self, unreader, length): - self.unreader = unreader - self.length = length - - def read(self, size): - if not isinstance(size, six.integer_types): - raise TypeError("size must be an integral type") - - size = min(self.length, size) - if size < 0: - raise ValueError("Size must be positive.") - if size == 0: - return b"" - - buf = six.BytesIO() - data = self.unreader.read() - while data: - buf.write(data) - if buf.tell() >= size: - break - data = self.unreader.read() - - buf = buf.getvalue() - ret, rest = buf[:size], buf[size:] - self.unreader.unread(rest) - self.length -= size - return ret - - -class EOFReader(object): - def __init__(self, unreader): - self.unreader = unreader - self.buf = six.BytesIO() - self.finished = False - - def read(self, size): - if not isinstance(size, six.integer_types): - raise TypeError("size must be an integral type") - if size < 0: - raise ValueError("Size must be positive.") - if size == 0: - return b"" - - if self.finished: - data = self.buf.getvalue() - ret, rest = data[:size], data[size:] 
- self.buf = six.BytesIO() - self.buf.write(rest) - return ret - - data = self.unreader.read() - while data: - self.buf.write(data) - if self.buf.tell() > size: - break - data = self.unreader.read() - - if not data: - self.finished = True - - data = self.buf.getvalue() - ret, rest = data[:size], data[size:] - self.buf = six.BytesIO() - self.buf.write(rest) - return ret - - -class Body(object): - def __init__(self, reader): - self.reader = reader - self.buf = six.BytesIO() - - def __iter__(self): - return self - - def __next__(self): - ret = self.readline() - if not ret: - raise StopIteration() - return ret - next = __next__ - - def getsize(self, size): - if size is None: - return six.MAXSIZE - elif not isinstance(size, six.integer_types): - raise TypeError("size must be an integral type") - elif size < 0: - return six.MAXSIZE - return size - - def read(self, size=None): - size = self.getsize(size) - if size == 0: - return b"" - - if size < self.buf.tell(): - data = self.buf.getvalue() - ret, rest = data[:size], data[size:] - self.buf = six.BytesIO() - self.buf.write(rest) - return ret - - while size > self.buf.tell(): - data = self.reader.read(1024) - if not data: - break - self.buf.write(data) - - data = self.buf.getvalue() - ret, rest = data[:size], data[size:] - self.buf = six.BytesIO() - self.buf.write(rest) - return ret - - def readline(self, size=None): - size = self.getsize(size) - if size == 0: - return b"" - - data = self.buf.getvalue() - self.buf = six.BytesIO() - - ret = [] - while 1: - idx = data.find(b"\n", 0, size) - idx = idx + 1 if idx >= 0 else size if len(data) >= size else 0 - if idx: - ret.append(data[:idx]) - self.buf.write(data[idx:]) - break - - ret.append(data) - size -= len(data) - data = self.reader.read(min(1024, size)) - if not data: - break - - return b"".join(ret) - - def readlines(self, size=None): - ret = [] - data = self.read() - while data: - pos = data.find(b"\n") - if pos < 0: - ret.append(data) - data = b"" - else: - line, data = data[:pos + 1], data[pos + 1:] - ret.append(line) - return ret diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/errors.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/errors.py deleted file mode 100644 index 7839ef0..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/errors.py +++ /dev/null @@ -1,120 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. - -# We don't need to call super() in __init__ methods of our -# BaseException and Exception classes because we also define -# our own __str__ methods so there is no need to pass 'message' -# to the base class to get a meaningful output from 'str(exc)'. 
-# pylint: disable=super-init-not-called - - -class ParseException(Exception): - pass - - -class NoMoreData(IOError): - def __init__(self, buf=None): - self.buf = buf - - def __str__(self): - return "No more data after: %r" % self.buf - - -class InvalidRequestLine(ParseException): - def __init__(self, req): - self.req = req - self.code = 400 - - def __str__(self): - return "Invalid HTTP request line: %r" % self.req - - -class InvalidRequestMethod(ParseException): - def __init__(self, method): - self.method = method - - def __str__(self): - return "Invalid HTTP method: %r" % self.method - - -class InvalidHTTPVersion(ParseException): - def __init__(self, version): - self.version = version - - def __str__(self): - return "Invalid HTTP Version: %r" % self.version - - -class InvalidHeader(ParseException): - def __init__(self, hdr, req=None): - self.hdr = hdr - self.req = req - - def __str__(self): - return "Invalid HTTP Header: %r" % self.hdr - - -class InvalidHeaderName(ParseException): - def __init__(self, hdr): - self.hdr = hdr - - def __str__(self): - return "Invalid HTTP header name: %r" % self.hdr - - -class InvalidChunkSize(IOError): - def __init__(self, data): - self.data = data - - def __str__(self): - return "Invalid chunk size: %r" % self.data - - -class ChunkMissingTerminator(IOError): - def __init__(self, term): - self.term = term - - def __str__(self): - return "Invalid chunk terminator is not '\\r\\n': %r" % self.term - - -class LimitRequestLine(ParseException): - def __init__(self, size, max_size): - self.size = size - self.max_size = max_size - - def __str__(self): - return "Request Line is too large (%s > %s)" % (self.size, self.max_size) - - -class LimitRequestHeaders(ParseException): - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - - -class InvalidProxyLine(ParseException): - def __init__(self, line): - self.line = line - self.code = 400 - - def __str__(self): - return "Invalid PROXY line: %r" % self.line - - -class ForbiddenProxyRequest(ParseException): - def __init__(self, host): - self.host = host - self.code = 403 - - def __str__(self): - return "Proxy request from %r not allowed" % self.host - - -class InvalidSchemeHeaders(ParseException): - def __str__(self): - return "Contradictory scheme headers" diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/message.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/message.py deleted file mode 100644 index 2700b32..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/message.py +++ /dev/null @@ -1,363 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. 
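Tying body.py and errors.py together: ChunkedReader.parse_chunked() above walks
hex-sized chunks and raises the IOError subclasses just defined when the framing
is malformed. Illustrative inputs (hand-traced, not from the upstream test suite):

    b"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n"  # decodes to b"Wikipedia"
    b"zz\r\n..."                             # InvalidChunkSize, int(b"zz", 16) fails
    b"4\r\nWikiXX..."                        # ChunkMissingTerminator, b"XX" != b"\r\n"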
- -import re -import socket -from errno import ENOTCONN - -from gunicorn._compat import bytes_to_str -from gunicorn.http.unreader import SocketUnreader -from gunicorn.http.body import ChunkedReader, LengthReader, EOFReader, Body -from gunicorn.http.errors import (InvalidHeader, InvalidHeaderName, NoMoreData, - InvalidRequestLine, InvalidRequestMethod, InvalidHTTPVersion, - LimitRequestLine, LimitRequestHeaders) -from gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest -from gunicorn.http.errors import InvalidSchemeHeaders -from gunicorn.six import BytesIO, string_types -from gunicorn.util import split_request_uri - -MAX_REQUEST_LINE = 8190 -MAX_HEADERS = 32768 -DEFAULT_MAX_HEADERFIELD_SIZE = 8190 - -HEADER_RE = re.compile(r"[\x00-\x1F\x7F()<>@,;:\[\]={} \t\\\"]") -METH_RE = re.compile(r"[A-Z0-9$-_.]{3,20}") -VERSION_RE = re.compile(r"HTTP/(\d+)\.(\d+)") - - -class Message(object): - def __init__(self, cfg, unreader): - self.cfg = cfg - self.unreader = unreader - self.version = None - self.headers = [] - self.trailers = [] - self.body = None - self.scheme = "https" if cfg.is_ssl else "http" - - # set headers limits - self.limit_request_fields = cfg.limit_request_fields - if (self.limit_request_fields <= 0 - or self.limit_request_fields > MAX_HEADERS): - self.limit_request_fields = MAX_HEADERS - self.limit_request_field_size = cfg.limit_request_field_size - if self.limit_request_field_size < 0: - self.limit_request_field_size = DEFAULT_MAX_HEADERFIELD_SIZE - - # set max header buffer size - max_header_field_size = self.limit_request_field_size or DEFAULT_MAX_HEADERFIELD_SIZE - self.max_buffer_headers = self.limit_request_fields * \ - (max_header_field_size + 2) + 4 - - unused = self.parse(self.unreader) - self.unreader.unread(unused) - self.set_body_reader() - - def parse(self, unreader): - raise NotImplementedError() - - def parse_headers(self, data): - cfg = self.cfg - headers = [] - - # Split lines on \r\n keeping the \r\n on each line - lines = [bytes_to_str(line) + "\r\n" for line in data.split(b"\r\n")] - - # handle scheme headers - scheme_header = False - secure_scheme_headers = {} - if '*' in cfg.forwarded_allow_ips: - secure_scheme_headers = cfg.secure_scheme_headers - elif isinstance(self.unreader, SocketUnreader): - remote_addr = self.unreader.sock.getpeername() - if isinstance(remote_addr, tuple): - remote_host = remote_addr[0] - if remote_host in cfg.forwarded_allow_ips: - secure_scheme_headers = cfg.secure_scheme_headers - elif isinstance(remote_addr, string_types): - secure_scheme_headers = cfg.secure_scheme_headers - - # Parse headers into key/value pairs paying attention - # to continuation lines. - while lines: - if len(headers) >= self.limit_request_fields: - raise LimitRequestHeaders("limit request headers fields") - - # Parse initial header name : value pair. 
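# (Annotation, not upstream code: besides the name:value split below, this loop
# also folds obsolete RFC 822 continuation lines, so b"X-Long: part1\r\n part2\r\n"
# yields the single header ('X-LONG', 'part1\r\n part2'); the name is upper-cased
# and the folded value keeps its embedded CRLF plus the leading space.)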
- curr = lines.pop(0) - header_length = len(curr) - if curr.find(":") < 0: - raise InvalidHeader(curr.strip()) - name, value = curr.split(":", 1) - name = name.rstrip(" \t").upper() - if HEADER_RE.search(name): - raise InvalidHeaderName(name) - - name, value = name.strip(), [value.lstrip()] - - # Consume value continuation lines - while lines and lines[0].startswith((" ", "\t")): - curr = lines.pop(0) - header_length += len(curr) - if header_length > self.limit_request_field_size > 0: - raise LimitRequestHeaders("limit request headers " - + "fields size") - value.append(curr) - value = ''.join(value).rstrip() - - if header_length > self.limit_request_field_size > 0: - raise LimitRequestHeaders("limit request headers fields size") - - if name in secure_scheme_headers: - secure = value == secure_scheme_headers[name] - scheme = "https" if secure else "http" - if scheme_header: - if scheme != self.scheme: - raise InvalidSchemeHeaders() - else: - scheme_header = True - self.scheme = scheme - - headers.append((name, value)) - - return headers - - def set_body_reader(self): - chunked = False - content_length = None - for (name, value) in self.headers: - if name == "CONTENT-LENGTH": - content_length = value - elif name == "TRANSFER-ENCODING": - chunked = value.lower() == "chunked" - elif name == "SEC-WEBSOCKET-KEY1": - content_length = 8 - - if chunked: - self.body = Body(ChunkedReader(self, self.unreader)) - elif content_length is not None: - try: - content_length = int(content_length) - except ValueError: - raise InvalidHeader("CONTENT-LENGTH", req=self) - - if content_length < 0: - raise InvalidHeader("CONTENT-LENGTH", req=self) - - self.body = Body(LengthReader(self.unreader, content_length)) - else: - self.body = Body(EOFReader(self.unreader)) - - def should_close(self): - for (h, v) in self.headers: - if h == "CONNECTION": - v = v.lower().strip() - if v == "close": - return True - elif v == "keep-alive": - return False - break - return self.version <= (1, 0) - - -class Request(Message): - def __init__(self, cfg, unreader, req_number=1): - self.method = None - self.uri = None - self.path = None - self.query = None - self.fragment = None - - # get max request line size - self.limit_request_line = cfg.limit_request_line - if (self.limit_request_line < 0 - or self.limit_request_line >= MAX_REQUEST_LINE): - self.limit_request_line = MAX_REQUEST_LINE - - self.req_number = req_number - self.proxy_protocol_info = None - super(Request, self).__init__(cfg, unreader) - - def get_data(self, unreader, buf, stop=False): - data = unreader.read() - if not data: - if stop: - raise StopIteration() - raise NoMoreData(buf.getvalue()) - buf.write(data) - - def parse(self, unreader): - buf = BytesIO() - self.get_data(unreader, buf, stop=True) - - # get request line - line, rbuf = self.read_line(unreader, buf, self.limit_request_line) - - # proxy protocol - if self.proxy_protocol(bytes_to_str(line)): - # get next request line - buf = BytesIO() - buf.write(rbuf) - line, rbuf = self.read_line(unreader, buf, self.limit_request_line) - - self.parse_request_line(line) - buf = BytesIO() - buf.write(rbuf) - - # Headers - data = buf.getvalue() - idx = data.find(b"\r\n\r\n") - - done = data[:2] == b"\r\n" - while True: - idx = data.find(b"\r\n\r\n") - done = data[:2] == b"\r\n" - - if idx < 0 and not done: - self.get_data(unreader, buf) - data = buf.getvalue() - if len(data) > self.max_buffer_headers: - raise LimitRequestHeaders("max buffer headers") - else: - break - - if done: - self.unreader.unread(data[2:]) - return 
b"" - - self.headers = self.parse_headers(data[:idx]) - - ret = data[idx + 4:] - buf = None - return ret - - def read_line(self, unreader, buf, limit=0): - data = buf.getvalue() - - while True: - idx = data.find(b"\r\n") - if idx >= 0: - # check if the request line is too large - if idx > limit > 0: - raise LimitRequestLine(idx, limit) - break - elif len(data) - 2 > limit > 0: - raise LimitRequestLine(len(data), limit) - self.get_data(unreader, buf) - data = buf.getvalue() - - return (data[:idx], # request line, - data[idx + 2:]) # residue in the buffer, skip \r\n - - def proxy_protocol(self, line): - """\ - Detect, check and parse proxy protocol. - - :raises: ForbiddenProxyRequest, InvalidProxyLine. - :return: True for proxy protocol line else False - """ - if not self.cfg.proxy_protocol: - return False - - if self.req_number != 1: - return False - - if not line.startswith("PROXY"): - return False - - self.proxy_protocol_access_check() - self.parse_proxy_protocol(line) - - return True - - def proxy_protocol_access_check(self): - # check in allow list - if isinstance(self.unreader, SocketUnreader): - try: - remote_host = self.unreader.sock.getpeername()[0] - except socket.error as e: - if e.args[0] == ENOTCONN: - raise ForbiddenProxyRequest("UNKNOW") - raise - if ("*" not in self.cfg.proxy_allow_ips and - remote_host not in self.cfg.proxy_allow_ips): - raise ForbiddenProxyRequest(remote_host) - - def parse_proxy_protocol(self, line): - bits = line.split() - - if len(bits) != 6: - raise InvalidProxyLine(line) - - # Extract data - proto = bits[1] - s_addr = bits[2] - d_addr = bits[3] - - # Validation - if proto not in ["TCP4", "TCP6"]: - raise InvalidProxyLine("protocol '%s' not supported" % proto) - if proto == "TCP4": - try: - socket.inet_pton(socket.AF_INET, s_addr) - socket.inet_pton(socket.AF_INET, d_addr) - except socket.error: - raise InvalidProxyLine(line) - elif proto == "TCP6": - try: - socket.inet_pton(socket.AF_INET6, s_addr) - socket.inet_pton(socket.AF_INET6, d_addr) - except socket.error: - raise InvalidProxyLine(line) - - try: - s_port = int(bits[4]) - d_port = int(bits[5]) - except ValueError: - raise InvalidProxyLine("invalid port %s" % line) - - if not ((0 <= s_port <= 65535) and (0 <= d_port <= 65535)): - raise InvalidProxyLine("invalid port %s" % line) - - # Set data - self.proxy_protocol_info = { - "proxy_protocol": proto, - "client_addr": s_addr, - "client_port": s_port, - "proxy_addr": d_addr, - "proxy_port": d_port - } - - def parse_request_line(self, line_bytes): - bits = [bytes_to_str(bit) for bit in line_bytes.split(None, 2)] - if len(bits) != 3: - raise InvalidRequestLine(bytes_to_str(line_bytes)) - - # Method - if not METH_RE.match(bits[0]): - raise InvalidRequestMethod(bits[0]) - self.method = bits[0].upper() - - # URI - self.uri = bits[1] - - try: - parts = split_request_uri(self.uri) - except ValueError: - raise InvalidRequestLine(bytes_to_str(line_bytes)) - self.path = parts.path or "" - self.query = parts.query or "" - self.fragment = parts.fragment or "" - - # Version - match = VERSION_RE.match(bits[2]) - if match is None: - raise InvalidHTTPVersion(bits[2]) - self.version = (int(match.group(1)), int(match.group(2))) - - def set_body_reader(self): - super(Request, self).set_body_reader() - if isinstance(self.body.reader, EOFReader): - self.body = Body(LengthReader(self.unreader, 0)) diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/parser.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/parser.py deleted file mode 
100644
index a4a0f1e..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/parser.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# -*- coding: utf-8 -
-#
-# This file is part of gunicorn released under the MIT license.
-# See the NOTICE for more information.
-
-from gunicorn.http.message import Request
-from gunicorn.http.unreader import SocketUnreader, IterUnreader
-
-
-class Parser(object):
-
-    mesg_class = None
-
-    def __init__(self, cfg, source):
-        self.cfg = cfg
-        if hasattr(source, "recv"):
-            self.unreader = SocketUnreader(source)
-        else:
-            self.unreader = IterUnreader(source)
-        self.mesg = None
-
-        # request counter (for keepalive connections)
-        self.req_count = 0
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        # Stop if HTTP dictates a stop.
-        if self.mesg and self.mesg.should_close():
-            raise StopIteration()
-
-        # Discard any unread body of the previous message
-        if self.mesg:
-            data = self.mesg.body.read(8192)
-            while data:
-                data = self.mesg.body.read(8192)
-
-        # Parse the next request
-        self.req_count += 1
-        self.mesg = self.mesg_class(self.cfg, self.unreader, self.req_count)
-        if not self.mesg:
-            raise StopIteration()
-        return self.mesg
-
-    next = __next__
-
-
-class RequestParser(Parser):
-
-    mesg_class = Request
diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/unreader.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/unreader.py
deleted file mode 100644
index 9f312a8..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/unreader.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# -*- coding: utf-8 -
-#
-# This file is part of gunicorn released under the MIT license.
-# See the NOTICE for more information.
-
-import os
-
-from gunicorn import six
-
-# Classes that can undo reading data from
-# a given type of data source.
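The classes below give the parser its one primitive, "put it back": read() drains
an internal buffer before asking chunk() for fresh data, and unread() appends bytes
to that buffer for the next read. A hand-traced behaviour sketch:

    from gunicorn.http.unreader import IterUnreader

    u = IterUnreader([b"hello", b"world"])
    u.read(3)       # b"hel", pulls one chunk and keeps b"lo" buffered
    u.unread(b"p")  # pushed back, the buffer is now b"lop"
    u.read()        # b"lop", size=None drains the buffer before new chunks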
- - -class Unreader(object): - def __init__(self): - self.buf = six.BytesIO() - - def chunk(self): - raise NotImplementedError() - - def read(self, size=None): - if size is not None and not isinstance(size, six.integer_types): - raise TypeError("size parameter must be an int or long.") - - if size is not None: - if size == 0: - return b"" - if size < 0: - size = None - - self.buf.seek(0, os.SEEK_END) - - if size is None and self.buf.tell(): - ret = self.buf.getvalue() - self.buf = six.BytesIO() - return ret - if size is None: - d = self.chunk() - return d - - while self.buf.tell() < size: - chunk = self.chunk() - if not chunk: - ret = self.buf.getvalue() - self.buf = six.BytesIO() - return ret - self.buf.write(chunk) - data = self.buf.getvalue() - self.buf = six.BytesIO() - self.buf.write(data[size:]) - return data[:size] - - def unread(self, data): - self.buf.seek(0, os.SEEK_END) - self.buf.write(data) - - -class SocketUnreader(Unreader): - def __init__(self, sock, max_chunk=8192): - super(SocketUnreader, self).__init__() - self.sock = sock - self.mxchunk = max_chunk - - def chunk(self): - return self.sock.recv(self.mxchunk) - - -class IterUnreader(Unreader): - def __init__(self, iterable): - super(IterUnreader, self).__init__() - self.iter = iter(iterable) - - def chunk(self): - if not self.iter: - return b"" - try: - return six.next(self.iter) - except StopIteration: - self.iter = None - return b"" diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/wsgi.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/wsgi.py deleted file mode 100644 index ff75974..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/http/wsgi.py +++ /dev/null @@ -1,411 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. - -import io -import logging -import os -import re -import sys - -from gunicorn._compat import unquote_to_wsgi_str -from gunicorn.http.message import HEADER_RE -from gunicorn.http.errors import InvalidHeader, InvalidHeaderName -from gunicorn.six import string_types, binary_type, reraise -from gunicorn import SERVER_SOFTWARE -import gunicorn.util as util - -try: - # Python 3.3 has os.sendfile(). - from os import sendfile -except ImportError: - try: - from ._sendfile import sendfile - except ImportError: - sendfile = None - -# Send files in at most 1GB blocks as some operating systems can have problems -# with sending files in blocks over 2GB. -BLKSIZE = 0x3FFFFFFF - -HEADER_VALUE_RE = re.compile(r'[\x00-\x1F\x7F]') - -log = logging.getLogger(__name__) - - -class FileWrapper(object): - - def __init__(self, filelike, blksize=8192): - self.filelike = filelike - self.blksize = blksize - if hasattr(filelike, 'close'): - self.close = filelike.close - - def __getitem__(self, key): - data = self.filelike.read(self.blksize) - if data: - return data - raise IndexError - - -class WSGIErrorsWrapper(io.RawIOBase): - - def __init__(self, cfg): - # There is no public __init__ method for RawIOBase so - # we don't need to call super() in the __init__ method. 
- # pylint: disable=super-init-not-called - errorlog = logging.getLogger("gunicorn.error") - handlers = errorlog.handlers - self.streams = [] - - if cfg.errorlog == "-": - self.streams.append(sys.stderr) - handlers = handlers[1:] - - for h in handlers: - if hasattr(h, "stream"): - self.streams.append(h.stream) - - def write(self, data): - for stream in self.streams: - try: - stream.write(data) - except UnicodeError: - stream.write(data.encode("UTF-8")) - stream.flush() - - -def base_environ(cfg): - return { - "wsgi.errors": WSGIErrorsWrapper(cfg), - "wsgi.version": (1, 0), - "wsgi.multithread": False, - "wsgi.multiprocess": (cfg.workers > 1), - "wsgi.run_once": False, - "wsgi.file_wrapper": FileWrapper, - "SERVER_SOFTWARE": SERVER_SOFTWARE, - } - - -def default_environ(req, sock, cfg): - env = base_environ(cfg) - env.update({ - "wsgi.input": req.body, - "gunicorn.socket": sock, - "REQUEST_METHOD": req.method, - "QUERY_STRING": req.query, - "RAW_URI": req.uri, - "SERVER_PROTOCOL": "HTTP/%s" % ".".join([str(v) for v in req.version]) - }) - return env - - -def proxy_environ(req): - info = req.proxy_protocol_info - - if not info: - return {} - - return { - "PROXY_PROTOCOL": info["proxy_protocol"], - "REMOTE_ADDR": info["client_addr"], - "REMOTE_PORT": str(info["client_port"]), - "PROXY_ADDR": info["proxy_addr"], - "PROXY_PORT": str(info["proxy_port"]), - } - - -def create(req, sock, client, server, cfg): - resp = Response(req, sock, cfg) - - # set initial environ - environ = default_environ(req, sock, cfg) - - # default variables - host = None - script_name = os.environ.get("SCRIPT_NAME", "") - - # add the headers to the environ - for hdr_name, hdr_value in req.headers: - if hdr_name == "EXPECT": - # handle expect - if hdr_value.lower() == "100-continue": - sock.send(b"HTTP/1.1 100 Continue\r\n\r\n") - elif hdr_name == 'HOST': - host = hdr_value - elif hdr_name == "SCRIPT_NAME": - script_name = hdr_value - elif hdr_name == "CONTENT-TYPE": - environ['CONTENT_TYPE'] = hdr_value - continue - elif hdr_name == "CONTENT-LENGTH": - environ['CONTENT_LENGTH'] = hdr_value - continue - - key = 'HTTP_' + hdr_name.replace('-', '_') - if key in environ: - hdr_value = "%s,%s" % (environ[key], hdr_value) - environ[key] = hdr_value - - # set the url scheme - environ['wsgi.url_scheme'] = req.scheme - - # set the REMOTE_* keys in environ - # authors should be aware that REMOTE_HOST and REMOTE_ADDR - # may not qualify the remote addr: - # http://www.ietf.org/rfc/rfc3875 - if isinstance(client, string_types): - environ['REMOTE_ADDR'] = client - elif isinstance(client, binary_type): - environ['REMOTE_ADDR'] = client.decode() - else: - environ['REMOTE_ADDR'] = client[0] - environ['REMOTE_PORT'] = str(client[1]) - - # handle the SERVER_* - # Normally only the application should use the Host header but since the - # WSGI spec doesn't support unix sockets, we are using it to create - # viable SERVER_* if possible. - if isinstance(server, string_types): - server = server.split(":") - if len(server) == 1: - # unix socket - if host: - server = host.split(':') - if len(server) == 1: - if req.scheme == "http": - server.append(80) - elif req.scheme == "https": - server.append(443) - else: - server.append('') - else: - # no host header given which means that we are not behind a - # proxy, so append an empty port. 
- server.append('') - environ['SERVER_NAME'] = server[0] - environ['SERVER_PORT'] = str(server[1]) - - # set the path and script name - path_info = req.path - if script_name: - path_info = path_info.split(script_name, 1)[1] - environ['PATH_INFO'] = unquote_to_wsgi_str(path_info) - environ['SCRIPT_NAME'] = script_name - - # override the environ with the correct remote and server address if - # we are behind a proxy using the proxy protocol. - environ.update(proxy_environ(req)) - return resp, environ - - -class Response(object): - - def __init__(self, req, sock, cfg): - self.req = req - self.sock = sock - self.version = SERVER_SOFTWARE - self.status = None - self.chunked = False - self.must_close = False - self.headers = [] - self.headers_sent = False - self.response_length = None - self.sent = 0 - self.upgrade = False - self.cfg = cfg - - def force_close(self): - self.must_close = True - - def should_close(self): - if self.must_close or self.req.should_close(): - return True - if self.response_length is not None or self.chunked: - return False - if self.req.method == 'HEAD': - return False - if self.status_code < 200 or self.status_code in (204, 304): - return False - return True - - def start_response(self, status, headers, exc_info=None): - if exc_info: - try: - if self.status and self.headers_sent: - reraise(exc_info[0], exc_info[1], exc_info[2]) - finally: - exc_info = None - elif self.status is not None: - raise AssertionError("Response headers already set!") - - self.status = status - - # get the status code from the response here so we can use it to check - # the need for the connection header later without parsing the string - # each time. - try: - self.status_code = int(self.status.split()[0]) - except ValueError: - self.status_code = None - - self.process_headers(headers) - self.chunked = self.is_chunked() - return self.write - - def process_headers(self, headers): - for name, value in headers: - if not isinstance(name, string_types): - raise TypeError('%r is not a string' % name) - - if HEADER_RE.search(name): - raise InvalidHeaderName('%r' % name) - - if HEADER_VALUE_RE.search(value): - raise InvalidHeader('%r' % value) - - value = str(value).strip() - lname = name.lower().strip() - if lname == "content-length": - self.response_length = int(value) - elif util.is_hoppish(name): - if lname == "connection": - # handle websocket - if value.lower().strip() == "upgrade": - self.upgrade = True - elif lname == "upgrade": - if value.lower().strip() == "websocket": - self.headers.append((name.strip(), value)) - - # ignore hopbyhop headers - continue - self.headers.append((name.strip(), value)) - - def is_chunked(self): - # Only use chunked responses when the client is - # speaking HTTP/1.1 or newer and there was - # no Content-Length header set. - if self.response_length is not None: - return False - elif self.req.version <= (1, 0): - return False - elif self.req.method == 'HEAD': - # Responses to a HEAD request MUST NOT contain a response body. - return False - elif self.status_code in (204, 304): - # Do not use chunked responses when the response is guaranteed to - # not have a response body. 
- return False - return True - - def default_headers(self): - # set the connection header - if self.upgrade: - connection = "upgrade" - elif self.should_close(): - connection = "close" - else: - connection = "keep-alive" - - headers = [ - "HTTP/%s.%s %s\r\n" % (self.req.version[0], - self.req.version[1], self.status), - "Server: %s\r\n" % self.version, - "Date: %s\r\n" % util.http_date(), - "Connection: %s\r\n" % connection - ] - if self.chunked: - headers.append("Transfer-Encoding: chunked\r\n") - return headers - - def send_headers(self): - if self.headers_sent: - return - tosend = self.default_headers() - tosend.extend(["%s: %s\r\n" % (k, v) for k, v in self.headers]) - - header_str = "%s\r\n" % "".join(tosend) - util.write(self.sock, util.to_bytestring(header_str, "ascii")) - self.headers_sent = True - - def write(self, arg): - self.send_headers() - if not isinstance(arg, binary_type): - raise TypeError('%r is not a byte' % arg) - arglen = len(arg) - tosend = arglen - if self.response_length is not None: - if self.sent >= self.response_length: - # Never write more than self.response_length bytes - return - - tosend = min(self.response_length - self.sent, tosend) - if tosend < arglen: - arg = arg[:tosend] - - # Sending an empty chunk signals the end of the - # response and prematurely closes the response - if self.chunked and tosend == 0: - return - - self.sent += tosend - util.write(self.sock, arg, self.chunked) - - def can_sendfile(self): - return self.cfg.sendfile is not False and sendfile is not None - - def sendfile(self, respiter): - if self.cfg.is_ssl or not self.can_sendfile(): - return False - - if not util.has_fileno(respiter.filelike): - return False - - fileno = respiter.filelike.fileno() - try: - offset = os.lseek(fileno, 0, os.SEEK_CUR) - if self.response_length is None: - filesize = os.fstat(fileno).st_size - - # The file may be special and sendfile will fail. - # It may also be zero-length, but that is okay. - if filesize == 0: - return False - - nbytes = filesize - offset - else: - nbytes = self.response_length - except (OSError, io.UnsupportedOperation): - return False - - self.send_headers() - - if self.is_chunked(): - chunk_size = "%X\r\n" % nbytes - self.sock.sendall(chunk_size.encode('utf-8')) - - sockno = self.sock.fileno() - sent = 0 - - while sent != nbytes: - count = min(nbytes - sent, BLKSIZE) - sent += sendfile(sockno, fileno, offset + sent, count) - - if self.is_chunked(): - self.sock.sendall(b"\r\n") - - os.lseek(fileno, offset, os.SEEK_SET) - - return True - - def write_file(self, respiter): - if not self.sendfile(respiter): - for item in respiter: - self.write(item) - - def close(self): - if not self.headers_sent: - self.send_headers() - if self.chunked: - util.write_chunk(self.sock, b"") diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/instrument/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/instrument/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/instrument/statsd.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/instrument/statsd.py deleted file mode 100644 index 4bbcb20..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/instrument/statsd.py +++ /dev/null @@ -1,123 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. 
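For orientation, Response.send_headers() above joins default_headers() with the
application's own headers into one CRLF-separated block before any body bytes are
written. A keep-alive 200 response therefore starts roughly like this (Server and
Date values vary):

    HTTP/1.1 200 OK
    Server: gunicorn/19.7.1
    Date: Sat, 06 Jan 2018 00:00:00 GMT
    Connection: keep-alive
    Content-Type: text/plain
    (blank line, then the body)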
-
-"Bare-bones implementation of statsD's protocol, client-side"
-
-import socket
-import logging
-from re import sub
-
-from gunicorn.glogging import Logger
-from gunicorn import six
-
-# Instrumentation constants
-METRIC_VAR = "metric"
-VALUE_VAR = "value"
-MTYPE_VAR = "mtype"
-GAUGE_TYPE = "gauge"
-COUNTER_TYPE = "counter"
-HISTOGRAM_TYPE = "histogram"
-
-class Statsd(Logger):
-    """statsD-based instrumentation, that passes as a logger
-    """
-    def __init__(self, cfg):
-        """host, port: statsD server
-        """
-        Logger.__init__(self, cfg)
-        self.prefix = sub(r"^(.+[^.]+)\.*$", "\\g<1>.", cfg.statsd_prefix)
-        try:
-            host, port = cfg.statsd_host
-            self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-            self.sock.connect((host, int(port)))
-        except Exception:
-            self.sock = None
-
-    # Log errors and warnings
-    def critical(self, msg, *args, **kwargs):
-        Logger.critical(self, msg, *args, **kwargs)
-        self.increment("gunicorn.log.critical", 1)
-
-    def error(self, msg, *args, **kwargs):
-        Logger.error(self, msg, *args, **kwargs)
-        self.increment("gunicorn.log.error", 1)
-
-    def warning(self, msg, *args, **kwargs):
-        Logger.warning(self, msg, *args, **kwargs)
-        self.increment("gunicorn.log.warning", 1)
-
-    def exception(self, msg, *args, **kwargs):
-        Logger.exception(self, msg, *args, **kwargs)
-        self.increment("gunicorn.log.exception", 1)
-
-    # Special treatment for info, the most common log level
-    def info(self, msg, *args, **kwargs):
-        self.log(logging.INFO, msg, *args, **kwargs)
-
-    # skip the run-of-the-mill logs
-    def debug(self, msg, *args, **kwargs):
-        self.log(logging.DEBUG, msg, *args, **kwargs)
-
-    def log(self, lvl, msg, *args, **kwargs):
-        """Log a given statistic if metric, value and type are present
-        """
-        try:
-            extra = kwargs.get("extra", None)
-            if extra is not None:
-                metric = extra.get(METRIC_VAR, None)
-                value = extra.get(VALUE_VAR, None)
-                typ = extra.get(MTYPE_VAR, None)
-                if metric and value and typ:
-                    if typ == GAUGE_TYPE:
-                        self.gauge(metric, value)
-                    elif typ == COUNTER_TYPE:
-                        self.increment(metric, value)
-                    elif typ == HISTOGRAM_TYPE:
-                        self.histogram(metric, value)
-                    else:
-                        pass
-
-            # Log to parent logger only if there is something to say
-            if msg:
-                Logger.log(self, lvl, msg, *args, **kwargs)
-        except Exception:
-            Logger.warning(self, "Failed to log to statsd", exc_info=True)
-
-    # access logging
-    def access(self, resp, req, environ, request_time):
-        """Measure request duration
-        request_time is a datetime.timedelta
-        """
-        Logger.access(self, resp, req, environ, request_time)
-        duration_in_ms = request_time.seconds * 1000 + float(request_time.microseconds) / 10 ** 3
-        status = resp.status
-        if isinstance(status, str):
-            status = int(status.split(None, 1)[0])
-        self.histogram("gunicorn.request.duration", duration_in_ms)
-        self.increment("gunicorn.requests", 1)
-        self.increment("gunicorn.request.status.%d" % status, 1)
-
-    # statsD methods
-    # you can use those directly if you want
-    def gauge(self, name, value):
-        self._sock_send("{0}{1}:{2}|g".format(self.prefix, name, value))
-
-    def increment(self, name, value, sampling_rate=1.0):
-        self._sock_send("{0}{1}:{2}|c|@{3}".format(self.prefix, name, value, sampling_rate))
-
-    def decrement(self, name, value, sampling_rate=1.0):
-        self._sock_send("{0}{1}:-{2}|c|@{3}".format(self.prefix, name, value, sampling_rate))
-
-    def histogram(self, name, value):
-        self._sock_send("{0}{1}:{2}|ms".format(self.prefix, name, value))
-
-    def _sock_send(self, msg):
-        try:
-            if isinstance(msg, six.text_type):
-                msg = 
msg.encode("ascii") - if self.sock: - self.sock.send(msg) - except Exception: - Logger.warning(self, "Error sending message to statsd", exc_info=True) diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/pidfile.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/pidfile.py deleted file mode 100644 index a6e085f..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/pidfile.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. - -import errno -import os -import tempfile - - -class Pidfile(object): - """\ - Manage a PID file. If a specific name is provided - it and '"%s.oldpid" % name' will be used. Otherwise - we create a temp file using os.mkstemp. - """ - - def __init__(self, fname): - self.fname = fname - self.pid = None - - def create(self, pid): - oldpid = self.validate() - if oldpid: - if oldpid == os.getpid(): - return - msg = "Already running on PID %s (or pid file '%s' is stale)" - raise RuntimeError(msg % (oldpid, self.fname)) - - self.pid = pid - - # Write pidfile - fdir = os.path.dirname(self.fname) - if fdir and not os.path.isdir(fdir): - raise RuntimeError("%s doesn't exist. Can't create pidfile." % fdir) - fd, fname = tempfile.mkstemp(dir=fdir) - os.write(fd, ("%s\n" % self.pid).encode('utf-8')) - if self.fname: - os.rename(fname, self.fname) - else: - self.fname = fname - os.close(fd) - - # set permissions to -rw-r--r-- - os.chmod(self.fname, 420) - - def rename(self, path): - self.unlink() - self.fname = path - self.create(self.pid) - - def unlink(self): - """ delete pidfile""" - try: - with open(self.fname, "r") as f: - pid1 = int(f.read() or 0) - - if pid1 == self.pid: - os.unlink(self.fname) - except: - pass - - def validate(self): - """ Validate pidfile and make it stale if needed""" - if not self.fname: - return - try: - with open(self.fname, "r") as f: - try: - wpid = int(f.read()) - except ValueError: - return - - try: - os.kill(wpid, 0) - return wpid - except OSError as e: - if e.args[0] == errno.EPERM: - return wpid - if e.args[0] == errno.ESRCH: - return - raise - except IOError as e: - if e.args[0] == errno.ENOENT: - return - raise diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/reloader.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/reloader.py deleted file mode 100644 index c879885..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/reloader.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. 
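The Statsd logger above speaks the plain-text statsd datagram protocol over UDP.
With statsd_prefix set to "app.", the helper methods emit payloads shaped like the
following (metric names and values illustrative only):

    app.gunicorn.requests:1|c|@1.0         # increment(), a sampled counter
    app.gunicorn.workers:4|g               # gauge()
    app.gunicorn.request.duration:12.5|ms  # histogram(), a timer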
- -import os -import os.path -import re -import sys -import time -import threading - -COMPILED_EXT_RE = re.compile(r'py[co]$') - - -class Reloader(threading.Thread): - def __init__(self, extra_files=None, interval=1, callback=None): - super(Reloader, self).__init__() - self.setDaemon(True) - self._extra_files = set(extra_files or ()) - self._extra_files_lock = threading.RLock() - self._interval = interval - self._callback = callback - - def add_extra_file(self, filename): - with self._extra_files_lock: - self._extra_files.add(filename) - - def get_files(self): - fnames = [ - COMPILED_EXT_RE.sub('py', module.__file__) - for module in tuple(sys.modules.values()) - if getattr(module, '__file__', None) - ] - - with self._extra_files_lock: - fnames.extend(self._extra_files) - - return fnames - - def run(self): - mtimes = {} - while True: - for filename in self.get_files(): - try: - mtime = os.stat(filename).st_mtime - except OSError: - continue - old_time = mtimes.get(filename) - if old_time is None: - mtimes[filename] = mtime - continue - elif mtime > old_time: - if self._callback: - self._callback(filename) - time.sleep(self._interval) - -has_inotify = False -if sys.platform.startswith('linux'): - try: - from inotify.adapters import Inotify - import inotify.constants - has_inotify = True - except ImportError: - pass - - -if has_inotify: - - class InotifyReloader(threading.Thread): - event_mask = (inotify.constants.IN_CREATE | inotify.constants.IN_DELETE - | inotify.constants.IN_DELETE_SELF | inotify.constants.IN_MODIFY - | inotify.constants.IN_MOVE_SELF | inotify.constants.IN_MOVED_FROM - | inotify.constants.IN_MOVED_TO) - - def __init__(self, extra_files=None, callback=None): - super(InotifyReloader, self).__init__() - self.setDaemon(True) - self._callback = callback - self._dirs = set() - self._watcher = Inotify() - - for extra_file in extra_files: - self.add_extra_file(extra_file) - - def add_extra_file(self, filename): - dirname = os.path.dirname(filename) - - if dirname in self._dirs: - return - - self._watcher.add_watch(dirname, mask=self.event_mask) - self._dirs.add(dirname) - - def get_dirs(self): - fnames = [ - os.path.dirname(COMPILED_EXT_RE.sub('py', module.__file__)) - for module in tuple(sys.modules.values()) - if hasattr(module, '__file__') - ] - - return set(fnames) - - def run(self): - self._dirs = self.get_dirs() - - for dirname in self._dirs: - self._watcher.add_watch(dirname, mask=self.event_mask) - - for event in self._watcher.event_gen(): - if event is None: - continue - - filename = event[3] - - self._callback(filename) - -else: - - class InotifyReloader(object): - def __init__(self, callback=None): - raise ImportError('You must have the inotify module installed to ' - 'use the inotify reloader') - - -preferred_reloader = InotifyReloader if has_inotify else Reloader - -reloader_engines = { - 'auto': preferred_reloader, - 'poll': Reloader, - 'inotify': InotifyReloader, -} diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/selectors.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/selectors.py deleted file mode 100644 index cdae569..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/selectors.py +++ /dev/null @@ -1,592 +0,0 @@ -"""Selectors module. - -This module allows high-level and efficient I/O multiplexing, built upon the -`select` module primitives. - -The following code adapted from trollius.selectors. 
-""" - - -from abc import ABCMeta, abstractmethod -from collections import namedtuple, Mapping -import math -import select -import sys - -from gunicorn._compat import wrap_error, InterruptedError -from gunicorn import six - - -# generic events, that must be mapped to implementation-specific ones -EVENT_READ = (1 << 0) -EVENT_WRITE = (1 << 1) - - -def _fileobj_to_fd(fileobj): - """Return a file descriptor from a file object. - - Parameters: - fileobj -- file object or file descriptor - - Returns: - corresponding file descriptor - - Raises: - ValueError if the object is invalid - """ - if isinstance(fileobj, six.integer_types): - fd = fileobj - else: - try: - fd = int(fileobj.fileno()) - except (AttributeError, TypeError, ValueError): - raise ValueError("Invalid file object: " - "{0!r}".format(fileobj)) - if fd < 0: - raise ValueError("Invalid file descriptor: {0}".format(fd)) - return fd - - -SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data']) -"""Object used to associate a file object to its backing file descriptor, -selected event mask and attached data.""" - - -class _SelectorMapping(Mapping): - """Mapping of file objects to selector keys.""" - - def __init__(self, selector): - self._selector = selector - - def __len__(self): - return len(self._selector._fd_to_key) - - def __getitem__(self, fileobj): - try: - fd = self._selector._fileobj_lookup(fileobj) - return self._selector._fd_to_key[fd] - except KeyError: - raise KeyError("{0!r} is not registered".format(fileobj)) - - def __iter__(self): - return iter(self._selector._fd_to_key) - - -class BaseSelector(six.with_metaclass(ABCMeta)): - """Selector abstract base class. - - A selector supports registering file objects to be monitored for specific - I/O events. - - A file object is a file descriptor or any object with a `fileno()` method. - An arbitrary object can be attached to the file object, which can be used - for example to store context information, a callback, etc. - - A selector can use various implementations (select(), poll(), epoll()...) - depending on the platform. The default `Selector` class uses the most - efficient implementation on the current platform. - """ - - @abstractmethod - def register(self, fileobj, events, data=None): - """Register a file object. - - Parameters: - fileobj -- file object or file descriptor - events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE) - data -- attached data - - Returns: - SelectorKey instance - - Raises: - ValueError if events is invalid - KeyError if fileobj is already registered - OSError if fileobj is closed or otherwise is unacceptable to - the underlying system call (if a system call is made) - - Note: - OSError may or may not be raised - """ - raise NotImplementedError - - @abstractmethod - def unregister(self, fileobj): - """Unregister a file object. - - Parameters: - fileobj -- file object or file descriptor - - Returns: - SelectorKey instance - - Raises: - KeyError if fileobj is not registered - - Note: - If fileobj is registered but has since been closed this does - *not* raise OSError (even if the wrapped syscall does) - """ - raise NotImplementedError - - def modify(self, fileobj, events, data=None): - """Change a registered file object monitored events or attached data. 
- - Parameters: - fileobj -- file object or file descriptor - events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE) - data -- attached data - - Returns: - SelectorKey instance - - Raises: - Anything that unregister() or register() raises - """ - self.unregister(fileobj) - return self.register(fileobj, events, data) - - @abstractmethod - def select(self, timeout=None): - """Perform the actual selection, until some monitored file objects are - ready or a timeout expires. - - Parameters: - timeout -- if timeout > 0, this specifies the maximum wait time, in - seconds - if timeout <= 0, the select() call won't block, and will - report the currently ready file objects - if timeout is None, select() will block until a monitored - file object becomes ready - - Returns: - list of (key, events) for ready file objects - `events` is a bitwise mask of EVENT_READ|EVENT_WRITE - """ - raise NotImplementedError - - def close(self): - """Close the selector. - - This must be called to make sure that any underlying resource is freed. - """ - pass - - def get_key(self, fileobj): - """Return the key associated to a registered file object. - - Returns: - SelectorKey for this file object - """ - mapping = self.get_map() - try: - return mapping[fileobj] - except KeyError: - raise KeyError("{0!r} is not registered".format(fileobj)) - - @abstractmethod - def get_map(self): - """Return a mapping of file objects to selector keys.""" - raise NotImplementedError - - def __enter__(self): - return self - - def __exit__(self, *args): - self.close() - - -class _BaseSelectorImpl(BaseSelector): - """Base selector implementation.""" - - def __init__(self): - # this maps file descriptors to keys - self._fd_to_key = {} - # read-only mapping returned by get_map() - self._map = _SelectorMapping(self) - - def _fileobj_lookup(self, fileobj): - """Return a file descriptor from a file object. - - This wraps _fileobj_to_fd() to do an exhaustive search in case - the object is invalid but we still have it in our map. This - is used by unregister() so we can unregister an object that - was previously registered even if it is closed. It is also - used by _SelectorMapping. - """ - try: - return _fileobj_to_fd(fileobj) - except ValueError: - # Do an exhaustive search. - for key in self._fd_to_key.values(): - if key.fileobj is fileobj: - return key.fd - # Raise ValueError after all. - raise - - def register(self, fileobj, events, data=None): - if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)): - raise ValueError("Invalid events: {0!r}".format(events)) - - key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data) - - if key.fd in self._fd_to_key: - raise KeyError("{0!r} (FD {1}) is already registered" - .format(fileobj, key.fd)) - - self._fd_to_key[key.fd] = key - return key - - def unregister(self, fileobj): - try: - key = self._fd_to_key.pop(self._fileobj_lookup(fileobj)) - except KeyError: - raise KeyError("{0!r} is not registered".format(fileobj)) - return key - - def modify(self, fileobj, events, data=None): - # TODO: Subclasses can probably optimize this even further. - try: - key = self._fd_to_key[self._fileobj_lookup(fileobj)] - except KeyError: - raise KeyError("{0!r} is not registered".format(fileobj)) - if events != key.events: - self.unregister(fileobj) - key = self.register(fileobj, events, data) - elif data != key.data: - # Use a shortcut to update the data. 
- key = key._replace(data=data) - self._fd_to_key[key.fd] = key - return key - - def close(self): - self._fd_to_key.clear() - - def get_map(self): - return self._map - - def _key_from_fd(self, fd): - """Return the key associated to a given file descriptor. - - Parameters: - fd -- file descriptor - - Returns: - corresponding key, or None if not found - """ - try: - return self._fd_to_key[fd] - except KeyError: - return None - - -class SelectSelector(_BaseSelectorImpl): - """Select-based selector.""" - - def __init__(self): - super(SelectSelector, self).__init__() - self._readers = set() - self._writers = set() - - def register(self, fileobj, events, data=None): - key = super(SelectSelector, self).register(fileobj, events, data) - if events & EVENT_READ: - self._readers.add(key.fd) - if events & EVENT_WRITE: - self._writers.add(key.fd) - return key - - def unregister(self, fileobj): - key = super(SelectSelector, self).unregister(fileobj) - self._readers.discard(key.fd) - self._writers.discard(key.fd) - return key - - if sys.platform == 'win32': - def _select(self, r, w, _, timeout=None): - r, w, x = select.select(r, w, w, timeout) - return r, w + x, [] - else: - _select = select.select - - def select(self, timeout=None): - timeout = None if timeout is None else max(timeout, 0) - ready = [] - try: - r, w, _ = wrap_error(self._select, - self._readers, self._writers, [], timeout) - except InterruptedError: - return ready - r = set(r) - w = set(w) - for fd in r | w: - events = 0 - if fd in r: - events |= EVENT_READ - if fd in w: - events |= EVENT_WRITE - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - return ready - - -if hasattr(select, 'poll'): - - class PollSelector(_BaseSelectorImpl): - """Poll-based selector.""" - - def __init__(self): - super(PollSelector, self).__init__() - self._poll = select.poll() - - def register(self, fileobj, events, data=None): - key = super(PollSelector, self).register(fileobj, events, data) - poll_events = 0 - if events & EVENT_READ: - poll_events |= select.POLLIN - if events & EVENT_WRITE: - poll_events |= select.POLLOUT - self._poll.register(key.fd, poll_events) - return key - - def unregister(self, fileobj): - key = super(PollSelector, self).unregister(fileobj) - self._poll.unregister(key.fd) - return key - - def select(self, timeout=None): - if timeout is None: - timeout = None - elif timeout <= 0: - timeout = 0 - else: - # poll() has a resolution of 1 millisecond, round away from - # zero to wait *at least* timeout seconds. 
- timeout = int(math.ceil(timeout * 1e3)) - ready = [] - try: - fd_event_list = wrap_error(self._poll.poll, timeout) - except InterruptedError: - return ready - for fd, event in fd_event_list: - events = 0 - if event & ~select.POLLIN: - events |= EVENT_WRITE - if event & ~select.POLLOUT: - events |= EVENT_READ - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - return ready - - -if hasattr(select, 'epoll'): - - class EpollSelector(_BaseSelectorImpl): - """Epoll-based selector.""" - - def __init__(self): - super(EpollSelector, self).__init__() - self._epoll = select.epoll() - - def fileno(self): - return self._epoll.fileno() - - def register(self, fileobj, events, data=None): - key = super(EpollSelector, self).register(fileobj, events, data) - epoll_events = 0 - if events & EVENT_READ: - epoll_events |= select.EPOLLIN - if events & EVENT_WRITE: - epoll_events |= select.EPOLLOUT - self._epoll.register(key.fd, epoll_events) - return key - - def unregister(self, fileobj): - key = super(EpollSelector, self).unregister(fileobj) - try: - self._epoll.unregister(key.fd) - except OSError: - # This can happen if the FD was closed since it - # was registered. - pass - return key - - def select(self, timeout=None): - if timeout is None: - timeout = -1 - elif timeout <= 0: - timeout = 0 - else: - # epoll_wait() has a resolution of 1 millisecond, round away - # from zero to wait *at least* timeout seconds. - timeout = math.ceil(timeout * 1e3) * 1e-3 - max_ev = len(self._fd_to_key) - ready = [] - try: - fd_event_list = wrap_error(self._epoll.poll, timeout, max_ev) - except InterruptedError: - return ready - for fd, event in fd_event_list: - events = 0 - if event & ~select.EPOLLIN: - events |= EVENT_WRITE - if event & ~select.EPOLLOUT: - events |= EVENT_READ - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - return ready - - def close(self): - self._epoll.close() - super(EpollSelector, self).close() - - -if hasattr(select, 'devpoll'): - - class DevpollSelector(_BaseSelectorImpl): - """Solaris /dev/poll selector.""" - - def __init__(self): - super(DevpollSelector, self).__init__() - self._devpoll = select.devpoll() - - def fileno(self): - return self._devpoll.fileno() - - def register(self, fileobj, events, data=None): - key = super(DevpollSelector, self).register(fileobj, events, data) - poll_events = 0 - if events & EVENT_READ: - poll_events |= select.POLLIN - if events & EVENT_WRITE: - poll_events |= select.POLLOUT - self._devpoll.register(key.fd, poll_events) - return key - - def unregister(self, fileobj): - key = super(DevpollSelector, self).unregister(fileobj) - self._devpoll.unregister(key.fd) - return key - - def select(self, timeout=None): - if timeout is None: - timeout = None - elif timeout <= 0: - timeout = 0 - else: - # devpoll() has a resolution of 1 millisecond, round away from - # zero to wait *at least* timeout seconds. 
- timeout = math.ceil(timeout * 1e3) - ready = [] - try: - fd_event_list = self._devpoll.poll(timeout) - except InterruptedError: - return ready - for fd, event in fd_event_list: - events = 0 - if event & ~select.POLLIN: - events |= EVENT_WRITE - if event & ~select.POLLOUT: - events |= EVENT_READ - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - return ready - - def close(self): - self._devpoll.close() - super(DevpollSelector, self).close() - - -if hasattr(select, 'kqueue'): - - class KqueueSelector(_BaseSelectorImpl): - """Kqueue-based selector.""" - - def __init__(self): - super(KqueueSelector, self).__init__() - self._kqueue = select.kqueue() - - def fileno(self): - return self._kqueue.fileno() - - def register(self, fileobj, events, data=None): - key = super(KqueueSelector, self).register(fileobj, events, data) - if events & EVENT_READ: - kev = select.kevent(key.fd, select.KQ_FILTER_READ, - select.KQ_EV_ADD) - self._kqueue.control([kev], 0, 0) - if events & EVENT_WRITE: - kev = select.kevent(key.fd, select.KQ_FILTER_WRITE, - select.KQ_EV_ADD) - self._kqueue.control([kev], 0, 0) - return key - - def unregister(self, fileobj): - key = super(KqueueSelector, self).unregister(fileobj) - if key.events & EVENT_READ: - kev = select.kevent(key.fd, select.KQ_FILTER_READ, - select.KQ_EV_DELETE) - try: - self._kqueue.control([kev], 0, 0) - except OSError: - # This can happen if the FD was closed since it - # was registered. - pass - if key.events & EVENT_WRITE: - kev = select.kevent(key.fd, select.KQ_FILTER_WRITE, - select.KQ_EV_DELETE) - try: - self._kqueue.control([kev], 0, 0) - except OSError: - # See comment above. - pass - return key - - def select(self, timeout=None): - timeout = None if timeout is None else max(timeout, 0) - max_ev = len(self._fd_to_key) - ready = [] - try: - kev_list = wrap_error(self._kqueue.control, - None, max_ev, timeout) - except InterruptedError: - return ready - for kev in kev_list: - fd = kev.ident - flag = kev.filter - events = 0 - if flag == select.KQ_FILTER_READ: - events |= EVENT_READ - if flag == select.KQ_FILTER_WRITE: - events |= EVENT_WRITE - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - return ready - - def close(self): - self._kqueue.close() - super(KqueueSelector, self).close() - - -# Choose the best implementation: roughly, epoll|kqueue|devpoll > poll > select. 
-# select() also can't accept a FD > FD_SETSIZE (usually around 1024) -if 'KqueueSelector' in globals(): - DefaultSelector = KqueueSelector -elif 'EpollSelector' in globals(): - DefaultSelector = EpollSelector -elif 'DevpollSelector' in globals(): - DefaultSelector = DevpollSelector -elif 'PollSelector' in globals(): - DefaultSelector = PollSelector -else: - DefaultSelector = SelectSelector diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/six.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/six.py deleted file mode 100644 index 21b0e80..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/six.py +++ /dev/null @@ -1,762 +0,0 @@ -"""Utilities for writing code that runs on Python 2 and 3""" - -# Copyright (c) 2010-2014 Benjamin Peterson -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -from __future__ import absolute_import - -import functools -import operator -import sys -import types - -__author__ = "Benjamin Peterson " -__version__ = "1.8.0" - - -# Useful for very coarse version differentiation. -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 - -if PY3: - string_types = str, - integer_types = int, - class_types = type, - text_type = str - binary_type = bytes - - MAXSIZE = sys.maxsize -else: - string_types = basestring, - integer_types = (int, long) - class_types = (type, types.ClassType) - text_type = unicode - binary_type = str - - if sys.platform.startswith("java"): - # Jython always uses 32 bits. - MAXSIZE = int((1 << 31) - 1) - else: - # It's possible to have sizeof(long) != sizeof(Py_ssize_t). - class X(object): - def __len__(self): - return 1 << 31 - try: - len(X()) - except OverflowError: - # 32-bit - MAXSIZE = int((1 << 31) - 1) - else: - # 64-bit - MAXSIZE = int((1 << 63) - 1) - del X - - -def _add_doc(func, doc): - """Add documentation to a function.""" - func.__doc__ = doc - - -def _import_module(name): - """Import module, returning the module after the last dot.""" - __import__(name) - return sys.modules[name] - - -class _LazyDescr(object): - - def __init__(self, name): - self.name = name - - def __get__(self, obj, tp): - result = self._resolve() - setattr(obj, self.name, result) # Invokes __set__. - # This is a bit ugly, but it avoids running this again. 
- delattr(obj.__class__, self.name) - return result - - -class MovedModule(_LazyDescr): - - def __init__(self, name, old, new=None): - super(MovedModule, self).__init__(name) - if PY3: - if new is None: - new = name - self.mod = new - else: - self.mod = old - - def _resolve(self): - return _import_module(self.mod) - - def __getattr__(self, attr): - _module = self._resolve() - value = getattr(_module, attr) - setattr(self, attr, value) - return value - - -class _LazyModule(types.ModuleType): - - def __init__(self, name): - super(_LazyModule, self).__init__(name) - self.__doc__ = self.__class__.__doc__ - - def __dir__(self): - attrs = ["__doc__", "__name__"] - attrs += [attr.name for attr in self._moved_attributes] - return attrs - - # Subclasses should override this - _moved_attributes = [] - - -class MovedAttribute(_LazyDescr): - - def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): - super(MovedAttribute, self).__init__(name) - if PY3: - if new_mod is None: - new_mod = name - self.mod = new_mod - if new_attr is None: - if old_attr is None: - new_attr = name - else: - new_attr = old_attr - self.attr = new_attr - else: - self.mod = old_mod - if old_attr is None: - old_attr = name - self.attr = old_attr - - def _resolve(self): - module = _import_module(self.mod) - return getattr(module, self.attr) - - -class _SixMetaPathImporter(object): - """ - A meta path importer to import six.moves and its submodules. - - This class implements a PEP302 finder and loader. It should be compatible - with Python 2.5 and all existing versions of Python3 - """ - def __init__(self, six_module_name): - self.name = six_module_name - self.known_modules = {} - - def _add_module(self, mod, *fullnames): - for fullname in fullnames: - self.known_modules[self.name + "." + fullname] = mod - - def _get_module(self, fullname): - return self.known_modules[self.name + "." + fullname] - - def find_module(self, fullname, path=None): - if fullname in self.known_modules: - return self - return None - - def __get_module(self, fullname): - try: - return self.known_modules[fullname] - except KeyError: - raise ImportError("This loader does not know module " + fullname) - - def load_module(self, fullname): - try: - # in case of a reload - return sys.modules[fullname] - except KeyError: - pass - mod = self.__get_module(fullname) - if isinstance(mod, MovedModule): - mod = mod._resolve() - else: - mod.__loader__ = self - sys.modules[fullname] = mod - return mod - - def is_package(self, fullname): - """ - Return true, if the named module is a package. 
- - We need this method to get correct spec objects with - Python 3.4 (see PEP451) - """ - return hasattr(self.__get_module(fullname), "__path__") - - def get_code(self, fullname): - """Return None - - Required, if is_package is implemented""" - self.__get_module(fullname) # eventually raises ImportError - return None - get_source = get_code # same as get_code - -_importer = _SixMetaPathImporter(__name__) - - -class _MovedItems(_LazyModule): - """Lazy loading of moved objects""" - __path__ = [] # mark as package - - -_moved_attributes = [ - MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), - MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), - MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), - MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), - MovedAttribute("intern", "__builtin__", "sys"), - MovedAttribute("map", "itertools", "builtins", "imap", "map"), - MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("reload_module", "__builtin__", "imp", "reload"), - MovedAttribute("reduce", "__builtin__", "functools"), - MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), - MovedAttribute("StringIO", "StringIO", "io"), - MovedAttribute("UserDict", "UserDict", "collections"), - MovedAttribute("UserList", "UserList", "collections"), - MovedAttribute("UserString", "UserString", "collections"), - MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), - MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), - - MovedModule("builtins", "__builtin__"), - MovedModule("configparser", "ConfigParser"), - MovedModule("copyreg", "copy_reg"), - MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), - MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), - MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), - MovedModule("http_cookies", "Cookie", "http.cookies"), - MovedModule("html_entities", "htmlentitydefs", "html.entities"), - MovedModule("html_parser", "HTMLParser", "html.parser"), - MovedModule("http_client", "httplib", "http.client"), - MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), - MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), - MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), - MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), - MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), - MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), - MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), - MovedModule("cPickle", "cPickle", "pickle"), - MovedModule("queue", "Queue"), - MovedModule("reprlib", "repr"), - MovedModule("socketserver", "SocketServer"), - MovedModule("_thread", "thread", "_thread"), - MovedModule("tkinter", "Tkinter"), - MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), - MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), - MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), - MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), - MovedModule("tkinter_tix", "Tix", "tkinter.tix"), - MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), - MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), - MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), - MovedModule("tkinter_colorchooser", 
"tkColorChooser", - "tkinter.colorchooser"), - MovedModule("tkinter_commondialog", "tkCommonDialog", - "tkinter.commondialog"), - MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), - MovedModule("tkinter_font", "tkFont", "tkinter.font"), - MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), - MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", - "tkinter.simpledialog"), - MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), - MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), - MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), - MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), - MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), - MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), - MovedModule("winreg", "_winreg"), -] -for attr in _moved_attributes: - setattr(_MovedItems, attr.name, attr) - if isinstance(attr, MovedModule): - _importer._add_module(attr, "moves." + attr.name) -del attr - -_MovedItems._moved_attributes = _moved_attributes - -moves = _MovedItems(__name__ + ".moves") -_importer._add_module(moves, "moves") - - -class Module_six_moves_urllib_parse(_LazyModule): - """Lazy loading of moved objects in six.moves.urllib_parse""" - - -_urllib_parse_moved_attributes = [ - MovedAttribute("ParseResult", "urlparse", "urllib.parse"), - MovedAttribute("SplitResult", "urlparse", "urllib.parse"), - MovedAttribute("parse_qs", "urlparse", "urllib.parse"), - MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), - MovedAttribute("urldefrag", "urlparse", "urllib.parse"), - MovedAttribute("urljoin", "urlparse", "urllib.parse"), - MovedAttribute("urlparse", "urlparse", "urllib.parse"), - MovedAttribute("urlsplit", "urlparse", "urllib.parse"), - MovedAttribute("urlunparse", "urlparse", "urllib.parse"), - MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), - MovedAttribute("quote", "urllib", "urllib.parse"), - MovedAttribute("quote_plus", "urllib", "urllib.parse"), - MovedAttribute("unquote", "urllib", "urllib.parse"), - MovedAttribute("unquote_plus", "urllib", "urllib.parse"), - MovedAttribute("urlencode", "urllib", "urllib.parse"), - MovedAttribute("splitquery", "urllib", "urllib.parse"), - MovedAttribute("splittag", "urllib", "urllib.parse"), - MovedAttribute("splituser", "urllib", "urllib.parse"), - MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), - MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), - MovedAttribute("uses_params", "urlparse", "urllib.parse"), - MovedAttribute("uses_query", "urlparse", "urllib.parse"), - MovedAttribute("uses_relative", "urlparse", "urllib.parse"), -] -for attr in _urllib_parse_moved_attributes: - setattr(Module_six_moves_urllib_parse, attr.name, attr) -del attr - -Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes - -_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), - "moves.urllib_parse", "moves.urllib.parse") - - -class Module_six_moves_urllib_error(_LazyModule): - """Lazy loading of moved objects in six.moves.urllib_error""" - - -_urllib_error_moved_attributes = [ - MovedAttribute("URLError", "urllib2", "urllib.error"), - MovedAttribute("HTTPError", "urllib2", "urllib.error"), - MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), -] -for attr in _urllib_error_moved_attributes: - setattr(Module_six_moves_urllib_error, attr.name, attr) -del attr - 
-Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes - -_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), - "moves.urllib_error", "moves.urllib.error") - - -class Module_six_moves_urllib_request(_LazyModule): - """Lazy loading of moved objects in six.moves.urllib_request""" - - -_urllib_request_moved_attributes = [ - MovedAttribute("urlopen", "urllib2", "urllib.request"), - MovedAttribute("install_opener", "urllib2", "urllib.request"), - MovedAttribute("build_opener", "urllib2", "urllib.request"), - MovedAttribute("pathname2url", "urllib", "urllib.request"), - MovedAttribute("url2pathname", "urllib", "urllib.request"), - MovedAttribute("getproxies", "urllib", "urllib.request"), - MovedAttribute("Request", "urllib2", "urllib.request"), - MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), - MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), - MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), - MovedAttribute("BaseHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), - MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), - MovedAttribute("FileHandler", "urllib2", "urllib.request"), - MovedAttribute("FTPHandler", "urllib2", "urllib.request"), - MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), - MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), - MovedAttribute("urlretrieve", "urllib", "urllib.request"), - MovedAttribute("urlcleanup", "urllib", "urllib.request"), - MovedAttribute("URLopener", "urllib", "urllib.request"), - MovedAttribute("FancyURLopener", "urllib", "urllib.request"), - MovedAttribute("proxy_bypass", "urllib", "urllib.request"), -] -for attr in _urllib_request_moved_attributes: - setattr(Module_six_moves_urllib_request, attr.name, attr) -del attr - -Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes - -_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), - "moves.urllib_request", "moves.urllib.request") - - -class Module_six_moves_urllib_response(_LazyModule): - """Lazy loading of moved objects in six.moves.urllib_response""" - - -_urllib_response_moved_attributes = [ - MovedAttribute("addbase", "urllib", "urllib.response"), - MovedAttribute("addclosehook", "urllib", "urllib.response"), - MovedAttribute("addinfo", "urllib", "urllib.response"), - MovedAttribute("addinfourl", "urllib", "urllib.response"), -] -for attr in _urllib_response_moved_attributes: - setattr(Module_six_moves_urllib_response, attr.name, attr) -del attr - -Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes - 
-_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), - "moves.urllib_response", "moves.urllib.response") - - -class Module_six_moves_urllib_robotparser(_LazyModule): - """Lazy loading of moved objects in six.moves.urllib_robotparser""" - - -_urllib_robotparser_moved_attributes = [ - MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), -] -for attr in _urllib_robotparser_moved_attributes: - setattr(Module_six_moves_urllib_robotparser, attr.name, attr) -del attr - -Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes - -_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), - "moves.urllib_robotparser", "moves.urllib.robotparser") - - -class Module_six_moves_urllib(types.ModuleType): - """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" - __path__ = [] # mark as package - parse = _importer._get_module("moves.urllib_parse") - error = _importer._get_module("moves.urllib_error") - request = _importer._get_module("moves.urllib_request") - response = _importer._get_module("moves.urllib_response") - robotparser = _importer._get_module("moves.urllib_robotparser") - - def __dir__(self): - return ['parse', 'error', 'request', 'response', 'robotparser'] - -_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), - "moves.urllib") - - -def add_move(move): - """Add an item to six.moves.""" - setattr(_MovedItems, move.name, move) - - -def remove_move(name): - """Remove item from six.moves.""" - try: - delattr(_MovedItems, name) - except AttributeError: - try: - del moves.__dict__[name] - except KeyError: - raise AttributeError("no such move, %r" % (name,)) - - -if PY3: - _meth_func = "__func__" - _meth_self = "__self__" - - _func_closure = "__closure__" - _func_code = "__code__" - _func_defaults = "__defaults__" - _func_globals = "__globals__" -else: - _meth_func = "im_func" - _meth_self = "im_self" - - _func_closure = "func_closure" - _func_code = "func_code" - _func_defaults = "func_defaults" - _func_globals = "func_globals" - - -try: - advance_iterator = next -except NameError: - def advance_iterator(it): - return it.next() -next = advance_iterator - - -try: - callable = callable -except NameError: - def callable(obj): - return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) - - -if PY3: - def get_unbound_function(unbound): - return unbound - - create_bound_method = types.MethodType - - Iterator = object -else: - def get_unbound_function(unbound): - return unbound.im_func - - def create_bound_method(func, obj): - return types.MethodType(func, obj, obj.__class__) - - class Iterator(object): - - def next(self): - return type(self).__next__(self) - - callable = callable -_add_doc(get_unbound_function, - """Get the function out of a possibly unbound function""") - - -get_method_function = operator.attrgetter(_meth_func) -get_method_self = operator.attrgetter(_meth_self) -get_function_closure = operator.attrgetter(_func_closure) -get_function_code = operator.attrgetter(_func_code) -get_function_defaults = operator.attrgetter(_func_defaults) -get_function_globals = operator.attrgetter(_func_globals) - - -if PY3: - def iterkeys(d, **kw): - return iter(d.keys(**kw)) - - def itervalues(d, **kw): - return iter(d.values(**kw)) - - def iteritems(d, **kw): - return iter(d.items(**kw)) - - def iterlists(d, **kw): - return iter(d.lists(**kw)) -else: - def iterkeys(d, **kw): - return iter(d.iterkeys(**kw)) - - def 
itervalues(d, **kw): - return iter(d.itervalues(**kw)) - - def iteritems(d, **kw): - return iter(d.iteritems(**kw)) - - def iterlists(d, **kw): - return iter(d.iterlists(**kw)) - -_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") -_add_doc(itervalues, "Return an iterator over the values of a dictionary.") -_add_doc(iteritems, - "Return an iterator over the (key, value) pairs of a dictionary.") -_add_doc(iterlists, - "Return an iterator over the (key, [values]) pairs of a dictionary.") - - -if PY3: - def b(s): - return s.encode("latin-1") - def u(s): - return s - unichr = chr - if sys.version_info[1] <= 1: - def int2byte(i): - return bytes((i,)) - else: - # This is about 2x faster than the implementation above on 3.2+ - int2byte = operator.methodcaller("to_bytes", 1, "big") - byte2int = operator.itemgetter(0) - indexbytes = operator.getitem - iterbytes = iter - import io - StringIO = io.StringIO - BytesIO = io.BytesIO -else: - def b(s): - return s - # Workaround for standalone backslash - def u(s): - return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") - unichr = unichr - int2byte = chr - def byte2int(bs): - return ord(bs[0]) - def indexbytes(buf, i): - return ord(buf[i]) - def iterbytes(buf): - return (ord(byte) for byte in buf) - import StringIO - StringIO = BytesIO = StringIO.StringIO -_add_doc(b, """Byte literal""") -_add_doc(u, """Text literal""") - - -if PY3: - exec_ = getattr(moves.builtins, "exec") - - - def reraise(tp, value, tb=None): - if value is None: - value = tp() - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - -else: - def exec_(_code_, _globs_=None, _locs_=None): - """Execute code in a namespace.""" - if _globs_ is None: - frame = sys._getframe(1) - _globs_ = frame.f_globals - if _locs_ is None: - _locs_ = frame.f_locals - del frame - elif _locs_ is None: - _locs_ = _globs_ - exec("""exec _code_ in _globs_, _locs_""") - - - exec_("""def reraise(tp, value, tb=None): - raise tp, value, tb -""") - - -print_ = getattr(moves.builtins, "print", None) -if print_ is None: - def print_(*args, **kwargs): - """The new-style print function for Python 2.4 and 2.5.""" - fp = kwargs.pop("file", sys.stdout) - if fp is None: - return - def write(data): - if not isinstance(data, basestring): - data = str(data) - # If the file has an encoding, encode unicode with it. 
- if (isinstance(fp, file) and - isinstance(data, unicode) and - fp.encoding is not None): - errors = getattr(fp, "errors", None) - if errors is None: - errors = "strict" - data = data.encode(fp.encoding, errors) - fp.write(data) - want_unicode = False - sep = kwargs.pop("sep", None) - if sep is not None: - if isinstance(sep, unicode): - want_unicode = True - elif not isinstance(sep, str): - raise TypeError("sep must be None or a string") - end = kwargs.pop("end", None) - if end is not None: - if isinstance(end, unicode): - want_unicode = True - elif not isinstance(end, str): - raise TypeError("end must be None or a string") - if kwargs: - raise TypeError("invalid keyword arguments to print()") - if not want_unicode: - for arg in args: - if isinstance(arg, unicode): - want_unicode = True - break - if want_unicode: - newline = unicode("\n") - space = unicode(" ") - else: - newline = "\n" - space = " " - if sep is None: - sep = space - if end is None: - end = newline - for i, arg in enumerate(args): - if i: - write(sep) - write(arg) - write(end) - -_add_doc(reraise, """Reraise an exception.""") - -if sys.version_info[0:2] < (3, 4): - def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, - updated=functools.WRAPPER_UPDATES): - def wrapper(f): - f = functools.wraps(wrapped)(f) - f.__wrapped__ = wrapped - return f - return wrapper -else: - wraps = functools.wraps - -def with_metaclass(meta, *bases): - """Create a base class with a metaclass.""" - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. - class metaclass(meta): - def __new__(cls, name, this_bases, d): - return meta(name, bases, d) - return type.__new__(metaclass, 'temporary_class', (), {}) - - -def add_metaclass(metaclass): - """Class decorator for creating a class with a metaclass.""" - def wrapper(cls): - orig_vars = cls.__dict__.copy() - slots = orig_vars.get('__slots__') - if slots is not None: - if isinstance(slots, str): - slots = [slots] - for slots_var in slots: - orig_vars.pop(slots_var) - orig_vars.pop('__dict__', None) - orig_vars.pop('__weakref__', None) - return metaclass(cls.__name__, cls.__bases__, orig_vars) - return wrapper - -# Complete the moves implementation. -# This code is at the end of this module to speed up module loading. -# Turn this module into a package. -__path__ = [] # required for PEP 302 and PEP 451 -__package__ = __name__ # see PEP 366 @ReservedAssignment -if globals().get("__spec__") is not None: - __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable -# Remove other six meta path importers, since they cause problems. This can -# happen if six is removed from sys.modules and then reloaded. (Setuptools does -# this for some reason.) -if sys.meta_path: - for i, importer in enumerate(sys.meta_path): - # Here's some real nastiness: Another "instance" of the six module might - # be floating around. Therefore, we can't use isinstance() to check for - # the six meta path importer, since the other six instance will have - # inserted an importer with different class. - if (type(importer).__name__ == "_SixMetaPathImporter" and - importer.name == __name__): - del sys.meta_path[i] - break - del i, importer -# Finally, add the importer to the meta path import hook. 
-sys.meta_path.append(_importer) diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/sock.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/sock.py deleted file mode 100644 index 8870936..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/sock.py +++ /dev/null @@ -1,209 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. - -import errno -import os -import socket -import stat -import sys -import time - -from gunicorn import util -from gunicorn.six import string_types - - -class BaseSocket(object): - - def __init__(self, address, conf, log, fd=None): - self.log = log - self.conf = conf - - self.cfg_addr = address - if fd is None: - sock = socket.socket(self.FAMILY, socket.SOCK_STREAM) - bound = False - else: - sock = socket.fromfd(fd, self.FAMILY, socket.SOCK_STREAM) - os.close(fd) - bound = True - - self.sock = self.set_options(sock, bound=bound) - - def __str__(self): - return "" % self.sock.fileno() - - def __getattr__(self, name): - return getattr(self.sock, name) - - def set_options(self, sock, bound=False): - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - if (self.conf.reuse_port - and hasattr(socket, 'SO_REUSEPORT')): # pragma: no cover - try: - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) - except socket.error as err: - if err.errno not in (errno.ENOPROTOOPT, errno.EINVAL): - raise - if not bound: - self.bind(sock) - sock.setblocking(0) - - # make sure that the socket can be inherited - if hasattr(sock, "set_inheritable"): - sock.set_inheritable(True) - - sock.listen(self.conf.backlog) - return sock - - def bind(self, sock): - sock.bind(self.cfg_addr) - - def close(self): - if self.sock is None: - return - - try: - self.sock.close() - except socket.error as e: - self.log.info("Error while closing socket %s", str(e)) - - self.sock = None - - -class TCPSocket(BaseSocket): - - FAMILY = socket.AF_INET - - def __str__(self): - if self.conf.is_ssl: - scheme = "https" - else: - scheme = "http" - - addr = self.sock.getsockname() - return "%s://%s:%d" % (scheme, addr[0], addr[1]) - - def set_options(self, sock, bound=False): - sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - return super(TCPSocket, self).set_options(sock, bound=bound) - - -class TCP6Socket(TCPSocket): - - FAMILY = socket.AF_INET6 - - def __str__(self): - (host, port, _, _) = self.sock.getsockname() - return "http://[%s]:%d" % (host, port) - - -class UnixSocket(BaseSocket): - - FAMILY = socket.AF_UNIX - - def __init__(self, addr, conf, log, fd=None): - if fd is None: - try: - st = os.stat(addr) - except OSError as e: - if e.args[0] != errno.ENOENT: - raise - else: - if stat.S_ISSOCK(st.st_mode): - os.remove(addr) - else: - raise ValueError("%r is not a socket" % addr) - super(UnixSocket, self).__init__(addr, conf, log, fd=fd) - - def __str__(self): - return "unix:%s" % self.cfg_addr - - def bind(self, sock): - old_umask = os.umask(self.conf.umask) - sock.bind(self.cfg_addr) - util.chown(self.cfg_addr, self.conf.uid, self.conf.gid) - os.umask(old_umask) - - -def _sock_type(addr): - if isinstance(addr, tuple): - if util.is_ipv6(addr[0]): - sock_type = TCP6Socket - else: - sock_type = TCPSocket - elif isinstance(addr, string_types): - sock_type = UnixSocket - else: - raise TypeError("Unable to create socket from: %r" % addr) - return sock_type - - -def create_sockets(conf, log, fds=None): - """ - Create a new socket for the configured addresses or file 
descriptors. - - If a configured address is a tuple then a TCP socket is created. - If it is a string, a Unix socket is created. Otherwise, a TypeError is - raised. - """ - listeners = [] - - # get it only once - laddr = conf.address - - # check ssl config early to raise the error on startup - # only the certfile is needed since it can contains the keyfile - if conf.certfile and not os.path.exists(conf.certfile): - raise ValueError('certfile "%s" does not exist' % conf.certfile) - - if conf.keyfile and not os.path.exists(conf.keyfile): - raise ValueError('keyfile "%s" does not exist' % conf.keyfile) - - # sockets are already bound - if fds is not None: - for fd in fds: - sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) - sock_name = sock.getsockname() - sock_type = _sock_type(sock_name) - listener = sock_type(sock_name, conf, log, fd=fd) - listeners.append(listener) - - return listeners - - # no sockets is bound, first initialization of gunicorn in this env. - for addr in laddr: - sock_type = _sock_type(addr) - sock = None - for i in range(5): - try: - sock = sock_type(addr, conf, log) - except socket.error as e: - if e.args[0] == errno.EADDRINUSE: - log.error("Connection in use: %s", str(addr)) - if e.args[0] == errno.EADDRNOTAVAIL: - log.error("Invalid address: %s", str(addr)) - if i < 5: - msg = "connection to {addr} failed: {error}" - log.debug(msg.format(addr=str(addr), error=str(e))) - log.error("Retrying in 1 second.") - time.sleep(1) - else: - break - - if sock is None: - log.error("Can't connect to %s", str(addr)) - sys.exit(1) - - listeners.append(sock) - - return listeners - - -def close_sockets(listeners, unlink=True): - for sock in listeners: - sock_name = sock.getsockname() - sock.close() - if unlink and _sock_type(sock_name) is UnixSocket: - os.unlink(sock_name) diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/systemd.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/systemd.py deleted file mode 100644 index 10ffb8d..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/systemd.py +++ /dev/null @@ -1,45 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. - -import os - -SD_LISTEN_FDS_START = 3 - - -def listen_fds(unset_environment=True): - """ - Get the number of sockets inherited from systemd socket activation. - - :param unset_environment: clear systemd environment variables unless False - :type unset_environment: bool - :return: the number of sockets to inherit from systemd socket activation - :rtype: int - - Returns zero immediately if $LISTEN_PID is not set to the current pid. - Otherwise, returns the number of systemd activation sockets specified by - $LISTEN_FDS. - - When $LISTEN_PID matches the current pid, unsets the environment variables - unless the ``unset_environment`` flag is ``False``. - - .. note:: - Unlike the sd_listen_fds C function, this implementation does not set - the FD_CLOEXEC flag because the gunicorn arbiter never needs to do this. - - .. 
seealso:: - ``_ - - """ - fds = int(os.environ.get('LISTEN_FDS', 0)) - listen_pid = int(os.environ.get('LISTEN_PID', 0)) - - if listen_pid != os.getpid(): - return 0 - - if unset_environment: - os.environ.pop('LISTEN_PID', None) - os.environ.pop('LISTEN_FDS', None) - - return fds diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/util.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/util.py deleted file mode 100644 index 84f6937..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/util.py +++ /dev/null @@ -1,557 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. - -from __future__ import print_function - -import email.utils -import fcntl -import io -import os -import pkg_resources -import pwd -import random -import socket -import sys -import textwrap -import time -import traceback -import inspect -import errno -import warnings -import logging -import re - -from gunicorn import _compat -from gunicorn.errors import AppImportError -from gunicorn.six import text_type -from gunicorn.workers import SUPPORTED_WORKERS - -REDIRECT_TO = getattr(os, 'devnull', '/dev/null') - -# Server and Date aren't technically hop-by-hop -# headers, but they are in the purview of the -# origin server which the WSGI spec says we should -# act like. So we drop them and add our own. -# -# In the future, concatenation server header values -# might be better, but nothing else does it and -# dropping them is easier. -hop_headers = set(""" - connection keep-alive proxy-authenticate proxy-authorization - te trailers transfer-encoding upgrade - server date - """.split()) - -try: - from setproctitle import setproctitle - - def _setproctitle(title): - setproctitle("gunicorn: %s" % title) -except ImportError: - def _setproctitle(title): - return - - -try: - from importlib import import_module -except ImportError: - def _resolve_name(name, package, level): - """Return the absolute name of the module to be imported.""" - if not hasattr(package, 'rindex'): - raise ValueError("'package' not set to a string") - dot = len(package) - for _ in range(level, 1, -1): - try: - dot = package.rindex('.', 0, dot) - except ValueError: - msg = "attempted relative import beyond top-level package" - raise ValueError(msg) - return "%s.%s" % (package[:dot], name) - - def import_module(name, package=None): - """Import a module. - -The 'package' argument is required when performing a relative import. It -specifies the package to use as the anchor point from which to resolve the -relative import to an absolute import. 
- -""" - if name.startswith('.'): - if not package: - raise TypeError("relative imports require the 'package' argument") - level = 0 - for character in name: - if character != '.': - break - level += 1 - name = _resolve_name(name[level:], package, level) - __import__(name) - return sys.modules[name] - - -def load_class(uri, default="gunicorn.workers.sync.SyncWorker", - section="gunicorn.workers"): - if inspect.isclass(uri): - return uri - if uri.startswith("egg:"): - # uses entry points - entry_str = uri.split("egg:")[1] - try: - dist, name = entry_str.rsplit("#", 1) - except ValueError: - dist = entry_str - name = default - - try: - return pkg_resources.load_entry_point(dist, section, name) - except: - exc = traceback.format_exc() - msg = "class uri %r invalid or not found: \n\n[%s]" - raise RuntimeError(msg % (uri, exc)) - else: - components = uri.split('.') - if len(components) == 1: - while True: - if uri.startswith("#"): - uri = uri[1:] - - if uri in SUPPORTED_WORKERS: - components = SUPPORTED_WORKERS[uri].split(".") - break - - try: - return pkg_resources.load_entry_point("gunicorn", - section, uri) - except: - exc = traceback.format_exc() - msg = "class uri %r invalid or not found: \n\n[%s]" - raise RuntimeError(msg % (uri, exc)) - - klass = components.pop(-1) - - try: - mod = import_module('.'.join(components)) - except: - exc = traceback.format_exc() - msg = "class uri %r invalid or not found: \n\n[%s]" - raise RuntimeError(msg % (uri, exc)) - return getattr(mod, klass) - - -def get_username(uid): - """ get the username for a user id""" - return pwd.getpwuid(uid).pw_name - - -def set_owner_process(uid, gid, initgroups=False): - """ set user and group of workers processes """ - - if gid: - if uid: - try: - username = get_username(uid) - except KeyError: - initgroups = False - - # versions of python < 2.6.2 don't manage unsigned int for - # groups like on osx or fedora - gid = abs(gid) & 0x7FFFFFFF - - if initgroups: - os.initgroups(username, gid) - elif gid != os.getgid(): - os.setgid(gid) - - if uid: - os.setuid(uid) - - -def chown(path, uid, gid): - gid = abs(gid) & 0x7FFFFFFF # see note above. - os.chown(path, uid, gid) - - -if sys.platform.startswith("win"): - def _waitfor(func, pathname, waitall=False): - # Peform the operation - func(pathname) - # Now setup the wait loop - if waitall: - dirname = pathname - else: - dirname, name = os.path.split(pathname) - dirname = dirname or '.' - # Check for `pathname` to be removed from the filesystem. - # The exponential backoff of the timeout amounts to a total - # of ~1 second after which the deletion is probably an error - # anyway. - # Testing on a i7@4.3GHz shows that usually only 1 iteration is - # required when contention occurs. - timeout = 0.001 - while timeout < 1.0: - # Note we are only testing for the existence of the file(s) in - # the contents of the directory regardless of any security or - # access rights. If we have made it this far, we have sufficient - # permissions to do that much using Python's equivalent of the - # Windows API FindFirstFile. - # Other Windows APIs can fail or give incorrect results when - # dealing with files that are pending deletion. 
- L = os.listdir(dirname) - if not L if waitall else name in L: - return - # Increase the timeout and try again - time.sleep(timeout) - timeout *= 2 - warnings.warn('tests may fail, delete still pending for ' + pathname, - RuntimeWarning, stacklevel=4) - - def _unlink(filename): - _waitfor(os.unlink, filename) -else: - _unlink = os.unlink - - -def unlink(filename): - try: - _unlink(filename) - except OSError as error: - # The filename need not exist. - if error.errno not in (errno.ENOENT, errno.ENOTDIR): - raise - - -def is_ipv6(addr): - try: - socket.inet_pton(socket.AF_INET6, addr) - except socket.error: # not a valid address - return False - except ValueError: # ipv6 not supported on this platform - return False - return True - - -def parse_address(netloc, default_port=8000): - if re.match(r'unix:(//)?', netloc): - return re.split(r'unix:(//)?', netloc)[-1] - - if netloc.startswith("tcp://"): - netloc = netloc.split("tcp://")[1] - - # get host - if '[' in netloc and ']' in netloc: - host = netloc.split(']')[0][1:].lower() - elif ':' in netloc: - host = netloc.split(':')[0].lower() - elif netloc == "": - host = "0.0.0.0" - else: - host = netloc.lower() - - #get port - netloc = netloc.split(']')[-1] - if ":" in netloc: - port = netloc.split(':', 1)[1] - if not port.isdigit(): - raise RuntimeError("%r is not a valid port number." % port) - port = int(port) - else: - port = default_port - return (host, port) - - -def close_on_exec(fd): - flags = fcntl.fcntl(fd, fcntl.F_GETFD) - flags |= fcntl.FD_CLOEXEC - fcntl.fcntl(fd, fcntl.F_SETFD, flags) - - -def set_non_blocking(fd): - flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK - fcntl.fcntl(fd, fcntl.F_SETFL, flags) - - -def close(sock): - try: - sock.close() - except socket.error: - pass - -try: - from os import closerange -except ImportError: - def closerange(fd_low, fd_high): - # Iterate through and close all file descriptors. - for fd in range(fd_low, fd_high): - try: - os.close(fd) - except OSError: # ERROR, fd wasn't open to begin with (ignored) - pass - - -def write_chunk(sock, data): - if isinstance(data, text_type): - data = data.encode('utf-8') - chunk_size = "%X\r\n" % len(data) - chunk = b"".join([chunk_size.encode('utf-8'), data, b"\r\n"]) - sock.sendall(chunk) - - -def write(sock, data, chunked=False): - if chunked: - return write_chunk(sock, data) - sock.sendall(data) - - -def write_nonblock(sock, data, chunked=False): - timeout = sock.gettimeout() - if timeout != 0.0: - try: - sock.setblocking(0) - return write(sock, data, chunked) - finally: - sock.setblocking(1) - else: - return write(sock, data, chunked) - - -def write_error(sock, status_int, reason, mesg): - html = textwrap.dedent("""\ - - - %(reason)s - - -
<h1><p>%(reason)s</p></h1>
- %(mesg)s - - - """) % {"reason": reason, "mesg": _compat.html_escape(mesg)} - - http = textwrap.dedent("""\ - HTTP/1.1 %s %s\r - Connection: close\r - Content-Type: text/html\r - Content-Length: %d\r - \r - %s""") % (str(status_int), reason, len(html), html) - write_nonblock(sock, http.encode('latin1')) - - -def import_app(module): - parts = module.split(":", 1) - if len(parts) == 1: - module, obj = module, "application" - else: - module, obj = parts[0], parts[1] - - try: - __import__(module) - except ImportError: - if module.endswith(".py") and os.path.exists(module): - msg = "Failed to find application, did you mean '%s:%s'?" - raise ImportError(msg % (module.rsplit(".", 1)[0], obj)) - else: - raise - - mod = sys.modules[module] - - is_debug = logging.root.level == logging.DEBUG - try: - app = eval(obj, vars(mod)) - except NameError: - if is_debug: - traceback.print_exception(*sys.exc_info()) - raise AppImportError("Failed to find application object %r in %r" % (obj, module)) - - if app is None: - raise AppImportError("Failed to find application object: %r" % obj) - - if not callable(app): - raise AppImportError("Application object must be callable.") - return app - - -def getcwd(): - # get current path, try to use PWD env first - try: - a = os.stat(os.environ['PWD']) - b = os.stat(os.getcwd()) - if a.st_ino == b.st_ino and a.st_dev == b.st_dev: - cwd = os.environ['PWD'] - else: - cwd = os.getcwd() - except: - cwd = os.getcwd() - return cwd - - -def http_date(timestamp=None): - """Return the current date and time formatted for a message header.""" - if timestamp is None: - timestamp = time.time() - s = email.utils.formatdate(timestamp, localtime=False, usegmt=True) - return s - - -def is_hoppish(header): - return header.lower().strip() in hop_headers - - -def daemonize(enable_stdio_inheritance=False): - """\ - Standard daemonization of a process. - http://www.svbug.com/documentation/comp.unix.programmer-FAQ/faq_2.html#SEC16 - """ - if 'GUNICORN_FD' not in os.environ: - if os.fork(): - os._exit(0) - os.setsid() - - if os.fork(): - os._exit(0) - - os.umask(0o22) - - # In both the following any file descriptors above stdin - # stdout and stderr are left untouched. The inheritance - # option simply allows one to have output go to a file - # specified by way of shell redirection when not wanting - # to use --error-log option. - - if not enable_stdio_inheritance: - # Remap all of stdin, stdout and stderr on to - # /dev/null. The expectation is that users have - # specified the --error-log option. - - closerange(0, 3) - - fd_null = os.open(REDIRECT_TO, os.O_RDWR) - - if fd_null != 0: - os.dup2(fd_null, 0) - - os.dup2(fd_null, 1) - os.dup2(fd_null, 2) - - else: - fd_null = os.open(REDIRECT_TO, os.O_RDWR) - - # Always redirect stdin to /dev/null as we would - # never expect to need to read interactive input. - - if fd_null != 0: - os.close(0) - os.dup2(fd_null, 0) - - # If stdout and stderr are still connected to - # their original file descriptors we check to see - # if they are associated with terminal devices. - # When they are we map them to /dev/null so that - # are still detached from any controlling terminal - # properly. If not we preserve them as they are. - # - # If stdin and stdout were not hooked up to the - # original file descriptors, then all bets are - # off and all we can really do is leave them as - # they were. - # - # This will allow 'gunicorn ... > output.log 2>&1' - # to work with stdout/stderr going to the file - # as expected. 
- # - # Note that if using --error-log option, the log - # file specified through shell redirection will - # only be used up until the log file specified - # by the option takes over. As it replaces stdout - # and stderr at the file descriptor level, then - # anything using stdout or stderr, including having - # cached a reference to them, will still work. - - def redirect(stream, fd_expect): - try: - fd = stream.fileno() - if fd == fd_expect and stream.isatty(): - os.close(fd) - os.dup2(fd_null, fd) - except AttributeError: - pass - - redirect(sys.stdout, 1) - redirect(sys.stderr, 2) - - -def seed(): - try: - random.seed(os.urandom(64)) - except NotImplementedError: - random.seed('%s.%s' % (time.time(), os.getpid())) - - -def check_is_writeable(path): - try: - f = open(path, 'a') - except IOError as e: - raise RuntimeError("Error: '%s' isn't writable [%r]" % (path, e)) - f.close() - - -def to_bytestring(value, encoding="utf8"): - """Converts a string argument to a byte string""" - if isinstance(value, bytes): - return value - if not isinstance(value, text_type): - raise TypeError('%r is not a string' % value) - - return value.encode(encoding) - -def has_fileno(obj): - if not hasattr(obj, "fileno"): - return False - - # check BytesIO case and maybe others - try: - obj.fileno() - except (AttributeError, IOError, io.UnsupportedOperation): - return False - - return True - - -def warn(msg): - print("!!!", file=sys.stderr) - - lines = msg.splitlines() - for i, line in enumerate(lines): - if i == 0: - line = "WARNING: %s" % line - print("!!! %s" % line, file=sys.stderr) - - print("!!!\n", file=sys.stderr) - sys.stderr.flush() - - -def make_fail_app(msg): - msg = to_bytestring(msg) - - def app(environ, start_response): - start_response("500 Internal Server Error", [ - ("Content-Type", "text/plain"), - ("Content-Length", str(len(msg))) - ]) - return [msg] - - return app - - -def split_request_uri(uri): - if uri.startswith("//"): - # When the path starts with //, urlsplit considers it as a - # relative uri while the RFC says we should consider it as abs_path - # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2 - # We use temporary dot prefix to workaround this behaviour - parts = _compat.urlsplit("." + uri) - return parts._replace(path=parts.path[1:]) - - return _compat.urlsplit(uri) diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/__init__.py deleted file mode 100644 index 074e001..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. - -import sys - -# supported gunicorn workers. -SUPPORTED_WORKERS = { - "sync": "gunicorn.workers.sync.SyncWorker", - "eventlet": "gunicorn.workers.geventlet.EventletWorker", - "gevent": "gunicorn.workers.ggevent.GeventWorker", - "gevent_wsgi": "gunicorn.workers.ggevent.GeventPyWSGIWorker", - "gevent_pywsgi": "gunicorn.workers.ggevent.GeventPyWSGIWorker", - "tornado": "gunicorn.workers.gtornado.TornadoWorker", - "gthread": "gunicorn.workers.gthread.ThreadWorker", -} - - -if sys.version_info >= (3, 4): - # gaiohttp worker can be used with Python 3.4+ only. 
- SUPPORTED_WORKERS["gaiohttp"] = "gunicorn.workers.gaiohttp.AiohttpWorker" diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/_gaiohttp.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/_gaiohttp.py deleted file mode 100644 index fe378c3..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/_gaiohttp.py +++ /dev/null @@ -1,168 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. - -import asyncio -import datetime -import functools -import logging -import os - -try: - import ssl -except ImportError: - ssl = None - -import gunicorn.workers.base as base - -from aiohttp.wsgi import WSGIServerHttpProtocol as OldWSGIServerHttpProtocol - - -class WSGIServerHttpProtocol(OldWSGIServerHttpProtocol): - def log_access(self, request, environ, response, time): - self.logger.access(response, request, environ, datetime.timedelta(0, 0, time)) - - -class AiohttpWorker(base.Worker): - - def __init__(self, *args, **kw): # pragma: no cover - super().__init__(*args, **kw) - cfg = self.cfg - if cfg.is_ssl: - self.ssl_context = self._create_ssl_context(cfg) - else: - self.ssl_context = None - self.servers = [] - self.connections = {} - - def init_process(self): - # create new event_loop after fork - asyncio.get_event_loop().close() - - self.loop = asyncio.new_event_loop() - asyncio.set_event_loop(self.loop) - - super().init_process() - - def run(self): - self._runner = asyncio.ensure_future(self._run(), loop=self.loop) - - try: - self.loop.run_until_complete(self._runner) - finally: - self.loop.close() - - def wrap_protocol(self, proto): - proto.connection_made = _wrp( - proto, proto.connection_made, self.connections) - proto.connection_lost = _wrp( - proto, proto.connection_lost, self.connections, False) - return proto - - def factory(self, wsgi, addr): - # are we in debug level - is_debug = self.log.loglevel == logging.DEBUG - - proto = WSGIServerHttpProtocol( - wsgi, readpayload=True, - loop=self.loop, - log=self.log, - debug=is_debug, - keep_alive=self.cfg.keepalive, - access_log=self.log.access_log, - access_log_format=self.cfg.access_log_format) - return self.wrap_protocol(proto) - - def get_factory(self, sock, addr): - return functools.partial(self.factory, self.wsgi, addr) - - @asyncio.coroutine - def close(self): - try: - if hasattr(self.wsgi, 'close'): - yield from self.wsgi.close() - except: - self.log.exception('Process shutdown exception') - - @asyncio.coroutine - def _run(self): - for sock in self.sockets: - factory = self.get_factory(sock.sock, sock.cfg_addr) - self.servers.append( - (yield from self._create_server(factory, sock))) - - # If our parent changed then we shut down. 
- pid = os.getpid() - try: - while self.alive or self.connections: - self.notify() - - if (self.alive and - pid == os.getpid() and self.ppid != os.getppid()): - self.log.info("Parent changed, shutting down: %s", self) - self.alive = False - - # stop accepting requests - if not self.alive: - if self.servers: - self.log.info( - "Stopping server: %s, connections: %s", - pid, len(self.connections)) - for server in self.servers: - server.close() - self.servers.clear() - - # prepare connections for closing - for conn in self.connections.values(): - if hasattr(conn, 'closing'): - conn.closing() - - yield from asyncio.sleep(1.0, loop=self.loop) - except KeyboardInterrupt: - pass - - if self.servers: - for server in self.servers: - server.close() - - yield from self.close() - - @asyncio.coroutine - def _create_server(self, factory, sock): - return self.loop.create_server(factory, sock=sock.sock, - ssl=self.ssl_context) - - @staticmethod - def _create_ssl_context(cfg): - """ Creates SSLContext instance for usage in asyncio.create_server. - - See ssl.SSLSocket.__init__ for more details. - """ - ctx = ssl.SSLContext(cfg.ssl_version) - ctx.load_cert_chain(cfg.certfile, cfg.keyfile) - ctx.verify_mode = cfg.cert_reqs - if cfg.ca_certs: - ctx.load_verify_locations(cfg.ca_certs) - if cfg.ciphers: - ctx.set_ciphers(cfg.ciphers) - return ctx - - -class _wrp: - - def __init__(self, proto, meth, tracking, add=True): - self._proto = proto - self._id = id(proto) - self._meth = meth - self._tracking = tracking - self._add = add - - def __call__(self, *args): - if self._add: - self._tracking[self._id] = self._proto - elif self._id in self._tracking: - del self._tracking[self._id] - - conn = self._meth(*args) - return conn diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/base.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/base.py deleted file mode 100644 index 881efa0..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/base.py +++ /dev/null @@ -1,264 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. - -from datetime import datetime -import os -from random import randint -import signal -from ssl import SSLError -import sys -import time -import traceback - -from gunicorn import six -from gunicorn import util -from gunicorn.workers.workertmp import WorkerTmp -from gunicorn.reloader import reloader_engines -from gunicorn.http.errors import ( - InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod, - InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders, -) -from gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest -from gunicorn.http.errors import InvalidSchemeHeaders -from gunicorn.http.wsgi import default_environ, Response -from gunicorn.six import MAXSIZE - - -class Worker(object): - - SIGNALS = [getattr(signal, "SIG%s" % x) - for x in "ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD".split()] - - PIPE = [] - - def __init__(self, age, ppid, sockets, app, timeout, cfg, log): - """\ - This is called pre-fork so it shouldn't do anything to the - current process. If there's a need to make process wide - changes you'll want to do that in ``self.init_process()``. 
- """ - self.age = age - self.pid = "[booting]" - self.ppid = ppid - self.sockets = sockets - self.app = app - self.timeout = timeout - self.cfg = cfg - self.booted = False - self.aborted = False - self.reloader = None - - self.nr = 0 - jitter = randint(0, cfg.max_requests_jitter) - self.max_requests = cfg.max_requests + jitter or MAXSIZE - self.alive = True - self.log = log - self.tmp = WorkerTmp(cfg) - - def __str__(self): - return "" % self.pid - - def notify(self): - """\ - Your worker subclass must arrange to have this method called - once every ``self.timeout`` seconds. If you fail in accomplishing - this task, the master process will murder your workers. - """ - self.tmp.notify() - - def run(self): - """\ - This is the mainloop of a worker process. You should override - this method in a subclass to provide the intended behaviour - for your particular evil schemes. - """ - raise NotImplementedError() - - def init_process(self): - """\ - If you override this method in a subclass, the last statement - in the function should be to call this method with - super(MyWorkerClass, self).init_process() so that the ``run()`` - loop is initiated. - """ - - # set environment' variables - if self.cfg.env: - for k, v in self.cfg.env.items(): - os.environ[k] = v - - util.set_owner_process(self.cfg.uid, self.cfg.gid, - initgroups=self.cfg.initgroups) - - # Reseed the random number generator - util.seed() - - # For waking ourselves up - self.PIPE = os.pipe() - for p in self.PIPE: - util.set_non_blocking(p) - util.close_on_exec(p) - - # Prevent fd inheritance - for s in self.sockets: - util.close_on_exec(s) - util.close_on_exec(self.tmp.fileno()) - - self.wait_fds = self.sockets + [self.PIPE[0]] - - self.log.close_on_exec() - - self.init_signals() - - # start the reloader - if self.cfg.reload: - def changed(fname): - self.log.info("Worker reloading: %s modified", fname) - self.alive = False - self.cfg.worker_int(self) - time.sleep(0.1) - sys.exit(0) - - reloader_cls = reloader_engines[self.cfg.reload_engine] - self.reloader = reloader_cls(extra_files=self.cfg.reload_extra_files, - callback=changed) - self.reloader.start() - - self.load_wsgi() - self.cfg.post_worker_init(self) - - # Enter main run loop - self.booted = True - self.run() - - def load_wsgi(self): - try: - self.wsgi = self.app.wsgi() - except SyntaxError as e: - if not self.cfg.reload: - raise - - self.log.exception(e) - - # fix from PR #1228 - # storing the traceback into exc_tb will create a circular reference. - # per https://docs.python.org/2/library/sys.html#sys.exc_info warning, - # delete the traceback after use. 
- try: - _, exc_val, exc_tb = sys.exc_info() - self.reloader.add_extra_file(exc_val.filename) - - tb_string = six.StringIO() - traceback.print_tb(exc_tb, file=tb_string) - self.wsgi = util.make_fail_app(tb_string.getvalue()) - finally: - del exc_tb - - def init_signals(self): - # reset signaling - for s in self.SIGNALS: - signal.signal(s, signal.SIG_DFL) - # init new signaling - signal.signal(signal.SIGQUIT, self.handle_quit) - signal.signal(signal.SIGTERM, self.handle_exit) - signal.signal(signal.SIGINT, self.handle_quit) - signal.signal(signal.SIGWINCH, self.handle_winch) - signal.signal(signal.SIGUSR1, self.handle_usr1) - signal.signal(signal.SIGABRT, self.handle_abort) - - # Don't let SIGTERM and SIGUSR1 disturb active requests - # by interrupting system calls - if hasattr(signal, 'siginterrupt'): # python >= 2.6 - signal.siginterrupt(signal.SIGTERM, False) - signal.siginterrupt(signal.SIGUSR1, False) - - if hasattr(signal, 'set_wakeup_fd'): - signal.set_wakeup_fd(self.PIPE[1]) - - def handle_usr1(self, sig, frame): - self.log.reopen_files() - - def handle_exit(self, sig, frame): - self.alive = False - - def handle_quit(self, sig, frame): - self.alive = False - # worker_int callback - self.cfg.worker_int(self) - time.sleep(0.1) - sys.exit(0) - - def handle_abort(self, sig, frame): - self.alive = False - self.cfg.worker_abort(self) - sys.exit(1) - - def handle_error(self, req, client, addr, exc): - request_start = datetime.now() - addr = addr or ('', -1) # unix socket case - if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod, - InvalidHTTPVersion, InvalidHeader, InvalidHeaderName, - LimitRequestLine, LimitRequestHeaders, - InvalidProxyLine, ForbiddenProxyRequest, - InvalidSchemeHeaders, - SSLError)): - - status_int = 400 - reason = "Bad Request" - - if isinstance(exc, InvalidRequestLine): - mesg = "Invalid Request Line '%s'" % str(exc) - elif isinstance(exc, InvalidRequestMethod): - mesg = "Invalid Method '%s'" % str(exc) - elif isinstance(exc, InvalidHTTPVersion): - mesg = "Invalid HTTP Version '%s'" % str(exc) - elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)): - mesg = "%s" % str(exc) - if not req and hasattr(exc, "req"): - req = exc.req # for access log - elif isinstance(exc, LimitRequestLine): - mesg = "%s" % str(exc) - elif isinstance(exc, LimitRequestHeaders): - mesg = "Error parsing headers: '%s'" % str(exc) - elif isinstance(exc, InvalidProxyLine): - mesg = "'%s'" % str(exc) - elif isinstance(exc, ForbiddenProxyRequest): - reason = "Forbidden" - mesg = "Request forbidden" - status_int = 403 - elif isinstance(exc, InvalidSchemeHeaders): - mesg = "%s" % str(exc) - elif isinstance(exc, SSLError): - reason = "Forbidden" - mesg = "'%s'" % str(exc) - status_int = 403 - - msg = "Invalid request from ip={ip}: {error}" - self.log.debug(msg.format(ip=addr[0], error=str(exc))) - else: - if hasattr(req, "uri"): - self.log.exception("Error handling request %s", req.uri) - status_int = 500 - reason = "Internal Server Error" - mesg = "" - - if req is not None: - request_time = datetime.now() - request_start - environ = default_environ(req, client, self.cfg) - environ['REMOTE_ADDR'] = addr[0] - environ['REMOTE_PORT'] = str(addr[1]) - resp = Response(req, client, self.cfg) - resp.status = "%s %s" % (status_int, reason) - resp.response_length = len(mesg) - self.log.access(resp, req, environ, request_time) - - try: - util.write_error(client, status_int, reason, mesg) - except: - self.log.debug("Failed to send error message.") - - def handle_winch(self, sig, fname): - # Ignore 
SIGWINCH in worker. Fixes a crash on OpenBSD. - self.log.debug("worker: SIGWINCH ignored.") diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/base_async.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/base_async.py deleted file mode 100644 index a3a0f91..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/base_async.py +++ /dev/null @@ -1,147 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. - -from datetime import datetime -import errno -import socket -import ssl -import sys - -import gunicorn.http as http -import gunicorn.http.wsgi as wsgi -import gunicorn.util as util -import gunicorn.workers.base as base -from gunicorn import six - -ALREADY_HANDLED = object() - - -class AsyncWorker(base.Worker): - - def __init__(self, *args, **kwargs): - super(AsyncWorker, self).__init__(*args, **kwargs) - self.worker_connections = self.cfg.worker_connections - - def timeout_ctx(self): - raise NotImplementedError() - - def is_already_handled(self, respiter): - # some workers will need to overload this function to raise a StopIteration - return respiter == ALREADY_HANDLED - - def handle(self, listener, client, addr): - req = None - try: - parser = http.RequestParser(self.cfg, client) - try: - listener_name = listener.getsockname() - if not self.cfg.keepalive: - req = six.next(parser) - self.handle_request(listener_name, req, client, addr) - else: - # keepalive loop - proxy_protocol_info = {} - while True: - req = None - with self.timeout_ctx(): - req = six.next(parser) - if not req: - break - if req.proxy_protocol_info: - proxy_protocol_info = req.proxy_protocol_info - else: - req.proxy_protocol_info = proxy_protocol_info - self.handle_request(listener_name, req, client, addr) - except http.errors.NoMoreData as e: - self.log.debug("Ignored premature client disconnection. %s", e) - except StopIteration as e: - self.log.debug("Closing connection. 
%s", e) - except ssl.SSLError: - # pass to next try-except level - six.reraise(*sys.exc_info()) - except EnvironmentError: - # pass to next try-except level - six.reraise(*sys.exc_info()) - except Exception as e: - self.handle_error(req, client, addr, e) - except ssl.SSLError as e: - if e.args[0] == ssl.SSL_ERROR_EOF: - self.log.debug("ssl connection closed") - client.close() - else: - self.log.debug("Error processing SSL request.") - self.handle_error(req, client, addr, e) - except EnvironmentError as e: - if e.errno not in (errno.EPIPE, errno.ECONNRESET): - self.log.exception("Socket error processing request.") - else: - if e.errno == errno.ECONNRESET: - self.log.debug("Ignoring connection reset") - else: - self.log.debug("Ignoring EPIPE") - except Exception as e: - self.handle_error(req, client, addr, e) - finally: - util.close(client) - - def handle_request(self, listener_name, req, sock, addr): - request_start = datetime.now() - environ = {} - resp = None - try: - self.cfg.pre_request(self, req) - resp, environ = wsgi.create(req, sock, addr, - listener_name, self.cfg) - environ["wsgi.multithread"] = True - self.nr += 1 - if self.alive and self.nr >= self.max_requests: - self.log.info("Autorestarting worker after current request.") - resp.force_close() - self.alive = False - - if not self.cfg.keepalive: - resp.force_close() - - respiter = self.wsgi(environ, resp.start_response) - if self.is_already_handled(respiter): - return False - try: - if isinstance(respiter, environ['wsgi.file_wrapper']): - resp.write_file(respiter) - else: - for item in respiter: - resp.write(item) - resp.close() - request_time = datetime.now() - request_start - self.log.access(resp, req, environ, request_time) - finally: - if hasattr(respiter, "close"): - respiter.close() - if resp.should_close(): - raise StopIteration() - except StopIteration: - raise - except EnvironmentError: - # If the original exception was a socket.error we delegate - # handling it to the caller (where handle() might ignore it) - six.reraise(*sys.exc_info()) - except Exception: - if resp and resp.headers_sent: - # If the requests have already been sent, we should close the - # connection to indicate the error. - self.log.exception("Error handling request") - try: - sock.shutdown(socket.SHUT_RDWR) - sock.close() - except EnvironmentError: - pass - raise StopIteration() - raise - finally: - try: - self.cfg.post_request(self, req, environ, resp) - except Exception: - self.log.exception("Exception in post_request hook") - return True diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/gaiohttp.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/gaiohttp.py deleted file mode 100644 index bef6b49..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/gaiohttp.py +++ /dev/null @@ -1,27 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. - -import sys - -from gunicorn import util - -if sys.version_info >= (3, 4): - try: - import aiohttp # pylint: disable=unused-import - except ImportError: - raise RuntimeError("You need aiohttp installed to use this worker.") - else: - try: - from aiohttp.worker import GunicornWebWorker as AiohttpWorker - except ImportError: - from gunicorn.workers._gaiohttp import AiohttpWorker - - util.warn( - "The 'gaiohttp' worker is deprecated. See --worker-class " - "documentation for more information." 
- ) - __all__ = ['AiohttpWorker'] -else: - raise RuntimeError("You need Python >= 3.4 to use the gaiohttp worker") diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/geventlet.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/geventlet.py deleted file mode 100644 index 189062c..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/geventlet.py +++ /dev/null @@ -1,148 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. - -from functools import partial -import errno -import sys - -try: - import eventlet -except ImportError: - raise RuntimeError("You need eventlet installed to use this worker.") - -# validate the eventlet version -if eventlet.version_info < (0, 9, 7): - raise RuntimeError("You need eventlet >= 0.9.7") - - -from eventlet import hubs, greenthread -from eventlet.greenio import GreenSocket -from eventlet.hubs import trampoline -from eventlet.wsgi import ALREADY_HANDLED as EVENTLET_ALREADY_HANDLED -import greenlet - -from gunicorn.http.wsgi import sendfile as o_sendfile -from gunicorn.workers.base_async import AsyncWorker - -def _eventlet_sendfile(fdout, fdin, offset, nbytes): - while True: - try: - return o_sendfile(fdout, fdin, offset, nbytes) - except OSError as e: - if e.args[0] == errno.EAGAIN: - trampoline(fdout, write=True) - else: - raise - - -def _eventlet_serve(sock, handle, concurrency): - """ - Serve requests forever. - - This code is nearly identical to ``eventlet.convenience.serve`` except - that it attempts to join the pool at the end, which allows for gunicorn - graceful shutdowns. - """ - pool = eventlet.greenpool.GreenPool(concurrency) - server_gt = eventlet.greenthread.getcurrent() - - while True: - try: - conn, addr = sock.accept() - gt = pool.spawn(handle, conn, addr) - gt.link(_eventlet_stop, server_gt, conn) - conn, addr, gt = None, None, None - except eventlet.StopServe: - sock.close() - pool.waitall() - return - - -def _eventlet_stop(client, server, conn): - """ - Stop a greenlet handling a request and close its connection. - - This code is lifted from eventlet so as not to depend on undocumented - functions in the library. 
- """ - try: - try: - client.wait() - finally: - conn.close() - except greenlet.GreenletExit: - pass - except Exception: - greenthread.kill(server, *sys.exc_info()) - - -def patch_sendfile(): - from gunicorn.http import wsgi - - if o_sendfile is not None: - setattr(wsgi, "sendfile", _eventlet_sendfile) - - -class EventletWorker(AsyncWorker): - - def patch(self): - hubs.use_hub() - eventlet.monkey_patch(os=False) - patch_sendfile() - - def is_already_handled(self, respiter): - if respiter == EVENTLET_ALREADY_HANDLED: - raise StopIteration() - else: - return super(EventletWorker, self).is_already_handled(respiter) - - def init_process(self): - super(EventletWorker, self).init_process() - self.patch() - - def handle_quit(self, sig, frame): - eventlet.spawn(super(EventletWorker, self).handle_quit, sig, frame) - - def handle_usr1(self, sig, frame): - eventlet.spawn(super(EventletWorker, self).handle_usr1, sig, frame) - - def timeout_ctx(self): - return eventlet.Timeout(self.cfg.keepalive or None, False) - - def handle(self, listener, client, addr): - if self.cfg.is_ssl: - client = eventlet.wrap_ssl(client, server_side=True, - **self.cfg.ssl_options) - - super(EventletWorker, self).handle(listener, client, addr) - - def run(self): - acceptors = [] - for sock in self.sockets: - gsock = GreenSocket(sock) - gsock.setblocking(1) - hfun = partial(self.handle, gsock) - acceptor = eventlet.spawn(_eventlet_serve, gsock, hfun, - self.worker_connections) - - acceptors.append(acceptor) - eventlet.sleep(0.0) - - while self.alive: - self.notify() - eventlet.sleep(1.0) - - self.notify() - try: - with eventlet.Timeout(self.cfg.graceful_timeout) as t: - for a in acceptors: - a.kill(eventlet.StopServe()) - for a in acceptors: - a.wait() - except eventlet.Timeout as te: - if te != t: - raise - for a in acceptors: - a.kill() diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/ggevent.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/ggevent.py deleted file mode 100644 index fb9d919..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/ggevent.py +++ /dev/null @@ -1,246 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. 
- -import errno -import os -import sys -from datetime import datetime -from functools import partial -import time - -_socket = __import__("socket") - -# workaround on osx, disable kqueue -if sys.platform == "darwin": - os.environ['EVENT_NOKQUEUE'] = "1" - -try: - import gevent -except ImportError: - raise RuntimeError("You need gevent installed to use this worker.") -from gevent.pool import Pool -from gevent.server import StreamServer -from gevent.socket import wait_write, socket -from gevent import pywsgi - -import gunicorn -from gunicorn.http.wsgi import base_environ -from gunicorn.workers.base_async import AsyncWorker -from gunicorn.http.wsgi import sendfile as o_sendfile - -VERSION = "gevent/%s gunicorn/%s" % (gevent.__version__, gunicorn.__version__) - -def _gevent_sendfile(fdout, fdin, offset, nbytes): - while True: - try: - return o_sendfile(fdout, fdin, offset, nbytes) - except OSError as e: - if e.args[0] == errno.EAGAIN: - wait_write(fdout) - else: - raise - -def patch_sendfile(): - from gunicorn.http import wsgi - - if o_sendfile is not None: - setattr(wsgi, "sendfile", _gevent_sendfile) - - -class GeventWorker(AsyncWorker): - - server_class = None - wsgi_handler = None - - def patch(self): - from gevent import monkey - monkey.noisy = False - - # if the new version is used make sure to patch subprocess - if gevent.version_info[0] == 0: - monkey.patch_all() - else: - monkey.patch_all(subprocess=True) - - # monkey patch sendfile to make it none blocking - patch_sendfile() - - # patch sockets - sockets = [] - for s in self.sockets: - if sys.version_info[0] == 3: - sockets.append(socket(s.FAMILY, _socket.SOCK_STREAM, - fileno=s.sock.fileno())) - else: - sockets.append(socket(s.FAMILY, _socket.SOCK_STREAM, - _sock=s)) - self.sockets = sockets - - def notify(self): - super(GeventWorker, self).notify() - if self.ppid != os.getppid(): - self.log.info("Parent changed, shutting down: %s", self) - sys.exit(0) - - def timeout_ctx(self): - return gevent.Timeout(self.cfg.keepalive, False) - - def run(self): - servers = [] - ssl_args = {} - - if self.cfg.is_ssl: - ssl_args = dict(server_side=True, **self.cfg.ssl_options) - - for s in self.sockets: - s.setblocking(1) - pool = Pool(self.worker_connections) - if self.server_class is not None: - environ = base_environ(self.cfg) - environ.update({ - "wsgi.multithread": True, - "SERVER_SOFTWARE": VERSION, - }) - server = self.server_class( - s, application=self.wsgi, spawn=pool, log=self.log, - handler_class=self.wsgi_handler, environ=environ, - **ssl_args) - else: - hfun = partial(self.handle, s) - server = StreamServer(s, handle=hfun, spawn=pool, **ssl_args) - - server.start() - servers.append(server) - - while self.alive: - self.notify() - gevent.sleep(1.0) - - try: - # Stop accepting requests - for server in servers: - if hasattr(server, 'close'): # gevent 1.0 - server.close() - if hasattr(server, 'kill'): # gevent < 1.0 - server.kill() - - # Handle current requests until graceful_timeout - ts = time.time() - while time.time() - ts <= self.cfg.graceful_timeout: - accepting = 0 - for server in servers: - if server.pool.free_count() != server.pool.size: - accepting += 1 - - # if no server is accepting a connection, we can exit - if not accepting: - return - - self.notify() - gevent.sleep(1.0) - - # Force kill all active the handlers - self.log.warning("Worker graceful timeout (pid:%s)" % self.pid) - for server in servers: - server.stop(timeout=1) - except: - pass - - def handle(self, listener, client, addr): - # Connected socket timeout defaults 
to socket.getdefaulttimeout().
-        # This forces it to blocking mode.
-        client.setblocking(1)
-        super(GeventWorker, self).handle(listener, client, addr)
-
-    def handle_request(self, listener_name, req, sock, addr):
-        try:
-            super(GeventWorker, self).handle_request(listener_name, req, sock,
-                                                     addr)
-        except gevent.GreenletExit:
-            pass
-        except SystemExit:
-            pass
-
-    def handle_quit(self, sig, frame):
-        # Move this out of the signal handler so we can use
-        # blocking calls. See #1126
-        gevent.spawn(super(GeventWorker, self).handle_quit, sig, frame)
-
-    def handle_usr1(self, sig, frame):
-        # Make the gevent workers handle the usr1 signal
-        # by deferring to a new greenlet. See #1645
-        gevent.spawn(super(GeventWorker, self).handle_usr1, sig, frame)
-
-    if gevent.version_info[0] == 0:
-
-        def init_process(self):
-            # monkey patch here
-            self.patch()
-
-            # reinit the hub
-            import gevent.core
-            gevent.core.reinit()
-
-            # gevent 0.13 and older doesn't reinitialize dns for us after forking
-            # here's the workaround
-            gevent.core.dns_shutdown(fail_requests=1)
-            gevent.core.dns_init()
-            super(GeventWorker, self).init_process()
-
-    else:
-
-        def init_process(self):
-            # monkey patch here
-            self.patch()
-
-            # reinit the hub
-            from gevent import hub
-            hub.reinit()
-
-            # then initialize the process
-            super(GeventWorker, self).init_process()
-
-
-class GeventResponse(object):
-
-    status = None
-    headers = None
-    sent = None
-
-    def __init__(self, status, headers, clength):
-        self.status = status
-        self.headers = headers
-        self.sent = clength
-
-
-class PyWSGIHandler(pywsgi.WSGIHandler):
-
-    def log_request(self):
-        start = datetime.fromtimestamp(self.time_start)
-        finish = datetime.fromtimestamp(self.time_finish)
-        response_time = finish - start
-        resp_headers = getattr(self, 'response_headers', {})
-        resp = GeventResponse(self.status, resp_headers, self.response_length)
-        if hasattr(self, 'headers'):
-            req_headers = self.headers.items()
-        else:
-            req_headers = []
-        self.server.log.access(resp, req_headers, self.environ, response_time)
-
-    def get_environ(self):
-        env = super(PyWSGIHandler, self).get_environ()
-        env['gunicorn.sock'] = self.socket
-        env['RAW_URI'] = self.path
-        return env
-
-
-class PyWSGIServer(pywsgi.WSGIServer):
-    pass
-
-
-class GeventPyWSGIWorker(GeventWorker):
-    "The Gevent StreamServer based workers."
-    server_class = PyWSGIServer
-    wsgi_handler = PyWSGIHandler
diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/gthread.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/gthread.py
deleted file mode 100644
index 862f873..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/gthread.py
+++ /dev/null
@@ -1,367 +0,0 @@
-# -*- coding: utf-8 -
-#
-# This file is part of gunicorn released under the MIT license.
-# See the NOTICE for more information.
-
-# design:
-# a threaded worker accepts connections in the main loop, accepted
-# connections are added to the thread pool as a connection job. On
-# keepalive, connections are put back in the loop waiting for an event.
-# If no event happens after the keepalive timeout, the connection is
-# closed.
-
-from collections import deque
-from datetime import datetime
-import errno
-from functools import partial
-import os
-import socket
-import ssl
-import sys
-from threading import RLock
-import time
-
-from .. import http
-from ..http import wsgi
-from .. import util
-from . import base
-from ..
import six - - -try: - import concurrent.futures as futures -except ImportError: - raise RuntimeError(""" - You need to install the 'futures' package to use this worker with this - Python version. - """) - -try: - from asyncio import selectors -except ImportError: - from gunicorn import selectors - - -class TConn(object): - - def __init__(self, cfg, sock, client, server): - self.cfg = cfg - self.sock = sock - self.client = client - self.server = server - - self.timeout = None - self.parser = None - - # set the socket to non blocking - self.sock.setblocking(False) - - def init(self): - self.sock.setblocking(True) - if self.parser is None: - # wrap the socket if needed - if self.cfg.is_ssl: - self.sock = ssl.wrap_socket(self.sock, server_side=True, - **self.cfg.ssl_options) - - # initialize the parser - self.parser = http.RequestParser(self.cfg, self.sock) - - def set_timeout(self): - # set the timeout - self.timeout = time.time() + self.cfg.keepalive - - def close(self): - util.close(self.sock) - - -class ThreadWorker(base.Worker): - - def __init__(self, *args, **kwargs): - super(ThreadWorker, self).__init__(*args, **kwargs) - self.worker_connections = self.cfg.worker_connections - self.max_keepalived = self.cfg.worker_connections - self.cfg.threads - # initialise the pool - self.tpool = None - self.poller = None - self._lock = None - self.futures = deque() - self._keep = deque() - self.nr_conns = 0 - - @classmethod - def check_config(cls, cfg, log): - max_keepalived = cfg.worker_connections - cfg.threads - - if max_keepalived <= 0 and cfg.keepalive: - log.warning("No keepalived connections can be handled. " + - "Check the number of worker connections and threads.") - - def init_process(self): - self.tpool = futures.ThreadPoolExecutor(max_workers=self.cfg.threads) - self.poller = selectors.DefaultSelector() - self._lock = RLock() - super(ThreadWorker, self).init_process() - - def handle_quit(self, sig, frame): - self.alive = False - # worker_int callback - self.cfg.worker_int(self) - self.tpool.shutdown(False) - time.sleep(0.1) - sys.exit(0) - - def _wrap_future(self, fs, conn): - fs.conn = conn - self.futures.append(fs) - fs.add_done_callback(self.finish_request) - - def enqueue_req(self, conn): - conn.init() - # submit the connection to a worker - fs = self.tpool.submit(self.handle, conn) - self._wrap_future(fs, conn) - - def accept(self, server, listener): - try: - sock, client = listener.accept() - # initialize the connection object - conn = TConn(self.cfg, sock, client, server) - self.nr_conns += 1 - # enqueue the job - self.enqueue_req(conn) - except EnvironmentError as e: - if e.errno not in (errno.EAGAIN, - errno.ECONNABORTED, errno.EWOULDBLOCK): - raise - - def reuse_connection(self, conn, client): - with self._lock: - # unregister the client from the poller - self.poller.unregister(client) - # remove the connection from keepalive - try: - self._keep.remove(conn) - except ValueError: - # race condition - return - - # submit the connection to a worker - self.enqueue_req(conn) - - def murder_keepalived(self): - now = time.time() - while True: - with self._lock: - try: - # remove the connection from the queue - conn = self._keep.popleft() - except IndexError: - break - - delta = conn.timeout - now - if delta > 0: - # add the connection back to the queue - with self._lock: - self._keep.appendleft(conn) - break - else: - self.nr_conns -= 1 - # remove the socket from the poller - with self._lock: - try: - self.poller.unregister(conn.sock) - except EnvironmentError as e: - if e.errno != 
errno.EBADF: - raise - except KeyError: - # already removed by the system, continue - pass - - # close the socket - conn.close() - - def is_parent_alive(self): - # If our parent changed then we shut down. - if self.ppid != os.getppid(): - self.log.info("Parent changed, shutting down: %s", self) - return False - return True - - def run(self): - # init listeners, add them to the event loop - for sock in self.sockets: - sock.setblocking(False) - # a race condition during graceful shutdown may make the listener - # name unavailable in the request handler so capture it once here - server = sock.getsockname() - acceptor = partial(self.accept, server) - self.poller.register(sock, selectors.EVENT_READ, acceptor) - - while self.alive: - # notify the arbiter we are alive - self.notify() - - # can we accept more connections? - if self.nr_conns < self.worker_connections: - # wait for an event - events = self.poller.select(1.0) - for key, _ in events: - callback = key.data - callback(key.fileobj) - - # check (but do not wait) for finished requests - result = futures.wait(self.futures, timeout=0, - return_when=futures.FIRST_COMPLETED) - else: - # wait for a request to finish - result = futures.wait(self.futures, timeout=1.0, - return_when=futures.FIRST_COMPLETED) - - # clean up finished requests - for fut in result.done: - self.futures.remove(fut) - - if not self.is_parent_alive(): - break - - # hanle keepalive timeouts - self.murder_keepalived() - - self.tpool.shutdown(False) - self.poller.close() - - for s in self.sockets: - s.close() - - futures.wait(self.futures, timeout=self.cfg.graceful_timeout) - - def finish_request(self, fs): - if fs.cancelled(): - self.nr_conns -= 1 - fs.conn.close() - return - - try: - (keepalive, conn) = fs.result() - # if the connection should be kept alived add it - # to the eventloop and record it - if keepalive: - # flag the socket as non blocked - conn.sock.setblocking(False) - - # register the connection - conn.set_timeout() - with self._lock: - self._keep.append(conn) - - # add the socket to the event loop - self.poller.register(conn.sock, selectors.EVENT_READ, - partial(self.reuse_connection, conn)) - else: - self.nr_conns -= 1 - conn.close() - except: - # an exception happened, make sure to close the - # socket. - self.nr_conns -= 1 - fs.conn.close() - - def handle(self, conn): - keepalive = False - req = None - try: - req = six.next(conn.parser) - if not req: - return (False, conn) - - # handle the request - keepalive = self.handle_request(req, conn) - if keepalive: - return (keepalive, conn) - except http.errors.NoMoreData as e: - self.log.debug("Ignored premature client disconnection. %s", e) - - except StopIteration as e: - self.log.debug("Closing connection. 
%s", e) - except ssl.SSLError as e: - if e.args[0] == ssl.SSL_ERROR_EOF: - self.log.debug("ssl connection closed") - conn.sock.close() - else: - self.log.debug("Error processing SSL request.") - self.handle_error(req, conn.sock, conn.client, e) - - except EnvironmentError as e: - if e.errno not in (errno.EPIPE, errno.ECONNRESET): - self.log.exception("Socket error processing request.") - else: - if e.errno == errno.ECONNRESET: - self.log.debug("Ignoring connection reset") - else: - self.log.debug("Ignoring connection epipe") - except Exception as e: - self.handle_error(req, conn.sock, conn.client, e) - - return (False, conn) - - def handle_request(self, req, conn): - environ = {} - resp = None - try: - self.cfg.pre_request(self, req) - request_start = datetime.now() - resp, environ = wsgi.create(req, conn.sock, conn.client, - conn.server, self.cfg) - environ["wsgi.multithread"] = True - self.nr += 1 - if self.alive and self.nr >= self.max_requests: - self.log.info("Autorestarting worker after current request.") - resp.force_close() - self.alive = False - - if not self.cfg.keepalive: - resp.force_close() - elif len(self._keep) >= self.max_keepalived: - resp.force_close() - - respiter = self.wsgi(environ, resp.start_response) - try: - if isinstance(respiter, environ['wsgi.file_wrapper']): - resp.write_file(respiter) - else: - for item in respiter: - resp.write(item) - - resp.close() - request_time = datetime.now() - request_start - self.log.access(resp, req, environ, request_time) - finally: - if hasattr(respiter, "close"): - respiter.close() - - if resp.should_close(): - self.log.debug("Closing connection.") - return False - except EnvironmentError: - # pass to next try-except level - six.reraise(*sys.exc_info()) - except Exception: - if resp and resp.headers_sent: - # If the requests have already been sent, we should close the - # connection to indicate the error. - self.log.exception("Error handling request") - try: - conn.sock.shutdown(socket.SHUT_RDWR) - conn.sock.close() - except EnvironmentError: - pass - raise StopIteration() - raise - finally: - try: - self.cfg.post_request(self, req, environ, resp) - except Exception: - self.log.exception("Exception in post_request hook") - - return True diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/gtornado.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/gtornado.py deleted file mode 100644 index 7c1b118..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/gtornado.py +++ /dev/null @@ -1,146 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. 
- -import copy -import os -import sys - -try: - import tornado -except ImportError: - raise RuntimeError("You need tornado installed to use this worker.") -import tornado.web -import tornado.httpserver -from tornado.ioloop import IOLoop, PeriodicCallback -from tornado.wsgi import WSGIContainer -from gunicorn.workers.base import Worker -from gunicorn import __version__ as gversion - - -# `io_loop` arguments to many Tornado functions have been removed in Tornado 5.0 -# -IOLOOP_PARAMETER_REMOVED = tornado.version_info >= (5, 0, 0) - - -class TornadoWorker(Worker): - - @classmethod - def setup(cls): - web = sys.modules.pop("tornado.web") - old_clear = web.RequestHandler.clear - - def clear(self): - old_clear(self) - if "Gunicorn" not in self._headers["Server"]: - self._headers["Server"] += " (Gunicorn/%s)" % gversion - web.RequestHandler.clear = clear - sys.modules["tornado.web"] = web - - def handle_exit(self, sig, frame): - if self.alive: - super(TornadoWorker, self).handle_exit(sig, frame) - - def handle_request(self): - self.nr += 1 - if self.alive and self.nr >= self.max_requests: - self.log.info("Autorestarting worker after current request.") - self.alive = False - - def watchdog(self): - if self.alive: - self.notify() - - if self.ppid != os.getppid(): - self.log.info("Parent changed, shutting down: %s", self) - self.alive = False - - def heartbeat(self): - if not self.alive: - if self.server_alive: - if hasattr(self, 'server'): - try: - self.server.stop() - except Exception: - pass - self.server_alive = False - else: - if not self.ioloop._callbacks: - self.ioloop.stop() - - def run(self): - self.ioloop = IOLoop.instance() - self.alive = True - self.server_alive = False - if IOLOOP_PARAMETER_REMOVED: - PeriodicCallback(self.watchdog, 1000).start() - PeriodicCallback(self.heartbeat, 1000).start() - else: - PeriodicCallback(self.watchdog, 1000, io_loop=self.ioloop).start() - PeriodicCallback(self.heartbeat, 1000, io_loop=self.ioloop).start() - - # Assume the app is a WSGI callable if its not an - # instance of tornado.web.Application or is an - # instance of tornado.wsgi.WSGIApplication - app = self.wsgi - if not isinstance(app, tornado.web.Application) or \ - isinstance(app, tornado.wsgi.WSGIApplication): - app = WSGIContainer(app) - - # Monkey-patching HTTPConnection.finish to count the - # number of requests being handled by Tornado. This - # will help gunicorn shutdown the worker if max_requests - # is exceeded. 
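The monkey-patch that follows is a plain wrap-and-replace: swap in a wrapper that counts the call before delegating to the original. In general form (not tied to Tornado, names illustrative):

def wrap_with_counter(cls, method_name, on_call):
    # Replace cls.method_name with a wrapper that invokes on_call()
    # before delegating to the original implementation.
    original = getattr(cls, method_name)

    def wrapper(self, *args, **kwargs):
        on_call()
        return original(self, *args, **kwargs)

    setattr(cls, method_name, wrapper)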
- httpserver = sys.modules["tornado.httpserver"] - if hasattr(httpserver, 'HTTPConnection'): - old_connection_finish = httpserver.HTTPConnection.finish - - def finish(other): - self.handle_request() - old_connection_finish(other) - httpserver.HTTPConnection.finish = finish - sys.modules["tornado.httpserver"] = httpserver - - server_class = tornado.httpserver.HTTPServer - else: - - class _HTTPServer(tornado.httpserver.HTTPServer): - - def on_close(instance, server_conn): - self.handle_request() - super(_HTTPServer, instance).on_close(server_conn) - - server_class = _HTTPServer - - if self.cfg.is_ssl: - _ssl_opt = copy.deepcopy(self.cfg.ssl_options) - # tornado refuses initialization if ssl_options contains following - # options - del _ssl_opt["do_handshake_on_connect"] - del _ssl_opt["suppress_ragged_eofs"] - if IOLOOP_PARAMETER_REMOVED: - server = server_class(app, ssl_options=_ssl_opt) - else: - server = server_class(app, io_loop=self.ioloop, - ssl_options=_ssl_opt) - else: - if IOLOOP_PARAMETER_REMOVED: - server = server_class(app) - else: - server = server_class(app, io_loop=self.ioloop) - - self.server = server - self.server_alive = True - - for s in self.sockets: - s.setblocking(0) - if hasattr(server, "add_socket"): # tornado > 2.0 - server.add_socket(s) - elif hasattr(server, "_sockets"): # tornado 2.0 - server._sockets[s.fileno()] = s - - server.no_keep_alive = self.cfg.keepalive <= 0 - server.start(num_processes=1) - - self.ioloop.start() diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/sync.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/sync.py deleted file mode 100644 index 1d2ce2f..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/sync.py +++ /dev/null @@ -1,208 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. -# - -from datetime import datetime -import errno -import os -import select -import socket -import ssl -import sys - -import gunicorn.http as http -import gunicorn.http.wsgi as wsgi -import gunicorn.util as util -import gunicorn.workers.base as base -from gunicorn import six - -class StopWaiting(Exception): - """ exception raised to stop waiting for a connnection """ - -class SyncWorker(base.Worker): - - def accept(self, listener): - client, addr = listener.accept() - client.setblocking(1) - util.close_on_exec(client) - self.handle(listener, client, addr) - - def wait(self, timeout): - try: - self.notify() - ret = select.select(self.wait_fds, [], [], timeout) - if ret[0]: - if self.PIPE[0] in ret[0]: - os.read(self.PIPE[0], 1) - return ret[0] - - except select.error as e: - if e.args[0] == errno.EINTR: - return self.sockets - if e.args[0] == errno.EBADF: - if self.nr < 0: - return self.sockets - else: - raise StopWaiting - raise - - def is_parent_alive(self): - # If our parent changed then we shut down. - if self.ppid != os.getppid(): - self.log.info("Parent changed, shutting down: %s", self) - return False - return True - - def run_for_one(self, timeout): - listener = self.sockets[0] - while self.alive: - self.notify() - - # Accept a connection. If we get an error telling us - # that no connection is waiting we fall down to the - # select which is where we'll wait for a bit for new - # workers to come give us some love. - try: - self.accept(listener) - # Keep processing clients until no one is waiting. This - # prevents the need to select() for every client that we - # process. 
- continue - - except EnvironmentError as e: - if e.errno not in (errno.EAGAIN, errno.ECONNABORTED, - errno.EWOULDBLOCK): - raise - - if not self.is_parent_alive(): - return - - try: - self.wait(timeout) - except StopWaiting: - return - - def run_for_multiple(self, timeout): - while self.alive: - self.notify() - - try: - ready = self.wait(timeout) - except StopWaiting: - return - - if ready is not None: - for listener in ready: - if listener == self.PIPE[0]: - continue - - try: - self.accept(listener) - except EnvironmentError as e: - if e.errno not in (errno.EAGAIN, errno.ECONNABORTED, - errno.EWOULDBLOCK): - raise - - if not self.is_parent_alive(): - return - - def run(self): - # if no timeout is given the worker will never wait and will - # use the CPU for nothing. This minimal timeout prevent it. - timeout = self.timeout or 0.5 - - # self.socket appears to lose its blocking status after - # we fork in the arbiter. Reset it here. - for s in self.sockets: - s.setblocking(0) - - if len(self.sockets) > 1: - self.run_for_multiple(timeout) - else: - self.run_for_one(timeout) - - def handle(self, listener, client, addr): - req = None - try: - if self.cfg.is_ssl: - client = ssl.wrap_socket(client, server_side=True, - **self.cfg.ssl_options) - - parser = http.RequestParser(self.cfg, client) - req = six.next(parser) - self.handle_request(listener, req, client, addr) - except http.errors.NoMoreData as e: - self.log.debug("Ignored premature client disconnection. %s", e) - except StopIteration as e: - self.log.debug("Closing connection. %s", e) - except ssl.SSLError as e: - if e.args[0] == ssl.SSL_ERROR_EOF: - self.log.debug("ssl connection closed") - client.close() - else: - self.log.debug("Error processing SSL request.") - self.handle_error(req, client, addr, e) - except EnvironmentError as e: - if e.errno not in (errno.EPIPE, errno.ECONNRESET): - self.log.exception("Socket error processing request.") - else: - if e.errno == errno.ECONNRESET: - self.log.debug("Ignoring connection reset") - else: - self.log.debug("Ignoring EPIPE") - except Exception as e: - self.handle_error(req, client, addr, e) - finally: - util.close(client) - - def handle_request(self, listener, req, client, addr): - environ = {} - resp = None - try: - self.cfg.pre_request(self, req) - request_start = datetime.now() - resp, environ = wsgi.create(req, client, addr, - listener.getsockname(), self.cfg) - # Force the connection closed until someone shows - # a buffering proxy that supports Keep-Alive to - # the backend. - resp.force_close() - self.nr += 1 - if self.nr >= self.max_requests: - self.log.info("Autorestarting worker after current request.") - self.alive = False - respiter = self.wsgi(environ, resp.start_response) - try: - if isinstance(respiter, environ['wsgi.file_wrapper']): - resp.write_file(respiter) - else: - for item in respiter: - resp.write(item) - resp.close() - request_time = datetime.now() - request_start - self.log.access(resp, req, environ, request_time) - finally: - if hasattr(respiter, "close"): - respiter.close() - except EnvironmentError: - # pass to next try-except level - six.reraise(*sys.exc_info()) - except Exception: - if resp and resp.headers_sent: - # If the requests have already been sent, we should close the - # connection to indicate the error. 
- self.log.exception("Error handling request") - try: - client.shutdown(socket.SHUT_RDWR) - client.close() - except EnvironmentError: - pass - raise StopIteration() - raise - finally: - try: - self.cfg.post_request(self, req, environ, resp) - except Exception: - self.log.exception("Exception in post_request hook") diff --git a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/workertmp.py b/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/workertmp.py deleted file mode 100644 index 36bc97a..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/gunicorn/workers/workertmp.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 - -# -# This file is part of gunicorn released under the MIT license. -# See the NOTICE for more information. - -import os -import platform -import tempfile - -from gunicorn import util - -PLATFORM = platform.system() -IS_CYGWIN = PLATFORM.startswith('CYGWIN') - - -class WorkerTmp(object): - - def __init__(self, cfg): - old_umask = os.umask(cfg.umask) - fdir = cfg.worker_tmp_dir - if fdir and not os.path.isdir(fdir): - raise RuntimeError("%s doesn't exist. Can't create workertmp." % fdir) - fd, name = tempfile.mkstemp(prefix="wgunicorn-", dir=fdir) - - # allows the process to write to the file - util.chown(name, cfg.uid, cfg.gid) - os.umask(old_umask) - - # unlink the file so we don't leak tempory files - try: - if not IS_CYGWIN: - util.unlink(name) - self._tmp = os.fdopen(fd, 'w+b', 1) - except: - os.close(fd) - raise - - self.spinner = 0 - - def notify(self): - try: - self.spinner = (self.spinner + 1) % 2 - os.fchmod(self._tmp.fileno(), self.spinner) - except AttributeError: - # python < 2.6 - self._tmp.truncate(0) - os.write(self._tmp.fileno(), b"X") - - def last_update(self): - return os.fstat(self._tmp.fileno()).st_ctime - - def fileno(self): - return self._tmp.fileno() - - def close(self): - return self._tmp.close() diff --git a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous-1.1.0.dist-info/INSTALLER b/flo-token-explorer/lib/python3.6/site-packages/itsdangerous-1.1.0.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous-1.1.0.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous-1.1.0.dist-info/LICENSE.rst b/flo-token-explorer/lib/python3.6/site-packages/itsdangerous-1.1.0.dist-info/LICENSE.rst deleted file mode 100644 index ef9c194..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous-1.1.0.dist-info/LICENSE.rst +++ /dev/null @@ -1,47 +0,0 @@ -`BSD 3-Clause `_ - -Copyright © 2011 by the Pallets team. - -Some rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -- Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -We kindly ask you to use these themes in an unmodified manner only with -Pallets and Pallets-related projects, not for unrelated projects. 
If you -like the visual style and want to use it for your own projects, please -consider making some larger changes to the themes (such as changing font -faces, sizes, colors or margins). - -THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND -CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, -BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF -USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF -SUCH DAMAGE. - ----- - -The initial implementation of itsdangerous was inspired by Django's -signing module. - -Copyright © Django Software Foundation and individual contributors. -All rights reserved. diff --git a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous-1.1.0.dist-info/METADATA b/flo-token-explorer/lib/python3.6/site-packages/itsdangerous-1.1.0.dist-info/METADATA deleted file mode 100644 index 7389a4d..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous-1.1.0.dist-info/METADATA +++ /dev/null @@ -1,98 +0,0 @@ -Metadata-Version: 2.1 -Name: itsdangerous -Version: 1.1.0 -Summary: Various helpers to pass data to untrusted environments and back. -Home-page: https://palletsprojects.com/p/itsdangerous/ -Author: Armin Ronacher -Author-email: armin.ronacher@active-4.com -Maintainer: Pallets Team -Maintainer-email: contact@palletsprojects.com -License: BSD -Project-URL: Documentation, https://itsdangerous.palletsprojects.com/ -Project-URL: Code, https://github.com/pallets/itsdangerous -Project-URL: Issue tracker, https://github.com/pallets/itsdangerous/issues -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* - -itsdangerous -============ - -... so better sign this - -Various helpers to pass data to untrusted environments and to get it -back safe and sound. Data is cryptographically signed to ensure that a -token has not been tampered with. - -It's possible to customize how data is serialized. Data is compressed as -needed. A timestamp can be added and verified automatically while -loading a token. - - -Installing ----------- - -Install and update using `pip`_: - -.. code-block:: text - - pip install -U itsdangerous - -.. _pip: https://pip.pypa.io/en/stable/quickstart/ - - -A Simple Example ----------------- - -Here's how you could generate a token for transmitting a user's id and -name between web requests. - -.. 
code-block:: python - - from itsdangerous import URLSafeSerializer - auth_s = URLSafeSerializer("secret key", "auth") - token = auth_s.dumps({"id": 5, "name": "itsdangerous"}) - - print(token) - # eyJpZCI6NSwibmFtZSI6Iml0c2Rhbmdlcm91cyJ9.6YP6T0BaO67XP--9UzTrmurXSmg - - data = auth_s.loads(token) - print(data["name"]) - # itsdangerous - - -Donate ------- - -The Pallets organization develops and supports itsdangerous and other -popular packages. In order to grow the community of contributors and -users, and allow the maintainers to devote more time to the projects, -`please donate today`_. - -.. _please donate today: https://palletsprojects.com/donate - - -Links ------ - -* Website: https://palletsprojects.com/p/itsdangerous/ -* Documentation: https://itsdangerous.palletsprojects.com/ -* License: `BSD `_ -* Releases: https://pypi.org/project/itsdangerous/ -* Code: https://github.com/pallets/itsdangerous -* Issue tracker: https://github.com/pallets/itsdangerous/issues -* Test status: https://travis-ci.org/pallets/itsdangerous -* Test coverage: https://codecov.io/gh/pallets/itsdangerous - - diff --git a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous-1.1.0.dist-info/RECORD b/flo-token-explorer/lib/python3.6/site-packages/itsdangerous-1.1.0.dist-info/RECORD deleted file mode 100644 index 79baf65..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous-1.1.0.dist-info/RECORD +++ /dev/null @@ -1,26 +0,0 @@ -itsdangerous/__init__.py,sha256=Dr-SkfFdOyiR_WjiqIXnlFpYRMW0XvPBNV5muzE5N_A,708 -itsdangerous/_compat.py,sha256=oAAMcQAjwQXQpIbuHT3o-aL56ztm_7Fe-4lD7IteF6A,1133 -itsdangerous/_json.py,sha256=W7BLL4RPnSOjNdo2gfKT3BeARMCIikY6O75rwWV0XoE,431 -itsdangerous/encoding.py,sha256=KhY85PsH3bGHe5JANN4LMZ_3b0IwUWRRnnw1wvLlaIg,1224 -itsdangerous/exc.py,sha256=KFxg7K2XMliMQAxL4jkRNgE8e73z2jcRaLrzwqVObnI,2959 -itsdangerous/jws.py,sha256=6Lh9W-Lu8D9s7bRazs0Zb35eyAZm3pzLeZqHmRELeII,7470 -itsdangerous/serializer.py,sha256=bT-dfjKec9zcKa8Qo8n7mHW_8M-XCTPMOFq1TQI_Fv4,8653 -itsdangerous/signer.py,sha256=OOZbK8XomBjQfOFEul8osesn7fc80MXB0L1r7E86_GQ,6345 -itsdangerous/timed.py,sha256=on5Q5lX7LT_LaETOhzF1ZmrRbia8P98263R8FiRyM6Y,5635 -itsdangerous/url_safe.py,sha256=xnFTaukIPmW6Qwn6uNQLgzdau8RuAKnp5N7ukuXykj0,2275 -itsdangerous-1.1.0.dist-info/LICENSE.rst,sha256=_rKL-jSNgWsOfbrt3xhJnufoAHxngT241qs3xl4EbNQ,2120 -itsdangerous-1.1.0.dist-info/METADATA,sha256=yyKjL2WOg_WybH2Yt-7NIvGpV3B93IsMc2HbToWc7Sk,3062 -itsdangerous-1.1.0.dist-info/WHEEL,sha256=CihQvCnsGZQBGAHLEUMf0IdA4fRduS_NBUTMgCTtvPM,110 -itsdangerous-1.1.0.dist-info/top_level.txt,sha256=gKN1OKLk81i7fbWWildJA88EQ9NhnGMSvZqhfz9ICjk,13 -itsdangerous-1.1.0.dist-info/RECORD,, -itsdangerous-1.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -itsdangerous/__pycache__/jws.cpython-36.pyc,, -itsdangerous/__pycache__/_compat.cpython-36.pyc,, -itsdangerous/__pycache__/url_safe.cpython-36.pyc,, -itsdangerous/__pycache__/encoding.cpython-36.pyc,, -itsdangerous/__pycache__/_json.cpython-36.pyc,, -itsdangerous/__pycache__/timed.cpython-36.pyc,, -itsdangerous/__pycache__/serializer.cpython-36.pyc,, -itsdangerous/__pycache__/__init__.cpython-36.pyc,, -itsdangerous/__pycache__/exc.cpython-36.pyc,, -itsdangerous/__pycache__/signer.cpython-36.pyc,, diff --git a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous-1.1.0.dist-info/WHEEL b/flo-token-explorer/lib/python3.6/site-packages/itsdangerous-1.1.0.dist-info/WHEEL deleted file mode 100644 index dea0e20..0000000 --- 
a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous-1.1.0.dist-info/WHEEL
+++ /dev/null
@@ -1,6 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.32.2)
-Root-Is-Purelib: true
-Tag: py2-none-any
-Tag: py3-none-any
-
diff --git a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous-1.1.0.dist-info/top_level.txt b/flo-token-explorer/lib/python3.6/site-packages/itsdangerous-1.1.0.dist-info/top_level.txt
deleted file mode 100644
index e163955..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous-1.1.0.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-itsdangerous
diff --git a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/__init__.py
deleted file mode 100644
index 0fcd8c1..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/__init__.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from ._json import json
-from .encoding import base64_decode
-from .encoding import base64_encode
-from .encoding import want_bytes
-from .exc import BadData
-from .exc import BadHeader
-from .exc import BadPayload
-from .exc import BadSignature
-from .exc import BadTimeSignature
-from .exc import SignatureExpired
-from .jws import JSONWebSignatureSerializer
-from .jws import TimedJSONWebSignatureSerializer
-from .serializer import Serializer
-from .signer import HMACAlgorithm
-from .signer import NoneAlgorithm
-from .signer import Signer
-from .timed import TimedSerializer
-from .timed import TimestampSigner
-from .url_safe import URLSafeSerializer
-from .url_safe import URLSafeTimedSerializer
-
-__version__ = "1.1.0"
diff --git a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/_compat.py b/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/_compat.py
deleted file mode 100644
index 2291bce..0000000
--- a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/_compat.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import decimal
-import hmac
-import numbers
-import sys
-
-PY2 = sys.version_info[0] == 2
-
-if PY2:
-    from itertools import izip
-
-    text_type = unicode  # noqa: 821
-else:
-    izip = zip
-    text_type = str
-
-number_types = (numbers.Real, decimal.Decimal)
-
-
-def _constant_time_compare(val1, val2):
-    """Return ``True`` if the two strings are equal, ``False``
-    otherwise.
-
-    The time taken is independent of the number of characters that
-    match. Do not use this function for anything else than comparison
-    with known length targets.
-
-    This should be implemented in C in order to get it completely
-    right.
-
-    This is an alias of :func:`hmac.compare_digest` on Python>=2.7,3.3.
-    """
-    len_eq = len(val1) == len(val2)
-    if len_eq:
-        result = 0
-        left = val1
-    else:
-        result = 1
-        left = val2
-    for x, y in izip(bytearray(left), bytearray(val2)):
-        result |= x ^ y
-    return result == 0
-
-
-# Starting with 2.7/3.3 the standard library has a c-implementation for
-# constant time string compares.
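For reference, the stdlib primitive that the alias below prefers can be exercised directly; its running time does not depend on where the inputs first differ:

import hmac

# Both calls take roughly the same time regardless of how many leading
# bytes match, which is what defeats timing attacks on signatures.
assert hmac.compare_digest(b"sig-abc", b"sig-abc")
assert not hmac.compare_digest(b"sig-abc", b"sig-abd")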
-constant_time_compare = getattr(hmac, "compare_digest", _constant_time_compare) diff --git a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/_json.py b/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/_json.py deleted file mode 100644 index 426b36e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/_json.py +++ /dev/null @@ -1,18 +0,0 @@ -try: - import simplejson as json -except ImportError: - import json - - -class _CompactJSON(object): - """Wrapper around json module that strips whitespace.""" - - @staticmethod - def loads(payload): - return json.loads(payload) - - @staticmethod - def dumps(obj, **kwargs): - kwargs.setdefault("ensure_ascii", False) - kwargs.setdefault("separators", (",", ":")) - return json.dumps(obj, **kwargs) diff --git a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/encoding.py b/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/encoding.py deleted file mode 100644 index 1e28969..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/encoding.py +++ /dev/null @@ -1,49 +0,0 @@ -import base64 -import string -import struct - -from ._compat import text_type -from .exc import BadData - - -def want_bytes(s, encoding="utf-8", errors="strict"): - if isinstance(s, text_type): - s = s.encode(encoding, errors) - return s - - -def base64_encode(string): - """Base64 encode a string of bytes or text. The resulting bytes are - safe to use in URLs. - """ - string = want_bytes(string) - return base64.urlsafe_b64encode(string).rstrip(b"=") - - -def base64_decode(string): - """Base64 decode a URL-safe string of bytes or text. The result is - bytes. - """ - string = want_bytes(string, encoding="ascii", errors="ignore") - string += b"=" * (-len(string) % 4) - - try: - return base64.urlsafe_b64decode(string) - except (TypeError, ValueError): - raise BadData("Invalid base64-encoded data") - - -# The alphabet used by base64.urlsafe_* -_base64_alphabet = (string.ascii_letters + string.digits + "-_=").encode("ascii") - -_int64_struct = struct.Struct(">Q") -_int_to_bytes = _int64_struct.pack -_bytes_to_int = _int64_struct.unpack - - -def int_to_bytes(num): - return _int_to_bytes(num).lstrip(b"\x00") - - -def bytes_to_int(bytestr): - return _bytes_to_int(bytestr.rjust(8, b"\x00"))[0] diff --git a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/exc.py b/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/exc.py deleted file mode 100644 index 287d691..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/exc.py +++ /dev/null @@ -1,98 +0,0 @@ -from ._compat import PY2 -from ._compat import text_type - - -class BadData(Exception): - """Raised if bad data of any sort was encountered. This is the base - for all exceptions that itsdangerous defines. - - .. versionadded:: 0.15 - """ - - message = None - - def __init__(self, message): - super(BadData, self).__init__(self, message) - self.message = message - - def __str__(self): - return text_type(self.message) - - if PY2: - __unicode__ = __str__ - - def __str__(self): - return self.__unicode__().encode("utf-8") - - -class BadSignature(BadData): - """Raised if a signature does not match.""" - - def __init__(self, message, payload=None): - BadData.__init__(self, message) - - #: The payload that failed the signature test. In some - #: situations you might still want to inspect this, even if - #: you know it was tampered with. - #: - #: .. 
versionadded:: 0.14 - self.payload = payload - - -class BadTimeSignature(BadSignature): - """Raised if a time-based signature is invalid. This is a subclass - of :class:`BadSignature`. - """ - - def __init__(self, message, payload=None, date_signed=None): - BadSignature.__init__(self, message, payload) - - #: If the signature expired this exposes the date of when the - #: signature was created. This can be helpful in order to - #: tell the user how long a link has been gone stale. - #: - #: .. versionadded:: 0.14 - self.date_signed = date_signed - - -class SignatureExpired(BadTimeSignature): - """Raised if a signature timestamp is older than ``max_age``. This - is a subclass of :exc:`BadTimeSignature`. - """ - - -class BadHeader(BadSignature): - """Raised if a signed header is invalid in some form. This only - happens for serializers that have a header that goes with the - signature. - - .. versionadded:: 0.24 - """ - - def __init__(self, message, payload=None, header=None, original_error=None): - BadSignature.__init__(self, message, payload) - - #: If the header is actually available but just malformed it - #: might be stored here. - self.header = header - - #: If available, the error that indicates why the payload was - #: not valid. This might be ``None``. - self.original_error = original_error - - -class BadPayload(BadData): - """Raised if a payload is invalid. This could happen if the payload - is loaded despite an invalid signature, or if there is a mismatch - between the serializer and deserializer. The original exception - that occurred during loading is stored on as :attr:`original_error`. - - .. versionadded:: 0.15 - """ - - def __init__(self, message, original_error=None): - BadData.__init__(self, message) - - #: If available, the error that indicates why the payload was - #: not valid. This might be ``None``. - self.original_error = original_error diff --git a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/jws.py b/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/jws.py deleted file mode 100644 index 92e9ec8..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/jws.py +++ /dev/null @@ -1,218 +0,0 @@ -import hashlib -import time -from datetime import datetime - -from ._compat import number_types -from ._json import _CompactJSON -from ._json import json -from .encoding import base64_decode -from .encoding import base64_encode -from .encoding import want_bytes -from .exc import BadData -from .exc import BadHeader -from .exc import BadPayload -from .exc import BadSignature -from .exc import SignatureExpired -from .serializer import Serializer -from .signer import HMACAlgorithm -from .signer import NoneAlgorithm - - -class JSONWebSignatureSerializer(Serializer): - """This serializer implements JSON Web Signature (JWS) support. Only - supports the JWS Compact Serialization. 
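The exception classes above carry forensic attributes (``payload``, ``date_signed``) that callers can inspect after a failure; a hedged sketch:

.. code-block:: python

    from itsdangerous import (
        BadTimeSignature,
        SignatureExpired,
        TimestampSigner,
    )

    signer = TimestampSigner("secret key")
    signed = signer.sign("user-42")

    try:
        value = signer.unsign(signed, max_age=60)
    except SignatureExpired as e:
        # the signature verified but is older than max_age
        print("stale link, signed at", e.date_signed)
    except BadTimeSignature as e:
        # tampered or malformed; e.payload is untrusted but inspectable
        print("rejected payload:", e.payload)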
- """ - - jws_algorithms = { - "HS256": HMACAlgorithm(hashlib.sha256), - "HS384": HMACAlgorithm(hashlib.sha384), - "HS512": HMACAlgorithm(hashlib.sha512), - "none": NoneAlgorithm(), - } - - #: The default algorithm to use for signature generation - default_algorithm = "HS512" - - default_serializer = _CompactJSON - - def __init__( - self, - secret_key, - salt=None, - serializer=None, - serializer_kwargs=None, - signer=None, - signer_kwargs=None, - algorithm_name=None, - ): - Serializer.__init__( - self, - secret_key=secret_key, - salt=salt, - serializer=serializer, - serializer_kwargs=serializer_kwargs, - signer=signer, - signer_kwargs=signer_kwargs, - ) - if algorithm_name is None: - algorithm_name = self.default_algorithm - self.algorithm_name = algorithm_name - self.algorithm = self.make_algorithm(algorithm_name) - - def load_payload(self, payload, serializer=None, return_header=False): - payload = want_bytes(payload) - if b"." not in payload: - raise BadPayload('No "." found in value') - base64d_header, base64d_payload = payload.split(b".", 1) - try: - json_header = base64_decode(base64d_header) - except Exception as e: - raise BadHeader( - "Could not base64 decode the header because of an exception", - original_error=e, - ) - try: - json_payload = base64_decode(base64d_payload) - except Exception as e: - raise BadPayload( - "Could not base64 decode the payload because of an exception", - original_error=e, - ) - try: - header = Serializer.load_payload(self, json_header, serializer=json) - except BadData as e: - raise BadHeader( - "Could not unserialize header because it was malformed", - original_error=e, - ) - if not isinstance(header, dict): - raise BadHeader("Header payload is not a JSON object", header=header) - payload = Serializer.load_payload(self, json_payload, serializer=serializer) - if return_header: - return payload, header - return payload - - def dump_payload(self, header, obj): - base64d_header = base64_encode( - self.serializer.dumps(header, **self.serializer_kwargs) - ) - base64d_payload = base64_encode( - self.serializer.dumps(obj, **self.serializer_kwargs) - ) - return base64d_header + b"." + base64d_payload - - def make_algorithm(self, algorithm_name): - try: - return self.jws_algorithms[algorithm_name] - except KeyError: - raise NotImplementedError("Algorithm not supported") - - def make_signer(self, salt=None, algorithm=None): - if salt is None: - salt = self.salt - key_derivation = "none" if salt is None else None - if algorithm is None: - algorithm = self.algorithm - return self.signer( - self.secret_key, - salt=salt, - sep=".", - key_derivation=key_derivation, - algorithm=algorithm, - ) - - def make_header(self, header_fields): - header = header_fields.copy() if header_fields else {} - header["alg"] = self.algorithm_name - return header - - def dumps(self, obj, salt=None, header_fields=None): - """Like :meth:`.Serializer.dumps` but creates a JSON Web - Signature. It also allows for specifying additional fields to be - included in the JWS header. - """ - header = self.make_header(header_fields) - signer = self.make_signer(salt, self.algorithm) - return signer.sign(self.dump_payload(header, obj)) - - def loads(self, s, salt=None, return_header=False): - """Reverse of :meth:`dumps`. If requested via ``return_header`` - it will return a tuple of payload and header. 
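A short usage sketch for the serializer whose internals appear above (the algorithm choice is illustrative):

.. code-block:: python

    from itsdangerous import JSONWebSignatureSerializer

    s = JSONWebSignatureSerializer("secret key", algorithm_name="HS256")
    token = s.dumps({"id": 5})

    payload, header = s.loads(token, return_header=True)
    assert header["alg"] == "HS256"
    assert payload == {"id": 5}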
- """ - payload, header = self.load_payload( - self.make_signer(salt, self.algorithm).unsign(want_bytes(s)), - return_header=True, - ) - if header.get("alg") != self.algorithm_name: - raise BadHeader("Algorithm mismatch", header=header, payload=payload) - if return_header: - return payload, header - return payload - - def loads_unsafe(self, s, salt=None, return_header=False): - kwargs = {"return_header": return_header} - return self._loads_unsafe_impl(s, salt, kwargs, kwargs) - - -class TimedJSONWebSignatureSerializer(JSONWebSignatureSerializer): - """Works like the regular :class:`JSONWebSignatureSerializer` but - also records the time of the signing and can be used to expire - signatures. - - JWS currently does not specify this behavior but it mentions a - possible extension like this in the spec. Expiry date is encoded - into the header similar to what's specified in `draft-ietf-oauth - -json-web-token `_. - """ - - DEFAULT_EXPIRES_IN = 3600 - - def __init__(self, secret_key, expires_in=None, **kwargs): - JSONWebSignatureSerializer.__init__(self, secret_key, **kwargs) - if expires_in is None: - expires_in = self.DEFAULT_EXPIRES_IN - self.expires_in = expires_in - - def make_header(self, header_fields): - header = JSONWebSignatureSerializer.make_header(self, header_fields) - iat = self.now() - exp = iat + self.expires_in - header["iat"] = iat - header["exp"] = exp - return header - - def loads(self, s, salt=None, return_header=False): - payload, header = JSONWebSignatureSerializer.loads( - self, s, salt, return_header=True - ) - - if "exp" not in header: - raise BadSignature("Missing expiry date", payload=payload) - - int_date_error = BadHeader("Expiry date is not an IntDate", payload=payload) - try: - header["exp"] = int(header["exp"]) - except ValueError: - raise int_date_error - if header["exp"] < 0: - raise int_date_error - - if header["exp"] < self.now(): - raise SignatureExpired( - "Signature expired", - payload=payload, - date_signed=self.get_issue_date(header), - ) - - if return_header: - return payload, header - return payload - - def get_issue_date(self, header): - rv = header.get("iat") - if isinstance(rv, number_types): - return datetime.utcfromtimestamp(int(rv)) - - def now(self): - return int(time.time()) diff --git a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/serializer.py b/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/serializer.py deleted file mode 100644 index 12c20f4..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/serializer.py +++ /dev/null @@ -1,233 +0,0 @@ -import hashlib - -from ._compat import text_type -from ._json import json -from .encoding import want_bytes -from .exc import BadPayload -from .exc import BadSignature -from .signer import Signer - - -def is_text_serializer(serializer): - """Checks whether a serializer generates text or binary.""" - return isinstance(serializer.dumps({}), text_type) - - -class Serializer(object): - """This class provides a serialization interface on top of the - signer. It provides a similar API to json/pickle and other modules - but is structured differently internally. If you want to change the - underlying implementation for parsing and loading you have to - override the :meth:`load_payload` and :meth:`dump_payload` - functions. - - This implementation uses simplejson if available for dumping and - loading and will fall back to the standard library's json module if - it's not available. 
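The timed variant stamps ``iat``/``exp`` into the JWS header as described above; a sketch with an illustrative ``expires_in``:

.. code-block:: python

    from itsdangerous import (
        SignatureExpired,
        TimedJSONWebSignatureSerializer,
    )

    s = TimedJSONWebSignatureSerializer("secret key", expires_in=600)
    token = s.dumps({"id": 5})

    try:
        data = s.loads(token)
    except SignatureExpired as e:
        # e.date_signed is reconstructed from the "iat" header field
        print("expired, issued at", e.date_signed)
        data = None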
- - You do not need to subclass this class in order to switch out or - customize the :class:`.Signer`. You can instead pass a different - class to the constructor as well as keyword arguments as a dict that - should be forwarded. - - .. code-block:: python - - s = Serializer(signer_kwargs={'key_derivation': 'hmac'}) - - You may want to upgrade the signing parameters without invalidating - existing signatures that are in use. Fallback signatures can be - given that will be tried if unsigning with the current signer fails. - - Fallback signers can be defined by providing a list of - ``fallback_signers``. Each item can be one of the following: a - signer class (which is instantiated with ``signer_kwargs``, - ``salt``, and ``secret_key``), a tuple - ``(signer_class, signer_kwargs)``, or a dict of ``signer_kwargs``. - - For example, this is a serializer that signs using SHA-512, but will - unsign using either SHA-512 or SHA1: - - .. code-block:: python - - s = Serializer( - signer_kwargs={"digest_method": hashlib.sha512}, - fallback_signers=[{"digest_method": hashlib.sha1}] - ) - - .. versionchanged:: 0.14: - The ``signer`` and ``signer_kwargs`` parameters were added to - the constructor. - - .. versionchanged:: 1.1.0: - Added support for ``fallback_signers`` and configured a default - SHA-512 fallback. This fallback is for users who used the yanked - 1.0.0 release which defaulted to SHA-512. - """ - - #: If a serializer module or class is not passed to the constructor - #: this one is picked up. This currently defaults to :mod:`json`. - default_serializer = json - - #: The default :class:`Signer` class that is being used by this - #: serializer. - #: - #: .. versionadded:: 0.14 - default_signer = Signer - - #: The default fallback signers. - default_fallback_signers = [{"digest_method": hashlib.sha512}] - - def __init__( - self, - secret_key, - salt=b"itsdangerous", - serializer=None, - serializer_kwargs=None, - signer=None, - signer_kwargs=None, - fallback_signers=None, - ): - self.secret_key = want_bytes(secret_key) - self.salt = want_bytes(salt) - if serializer is None: - serializer = self.default_serializer - self.serializer = serializer - self.is_text_serializer = is_text_serializer(serializer) - if signer is None: - signer = self.default_signer - self.signer = signer - self.signer_kwargs = signer_kwargs or {} - if fallback_signers is None: - fallback_signers = list(self.default_fallback_signers or ()) - self.fallback_signers = fallback_signers - self.serializer_kwargs = serializer_kwargs or {} - - def load_payload(self, payload, serializer=None): - """Loads the encoded object. This function raises - :class:`.BadPayload` if the payload is not valid. The - ``serializer`` parameter can be used to override the serializer - stored on the class. The encoded ``payload`` should always be - bytes. - """ - if serializer is None: - serializer = self.serializer - is_text = self.is_text_serializer - else: - is_text = is_text_serializer(serializer) - try: - if is_text: - payload = payload.decode("utf-8") - return serializer.loads(payload) - except Exception as e: - raise BadPayload( - "Could not load the payload because an exception" - " occurred on unserializing the data.", - original_error=e, - ) - - def dump_payload(self, obj): - """Dumps the encoded object. The return value is always bytes. - If the internal serializer returns text, the value will be - encoded as UTF-8. 
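The ``fallback_signers`` machinery documented above allows upgrading the digest without invalidating outstanding tokens; a sketch:

.. code-block:: python

    import hashlib

    from itsdangerous import Serializer

    # tokens minted by an older SHA-1 deployment...
    old = Serializer("secret key", signer_kwargs={"digest_method": hashlib.sha1})
    token = old.dumps({"id": 5})

    # ...still verify after the primary signer moves to SHA-512
    new = Serializer(
        "secret key",
        signer_kwargs={"digest_method": hashlib.sha512},
        fallback_signers=[{"digest_method": hashlib.sha1}],
    )
    assert new.loads(token) == {"id": 5}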
- """ - return want_bytes(self.serializer.dumps(obj, **self.serializer_kwargs)) - - def make_signer(self, salt=None): - """Creates a new instance of the signer to be used. The default - implementation uses the :class:`.Signer` base class. - """ - if salt is None: - salt = self.salt - return self.signer(self.secret_key, salt=salt, **self.signer_kwargs) - - def iter_unsigners(self, salt=None): - """Iterates over all signers to be tried for unsigning. Starts - with the configured signer, then constructs each signer - specified in ``fallback_signers``. - """ - if salt is None: - salt = self.salt - yield self.make_signer(salt) - for fallback in self.fallback_signers: - if type(fallback) is dict: - kwargs = fallback - fallback = self.signer - elif type(fallback) is tuple: - fallback, kwargs = fallback - else: - kwargs = self.signer_kwargs - yield fallback(self.secret_key, salt=salt, **kwargs) - - def dumps(self, obj, salt=None): - """Returns a signed string serialized with the internal - serializer. The return value can be either a byte or unicode - string depending on the format of the internal serializer. - """ - payload = want_bytes(self.dump_payload(obj)) - rv = self.make_signer(salt).sign(payload) - if self.is_text_serializer: - rv = rv.decode("utf-8") - return rv - - def dump(self, obj, f, salt=None): - """Like :meth:`dumps` but dumps into a file. The file handle has - to be compatible with what the internal serializer expects. - """ - f.write(self.dumps(obj, salt)) - - def loads(self, s, salt=None): - """Reverse of :meth:`dumps`. Raises :exc:`.BadSignature` if the - signature validation fails. - """ - s = want_bytes(s) - last_exception = None - for signer in self.iter_unsigners(salt): - try: - return self.load_payload(signer.unsign(s)) - except BadSignature as err: - last_exception = err - raise last_exception - - def load(self, f, salt=None): - """Like :meth:`loads` but loads from a file.""" - return self.loads(f.read(), salt) - - def loads_unsafe(self, s, salt=None): - """Like :meth:`loads` but without verifying the signature. This - is potentially very dangerous to use depending on how your - serializer works. The return value is ``(signature_valid, - payload)`` instead of just the payload. The first item will be a - boolean that indicates if the signature is valid. This function - never fails. - - Use it for debugging only and if you know that your serializer - module is not exploitable (for example, do not use it with a - pickle serializer). - - .. versionadded:: 0.15 - """ - return self._loads_unsafe_impl(s, salt) - - def _loads_unsafe_impl(self, s, salt, load_kwargs=None, load_payload_kwargs=None): - """Low level helper function to implement :meth:`loads_unsafe` - in serializer subclasses. - """ - try: - return True, self.loads(s, salt=salt, **(load_kwargs or {})) - except BadSignature as e: - if e.payload is None: - return False, None - try: - return ( - False, - self.load_payload(e.payload, **(load_payload_kwargs or {})), - ) - except BadPayload: - return False, None - - def load_unsafe(self, f, *args, **kwargs): - """Like :meth:`loads_unsafe` but loads from a file. - - .. 
versionadded:: 0.15 - """ - return self.loads_unsafe(f.read(), *args, **kwargs) diff --git a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/signer.py b/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/signer.py deleted file mode 100644 index 6bddc03..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/signer.py +++ /dev/null @@ -1,179 +0,0 @@ -import hashlib -import hmac - -from ._compat import constant_time_compare -from .encoding import _base64_alphabet -from .encoding import base64_decode -from .encoding import base64_encode -from .encoding import want_bytes -from .exc import BadSignature - - -class SigningAlgorithm(object): - """Subclasses must implement :meth:`get_signature` to provide - signature generation functionality. - """ - - def get_signature(self, key, value): - """Returns the signature for the given key and value.""" - raise NotImplementedError() - - def verify_signature(self, key, value, sig): - """Verifies the given signature matches the expected - signature. - """ - return constant_time_compare(sig, self.get_signature(key, value)) - - -class NoneAlgorithm(SigningAlgorithm): - """Provides an algorithm that does not perform any signing and - returns an empty signature. - """ - - def get_signature(self, key, value): - return b"" - - -class HMACAlgorithm(SigningAlgorithm): - """Provides signature generation using HMACs.""" - - #: The digest method to use with the MAC algorithm. This defaults to - #: SHA1, but can be changed to any other function in the hashlib - #: module. - default_digest_method = staticmethod(hashlib.sha1) - - def __init__(self, digest_method=None): - if digest_method is None: - digest_method = self.default_digest_method - self.digest_method = digest_method - - def get_signature(self, key, value): - mac = hmac.new(key, msg=value, digestmod=self.digest_method) - return mac.digest() - - -class Signer(object): - """This class can sign and unsign bytes, validating the signature - provided. - - Salt can be used to namespace the hash, so that a signed string is - only valid for a given namespace. Leaving this at the default value - or re-using a salt value across different parts of your application - where the same signed value in one part can mean something different - in another part is a security risk. - - See :ref:`the-salt` for an example of what the salt is doing and how - you can utilize it. - - .. versionadded:: 0.14 - ``key_derivation`` and ``digest_method`` were added as arguments - to the class constructor. - - .. versionadded:: 0.18 - ``algorithm`` was added as an argument to the class constructor. - """ - - #: The digest method to use for the signer. This defaults to - #: SHA1 but can be changed to any other function in the hashlib - #: module. - #: - #: .. versionadded:: 0.14 - default_digest_method = staticmethod(hashlib.sha1) - - #: Controls how the key is derived. The default is Django-style - #: concatenation. Possible values are ``concat``, ``django-concat`` - #: and ``hmac``. This is used for deriving a key from the secret key - #: with an added salt. - #: - #: .. versionadded:: 0.14 - default_key_derivation = "django-concat" - - def __init__( - self, - secret_key, - salt=None, - sep=".", - key_derivation=None, - digest_method=None, - algorithm=None, - ): - self.secret_key = want_bytes(secret_key) - self.sep = want_bytes(sep) - if self.sep in _base64_alphabet: - raise ValueError( - "The given separator cannot be used because it may be" - " contained in the signature itself. 
Alphanumeric" - " characters and `-_=` must not be used." - ) - self.salt = "itsdangerous.Signer" if salt is None else salt - if key_derivation is None: - key_derivation = self.default_key_derivation - self.key_derivation = key_derivation - if digest_method is None: - digest_method = self.default_digest_method - self.digest_method = digest_method - if algorithm is None: - algorithm = HMACAlgorithm(self.digest_method) - self.algorithm = algorithm - - def derive_key(self): - """This method is called to derive the key. The default key - derivation choices can be overridden here. Key derivation is not - intended to be used as a security method to make a complex key - out of a short password. Instead you should use large random - secret keys. - """ - salt = want_bytes(self.salt) - if self.key_derivation == "concat": - return self.digest_method(salt + self.secret_key).digest() - elif self.key_derivation == "django-concat": - return self.digest_method(salt + b"signer" + self.secret_key).digest() - elif self.key_derivation == "hmac": - mac = hmac.new(self.secret_key, digestmod=self.digest_method) - mac.update(salt) - return mac.digest() - elif self.key_derivation == "none": - return self.secret_key - else: - raise TypeError("Unknown key derivation method") - - def get_signature(self, value): - """Returns the signature for the given value.""" - value = want_bytes(value) - key = self.derive_key() - sig = self.algorithm.get_signature(key, value) - return base64_encode(sig) - - def sign(self, value): - """Signs the given string.""" - return want_bytes(value) + want_bytes(self.sep) + self.get_signature(value) - - def verify_signature(self, value, sig): - """Verifies the signature for the given value.""" - key = self.derive_key() - try: - sig = base64_decode(sig) - except Exception: - return False - return self.algorithm.verify_signature(key, value, sig) - - def unsign(self, signed_value): - """Unsigns the given string.""" - signed_value = want_bytes(signed_value) - sep = want_bytes(self.sep) - if sep not in signed_value: - raise BadSignature("No %r found in value" % self.sep) - value, sig = signed_value.rsplit(sep, 1) - if self.verify_signature(value, sig): - return value - raise BadSignature("Signature %r does not match" % sig, payload=value) - - def validate(self, signed_value): - """Only validates the given signed value. Returns ``True`` if - the signature exists and is valid. - """ - try: - self.unsign(signed_value) - return True - except BadSignature: - return False diff --git a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/timed.py b/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/timed.py deleted file mode 100644 index 4c117e4..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/timed.py +++ /dev/null @@ -1,147 +0,0 @@ -import time -from datetime import datetime - -from ._compat import text_type -from .encoding import base64_decode -from .encoding import base64_encode -from .encoding import bytes_to_int -from .encoding import int_to_bytes -from .encoding import want_bytes -from .exc import BadSignature -from .exc import BadTimeSignature -from .exc import SignatureExpired -from .serializer import Serializer -from .signer import Signer - - -class TimestampSigner(Signer): - """Works like the regular :class:`.Signer` but also records the time - of the signing and can be used to expire signatures. The - :meth:`unsign` method can raise :exc:`.SignatureExpired` if the - unsigning failed because the signature is expired. 
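The salt guidance above is easiest to see side by side: the same secret key under different salts produces mutually invalid signatures.

.. code-block:: python

    from itsdangerous import Signer

    activate = Signer("secret key", salt="activate-account")
    upgrade = Signer("secret key", salt="upgrade-account")

    signed = activate.sign("user-42")
    assert activate.validate(signed)
    # same key, different namespace: the signature is rejected
    assert not upgrade.validate(signed)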
- """ - - def get_timestamp(self): - """Returns the current timestamp. The function must return an - integer. - """ - return int(time.time()) - - def timestamp_to_datetime(self, ts): - """Used to convert the timestamp from :meth:`get_timestamp` into - a datetime object. - """ - return datetime.utcfromtimestamp(ts) - - def sign(self, value): - """Signs the given string and also attaches time information.""" - value = want_bytes(value) - timestamp = base64_encode(int_to_bytes(self.get_timestamp())) - sep = want_bytes(self.sep) - value = value + sep + timestamp - return value + sep + self.get_signature(value) - - def unsign(self, value, max_age=None, return_timestamp=False): - """Works like the regular :meth:`.Signer.unsign` but can also - validate the time. See the base docstring of the class for - the general behavior. If ``return_timestamp`` is ``True`` the - timestamp of the signature will be returned as a naive - :class:`datetime.datetime` object in UTC. - """ - try: - result = Signer.unsign(self, value) - sig_error = None - except BadSignature as e: - sig_error = e - result = e.payload or b"" - sep = want_bytes(self.sep) - - # If there is no timestamp in the result there is something - # seriously wrong. In case there was a signature error, we raise - # that one directly, otherwise we have a weird situation in - # which we shouldn't have come except someone uses a time-based - # serializer on non-timestamp data, so catch that. - if sep not in result: - if sig_error: - raise sig_error - raise BadTimeSignature("timestamp missing", payload=result) - - value, timestamp = result.rsplit(sep, 1) - try: - timestamp = bytes_to_int(base64_decode(timestamp)) - except Exception: - timestamp = None - - # Signature is *not* okay. Raise a proper error now that we have - # split the value and the timestamp. - if sig_error is not None: - raise BadTimeSignature( - text_type(sig_error), payload=value, date_signed=timestamp - ) - - # Signature was okay but the timestamp is actually not there or - # malformed. Should not happen, but we handle it anyway. - if timestamp is None: - raise BadTimeSignature("Malformed timestamp", payload=value) - - # Check timestamp is not older than max_age - if max_age is not None: - age = self.get_timestamp() - timestamp - if age > max_age: - raise SignatureExpired( - "Signature age %s > %s seconds" % (age, max_age), - payload=value, - date_signed=self.timestamp_to_datetime(timestamp), - ) - - if return_timestamp: - return value, self.timestamp_to_datetime(timestamp) - return value - - def validate(self, signed_value, max_age=None): - """Only validates the given signed value. Returns ``True`` if - the signature exists and is valid.""" - try: - self.unsign(signed_value, max_age=max_age) - return True - except BadSignature: - return False - - -class TimedSerializer(Serializer): - """Uses :class:`TimestampSigner` instead of the default - :class:`.Signer`. - """ - - default_signer = TimestampSigner - - def loads(self, s, max_age=None, return_timestamp=False, salt=None): - """Reverse of :meth:`dumps`, raises :exc:`.BadSignature` if the - signature validation fails. If a ``max_age`` is provided it will - ensure the signature is not older than that time in seconds. In - case the signature is outdated, :exc:`.SignatureExpired` is - raised. All arguments are forwarded to the signer's - :meth:`~TimestampSigner.unsign` method. 
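A sketch of the timestamp round trip described above, including the boolean ``validate`` convenience:

.. code-block:: python

    from itsdangerous import TimestampSigner

    signer = TimestampSigner("secret key")
    signed = signer.sign(b"payload")

    # recover both the value and the naive UTC datetime of signing
    value, signed_at = signer.unsign(signed, return_timestamp=True)

    # validate() folds signature and age errors into a boolean
    assert signer.validate(signed, max_age=60)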
- """ - s = want_bytes(s) - last_exception = None - for signer in self.iter_unsigners(salt): - try: - base64d, timestamp = signer.unsign(s, max_age, return_timestamp=True) - payload = self.load_payload(base64d) - if return_timestamp: - return payload, timestamp - return payload - # If we get a signature expired it means we could read the - # signature but it's invalid. In that case we do not want to - # try the next signer. - except SignatureExpired: - raise - except BadSignature as err: - last_exception = err - raise last_exception - - def loads_unsafe(self, s, max_age=None, salt=None): - load_kwargs = {"max_age": max_age} - load_payload_kwargs = {} - return self._loads_unsafe_impl(s, salt, load_kwargs, load_payload_kwargs) diff --git a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/url_safe.py b/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/url_safe.py deleted file mode 100644 index fcaa011..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/itsdangerous/url_safe.py +++ /dev/null @@ -1,65 +0,0 @@ -import zlib - -from ._json import _CompactJSON -from .encoding import base64_decode -from .encoding import base64_encode -from .exc import BadPayload -from .serializer import Serializer -from .timed import TimedSerializer - - -class URLSafeSerializerMixin(object): - """Mixed in with a regular serializer it will attempt to zlib - compress the string to make it shorter if necessary. It will also - base64 encode the string so that it can safely be placed in a URL. - """ - - default_serializer = _CompactJSON - - def load_payload(self, payload, *args, **kwargs): - decompress = False - if payload.startswith(b"."): - payload = payload[1:] - decompress = True - try: - json = base64_decode(payload) - except Exception as e: - raise BadPayload( - "Could not base64 decode the payload because of an exception", - original_error=e, - ) - if decompress: - try: - json = zlib.decompress(json) - except Exception as e: - raise BadPayload( - "Could not zlib decompress the payload before decoding the payload", - original_error=e, - ) - return super(URLSafeSerializerMixin, self).load_payload(json, *args, **kwargs) - - def dump_payload(self, obj): - json = super(URLSafeSerializerMixin, self).dump_payload(obj) - is_compressed = False - compressed = zlib.compress(json) - if len(compressed) < (len(json) - 1): - json = compressed - is_compressed = True - base64d = base64_encode(json) - if is_compressed: - base64d = b"." + base64d - return base64d - - -class URLSafeSerializer(URLSafeSerializerMixin, Serializer): - """Works like :class:`.Serializer` but dumps and loads into a URL - safe string consisting of the upper and lowercase character of the - alphabet as well as ``'_'``, ``'-'`` and ``'.'``. - """ - - -class URLSafeTimedSerializer(URLSafeSerializerMixin, TimedSerializer): - """Works like :class:`.TimedSerializer` but dumps and loads into a - URL safe string consisting of the upper and lowercase character of - the alphabet as well as ``'_'``, ``'-'`` and ``'.'``. - """ diff --git a/flo-token-explorer/lib/python3.6/site-packages/jinja2/__init__.py b/flo-token-explorer/lib/python3.6/site-packages/jinja2/__init__.py deleted file mode 100644 index 15e13b6..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/jinja2/__init__.py +++ /dev/null @@ -1,83 +0,0 @@ -# -*- coding: utf-8 -*- -""" - jinja2 - ~~~~~~ - - Jinja2 is a template engine written in pure Python. It provides a - Django inspired non-XML syntax but supports inline expressions and - an optional sandboxed environment. 
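The mixin above transparently zlib-compresses payloads that shrink under compression and flags them with a leading ``.``; a sketch:

.. code-block:: python

    from itsdangerous import URLSafeSerializer

    s = URLSafeSerializer("secret key")

    short = s.dumps({"id": 5})            # too small to benefit
    big = s.dumps({"blob": "x" * 500})    # compresses below the raw JSON

    assert not short.startswith(".")
    assert big.startswith(".")
    assert s.loads(big) == {"blob": "x" * 500}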
- - Nutshell - -------- - - Here a small example of a Jinja2 template:: - - {% extends 'base.html' %} - {% block title %}Memberlist{% endblock %} - {% block content %} - - {% endblock %} - - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. -""" -__docformat__ = 'restructuredtext en' -__version__ = '2.10.1' - -# high level interface -from jinja2.environment import Environment, Template - -# loaders -from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \ - DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \ - ModuleLoader - -# bytecode caches -from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \ - MemcachedBytecodeCache - -# undefined types -from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined, \ - make_logging_undefined - -# exceptions -from jinja2.exceptions import TemplateError, UndefinedError, \ - TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \ - TemplateAssertionError, TemplateRuntimeError - -# decorators and public utilities -from jinja2.filters import environmentfilter, contextfilter, \ - evalcontextfilter -from jinja2.utils import Markup, escape, clear_caches, \ - environmentfunction, evalcontextfunction, contextfunction, \ - is_undefined, select_autoescape - -__all__ = [ - 'Environment', 'Template', 'BaseLoader', 'FileSystemLoader', - 'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader', - 'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache', - 'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined', - 'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound', - 'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError', - 'TemplateRuntimeError', - 'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape', - 'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined', - 'evalcontextfilter', 'evalcontextfunction', 'make_logging_undefined', - 'select_autoescape', -] - - -def _patch_async(): - from jinja2.utils import have_async_gen - if have_async_gen: - from jinja2.asyncsupport import patch_all - patch_all() - - -_patch_async() -del _patch_async diff --git a/flo-token-explorer/lib/python3.6/site-packages/jinja2/_compat.py b/flo-token-explorer/lib/python3.6/site-packages/jinja2/_compat.py deleted file mode 100644 index 61d8530..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/jinja2/_compat.py +++ /dev/null @@ -1,99 +0,0 @@ -# -*- coding: utf-8 -*- -""" - jinja2._compat - ~~~~~~~~~~~~~~ - - Some py2/py3 compatibility support based on a stripped down - version of six so we don't have to depend on a specific version - of it. - - :copyright: Copyright 2013 by the Jinja team, see AUTHORS. - :license: BSD, see LICENSE for details. 
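The public API re-exported above is enough for a minimal render; a sketch:

.. code-block:: python

    from jinja2 import DictLoader, Environment

    env = Environment(loader=DictLoader({
        "hello.html": "Hello {{ user }}!",
    }))
    print(env.get_template("hello.html").render(user="World"))
    # Hello World!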
-""" -import sys - -PY2 = sys.version_info[0] == 2 -PYPY = hasattr(sys, 'pypy_translation_info') -_identity = lambda x: x - - -if not PY2: - unichr = chr - range_type = range - text_type = str - string_types = (str,) - integer_types = (int,) - - iterkeys = lambda d: iter(d.keys()) - itervalues = lambda d: iter(d.values()) - iteritems = lambda d: iter(d.items()) - - import pickle - from io import BytesIO, StringIO - NativeStringIO = StringIO - - def reraise(tp, value, tb=None): - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - - ifilter = filter - imap = map - izip = zip - intern = sys.intern - - implements_iterator = _identity - implements_to_string = _identity - encode_filename = _identity - -else: - unichr = unichr - text_type = unicode - range_type = xrange - string_types = (str, unicode) - integer_types = (int, long) - - iterkeys = lambda d: d.iterkeys() - itervalues = lambda d: d.itervalues() - iteritems = lambda d: d.iteritems() - - import cPickle as pickle - from cStringIO import StringIO as BytesIO, StringIO - NativeStringIO = BytesIO - - exec('def reraise(tp, value, tb=None):\n raise tp, value, tb') - - from itertools import imap, izip, ifilter - intern = intern - - def implements_iterator(cls): - cls.next = cls.__next__ - del cls.__next__ - return cls - - def implements_to_string(cls): - cls.__unicode__ = cls.__str__ - cls.__str__ = lambda x: x.__unicode__().encode('utf-8') - return cls - - def encode_filename(filename): - if isinstance(filename, unicode): - return filename.encode('utf-8') - return filename - - -def with_metaclass(meta, *bases): - """Create a base class with a metaclass.""" - # This requires a bit of explanation: the basic idea is to make a - # dummy metaclass for one level of class instantiation that replaces - # itself with the actual metaclass. 
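The ``with_metaclass`` trick above (a throwaway metaclass that replaces itself at class-creation time) is used like this; ``Meta`` and ``Node`` are illustrative names:

.. code-block:: python

    from jinja2._compat import with_metaclass

    class Meta(type):
        def __new__(mcls, name, bases, ns):
            ns.setdefault("tag", name.lower())
            return super(Meta, mcls).__new__(mcls, name, bases, ns)

    # the same declaration works on both Python 2 and 3, which is
    # exactly what the shim buys
    class Node(with_metaclass(Meta, object)):
        pass

    assert Node.tag == "node"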
- class metaclass(type): - def __new__(cls, name, this_bases, d): - return meta(name, bases, d) - return type.__new__(metaclass, 'temporary_class', (), {}) - - -try: - from urllib.parse import quote_from_bytes as url_quote -except ImportError: - from urllib import quote as url_quote diff --git a/flo-token-explorer/lib/python3.6/site-packages/jinja2/_identifier.py b/flo-token-explorer/lib/python3.6/site-packages/jinja2/_identifier.py deleted file mode 100644 index 2eac35d..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/jinja2/_identifier.py +++ /dev/null @@ -1,2 +0,0 @@ -# generated by scripts/generate_identifier_pattern.py -pattern = '·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯' diff --git a/flo-token-explorer/lib/python3.6/site-packages/jinja2/asyncfilters.py b/flo-token-explorer/lib/python3.6/site-packages/jinja2/asyncfilters.py deleted file mode 100644 index 5c1f46d..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/jinja2/asyncfilters.py +++ /dev/null @@ -1,146 +0,0 @@ -from functools import wraps - -from jinja2.asyncsupport import auto_aiter -from jinja2 import filters - - -async def auto_to_seq(value): - seq = [] - if hasattr(value, '__aiter__'): - async for item in value: - seq.append(item) - else: - for item in value: - seq.append(item) - return seq - - -async def async_select_or_reject(args, kwargs, modfunc, lookup_attr): - seq, func = filters.prepare_select_or_reject( - args, kwargs, modfunc, lookup_attr) - if seq: - async for item in auto_aiter(seq): - if func(item): - yield item - - -def dualfilter(normal_filter, async_filter): - wrap_evalctx = False - if getattr(normal_filter, 'environmentfilter', False): - is_async = lambda args: args[0].is_async - wrap_evalctx = False - else: - if not getattr(normal_filter, 'evalcontextfilter', False) and \ - not getattr(normal_filter, 'contextfilter', False): - wrap_evalctx = True - is_async = lambda args: args[0].environment.is_async - - @wraps(normal_filter) - def wrapper(*args, **kwargs): - b = is_async(args) - if wrap_evalctx: - args = args[1:] - if b: - return async_filter(*args, **kwargs) - return normal_filter(*args, **kwargs) - - if wrap_evalctx: - wrapper.evalcontextfilter = True - - wrapper.asyncfiltervariant = True - - return wrapper - - -def asyncfiltervariant(original): - def decorator(f): - return dualfilter(original, f) - return decorator - - -@asyncfiltervariant(filters.do_first) -async def do_first(environment, seq): - try: - return await auto_aiter(seq).__anext__() - except StopAsyncIteration: - return environment.undefined('No first item, sequence was empty.') - - -@asyncfiltervariant(filters.do_groupby) -async def do_groupby(environment, value, attribute): - expr = filters.make_attrgetter(environment, attribute) - return [filters._GroupTuple(key, await auto_to_seq(values)) - for key, values in filters.groupby(sorted( - await auto_to_seq(value), key=expr), expr)] - - -@asyncfiltervariant(filters.do_join) -async def 
do_join(eval_ctx, value, d=u'', attribute=None): - return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute) - - -@asyncfiltervariant(filters.do_list) -async def do_list(value): - return await auto_to_seq(value) - - -@asyncfiltervariant(filters.do_reject) -async def do_reject(*args, **kwargs): - return async_select_or_reject(args, kwargs, lambda x: not x, False) - - -@asyncfiltervariant(filters.do_rejectattr) -async def do_rejectattr(*args, **kwargs): - return async_select_or_reject(args, kwargs, lambda x: not x, True) - - -@asyncfiltervariant(filters.do_select) -async def do_select(*args, **kwargs): - return async_select_or_reject(args, kwargs, lambda x: x, False) - - -@asyncfiltervariant(filters.do_selectattr) -async def do_selectattr(*args, **kwargs): - return async_select_or_reject(args, kwargs, lambda x: x, True) - - -@asyncfiltervariant(filters.do_map) -async def do_map(*args, **kwargs): - seq, func = filters.prepare_map(args, kwargs) - if seq: - async for item in auto_aiter(seq): - yield func(item) - - -@asyncfiltervariant(filters.do_sum) -async def do_sum(environment, iterable, attribute=None, start=0): - rv = start - if attribute is not None: - func = filters.make_attrgetter(environment, attribute) - else: - func = lambda x: x - async for item in auto_aiter(iterable): - rv += func(item) - return rv - - -@asyncfiltervariant(filters.do_slice) -async def do_slice(value, slices, fill_with=None): - return filters.do_slice(await auto_to_seq(value), slices, fill_with) - - -ASYNC_FILTERS = { - 'first': do_first, - 'groupby': do_groupby, - 'join': do_join, - 'list': do_list, - # we intentionally do not support do_last because that would be - # ridiculous - 'reject': do_reject, - 'rejectattr': do_rejectattr, - 'map': do_map, - 'select': do_select, - 'selectattr': do_selectattr, - 'sum': do_sum, - 'slice': do_slice, -} diff --git a/flo-token-explorer/lib/python3.6/site-packages/jinja2/asyncsupport.py b/flo-token-explorer/lib/python3.6/site-packages/jinja2/asyncsupport.py deleted file mode 100644 index b1e7b5c..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/jinja2/asyncsupport.py +++ /dev/null @@ -1,256 +0,0 @@ -# -*- coding: utf-8 -*- -""" - jinja2.asyncsupport - ~~~~~~~~~~~~~~~~~~~ - - Has all the code for async support which is implemented as a patch - for supported Python versions. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. 
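These async filter variants only take effect on an async-enabled environment; a sketch of the user-facing side (Python 3.6+):

.. code-block:: python

    import asyncio

    from jinja2 import Environment

    env = Environment(enable_async=True)
    template = env.from_string(
        "{% for user in users %}{{ user }} {% endfor %}"
    )

    async def main():
        # render_async drives the template's async generator to the end
        print(await template.render_async(users=["a", "b", "c"]))

    asyncio.get_event_loop().run_until_complete(main())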
-""" -import sys -import asyncio -import inspect -from functools import update_wrapper - -from jinja2.utils import concat, internalcode, Markup -from jinja2.environment import TemplateModule -from jinja2.runtime import LoopContextBase, _last_iteration - - -async def concat_async(async_gen): - rv = [] - async def collect(): - async for event in async_gen: - rv.append(event) - await collect() - return concat(rv) - - -async def generate_async(self, *args, **kwargs): - vars = dict(*args, **kwargs) - try: - async for event in self.root_render_func(self.new_context(vars)): - yield event - except Exception: - exc_info = sys.exc_info() - else: - return - yield self.environment.handle_exception(exc_info, True) - - -def wrap_generate_func(original_generate): - def _convert_generator(self, loop, args, kwargs): - async_gen = self.generate_async(*args, **kwargs) - try: - while 1: - yield loop.run_until_complete(async_gen.__anext__()) - except StopAsyncIteration: - pass - def generate(self, *args, **kwargs): - if not self.environment.is_async: - return original_generate(self, *args, **kwargs) - return _convert_generator(self, asyncio.get_event_loop(), args, kwargs) - return update_wrapper(generate, original_generate) - - -async def render_async(self, *args, **kwargs): - if not self.environment.is_async: - raise RuntimeError('The environment was not created with async mode ' - 'enabled.') - - vars = dict(*args, **kwargs) - ctx = self.new_context(vars) - - try: - return await concat_async(self.root_render_func(ctx)) - except Exception: - exc_info = sys.exc_info() - return self.environment.handle_exception(exc_info, True) - - -def wrap_render_func(original_render): - def render(self, *args, **kwargs): - if not self.environment.is_async: - return original_render(self, *args, **kwargs) - loop = asyncio.get_event_loop() - return loop.run_until_complete(self.render_async(*args, **kwargs)) - return update_wrapper(render, original_render) - - -def wrap_block_reference_call(original_call): - @internalcode - async def async_call(self): - rv = await concat_async(self._stack[self._depth](self._context)) - if self._context.eval_ctx.autoescape: - rv = Markup(rv) - return rv - - @internalcode - def __call__(self): - if not self._context.environment.is_async: - return original_call(self) - return async_call(self) - - return update_wrapper(__call__, original_call) - - -def wrap_macro_invoke(original_invoke): - @internalcode - async def async_invoke(self, arguments, autoescape): - rv = await self._func(*arguments) - if autoescape: - rv = Markup(rv) - return rv - - @internalcode - def _invoke(self, arguments, autoescape): - if not self._environment.is_async: - return original_invoke(self, arguments, autoescape) - return async_invoke(self, arguments, autoescape) - return update_wrapper(_invoke, original_invoke) - - -@internalcode -async def get_default_module_async(self): - if self._module is not None: - return self._module - self._module = rv = await self.make_module_async() - return rv - - -def wrap_default_module(original_default_module): - @internalcode - def _get_default_module(self): - if self.environment.is_async: - raise RuntimeError('Template module attribute is unavailable ' - 'in async mode') - return original_default_module(self) - return _get_default_module - - -async def make_module_async(self, vars=None, shared=False, locals=None): - context = self.new_context(vars, shared, locals) - body_stream = [] - async for item in self.root_render_func(context): - body_stream.append(item) - return TemplateModule(self, 
context, body_stream) - - -def patch_template(): - from jinja2 import Template - Template.generate = wrap_generate_func(Template.generate) - Template.generate_async = update_wrapper( - generate_async, Template.generate_async) - Template.render_async = update_wrapper( - render_async, Template.render_async) - Template.render = wrap_render_func(Template.render) - Template._get_default_module = wrap_default_module( - Template._get_default_module) - Template._get_default_module_async = get_default_module_async - Template.make_module_async = update_wrapper( - make_module_async, Template.make_module_async) - - -def patch_runtime(): - from jinja2.runtime import BlockReference, Macro - BlockReference.__call__ = wrap_block_reference_call( - BlockReference.__call__) - Macro._invoke = wrap_macro_invoke(Macro._invoke) - - -def patch_filters(): - from jinja2.filters import FILTERS - from jinja2.asyncfilters import ASYNC_FILTERS - FILTERS.update(ASYNC_FILTERS) - - -def patch_all(): - patch_template() - patch_runtime() - patch_filters() - - -async def auto_await(value): - if inspect.isawaitable(value): - return await value - return value - - -async def auto_aiter(iterable): - if hasattr(iterable, '__aiter__'): - async for item in iterable: - yield item - return - for item in iterable: - yield item - - -class AsyncLoopContext(LoopContextBase): - - def __init__(self, async_iterator, undefined, after, length, recurse=None, - depth0=0): - LoopContextBase.__init__(self, undefined, recurse, depth0) - self._async_iterator = async_iterator - self._after = after - self._length = length - - @property - def length(self): - if self._length is None: - raise TypeError('Loop length for some iterators cannot be ' - 'lazily calculated in async mode') - return self._length - - def __aiter__(self): - return AsyncLoopContextIterator(self) - - -class AsyncLoopContextIterator(object): - __slots__ = ('context',) - - def __init__(self, context): - self.context = context - - def __aiter__(self): - return self - - async def __anext__(self): - ctx = self.context - ctx.index0 += 1 - if ctx._after is _last_iteration: - raise StopAsyncIteration() - ctx._before = ctx._current - ctx._current = ctx._after - try: - ctx._after = await ctx._async_iterator.__anext__() - except StopAsyncIteration: - ctx._after = _last_iteration - return ctx._current, ctx - - -async def make_async_loop_context(iterable, undefined, recurse=None, depth0=0): - # Length is more complicated and less efficient in async mode. The - # reason for this is that we cannot know if length will be used - # upfront but because length is a property we cannot lazily execute it - # later. This means that we need to buffer it up and measure :( - # - # We however only do this for actual iterators, not for async - # iterators as blocking here does not seem like the best idea in the - # world. 
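``auto_aiter`` above is the adapter that lets the same filter code consume plain and async iterables alike; a sketch:

.. code-block:: python

    import asyncio

    from jinja2.asyncsupport import auto_aiter

    async def agen():
        yield 1
        yield 2

    async def main():
        # plain iterables and async generators come out identically
        assert [x async for x in auto_aiter([1, 2])] == [1, 2]
        assert [x async for x in auto_aiter(agen())] == [1, 2]

    asyncio.get_event_loop().run_until_complete(main())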
- try: - length = len(iterable) - except (TypeError, AttributeError): - if not hasattr(iterable, '__aiter__'): - iterable = tuple(iterable) - length = len(iterable) - else: - length = None - async_iterator = auto_aiter(iterable) - try: - after = await async_iterator.__anext__() - except StopAsyncIteration: - after = _last_iteration - return AsyncLoopContext(async_iterator, undefined, after, length, recurse, - depth0) diff --git a/flo-token-explorer/lib/python3.6/site-packages/jinja2/bccache.py b/flo-token-explorer/lib/python3.6/site-packages/jinja2/bccache.py deleted file mode 100644 index 080e527..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/jinja2/bccache.py +++ /dev/null @@ -1,362 +0,0 @@ -# -*- coding: utf-8 -*- -""" - jinja2.bccache - ~~~~~~~~~~~~~~ - - This module implements the bytecode cache system Jinja is optionally - using. This is useful if you have very complex template situations and - the compiliation of all those templates slow down your application too - much. - - Situations where this is useful are often forking web applications that - are initialized on the first request. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD. -""" -from os import path, listdir -import os -import sys -import stat -import errno -import marshal -import tempfile -import fnmatch -from hashlib import sha1 -from jinja2.utils import open_if_exists -from jinja2._compat import BytesIO, pickle, PY2, text_type - - -# marshal works better on 3.x, one hack less required -if not PY2: - marshal_dump = marshal.dump - marshal_load = marshal.load -else: - - def marshal_dump(code, f): - if isinstance(f, file): - marshal.dump(code, f) - else: - f.write(marshal.dumps(code)) - - def marshal_load(f): - if isinstance(f, file): - return marshal.load(f) - return marshal.loads(f.read()) - - -bc_version = 3 - -# magic version used to only change with new jinja versions. With 2.6 -# we change this to also take Python version changes into account. The -# reason for this is that Python tends to segfault if fed earlier bytecode -# versions because someone thought it would be a good idea to reuse opcodes -# or make Python incompatible with earlier versions. -bc_magic = 'j2'.encode('ascii') + \ - pickle.dumps(bc_version, 2) + \ - pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1]) - - -class Bucket(object): - """Buckets are used to store the bytecode for one template. It's created - and initialized by the bytecode cache and passed to the loading functions. - - The buckets get an internal checksum from the cache assigned and use this - to automatically reject outdated cache material. Individual bytecode - cache subclasses don't have to care about cache invalidation. 
- """ - - def __init__(self, environment, key, checksum): - self.environment = environment - self.key = key - self.checksum = checksum - self.reset() - - def reset(self): - """Resets the bucket (unloads the bytecode).""" - self.code = None - - def load_bytecode(self, f): - """Loads bytecode from a file or file like object.""" - # make sure the magic header is correct - magic = f.read(len(bc_magic)) - if magic != bc_magic: - self.reset() - return - # the source code of the file changed, we need to reload - checksum = pickle.load(f) - if self.checksum != checksum: - self.reset() - return - # if marshal_load fails then we need to reload - try: - self.code = marshal_load(f) - except (EOFError, ValueError, TypeError): - self.reset() - return - - def write_bytecode(self, f): - """Dump the bytecode into the file or file like object passed.""" - if self.code is None: - raise TypeError('can\'t write empty bucket') - f.write(bc_magic) - pickle.dump(self.checksum, f, 2) - marshal_dump(self.code, f) - - def bytecode_from_string(self, string): - """Load bytecode from a string.""" - self.load_bytecode(BytesIO(string)) - - def bytecode_to_string(self): - """Return the bytecode as string.""" - out = BytesIO() - self.write_bytecode(out) - return out.getvalue() - - -class BytecodeCache(object): - """To implement your own bytecode cache you have to subclass this class - and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of - these methods are passed a :class:`~jinja2.bccache.Bucket`. - - A very basic bytecode cache that saves the bytecode on the file system:: - - from os import path - - class MyCache(BytecodeCache): - - def __init__(self, directory): - self.directory = directory - - def load_bytecode(self, bucket): - filename = path.join(self.directory, bucket.key) - if path.exists(filename): - with open(filename, 'rb') as f: - bucket.load_bytecode(f) - - def dump_bytecode(self, bucket): - filename = path.join(self.directory, bucket.key) - with open(filename, 'wb') as f: - bucket.write_bytecode(f) - - A more advanced version of a filesystem based bytecode cache is part of - Jinja2. - """ - - def load_bytecode(self, bucket): - """Subclasses have to override this method to load bytecode into a - bucket. If they are not able to find code in the cache for the - bucket, it must not do anything. - """ - raise NotImplementedError() - - def dump_bytecode(self, bucket): - """Subclasses have to override this method to write the bytecode - from a bucket back to the cache. If it unable to do so it must not - fail silently but raise an exception. - """ - raise NotImplementedError() - - def clear(self): - """Clears the cache. This method is not used by Jinja2 but should be - implemented to allow applications to clear the bytecode cache used - by a particular environment. - """ - - def get_cache_key(self, name, filename=None): - """Returns the unique hash key for this template name.""" - hash = sha1(name.encode('utf-8')) - if filename is not None: - filename = '|' + filename - if isinstance(filename, text_type): - filename = filename.encode('utf-8') - hash.update(filename) - return hash.hexdigest() - - def get_source_checksum(self, source): - """Returns a checksum for the source.""" - return sha1(source.encode('utf-8')).hexdigest() - - def get_bucket(self, environment, name, filename, source): - """Return a cache bucket for the given template. All arguments are - mandatory but filename may be `None`. 
- """ - key = self.get_cache_key(name, filename) - checksum = self.get_source_checksum(source) - bucket = Bucket(environment, key, checksum) - self.load_bytecode(bucket) - return bucket - - def set_bucket(self, bucket): - """Put the bucket into the cache.""" - self.dump_bytecode(bucket) - - -class FileSystemBytecodeCache(BytecodeCache): - """A bytecode cache that stores bytecode on the filesystem. It accepts - two arguments: The directory where the cache items are stored and a - pattern string that is used to build the filename. - - If no directory is specified a default cache directory is selected. On - Windows the user's temp directory is used, on UNIX systems a directory - is created for the user in the system temp directory. - - The pattern can be used to have multiple separate caches operate on the - same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s`` - is replaced with the cache key. - - >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache') - - This bytecode cache supports clearing of the cache using the clear method. - """ - - def __init__(self, directory=None, pattern='__jinja2_%s.cache'): - if directory is None: - directory = self._get_default_cache_dir() - self.directory = directory - self.pattern = pattern - - def _get_default_cache_dir(self): - def _unsafe_dir(): - raise RuntimeError('Cannot determine safe temp directory. You ' - 'need to explicitly provide one.') - - tmpdir = tempfile.gettempdir() - - # On windows the temporary directory is used specific unless - # explicitly forced otherwise. We can just use that. - if os.name == 'nt': - return tmpdir - if not hasattr(os, 'getuid'): - _unsafe_dir() - - dirname = '_jinja2-cache-%d' % os.getuid() - actual_dir = os.path.join(tmpdir, dirname) - - try: - os.mkdir(actual_dir, stat.S_IRWXU) - except OSError as e: - if e.errno != errno.EEXIST: - raise - try: - os.chmod(actual_dir, stat.S_IRWXU) - actual_dir_stat = os.lstat(actual_dir) - if actual_dir_stat.st_uid != os.getuid() \ - or not stat.S_ISDIR(actual_dir_stat.st_mode) \ - or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU: - _unsafe_dir() - except OSError as e: - if e.errno != errno.EEXIST: - raise - - actual_dir_stat = os.lstat(actual_dir) - if actual_dir_stat.st_uid != os.getuid() \ - or not stat.S_ISDIR(actual_dir_stat.st_mode) \ - or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU: - _unsafe_dir() - - return actual_dir - - def _get_cache_filename(self, bucket): - return path.join(self.directory, self.pattern % bucket.key) - - def load_bytecode(self, bucket): - f = open_if_exists(self._get_cache_filename(bucket), 'rb') - if f is not None: - try: - bucket.load_bytecode(f) - finally: - f.close() - - def dump_bytecode(self, bucket): - f = open(self._get_cache_filename(bucket), 'wb') - try: - bucket.write_bytecode(f) - finally: - f.close() - - def clear(self): - # imported lazily here because google app-engine doesn't support - # write access on the file system and the function does not exist - # normally. - from os import remove - files = fnmatch.filter(listdir(self.directory), self.pattern % '*') - for filename in files: - try: - remove(path.join(self.directory, filename)) - except OSError: - pass - - -class MemcachedBytecodeCache(BytecodeCache): - """This class implements a bytecode cache that uses a memcache cache for - storing the information. It does not enforce a specific memcache library - (tummy's memcache or cmemcache) but will accept any class that provides - the minimal interface required. 
- - Libraries compatible with this class: - - - werkzeug.contrib.cache - - python-memcached - - cmemcache - - (Unfortunately the django cache interface is not compatible because it - does not support storing binary data, only unicode. You can however pass - the underlying cache client to the bytecode cache which is available - as `django.core.cache.cache._client`.) - - The minimal interface for the client passed to the constructor is this: - - .. class:: MinimalClientInterface - - .. method:: set(key, value[, timeout]) - - Stores the bytecode in the cache. `value` is a string and - `timeout` is the timeout of the key. If `timeout` is not - provided, a default timeout or no timeout should be assumed; - if it is provided, it is an integer giving the number of - seconds the cache item should exist. - - .. method:: get(key) - - Returns the value for the cache key. If the item does not - exist in the cache the return value must be `None`. - - The other arguments to the constructor are the prefix for all keys that - is added before the actual cache key and the timeout for the bytecode in - the cache system. We recommend a high (or no) timeout. - - This bytecode cache does not support clearing of used items in the cache. - The clear method is a no-operation function. - - .. versionadded:: 2.7 - Added support for ignoring memcache errors through the - `ignore_memcache_errors` parameter. - """ - - def __init__(self, client, prefix='jinja2/bytecode/', timeout=None, - ignore_memcache_errors=True): - self.client = client - self.prefix = prefix - self.timeout = timeout - self.ignore_memcache_errors = ignore_memcache_errors - - def load_bytecode(self, bucket): - try: - code = self.client.get(self.prefix + bucket.key) - except Exception: - if not self.ignore_memcache_errors: - raise - code = None - if code is not None: - bucket.bytecode_from_string(code) - - def dump_bytecode(self, bucket): - args = (self.prefix + bucket.key, bucket.bytecode_to_string()) - if self.timeout is not None: - args += (self.timeout,) - try: - self.client.set(*args) - except Exception: - if not self.ignore_memcache_errors: - raise diff --git a/flo-token-explorer/lib/python3.6/site-packages/jinja2/compiler.py b/flo-token-explorer/lib/python3.6/site-packages/jinja2/compiler.py deleted file mode 100644 index d534a82..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/jinja2/compiler.py +++ /dev/null @@ -1,1721 +0,0 @@ -# -*- coding: utf-8 -*- -""" - jinja2.compiler - ~~~~~~~~~~~~~~~ - - Compiles nodes into Python code. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. -""" -from itertools import chain -from copy import deepcopy -from keyword import iskeyword as is_python_keyword -from functools import update_wrapper -from jinja2 import nodes -from jinja2.nodes import EvalContext -from jinja2.visitor import NodeVisitor -from jinja2.optimizer import Optimizer -from jinja2.exceptions import TemplateAssertionError -from jinja2.utils import Markup, concat, escape -from jinja2._compat import range_type, text_type, string_types, \ - iteritems, NativeStringIO, imap, izip -from jinja2.idtracking import Symbols, VAR_LOAD_PARAMETER, \ - VAR_LOAD_RESOLVE, VAR_LOAD_ALIAS, VAR_LOAD_UNDEFINED - - -operators = { - 'eq': '==', - 'ne': '!=', - 'gt': '>', - 'gteq': '>=', - 'lt': '<', - 'lteq': '<=', - 'in': 'in', - 'notin': 'not in' -} - -# which method do we want to use to iterate over dict items -# in generated code?
on 2.x let's go with iteritems, on 3.x with items -if hasattr(dict, 'iteritems'): - dict_item_iter = 'iteritems' -else: - dict_item_iter = 'items' - -code_features = ['division'] - -# does this python version support generator stops? (PEP 0479) -try: - exec('from __future__ import generator_stop') - code_features.append('generator_stop') -except SyntaxError: - pass - -# does this python version support yield from? -try: - exec('def f(): yield from x()') -except SyntaxError: - supports_yield_from = False -else: - supports_yield_from = True - - -def optimizeconst(f): - def new_func(self, node, frame, **kwargs): - # Only optimize if the frame is not volatile - if self.optimized and not frame.eval_ctx.volatile: - new_node = self.optimizer.visit(node, frame.eval_ctx) - if new_node != node: - return self.visit(new_node, frame) - return f(self, node, frame, **kwargs) - return update_wrapper(new_func, f) - - -def generate(node, environment, name, filename, stream=None, - defer_init=False, optimized=True): - """Generate the python source for a node tree.""" - if not isinstance(node, nodes.Template): - raise TypeError('Can\'t compile non template nodes') - generator = environment.code_generator_class(environment, name, filename, - stream, defer_init, - optimized) - generator.visit(node) - if stream is None: - return generator.stream.getvalue() - - -def has_safe_repr(value): - """Does the node have a safe representation?""" - if value is None or value is NotImplemented or value is Ellipsis: - return True - if type(value) in (bool, int, float, complex, range_type, Markup) + string_types: - return True - if type(value) in (tuple, list, set, frozenset): - for item in value: - if not has_safe_repr(item): - return False - return True - elif type(value) is dict: - for key, value in iteritems(value): - if not has_safe_repr(key): - return False - if not has_safe_repr(value): - return False - return True - return False - - -def find_undeclared(nodes, names): - """Check if the names passed are accessed undeclared. The return value - is a set of all the undeclared names from the sequence of names found. - """ - visitor = UndeclaredNameVisitor(names) - try: - for node in nodes: - visitor.visit(node) - except VisitorExit: - pass - return visitor.undeclared - - -class MacroRef(object): - - def __init__(self, node): - self.node = node - self.accesses_caller = False - self.accesses_kwargs = False - self.accesses_varargs = False - - -class Frame(object): - """Holds compile time information for us.""" - - def __init__(self, eval_ctx, parent=None, level=None): - self.eval_ctx = eval_ctx - self.symbols = Symbols(parent and parent.symbols or None, - level=level) - - # a toplevel frame is the root + soft frames such as if conditions. - self.toplevel = False - - # the root frame is basically just the outermost frame, so no if - # conditions. This information is used to optimize inheritance - # situations. - self.rootlevel = False - - # in some dynamic inheritance situations the compiler needs to add - # write tests around output statements. - self.require_output_check = parent and parent.require_output_check - - # inside some tags we are using a buffer rather than yield statements. - # this for example affects {% filter %} or {% macro %}. If a frame - # is buffered this variable points to the name of the list used as - # buffer. - self.buffer = None - - # the name of the block we're in, otherwise None. 
- self.block = parent and parent.block or None - - # the parent of this frame - self.parent = parent - - if parent is not None: - self.buffer = parent.buffer - - def copy(self): - """Create a copy of the current one.""" - rv = object.__new__(self.__class__) - rv.__dict__.update(self.__dict__) - rv.symbols = self.symbols.copy() - return rv - - def inner(self, isolated=False): - """Return an inner frame.""" - if isolated: - return Frame(self.eval_ctx, level=self.symbols.level + 1) - return Frame(self.eval_ctx, self) - - def soft(self): - """Return a soft frame. A soft frame may not be modified as - standalone thing as it shares the resources with the frame it - was created of, but it's not a rootlevel frame any longer. - - This is only used to implement if-statements. - """ - rv = self.copy() - rv.rootlevel = False - return rv - - __copy__ = copy - - -class VisitorExit(RuntimeError): - """Exception used by the `UndeclaredNameVisitor` to signal a stop.""" - - -class DependencyFinderVisitor(NodeVisitor): - """A visitor that collects filter and test calls.""" - - def __init__(self): - self.filters = set() - self.tests = set() - - def visit_Filter(self, node): - self.generic_visit(node) - self.filters.add(node.name) - - def visit_Test(self, node): - self.generic_visit(node) - self.tests.add(node.name) - - def visit_Block(self, node): - """Stop visiting at blocks.""" - - -class UndeclaredNameVisitor(NodeVisitor): - """A visitor that checks if a name is accessed without being - declared. This is different from the frame visitor as it will - not stop at closure frames. - """ - - def __init__(self, names): - self.names = set(names) - self.undeclared = set() - - def visit_Name(self, node): - if node.ctx == 'load' and node.name in self.names: - self.undeclared.add(node.name) - if self.undeclared == self.names: - raise VisitorExit() - else: - self.names.discard(node.name) - - def visit_Block(self, node): - """Stop visiting a blocks.""" - - -class CompilerExit(Exception): - """Raised if the compiler encountered a situation where it just - doesn't make sense to further process the code. Any block that - raises such an exception is not further processed. - """ - - -class CodeGenerator(NodeVisitor): - - def __init__(self, environment, name, filename, stream=None, - defer_init=False, optimized=True): - if stream is None: - stream = NativeStringIO() - self.environment = environment - self.name = name - self.filename = filename - self.stream = stream - self.created_block_context = False - self.defer_init = defer_init - self.optimized = optimized - if optimized: - self.optimizer = Optimizer(environment) - - # aliases for imports - self.import_aliases = {} - - # a registry for all blocks. Because blocks are moved out - # into the global python scope they are registered here - self.blocks = {} - - # the number of extends statements so far - self.extends_so_far = 0 - - # some templates have a rootlevel extends. In this case we - # can safely assume that we're a child template and do some - # more optimizations. - self.has_known_extends = False - - # the current line number - self.code_lineno = 1 - - # registry of all filters and tests (global, not block local) - self.tests = {} - self.filters = {} - - # the debug information - self.debug_info = [] - self._write_debug_info = None - - # the number of new lines before the next write() - self._new_lines = 0 - - # the line number of the last written statement - self._last_line = 0 - - # true if nothing was written so far. 
- self._first_write = True - - # used by the `temporary_identifier` method to get new - # unique, temporary identifier - self._last_identifier = 0 - - # the current indentation - self._indentation = 0 - - # Tracks toplevel assignments - self._assign_stack = [] - - # Tracks parameter definition blocks - self._param_def_block = [] - - # Tracks the current context. - self._context_reference_stack = ['context'] - - # -- Various compilation helpers - - def fail(self, msg, lineno): - """Fail with a :exc:`TemplateAssertionError`.""" - raise TemplateAssertionError(msg, lineno, self.name, self.filename) - - def temporary_identifier(self): - """Get a new unique identifier.""" - self._last_identifier += 1 - return 't_%d' % self._last_identifier - - def buffer(self, frame): - """Enable buffering for the frame from that point onwards.""" - frame.buffer = self.temporary_identifier() - self.writeline('%s = []' % frame.buffer) - - def return_buffer_contents(self, frame, force_unescaped=False): - """Return the buffer contents of the frame.""" - if not force_unescaped: - if frame.eval_ctx.volatile: - self.writeline('if context.eval_ctx.autoescape:') - self.indent() - self.writeline('return Markup(concat(%s))' % frame.buffer) - self.outdent() - self.writeline('else:') - self.indent() - self.writeline('return concat(%s)' % frame.buffer) - self.outdent() - return - elif frame.eval_ctx.autoescape: - self.writeline('return Markup(concat(%s))' % frame.buffer) - return - self.writeline('return concat(%s)' % frame.buffer) - - def indent(self): - """Indent by one.""" - self._indentation += 1 - - def outdent(self, step=1): - """Outdent by step.""" - self._indentation -= step - - def start_write(self, frame, node=None): - """Yield or write into the frame buffer.""" - if frame.buffer is None: - self.writeline('yield ', node) - else: - self.writeline('%s.append(' % frame.buffer, node) - - def end_write(self, frame): - """End the writing process started by `start_write`.""" - if frame.buffer is not None: - self.write(')') - - def simple_write(self, s, frame, node=None): - """Simple shortcut for start_write + write + end_write.""" - self.start_write(frame, node) - self.write(s) - self.end_write(frame) - - def blockvisit(self, nodes, frame): - """Visit a list of nodes as block in a frame. If the current frame - is no buffer a dummy ``if 0: yield None`` is written automatically. - """ - try: - self.writeline('pass') - for node in nodes: - self.visit(node, frame) - except CompilerExit: - pass - - def write(self, x): - """Write a string into the output stream.""" - if self._new_lines: - if not self._first_write: - self.stream.write('\n' * self._new_lines) - self.code_lineno += self._new_lines - if self._write_debug_info is not None: - self.debug_info.append((self._write_debug_info, - self.code_lineno)) - self._write_debug_info = None - self._first_write = False - self.stream.write(' ' * self._indentation) - self._new_lines = 0 - self.stream.write(x) - - def writeline(self, x, node=None, extra=0): - """Combination of newline and write.""" - self.newline(node, extra) - self.write(x) - - def newline(self, node=None, extra=0): - """Add one or more newlines before the next write.""" - self._new_lines = max(self._new_lines, 1 + extra) - if node is not None and node.lineno != self._last_line: - self._write_debug_info = node.lineno - self._last_line = node.lineno - - def signature(self, node, frame, extra_kwargs=None): - """Writes a function call to the stream for the current node. - A leading comma is added automatically. 
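        For example, a call node representing ``foo(1, x=2)`` results in
        ``, 1, x=2`` being written after whatever the caller has already
        emitted.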
The extra keyword - arguments may not include python keywords otherwise a syntax - error could occour. The extra keyword arguments should be given - as python dict. - """ - # if any of the given keyword arguments is a python keyword - # we have to make sure that no invalid call is created. - kwarg_workaround = False - for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()): - if is_python_keyword(kwarg): - kwarg_workaround = True - break - - for arg in node.args: - self.write(', ') - self.visit(arg, frame) - - if not kwarg_workaround: - for kwarg in node.kwargs: - self.write(', ') - self.visit(kwarg, frame) - if extra_kwargs is not None: - for key, value in iteritems(extra_kwargs): - self.write(', %s=%s' % (key, value)) - if node.dyn_args: - self.write(', *') - self.visit(node.dyn_args, frame) - - if kwarg_workaround: - if node.dyn_kwargs is not None: - self.write(', **dict({') - else: - self.write(', **{') - for kwarg in node.kwargs: - self.write('%r: ' % kwarg.key) - self.visit(kwarg.value, frame) - self.write(', ') - if extra_kwargs is not None: - for key, value in iteritems(extra_kwargs): - self.write('%r: %s, ' % (key, value)) - if node.dyn_kwargs is not None: - self.write('}, **') - self.visit(node.dyn_kwargs, frame) - self.write(')') - else: - self.write('}') - - elif node.dyn_kwargs is not None: - self.write(', **') - self.visit(node.dyn_kwargs, frame) - - def pull_dependencies(self, nodes): - """Pull all the dependencies.""" - visitor = DependencyFinderVisitor() - for node in nodes: - visitor.visit(node) - for dependency in 'filters', 'tests': - mapping = getattr(self, dependency) - for name in getattr(visitor, dependency): - if name not in mapping: - mapping[name] = self.temporary_identifier() - self.writeline('%s = environment.%s[%r]' % - (mapping[name], dependency, name)) - - def enter_frame(self, frame): - undefs = [] - for target, (action, param) in iteritems(frame.symbols.loads): - if action == VAR_LOAD_PARAMETER: - pass - elif action == VAR_LOAD_RESOLVE: - self.writeline('%s = %s(%r)' % - (target, self.get_resolve_func(), param)) - elif action == VAR_LOAD_ALIAS: - self.writeline('%s = %s' % (target, param)) - elif action == VAR_LOAD_UNDEFINED: - undefs.append(target) - else: - raise NotImplementedError('unknown load instruction') - if undefs: - self.writeline('%s = missing' % ' = '.join(undefs)) - - def leave_frame(self, frame, with_python_scope=False): - if not with_python_scope: - undefs = [] - for target, _ in iteritems(frame.symbols.loads): - undefs.append(target) - if undefs: - self.writeline('%s = missing' % ' = '.join(undefs)) - - def func(self, name): - if self.environment.is_async: - return 'async def %s' % name - return 'def %s' % name - - def macro_body(self, node, frame): - """Dump the function def of a macro or call block.""" - frame = frame.inner() - frame.symbols.analyze_node(node) - macro_ref = MacroRef(node) - - explicit_caller = None - skip_special_params = set() - args = [] - for idx, arg in enumerate(node.args): - if arg.name == 'caller': - explicit_caller = idx - if arg.name in ('kwargs', 'varargs'): - skip_special_params.add(arg.name) - args.append(frame.symbols.ref(arg.name)) - - undeclared = find_undeclared(node.body, ('caller', 'kwargs', 'varargs')) - - if 'caller' in undeclared: - # In older Jinja2 versions there was a bug that allowed caller - # to retain the special behavior even if it was mentioned in - # the argument list. However thankfully this was only really - # working if it was the last argument. 
So we are explicitly - # checking this now and error out if it is anywhere else in - # the argument list. - if explicit_caller is not None: - try: - node.defaults[explicit_caller - len(node.args)] - except IndexError: - self.fail('When defining macros or call blocks the ' - 'special "caller" argument must be omitted ' - 'or be given a default.', node.lineno) - else: - args.append(frame.symbols.declare_parameter('caller')) - macro_ref.accesses_caller = True - if 'kwargs' in undeclared and not 'kwargs' in skip_special_params: - args.append(frame.symbols.declare_parameter('kwargs')) - macro_ref.accesses_kwargs = True - if 'varargs' in undeclared and not 'varargs' in skip_special_params: - args.append(frame.symbols.declare_parameter('varargs')) - macro_ref.accesses_varargs = True - - # macros are delayed, they never require output checks - frame.require_output_check = False - frame.symbols.analyze_node(node) - self.writeline('%s(%s):' % (self.func('macro'), ', '.join(args)), node) - self.indent() - - self.buffer(frame) - self.enter_frame(frame) - - self.push_parameter_definitions(frame) - for idx, arg in enumerate(node.args): - ref = frame.symbols.ref(arg.name) - self.writeline('if %s is missing:' % ref) - self.indent() - try: - default = node.defaults[idx - len(node.args)] - except IndexError: - self.writeline('%s = undefined(%r, name=%r)' % ( - ref, - 'parameter %r was not provided' % arg.name, - arg.name)) - else: - self.writeline('%s = ' % ref) - self.visit(default, frame) - self.mark_parameter_stored(ref) - self.outdent() - self.pop_parameter_definitions() - - self.blockvisit(node.body, frame) - self.return_buffer_contents(frame, force_unescaped=True) - self.leave_frame(frame, with_python_scope=True) - self.outdent() - - return frame, macro_ref - - def macro_def(self, macro_ref, frame): - """Dump the macro definition for the def created by macro_body.""" - arg_tuple = ', '.join(repr(x.name) for x in macro_ref.node.args) - name = getattr(macro_ref.node, 'name', None) - if len(macro_ref.node.args) == 1: - arg_tuple += ',' - self.write('Macro(environment, macro, %r, (%s), %r, %r, %r, ' - 'context.eval_ctx.autoescape)' % - (name, arg_tuple, macro_ref.accesses_kwargs, - macro_ref.accesses_varargs, macro_ref.accesses_caller)) - - def position(self, node): - """Return a human readable position for the node.""" - rv = 'line %d' % node.lineno - if self.name is not None: - rv += ' in ' + repr(self.name) - return rv - - def dump_local_context(self, frame): - return '{%s}' % ', '.join( - '%r: %s' % (name, target) for name, target - in iteritems(frame.symbols.dump_stores())) - - def write_commons(self): - """Writes a common preamble that is used by root and block functions. - Primarily this sets up common local helpers and enforces a generator - through a dead branch. - """ - self.writeline('resolve = context.resolve_or_missing') - self.writeline('undefined = environment.undefined') - self.writeline('if 0: yield None') - - def push_parameter_definitions(self, frame): - """Pushes all parameter targets from the given frame into a local - stack that permits tracking of yet to be assigned parameters. In - particular this enables the optimization from `visit_Name` to skip - undefined expressions for parameters in macros as macros can reference - otherwise unbound parameters. 
- """ - self._param_def_block.append(frame.symbols.dump_param_targets()) - - def pop_parameter_definitions(self): - """Pops the current parameter definitions set.""" - self._param_def_block.pop() - - def mark_parameter_stored(self, target): - """Marks a parameter in the current parameter definitions as stored. - This will skip the enforced undefined checks. - """ - if self._param_def_block: - self._param_def_block[-1].discard(target) - - def push_context_reference(self, target): - self._context_reference_stack.append(target) - - def pop_context_reference(self): - self._context_reference_stack.pop() - - def get_context_ref(self): - return self._context_reference_stack[-1] - - def get_resolve_func(self): - target = self._context_reference_stack[-1] - if target == 'context': - return 'resolve' - return '%s.resolve' % target - - def derive_context(self, frame): - return '%s.derived(%s)' % ( - self.get_context_ref(), - self.dump_local_context(frame), - ) - - def parameter_is_undeclared(self, target): - """Checks if a given target is an undeclared parameter.""" - if not self._param_def_block: - return False - return target in self._param_def_block[-1] - - def push_assign_tracking(self): - """Pushes a new layer for assignment tracking.""" - self._assign_stack.append(set()) - - def pop_assign_tracking(self, frame): - """Pops the topmost level for assignment tracking and updates the - context variables if necessary. - """ - vars = self._assign_stack.pop() - if not frame.toplevel or not vars: - return - public_names = [x for x in vars if x[:1] != '_'] - if len(vars) == 1: - name = next(iter(vars)) - ref = frame.symbols.ref(name) - self.writeline('context.vars[%r] = %s' % (name, ref)) - else: - self.writeline('context.vars.update({') - for idx, name in enumerate(vars): - if idx: - self.write(', ') - ref = frame.symbols.ref(name) - self.write('%r: %s' % (name, ref)) - self.write('})') - if public_names: - if len(public_names) == 1: - self.writeline('context.exported_vars.add(%r)' % - public_names[0]) - else: - self.writeline('context.exported_vars.update((%s))' % - ', '.join(imap(repr, public_names))) - - # -- Statement Visitors - - def visit_Template(self, node, frame=None): - assert frame is None, 'no root frame allowed' - eval_ctx = EvalContext(self.environment, self.name) - - from jinja2.runtime import __all__ as exported - self.writeline('from __future__ import %s' % ', '.join(code_features)) - self.writeline('from jinja2.runtime import ' + ', '.join(exported)) - - if self.environment.is_async: - self.writeline('from jinja2.asyncsupport import auto_await, ' - 'auto_aiter, make_async_loop_context') - - # if we want a deferred initialization we cannot move the - # environment into a local name - envenv = not self.defer_init and ', environment=environment' or '' - - # do we have an extends tag at all? If not, we can save some - # overhead by just not processing any inheritance code. - have_extends = node.find(nodes.Extends) is not None - - # find all blocks - for block in node.find_all(nodes.Block): - if block.name in self.blocks: - self.fail('block %r defined twice' % block.name, block.lineno) - self.blocks[block.name] = block - - # find all imports and import them - for import_ in node.find_all(nodes.ImportedName): - if import_.importname not in self.import_aliases: - imp = import_.importname - self.import_aliases[imp] = alias = self.temporary_identifier() - if '.' 
in imp: - module, obj = imp.rsplit('.', 1) - self.writeline('from %s import %s as %s' % - (module, obj, alias)) - else: - self.writeline('import %s as %s' % (imp, alias)) - - # add the load name - self.writeline('name = %r' % self.name) - - # generate the root render function. - self.writeline('%s(context, missing=missing%s):' % - (self.func('root'), envenv), extra=1) - self.indent() - self.write_commons() - - # process the root - frame = Frame(eval_ctx) - if 'self' in find_undeclared(node.body, ('self',)): - ref = frame.symbols.declare_parameter('self') - self.writeline('%s = TemplateReference(context)' % ref) - frame.symbols.analyze_node(node) - frame.toplevel = frame.rootlevel = True - frame.require_output_check = have_extends and not self.has_known_extends - if have_extends: - self.writeline('parent_template = None') - self.enter_frame(frame) - self.pull_dependencies(node.body) - self.blockvisit(node.body, frame) - self.leave_frame(frame, with_python_scope=True) - self.outdent() - - # make sure that the parent root is called. - if have_extends: - if not self.has_known_extends: - self.indent() - self.writeline('if parent_template is not None:') - self.indent() - if supports_yield_from and not self.environment.is_async: - self.writeline('yield from parent_template.' - 'root_render_func(context)') - else: - self.writeline('%sfor event in parent_template.' - 'root_render_func(context):' % - (self.environment.is_async and 'async ' or '')) - self.indent() - self.writeline('yield event') - self.outdent() - self.outdent(1 + (not self.has_known_extends)) - - # at this point we now have the blocks collected and can visit them too. - for name, block in iteritems(self.blocks): - self.writeline('%s(context, missing=missing%s):' % - (self.func('block_' + name), envenv), - block, 1) - self.indent() - self.write_commons() - # It's important that we do not make this frame a child of the - # toplevel template. This would cause a variety of - # interesting issues with identifier tracking. 
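        # (A fresh Frame here means the block function resolves its names
        # through the context it is called with, not through the parent
        # template's compile-time frame; only 'self' and 'super' are
        # re-declared below as needed.)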
- block_frame = Frame(eval_ctx) - undeclared = find_undeclared(block.body, ('self', 'super')) - if 'self' in undeclared: - ref = block_frame.symbols.declare_parameter('self') - self.writeline('%s = TemplateReference(context)' % ref) - if 'super' in undeclared: - ref = block_frame.symbols.declare_parameter('super') - self.writeline('%s = context.super(%r, ' - 'block_%s)' % (ref, name, name)) - block_frame.symbols.analyze_node(block) - block_frame.block = name - self.enter_frame(block_frame) - self.pull_dependencies(block.body) - self.blockvisit(block.body, block_frame) - self.leave_frame(block_frame, with_python_scope=True) - self.outdent() - - self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x) - for x in self.blocks), - extra=1) - - # add a function that returns the debug info - self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x - in self.debug_info)) - - def visit_Block(self, node, frame): - """Call a block and register it for the template.""" - level = 0 - if frame.toplevel: - # if we know that we are a child template, there is no need to - # check if we are one - if self.has_known_extends: - return - if self.extends_so_far > 0: - self.writeline('if parent_template is None:') - self.indent() - level += 1 - - if node.scoped: - context = self.derive_context(frame) - else: - context = self.get_context_ref() - - if supports_yield_from and not self.environment.is_async and \ - frame.buffer is None: - self.writeline('yield from context.blocks[%r][0](%s)' % ( - node.name, context), node) - else: - loop = self.environment.is_async and 'async for' or 'for' - self.writeline('%s event in context.blocks[%r][0](%s):' % ( - loop, node.name, context), node) - self.indent() - self.simple_write('event', frame) - self.outdent() - - self.outdent(level) - - def visit_Extends(self, node, frame): - """Calls the extender.""" - if not frame.toplevel: - self.fail('cannot use extend from a non top-level scope', - node.lineno) - - # if the number of extends statements in general is zero so - # far, we don't have to add a check if something extended - # the template before this one. - if self.extends_so_far > 0: - - # if we have a known extends we just add a template runtime - # error into the generated code. We could catch that at compile - # time too, but i welcome it not to confuse users by throwing the - # same error at different times just "because we can". - if not self.has_known_extends: - self.writeline('if parent_template is not None:') - self.indent() - self.writeline('raise TemplateRuntimeError(%r)' % - 'extended multiple times') - - # if we have a known extends already we don't need that code here - # as we know that the template execution will end here. - if self.has_known_extends: - raise CompilerExit() - else: - self.outdent() - - self.writeline('parent_template = environment.get_template(', node) - self.visit(node.template, frame) - self.write(', %r)' % self.name) - self.writeline('for name, parent_block in parent_template.' - 'blocks.%s():' % dict_item_iter) - self.indent() - self.writeline('context.blocks.setdefault(name, []).' 
- 'append(parent_block)') - self.outdent() - - # if this extends statement was in the root level we can take - # advantage of that information and simplify the generated code - # in the top level from this point onwards - if frame.rootlevel: - self.has_known_extends = True - - # and now we have one more - self.extends_so_far += 1 - - def visit_Include(self, node, frame): - """Handles includes.""" - if node.ignore_missing: - self.writeline('try:') - self.indent() - - func_name = 'get_or_select_template' - if isinstance(node.template, nodes.Const): - if isinstance(node.template.value, string_types): - func_name = 'get_template' - elif isinstance(node.template.value, (tuple, list)): - func_name = 'select_template' - elif isinstance(node.template, (nodes.Tuple, nodes.List)): - func_name = 'select_template' - - self.writeline('template = environment.%s(' % func_name, node) - self.visit(node.template, frame) - self.write(', %r)' % self.name) - if node.ignore_missing: - self.outdent() - self.writeline('except TemplateNotFound:') - self.indent() - self.writeline('pass') - self.outdent() - self.writeline('else:') - self.indent() - - skip_event_yield = False - if node.with_context: - loop = self.environment.is_async and 'async for' or 'for' - self.writeline('%s event in template.root_render_func(' - 'template.new_context(context.get_all(), True, ' - '%s)):' % (loop, self.dump_local_context(frame))) - elif self.environment.is_async: - self.writeline('for event in (await ' - 'template._get_default_module_async())' - '._body_stream:') - else: - if supports_yield_from: - self.writeline('yield from template._get_default_module()' - '._body_stream') - skip_event_yield = True - else: - self.writeline('for event in template._get_default_module()' - '._body_stream:') - - if not skip_event_yield: - self.indent() - self.simple_write('event', frame) - self.outdent() - - if node.ignore_missing: - self.outdent() - - def visit_Import(self, node, frame): - """Visit regular imports.""" - self.writeline('%s = ' % frame.symbols.ref(node.target), node) - if frame.toplevel: - self.write('context.vars[%r] = ' % node.target) - if self.environment.is_async: - self.write('await ') - self.write('environment.get_template(') - self.visit(node.template, frame) - self.write(', %r).' % self.name) - if node.with_context: - self.write('make_module%s(context.get_all(), True, %s)' - % (self.environment.is_async and '_async' or '', - self.dump_local_context(frame))) - elif self.environment.is_async: - self.write('_get_default_module_async()') - else: - self.write('_get_default_module()') - if frame.toplevel and not node.target.startswith('_'): - self.writeline('context.exported_vars.discard(%r)' % node.target) - - def visit_FromImport(self, node, frame): - """Visit named imports.""" - self.newline(node) - self.write('included_template = %senvironment.get_template(' - % (self.environment.is_async and 'await ' or '')) - self.visit(node.template, frame) - self.write(', %r).' 
% self.name) - if node.with_context: - self.write('make_module%s(context.get_all(), True, %s)' - % (self.environment.is_async and '_async' or '', - self.dump_local_context(frame))) - elif self.environment.is_async: - self.write('_get_default_module_async()') - else: - self.write('_get_default_module()') - - var_names = [] - discarded_names = [] - for name in node.names: - if isinstance(name, tuple): - name, alias = name - else: - alias = name - self.writeline('%s = getattr(included_template, ' - '%r, missing)' % (frame.symbols.ref(alias), name)) - self.writeline('if %s is missing:' % frame.symbols.ref(alias)) - self.indent() - self.writeline('%s = undefined(%r %% ' - 'included_template.__name__, ' - 'name=%r)' % - (frame.symbols.ref(alias), - 'the template %%r (imported on %s) does ' - 'not export the requested name %s' % ( - self.position(node), - repr(name) - ), name)) - self.outdent() - if frame.toplevel: - var_names.append(alias) - if not alias.startswith('_'): - discarded_names.append(alias) - - if var_names: - if len(var_names) == 1: - name = var_names[0] - self.writeline('context.vars[%r] = %s' % - (name, frame.symbols.ref(name))) - else: - self.writeline('context.vars.update({%s})' % ', '.join( - '%r: %s' % (name, frame.symbols.ref(name)) for name in var_names - )) - if discarded_names: - if len(discarded_names) == 1: - self.writeline('context.exported_vars.discard(%r)' % - discarded_names[0]) - else: - self.writeline('context.exported_vars.difference_' - 'update((%s))' % ', '.join(imap(repr, discarded_names))) - - def visit_For(self, node, frame): - loop_frame = frame.inner() - test_frame = frame.inner() - else_frame = frame.inner() - - # try to figure out if we have an extended loop. An extended loop - # is necessary if the loop is in recursive mode if the special loop - # variable is accessed in the body. - extended_loop = node.recursive or 'loop' in \ - find_undeclared(node.iter_child_nodes( - only=('body',)), ('loop',)) - - loop_ref = None - if extended_loop: - loop_ref = loop_frame.symbols.declare_parameter('loop') - - loop_frame.symbols.analyze_node(node, for_branch='body') - if node.else_: - else_frame.symbols.analyze_node(node, for_branch='else') - - if node.test: - loop_filter_func = self.temporary_identifier() - test_frame.symbols.analyze_node(node, for_branch='test') - self.writeline('%s(fiter):' % self.func(loop_filter_func), node.test) - self.indent() - self.enter_frame(test_frame) - self.writeline(self.environment.is_async and 'async for ' or 'for ') - self.visit(node.target, loop_frame) - self.write(' in ') - self.write(self.environment.is_async and 'auto_aiter(fiter)' or 'fiter') - self.write(':') - self.indent() - self.writeline('if ', node.test) - self.visit(node.test, test_frame) - self.write(':') - self.indent() - self.writeline('yield ') - self.visit(node.target, loop_frame) - self.outdent(3) - self.leave_frame(test_frame, with_python_scope=True) - - # if we don't have an recursive loop we have to find the shadowed - # variables at that point. Because loops can be nested but the loop - # variable is a special one we have to enforce aliasing for it. 
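        # For orientation, a template-level sketch of the recursive-loop
        # feature being compiled here (illustrative template, not part of
        # the compiler):
        #
        #   {% for item in tree recursive %}
        #     {{ item.name }}{{ loop(item.children) }}
        #   {% endfor %}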
- if node.recursive: - self.writeline('%s(reciter, loop_render_func, depth=0):' % - self.func('loop'), node) - self.indent() - self.buffer(loop_frame) - - # Use the same buffer for the else frame - else_frame.buffer = loop_frame.buffer - - # make sure the loop variable is a special one and raise a template - # assertion error if a loop tries to write to loop - if extended_loop: - self.writeline('%s = missing' % loop_ref) - - for name in node.find_all(nodes.Name): - if name.ctx == 'store' and name.name == 'loop': - self.fail('Can\'t assign to special loop variable ' - 'in for-loop target', name.lineno) - - if node.else_: - iteration_indicator = self.temporary_identifier() - self.writeline('%s = 1' % iteration_indicator) - - self.writeline(self.environment.is_async and 'async for ' or 'for ', node) - self.visit(node.target, loop_frame) - if extended_loop: - if self.environment.is_async: - self.write(', %s in await make_async_loop_context(' % loop_ref) - else: - self.write(', %s in LoopContext(' % loop_ref) - else: - self.write(' in ') - - if node.test: - self.write('%s(' % loop_filter_func) - if node.recursive: - self.write('reciter') - else: - if self.environment.is_async and not extended_loop: - self.write('auto_aiter(') - self.visit(node.iter, frame) - if self.environment.is_async and not extended_loop: - self.write(')') - if node.test: - self.write(')') - - if node.recursive: - self.write(', undefined, loop_render_func, depth):') - else: - self.write(extended_loop and ', undefined):' or ':') - - self.indent() - self.enter_frame(loop_frame) - - self.blockvisit(node.body, loop_frame) - if node.else_: - self.writeline('%s = 0' % iteration_indicator) - self.outdent() - self.leave_frame(loop_frame, with_python_scope=node.recursive - and not node.else_) - - if node.else_: - self.writeline('if %s:' % iteration_indicator) - self.indent() - self.enter_frame(else_frame) - self.blockvisit(node.else_, else_frame) - self.leave_frame(else_frame) - self.outdent() - - # if the node was recursive we have to return the buffer contents - # and start the iteration code - if node.recursive: - self.return_buffer_contents(loop_frame) - self.outdent() - self.start_write(frame, node) - if self.environment.is_async: - self.write('await ') - self.write('loop(') - if self.environment.is_async: - self.write('auto_aiter(') - self.visit(node.iter, frame) - if self.environment.is_async: - self.write(')') - self.write(', loop)') - self.end_write(frame) - - def visit_If(self, node, frame): - if_frame = frame.soft() - self.writeline('if ', node) - self.visit(node.test, if_frame) - self.write(':') - self.indent() - self.blockvisit(node.body, if_frame) - self.outdent() - for elif_ in node.elif_: - self.writeline('elif ', elif_) - self.visit(elif_.test, if_frame) - self.write(':') - self.indent() - self.blockvisit(elif_.body, if_frame) - self.outdent() - if node.else_: - self.writeline('else:') - self.indent() - self.blockvisit(node.else_, if_frame) - self.outdent() - - def visit_Macro(self, node, frame): - macro_frame, macro_ref = self.macro_body(node, frame) - self.newline() - if frame.toplevel: - if not node.name.startswith('_'): - self.write('context.exported_vars.add(%r)' % node.name) - ref = frame.symbols.ref(node.name) - self.writeline('context.vars[%r] = ' % node.name) - self.write('%s = ' % frame.symbols.ref(node.name)) - self.macro_def(macro_ref, macro_frame) - - def visit_CallBlock(self, node, frame): - call_frame, macro_ref = self.macro_body(node, frame) - self.writeline('caller = ') - self.macro_def(macro_ref, 
call_frame) - self.start_write(frame, node) - self.visit_Call(node.call, frame, forward_caller=True) - self.end_write(frame) - - def visit_FilterBlock(self, node, frame): - filter_frame = frame.inner() - filter_frame.symbols.analyze_node(node) - self.enter_frame(filter_frame) - self.buffer(filter_frame) - self.blockvisit(node.body, filter_frame) - self.start_write(frame, node) - self.visit_Filter(node.filter, filter_frame) - self.end_write(frame) - self.leave_frame(filter_frame) - - def visit_With(self, node, frame): - with_frame = frame.inner() - with_frame.symbols.analyze_node(node) - self.enter_frame(with_frame) - for idx, (target, expr) in enumerate(izip(node.targets, node.values)): - self.newline() - self.visit(target, with_frame) - self.write(' = ') - self.visit(expr, frame) - self.blockvisit(node.body, with_frame) - self.leave_frame(with_frame) - - def visit_ExprStmt(self, node, frame): - self.newline(node) - self.visit(node.node, frame) - - def visit_Output(self, node, frame): - # if we have a known extends statement, we don't output anything - # if we are in a require_output_check section - if self.has_known_extends and frame.require_output_check: - return - - allow_constant_finalize = True - if self.environment.finalize: - func = self.environment.finalize - if getattr(func, 'contextfunction', False) or \ - getattr(func, 'evalcontextfunction', False): - allow_constant_finalize = False - elif getattr(func, 'environmentfunction', False): - finalize = lambda x: text_type( - self.environment.finalize(self.environment, x)) - else: - finalize = lambda x: text_type(self.environment.finalize(x)) - else: - finalize = text_type - - # if we are inside a frame that requires output checking, we do so - outdent_later = False - if frame.require_output_check: - self.writeline('if parent_template is None:') - self.indent() - outdent_later = True - - # try to evaluate as many chunks as possible into a static - # string at compile time. - body = [] - for child in node.nodes: - try: - if not allow_constant_finalize: - raise nodes.Impossible() - const = child.as_const(frame.eval_ctx) - except nodes.Impossible: - body.append(child) - continue - # the frame can't be volatile here, becaus otherwise the - # as_const() function would raise an Impossible exception - # at that point. 
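            # (For example, the output node for '{{ 1 + 1 }}' folds to the
            # constant '2' here and is merged into the surrounding static
            # text instead of being evaluated on every render.)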
- try: - if frame.eval_ctx.autoescape: - if hasattr(const, '__html__'): - const = const.__html__() - else: - const = escape(const) - const = finalize(const) - except Exception: - # if something goes wrong here we evaluate the node - # at runtime for easier debugging - body.append(child) - continue - if body and isinstance(body[-1], list): - body[-1].append(const) - else: - body.append([const]) - - # if we have less than 3 nodes or a buffer we yield or extend/append - if len(body) < 3 or frame.buffer is not None: - if frame.buffer is not None: - # for one item we append, for more we extend - if len(body) == 1: - self.writeline('%s.append(' % frame.buffer) - else: - self.writeline('%s.extend((' % frame.buffer) - self.indent() - for item in body: - if isinstance(item, list): - val = repr(concat(item)) - if frame.buffer is None: - self.writeline('yield ' + val) - else: - self.writeline(val + ',') - else: - if frame.buffer is None: - self.writeline('yield ', item) - else: - self.newline(item) - close = 1 - if frame.eval_ctx.volatile: - self.write('(escape if context.eval_ctx.autoescape' - ' else to_string)(') - elif frame.eval_ctx.autoescape: - self.write('escape(') - else: - self.write('to_string(') - if self.environment.finalize is not None: - self.write('environment.finalize(') - if getattr(self.environment.finalize, - "contextfunction", False): - self.write('context, ') - close += 1 - self.visit(item, frame) - self.write(')' * close) - if frame.buffer is not None: - self.write(',') - if frame.buffer is not None: - # close the open parentheses - self.outdent() - self.writeline(len(body) == 1 and ')' or '))') - - # otherwise we create a format string as this is faster in that case - else: - format = [] - arguments = [] - for item in body: - if isinstance(item, list): - format.append(concat(item).replace('%', '%%')) - else: - format.append('%s') - arguments.append(item) - self.writeline('yield ') - self.write(repr(concat(format)) + ' % (') - self.indent() - for argument in arguments: - self.newline(argument) - close = 0 - if frame.eval_ctx.volatile: - self.write('(escape if context.eval_ctx.autoescape else' - ' to_string)(') - close += 1 - elif frame.eval_ctx.autoescape: - self.write('escape(') - close += 1 - if self.environment.finalize is not None: - self.write('environment.finalize(') - if getattr(self.environment.finalize, - 'contextfunction', False): - self.write('context, ') - elif getattr(self.environment.finalize, - 'evalcontextfunction', False): - self.write('context.eval_ctx, ') - elif getattr(self.environment.finalize, - 'environmentfunction', False): - self.write('environment, ') - close += 1 - self.visit(argument, frame) - self.write(')' * close + ', ') - self.outdent() - self.writeline(')') - - if outdent_later: - self.outdent() - - def visit_Assign(self, node, frame): - self.push_assign_tracking() - self.newline(node) - self.visit(node.target, frame) - self.write(' = ') - self.visit(node.node, frame) - self.pop_assign_tracking(frame) - - def visit_AssignBlock(self, node, frame): - self.push_assign_tracking() - block_frame = frame.inner() - # This is a special case. Since a set block always captures we - # will disable output checks. This way one can use set blocks - # toplevel even in extended templates. 
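        # A template-level sketch of what this permits (template names are
        # illustrative):
        #
        #   {% extends "layout.html" %}
        #   {% set title %}Chapter {{ chapter.id }}{% endset %}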
- block_frame.require_output_check = False - block_frame.symbols.analyze_node(node) - self.enter_frame(block_frame) - self.buffer(block_frame) - self.blockvisit(node.body, block_frame) - self.newline(node) - self.visit(node.target, frame) - self.write(' = (Markup if context.eval_ctx.autoescape ' - 'else identity)(') - if node.filter is not None: - self.visit_Filter(node.filter, block_frame) - else: - self.write('concat(%s)' % block_frame.buffer) - self.write(')') - self.pop_assign_tracking(frame) - self.leave_frame(block_frame) - - # -- Expression Visitors - - def visit_Name(self, node, frame): - if node.ctx == 'store' and frame.toplevel: - if self._assign_stack: - self._assign_stack[-1].add(node.name) - ref = frame.symbols.ref(node.name) - - # If we are looking up a variable we might have to deal with the - # case where it's undefined. We can skip that case if the load - # instruction indicates a parameter which are always defined. - if node.ctx == 'load': - load = frame.symbols.find_load(ref) - if not (load is not None and load[0] == VAR_LOAD_PARAMETER and \ - not self.parameter_is_undeclared(ref)): - self.write('(undefined(name=%r) if %s is missing else %s)' % - (node.name, ref, ref)) - return - - self.write(ref) - - def visit_NSRef(self, node, frame): - # NSRefs can only be used to store values; since they use the normal - # `foo.bar` notation they will be parsed as a normal attribute access - # when used anywhere but in a `set` context - ref = frame.symbols.ref(node.name) - self.writeline('if not isinstance(%s, Namespace):' % ref) - self.indent() - self.writeline('raise TemplateRuntimeError(%r)' % - 'cannot assign attribute on non-namespace object') - self.outdent() - self.writeline('%s[%r]' % (ref, node.attr)) - - def visit_Const(self, node, frame): - val = node.as_const(frame.eval_ctx) - if isinstance(val, float): - self.write(str(val)) - else: - self.write(repr(val)) - - def visit_TemplateData(self, node, frame): - try: - self.write(repr(node.as_const(frame.eval_ctx))) - except nodes.Impossible: - self.write('(Markup if context.eval_ctx.autoescape else identity)(%r)' - % node.data) - - def visit_Tuple(self, node, frame): - self.write('(') - idx = -1 - for idx, item in enumerate(node.items): - if idx: - self.write(', ') - self.visit(item, frame) - self.write(idx == 0 and ',)' or ')') - - def visit_List(self, node, frame): - self.write('[') - for idx, item in enumerate(node.items): - if idx: - self.write(', ') - self.visit(item, frame) - self.write(']') - - def visit_Dict(self, node, frame): - self.write('{') - for idx, item in enumerate(node.items): - if idx: - self.write(', ') - self.visit(item.key, frame) - self.write(': ') - self.visit(item.value, frame) - self.write('}') - - def binop(operator, interceptable=True): - @optimizeconst - def visitor(self, node, frame): - if self.environment.sandboxed and \ - operator in self.environment.intercepted_binops: - self.write('environment.call_binop(context, %r, ' % operator) - self.visit(node.left, frame) - self.write(', ') - self.visit(node.right, frame) - else: - self.write('(') - self.visit(node.left, frame) - self.write(' %s ' % operator) - self.visit(node.right, frame) - self.write(')') - return visitor - - def uaop(operator, interceptable=True): - @optimizeconst - def visitor(self, node, frame): - if self.environment.sandboxed and \ - operator in self.environment.intercepted_unops: - self.write('environment.call_unop(context, %r, ' % operator) - self.visit(node.node, frame) - else: - self.write('(' + operator) - 
self.visit(node.node, frame) - self.write(')') - return visitor - - visit_Add = binop('+') - visit_Sub = binop('-') - visit_Mul = binop('*') - visit_Div = binop('/') - visit_FloorDiv = binop('//') - visit_Pow = binop('**') - visit_Mod = binop('%') - visit_And = binop('and', interceptable=False) - visit_Or = binop('or', interceptable=False) - visit_Pos = uaop('+') - visit_Neg = uaop('-') - visit_Not = uaop('not ', interceptable=False) - del binop, uaop - - @optimizeconst - def visit_Concat(self, node, frame): - if frame.eval_ctx.volatile: - func_name = '(context.eval_ctx.volatile and' \ - ' markup_join or unicode_join)' - elif frame.eval_ctx.autoescape: - func_name = 'markup_join' - else: - func_name = 'unicode_join' - self.write('%s((' % func_name) - for arg in node.nodes: - self.visit(arg, frame) - self.write(', ') - self.write('))') - - @optimizeconst - def visit_Compare(self, node, frame): - self.visit(node.expr, frame) - for op in node.ops: - self.visit(op, frame) - - def visit_Operand(self, node, frame): - self.write(' %s ' % operators[node.op]) - self.visit(node.expr, frame) - - @optimizeconst - def visit_Getattr(self, node, frame): - self.write('environment.getattr(') - self.visit(node.node, frame) - self.write(', %r)' % node.attr) - - @optimizeconst - def visit_Getitem(self, node, frame): - # slices bypass the environment getitem method. - if isinstance(node.arg, nodes.Slice): - self.visit(node.node, frame) - self.write('[') - self.visit(node.arg, frame) - self.write(']') - else: - self.write('environment.getitem(') - self.visit(node.node, frame) - self.write(', ') - self.visit(node.arg, frame) - self.write(')') - - def visit_Slice(self, node, frame): - if node.start is not None: - self.visit(node.start, frame) - self.write(':') - if node.stop is not None: - self.visit(node.stop, frame) - if node.step is not None: - self.write(':') - self.visit(node.step, frame) - - @optimizeconst - def visit_Filter(self, node, frame): - if self.environment.is_async: - self.write('await auto_await(') - self.write(self.filters[node.name] + '(') - func = self.environment.filters.get(node.name) - if func is None: - self.fail('no filter named %r' % node.name, node.lineno) - if getattr(func, 'contextfilter', False): - self.write('context, ') - elif getattr(func, 'evalcontextfilter', False): - self.write('context.eval_ctx, ') - elif getattr(func, 'environmentfilter', False): - self.write('environment, ') - - # if the filter node is None we are inside a filter block - # and want to write to the current buffer - if node.node is not None: - self.visit(node.node, frame) - elif frame.eval_ctx.volatile: - self.write('(context.eval_ctx.autoescape and' - ' Markup(concat(%s)) or concat(%s))' % - (frame.buffer, frame.buffer)) - elif frame.eval_ctx.autoescape: - self.write('Markup(concat(%s))' % frame.buffer) - else: - self.write('concat(%s)' % frame.buffer) - self.signature(node, frame) - self.write(')') - if self.environment.is_async: - self.write(')') - - @optimizeconst - def visit_Test(self, node, frame): - self.write(self.tests[node.name] + '(') - if node.name not in self.environment.tests: - self.fail('no test named %r' % node.name, node.lineno) - self.visit(node.node, frame) - self.signature(node, frame) - self.write(')') - - @optimizeconst - def visit_CondExpr(self, node, frame): - def write_expr2(): - if node.expr2 is not None: - return self.visit(node.expr2, frame) - self.write('undefined(%r)' % ('the inline if-' - 'expression on %s evaluated to false and ' - 'no else section was defined.' 
% self.position(node))) - - self.write('(') - self.visit(node.expr1, frame) - self.write(' if ') - self.visit(node.test, frame) - self.write(' else ') - write_expr2() - self.write(')') - - @optimizeconst - def visit_Call(self, node, frame, forward_caller=False): - if self.environment.is_async: - self.write('await auto_await(') - if self.environment.sandboxed: - self.write('environment.call(context, ') - else: - self.write('context.call(') - self.visit(node.node, frame) - extra_kwargs = forward_caller and {'caller': 'caller'} or None - self.signature(node, frame, extra_kwargs) - self.write(')') - if self.environment.is_async: - self.write(')') - - def visit_Keyword(self, node, frame): - self.write(node.key + '=') - self.visit(node.value, frame) - - # -- Unused nodes for extensions - - def visit_MarkSafe(self, node, frame): - self.write('Markup(') - self.visit(node.expr, frame) - self.write(')') - - def visit_MarkSafeIfAutoescape(self, node, frame): - self.write('(context.eval_ctx.autoescape and Markup or identity)(') - self.visit(node.expr, frame) - self.write(')') - - def visit_EnvironmentAttribute(self, node, frame): - self.write('environment.' + node.name) - - def visit_ExtensionAttribute(self, node, frame): - self.write('environment.extensions[%r].%s' % (node.identifier, node.name)) - - def visit_ImportedName(self, node, frame): - self.write(self.import_aliases[node.importname]) - - def visit_InternalName(self, node, frame): - self.write(node.name) - - def visit_ContextReference(self, node, frame): - self.write('context') - - def visit_Continue(self, node, frame): - self.writeline('continue', node) - - def visit_Break(self, node, frame): - self.writeline('break', node) - - def visit_Scope(self, node, frame): - scope_frame = frame.inner() - scope_frame.symbols.analyze_node(node) - self.enter_frame(scope_frame) - self.blockvisit(node.body, scope_frame) - self.leave_frame(scope_frame) - - def visit_OverlayScope(self, node, frame): - ctx = self.temporary_identifier() - self.writeline('%s = %s' % (ctx, self.derive_context(frame))) - self.writeline('%s.vars = ' % ctx) - self.visit(node.context, frame) - self.push_context_reference(ctx) - - scope_frame = frame.inner(isolated=True) - scope_frame.symbols.analyze_node(node) - self.enter_frame(scope_frame) - self.blockvisit(node.body, scope_frame) - self.leave_frame(scope_frame) - self.pop_context_reference() - - def visit_EvalContextModifier(self, node, frame): - for keyword in node.options: - self.writeline('context.eval_ctx.%s = ' % keyword.key) - self.visit(keyword.value, frame) - try: - val = keyword.value.as_const(frame.eval_ctx) - except nodes.Impossible: - frame.eval_ctx.volatile = True - else: - setattr(frame.eval_ctx, keyword.key, val) - - def visit_ScopedEvalContextModifier(self, node, frame): - old_ctx_name = self.temporary_identifier() - saved_ctx = frame.eval_ctx.save() - self.writeline('%s = context.eval_ctx.save()' % old_ctx_name) - self.visit_EvalContextModifier(node, frame) - for child in node.body: - self.visit(child, frame) - frame.eval_ctx.revert(saved_ctx) - self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name) diff --git a/flo-token-explorer/lib/python3.6/site-packages/jinja2/constants.py b/flo-token-explorer/lib/python3.6/site-packages/jinja2/constants.py deleted file mode 100644 index 11efd1e..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/jinja2/constants.py +++ /dev/null @@ -1,32 +0,0 @@ -# -*- coding: utf-8 -*- -""" - jinja.constants - ~~~~~~~~~~~~~~~ - - Various constants. 
- - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. -""" - - -#: list of lorem ipsum words used by the lipsum() helper function -LOREM_IPSUM_WORDS = u'''\ -a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at -auctor augue bibendum blandit class commodo condimentum congue consectetuer -consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus -diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend -elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames -faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac -hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum -justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem -luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie -mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non -nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque -penatibus per pharetra phasellus placerat platea porta porttitor posuere -potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus -ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit -sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor -tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices -ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus -viverra volutpat vulputate''' diff --git a/flo-token-explorer/lib/python3.6/site-packages/jinja2/debug.py b/flo-token-explorer/lib/python3.6/site-packages/jinja2/debug.py deleted file mode 100644 index b61139f..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/jinja2/debug.py +++ /dev/null @@ -1,372 +0,0 @@ -# -*- coding: utf-8 -*- -""" - jinja2.debug - ~~~~~~~~~~~~ - - Implements the debug interface for Jinja. This module does some pretty - ugly stuff with the Python traceback system in order to achieve tracebacks - with correct line numbers, locals and contents. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. -""" -import sys -import traceback -from types import TracebackType, CodeType -from jinja2.utils import missing, internal_code -from jinja2.exceptions import TemplateSyntaxError -from jinja2._compat import iteritems, reraise, PY2 - -# on pypy we can take advantage of transparent proxies -try: - from __pypy__ import tproxy -except ImportError: - tproxy = None - - -# how does the raise helper look like? -try: - exec("raise TypeError, 'foo'") -except SyntaxError: - raise_helper = 'raise __jinja_exception__[1]' -except TypeError: - raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]' - - -class TracebackFrameProxy(object): - """Proxies a traceback frame.""" - - def __init__(self, tb): - self.tb = tb - self._tb_next = None - - @property - def tb_next(self): - return self._tb_next - - def set_next(self, next): - if tb_set_next is not None: - try: - tb_set_next(self.tb, next and next.tb or None) - except Exception: - # this function can fail due to all the hackery it does - # on various python implementations. We just catch errors - # down and ignore them if necessary. 
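
The try/except around exec("raise TypeError, 'foo'") earlier in this module is a compact interpreter probe: Python 2 accepts the old raise syntax and raises the TypeError, while Python 3 rejects it at compile time with a SyntaxError, so whichever branch runs selects the raise_helper source to emit. The same trick in isolation:

    # Probe the running interpreter the same way debug.py does above.
    try:
        exec("raise TypeError, 'foo'")
    except SyntaxError:
        # Python 3: the statement never compiled, so use the py3 re-raise form.
        raise_style = 'py3'
    except TypeError:
        # Python 2: it compiled and actually raised, so use the 2.x form.
        raise_style = 'py2'
    print(raise_style)
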
- pass - self._tb_next = next - - @property - def is_jinja_frame(self): - return '__jinja_template__' in self.tb.tb_frame.f_globals - - def __getattr__(self, name): - return getattr(self.tb, name) - - -def make_frame_proxy(frame): - proxy = TracebackFrameProxy(frame) - if tproxy is None: - return proxy - def operation_handler(operation, *args, **kwargs): - if operation in ('__getattribute__', '__getattr__'): - return getattr(proxy, args[0]) - elif operation == '__setattr__': - proxy.__setattr__(*args, **kwargs) - else: - return getattr(proxy, operation)(*args, **kwargs) - return tproxy(TracebackType, operation_handler) - - -class ProcessedTraceback(object): - """Holds a Jinja preprocessed traceback for printing or reraising.""" - - def __init__(self, exc_type, exc_value, frames): - assert frames, 'no frames for this traceback?' - self.exc_type = exc_type - self.exc_value = exc_value - self.frames = frames - - # newly concatenate the frames (which are proxies) - prev_tb = None - for tb in self.frames: - if prev_tb is not None: - prev_tb.set_next(tb) - prev_tb = tb - prev_tb.set_next(None) - - def render_as_text(self, limit=None): - """Return a string with the traceback.""" - lines = traceback.format_exception(self.exc_type, self.exc_value, - self.frames[0], limit=limit) - return ''.join(lines).rstrip() - - def render_as_html(self, full=False): - """Return a unicode string with the traceback as rendered HTML.""" - from jinja2.debugrenderer import render_traceback - return u'%s\n\n' % ( - render_traceback(self, full=full), - self.render_as_text().decode('utf-8', 'replace') - ) - - @property - def is_template_syntax_error(self): - """`True` if this is a template syntax error.""" - return isinstance(self.exc_value, TemplateSyntaxError) - - @property - def exc_info(self): - """Exception info tuple with a proxy around the frame objects.""" - return self.exc_type, self.exc_value, self.frames[0] - - @property - def standard_exc_info(self): - """Standard python exc_info for re-raising""" - tb = self.frames[0] - # the frame will be an actual traceback (or transparent proxy) if - # we are on pypy or a python implementation with support for tproxy - if type(tb) is not TracebackType: - tb = tb.tb - return self.exc_type, self.exc_value, tb - - -def make_traceback(exc_info, source_hint=None): - """Creates a processed traceback object from the exc_info.""" - exc_type, exc_value, tb = exc_info - if isinstance(exc_value, TemplateSyntaxError): - exc_info = translate_syntax_error(exc_value, source_hint) - initial_skip = 0 - else: - initial_skip = 1 - return translate_exception(exc_info, initial_skip) - - -def translate_syntax_error(error, source=None): - """Rewrites a syntax error to please traceback systems.""" - error.source = source - error.translated = True - exc_info = (error.__class__, error, None) - filename = error.filename - if filename is None: - filename = '' - return fake_exc_info(exc_info, filename, error.lineno) - - -def translate_exception(exc_info, initial_skip=0): - """If passed an exc_info it will automatically rewrite the exceptions - all the way down to the correct line numbers and frames. - """ - tb = exc_info[2] - frames = [] - - # skip some internal frames if wanted - for x in range(initial_skip): - if tb is not None: - tb = tb.tb_next - initial_tb = tb - - while tb is not None: - # skip frames decorated with @internalcode. These are internal - # calls we can't avoid and that are useless in template debugging - # output. 
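
translate_exception is the piece that makes a failing render point at template source rather than at generated Python: each frame whose globals carry __jinja_template__ is replaced by a fake frame whose filename and line number come from the template. A small way to observe the effect, assuming a template compiled from a string (which gets the pseudo-filename '<template>'):

    import traceback
    from jinja2 import Environment

    env = Environment()
    try:
        # line 2 of the template divides by zero at render time
        env.from_string('first line\n{{ 1 // 0 }}').render()
    except ZeroDivisionError:
        traceback.print_exc()
    # the printed traceback includes a rewritten frame such as:
    #   File "<template>", line 2, in top-level template code
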
- if tb.tb_frame.f_code in internal_code: - tb = tb.tb_next - continue - - # save a reference to the next frame if we override the current - # one with a faked one. - next = tb.tb_next - - # fake template exceptions - template = tb.tb_frame.f_globals.get('__jinja_template__') - if template is not None: - lineno = template.get_corresponding_lineno(tb.tb_lineno) - tb = fake_exc_info(exc_info[:2] + (tb,), template.filename, - lineno)[2] - - frames.append(make_frame_proxy(tb)) - tb = next - - # if we don't have any exceptions in the frames left, we have to - # reraise it unchanged. - # XXX: can we backup here? when could this happen? - if not frames: - reraise(exc_info[0], exc_info[1], exc_info[2]) - - return ProcessedTraceback(exc_info[0], exc_info[1], frames) - - -def get_jinja_locals(real_locals): - ctx = real_locals.get('context') - if ctx: - locals = ctx.get_all().copy() - else: - locals = {} - - local_overrides = {} - - for name, value in iteritems(real_locals): - if not name.startswith('l_') or value is missing: - continue - try: - _, depth, name = name.split('_', 2) - depth = int(depth) - except ValueError: - continue - cur_depth = local_overrides.get(name, (-1,))[0] - if cur_depth < depth: - local_overrides[name] = (depth, value) - - for name, (_, value) in iteritems(local_overrides): - if value is missing: - locals.pop(name, None) - else: - locals[name] = value - - return locals - - -def fake_exc_info(exc_info, filename, lineno): - """Helper for `translate_exception`.""" - exc_type, exc_value, tb = exc_info - - # figure the real context out - if tb is not None: - locals = get_jinja_locals(tb.tb_frame.f_locals) - - # if there is a local called __jinja_exception__, we get - # rid of it to not break the debug functionality. - locals.pop('__jinja_exception__', None) - else: - locals = {} - - # assamble fake globals we need - globals = { - '__name__': filename, - '__file__': filename, - '__jinja_exception__': exc_info[:2], - - # we don't want to keep the reference to the template around - # to not cause circular dependencies, but we mark it as Jinja - # frame for the ProcessedTraceback - '__jinja_template__': None - } - - # and fake the exception - code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec') - - # if it's possible, change the name of the code. This won't work - # on some python environments such as google appengine - try: - if tb is None: - location = 'template' - else: - function = tb.tb_frame.f_code.co_name - if function == 'root': - location = 'top-level template code' - elif function.startswith('block_'): - location = 'block "%s"' % function[6:] - else: - location = 'template' - - if PY2: - code = CodeType(0, code.co_nlocals, code.co_stacksize, - code.co_flags, code.co_code, code.co_consts, - code.co_names, code.co_varnames, filename, - location, code.co_firstlineno, - code.co_lnotab, (), ()) - else: - code = CodeType(0, code.co_kwonlyargcount, - code.co_nlocals, code.co_stacksize, - code.co_flags, code.co_code, code.co_consts, - code.co_names, code.co_varnames, filename, - location, code.co_firstlineno, - code.co_lnotab, (), ()) - except Exception as e: - pass - - # execute the code and catch the new traceback - try: - exec(code, globals, locals) - except: - exc_info = sys.exc_info() - new_tb = exc_info[2].tb_next - - # return without this frame - return exc_info[:2] + (new_tb,) - - -def _init_ugly_crap(): - """This function implements a few ugly things so that we can patch the - traceback objects. 
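
fake_exc_info above positions its fabricated frame with a compile-time trick: prefixing the raise_helper statement with lineno - 1 newlines before compiling makes the resulting code object report exactly the template line it stands in for. The same idea in isolation, with a hypothetical filename chosen for illustration:

    import sys

    # Pad so the single statement sits on line 41 of 'example.html'.
    code = compile('\n' * (41 - 1) + 'raise ValueError("boom")',
                   'example.html', 'exec')
    try:
        exec(code, {})
    except ValueError:
        tb = sys.exc_info()[2].tb_next  # skip the frame that ran exec()
        print(tb.tb_lineno)             # 41
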
The function returned allows resetting `tb_next` on - any python traceback object. Do not attempt to use this on non cpython - interpreters - """ - import ctypes - from types import TracebackType - - if PY2: - # figure out size of _Py_ssize_t for Python 2: - if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'): - _Py_ssize_t = ctypes.c_int64 - else: - _Py_ssize_t = ctypes.c_int - else: - # platform ssize_t on Python 3 - _Py_ssize_t = ctypes.c_ssize_t - - # regular python - class _PyObject(ctypes.Structure): - pass - _PyObject._fields_ = [ - ('ob_refcnt', _Py_ssize_t), - ('ob_type', ctypes.POINTER(_PyObject)) - ] - - # python with trace - if hasattr(sys, 'getobjects'): - class _PyObject(ctypes.Structure): - pass - _PyObject._fields_ = [ - ('_ob_next', ctypes.POINTER(_PyObject)), - ('_ob_prev', ctypes.POINTER(_PyObject)), - ('ob_refcnt', _Py_ssize_t), - ('ob_type', ctypes.POINTER(_PyObject)) - ] - - class _Traceback(_PyObject): - pass - _Traceback._fields_ = [ - ('tb_next', ctypes.POINTER(_Traceback)), - ('tb_frame', ctypes.POINTER(_PyObject)), - ('tb_lasti', ctypes.c_int), - ('tb_lineno', ctypes.c_int) - ] - - def tb_set_next(tb, next): - """Set the tb_next attribute of a traceback object.""" - if not (isinstance(tb, TracebackType) and - (next is None or isinstance(next, TracebackType))): - raise TypeError('tb_set_next arguments must be traceback objects') - obj = _Traceback.from_address(id(tb)) - if tb.tb_next is not None: - old = _Traceback.from_address(id(tb.tb_next)) - old.ob_refcnt -= 1 - if next is None: - obj.tb_next = ctypes.POINTER(_Traceback)() - else: - next = _Traceback.from_address(id(next)) - next.ob_refcnt += 1 - obj.tb_next = ctypes.pointer(next) - - return tb_set_next - - -# try to get a tb_set_next implementation if we don't have transparent -# proxies. -tb_set_next = None -if tproxy is None: - try: - tb_set_next = _init_ugly_crap() - except: - pass - del _init_ugly_crap diff --git a/flo-token-explorer/lib/python3.6/site-packages/jinja2/defaults.py b/flo-token-explorer/lib/python3.6/site-packages/jinja2/defaults.py deleted file mode 100644 index 7c93dec..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/jinja2/defaults.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -""" - jinja2.defaults - ~~~~~~~~~~~~~~~ - - Jinja default filters and tags. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. 
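
The ctypes structure-poking in _init_ugly_crap exists because traceback objects were read-only on the CPython versions this code targets. Worth noting for anyone revisiting it: CPython 3.7 made tb_next a writable attribute, so on modern interpreters the whole dance reduces to an assignment. A hedged sketch:

    import sys

    def tb_set_next_simple(tb, next_tb):
        # Works on CPython 3.7+ only; older interpreters still need the
        # ctypes implementation above (or transparent proxies on PyPy).
        if sys.version_info < (3, 7):
            raise RuntimeError('tb_next is read-only before Python 3.7')
        tb.tb_next = next_tb
        return tb
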
-""" -from jinja2._compat import range_type -from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner, Namespace - - -# defaults for the parser / lexer -BLOCK_START_STRING = '{%' -BLOCK_END_STRING = '%}' -VARIABLE_START_STRING = '{{' -VARIABLE_END_STRING = '}}' -COMMENT_START_STRING = '{#' -COMMENT_END_STRING = '#}' -LINE_STATEMENT_PREFIX = None -LINE_COMMENT_PREFIX = None -TRIM_BLOCKS = False -LSTRIP_BLOCKS = False -NEWLINE_SEQUENCE = '\n' -KEEP_TRAILING_NEWLINE = False - - -# default filters, tests and namespace -from jinja2.filters import FILTERS as DEFAULT_FILTERS -from jinja2.tests import TESTS as DEFAULT_TESTS -DEFAULT_NAMESPACE = { - 'range': range_type, - 'dict': dict, - 'lipsum': generate_lorem_ipsum, - 'cycler': Cycler, - 'joiner': Joiner, - 'namespace': Namespace -} - - -# default policies -DEFAULT_POLICIES = { - 'compiler.ascii_str': True, - 'urlize.rel': 'noopener', - 'urlize.target': None, - 'truncate.leeway': 5, - 'json.dumps_function': None, - 'json.dumps_kwargs': {'sort_keys': True}, - 'ext.i18n.trimmed': False, -} - - -# export all constants -__all__ = tuple(x for x in locals().keys() if x.isupper()) diff --git a/flo-token-explorer/lib/python3.6/site-packages/jinja2/environment.py b/flo-token-explorer/lib/python3.6/site-packages/jinja2/environment.py deleted file mode 100644 index 549d9af..0000000 --- a/flo-token-explorer/lib/python3.6/site-packages/jinja2/environment.py +++ /dev/null @@ -1,1276 +0,0 @@ -# -*- coding: utf-8 -*- -""" - jinja2.environment - ~~~~~~~~~~~~~~~~~~ - - Provides a class that holds runtime and parsing time options. - - :copyright: (c) 2017 by the Jinja Team. - :license: BSD, see LICENSE for more details. -""" -import os -import sys -import weakref -from functools import reduce, partial -from jinja2 import nodes -from jinja2.defaults import BLOCK_START_STRING, \ - BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \ - COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \ - LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \ - DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \ - DEFAULT_POLICIES, KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS -from jinja2.lexer import get_lexer, TokenStream -from jinja2.parser import Parser -from jinja2.nodes import EvalContext -from jinja2.compiler import generate, CodeGenerator -from jinja2.runtime import Undefined, new_context, Context -from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \ - TemplatesNotFound, TemplateRuntimeError -from jinja2.utils import import_string, LRUCache, Markup, missing, \ - concat, consume, internalcode, have_async_gen -from jinja2._compat import imap, ifilter, string_types, iteritems, \ - text_type, reraise, implements_iterator, implements_to_string, \ - encode_filename, PY2, PYPY - - -# for direct template usage we have up to ten living environments -_spontaneous_environments = LRUCache(10) - -# the function to create jinja traceback objects. This is dynamically -# imported on the first exception in the exception handler. -_make_traceback = None - - -def get_spontaneous_environment(*args): - """Return a new spontaneous environment. A spontaneous environment is an - unnamed and unaccessible (in theory) environment that is used for - templates generated from a string and not from the file system. 
- """ - try: - env = _spontaneous_environments.get(args) - except TypeError: - return Environment(*args) - if env is not None: - return env - _spontaneous_environments[args] = env = Environment(*args) - env.shared = True - return env - - -def create_cache(size): - """Return the cache class for the given size.""" - if size == 0: - return None - if size < 0: - return {} - return LRUCache(size) - - -def copy_cache(cache): - """Create an empty copy of the given cache.""" - if cache is None: - return None - elif type(cache) is dict: - return {} - return LRUCache(cache.capacity) - - -def load_extensions(environment, extensions): - """Load the extensions from the list and bind it to the environment. - Returns a dict of instantiated environments. - """ - result = {} - for extension in extensions: - if isinstance(extension, string_types): - extension = import_string(extension) - result[extension.identifier] = extension(environment) - return result - - -def fail_for_missing_callable(string, name): - msg = string % name - if isinstance(name, Undefined): - try: - name._fail_with_undefined_error() - except Exception as e: - msg = '%s (%s; did you forget to quote the callable name?)' % (msg, e) - raise TemplateRuntimeError(msg) - - -def _environment_sanity_check(environment): - """Perform a sanity check on the environment.""" - assert issubclass(environment.undefined, Undefined), 'undefined must ' \ - 'be a subclass of undefined because filters depend on it.' - assert environment.block_start_string != \ - environment.variable_start_string != \ - environment.comment_start_string, 'block, variable and comment ' \ - 'start strings must be different' - assert environment.newline_sequence in ('\r', '\r\n', '\n'), \ - 'newline_sequence set to unknown line ending string.' - return environment - - -class Environment(object): - r"""The core component of Jinja is the `Environment`. It contains - important shared variables like configuration, filters, tests, - globals and others. Instances of this class may be modified if - they are not shared and if no template was loaded so far. - Modifications on environments after the first template was loaded - will lead to surprising effects and undefined behavior. - - Here are the possible initialization parameters: - - `block_start_string` - The string marking the beginning of a block. Defaults to ``'{%'``. - - `block_end_string` - The string marking the end of a block. Defaults to ``'%}'``. - - `variable_start_string` - The string marking the beginning of a print statement. - Defaults to ``'{{'``. - - `variable_end_string` - The string marking the end of a print statement. Defaults to - ``'}}'``. - - `comment_start_string` - The string marking the beginning of a comment. Defaults to ``'{#'``. - - `comment_end_string` - The string marking the end of a comment. Defaults to ``'#}'``. - - `line_statement_prefix` - If given and a string, this will be used as prefix for line based - statements. See also :ref:`line-statements`. - - `line_comment_prefix` - If given and a string, this will be used as prefix for line based - comments. See also :ref:`line-statements`. - - .. versionadded:: 2.2 - - `trim_blocks` - If this is set to ``True`` the first newline after a block is - removed (block, not variable tag!). Defaults to `False`. - - `lstrip_blocks` - If this is set to ``True`` leading spaces and tabs are stripped - from the start of a line to a block. Defaults to `False`. - - `newline_sequence` - The sequence that starts a newline. 
Must be one of ``'\r'``, - ``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a - useful default for Linux and OS X systems as well as web - applications. - - `keep_trailing_newline` - Preserve the trailing newline when rendering templates. - The default is ``False``, which causes a single newline, - if present, to be stripped from the end of the template. - - .. versionadded:: 2.7 - - `extensions` - List of Jinja extensions to use. This can either be import paths - as strings or extension classes. For more information have a - look at :ref:`the extensions documentation `. - - `optimized` - should the optimizer be enabled? Default is ``True``. - - `undefined` - :class:`Undefined` or a subclass of it that is used to represent - undefined values in the template. - - `finalize` - A callable that can be used to process the result of a variable - expression before it is output. For example one can convert - ``None`` implicitly into an empty string here. - - `autoescape` - If set to ``True`` the XML/HTML autoescaping feature is enabled by - default. For more details about autoescaping see - :class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also - be a callable that is passed the template name and has to - return ``True`` or ``False`` depending on autoescape should be - enabled by default. - - .. versionchanged:: 2.4 - `autoescape` can now be a function - - `loader` - The template loader for this environment. - - `cache_size` - The size of the cache. Per default this is ``400`` which means - that if more than 400 templates are loaded the loader will clean - out the least recently used template. If the cache size is set to - ``0`` templates are recompiled all the time, if the cache size is - ``-1`` the cache will not be cleaned. - - .. versionchanged:: 2.8 - The cache size was increased to 400 from a low 50. - - `auto_reload` - Some loaders load templates from locations where the template - sources may change (ie: file system or database). If - ``auto_reload`` is set to ``True`` (default) every time a template is - requested the loader checks if the source changed and if yes, it - will reload the template. For higher performance it's possible to - disable that. - - `bytecode_cache` - If set to a bytecode cache object, this object will provide a - cache for the internal Jinja bytecode so that templates don't - have to be parsed if they were not changed. - - See :ref:`bytecode-cache` for more information. - - `enable_async` - If set to true this enables async template execution which allows - you to take advantage of newer Python features. This requires - Python 3.6 or later. - """ - - #: if this environment is sandboxed. Modifying this variable won't make - #: the environment sandboxed though. For a real sandboxed environment - #: have a look at jinja2.sandbox. This flag alone controls the code - #: generation by the compiler. - sandboxed = False - - #: True if the environment is just an overlay - overlayed = False - - #: the environment this environment is linked to if it is an overlay - linked_to = None - - #: shared environments have this set to `True`. A shared environment - #: must not be modified - shared = False - - #: these are currently EXPERIMENTAL undocumented features. - exception_handler = None - exception_formatter = None - - #: the class that is used for code generation. See - #: :class:`~jinja2.compiler.CodeGenerator` for more information. - code_generator_class = CodeGenerator - - #: the context class thatis used for templates. 
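
As the autoescape notes above say, the flag may also be a callable that receives the template name, which is what lets escaping follow file extensions. A sketch of such a predicate (autoescape_by_extension is an illustrative name; jinja2.select_autoescape builds an equivalent callable on 2.9+):

    from jinja2 import Environment

    def autoescape_by_extension(template_name):
        # string templates arrive with template_name=None; escaping them
        # too is the conservative choice for this illustration
        if template_name is None:
            return True
        return template_name.endswith(('.html', '.htm', '.xml'))

    env = Environment(autoescape=autoescape_by_extension)
    print(env.from_string('{{ snippet }}').render(snippet='<b>hi</b>'))
    # -> &lt;b&gt;hi&lt;/b&gt;
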
See - #: :class:`~jinja2.runtime.Context` for more information. - context_class = Context - - def __init__(self, - block_start_string=BLOCK_START_STRING, - block_end_string=BLOCK_END_STRING, - variable_start_string=VARIABLE_START_STRING, - variable_end_string=VARIABLE_END_STRING, - comment_start_string=COMMENT_START_STRING, - comment_end_string=COMMENT_END_STRING, - line_statement_prefix=LINE_STATEMENT_PREFIX, - line_comment_prefix=LINE_COMMENT_PREFIX, - trim_blocks=TRIM_BLOCKS, - lstrip_blocks=LSTRIP_BLOCKS, - newline_sequence=NEWLINE_SEQUENCE, - keep_trailing_newline=KEEP_TRAILING_NEWLINE, - extensions=(), - optimized=True, - undefined=Undefined, - finalize=None, - autoescape=False, - loader=None, - cache_size=400, - auto_reload=True, - bytecode_cache=None, - enable_async=False): - # !!Important notice!! - # The constructor accepts quite a few arguments that should be - # passed by keyword rather than position. However it's important to - # not change the order of arguments because it's used at least - # internally in those cases: - # - spontaneous environments (i18n extension and Template) - # - unittests - # If parameter changes are required only add parameters at the end - # and don't change the arguments (or the defaults!) of the arguments - # existing already. - - # lexer / parser information - self.block_start_string = block_start_string - self.block_end_string = block_end_string - self.variable_start_string = variable_start_string - self.variable_end_string = variable_end_string - self.comment_start_string = comment_start_string - self.comment_end_string = comment_end_string - self.line_statement_prefix = line_statement_prefix - self.line_comment_prefix = line_comment_prefix - self.trim_blocks = trim_blocks - self.lstrip_blocks = lstrip_blocks - self.newline_sequence = newline_sequence - self.keep_trailing_newline = keep_trailing_newline - - # runtime information - self.undefined = undefined - self.optimized = optimized - self.finalize = finalize - self.autoescape = autoescape - - # defaults - self.filters = DEFAULT_FILTERS.copy() - self.tests = DEFAULT_TESTS.copy() - self.globals = DEFAULT_NAMESPACE.copy() - - # set the loader provided - self.loader = loader - self.cache = create_cache(cache_size) - self.bytecode_cache = bytecode_cache - self.auto_reload = auto_reload - - # configurable policies - self.policies = DEFAULT_POLICIES.copy() - - # load extensions - self.extensions = load_extensions(self, extensions) - - self.enable_async = enable_async - self.is_async = self.enable_async and have_async_gen - - _environment_sanity_check(self) - - def add_extension(self, extension): - """Adds an extension after the environment was created. - - .. versionadded:: 2.5 - """ - self.extensions.update(load_extensions(self, [extension])) - - def extend(self, **attributes): - """Add the items to the instance of the environment if they do not exist - yet. This is used by :ref:`extensions ` to register - callbacks and configuration values without breaking inheritance. 
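
extend() is the hook extensions use to attach configuration to an environment without overwriting anything the user set first; the fragment-cache example from the Jinja documentation is the canonical shape. A trimmed sketch (FragmentCacheExtension here is illustrative, not part of this package):

    from jinja2 import Environment
    from jinja2.ext import Extension

    class FragmentCacheExtension(Extension):
        tags = set()  # no new tags; we only want the constructor hook

        def __init__(self, environment):
            super(FragmentCacheExtension, self).__init__(environment)
            # adds the attribute only if the user did not configure it already
            environment.extend(fragment_cache=None)

    env = Environment(extensions=[FragmentCacheExtension])
    print(env.fragment_cache)  # None, registered via extend()
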
- """ - for key, value in iteritems(attributes): - if not hasattr(self, key): - setattr(self, key, value) - - def overlay(self, block_start_string=missing, block_end_string=missing, - variable_start_string=missing, variable_end_string=missing, - comment_start_string=missing, comment_end_string=missing, - line_statement_prefix=missing, line_comment_prefix=missing, - trim_blocks=missing, lstrip_blocks=missing, - extensions=missing, optimized=missing, - undefined=missing, finalize=missing, autoescape=missing, - loader=missing, cache_size=missing, auto_reload=missing, - bytecode_cache=missing): - """Create a new overlay environment that shares all the data with the - current environment except for cache and the overridden attributes. - Extensions cannot be removed for an overlayed environment. An overlayed - environment automatically gets all the extensions of the environment it - is linked to plus optional extra extensions. - - Creating overlays should happen after the initial environment was set - up completely. Not all attributes are truly linked, some are just - copied over so modifications on the original environment may not shine - through. - """ - args = dict(locals()) - del args['self'], args['cache_size'], args['extensions'] - - rv = object.__new__(self.__class__) - rv.__dict__.update(self.__dict__) - rv.overlayed = True - rv.linked_to = self - - for key, value in iteritems(args): - if value is not missing: - setattr(rv, key, value) - - if cache_size is not missing: - rv.cache = create_cache(cache_size) - else: - rv.cache = copy_cache(self.cache) - - rv.extensions = {} - for key, value in iteritems(self.extensions): - rv.extensions[key] = value.bind(rv) - if extensions is not missing: - rv.extensions.update(load_extensions(rv, extensions)) - - return _environment_sanity_check(rv) - - lexer = property(get_lexer, doc="The lexer for this environment.") - - def iter_extensions(self): - """Iterates over the extensions by priority.""" - return iter(sorted(self.extensions.values(), - key=lambda x: x.priority)) - - def getitem(self, obj, argument): - """Get an item or attribute of an object but prefer the item.""" - try: - return obj[argument] - except (AttributeError, TypeError, LookupError): - if isinstance(argument, string_types): - try: - attr = str(argument) - except Exception: - pass - else: - try: - return getattr(obj, attr) - except AttributeError: - pass - return self.undefined(obj=obj, name=argument) - - def getattr(self, obj, attribute): - """Get an item or attribute of an object but prefer the attribute. - Unlike :meth:`getitem` the attribute *must* be a bytestring. - """ - try: - return getattr(obj, attribute) - except AttributeError: - pass - try: - return obj[attribute] - except (TypeError, LookupError, AttributeError): - return self.undefined(obj=obj, name=attribute) - - def call_filter(self, name, value, args=None, kwargs=None, - context=None, eval_ctx=None): - """Invokes a filter on a value the same way the compiler does it. - - Note that on Python 3 this might return a coroutine in case the - filter is running from an environment in async mode and the filter - supports async execution. It's your responsibility to await this - if needed. - - .. 
versionadded:: 2.7 - """ - func = self.filters.get(name) - if func is None: - fail_for_missing_callable('no filter named %r', name) - args = [value] + list(args or ()) - if getattr(func, 'contextfilter', False): - if context is None: - raise TemplateRuntimeError('Attempted to invoke context ' - 'filter without context') - args.insert(0, context) - elif getattr(func, 'evalcontextfilter', False): - if eval_ctx is None: - if context is not None: - eval_ctx = context.eval_ctx - else: - eval_ctx = EvalContext(self) - args.insert(0, eval_ctx) - elif getattr(func, 'environmentfilter', False): - args.insert(0, self) - return func(*args, **(kwargs or {})) - - def call_test(self, name, value, args=None, kwargs=None): - """Invokes a test on a value the same way the compiler does it. - - .. versionadded:: 2.7 - """ - func = self.tests.get(name) - if func is None: - fail_for_missing_callable('no test named %r', name) - return func(value, *(args or ()), **(kwargs or {})) - - @internalcode - def parse(self, source, name=None, filename=None): - """Parse the sourcecode and return the abstract syntax tree. This - tree of nodes is used by the compiler to convert the template into - executable source- or bytecode. This is useful for debugging or to - extract information from templates. - - If you are :ref:`developing Jinja2 extensions ` - this gives you a good overview of the node tree generated. - """ - try: - return self._parse(source, name, filename) - except TemplateSyntaxError: - exc_info = sys.exc_info() - self.handle_exception(exc_info, source_hint=source) - - def _parse(self, source, name, filename): - """Internal parsing function used by `parse` and `compile`.""" - return Parser(self, source, name, encode_filename(filename)).parse() - - def lex(self, source, name=None, filename=None): - """Lex the given sourcecode and return a generator that yields - tokens as tuples in the form ``(lineno, token_type, value)``. - This can be useful for :ref:`extension development ` - and debugging templates. - - This does not perform preprocessing. If you want the preprocessing - of the extensions to be applied you have to filter source through - the :meth:`preprocess` method. - """ - source = text_type(source) - try: - return self.lexer.tokeniter(source, name, filename) - except TemplateSyntaxError: - exc_info = sys.exc_info() - self.handle_exception(exc_info, source_hint=source) - - def preprocess(self, source, name=None, filename=None): - """Preprocesses the source with all extensions. This is automatically - called for all parsing and compiling methods but *not* for :meth:`lex` - because there you usually only want the actual source tokenized. - """ - return reduce(lambda s, e: e.preprocess(s, name, filename), - self.iter_extensions(), text_type(source)) - - def _tokenize(self, source, name, filename=None, state=None): - """Called by the parser to do the preprocessing and filtering - for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`. - """ - source = self.preprocess(source, name, filename) - stream = self.lexer.tokenize(source, name, filename, state) - for ext in self.iter_extensions(): - stream = ext.filter_stream(stream) - if not isinstance(stream, TokenStream): - stream = TokenStream(stream, name, filename) - return stream - - def _generate(self, source, name, filename, defer_init=False): - """Internal hook that can be overridden to hook a different generate - method in. - - .. 
versionadded:: 2.5 - """ - return generate(source, self, name, filename, defer_init=defer_init, - optimized=self.optimized) - - def _compile(self, source, filename): - """Internal hook that can be overridden to hook a different compile - method in. - - .. versionadded:: 2.5 - """ - return compile(source, filename, 'exec') - - @internalcode - def compile(self, source, name=None, filename=None, raw=False, - defer_init=False): - """Compile a node or template source code. The `name` parameter is - the load name of the template after it was joined using - :meth:`join_path` if necessary, not the filename on the file system. - the `filename` parameter is the estimated filename of the template on - the file system. If the template came from a database or memory this - can be omitted. - - The return value of this method is a python code object. If the `raw` - parameter is `True` the return value will be a string with python - code equivalent to the bytecode returned otherwise. This method is - mainly used internally. - - `defer_init` is use internally to aid the module code generator. This - causes the generated code to be able to import without the global - environment variable to be set. - - .. versionadded:: 2.4 - `defer_init` parameter added. - """ - source_hint = None - try: - if isinstance(source, string_types): - source_hint = source - source = self._parse(source, name, filename) - source = self._generate(source, name, filename, - defer_init=defer_init) - if raw: - return source - if filename is None: - filename = '