author     S. Solomon Darnell  2025-03-28 21:52:21 -0500
committer  S. Solomon Darnell  2025-03-28 21:52:21 -0500
commit     4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree       ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/gunicorn
parent     cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
two versions of R2R are here (HEAD, master)
Diffstat (limited to '.venv/lib/python3.12/site-packages/gunicorn')
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/__init__.py             |    9
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/__main__.py             |    7
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/app/__init__.py         |    4
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/app/base.py             |  236
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/app/pasterapp.py        |   75
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/app/wsgiapp.py          |   71
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/arbiter.py              |  672
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/config.py               | 2258
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/debug.py                |   69
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/errors.py               |   29
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/glogging.py             |  474
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/http/__init__.py        |    9
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/http/body.py            |  262
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/http/errors.py          |  120
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/http/message.py         |  360
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/http/parser.py          |   52
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/http/unreader.py        |   79
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/http/wsgi.py            |  393
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/instrument/__init__.py  |    0
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/instrument/statsd.py    |  133
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/pidfile.py              |   86
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/reloader.py             |  132
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/sock.py                 |  232
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/systemd.py              |   76
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/util.py                 |  654
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/workers/__init__.py     |   15
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/workers/base.py         |  275
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/workers/base_async.py   |  148
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/workers/geventlet.py    |  187
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/workers/ggevent.py      |  190
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/workers/gthread.py      |  373
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/workers/gtornado.py     |  167
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/workers/sync.py         |  210
-rw-r--r--  .venv/lib/python3.12/site-packages/gunicorn/workers/workertmp.py    |   55
34 files changed, 8112 insertions(+), 0 deletions(-)
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/__init__.py b/.venv/lib/python3.12/site-packages/gunicorn/__init__.py
new file mode 100644
index 00000000..adf5e89b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/__init__.py
@@ -0,0 +1,9 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+version_info = (21, 2, 0)
+__version__ = ".".join([str(v) for v in version_info])
+SERVER = "gunicorn"
+SERVER_SOFTWARE = "%s/%s" % (SERVER, __version__)
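
The module above is the single source of version metadata for the package; a minimal sketch of how it is consumed (output values assume the 21.2.0 tree vendored here):

    import gunicorn

    # version_info is a tuple of ints; __version__ joins it with dots.
    assert gunicorn.version_info == (21, 2, 0)
    print(gunicorn.__version__)      # -> 21.2.0
    print(gunicorn.SERVER_SOFTWARE)  # -> gunicorn/21.2.0
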
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/__main__.py b/.venv/lib/python3.12/site-packages/gunicorn/__main__.py
new file mode 100644
index 00000000..49ba6960
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/__main__.py
@@ -0,0 +1,7 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+from gunicorn.app.wsgiapp import run
+run()
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/app/__init__.py b/.venv/lib/python3.12/site-packages/gunicorn/app/__init__.py
new file mode 100644
index 00000000..87f06117
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/app/__init__.py
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/app/base.py b/.venv/lib/python3.12/site-packages/gunicorn/app/base.py
new file mode 100644
index 00000000..dbd05bc7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/app/base.py
@@ -0,0 +1,236 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+import importlib.util
+import importlib.machinery
+import os
+import sys
+import traceback
+
+from gunicorn import util
+from gunicorn.arbiter import Arbiter
+from gunicorn.config import Config, get_default_config_file
+from gunicorn import debug
+
+
+class BaseApplication(object):
+    """
+    An application interface for configuring and loading
+    the various necessities for any given web framework.
+    """
+    def __init__(self, usage=None, prog=None):
+        self.usage = usage
+        self.cfg = None
+        self.callable = None
+        self.prog = prog
+        self.logger = None
+        self.do_load_config()
+
+    def do_load_config(self):
+        """
+        Loads the configuration
+        """
+        try:
+            self.load_default_config()
+            self.load_config()
+        except Exception as e:
+            print("\nError: %s" % str(e), file=sys.stderr)
+            sys.stderr.flush()
+            sys.exit(1)
+
+    def load_default_config(self):
+        # init configuration
+        self.cfg = Config(self.usage, prog=self.prog)
+
+    def init(self, parser, opts, args):
+        raise NotImplementedError
+
+    def load(self):
+        raise NotImplementedError
+
+    def load_config(self):
+        """
+        Load the configuration from one or more inputs: the command
+        line, a configuration file, and so on.
+        Subclasses must override this method.
+        """
+        raise NotImplementedError
+
+    def reload(self):
+        self.do_load_config()
+        if self.cfg.spew:
+            debug.spew()
+
+    def wsgi(self):
+        if self.callable is None:
+            self.callable = self.load()
+        return self.callable
+
+    def run(self):
+        try:
+            Arbiter(self).run()
+        except RuntimeError as e:
+            print("\nError: %s\n" % e, file=sys.stderr)
+            sys.stderr.flush()
+            sys.exit(1)
+
+
+class Application(BaseApplication):
+
+    # 'init' and 'load' methods are implemented by WSGIApplication.
+    # pylint: disable=abstract-method
+
+    def chdir(self):
+        # chdir to the configured path before loading,
+        # default is the current dir
+        os.chdir(self.cfg.chdir)
+
+        # add the path to sys.path
+        if self.cfg.chdir not in sys.path:
+            sys.path.insert(0, self.cfg.chdir)
+
+    def get_config_from_filename(self, filename):
+
+        if not os.path.exists(filename):
+            raise RuntimeError("%r doesn't exist" % filename)
+
+        ext = os.path.splitext(filename)[1]
+
+        try:
+            module_name = '__config__'
+            if ext in [".py", ".pyc"]:
+                spec = importlib.util.spec_from_file_location(module_name, filename)
+            else:
+                msg = "configuration file should have a valid Python extension.\n"
+                util.warn(msg)
+                loader_ = importlib.machinery.SourceFileLoader(module_name, filename)
+                spec = importlib.util.spec_from_file_location(module_name, filename, loader=loader_)
+            mod = importlib.util.module_from_spec(spec)
+            sys.modules[module_name] = mod
+            spec.loader.exec_module(mod)
+        except Exception:
+            print("Failed to read config file: %s" % filename, file=sys.stderr)
+            traceback.print_exc()
+            sys.stderr.flush()
+            sys.exit(1)
+
+        return vars(mod)
+
+    def get_config_from_module_name(self, module_name):
+        return vars(importlib.import_module(module_name))
+
+    def load_config_from_module_name_or_filename(self, location):
+        """
+        Load the configuration from a module name or filename. The file must be a
+        Python file, otherwise a RuntimeError is raised; the process stops if the
+        configuration file contains a syntax error.
+        """
+
+        if location.startswith("python:"):
+            module_name = location[len("python:"):]
+            cfg = self.get_config_from_module_name(module_name)
+        else:
+            if location.startswith("file:"):
+                filename = location[len("file:"):]
+            else:
+                filename = location
+            cfg = self.get_config_from_filename(filename)
+
+        for k, v in cfg.items():
+            # Ignore unknown names
+            if k not in self.cfg.settings:
+                continue
+            try:
+                self.cfg.set(k.lower(), v)
+            except Exception:
+                print("Invalid value for %s: %s\n" % (k, v), file=sys.stderr)
+                sys.stderr.flush()
+                raise
+
+        return cfg
+
+    def load_config_from_file(self, filename):
+        return self.load_config_from_module_name_or_filename(location=filename)
+
+    def load_config(self):
+        # parse console args
+        parser = self.cfg.parser()
+        args = parser.parse_args()
+
+        # optional settings from apps
+        cfg = self.init(parser, args, args.args)
+
+        # set up import paths and follow symlinks
+        self.chdir()
+
+        # Load up any app-specific configuration
+        if cfg:
+            for k, v in cfg.items():
+                self.cfg.set(k.lower(), v)
+
+        env_args = parser.parse_args(self.cfg.get_cmd_args_from_env())
+
+        if args.config:
+            self.load_config_from_file(args.config)
+        elif env_args.config:
+            self.load_config_from_file(env_args.config)
+        else:
+            default_config = get_default_config_file()
+            if default_config is not None:
+                self.load_config_from_file(default_config)
+
+        # Load up environment configuration
+        for k, v in vars(env_args).items():
+            if v is None:
+                continue
+            if k == "args":
+                continue
+            self.cfg.set(k.lower(), v)
+
+        # Lastly, update the configuration with any command line settings.
+        for k, v in vars(args).items():
+            if v is None:
+                continue
+            if k == "args":
+                continue
+            self.cfg.set(k.lower(), v)
+
+        # current directory might be changed by the config now
+        # set up import paths and follow symlinks
+        self.chdir()
+
+    def run(self):
+        if self.cfg.print_config:
+            print(self.cfg)
+
+        if self.cfg.print_config or self.cfg.check_config:
+            try:
+                self.load()
+            except Exception:
+                msg = "\nError while loading the application:\n"
+                print(msg, file=sys.stderr)
+                traceback.print_exc()
+                sys.stderr.flush()
+                sys.exit(1)
+            sys.exit(0)
+
+        if self.cfg.spew:
+            debug.spew()
+
+        if self.cfg.daemon:
+            if os.environ.get('NOTIFY_SOCKET'):
+                msg = "Warning: you shouldn't specify `daemon = True`" \
+                      " when launching by systemd with `Type = notify`"
+                print(msg, file=sys.stderr, flush=True)
+
+            util.daemonize(self.cfg.enable_stdio_inheritance)
+
+        # set python paths
+        if self.cfg.pythonpath:
+            paths = self.cfg.pythonpath.split(",")
+            for path in paths:
+                pythonpath = os.path.abspath(path)
+                if pythonpath not in sys.path:
+                    sys.path.insert(0, pythonpath)
+
+        super().run()
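
BaseApplication above is the supported entry point for embedding Gunicorn in another program: override load_config() and load(), then call run(). A minimal sketch of that contract, where the handler and option values are illustrative rather than part of this diff:

    from gunicorn.app.base import BaseApplication


    def handler(environ, start_response):
        # Trivial WSGI callable used only for illustration.
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [b"hello\n"]


    class StandaloneApplication(BaseApplication):
        def __init__(self, application, options=None):
            # Set state before super().__init__(), which calls load_config().
            self.options = options or {}
            self.application = application
            super().__init__()

        def load_config(self):
            # Mirror load_config_from_module_name_or_filename(): skip unknown
            # names and lower-case keys before cfg.set().
            for key, value in self.options.items():
                if key in self.cfg.settings and value is not None:
                    self.cfg.set(key.lower(), value)

        def load(self):
            return self.application


    if __name__ == "__main__":
        StandaloneApplication(handler, {"bind": "127.0.0.1:8080", "workers": 2}).run()
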
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/app/pasterapp.py b/.venv/lib/python3.12/site-packages/gunicorn/app/pasterapp.py
new file mode 100644
index 00000000..4c9fc7de
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/app/pasterapp.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+import configparser
+import os
+
+from paste.deploy import loadapp
+
+from gunicorn.app.wsgiapp import WSGIApplication
+from gunicorn.config import get_default_config_file
+
+
+def get_wsgi_app(config_uri, name=None, defaults=None):
+    if ':' not in config_uri:
+        config_uri = "config:%s" % config_uri
+
+    return loadapp(
+        config_uri,
+        name=name,
+        relative_to=os.getcwd(),
+        global_conf=defaults,
+    )
+
+
+def has_logging_config(config_file):
+    parser = configparser.ConfigParser()
+    parser.read([config_file])
+    return parser.has_section('loggers')
+
+
+def serve(app, global_conf, **local_conf):
+    """\
+    A Paste Deployment server runner.
+
+    Example configuration:
+
+        [server:main]
+        use = egg:gunicorn#main
+        host = 127.0.0.1
+        port = 5000
+    """
+    config_file = global_conf['__file__']
+    gunicorn_config_file = local_conf.pop('config', None)
+
+    host = local_conf.pop('host', '')
+    port = local_conf.pop('port', '')
+    if host and port:
+        local_conf['bind'] = '%s:%s' % (host, port)
+    elif host:
+        local_conf['bind'] = host.split(',')
+
+    class PasterServerApplication(WSGIApplication):
+        def load_config(self):
+            self.cfg.set("default_proc_name", config_file)
+
+            if has_logging_config(config_file):
+                self.cfg.set("logconfig", config_file)
+
+            if gunicorn_config_file:
+                self.load_config_from_file(gunicorn_config_file)
+            else:
+                default_gunicorn_config_file = get_default_config_file()
+                if default_gunicorn_config_file is not None:
+                    self.load_config_from_file(default_gunicorn_config_file)
+
+            for k, v in local_conf.items():
+                if v is not None:
+                    self.cfg.set(k.lower(), v)
+
+        def load(self):
+            return app
+
+    PasterServerApplication().run()
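
Besides the serve() runner, get_wsgi_app() above can be used directly to resolve a PasteDeploy URI into a WSGI callable; a hedged sketch, assuming PasteDeploy is installed and production.ini is a hypothetical file defining an [app:main] section:

    from gunicorn.app.pasterapp import get_wsgi_app

    # "config:" is prepended automatically when the URI has no scheme, and
    # relative paths are resolved against the current working directory.
    app = get_wsgi_app("production.ini", name="main")
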
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/app/wsgiapp.py b/.venv/lib/python3.12/site-packages/gunicorn/app/wsgiapp.py
new file mode 100644
index 00000000..36cfba9d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/app/wsgiapp.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+import os
+
+from gunicorn.errors import ConfigError
+from gunicorn.app.base import Application
+from gunicorn import util
+
+
+class WSGIApplication(Application):
+    def init(self, parser, opts, args):
+        self.app_uri = None
+
+        if opts.paste:
+            from .pasterapp import has_logging_config
+
+            config_uri = os.path.abspath(opts.paste)
+            config_file = config_uri.split('#')[0]
+
+            if not os.path.exists(config_file):
+                raise ConfigError("%r not found" % config_file)
+
+            self.cfg.set("default_proc_name", config_file)
+            self.app_uri = config_uri
+
+            if has_logging_config(config_file):
+                self.cfg.set("logconfig", config_file)
+
+            return
+
+        if len(args) > 0:
+            self.cfg.set("default_proc_name", args[0])
+            self.app_uri = args[0]
+
+    def load_config(self):
+        super().load_config()
+
+        if self.app_uri is None:
+            if self.cfg.wsgi_app is not None:
+                self.app_uri = self.cfg.wsgi_app
+            else:
+                raise ConfigError("No application module specified.")
+
+    def load_wsgiapp(self):
+        return util.import_app(self.app_uri)
+
+    def load_pasteapp(self):
+        from .pasterapp import get_wsgi_app
+        return get_wsgi_app(self.app_uri, defaults=self.cfg.paste_global_conf)
+
+    def load(self):
+        if self.cfg.paste is not None:
+            return self.load_pasteapp()
+        else:
+            return self.load_wsgiapp()
+
+
+def run():
+    """\
+    The ``gunicorn`` command line runner for launching Gunicorn with
+    generic WSGI applications.
+    """
+    from gunicorn.app.wsgiapp import WSGIApplication
+    WSGIApplication("%(prog)s [OPTIONS] [APP_MODULE]").run()
+
+
+if __name__ == '__main__':
+    run()
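
load_config() above accepts the application either as the positional APP_MODULE argument or via the wsgi_app setting; a hedged sketch of the latter (file and module names are illustrative):

    # gunicorn.conf.py -- picked up automatically from the working directory.
    # With this file in place, running plain "gunicorn" resolves the app
    # through cfg.wsgi_app instead of raising ConfigError.
    wsgi_app = "myapp:app"
    bind = "127.0.0.1:8000"
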
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/arbiter.py b/.venv/lib/python3.12/site-packages/gunicorn/arbiter.py
new file mode 100644
index 00000000..08523d44
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/arbiter.py
@@ -0,0 +1,672 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+import errno
+import os
+import random
+import select
+import signal
+import sys
+import time
+import traceback
+
+from gunicorn.errors import HaltServer, AppImportError
+from gunicorn.pidfile import Pidfile
+from gunicorn import sock, systemd, util
+
+from gunicorn import __version__, SERVER_SOFTWARE
+
+
+class Arbiter(object):
+    """
+    The Arbiter keeps the worker processes alive, launching or
+    killing them as needed. It also manages application reloading
+    via SIGHUP/USR2.
+    """
+
+    # The exit code used by a worker that failed
+    # to boot. If a worker process exits with
+    # this error code, the arbiter will terminate.
+    WORKER_BOOT_ERROR = 3
+
+    # A flag indicating if an application failed to be loaded
+    APP_LOAD_ERROR = 4
+
+    START_CTX = {}
+
+    LISTENERS = []
+    WORKERS = {}
+    PIPE = []
+
+    # I love dynamic languages
+    SIG_QUEUE = []
+    SIGNALS = [getattr(signal, "SIG%s" % x)
+               for x in "HUP QUIT INT TERM TTIN TTOU USR1 USR2 WINCH".split()]
+    SIG_NAMES = dict(
+        (getattr(signal, name), name[3:].lower()) for name in dir(signal)
+        if name[:3] == "SIG" and name[3] != "_"
+    )
+
+    def __init__(self, app):
+        os.environ["SERVER_SOFTWARE"] = SERVER_SOFTWARE
+
+        self._num_workers = None
+        self._last_logged_active_worker_count = None
+        self.log = None
+
+        self.setup(app)
+
+        self.pidfile = None
+        self.systemd = False
+        self.worker_age = 0
+        self.reexec_pid = 0
+        self.master_pid = 0
+        self.master_name = "Master"
+
+        cwd = util.getcwd()
+
+        args = sys.argv[:]
+        args.insert(0, sys.executable)
+
+        # init start context
+        self.START_CTX = {
+            "args": args,
+            "cwd": cwd,
+            0: sys.executable
+        }
+
+    def _get_num_workers(self):
+        return self._num_workers
+
+    def _set_num_workers(self, value):
+        old_value = self._num_workers
+        self._num_workers = value
+        self.cfg.nworkers_changed(self, value, old_value)
+    num_workers = property(_get_num_workers, _set_num_workers)
+
+    def setup(self, app):
+        self.app = app
+        self.cfg = app.cfg
+
+        if self.log is None:
+            self.log = self.cfg.logger_class(app.cfg)
+
+        # reopen files
+        if 'GUNICORN_FD' in os.environ:
+            self.log.reopen_files()
+
+        self.worker_class = self.cfg.worker_class
+        self.address = self.cfg.address
+        self.num_workers = self.cfg.workers
+        self.timeout = self.cfg.timeout
+        self.proc_name = self.cfg.proc_name
+
+        self.log.debug('Current configuration:\n{0}'.format(
+            '\n'.join(
+                '  {0}: {1}'.format(config, value.value)
+                for config, value
+                in sorted(self.cfg.settings.items(),
+                          key=lambda setting: setting[1]))))
+
+        # set environment variables
+        if self.cfg.env:
+            for k, v in self.cfg.env.items():
+                os.environ[k] = v
+
+        if self.cfg.preload_app:
+            self.app.wsgi()
+
+    def start(self):
+        """\
+        Initialize the arbiter. Start listening and set pidfile if needed.
+        """
+        self.log.info("Starting gunicorn %s", __version__)
+
+        if 'GUNICORN_PID' in os.environ:
+            self.master_pid = int(os.environ.get('GUNICORN_PID'))
+            self.proc_name = self.proc_name + ".2"
+            self.master_name = "Master.2"
+
+        self.pid = os.getpid()
+        if self.cfg.pidfile is not None:
+            pidname = self.cfg.pidfile
+            if self.master_pid != 0:
+                pidname += ".2"
+            self.pidfile = Pidfile(pidname)
+            self.pidfile.create(self.pid)
+        self.cfg.on_starting(self)
+
+        self.init_signals()
+
+        if not self.LISTENERS:
+            fds = None
+            listen_fds = systemd.listen_fds()
+            if listen_fds:
+                self.systemd = True
+                fds = range(systemd.SD_LISTEN_FDS_START,
+                            systemd.SD_LISTEN_FDS_START + listen_fds)
+
+            elif self.master_pid:
+                fds = []
+                for fd in os.environ.pop('GUNICORN_FD').split(','):
+                    fds.append(int(fd))
+
+            self.LISTENERS = sock.create_sockets(self.cfg, self.log, fds)
+
+        listeners_str = ",".join([str(lnr) for lnr in self.LISTENERS])
+        self.log.debug("Arbiter booted")
+        self.log.info("Listening at: %s (%s)", listeners_str, self.pid)
+        self.log.info("Using worker: %s", self.cfg.worker_class_str)
+        systemd.sd_notify("READY=1\nSTATUS=Gunicorn arbiter booted", self.log)
+
+        # check worker class requirements
+        if hasattr(self.worker_class, "check_config"):
+            self.worker_class.check_config(self.cfg, self.log)
+
+        self.cfg.when_ready(self)
+
+    def init_signals(self):
+        """\
+        Initialize master signal handling. Most of the signals
+        are queued. Child signals only wake up the master.
+        """
+        # close old PIPE
+        for p in self.PIPE:
+            os.close(p)
+
+        # initialize the pipe
+        self.PIPE = pair = os.pipe()
+        for p in pair:
+            util.set_non_blocking(p)
+            util.close_on_exec(p)
+
+        self.log.close_on_exec()
+
+        # initialize all signals
+        for s in self.SIGNALS:
+            signal.signal(s, self.signal)
+        signal.signal(signal.SIGCHLD, self.handle_chld)
+
+    def signal(self, sig, frame):
+        if len(self.SIG_QUEUE) < 5:
+            self.SIG_QUEUE.append(sig)
+            self.wakeup()
+
+    def run(self):
+        "Main master loop."
+        self.start()
+        util._setproctitle("master [%s]" % self.proc_name)
+
+        try:
+            self.manage_workers()
+
+            while True:
+                self.maybe_promote_master()
+
+                sig = self.SIG_QUEUE.pop(0) if self.SIG_QUEUE else None
+                if sig is None:
+                    self.sleep()
+                    self.murder_workers()
+                    self.manage_workers()
+                    continue
+
+                if sig not in self.SIG_NAMES:
+                    self.log.info("Ignoring unknown signal: %s", sig)
+                    continue
+
+                signame = self.SIG_NAMES.get(sig)
+                handler = getattr(self, "handle_%s" % signame, None)
+                if not handler:
+                    self.log.error("Unhandled signal: %s", signame)
+                    continue
+                self.log.info("Handling signal: %s", signame)
+                handler()
+                self.wakeup()
+        except (StopIteration, KeyboardInterrupt):
+            self.halt()
+        except HaltServer as inst:
+            self.halt(reason=inst.reason, exit_status=inst.exit_status)
+        except SystemExit:
+            raise
+        except Exception:
+            self.log.error("Unhandled exception in main loop",
+                           exc_info=True)
+            self.stop(False)
+            if self.pidfile is not None:
+                self.pidfile.unlink()
+            sys.exit(-1)
+
+    def handle_chld(self, sig, frame):
+        "SIGCHLD handling"
+        self.reap_workers()
+        self.wakeup()
+
+    def handle_hup(self):
+        """\
+        HUP handling.
+        - Reload configuration
+        - Start the new worker processes with a new configuration
+        - Gracefully shutdown the old worker processes
+        """
+        self.log.info("Hang up: %s", self.master_name)
+        self.reload()
+
+    def handle_term(self):
+        "SIGTERM handling"
+        raise StopIteration
+
+    def handle_int(self):
+        "SIGINT handling"
+        self.stop(False)
+        raise StopIteration
+
+    def handle_quit(self):
+        "SIGQUIT handling"
+        self.stop(False)
+        raise StopIteration
+
+    def handle_ttin(self):
+        """\
+        SIGTTIN handling.
+        Increases the number of workers by one.
+        """
+        self.num_workers += 1
+        self.manage_workers()
+
+    def handle_ttou(self):
+        """\
+        SIGTTOU handling.
+        Decreases the number of workers by one.
+        """
+        if self.num_workers <= 1:
+            return
+        self.num_workers -= 1
+        self.manage_workers()
+
+    def handle_usr1(self):
+        """\
+        SIGUSR1 handling.
+        Kill all workers by sending them a SIGUSR1
+        """
+        self.log.reopen_files()
+        self.kill_workers(signal.SIGUSR1)
+
+    def handle_usr2(self):
+        """\
+        SIGUSR2 handling.
+        Creates a new arbiter/worker set as a fork of the current
+        arbiter without affecting old workers. Use this to do live
+        deployment with the ability to backout a change.
+        """
+        self.reexec()
+
+    def handle_winch(self):
+        """SIGWINCH handling"""
+        if self.cfg.daemon:
+            self.log.info("graceful stop of workers")
+            self.num_workers = 0
+            self.kill_workers(signal.SIGTERM)
+        else:
+            self.log.debug("SIGWINCH ignored. Not daemonized")
+
+    def maybe_promote_master(self):
+        if self.master_pid == 0:
+            return
+
+        if self.master_pid != os.getppid():
+            self.log.info("Master has been promoted.")
+            # reset master infos
+            self.master_name = "Master"
+            self.master_pid = 0
+            self.proc_name = self.cfg.proc_name
+            del os.environ['GUNICORN_PID']
+            # rename the pidfile
+            if self.pidfile is not None:
+                self.pidfile.rename(self.cfg.pidfile)
+            # reset proctitle
+            util._setproctitle("master [%s]" % self.proc_name)
+
+    def wakeup(self):
+        """\
+        Wake up the arbiter by writing to the PIPE
+        """
+        try:
+            os.write(self.PIPE[1], b'.')
+        except IOError as e:
+            if e.errno not in [errno.EAGAIN, errno.EINTR]:
+                raise
+
+    def halt(self, reason=None, exit_status=0):
+        """ halt arbiter """
+        self.stop()
+
+        log_func = self.log.info if exit_status == 0 else self.log.error
+        log_func("Shutting down: %s", self.master_name)
+        if reason is not None:
+            log_func("Reason: %s", reason)
+
+        if self.pidfile is not None:
+            self.pidfile.unlink()
+        self.cfg.on_exit(self)
+        sys.exit(exit_status)
+
+    def sleep(self):
+        """\
+        Sleep until PIPE is readable or we timeout.
+        A readable PIPE means a signal occurred.
+        """
+        try:
+            ready = select.select([self.PIPE[0]], [], [], 1.0)
+            if not ready[0]:
+                return
+            while os.read(self.PIPE[0], 1):
+                pass
+        except (select.error, OSError) as e:
+            # TODO: select.error is a subclass of OSError since Python 3.3.
+            error_number = getattr(e, 'errno', e.args[0])
+            if error_number not in [errno.EAGAIN, errno.EINTR]:
+                raise
+        except KeyboardInterrupt:
+            sys.exit()
+
+    def stop(self, graceful=True):
+        """\
+        Stop workers
+
+        :attr graceful: boolean, if True (the default) workers will be
+        killed gracefully (i.e. waiting for the current connection to finish)
+        """
+        unlink = (
+            self.reexec_pid == self.master_pid == 0
+            and not self.systemd
+            and not self.cfg.reuse_port
+        )
+        sock.close_sockets(self.LISTENERS, unlink)
+
+        self.LISTENERS = []
+        sig = signal.SIGTERM
+        if not graceful:
+            sig = signal.SIGQUIT
+        limit = time.time() + self.cfg.graceful_timeout
+        # instruct the workers to exit
+        self.kill_workers(sig)
+        # wait until the graceful timeout
+        while self.WORKERS and time.time() < limit:
+            time.sleep(0.1)
+
+        self.kill_workers(signal.SIGKILL)
+
+    def reexec(self):
+        """\
+        Relaunch the master and workers.
+        """
+        if self.reexec_pid != 0:
+            self.log.warning("USR2 signal ignored. Child exists.")
+            return
+
+        if self.master_pid != 0:
+            self.log.warning("USR2 signal ignored. Parent exists.")
+            return
+
+        master_pid = os.getpid()
+        self.reexec_pid = os.fork()
+        if self.reexec_pid != 0:
+            return
+
+        self.cfg.pre_exec(self)
+
+        environ = self.cfg.env_orig.copy()
+        environ['GUNICORN_PID'] = str(master_pid)
+
+        if self.systemd:
+            environ['LISTEN_PID'] = str(os.getpid())
+            environ['LISTEN_FDS'] = str(len(self.LISTENERS))
+        else:
+            environ['GUNICORN_FD'] = ','.join(
+                str(lnr.fileno()) for lnr in self.LISTENERS)
+
+        os.chdir(self.START_CTX['cwd'])
+
+        # exec the process using the original environment
+        os.execvpe(self.START_CTX[0], self.START_CTX['args'], environ)
+
+    def reload(self):
+        old_address = self.cfg.address
+
+        # reset old environment
+        for k in self.cfg.env:
+            if k in self.cfg.env_orig:
+                # reset the key to the value it had before
+                # we launched gunicorn
+                os.environ[k] = self.cfg.env_orig[k]
+            else:
+                # delete the value set by gunicorn
+                try:
+                    del os.environ[k]
+                except KeyError:
+                    pass
+
+        # reload conf
+        self.app.reload()
+        self.setup(self.app)
+
+        # reopen log files
+        self.log.reopen_files()
+
+        # do we need to change the listeners?
+        if old_address != self.cfg.address:
+            # close all listeners
+            for lnr in self.LISTENERS:
+                lnr.close()
+            # init new listeners
+            self.LISTENERS = sock.create_sockets(self.cfg, self.log)
+            listeners_str = ",".join([str(lnr) for lnr in self.LISTENERS])
+            self.log.info("Listening at: %s", listeners_str)
+
+        # do some actions on reload
+        self.cfg.on_reload(self)
+
+        # unlink pidfile
+        if self.pidfile is not None:
+            self.pidfile.unlink()
+
+        # create new pidfile
+        if self.cfg.pidfile is not None:
+            self.pidfile = Pidfile(self.cfg.pidfile)
+            self.pidfile.create(self.pid)
+
+        # set new proc_name
+        util._setproctitle("master [%s]" % self.proc_name)
+
+        # spawn new workers
+        for _ in range(self.cfg.workers):
+            self.spawn_worker()
+
+        # manage workers
+        self.manage_workers()
+
+    def murder_workers(self):
+        """\
+        Kill unused/idle workers
+        """
+        if not self.timeout:
+            return
+        workers = list(self.WORKERS.items())
+        for (pid, worker) in workers:
+            try:
+                if time.time() - worker.tmp.last_update() <= self.timeout:
+                    continue
+            except (OSError, ValueError):
+                continue
+
+            if not worker.aborted:
+                self.log.critical("WORKER TIMEOUT (pid:%s)", pid)
+                worker.aborted = True
+                self.kill_worker(pid, signal.SIGABRT)
+            else:
+                self.kill_worker(pid, signal.SIGKILL)
+
+    def reap_workers(self):
+        """\
+        Reap workers to avoid zombie processes
+        """
+        try:
+            while True:
+                wpid, status = os.waitpid(-1, os.WNOHANG)
+                if not wpid:
+                    break
+                if self.reexec_pid == wpid:
+                    self.reexec_pid = 0
+                else:
+                    # A worker was terminated. If the termination reason was
+                    # that it could not boot, we shut the arbiter down as well
+                    # to avoid infinite start/stop cycles.
+                    exitcode = status >> 8
+                    if exitcode != 0:
+                        self.log.error('Worker (pid:%s) exited with code %s', wpid, exitcode)
+                    if exitcode == self.WORKER_BOOT_ERROR:
+                        reason = "Worker failed to boot."
+                        raise HaltServer(reason, self.WORKER_BOOT_ERROR)
+                    if exitcode == self.APP_LOAD_ERROR:
+                        reason = "App failed to load."
+                        raise HaltServer(reason, self.APP_LOAD_ERROR)
+
+                    if exitcode > 0:
+                        # If the exit code of the worker is greater than 0,
+                        # let the user know.
+                        self.log.error("Worker (pid:%s) exited with code %s.",
+                                       wpid, exitcode)
+                    elif status > 0:
+                        # If the exit code of the worker is 0 and the status
+                        # is greater than 0, then it was most likely killed
+                        # via a signal.
+                        try:
+                            sig_name = signal.Signals(status).name
+                        except ValueError:
+                            sig_name = "code {}".format(status)
+                        msg = "Worker (pid:{}) was sent {}!".format(
+                            wpid, sig_name)
+
+                        # Additional hint for SIGKILL
+                        if status == signal.SIGKILL:
+                            msg += " Perhaps out of memory?"
+                        self.log.error(msg)
+
+                    worker = self.WORKERS.pop(wpid, None)
+                    if not worker:
+                        continue
+                    worker.tmp.close()
+                    self.cfg.child_exit(self, worker)
+        except OSError as e:
+            if e.errno != errno.ECHILD:
+                raise
+
+    def manage_workers(self):
+        """\
+        Maintain the number of workers by spawning or killing
+        as required.
+        """
+        if len(self.WORKERS) < self.num_workers:
+            self.spawn_workers()
+
+        workers = self.WORKERS.items()
+        workers = sorted(workers, key=lambda w: w[1].age)
+        while len(workers) > self.num_workers:
+            (pid, _) = workers.pop(0)
+            self.kill_worker(pid, signal.SIGTERM)
+
+        active_worker_count = len(workers)
+        if self._last_logged_active_worker_count != active_worker_count:
+            self._last_logged_active_worker_count = active_worker_count
+            self.log.debug("{0} workers".format(active_worker_count),
+                           extra={"metric": "gunicorn.workers",
+                                  "value": active_worker_count,
+                                  "mtype": "gauge"})
+
+    def spawn_worker(self):
+        self.worker_age += 1
+        worker = self.worker_class(self.worker_age, self.pid, self.LISTENERS,
+                                   self.app, self.timeout / 2.0,
+                                   self.cfg, self.log)
+        self.cfg.pre_fork(self, worker)
+        pid = os.fork()
+        if pid != 0:
+            worker.pid = pid
+            self.WORKERS[pid] = worker
+            return pid
+
+        # Do not inherit the temporary files of other workers
+        for sibling in self.WORKERS.values():
+            sibling.tmp.close()
+
+        # Process Child
+        worker.pid = os.getpid()
+        try:
+            util._setproctitle("worker [%s]" % self.proc_name)
+            self.log.info("Booting worker with pid: %s", worker.pid)
+            self.cfg.post_fork(self, worker)
+            worker.init_process()
+            sys.exit(0)
+        except SystemExit:
+            raise
+        except AppImportError as e:
+            self.log.debug("Exception while loading the application",
+                           exc_info=True)
+            print("%s" % e, file=sys.stderr)
+            sys.stderr.flush()
+            sys.exit(self.APP_LOAD_ERROR)
+        except Exception:
+            self.log.exception("Exception in worker process")
+            if not worker.booted:
+                sys.exit(self.WORKER_BOOT_ERROR)
+            sys.exit(-1)
+        finally:
+            self.log.info("Worker exiting (pid: %s)", worker.pid)
+            try:
+                worker.tmp.close()
+                self.cfg.worker_exit(self, worker)
+            except Exception:
+                self.log.warning("Exception during worker exit:\n%s",
+                                 traceback.format_exc())
+
+    def spawn_workers(self):
+        """\
+        Spawn new workers as needed.
+
+        This is where a worker process leaves the main loop
+        of the master process.
+        """
+
+        for _ in range(self.num_workers - len(self.WORKERS)):
+            self.spawn_worker()
+            time.sleep(0.1 * random.random())
+
+    def kill_workers(self, sig):
+        """\
+        Kill all workers with the signal `sig`
+        :attr sig: `signal.SIG*` value
+        """
+        worker_pids = list(self.WORKERS.keys())
+        for pid in worker_pids:
+            self.kill_worker(pid, sig)
+
+    def kill_worker(self, pid, sig):
+        """\
+        Kill a worker
+
+        :attr pid: int, worker pid
+        :attr sig: `signal.SIG*` value
+         """
+        try:
+            os.kill(pid, sig)
+        except OSError as e:
+            if e.errno == errno.ESRCH:
+                try:
+                    worker = self.WORKERS.pop(pid)
+                    worker.tmp.close()
+                    self.cfg.worker_exit(self, worker)
+                    return
+                except (KeyError, OSError):
+                    return
+            raise
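
The cfg.* calls sprinkled through the arbiter (on_starting, when_ready, nworkers_changed, ...) are server hooks supplied through the configuration file; a hedged sketch wiring two of them (the log messages are illustrative):

    # gunicorn.conf.py
    def on_starting(server):
        # Called from Arbiter.start() before the listeners are created.
        server.log.info("arbiter starting")

    def nworkers_changed(server, new_value, old_value):
        # Called from the num_workers property setter whenever TTIN/TTOU
        # or a reload changes the worker count.
        server.log.info("workers: %s -> %s", old_value, new_value)
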
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/config.py b/.venv/lib/python3.12/site-packages/gunicorn/config.py
new file mode 100644
index 00000000..84e7619e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/config.py
@@ -0,0 +1,2258 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+# Please remember to run "make -C docs html" after update "desc" attributes.
+
+import argparse
+import copy
+import grp
+import inspect
+import os
+import pwd
+import re
+import shlex
+import ssl
+import sys
+import textwrap
+
+from gunicorn import __version__, util
+from gunicorn.errors import ConfigError
+from gunicorn.reloader import reloader_engines
+
+KNOWN_SETTINGS = []
+PLATFORM = sys.platform
+
+
+def make_settings(ignore=None):
+    settings = {}
+    ignore = ignore or ()
+    for s in KNOWN_SETTINGS:
+        setting = s()
+        if setting.name in ignore:
+            continue
+        settings[setting.name] = setting.copy()
+    return settings
+
+
+def auto_int(_, x):
+    # for compatibility with octal numbers in Python 3
+    if re.match(r'0(\d)', x, re.IGNORECASE):
+        x = x.replace('0', '0o', 1)
+    return int(x, 0)
+
+
+class Config(object):
+
+    def __init__(self, usage=None, prog=None):
+        self.settings = make_settings()
+        self.usage = usage
+        self.prog = prog or os.path.basename(sys.argv[0])
+        self.env_orig = os.environ.copy()
+
+    def __str__(self):
+        lines = []
+        kmax = max(len(k) for k in self.settings)
+        for k in sorted(self.settings):
+            v = self.settings[k].value
+            if callable(v):
+                v = "<{}()>".format(v.__qualname__)
+            lines.append("{k:{kmax}} = {v}".format(k=k, v=v, kmax=kmax))
+        return "\n".join(lines)
+
+    def __getattr__(self, name):
+        if name not in self.settings:
+            raise AttributeError("No configuration setting for: %s" % name)
+        return self.settings[name].get()
+
+    def __setattr__(self, name, value):
+        if name != "settings" and name in self.settings:
+            raise AttributeError("Invalid access!")
+        super().__setattr__(name, value)
+
+    def set(self, name, value):
+        if name not in self.settings:
+            raise AttributeError("No configuration setting for: %s" % name)
+        self.settings[name].set(value)
+
+    def get_cmd_args_from_env(self):
+        if 'GUNICORN_CMD_ARGS' in self.env_orig:
+            return shlex.split(self.env_orig['GUNICORN_CMD_ARGS'])
+        return []
+
+    def parser(self):
+        kwargs = {
+            "usage": self.usage,
+            "prog": self.prog
+        }
+        parser = argparse.ArgumentParser(**kwargs)
+        parser.add_argument("-v", "--version",
+                            action="version", default=argparse.SUPPRESS,
+                            version="%(prog)s (version " + __version__ + ")\n",
+                            help="show program's version number and exit")
+        parser.add_argument("args", nargs="*", help=argparse.SUPPRESS)
+
+        keys = sorted(self.settings, key=self.settings.__getitem__)
+        for k in keys:
+            self.settings[k].add_option(parser)
+
+        return parser
+
+    @property
+    def worker_class_str(self):
+        uri = self.settings['worker_class'].get()
+
+        # are we using a threaded worker?
+        is_sync = uri.endswith('SyncWorker') or uri == 'sync'
+        if is_sync and self.threads > 1:
+            return "gthread"
+        return uri
+
+    @property
+    def worker_class(self):
+        uri = self.settings['worker_class'].get()
+
+        # are we using a threaded worker?
+        is_sync = uri.endswith('SyncWorker') or uri == 'sync'
+        if is_sync and self.threads > 1:
+            uri = "gunicorn.workers.gthread.ThreadWorker"
+
+        worker_class = util.load_class(uri)
+        if hasattr(worker_class, "setup"):
+            worker_class.setup()
+        return worker_class
+
+    @property
+    def address(self):
+        s = self.settings['bind'].get()
+        return [util.parse_address(util.bytes_to_str(bind)) for bind in s]
+
+    @property
+    def uid(self):
+        return self.settings['user'].get()
+
+    @property
+    def gid(self):
+        return self.settings['group'].get()
+
+    @property
+    def proc_name(self):
+        pn = self.settings['proc_name'].get()
+        if pn is not None:
+            return pn
+        else:
+            return self.settings['default_proc_name'].get()
+
+    @property
+    def logger_class(self):
+        uri = self.settings['logger_class'].get()
+        if uri == "simple":
+            # support the default
+            uri = LoggerClass.default
+
+        # if default logger is in use, and statsd is on, automagically switch
+        # to the statsd logger
+        if uri == LoggerClass.default:
+            if 'statsd_host' in self.settings and self.settings['statsd_host'].value is not None:
+                uri = "gunicorn.instrument.statsd.Statsd"
+
+        logger_class = util.load_class(
+            uri,
+            default="gunicorn.glogging.Logger",
+            section="gunicorn.loggers")
+
+        if hasattr(logger_class, "install"):
+            logger_class.install()
+        return logger_class
+
+    @property
+    def is_ssl(self):
+        return self.certfile or self.keyfile
+
+    @property
+    def ssl_options(self):
+        opts = {}
+        for name, value in self.settings.items():
+            if value.section == 'SSL':
+                opts[name] = value.get()
+        return opts
+
+    @property
+    def env(self):
+        raw_env = self.settings['raw_env'].get()
+        env = {}
+
+        if not raw_env:
+            return env
+
+        for e in raw_env:
+            s = util.bytes_to_str(e)
+            try:
+                k, v = s.split('=', 1)
+            except ValueError:
+                raise RuntimeError("environment setting %r invalid" % s)
+
+            env[k] = v
+
+        return env
+
+    @property
+    def sendfile(self):
+        if self.settings['sendfile'].get() is not None:
+            return False
+
+        if 'SENDFILE' in os.environ:
+            sendfile = os.environ['SENDFILE'].lower()
+            return sendfile in ['y', '1', 'yes', 'true']
+
+        return True
+
+    @property
+    def reuse_port(self):
+        return self.settings['reuse_port'].get()
+
+    @property
+    def paste_global_conf(self):
+        raw_global_conf = self.settings['raw_paste_global_conf'].get()
+        if raw_global_conf is None:
+            return None
+
+        global_conf = {}
+        for e in raw_global_conf:
+            s = util.bytes_to_str(e)
+            try:
+                k, v = re.split(r'(?<!\\)=', s, 1)
+            except ValueError:
+                raise RuntimeError("environment setting %r invalid" % s)
+            k = k.replace('\\=', '=')
+            v = v.replace('\\=', '=')
+            global_conf[k] = v
+
+        return global_conf
+
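
Config resolves attribute reads through __getattr__, so cfg.<name> returns settings[<name>].get(), and the properties above layer derived values on top; a minimal sketch of programmatic use:

    from gunicorn.config import Config

    cfg = Config()
    cfg.set("workers", 4)
    print(cfg.workers)  # 4
    print(cfg.address)  # [("127.0.0.1", 8000)] with the default bind
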
+
+class SettingMeta(type):
+    def __new__(cls, name, bases, attrs):
+        super_new = super().__new__
+        parents = [b for b in bases if isinstance(b, SettingMeta)]
+        if not parents:
+            return super_new(cls, name, bases, attrs)
+
+        attrs["order"] = len(KNOWN_SETTINGS)
+        attrs["validator"] = staticmethod(attrs["validator"])
+
+        new_class = super_new(cls, name, bases, attrs)
+        new_class.fmt_desc(attrs.get("desc", ""))
+        KNOWN_SETTINGS.append(new_class)
+        return new_class
+
+    def fmt_desc(cls, desc):
+        desc = textwrap.dedent(desc).strip()
+        setattr(cls, "desc", desc)
+        setattr(cls, "short", desc.splitlines()[0])
+
+
+class Setting(object):
+    name = None
+    value = None
+    section = None
+    cli = None
+    validator = None
+    type = None
+    meta = None
+    action = None
+    default = None
+    short = None
+    desc = None
+    nargs = None
+    const = None
+
+    def __init__(self):
+        if self.default is not None:
+            self.set(self.default)
+
+    def add_option(self, parser):
+        if not self.cli:
+            return
+        args = tuple(self.cli)
+
+        help_txt = "%s [%s]" % (self.short, self.default)
+        help_txt = help_txt.replace("%", "%%")
+
+        kwargs = {
+            "dest": self.name,
+            "action": self.action or "store",
+            "type": self.type or str,
+            "default": None,
+            "help": help_txt
+        }
+
+        if self.meta is not None:
+            kwargs['metavar'] = self.meta
+
+        if kwargs["action"] != "store":
+            kwargs.pop("type")
+
+        if self.nargs is not None:
+            kwargs["nargs"] = self.nargs
+
+        if self.const is not None:
+            kwargs["const"] = self.const
+
+        parser.add_argument(*args, **kwargs)
+
+    def copy(self):
+        return copy.copy(self)
+
+    def get(self):
+        return self.value
+
+    def set(self, val):
+        if not callable(self.validator):
+            raise TypeError('Invalid validator: %s' % self.name)
+        self.value = self.validator(val)
+
+    def __lt__(self, other):
+        return (self.section == other.section and
+                self.order < other.order)
+    __cmp__ = __lt__
+
+    def __repr__(self):
+        return "<%s.%s object at %x with value %r>" % (
+            self.__class__.__module__,
+            self.__class__.__name__,
+            id(self),
+            self.value,
+        )
+
+
+Setting = SettingMeta('Setting', (Setting,), {})
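
Because SettingMeta appends every concrete subclass to KNOWN_SETTINGS, defining a subclass of Setting is all it takes to register a new option; a hedged sketch (this flag is hypothetical, not a real Gunicorn setting, and only Config objects created after the definition will see it):

    from gunicorn.config import Setting, validate_bool

    class ExampleFlag(Setting):
        # Hypothetical option, shown only to illustrate auto-registration.
        name = "example_flag"
        section = "Debugging"
        cli = ["--example-flag"]
        validator = validate_bool
        action = "store_true"
        default = False
        desc = """\
            A hypothetical boolean flag illustrating SettingMeta registration.
            """
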
+
+
+def validate_bool(val):
+    if val is None:
+        return
+
+    if isinstance(val, bool):
+        return val
+    if not isinstance(val, str):
+        raise TypeError("Invalid type for casting: %s" % val)
+    if val.lower().strip() == "true":
+        return True
+    elif val.lower().strip() == "false":
+        return False
+    else:
+        raise ValueError("Invalid boolean: %s" % val)
+
+
+def validate_dict(val):
+    if not isinstance(val, dict):
+        raise TypeError("Value is not a dictionary: %s " % val)
+    return val
+
+
+def validate_pos_int(val):
+    if not isinstance(val, int):
+        val = int(val, 0)
+    else:
+        # Booleans are ints!
+        val = int(val)
+    if val < 0:
+        raise ValueError("Value must be positive: %s" % val)
+    return val
+
+
+def validate_ssl_version(val):
+    if val != SSLVersion.default:
+        sys.stderr.write("Warning: option `ssl_version` is deprecated and it is ignored. Use ssl_context instead.\n")
+    return val
+
+
+def validate_string(val):
+    if val is None:
+        return None
+    if not isinstance(val, str):
+        raise TypeError("Not a string: %s" % val)
+    return val.strip()
+
+
+def validate_file_exists(val):
+    if val is None:
+        return None
+    if not os.path.exists(val):
+        raise ValueError("File %s does not exists." % val)
+    return val
+
+
+def validate_list_string(val):
+    if not val:
+        return []
+
+    # legacy syntax
+    if isinstance(val, str):
+        val = [val]
+
+    return [validate_string(v) for v in val]
+
+
+def validate_list_of_existing_files(val):
+    return [validate_file_exists(v) for v in validate_list_string(val)]
+
+
+def validate_string_to_list(val):
+    val = validate_string(val)
+
+    if not val:
+        return []
+
+    return [v.strip() for v in val.split(",") if v]
+
+
+def validate_class(val):
+    if inspect.isfunction(val) or inspect.ismethod(val):
+        val = val()
+    if inspect.isclass(val):
+        return val
+    return validate_string(val)
+
+
+def validate_callable(arity):
+    def _validate_callable(val):
+        if isinstance(val, str):
+            try:
+                mod_name, obj_name = val.rsplit(".", 1)
+            except ValueError:
+                raise TypeError("Value '%s' is not import string. "
+                                "Format: module[.submodules...].object" % val)
+            try:
+                mod = __import__(mod_name, fromlist=[obj_name])
+                val = getattr(mod, obj_name)
+            except ImportError as e:
+                raise TypeError(str(e))
+            except AttributeError:
+                raise TypeError("Can not load '%s' from '%s'"
+                                "" % (obj_name, mod_name))
+        if not callable(val):
+            raise TypeError("Value is not callable: %s" % val)
+        if arity != -1 and arity != util.get_arity(val):
+            raise TypeError("Value must have an arity of: %s" % arity)
+        return val
+    return _validate_callable
+
+
+def validate_user(val):
+    if val is None:
+        return os.geteuid()
+    if isinstance(val, int):
+        return val
+    elif val.isdigit():
+        return int(val)
+    else:
+        try:
+            return pwd.getpwnam(val).pw_uid
+        except KeyError:
+            raise ConfigError("No such user: '%s'" % val)
+
+
+def validate_group(val):
+    if val is None:
+        return os.getegid()
+
+    if isinstance(val, int):
+        return val
+    elif val.isdigit():
+        return int(val)
+    else:
+        try:
+            return grp.getgrnam(val).gr_gid
+        except KeyError:
+            raise ConfigError("No such group: '%s'" % val)
+
+
+def validate_post_request(val):
+    val = validate_callable(-1)(val)
+
+    largs = util.get_arity(val)
+    if largs == 4:
+        return val
+    elif largs == 3:
+        return lambda worker, req, env, _r: val(worker, req, env)
+    elif largs == 2:
+        return lambda worker, req, _e, _r: val(worker, req)
+    else:
+        raise TypeError("Value must have an arity of: 4")
+
+
+def validate_chdir(val):
+    # valid if the value is a string
+    val = validate_string(val)
+
+    # transform relative paths
+    path = os.path.abspath(os.path.normpath(os.path.join(util.getcwd(), val)))
+
+    # test if the path exists
+    if not os.path.exists(path):
+        raise ConfigError("can't chdir to %r" % val)
+
+    return path
+
+
+def validate_statsd_address(val):
+    val = validate_string(val)
+    if val is None:
+        return None
+
+    # As of major release 20, util.parse_address would recognize unix:PORT
+    # as a UDS address, breaking backwards compatibility. We defend against
+    # that regression here (this is also unit-tested).
+    # Feel free to remove in the next major release.
+    unix_hostname_regression = re.match(r'^unix:(\d+)$', val)
+    if unix_hostname_regression:
+        return ('unix', int(unix_hostname_regression.group(1)))
+
+    try:
+        address = util.parse_address(val, default_port='8125')
+    except RuntimeError:
+        raise TypeError("Value must be one of ('host:port', 'unix://PATH')")
+
+    return address
+
+
+def validate_reload_engine(val):
+    if val not in reloader_engines:
+        raise ConfigError("Invalid reload_engine: %r" % val)
+
+    return val
+
+
+def get_default_config_file():
+    config_path = os.path.join(os.path.abspath(os.getcwd()),
+                               'gunicorn.conf.py')
+    if os.path.exists(config_path):
+        return config_path
+    return None
+
+
+class ConfigFile(Setting):
+    name = "config"
+    section = "Config File"
+    cli = ["-c", "--config"]
+    meta = "CONFIG"
+    validator = validate_string
+    default = "./gunicorn.conf.py"
+    desc = """\
+        :ref:`The Gunicorn config file<configuration_file>`.
+
+        A string of the form ``PATH``, ``file:PATH``, or ``python:MODULE_NAME``.
+
+        Only has an effect when specified on the command line or as part of an
+        application specific configuration.
+
+        By default, a file named ``gunicorn.conf.py`` will be read from the same
+        directory where gunicorn is being run.
+
+        .. versionchanged:: 19.4
+           Loading the config from a Python module requires the ``python:``
+           prefix.
+        """
+
+
+class WSGIApp(Setting):
+    name = "wsgi_app"
+    section = "Config File"
+    meta = "STRING"
+    validator = validate_string
+    default = None
+    desc = """\
+        A WSGI application path in pattern ``$(MODULE_NAME):$(VARIABLE_NAME)``.
+
+        .. versionadded:: 20.1.0
+        """
+
+
+class Bind(Setting):
+    name = "bind"
+    action = "append"
+    section = "Server Socket"
+    cli = ["-b", "--bind"]
+    meta = "ADDRESS"
+    validator = validate_list_string
+
+    if 'PORT' in os.environ:
+        default = ['0.0.0.0:{0}'.format(os.environ.get('PORT'))]
+    else:
+        default = ['127.0.0.1:8000']
+
+    desc = """\
+        The socket to bind.
+
+        A string of the form: ``HOST``, ``HOST:PORT``, ``unix:PATH``,
+        ``fd://FD``. An IP is a valid ``HOST``.
+
+        .. versionchanged:: 20.0
+           Support for ``fd://FD`` got added.
+
+        Multiple addresses can be bound. ex.::
+
+            $ gunicorn -b 127.0.0.1:8000 -b [::1]:8000 test:app
+
+        will bind the `test:app` application on localhost both on ipv6
+        and ipv4 interfaces.
+
+        If the ``PORT`` environment variable is defined, the default
+        is ``['0.0.0.0:$PORT']``. If it is not defined, the default
+        is ``['127.0.0.1:8000']``.
+        """
+
+
+class Backlog(Setting):
+    name = "backlog"
+    section = "Server Socket"
+    cli = ["--backlog"]
+    meta = "INT"
+    validator = validate_pos_int
+    type = int
+    default = 2048
+    desc = """\
+        The maximum number of pending connections.
+
+        This refers to the number of clients that can be waiting to be served.
+        Exceeding this number results in the client getting an error when
+        attempting to connect. It should only affect servers under significant
+        load.
+
+        Must be a positive integer. Generally set in the 64-2048 range.
+        """
+
+
+class Workers(Setting):
+    name = "workers"
+    section = "Worker Processes"
+    cli = ["-w", "--workers"]
+    meta = "INT"
+    validator = validate_pos_int
+    type = int
+    default = int(os.environ.get("WEB_CONCURRENCY", 1))
+    desc = """\
+        The number of worker processes for handling requests.
+
+        A positive integer generally in the ``2-4 x $(NUM_CORES)`` range.
+        You'll want to vary this a bit to find the best for your particular
+        application's work load.
+
+        By default, the value of the ``WEB_CONCURRENCY`` environment variable,
+        which is set by some Platform-as-a-Service providers such as Heroku. If
+        it is not defined, the default is ``1``.
+        """
+
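+
+# Editor's sketch (assumption, not upstream): the common "2 x cores + 1"
+# starting point for the workers setting suggested by the guidance above.
+def _editor_demo_worker_count():
+    import multiprocessing
+    return multiprocessing.cpu_count() * 2 + 1
+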
+
+class WorkerClass(Setting):
+    name = "worker_class"
+    section = "Worker Processes"
+    cli = ["-k", "--worker-class"]
+    meta = "STRING"
+    validator = validate_class
+    default = "sync"
+    desc = """\
+        The type of workers to use.
+
+        The default class (``sync``) should handle most "normal" types of
+        workloads. You'll want to read :doc:`design` for information on when
+        you might want to choose one of the other worker classes. Required
+        libraries may be installed using setuptools' ``extras_require`` feature.
+
+        A string referring to one of the following bundled classes:
+
+        * ``sync``
+        * ``eventlet`` - Requires eventlet >= 0.24.1 (or install it via
+          ``pip install gunicorn[eventlet]``)
+        * ``gevent``   - Requires gevent >= 1.4 (or install it via
+          ``pip install gunicorn[gevent]``)
+        * ``tornado``  - Requires tornado >= 0.2 (or install it via
+          ``pip install gunicorn[tornado]``)
+        * ``gthread``  - Python 2 requires the futures package to be installed
+          (or install it via ``pip install gunicorn[gthread]``)
+
+        Optionally, you can provide your own worker by giving Gunicorn a
+        Python path to a subclass of ``gunicorn.workers.base.Worker``.
+        This alternative syntax will load the gevent class:
+        ``gunicorn.workers.ggevent.GeventWorker``.
+        """
+
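+
+# Editor's example (assumption, not upstream): equivalent ways to select the
+# gevent worker from a configuration file.
+#
+#   worker_class = "gevent"
+#   worker_class = "gunicorn.workers.ggevent.GeventWorker"
+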
+
+class WorkerThreads(Setting):
+    name = "threads"
+    section = "Worker Processes"
+    cli = ["--threads"]
+    meta = "INT"
+    validator = validate_pos_int
+    type = int
+    default = 1
+    desc = """\
+        The number of worker threads for handling requests.
+
+        Run each worker with the specified number of threads.
+
+        A positive integer generally in the ``2-4 x $(NUM_CORES)`` range.
+        You'll want to vary this a bit to find the best for your particular
+        application's work load.
+
+        If it is not defined, the default is ``1``.
+
+        This setting only affects the Gthread worker type.
+
+        .. note::
+           If you try to use the ``sync`` worker type and set the ``threads``
+           setting to more than 1, the ``gthread`` worker type will be used
+           instead.
+        """
+
+
+class WorkerConnections(Setting):
+    name = "worker_connections"
+    section = "Worker Processes"
+    cli = ["--worker-connections"]
+    meta = "INT"
+    validator = validate_pos_int
+    type = int
+    default = 1000
+    desc = """\
+        The maximum number of simultaneous clients.
+
+        This setting only affects the ``gthread``, ``eventlet`` and ``gevent`` worker types.
+        """
+
+
+class MaxRequests(Setting):
+    name = "max_requests"
+    section = "Worker Processes"
+    cli = ["--max-requests"]
+    meta = "INT"
+    validator = validate_pos_int
+    type = int
+    default = 0
+    desc = """\
+        The maximum number of requests a worker will process before restarting.
+
+        Any value greater than zero will limit the number of requests a worker
+        will process before automatically restarting. This is a simple method
+        to help limit the damage of memory leaks.
+
+        If this is set to zero (the default) then the automatic worker
+        restarts are disabled.
+        """
+
+
+class MaxRequestsJitter(Setting):
+    name = "max_requests_jitter"
+    section = "Worker Processes"
+    cli = ["--max-requests-jitter"]
+    meta = "INT"
+    validator = validate_pos_int
+    type = int
+    default = 0
+    desc = """\
+        The maximum jitter to add to the *max_requests* setting.
+
+        The jitter causes the restart per worker to be randomized by
+        ``randint(0, max_requests_jitter)``. This is intended to stagger worker
+        restarts to avoid all workers restarting at the same time.
+
+        .. versionadded:: 19.2
+        """
+
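+
+# Editor's sketch (not upstream): the per-worker restart threshold implied by
+# the description above -- max_requests plus a random jitter.
+def _editor_demo_restart_threshold(max_requests, max_requests_jitter):
+    import random
+    return max_requests + random.randint(0, max_requests_jitter)
+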
+
+class Timeout(Setting):
+    name = "timeout"
+    section = "Worker Processes"
+    cli = ["-t", "--timeout"]
+    meta = "INT"
+    validator = validate_pos_int
+    type = int
+    default = 30
+    desc = """\
+        Workers silent for more than this many seconds are killed and restarted.
+
+        Value is a positive number or 0. Setting it to 0 has the effect of
+        infinite timeouts by disabling timeouts for all workers entirely.
+
+        Generally, the default of thirty seconds should suffice. Only set this
+        noticeably higher if you're sure of the repercussions for sync workers.
+        For non-sync workers it only means that the worker process is still
+        communicating; it is not tied to the length of time required to handle
+        a single request.
+        """
+
+
+class GracefulTimeout(Setting):
+    name = "graceful_timeout"
+    section = "Worker Processes"
+    cli = ["--graceful-timeout"]
+    meta = "INT"
+    validator = validate_pos_int
+    type = int
+    default = 30
+    desc = """\
+        Timeout for graceful workers restart.
+
+        After receiving a restart signal, workers have this much time to finish
+        serving requests. Workers still alive after the timeout (starting from
+        the receipt of the restart signal) are force killed.
+        """
+
+
+class Keepalive(Setting):
+    name = "keepalive"
+    section = "Worker Processes"
+    cli = ["--keep-alive"]
+    meta = "INT"
+    validator = validate_pos_int
+    type = int
+    default = 2
+    desc = """\
+        The number of seconds to wait for requests on a Keep-Alive connection.
+
+        Generally set in the 1-5 seconds range for servers with direct connection
+        to the client (e.g. when you don't have separate load balancer). When
+        Gunicorn is deployed behind a load balancer, it often makes sense to
+        set this to a higher value.
+
+        .. note::
+           ``sync`` worker does not support persistent connections and will
+           ignore this option.
+        """
+
+
+class LimitRequestLine(Setting):
+    name = "limit_request_line"
+    section = "Security"
+    cli = ["--limit-request-line"]
+    meta = "INT"
+    validator = validate_pos_int
+    type = int
+    default = 4094
+    desc = """\
+        The maximum size of HTTP request line in bytes.
+
+        This parameter is used to limit the allowed size of a client's
+        HTTP request-line. Since the request-line consists of the HTTP
+        method, URI, and protocol version, this directive places a
+        restriction on the length of a request-URI allowed for a request
+        on the server. A server needs this value to be large enough to
+        hold any of its resource names, including any information that
+        might be passed in the query part of a GET request. Value is a number
+        from 0 (unlimited) to 8190.
+
+        This parameter can be used to help mitigate DDoS attacks.
+        """
+
+
+class LimitRequestFields(Setting):
+    name = "limit_request_fields"
+    section = "Security"
+    cli = ["--limit-request-fields"]
+    meta = "INT"
+    validator = validate_pos_int
+    type = int
+    default = 100
+    desc = """\
+        Limit the number of HTTP headers fields in a request.
+
+        This parameter is used to limit the number of headers in a request to
+        help mitigate DDoS attacks. Used together with
+        *limit_request_field_size*, it provides additional safety. By default
+        this value is 100 and can't be larger than 32768.
+        """
+
+
+class LimitRequestFieldSize(Setting):
+    name = "limit_request_field_size"
+    section = "Security"
+    cli = ["--limit-request-field_size"]
+    meta = "INT"
+    validator = validate_pos_int
+    type = int
+    default = 8190
+    desc = """\
+        Limit the allowed size of an HTTP request header field.
+
+        Value is a positive number or 0. Setting it to 0 will allow unlimited
+        header field sizes.
+
+        .. warning::
+           Setting this parameter to a very high or unlimited value can open
+           the door to DDoS attacks.
+        """
+
+
+class Reload(Setting):
+    name = "reload"
+    section = 'Debugging'
+    cli = ['--reload']
+    validator = validate_bool
+    action = 'store_true'
+    default = False
+
+    desc = '''\
+        Restart workers when code changes.
+
+        This setting is intended for development. It will cause workers to be
+        restarted whenever application code changes.
+
+        The reloader is incompatible with application preloading. When using a
+        paste configuration be sure that the server block does not import any
+        application code or the reload will not work as designed.
+
+        The default behavior is to attempt inotify with a fallback to file
+        system polling. Generally, inotify should be preferred if available
+        because it consumes less system resources.
+
+        .. note::
+           In order to use the inotify reloader, you must have the ``inotify``
+           package installed.
+        '''
+
+
+class ReloadEngine(Setting):
+    name = "reload_engine"
+    section = "Debugging"
+    cli = ["--reload-engine"]
+    meta = "STRING"
+    validator = validate_reload_engine
+    default = "auto"
+    desc = """\
+        The implementation that should be used to power :ref:`reload`.
+
+        Valid engines are:
+
+        * ``'auto'``
+        * ``'poll'``
+        * ``'inotify'`` (requires inotify)
+
+        .. versionadded:: 19.7
+        """
+
+
+class ReloadExtraFiles(Setting):
+    name = "reload_extra_files"
+    action = "append"
+    section = "Debugging"
+    cli = ["--reload-extra-file"]
+    meta = "FILES"
+    validator = validate_list_of_existing_files
+    default = []
+    desc = """\
+        Extends :ref:`reload` option to also watch and reload on additional files
+        (e.g., templates, configurations, specifications, etc.).
+
+        .. versionadded:: 19.8
+        """
+
+
+class Spew(Setting):
+    name = "spew"
+    section = "Debugging"
+    cli = ["--spew"]
+    validator = validate_bool
+    action = "store_true"
+    default = False
+    desc = """\
+        Install a trace function that spews every line executed by the server.
+
+        This is the nuclear option.
+        """
+
+
+class ConfigCheck(Setting):
+    name = "check_config"
+    section = "Debugging"
+    cli = ["--check-config"]
+    validator = validate_bool
+    action = "store_true"
+    default = False
+    desc = """\
+        Check the configuration and exit. The exit status is 0 if the
+        configuration is correct, and 1 if the configuration is incorrect.
+        """
+
+
+class PrintConfig(Setting):
+    name = "print_config"
+    section = "Debugging"
+    cli = ["--print-config"]
+    validator = validate_bool
+    action = "store_true"
+    default = False
+    desc = """\
+        Print the configuration settings as fully resolved. Implies :ref:`check-config`.
+        """
+
+
+class PreloadApp(Setting):
+    name = "preload_app"
+    section = "Server Mechanics"
+    cli = ["--preload"]
+    validator = validate_bool
+    action = "store_true"
+    default = False
+    desc = """\
+        Load application code before the worker processes are forked.
+
+        By preloading an application you can save some RAM resources as well
+        as speed up server boot times. If you instead defer application
+        loading to each worker process, you can reload your application code
+        easily by restarting workers.
+        """
+
+
+class Sendfile(Setting):
+    name = "sendfile"
+    section = "Server Mechanics"
+    cli = ["--no-sendfile"]
+    validator = validate_bool
+    action = "store_const"
+    const = False
+
+    desc = """\
+        Disables the use of ``sendfile()``.
+
+        If not set, the value of the ``SENDFILE`` environment variable is used
+        to enable or disable its usage.
+
+        .. versionadded:: 19.2
+        .. versionchanged:: 19.4
+           Swapped ``--sendfile`` with ``--no-sendfile`` to actually allow
+           disabling.
+        .. versionchanged:: 19.6
+           added support for the ``SENDFILE`` environment variable
+        """
+
+
+class ReusePort(Setting):
+    name = "reuse_port"
+    section = "Server Mechanics"
+    cli = ["--reuse-port"]
+    validator = validate_bool
+    action = "store_true"
+    default = False
+
+    desc = """\
+        Set the ``SO_REUSEPORT`` flag on the listening socket.
+
+        .. versionadded:: 19.8
+        """
+
+
+class Chdir(Setting):
+    name = "chdir"
+    section = "Server Mechanics"
+    cli = ["--chdir"]
+    validator = validate_chdir
+    default = util.getcwd()
+    default_doc = "``'.'``"
+    desc = """\
+        Change directory to specified directory before loading apps.
+        """
+
+
+class Daemon(Setting):
+    name = "daemon"
+    section = "Server Mechanics"
+    cli = ["-D", "--daemon"]
+    validator = validate_bool
+    action = "store_true"
+    default = False
+    desc = """\
+        Daemonize the Gunicorn process.
+
+        Detaches the server from the controlling terminal and enters the
+        background.
+        """
+
+
+class Env(Setting):
+    name = "raw_env"
+    action = "append"
+    section = "Server Mechanics"
+    cli = ["-e", "--env"]
+    meta = "ENV"
+    validator = validate_list_string
+    default = []
+
+    desc = """\
+        Set environment variables in the execution environment.
+
+        Should be a list of strings in the ``key=value`` format.
+
+        For example on the command line:
+
+        .. code-block:: console
+
+            $ gunicorn -b 127.0.0.1:8000 --env FOO=1 test:app
+
+        Or in the configuration file:
+
+        .. code-block:: python
+
+            raw_env = ["FOO=1"]
+        """
+
+
+class Pidfile(Setting):
+    name = "pidfile"
+    section = "Server Mechanics"
+    cli = ["-p", "--pid"]
+    meta = "FILE"
+    validator = validate_string
+    default = None
+    desc = """\
+        A filename to use for the PID file.
+
+        If not set, no PID file will be written.
+        """
+
+
+class WorkerTmpDir(Setting):
+    name = "worker_tmp_dir"
+    section = "Server Mechanics"
+    cli = ["--worker-tmp-dir"]
+    meta = "DIR"
+    validator = validate_string
+    default = None
+    desc = """\
+        A directory to use for the worker heartbeat temporary file.
+
+        If not set, the default temporary directory will be used.
+
+        .. note::
+           The current heartbeat system involves calling ``os.fchmod`` on
+           temporary file handlers and may block a worker for arbitrary time
+           if the directory is on a disk-backed filesystem.
+
+           See :ref:`blocking-os-fchmod` for more detailed information
+           and a solution for avoiding this problem.
+        """
+
+
+class User(Setting):
+    name = "user"
+    section = "Server Mechanics"
+    cli = ["-u", "--user"]
+    meta = "USER"
+    validator = validate_user
+    default = os.geteuid()
+    default_doc = "``os.geteuid()``"
+    desc = """\
+        Switch worker processes to run as this user.
+
+        A valid user id (as an integer) or the name of a user that can be
+        retrieved with a call to ``pwd.getpwnam(value)`` or ``None`` to not
+        change the worker process user.
+        """
+
+
+class Group(Setting):
+    name = "group"
+    section = "Server Mechanics"
+    cli = ["-g", "--group"]
+    meta = "GROUP"
+    validator = validate_group
+    default = os.getegid()
+    default_doc = "``os.getegid()``"
+    desc = """\
+        Switch worker process to run as this group.
+
+        A valid group id (as an integer) or the name of a group that can be
+        retrieved with a call to ``grp.getgrnam(value)`` or ``None`` to not
+        change the worker processes group.
+        """
+
+
+class Umask(Setting):
+    name = "umask"
+    section = "Server Mechanics"
+    cli = ["-m", "--umask"]
+    meta = "INT"
+    validator = validate_pos_int
+    type = auto_int
+    default = 0
+    desc = """\
+        A bit mask for the file mode on files written by Gunicorn.
+
+        Note that this affects unix socket permissions.
+
+        A valid value for the ``os.umask(mode)`` call or a string compatible
+        with ``int(value, 0)`` (``0`` means Python guesses the base, so values
+        like ``0``, ``0xFF``, ``0022`` are valid for decimal, hex, and octal
+        representations)
+        """
+
+
+class Initgroups(Setting):
+    name = "initgroups"
+    section = "Server Mechanics"
+    cli = ["--initgroups"]
+    validator = validate_bool
+    action = 'store_true'
+    default = False
+
+    desc = """\
+        If true, set the worker process's group access list with all of the
+        groups of which the specified username is a member, plus the specified
+        group id.
+
+        .. versionadded:: 19.7
+        """
+
+
+class TmpUploadDir(Setting):
+    name = "tmp_upload_dir"
+    section = "Server Mechanics"
+    meta = "DIR"
+    validator = validate_string
+    default = None
+    desc = """\
+        Directory to store temporary request data as they are read.
+
+        This may disappear in the near future.
+
+        This path should be writable by the process permissions set for Gunicorn
+        workers. If not specified, Gunicorn will choose a system generated
+        temporary directory.
+        """
+
+
+class SecureSchemeHeader(Setting):
+    name = "secure_scheme_headers"
+    section = "Server Mechanics"
+    validator = validate_dict
+    default = {
+        "X-FORWARDED-PROTOCOL": "ssl",
+        "X-FORWARDED-PROTO": "https",
+        "X-FORWARDED-SSL": "on"
+    }
+    desc = """\
+
+        A dictionary containing headers and values that the front-end proxy
+        uses to indicate HTTPS requests. If the source IP is permitted by
+        ``forwarded-allow-ips`` (below), *and* at least one request header matches
+        a key-value pair listed in this dictionary, then Gunicorn will set
+        ``wsgi.url_scheme`` to ``https``, so your application can tell that the
+        request is secure.
+
+        If a header listed in this dictionary is not present in the request,
+        it will be ignored, but if a listed header is present and does not
+        match the provided value, then the request will fail to parse. See
+        the note below for more detailed examples of this behaviour.
+
+        The dictionary should map upper-case header names to exact string
+        values. The value comparisons are case-sensitive, unlike the header
+        names, so make sure they're exactly what your front-end proxy sends
+        when handling HTTPS requests.
+
+        It is important that your front-end proxy configuration ensures that
+        the headers defined here can not be passed directly from the client.
+        """
+
+
+class ForwardedAllowIPS(Setting):
+    name = "forwarded_allow_ips"
+    section = "Server Mechanics"
+    cli = ["--forwarded-allow-ips"]
+    meta = "STRING"
+    validator = validate_string_to_list
+    default = os.environ.get("FORWARDED_ALLOW_IPS", "127.0.0.1")
+    desc = """\
+        Front-end IPs that are allowed to set secure headers
+        (comma separated).
+
+        Set to ``*`` to disable checking of Front-end IPs (useful for setups
+        where you don't know in advance the IP address of Front-end, but
+        you still trust the environment).
+
+        By default, the value of the ``FORWARDED_ALLOW_IPS`` environment
+        variable. If it is not defined, the default is ``"127.0.0.1"``.
+
+        .. note::
+
+            The interplay between the request headers, the value of ``forwarded_allow_ips``, and the value of
+            ``secure_scheme_headers`` is complex. Various scenarios are documented below to further elaborate.
+            In each case, we have a request from the remote address 134.213.44.18, and the default value of
+            ``secure_scheme_headers``:
+
+            .. code::
+
+                secure_scheme_headers = {
+                    'X-FORWARDED-PROTOCOL': 'ssl',
+                    'X-FORWARDED-PROTO': 'https',
+                    'X-FORWARDED-SSL': 'on'
+                }
+
+
+            .. list-table::
+                :header-rows: 1
+                :align: center
+                :widths: auto
+
+                * - ``forwarded-allow-ips``
+                  - Secure Request Headers
+                  - Result
+                  - Explanation
+                * - .. code::
+
+                        ["127.0.0.1"]
+                  - .. code::
+
+                        X-Forwarded-Proto: https
+                  - .. code::
+
+                        wsgi.url_scheme = "http"
+                  - IP address was not allowed
+                * - .. code::
+
+                        "*"
+                  - <none>
+                  - .. code::
+
+                        wsgi.url_scheme = "http"
+                  - IP address allowed, but no secure headers provided
+                * - .. code::
+
+                        "*"
+                  - .. code::
+
+                        X-Forwarded-Proto: https
+                  - .. code::
+
+                        wsgi.url_scheme = "https"
+                  - IP address allowed, one request header matched
+                * - .. code::
+
+                        ["134.213.44.18"]
+                  - .. code::
+
+                        X-Forwarded-Ssl: on
+                        X-Forwarded-Proto: http
+                  - ``InvalidSchemeHeaders()`` raised
+                  - IP address allowed, but the two secure headers disagreed on if HTTPS was used
+
+
+        """
+
+
+class AccessLog(Setting):
+    name = "accesslog"
+    section = "Logging"
+    cli = ["--access-logfile"]
+    meta = "FILE"
+    validator = validate_string
+    default = None
+    desc = """\
+        The Access log file to write to.
+
+        ``'-'`` means log to stdout.
+        """
+
+
+class DisableRedirectAccessToSyslog(Setting):
+    name = "disable_redirect_access_to_syslog"
+    section = "Logging"
+    cli = ["--disable-redirect-access-to-syslog"]
+    validator = validate_bool
+    action = 'store_true'
+    default = False
+    desc = """\
+    Disable redirecting access logs to syslog.
+
+    .. versionadded:: 19.8
+    """
+
+
+class AccessLogFormat(Setting):
+    name = "access_log_format"
+    section = "Logging"
+    cli = ["--access-logformat"]
+    meta = "STRING"
+    validator = validate_string
+    default = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
+    desc = """\
+        The access log format.
+
+        ===========  ===========
+        Identifier   Description
+        ===========  ===========
+        h            remote address
+        l            ``'-'``
+        u            user name
+        t            date of the request
+        r            status line (e.g. ``GET / HTTP/1.1``)
+        m            request method
+        U            URL path without query string
+        q            query string
+        H            protocol
+        s            status
+        B            response length
+        b            response length or ``'-'`` (CLF format)
+        f            referer
+        a            user agent
+        T            request time in seconds
+        M            request time in milliseconds
+        D            request time in microseconds
+        L            request time in decimal seconds
+        p            process ID
+        {header}i    request header
+        {header}o    response header
+        {variable}e  environment variable
+        ===========  ===========
+
+        Use lowercase for header and environment variable names, and put
+        ``{...}x`` names inside ``%(...)s``. For example::
+
+            %({x-forwarded-for}i)s
+        """
+
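+
+# Editor's example (assumption, not upstream): a configuration-file format
+# string that appends the X-Forwarded-For request header to the default.
+#
+#   access_log_format = ('%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s '
+#                        '"%(f)s" "%(a)s" "%({x-forwarded-for}i)s"')
+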
+
+class ErrorLog(Setting):
+    name = "errorlog"
+    section = "Logging"
+    cli = ["--error-logfile", "--log-file"]
+    meta = "FILE"
+    validator = validate_string
+    default = '-'
+    desc = """\
+        The Error log file to write to.
+
+        Using ``'-'`` for FILE makes gunicorn log to stderr.
+
+        .. versionchanged:: 19.2
+           Log to stderr by default.
+
+        """
+
+
+class Loglevel(Setting):
+    name = "loglevel"
+    section = "Logging"
+    cli = ["--log-level"]
+    meta = "LEVEL"
+    validator = validate_string
+    default = "info"
+    desc = """\
+        The granularity of Error log outputs.
+
+        Valid level names are:
+
+        * ``'debug'``
+        * ``'info'``
+        * ``'warning'``
+        * ``'error'``
+        * ``'critical'``
+        """
+
+
+class CaptureOutput(Setting):
+    name = "capture_output"
+    section = "Logging"
+    cli = ["--capture-output"]
+    validator = validate_bool
+    action = 'store_true'
+    default = False
+    desc = """\
+        Redirect stdout/stderr to the file specified in :ref:`errorlog`.
+
+        .. versionadded:: 19.6
+        """
+
+
+class LoggerClass(Setting):
+    name = "logger_class"
+    section = "Logging"
+    cli = ["--logger-class"]
+    meta = "STRING"
+    validator = validate_class
+    default = "gunicorn.glogging.Logger"
+    desc = """\
+        The logger you want to use to log events in Gunicorn.
+
+        The default class (``gunicorn.glogging.Logger``) handles most
+        normal usages in logging. It provides error and access logging.
+
+        You can provide your own logger by giving Gunicorn a Python path to a
+        class that quacks like ``gunicorn.glogging.Logger``.
+        """
+
+
+class LogConfig(Setting):
+    name = "logconfig"
+    section = "Logging"
+    cli = ["--log-config"]
+    meta = "FILE"
+    validator = validate_string
+    default = None
+    desc = """\
+    The log config file to use.
+    Gunicorn uses the standard Python logging module's Configuration
+    file format.
+    """
+
+
+class LogConfigDict(Setting):
+    name = "logconfig_dict"
+    section = "Logging"
+    validator = validate_dict
+    default = {}
+    desc = """\
+    The log config dictionary to use, using the standard Python
+    logging module's dictionary configuration format. This option
+    takes precedence over the :ref:`logconfig` and :ref:`logConfigJson` options,
+    which use the older file configuration format and JSON
+    respectively.
+
+    Format: https://docs.python.org/3/library/logging.config.html#logging.config.dictConfig
+
+    For more context you can look at the default configuration dictionary for logging,
+    which can be found at ``gunicorn.glogging.CONFIG_DEFAULTS``.
+
+    .. versionadded:: 19.8
+    """
+
+
+class LogConfigJson(Setting):
+    name = "logconfig_json"
+    section = "Logging"
+    cli = ["--log-config-json"]
+    meta = "FILE"
+    validator = validate_string
+    default = None
+    desc = """\
+    The log config file to read the logging configuration from, in JSON
+    format. The parsed dictionary is applied using the standard Python
+    logging module's dictionary configuration format:
+    https://docs.python.org/3/library/logging.config.html#logging.config.dictConfig
+
+    .. versionadded:: 20.0
+    """
+
+
+class SyslogTo(Setting):
+    name = "syslog_addr"
+    section = "Logging"
+    cli = ["--log-syslog-to"]
+    meta = "SYSLOG_ADDR"
+    validator = validate_string
+
+    if PLATFORM == "darwin":
+        default = "unix:///var/run/syslog"
+    elif PLATFORM in ('freebsd', 'dragonfly', ):
+        default = "unix:///var/run/log"
+    elif PLATFORM == "openbsd":
+        default = "unix:///dev/log"
+    else:
+        default = "udp://localhost:514"
+
+    desc = """\
+    Address to send syslog messages.
+
+    Address is a string of the form:
+
+    * ``unix://PATH#TYPE`` : for unix domain socket. ``TYPE`` can be ``stream``
+      for the stream driver or ``dgram`` for the dgram driver.
+      ``stream`` is the default.
+    * ``udp://HOST:PORT`` : for UDP sockets
+    * ``tcp://HOST:PORT`` : for TCP sockets
+
+    """
+
+
+class Syslog(Setting):
+    name = "syslog"
+    section = "Logging"
+    cli = ["--log-syslog"]
+    validator = validate_bool
+    action = 'store_true'
+    default = False
+    desc = """\
+    Send *Gunicorn* logs to syslog.
+
+    .. versionchanged:: 19.8
+       You can now disable sending access logs by using the
+       :ref:`disable-redirect-access-to-syslog` setting.
+    """
+
+
+class SyslogPrefix(Setting):
+    name = "syslog_prefix"
+    section = "Logging"
+    cli = ["--log-syslog-prefix"]
+    meta = "SYSLOG_PREFIX"
+    validator = validate_string
+    default = None
+    desc = """\
+    Makes Gunicorn use the parameter as the program name in syslog entries.
+
+    All entries will be prefixed by ``gunicorn.<prefix>``. By default the
+    program name is the name of the process.
+    """
+
+
+class SyslogFacility(Setting):
+    name = "syslog_facility"
+    section = "Logging"
+    cli = ["--log-syslog-facility"]
+    meta = "SYSLOG_FACILITY"
+    validator = validate_string
+    default = "user"
+    desc = """\
+    Syslog facility name
+    """
+
+
+class EnableStdioInheritance(Setting):
+    name = "enable_stdio_inheritance"
+    section = "Logging"
+    cli = ["-R", "--enable-stdio-inheritance"]
+    validator = validate_bool
+    default = False
+    action = "store_true"
+    desc = """\
+    Enable stdio inheritance.
+
+    Enable inheritance for stdio file descriptors in daemon mode.
+
+    Note: To disable Python's stdout buffering, you can set the environment
+    variable ``PYTHONUNBUFFERED``.
+    """
+
+
+# statsD monitoring
+class StatsdHost(Setting):
+    name = "statsd_host"
+    section = "Logging"
+    cli = ["--statsd-host"]
+    meta = "STATSD_ADDR"
+    default = None
+    validator = validate_statsd_address
+    desc = """\
+    The address of the StatsD server to log to.
+
+    Address is a string of the form:
+
+    * ``unix://PATH`` : for a unix domain socket.
+    * ``HOST:PORT`` : for a network address
+
+    .. versionadded:: 19.1
+    """
+
+
+# Datadog Statsd (dogstatsd) tags. https://docs.datadoghq.com/developers/dogstatsd/
+class DogstatsdTags(Setting):
+    name = "dogstatsd_tags"
+    section = "Logging"
+    cli = ["--dogstatsd-tags"]
+    meta = "DOGSTATSD_TAGS"
+    default = ""
+    validator = validate_string
+    desc = """\
+    A comma-delimited list of datadog statsd (dogstatsd) tags to append to
+    statsd metrics.
+
+    .. versionadded:: 20
+    """
+
+
+class StatsdPrefix(Setting):
+    name = "statsd_prefix"
+    section = "Logging"
+    cli = ["--statsd-prefix"]
+    meta = "STATSD_PREFIX"
+    default = ""
+    validator = validate_string
+    desc = """\
+    Prefix to use when emitting statsd metrics (a trailing ``.`` is added,
+    if not provided).
+
+    .. versionadded:: 19.2
+    """
+
+
+class Procname(Setting):
+    name = "proc_name"
+    section = "Process Naming"
+    cli = ["-n", "--name"]
+    meta = "STRING"
+    validator = validate_string
+    default = None
+    desc = """\
+        A base to use with setproctitle for process naming.
+
+        This affects things like ``ps`` and ``top``. If you're going to be
+        running more than one instance of Gunicorn you'll probably want to set a
+        name to tell them apart. This requires that you install the setproctitle
+        module.
+
+        If not set, the *default_proc_name* setting will be used.
+        """
+
+
+class DefaultProcName(Setting):
+    name = "default_proc_name"
+    section = "Process Naming"
+    validator = validate_string
+    default = "gunicorn"
+    desc = """\
+        Internal setting that is adjusted for each type of application.
+        """
+
+
+class PythonPath(Setting):
+    name = "pythonpath"
+    section = "Server Mechanics"
+    cli = ["--pythonpath"]
+    meta = "STRING"
+    validator = validate_string
+    default = None
+    desc = """\
+        A comma-separated list of directories to add to the Python path.
+
+        e.g.
+        ``'/home/djangoprojects/myproject,/home/python/mylibrary'``.
+        """
+
+
+class Paste(Setting):
+    name = "paste"
+    section = "Server Mechanics"
+    cli = ["--paste", "--paster"]
+    meta = "STRING"
+    validator = validate_string
+    default = None
+    desc = """\
+        Load a PasteDeploy config file. The argument may contain a ``#``
+        symbol followed by the name of an app section from the config file,
+        e.g. ``production.ini#admin``.
+
+        At this time, using alternate server blocks is not supported. Use the
+        command line arguments to control server configuration instead.
+        """
+
+
+class OnStarting(Setting):
+    name = "on_starting"
+    section = "Server Hooks"
+    validator = validate_callable(1)
+    type = callable
+
+    def on_starting(server):
+        pass
+    default = staticmethod(on_starting)
+    desc = """\
+        Called just before the master process is initialized.
+
+        The callable needs to accept a single instance variable for the Arbiter.
+        """
+
+
+class OnReload(Setting):
+    name = "on_reload"
+    section = "Server Hooks"
+    validator = validate_callable(1)
+    type = callable
+
+    def on_reload(server):
+        pass
+    default = staticmethod(on_reload)
+    desc = """\
+        Called to recycle workers during a reload via SIGHUP.
+
+        The callable needs to accept a single instance variable for the Arbiter.
+        """
+
+
+class WhenReady(Setting):
+    name = "when_ready"
+    section = "Server Hooks"
+    validator = validate_callable(1)
+    type = callable
+
+    def when_ready(server):
+        pass
+    default = staticmethod(when_ready)
+    desc = """\
+        Called just after the server is started.
+
+        The callable needs to accept a single instance variable for the Arbiter.
+        """
+
+
+class Prefork(Setting):
+    name = "pre_fork"
+    section = "Server Hooks"
+    validator = validate_callable(2)
+    type = callable
+
+    def pre_fork(server, worker):
+        pass
+    default = staticmethod(pre_fork)
+    desc = """\
+        Called just before a worker is forked.
+
+        The callable needs to accept two instance variables for the Arbiter and
+        new Worker.
+        """
+
+
+class Postfork(Setting):
+    name = "post_fork"
+    section = "Server Hooks"
+    validator = validate_callable(2)
+    type = callable
+
+    def post_fork(server, worker):
+        pass
+    default = staticmethod(post_fork)
+    desc = """\
+        Called just after a worker has been forked.
+
+        The callable needs to accept two instance variables for the Arbiter and
+        new Worker.
+        """
+
+
+class PostWorkerInit(Setting):
+    name = "post_worker_init"
+    section = "Server Hooks"
+    validator = validate_callable(1)
+    type = callable
+
+    def post_worker_init(worker):
+        pass
+
+    default = staticmethod(post_worker_init)
+    desc = """\
+        Called just after a worker has initialized the application.
+
+        The callable needs to accept one instance variable for the initialized
+        Worker.
+        """
+
+
+class WorkerInt(Setting):
+    name = "worker_int"
+    section = "Server Hooks"
+    validator = validate_callable(1)
+    type = callable
+
+    def worker_int(worker):
+        pass
+
+    default = staticmethod(worker_int)
+    desc = """\
+        Called just after a worker exited on SIGINT or SIGQUIT.
+
+        The callable needs to accept one instance variable for the initialized
+        Worker.
+        """
+
+
+class WorkerAbort(Setting):
+    name = "worker_abort"
+    section = "Server Hooks"
+    validator = validate_callable(1)
+    type = callable
+
+    def worker_abort(worker):
+        pass
+
+    default = staticmethod(worker_abort)
+    desc = """\
+        Called when a worker received the SIGABRT signal.
+
+        This call generally happens on timeout.
+
+        The callable needs to accept one instance variable for the initialized
+        Worker.
+        """
+
+
+class PreExec(Setting):
+    name = "pre_exec"
+    section = "Server Hooks"
+    validator = validate_callable(1)
+    type = callable
+
+    def pre_exec(server):
+        pass
+    default = staticmethod(pre_exec)
+    desc = """\
+        Called just before a new master process is forked.
+
+        The callable needs to accept a single instance variable for the Arbiter.
+        """
+
+
+class PreRequest(Setting):
+    name = "pre_request"
+    section = "Server Hooks"
+    validator = validate_callable(2)
+    type = callable
+
+    def pre_request(worker, req):
+        worker.log.debug("%s %s", req.method, req.path)
+    default = staticmethod(pre_request)
+    desc = """\
+        Called just before a worker processes the request.
+
+        The callable needs to accept two instance variables for the Worker and
+        the Request.
+        """
+
+
+class PostRequest(Setting):
+    name = "post_request"
+    section = "Server Hooks"
+    validator = validate_post_request
+    type = callable
+
+    def post_request(worker, req, environ, resp):
+        pass
+    default = staticmethod(post_request)
+    desc = """\
+        Called after a worker processes the request.
+
+        The callable needs to accept two instance variables for the Worker and
+        the Request.
+        """
+
+
+class ChildExit(Setting):
+    name = "child_exit"
+    section = "Server Hooks"
+    validator = validate_callable(2)
+    type = callable
+
+    def child_exit(server, worker):
+        pass
+    default = staticmethod(child_exit)
+    desc = """\
+        Called just after a worker has exited, in the master process.
+
+        The callable needs to accept two instance variables for the Arbiter and
+        the just-exited Worker.
+
+        .. versionadded:: 19.7
+        """
+
+
+class WorkerExit(Setting):
+    name = "worker_exit"
+    section = "Server Hooks"
+    validator = validate_callable(2)
+    type = callable
+
+    def worker_exit(server, worker):
+        pass
+    default = staticmethod(worker_exit)
+    desc = """\
+        Called just after a worker has exited, in the worker process.
+
+        The callable needs to accept two instance variables for the Arbiter and
+        the just-exited Worker.
+        """
+
+
+class NumWorkersChanged(Setting):
+    name = "nworkers_changed"
+    section = "Server Hooks"
+    validator = validate_callable(3)
+    type = callable
+
+    def nworkers_changed(server, new_value, old_value):
+        pass
+    default = staticmethod(nworkers_changed)
+    desc = """\
+        Called just after *num_workers* has been changed.
+
+        The callable needs to accept an instance variable of the Arbiter and
+        two integers, the number of workers after and before the change.
+
+        If the number of workers is set for the first time, *old_value* would
+        be ``None``.
+        """
+
+
+class OnExit(Setting):
+    name = "on_exit"
+    section = "Server Hooks"
+    validator = validate_callable(1)
+
+    def on_exit(server):
+        pass
+
+    default = staticmethod(on_exit)
+    desc = """\
+        Called just before exiting Gunicorn.
+
+        The callable needs to accept a single instance variable for the Arbiter.
+        """
+
+
+class NewSSLContext(Setting):
+    name = "ssl_context"
+    section = "Server Hooks"
+    validator = validate_callable(2)
+    type = callable
+
+    def ssl_context(config, default_ssl_context_factory):
+        return default_ssl_context_factory()
+
+    default = staticmethod(ssl_context)
+    desc = """\
+        Called when SSLContext is needed.
+
+        Allows customizing SSL context.
+
+        The callable needs to accept an instance variable for the Config and
+        a factory function that returns a default SSLContext initialized with
+        the certificates, private key, cert_reqs, and ciphers according to
+        the config, and which can be further customized by the callable.
+        The callable needs to return an SSLContext object.
+
+        The following example shows a configuration file that sets the
+        minimum TLS version to 1.3:
+
+        .. code-block:: python
+
+            def ssl_context(conf, default_ssl_context_factory):
+                import ssl
+                context = default_ssl_context_factory()
+                context.minimum_version = ssl.TLSVersion.TLSv1_3
+                return context
+
+        .. versionadded:: 20.2
+        """
+
+
+class ProxyProtocol(Setting):
+    name = "proxy_protocol"
+    section = "Server Mechanics"
+    cli = ["--proxy-protocol"]
+    validator = validate_bool
+    default = False
+    action = "store_true"
+    desc = """\
+        Enable detection of the PROXY protocol (PROXY mode).
+
+        Allows using HTTP and the PROXY protocol together. It may be useful
+        when working with stunnel as an HTTPS frontend and Gunicorn as the
+        HTTP server.
+
+        PROXY protocol: http://haproxy.1wt.eu/download/1.5/doc/proxy-protocol.txt
+
+        Example for stunnel config::
+
+            [https]
+            protocol = proxy
+            accept  = 443
+            connect = 80
+            cert = /etc/ssl/certs/stunnel.pem
+            key = /etc/ssl/certs/stunnel.key
+        """
+
+
+class ProxyAllowFrom(Setting):
+    name = "proxy_allow_ips"
+    section = "Server Mechanics"
+    cli = ["--proxy-allow-from"]
+    validator = validate_string_to_list
+    default = "127.0.0.1"
+    desc = """\
+        Front-end IPs from which to accept proxy requests (comma separated).
+
+        Set to ``*`` to disable checking of Front-end IPs (useful for setups
+        where you don't know in advance the IP address of Front-end, but
+        you still trust the environment)
+        """
+
+
+class KeyFile(Setting):
+    name = "keyfile"
+    section = "SSL"
+    cli = ["--keyfile"]
+    meta = "FILE"
+    validator = validate_string
+    default = None
+    desc = """\
+    SSL key file
+    """
+
+
+class CertFile(Setting):
+    name = "certfile"
+    section = "SSL"
+    cli = ["--certfile"]
+    meta = "FILE"
+    validator = validate_string
+    default = None
+    desc = """\
+    SSL certificate file
+    """
+
+
+class SSLVersion(Setting):
+    name = "ssl_version"
+    section = "SSL"
+    cli = ["--ssl-version"]
+    validator = validate_ssl_version
+
+    if hasattr(ssl, "PROTOCOL_TLS"):
+        default = ssl.PROTOCOL_TLS
+    else:
+        default = ssl.PROTOCOL_SSLv23
+
+    desc = """\
+    SSL version to use (see the stdlib ``ssl`` module).
+
+    .. deprecated:: 20.2
+       The option is deprecated and it is currently ignored. Use :ref:`ssl-context` instead.
+
+    ============= ============
+    --ssl-version Description
+    ============= ============
+    SSLv3         SSLv3 is not-secure and is strongly discouraged.
+    SSLv23        Alias for TLS. Deprecated in Python 3.6, use TLS.
+    TLS           Negotiate highest possible version between client/server.
+                  Can yield SSL. (Python 3.6+)
+    TLSv1         TLS 1.0
+    TLSv1_1       TLS 1.1 (Python 3.4+)
+    TLSv1_2       TLS 1.2 (Python 3.4+)
+    TLS_SERVER    Auto-negotiate the highest protocol version like TLS,
+                  but only support server-side SSLSocket connections.
+                  (Python 3.6+)
+    ============= ============
+
+    .. versionchanged:: 19.7
+       The default value has been changed from ``ssl.PROTOCOL_TLSv1`` to
+       ``ssl.PROTOCOL_SSLv23``.
+    .. versionchanged:: 20.0
+       This setting now accepts string names based on ``ssl.PROTOCOL_``
+       constants.
+    .. versionchanged:: 20.0.1
+       The default value has been changed from ``ssl.PROTOCOL_SSLv23`` to
+       ``ssl.PROTOCOL_TLS`` when Python >= 3.6 .
+    """
+
+
+class CertReqs(Setting):
+    name = "cert_reqs"
+    section = "SSL"
+    cli = ["--cert-reqs"]
+    validator = validate_pos_int
+    default = ssl.CERT_NONE
+    desc = """\
+    Whether a client certificate is required (see the stdlib ``ssl`` module).
+
+    ===========  ===========================
+    --cert-reqs  Description
+    ===========  ===========================
+    `0`          no client verification
+    `1`          ssl.CERT_OPTIONAL
+    `2`          ssl.CERT_REQUIRED
+    ===========  ===========================
+    """
+
+
+class CACerts(Setting):
+    name = "ca_certs"
+    section = "SSL"
+    cli = ["--ca-certs"]
+    meta = "FILE"
+    validator = validate_string
+    default = None
+    desc = """\
+    CA certificates file
+    """
+
+
+class SuppressRaggedEOFs(Setting):
+    name = "suppress_ragged_eofs"
+    section = "SSL"
+    cli = ["--suppress-ragged-eofs"]
+    action = "store_true"
+    default = True
+    validator = validate_bool
+    desc = """\
+    Suppress ragged EOFs (see the stdlib ``ssl`` module).
+    """
+
+
+class DoHandshakeOnConnect(Setting):
+    name = "do_handshake_on_connect"
+    section = "SSL"
+    cli = ["--do-handshake-on-connect"]
+    validator = validate_bool
+    action = "store_true"
+    default = False
+    desc = """\
+    Whether to perform the SSL handshake on socket connect (see the stdlib
+    ``ssl`` module).
+    """
+
+
+class Ciphers(Setting):
+    name = "ciphers"
+    section = "SSL"
+    cli = ["--ciphers"]
+    validator = validate_string
+    default = None
+    desc = """\
+    SSL Cipher suite to use, in the format of an OpenSSL cipher list.
+
+    By default we use the default cipher list from Python's ``ssl`` module,
+    which contains ciphers considered strong at the time of each Python
+    release.
+
+    As a recommended alternative, the Open Web App Security Project (OWASP)
+    offers `a vetted set of strong cipher strings rated A+ to C-
+    <https://www.owasp.org/index.php/TLS_Cipher_String_Cheat_Sheet>`_.
+    OWASP provides details on user-agent compatibility at each security level.
+
+    See the `OpenSSL Cipher List Format Documentation
+    <https://www.openssl.org/docs/manmaster/man1/ciphers.html#CIPHER-LIST-FORMAT>`_
+    for details on the format of an OpenSSL cipher list.
+    """
+
+
+class PasteGlobalConf(Setting):
+    name = "raw_paste_global_conf"
+    action = "append"
+    section = "Server Mechanics"
+    cli = ["--paste-global"]
+    meta = "CONF"
+    validator = validate_list_string
+    default = []
+
+    desc = """\
+        Set a PasteDeploy global config variable in ``key=value`` form.
+
+        The option can be specified multiple times.
+
+        The variables are passed to the PasteDeploy entrypoint. Example::
+
+            $ gunicorn -b 127.0.0.1:8000 --paste development.ini --paste-global FOO=1 --paste-global BAR=2
+
+        .. versionadded:: 19.7
+        """
+
+
+class StripHeaderSpaces(Setting):
+    name = "strip_header_spaces"
+    section = "Server Mechanics"
+    cli = ["--strip-header-spaces"]
+    validator = validate_bool
+    action = "store_true"
+    default = False
+    desc = """\
+        Strip spaces present between the header name and the ``:``.
+
+        This is known to induce vulnerabilities and is not compliant with the HTTP/1.1 standard.
+        See https://portswigger.net/research/http-desync-attacks-request-smuggling-reborn.
+
+        Use with care and only if necessary.
+        """
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/debug.py b/.venv/lib/python3.12/site-packages/gunicorn/debug.py
new file mode 100644
index 00000000..a492df9e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/debug.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+"""The debug module contains utilities and functions for better
+debugging Gunicorn."""
+
+import sys
+import linecache
+import re
+import inspect
+
+__all__ = ['spew', 'unspew']
+
+_token_splitter = re.compile(r'\W+')
+
+
+class Spew(object):
+
+    def __init__(self, trace_names=None, show_values=True):
+        self.trace_names = trace_names
+        self.show_values = show_values
+
+    def __call__(self, frame, event, arg):
+        if event == 'line':
+            lineno = frame.f_lineno
+            if '__file__' in frame.f_globals:
+                filename = frame.f_globals['__file__']
+                if (filename.endswith('.pyc') or
+                        filename.endswith('.pyo')):
+                    filename = filename[:-1]
+                name = frame.f_globals['__name__']
+                line = linecache.getline(filename, lineno)
+            else:
+                name = '[unknown]'
+                try:
+                    src_lines, start_line = inspect.getsourcelines(frame)
+                    line = src_lines[lineno - start_line]
+                except IOError:
+                    line = 'Unknown code named [%s].  VM instruction #%d' % (
+                        frame.f_code.co_name, frame.f_lasti)
+            if self.trace_names is None or name in self.trace_names:
+                print('%s:%s: %s' % (name, lineno, line.rstrip()))
+                if not self.show_values:
+                    return self
+                details = []
+                tokens = _token_splitter.split(line)
+                for tok in tokens:
+                    if tok in frame.f_globals:
+                        details.append('%s=%r' % (tok, frame.f_globals[tok]))
+                    if tok in frame.f_locals:
+                        details.append('%s=%r' % (tok, frame.f_locals[tok]))
+                if details:
+                    print("\t%s" % ' '.join(details))
+        return self
+
+
+def spew(trace_names=None, show_values=False):
+    """Install a trace hook which writes incredibly detailed logs
+    about what code is being executed to stdout.
+    """
+    sys.settrace(Spew(trace_names, show_values))
+
+
+def unspew():
+    """Remove the trace hook installed by spew.
+    """
+    sys.settrace(None)
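+
+
+# Editor's usage sketch (assumption, not upstream):
+#
+#   from gunicorn import debug
+#   debug.spew(trace_names=['myapp.views'])
+#   ...   # every executed line in myapp.views is printed to stdout
+#   debug.unspew()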
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/errors.py b/.venv/lib/python3.12/site-packages/gunicorn/errors.py
new file mode 100644
index 00000000..727d336a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/errors.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+# We don't need to call super() in __init__ methods of our
+# BaseException and Exception classes because we also define
+# our own __str__ methods so there is no need to pass 'message'
+# to the base class to get a meaningful output from 'str(exc)'.
+# pylint: disable=super-init-not-called
+
+
+# we inherit from BaseException here to make sure to not be caught
+# at application level
+class HaltServer(BaseException):
+    def __init__(self, reason, exit_status=1):
+        self.reason = reason
+        self.exit_status = exit_status
+
+    def __str__(self):
+        return "<HaltServer %r %d>" % (self.reason, self.exit_status)
+
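+
+# Editor's sketch (not upstream): typical use -- raised from deep inside the
+# master loop so it unwinds past application code before being reported.
+#
+#   raise HaltServer("Worker failed to boot.", exit_status=3)
+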
+
+class ConfigError(Exception):
+    """ Exception raised on config error """
+
+
+class AppImportError(Exception):
+    """ Exception raised when loading an application """
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/glogging.py b/.venv/lib/python3.12/site-packages/gunicorn/glogging.py
new file mode 100644
index 00000000..b552e26a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/glogging.py
@@ -0,0 +1,474 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+import base64
+import binascii
+import json
+import time
+import logging
+logging.Logger.manager.emittedNoHandlerWarning = 1  # noqa
+from logging.config import dictConfig
+from logging.config import fileConfig
+import os
+import socket
+import sys
+import threading
+import traceback
+
+from gunicorn import util
+
+
+# syslog facility codes
+SYSLOG_FACILITIES = {
+    "auth": 4,
+    "authpriv": 10,
+    "cron": 9,
+    "daemon": 3,
+    "ftp": 11,
+    "kern": 0,
+    "lpr": 6,
+    "mail": 2,
+    "news": 7,
+    "security": 4,  # DEPRECATED
+    "syslog": 5,
+    "user": 1,
+    "uucp": 8,
+    "local0": 16,
+    "local1": 17,
+    "local2": 18,
+    "local3": 19,
+    "local4": 20,
+    "local5": 21,
+    "local6": 22,
+    "local7": 23
+}
+
+CONFIG_DEFAULTS = {
+    "version": 1,
+    "disable_existing_loggers": False,
+    "root": {"level": "INFO", "handlers": ["console"]},
+    "loggers": {
+        "gunicorn.error": {
+            "level": "INFO",
+            "handlers": ["error_console"],
+            "propagate": True,
+            "qualname": "gunicorn.error"
+        },
+
+        "gunicorn.access": {
+            "level": "INFO",
+            "handlers": ["console"],
+            "propagate": True,
+            "qualname": "gunicorn.access"
+        }
+    },
+    "handlers": {
+        "console": {
+            "class": "logging.StreamHandler",
+            "formatter": "generic",
+            "stream": "ext://sys.stdout"
+        },
+        "error_console": {
+            "class": "logging.StreamHandler",
+            "formatter": "generic",
+            "stream": "ext://sys.stderr"
+        },
+    },
+    "formatters": {
+        "generic": {
+            "format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
+            "datefmt": "[%Y-%m-%d %H:%M:%S %z]",
+            "class": "logging.Formatter"
+        }
+    }
+}
+
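+
+# Editor's note (sketch, not upstream): Logger.setup() below merges the user
+# supplied logconfig_dict into these defaults with a shallow dict.update(),
+# so a top-level key such as "loggers" replaces the default mapping wholesale
+# rather than being merged entry by entry. For example (assumed config file):
+#
+#   logconfig_dict = {
+#       "loggers": {
+#           "gunicorn.error": {"level": "DEBUG",
+#                              "handlers": ["error_console"]},
+#       },
+#   }
+#
+# would drop the default "gunicorn.access" logger configuration.
+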
+
+def loggers():
+    """ get list of all loggers """
+    root = logging.root
+    existing = list(root.manager.loggerDict.keys())
+    return [logging.getLogger(name) for name in existing]
+
+
+class SafeAtoms(dict):
+
+    def __init__(self, atoms):
+        dict.__init__(self)
+        for key, value in atoms.items():
+            if isinstance(value, str):
+                self[key] = value.replace('"', '\\"')
+            else:
+                self[key] = value
+
+    def __getitem__(self, k):
+        if k.startswith("{"):
+            kl = k.lower()
+            if kl in self:
+                return super().__getitem__(kl)
+            else:
+                return "-"
+        if k in self:
+            return super().__getitem__(k)
+        else:
+            return '-'
+
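+
+# Editor's sketch (not upstream): SafeAtoms escapes double quotes and falls
+# back to "-" for unknown atoms, keeping access-log lines well formed.
+def _editor_demo_safe_atoms():
+    atoms = SafeAtoms({"h": "127.0.0.1", "a": 'agent "quoted"'})
+    assert atoms["h"] == "127.0.0.1"
+    assert atoms["missing"] == "-"
+    assert atoms["a"] == 'agent \\"quoted\\"'
+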
+
+def parse_syslog_address(addr):
+
+    # unix domain socket type depends on backend
+    # SysLogHandler will try both when given None
+    if addr.startswith("unix://"):
+        sock_type = None
+
+        # set socket type only if explicitly requested
+        parts = addr.split("#", 1)
+        if len(parts) == 2:
+            addr = parts[0]
+            if parts[1] == "dgram":
+                sock_type = socket.SOCK_DGRAM
+
+        return (sock_type, addr.split("unix://")[1])
+
+    if addr.startswith("udp://"):
+        addr = addr.split("udp://")[1]
+        socktype = socket.SOCK_DGRAM
+    elif addr.startswith("tcp://"):
+        addr = addr.split("tcp://")[1]
+        socktype = socket.SOCK_STREAM
+    else:
+        raise RuntimeError("invalid syslog address")
+
+    if '[' in addr and ']' in addr:
+        host = addr.split(']')[0][1:].lower()
+    elif ':' in addr:
+        host = addr.split(':')[0].lower()
+    elif addr == "":
+        host = "localhost"
+    else:
+        host = addr.lower()
+
+    addr = addr.split(']')[-1]
+    if ":" in addr:
+        port = addr.split(':', 1)[1]
+        if not port.isdigit():
+            raise RuntimeError("%r is not a valid port number." % port)
+        port = int(port)
+    else:
+        port = 514
+
+    return (socktype, (host, port))
+
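+
+# Editor's sketch (not upstream): expected shapes of the return value for the
+# address forms documented in the SyslogTo setting.
+def _editor_demo_parse_syslog_address():
+    assert parse_syslog_address("udp://localhost:514") == \
+        (socket.SOCK_DGRAM, ("localhost", 514))
+    sock_type, path = parse_syslog_address("unix:///dev/log#dgram")
+    assert sock_type == socket.SOCK_DGRAM and path == "/dev/log"
+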
+
+class Logger(object):
+
+    LOG_LEVELS = {
+        "critical": logging.CRITICAL,
+        "error": logging.ERROR,
+        "warning": logging.WARNING,
+        "info": logging.INFO,
+        "debug": logging.DEBUG
+    }
+    loglevel = logging.INFO
+
+    error_fmt = r"%(asctime)s [%(process)d] [%(levelname)s] %(message)s"
+    datefmt = r"[%Y-%m-%d %H:%M:%S %z]"
+
+    access_fmt = "%(message)s"
+    syslog_fmt = "[%(process)d] %(message)s"
+
+    atoms_wrapper_class = SafeAtoms
+
+    def __init__(self, cfg):
+        self.error_log = logging.getLogger("gunicorn.error")
+        self.error_log.propagate = False
+        self.access_log = logging.getLogger("gunicorn.access")
+        self.access_log.propagate = False
+        self.error_handlers = []
+        self.access_handlers = []
+        self.logfile = None
+        self.lock = threading.Lock()
+        self.cfg = cfg
+        self.setup(cfg)
+
+    def setup(self, cfg):
+        self.loglevel = self.LOG_LEVELS.get(cfg.loglevel.lower(), logging.INFO)
+        self.error_log.setLevel(self.loglevel)
+        self.access_log.setLevel(logging.INFO)
+
+        # set gunicorn.error handler
+        if self.cfg.capture_output and cfg.errorlog != "-":
+            for stream in sys.stdout, sys.stderr:
+                stream.flush()
+
+            self.logfile = open(cfg.errorlog, 'a+')
+            os.dup2(self.logfile.fileno(), sys.stdout.fileno())
+            os.dup2(self.logfile.fileno(), sys.stderr.fileno())
+
+        self._set_handler(self.error_log, cfg.errorlog,
+                          logging.Formatter(self.error_fmt, self.datefmt))
+
+        # set gunicorn.access handler
+        if cfg.accesslog is not None:
+            self._set_handler(
+                self.access_log, cfg.accesslog,
+                fmt=logging.Formatter(self.access_fmt), stream=sys.stdout
+            )
+
+        # set syslog handler
+        if cfg.syslog:
+            self._set_syslog_handler(
+                self.error_log, cfg, self.syslog_fmt, "error"
+            )
+            if not cfg.disable_redirect_access_to_syslog:
+                self._set_syslog_handler(
+                    self.access_log, cfg, self.syslog_fmt, "access"
+                )
+
+        if cfg.logconfig_dict:
+            config = CONFIG_DEFAULTS.copy()
+            config.update(cfg.logconfig_dict)
+            try:
+                dictConfig(config)
+            except (
+                    AttributeError,
+                    ImportError,
+                    ValueError,
+                    TypeError
+            ) as exc:
+                raise RuntimeError(str(exc))
+        elif cfg.logconfig_json:
+            config = CONFIG_DEFAULTS.copy()
+            if os.path.exists(cfg.logconfig_json):
+                try:
+                    with open(cfg.logconfig_json) as config_json_file:
+                        config_json = json.load(config_json_file)
+                    config.update(config_json)
+                    dictConfig(config)
+                except (
+                    json.JSONDecodeError,
+                    AttributeError,
+                    ImportError,
+                    ValueError,
+                    TypeError
+                ) as exc:
+                    raise RuntimeError(str(exc))
+        elif cfg.logconfig:
+            if os.path.exists(cfg.logconfig):
+                defaults = CONFIG_DEFAULTS.copy()
+                defaults['__file__'] = cfg.logconfig
+                defaults['here'] = os.path.dirname(cfg.logconfig)
+                fileConfig(cfg.logconfig, defaults=defaults,
+                           disable_existing_loggers=False)
+            else:
+                msg = "Error: log config '%s' not found"
+                raise RuntimeError(msg % cfg.logconfig)
+
+    def critical(self, msg, *args, **kwargs):
+        self.error_log.critical(msg, *args, **kwargs)
+
+    def error(self, msg, *args, **kwargs):
+        self.error_log.error(msg, *args, **kwargs)
+
+    def warning(self, msg, *args, **kwargs):
+        self.error_log.warning(msg, *args, **kwargs)
+
+    def info(self, msg, *args, **kwargs):
+        self.error_log.info(msg, *args, **kwargs)
+
+    def debug(self, msg, *args, **kwargs):
+        self.error_log.debug(msg, *args, **kwargs)
+
+    def exception(self, msg, *args, **kwargs):
+        self.error_log.exception(msg, *args, **kwargs)
+
+    def log(self, lvl, msg, *args, **kwargs):
+        if isinstance(lvl, str):
+            lvl = self.LOG_LEVELS.get(lvl.lower(), logging.INFO)
+        self.error_log.log(lvl, msg, *args, **kwargs)
+
+    def atoms(self, resp, req, environ, request_time):
+        """ Gets atoms for log formatting.
+        """
+        status = resp.status
+        if isinstance(status, str):
+            status = status.split(None, 1)[0]
+        atoms = {
+            'h': environ.get('REMOTE_ADDR', '-'),
+            'l': '-',
+            'u': self._get_user(environ) or '-',
+            't': self.now(),
+            'r': "%s %s %s" % (environ['REQUEST_METHOD'],
+                               environ['RAW_URI'],
+                               environ["SERVER_PROTOCOL"]),
+            's': status,
+            'm': environ.get('REQUEST_METHOD'),
+            'U': environ.get('PATH_INFO'),
+            'q': environ.get('QUERY_STRING'),
+            'H': environ.get('SERVER_PROTOCOL'),
+            'b': str(resp.sent) if getattr(resp, 'sent', None) is not None else '-',
+            'B': getattr(resp, 'sent', None),
+            'f': environ.get('HTTP_REFERER', '-'),
+            'a': environ.get('HTTP_USER_AGENT', '-'),
+            'T': request_time.seconds,
+            'D': (request_time.seconds * 1000000) + request_time.microseconds,
+            'M': (request_time.seconds * 1000) + int(request_time.microseconds / 1000),
+            'L': "%d.%06d" % (request_time.seconds, request_time.microseconds),
+            'p': "<%s>" % os.getpid()
+        }
+
+        # add request headers
+        if hasattr(req, 'headers'):
+            req_headers = req.headers
+        else:
+            req_headers = req
+
+        if hasattr(req_headers, "items"):
+            req_headers = req_headers.items()
+
+        atoms.update({"{%s}i" % k.lower(): v for k, v in req_headers})
+
+        resp_headers = resp.headers
+        if hasattr(resp_headers, "items"):
+            resp_headers = resp_headers.items()
+
+        # add response headers
+        atoms.update({"{%s}o" % k.lower(): v for k, v in resp_headers})
+
+        # add environ variables
+        environ_variables = environ.items()
+        atoms.update({"{%s}e" % k.lower(): v for k, v in environ_variables})
+
+        return atoms
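+
+    # Illustrative sketch: with gunicorn's default access_log_format,
+    #   '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"',
+    # these atoms render a Combined Log Format line such as
+    #   127.0.0.1 - - [10/Oct/2000:13:55:36 -0700] "GET / HTTP/1.0" 200 2326 "-" "curl"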
+
+    def access(self, resp, req, environ, request_time):
+        """ See http://httpd.apache.org/docs/2.0/logs.html#combined
+        for format details
+        """
+
+        if not (self.cfg.accesslog or self.cfg.logconfig or
+           self.cfg.logconfig_dict or self.cfg.logconfig_json or
+           (self.cfg.syslog and not self.cfg.disable_redirect_access_to_syslog)):
+            return
+
+        # wrap atoms:
+        # - make sure atoms are tested case insensitively
+        # - replace missing atoms with '-'
+        safe_atoms = self.atoms_wrapper_class(
+            self.atoms(resp, req, environ, request_time)
+        )
+
+        try:
+            self.access_log.info(self.cfg.access_log_format, safe_atoms)
+        except Exception:
+            self.error(traceback.format_exc())
+
+    def now(self):
+        """ return date in Apache Common Log Format """
+        return time.strftime('[%d/%b/%Y:%H:%M:%S %z]')
+
+    def reopen_files(self):
+        if self.cfg.capture_output and self.cfg.errorlog != "-":
+            for stream in sys.stdout, sys.stderr:
+                stream.flush()
+
+            with self.lock:
+                if self.logfile is not None:
+                    self.logfile.close()
+                self.logfile = open(self.cfg.errorlog, 'a+')
+                os.dup2(self.logfile.fileno(), sys.stdout.fileno())
+                os.dup2(self.logfile.fileno(), sys.stderr.fileno())
+
+        for log in loggers():
+            for handler in log.handlers:
+                if isinstance(handler, logging.FileHandler):
+                    handler.acquire()
+                    try:
+                        if handler.stream:
+                            handler.close()
+                            handler.stream = handler._open()
+                    finally:
+                        handler.release()
+
+    def close_on_exec(self):
+        for log in loggers():
+            for handler in log.handlers:
+                if isinstance(handler, logging.FileHandler):
+                    handler.acquire()
+                    try:
+                        if handler.stream:
+                            util.close_on_exec(handler.stream.fileno())
+                    finally:
+                        handler.release()
+
+    def _get_gunicorn_handler(self, log):
+        for h in log.handlers:
+            if getattr(h, "_gunicorn", False):
+                return h
+
+    def _set_handler(self, log, output, fmt, stream=None):
+        # remove previous gunicorn log handler
+        h = self._get_gunicorn_handler(log)
+        if h:
+            log.handlers.remove(h)
+
+        if output is not None:
+            if output == "-":
+                h = logging.StreamHandler(stream)
+            else:
+                util.check_is_writable(output)
+                h = logging.FileHandler(output)
+                # make sure the user can reopen the file
+                try:
+                    os.chown(h.baseFilename, self.cfg.user, self.cfg.group)
+                except OSError:
+                    # this is probably fine: the user may have passed a
+                    # special path such as /dev/null that cannot be chowned
+                    pass
+
+            h.setFormatter(fmt)
+            h._gunicorn = True
+            log.addHandler(h)
+
+    def _set_syslog_handler(self, log, cfg, fmt, name):
+        # setup format
+        prefix = cfg.syslog_prefix or cfg.proc_name.replace(":", ".")
+
+        prefix = "gunicorn.%s.%s" % (prefix, name)
+
+        # set format
+        fmt = logging.Formatter(r"%s: %s" % (prefix, fmt))
+
+        # syslog facility
+        try:
+            facility = SYSLOG_FACILITIES[cfg.syslog_facility.lower()]
+        except KeyError:
+            raise RuntimeError("unknown facility name")
+
+        # parse syslog address
+        socktype, addr = parse_syslog_address(cfg.syslog_addr)
+
+        # finally setup the syslog handler
+        h = logging.handlers.SysLogHandler(address=addr,
+                                           facility=facility, socktype=socktype)
+
+        h.setFormatter(fmt)
+        h._gunicorn = True
+        log.addHandler(h)
+
+    def _get_user(self, environ):
+        user = None
+        http_auth = environ.get("HTTP_AUTHORIZATION")
+        if http_auth and http_auth.lower().startswith('basic'):
+            auth = http_auth.split(" ", 1)
+            if len(auth) == 2:
+                try:
+                    # b64decode doesn't accept unicode in Python < 3.3
+                    # so we need to convert it to a byte string
+                    auth = base64.b64decode(auth[1].strip().encode('utf-8'))
+                    # b64decode returns a byte string
+                    user = auth.split(b":", 1)[0].decode("UTF-8")
+                except (TypeError, binascii.Error, UnicodeDecodeError) as exc:
+                    self.debug("Couldn't get username: %s", exc)
+        return user
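+
+    # Sketch: an "Authorization: Basic dXNlcjpwYXNz" header decodes to
+    # b"user:pass", so _get_user returns "user".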
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/http/__init__.py b/.venv/lib/python3.12/site-packages/gunicorn/http/__init__.py
new file mode 100644
index 00000000..1da6f3ec
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/http/__init__.py
@@ -0,0 +1,9 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+from gunicorn.http.message import Message, Request
+from gunicorn.http.parser import RequestParser
+
+__all__ = ['Message', 'Request', 'RequestParser']
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/http/body.py b/.venv/lib/python3.12/site-packages/gunicorn/http/body.py
new file mode 100644
index 00000000..aa1af2cb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/http/body.py
@@ -0,0 +1,262 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+import io
+import sys
+
+from gunicorn.http.errors import (NoMoreData, ChunkMissingTerminator,
+                                  InvalidChunkSize)
+
+
+class ChunkedReader(object):
+    def __init__(self, req, unreader):
+        self.req = req
+        self.parser = self.parse_chunked(unreader)
+        self.buf = io.BytesIO()
+
+    def read(self, size):
+        if not isinstance(size, int):
+            raise TypeError("size must be an integer type")
+        if size < 0:
+            raise ValueError("Size must be non-negative.")
+        if size == 0:
+            return b""
+
+        if self.parser:
+            while self.buf.tell() < size:
+                try:
+                    self.buf.write(next(self.parser))
+                except StopIteration:
+                    self.parser = None
+                    break
+
+        data = self.buf.getvalue()
+        ret, rest = data[:size], data[size:]
+        self.buf = io.BytesIO()
+        self.buf.write(rest)
+        return ret
+
+    def parse_trailers(self, unreader, data):
+        buf = io.BytesIO()
+        buf.write(data)
+
+        idx = buf.getvalue().find(b"\r\n\r\n")
+        done = buf.getvalue()[:2] == b"\r\n"
+        while idx < 0 and not done:
+            self.get_data(unreader, buf)
+            idx = buf.getvalue().find(b"\r\n\r\n")
+            done = buf.getvalue()[:2] == b"\r\n"
+        if done:
+            unreader.unread(buf.getvalue()[2:])
+            return b""
+        self.req.trailers = self.req.parse_headers(buf.getvalue()[:idx])
+        unreader.unread(buf.getvalue()[idx + 4:])
+
+    def parse_chunked(self, unreader):
+        (size, rest) = self.parse_chunk_size(unreader)
+        while size > 0:
+            while size > len(rest):
+                size -= len(rest)
+                yield rest
+                rest = unreader.read()
+                if not rest:
+                    raise NoMoreData()
+            yield rest[:size]
+            # Remove \r\n after chunk
+            rest = rest[size:]
+            while len(rest) < 2:
+                rest += unreader.read()
+            if rest[:2] != b'\r\n':
+                raise ChunkMissingTerminator(rest[:2])
+            (size, rest) = self.parse_chunk_size(unreader, data=rest[2:])
+
+    def parse_chunk_size(self, unreader, data=None):
+        buf = io.BytesIO()
+        if data is not None:
+            buf.write(data)
+
+        idx = buf.getvalue().find(b"\r\n")
+        while idx < 0:
+            self.get_data(unreader, buf)
+            idx = buf.getvalue().find(b"\r\n")
+
+        data = buf.getvalue()
+        line, rest_chunk = data[:idx], data[idx + 2:]
+
+        chunk_size = line.split(b";", 1)[0].strip()
+        try:
+            chunk_size = int(chunk_size, 16)
+        except ValueError:
+            raise InvalidChunkSize(chunk_size)
+
+        if chunk_size == 0:
+            try:
+                self.parse_trailers(unreader, rest_chunk)
+            except NoMoreData:
+                pass
+            return (0, None)
+        return (chunk_size, rest_chunk)
+
+    def get_data(self, unreader, buf):
+        data = unreader.read()
+        if not data:
+            raise NoMoreData()
+        buf.write(data)
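+
+    # Wire-format reminder (RFC 7230 section 4.1) of what parse_chunked consumes:
+    #   b"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n"  ->  b"Wikipedia"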
+
+
+class LengthReader(object):
+    def __init__(self, unreader, length):
+        self.unreader = unreader
+        self.length = length
+
+    def read(self, size):
+        if not isinstance(size, int):
+            raise TypeError("size must be an integral type")
+
+        size = min(self.length, size)
+        if size < 0:
+            raise ValueError("Size must be non-negative.")
+        if size == 0:
+            return b""
+
+        buf = io.BytesIO()
+        data = self.unreader.read()
+        while data:
+            buf.write(data)
+            if buf.tell() >= size:
+                break
+            data = self.unreader.read()
+
+        buf = buf.getvalue()
+        ret, rest = buf[:size], buf[size:]
+        self.unreader.unread(rest)
+        self.length -= size
+        return ret
+
+
+class EOFReader(object):
+    def __init__(self, unreader):
+        self.unreader = unreader
+        self.buf = io.BytesIO()
+        self.finished = False
+
+    def read(self, size):
+        if not isinstance(size, int):
+            raise TypeError("size must be an integral type")
+        if size < 0:
+            raise ValueError("Size must be non-negative.")
+        if size == 0:
+            return b""
+
+        if self.finished:
+            data = self.buf.getvalue()
+            ret, rest = data[:size], data[size:]
+            self.buf = io.BytesIO()
+            self.buf.write(rest)
+            return ret
+
+        data = self.unreader.read()
+        while data:
+            self.buf.write(data)
+            if self.buf.tell() > size:
+                break
+            data = self.unreader.read()
+
+        if not data:
+            self.finished = True
+
+        data = self.buf.getvalue()
+        ret, rest = data[:size], data[size:]
+        self.buf = io.BytesIO()
+        self.buf.write(rest)
+        return ret
+
+
+class Body(object):
+    def __init__(self, reader):
+        self.reader = reader
+        self.buf = io.BytesIO()
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        ret = self.readline()
+        if not ret:
+            raise StopIteration()
+        return ret
+
+    next = __next__
+
+    def getsize(self, size):
+        if size is None:
+            return sys.maxsize
+        elif not isinstance(size, int):
+            raise TypeError("size must be an integral type")
+        elif size < 0:
+            return sys.maxsize
+        return size
+
+    def read(self, size=None):
+        size = self.getsize(size)
+        if size == 0:
+            return b""
+
+        if size < self.buf.tell():
+            data = self.buf.getvalue()
+            ret, rest = data[:size], data[size:]
+            self.buf = io.BytesIO()
+            self.buf.write(rest)
+            return ret
+
+        while size > self.buf.tell():
+            data = self.reader.read(1024)
+            if not data:
+                break
+            self.buf.write(data)
+
+        data = self.buf.getvalue()
+        ret, rest = data[:size], data[size:]
+        self.buf = io.BytesIO()
+        self.buf.write(rest)
+        return ret
+
+    def readline(self, size=None):
+        size = self.getsize(size)
+        if size == 0:
+            return b""
+
+        data = self.buf.getvalue()
+        self.buf = io.BytesIO()
+
+        ret = []
+        while 1:
+            idx = data.find(b"\n", 0, size)
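+            # idx: one past the newline when found; else `size` when enough
+            # data is buffered to satisfy the request; else 0 to keep reading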
+            idx = idx + 1 if idx >= 0 else size if len(data) >= size else 0
+            if idx:
+                ret.append(data[:idx])
+                self.buf.write(data[idx:])
+                break
+
+            ret.append(data)
+            size -= len(data)
+            data = self.reader.read(min(1024, size))
+            if not data:
+                break
+
+        return b"".join(ret)
+
+    def readlines(self, size=None):
+        ret = []
+        data = self.read()
+        while data:
+            pos = data.find(b"\n")
+            if pos < 0:
+                ret.append(data)
+                data = b""
+            else:
+                line, data = data[:pos + 1], data[pos + 1:]
+                ret.append(line)
+        return ret
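+
+# Usage sketch (IterUnreader stands in for a socket; illustrative only):
+#   from gunicorn.http.unreader import IterUnreader
+#   body = Body(LengthReader(IterUnreader([b"hello\nworld"]), 11))
+#   body.readline()  # b"hello\n"
+#   body.read()      # b"world"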
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/http/errors.py b/.venv/lib/python3.12/site-packages/gunicorn/http/errors.py
new file mode 100644
index 00000000..7839ef05
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/http/errors.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+# We don't need to call super() in __init__ methods of our
+# BaseException and Exception classes because we also define
+# our own __str__ methods so there is no need to pass 'message'
+# to the base class to get a meaningful output from 'str(exc)'.
+# pylint: disable=super-init-not-called
+
+
+class ParseException(Exception):
+    pass
+
+
+class NoMoreData(IOError):
+    def __init__(self, buf=None):
+        self.buf = buf
+
+    def __str__(self):
+        return "No more data after: %r" % self.buf
+
+
+class InvalidRequestLine(ParseException):
+    def __init__(self, req):
+        self.req = req
+        self.code = 400
+
+    def __str__(self):
+        return "Invalid HTTP request line: %r" % self.req
+
+
+class InvalidRequestMethod(ParseException):
+    def __init__(self, method):
+        self.method = method
+
+    def __str__(self):
+        return "Invalid HTTP method: %r" % self.method
+
+
+class InvalidHTTPVersion(ParseException):
+    def __init__(self, version):
+        self.version = version
+
+    def __str__(self):
+        return "Invalid HTTP Version: %r" % self.version
+
+
+class InvalidHeader(ParseException):
+    def __init__(self, hdr, req=None):
+        self.hdr = hdr
+        self.req = req
+
+    def __str__(self):
+        return "Invalid HTTP Header: %r" % self.hdr
+
+
+class InvalidHeaderName(ParseException):
+    def __init__(self, hdr):
+        self.hdr = hdr
+
+    def __str__(self):
+        return "Invalid HTTP header name: %r" % self.hdr
+
+
+class InvalidChunkSize(IOError):
+    def __init__(self, data):
+        self.data = data
+
+    def __str__(self):
+        return "Invalid chunk size: %r" % self.data
+
+
+class ChunkMissingTerminator(IOError):
+    def __init__(self, term):
+        self.term = term
+
+    def __str__(self):
+        return "Invalid chunk terminator is not '\\r\\n': %r" % self.term
+
+
+class LimitRequestLine(ParseException):
+    def __init__(self, size, max_size):
+        self.size = size
+        self.max_size = max_size
+
+    def __str__(self):
+        return "Request Line is too large (%s > %s)" % (self.size, self.max_size)
+
+
+class LimitRequestHeaders(ParseException):
+    def __init__(self, msg):
+        self.msg = msg
+
+    def __str__(self):
+        return self.msg
+
+
+class InvalidProxyLine(ParseException):
+    def __init__(self, line):
+        self.line = line
+        self.code = 400
+
+    def __str__(self):
+        return "Invalid PROXY line: %r" % self.line
+
+
+class ForbiddenProxyRequest(ParseException):
+    def __init__(self, host):
+        self.host = host
+        self.code = 403
+
+    def __str__(self):
+        return "Proxy request from %r not allowed" % self.host
+
+
+class InvalidSchemeHeaders(ParseException):
+    def __str__(self):
+        return "Contradictory scheme headers"
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/http/message.py b/.venv/lib/python3.12/site-packages/gunicorn/http/message.py
new file mode 100644
index 00000000..1f93c714
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/http/message.py
@@ -0,0 +1,360 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+import io
+import re
+import socket
+
+from gunicorn.http.body import ChunkedReader, LengthReader, EOFReader, Body
+from gunicorn.http.errors import (
+    InvalidHeader, InvalidHeaderName, NoMoreData,
+    InvalidRequestLine, InvalidRequestMethod, InvalidHTTPVersion,
+    LimitRequestLine, LimitRequestHeaders,
+)
+from gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest
+from gunicorn.http.errors import InvalidSchemeHeaders
+from gunicorn.util import bytes_to_str, split_request_uri
+
+MAX_REQUEST_LINE = 8190
+MAX_HEADERS = 32768
+DEFAULT_MAX_HEADERFIELD_SIZE = 8190
+
+HEADER_RE = re.compile(r"[\x00-\x1F\x7F()<>@,;:\[\]={} \t\\\"]")
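+# Note: in METH_RE below, "$-_" spans a character range (0x24-0x5F), not
+# three literals, so the class matches more characters than it appears to.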
+METH_RE = re.compile(r"[A-Z0-9$-_.]{3,20}")
+VERSION_RE = re.compile(r"HTTP/(\d+)\.(\d+)")
+
+
+class Message(object):
+    def __init__(self, cfg, unreader, peer_addr):
+        self.cfg = cfg
+        self.unreader = unreader
+        self.peer_addr = peer_addr
+        self.remote_addr = peer_addr
+        self.version = None
+        self.headers = []
+        self.trailers = []
+        self.body = None
+        self.scheme = "https" if cfg.is_ssl else "http"
+
+        # set headers limits
+        self.limit_request_fields = cfg.limit_request_fields
+        if (self.limit_request_fields <= 0
+                or self.limit_request_fields > MAX_HEADERS):
+            self.limit_request_fields = MAX_HEADERS
+        self.limit_request_field_size = cfg.limit_request_field_size
+        if self.limit_request_field_size < 0:
+            self.limit_request_field_size = DEFAULT_MAX_HEADERFIELD_SIZE
+
+        # set max header buffer size
+        max_header_field_size = self.limit_request_field_size or DEFAULT_MAX_HEADERFIELD_SIZE
+        self.max_buffer_headers = self.limit_request_fields * \
+            (max_header_field_size + 2) + 4
+
+        unused = self.parse(self.unreader)
+        self.unreader.unread(unused)
+        self.set_body_reader()
+
+    def parse(self, unreader):
+        raise NotImplementedError()
+
+    def parse_headers(self, data):
+        cfg = self.cfg
+        headers = []
+
+        # Split lines on \r\n keeping the \r\n on each line
+        lines = [bytes_to_str(line) + "\r\n" for line in data.split(b"\r\n")]
+
+        # handle scheme headers
+        scheme_header = False
+        secure_scheme_headers = {}
+        if ('*' in cfg.forwarded_allow_ips or
+            not isinstance(self.peer_addr, tuple)
+                or self.peer_addr[0] in cfg.forwarded_allow_ips):
+            secure_scheme_headers = cfg.secure_scheme_headers
+
+        # Parse headers into key/value pairs paying attention
+        # to continuation lines.
+        while lines:
+            if len(headers) >= self.limit_request_fields:
+                raise LimitRequestHeaders("limit request headers fields")
+
+            # Parse initial header name : value pair.
+            curr = lines.pop(0)
+            header_length = len(curr)
+            if curr.find(":") < 0:
+                raise InvalidHeader(curr.strip())
+            name, value = curr.split(":", 1)
+            if self.cfg.strip_header_spaces:
+                name = name.rstrip(" \t").upper()
+            else:
+                name = name.upper()
+            if HEADER_RE.search(name):
+                raise InvalidHeaderName(name)
+
+            name, value = name.strip(), [value.lstrip()]
+
+            # Consume value continuation lines
+            while lines and lines[0].startswith((" ", "\t")):
+                curr = lines.pop(0)
+                header_length += len(curr)
+                if header_length > self.limit_request_field_size > 0:
+                    raise LimitRequestHeaders("limit request headers "
+                                              "fields size")
+                value.append(curr)
+            value = ''.join(value).rstrip()
+
+            if header_length > self.limit_request_field_size > 0:
+                raise LimitRequestHeaders("limit request headers fields size")
+
+            if name in secure_scheme_headers:
+                secure = value == secure_scheme_headers[name]
+                scheme = "https" if secure else "http"
+                if scheme_header:
+                    if scheme != self.scheme:
+                        raise InvalidSchemeHeaders()
+                else:
+                    scheme_header = True
+                    self.scheme = scheme
+
+            headers.append((name, value))
+
+        return headers
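+
+    # Sketch: b"Host: example.com\r\nX-Long: a\r\n\tb" parses to
+    #   [("HOST", "example.com"), ("X-LONG", "a\r\n\tb")]
+    # (continuation lines are joined and the trailing CRLF stripped).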
+
+    def set_body_reader(self):
+        chunked = False
+        content_length = None
+
+        for (name, value) in self.headers:
+            if name == "CONTENT-LENGTH":
+                if content_length is not None:
+                    raise InvalidHeader("CONTENT-LENGTH", req=self)
+                content_length = value
+            elif name == "TRANSFER-ENCODING":
+                if value.lower() == "chunked":
+                    chunked = True
+
+        if chunked:
+            self.body = Body(ChunkedReader(self, self.unreader))
+        elif content_length is not None:
+            try:
+                if str(content_length).isnumeric():
+                    content_length = int(content_length)
+                else:
+                    raise InvalidHeader("CONTENT-LENGTH", req=self)
+            except ValueError:
+                raise InvalidHeader("CONTENT-LENGTH", req=self)
+
+            if content_length < 0:
+                raise InvalidHeader("CONTENT-LENGTH", req=self)
+
+            self.body = Body(LengthReader(self.unreader, content_length))
+        else:
+            self.body = Body(EOFReader(self.unreader))
+
+    def should_close(self):
+        for (h, v) in self.headers:
+            if h == "CONNECTION":
+                v = v.lower().strip()
+                if v == "close":
+                    return True
+                elif v == "keep-alive":
+                    return False
+                break
+        return self.version <= (1, 0)
+
+
+class Request(Message):
+    def __init__(self, cfg, unreader, peer_addr, req_number=1):
+        self.method = None
+        self.uri = None
+        self.path = None
+        self.query = None
+        self.fragment = None
+
+        # get max request line size
+        self.limit_request_line = cfg.limit_request_line
+        if (self.limit_request_line < 0
+                or self.limit_request_line >= MAX_REQUEST_LINE):
+            self.limit_request_line = MAX_REQUEST_LINE
+
+        self.req_number = req_number
+        self.proxy_protocol_info = None
+        super().__init__(cfg, unreader, peer_addr)
+
+    def get_data(self, unreader, buf, stop=False):
+        data = unreader.read()
+        if not data:
+            if stop:
+                raise StopIteration()
+            raise NoMoreData(buf.getvalue())
+        buf.write(data)
+
+    def parse(self, unreader):
+        buf = io.BytesIO()
+        self.get_data(unreader, buf, stop=True)
+
+        # get request line
+        line, rbuf = self.read_line(unreader, buf, self.limit_request_line)
+
+        # proxy protocol
+        if self.proxy_protocol(bytes_to_str(line)):
+            # get next request line
+            buf = io.BytesIO()
+            buf.write(rbuf)
+            line, rbuf = self.read_line(unreader, buf, self.limit_request_line)
+
+        self.parse_request_line(line)
+        buf = io.BytesIO()
+        buf.write(rbuf)
+
+        # Headers
+        data = buf.getvalue()
+        while True:
+            idx = data.find(b"\r\n\r\n")
+            done = data[:2] == b"\r\n"
+
+            if idx < 0 and not done:
+                self.get_data(unreader, buf)
+                data = buf.getvalue()
+                if len(data) > self.max_buffer_headers:
+                    raise LimitRequestHeaders("max buffer headers")
+            else:
+                break
+
+        if done:
+            self.unreader.unread(data[2:])
+            return b""
+
+        self.headers = self.parse_headers(data[:idx])
+
+        ret = data[idx + 4:]
+        buf = None
+        return ret
+
+    def read_line(self, unreader, buf, limit=0):
+        data = buf.getvalue()
+
+        while True:
+            idx = data.find(b"\r\n")
+            if idx >= 0:
+                # check if the request line is too large
+                if idx > limit > 0:
+                    raise LimitRequestLine(idx, limit)
+                break
+            if len(data) - 2 > limit > 0:
+                raise LimitRequestLine(len(data), limit)
+            self.get_data(unreader, buf)
+            data = buf.getvalue()
+
+        return (data[:idx],  # request line
+                data[idx + 2:])  # residue in the buffer, skipping the \r\n
+
+    def proxy_protocol(self, line):
+        """\
+        Detect, check and parse proxy protocol.
+
+        :raises: ForbiddenProxyRequest, InvalidProxyLine.
+        :return: True for proxy protocol line else False
+        """
+        if not self.cfg.proxy_protocol:
+            return False
+
+        if self.req_number != 1:
+            return False
+
+        if not line.startswith("PROXY"):
+            return False
+
+        self.proxy_protocol_access_check()
+        self.parse_proxy_protocol(line)
+
+        return True
+
+    def proxy_protocol_access_check(self):
+        # check in allow list
+        if ("*" not in self.cfg.proxy_allow_ips and
+            isinstance(self.peer_addr, tuple) and
+                self.peer_addr[0] not in self.cfg.proxy_allow_ips):
+            raise ForbiddenProxyRequest(self.peer_addr[0])
+
+    def parse_proxy_protocol(self, line):
+        bits = line.split()
+
+        if len(bits) != 6:
+            raise InvalidProxyLine(line)
+
+        # Extract data
+        proto = bits[1]
+        s_addr = bits[2]
+        d_addr = bits[3]
+
+        # Validation
+        if proto not in ["TCP4", "TCP6"]:
+            raise InvalidProxyLine("protocol '%s' not supported" % proto)
+        if proto == "TCP4":
+            try:
+                socket.inet_pton(socket.AF_INET, s_addr)
+                socket.inet_pton(socket.AF_INET, d_addr)
+            except socket.error:
+                raise InvalidProxyLine(line)
+        elif proto == "TCP6":
+            try:
+                socket.inet_pton(socket.AF_INET6, s_addr)
+                socket.inet_pton(socket.AF_INET6, d_addr)
+            except socket.error:
+                raise InvalidProxyLine(line)
+
+        try:
+            s_port = int(bits[4])
+            d_port = int(bits[5])
+        except ValueError:
+            raise InvalidProxyLine("invalid port %s" % line)
+
+        if not ((0 <= s_port <= 65535) and (0 <= d_port <= 65535)):
+            raise InvalidProxyLine("invalid port %s" % line)
+
+        # Set data
+        self.proxy_protocol_info = {
+            "proxy_protocol": proto,
+            "client_addr": s_addr,
+            "client_port": s_port,
+            "proxy_addr": d_addr,
+            "proxy_port": d_port
+        }
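+
+    # Sketch: "PROXY TCP4 192.168.0.1 192.168.0.11 56324 443" yields
+    #   {"proxy_protocol": "TCP4",
+    #    "client_addr": "192.168.0.1", "client_port": 56324,
+    #    "proxy_addr": "192.168.0.11", "proxy_port": 443}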
+
+    def parse_request_line(self, line_bytes):
+        bits = [bytes_to_str(bit) for bit in line_bytes.split(None, 2)]
+        if len(bits) != 3:
+            raise InvalidRequestLine(bytes_to_str(line_bytes))
+
+        # Method
+        if not METH_RE.match(bits[0]):
+            raise InvalidRequestMethod(bits[0])
+        self.method = bits[0].upper()
+
+        # URI
+        self.uri = bits[1]
+
+        try:
+            parts = split_request_uri(self.uri)
+        except ValueError:
+            raise InvalidRequestLine(bytes_to_str(line_bytes))
+        self.path = parts.path or ""
+        self.query = parts.query or ""
+        self.fragment = parts.fragment or ""
+
+        # Version
+        match = VERSION_RE.match(bits[2])
+        if match is None:
+            raise InvalidHTTPVersion(bits[2])
+        self.version = (int(match.group(1)), int(match.group(2)))
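+
+    # Sketch: b"GET /p?q=1 HTTP/1.1" gives method "GET", path "/p",
+    # query "q=1", fragment "", version (1, 1).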
+
+    def set_body_reader(self):
+        super().set_body_reader()
+        if isinstance(self.body.reader, EOFReader):
+            self.body = Body(LengthReader(self.unreader, 0))
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/http/parser.py b/.venv/lib/python3.12/site-packages/gunicorn/http/parser.py
new file mode 100644
index 00000000..5d689f06
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/http/parser.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+from gunicorn.http.message import Request
+from gunicorn.http.unreader import SocketUnreader, IterUnreader
+
+
+class Parser(object):
+
+    mesg_class = None
+
+    def __init__(self, cfg, source, source_addr):
+        self.cfg = cfg
+        if hasattr(source, "recv"):
+            self.unreader = SocketUnreader(source)
+        else:
+            self.unreader = IterUnreader(source)
+        self.mesg = None
+        self.source_addr = source_addr
+
+        # request counter (for keepalive connections)
+        self.req_count = 0
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        # Stop if HTTP dictates a stop.
+        if self.mesg and self.mesg.should_close():
+            raise StopIteration()
+
+        # Discard any unread body of the previous message
+        if self.mesg:
+            data = self.mesg.body.read(8192)
+            while data:
+                data = self.mesg.body.read(8192)
+
+        # Parse the next request
+        self.req_count += 1
+        self.mesg = self.mesg_class(self.cfg, self.unreader, self.source_addr, self.req_count)
+        if not self.mesg:
+            raise StopIteration()
+        return self.mesg
+
+    next = __next__
+
+
+class RequestParser(Parser):
+
+    mesg_class = Request
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/http/unreader.py b/.venv/lib/python3.12/site-packages/gunicorn/http/unreader.py
new file mode 100644
index 00000000..273bfc31
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/http/unreader.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+import io
+import os
+
+# Classes that can undo reading data from
+# a given type of data source.
+
+
+class Unreader(object):
+    def __init__(self):
+        self.buf = io.BytesIO()
+
+    def chunk(self):
+        raise NotImplementedError()
+
+    def read(self, size=None):
+        if size is not None and not isinstance(size, int):
+            raise TypeError("size parameter must be an int or long.")
+
+        if size is not None:
+            if size == 0:
+                return b""
+            if size < 0:
+                size = None
+
+        self.buf.seek(0, os.SEEK_END)
+
+        if size is None and self.buf.tell():
+            ret = self.buf.getvalue()
+            self.buf = io.BytesIO()
+            return ret
+        if size is None:
+            d = self.chunk()
+            return d
+
+        while self.buf.tell() < size:
+            chunk = self.chunk()
+            if not chunk:
+                ret = self.buf.getvalue()
+                self.buf = io.BytesIO()
+                return ret
+            self.buf.write(chunk)
+        data = self.buf.getvalue()
+        self.buf = io.BytesIO()
+        self.buf.write(data[size:])
+        return data[:size]
+
+    def unread(self, data):
+        self.buf.seek(0, os.SEEK_END)
+        self.buf.write(data)
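+
+    # Sketch of the unread contract (note that unread appends to the buffer):
+    #   u = IterUnreader([b"abcdef"])
+    #   u.read(4)        # b"abcd" (b"ef" stays buffered)
+    #   u.unread(b"GH")
+    #   u.read()         # b"efGH"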
+
+
+class SocketUnreader(Unreader):
+    def __init__(self, sock, max_chunk=8192):
+        super().__init__()
+        self.sock = sock
+        self.mxchunk = max_chunk
+
+    def chunk(self):
+        return self.sock.recv(self.mxchunk)
+
+
+class IterUnreader(Unreader):
+    def __init__(self, iterable):
+        super().__init__()
+        self.iter = iter(iterable)
+
+    def chunk(self):
+        if not self.iter:
+            return b""
+        try:
+            return next(self.iter)
+        except StopIteration:
+            self.iter = None
+            return b""
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/http/wsgi.py b/.venv/lib/python3.12/site-packages/gunicorn/http/wsgi.py
new file mode 100644
index 00000000..25715eab
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/http/wsgi.py
@@ -0,0 +1,393 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+import io
+import logging
+import os
+import re
+import sys
+
+from gunicorn.http.message import HEADER_RE
+from gunicorn.http.errors import InvalidHeader, InvalidHeaderName
+from gunicorn import SERVER_SOFTWARE, SERVER
+from gunicorn import util
+
+# Send files in at most 1GB blocks as some operating systems can have problems
+# with sending files in blocks over 2GB.
+BLKSIZE = 0x3FFFFFFF
+
+HEADER_VALUE_RE = re.compile(r'[\x00-\x1F\x7F]')
+
+log = logging.getLogger(__name__)
+
+
+class FileWrapper(object):
+
+    def __init__(self, filelike, blksize=8192):
+        self.filelike = filelike
+        self.blksize = blksize
+        if hasattr(filelike, 'close'):
+            self.close = filelike.close
+
+    def __getitem__(self, key):
+        data = self.filelike.read(self.blksize)
+        if data:
+            return data
+        raise IndexError
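+
+    # FileWrapper predates __iter__-based iteration: callers drive it via
+    # the sequence protocol until IndexError, e.g. (illustrative)
+    #   b"".join(FileWrapper(io.BytesIO(b"data"), 2)) == b"data"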
+
+
+class WSGIErrorsWrapper(io.RawIOBase):
+
+    def __init__(self, cfg):
+        # There is no public __init__ method for RawIOBase so
+        # we don't need to call super() in the __init__ method.
+        # pylint: disable=super-init-not-called
+        errorlog = logging.getLogger("gunicorn.error")
+        handlers = errorlog.handlers
+        self.streams = []
+
+        if cfg.errorlog == "-":
+            self.streams.append(sys.stderr)
+            handlers = handlers[1:]
+
+        for h in handlers:
+            if hasattr(h, "stream"):
+                self.streams.append(h.stream)
+
+    def write(self, data):
+        for stream in self.streams:
+            try:
+                stream.write(data)
+            except UnicodeError:
+                stream.write(data.encode("UTF-8"))
+            stream.flush()
+
+
+def base_environ(cfg):
+    return {
+        "wsgi.errors": WSGIErrorsWrapper(cfg),
+        "wsgi.version": (1, 0),
+        "wsgi.multithread": False,
+        "wsgi.multiprocess": (cfg.workers > 1),
+        "wsgi.run_once": False,
+        "wsgi.file_wrapper": FileWrapper,
+        "wsgi.input_terminated": True,
+        "SERVER_SOFTWARE": SERVER_SOFTWARE,
+    }
+
+
+def default_environ(req, sock, cfg):
+    env = base_environ(cfg)
+    env.update({
+        "wsgi.input": req.body,
+        "gunicorn.socket": sock,
+        "REQUEST_METHOD": req.method,
+        "QUERY_STRING": req.query,
+        "RAW_URI": req.uri,
+        "SERVER_PROTOCOL": "HTTP/%s" % ".".join([str(v) for v in req.version])
+    })
+    return env
+
+
+def proxy_environ(req):
+    info = req.proxy_protocol_info
+
+    if not info:
+        return {}
+
+    return {
+        "PROXY_PROTOCOL": info["proxy_protocol"],
+        "REMOTE_ADDR": info["client_addr"],
+        "REMOTE_PORT": str(info["client_port"]),
+        "PROXY_ADDR": info["proxy_addr"],
+        "PROXY_PORT": str(info["proxy_port"]),
+    }
+
+
+def create(req, sock, client, server, cfg):
+    resp = Response(req, sock, cfg)
+
+    # set initial environ
+    environ = default_environ(req, sock, cfg)
+
+    # default variables
+    host = None
+    script_name = os.environ.get("SCRIPT_NAME", "")
+
+    # add the headers to the environ
+    for hdr_name, hdr_value in req.headers:
+        if hdr_name == "EXPECT":
+            # handle expect
+            if hdr_value.lower() == "100-continue":
+                sock.send(b"HTTP/1.1 100 Continue\r\n\r\n")
+        elif hdr_name == 'HOST':
+            host = hdr_value
+        elif hdr_name == "SCRIPT_NAME":
+            script_name = hdr_value
+        elif hdr_name == "CONTENT-TYPE":
+            environ['CONTENT_TYPE'] = hdr_value
+            continue
+        elif hdr_name == "CONTENT-LENGTH":
+            environ['CONTENT_LENGTH'] = hdr_value
+            continue
+
+        key = 'HTTP_' + hdr_name.replace('-', '_')
+        if key in environ:
+            hdr_value = "%s,%s" % (environ[key], hdr_value)
+        environ[key] = hdr_value
+
+    # set the url scheme
+    environ['wsgi.url_scheme'] = req.scheme
+
+    # set the REMOTE_* keys in environ
+    # authors should be aware that REMOTE_HOST and REMOTE_ADDR
+    # may not qualify the remote addr:
+    # http://www.ietf.org/rfc/rfc3875
+    if isinstance(client, str):
+        environ['REMOTE_ADDR'] = client
+    elif isinstance(client, bytes):
+        environ['REMOTE_ADDR'] = client.decode()
+    else:
+        environ['REMOTE_ADDR'] = client[0]
+        environ['REMOTE_PORT'] = str(client[1])
+
+    # handle the SERVER_*
+    # Normally only the application should use the Host header but since the
+    # WSGI spec doesn't support unix sockets, we are using it to create
+    # viable SERVER_* if possible.
+    if isinstance(server, str):
+        server = server.split(":")
+        if len(server) == 1:
+            # unix socket
+            if host:
+                server = host.split(':')
+                if len(server) == 1:
+                    if req.scheme == "http":
+                        server.append(80)
+                    elif req.scheme == "https":
+                        server.append(443)
+                    else:
+                        server.append('')
+            else:
+                # no host header given which means that we are not behind a
+                # proxy, so append an empty port.
+                server.append('')
+    environ['SERVER_NAME'] = server[0]
+    environ['SERVER_PORT'] = str(server[1])
+
+    # set the path and script name
+    path_info = req.path
+    if script_name:
+        path_info = path_info.split(script_name, 1)[1]
+    environ['PATH_INFO'] = util.unquote_to_wsgi_str(path_info)
+    environ['SCRIPT_NAME'] = script_name
+
+    # override the environ with the correct remote and server address if
+    # we are behind a proxy using the proxy protocol.
+    environ.update(proxy_environ(req))
+    return resp, environ
+
+
+class Response(object):
+
+    def __init__(self, req, sock, cfg):
+        self.req = req
+        self.sock = sock
+        self.version = SERVER
+        self.status = None
+        self.chunked = False
+        self.must_close = False
+        self.headers = []
+        self.headers_sent = False
+        self.response_length = None
+        self.sent = 0
+        self.upgrade = False
+        self.cfg = cfg
+
+    def force_close(self):
+        self.must_close = True
+
+    def should_close(self):
+        if self.must_close or self.req.should_close():
+            return True
+        if self.response_length is not None or self.chunked:
+            return False
+        if self.req.method == 'HEAD':
+            return False
+        if self.status_code < 200 or self.status_code in (204, 304):
+            return False
+        return True
+
+    def start_response(self, status, headers, exc_info=None):
+        if exc_info:
+            try:
+                if self.status and self.headers_sent:
+                    util.reraise(exc_info[0], exc_info[1], exc_info[2])
+            finally:
+                exc_info = None
+        elif self.status is not None:
+            raise AssertionError("Response headers already set!")
+
+        self.status = status
+
+        # get the status code from the response here so we can use it to check
+        # the need for the connection header later without parsing the string
+        # each time.
+        try:
+            self.status_code = int(self.status.split()[0])
+        except ValueError:
+            self.status_code = None
+
+        self.process_headers(headers)
+        self.chunked = self.is_chunked()
+        return self.write
+
+    def process_headers(self, headers):
+        for name, value in headers:
+            if not isinstance(name, str):
+                raise TypeError('%r is not a string' % name)
+
+            if HEADER_RE.search(name):
+                raise InvalidHeaderName('%r' % name)
+
+            if not isinstance(value, str):
+                raise TypeError('%r is not a string' % value)
+
+            if HEADER_VALUE_RE.search(value):
+                raise InvalidHeader('%r' % value)
+
+            value = value.strip()
+            lname = name.lower().strip()
+            if lname == "content-length":
+                self.response_length = int(value)
+            elif util.is_hoppish(name):
+                if lname == "connection":
+                    # handle websocket
+                    if value.lower().strip() == "upgrade":
+                        self.upgrade = True
+                elif lname == "upgrade":
+                    if value.lower().strip() == "websocket":
+                        self.headers.append((name.strip(), value))
+
+                # ignore hopbyhop headers
+                continue
+            self.headers.append((name.strip(), value))
+
+    def is_chunked(self):
+        # Only use chunked responses when the client is
+        # speaking HTTP/1.1 or newer and there was
+        # no Content-Length header set.
+        if self.response_length is not None:
+            return False
+        elif self.req.version <= (1, 0):
+            return False
+        elif self.req.method == 'HEAD':
+            # Responses to a HEAD request MUST NOT contain a response body.
+            return False
+        elif self.status_code in (204, 304):
+            # Do not use chunked responses when the response is guaranteed to
+            # not have a response body.
+            return False
+        return True
+
+    def default_headers(self):
+        # set the connection header
+        if self.upgrade:
+            connection = "upgrade"
+        elif self.should_close():
+            connection = "close"
+        else:
+            connection = "keep-alive"
+
+        headers = [
+            "HTTP/%s.%s %s\r\n" % (self.req.version[0],
+                                   self.req.version[1], self.status),
+            "Server: %s\r\n" % self.version,
+            "Date: %s\r\n" % util.http_date(),
+            "Connection: %s\r\n" % connection
+        ]
+        if self.chunked:
+            headers.append("Transfer-Encoding: chunked\r\n")
+        return headers
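+
+    # Sketch of the rendered head for an HTTP/1.1 keep-alive response:
+    #   HTTP/1.1 200 OK\r\n
+    #   Server: gunicorn\r\n
+    #   Date: <http_date()>\r\n
+    #   Connection: keep-alive\r\n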
+
+    def send_headers(self):
+        if self.headers_sent:
+            return
+        tosend = self.default_headers()
+        tosend.extend(["%s: %s\r\n" % (k, v) for k, v in self.headers])
+
+        header_str = "%s\r\n" % "".join(tosend)
+        util.write(self.sock, util.to_bytestring(header_str, "latin-1"))
+        self.headers_sent = True
+
+    def write(self, arg):
+        self.send_headers()
+        if not isinstance(arg, bytes):
+            raise TypeError('%r is not a bytestring' % arg)
+        arglen = len(arg)
+        tosend = arglen
+        if self.response_length is not None:
+            if self.sent >= self.response_length:
+                # Never write more than self.response_length bytes
+                return
+
+            tosend = min(self.response_length - self.sent, tosend)
+            if tosend < arglen:
+                arg = arg[:tosend]
+
+        # Sending an empty chunk signals the end of the
+        # response and prematurely closes the response
+        if self.chunked and tosend == 0:
+            return
+
+        self.sent += tosend
+        util.write(self.sock, arg, self.chunked)
+
+    def can_sendfile(self):
+        return self.cfg.sendfile is not False
+
+    def sendfile(self, respiter):
+        if self.cfg.is_ssl or not self.can_sendfile():
+            return False
+
+        if not util.has_fileno(respiter.filelike):
+            return False
+
+        fileno = respiter.filelike.fileno()
+        try:
+            offset = os.lseek(fileno, 0, os.SEEK_CUR)
+            if self.response_length is None:
+                filesize = os.fstat(fileno).st_size
+                nbytes = filesize - offset
+            else:
+                nbytes = self.response_length
+        except (OSError, io.UnsupportedOperation):
+            return False
+
+        self.send_headers()
+
+        if self.is_chunked():
+            chunk_size = "%X\r\n" % nbytes
+            self.sock.sendall(chunk_size.encode('utf-8'))
+        if nbytes > 0:
+            self.sock.sendfile(respiter.filelike, offset=offset, count=nbytes)
+
+        if self.is_chunked():
+            self.sock.sendall(b"\r\n")
+
+        os.lseek(fileno, offset, os.SEEK_SET)
+
+        return True
+
+    def write_file(self, respiter):
+        if not self.sendfile(respiter):
+            for item in respiter:
+                self.write(item)
+
+    def close(self):
+        if not self.headers_sent:
+            self.send_headers()
+        if self.chunked:
+            util.write_chunk(self.sock, b"")
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/instrument/__init__.py b/.venv/lib/python3.12/site-packages/gunicorn/instrument/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/instrument/__init__.py
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/instrument/statsd.py b/.venv/lib/python3.12/site-packages/gunicorn/instrument/statsd.py
new file mode 100644
index 00000000..2c54b2e7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/instrument/statsd.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+"Bare-bones implementation of statsD's protocol, client-side"
+
+import logging
+import socket
+from re import sub
+
+from gunicorn.glogging import Logger
+
+# Instrumentation constants
+METRIC_VAR = "metric"
+VALUE_VAR = "value"
+MTYPE_VAR = "mtype"
+GAUGE_TYPE = "gauge"
+COUNTER_TYPE = "counter"
+HISTOGRAM_TYPE = "histogram"
+
+
+class Statsd(Logger):
+    """statsD-based instrumentation, that passes as a logger
+    """
+    def __init__(self, cfg):
+        Logger.__init__(self, cfg)
+        self.prefix = sub(r"^(.+[^.]+)\.*$", "\\g<1>.", cfg.statsd_prefix)
+
+        if isinstance(cfg.statsd_host, str):
+            address_family = socket.AF_UNIX
+        else:
+            address_family = socket.AF_INET
+
+        try:
+            self.sock = socket.socket(address_family, socket.SOCK_DGRAM)
+            self.sock.connect(cfg.statsd_host)
+        except Exception:
+            self.sock = None
+
+        self.dogstatsd_tags = cfg.dogstatsd_tags
+
+    # Log errors and warnings
+    def critical(self, msg, *args, **kwargs):
+        Logger.critical(self, msg, *args, **kwargs)
+        self.increment("gunicorn.log.critical", 1)
+
+    def error(self, msg, *args, **kwargs):
+        Logger.error(self, msg, *args, **kwargs)
+        self.increment("gunicorn.log.error", 1)
+
+    def warning(self, msg, *args, **kwargs):
+        Logger.warning(self, msg, *args, **kwargs)
+        self.increment("gunicorn.log.warning", 1)
+
+    def exception(self, msg, *args, **kwargs):
+        Logger.exception(self, msg, *args, **kwargs)
+        self.increment("gunicorn.log.exception", 1)
+
+    # Special treatment for info, the most common log level
+    def info(self, msg, *args, **kwargs):
+        self.log(logging.INFO, msg, *args, **kwargs)
+
+    # skip the run-of-the-mill logs
+    def debug(self, msg, *args, **kwargs):
+        self.log(logging.DEBUG, msg, *args, **kwargs)
+
+    def log(self, lvl, msg, *args, **kwargs):
+        """Log a given statistic if metric, value and type are present
+        """
+        try:
+            extra = kwargs.get("extra", None)
+            if extra is not None:
+                metric = extra.get(METRIC_VAR, None)
+                value = extra.get(VALUE_VAR, None)
+                typ = extra.get(MTYPE_VAR, None)
+                if metric and value and typ:
+                    if typ == GAUGE_TYPE:
+                        self.gauge(metric, value)
+                    elif typ == COUNTER_TYPE:
+                        self.increment(metric, value)
+                    elif typ == HISTOGRAM_TYPE:
+                        self.histogram(metric, value)
+                    else:
+                        pass
+
+            # Log to parent logger only if there is something to say
+            if msg:
+                Logger.log(self, lvl, msg, *args, **kwargs)
+        except Exception:
+            Logger.warning(self, "Failed to log to statsd", exc_info=True)
+
+    # access logging
+    def access(self, resp, req, environ, request_time):
+        """Measure request duration
+        request_time is a datetime.timedelta
+        """
+        Logger.access(self, resp, req, environ, request_time)
+        duration_in_ms = request_time.seconds * 1000 + float(request_time.microseconds) / 10 ** 3
+        status = resp.status
+        if isinstance(status, str):
+            status = int(status.split(None, 1)[0])
+        self.histogram("gunicorn.request.duration", duration_in_ms)
+        self.increment("gunicorn.requests", 1)
+        self.increment("gunicorn.request.status.%d" % status, 1)
+
+    # statsD methods
+    # you can use those directly if you want
+    def gauge(self, name, value):
+        self._sock_send("{0}{1}:{2}|g".format(self.prefix, name, value))
+
+    def increment(self, name, value, sampling_rate=1.0):
+        self._sock_send("{0}{1}:{2}|c|@{3}".format(self.prefix, name, value, sampling_rate))
+
+    def decrement(self, name, value, sampling_rate=1.0):
+        self._sock_send("{0}{1}:-{2}|c|@{3}".format(self.prefix, name, value, sampling_rate))
+
+    def histogram(self, name, value):
+        self._sock_send("{0}{1}:{2}|ms".format(self.prefix, name, value))
+
+    def _sock_send(self, msg):
+        try:
+            if isinstance(msg, str):
+                msg = msg.encode("ascii")
+
+            # http://docs.datadoghq.com/guides/dogstatsd/#datagram-format
+            if self.dogstatsd_tags:
+                msg = msg + b"|#" + self.dogstatsd_tags.encode('ascii')
+
+            if self.sock:
+                self.sock.send(msg)
+        except Exception:
+            Logger.warning(self, "Error sending message to statsd", exc_info=True)
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/pidfile.py b/.venv/lib/python3.12/site-packages/gunicorn/pidfile.py
new file mode 100644
index 00000000..585b02af
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/pidfile.py
@@ -0,0 +1,86 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+import errno
+import os
+import tempfile
+
+
+class Pidfile(object):
+    """\
+    Manage a PID file. If a specific name is provided
+    it and '"%s.oldpid" % name' will be used. Otherwise
+    we create a temp file using os.mkstemp.
+    """
+
+    def __init__(self, fname):
+        self.fname = fname
+        self.pid = None
+
+    def create(self, pid):
+        oldpid = self.validate()
+        if oldpid:
+            if oldpid == os.getpid():
+                return
+            msg = "Already running on PID %s (or pid file '%s' is stale)"
+            raise RuntimeError(msg % (oldpid, self.fname))
+
+        self.pid = pid
+
+        # Write pidfile
+        fdir = os.path.dirname(self.fname)
+        if fdir and not os.path.isdir(fdir):
+            raise RuntimeError("%s doesn't exist. Can't create pidfile." % fdir)
+        fd, fname = tempfile.mkstemp(dir=fdir)
+        os.write(fd, ("%s\n" % self.pid).encode('utf-8'))
+        if self.fname:
+            os.rename(fname, self.fname)
+        else:
+            self.fname = fname
+        os.close(fd)
+
+        # set permissions to -rw-r--r--
+        os.chmod(self.fname, 0o644)
+
+    def rename(self, path):
+        self.unlink()
+        self.fname = path
+        self.create(self.pid)
+
+    def unlink(self):
+        """ delete pidfile"""
+        try:
+            with open(self.fname, "r") as f:
+                pid1 = int(f.read() or 0)
+
+            if pid1 == self.pid:
+                os.unlink(self.fname)
+        except Exception:
+            pass
+
+    def validate(self):
+        """ Validate pidfile and make it stale if needed"""
+        if not self.fname:
+            return
+        try:
+            with open(self.fname, "r") as f:
+                try:
+                    wpid = int(f.read())
+                except ValueError:
+                    return
+
+                try:
+                    os.kill(wpid, 0)
+                    return wpid
+                except OSError as e:
+                    if e.args[0] == errno.EPERM:
+                        return wpid
+                    if e.args[0] == errno.ESRCH:
+                        return
+                    raise
+        except IOError as e:
+            if e.args[0] == errno.ENOENT:
+                return
+            raise
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/reloader.py b/.venv/lib/python3.12/site-packages/gunicorn/reloader.py
new file mode 100644
index 00000000..88b540bd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/reloader.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+# pylint: disable=no-else-continue
+
+import os
+import os.path
+import re
+import sys
+import time
+import threading
+
+COMPILED_EXT_RE = re.compile(r'py[co]$')
+
+
+class Reloader(threading.Thread):
+    def __init__(self, extra_files=None, interval=1, callback=None):
+        super().__init__()
+        self.daemon = True
+        self._extra_files = set(extra_files or ())
+        self._interval = interval
+        self._callback = callback
+
+    def add_extra_file(self, filename):
+        self._extra_files.add(filename)
+
+    def get_files(self):
+        fnames = [
+            COMPILED_EXT_RE.sub('py', module.__file__)
+            for module in tuple(sys.modules.values())
+            if getattr(module, '__file__', None)
+        ]
+
+        fnames.extend(self._extra_files)
+
+        return fnames
+
+    def run(self):
+        mtimes = {}
+        while True:
+            for filename in self.get_files():
+                try:
+                    mtime = os.stat(filename).st_mtime
+                except OSError:
+                    continue
+                old_time = mtimes.get(filename)
+                if old_time is None:
+                    mtimes[filename] = mtime
+                    continue
+                elif mtime > old_time:
+                    if self._callback:
+                        self._callback(filename)
+            time.sleep(self._interval)
+
+
+has_inotify = False
+if sys.platform.startswith('linux'):
+    try:
+        from inotify.adapters import Inotify
+        import inotify.constants
+        has_inotify = True
+    except ImportError:
+        pass
+
+
+if has_inotify:
+
+    class InotifyReloader(threading.Thread):
+        event_mask = (inotify.constants.IN_CREATE | inotify.constants.IN_DELETE
+                      | inotify.constants.IN_DELETE_SELF | inotify.constants.IN_MODIFY
+                      | inotify.constants.IN_MOVE_SELF | inotify.constants.IN_MOVED_FROM
+                      | inotify.constants.IN_MOVED_TO)
+
+        def __init__(self, extra_files=None, callback=None):
+            super().__init__()
+            self.daemon = True
+            self._callback = callback
+            self._dirs = set()
+            self._watcher = Inotify()
+
+            for extra_file in extra_files:
+                self.add_extra_file(extra_file)
+
+        def add_extra_file(self, filename):
+            dirname = os.path.dirname(filename)
+
+            if dirname in self._dirs:
+                return
+
+            self._watcher.add_watch(dirname, mask=self.event_mask)
+            self._dirs.add(dirname)
+
+        def get_dirs(self):
+            fnames = [
+                os.path.dirname(os.path.abspath(COMPILED_EXT_RE.sub('py', module.__file__)))
+                for module in tuple(sys.modules.values())
+                if getattr(module, '__file__', None)
+            ]
+
+            return set(fnames)
+
+        def run(self):
+            self._dirs = self.get_dirs()
+
+            for dirname in self._dirs:
+                if os.path.isdir(dirname):
+                    self._watcher.add_watch(dirname, mask=self.event_mask)
+
+            for event in self._watcher.event_gen():
+                if event is None:
+                    continue
+
+                filename = event[3]
+
+                self._callback(filename)
+
+else:
+
+    class InotifyReloader(object):
+        def __init__(self, extra_files=None, callback=None):
+            raise ImportError('You must have the inotify module installed to '
+                              'use the inotify reloader')
+
+
+preferred_reloader = InotifyReloader if has_inotify else Reloader
+
+reloader_engines = {
+    'auto': preferred_reloader,
+    'poll': Reloader,
+    'inotify': InotifyReloader,
+}
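+# Illustrative wiring (the worker does the equivalent, keyed on
+# cfg.reload_engine; the print callback here is an assumption):
+#   reloader = reloader_engines['auto'](callback=lambda fname: print(fname))
+#   reloader.start()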
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/sock.py b/.venv/lib/python3.12/site-packages/gunicorn/sock.py
new file mode 100644
index 00000000..7700146a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/sock.py
@@ -0,0 +1,232 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+import errno
+import os
+import socket
+import ssl
+import stat
+import sys
+import time
+
+from gunicorn import util
+
+
+class BaseSocket(object):
+
+    def __init__(self, address, conf, log, fd=None):
+        self.log = log
+        self.conf = conf
+
+        self.cfg_addr = address
+        if fd is None:
+            sock = socket.socket(self.FAMILY, socket.SOCK_STREAM)
+            bound = False
+        else:
+            sock = socket.fromfd(fd, self.FAMILY, socket.SOCK_STREAM)
+            os.close(fd)
+            bound = True
+
+        self.sock = self.set_options(sock, bound=bound)
+
+    def __str__(self):
+        return "<socket %d>" % self.sock.fileno()
+
+    def __getattr__(self, name):
+        return getattr(self.sock, name)
+
+    def set_options(self, sock, bound=False):
+        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        if (self.conf.reuse_port
+                and hasattr(socket, 'SO_REUSEPORT')):  # pragma: no cover
+            try:
+                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+            except socket.error as err:
+                if err.errno not in (errno.ENOPROTOOPT, errno.EINVAL):
+                    raise
+        if not bound:
+            self.bind(sock)
+        sock.setblocking(0)
+
+        # make sure that the socket can be inherited
+        if hasattr(sock, "set_inheritable"):
+            sock.set_inheritable(True)
+
+        sock.listen(self.conf.backlog)
+        return sock
+
+    def bind(self, sock):
+        sock.bind(self.cfg_addr)
+
+    def close(self):
+        if self.sock is None:
+            return
+
+        try:
+            self.sock.close()
+        except socket.error as e:
+            self.log.info("Error while closing socket %s", str(e))
+
+        self.sock = None
+
+
+class TCPSocket(BaseSocket):
+
+    FAMILY = socket.AF_INET
+
+    def __str__(self):
+        if self.conf.is_ssl:
+            scheme = "https"
+        else:
+            scheme = "http"
+
+        addr = self.sock.getsockname()
+        return "%s://%s:%d" % (scheme, addr[0], addr[1])
+
+    def set_options(self, sock, bound=False):
+        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+        return super().set_options(sock, bound=bound)
+
+
+class TCP6Socket(TCPSocket):
+
+    FAMILY = socket.AF_INET6
+
+    def __str__(self):
+        (host, port, _, _) = self.sock.getsockname()
+        return "http://[%s]:%d" % (host, port)
+
+
+class UnixSocket(BaseSocket):
+
+    FAMILY = socket.AF_UNIX
+
+    def __init__(self, addr, conf, log, fd=None):
+        if fd is None:
+            try:
+                st = os.stat(addr)
+            except OSError as e:
+                if e.args[0] != errno.ENOENT:
+                    raise
+            else:
+                if stat.S_ISSOCK(st.st_mode):
+                    os.remove(addr)
+                else:
+                    raise ValueError("%r is not a socket" % addr)
+        super().__init__(addr, conf, log, fd=fd)
+
+    def __str__(self):
+        return "unix:%s" % self.cfg_addr
+
+    def bind(self, sock):
+        old_umask = os.umask(self.conf.umask)
+        sock.bind(self.cfg_addr)
+        util.chown(self.cfg_addr, self.conf.uid, self.conf.gid)
+        os.umask(old_umask)
+
+
+def _sock_type(addr):
+    if isinstance(addr, tuple):
+        if util.is_ipv6(addr[0]):
+            sock_type = TCP6Socket
+        else:
+            sock_type = TCPSocket
+    elif isinstance(addr, (str, bytes)):
+        sock_type = UnixSocket
+    else:
+        raise TypeError("Unable to create socket from: %r" % addr)
+    return sock_type
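+# e.g. ('0.0.0.0', 8000) -> TCPSocket, ('::1', 8000) -> TCP6Socket,
+# and '/tmp/gunicorn.sock' -> UnixSocket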
+
+
+def create_sockets(conf, log, fds=None):
+    """
+    Create a new socket for the configured addresses or file descriptors.
+
+    If a configured address is a tuple then a TCP socket is created.
+    If it is a string, a Unix socket is created. Otherwise, a TypeError is
+    raised.
+    """
+    listeners = []
+
+    # get it only once
+    addr = conf.address
+    fdaddr = [bind for bind in addr if isinstance(bind, int)]
+    if fds:
+        fdaddr += list(fds)
+    laddr = [bind for bind in addr if not isinstance(bind, int)]
+
+    # check ssl config early to raise the error on startup
+    # only the certfile is needed since it can contain the keyfile
+    if conf.certfile and not os.path.exists(conf.certfile):
+        raise ValueError('certfile "%s" does not exist' % conf.certfile)
+
+    if conf.keyfile and not os.path.exists(conf.keyfile):
+        raise ValueError('keyfile "%s" does not exist' % conf.keyfile)
+
+    # sockets are already bound
+    if fdaddr:
+        for fd in fdaddr:
+            sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM)
+            sock_name = sock.getsockname()
+            sock_type = _sock_type(sock_name)
+            listener = sock_type(sock_name, conf, log, fd=fd)
+            listeners.append(listener)
+
+        return listeners
+
+    # no sockets are bound; first initialization of gunicorn in this env.
+    for addr in laddr:
+        sock_type = _sock_type(addr)
+        sock = None
+        for i in range(5):
+            try:
+                sock = sock_type(addr, conf, log)
+            except socket.error as e:
+                if e.args[0] == errno.EADDRINUSE:
+                    log.error("Connection in use: %s", str(addr))
+                if e.args[0] == errno.EADDRNOTAVAIL:
+                    log.error("Invalid address: %s", str(addr))
+                if i + 1 < 5:  # skip the retry message and sleep on the final attempt
+                    msg = "connection to {addr} failed: {error}"
+                    log.debug(msg.format(addr=str(addr), error=str(e)))
+                    log.error("Retrying in 1 second.")
+                    time.sleep(1)
+            else:
+                break
+
+        if sock is None:
+            log.error("Can't connect to %s", str(addr))
+            sys.exit(1)
+
+        listeners.append(sock)
+
+    return listeners
+
+
+def close_sockets(listeners, unlink=True):
+    for sock in listeners:
+        sock_name = sock.getsockname()
+        sock.close()
+        if unlink and _sock_type(sock_name) is UnixSocket:
+            os.unlink(sock_name)
+
+
+def ssl_context(conf):
+    def default_ssl_context_factory():
+        context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=conf.ca_certs)
+        context.load_cert_chain(certfile=conf.certfile, keyfile=conf.keyfile)
+        context.verify_mode = conf.cert_reqs
+        if conf.ciphers:
+            context.set_ciphers(conf.ciphers)
+        return context
+
+    return conf.ssl_context(conf, default_ssl_context_factory)
+
+
+def ssl_wrap_socket(sock, conf):
+    return ssl_context(conf).wrap_socket(sock,
+                                         server_side=True,
+                                         suppress_ragged_eofs=conf.suppress_ragged_eofs,
+                                         do_handshake_on_connect=conf.do_handshake_on_connect)
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/systemd.py b/.venv/lib/python3.12/site-packages/gunicorn/systemd.py
new file mode 100644
index 00000000..5bc1a744
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/systemd.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+import os
+import socket
+
+SD_LISTEN_FDS_START = 3
+
+
+def listen_fds(unset_environment=True):
+    """
+    Get the number of sockets inherited from systemd socket activation.
+
+    :param unset_environment: clear systemd environment variables unless False
+    :type unset_environment: bool
+    :return: the number of sockets to inherit from systemd socket activation
+    :rtype: int
+
+    Returns zero immediately if $LISTEN_PID is not set to the current pid.
+    Otherwise, returns the number of systemd activation sockets specified by
+    $LISTEN_FDS.
+
+    When $LISTEN_PID matches the current pid, unsets the environment variables
+    unless the ``unset_environment`` flag is ``False``.
+
+    .. note::
+        Unlike the sd_listen_fds C function, this implementation does not set
+        the FD_CLOEXEC flag because the gunicorn arbiter never needs to do this.
+
+    .. seealso::
+        `<https://www.freedesktop.org/software/systemd/man/sd_listen_fds.html>`_
+
+    """
+    fds = int(os.environ.get('LISTEN_FDS', 0))
+    listen_pid = int(os.environ.get('LISTEN_PID', 0))
+
+    if listen_pid != os.getpid():
+        return 0
+
+    if unset_environment:
+        os.environ.pop('LISTEN_PID', None)
+        os.environ.pop('LISTEN_FDS', None)
+
+    return fds
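+# The inherited descriptors are numbered consecutively from
+# SD_LISTEN_FDS_START, so a caller can recover them with (illustrative):
+#   fds = range(SD_LISTEN_FDS_START, SD_LISTEN_FDS_START + listen_fds())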
+
+
+def sd_notify(state, logger, unset_environment=False):
+    """Send a notification to systemd. state is a string; see
+    the man page of sd_notify (http://www.freedesktop.org/software/systemd/man/sd_notify.html)
+    for a description of the allowable values.
+
+    If the unset_environment parameter is True, sd_notify() will unset
+    the $NOTIFY_SOCKET environment variable before returning (regardless of
+    whether the function call itself succeeded or not). Further calls to
+    sd_notify() will then fail, but the variable is no longer inherited by
+    child processes.
+    """
+
+    addr = os.environ.get('NOTIFY_SOCKET')
+    if addr is None:
+        # not run in a service, just a noop
+        return
+    sock = None
+    try:
+        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM | socket.SOCK_CLOEXEC)
+        if addr[0] == '@':
+            # abstract namespace socket: a leading '@' maps to a NUL byte
+            addr = '\0' + addr[1:]
+        sock.connect(addr)
+        sock.sendall(state.encode('utf-8'))
+    except Exception:
+        logger.debug("Exception while invoking sd_notify()", exc_info=True)
+    finally:
+        if unset_environment:
+            os.environ.pop('NOTIFY_SOCKET')
+        if sock is not None:
+            # guard: socket() itself may have raised before sock was bound
+            sock.close()
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/util.py b/.venv/lib/python3.12/site-packages/gunicorn/util.py
new file mode 100644
index 00000000..751deea7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/util.py
@@ -0,0 +1,654 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+import ast
+import email.utils
+import errno
+import fcntl
+import html
+import importlib
+import inspect
+import io
+import logging
+import os
+import pwd
+import random
+import re
+import socket
+import sys
+import textwrap
+import time
+import traceback
+import warnings
+
+try:
+    import importlib.metadata as importlib_metadata
+except ImportError:  # ModuleNotFoundError is a subclass of ImportError
+    import importlib_metadata
+
+from gunicorn.errors import AppImportError
+from gunicorn.workers import SUPPORTED_WORKERS
+import urllib.parse
+
+REDIRECT_TO = getattr(os, 'devnull', '/dev/null')
+
+# Server and Date aren't technically hop-by-hop
+# headers, but they are in the purview of the
+# origin server which the WSGI spec says we should
+# act like. So we drop them and add our own.
+#
+# In the future, concatenating server header values
+# might be better, but nothing else does it and
+# dropping them is easier.
+hop_headers = set("""
+    connection keep-alive proxy-authenticate proxy-authorization
+    te trailers transfer-encoding upgrade
+    server date
+    """.split())
+
+try:
+    from setproctitle import setproctitle
+
+    def _setproctitle(title):
+        setproctitle("gunicorn: %s" % title)
+except ImportError:
+    def _setproctitle(title):
+        pass
+
+
+def load_entry_point(distribution, group, name):
+    dist_obj = importlib_metadata.distribution(distribution)
+    eps = [ep for ep in dist_obj.entry_points
+           if ep.group == group and ep.name == name]
+    if not eps:
+        raise ImportError("Entry point %r not found" % ((group, name),))
+    return eps[0].load()
+
+
+def load_class(uri, default="gunicorn.workers.sync.SyncWorker",
+               section="gunicorn.workers"):
+    if inspect.isclass(uri):
+        return uri
+    if uri.startswith("egg:"):
+        # uses entry points
+        entry_str = uri.split("egg:")[1]
+        try:
+            dist, name = entry_str.rsplit("#", 1)
+        except ValueError:
+            dist = entry_str
+            name = default
+
+        try:
+            return load_entry_point(dist, section, name)
+        except Exception:
+            exc = traceback.format_exc()
+            msg = "class uri %r invalid or not found: \n\n[%s]"
+            raise RuntimeError(msg % (uri, exc))
+    else:
+        components = uri.split('.')
+        if len(components) == 1:
+            while True:
+                if uri.startswith("#"):
+                    uri = uri[1:]
+
+                if uri in SUPPORTED_WORKERS:
+                    components = SUPPORTED_WORKERS[uri].split(".")
+                    break
+
+                try:
+                    return load_entry_point(
+                        "gunicorn", section, uri
+                    )
+                except Exception:
+                    exc = traceback.format_exc()
+                    msg = "class uri %r invalid or not found: \n\n[%s]"
+                    raise RuntimeError(msg % (uri, exc))
+
+        klass = components.pop(-1)
+
+        try:
+            mod = importlib.import_module('.'.join(components))
+        except Exception:
+            exc = traceback.format_exc()
+            msg = "class uri %r invalid or not found: \n\n[%s]"
+            raise RuntimeError(msg % (uri, exc))
+        return getattr(mod, klass)
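+# Illustrative resolutions: load_class("gevent") resolves through
+# SUPPORTED_WORKERS to gunicorn.workers.ggevent.GeventWorker, while
+# load_class("mypkg.MyWorker") imports the (hypothetical) mypkg module
+# and returns its MyWorker attribute.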
+
+
+positionals = (
+    inspect.Parameter.POSITIONAL_ONLY,
+    inspect.Parameter.POSITIONAL_OR_KEYWORD,
+)
+
+
+def get_arity(f):
+    sig = inspect.signature(f)
+    arity = 0
+
+    for param in sig.parameters.values():
+        if param.kind in positionals:
+            arity += 1
+
+    return arity
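+# e.g. get_arity(lambda a, b, *, c=None: None) == 2; the keyword-only
+# parameter is not counted.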
+
+
+def get_username(uid):
+    """ get the username for a user id"""
+    return pwd.getpwuid(uid).pw_name
+
+
+def set_owner_process(uid, gid, initgroups=False):
+    """ set user and group of workers processes """
+
+    if gid:
+        if uid:
+            try:
+                username = get_username(uid)
+            except KeyError:
+                initgroups = False
+
+        # versions of python < 2.6.2 don't manage unsigned int for
+        # groups like on osx or fedora
+        gid = abs(gid) & 0x7FFFFFFF
+
+        if initgroups:
+            os.initgroups(username, gid)
+        elif gid != os.getgid():
+            os.setgid(gid)
+
+    if uid and uid != os.getuid():
+        os.setuid(uid)
+
+
+def chown(path, uid, gid):
+    os.chown(path, uid, gid)
+
+
+if sys.platform.startswith("win"):
+    def _waitfor(func, pathname, waitall=False):
+        # Perform the operation
+        func(pathname)
+        # Now setup the wait loop
+        if waitall:
+            dirname = pathname
+        else:
+            dirname, name = os.path.split(pathname)
+            dirname = dirname or '.'
+        # Check for `pathname` to be removed from the filesystem.
+        # The exponential backoff of the timeout amounts to a total
+        # of ~1 second after which the deletion is probably an error
+        # anyway.
+        # Testing on an i7@4.3GHz shows that usually only 1 iteration is
+        # required when contention occurs.
+        timeout = 0.001
+        while timeout < 1.0:
+            # Note we are only testing for the existence of the file(s) in
+            # the contents of the directory regardless of any security or
+            # access rights.  If we have made it this far, we have sufficient
+            # permissions to do that much using Python's equivalent of the
+            # Windows API FindFirstFile.
+            # Other Windows APIs can fail or give incorrect results when
+            # dealing with files that are pending deletion.
+            L = os.listdir(dirname)
+            if not (L if waitall else name in L):
+                return
+            # Increase the timeout and try again
+            time.sleep(timeout)
+            timeout *= 2
+        warnings.warn('tests may fail, delete still pending for ' + pathname,
+                      RuntimeWarning, stacklevel=4)
+
+    def _unlink(filename):
+        _waitfor(os.unlink, filename)
+else:
+    _unlink = os.unlink
+
+
+def unlink(filename):
+    try:
+        _unlink(filename)
+    except OSError as error:
+        # The filename need not exist.
+        if error.errno not in (errno.ENOENT, errno.ENOTDIR):
+            raise
+
+
+def is_ipv6(addr):
+    try:
+        socket.inet_pton(socket.AF_INET6, addr)
+    except socket.error:  # not a valid address
+        return False
+    except ValueError:  # ipv6 not supported on this platform
+        return False
+    return True
+
+
+def parse_address(netloc, default_port='8000'):
+    if re.match(r'unix:(//)?', netloc):
+        return re.split(r'unix:(//)?', netloc)[-1]
+
+    if netloc.startswith("fd://"):
+        fd = netloc[5:]
+        try:
+            return int(fd)
+        except ValueError:
+            raise RuntimeError("%r is not a valid file descriptor." % fd) from None
+
+    if netloc.startswith("tcp://"):
+        netloc = netloc.split("tcp://")[1]
+    host, port = netloc, default_port
+
+    if '[' in netloc and ']' in netloc:
+        host = netloc.split(']')[0][1:]
+        port = (netloc.split(']:') + [default_port])[1]
+    elif ':' in netloc:
+        host, port = (netloc.split(':') + [default_port])[:2]
+    elif netloc == "":
+        host, port = "0.0.0.0", default_port
+
+    try:
+        port = int(port)
+    except ValueError:
+        raise RuntimeError("%r is not a valid port number." % port)
+
+    return host.lower(), port
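+# Examples: parse_address("localhost:8000") -> ("localhost", 8000),
+# parse_address("unix:/tmp/g.sock") -> "/tmp/g.sock",
+# parse_address("fd://3") -> 3, parse_address("") -> ("0.0.0.0", 8000)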
+
+
+def close_on_exec(fd):
+    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
+    flags |= fcntl.FD_CLOEXEC
+    fcntl.fcntl(fd, fcntl.F_SETFD, flags)
+
+
+def set_non_blocking(fd):
+    flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
+    fcntl.fcntl(fd, fcntl.F_SETFL, flags)
+
+
+def close(sock):
+    try:
+        sock.close()
+    except socket.error:
+        pass
+
+
+try:
+    from os import closerange
+except ImportError:
+    def closerange(fd_low, fd_high):
+        # Iterate through and close all file descriptors.
+        for fd in range(fd_low, fd_high):
+            try:
+                os.close(fd)
+            except OSError:  # ERROR, fd wasn't open to begin with (ignored)
+                pass
+
+
+def write_chunk(sock, data):
+    if isinstance(data, str):
+        data = data.encode('utf-8')
+    chunk_size = "%X\r\n" % len(data)
+    chunk = b"".join([chunk_size.encode('utf-8'), data, b"\r\n"])
+    sock.sendall(chunk)
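+# e.g. write_chunk(sock, b"hello") sends b"5\r\nhello\r\n": the hex length
+# line, the payload, then CRLF (HTTP/1.1 chunked encoding). An empty
+# payload produces the b"0\r\n\r\n" terminating chunk.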
+
+
+def write(sock, data, chunked=False):
+    if chunked:
+        return write_chunk(sock, data)
+    sock.sendall(data)
+
+
+def write_nonblock(sock, data, chunked=False):
+    timeout = sock.gettimeout()
+    if timeout != 0.0:
+        try:
+            sock.setblocking(0)
+            return write(sock, data, chunked)
+        finally:
+            sock.setblocking(1)
+    else:
+        return write(sock, data, chunked)
+
+
+def write_error(sock, status_int, reason, mesg):
+    html_error = textwrap.dedent("""\
+    <html>
+      <head>
+        <title>%(reason)s</title>
+      </head>
+      <body>
+        <h1><p>%(reason)s</p></h1>
+        %(mesg)s
+      </body>
+    </html>
+    """) % {"reason": reason, "mesg": html.escape(mesg)}
+
+    http = textwrap.dedent("""\
+    HTTP/1.1 %s %s\r
+    Connection: close\r
+    Content-Type: text/html\r
+    Content-Length: %d\r
+    \r
+    %s""") % (str(status_int), reason, len(html_error), html_error)
+    write_nonblock(sock, http.encode('latin1'))
+
+
+def _called_with_wrong_args(f):
+    """Check whether calling a function raised a ``TypeError`` because
+    the call failed or because something in the function raised the
+    error.
+
+    :param f: The function that was called.
+    :return: ``True`` if the call failed.
+    """
+    tb = sys.exc_info()[2]
+
+    try:
+        while tb is not None:
+            if tb.tb_frame.f_code is f.__code__:
+                # In the function, it was called successfully.
+                return False
+
+            tb = tb.tb_next
+
+        # Didn't reach the function.
+        return True
+    finally:
+        # Delete tb to break a circular reference in Python 2.
+        # https://docs.python.org/2/library/sys.html#sys.exc_info
+        del tb
+
+
+def import_app(module):
+    parts = module.split(":", 1)
+    if len(parts) == 1:
+        obj = "application"
+    else:
+        module, obj = parts[0], parts[1]
+
+    try:
+        mod = importlib.import_module(module)
+    except ImportError:
+        if module.endswith(".py") and os.path.exists(module):
+            msg = "Failed to find application, did you mean '%s:%s'?"
+            raise ImportError(msg % (module.rsplit(".", 1)[0], obj))
+        raise
+
+    # Parse obj as a single expression to determine if it's a valid
+    # attribute name or function call.
+    try:
+        expression = ast.parse(obj, mode="eval").body
+    except SyntaxError:
+        raise AppImportError(
+            "Failed to parse %r as an attribute name or function call." % obj
+        )
+
+    if isinstance(expression, ast.Name):
+        name = expression.id
+        args = kwargs = None
+    elif isinstance(expression, ast.Call):
+        # Ensure the function name is an attribute name only.
+        if not isinstance(expression.func, ast.Name):
+            raise AppImportError("Function reference must be a simple name: %r" % obj)
+
+        name = expression.func.id
+
+        # Parse the positional and keyword arguments as literals.
+        try:
+            args = [ast.literal_eval(arg) for arg in expression.args]
+            kwargs = {kw.arg: ast.literal_eval(kw.value) for kw in expression.keywords}
+        except ValueError:
+            # literal_eval gives cryptic error messages, show a generic
+            # message with the full expression instead.
+            raise AppImportError(
+                "Failed to parse arguments as literal values: %r" % obj
+            )
+    else:
+        raise AppImportError(
+            "Failed to parse %r as an attribute name or function call." % obj
+        )
+
+    is_debug = logging.root.level == logging.DEBUG
+    try:
+        app = getattr(mod, name)
+    except AttributeError:
+        if is_debug:
+            traceback.print_exception(*sys.exc_info())
+        raise AppImportError("Failed to find attribute %r in %r." % (name, module))
+
+    # If the expression was a function call, call the retrieved object
+    # to get the real application.
+    if args is not None:
+        try:
+            app = app(*args, **kwargs)
+        except TypeError as e:
+            # If the TypeError was due to bad arguments to the factory
+            # function, show Python's nice error message without a
+            # traceback.
+            if _called_with_wrong_args(app):
+                raise AppImportError(
+                    "".join(traceback.format_exception_only(TypeError, e)).strip()
+                )
+
+            # Otherwise it was raised from within the function, show the
+            # full traceback.
+            raise
+
+    if app is None:
+        raise AppImportError("Failed to find application object: %r" % obj)
+
+    if not callable(app):
+        raise AppImportError("Application object must be callable.")
+    return app
+
+
+def getcwd():
+    # get current path, try to use PWD env first
+    try:
+        a = os.stat(os.environ['PWD'])
+        b = os.stat(os.getcwd())
+        if a.st_ino == b.st_ino and a.st_dev == b.st_dev:
+            cwd = os.environ['PWD']
+        else:
+            cwd = os.getcwd()
+    except Exception:
+        cwd = os.getcwd()
+    return cwd
+
+
+def http_date(timestamp=None):
+    """Return the current date and time formatted for a message header."""
+    if timestamp is None:
+        timestamp = time.time()
+    s = email.utils.formatdate(timestamp, localtime=False, usegmt=True)
+    return s
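+# e.g. http_date(0) -> "Thu, 01 Jan 1970 00:00:00 GMT"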
+
+
+def is_hoppish(header):
+    return header.lower().strip() in hop_headers
+
+
+def daemonize(enable_stdio_inheritance=False):
+    """\
+    Standard daemonization of a process.
+    http://www.faqs.org/faqs/unix-faq/programmer/faq/ section 1.7
+    """
+    if 'GUNICORN_FD' not in os.environ:
+        if os.fork():
+            os._exit(0)
+        os.setsid()
+
+        if os.fork():
+            os._exit(0)
+
+        os.umask(0o22)
+
+        # In both cases below, any file descriptors above stdin,
+        # stdout and stderr are left untouched. The inheritance
+        # option simply allows one to have output go to a file
+        # specified by way of shell redirection when not wanting
+        # to use the --error-log option.
+
+        if not enable_stdio_inheritance:
+            # Remap all of stdin, stdout and stderr on to
+            # /dev/null. The expectation is that users have
+            # specified the --error-log option.
+
+            closerange(0, 3)
+
+            fd_null = os.open(REDIRECT_TO, os.O_RDWR)
+            # PEP 446, make fd for /dev/null inheritable
+            os.set_inheritable(fd_null, True)
+
+            # expect fd_null to always be 0 here, but in case it's not ...
+            if fd_null != 0:
+                os.dup2(fd_null, 0)
+
+            os.dup2(fd_null, 1)
+            os.dup2(fd_null, 2)
+
+        else:
+            fd_null = os.open(REDIRECT_TO, os.O_RDWR)
+
+            # Always redirect stdin to /dev/null as we would
+            # never expect to need to read interactive input.
+
+            if fd_null != 0:
+                os.close(0)
+                os.dup2(fd_null, 0)
+
+            # If stdout and stderr are still connected to
+            # their original file descriptors we check to see
+            # if they are associated with terminal devices.
+            # When they are, we map them to /dev/null so that
+            # they are still properly detached from any
+            # controlling terminal. If not, we preserve them
+            # as they are.
+            #
+            # If stdout and stderr were not hooked up to the
+            # original file descriptors, then all bets are
+            # off and all we can really do is leave them as
+            # they were.
+            #
+            # This will allow 'gunicorn ... > output.log 2>&1'
+            # to work with stdout/stderr going to the file
+            # as expected.
+            #
+            # Note that if using --error-log option, the log
+            # file specified through shell redirection will
+            # only be used up until the log file specified
+            # by the option takes over. As it replaces stdout
+            # and stderr at the file descriptor level, then
+            # anything using stdout or stderr, including having
+            # cached a reference to them, will still work.
+
+            def redirect(stream, fd_expect):
+                try:
+                    fd = stream.fileno()
+                    if fd == fd_expect and stream.isatty():
+                        os.close(fd)
+                        os.dup2(fd_null, fd)
+                except AttributeError:
+                    pass
+
+            redirect(sys.stdout, 1)
+            redirect(sys.stderr, 2)
+
+
+def seed():
+    try:
+        random.seed(os.urandom(64))
+    except NotImplementedError:
+        random.seed('%s.%s' % (time.time(), os.getpid()))
+
+
+def check_is_writable(path):
+    try:
+        with open(path, 'a') as f:
+            f.close()
+    except IOError as e:
+        raise RuntimeError("Error: '%s' isn't writable [%r]" % (path, e))
+
+
+def to_bytestring(value, encoding="utf8"):
+    """Converts a string argument to a byte string"""
+    if isinstance(value, bytes):
+        return value
+    if not isinstance(value, str):
+        raise TypeError('%r is not a string' % value)
+
+    return value.encode(encoding)
+
+
+def has_fileno(obj):
+    if not hasattr(obj, "fileno"):
+        return False
+
+    # check BytesIO case and maybe others
+    try:
+        obj.fileno()
+    except (AttributeError, IOError, io.UnsupportedOperation):
+        return False
+
+    return True
+
+
+def warn(msg):
+    print("!!!", file=sys.stderr)
+
+    lines = msg.splitlines()
+    for i, line in enumerate(lines):
+        if i == 0:
+            line = "WARNING: %s" % line
+        print("!!! %s" % line, file=sys.stderr)
+
+    print("!!!\n", file=sys.stderr)
+    sys.stderr.flush()
+
+
+def make_fail_app(msg):
+    msg = to_bytestring(msg)
+
+    def app(environ, start_response):
+        start_response("500 Internal Server Error", [
+            ("Content-Type", "text/plain"),
+            ("Content-Length", str(len(msg)))
+        ])
+        return [msg]
+
+    return app
+
+
+def split_request_uri(uri):
+    if uri.startswith("//"):
+        # When the path starts with //, urlsplit considers it as a
+        # relative uri while the RFC says we should consider it as abs_path
+        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
+        # We use temporary dot prefix to workaround this behaviour
+        parts = urllib.parse.urlsplit("." + uri)
+        return parts._replace(path=parts.path[1:])
+
+    return urllib.parse.urlsplit(uri)
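+# e.g. split_request_uri("//a/b") keeps path "//a/b" with an empty netloc,
+# whereas a bare urlsplit("//a/b") would treat "a" as the authority.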
+
+
+# From six.reraise
+def reraise(tp, value, tb=None):
+    try:
+        if value is None:
+            value = tp()
+        if value.__traceback__ is not tb:
+            raise value.with_traceback(tb)
+        raise value
+    finally:
+        value = None
+        tb = None
+
+
+def bytes_to_str(b):
+    if isinstance(b, str):
+        return b
+    return str(b, 'latin1')
+
+
+def unquote_to_wsgi_str(string):
+    return urllib.parse.unquote_to_bytes(string).decode('latin-1')
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/workers/__init__.py b/.venv/lib/python3.12/site-packages/gunicorn/workers/__init__.py
new file mode 100644
index 00000000..ae753e1c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/workers/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+# supported gunicorn workers.
+SUPPORTED_WORKERS = {
+    "sync": "gunicorn.workers.sync.SyncWorker",
+    "eventlet": "gunicorn.workers.geventlet.EventletWorker",
+    "gevent": "gunicorn.workers.ggevent.GeventWorker",
+    "gevent_wsgi": "gunicorn.workers.ggevent.GeventPyWSGIWorker",
+    "gevent_pywsgi": "gunicorn.workers.ggevent.GeventPyWSGIWorker",
+    "tornado": "gunicorn.workers.gtornado.TornadoWorker",
+    "gthread": "gunicorn.workers.gthread.ThreadWorker",
+}
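+# These dotted paths are what the -k/--worker-class setting resolves
+# through util.load_class(), e.g. `gunicorn -k gevent myapp:app`
+# (myapp:app is an assumed application path).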
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/workers/base.py b/.venv/lib/python3.12/site-packages/gunicorn/workers/base.py
new file mode 100644
index 00000000..f321dd2d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/workers/base.py
@@ -0,0 +1,275 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+import io
+import os
+import signal
+import sys
+import time
+import traceback
+from datetime import datetime
+from random import randint
+from ssl import SSLError
+
+from gunicorn import util
+from gunicorn.http.errors import (
+    ForbiddenProxyRequest, InvalidHeader,
+    InvalidHeaderName, InvalidHTTPVersion,
+    InvalidProxyLine, InvalidRequestLine,
+    InvalidRequestMethod, InvalidSchemeHeaders,
+    LimitRequestHeaders, LimitRequestLine,
+)
+from gunicorn.http.wsgi import Response, default_environ
+from gunicorn.reloader import reloader_engines
+from gunicorn.workers.workertmp import WorkerTmp
+
+
+class Worker(object):
+
+    SIGNALS = [getattr(signal, "SIG%s" % x) for x in (
+        "ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD".split()
+    )]
+
+    PIPE = []
+
+    def __init__(self, age, ppid, sockets, app, timeout, cfg, log):
+        """\
+        This is called pre-fork so it shouldn't do anything to the
+        current process. If there's a need to make process wide
+        changes you'll want to do that in ``self.init_process()``.
+        """
+        self.age = age
+        self.pid = "[booting]"
+        self.ppid = ppid
+        self.sockets = sockets
+        self.app = app
+        self.timeout = timeout
+        self.cfg = cfg
+        self.booted = False
+        self.aborted = False
+        self.reloader = None
+
+        self.nr = 0
+
+        if cfg.max_requests > 0:
+            jitter = randint(0, cfg.max_requests_jitter)
+            self.max_requests = cfg.max_requests + jitter
+        else:
+            self.max_requests = sys.maxsize
+
+        self.alive = True
+        self.log = log
+        self.tmp = WorkerTmp(cfg)
+
+    def __str__(self):
+        return "<Worker %s>" % self.pid
+
+    def notify(self):
+        """\
+        Your worker subclass must arrange to have this method called
+        once every ``self.timeout`` seconds. If you fail in accomplishing
+        this task, the master process will murder your workers.
+        """
+        self.tmp.notify()
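+        # Illustrative contract: the master compares this temp file's
+        # timestamp against the timeout and kills workers whose
+        # heartbeat has gone stale.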
+
+    def run(self):
+        """\
+        This is the mainloop of a worker process. You should override
+        this method in a subclass to provide the intended behaviour
+        for your particular evil schemes.
+        """
+        raise NotImplementedError()
+
+    def init_process(self):
+        """\
+        If you override this method in a subclass, the last statement
+        in the function should be to call this method with
+        super().init_process() so that the ``run()`` loop is initiated.
+        """
+
+        # set environment variables
+        if self.cfg.env:
+            for k, v in self.cfg.env.items():
+                os.environ[k] = v
+
+        util.set_owner_process(self.cfg.uid, self.cfg.gid,
+                               initgroups=self.cfg.initgroups)
+
+        # Reseed the random number generator
+        util.seed()
+
+        # For waking ourselves up
+        self.PIPE = os.pipe()
+        for p in self.PIPE:
+            util.set_non_blocking(p)
+            util.close_on_exec(p)
+
+        # Prevent fd inheritance
+        for s in self.sockets:
+            util.close_on_exec(s)
+        util.close_on_exec(self.tmp.fileno())
+
+        self.wait_fds = self.sockets + [self.PIPE[0]]
+
+        self.log.close_on_exec()
+
+        self.init_signals()
+
+        # start the reloader
+        if self.cfg.reload:
+            def changed(fname):
+                self.log.info("Worker reloading: %s modified", fname)
+                self.alive = False
+                os.write(self.PIPE[1], b"1")
+                self.cfg.worker_int(self)
+                time.sleep(0.1)
+                sys.exit(0)
+
+            reloader_cls = reloader_engines[self.cfg.reload_engine]
+            self.reloader = reloader_cls(extra_files=self.cfg.reload_extra_files,
+                                         callback=changed)
+
+        self.load_wsgi()
+        if self.reloader:
+            self.reloader.start()
+
+        self.cfg.post_worker_init(self)
+
+        # Enter main run loop
+        self.booted = True
+        self.run()
+
+    def load_wsgi(self):
+        try:
+            self.wsgi = self.app.wsgi()
+        except SyntaxError as e:
+            if not self.cfg.reload:
+                raise
+
+            self.log.exception(e)
+
+            # fix from PR #1228
+            # storing the traceback into exc_tb will create a circular reference.
+            # per https://docs.python.org/2/library/sys.html#sys.exc_info warning,
+            # delete the traceback after use.
+            try:
+                _, exc_val, exc_tb = sys.exc_info()
+                self.reloader.add_extra_file(exc_val.filename)
+
+                tb_string = io.StringIO()
+                traceback.print_tb(exc_tb, file=tb_string)
+                self.wsgi = util.make_fail_app(tb_string.getvalue())
+            finally:
+                del exc_tb
+
+    def init_signals(self):
+        # reset signaling
+        for s in self.SIGNALS:
+            signal.signal(s, signal.SIG_DFL)
+        # init new signaling
+        signal.signal(signal.SIGQUIT, self.handle_quit)
+        signal.signal(signal.SIGTERM, self.handle_exit)
+        signal.signal(signal.SIGINT, self.handle_quit)
+        signal.signal(signal.SIGWINCH, self.handle_winch)
+        signal.signal(signal.SIGUSR1, self.handle_usr1)
+        signal.signal(signal.SIGABRT, self.handle_abort)
+
+        # Don't let SIGTERM and SIGUSR1 disturb active requests
+        # by interrupting system calls
+        signal.siginterrupt(signal.SIGTERM, False)
+        signal.siginterrupt(signal.SIGUSR1, False)
+
+        if hasattr(signal, 'set_wakeup_fd'):
+            signal.set_wakeup_fd(self.PIPE[1])
+
+    def handle_usr1(self, sig, frame):
+        self.log.reopen_files()
+
+    def handle_exit(self, sig, frame):
+        self.alive = False
+
+    def handle_quit(self, sig, frame):
+        self.alive = False
+        # worker_int callback
+        self.cfg.worker_int(self)
+        time.sleep(0.1)
+        sys.exit(0)
+
+    def handle_abort(self, sig, frame):
+        self.alive = False
+        self.cfg.worker_abort(self)
+        sys.exit(1)
+
+    def handle_error(self, req, client, addr, exc):
+        request_start = datetime.now()
+        addr = addr or ('', -1)  # unix socket case
+        if isinstance(exc, (
+            InvalidRequestLine, InvalidRequestMethod,
+            InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,
+            LimitRequestLine, LimitRequestHeaders,
+            InvalidProxyLine, ForbiddenProxyRequest,
+            InvalidSchemeHeaders,
+            SSLError,
+        )):
+
+            status_int = 400
+            reason = "Bad Request"
+
+            if isinstance(exc, InvalidRequestLine):
+                mesg = "Invalid Request Line '%s'" % str(exc)
+            elif isinstance(exc, InvalidRequestMethod):
+                mesg = "Invalid Method '%s'" % str(exc)
+            elif isinstance(exc, InvalidHTTPVersion):
+                mesg = "Invalid HTTP Version '%s'" % str(exc)
+            elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):
+                mesg = "%s" % str(exc)
+                if not req and hasattr(exc, "req"):
+                    req = exc.req  # for access log
+            elif isinstance(exc, LimitRequestLine):
+                mesg = "%s" % str(exc)
+            elif isinstance(exc, LimitRequestHeaders):
+                reason = "Request Header Fields Too Large"
+                mesg = "Error parsing headers: '%s'" % str(exc)
+                status_int = 431
+            elif isinstance(exc, InvalidProxyLine):
+                mesg = "'%s'" % str(exc)
+            elif isinstance(exc, ForbiddenProxyRequest):
+                reason = "Forbidden"
+                mesg = "Request forbidden"
+                status_int = 403
+            elif isinstance(exc, InvalidSchemeHeaders):
+                mesg = "%s" % str(exc)
+            elif isinstance(exc, SSLError):
+                reason = "Forbidden"
+                mesg = "'%s'" % str(exc)
+                status_int = 403
+
+            msg = "Invalid request from ip={ip}: {error}"
+            self.log.warning(msg.format(ip=addr[0], error=str(exc)))
+        else:
+            if hasattr(req, "uri"):
+                self.log.exception("Error handling request %s", req.uri)
+            status_int = 500
+            reason = "Internal Server Error"
+            mesg = ""
+
+        if req is not None:
+            request_time = datetime.now() - request_start
+            environ = default_environ(req, client, self.cfg)
+            environ['REMOTE_ADDR'] = addr[0]
+            environ['REMOTE_PORT'] = str(addr[1])
+            resp = Response(req, client, self.cfg)
+            resp.status = "%s %s" % (status_int, reason)
+            resp.response_length = len(mesg)
+            self.log.access(resp, req, environ, request_time)
+
+        try:
+            util.write_error(client, status_int, reason, mesg)
+        except Exception:
+            self.log.debug("Failed to send error message.")
+
+    def handle_winch(self, sig, frame):
+        # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.
+        self.log.debug("worker: SIGWINCH ignored.")
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/workers/base_async.py b/.venv/lib/python3.12/site-packages/gunicorn/workers/base_async.py
new file mode 100644
index 00000000..b059a7cb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/workers/base_async.py
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+from datetime import datetime
+import errno
+import socket
+import ssl
+import sys
+
+from gunicorn import http
+from gunicorn.http import wsgi
+from gunicorn import util
+from gunicorn.workers import base
+
+ALREADY_HANDLED = object()
+
+
+class AsyncWorker(base.Worker):
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.worker_connections = self.cfg.worker_connections
+
+    def timeout_ctx(self):
+        raise NotImplementedError()
+
+    def is_already_handled(self, respiter):
+        # some workers will need to override this method to raise StopIteration
+        return respiter == ALREADY_HANDLED
+
+    def handle(self, listener, client, addr):
+        req = None
+        try:
+            parser = http.RequestParser(self.cfg, client, addr)
+            try:
+                listener_name = listener.getsockname()
+                if not self.cfg.keepalive:
+                    req = next(parser)
+                    self.handle_request(listener_name, req, client, addr)
+                else:
+                    # keepalive loop
+                    proxy_protocol_info = {}
+                    while True:
+                        req = None
+                        with self.timeout_ctx():
+                            req = next(parser)
+                        if not req:
+                            break
+                        if req.proxy_protocol_info:
+                            proxy_protocol_info = req.proxy_protocol_info
+                        else:
+                            req.proxy_protocol_info = proxy_protocol_info
+                        self.handle_request(listener_name, req, client, addr)
+            except http.errors.NoMoreData as e:
+                self.log.debug("Ignored premature client disconnection. %s", e)
+            except StopIteration as e:
+                self.log.debug("Closing connection. %s", e)
+            except ssl.SSLError:
+                # pass to next try-except level
+                util.reraise(*sys.exc_info())
+            except EnvironmentError:
+                # pass to next try-except level
+                util.reraise(*sys.exc_info())
+            except Exception as e:
+                self.handle_error(req, client, addr, e)
+        except ssl.SSLError as e:
+            if e.args[0] == ssl.SSL_ERROR_EOF:
+                self.log.debug("ssl connection closed")
+                client.close()
+            else:
+                self.log.debug("Error processing SSL request.")
+                self.handle_error(req, client, addr, e)
+        except EnvironmentError as e:
+            if e.errno not in (errno.EPIPE, errno.ECONNRESET, errno.ENOTCONN):
+                self.log.exception("Socket error processing request.")
+            else:
+                if e.errno == errno.ECONNRESET:
+                    self.log.debug("Ignoring connection reset")
+                elif e.errno == errno.ENOTCONN:
+                    self.log.debug("Ignoring socket not connected")
+                else:
+                    self.log.debug("Ignoring EPIPE")
+        except Exception as e:
+            self.handle_error(req, client, addr, e)
+        finally:
+            util.close(client)
+
+    def handle_request(self, listener_name, req, sock, addr):
+        request_start = datetime.now()
+        environ = {}
+        resp = None
+        try:
+            self.cfg.pre_request(self, req)
+            resp, environ = wsgi.create(req, sock, addr,
+                                        listener_name, self.cfg)
+            environ["wsgi.multithread"] = True
+            self.nr += 1
+            if self.nr >= self.max_requests:
+                if self.alive:
+                    self.log.info("Autorestarting worker after current request.")
+                    self.alive = False
+
+            if not self.alive or not self.cfg.keepalive:
+                resp.force_close()
+
+            respiter = self.wsgi(environ, resp.start_response)
+            if self.is_already_handled(respiter):
+                return False
+            try:
+                if isinstance(respiter, environ['wsgi.file_wrapper']):
+                    resp.write_file(respiter)
+                else:
+                    for item in respiter:
+                        resp.write(item)
+                resp.close()
+            finally:
+                request_time = datetime.now() - request_start
+                self.log.access(resp, req, environ, request_time)
+                if hasattr(respiter, "close"):
+                    respiter.close()
+            if resp.should_close():
+                raise StopIteration()
+        except StopIteration:
+            raise
+        except EnvironmentError:
+            # If the original exception was a socket.error we delegate
+            # handling it to the caller (where handle() might ignore it)
+            util.reraise(*sys.exc_info())
+        except Exception:
+            if resp and resp.headers_sent:
+                # If the response headers have already been sent, we should
+                # close the connection to indicate the error.
+                self.log.exception("Error handling request")
+                try:
+                    sock.shutdown(socket.SHUT_RDWR)
+                    sock.close()
+                except EnvironmentError:
+                    pass
+                raise StopIteration()
+            raise
+        finally:
+            try:
+                self.cfg.post_request(self, req, environ, resp)
+            except Exception:
+                self.log.exception("Exception in post_request hook")
+        return True
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/workers/geventlet.py b/.venv/lib/python3.12/site-packages/gunicorn/workers/geventlet.py
new file mode 100644
index 00000000..c42ed118
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/workers/geventlet.py
@@ -0,0 +1,187 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+from functools import partial
+import sys
+
+try:
+    import eventlet
+except ImportError:
+    raise RuntimeError("eventlet worker requires eventlet 0.24.1 or higher")
+else:
+    from packaging.version import parse as parse_version
+    if parse_version(eventlet.__version__) < parse_version('0.24.1'):
+        raise RuntimeError("eventlet worker requires eventlet 0.24.1 or higher")
+
+from eventlet import hubs, greenthread
+from eventlet.greenio import GreenSocket
+import eventlet.wsgi
+import greenlet
+
+from gunicorn.workers.base_async import AsyncWorker
+from gunicorn.sock import ssl_wrap_socket
+
+# ALREADY_HANDLED was removed in eventlet 0.30.3+; now it's `WSGI_LOCAL.already_handled: bool`
+# https://github.com/eventlet/eventlet/pull/544
+EVENTLET_WSGI_LOCAL = getattr(eventlet.wsgi, "WSGI_LOCAL", None)
+EVENTLET_ALREADY_HANDLED = getattr(eventlet.wsgi, "ALREADY_HANDLED", None)
+
+
+def _eventlet_socket_sendfile(self, file, offset=0, count=None):
+    # Based on the implementation in gevent which in turn is slightly
+    # modified from the standard library implementation.
+    if self.gettimeout() == 0:
+        raise ValueError("non-blocking sockets are not supported")
+    if offset:
+        file.seek(offset)
+    blocksize = min(count, 8192) if count else 8192
+    total_sent = 0
+    # localize variable access to minimize overhead
+    file_read = file.read
+    sock_send = self.send
+    try:
+        while True:
+            if count:
+                blocksize = min(count - total_sent, blocksize)
+                if blocksize <= 0:
+                    break
+            data = memoryview(file_read(blocksize))
+            if not data:
+                break  # EOF
+            while True:
+                try:
+                    sent = sock_send(data)
+                except BlockingIOError:
+                    continue
+                else:
+                    total_sent += sent
+                    if sent < len(data):
+                        data = data[sent:]
+                    else:
+                        break
+        return total_sent
+    finally:
+        if total_sent > 0 and hasattr(file, 'seek'):
+            file.seek(offset + total_sent)
+
+
+def _eventlet_serve(sock, handle, concurrency):
+    """
+    Serve requests forever.
+
+    This code is nearly identical to ``eventlet.convenience.serve`` except
+    that it attempts to join the pool at the end, which allows for gunicorn
+    graceful shutdowns.
+    """
+    pool = eventlet.greenpool.GreenPool(concurrency)
+    server_gt = eventlet.greenthread.getcurrent()
+
+    while True:
+        try:
+            conn, addr = sock.accept()
+            gt = pool.spawn(handle, conn, addr)
+            gt.link(_eventlet_stop, server_gt, conn)
+            conn, addr, gt = None, None, None
+        except eventlet.StopServe:
+            sock.close()
+            pool.waitall()
+            return
+
+
+def _eventlet_stop(client, server, conn):
+    """
+    Stop a greenlet handling a request and close its connection.
+
+    This code is lifted from eventlet so as not to depend on undocumented
+    functions in the library.
+    """
+    try:
+        try:
+            client.wait()
+        finally:
+            conn.close()
+    except greenlet.GreenletExit:
+        pass
+    except Exception:
+        greenthread.kill(server, *sys.exc_info())
+
+
+def patch_sendfile():
+    # As of eventlet 0.25.1, GreenSocket.sendfile doesn't exist,
+    # meaning the native implementation of socket.sendfile will be used.
+    # If os.sendfile exists, it will attempt to use that, failing explicitly
+    # if the socket is in non-blocking mode, which the underlying
+    # socket object /is/. Even the regular _sendfile_use_send would
+    # fail in that way; moreover, it would use the underlying socket.send,
+    # which isn't properly cooperative. So we have to monkey-patch a working
+    # socket.sendfile() into GreenSocket; in this method, `self.send` will be
+    # the GreenSocket's send method, which is properly cooperative.
+    if not hasattr(GreenSocket, 'sendfile'):
+        GreenSocket.sendfile = _eventlet_socket_sendfile
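+
+# Note: when a WSGI app returns environ['wsgi.file_wrapper'](fileobj),
+# gunicorn's resp.write_file() may route the transfer through
+# socket.sendfile(), which is how the cooperative implementation patched in
+# above gets exercised.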
+
+
+class EventletWorker(AsyncWorker):
+
+    def patch(self):
+        hubs.use_hub()
+        eventlet.monkey_patch()
+        patch_sendfile()
+
+    def is_already_handled(self, respiter):
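+        # eventlet can finish a response entirely on its own (e.g. when a
+        # handler hijacks the connection); the flags below signal that case,
+        # and raising StopIteration makes handle_request() bail out without
+        # touching the socket again.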
+        # eventlet >= 0.30.3
+        if getattr(EVENTLET_WSGI_LOCAL, "already_handled", None):
+            raise StopIteration()
+        # eventlet < 0.30.3
+        if respiter == EVENTLET_ALREADY_HANDLED:
+            raise StopIteration()
+        return super().is_already_handled(respiter)
+
+    def init_process(self):
+        self.patch()
+        super().init_process()
+
+    def handle_quit(self, sig, frame):
+        eventlet.spawn(super().handle_quit, sig, frame)
+
+    def handle_usr1(self, sig, frame):
+        eventlet.spawn(super().handle_usr1, sig, frame)
+
+    def timeout_ctx(self):
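+        # Note: the False `exception` argument makes the timeout silent;
+        # when the keepalive deadline passes, the with-block simply exits
+        # instead of raising. The async worker uses it roughly as:
+        #     with self.timeout_ctx():
+        #         req = next(parser)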
+        return eventlet.Timeout(self.cfg.keepalive or None, False)
+
+    def handle(self, listener, client, addr):
+        if self.cfg.is_ssl:
+            client = ssl_wrap_socket(client, self.cfg)
+        super().handle(listener, client, addr)
+
+    def run(self):
+        acceptors = []
+        for sock in self.sockets:
+            gsock = GreenSocket(sock)
+            gsock.setblocking(1)
+            hfun = partial(self.handle, gsock)
+            acceptor = eventlet.spawn(_eventlet_serve, gsock, hfun,
+                                      self.worker_connections)
+
+            acceptors.append(acceptor)
+            eventlet.sleep(0.0)
+
+        while self.alive:
+            self.notify()
+            eventlet.sleep(1.0)
+
+        self.notify()
+        t = None
+        try:
+            with eventlet.Timeout(self.cfg.graceful_timeout) as t:
+                for a in acceptors:
+                    a.kill(eventlet.StopServe())
+                for a in acceptors:
+                    a.wait()
+        except eventlet.Timeout as te:
+            if te != t:
+                raise
+            for a in acceptors:
+                a.kill()
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/workers/ggevent.py b/.venv/lib/python3.12/site-packages/gunicorn/workers/ggevent.py
new file mode 100644
index 00000000..2125a32d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/workers/ggevent.py
@@ -0,0 +1,190 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+import os
+import sys
+from datetime import datetime
+from functools import partial
+import time
+
+try:
+    import gevent
+except ImportError:
+    raise RuntimeError("gevent worker requires gevent 1.4 or higher")
+else:
+    from packaging.version import parse as parse_version
+    if parse_version(gevent.__version__) < parse_version('1.4'):
+        raise RuntimeError("gevent worker requires gevent 1.4 or higher")
+
+from gevent.pool import Pool
+from gevent.server import StreamServer
+from gevent import hub, monkey, socket, pywsgi
+
+import gunicorn
+from gunicorn.http.wsgi import base_environ
+from gunicorn.sock import ssl_context
+from gunicorn.workers.base_async import AsyncWorker
+
+VERSION = "gevent/%s gunicorn/%s" % (gevent.__version__, gunicorn.__version__)
+
+
+class GeventWorker(AsyncWorker):
+
+    server_class = None
+    wsgi_handler = None
+
+    def patch(self):
+        monkey.patch_all()
+
+        # patch sockets
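+        # (monkey.patch_all() above has already replaced the socket module,
+        # so rebuilding each listener from its file descriptor yields a
+        # cooperative gevent socket)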
+        sockets = []
+        for s in self.sockets:
+            sockets.append(socket.socket(s.FAMILY, socket.SOCK_STREAM,
+                                         fileno=s.sock.fileno()))
+        self.sockets = sockets
+
+    def notify(self):
+        super().notify()
+        if self.ppid != os.getppid():
+            self.log.info("Parent changed, shutting down: %s", self)
+            sys.exit(0)
+
+    def timeout_ctx(self):
+        return gevent.Timeout(self.cfg.keepalive, False)
+
+    def run(self):
+        servers = []
+        ssl_args = {}
+
+        if self.cfg.is_ssl:
+            ssl_args = {"ssl_context": ssl_context(self.cfg)}
+
+        for s in self.sockets:
+            s.setblocking(1)
+            pool = Pool(self.worker_connections)
+            if self.server_class is not None:
+                environ = base_environ(self.cfg)
+                environ.update({
+                    "wsgi.multithread": True,
+                    "SERVER_SOFTWARE": VERSION,
+                })
+                server = self.server_class(
+                    s, application=self.wsgi, spawn=pool, log=self.log,
+                    handler_class=self.wsgi_handler, environ=environ,
+                    **ssl_args)
+            else:
+                hfun = partial(self.handle, s)
+                server = StreamServer(s, handle=hfun, spawn=pool, **ssl_args)
+                if self.cfg.workers > 1:
+                    server.max_accept = 1
+
+            server.start()
+            servers.append(server)
+
+        while self.alive:
+            self.notify()
+            gevent.sleep(1.0)
+
+        try:
+            # Stop accepting requests
+            for server in servers:
+                if hasattr(server, 'close'):  # gevent 1.0
+                    server.close()
+                if hasattr(server, 'kill'):  # gevent < 1.0
+                    server.kill()
+
+            # Handle current requests until graceful_timeout
+            ts = time.time()
+            while time.time() - ts <= self.cfg.graceful_timeout:
+                accepting = 0
+                for server in servers:
+                    if server.pool.free_count() != server.pool.size:
+                        accepting += 1
+
+                # if no server is accepting a connection, we can exit
+                if not accepting:
+                    return
+
+                self.notify()
+                gevent.sleep(1.0)
+
+            # Force-kill all the active handlers
+            self.log.warning("Worker graceful timeout (pid:%s)", self.pid)
+            for server in servers:
+                server.stop(timeout=1)
+        except Exception:
+            pass
+
+    def handle(self, listener, client, addr):
+        # The connected socket's timeout defaults to socket.getdefaulttimeout().
+        # This forces it into blocking mode.
+        client.setblocking(1)
+        super().handle(listener, client, addr)
+
+    def handle_request(self, listener_name, req, sock, addr):
+        try:
+            super().handle_request(listener_name, req, sock, addr)
+        except gevent.GreenletExit:
+            pass
+        except SystemExit:
+            pass
+
+    def handle_quit(self, sig, frame):
+        # Move this out of the signal handler so we can use
+        # blocking calls. See #1126
+        gevent.spawn(super().handle_quit, sig, frame)
+
+    def handle_usr1(self, sig, frame):
+        # Make the gevent workers handle the usr1 signal
+        # by deferring to a new greenlet. See #1645
+        gevent.spawn(super().handle_usr1, sig, frame)
+
+    def init_process(self):
+        self.patch()
+        hub.reinit()
+        super().init_process()
+
+
+class GeventResponse(object):
+
+    status = None
+    headers = None
+    sent = None
+
+    def __init__(self, status, headers, clength):
+        self.status = status
+        self.headers = headers
+        self.sent = clength
+
+
+class PyWSGIHandler(pywsgi.WSGIHandler):
+
+    def log_request(self):
+        start = datetime.fromtimestamp(self.time_start)
+        finish = datetime.fromtimestamp(self.time_finish)
+        response_time = finish - start
+        resp_headers = getattr(self, 'response_headers', {})
+        resp = GeventResponse(self.status, resp_headers, self.response_length)
+        if hasattr(self, 'headers'):
+            req_headers = self.headers.items()
+        else:
+            req_headers = []
+        self.server.log.access(resp, req_headers, self.environ, response_time)
+
+    def get_environ(self):
+        env = super().get_environ()
+        env['gunicorn.sock'] = self.socket
+        env['RAW_URI'] = self.path
+        return env
+
+
+class PyWSGIServer(pywsgi.WSGIServer):
+    pass
+
+
+class GeventPyWSGIWorker(GeventWorker):
+    "The Gevent StreamServer based workers."
+    server_class = PyWSGIServer
+    wsgi_handler = PyWSGIHandler
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/workers/gthread.py b/.venv/lib/python3.12/site-packages/gunicorn/workers/gthread.py
new file mode 100644
index 00000000..c9c42345
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/workers/gthread.py
@@ -0,0 +1,373 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+# design:
+# A threaded worker accepts connections in the main loop; accepted
+# connections are added to the thread pool as connection jobs.
+# Keepalive connections are put back in the loop waiting for an event.
+# If no event happens before the keepalive timeout, the connection is
+# closed.
+# pylint: disable=no-else-break
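+#
+# A rough sketch of the flow above (all names are methods defined below):
+# accept() registers a new client socket with the poller;
+# on_client_socket_readable() hands it to the thread pool via enqueue_req();
+# finish_request() either closes it or parks it back in the poller with a
+# keepalive deadline, which murder_keepalived() enforces.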
+
+from concurrent import futures
+import errno
+import os
+import selectors
+import socket
+import ssl
+import sys
+import time
+from collections import deque
+from datetime import datetime
+from functools import partial
+from threading import RLock
+
+from . import base
+from .. import http
+from .. import util
+from .. import sock
+from ..http import wsgi
+
+
+class TConn(object):
+
+    def __init__(self, cfg, sock, client, server):
+        self.cfg = cfg
+        self.sock = sock
+        self.client = client
+        self.server = server
+
+        self.timeout = None
+        self.parser = None
+        self.initialized = False
+
+        # set the socket to non blocking
+        self.sock.setblocking(False)
+
+    def init(self):
+        self.initialized = True
+        self.sock.setblocking(True)
+
+        if self.parser is None:
+            # wrap the socket if needed
+            if self.cfg.is_ssl:
+                self.sock = sock.ssl_wrap_socket(self.sock, self.cfg)
+
+            # initialize the parser
+            self.parser = http.RequestParser(self.cfg, self.sock, self.client)
+
+    def set_timeout(self):
+        # set the timeout
+        self.timeout = time.time() + self.cfg.keepalive
+
+    def close(self):
+        util.close(self.sock)
+
+
+class ThreadWorker(base.Worker):
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.worker_connections = self.cfg.worker_connections
+        self.max_keepalived = self.cfg.worker_connections - self.cfg.threads
+        # initialise the pool
+        self.tpool = None
+        self.poller = None
+        self._lock = None
+        self.futures = deque()
+        self._keep = deque()
+        self.nr_conns = 0
+
+    @classmethod
+    def check_config(cls, cfg, log):
+        max_keepalived = cfg.worker_connections - cfg.threads
+
+        if max_keepalived <= 0 and cfg.keepalive:
+            log.warning("No keepalived connections can be handled. " +
+                        "Check the number of worker connections and threads.")
+
+    def init_process(self):
+        self.tpool = self.get_thread_pool()
+        self.poller = selectors.DefaultSelector()
+        self._lock = RLock()
+        super().init_process()
+
+    def get_thread_pool(self):
+        """Override this method to customize how the thread pool is created"""
+        return futures.ThreadPoolExecutor(max_workers=self.cfg.threads)
+
+    def handle_quit(self, sig, frame):
+        self.alive = False
+        # worker_int callback
+        self.cfg.worker_int(self)
+        self.tpool.shutdown(False)
+        time.sleep(0.1)
+        sys.exit(0)
+
+    def _wrap_future(self, fs, conn):
+        fs.conn = conn
+        self.futures.append(fs)
+        fs.add_done_callback(self.finish_request)
+
+    def enqueue_req(self, conn):
+        conn.init()
+        # submit the connection to a worker
+        fs = self.tpool.submit(self.handle, conn)
+        self._wrap_future(fs, conn)
+
+    def accept(self, server, listener):
+        try:
+            sock, client = listener.accept()
+            # initialize the connection object
+            conn = TConn(self.cfg, sock, client, server)
+
+            self.nr_conns += 1
+            # wait until socket is readable
+            with self._lock:
+                self.poller.register(conn.sock, selectors.EVENT_READ,
+                                     partial(self.on_client_socket_readable, conn))
+        except EnvironmentError as e:
+            if e.errno not in (errno.EAGAIN, errno.ECONNABORTED,
+                               errno.EWOULDBLOCK):
+                raise
+
+    def on_client_socket_readable(self, conn, client):
+        with self._lock:
+            # unregister the client from the poller
+            self.poller.unregister(client)
+
+            if conn.initialized:
+                # remove the connection from keepalive
+                try:
+                    self._keep.remove(conn)
+                except ValueError:
+                    # race condition
+                    return
+
+        # submit the connection to a worker
+        self.enqueue_req(conn)
+
+    def murder_keepalived(self):
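+        # Connections enter self._keep in (roughly) deadline order, so once
+        # popleft() yields one that hasn't expired we can stop scanning.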
+        now = time.time()
+        while True:
+            with self._lock:
+                try:
+                    # remove the connection from the queue
+                    conn = self._keep.popleft()
+                except IndexError:
+                    break
+
+            delta = conn.timeout - now
+            if delta > 0:
+                # add the connection back to the queue
+                with self._lock:
+                    self._keep.appendleft(conn)
+                break
+            else:
+                self.nr_conns -= 1
+                # remove the socket from the poller
+                with self._lock:
+                    try:
+                        self.poller.unregister(conn.sock)
+                    except EnvironmentError as e:
+                        if e.errno != errno.EBADF:
+                            raise
+                    except KeyError:
+                        # already removed by the system, continue
+                        pass
+                    except ValueError:
+                        # already removed by the system, continue
+                        pass
+
+                # close the socket
+                conn.close()
+
+    def is_parent_alive(self):
+        # If our parent changed then we shut down.
+        if self.ppid != os.getppid():
+            self.log.info("Parent changed, shutting down: %s", self)
+            return False
+        return True
+
+    def run(self):
+        # init listeners, add them to the event loop
+        for sock in self.sockets:
+            sock.setblocking(False)
+            # a race condition during graceful shutdown may make the listener
+            # name unavailable in the request handler, so capture it once here
+            server = sock.getsockname()
+            acceptor = partial(self.accept, server)
+            self.poller.register(sock, selectors.EVENT_READ, acceptor)
+
+        while self.alive:
+            # notify the arbiter we are alive
+            self.notify()
+
+            # can we accept more connections?
+            if self.nr_conns < self.worker_connections:
+                # wait for an event
+                events = self.poller.select(1.0)
+                for key, _ in events:
+                    callback = key.data
+                    callback(key.fileobj)
+
+                # check (but do not wait) for finished requests
+                result = futures.wait(self.futures, timeout=0,
+                                      return_when=futures.FIRST_COMPLETED)
+            else:
+                # wait for a request to finish
+                result = futures.wait(self.futures, timeout=1.0,
+                                      return_when=futures.FIRST_COMPLETED)
+
+            # clean up finished requests
+            for fut in result.done:
+                self.futures.remove(fut)
+
+            if not self.is_parent_alive():
+                break
+
+            # handle keepalive timeouts
+            self.murder_keepalived()
+
+        self.tpool.shutdown(False)
+        self.poller.close()
+
+        for s in self.sockets:
+            s.close()
+
+        futures.wait(self.futures, timeout=self.cfg.graceful_timeout)
+
+    def finish_request(self, fs):
+        if fs.cancelled():
+            self.nr_conns -= 1
+            fs.conn.close()
+            return
+
+        try:
+            (keepalive, conn) = fs.result()
+            # if the connection should be kept alive, add it
+            # to the event loop and record it
+            if keepalive and self.alive:
+                # flag the socket as non-blocking
+                conn.sock.setblocking(False)
+
+                # register the connection
+                conn.set_timeout()
+                with self._lock:
+                    self._keep.append(conn)
+
+                    # add the socket to the event loop
+                    self.poller.register(conn.sock, selectors.EVENT_READ,
+                                         partial(self.on_client_socket_readable, conn))
+            else:
+                self.nr_conns -= 1
+                conn.close()
+        except Exception:
+            # an exception happened, make sure to close the
+            # socket.
+            self.nr_conns -= 1
+            fs.conn.close()
+
+    def handle(self, conn):
+        keepalive = False
+        req = None
+        try:
+            req = next(conn.parser)
+            if not req:
+                return (False, conn)
+
+            # handle the request
+            keepalive = self.handle_request(req, conn)
+            if keepalive:
+                return (keepalive, conn)
+        except http.errors.NoMoreData as e:
+            self.log.debug("Ignored premature client disconnection. %s", e)
+
+        except StopIteration as e:
+            self.log.debug("Closing connection. %s", e)
+        except ssl.SSLError as e:
+            if e.args[0] == ssl.SSL_ERROR_EOF:
+                self.log.debug("ssl connection closed")
+                conn.sock.close()
+            else:
+                self.log.debug("Error processing SSL request.")
+                self.handle_error(req, conn.sock, conn.client, e)
+
+        except EnvironmentError as e:
+            if e.errno not in (errno.EPIPE, errno.ECONNRESET, errno.ENOTCONN):
+                self.log.exception("Socket error processing request.")
+            else:
+                if e.errno == errno.ECONNRESET:
+                    self.log.debug("Ignoring connection reset")
+                elif e.errno == errno.ENOTCONN:
+                    self.log.debug("Ignoring socket not connected")
+                else:
+                    self.log.debug("Ignoring connection epipe")
+        except Exception as e:
+            self.handle_error(req, conn.sock, conn.client, e)
+
+        return (False, conn)
+
+    def handle_request(self, req, conn):
+        environ = {}
+        resp = None
+        try:
+            self.cfg.pre_request(self, req)
+            request_start = datetime.now()
+            resp, environ = wsgi.create(req, conn.sock, conn.client,
+                                        conn.server, self.cfg)
+            environ["wsgi.multithread"] = True
+            self.nr += 1
+            if self.nr >= self.max_requests:
+                if self.alive:
+                    self.log.info("Autorestarting worker after current request.")
+                    self.alive = False
+                resp.force_close()
+
+            if not self.alive or not self.cfg.keepalive:
+                resp.force_close()
+            elif len(self._keep) >= self.max_keepalived:
+                resp.force_close()
+
+            respiter = self.wsgi(environ, resp.start_response)
+            try:
+                if isinstance(respiter, environ['wsgi.file_wrapper']):
+                    resp.write_file(respiter)
+                else:
+                    for item in respiter:
+                        resp.write(item)
+
+                resp.close()
+            finally:
+                request_time = datetime.now() - request_start
+                self.log.access(resp, req, environ, request_time)
+                if hasattr(respiter, "close"):
+                    respiter.close()
+
+            if resp.should_close():
+                self.log.debug("Closing connection.")
+                return False
+        except EnvironmentError:
+            # pass to next try-except level
+            util.reraise(*sys.exc_info())
+        except Exception:
+            if resp and resp.headers_sent:
+                # If the response headers have already been sent, we have to
+                # close the connection to signal the error.
+                self.log.exception("Error handling request")
+                try:
+                    conn.sock.shutdown(socket.SHUT_RDWR)
+                    conn.sock.close()
+                except EnvironmentError:
+                    pass
+                raise StopIteration()
+            raise
+        finally:
+            try:
+                self.cfg.post_request(self, req, environ, resp)
+            except Exception:
+                self.log.exception("Exception in post_request hook")
+
+        return True
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/workers/gtornado.py b/.venv/lib/python3.12/site-packages/gunicorn/workers/gtornado.py
new file mode 100644
index 00000000..28506119
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/workers/gtornado.py
@@ -0,0 +1,167 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+import os
+import sys
+
+try:
+    import tornado
+except ImportError:
+    raise RuntimeError("You need tornado installed to use this worker.")
+import tornado.web
+import tornado.httpserver
+from tornado.ioloop import IOLoop, PeriodicCallback
+from tornado.wsgi import WSGIContainer
+from gunicorn.workers.base import Worker
+from gunicorn import __version__ as gversion
+from gunicorn.sock import ssl_context
+
+
+# Tornado 5.0 updated its IOLoop and removed the `io_loop` argument
+# from many Tornado functions. It also no longer stores
+# PeriodicCallbacks in ioloop._callbacks, so we store them on our side
+# and call stop() on them when stopping the worker.
+# See https://www.tornadoweb.org/en/stable/releases/v5.0.0.html#backwards-compatibility-notes
+# for more details.
+TORNADO5 = tornado.version_info >= (5, 0, 0)
+
+
+class TornadoWorker(Worker):
+
+    @classmethod
+    def setup(cls):
+        web = sys.modules.pop("tornado.web")
+        old_clear = web.RequestHandler.clear
+
+        def clear(self):
+            old_clear(self)
+            if "Gunicorn" not in self._headers["Server"]:
+                self._headers["Server"] += " (Gunicorn/%s)" % gversion
+        web.RequestHandler.clear = clear
+        sys.modules["tornado.web"] = web
+
+    def handle_exit(self, sig, frame):
+        if self.alive:
+            super().handle_exit(sig, frame)
+
+    def handle_request(self):
+        self.nr += 1
+        if self.alive and self.nr >= self.max_requests:
+            self.log.info("Autorestarting worker after current request.")
+            self.alive = False
+
+    def watchdog(self):
+        if self.alive:
+            self.notify()
+
+        if self.ppid != os.getppid():
+            self.log.info("Parent changed, shutting down: %s", self)
+            self.alive = False
+
+    def heartbeat(self):
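+        # Two-stage shutdown: on the first tick after `alive` goes False we
+        # stop accepting (server.stop()); on a later tick we stop the
+        # periodic callbacks and the IOLoop itself, giving in-flight
+        # requests a chance to drain.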
+        if not self.alive:
+            if self.server_alive:
+                if hasattr(self, 'server'):
+                    try:
+                        self.server.stop()
+                    except Exception:
+                        pass
+                self.server_alive = False
+            else:
+                if TORNADO5:
+                    for callback in self.callbacks:
+                        callback.stop()
+                    self.ioloop.stop()
+                else:
+                    if not self.ioloop._callbacks:
+                        self.ioloop.stop()
+
+    def init_process(self):
+        # IOLoop cannot survive a fork or be shared across processes
+        # in any way. When multiple processes are being used, each process
+        # should create its own IOLoop. We should clear the current
+        # IOLoop, if one exists, before os.fork.
+        IOLoop.clear_current()
+        super().init_process()
+
+    def run(self):
+        self.ioloop = IOLoop.instance()
+        self.alive = True
+        self.server_alive = False
+
+        if TORNADO5:
+            self.callbacks = []
+            self.callbacks.append(PeriodicCallback(self.watchdog, 1000))
+            self.callbacks.append(PeriodicCallback(self.heartbeat, 1000))
+            for callback in self.callbacks:
+                callback.start()
+        else:
+            PeriodicCallback(self.watchdog, 1000, io_loop=self.ioloop).start()
+            PeriodicCallback(self.heartbeat, 1000, io_loop=self.ioloop).start()
+
+        # Assume the app is a WSGI callable if it's not an
+        # instance of tornado.web.Application, or if it is an
+        # instance of tornado.wsgi.WSGIApplication.
+        app = self.wsgi
+
+        if tornado.version_info[0] < 6:
+            if not isinstance(app, tornado.web.Application) or \
+                    isinstance(app, tornado.wsgi.WSGIApplication):
+                app = WSGIContainer(app)
+        elif not isinstance(app, WSGIContainer) and \
+                not isinstance(app, tornado.web.Application):
+            app = WSGIContainer(app)
+
+        # Monkey-patching HTTPConnection.finish to count the
+        # number of requests being handled by Tornado. This
+        # will help gunicorn shut down the worker if max_requests
+        # is exceeded.
+        httpserver = sys.modules["tornado.httpserver"]
+        if hasattr(httpserver, 'HTTPConnection'):
+            old_connection_finish = httpserver.HTTPConnection.finish
+
+            def finish(other):
+                self.handle_request()
+                old_connection_finish(other)
+            httpserver.HTTPConnection.finish = finish
+            sys.modules["tornado.httpserver"] = httpserver
+
+            server_class = tornado.httpserver.HTTPServer
+        else:
+
+            class _HTTPServer(tornado.httpserver.HTTPServer):
+
+                def on_close(instance, server_conn):
+                    self.handle_request()
+                    super(_HTTPServer, instance).on_close(server_conn)
+
+            server_class = _HTTPServer
+
+        if self.cfg.is_ssl:
+            if TORNADO5:
+                server = server_class(app, ssl_options=ssl_context(self.cfg))
+            else:
+                server = server_class(app, io_loop=self.ioloop,
+                                      ssl_options=ssl_context(self.cfg))
+        else:
+            if TORNADO5:
+                server = server_class(app)
+            else:
+                server = server_class(app, io_loop=self.ioloop)
+
+        self.server = server
+        self.server_alive = True
+
+        for s in self.sockets:
+            s.setblocking(0)
+            if hasattr(server, "add_socket"):  # tornado > 2.0
+                server.add_socket(s)
+            elif hasattr(server, "_sockets"):  # tornado 2.0
+                server._sockets[s.fileno()] = s
+
+        server.no_keep_alive = self.cfg.keepalive <= 0
+        server.start(num_processes=1)
+
+        self.ioloop.start()
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/workers/sync.py b/.venv/lib/python3.12/site-packages/gunicorn/workers/sync.py
new file mode 100644
index 00000000..39a209f0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/workers/sync.py
@@ -0,0 +1,210 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+#
+
+from datetime import datetime
+import errno
+import os
+import select
+import socket
+import ssl
+import sys
+
+from gunicorn import http
+from gunicorn.http import wsgi
+from gunicorn import sock
+from gunicorn import util
+from gunicorn.workers import base
+
+
+class StopWaiting(Exception):
+    """ exception raised to stop waiting for a connection """
+
+
+class SyncWorker(base.Worker):
+
+    def accept(self, listener):
+        client, addr = listener.accept()
+        client.setblocking(1)
+        util.close_on_exec(client)
+        self.handle(listener, client, addr)
+
+    def wait(self, timeout):
+        try:
+            self.notify()
+            ret = select.select(self.wait_fds, [], [], timeout)
+            if ret[0]:
+                if self.PIPE[0] in ret[0]:
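+                    # drain the wakeup byte (presumably written by a signal
+                    # handler to interrupt select) so the pipe doesn't stay
+                    # readable forever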
+                    os.read(self.PIPE[0], 1)
+                return ret[0]
+
+        except select.error as e:
+            if e.args[0] == errno.EINTR:
+                return self.sockets
+            if e.args[0] == errno.EBADF:
+                if self.nr < 0:
+                    return self.sockets
+                else:
+                    raise StopWaiting
+            raise
+
+    def is_parent_alive(self):
+        # If our parent changed then we shut down.
+        if self.ppid != os.getppid():
+            self.log.info("Parent changed, shutting down: %s", self)
+            return False
+        return True
+
+    def run_for_one(self, timeout):
+        listener = self.sockets[0]
+        while self.alive:
+            self.notify()
+
+            # Accept a connection. If we get an error telling us
+            # that no connection is waiting, we fall through to the
+            # select() below, where we wait a bit for new
+            # connections to arrive.
+            try:
+                self.accept(listener)
+                # Keep processing clients until no one is waiting. This
+                # prevents the need to select() for every client that we
+                # process.
+                continue
+
+            except EnvironmentError as e:
+                if e.errno not in (errno.EAGAIN, errno.ECONNABORTED,
+                                   errno.EWOULDBLOCK):
+                    raise
+
+            if not self.is_parent_alive():
+                return
+
+            try:
+                self.wait(timeout)
+            except StopWaiting:
+                return
+
+    def run_for_multiple(self, timeout):
+        while self.alive:
+            self.notify()
+
+            try:
+                ready = self.wait(timeout)
+            except StopWaiting:
+                return
+
+            if ready is not None:
+                for listener in ready:
+                    if listener == self.PIPE[0]:
+                        continue
+
+                    try:
+                        self.accept(listener)
+                    except EnvironmentError as e:
+                        if e.errno not in (errno.EAGAIN, errno.ECONNABORTED,
+                                           errno.EWOULDBLOCK):
+                            raise
+
+            if not self.is_parent_alive():
+                return
+
+    def run(self):
+        # If no timeout is given, the worker never waits and burns
+        # CPU for nothing. This minimal timeout prevents that.
+        timeout = self.timeout or 0.5
+
+        # self.socket appears to lose its blocking status after
+        # we fork in the arbiter. Reset it here.
+        for s in self.sockets:
+            s.setblocking(0)
+
+        if len(self.sockets) > 1:
+            self.run_for_multiple(timeout)
+        else:
+            self.run_for_one(timeout)
+
+    def handle(self, listener, client, addr):
+        req = None
+        try:
+            if self.cfg.is_ssl:
+                client = sock.ssl_wrap_socket(client, self.cfg)
+            parser = http.RequestParser(self.cfg, client, addr)
+            req = next(parser)
+            self.handle_request(listener, req, client, addr)
+        except http.errors.NoMoreData as e:
+            self.log.debug("Ignored premature client disconnection. %s", e)
+        except StopIteration as e:
+            self.log.debug("Closing connection. %s", e)
+        except ssl.SSLError as e:
+            if e.args[0] == ssl.SSL_ERROR_EOF:
+                self.log.debug("ssl connection closed")
+                client.close()
+            else:
+                self.log.debug("Error processing SSL request.")
+                self.handle_error(req, client, addr, e)
+        except EnvironmentError as e:
+            if e.errno not in (errno.EPIPE, errno.ECONNRESET, errno.ENOTCONN):
+                self.log.exception("Socket error processing request.")
+            else:
+                if e.errno == errno.ECONNRESET:
+                    self.log.debug("Ignoring connection reset")
+                elif e.errno == errno.ENOTCONN:
+                    self.log.debug("Ignoring socket not connected")
+                else:
+                    self.log.debug("Ignoring EPIPE")
+        except Exception as e:
+            self.handle_error(req, client, addr, e)
+        finally:
+            util.close(client)
+
+    def handle_request(self, listener, req, client, addr):
+        environ = {}
+        resp = None
+        try:
+            self.cfg.pre_request(self, req)
+            request_start = datetime.now()
+            resp, environ = wsgi.create(req, client, addr,
+                                        listener.getsockname(), self.cfg)
+            # Force the connection closed until someone shows
+            # a buffering proxy that supports Keep-Alive to
+            # the backend.
+            resp.force_close()
+            self.nr += 1
+            if self.nr >= self.max_requests:
+                self.log.info("Autorestarting worker after current request.")
+                self.alive = False
+            respiter = self.wsgi(environ, resp.start_response)
+            try:
+                if isinstance(respiter, environ['wsgi.file_wrapper']):
+                    resp.write_file(respiter)
+                else:
+                    for item in respiter:
+                        resp.write(item)
+                resp.close()
+            finally:
+                request_time = datetime.now() - request_start
+                self.log.access(resp, req, environ, request_time)
+                if hasattr(respiter, "close"):
+                    respiter.close()
+        except EnvironmentError:
+            # pass to next try-except level
+            util.reraise(*sys.exc_info())
+        except Exception:
+            if resp and resp.headers_sent:
+                # If the response headers have already been sent, we have to
+                # close the connection to signal the error.
+                self.log.exception("Error handling request")
+                try:
+                    client.shutdown(socket.SHUT_RDWR)
+                    client.close()
+                except EnvironmentError:
+                    pass
+                raise StopIteration()
+            raise
+        finally:
+            try:
+                self.cfg.post_request(self, req, environ, resp)
+            except Exception:
+                self.log.exception("Exception in post_request hook")
diff --git a/.venv/lib/python3.12/site-packages/gunicorn/workers/workertmp.py b/.venv/lib/python3.12/site-packages/gunicorn/workers/workertmp.py
new file mode 100644
index 00000000..cc79ecd6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/gunicorn/workers/workertmp.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -
+#
+# This file is part of gunicorn released under the MIT license.
+# See the NOTICE for more information.
+
+import os
+import platform
+import tempfile
+
+from gunicorn import util
+
+PLATFORM = platform.system()
+IS_CYGWIN = PLATFORM.startswith('CYGWIN')
+
+
+class WorkerTmp(object):
+
+    def __init__(self, cfg):
+        old_umask = os.umask(cfg.umask)
+        fdir = cfg.worker_tmp_dir
+        if fdir and not os.path.isdir(fdir):
+            raise RuntimeError("%s doesn't exist. Can't create workertmp." % fdir)
+        fd, name = tempfile.mkstemp(prefix="wgunicorn-", dir=fdir)
+        os.umask(old_umask)
+
+        # change the owner and group of the file if the worker will run as
+        # a different user or group, so that the worker can modify the file
+        if cfg.uid != os.geteuid() or cfg.gid != os.getegid():
+            util.chown(name, cfg.uid, cfg.gid)
+
+        # unlink the file so we don't leak temporary files
+        try:
+            if not IS_CYGWIN:
+                util.unlink(name)
+            # In Python 3.8, open() emits RuntimeWarning if buffering=1 for binary mode.
+            # Because we never write to this file, pass 0 to switch buffering off.
+            self._tmp = os.fdopen(fd, 'w+b', 0)
+        except Exception:
+            os.close(fd)
+            raise
+
+        self.spinner = 0
+
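+    # Heartbeat trick: flipping the permission bits between 0 and 1 bumps
+    # the file's ctime without writing any data; the arbiter polls
+    # last_update() and compares it against the worker timeout to detect
+    # stuck workers. (This is also why the gunicorn docs suggest pointing
+    # worker_tmp_dir at a tmpfs such as /dev/shm when disks are slow.)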
+    def notify(self):
+        self.spinner = (self.spinner + 1) % 2
+        os.fchmod(self._tmp.fileno(), self.spinner)
+
+    def last_update(self):
+        return os.fstat(self._tmp.fileno()).st_ctime
+
+    def fileno(self):
+        return self._tmp.fileno()
+
+    def close(self):
+        return self._tmp.close()