path: root/.venv/lib/python3.12/site-packages/apscheduler
author     S. Solomon Darnell  2025-03-28 21:52:21 -0500
committer  S. Solomon Darnell  2025-03-28 21:52:21 -0500
commit     4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree       ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/apscheduler
parent     cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
download   gn-ai-master.tar.gz
two versions of R2R are here (HEAD, master)
Diffstat (limited to '.venv/lib/python3.12/site-packages/apscheduler')
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/__init__.py | 11
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/events.py | 134
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/executors/__init__.py | 0
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/executors/asyncio.py | 52
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/executors/base.py | 205
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/executors/debug.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/executors/gevent.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/executors/pool.py | 82
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/executors/tornado.py | 49
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/executors/twisted.py | 24
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/job.py | 330
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/jobstores/__init__.py | 0
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/jobstores/base.py | 141
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/jobstores/etcd.py | 170
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/jobstores/memory.py | 106
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/jobstores/mongodb.py | 158
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/jobstores/redis.py | 160
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/jobstores/rethinkdb.py | 173
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/jobstores/sqlalchemy.py | 194
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/jobstores/zookeeper.py | 197
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/schedulers/__init__.py | 12
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/schedulers/asyncio.py | 67
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/schedulers/background.py | 42
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/schedulers/base.py | 1264
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/schedulers/blocking.py | 33
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/schedulers/gevent.py | 34
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/schedulers/qt.py | 44
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/schedulers/tornado.py | 65
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/schedulers/twisted.py | 62
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/triggers/__init__.py | 0
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/triggers/base.py | 35
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/triggers/calendarinterval.py | 186
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/triggers/combining.py | 114
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/triggers/cron/__init__.py | 289
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/triggers/cron/expressions.py | 285
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/triggers/cron/fields.py | 149
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/triggers/date.py | 51
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/triggers/interval.py | 138
-rw-r--r--  .venv/lib/python3.12/site-packages/apscheduler/util.py | 461
39 files changed, 5566 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/__init__.py b/.venv/lib/python3.12/site-packages/apscheduler/__init__.py
new file mode 100644
index 00000000..475135fe
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/__init__.py
@@ -0,0 +1,11 @@
+import importlib.metadata as importlib_metadata
+import sys
+
+try:
+ release = importlib_metadata.version("APScheduler").split("-")[0]
+except importlib_metadata.PackageNotFoundError:
+ release = "3.5.0"
+
+version_info = tuple(int(x) if x.isdigit() else x for x in release.split("."))
+version = __version__ = ".".join(str(x) for x in version_info[:3])
+del sys, importlib_metadata
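
As a quick orientation for the module above, the exposed attributes can be read directly once the package is importable (the printed values are illustrative):

import apscheduler

# Attributes defined in apscheduler/__init__.py above
print(apscheduler.release)       # full release string, e.g. "3.10.4"
print(apscheduler.version_info)  # tuple form, e.g. (3, 10, 4)
print(apscheduler.__version__)   # first three components joined, e.g. "3.10.4"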
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/events.py b/.venv/lib/python3.12/site-packages/apscheduler/events.py
new file mode 100644
index 00000000..12ceb08f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/events.py
@@ -0,0 +1,134 @@
+__all__ = (
+ "EVENT_SCHEDULER_STARTED",
+ "EVENT_SCHEDULER_SHUTDOWN",
+ "EVENT_SCHEDULER_PAUSED",
+ "EVENT_SCHEDULER_RESUMED",
+ "EVENT_EXECUTOR_ADDED",
+ "EVENT_EXECUTOR_REMOVED",
+ "EVENT_JOBSTORE_ADDED",
+ "EVENT_JOBSTORE_REMOVED",
+ "EVENT_ALL_JOBS_REMOVED",
+ "EVENT_JOB_ADDED",
+ "EVENT_JOB_REMOVED",
+ "EVENT_JOB_MODIFIED",
+ "EVENT_JOB_EXECUTED",
+ "EVENT_JOB_ERROR",
+ "EVENT_JOB_MISSED",
+ "EVENT_JOB_SUBMITTED",
+ "EVENT_JOB_MAX_INSTANCES",
+ "EVENT_ALL",
+ "SchedulerEvent",
+ "JobEvent",
+ "JobExecutionEvent",
+ "JobSubmissionEvent",
+)
+
+
+EVENT_SCHEDULER_STARTED = EVENT_SCHEDULER_START = 2**0
+EVENT_SCHEDULER_SHUTDOWN = 2**1
+EVENT_SCHEDULER_PAUSED = 2**2
+EVENT_SCHEDULER_RESUMED = 2**3
+EVENT_EXECUTOR_ADDED = 2**4
+EVENT_EXECUTOR_REMOVED = 2**5
+EVENT_JOBSTORE_ADDED = 2**6
+EVENT_JOBSTORE_REMOVED = 2**7
+EVENT_ALL_JOBS_REMOVED = 2**8
+EVENT_JOB_ADDED = 2**9
+EVENT_JOB_REMOVED = 2**10
+EVENT_JOB_MODIFIED = 2**11
+EVENT_JOB_EXECUTED = 2**12
+EVENT_JOB_ERROR = 2**13
+EVENT_JOB_MISSED = 2**14
+EVENT_JOB_SUBMITTED = 2**15
+EVENT_JOB_MAX_INSTANCES = 2**16
+EVENT_ALL = (
+ EVENT_SCHEDULER_STARTED
+ | EVENT_SCHEDULER_SHUTDOWN
+ | EVENT_SCHEDULER_PAUSED
+ | EVENT_SCHEDULER_RESUMED
+ | EVENT_EXECUTOR_ADDED
+ | EVENT_EXECUTOR_REMOVED
+ | EVENT_JOBSTORE_ADDED
+ | EVENT_JOBSTORE_REMOVED
+ | EVENT_ALL_JOBS_REMOVED
+ | EVENT_JOB_ADDED
+ | EVENT_JOB_REMOVED
+ | EVENT_JOB_MODIFIED
+ | EVENT_JOB_EXECUTED
+ | EVENT_JOB_ERROR
+ | EVENT_JOB_MISSED
+ | EVENT_JOB_SUBMITTED
+ | EVENT_JOB_MAX_INSTANCES
+)
+
+
+class SchedulerEvent:
+ """
+ An event that concerns the scheduler itself.
+
+ :ivar code: the type code of this event
+ :ivar alias: alias of the job store or executor that was added or removed (if applicable)
+ """
+
+ def __init__(self, code, alias=None):
+ super().__init__()
+ self.code = code
+ self.alias = alias
+
+ def __repr__(self):
+ return "<%s (code=%d)>" % (self.__class__.__name__, self.code)
+
+
+class JobEvent(SchedulerEvent):
+ """
+ An event that concerns a job.
+
+ :ivar code: the type code of this event
+ :ivar job_id: identifier of the job in question
+ :ivar jobstore: alias of the job store containing the job in question
+ """
+
+ def __init__(self, code, job_id, jobstore):
+ super().__init__(code)
+ self.code = code
+ self.job_id = job_id
+ self.jobstore = jobstore
+
+
+class JobSubmissionEvent(JobEvent):
+ """
+ An event that concerns the submission of a job to its executor.
+
+ :ivar scheduled_run_times: a list of datetimes when the job was intended to run
+ """
+
+ def __init__(self, code, job_id, jobstore, scheduled_run_times):
+ super().__init__(code, job_id, jobstore)
+ self.scheduled_run_times = scheduled_run_times
+
+
+class JobExecutionEvent(JobEvent):
+ """
+ An event that concerns the running of a job within its executor.
+
+ :ivar scheduled_run_time: the time when the job was scheduled to be run
+ :ivar retval: the return value of the successfully executed job
+ :ivar exception: the exception raised by the job
+ :ivar traceback: a formatted traceback for the exception
+ """
+
+ def __init__(
+ self,
+ code,
+ job_id,
+ jobstore,
+ scheduled_run_time,
+ retval=None,
+ exception=None,
+ traceback=None,
+ ):
+ super().__init__(code, job_id, jobstore)
+ self.scheduled_run_time = scheduled_run_time
+ self.retval = retval
+ self.exception = exception
+ self.traceback = traceback
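
A minimal sketch of how these event constants and classes are consumed through a scheduler listener, assuming APScheduler 3.x and a BackgroundScheduler (the tick function and timings are illustrative):

import time

from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED
from apscheduler.schedulers.background import BackgroundScheduler


def tick():
    print("tick")


def on_job_event(event):
    # event is a JobExecutionEvent; the code attribute tells which bit fired
    if event.code == EVENT_JOB_ERROR:
        print(f"job {event.job_id} failed: {event.exception}")
    else:
        print(f"job {event.job_id} returned {event.retval}")


scheduler = BackgroundScheduler()
scheduler.add_listener(on_job_event, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
scheduler.add_job(tick, "interval", seconds=5)
scheduler.start()
time.sleep(20)
scheduler.shutdown()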
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/executors/__init__.py b/.venv/lib/python3.12/site-packages/apscheduler/executors/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/executors/__init__.py
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/executors/asyncio.py b/.venv/lib/python3.12/site-packages/apscheduler/executors/asyncio.py
new file mode 100644
index 00000000..cc7eb651
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/executors/asyncio.py
@@ -0,0 +1,52 @@
+import sys
+
+from apscheduler.executors.base import BaseExecutor, run_coroutine_job, run_job
+from apscheduler.util import iscoroutinefunction_partial
+
+
+class AsyncIOExecutor(BaseExecutor):
+ """
+ Runs jobs in the default executor of the event loop.
+
+ If the job function is a native coroutine function, it is scheduled to be run directly in the
+ event loop as soon as possible. All other functions are run in the event loop's default
+ executor which is usually a thread pool.
+
+ Plugin alias: ``asyncio``
+ """
+
+ def start(self, scheduler, alias):
+ super().start(scheduler, alias)
+ self._eventloop = scheduler._eventloop
+ self._pending_futures = set()
+
+ def shutdown(self, wait=True):
+ # There is no way to honor wait=True without converting this method into a coroutine method
+ for f in self._pending_futures:
+ if not f.done():
+ f.cancel()
+
+ self._pending_futures.clear()
+
+ def _do_submit_job(self, job, run_times):
+ def callback(f):
+ self._pending_futures.discard(f)
+ try:
+ events = f.result()
+ except BaseException:
+ self._run_job_error(job.id, *sys.exc_info()[1:])
+ else:
+ self._run_job_success(job.id, events)
+
+ if iscoroutinefunction_partial(job.func):
+ coro = run_coroutine_job(
+ job, job._jobstore_alias, run_times, self._logger.name
+ )
+ f = self._eventloop.create_task(coro)
+ else:
+ f = self._eventloop.run_in_executor(
+ None, run_job, job, job._jobstore_alias, run_times, self._logger.name
+ )
+
+ f.add_done_callback(callback)
+ self._pending_futures.add(f)
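
For context, a coroutine job normally reaches AsyncIOExecutor through AsyncIOScheduler; a minimal sketch assuming APScheduler 3.x (the fetch coroutine and timing values are illustrative):

import asyncio

from apscheduler.schedulers.asyncio import AsyncIOScheduler


async def fetch():
    # Native coroutine: AsyncIOExecutor runs it directly on the event loop
    await asyncio.sleep(0.1)
    print("fetched")


async def main():
    scheduler = AsyncIOScheduler()
    scheduler.add_job(fetch, "interval", seconds=2)
    scheduler.start()
    await asyncio.sleep(10)  # let a few runs happen
    scheduler.shutdown()


asyncio.run(main())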
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/executors/base.py b/.venv/lib/python3.12/site-packages/apscheduler/executors/base.py
new file mode 100644
index 00000000..d690a38d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/executors/base.py
@@ -0,0 +1,205 @@
+import logging
+import sys
+import traceback
+from abc import ABCMeta, abstractmethod
+from collections import defaultdict
+from datetime import datetime, timedelta, timezone
+from traceback import format_tb
+
+from apscheduler.events import (
+ EVENT_JOB_ERROR,
+ EVENT_JOB_EXECUTED,
+ EVENT_JOB_MISSED,
+ JobExecutionEvent,
+)
+
+
+class MaxInstancesReachedError(Exception):
+ def __init__(self, job):
+ super().__init__(
+ 'Job "%s" has already reached its maximum number of instances (%d)'
+ % (job.id, job.max_instances)
+ )
+
+
+class BaseExecutor(metaclass=ABCMeta):
+ """Abstract base class that defines the interface that every executor must implement."""
+
+ _scheduler = None
+ _lock = None
+ _logger = logging.getLogger("apscheduler.executors")
+
+ def __init__(self):
+ super().__init__()
+ self._instances = defaultdict(lambda: 0)
+
+ def start(self, scheduler, alias):
+ """
+ Called by the scheduler when the scheduler is being started or when the executor is being
+ added to an already running scheduler.
+
+ :param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting
+ this executor
+ :param str|unicode alias: alias of this executor as it was assigned to the scheduler
+
+ """
+ self._scheduler = scheduler
+ self._lock = scheduler._create_lock()
+ self._logger = logging.getLogger(f"apscheduler.executors.{alias}")
+
+ def shutdown(self, wait=True):
+ """
+ Shuts down this executor.
+
+ :param bool wait: ``True`` to wait until all submitted jobs
+ have been executed
+ """
+
+ def submit_job(self, job, run_times):
+ """
+ Submits job for execution.
+
+ :param Job job: job to execute
+ :param list[datetime] run_times: list of datetimes specifying
+ when the job should have been run
+ :raises MaxInstancesReachedError: if the maximum number of
+ allowed instances for this job has been reached
+
+ """
+ assert self._lock is not None, "This executor has not been started yet"
+ with self._lock:
+ if self._instances[job.id] >= job.max_instances:
+ raise MaxInstancesReachedError(job)
+
+ self._do_submit_job(job, run_times)
+ self._instances[job.id] += 1
+
+ @abstractmethod
+ def _do_submit_job(self, job, run_times):
+ """Performs the actual task of scheduling `run_job` to be called."""
+
+ def _run_job_success(self, job_id, events):
+ """
+ Called by the executor with the list of generated events when :func:`run_job` has been
+ successfully called.
+
+ """
+ with self._lock:
+ self._instances[job_id] -= 1
+ if self._instances[job_id] == 0:
+ del self._instances[job_id]
+
+ for event in events:
+ self._scheduler._dispatch_event(event)
+
+ def _run_job_error(self, job_id, exc, traceback=None):
+ """Called by the executor with the exception if there is an error calling `run_job`."""
+ with self._lock:
+ self._instances[job_id] -= 1
+ if self._instances[job_id] == 0:
+ del self._instances[job_id]
+
+ exc_info = (exc.__class__, exc, traceback)
+ self._logger.error("Error running job %s", job_id, exc_info=exc_info)
+
+
+def run_job(job, jobstore_alias, run_times, logger_name):
+ """
+ Called by executors to run the job. Returns a list of scheduler events to be dispatched by the
+ scheduler.
+
+ """
+ events = []
+ logger = logging.getLogger(logger_name)
+ for run_time in run_times:
+ # See if the job missed its run time window, and handle
+ # possible misfires accordingly
+ if job.misfire_grace_time is not None:
+ difference = datetime.now(timezone.utc) - run_time
+ grace_time = timedelta(seconds=job.misfire_grace_time)
+ if difference > grace_time:
+ events.append(
+ JobExecutionEvent(
+ EVENT_JOB_MISSED, job.id, jobstore_alias, run_time
+ )
+ )
+ logger.warning('Run time of job "%s" was missed by %s', job, difference)
+ continue
+
+ logger.info('Running job "%s" (scheduled at %s)', job, run_time)
+ try:
+ retval = job.func(*job.args, **job.kwargs)
+ except BaseException:
+ exc, tb = sys.exc_info()[1:]
+ formatted_tb = "".join(format_tb(tb))
+ events.append(
+ JobExecutionEvent(
+ EVENT_JOB_ERROR,
+ job.id,
+ jobstore_alias,
+ run_time,
+ exception=exc,
+ traceback=formatted_tb,
+ )
+ )
+ logger.exception('Job "%s" raised an exception', job)
+
+ # This is to prevent cyclic references that would lead to memory leaks
+ traceback.clear_frames(tb)
+ del tb
+ else:
+ events.append(
+ JobExecutionEvent(
+ EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time, retval=retval
+ )
+ )
+ logger.info('Job "%s" executed successfully', job)
+
+ return events
+
+
+async def run_coroutine_job(job, jobstore_alias, run_times, logger_name):
+ """Coroutine version of run_job()."""
+ events = []
+ logger = logging.getLogger(logger_name)
+ for run_time in run_times:
+ # See if the job missed its run time window, and handle possible misfires accordingly
+ if job.misfire_grace_time is not None:
+ difference = datetime.now(timezone.utc) - run_time
+ grace_time = timedelta(seconds=job.misfire_grace_time)
+ if difference > grace_time:
+ events.append(
+ JobExecutionEvent(
+ EVENT_JOB_MISSED, job.id, jobstore_alias, run_time
+ )
+ )
+ logger.warning('Run time of job "%s" was missed by %s', job, difference)
+ continue
+
+ logger.info('Running job "%s" (scheduled at %s)', job, run_time)
+ try:
+ retval = await job.func(*job.args, **job.kwargs)
+ except BaseException:
+ exc, tb = sys.exc_info()[1:]
+ formatted_tb = "".join(format_tb(tb))
+ events.append(
+ JobExecutionEvent(
+ EVENT_JOB_ERROR,
+ job.id,
+ jobstore_alias,
+ run_time,
+ exception=exc,
+ traceback=formatted_tb,
+ )
+ )
+ logger.exception('Job "%s" raised an exception', job)
+ traceback.clear_frames(tb)
+ else:
+ events.append(
+ JobExecutionEvent(
+ EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time, retval=retval
+ )
+ )
+ logger.info('Job "%s" executed successfully', job)
+
+ return events
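
The misfire check in run_job()/run_coroutine_job() above is driven by per-job settings; a hedged sketch of how those knobs are typically set when scheduling a job (the callable and intervals are illustrative):

from apscheduler.schedulers.blocking import BlockingScheduler


def sync_task():
    print("running")


scheduler = BlockingScheduler()
# If a run time is missed by more than 30 seconds, EVENT_JOB_MISSED is
# emitted instead of executing; coalesce collapses several overdue run
# times into one execution; max_instances caps concurrent runs.
scheduler.add_job(
    sync_task,
    "interval",
    minutes=1,
    misfire_grace_time=30,
    coalesce=True,
    max_instances=1,
)
scheduler.start()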
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/executors/debug.py b/.venv/lib/python3.12/site-packages/apscheduler/executors/debug.py
new file mode 100644
index 00000000..ac739aeb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/executors/debug.py
@@ -0,0 +1,20 @@
+import sys
+
+from apscheduler.executors.base import BaseExecutor, run_job
+
+
+class DebugExecutor(BaseExecutor):
+ """
+ A special executor that executes the target callable directly instead of deferring it to a
+ thread or process.
+
+ Plugin alias: ``debug``
+ """
+
+ def _do_submit_job(self, job, run_times):
+ try:
+ events = run_job(job, job._jobstore_alias, run_times, self._logger.name)
+ except BaseException:
+ self._run_job_error(job.id, *sys.exc_info()[1:])
+ else:
+ self._run_job_success(job.id, events)
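
A small usage sketch for the executor above: running jobs inline is handy when stepping through job code in a debugger (the scheduler choice and job are illustrative):

from apscheduler.executors.debug import DebugExecutor
from apscheduler.schedulers.blocking import BlockingScheduler

# Jobs execute synchronously in the scheduler's own thread
scheduler = BlockingScheduler(executors={"default": DebugExecutor()})
scheduler.add_job(print, "interval", seconds=10, args=["still alive"])
scheduler.start()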
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/executors/gevent.py b/.venv/lib/python3.12/site-packages/apscheduler/executors/gevent.py
new file mode 100644
index 00000000..385be40c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/executors/gevent.py
@@ -0,0 +1,29 @@
+import sys
+
+from apscheduler.executors.base import BaseExecutor, run_job
+
+try:
+ import gevent
+except ImportError as exc: # pragma: nocover
+ raise ImportError("GeventExecutor requires gevent installed") from exc
+
+
+class GeventExecutor(BaseExecutor):
+ """
+ Runs jobs as greenlets.
+
+ Plugin alias: ``gevent``
+ """
+
+ def _do_submit_job(self, job, run_times):
+ def callback(greenlet):
+ try:
+ events = greenlet.get()
+ except BaseException:
+ self._run_job_error(job.id, *sys.exc_info()[1:])
+ else:
+ self._run_job_success(job.id, events)
+
+ gevent.spawn(
+ run_job, job, job._jobstore_alias, run_times, self._logger.name
+ ).link(callback)
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/executors/pool.py b/.venv/lib/python3.12/site-packages/apscheduler/executors/pool.py
new file mode 100644
index 00000000..166de7c7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/executors/pool.py
@@ -0,0 +1,82 @@
+import concurrent.futures
+import multiprocessing
+from abc import abstractmethod
+from concurrent.futures.process import BrokenProcessPool
+
+from apscheduler.executors.base import BaseExecutor, run_job
+
+
+class BasePoolExecutor(BaseExecutor):
+ @abstractmethod
+ def __init__(self, pool):
+ super().__init__()
+ self._pool = pool
+
+ def _do_submit_job(self, job, run_times):
+ def callback(f):
+ exc, tb = (
+ f.exception_info()
+ if hasattr(f, "exception_info")
+ else (f.exception(), getattr(f.exception(), "__traceback__", None))
+ )
+ if exc:
+ self._run_job_error(job.id, exc, tb)
+ else:
+ self._run_job_success(job.id, f.result())
+
+ f = self._pool.submit(
+ run_job, job, job._jobstore_alias, run_times, self._logger.name
+ )
+ f.add_done_callback(callback)
+
+ def shutdown(self, wait=True):
+ self._pool.shutdown(wait)
+
+
+class ThreadPoolExecutor(BasePoolExecutor):
+ """
+ An executor that runs jobs in a concurrent.futures thread pool.
+
+ Plugin alias: ``threadpool``
+
+ :param max_workers: the maximum number of spawned threads.
+ :param pool_kwargs: dict of keyword arguments to pass to the underlying
+ ThreadPoolExecutor constructor
+ """
+
+ def __init__(self, max_workers=10, pool_kwargs=None):
+ pool_kwargs = pool_kwargs or {}
+ pool = concurrent.futures.ThreadPoolExecutor(int(max_workers), **pool_kwargs)
+ super().__init__(pool)
+
+
+class ProcessPoolExecutor(BasePoolExecutor):
+ """
+ An executor that runs jobs in a concurrent.futures process pool.
+
+ Plugin alias: ``processpool``
+
+ :param max_workers: the maximum number of spawned processes.
+ :param pool_kwargs: dict of keyword arguments to pass to the underlying
+ ProcessPoolExecutor constructor
+ """
+
+ def __init__(self, max_workers=10, pool_kwargs=None):
+ self.pool_kwargs = pool_kwargs or {}
+ self.pool_kwargs.setdefault("mp_context", multiprocessing.get_context("spawn"))
+ pool = concurrent.futures.ProcessPoolExecutor(
+ int(max_workers), **self.pool_kwargs
+ )
+ super().__init__(pool)
+
+ def _do_submit_job(self, job, run_times):
+ try:
+ super()._do_submit_job(job, run_times)
+ except BrokenProcessPool:
+ self._logger.warning(
+ "Process pool is broken; replacing pool with a fresh instance"
+ )
+ self._pool = self._pool.__class__(
+ self._pool._max_workers, **self.pool_kwargs
+ )
+ super()._do_submit_job(job, run_times)
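
These pool executors are usually wired in through the scheduler's executors mapping; a minimal configuration sketch assuming APScheduler 3.x (aliases, pool sizes, and the sample job are illustrative):

from apscheduler.executors.pool import ProcessPoolExecutor, ThreadPoolExecutor
from apscheduler.schedulers.background import BackgroundScheduler

executors = {
    "default": ThreadPoolExecutor(max_workers=20),
    "processpool": ProcessPoolExecutor(max_workers=4),
}
scheduler = BackgroundScheduler(executors=executors)

# Route a CPU-heavy job to the process pool by executor alias
scheduler.add_job(sum, "interval", minutes=5, args=[range(10**6)],
                  executor="processpool")
scheduler.start()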
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/executors/tornado.py b/.venv/lib/python3.12/site-packages/apscheduler/executors/tornado.py
new file mode 100644
index 00000000..46789bd9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/executors/tornado.py
@@ -0,0 +1,49 @@
+import sys
+from concurrent.futures import ThreadPoolExecutor
+
+from tornado.gen import convert_yielded
+
+from apscheduler.executors.base import BaseExecutor, run_coroutine_job, run_job
+from apscheduler.util import iscoroutinefunction_partial
+
+
+class TornadoExecutor(BaseExecutor):
+ """
+ Runs jobs either in a thread pool or directly on the I/O loop.
+
+ If the job function is a native coroutine function, it is scheduled to be run directly in the
+ I/O loop as soon as possible. All other functions are run in a thread pool.
+
+ Plugin alias: ``tornado``
+
+ :param int max_workers: maximum number of worker threads in the thread pool
+ """
+
+ def __init__(self, max_workers=10):
+ super().__init__()
+ self.executor = ThreadPoolExecutor(max_workers)
+
+ def start(self, scheduler, alias):
+ super().start(scheduler, alias)
+ self._ioloop = scheduler._ioloop
+
+ def _do_submit_job(self, job, run_times):
+ def callback(f):
+ try:
+ events = f.result()
+ except BaseException:
+ self._run_job_error(job.id, *sys.exc_info()[1:])
+ else:
+ self._run_job_success(job.id, events)
+
+ if iscoroutinefunction_partial(job.func):
+ f = run_coroutine_job(
+ job, job._jobstore_alias, run_times, self._logger.name
+ )
+ else:
+ f = self.executor.submit(
+ run_job, job, job._jobstore_alias, run_times, self._logger.name
+ )
+
+ f = convert_yielded(f)
+ f.add_done_callback(callback)
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/executors/twisted.py b/.venv/lib/python3.12/site-packages/apscheduler/executors/twisted.py
new file mode 100644
index 00000000..710b20ae
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/executors/twisted.py
@@ -0,0 +1,24 @@
+from apscheduler.executors.base import BaseExecutor, run_job
+
+
+class TwistedExecutor(BaseExecutor):
+ """
+ Runs jobs in the reactor's thread pool.
+
+ Plugin alias: ``twisted``
+ """
+
+ def start(self, scheduler, alias):
+ super().start(scheduler, alias)
+ self._reactor = scheduler._reactor
+
+ def _do_submit_job(self, job, run_times):
+ def callback(success, result):
+ if success:
+ self._run_job_success(job.id, result)
+ else:
+ self._run_job_error(job.id, result.value, result.tb)
+
+ self._reactor.getThreadPool().callInThreadWithCallback(
+ callback, run_job, job, job._jobstore_alias, run_times, self._logger.name
+ )
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/job.py b/.venv/lib/python3.12/site-packages/apscheduler/job.py
new file mode 100644
index 00000000..35e83700
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/job.py
@@ -0,0 +1,330 @@
+from collections.abc import Iterable, Mapping
+from inspect import isclass, ismethod
+from uuid import uuid4
+
+from apscheduler.triggers.base import BaseTrigger
+from apscheduler.util import (
+ check_callable_args,
+ convert_to_datetime,
+ datetime_repr,
+ get_callable_name,
+ obj_to_ref,
+ ref_to_obj,
+)
+
+
+class Job:
+ """
+ Contains the options given when scheduling callables and its current schedule and other state.
+ This class should never be instantiated by the user.
+
+ :var str id: the unique identifier of this job
+ :var str name: the description of this job
+ :var func: the callable to execute
+ :var tuple|list args: positional arguments to the callable
+ :var dict kwargs: keyword arguments to the callable
+ :var bool coalesce: whether to only run the job once when several run times are due
+ :var trigger: the trigger object that controls the schedule of this job
+ :var str executor: the name of the executor that will run this job
+ :var int misfire_grace_time: the amount of time (in seconds) that this job's execution is allowed
+ to be late (``None`` means "allow the job to run no matter how late it is")
+ :var int max_instances: the maximum number of concurrently executing instances allowed for this
+ job
+ :var datetime.datetime next_run_time: the next scheduled run time of this job
+
+ .. note::
+ The ``misfire_grace_time`` has some non-obvious effects on job execution. See the
+ :ref:`missed-job-executions` section in the documentation for an in-depth explanation.
+ """
+
+ __slots__ = (
+ "_scheduler",
+ "_jobstore_alias",
+ "id",
+ "trigger",
+ "executor",
+ "func",
+ "func_ref",
+ "args",
+ "kwargs",
+ "name",
+ "misfire_grace_time",
+ "coalesce",
+ "max_instances",
+ "next_run_time",
+ "__weakref__",
+ )
+
+ def __init__(self, scheduler, id=None, **kwargs):
+ super().__init__()
+ self._scheduler = scheduler
+ self._jobstore_alias = None
+ self._modify(id=id or uuid4().hex, **kwargs)
+
+ def modify(self, **changes):
+ """
+ Makes the given changes to this job and saves it in the associated job store.
+
+ Accepted keyword arguments are the same as the variables on this class.
+
+ .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.modify_job`
+
+ :return Job: this job instance
+
+ """
+ self._scheduler.modify_job(self.id, self._jobstore_alias, **changes)
+ return self
+
+ def reschedule(self, trigger, **trigger_args):
+ """
+ Shortcut for switching the trigger on this job.
+
+ .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.reschedule_job`
+
+ :return Job: this job instance
+
+ """
+ self._scheduler.reschedule_job(
+ self.id, self._jobstore_alias, trigger, **trigger_args
+ )
+ return self
+
+ def pause(self):
+ """
+ Temporarily suspend the execution of this job.
+
+ .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.pause_job`
+
+ :return Job: this job instance
+
+ """
+ self._scheduler.pause_job(self.id, self._jobstore_alias)
+ return self
+
+ def resume(self):
+ """
+ Resume the schedule of this job if previously paused.
+
+ .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.resume_job`
+
+ :return Job: this job instance
+
+ """
+ self._scheduler.resume_job(self.id, self._jobstore_alias)
+ return self
+
+ def remove(self):
+ """
+ Unschedules this job and removes it from its associated job store.
+
+ .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.remove_job`
+
+ """
+ self._scheduler.remove_job(self.id, self._jobstore_alias)
+
+ @property
+ def pending(self):
+ """
+ Returns ``True`` if the referenced job is still waiting to be added to its designated job
+ store.
+
+ """
+ return self._jobstore_alias is None
+
+ #
+ # Private API
+ #
+
+ def _get_run_times(self, now):
+ """
+ Computes the scheduled run times between ``next_run_time`` and ``now`` (inclusive).
+
+ :type now: datetime.datetime
+ :rtype: list[datetime.datetime]
+
+ """
+ run_times = []
+ next_run_time = self.next_run_time
+ while next_run_time and next_run_time <= now:
+ run_times.append(next_run_time)
+ next_run_time = self.trigger.get_next_fire_time(next_run_time, now)
+
+ return run_times
+
+ def _modify(self, **changes):
+ """
+ Validates the changes to the Job and makes the modifications if and only if all of them
+ validate.
+
+ """
+ approved = {}
+
+ if "id" in changes:
+ value = changes.pop("id")
+ if not isinstance(value, str):
+ raise TypeError("id must be a nonempty string")
+ if hasattr(self, "id"):
+ raise ValueError("The job ID may not be changed")
+ approved["id"] = value
+
+ if "func" in changes or "args" in changes or "kwargs" in changes:
+ func = changes.pop("func") if "func" in changes else self.func
+ args = changes.pop("args") if "args" in changes else self.args
+ kwargs = changes.pop("kwargs") if "kwargs" in changes else self.kwargs
+
+ if isinstance(func, str):
+ func_ref = func
+ func = ref_to_obj(func)
+ elif callable(func):
+ try:
+ func_ref = obj_to_ref(func)
+ except ValueError:
+ # If this happens, this Job won't be serializable
+ func_ref = None
+ else:
+ raise TypeError("func must be a callable or a textual reference to one")
+
+ if not hasattr(self, "name") and changes.get("name", None) is None:
+ changes["name"] = get_callable_name(func)
+
+ if isinstance(args, str) or not isinstance(args, Iterable):
+ raise TypeError("args must be a non-string iterable")
+ if isinstance(kwargs, str) or not isinstance(kwargs, Mapping):
+ raise TypeError("kwargs must be a dict-like object")
+
+ check_callable_args(func, args, kwargs)
+
+ approved["func"] = func
+ approved["func_ref"] = func_ref
+ approved["args"] = args
+ approved["kwargs"] = kwargs
+
+ if "name" in changes:
+ value = changes.pop("name")
+ if not value or not isinstance(value, str):
+ raise TypeError("name must be a nonempty string")
+ approved["name"] = value
+
+ if "misfire_grace_time" in changes:
+ value = changes.pop("misfire_grace_time")
+ if value is not None and (not isinstance(value, int) or value <= 0):
+ raise TypeError(
+ "misfire_grace_time must be either None or a positive integer"
+ )
+ approved["misfire_grace_time"] = value
+
+ if "coalesce" in changes:
+ value = bool(changes.pop("coalesce"))
+ approved["coalesce"] = value
+
+ if "max_instances" in changes:
+ value = changes.pop("max_instances")
+ if not isinstance(value, int) or value <= 0:
+ raise TypeError("max_instances must be a positive integer")
+ approved["max_instances"] = value
+
+ if "trigger" in changes:
+ trigger = changes.pop("trigger")
+ if not isinstance(trigger, BaseTrigger):
+ raise TypeError(
+ f"Expected a trigger instance, got {trigger.__class__.__name__} instead"
+ )
+
+ approved["trigger"] = trigger
+
+ if "executor" in changes:
+ value = changes.pop("executor")
+ if not isinstance(value, str):
+ raise TypeError("executor must be a string")
+ approved["executor"] = value
+
+ if "next_run_time" in changes:
+ value = changes.pop("next_run_time")
+ approved["next_run_time"] = convert_to_datetime(
+ value, self._scheduler.timezone, "next_run_time"
+ )
+
+ if changes:
+ raise AttributeError(
+ "The following are not modifiable attributes of Job: {}".format(
+ ", ".join(changes)
+ )
+ )
+
+ for key, value in approved.items():
+ setattr(self, key, value)
+
+ def __getstate__(self):
+ # Don't allow this Job to be serialized if the function reference could not be determined
+ if not self.func_ref:
+ raise ValueError(
+ f"This Job cannot be serialized since the reference to its callable ({self.func!r}) could not "
+ "be determined. Consider giving a textual reference (module:function name) "
+ "instead."
+ )
+
+ # Instance methods cannot survive serialization as-is, so store the "self" argument
+ # explicitly
+ func = self.func
+ if (
+ ismethod(func)
+ and not isclass(func.__self__)
+ and obj_to_ref(func) == self.func_ref
+ ):
+ args = (func.__self__,) + tuple(self.args)
+ else:
+ args = self.args
+
+ return {
+ "version": 1,
+ "id": self.id,
+ "func": self.func_ref,
+ "trigger": self.trigger,
+ "executor": self.executor,
+ "args": args,
+ "kwargs": self.kwargs,
+ "name": self.name,
+ "misfire_grace_time": self.misfire_grace_time,
+ "coalesce": self.coalesce,
+ "max_instances": self.max_instances,
+ "next_run_time": self.next_run_time,
+ }
+
+ def __setstate__(self, state):
+ if state.get("version", 1) > 1:
+ raise ValueError(
+ f"Job has version {state['version']}, but only version 1 can be handled"
+ )
+
+ self.id = state["id"]
+ self.func_ref = state["func"]
+ self.func = ref_to_obj(self.func_ref)
+ self.trigger = state["trigger"]
+ self.executor = state["executor"]
+ self.args = state["args"]
+ self.kwargs = state["kwargs"]
+ self.name = state["name"]
+ self.misfire_grace_time = state["misfire_grace_time"]
+ self.coalesce = state["coalesce"]
+ self.max_instances = state["max_instances"]
+ self.next_run_time = state["next_run_time"]
+
+ def __eq__(self, other):
+ if isinstance(other, Job):
+ return self.id == other.id
+ return NotImplemented
+
+ def __repr__(self):
+ return f"<Job (id={self.id} name={self.name})>"
+
+ def __str__(self):
+ if hasattr(self, "next_run_time"):
+ status = (
+ "next run at: " + datetime_repr(self.next_run_time)
+ if self.next_run_time
+ else "paused"
+ )
+ else:
+ status = "pending"
+
+ return f"{self.name} (trigger: {self.trigger}, {status})"
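
The Job objects defined above are returned by the scheduler as handles; each public method proxies back to the scheduler that owns the job. A usage sketch (the job id, trigger arguments, and report function are illustrative):

from datetime import datetime, timedelta, timezone

from apscheduler.schedulers.background import BackgroundScheduler


def report():
    print("report generated")


scheduler = BackgroundScheduler()
scheduler.start()

job = scheduler.add_job(report, "interval", hours=1, id="report")
job.modify(name="hourly report", max_instances=2)
job.reschedule("cron", minute=0)  # switch to a cron trigger
job.pause()
job.resume()
job.modify(next_run_time=datetime.now(timezone.utc) + timedelta(minutes=5))
job.remove()
scheduler.shutdown()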
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/jobstores/__init__.py b/.venv/lib/python3.12/site-packages/apscheduler/jobstores/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/jobstores/__init__.py
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/jobstores/base.py b/.venv/lib/python3.12/site-packages/apscheduler/jobstores/base.py
new file mode 100644
index 00000000..01cabd1e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/jobstores/base.py
@@ -0,0 +1,141 @@
+import logging
+from abc import ABCMeta, abstractmethod
+
+
+class JobLookupError(KeyError):
+ """Raised when the job store cannot find a job for update or removal."""
+
+ def __init__(self, job_id):
+ super().__init__(f"No job by the id of {job_id} was found")
+
+
+class ConflictingIdError(KeyError):
+ """Raised when the uniqueness of job IDs is being violated."""
+
+ def __init__(self, job_id):
+ super().__init__(f"Job identifier ({job_id}) conflicts with an existing job")
+
+
+class TransientJobError(ValueError):
+ """
+ Raised when an attempt to add transient (with no func_ref) job to a persistent job store is
+ detected.
+ """
+
+ def __init__(self, job_id):
+ super().__init__(
+ f"Job ({job_id}) cannot be added to this job store because a reference to the callable "
+ "could not be determined."
+ )
+
+
+class BaseJobStore(metaclass=ABCMeta):
+ """Abstract base class that defines the interface that every job store must implement."""
+
+ _scheduler = None
+ _alias = None
+ _logger = logging.getLogger("apscheduler.jobstores")
+
+ def start(self, scheduler, alias):
+ """
+ Called by the scheduler when the scheduler is being started or when the job store is being
+ added to an already running scheduler.
+
+ :param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting
+ this job store
+ :param str|unicode alias: alias of this job store as it was assigned to the scheduler
+ """
+
+ self._scheduler = scheduler
+ self._alias = alias
+ self._logger = logging.getLogger(f"apscheduler.jobstores.{alias}")
+
+ def shutdown(self):
+ """Frees any resources still bound to this job store."""
+
+ def _fix_paused_jobs_sorting(self, jobs):
+ for i, job in enumerate(jobs):
+ if job.next_run_time is not None:
+ if i > 0:
+ paused_jobs = jobs[:i]
+ del jobs[:i]
+ jobs.extend(paused_jobs)
+ break
+
+ @abstractmethod
+ def lookup_job(self, job_id):
+ """
+ Returns a specific job, or ``None`` if it isn't found.
+
+ The job store is responsible for setting the ``scheduler`` and ``jobstore`` attributes of
+ the returned job to point to the scheduler and itself, respectively.
+
+ :param str|unicode job_id: identifier of the job
+ :rtype: Job
+ """
+
+ @abstractmethod
+ def get_due_jobs(self, now):
+ """
+ Returns the list of jobs that have ``next_run_time`` earlier or equal to ``now``.
+ The returned jobs must be sorted by next run time (ascending).
+
+ :param datetime.datetime now: the current (timezone aware) datetime
+ :rtype: list[Job]
+ """
+
+ @abstractmethod
+ def get_next_run_time(self):
+ """
+ Returns the earliest run time of all the jobs stored in this job store, or ``None`` if
+ there are no active jobs.
+
+ :rtype: datetime.datetime
+ """
+
+ @abstractmethod
+ def get_all_jobs(self):
+ """
+ Returns a list of all jobs in this job store.
+ The returned jobs should be sorted by next run time (ascending).
+ Paused jobs (next_run_time == None) should be sorted last.
+
+ The job store is responsible for setting the ``scheduler`` and ``jobstore`` attributes of
+ the returned jobs to point to the scheduler and itself, respectively.
+
+ :rtype: list[Job]
+ """
+
+ @abstractmethod
+ def add_job(self, job):
+ """
+ Adds the given job to this store.
+
+ :param Job job: the job to add
+ :raises ConflictingIdError: if there is another job in this store with the same ID
+ """
+
+ @abstractmethod
+ def update_job(self, job):
+ """
+ Replaces the job in the store with the given newer version.
+
+ :param Job job: the job to update
+ :raises JobLookupError: if the job does not exist
+ """
+
+ @abstractmethod
+ def remove_job(self, job_id):
+ """
+ Removes the given job from this store.
+
+ :param str|unicode job_id: identifier of the job
+ :raises JobLookupError: if the job does not exist
+ """
+
+ @abstractmethod
+ def remove_all_jobs(self):
+ """Removes all jobs from this store."""
+
+ def __repr__(self):
+ return f"<{self.__class__.__name__}>"
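
From the scheduler's side, the JobLookupError and ConflictingIdError defined above surface as in this sketch (job ids and the sample job are illustrative):

from apscheduler.jobstores.base import ConflictingIdError, JobLookupError
from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler()
scheduler.start()
scheduler.add_job(print, "interval", seconds=30, id="heartbeat", args=["ok"])

try:
    # Same id again without replace_existing=True -> ConflictingIdError
    scheduler.add_job(print, "interval", seconds=30, id="heartbeat", args=["ok"])
except ConflictingIdError:
    print("a job with this id already exists")

try:
    scheduler.remove_job("no-such-job")
except JobLookupError:
    print("nothing to remove")

scheduler.shutdown()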
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/jobstores/etcd.py b/.venv/lib/python3.12/site-packages/apscheduler/jobstores/etcd.py
new file mode 100644
index 00000000..3fe74ff1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/jobstores/etcd.py
@@ -0,0 +1,170 @@
+import pickle
+from datetime import datetime, timezone
+
+from apscheduler.job import Job
+from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
+from apscheduler.util import (
+ datetime_to_utc_timestamp,
+ maybe_ref,
+ utc_timestamp_to_datetime,
+)
+
+try:
+ from etcd3 import Etcd3Client
+except ImportError as exc: # pragma: nocover
+ raise ImportError("EtcdJobStore requires etcd3 be installed") from exc
+
+
+class EtcdJobStore(BaseJobStore):
+ """
+ Stores jobs in etcd. Any leftover keyword arguments are directly passed to
+ etcd3's `etcd3.client
+ <https://python-etcd3.readthedocs.io/en/latest/readme.html>`_.
+
+ Plugin alias: ``etcd``
+
+ :param str path: path to store jobs in
+ :param client: a :class:`~etcd3.client.etcd3` instance to use instead of
+ providing connection arguments
+ :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
+ highest available
+ """
+
+ def __init__(
+ self,
+ path="/apscheduler",
+ client=None,
+ close_connection_on_exit=False,
+ pickle_protocol=pickle.DEFAULT_PROTOCOL,
+ **connect_args,
+ ):
+ super().__init__()
+ self.pickle_protocol = pickle_protocol
+ self.close_connection_on_exit = close_connection_on_exit
+
+ if not path:
+ raise ValueError('The "path" parameter must not be empty')
+
+ self.path = path
+
+ if client:
+ self.client = maybe_ref(client)
+ else:
+ self.client = Etcd3Client(**connect_args)
+
+ def lookup_job(self, job_id):
+ node_path = self.path + "/" + str(job_id)
+ try:
+ content, _ = self.client.get(node_path)
+ content = pickle.loads(content)
+ job = self._reconstitute_job(content["job_state"])
+ return job
+ except BaseException:
+ return None
+
+ def get_due_jobs(self, now):
+ timestamp = datetime_to_utc_timestamp(now)
+ jobs = [
+ job_record["job"]
+ for job_record in self._get_jobs()
+ if job_record["next_run_time"] is not None
+ and job_record["next_run_time"] <= timestamp
+ ]
+ return jobs
+
+ def get_next_run_time(self):
+ next_runs = [
+ job_record["next_run_time"]
+ for job_record in self._get_jobs()
+ if job_record["next_run_time"] is not None
+ ]
+ return utc_timestamp_to_datetime(min(next_runs)) if len(next_runs) > 0 else None
+
+ def get_all_jobs(self):
+ jobs = [job_record["job"] for job_record in self._get_jobs()]
+ self._fix_paused_jobs_sorting(jobs)
+ return jobs
+
+ def add_job(self, job):
+ node_path = self.path + "/" + str(job.id)
+ value = {
+ "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
+ "job_state": job.__getstate__(),
+ }
+ data = pickle.dumps(value, self.pickle_protocol)
+ status = self.client.put_if_not_exists(node_path, value=data)
+ if not status:
+ raise ConflictingIdError(job.id)
+
+ def update_job(self, job):
+ node_path = self.path + "/" + str(job.id)
+ changes = {
+ "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
+ "job_state": job.__getstate__(),
+ }
+ data = pickle.dumps(changes, self.pickle_protocol)
+ status, _ = self.client.transaction(
+ compare=[self.client.transactions.version(node_path) > 0],
+ success=[self.client.transactions.put(node_path, value=data)],
+ failure=[],
+ )
+ if not status:
+ raise JobLookupError(job.id)
+
+ def remove_job(self, job_id):
+ node_path = self.path + "/" + str(job_id)
+ status, _ = self.client.transaction(
+ compare=[self.client.transactions.version(node_path) > 0],
+ success=[self.client.transactions.delete(node_path)],
+ failure=[],
+ )
+ if not status:
+ raise JobLookupError(job_id)
+
+ def remove_all_jobs(self):
+ self.client.delete_prefix(self.path)
+
+ def shutdown(self):
+ self.client.close()
+
+ def _reconstitute_job(self, job_state):
+ job_state = job_state
+ job = Job.__new__(Job)
+ job.__setstate__(job_state)
+ job._scheduler = self._scheduler
+ job._jobstore_alias = self._alias
+ return job
+
+ def _get_jobs(self):
+ jobs = []
+ failed_job_ids = []
+ all_ids = list(self.client.get_prefix(self.path))
+
+ for doc, _ in all_ids:
+ try:
+ content = pickle.loads(doc)
+ job_record = {
+ "next_run_time": content["next_run_time"],
+ "job": self._reconstitute_job(content["job_state"]),
+ }
+ jobs.append(job_record)
+ except BaseException:
+ content = pickle.loads(doc)
+ failed_id = content["job_state"]["id"]
+ failed_job_ids.append(failed_id)
+ self._logger.exception(
+ 'Unable to restore job "%s" -- removing it', failed_id
+ )
+
+ if failed_job_ids:
+ for failed_id in failed_job_ids:
+ self.remove_job(failed_id)
+ paused_sort_key = datetime(9999, 12, 31, tzinfo=timezone.utc)
+ return sorted(
+ jobs,
+ key=lambda job_record: job_record["job"].next_run_time or paused_sort_key,
+ )
+
+ def __repr__(self):
+ self._logger.exception("<%s (client=%s)>", self.__class__.__name__, self.client)
+ return f"<{self.__class__.__name__} (client={self.client})>"
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/jobstores/memory.py b/.venv/lib/python3.12/site-packages/apscheduler/jobstores/memory.py
new file mode 100644
index 00000000..8103cfdc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/jobstores/memory.py
@@ -0,0 +1,106 @@
+from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
+from apscheduler.util import datetime_to_utc_timestamp
+
+
+class MemoryJobStore(BaseJobStore):
+ """
+ Stores jobs in an array in RAM. Provides no persistence support.
+
+ Plugin alias: ``memory``
+ """
+
+ def __init__(self):
+ super().__init__()
+ # list of (job, timestamp), sorted by next_run_time and job id (ascending)
+ self._jobs = []
+ self._jobs_index = {} # id -> (job, timestamp) lookup table
+
+ def lookup_job(self, job_id):
+ return self._jobs_index.get(job_id, (None, None))[0]
+
+ def get_due_jobs(self, now):
+ now_timestamp = datetime_to_utc_timestamp(now)
+ pending = []
+ for job, timestamp in self._jobs:
+ if timestamp is None or timestamp > now_timestamp:
+ break
+ pending.append(job)
+
+ return pending
+
+ def get_next_run_time(self):
+ return self._jobs[0][0].next_run_time if self._jobs else None
+
+ def get_all_jobs(self):
+ return [j[0] for j in self._jobs]
+
+ def add_job(self, job):
+ if job.id in self._jobs_index:
+ raise ConflictingIdError(job.id)
+
+ timestamp = datetime_to_utc_timestamp(job.next_run_time)
+ index = self._get_job_index(timestamp, job.id)
+ self._jobs.insert(index, (job, timestamp))
+ self._jobs_index[job.id] = (job, timestamp)
+
+ def update_job(self, job):
+ old_job, old_timestamp = self._jobs_index.get(job.id, (None, None))
+ if old_job is None:
+ raise JobLookupError(job.id)
+
+ # If the next run time has not changed, simply replace the job in its present index.
+ # Otherwise, reinsert the job to the list to preserve the ordering.
+ old_index = self._get_job_index(old_timestamp, old_job.id)
+ new_timestamp = datetime_to_utc_timestamp(job.next_run_time)
+ if old_timestamp == new_timestamp:
+ self._jobs[old_index] = (job, new_timestamp)
+ else:
+ del self._jobs[old_index]
+ new_index = self._get_job_index(new_timestamp, job.id)
+ self._jobs.insert(new_index, (job, new_timestamp))
+
+ self._jobs_index[old_job.id] = (job, new_timestamp)
+
+ def remove_job(self, job_id):
+ job, timestamp = self._jobs_index.get(job_id, (None, None))
+ if job is None:
+ raise JobLookupError(job_id)
+
+ index = self._get_job_index(timestamp, job_id)
+ del self._jobs[index]
+ del self._jobs_index[job.id]
+
+ def remove_all_jobs(self):
+ self._jobs = []
+ self._jobs_index = {}
+
+ def shutdown(self):
+ self.remove_all_jobs()
+
+ def _get_job_index(self, timestamp, job_id):
+ """
+ Returns the index of the given job, or if it's not found, the index where the job should be
+ inserted based on the given timestamp.
+
+ :type timestamp: int
+ :type job_id: str
+
+ """
+ lo, hi = 0, len(self._jobs)
+ timestamp = float("inf") if timestamp is None else timestamp
+ while lo < hi:
+ mid = (lo + hi) // 2
+ mid_job, mid_timestamp = self._jobs[mid]
+ mid_timestamp = float("inf") if mid_timestamp is None else mid_timestamp
+ if mid_timestamp > timestamp:
+ hi = mid
+ elif mid_timestamp < timestamp:
+ lo = mid + 1
+ elif mid_job.id > job_id:
+ hi = mid
+ elif mid_job.id < job_id:
+ lo = mid + 1
+ else:
+ return mid
+
+ return lo
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/jobstores/mongodb.py b/.venv/lib/python3.12/site-packages/apscheduler/jobstores/mongodb.py
new file mode 100644
index 00000000..102c0bd0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/jobstores/mongodb.py
@@ -0,0 +1,158 @@
+import pickle
+import warnings
+
+from apscheduler.job import Job
+from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
+from apscheduler.util import (
+ datetime_to_utc_timestamp,
+ maybe_ref,
+ utc_timestamp_to_datetime,
+)
+
+try:
+ from bson.binary import Binary
+ from pymongo import ASCENDING, MongoClient
+ from pymongo.errors import DuplicateKeyError
+except ImportError as exc: # pragma: nocover
+ raise ImportError("MongoDBJobStore requires PyMongo installed") from exc
+
+
+class MongoDBJobStore(BaseJobStore):
+ """
+ Stores jobs in a MongoDB database. Any leftover keyword arguments are directly passed to
+ pymongo's `MongoClient
+ <http://api.mongodb.org/python/current/api/pymongo/mongo_client.html#pymongo.mongo_client.MongoClient>`_.
+
+ Plugin alias: ``mongodb``
+
+ :param str database: database to store jobs in
+ :param str collection: collection to store jobs in
+ :param client: a :class:`~pymongo.mongo_client.MongoClient` instance to use instead of
+ providing connection arguments
+ :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
+ highest available
+ """
+
+ def __init__(
+ self,
+ database="apscheduler",
+ collection="jobs",
+ client=None,
+ pickle_protocol=pickle.HIGHEST_PROTOCOL,
+ **connect_args,
+ ):
+ super().__init__()
+ self.pickle_protocol = pickle_protocol
+
+ if not database:
+ raise ValueError('The "database" parameter must not be empty')
+ if not collection:
+ raise ValueError('The "collection" parameter must not be empty')
+
+ if client:
+ self.client = maybe_ref(client)
+ else:
+ connect_args.setdefault("w", 1)
+ self.client = MongoClient(**connect_args)
+
+ self.collection = self.client[database][collection]
+
+ def start(self, scheduler, alias):
+ super().start(scheduler, alias)
+ self.collection.create_index("next_run_time", sparse=True)
+
+ @property
+ def connection(self):
+ warnings.warn(
+ 'The "connection" member is deprecated -- use "client" instead',
+ DeprecationWarning,
+ )
+ return self.client
+
+ def lookup_job(self, job_id):
+ document = self.collection.find_one(job_id, ["job_state"])
+ return self._reconstitute_job(document["job_state"]) if document else None
+
+ def get_due_jobs(self, now):
+ timestamp = datetime_to_utc_timestamp(now)
+ return self._get_jobs({"next_run_time": {"$lte": timestamp}})
+
+ def get_next_run_time(self):
+ document = self.collection.find_one(
+ {"next_run_time": {"$ne": None}},
+ projection=["next_run_time"],
+ sort=[("next_run_time", ASCENDING)],
+ )
+ return (
+ utc_timestamp_to_datetime(document["next_run_time"]) if document else None
+ )
+
+ def get_all_jobs(self):
+ jobs = self._get_jobs({})
+ self._fix_paused_jobs_sorting(jobs)
+ return jobs
+
+ def add_job(self, job):
+ try:
+ self.collection.insert_one(
+ {
+ "_id": job.id,
+ "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
+ "job_state": Binary(
+ pickle.dumps(job.__getstate__(), self.pickle_protocol)
+ ),
+ }
+ )
+ except DuplicateKeyError:
+ raise ConflictingIdError(job.id)
+
+ def update_job(self, job):
+ changes = {
+ "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
+ "job_state": Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)),
+ }
+ result = self.collection.update_one({"_id": job.id}, {"$set": changes})
+ if result and result.matched_count == 0:
+ raise JobLookupError(job.id)
+
+ def remove_job(self, job_id):
+ result = self.collection.delete_one({"_id": job_id})
+ if result and result.deleted_count == 0:
+ raise JobLookupError(job_id)
+
+ def remove_all_jobs(self):
+ self.collection.delete_many({})
+
+ def shutdown(self):
+ self.client.close()
+
+ def _reconstitute_job(self, job_state):
+ job_state = pickle.loads(job_state)
+ job = Job.__new__(Job)
+ job.__setstate__(job_state)
+ job._scheduler = self._scheduler
+ job._jobstore_alias = self._alias
+ return job
+
+ def _get_jobs(self, conditions):
+ jobs = []
+ failed_job_ids = []
+ for document in self.collection.find(
+ conditions, ["_id", "job_state"], sort=[("next_run_time", ASCENDING)]
+ ):
+ try:
+ jobs.append(self._reconstitute_job(document["job_state"]))
+ except BaseException:
+ self._logger.exception(
+ 'Unable to restore job "%s" -- removing it', document["_id"]
+ )
+ failed_job_ids.append(document["_id"])
+
+ # Remove all the jobs we failed to restore
+ if failed_job_ids:
+ self.collection.delete_many({"_id": {"$in": failed_job_ids}})
+
+ return jobs
+
+ def __repr__(self):
+ return f"<{self.__class__.__name__} (client={self.client})>"
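
A configuration sketch for the store above, assuming pymongo is installed and a MongoDB server is reachable (connection details are illustrative; leftover keyword arguments go to MongoClient):

from apscheduler.jobstores.mongodb import MongoDBJobStore
from apscheduler.schedulers.background import BackgroundScheduler

jobstores = {
    "default": MongoDBJobStore(database="apscheduler", collection="jobs",
                               host="localhost", port=27017),
}
scheduler = BackgroundScheduler(jobstores=jobstores)
scheduler.start()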
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/jobstores/redis.py b/.venv/lib/python3.12/site-packages/apscheduler/jobstores/redis.py
new file mode 100644
index 00000000..528285fb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/jobstores/redis.py
@@ -0,0 +1,160 @@
+import pickle
+from datetime import datetime, timezone
+
+from apscheduler.job import Job
+from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
+from apscheduler.util import datetime_to_utc_timestamp, utc_timestamp_to_datetime
+
+try:
+ from redis import Redis
+except ImportError as exc: # pragma: nocover
+ raise ImportError("RedisJobStore requires redis installed") from exc
+
+
+class RedisJobStore(BaseJobStore):
+ """
+ Stores jobs in a Redis database. Any leftover keyword arguments are directly passed to redis's
+ :class:`~redis.StrictRedis`.
+
+ Plugin alias: ``redis``
+
+ :param int db: the database number to store jobs in
+ :param str jobs_key: key to store jobs in
+ :param str run_times_key: key to store the jobs' run times in
+ :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
+ highest available
+ """
+
+ def __init__(
+ self,
+ db=0,
+ jobs_key="apscheduler.jobs",
+ run_times_key="apscheduler.run_times",
+ pickle_protocol=pickle.HIGHEST_PROTOCOL,
+ **connect_args,
+ ):
+ super().__init__()
+
+ if db is None:
+ raise ValueError('The "db" parameter must not be empty')
+ if not jobs_key:
+ raise ValueError('The "jobs_key" parameter must not be empty')
+ if not run_times_key:
+ raise ValueError('The "run_times_key" parameter must not be empty')
+
+ self.pickle_protocol = pickle_protocol
+ self.jobs_key = jobs_key
+ self.run_times_key = run_times_key
+ self.redis = Redis(db=int(db), **connect_args)
+
+ def lookup_job(self, job_id):
+ job_state = self.redis.hget(self.jobs_key, job_id)
+ return self._reconstitute_job(job_state) if job_state else None
+
+ def get_due_jobs(self, now):
+ timestamp = datetime_to_utc_timestamp(now)
+ job_ids = self.redis.zrangebyscore(self.run_times_key, 0, timestamp)
+ if job_ids:
+ job_states = self.redis.hmget(self.jobs_key, *job_ids)
+ return self._reconstitute_jobs(zip(job_ids, job_states))
+ return []
+
+ def get_next_run_time(self):
+ next_run_time = self.redis.zrange(self.run_times_key, 0, 0, withscores=True)
+ if next_run_time:
+ return utc_timestamp_to_datetime(next_run_time[0][1])
+
+ def get_all_jobs(self):
+ job_states = self.redis.hgetall(self.jobs_key)
+ jobs = self._reconstitute_jobs(job_states.items())
+ paused_sort_key = datetime(9999, 12, 31, tzinfo=timezone.utc)
+ return sorted(jobs, key=lambda job: job.next_run_time or paused_sort_key)
+
+ def add_job(self, job):
+ if self.redis.hexists(self.jobs_key, job.id):
+ raise ConflictingIdError(job.id)
+
+ with self.redis.pipeline() as pipe:
+ pipe.multi()
+ pipe.hset(
+ self.jobs_key,
+ job.id,
+ pickle.dumps(job.__getstate__(), self.pickle_protocol),
+ )
+ if job.next_run_time:
+ pipe.zadd(
+ self.run_times_key,
+ {job.id: datetime_to_utc_timestamp(job.next_run_time)},
+ )
+
+ pipe.execute()
+
+ def update_job(self, job):
+ if not self.redis.hexists(self.jobs_key, job.id):
+ raise JobLookupError(job.id)
+
+ with self.redis.pipeline() as pipe:
+ pipe.hset(
+ self.jobs_key,
+ job.id,
+ pickle.dumps(job.__getstate__(), self.pickle_protocol),
+ )
+ if job.next_run_time:
+ pipe.zadd(
+ self.run_times_key,
+ {job.id: datetime_to_utc_timestamp(job.next_run_time)},
+ )
+ else:
+ pipe.zrem(self.run_times_key, job.id)
+
+ pipe.execute()
+
+ def remove_job(self, job_id):
+ if not self.redis.hexists(self.jobs_key, job_id):
+ raise JobLookupError(job_id)
+
+ with self.redis.pipeline() as pipe:
+ pipe.hdel(self.jobs_key, job_id)
+ pipe.zrem(self.run_times_key, job_id)
+ pipe.execute()
+
+ def remove_all_jobs(self):
+ with self.redis.pipeline() as pipe:
+ pipe.delete(self.jobs_key)
+ pipe.delete(self.run_times_key)
+ pipe.execute()
+
+ def shutdown(self):
+ self.redis.connection_pool.disconnect()
+
+ def _reconstitute_job(self, job_state):
+ job_state = pickle.loads(job_state)
+ job = Job.__new__(Job)
+ job.__setstate__(job_state)
+ job._scheduler = self._scheduler
+ job._jobstore_alias = self._alias
+ return job
+
+ def _reconstitute_jobs(self, job_states):
+ jobs = []
+ failed_job_ids = []
+ for job_id, job_state in job_states:
+ try:
+ jobs.append(self._reconstitute_job(job_state))
+ except BaseException:
+ self._logger.exception(
+ 'Unable to restore job "%s" -- removing it', job_id
+ )
+ failed_job_ids.append(job_id)
+
+ # Remove all the jobs we failed to restore
+ if failed_job_ids:
+ with self.redis.pipeline() as pipe:
+ pipe.hdel(self.jobs_key, *failed_job_ids)
+ pipe.zrem(self.run_times_key, *failed_job_ids)
+ pipe.execute()
+
+ return jobs
+
+ def __repr__(self):
+ return f"<{self.__class__.__name__}>"
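
And the Redis counterpart, assuming the redis package is installed and a server is reachable (the database number and connection details are illustrative; extra keyword arguments go to redis.Redis):

from apscheduler.jobstores.redis import RedisJobStore
from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler()
scheduler.add_jobstore(RedisJobStore(db=2, host="localhost", port=6379),
                       alias="redis")
scheduler.start()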
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/jobstores/rethinkdb.py b/.venv/lib/python3.12/site-packages/apscheduler/jobstores/rethinkdb.py
new file mode 100644
index 00000000..d78290b1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/jobstores/rethinkdb.py
@@ -0,0 +1,173 @@
+import pickle
+
+from apscheduler.job import Job
+from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
+from apscheduler.util import (
+ datetime_to_utc_timestamp,
+ maybe_ref,
+ utc_timestamp_to_datetime,
+)
+
+try:
+ from rethinkdb import RethinkDB
+except ImportError as exc: # pragma: nocover
+ raise ImportError("RethinkDBJobStore requires rethinkdb installed") from exc
+
+
+class RethinkDBJobStore(BaseJobStore):
+ """
+ Stores jobs in a RethinkDB database. Any leftover keyword arguments are directly passed to
+ rethinkdb's `RethinkdbClient <http://www.rethinkdb.com/api/#connect>`_.
+
+ Plugin alias: ``rethinkdb``
+
+ :param str database: database to store jobs in
+ :param str table: table to store jobs in
+ :param client: a :class:`rethinkdb.net.Connection` instance to use instead of providing
+ connection arguments
+ :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
+ highest available
+ """
+
+ def __init__(
+ self,
+ database="apscheduler",
+ table="jobs",
+ client=None,
+ pickle_protocol=pickle.HIGHEST_PROTOCOL,
+ **connect_args,
+ ):
+ super().__init__()
+
+ if not database:
+ raise ValueError('The "database" parameter must not be empty')
+ if not table:
+ raise ValueError('The "table" parameter must not be empty')
+
+ self.database = database
+ self.table_name = table
+ self.table = None
+ self.client = client
+ self.pickle_protocol = pickle_protocol
+ self.connect_args = connect_args
+ self.r = RethinkDB()
+ self.conn = None
+
+ def start(self, scheduler, alias):
+ super().start(scheduler, alias)
+
+ if self.client:
+ self.conn = maybe_ref(self.client)
+ else:
+ self.conn = self.r.connect(db=self.database, **self.connect_args)
+
+ if self.database not in self.r.db_list().run(self.conn):
+ self.r.db_create(self.database).run(self.conn)
+
+ if self.table_name not in self.r.table_list().run(self.conn):
+ self.r.table_create(self.table_name).run(self.conn)
+
+ if "next_run_time" not in self.r.table(self.table_name).index_list().run(
+ self.conn
+ ):
+ self.r.table(self.table_name).index_create("next_run_time").run(self.conn)
+
+ self.table = self.r.db(self.database).table(self.table_name)
+
+ def lookup_job(self, job_id):
+ results = list(self.table.get_all(job_id).pluck("job_state").run(self.conn))
+ return self._reconstitute_job(results[0]["job_state"]) if results else None
+
+ def get_due_jobs(self, now):
+ return self._get_jobs(
+ self.r.row["next_run_time"] <= datetime_to_utc_timestamp(now)
+ )
+
+ def get_next_run_time(self):
+ results = list(
+ self.table.filter(self.r.row["next_run_time"] != None)
+ .order_by(self.r.asc("next_run_time"))
+ .map(lambda x: x["next_run_time"])
+ .limit(1)
+ .run(self.conn)
+ )
+ return utc_timestamp_to_datetime(results[0]) if results else None
+
+ def get_all_jobs(self):
+ jobs = self._get_jobs()
+ self._fix_paused_jobs_sorting(jobs)
+ return jobs
+
+ def add_job(self, job):
+ job_dict = {
+ "id": job.id,
+ "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
+ "job_state": self.r.binary(
+ pickle.dumps(job.__getstate__(), self.pickle_protocol)
+ ),
+ }
+ results = self.table.insert(job_dict).run(self.conn)
+ if results["errors"] > 0:
+ raise ConflictingIdError(job.id)
+
+ def update_job(self, job):
+ changes = {
+ "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
+ "job_state": self.r.binary(
+ pickle.dumps(job.__getstate__(), self.pickle_protocol)
+ ),
+ }
+ results = self.table.get_all(job.id).update(changes).run(self.conn)
+        # Raise if RethinkDB reports skipped documents or errors, or if every counter is zero (no document matched the job ID)
+        skipped = False in map(lambda x: results[x] == 0, results.keys())
+        if results["skipped"] > 0 or results["errors"] > 0 or not skipped:
+            raise JobLookupError(job.id)
+
+ def remove_job(self, job_id):
+ results = self.table.get_all(job_id).delete().run(self.conn)
+ if results["deleted"] + results["skipped"] != 1:
+ raise JobLookupError(job_id)
+
+ def remove_all_jobs(self):
+ self.table.delete().run(self.conn)
+
+ def shutdown(self):
+ self.conn.close()
+
+ def _reconstitute_job(self, job_state):
+ job_state = pickle.loads(job_state)
+ job = Job.__new__(Job)
+ job.__setstate__(job_state)
+ job._scheduler = self._scheduler
+ job._jobstore_alias = self._alias
+ return job
+
+ def _get_jobs(self, predicate=None):
+ jobs = []
+ failed_job_ids = []
+ query = (
+ self.table.filter(self.r.row["next_run_time"] != None).filter(predicate)
+ if predicate
+ else self.table
+ )
+ query = query.order_by("next_run_time", "id").pluck("id", "job_state")
+
+ for document in query.run(self.conn):
+ try:
+ jobs.append(self._reconstitute_job(document["job_state"]))
+ except Exception:
+ self._logger.exception(
+ 'Unable to restore job "%s" -- removing it', document["id"]
+ )
+ failed_job_ids.append(document["id"])
+
+ # Remove all the jobs we failed to restore
+ if failed_job_ids:
+ self.r.expr(failed_job_ids).for_each(
+ lambda job_id: self.table.get_all(job_id).delete()
+ ).run(self.conn)
+
+ return jobs
+
+ def __repr__(self):
+ connection = self.conn
+ return f"<{self.__class__.__name__} (connection={connection})>"
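A minimal sketch for the RethinkDB store above, assuming a local RethinkDB server; the host and port are illustrative, and any leftover keyword arguments are forwarded to rethinkdb's connect():

    from apscheduler.jobstores.rethinkdb import RethinkDBJobStore
    from apscheduler.schedulers.background import BackgroundScheduler

    # Database and table are created on scheduler start if they do not exist yet.
    store = RethinkDBJobStore(database="apscheduler", table="jobs", host="localhost", port=28015)
    scheduler = BackgroundScheduler(jobstores={"default": store})
    scheduler.start()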
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/jobstores/sqlalchemy.py b/.venv/lib/python3.12/site-packages/apscheduler/jobstores/sqlalchemy.py
new file mode 100644
index 00000000..9866acf5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/jobstores/sqlalchemy.py
@@ -0,0 +1,194 @@
+import pickle
+
+from apscheduler.job import Job
+from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
+from apscheduler.util import (
+ datetime_to_utc_timestamp,
+ maybe_ref,
+ utc_timestamp_to_datetime,
+)
+
+try:
+ from sqlalchemy import (
+ Column,
+ Float,
+ LargeBinary,
+ MetaData,
+ Table,
+ Unicode,
+ and_,
+ create_engine,
+ select,
+ )
+ from sqlalchemy.exc import IntegrityError
+ from sqlalchemy.sql.expression import null
+except ImportError as exc: # pragma: nocover
+ raise ImportError("SQLAlchemyJobStore requires SQLAlchemy installed") from exc
+
+
+class SQLAlchemyJobStore(BaseJobStore):
+ """
+ Stores jobs in a database table using SQLAlchemy.
+ The table will be created if it doesn't exist in the database.
+
+ Plugin alias: ``sqlalchemy``
+
+ :param str url: connection string (see
+ :ref:`SQLAlchemy documentation <sqlalchemy:database_urls>` on this)
+ :param engine: an SQLAlchemy :class:`~sqlalchemy.engine.Engine` to use instead of creating a
+ new one based on ``url``
+ :param str tablename: name of the table to store jobs in
+ :param metadata: a :class:`~sqlalchemy.schema.MetaData` instance to use instead of creating a
+ new one
+ :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
+ highest available
+ :param str tableschema: name of the (existing) schema in the target database where the table
+ should be
+ :param dict engine_options: keyword arguments to :func:`~sqlalchemy.create_engine`
+ (ignored if ``engine`` is given)
+ """
+
+ def __init__(
+ self,
+ url=None,
+ engine=None,
+ tablename="apscheduler_jobs",
+ metadata=None,
+ pickle_protocol=pickle.HIGHEST_PROTOCOL,
+ tableschema=None,
+ engine_options=None,
+ ):
+ super().__init__()
+ self.pickle_protocol = pickle_protocol
+ metadata = maybe_ref(metadata) or MetaData()
+
+ if engine:
+ self.engine = maybe_ref(engine)
+ elif url:
+ self.engine = create_engine(url, **(engine_options or {}))
+ else:
+ raise ValueError('Need either "engine" or "url" defined')
+
+ # 191 = max key length in MySQL for InnoDB/utf8mb4 tables,
+ # 25 = precision that translates to an 8-byte float
+ self.jobs_t = Table(
+ tablename,
+ metadata,
+ Column("id", Unicode(191), primary_key=True),
+ Column("next_run_time", Float(25), index=True),
+ Column("job_state", LargeBinary, nullable=False),
+ schema=tableschema,
+ )
+
+ def start(self, scheduler, alias):
+ super().start(scheduler, alias)
+ self.jobs_t.create(self.engine, True)
+
+ def lookup_job(self, job_id):
+ selectable = select(self.jobs_t.c.job_state).where(self.jobs_t.c.id == job_id)
+ with self.engine.begin() as connection:
+ job_state = connection.execute(selectable).scalar()
+ return self._reconstitute_job(job_state) if job_state else None
+
+ def get_due_jobs(self, now):
+ timestamp = datetime_to_utc_timestamp(now)
+ return self._get_jobs(self.jobs_t.c.next_run_time <= timestamp)
+
+ def get_next_run_time(self):
+ selectable = (
+ select(self.jobs_t.c.next_run_time)
+ .where(self.jobs_t.c.next_run_time != null())
+ .order_by(self.jobs_t.c.next_run_time)
+ .limit(1)
+ )
+ with self.engine.begin() as connection:
+ next_run_time = connection.execute(selectable).scalar()
+ return utc_timestamp_to_datetime(next_run_time)
+
+ def get_all_jobs(self):
+ jobs = self._get_jobs()
+ self._fix_paused_jobs_sorting(jobs)
+ return jobs
+
+ def add_job(self, job):
+ insert = self.jobs_t.insert().values(
+ **{
+ "id": job.id,
+ "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
+ "job_state": pickle.dumps(job.__getstate__(), self.pickle_protocol),
+ }
+ )
+ with self.engine.begin() as connection:
+ try:
+ connection.execute(insert)
+ except IntegrityError:
+ raise ConflictingIdError(job.id)
+
+ def update_job(self, job):
+ update = (
+ self.jobs_t.update()
+ .values(
+ **{
+ "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
+ "job_state": pickle.dumps(job.__getstate__(), self.pickle_protocol),
+ }
+ )
+ .where(self.jobs_t.c.id == job.id)
+ )
+ with self.engine.begin() as connection:
+ result = connection.execute(update)
+ if result.rowcount == 0:
+ raise JobLookupError(job.id)
+
+ def remove_job(self, job_id):
+ delete = self.jobs_t.delete().where(self.jobs_t.c.id == job_id)
+ with self.engine.begin() as connection:
+ result = connection.execute(delete)
+ if result.rowcount == 0:
+ raise JobLookupError(job_id)
+
+ def remove_all_jobs(self):
+ delete = self.jobs_t.delete()
+ with self.engine.begin() as connection:
+ connection.execute(delete)
+
+ def shutdown(self):
+ self.engine.dispose()
+
+ def _reconstitute_job(self, job_state):
+ job_state = pickle.loads(job_state)
+ job_state["jobstore"] = self
+ job = Job.__new__(Job)
+ job.__setstate__(job_state)
+ job._scheduler = self._scheduler
+ job._jobstore_alias = self._alias
+ return job
+
+ def _get_jobs(self, *conditions):
+ jobs = []
+ selectable = select(self.jobs_t.c.id, self.jobs_t.c.job_state).order_by(
+ self.jobs_t.c.next_run_time
+ )
+ selectable = selectable.where(and_(*conditions)) if conditions else selectable
+ failed_job_ids = set()
+ with self.engine.begin() as connection:
+ for row in connection.execute(selectable):
+ try:
+ jobs.append(self._reconstitute_job(row.job_state))
+ except BaseException:
+ self._logger.exception(
+ 'Unable to restore job "%s" -- removing it', row.id
+ )
+ failed_job_ids.add(row.id)
+
+ # Remove all the jobs we failed to restore
+ if failed_job_ids:
+ delete = self.jobs_t.delete().where(
+ self.jobs_t.c.id.in_(failed_job_ids)
+ )
+ connection.execute(delete)
+
+ return jobs
+
+ def __repr__(self):
+ return f"<{self.__class__.__name__} (url={self.engine.url})>"
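A minimal sketch for the SQLAlchemy store above, assuming a local SQLite file; any SQLAlchemy-supported database URL works the same way:

    from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
    from apscheduler.schedulers.background import BackgroundScheduler

    # The jobs table (default name "apscheduler_jobs") is created on scheduler start if missing.
    scheduler = BackgroundScheduler(
        jobstores={"default": SQLAlchemyJobStore(url="sqlite:///jobs.sqlite")}
    )
    scheduler.start()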
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/jobstores/zookeeper.py b/.venv/lib/python3.12/site-packages/apscheduler/jobstores/zookeeper.py
new file mode 100644
index 00000000..687fbc2a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/jobstores/zookeeper.py
@@ -0,0 +1,197 @@
+import pickle
+from datetime import datetime, timezone
+
+from kazoo.exceptions import NodeExistsError, NoNodeError
+
+from apscheduler.job import Job
+from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
+from apscheduler.util import (
+ datetime_to_utc_timestamp,
+ maybe_ref,
+ utc_timestamp_to_datetime,
+)
+
+try:
+ from kazoo.client import KazooClient
+except ImportError as exc: # pragma: nocover
+ raise ImportError("ZooKeeperJobStore requires Kazoo installed") from exc
+
+
+class ZooKeeperJobStore(BaseJobStore):
+ """
+ Stores jobs in a ZooKeeper tree. Any leftover keyword arguments are directly passed to
+ kazoo's `KazooClient
+ <http://kazoo.readthedocs.io/en/latest/api/client.html>`_.
+
+ Plugin alias: ``zookeeper``
+
+ :param str path: path to store jobs in
+ :param client: a :class:`~kazoo.client.KazooClient` instance to use instead of
+ providing connection arguments
+ :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
+ highest available
+ """
+
+ def __init__(
+ self,
+ path="/apscheduler",
+ client=None,
+ close_connection_on_exit=False,
+ pickle_protocol=pickle.HIGHEST_PROTOCOL,
+ **connect_args,
+ ):
+ super().__init__()
+ self.pickle_protocol = pickle_protocol
+ self.close_connection_on_exit = close_connection_on_exit
+
+ if not path:
+ raise ValueError('The "path" parameter must not be empty')
+
+ self.path = path
+
+ if client:
+ self.client = maybe_ref(client)
+ else:
+ self.client = KazooClient(**connect_args)
+ self._ensured_path = False
+
+ def _ensure_paths(self):
+ if not self._ensured_path:
+ self.client.ensure_path(self.path)
+ self._ensured_path = True
+
+ def start(self, scheduler, alias):
+ super().start(scheduler, alias)
+ if not self.client.connected:
+ self.client.start()
+
+ def lookup_job(self, job_id):
+ self._ensure_paths()
+ node_path = self.path + "/" + str(job_id)
+ try:
+ content, _ = self.client.get(node_path)
+ doc = pickle.loads(content)
+ job = self._reconstitute_job(doc["job_state"])
+ return job
+ except BaseException:
+ return None
+
+ def get_due_jobs(self, now):
+ timestamp = datetime_to_utc_timestamp(now)
+ jobs = [
+ job_def["job"]
+ for job_def in self._get_jobs()
+ if job_def["next_run_time"] is not None
+ and job_def["next_run_time"] <= timestamp
+ ]
+ return jobs
+
+ def get_next_run_time(self):
+ next_runs = [
+ job_def["next_run_time"]
+ for job_def in self._get_jobs()
+ if job_def["next_run_time"] is not None
+ ]
+ return utc_timestamp_to_datetime(min(next_runs)) if len(next_runs) > 0 else None
+
+ def get_all_jobs(self):
+ jobs = [job_def["job"] for job_def in self._get_jobs()]
+ self._fix_paused_jobs_sorting(jobs)
+ return jobs
+
+ def add_job(self, job):
+ self._ensure_paths()
+ node_path = self.path + "/" + str(job.id)
+ value = {
+ "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
+ "job_state": job.__getstate__(),
+ }
+ data = pickle.dumps(value, self.pickle_protocol)
+ try:
+ self.client.create(node_path, value=data)
+ except NodeExistsError:
+ raise ConflictingIdError(job.id)
+
+ def update_job(self, job):
+ self._ensure_paths()
+ node_path = self.path + "/" + str(job.id)
+ changes = {
+ "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
+ "job_state": job.__getstate__(),
+ }
+ data = pickle.dumps(changes, self.pickle_protocol)
+ try:
+ self.client.set(node_path, value=data)
+ except NoNodeError:
+ raise JobLookupError(job.id)
+
+ def remove_job(self, job_id):
+ self._ensure_paths()
+ node_path = self.path + "/" + str(job_id)
+ try:
+ self.client.delete(node_path)
+ except NoNodeError:
+ raise JobLookupError(job_id)
+
+ def remove_all_jobs(self):
+ try:
+ self.client.delete(self.path, recursive=True)
+ except NoNodeError:
+ pass
+ self._ensured_path = False
+
+ def shutdown(self):
+ if self.close_connection_on_exit:
+ self.client.stop()
+ self.client.close()
+
+ def _reconstitute_job(self, job_state):
+ job = Job.__new__(Job)
+ job.__setstate__(job_state)
+ job._scheduler = self._scheduler
+ job._jobstore_alias = self._alias
+ return job
+
+ def _get_jobs(self):
+ self._ensure_paths()
+ jobs = []
+ failed_job_ids = []
+ all_ids = self.client.get_children(self.path)
+ for node_name in all_ids:
+ try:
+ node_path = self.path + "/" + node_name
+ content, _ = self.client.get(node_path)
+ doc = pickle.loads(content)
+ job_def = {
+ "job_id": node_name,
+ "next_run_time": doc["next_run_time"]
+ if doc["next_run_time"]
+ else None,
+ "job_state": doc["job_state"],
+ "job": self._reconstitute_job(doc["job_state"]),
+ "creation_time": _.ctime,
+ }
+ jobs.append(job_def)
+ except BaseException:
+ self._logger.exception(
+ 'Unable to restore job "%s" -- removing it', node_name
+ )
+ failed_job_ids.append(node_name)
+
+ # Remove all the jobs we failed to restore
+ if failed_job_ids:
+ for failed_id in failed_job_ids:
+ self.remove_job(failed_id)
+ paused_sort_key = datetime(9999, 12, 31, tzinfo=timezone.utc)
+ return sorted(
+ jobs,
+ key=lambda job_def: (
+ job_def["job"].next_run_time or paused_sort_key,
+ job_def["creation_time"],
+ ),
+ )
+
+ def __repr__(self):
+ return f"<{self.__class__.__name__} (client={self.client})>"
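A minimal sketch for the ZooKeeper store above, assuming a local ensemble; the hosts string and znode path are illustrative, and extra keyword arguments are forwarded to KazooClient:

    from apscheduler.jobstores.zookeeper import ZooKeeperJobStore
    from apscheduler.schedulers.background import BackgroundScheduler

    # Each job is stored as a pickled znode under the configured path.
    store = ZooKeeperJobStore(path="/example/apscheduler", hosts="127.0.0.1:2181")
    scheduler = BackgroundScheduler(jobstores={"default": store})
    scheduler.start()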
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/schedulers/__init__.py b/.venv/lib/python3.12/site-packages/apscheduler/schedulers/__init__.py
new file mode 100644
index 00000000..c17cc298
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/schedulers/__init__.py
@@ -0,0 +1,12 @@
+class SchedulerAlreadyRunningError(Exception):
+ """Raised when attempting to start or configure the scheduler when it's already running."""
+
+ def __str__(self):
+ return "Scheduler is already running"
+
+
+class SchedulerNotRunningError(Exception):
+    """Raised when attempting to shut down the scheduler when it's not running."""
+
+ def __str__(self):
+ return "Scheduler is not running"
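These two exceptions guard the scheduler lifecycle; a short sketch of how the first one surfaces when start() is called twice:

    from apscheduler.schedulers import SchedulerAlreadyRunningError
    from apscheduler.schedulers.background import BackgroundScheduler

    scheduler = BackgroundScheduler()
    scheduler.start()
    try:
        scheduler.start()  # a second start() raises because the state is no longer STOPPED
    except SchedulerAlreadyRunningError:
        pass
    scheduler.shutdown()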
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/schedulers/asyncio.py b/.venv/lib/python3.12/site-packages/apscheduler/schedulers/asyncio.py
new file mode 100644
index 00000000..ff31adbb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/schedulers/asyncio.py
@@ -0,0 +1,67 @@
+import asyncio
+from functools import partial, wraps
+
+from apscheduler.schedulers.base import BaseScheduler
+from apscheduler.util import maybe_ref
+
+
+def run_in_event_loop(func):
+ @wraps(func)
+ def wrapper(self, *args, **kwargs):
+ wrapped = partial(func, self, *args, **kwargs)
+ self._eventloop.call_soon_threadsafe(wrapped)
+
+ return wrapper
+
+
+class AsyncIOScheduler(BaseScheduler):
+ """
+ A scheduler that runs on an asyncio (:pep:`3156`) event loop.
+
+ The default executor can run jobs based on native coroutines (``async def``).
+
+ Extra options:
+
+ ============== =============================================================
+ ``event_loop`` AsyncIO event loop to use (defaults to the global event loop)
+ ============== =============================================================
+ """
+
+ _eventloop = None
+ _timeout = None
+
+ def start(self, paused=False):
+ if not self._eventloop:
+ self._eventloop = asyncio.get_running_loop()
+
+ super().start(paused)
+
+ @run_in_event_loop
+ def shutdown(self, wait=True):
+ super().shutdown(wait)
+ self._stop_timer()
+
+ def _configure(self, config):
+ self._eventloop = maybe_ref(config.pop("event_loop", None))
+ super()._configure(config)
+
+ def _start_timer(self, wait_seconds):
+ self._stop_timer()
+ if wait_seconds is not None:
+ self._timeout = self._eventloop.call_later(wait_seconds, self.wakeup)
+
+ def _stop_timer(self):
+ if self._timeout:
+ self._timeout.cancel()
+ del self._timeout
+
+ @run_in_event_loop
+ def wakeup(self):
+ self._stop_timer()
+ wait_seconds = self._process_jobs()
+ self._start_timer(wait_seconds)
+
+ def _create_default_executor(self):
+ from apscheduler.executors.asyncio import AsyncIOExecutor
+
+ return AsyncIOExecutor()
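Because start() grabs the currently running event loop, this scheduler is normally started from inside a coroutine (or given an explicit event_loop option). A minimal sketch with a coroutine job; the interval and sleep values are arbitrary:

    import asyncio

    from apscheduler.schedulers.asyncio import AsyncIOScheduler

    async def tick():
        print("tick")

    async def main():
        scheduler = AsyncIOScheduler()
        scheduler.add_job(tick, "interval", seconds=5)
        scheduler.start()        # must be called while the event loop is running
        await asyncio.sleep(30)  # let a few runs happen
        scheduler.shutdown()

    asyncio.run(main())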
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/schedulers/background.py b/.venv/lib/python3.12/site-packages/apscheduler/schedulers/background.py
new file mode 100644
index 00000000..7d8d1bc7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/schedulers/background.py
@@ -0,0 +1,42 @@
+from threading import Event, Thread
+
+from apscheduler.schedulers.base import BaseScheduler
+from apscheduler.schedulers.blocking import BlockingScheduler
+from apscheduler.util import asbool
+
+
+class BackgroundScheduler(BlockingScheduler):
+ """
+ A scheduler that runs in the background using a separate thread
+ (:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will return immediately).
+
+ Extra options:
+
+ ========== =============================================================================
+ ``daemon`` Set the ``daemon`` option in the background thread (defaults to ``True``, see
+ `the documentation
+ <https://docs.python.org/3.4/library/threading.html#thread-objects>`_
+ for further details)
+ ========== =============================================================================
+ """
+
+ _thread = None
+
+ def _configure(self, config):
+ self._daemon = asbool(config.pop("daemon", True))
+ super()._configure(config)
+
+ def start(self, *args, **kwargs):
+ if self._event is None or self._event.is_set():
+ self._event = Event()
+
+ BaseScheduler.start(self, *args, **kwargs)
+ self._thread = Thread(
+ target=self._main_loop, name="APScheduler", daemon=self._daemon
+ )
+ self._thread.start()
+
+ def shutdown(self, *args, **kwargs):
+ super().shutdown(*args, **kwargs)
+ self._thread.join()
+ del self._thread
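A minimal sketch for the background scheduler above: start() returns immediately and the main loop runs in a separate (by default daemonic) thread, so the calling thread has to stay alive; the values are illustrative:

    import time

    from apscheduler.schedulers.background import BackgroundScheduler

    def heartbeat():
        print("still alive")

    scheduler = BackgroundScheduler(daemon=True)  # "daemon" is consumed by _configure()
    scheduler.add_job(heartbeat, "interval", seconds=10)
    scheduler.start()  # returns immediately; jobs run in the "APScheduler" thread
    time.sleep(60)
    scheduler.shutdown()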
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/schedulers/base.py b/.venv/lib/python3.12/site-packages/apscheduler/schedulers/base.py
new file mode 100644
index 00000000..7d713c75
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/schedulers/base.py
@@ -0,0 +1,1264 @@
+import sys
+import warnings
+from abc import ABCMeta, abstractmethod
+from collections.abc import Mapping, MutableMapping
+from contextlib import ExitStack
+from datetime import datetime, timedelta
+from importlib.metadata import entry_points
+from logging import getLogger
+from threading import TIMEOUT_MAX, RLock
+
+from tzlocal import get_localzone
+
+from apscheduler.events import (
+ EVENT_ALL,
+ EVENT_ALL_JOBS_REMOVED,
+ EVENT_EXECUTOR_ADDED,
+ EVENT_EXECUTOR_REMOVED,
+ EVENT_JOB_ADDED,
+ EVENT_JOB_MAX_INSTANCES,
+ EVENT_JOB_MODIFIED,
+ EVENT_JOB_REMOVED,
+ EVENT_JOB_SUBMITTED,
+ EVENT_JOBSTORE_ADDED,
+ EVENT_JOBSTORE_REMOVED,
+ EVENT_SCHEDULER_PAUSED,
+ EVENT_SCHEDULER_RESUMED,
+ EVENT_SCHEDULER_SHUTDOWN,
+ EVENT_SCHEDULER_STARTED,
+ JobEvent,
+ JobSubmissionEvent,
+ SchedulerEvent,
+)
+from apscheduler.executors.base import BaseExecutor, MaxInstancesReachedError
+from apscheduler.executors.pool import ThreadPoolExecutor
+from apscheduler.job import Job
+from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
+from apscheduler.jobstores.memory import MemoryJobStore
+from apscheduler.schedulers import (
+ SchedulerAlreadyRunningError,
+ SchedulerNotRunningError,
+)
+from apscheduler.triggers.base import BaseTrigger
+from apscheduler.util import (
+ asbool,
+ asint,
+ astimezone,
+ maybe_ref,
+ obj_to_ref,
+ ref_to_obj,
+ undefined,
+)
+
+#: constant indicating a scheduler's stopped state
+STATE_STOPPED = 0
+#: constant indicating a scheduler's running state (started and processing jobs)
+STATE_RUNNING = 1
+#: constant indicating a scheduler's paused state (started but not processing jobs)
+STATE_PAUSED = 2
+
+
+class BaseScheduler(metaclass=ABCMeta):
+ """
+ Abstract base class for all schedulers.
+
+ Takes the following keyword arguments:
+
+ :param str|logging.Logger logger: logger to use for the scheduler's logging (defaults to
+ apscheduler.scheduler)
+ :param str|datetime.tzinfo timezone: the default time zone (defaults to the local timezone)
+ :param int|float jobstore_retry_interval: the minimum number of seconds to wait between
+ retries in the scheduler's main loop if the job store raises an exception when getting
+ the list of due jobs
+ :param dict job_defaults: default values for newly added jobs
+ :param dict jobstores: a dictionary of job store alias -> job store instance or configuration
+ dict
+ :param dict executors: a dictionary of executor alias -> executor instance or configuration
+ dict
+
+ :ivar int state: current running state of the scheduler (one of the following constants from
+ ``apscheduler.schedulers.base``: ``STATE_STOPPED``, ``STATE_RUNNING``, ``STATE_PAUSED``)
+
+ .. seealso:: :ref:`scheduler-config`
+ """
+
+    # The `group=...` keyword is only available in the importlib_metadata backport (Python <= 3.7) and in the standard library on Python >= 3.10.
+ if (3, 8) <= sys.version_info < (3, 10):
+ _trigger_plugins = {
+ ep.name: ep for ep in entry_points()["apscheduler.triggers"]
+ }
+ _executor_plugins = {
+ ep.name: ep for ep in entry_points()["apscheduler.executors"]
+ }
+ _jobstore_plugins = {
+ ep.name: ep for ep in entry_points()["apscheduler.jobstores"]
+ }
+ else:
+ _trigger_plugins = {
+ ep.name: ep for ep in entry_points(group="apscheduler.triggers")
+ }
+ _executor_plugins = {
+ ep.name: ep for ep in entry_points(group="apscheduler.executors")
+ }
+ _jobstore_plugins = {
+ ep.name: ep for ep in entry_points(group="apscheduler.jobstores")
+ }
+
+ _trigger_classes = {}
+ _executor_classes = {}
+ _jobstore_classes = {}
+
+ #
+ # Public API
+ #
+
+ def __init__(self, gconfig={}, **options):
+ super().__init__()
+ self._executors = {}
+ self._executors_lock = self._create_lock()
+ self._jobstores = {}
+ self._jobstores_lock = self._create_lock()
+ self._listeners = []
+ self._listeners_lock = self._create_lock()
+ self._pending_jobs = []
+ self.state = STATE_STOPPED
+ self.configure(gconfig, **options)
+
+ def __getstate__(self):
+ raise TypeError(
+ "Schedulers cannot be serialized. Ensure that you are not passing a "
+ "scheduler instance as an argument to a job, or scheduling an instance "
+ "method where the instance contains a scheduler as an attribute."
+ )
+
+ def configure(self, gconfig={}, prefix="apscheduler.", **options):
+ """
+ Reconfigures the scheduler with the given options.
+
+ Can only be done when the scheduler isn't running.
+
+ :param dict gconfig: a "global" configuration dictionary whose values can be overridden by
+ keyword arguments to this method
+ :param str|unicode prefix: pick only those keys from ``gconfig`` that are prefixed with
+ this string (pass an empty string or ``None`` to use all keys)
+ :raises SchedulerAlreadyRunningError: if the scheduler is already running
+
+ """
+ if self.state != STATE_STOPPED:
+ raise SchedulerAlreadyRunningError
+
+ # If a non-empty prefix was given, strip it from the keys in the
+ # global configuration dict
+ if prefix:
+ prefixlen = len(prefix)
+ gconfig = dict(
+ (key[prefixlen:], value)
+ for key, value in gconfig.items()
+ if key.startswith(prefix)
+ )
+
+ # Create a structure from the dotted options
+ # (e.g. "a.b.c = d" -> {'a': {'b': {'c': 'd'}}})
+ config = {}
+ for key, value in gconfig.items():
+ parts = key.split(".")
+ parent = config
+ key = parts.pop(0)
+ while parts:
+ parent = parent.setdefault(key, {})
+ key = parts.pop(0)
+ parent[key] = value
+
+ # Override any options with explicit keyword arguments
+ config.update(options)
+ self._configure(config)
+
+ def start(self, paused=False):
+ """
+ Start the configured executors and job stores and begin processing scheduled jobs.
+
+ :param bool paused: if ``True``, don't start job processing until :meth:`resume` is called
+ :raises SchedulerAlreadyRunningError: if the scheduler is already running
+ :raises RuntimeError: if running under uWSGI with threads disabled
+
+ """
+ if self.state != STATE_STOPPED:
+ raise SchedulerAlreadyRunningError
+
+ self._check_uwsgi()
+
+ with self._executors_lock:
+ # Create a default executor if nothing else is configured
+ if "default" not in self._executors:
+ self.add_executor(self._create_default_executor(), "default")
+
+ # Start all the executors
+ for alias, executor in self._executors.items():
+ executor.start(self, alias)
+
+ with self._jobstores_lock:
+ # Create a default job store if nothing else is configured
+ if "default" not in self._jobstores:
+ self.add_jobstore(self._create_default_jobstore(), "default")
+
+ # Start all the job stores
+ for alias, store in self._jobstores.items():
+ store.start(self, alias)
+
+ # Schedule all pending jobs
+ for job, jobstore_alias, replace_existing in self._pending_jobs:
+ self._real_add_job(job, jobstore_alias, replace_existing)
+ del self._pending_jobs[:]
+
+ self.state = STATE_PAUSED if paused else STATE_RUNNING
+ self._logger.info("Scheduler started")
+ self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_STARTED))
+
+ if not paused:
+ self.wakeup()
+
+ @abstractmethod
+ def shutdown(self, wait=True):
+ """
+ Shuts down the scheduler, along with its executors and job stores.
+
+ Does not interrupt any currently running jobs.
+
+ :param bool wait: ``True`` to wait until all currently executing jobs have finished
+ :raises SchedulerNotRunningError: if the scheduler has not been started yet
+
+ """
+ if self.state == STATE_STOPPED:
+ raise SchedulerNotRunningError
+
+ self.state = STATE_STOPPED
+
+ # Shut down all executors
+ with self._executors_lock, self._jobstores_lock:
+ for executor in self._executors.values():
+ executor.shutdown(wait)
+
+ # Shut down all job stores
+ for jobstore in self._jobstores.values():
+ jobstore.shutdown()
+
+ self._logger.info("Scheduler has been shut down")
+ self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN))
+
+ def pause(self):
+ """
+ Pause job processing in the scheduler.
+
+ This will prevent the scheduler from waking up to do job processing until :meth:`resume`
+ is called. It will not however stop any already running job processing.
+
+ """
+ if self.state == STATE_STOPPED:
+ raise SchedulerNotRunningError
+ elif self.state == STATE_RUNNING:
+ self.state = STATE_PAUSED
+ self._logger.info("Paused scheduler job processing")
+ self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_PAUSED))
+
+ def resume(self):
+ """Resume job processing in the scheduler."""
+ if self.state == STATE_STOPPED:
+ raise SchedulerNotRunningError
+ elif self.state == STATE_PAUSED:
+ self.state = STATE_RUNNING
+ self._logger.info("Resumed scheduler job processing")
+ self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_RESUMED))
+ self.wakeup()
+
+ @property
+ def running(self):
+ """
+ Return ``True`` if the scheduler has been started.
+
+ This is a shortcut for ``scheduler.state != STATE_STOPPED``.
+
+ """
+ return self.state != STATE_STOPPED
+
+ def add_executor(self, executor, alias="default", **executor_opts):
+ """
+ Adds an executor to this scheduler.
+
+ Any extra keyword arguments will be passed to the executor plugin's constructor, assuming
+ that the first argument is the name of an executor plugin.
+
+ :param str|unicode|apscheduler.executors.base.BaseExecutor executor: either an executor
+ instance or the name of an executor plugin
+        :param str|unicode alias: alias for the executor
+ :raises ValueError: if there is already an executor by the given alias
+
+ """
+ with self._executors_lock:
+ if alias in self._executors:
+ raise ValueError(
+ f'This scheduler already has an executor by the alias of "{alias}"'
+ )
+
+ if isinstance(executor, BaseExecutor):
+ self._executors[alias] = executor
+ elif isinstance(executor, str):
+ self._executors[alias] = executor = self._create_plugin_instance(
+ "executor", executor, executor_opts
+ )
+ else:
+ raise TypeError(
+ f"Expected an executor instance or a string, got {executor.__class__.__name__} instead"
+ )
+
+ # Start the executor right away if the scheduler is running
+ if self.state != STATE_STOPPED:
+ executor.start(self, alias)
+
+ self._dispatch_event(SchedulerEvent(EVENT_EXECUTOR_ADDED, alias))
+
+ def remove_executor(self, alias, shutdown=True):
+ """
+ Removes the executor by the given alias from this scheduler.
+
+ :param str|unicode alias: alias of the executor
+ :param bool shutdown: ``True`` to shut down the executor after
+ removing it
+
+ """
+ with self._executors_lock:
+ executor = self._lookup_executor(alias)
+ del self._executors[alias]
+
+ if shutdown:
+ executor.shutdown()
+
+ self._dispatch_event(SchedulerEvent(EVENT_EXECUTOR_REMOVED, alias))
+
+ def add_jobstore(self, jobstore, alias="default", **jobstore_opts):
+ """
+ Adds a job store to this scheduler.
+
+ Any extra keyword arguments will be passed to the job store plugin's constructor, assuming
+ that the first argument is the name of a job store plugin.
+
+ :param str|unicode|apscheduler.jobstores.base.BaseJobStore jobstore: job store to be added
+ :param str|unicode alias: alias for the job store
+ :raises ValueError: if there is already a job store by the given alias
+
+ """
+ with self._jobstores_lock:
+ if alias in self._jobstores:
+ raise ValueError(
+ f'This scheduler already has a job store by the alias of "{alias}"'
+ )
+
+ if isinstance(jobstore, BaseJobStore):
+ self._jobstores[alias] = jobstore
+ elif isinstance(jobstore, str):
+ self._jobstores[alias] = jobstore = self._create_plugin_instance(
+ "jobstore", jobstore, jobstore_opts
+ )
+ else:
+ raise TypeError(
+ f"Expected a job store instance or a string, got {jobstore.__class__.__name__} instead"
+ )
+
+ # Start the job store right away if the scheduler isn't stopped
+ if self.state != STATE_STOPPED:
+ jobstore.start(self, alias)
+
+ # Notify listeners that a new job store has been added
+ self._dispatch_event(SchedulerEvent(EVENT_JOBSTORE_ADDED, alias))
+
+ # Notify the scheduler so it can scan the new job store for jobs
+ if self.state != STATE_STOPPED:
+ self.wakeup()
+
+ def remove_jobstore(self, alias, shutdown=True):
+ """
+ Removes the job store by the given alias from this scheduler.
+
+ :param str|unicode alias: alias of the job store
+ :param bool shutdown: ``True`` to shut down the job store after removing it
+
+ """
+ with self._jobstores_lock:
+ jobstore = self._lookup_jobstore(alias)
+ del self._jobstores[alias]
+
+ if shutdown:
+ jobstore.shutdown()
+
+ self._dispatch_event(SchedulerEvent(EVENT_JOBSTORE_REMOVED, alias))
+
+ def add_listener(self, callback, mask=EVENT_ALL):
+ """
+ add_listener(callback, mask=EVENT_ALL)
+
+ Adds a listener for scheduler events.
+
+ When a matching event occurs, ``callback`` is executed with the event object as its
+ sole argument. If the ``mask`` parameter is not provided, the callback will receive events
+ of all types.
+
+ :param callback: any callable that takes one argument
+ :param int mask: bitmask that indicates which events should be
+ listened to
+
+ .. seealso:: :mod:`apscheduler.events`
+ .. seealso:: :ref:`scheduler-events`
+
+ """
+ with self._listeners_lock:
+ self._listeners.append((callback, mask))
+
+ def remove_listener(self, callback):
+ """Removes a previously added event listener."""
+
+ with self._listeners_lock:
+ for i, (cb, _) in enumerate(self._listeners):
+ if callback == cb:
+ del self._listeners[i]
+
+ def add_job(
+ self,
+ func,
+ trigger=None,
+ args=None,
+ kwargs=None,
+ id=None,
+ name=None,
+ misfire_grace_time=undefined,
+ coalesce=undefined,
+ max_instances=undefined,
+ next_run_time=undefined,
+ jobstore="default",
+ executor="default",
+ replace_existing=False,
+ **trigger_args,
+ ):
+ """
+ add_job(func, trigger=None, args=None, kwargs=None, id=None, \
+ name=None, misfire_grace_time=undefined, coalesce=undefined, \
+ max_instances=undefined, next_run_time=undefined, \
+ jobstore='default', executor='default', \
+ replace_existing=False, **trigger_args)
+
+ Adds the given job to the job list and wakes up the scheduler if it's already running.
+
+ Any option that defaults to ``undefined`` will be replaced with the corresponding default
+ value when the job is scheduled (which happens when the scheduler is started, or
+ immediately if the scheduler is already running).
+
+ The ``func`` argument can be given either as a callable object or a textual reference in
+ the ``package.module:some.object`` format, where the first half (separated by ``:``) is an
+ importable module and the second half is a reference to the callable object, relative to
+ the module.
+
+ The ``trigger`` argument can either be:
+ #. the alias name of the trigger (e.g. ``date``, ``interval`` or ``cron``), in which case
+ any extra keyword arguments to this method are passed on to the trigger's constructor
+ #. an instance of a trigger class
+
+ :param func: callable (or a textual reference to one) to run at the given time
+ :param str|apscheduler.triggers.base.BaseTrigger trigger: trigger that determines when
+ ``func`` is called
+ :param list|tuple args: list of positional arguments to call func with
+ :param dict kwargs: dict of keyword arguments to call func with
+ :param str|unicode id: explicit identifier for the job (for modifying it later)
+ :param str|unicode name: textual description of the job
+ :param int misfire_grace_time: seconds after the designated runtime that the job is still
+ allowed to be run (or ``None`` to allow the job to run no matter how late it is)
+ :param bool coalesce: run once instead of many times if the scheduler determines that the
+ job should be run more than once in succession
+ :param int max_instances: maximum number of concurrently running instances allowed for this
+ job
+ :param datetime next_run_time: when to first run the job, regardless of the trigger (pass
+ ``None`` to add the job as paused)
+ :param str|unicode jobstore: alias of the job store to store the job in
+ :param str|unicode executor: alias of the executor to run the job with
+ :param bool replace_existing: ``True`` to replace an existing job with the same ``id``
+ (but retain the number of runs from the existing one)
+ :rtype: Job
+
+ """
+ job_kwargs = {
+ "trigger": self._create_trigger(trigger, trigger_args),
+ "executor": executor,
+ "func": func,
+ "args": tuple(args) if args is not None else (),
+ "kwargs": dict(kwargs) if kwargs is not None else {},
+ "id": id,
+ "name": name,
+ "misfire_grace_time": misfire_grace_time,
+ "coalesce": coalesce,
+ "max_instances": max_instances,
+ "next_run_time": next_run_time,
+ }
+ job_kwargs = dict(
+ (key, value) for key, value in job_kwargs.items() if value is not undefined
+ )
+ job = Job(self, **job_kwargs)
+
+ # Don't really add jobs to job stores before the scheduler is up and running
+ with self._jobstores_lock:
+ if self.state == STATE_STOPPED:
+ self._pending_jobs.append((job, jobstore, replace_existing))
+ self._logger.info(
+ "Adding job tentatively -- it will be properly scheduled when "
+ "the scheduler starts"
+ )
+ else:
+ self._real_add_job(job, jobstore, replace_existing)
+
+ return job
+
+ def scheduled_job(
+ self,
+ trigger,
+ args=None,
+ kwargs=None,
+ id=None,
+ name=None,
+ misfire_grace_time=undefined,
+ coalesce=undefined,
+ max_instances=undefined,
+ next_run_time=undefined,
+ jobstore="default",
+ executor="default",
+ **trigger_args,
+ ):
+ """
+ scheduled_job(trigger, args=None, kwargs=None, id=None, \
+ name=None, misfire_grace_time=undefined, \
+ coalesce=undefined, max_instances=undefined, \
+ next_run_time=undefined, jobstore='default', \
+ executor='default',**trigger_args)
+
+ A decorator version of :meth:`add_job`, except that ``replace_existing`` is always
+ ``True``.
+
+ .. important:: The ``id`` argument must be given if scheduling a job in a persistent job
+ store. The scheduler cannot, however, enforce this requirement.
+
+ """
+
+ def inner(func):
+ self.add_job(
+ func,
+ trigger,
+ args,
+ kwargs,
+ id,
+ name,
+ misfire_grace_time,
+ coalesce,
+ max_instances,
+ next_run_time,
+ jobstore,
+ executor,
+ True,
+ **trigger_args,
+ )
+ return func
+
+ return inner
+
+ def modify_job(self, job_id, jobstore=None, **changes):
+ """
+ Modifies the properties of a single job.
+
+ Modifications are passed to this method as extra keyword arguments.
+
+ :param str|unicode job_id: the identifier of the job
+ :param str|unicode jobstore: alias of the job store that contains the job
+ :return Job: the relevant job instance
+
+ """
+ with self._jobstores_lock:
+ job, jobstore = self._lookup_job(job_id, jobstore)
+ job._modify(**changes)
+ if jobstore:
+ self._lookup_jobstore(jobstore).update_job(job)
+
+ self._dispatch_event(JobEvent(EVENT_JOB_MODIFIED, job_id, jobstore))
+
+ # Wake up the scheduler since the job's next run time may have been changed
+ if self.state == STATE_RUNNING:
+ self.wakeup()
+
+ return job
+
+ def reschedule_job(self, job_id, jobstore=None, trigger=None, **trigger_args):
+ """
+ Constructs a new trigger for a job and updates its next run time.
+
+ Extra keyword arguments are passed directly to the trigger's constructor.
+
+ :param str|unicode job_id: the identifier of the job
+ :param str|unicode jobstore: alias of the job store that contains the job
+ :param trigger: alias of the trigger type or a trigger instance
+ :return Job: the relevant job instance
+
+ """
+ trigger = self._create_trigger(trigger, trigger_args)
+ now = datetime.now(self.timezone)
+ next_run_time = trigger.get_next_fire_time(None, now)
+ return self.modify_job(
+ job_id, jobstore, trigger=trigger, next_run_time=next_run_time
+ )
+
+ def pause_job(self, job_id, jobstore=None):
+ """
+ Causes the given job not to be executed until it is explicitly resumed.
+
+ :param str|unicode job_id: the identifier of the job
+ :param str|unicode jobstore: alias of the job store that contains the job
+ :return Job: the relevant job instance
+
+ """
+ return self.modify_job(job_id, jobstore, next_run_time=None)
+
+ def resume_job(self, job_id, jobstore=None):
+ """
+ Resumes the schedule of the given job, or removes the job if its schedule is finished.
+
+ :param str|unicode job_id: the identifier of the job
+ :param str|unicode jobstore: alias of the job store that contains the job
+ :return Job|None: the relevant job instance if the job was rescheduled, or ``None`` if no
+ next run time could be calculated and the job was removed
+
+ """
+ with self._jobstores_lock:
+ job, jobstore = self._lookup_job(job_id, jobstore)
+ now = datetime.now(self.timezone)
+ next_run_time = job.trigger.get_next_fire_time(None, now)
+ if next_run_time:
+ return self.modify_job(job_id, jobstore, next_run_time=next_run_time)
+ else:
+ self.remove_job(job.id, jobstore)
+
+ def get_jobs(self, jobstore=None, pending=None):
+ """
+ Returns a list of pending jobs (if the scheduler hasn't been started yet) and scheduled
+ jobs, either from a specific job store or from all of them.
+
+ If the scheduler has not been started yet, only pending jobs can be returned because the
+ job stores haven't been started yet either.
+
+ :param str|unicode jobstore: alias of the job store
+ :param bool pending: **DEPRECATED**
+ :rtype: list[Job]
+
+ """
+ if pending is not None:
+ warnings.warn(
+ 'The "pending" option is deprecated -- get_jobs() always returns '
+ "scheduled jobs if the scheduler has been started and pending jobs "
+ "otherwise",
+ DeprecationWarning,
+ )
+
+ with self._jobstores_lock:
+ jobs = []
+ if self.state == STATE_STOPPED:
+ for job, alias, replace_existing in self._pending_jobs:
+ if jobstore is None or alias == jobstore:
+ jobs.append(job)
+ else:
+ for alias, store in self._jobstores.items():
+ if jobstore is None or alias == jobstore:
+ jobs.extend(store.get_all_jobs())
+
+ return jobs
+
+ def get_job(self, job_id, jobstore=None):
+ """
+ Returns the Job that matches the given ``job_id``.
+
+ :param str|unicode job_id: the identifier of the job
+ :param str|unicode jobstore: alias of the job store that most likely contains the job
+ :return: the Job by the given ID, or ``None`` if it wasn't found
+ :rtype: Job
+
+ """
+ with self._jobstores_lock:
+ try:
+ return self._lookup_job(job_id, jobstore)[0]
+ except JobLookupError:
+ return
+
+ def remove_job(self, job_id, jobstore=None):
+ """
+ Removes a job, preventing it from being run any more.
+
+ :param str|unicode job_id: the identifier of the job
+ :param str|unicode jobstore: alias of the job store that contains the job
+ :raises JobLookupError: if the job was not found
+
+ """
+ jobstore_alias = None
+ with self._jobstores_lock:
+ # Check if the job is among the pending jobs
+ if self.state == STATE_STOPPED:
+ for i, (job, alias, replace_existing) in enumerate(self._pending_jobs):
+ if job.id == job_id and jobstore in (None, alias):
+ del self._pending_jobs[i]
+ jobstore_alias = alias
+ break
+ else:
+ # Otherwise, try to remove it from each store until it succeeds or we run out of
+ # stores to check
+ for alias, store in self._jobstores.items():
+ if jobstore in (None, alias):
+ try:
+ store.remove_job(job_id)
+ jobstore_alias = alias
+ break
+ except JobLookupError:
+ continue
+
+ if jobstore_alias is None:
+ raise JobLookupError(job_id)
+
+ # Notify listeners that a job has been removed
+ event = JobEvent(EVENT_JOB_REMOVED, job_id, jobstore_alias)
+ self._dispatch_event(event)
+
+ self._logger.info("Removed job %s", job_id)
+
+ def remove_all_jobs(self, jobstore=None):
+ """
+ Removes all jobs from the specified job store, or all job stores if none is given.
+
+ :param str|unicode jobstore: alias of the job store
+
+ """
+ with self._jobstores_lock:
+ if self.state == STATE_STOPPED:
+ if jobstore:
+ self._pending_jobs = [
+ pending
+ for pending in self._pending_jobs
+ if pending[1] != jobstore
+ ]
+ else:
+ self._pending_jobs = []
+ else:
+ for alias, store in self._jobstores.items():
+ if jobstore in (None, alias):
+ store.remove_all_jobs()
+
+ self._dispatch_event(SchedulerEvent(EVENT_ALL_JOBS_REMOVED, jobstore))
+
+ def print_jobs(self, jobstore=None, out=None):
+ """
+ print_jobs(jobstore=None, out=sys.stdout)
+
+ Prints out a textual listing of all jobs currently scheduled on either all job stores or
+ just a specific one.
+
+ :param str|unicode jobstore: alias of the job store, ``None`` to list jobs from all stores
+ :param file out: a file-like object to print to (defaults to **sys.stdout** if nothing is
+ given)
+
+ """
+ out = out or sys.stdout
+ with self._jobstores_lock:
+ if self.state == STATE_STOPPED:
+ print("Pending jobs:", file=out)
+ if self._pending_jobs:
+ for job, jobstore_alias, replace_existing in self._pending_jobs:
+ if jobstore in (None, jobstore_alias):
+ print(f" {job}", file=out)
+ else:
+ print(" No pending jobs", file=out)
+ else:
+ for alias, store in sorted(self._jobstores.items()):
+ if jobstore in (None, alias):
+ print(f"Jobstore {alias}:", file=out)
+ jobs = store.get_all_jobs()
+ if jobs:
+ for job in jobs:
+ print(f" {job}", file=out)
+ else:
+ print(" No scheduled jobs", file=out)
+
+ def export_jobs(self, outfile, jobstore=None):
+ """
+ Export stored jobs as JSON.
+
+ :param outfile: either a file object opened in text write mode ("w"), or a path
+ to the target file
+ :param jobstore: alias of the job store to export jobs from (if omitted, export
+ from all configured job stores)
+
+ """
+ import json
+ import pickle
+ from base64 import b64encode
+
+ from apscheduler import version
+
+ if self.state == STATE_STOPPED:
+ raise RuntimeError(
+ "the scheduler must have been started for job export to work"
+ )
+
+ def encode_with_pickle(obj):
+ return b64encode(pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)).decode("ascii")
+
+ def json_default(obj):
+ if hasattr(obj, "__getstate__") and hasattr(obj, "__setstate__"):
+ state = obj.__getstate__()
+ if isinstance(state, Mapping):
+ return {
+ "__apscheduler_class__": obj_to_ref(obj.__class__),
+ "__apscheduler_state__": state,
+ }
+
+ return {"__apscheduler_pickle__": encode_with_pickle(obj)}
+
+ with self._jobstores_lock:
+ all_jobs = [
+ job
+ for alias, store in self._jobstores.items()
+ for job in store.get_all_jobs()
+ if jobstore in (None, alias)
+ ]
+
+ with ExitStack() as stack:
+ if not hasattr(outfile, "write"):
+ outfile = stack.enter_context(open(outfile, "w"))
+
+ json.dump(
+ {
+ "version": 1,
+ "scheduler_version": version,
+ "jobs": [job.__getstate__() for job in all_jobs],
+ },
+ outfile,
+ default=json_default,
+ )
+
+ def import_jobs(self, infile, jobstore="default"):
+ """
+        Import jobs previously exported via :meth:`export_jobs`.
+
+ :param infile: either a file object opened in text read mode ("r") or a path to
+ a JSON file containing previously exported jobs
+ :param jobstore: the alias of the job store to import the jobs to
+
+ """
+ import json
+ import pickle
+ from base64 import b64decode
+
+ def json_object_hook(dct):
+ if pickle_data := dct.get("__apscheduler_pickle__"):
+ return pickle.loads(b64decode(pickle_data))
+
+ if obj_class := dct.get("__apscheduler_class__"):
+ if obj_state := dct.get("__apscheduler_state__"):
+ obj_class = ref_to_obj(obj_class)
+ obj = object.__new__(obj_class)
+ obj.__setstate__(obj_state)
+ return obj
+
+ return dct
+
+ jobstore = self._jobstores[jobstore]
+ with ExitStack() as stack:
+ if not hasattr(infile, "read"):
+ infile = stack.enter_context(open(infile))
+
+ data = json.load(infile, object_hook=json_object_hook)
+ if not isinstance(data, dict):
+                raise ValueError("expected a JSON object at the top level of the import file")
+
+ if (version := data.get("version", None)) != 1:
+ raise ValueError(f"unrecognized version: {version}")
+
+ for job_state in data["jobs"]:
+ job = object.__new__(Job)
+ job.__setstate__(job_state)
+ jobstore.add_job(job)
+
+ @abstractmethod
+ def wakeup(self):
+ """
+ Notifies the scheduler that there may be jobs due for execution.
+ Triggers :meth:`_process_jobs` to be run in an implementation specific manner.
+ """
+
+ #
+ # Private API
+ #
+
+ def _configure(self, config):
+ # Set general options
+ self._logger = maybe_ref(config.pop("logger", None)) or getLogger(
+ "apscheduler.scheduler"
+ )
+ self.timezone = astimezone(config.pop("timezone", None)) or get_localzone()
+ self.jobstore_retry_interval = float(config.pop("jobstore_retry_interval", 10))
+
+ # Set the job defaults
+ job_defaults = config.get("job_defaults", {})
+ self._job_defaults = {
+ "misfire_grace_time": asint(job_defaults.get("misfire_grace_time", 1)),
+ "coalesce": asbool(job_defaults.get("coalesce", True)),
+ "max_instances": asint(job_defaults.get("max_instances", 1)),
+ }
+
+ # Configure executors
+ self._executors.clear()
+ for alias, value in config.get("executors", {}).items():
+ if isinstance(value, BaseExecutor):
+ self.add_executor(value, alias)
+ elif isinstance(value, MutableMapping):
+ executor_class = value.pop("class", None)
+ plugin = value.pop("type", None)
+ if plugin:
+ executor = self._create_plugin_instance("executor", plugin, value)
+ elif executor_class:
+ cls = maybe_ref(executor_class)
+ executor = cls(**value)
+ else:
+ raise ValueError(
+ f'Cannot create executor "{alias}" -- either "type" or "class" must be defined'
+ )
+
+ self.add_executor(executor, alias)
+ else:
+ raise TypeError(
+ f"Expected executor instance or dict for executors['{alias}'], got {value.__class__.__name__} instead"
+ )
+
+ # Configure job stores
+ self._jobstores.clear()
+ for alias, value in config.get("jobstores", {}).items():
+ if isinstance(value, BaseJobStore):
+ self.add_jobstore(value, alias)
+ elif isinstance(value, MutableMapping):
+ jobstore_class = value.pop("class", None)
+ plugin = value.pop("type", None)
+ if plugin:
+ jobstore = self._create_plugin_instance("jobstore", plugin, value)
+ elif jobstore_class:
+ cls = maybe_ref(jobstore_class)
+ jobstore = cls(**value)
+ else:
+ raise ValueError(
+ f'Cannot create job store "{alias}" -- either "type" or "class" must be '
+ "defined"
+ )
+
+ self.add_jobstore(jobstore, alias)
+ else:
+ raise TypeError(
+ f"Expected job store instance or dict for jobstores['{alias}'], got {value.__class__.__name__} instead"
+ )
+
+ def _create_default_executor(self):
+ """Creates a default executor store, specific to the particular scheduler type."""
+ return ThreadPoolExecutor()
+
+ def _create_default_jobstore(self):
+ """Creates a default job store, specific to the particular scheduler type."""
+ return MemoryJobStore()
+
+ def _lookup_executor(self, alias):
+ """
+ Returns the executor instance by the given name from the list of executors that were added
+ to this scheduler.
+
+ :type alias: str
+        :raises KeyError: if no executor by the given alias is found
+
+ """
+ try:
+ return self._executors[alias]
+ except KeyError:
+ raise KeyError(f"No such executor: {alias}")
+
+ def _lookup_jobstore(self, alias):
+ """
+ Returns the job store instance by the given name from the list of job stores that were
+ added to this scheduler.
+
+ :type alias: str
+        :raises KeyError: if no job store by the given alias is found
+
+ """
+ try:
+ return self._jobstores[alias]
+ except KeyError:
+ raise KeyError(f"No such job store: {alias}")
+
+ def _lookup_job(self, job_id, jobstore_alias):
+ """
+ Finds a job by its ID.
+
+ :type job_id: str
+ :param str jobstore_alias: alias of a job store to look in
+ :return tuple[Job, str]: a tuple of job, jobstore alias (jobstore alias is None in case of
+ a pending job)
+ :raises JobLookupError: if no job by the given ID is found.
+
+ """
+ if self.state == STATE_STOPPED:
+ # Check if the job is among the pending jobs
+ for job, alias, replace_existing in self._pending_jobs:
+ if job.id == job_id:
+ return job, None
+ else:
+ # Look in all job stores
+ for alias, store in self._jobstores.items():
+ if jobstore_alias in (None, alias):
+ job = store.lookup_job(job_id)
+ if job is not None:
+ return job, alias
+
+ raise JobLookupError(job_id)
+
+ def _dispatch_event(self, event):
+ """
+ Dispatches the given event to interested listeners.
+
+ :param SchedulerEvent event: the event to send
+
+ """
+ with self._listeners_lock:
+ listeners = tuple(self._listeners)
+
+ for cb, mask in listeners:
+ if event.code & mask:
+ try:
+ cb(event)
+ except BaseException:
+ self._logger.exception("Error notifying listener")
+
+ def _check_uwsgi(self):
+ """Check if we're running under uWSGI with threads disabled."""
+ uwsgi_module = sys.modules.get("uwsgi")
+ if not getattr(uwsgi_module, "has_threads", True):
+ raise RuntimeError(
+ "The scheduler seems to be running under uWSGI, but threads have "
+ "been disabled. You must run uWSGI with the --enable-threads "
+ "option for the scheduler to work."
+ )
+
+ def _real_add_job(self, job, jobstore_alias, replace_existing):
+ """
+ :param Job job: the job to add
+ :param bool replace_existing: ``True`` to use update_job() in case the job already exists
+ in the store
+
+ """
+ # Fill in undefined values with defaults
+ replacements = {}
+ for key, value in self._job_defaults.items():
+ if not hasattr(job, key):
+ replacements[key] = value
+
+ # Calculate the next run time if there is none defined
+ if not hasattr(job, "next_run_time"):
+ now = datetime.now(self.timezone)
+ replacements["next_run_time"] = job.trigger.get_next_fire_time(None, now)
+
+ # Apply any replacements
+ job._modify(**replacements)
+
+ # Add the job to the given job store
+ store = self._lookup_jobstore(jobstore_alias)
+ try:
+ store.add_job(job)
+ except ConflictingIdError:
+ if replace_existing:
+ store.update_job(job)
+ else:
+ raise
+
+ # Mark the job as no longer pending
+ job._jobstore_alias = jobstore_alias
+
+ # Notify listeners that a new job has been added
+ event = JobEvent(EVENT_JOB_ADDED, job.id, jobstore_alias)
+ self._dispatch_event(event)
+
+ self._logger.info('Added job "%s" to job store "%s"', job.name, jobstore_alias)
+
+ # Notify the scheduler about the new job
+ if self.state == STATE_RUNNING:
+ self.wakeup()
+
+ def _create_plugin_instance(self, type_, alias, constructor_kwargs):
+ """Creates an instance of the given plugin type, loading the plugin first if necessary."""
+ plugin_container, class_container, base_class = {
+ "trigger": (self._trigger_plugins, self._trigger_classes, BaseTrigger),
+ "jobstore": (self._jobstore_plugins, self._jobstore_classes, BaseJobStore),
+ "executor": (self._executor_plugins, self._executor_classes, BaseExecutor),
+ }[type_]
+
+ try:
+ plugin_cls = class_container[alias]
+ except KeyError:
+ if alias in plugin_container:
+ plugin_cls = class_container[alias] = plugin_container[alias].load()
+ if not issubclass(plugin_cls, base_class):
+ raise TypeError(
+ f"The {type_} entry point does not point to a {type_} class"
+ )
+ else:
+ raise LookupError(f'No {type_} by the name "{alias}" was found')
+
+ return plugin_cls(**constructor_kwargs)
+
+ def _create_trigger(self, trigger, trigger_args):
+ if isinstance(trigger, BaseTrigger):
+ return trigger
+ elif trigger is None:
+ trigger = "date"
+ elif not isinstance(trigger, str):
+ raise TypeError(
+ f"Expected a trigger instance or string, got {trigger.__class__.__name__} instead"
+ )
+
+ # Use the scheduler's time zone if nothing else is specified
+ trigger_args.setdefault("timezone", self.timezone)
+
+ # Instantiate the trigger class
+ return self._create_plugin_instance("trigger", trigger, trigger_args)
+
+ def _create_lock(self):
+ """Creates a reentrant lock object."""
+ return RLock()
+
+ def _process_jobs(self):
+ """
+ Iterates through jobs in every jobstore, starts jobs that are due and figures out how long
+ to wait for the next round.
+
+ If the ``get_due_jobs()`` call raises an exception, a new wakeup is scheduled in at least
+ ``jobstore_retry_interval`` seconds.
+
+ """
+ if self.state == STATE_PAUSED:
+ self._logger.debug("Scheduler is paused -- not processing jobs")
+ return None
+
+ self._logger.debug("Looking for jobs to run")
+ now = datetime.now(self.timezone)
+ next_wakeup_time = None
+ events = []
+
+ with self._jobstores_lock:
+ for jobstore_alias, jobstore in self._jobstores.items():
+ try:
+ due_jobs = jobstore.get_due_jobs(now)
+ except Exception as e:
+ # Schedule a wakeup at least in jobstore_retry_interval seconds
+ self._logger.warning(
+ "Error getting due jobs from job store %r: %s",
+ jobstore_alias,
+ e,
+ )
+ retry_wakeup_time = now + timedelta(
+ seconds=self.jobstore_retry_interval
+ )
+ if not next_wakeup_time or next_wakeup_time > retry_wakeup_time:
+ next_wakeup_time = retry_wakeup_time
+
+ continue
+
+ for job in due_jobs:
+ # Look up the job's executor
+ try:
+ executor = self._lookup_executor(job.executor)
+ except BaseException:
+ self._logger.error(
+ 'Executor lookup ("%s") failed for job "%s" -- removing it from the '
+ "job store",
+ job.executor,
+ job,
+ )
+ self.remove_job(job.id, jobstore_alias)
+ continue
+
+ run_times = job._get_run_times(now)
+ run_times = (
+ run_times[-1:] if run_times and job.coalesce else run_times
+ )
+ if run_times:
+ try:
+ executor.submit_job(job, run_times)
+ except MaxInstancesReachedError:
+ self._logger.warning(
+ 'Execution of job "%s" skipped: maximum number of running '
+ "instances reached (%d)",
+ job,
+ job.max_instances,
+ )
+ event = JobSubmissionEvent(
+ EVENT_JOB_MAX_INSTANCES,
+ job.id,
+ jobstore_alias,
+ run_times,
+ )
+ events.append(event)
+ except BaseException:
+ self._logger.exception(
+ 'Error submitting job "%s" to executor "%s"',
+ job,
+ job.executor,
+ )
+ else:
+ event = JobSubmissionEvent(
+ EVENT_JOB_SUBMITTED, job.id, jobstore_alias, run_times
+ )
+ events.append(event)
+
+ # Update the job if it has a next execution time.
+ # Otherwise remove it from the job store.
+ job_next_run = job.trigger.get_next_fire_time(
+ run_times[-1], now
+ )
+ if job_next_run:
+ job._modify(next_run_time=job_next_run)
+ jobstore.update_job(job)
+ else:
+ self.remove_job(job.id, jobstore_alias)
+
+ # Set a new next wakeup time if there isn't one yet or
+ # the jobstore has an even earlier one
+ jobstore_next_run_time = jobstore.get_next_run_time()
+ if jobstore_next_run_time and (
+ next_wakeup_time is None
+ or jobstore_next_run_time < next_wakeup_time
+ ):
+ next_wakeup_time = jobstore_next_run_time.astimezone(self.timezone)
+
+ # Dispatch collected events
+ for event in events:
+ self._dispatch_event(event)
+
+ # Determine the delay until this method should be called again
+ if self.state == STATE_PAUSED:
+ wait_seconds = None
+ self._logger.debug("Scheduler is paused; waiting until resume() is called")
+ elif next_wakeup_time is None:
+ wait_seconds = None
+ self._logger.debug("No jobs; waiting until a job is added")
+ else:
+ now = datetime.now(self.timezone)
+ wait_seconds = min(
+ max((next_wakeup_time - now).total_seconds(), 0), TIMEOUT_MAX
+ )
+ self._logger.debug(
+ "Next wakeup is due at %s (in %f seconds)",
+ next_wakeup_time,
+ wait_seconds,
+ )
+
+ return wait_seconds
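
A minimal sketch of observing the job events dispatched above (EVENT_JOB_ADDED from _real_add_job(), EVENT_JOB_SUBMITTED and EVENT_JOB_MAX_INSTANCES from _process_jobs()) via add_listener(); the BackgroundScheduler and the on_job_event callback are illustrative choices, not part of this file:

    from apscheduler.events import (
        EVENT_JOB_ADDED,
        EVENT_JOB_MAX_INSTANCES,
        EVENT_JOB_SUBMITTED,
    )
    from apscheduler.schedulers.background import BackgroundScheduler

    def on_job_event(event):
        # JobEvent / JobSubmissionEvent carry the job id and jobstore alias that
        # were passed to them when the scheduler created the event objects above.
        print(f"event {event.code}: job {event.job_id} in store {event.jobstore!r}")

    scheduler = BackgroundScheduler()
    scheduler.add_listener(
        on_job_event, EVENT_JOB_ADDED | EVENT_JOB_SUBMITTED | EVENT_JOB_MAX_INSTANCES
    )
    scheduler.start()
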
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/schedulers/blocking.py b/.venv/lib/python3.12/site-packages/apscheduler/schedulers/blocking.py
new file mode 100644
index 00000000..0330885d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/schedulers/blocking.py
@@ -0,0 +1,33 @@
+from threading import TIMEOUT_MAX, Event
+
+from apscheduler.schedulers.base import STATE_STOPPED, BaseScheduler
+
+
+class BlockingScheduler(BaseScheduler):
+ """
+ A scheduler that runs in the foreground
+ (:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will block).
+ """
+
+ _event = None
+
+ def start(self, *args, **kwargs):
+ if self._event is None or self._event.is_set():
+ self._event = Event()
+
+ super().start(*args, **kwargs)
+ self._main_loop()
+
+ def shutdown(self, wait=True):
+ super().shutdown(wait)
+ self._event.set()
+
+ def _main_loop(self):
+ wait_seconds = TIMEOUT_MAX
+ while self.state != STATE_STOPPED:
+ self._event.wait(wait_seconds)
+ self._event.clear()
+ wait_seconds = self._process_jobs()
+
+ def wakeup(self):
+ self._event.set()
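
A minimal usage sketch for the scheduler above: start() blocks in _main_loop() until shutdown() sets the event. The tick job and the 5-second interval are illustrative:

    from apscheduler.schedulers.blocking import BlockingScheduler

    def tick():
        print("tick")

    scheduler = BlockingScheduler()
    scheduler.add_job(tick, "interval", seconds=5)
    try:
        scheduler.start()  # blocks the calling thread until shutdown() is called
    except (KeyboardInterrupt, SystemExit):
        pass
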
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/schedulers/gevent.py b/.venv/lib/python3.12/site-packages/apscheduler/schedulers/gevent.py
new file mode 100644
index 00000000..e32ad643
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/schedulers/gevent.py
@@ -0,0 +1,34 @@
+from apscheduler.schedulers.base import BaseScheduler
+from apscheduler.schedulers.blocking import BlockingScheduler
+
+try:
+ import gevent
+ from gevent.event import Event
+ from gevent.lock import RLock
+except ImportError as exc: # pragma: nocover
+ raise ImportError("GeventScheduler requires gevent installed") from exc
+
+
+class GeventScheduler(BlockingScheduler):
+ """A scheduler that runs as a Gevent greenlet."""
+
+ _greenlet = None
+
+ def start(self, *args, **kwargs):
+ self._event = Event()
+ BaseScheduler.start(self, *args, **kwargs)
+ self._greenlet = gevent.spawn(self._main_loop)
+ return self._greenlet
+
+ def shutdown(self, *args, **kwargs):
+ super().shutdown(*args, **kwargs)
+ self._greenlet.join()
+ del self._greenlet
+
+ def _create_lock(self):
+ return RLock()
+
+ def _create_default_executor(self):
+ from apscheduler.executors.gevent import GeventExecutor
+
+ return GeventExecutor()
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/schedulers/qt.py b/.venv/lib/python3.12/site-packages/apscheduler/schedulers/qt.py
new file mode 100644
index 00000000..457b5395
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/schedulers/qt.py
@@ -0,0 +1,44 @@
+from importlib import import_module
+from itertools import product
+
+from apscheduler.schedulers.base import BaseScheduler
+
+for version, pkgname in product(range(6, 1, -1), ("PySide", "PyQt")):
+ try:
+ qtcore = import_module(pkgname + str(version) + ".QtCore")
+ except ImportError:
+ pass
+ else:
+ QTimer = qtcore.QTimer
+ break
+else:
+    raise ImportError("QtScheduler requires either PySide or PyQt (v2 through v6) to be installed")
+
+
+class QtScheduler(BaseScheduler):
+ """A scheduler that runs in a Qt event loop."""
+
+ _timer = None
+
+ def shutdown(self, *args, **kwargs):
+ super().shutdown(*args, **kwargs)
+ self._stop_timer()
+
+ def _start_timer(self, wait_seconds):
+ self._stop_timer()
+ if wait_seconds is not None:
+ wait_time = min(int(wait_seconds * 1000), 2147483647)
+ self._timer = QTimer.singleShot(wait_time, self._process_jobs)
+
+ def _stop_timer(self):
+ if self._timer:
+ if self._timer.isActive():
+ self._timer.stop()
+ del self._timer
+
+ def wakeup(self):
+ self._start_timer(0)
+
+ def _process_jobs(self):
+ wait_seconds = super()._process_jobs()
+ self._start_timer(wait_seconds)
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/schedulers/tornado.py b/.venv/lib/python3.12/site-packages/apscheduler/schedulers/tornado.py
new file mode 100644
index 00000000..96741b79
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/schedulers/tornado.py
@@ -0,0 +1,65 @@
+from datetime import timedelta
+from functools import wraps
+
+from apscheduler.schedulers.base import BaseScheduler
+from apscheduler.util import maybe_ref
+
+try:
+ from tornado.ioloop import IOLoop
+except ImportError as exc: # pragma: nocover
+ raise ImportError("TornadoScheduler requires tornado installed") from exc
+
+
+def run_in_ioloop(func):
+ @wraps(func)
+ def wrapper(self, *args, **kwargs):
+ self._ioloop.add_callback(func, self, *args, **kwargs)
+
+ return wrapper
+
+
+class TornadoScheduler(BaseScheduler):
+ """
+ A scheduler that runs on a Tornado IOLoop.
+
+ The default executor can run jobs based on native coroutines (``async def``).
+
+ =========== ===============================================================
+ ``io_loop`` Tornado IOLoop instance to use (defaults to the global IO loop)
+ =========== ===============================================================
+ """
+
+ _ioloop = None
+ _timeout = None
+
+ @run_in_ioloop
+ def shutdown(self, wait=True):
+ super().shutdown(wait)
+ self._stop_timer()
+
+ def _configure(self, config):
+ self._ioloop = maybe_ref(config.pop("io_loop", None)) or IOLoop.current()
+ super()._configure(config)
+
+ def _start_timer(self, wait_seconds):
+ self._stop_timer()
+ if wait_seconds is not None:
+ self._timeout = self._ioloop.add_timeout(
+ timedelta(seconds=wait_seconds), self.wakeup
+ )
+
+ def _stop_timer(self):
+ if self._timeout:
+ self._ioloop.remove_timeout(self._timeout)
+ del self._timeout
+
+ def _create_default_executor(self):
+ from apscheduler.executors.tornado import TornadoExecutor
+
+ return TornadoExecutor()
+
+ @run_in_ioloop
+ def wakeup(self):
+ self._stop_timer()
+ wait_seconds = self._process_jobs()
+ self._start_timer(wait_seconds)
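
A usage sketch assuming the surrounding application runs a Tornado IOLoop itself; the coroutine job and the interval are illustrative:

    from tornado.ioloop import IOLoop

    from apscheduler.schedulers.tornado import TornadoScheduler

    async def poll():
        print("polling")  # runs on the default TornadoExecutor

    scheduler = TornadoScheduler()
    scheduler.add_job(poll, "interval", seconds=10)
    scheduler.start()
    IOLoop.current().start()  # the scheduler schedules its wakeups on this loop
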
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/schedulers/twisted.py b/.venv/lib/python3.12/site-packages/apscheduler/schedulers/twisted.py
new file mode 100644
index 00000000..7a3622c1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/schedulers/twisted.py
@@ -0,0 +1,62 @@
+from functools import wraps
+
+from apscheduler.schedulers.base import BaseScheduler
+from apscheduler.util import maybe_ref
+
+try:
+ from twisted.internet import reactor as default_reactor
+except ImportError as exc: # pragma: nocover
+ raise ImportError("TwistedScheduler requires Twisted installed") from exc
+
+
+def run_in_reactor(func):
+ @wraps(func)
+ def wrapper(self, *args, **kwargs):
+ self._reactor.callFromThread(func, self, *args, **kwargs)
+
+ return wrapper
+
+
+class TwistedScheduler(BaseScheduler):
+ """
+ A scheduler that runs on a Twisted reactor.
+
+ Extra options:
+
+ =========== ========================================================
+ ``reactor`` Reactor instance to use (defaults to the global reactor)
+ =========== ========================================================
+ """
+
+ _reactor = None
+ _delayedcall = None
+
+ def _configure(self, config):
+ self._reactor = maybe_ref(config.pop("reactor", default_reactor))
+ super()._configure(config)
+
+ @run_in_reactor
+ def shutdown(self, wait=True):
+ super().shutdown(wait)
+ self._stop_timer()
+
+ def _start_timer(self, wait_seconds):
+ self._stop_timer()
+ if wait_seconds is not None:
+ self._delayedcall = self._reactor.callLater(wait_seconds, self.wakeup)
+
+ def _stop_timer(self):
+ if self._delayedcall and self._delayedcall.active():
+ self._delayedcall.cancel()
+ del self._delayedcall
+
+ @run_in_reactor
+ def wakeup(self):
+ self._stop_timer()
+ wait_seconds = self._process_jobs()
+ self._start_timer(wait_seconds)
+
+ def _create_default_executor(self):
+ from apscheduler.executors.twisted import TwistedExecutor
+
+ return TwistedExecutor()
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/triggers/__init__.py b/.venv/lib/python3.12/site-packages/apscheduler/triggers/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/triggers/__init__.py
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/triggers/base.py b/.venv/lib/python3.12/site-packages/apscheduler/triggers/base.py
new file mode 100644
index 00000000..917af8ca
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/triggers/base.py
@@ -0,0 +1,35 @@
+import random
+from abc import ABCMeta, abstractmethod
+from datetime import timedelta
+
+
+class BaseTrigger(metaclass=ABCMeta):
+ """Abstract base class that defines the interface that every trigger must implement."""
+
+ __slots__ = ()
+
+ @abstractmethod
+ def get_next_fire_time(self, previous_fire_time, now):
+ """
+        Returns the next datetime to fire on. If no such datetime can be calculated, returns
+ ``None``.
+
+ :param datetime.datetime previous_fire_time: the previous time the trigger was fired
+ :param datetime.datetime now: current datetime
+ """
+
+ def _apply_jitter(self, next_fire_time, jitter, now):
+ """
+ Randomize ``next_fire_time`` by adding a random value (the jitter).
+
+ :param datetime.datetime|None next_fire_time: next fire time without jitter applied. If
+ ``None``, returns ``None``.
+ :param int|None jitter: maximum number of seconds to add to ``next_fire_time``
+ (if ``None`` or ``0``, returns ``next_fire_time``)
+ :param datetime.datetime now: current datetime
+        :return datetime.datetime|None: next fire time with jitter applied.
+ """
+ if next_fire_time is None or not jitter:
+ return next_fire_time
+
+ return next_fire_time + timedelta(seconds=random.uniform(0, jitter))
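
As a sketch of the interface defined above, a hypothetical trigger (not part of APScheduler) that fires a fixed number of seconds after the previous run:

    from datetime import timedelta

    from apscheduler.triggers.base import BaseTrigger

    class FixedDelayTrigger(BaseTrigger):
        """Hypothetical example: fire `seconds` after the previous run (or after now)."""

        def __init__(self, seconds, jitter=None):
            self.seconds = seconds
            self.jitter = jitter

        def get_next_fire_time(self, previous_fire_time, now):
            base = previous_fire_time or now
            return self._apply_jitter(
                base + timedelta(seconds=self.seconds), self.jitter, now
            )
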
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/triggers/calendarinterval.py b/.venv/lib/python3.12/site-packages/apscheduler/triggers/calendarinterval.py
new file mode 100644
index 00000000..cd860489
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/triggers/calendarinterval.py
@@ -0,0 +1,186 @@
+from __future__ import annotations
+
+from datetime import date, datetime, time, timedelta, tzinfo
+from typing import Any
+
+from tzlocal import get_localzone
+
+from apscheduler.triggers.base import BaseTrigger
+from apscheduler.util import (
+ asdate,
+ astimezone,
+ timezone_repr,
+)
+
+
+class CalendarIntervalTrigger(BaseTrigger):
+ """
+    Runs the task on specified calendar-based intervals, always at the same exact time of
+ day.
+
+ When calculating the next date, the ``years`` and ``months`` parameters are first
+ added to the previous date while keeping the day of the month constant. This is
+ repeated until the resulting date is valid. After that, the ``weeks`` and ``days``
+ parameters are added to that date. Finally, the date is combined with the given time
+ (hour, minute, second) to form the final datetime.
+
+ This means that if the ``days`` or ``weeks`` parameters are not used, the task will
+ always be executed on the same day of the month at the same wall clock time,
+ assuming the date and time are valid.
+
+ If the resulting datetime is invalid due to a daylight saving forward shift, the
+ date is discarded and the process moves on to the next date. If instead the datetime
+ is ambiguous due to a backward DST shift, the earlier of the two resulting datetimes
+ is used.
+
+ If no previous run time is specified when requesting a new run time (like when
+ starting for the first time or resuming after being paused), ``start_date`` is used
+ as a reference and the next valid datetime equal to or later than the current time
+ will be returned. Otherwise, the next valid datetime starting from the previous run
+ time is returned, even if it's in the past.
+
+ .. warning:: Be wary of setting a start date near the end of the month (29. – 31.)
+ if you have ``months`` specified in your interval, as this will skip the months
+ when those days do not exist. Likewise, setting the start date on the leap day
+ (February 29th) and having ``years`` defined may cause some years to be skipped.
+
+ Users are also discouraged from using a time inside the target timezone's DST
+ switching period (typically around 2 am) since a date could either be skipped or
+ repeated due to the specified wall clock time either occurring twice or not at
+ all.
+
+ :param years: number of years to wait
+ :param months: number of months to wait
+ :param weeks: number of weeks to wait
+ :param days: number of days to wait
+ :param hour: hour to run the task at
+ :param minute: minute to run the task at
+ :param second: second to run the task at
+ :param start_date: first date to trigger on (defaults to current date if omitted)
+ :param end_date: latest possible date to trigger on
+ :param timezone: time zone to use for calculating the next fire time (defaults
+ to scheduler timezone if created via the scheduler, otherwise the local time
+ zone)
+ :param jitter: delay the job execution by ``jitter`` seconds at most
+ """
+
+ __slots__ = (
+ "years",
+ "months",
+ "weeks",
+ "days",
+ "start_date",
+ "end_date",
+ "timezone",
+ "jitter",
+ "_time",
+ )
+
+ def __init__(
+ self,
+ *,
+ years: int = 0,
+ months: int = 0,
+ weeks: int = 0,
+ days: int = 0,
+ hour: int = 0,
+ minute: int = 0,
+ second: int = 0,
+ start_date: date | str | None = None,
+ end_date: date | str | None = None,
+ timezone: str | tzinfo | None = None,
+ jitter: int | None = None,
+ ):
+ if timezone:
+ self.timezone = astimezone(timezone)
+ else:
+ self.timezone = astimezone(get_localzone())
+
+ self.years = years
+ self.months = months
+ self.weeks = weeks
+ self.days = days
+ self.start_date = asdate(start_date) or date.today()
+ self.end_date = asdate(end_date)
+ self.jitter = jitter
+ self._time = time(hour, minute, second, tzinfo=self.timezone)
+
+ if self.years == self.months == self.weeks == self.days == 0:
+ raise ValueError("interval must be at least 1 day long")
+
+ if self.end_date and self.start_date > self.end_date:
+ raise ValueError("end_date cannot be earlier than start_date")
+
+ def get_next_fire_time(
+ self, previous_fire_time: datetime | None, now: datetime
+ ) -> datetime | None:
+ while True:
+ if previous_fire_time:
+ year, month = previous_fire_time.year, previous_fire_time.month
+ while True:
+ month += self.months
+ year += self.years + (month - 1) // 12
+ month = (month - 1) % 12 + 1
+ try:
+ next_date = date(year, month, previous_fire_time.day)
+ except ValueError:
+ pass # Nonexistent date
+ else:
+ next_date += timedelta(self.days + self.weeks * 7)
+ break
+ else:
+ next_date = self.start_date
+
+ # Don't return any date past end_date
+ if self.end_date and next_date > self.end_date:
+ return None
+
+ # Combine the date with the designated time and normalize the result
+ timestamp = datetime.combine(next_date, self._time).timestamp()
+ next_time = datetime.fromtimestamp(timestamp, self.timezone)
+
+ # Check if the time is off due to normalization and a forward DST shift
+ if next_time.timetz() != self._time:
+ previous_fire_time = next_time.date()
+ else:
+ return self._apply_jitter(next_time, self.jitter, now)
+
+ def __getstate__(self) -> dict[str, Any]:
+ return {
+ "version": 1,
+ "interval": [self.years, self.months, self.weeks, self.days],
+ "time": [self._time.hour, self._time.minute, self._time.second],
+ "start_date": self.start_date,
+ "end_date": self.end_date,
+ "timezone": self.timezone,
+ "jitter": self.jitter,
+ }
+
+ def __setstate__(self, state: dict[str, Any]) -> None:
+ if state.get("version", 1) > 1:
+ raise ValueError(
+ f"Got serialized data for version {state['version']} of "
+ f"{self.__class__.__name__}, but only versions up to 1 can be handled"
+ )
+
+ self.years, self.months, self.weeks, self.days = state["interval"]
+ self.start_date = state["start_date"]
+ self.end_date = state["end_date"]
+ self.timezone = state["timezone"]
+ self.jitter = state["jitter"]
+ self._time = time(*state["time"], tzinfo=self.timezone)
+
+ def __repr__(self) -> str:
+ fields = []
+ for field in "years", "months", "weeks", "days":
+ value = getattr(self, field)
+ if value > 0:
+ fields.append(f"{field}={value}")
+
+ fields.append(f"time={self._time.isoformat()!r}")
+ fields.append(f"start_date='{self.start_date}'")
+ if self.end_date:
+ fields.append(f"end_date='{self.end_date}'")
+
+ fields.append(f"timezone={timezone_repr(self.timezone)!r}")
+ return f'{self.__class__.__name__}({", ".join(fields)})'
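
A usage sketch for the trigger above; the scheduler object and the send_report callable are placeholders assumed to exist elsewhere:

    from apscheduler.triggers.calendarinterval import CalendarIntervalTrigger

    # Fire on the 15th of every month at 08:30 Helsinki time.
    trigger = CalendarIntervalTrigger(
        months=1, hour=8, minute=30, start_date="2024-01-15", timezone="Europe/Helsinki"
    )
    scheduler.add_job(send_report, trigger)  # scheduler / send_report are placeholders
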
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/triggers/combining.py b/.venv/lib/python3.12/site-packages/apscheduler/triggers/combining.py
new file mode 100644
index 00000000..653f9b57
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/triggers/combining.py
@@ -0,0 +1,114 @@
+from apscheduler.triggers.base import BaseTrigger
+from apscheduler.util import obj_to_ref, ref_to_obj
+
+
+class BaseCombiningTrigger(BaseTrigger):
+ __slots__ = ("triggers", "jitter")
+
+ def __init__(self, triggers, jitter=None):
+ self.triggers = triggers
+ self.jitter = jitter
+
+ def __getstate__(self):
+ return {
+ "version": 1,
+ "triggers": [
+ (obj_to_ref(trigger.__class__), trigger.__getstate__())
+ for trigger in self.triggers
+ ],
+ "jitter": self.jitter,
+ }
+
+ def __setstate__(self, state):
+ if state.get("version", 1) > 1:
+ raise ValueError(
+ f"Got serialized data for version {state['version']} of "
+ f"{self.__class__.__name__}, but only versions up to 1 can be handled"
+ )
+
+ self.jitter = state["jitter"]
+ self.triggers = []
+ for clsref, state in state["triggers"]:
+ cls = ref_to_obj(clsref)
+ trigger = cls.__new__(cls)
+ trigger.__setstate__(state)
+ self.triggers.append(trigger)
+
+ def __repr__(self):
+ return "<{}({}{})>".format(
+ self.__class__.__name__,
+ self.triggers,
+ f", jitter={self.jitter}" if self.jitter else "",
+ )
+
+
+class AndTrigger(BaseCombiningTrigger):
+ """
+ Always returns the earliest next fire time that all the given triggers can agree on.
+ The trigger is considered to be finished when any of the given triggers has finished its
+ schedule.
+
+ Trigger alias: ``and``
+
+ .. warning:: This trigger should only be used to combine triggers that fire on
+ specific times of day, such as
+ :class:`~apscheduler.triggers.cron.CronTrigger` and
+        :class:`~apscheduler.triggers.calendarinterval.CalendarIntervalTrigger`.
+ Attempting to use it with
+ :class:`~apscheduler.triggers.interval.IntervalTrigger` will likely result in
+ the scheduler hanging as it tries to find a fire time that matches exactly
+ between fire times produced by all the given triggers.
+
+ :param list triggers: triggers to combine
+ :param int|None jitter: delay the job execution by ``jitter`` seconds at most
+ """
+
+ __slots__ = ()
+
+ def get_next_fire_time(self, previous_fire_time, now):
+ while True:
+ fire_times = [
+ trigger.get_next_fire_time(previous_fire_time, now)
+ for trigger in self.triggers
+ ]
+ if None in fire_times:
+ return None
+ elif min(fire_times) == max(fire_times):
+ return self._apply_jitter(fire_times[0], self.jitter, now)
+ else:
+ now = max(fire_times)
+
+ def __str__(self):
+ return "and[{}]".format(", ".join(str(trigger) for trigger in self.triggers))
+
+
+class OrTrigger(BaseCombiningTrigger):
+ """
+ Always returns the earliest next fire time produced by any of the given triggers.
+ The trigger is considered finished when all the given triggers have finished their schedules.
+
+ Trigger alias: ``or``
+
+ :param list triggers: triggers to combine
+ :param int|None jitter: delay the job execution by ``jitter`` seconds at most
+
+    .. note:: Triggers that depend on the previous fire time, such as the interval trigger, may
+ seem to behave strangely since they are always passed the previous fire time produced by
+ any of the given triggers.
+ """
+
+ __slots__ = ()
+
+ def get_next_fire_time(self, previous_fire_time, now):
+ fire_times = [
+ trigger.get_next_fire_time(previous_fire_time, now)
+ for trigger in self.triggers
+ ]
+ fire_times = [fire_time for fire_time in fire_times if fire_time is not None]
+ if fire_times:
+ return self._apply_jitter(min(fire_times), self.jitter, now)
+ else:
+ return None
+
+ def __str__(self):
+ return "or[{}]".format(", ".join(str(trigger) for trigger in self.triggers))
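
A combined-schedule sketch using the OrTrigger above; the scheduler object and the rotate_logs callable are placeholders:

    from apscheduler.triggers.combining import OrTrigger
    from apscheduler.triggers.cron import CronTrigger

    # Fire at 06:00 on weekdays and additionally at 10:00 on Saturdays.
    trigger = OrTrigger([
        CronTrigger(day_of_week="mon-fri", hour=6),
        CronTrigger(day_of_week="sat", hour=10),
    ])
    scheduler.add_job(rotate_logs, trigger)  # scheduler / rotate_logs are placeholders
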
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/triggers/cron/__init__.py b/.venv/lib/python3.12/site-packages/apscheduler/triggers/cron/__init__.py
new file mode 100644
index 00000000..03be8196
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/triggers/cron/__init__.py
@@ -0,0 +1,289 @@
+from datetime import datetime, timedelta
+
+from tzlocal import get_localzone
+
+from apscheduler.triggers.base import BaseTrigger
+from apscheduler.triggers.cron.fields import (
+ DEFAULT_VALUES,
+ BaseField,
+ DayOfMonthField,
+ DayOfWeekField,
+ MonthField,
+ WeekField,
+)
+from apscheduler.util import (
+ astimezone,
+ convert_to_datetime,
+ datetime_ceil,
+ datetime_repr,
+)
+
+
+class CronTrigger(BaseTrigger):
+ """
+ Triggers when current time matches all specified time constraints,
+ similarly to how the UNIX cron scheduler works.
+
+ :param int|str year: 4-digit year
+ :param int|str month: month (1-12)
+ :param int|str day: day of month (1-31)
+ :param int|str week: ISO week (1-53)
+ :param int|str day_of_week: number or name of weekday (0-6 or mon,tue,wed,thu,fri,sat,sun)
+ :param int|str hour: hour (0-23)
+ :param int|str minute: minute (0-59)
+ :param int|str second: second (0-59)
+ :param datetime|str start_date: earliest possible date/time to trigger on (inclusive)
+ :param datetime|str end_date: latest possible date/time to trigger on (inclusive)
+ :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations (defaults
+ to scheduler timezone)
+ :param int|None jitter: delay the job execution by ``jitter`` seconds at most
+
+ .. note:: The first weekday is always **monday**.
+ """
+
+ FIELD_NAMES = (
+ "year",
+ "month",
+ "day",
+ "week",
+ "day_of_week",
+ "hour",
+ "minute",
+ "second",
+ )
+ FIELDS_MAP = {
+ "year": BaseField,
+ "month": MonthField,
+ "week": WeekField,
+ "day": DayOfMonthField,
+ "day_of_week": DayOfWeekField,
+ "hour": BaseField,
+ "minute": BaseField,
+ "second": BaseField,
+ }
+
+ __slots__ = "timezone", "start_date", "end_date", "fields", "jitter"
+
+ def __init__(
+ self,
+ year=None,
+ month=None,
+ day=None,
+ week=None,
+ day_of_week=None,
+ hour=None,
+ minute=None,
+ second=None,
+ start_date=None,
+ end_date=None,
+ timezone=None,
+ jitter=None,
+ ):
+ if timezone:
+ self.timezone = astimezone(timezone)
+ elif isinstance(start_date, datetime) and start_date.tzinfo:
+ self.timezone = astimezone(start_date.tzinfo)
+ elif isinstance(end_date, datetime) and end_date.tzinfo:
+ self.timezone = astimezone(end_date.tzinfo)
+ else:
+ self.timezone = get_localzone()
+
+ self.start_date = convert_to_datetime(start_date, self.timezone, "start_date")
+ self.end_date = convert_to_datetime(end_date, self.timezone, "end_date")
+
+ self.jitter = jitter
+
+ values = dict(
+ (key, value)
+ for (key, value) in locals().items()
+ if key in self.FIELD_NAMES and value is not None
+ )
+ self.fields = []
+ assign_defaults = False
+ for field_name in self.FIELD_NAMES:
+ if field_name in values:
+ exprs = values.pop(field_name)
+ is_default = False
+ assign_defaults = not values
+ elif assign_defaults:
+ exprs = DEFAULT_VALUES[field_name]
+ is_default = True
+ else:
+ exprs = "*"
+ is_default = True
+
+ field_class = self.FIELDS_MAP[field_name]
+ field = field_class(field_name, exprs, is_default)
+ self.fields.append(field)
+
+ @classmethod
+ def from_crontab(cls, expr, timezone=None):
+ """
+ Create a :class:`~CronTrigger` from a standard crontab expression.
+
+ See https://en.wikipedia.org/wiki/Cron for more information on the format accepted here.
+
+ :param expr: minute, hour, day of month, month, day of week
+ :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations (
+ defaults to scheduler timezone)
+ :return: a :class:`~CronTrigger` instance
+
+ """
+ values = expr.split()
+ if len(values) != 5:
+ raise ValueError(f"Wrong number of fields; got {len(values)}, expected 5")
+
+ return cls(
+ minute=values[0],
+ hour=values[1],
+ day=values[2],
+ month=values[3],
+ day_of_week=values[4],
+ timezone=timezone,
+ )
+
+ def _increment_field_value(self, dateval, fieldnum):
+ """
+ Increments the designated field and resets all less significant fields to their minimum
+ values.
+
+ :type dateval: datetime
+ :type fieldnum: int
+ :return: a tuple containing the new date, and the number of the field that was actually
+ incremented
+ :rtype: tuple
+ """
+
+ values = {}
+ i = 0
+ while i < len(self.fields):
+ field = self.fields[i]
+ if not field.REAL:
+ if i == fieldnum:
+ fieldnum -= 1
+ i -= 1
+ else:
+ i += 1
+ continue
+
+ if i < fieldnum:
+ values[field.name] = field.get_value(dateval)
+ i += 1
+ elif i > fieldnum:
+ values[field.name] = field.get_min(dateval)
+ i += 1
+ else:
+ value = field.get_value(dateval)
+ maxval = field.get_max(dateval)
+ if value == maxval:
+ fieldnum -= 1
+ i -= 1
+ else:
+ values[field.name] = value + 1
+ i += 1
+
+ difference = datetime(**values) - dateval.replace(tzinfo=None)
+ dateval = datetime.fromtimestamp(
+ dateval.timestamp() + difference.total_seconds(), self.timezone
+ )
+ return dateval, fieldnum
+
+ def _set_field_value(self, dateval, fieldnum, new_value):
+ values = {}
+ for i, field in enumerate(self.fields):
+ if field.REAL:
+ if i < fieldnum:
+ values[field.name] = field.get_value(dateval)
+ elif i > fieldnum:
+ values[field.name] = field.get_min(dateval)
+ else:
+ values[field.name] = new_value
+
+ return datetime(**values, tzinfo=self.timezone, fold=dateval.fold)
+
+ def get_next_fire_time(self, previous_fire_time, now):
+ if previous_fire_time:
+ start_date = min(now, previous_fire_time + timedelta(microseconds=1))
+ if start_date == previous_fire_time:
+ start_date += timedelta(microseconds=1)
+ else:
+ start_date = max(now, self.start_date) if self.start_date else now
+
+ fieldnum = 0
+ next_date = datetime_ceil(start_date).astimezone(self.timezone)
+ while 0 <= fieldnum < len(self.fields):
+ field = self.fields[fieldnum]
+ curr_value = field.get_value(next_date)
+ next_value = field.get_next_value(next_date)
+
+ if next_value is None:
+ # No valid value was found
+ next_date, fieldnum = self._increment_field_value(
+ next_date, fieldnum - 1
+ )
+ elif next_value > curr_value:
+                # A valid value higher than the starting value was found
+ if field.REAL:
+ next_date = self._set_field_value(next_date, fieldnum, next_value)
+ fieldnum += 1
+ else:
+ next_date, fieldnum = self._increment_field_value(
+ next_date, fieldnum
+ )
+ else:
+ # A valid value was found, no changes necessary
+ fieldnum += 1
+
+ # Return if the date has rolled past the end date
+ if self.end_date and next_date > self.end_date:
+ return None
+
+ if fieldnum >= 0:
+ next_date = self._apply_jitter(next_date, self.jitter, now)
+ return min(next_date, self.end_date) if self.end_date else next_date
+
+ def __getstate__(self):
+ return {
+ "version": 2,
+ "timezone": self.timezone,
+ "start_date": self.start_date,
+ "end_date": self.end_date,
+ "fields": self.fields,
+ "jitter": self.jitter,
+ }
+
+ def __setstate__(self, state):
+ # This is for compatibility with APScheduler 3.0.x
+ if isinstance(state, tuple):
+ state = state[1]
+
+ if state.get("version", 1) > 2:
+ raise ValueError(
+ f"Got serialized data for version {state['version']} of "
+ f"{self.__class__.__name__}, but only versions up to 2 can be handled"
+ )
+
+ self.timezone = astimezone(state["timezone"])
+ self.start_date = state["start_date"]
+ self.end_date = state["end_date"]
+ self.fields = state["fields"]
+ self.jitter = state.get("jitter")
+
+ def __str__(self):
+ options = [f"{f.name}='{f}'" for f in self.fields if not f.is_default]
+ return "cron[{}]".format(", ".join(options))
+
+ def __repr__(self):
+ options = [f"{f.name}='{f}'" for f in self.fields if not f.is_default]
+ if self.start_date:
+ options.append(f"start_date={datetime_repr(self.start_date)!r}")
+ if self.end_date:
+ options.append(f"end_date={datetime_repr(self.end_date)!r}")
+ if self.jitter:
+ options.append(f"jitter={self.jitter}")
+
+ return "<{} ({}, timezone='{}')>".format(
+ self.__class__.__name__,
+ ", ".join(options),
+ self.timezone,
+ )
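
Two equivalent ways of constructing the trigger defined above, one with explicit keyword arguments and one via from_crontab(); the schedule itself is illustrative:

    from apscheduler.triggers.cron import CronTrigger

    # "Every weekday at 03:30", expressed both ways:
    explicit = CronTrigger(day_of_week="mon-fri", hour=3, minute=30)
    from_tab = CronTrigger.from_crontab("30 3 * * mon-fri")
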
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/triggers/cron/expressions.py b/.venv/lib/python3.12/site-packages/apscheduler/triggers/cron/expressions.py
new file mode 100644
index 00000000..0d84ec23
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/triggers/cron/expressions.py
@@ -0,0 +1,285 @@
+"""This module contains the expressions applicable for CronTrigger's fields."""
+
+__all__ = (
+ "AllExpression",
+ "RangeExpression",
+ "WeekdayRangeExpression",
+ "WeekdayPositionExpression",
+ "LastDayOfMonthExpression",
+)
+
+import re
+from calendar import monthrange
+
+from apscheduler.util import asint
+
+WEEKDAYS = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
+MONTHS = [
+ "jan",
+ "feb",
+ "mar",
+ "apr",
+ "may",
+ "jun",
+ "jul",
+ "aug",
+ "sep",
+ "oct",
+ "nov",
+ "dec",
+]
+
+
+class AllExpression:
+ value_re = re.compile(r"\*(?:/(?P<step>\d+))?$")
+
+ def __init__(self, step=None):
+ self.step = asint(step)
+ if self.step == 0:
+ raise ValueError("Increment must be higher than 0")
+
+ def validate_range(self, field_name):
+ from apscheduler.triggers.cron.fields import MAX_VALUES, MIN_VALUES
+
+ value_range = MAX_VALUES[field_name] - MIN_VALUES[field_name]
+ if self.step and self.step > value_range:
+ raise ValueError(
+ f"the step value ({self.step}) is higher than the total range of the "
+ f"expression ({value_range})"
+ )
+
+ def get_next_value(self, date, field):
+ start = field.get_value(date)
+ minval = field.get_min(date)
+ maxval = field.get_max(date)
+ start = max(start, minval)
+
+ if not self.step:
+ next = start
+ else:
+ distance_to_next = (self.step - (start - minval)) % self.step
+ next = start + distance_to_next
+
+ if next <= maxval:
+ return next
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.step == other.step
+
+ def __str__(self):
+ if self.step:
+ return "*/%d" % self.step
+ return "*"
+
+ def __repr__(self):
+ return f"{self.__class__.__name__}({self.step})"
+
+
+class RangeExpression(AllExpression):
+ value_re = re.compile(r"(?P<first>\d+)(?:-(?P<last>\d+))?(?:/(?P<step>\d+))?$")
+
+ def __init__(self, first, last=None, step=None):
+ super().__init__(step)
+ first = asint(first)
+ last = asint(last)
+ if last is None and step is None:
+ last = first
+ if last is not None and first > last:
+ raise ValueError(
+ "The minimum value in a range must not be higher than the maximum"
+ )
+ self.first = first
+ self.last = last
+
+ def validate_range(self, field_name):
+ from apscheduler.triggers.cron.fields import MAX_VALUES, MIN_VALUES
+
+ super().validate_range(field_name)
+ if self.first < MIN_VALUES[field_name]:
+ raise ValueError(
+ f"the first value ({self.first}) is lower than the minimum value ({MIN_VALUES[field_name]})"
+ )
+ if self.last is not None and self.last > MAX_VALUES[field_name]:
+ raise ValueError(
+ f"the last value ({self.last}) is higher than the maximum value ({MAX_VALUES[field_name]})"
+ )
+ value_range = (self.last or MAX_VALUES[field_name]) - self.first
+ if self.step and self.step > value_range:
+ raise ValueError(
+ f"the step value ({self.step}) is higher than the total range of the "
+ f"expression ({value_range})"
+ )
+
+ def get_next_value(self, date, field):
+ startval = field.get_value(date)
+ minval = field.get_min(date)
+ maxval = field.get_max(date)
+
+ # Apply range limits
+ minval = max(minval, self.first)
+ maxval = min(maxval, self.last) if self.last is not None else maxval
+ nextval = max(minval, startval)
+
+ # Apply the step if defined
+ if self.step:
+ distance_to_next = (self.step - (nextval - minval)) % self.step
+ nextval += distance_to_next
+
+ return nextval if nextval <= maxval else None
+
+ def __eq__(self, other):
+ return (
+ isinstance(other, self.__class__)
+ and self.first == other.first
+ and self.last == other.last
+ )
+
+ def __str__(self):
+ if self.last != self.first and self.last is not None:
+ range = "%d-%d" % (self.first, self.last)
+ else:
+ range = str(self.first)
+
+ if self.step:
+ return "%s/%d" % (range, self.step)
+
+ return range
+
+ def __repr__(self):
+ args = [str(self.first)]
+ if self.last != self.first and self.last is not None or self.step:
+ args.append(str(self.last))
+
+ if self.step:
+ args.append(str(self.step))
+
+ return "{}({})".format(self.__class__.__name__, ", ".join(args))
+
+
+class MonthRangeExpression(RangeExpression):
+ value_re = re.compile(r"(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?", re.IGNORECASE)
+
+ def __init__(self, first, last=None):
+ try:
+ first_num = MONTHS.index(first.lower()) + 1
+ except ValueError:
+ raise ValueError(f'Invalid month name "{first}"')
+
+ if last:
+ try:
+ last_num = MONTHS.index(last.lower()) + 1
+ except ValueError:
+ raise ValueError(f'Invalid month name "{last}"')
+ else:
+ last_num = None
+
+ super().__init__(first_num, last_num)
+
+ def __str__(self):
+ if self.last != self.first and self.last is not None:
+ return f"{MONTHS[self.first - 1]}-{MONTHS[self.last - 1]}"
+ return MONTHS[self.first - 1]
+
+ def __repr__(self):
+        args = [f"'{MONTHS[self.first - 1]}'"]
+ if self.last != self.first and self.last is not None:
+ args.append(f"'{MONTHS[self.last - 1]}'")
+ return "{}({})".format(self.__class__.__name__, ", ".join(args))
+
+
+class WeekdayRangeExpression(RangeExpression):
+ value_re = re.compile(r"(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?", re.IGNORECASE)
+
+ def __init__(self, first, last=None):
+ try:
+ first_num = WEEKDAYS.index(first.lower())
+ except ValueError:
+ raise ValueError(f'Invalid weekday name "{first}"')
+
+ if last:
+ try:
+ last_num = WEEKDAYS.index(last.lower())
+ except ValueError:
+ raise ValueError(f'Invalid weekday name "{last}"')
+ else:
+ last_num = None
+
+ super().__init__(first_num, last_num)
+
+ def __str__(self):
+ if self.last != self.first and self.last is not None:
+ return f"{WEEKDAYS[self.first]}-{WEEKDAYS[self.last]}"
+ return WEEKDAYS[self.first]
+
+ def __repr__(self):
+ args = [f"'{WEEKDAYS[self.first]}'"]
+ if self.last != self.first and self.last is not None:
+ args.append(f"'{WEEKDAYS[self.last]}'")
+ return "{}({})".format(self.__class__.__name__, ", ".join(args))
+
+
+class WeekdayPositionExpression(AllExpression):
+ options = ["1st", "2nd", "3rd", "4th", "5th", "last"]
+ value_re = re.compile(
+ r"(?P<option_name>{}) +(?P<weekday_name>(?:\d+|\w+))".format("|".join(options)),
+ re.IGNORECASE,
+ )
+
+ def __init__(self, option_name, weekday_name):
+ super().__init__(None)
+ try:
+ self.option_num = self.options.index(option_name.lower())
+ except ValueError:
+ raise ValueError(f'Invalid weekday position "{option_name}"')
+
+ try:
+ self.weekday = WEEKDAYS.index(weekday_name.lower())
+ except ValueError:
+ raise ValueError(f'Invalid weekday name "{weekday_name}"')
+
+ def get_next_value(self, date, field):
+ # Figure out the weekday of the month's first day and the number of days in that month
+ first_day_wday, last_day = monthrange(date.year, date.month)
+
+ # Calculate which day of the month is the first of the target weekdays
+ first_hit_day = self.weekday - first_day_wday + 1
+ if first_hit_day <= 0:
+ first_hit_day += 7
+
+ # Calculate what day of the month the target weekday would be
+ if self.option_num < 5:
+ target_day = first_hit_day + self.option_num * 7
+ else:
+ target_day = first_hit_day + ((last_day - first_hit_day) // 7) * 7
+
+ if target_day <= last_day and target_day >= date.day:
+ return target_day
+
+ def __eq__(self, other):
+ return (
+ super().__eq__(other)
+ and self.option_num == other.option_num
+ and self.weekday == other.weekday
+ )
+
+ def __str__(self):
+ return f"{self.options[self.option_num]} {WEEKDAYS[self.weekday]}"
+
+ def __repr__(self):
+ return f"{self.__class__.__name__}('{self.options[self.option_num]}', '{WEEKDAYS[self.weekday]}')"
+
+
+class LastDayOfMonthExpression(AllExpression):
+ value_re = re.compile(r"last", re.IGNORECASE)
+
+ def __init__(self):
+ super().__init__(None)
+
+ def get_next_value(self, date, field):
+ return monthrange(date.year, date.month)[1]
+
+ def __str__(self):
+ return "last"
+
+ def __repr__(self):
+ return f"{self.__class__.__name__}()"
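
A quick illustration of how a range-with-step expression evaluates against a field; the datetime value is arbitrary:

    from datetime import datetime

    from apscheduler.triggers.cron.expressions import RangeExpression
    from apscheduler.triggers.cron.fields import BaseField

    # "10-40/15" on the minute field: next candidate at or after the current minute.
    field = BaseField("minute", "10-40/15")
    expr = RangeExpression(10, 40, 15)
    print(expr.get_next_value(datetime(2024, 5, 1, 12, 22), field))  # -> 25
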
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/triggers/cron/fields.py b/.venv/lib/python3.12/site-packages/apscheduler/triggers/cron/fields.py
new file mode 100644
index 00000000..4c081797
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/triggers/cron/fields.py
@@ -0,0 +1,149 @@
+"""Fields represent CronTrigger options which map to :class:`~datetime.datetime` fields."""
+
+__all__ = (
+ "MIN_VALUES",
+ "MAX_VALUES",
+ "DEFAULT_VALUES",
+ "BaseField",
+ "WeekField",
+ "DayOfMonthField",
+ "DayOfWeekField",
+)
+
+import re
+from calendar import monthrange
+
+from apscheduler.triggers.cron.expressions import (
+ AllExpression,
+ LastDayOfMonthExpression,
+ MonthRangeExpression,
+ RangeExpression,
+ WeekdayPositionExpression,
+ WeekdayRangeExpression,
+)
+
+MIN_VALUES = {
+ "year": 1970,
+ "month": 1,
+ "day": 1,
+ "week": 1,
+ "day_of_week": 0,
+ "hour": 0,
+ "minute": 0,
+ "second": 0,
+}
+MAX_VALUES = {
+ "year": 9999,
+ "month": 12,
+ "day": 31,
+ "week": 53,
+ "day_of_week": 6,
+ "hour": 23,
+ "minute": 59,
+ "second": 59,
+}
+DEFAULT_VALUES = {
+ "year": "*",
+ "month": 1,
+ "day": 1,
+ "week": "*",
+ "day_of_week": "*",
+ "hour": 0,
+ "minute": 0,
+ "second": 0,
+}
+SEPARATOR = re.compile(" *, *")
+
+
+class BaseField:
+ REAL = True
+ COMPILERS = [AllExpression, RangeExpression]
+
+ def __init__(self, name, exprs, is_default=False):
+ self.name = name
+ self.is_default = is_default
+ self.compile_expressions(exprs)
+
+ def get_min(self, dateval):
+ return MIN_VALUES[self.name]
+
+ def get_max(self, dateval):
+ return MAX_VALUES[self.name]
+
+ def get_value(self, dateval):
+ return getattr(dateval, self.name)
+
+ def get_next_value(self, dateval):
+ smallest = None
+ for expr in self.expressions:
+ value = expr.get_next_value(dateval, self)
+ if smallest is None or (value is not None and value < smallest):
+ smallest = value
+
+ return smallest
+
+ def compile_expressions(self, exprs):
+ self.expressions = []
+
+ # Split a comma-separated expression list, if any
+ for expr in SEPARATOR.split(str(exprs).strip()):
+ self.compile_expression(expr)
+
+ def compile_expression(self, expr):
+ for compiler in self.COMPILERS:
+ match = compiler.value_re.match(expr)
+ if match:
+ compiled_expr = compiler(**match.groupdict())
+
+ try:
+ compiled_expr.validate_range(self.name)
+ except ValueError as e:
+ raise ValueError(
+ f"Error validating expression {expr!r}: {e}"
+ ) from None
+
+ self.expressions.append(compiled_expr)
+ return
+
+ raise ValueError(f'Unrecognized expression "{expr}" for field "{self.name}"')
+
+ def __eq__(self, other):
+ return (
+            isinstance(other, self.__class__) and self.expressions == other.expressions
+ )
+
+ def __str__(self):
+ expr_strings = (str(e) for e in self.expressions)
+ return ",".join(expr_strings)
+
+ def __repr__(self):
+ return f"{self.__class__.__name__}('{self.name}', '{self}')"
+
+
+class WeekField(BaseField):
+ REAL = False
+
+ def get_value(self, dateval):
+ return dateval.isocalendar()[1]
+
+
+class DayOfMonthField(BaseField):
+ COMPILERS = BaseField.COMPILERS + [
+ WeekdayPositionExpression,
+ LastDayOfMonthExpression,
+ ]
+
+ def get_max(self, dateval):
+ return monthrange(dateval.year, dateval.month)[1]
+
+
+class DayOfWeekField(BaseField):
+ REAL = False
+ COMPILERS = BaseField.COMPILERS + [WeekdayRangeExpression]
+
+ def get_value(self, dateval):
+ return dateval.weekday()
+
+
+class MonthField(BaseField):
+ COMPILERS = BaseField.COMPILERS + [MonthRangeExpression]
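
A short sketch of how a field compiles and evaluates its expressions; the date is arbitrary:

    from datetime import datetime

    from apscheduler.triggers.cron.fields import DayOfMonthField

    # "last" compiles to LastDayOfMonthExpression.
    field = DayOfMonthField("day", "last")
    print(field.get_next_value(datetime(2024, 2, 10)))  # -> 29 (leap year)
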
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/triggers/date.py b/.venv/lib/python3.12/site-packages/apscheduler/triggers/date.py
new file mode 100644
index 00000000..a9302da5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/triggers/date.py
@@ -0,0 +1,51 @@
+from datetime import datetime
+
+from tzlocal import get_localzone
+
+from apscheduler.triggers.base import BaseTrigger
+from apscheduler.util import astimezone, convert_to_datetime, datetime_repr
+
+
+class DateTrigger(BaseTrigger):
+ """
+ Triggers once on the given datetime. If ``run_date`` is left empty, current time is used.
+
+ :param datetime|str run_date: the date/time to run the job at
+ :param datetime.tzinfo|str timezone: time zone for ``run_date`` if it doesn't have one already
+ """
+
+ __slots__ = "run_date"
+
+ def __init__(self, run_date=None, timezone=None):
+ timezone = astimezone(timezone) or get_localzone()
+ if run_date is not None:
+ self.run_date = convert_to_datetime(run_date, timezone, "run_date")
+ else:
+ self.run_date = datetime.now(timezone)
+
+ def get_next_fire_time(self, previous_fire_time, now):
+ return self.run_date if previous_fire_time is None else None
+
+ def __getstate__(self):
+ return {"version": 1, "run_date": self.run_date}
+
+ def __setstate__(self, state):
+ # This is for compatibility with APScheduler 3.0.x
+ if isinstance(state, tuple):
+ state = state[1]
+
+ if state.get("version", 1) > 1:
+ raise ValueError(
+ f"Got serialized data for version {state['version']} of "
+ f"{self.__class__.__name__}, but only version 1 can be handled"
+ )
+
+ self.run_date = state["run_date"]
+
+ def __str__(self):
+ return f"date[{datetime_repr(self.run_date)}]"
+
+ def __repr__(self):
+ return (
+ f"<{self.__class__.__name__} (run_date='{datetime_repr(self.run_date)}')>"
+ )
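
A one-shot scheduling sketch for the trigger above; the scheduler object and the happy_new_year callable are placeholders:

    from datetime import datetime

    from apscheduler.triggers.date import DateTrigger

    trigger = DateTrigger(run_date=datetime(2026, 1, 1, 0, 0), timezone="UTC")
    scheduler.add_job(happy_new_year, trigger)  # scheduler / happy_new_year are placeholders
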
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/triggers/interval.py b/.venv/lib/python3.12/site-packages/apscheduler/triggers/interval.py
new file mode 100644
index 00000000..9264c4ac
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/triggers/interval.py
@@ -0,0 +1,138 @@
+import random
+from datetime import datetime, timedelta
+from math import ceil
+
+from tzlocal import get_localzone
+
+from apscheduler.triggers.base import BaseTrigger
+from apscheduler.util import (
+ astimezone,
+ convert_to_datetime,
+ datetime_repr,
+)
+
+
+class IntervalTrigger(BaseTrigger):
+ """
+ Triggers on specified intervals, starting on ``start_date`` if specified, ``datetime.now()`` +
+ interval otherwise.
+
+ :param int weeks: number of weeks to wait
+ :param int days: number of days to wait
+ :param int hours: number of hours to wait
+ :param int minutes: number of minutes to wait
+ :param int seconds: number of seconds to wait
+ :param datetime|str start_date: starting point for the interval calculation
+ :param datetime|str end_date: latest possible date/time to trigger on
+ :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations
+ :param int|None jitter: delay the job execution by ``jitter`` seconds at most
+ """
+
+ __slots__ = (
+ "timezone",
+ "start_date",
+ "end_date",
+ "interval",
+ "interval_length",
+ "jitter",
+ )
+
+ def __init__(
+ self,
+ weeks=0,
+ days=0,
+ hours=0,
+ minutes=0,
+ seconds=0,
+ start_date=None,
+ end_date=None,
+ timezone=None,
+ jitter=None,
+ ):
+ self.interval = timedelta(
+ weeks=weeks, days=days, hours=hours, minutes=minutes, seconds=seconds
+ )
+ self.interval_length = self.interval.total_seconds()
+ if self.interval_length == 0:
+ self.interval = timedelta(seconds=1)
+ self.interval_length = 1
+
+ if timezone:
+ self.timezone = astimezone(timezone)
+ elif isinstance(start_date, datetime) and start_date.tzinfo:
+ self.timezone = astimezone(start_date.tzinfo)
+ elif isinstance(end_date, datetime) and end_date.tzinfo:
+ self.timezone = astimezone(end_date.tzinfo)
+ else:
+ self.timezone = get_localzone()
+
+ start_date = start_date or (datetime.now(self.timezone) + self.interval)
+ self.start_date = convert_to_datetime(start_date, self.timezone, "start_date")
+ self.end_date = convert_to_datetime(end_date, self.timezone, "end_date")
+
+ self.jitter = jitter
+
+ def get_next_fire_time(self, previous_fire_time, now):
+ if previous_fire_time:
+ next_fire_time = previous_fire_time.timestamp() + self.interval_length
+ elif self.start_date > now:
+ next_fire_time = self.start_date.timestamp()
+ else:
+ timediff = now.timestamp() - self.start_date.timestamp()
+ next_interval_num = int(ceil(timediff / self.interval_length))
+ next_fire_time = (
+ self.start_date.timestamp() + self.interval_length * next_interval_num
+ )
+
+ if self.jitter is not None:
+ next_fire_time += random.uniform(0, self.jitter)
+
+ if not self.end_date or next_fire_time <= self.end_date.timestamp():
+ return datetime.fromtimestamp(next_fire_time, tz=self.timezone)
+
+ def __getstate__(self):
+ return {
+ "version": 2,
+ "timezone": astimezone(self.timezone),
+ "start_date": self.start_date,
+ "end_date": self.end_date,
+ "interval": self.interval,
+ "jitter": self.jitter,
+ }
+
+ def __setstate__(self, state):
+ # This is for compatibility with APScheduler 3.0.x
+ if isinstance(state, tuple):
+ state = state[1]
+
+ if state.get("version", 1) > 2:
+ raise ValueError(
+ f"Got serialized data for version {state['version']} of "
+ f"{self.__class__.__name__}, but only versions up to 2 can be handled"
+ )
+
+ self.timezone = state["timezone"]
+ self.start_date = state["start_date"]
+ self.end_date = state["end_date"]
+ self.interval = state["interval"]
+ self.interval_length = self.interval.total_seconds()
+ self.jitter = state.get("jitter")
+
+ def __str__(self):
+ return f"interval[{self.interval!s}]"
+
+ def __repr__(self):
+ options = [
+ f"interval={self.interval!r}",
+ f"start_date={datetime_repr(self.start_date)!r}",
+ ]
+ if self.end_date:
+ options.append(f"end_date={datetime_repr(self.end_date)!r}")
+ if self.jitter:
+ options.append(f"jitter={self.jitter}")
+
+ return "<{} ({}, timezone='{}')>".format(
+ self.__class__.__name__,
+ ", ".join(options),
+ self.timezone,
+ )
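
A usage sketch; with no start_date the first fire time is one interval from now, and the scheduler object and sync_cache callable are placeholders:

    from apscheduler.triggers.interval import IntervalTrigger

    # Every 30 minutes, with up to 120 seconds of random jitter per fire time.
    trigger = IntervalTrigger(minutes=30, jitter=120)
    scheduler.add_job(sync_cache, trigger)  # scheduler / sync_cache are placeholders
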
diff --git a/.venv/lib/python3.12/site-packages/apscheduler/util.py b/.venv/lib/python3.12/site-packages/apscheduler/util.py
new file mode 100644
index 00000000..82eb8c07
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/apscheduler/util.py
@@ -0,0 +1,461 @@
+"""This module contains several handy functions primarily meant for internal use."""
+
+__all__ = (
+ "asint",
+ "asbool",
+ "astimezone",
+ "convert_to_datetime",
+ "datetime_to_utc_timestamp",
+ "utc_timestamp_to_datetime",
+ "datetime_ceil",
+ "get_callable_name",
+ "obj_to_ref",
+ "ref_to_obj",
+ "maybe_ref",
+ "check_callable_args",
+ "normalize",
+ "localize",
+ "undefined",
+)
+
+import re
+import sys
+from calendar import timegm
+from datetime import date, datetime, time, timedelta, timezone, tzinfo
+from functools import partial
+from inspect import isbuiltin, isclass, isfunction, ismethod, signature
+
+if sys.version_info < (3, 14):
+ from asyncio import iscoroutinefunction
+else:
+ from inspect import iscoroutinefunction
+
+if sys.version_info < (3, 9):
+ from backports.zoneinfo import ZoneInfo
+else:
+ from zoneinfo import ZoneInfo
+
+
+class _Undefined:
+ def __nonzero__(self):
+ return False
+
+ def __bool__(self):
+ return False
+
+ def __repr__(self):
+ return "<undefined>"
+
+
+undefined = (
+ _Undefined()
+) #: a unique object that only signifies that no value is defined
+
+
+def asint(text):
+ """
+ Safely converts a string to an integer, returning ``None`` if the string is ``None``.
+
+ :type text: str
+ :rtype: int
+
+ """
+ if text is not None:
+ return int(text)
+
+
+def asbool(obj):
+ """
+ Interprets an object as a boolean value.
+
+ :rtype: bool
+
+ """
+ if isinstance(obj, str):
+ obj = obj.strip().lower()
+ if obj in ("true", "yes", "on", "y", "t", "1"):
+ return True
+
+ if obj in ("false", "no", "off", "n", "f", "0"):
+ return False
+
+ raise ValueError(f'Unable to interpret value "{obj}" as boolean')
+
+ return bool(obj)
+
+
+def astimezone(obj):
+ """
+ Interprets an object as a timezone.
+
+ :rtype: tzinfo
+
+ """
+ if isinstance(obj, str):
+ if obj == "UTC":
+ return timezone.utc
+
+ return ZoneInfo(obj)
+
+ if isinstance(obj, tzinfo):
+ if obj.tzname(None) == "local":
+ raise ValueError(
+ "Unable to determine the name of the local timezone -- you must "
+ "explicitly specify the name of the local timezone. Please refrain "
+ "from using timezones like EST to prevent problems with daylight "
+ "saving time. Instead, use a locale based timezone name (such as "
+ "Europe/Helsinki)."
+ )
+ elif isinstance(obj, ZoneInfo):
+ return obj
+ elif hasattr(obj, "zone"):
+ # pytz timezones
+ if obj.zone:
+ return ZoneInfo(obj.zone)
+
+ return timezone(obj._offset)
+
+ return obj
+
+ if obj is not None:
+ raise TypeError(f"Expected tzinfo, got {obj.__class__.__name__} instead")
+
+
+def asdate(obj):
+ if isinstance(obj, str):
+ return date.fromisoformat(obj)
+
+ return obj
+
+
+_DATE_REGEX = re.compile(
+ r"(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})"
+ r"(?:[ T](?P<hour>\d{1,2}):(?P<minute>\d{1,2}):(?P<second>\d{1,2})"
+ r"(?:\.(?P<microsecond>\d{1,6}))?"
+ r"(?P<timezone>Z|[+-]\d\d:\d\d)?)?$"
+)
+
+
+def convert_to_datetime(input, tz, arg_name):
+ """
+ Converts the given object to a timezone aware datetime object.
+
+ If a timezone aware datetime object is passed, it is returned unmodified.
+    If a naive datetime object is passed, it is given the specified timezone.
+ If the input is a string, it is parsed as a datetime with the given timezone.
+
+ Date strings are accepted in three different forms: date only (Y-m-d), date with
+    time (Y-m-d H:M:S), or date with time and microseconds (Y-m-d H:M:S.micro).
+ Additionally you can override the time zone by giving a specific offset in the
+ format specified by ISO 8601: Z (UTC), +HH:MM or -HH:MM.
+
+ :param str|datetime input: the datetime or string to convert to a timezone aware
+ datetime
+ :param datetime.tzinfo tz: timezone to interpret ``input`` in
+ :param str arg_name: the name of the argument (used in an error message)
+ :rtype: datetime
+
+ """
+ if input is None:
+ return
+ elif isinstance(input, datetime):
+ datetime_ = input
+ elif isinstance(input, date):
+ datetime_ = datetime.combine(input, time())
+ elif isinstance(input, str):
+ m = _DATE_REGEX.match(input)
+ if not m:
+ raise ValueError("Invalid date string")
+
+ values = m.groupdict()
+ tzname = values.pop("timezone")
+ if tzname == "Z":
+ tz = timezone.utc
+ elif tzname:
+ hours, minutes = (int(x) for x in tzname[1:].split(":"))
+ sign = 1 if tzname[0] == "+" else -1
+ tz = timezone(sign * timedelta(hours=hours, minutes=minutes))
+
+ values = {k: int(v or 0) for k, v in values.items()}
+ datetime_ = datetime(**values)
+ else:
+ raise TypeError(f"Unsupported type for {arg_name}: {input.__class__.__name__}")
+
+ if datetime_.tzinfo is not None:
+ return datetime_
+ if tz is None:
+ raise ValueError(
+ f'The "tz" argument must be specified if {arg_name} has no timezone information'
+ )
+ if isinstance(tz, str):
+ tz = astimezone(tz)
+
+ return localize(datetime_, tz)
+
+
+def datetime_to_utc_timestamp(timeval):
+ """
+ Converts a datetime instance to a timestamp.
+
+ :type timeval: datetime
+ :rtype: float
+
+ """
+ if timeval is not None:
+ return timegm(timeval.utctimetuple()) + timeval.microsecond / 1000000
+
+
+def utc_timestamp_to_datetime(timestamp):
+ """
+ Converts the given timestamp to a datetime instance.
+
+ :type timestamp: float
+ :rtype: datetime
+
+ """
+ if timestamp is not None:
+ return datetime.fromtimestamp(timestamp, timezone.utc)
+
+
+def timedelta_seconds(delta):
+ """
+ Converts the given timedelta to seconds.
+
+ :type delta: timedelta
+ :rtype: float
+
+ """
+ return delta.days * 24 * 60 * 60 + delta.seconds + delta.microseconds / 1000000.0
+
+
+def datetime_ceil(dateval):
+ """
+ Rounds the given datetime object upwards.
+
+ :type dateval: datetime
+
+ """
+ if dateval.microsecond > 0:
+ return dateval + timedelta(seconds=1, microseconds=-dateval.microsecond)
+ return dateval
+
+
+def datetime_repr(dateval):
+ return dateval.strftime("%Y-%m-%d %H:%M:%S %Z") if dateval else "None"
+
+
+def timezone_repr(timezone: tzinfo) -> str:
+ if isinstance(timezone, ZoneInfo):
+ return timezone.key
+
+ return repr(timezone)
+
+
+def get_callable_name(func):
+ """
+ Returns the best available display name for the given function/callable.
+
+ :rtype: str
+
+ """
+ if ismethod(func):
+ self = func.__self__
+ cls = self if isclass(self) else type(self)
+ return f"{cls.__qualname__}.{func.__name__}"
+ elif isclass(func) or isfunction(func) or isbuiltin(func):
+ return func.__qualname__
+ elif hasattr(func, "__call__") and callable(func.__call__):
+ # instance of a class with a __call__ method
+ return type(func).__qualname__
+
+ raise TypeError(
+ f"Unable to determine a name for {func!r} -- maybe it is not a callable?"
+ )
+
+
+def obj_to_ref(obj):
+ """
+ Returns the path to the given callable.
+
+ :rtype: str
+ :raises TypeError: if the given object is not callable
+ :raises ValueError: if the given object is a :class:`~functools.partial`, lambda or a nested
+ function
+
+ """
+ if isinstance(obj, partial):
+ raise ValueError("Cannot create a reference to a partial()")
+
+ name = get_callable_name(obj)
+ if "<lambda>" in name:
+ raise ValueError("Cannot create a reference to a lambda")
+ if "<locals>" in name:
+ raise ValueError("Cannot create a reference to a nested function")
+
+ if ismethod(obj):
+ module = obj.__self__.__module__
+ else:
+ module = obj.__module__
+
+ return f"{module}:{name}"
+
+
+def ref_to_obj(ref):
+ """
+ Returns the object pointed to by ``ref``.
+
+ :type ref: str
+
+ """
+ if not isinstance(ref, str):
+ raise TypeError("References must be strings")
+ if ":" not in ref:
+ raise ValueError("Invalid reference")
+
+ modulename, rest = ref.split(":", 1)
+ try:
+ obj = __import__(modulename, fromlist=[rest])
+ except ImportError as exc:
+ raise LookupError(
+ f"Error resolving reference {ref}: could not import module"
+ ) from exc
+
+ try:
+ for name in rest.split("."):
+ obj = getattr(obj, name)
+ return obj
+ except Exception:
+ raise LookupError(f"Error resolving reference {ref}: error looking up object")
+
+
+def maybe_ref(ref):
+ """
+ Returns the object that the given reference points to, if it is indeed a reference.
+ If it is not a reference, the object is returned as-is.
+
+ """
+ if not isinstance(ref, str):
+ return ref
+ return ref_to_obj(ref)
+
+
+def check_callable_args(func, args, kwargs):
+ """
+ Ensures that the given callable can be called with the given arguments.
+
+ :type args: tuple
+ :type kwargs: dict
+
+ """
+ pos_kwargs_conflicts = [] # parameters that have a match in both args and kwargs
+ positional_only_kwargs = [] # positional-only parameters that have a match in kwargs
+ unsatisfied_args = [] # parameters in signature that don't have a match in args or kwargs
+ unsatisfied_kwargs = [] # keyword-only arguments that don't have a match in kwargs
+ unmatched_args = list(
+ args
+ ) # args that didn't match any of the parameters in the signature
+ # kwargs that didn't match any of the parameters in the signature
+ unmatched_kwargs = list(kwargs)
+ # indicates if the signature defines *args and **kwargs respectively
+ has_varargs = has_var_kwargs = False
+
+ try:
+ sig = signature(func, follow_wrapped=False)
+ except ValueError:
+ # signature() doesn't work against every kind of callable
+ return
+
+ for param in sig.parameters.values():
+ if param.kind == param.POSITIONAL_OR_KEYWORD:
+ if param.name in unmatched_kwargs and unmatched_args:
+ pos_kwargs_conflicts.append(param.name)
+ elif unmatched_args:
+ del unmatched_args[0]
+ elif param.name in unmatched_kwargs:
+ unmatched_kwargs.remove(param.name)
+ elif param.default is param.empty:
+ unsatisfied_args.append(param.name)
+ elif param.kind == param.POSITIONAL_ONLY:
+ if unmatched_args:
+ del unmatched_args[0]
+ elif param.name in unmatched_kwargs:
+ unmatched_kwargs.remove(param.name)
+ positional_only_kwargs.append(param.name)
+ elif param.default is param.empty:
+ unsatisfied_args.append(param.name)
+ elif param.kind == param.KEYWORD_ONLY:
+ if param.name in unmatched_kwargs:
+ unmatched_kwargs.remove(param.name)
+ elif param.default is param.empty:
+ unsatisfied_kwargs.append(param.name)
+ elif param.kind == param.VAR_POSITIONAL:
+ has_varargs = True
+ elif param.kind == param.VAR_KEYWORD:
+ has_var_kwargs = True
+
+ # Make sure there are no conflicts between args and kwargs
+ if pos_kwargs_conflicts:
+ raise ValueError(
+ "The following arguments are supplied in both args and kwargs: {}".format(
+ ", ".join(pos_kwargs_conflicts)
+ )
+ )
+
+ # Check if keyword arguments are being fed to positional-only parameters
+ if positional_only_kwargs:
+ raise ValueError(
+ "The following arguments cannot be given as keyword arguments: {}".format(
+ ", ".join(positional_only_kwargs)
+ )
+ )
+
+ # Check that the number of positional arguments minus the number of matched kwargs
+ # matches the argspec
+ if unsatisfied_args:
+ raise ValueError(
+ "The following arguments have not been supplied: {}".format(
+ ", ".join(unsatisfied_args)
+ )
+ )
+
+ # Check that all keyword-only arguments have been supplied
+ if unsatisfied_kwargs:
+ raise ValueError(
+ "The following keyword-only arguments have not been supplied in kwargs: "
+ "{}".format(", ".join(unsatisfied_kwargs))
+ )
+
+ # Check that the callable can accept the given number of positional arguments
+ if not has_varargs and unmatched_args:
+ raise ValueError(
+ f"The list of positional arguments is longer than the target callable can "
+ f"handle (allowed: {len(args) - len(unmatched_args)}, given in args: "
+ f"{len(args)})"
+ )
+
+ # Check that the callable can accept the given keyword arguments
+ if not has_var_kwargs and unmatched_kwargs:
+ raise ValueError(
+ "The target callable does not accept the following keyword arguments: "
+ "{}".format(", ".join(unmatched_kwargs))
+ )
+
+
+def iscoroutinefunction_partial(f):
+ while isinstance(f, partial):
+ f = f.func
+
+ # The asyncio version of iscoroutinefunction includes testing for @coroutine
+    # decorations, whereas the inspect version does not.
+ return iscoroutinefunction(f)
+
+
+def normalize(dt):
+ return datetime.fromtimestamp(dt.timestamp(), dt.tzinfo)
+
+
+def localize(dt, tzinfo):
+ if hasattr(tzinfo, "localize"):
+ return tzinfo.localize(dt)
+
+ return normalize(dt.replace(tzinfo=tzinfo))
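
A small sketch exercising the reference and datetime helpers above; the input string is arbitrary:

    from zoneinfo import ZoneInfo

    from apscheduler.util import convert_to_datetime, obj_to_ref, ref_to_obj

    ref = obj_to_ref(convert_to_datetime)  # 'apscheduler.util:convert_to_datetime'
    assert ref_to_obj(ref) is convert_to_datetime

    dt = convert_to_datetime("2024-06-01 12:00:00", ZoneInfo("UTC"), "run_date")
    print(dt.isoformat())  # 2024-06-01T12:00:00+00:00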