aboutsummaryrefslogtreecommitdiff
path: root/.venv/lib/python3.12/site-packages/markdown/extensions
diff options
context:
space:
mode:
Diffstat (limited to '.venv/lib/python3.12/site-packages/markdown/extensions')
-rw-r--r--.venv/lib/python3.12/site-packages/markdown/extensions/__init__.py145
-rw-r--r--.venv/lib/python3.12/site-packages/markdown/extensions/abbr.py185
-rw-r--r--.venv/lib/python3.12/site-packages/markdown/extensions/admonition.py183
-rw-r--r--.venv/lib/python3.12/site-packages/markdown/extensions/attr_list.py203
-rw-r--r--.venv/lib/python3.12/site-packages/markdown/extensions/codehilite.py347
-rw-r--r--.venv/lib/python3.12/site-packages/markdown/extensions/def_list.py119
-rw-r--r--.venv/lib/python3.12/site-packages/markdown/extensions/extra.py66
-rw-r--r--.venv/lib/python3.12/site-packages/markdown/extensions/fenced_code.py193
-rw-r--r--.venv/lib/python3.12/site-packages/markdown/extensions/footnotes.py418
-rw-r--r--.venv/lib/python3.12/site-packages/markdown/extensions/legacy_attrs.py71
-rw-r--r--.venv/lib/python3.12/site-packages/markdown/extensions/legacy_em.py52
-rw-r--r--.venv/lib/python3.12/site-packages/markdown/extensions/md_in_html.py376
-rw-r--r--.venv/lib/python3.12/site-packages/markdown/extensions/meta.py86
-rw-r--r--.venv/lib/python3.12/site-packages/markdown/extensions/nl2br.py41
-rw-r--r--.venv/lib/python3.12/site-packages/markdown/extensions/sane_lists.py69
-rw-r--r--.venv/lib/python3.12/site-packages/markdown/extensions/smarty.py277
-rw-r--r--.venv/lib/python3.12/site-packages/markdown/extensions/tables.py248
-rw-r--r--.venv/lib/python3.12/site-packages/markdown/extensions/toc.py488
-rw-r--r--.venv/lib/python3.12/site-packages/markdown/extensions/wikilinks.py97
19 files changed, 3664 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/markdown/extensions/__init__.py b/.venv/lib/python3.12/site-packages/markdown/extensions/__init__.py
new file mode 100644
index 00000000..a5ec07b2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/markdown/extensions/__init__.py
@@ -0,0 +1,145 @@
+# Python Markdown
+
+# A Python implementation of John Gruber's Markdown.
+
+# Documentation: https://python-markdown.github.io/
+# GitHub: https://github.com/Python-Markdown/markdown/
+# PyPI: https://pypi.org/project/Markdown/
+
+# Started by Manfred Stienstra (http://www.dwerg.net/).
+# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
+# Currently maintained by Waylan Limberg (https://github.com/waylan),
+# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
+
+# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later)
+# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
+# Copyright 2004 Manfred Stienstra (the original version)
+
+# License: BSD (see LICENSE.md for details).
+
+"""
+Markdown accepts an [`Extension`][markdown.extensions.Extension] instance for each extension. Therefore, each extension
+must define a class that extends [`Extension`][markdown.extensions.Extension] and over-rides the
+[`extendMarkdown`][markdown.extensions.Extension.extendMarkdown] method. Within this class one can manage configuration
+options for their extension and attach the various processors and patterns which make up an extension to the
+[`Markdown`][markdown.Markdown] instance.
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Iterable, Mapping
+from ..util import parseBoolValue
+
+if TYPE_CHECKING: # pragma: no cover
+ from markdown import Markdown
+
+
class Extension:
    """ Base class for extensions to subclass. """

    config: Mapping[str, list] = {}
    """
    Default configuration for an extension.

    This attribute is to be defined in a subclass and must be of the following format:

    ``` python
    config = {
        'key': ['value', 'description']
    }
    ```

    Note that [`setConfig`][markdown.extensions.Extension.setConfig] will raise a [`KeyError`][]
    if a default is not set for each option.
    """

    def __init__(self, **kwargs):
        """ Initiate Extension and set up configs. """
        self.setConfigs(kwargs)

    def getConfig(self, key: str, default: Any = '') -> Any:
        """
        Return a single configuration option value.

        Arguments:
            key: The configuration option name.
            default: Default value to return if key is not set.

        Returns:
            Value of stored configuration option.
        """
        if key in self.config:
            return self.config[key][0]
        else:
            return default

    def getConfigs(self) -> dict[str, Any]:
        """
        Return all configuration options.

        Returns:
            All configuration options.
        """
        return {key: self.getConfig(key) for key in self.config.keys()}

    def getConfigInfo(self) -> list[tuple[str, str]]:
        """
        Return descriptions of all configuration options.

        Returns:
            All descriptions of configuration options.
        """
        return [(key, self.config[key][1]) for key in self.config.keys()]

    def setConfig(self, key: str, value: Any) -> None:
        """
        Set a configuration option.

        If the corresponding default value set in [`config`][markdown.extensions.Extension.config]
        is a `bool` value or `None`, then `value` is passed through
        [`parseBoolValue`][markdown.util.parseBoolValue] before being stored.

        Arguments:
            key: Name of configuration option to set.
            value: Value to assign to option.

        Raises:
            KeyError: If `key` is not known.
        """
        # Coerce to `bool` only when the declared default is a `bool`; a `None`
        # default additionally lets `None` survive the round-trip.
        if isinstance(self.config[key][0], bool):
            value = parseBoolValue(value)
        if self.config[key][0] is None:
            value = parseBoolValue(value, preserve_none=True)
        self.config[key][0] = value

    def setConfigs(self, items: Mapping[str, Any] | Iterable[tuple[str, Any]]) -> None:
        """
        Loop through a collection of configuration options, passing each to
        [`setConfig`][markdown.extensions.Extension.setConfig].

        Arguments:
            items: Collection of configuration options.

        Raises:
            KeyError: for any unknown key.
        """
        if hasattr(items, 'items'):
            # it's a dict
            items = items.items()
        for key, value in items:
            self.setConfig(key, value)

    def extendMarkdown(self, md: Markdown) -> None:
        """
        Add the various processors and patterns to the Markdown Instance.

        This method must be overridden by every extension.

        Arguments:
            md: The Markdown instance.

        """
        # Fix: the two adjacent literals previously concatenated to
        # `"extendMarkdown"method.` -- a separating space is required.
        raise NotImplementedError(
            'Extension "%s.%s" must define an "extendMarkdown" '
            'method.' % (self.__class__.__module__, self.__class__.__name__)
        )
diff --git a/.venv/lib/python3.12/site-packages/markdown/extensions/abbr.py b/.venv/lib/python3.12/site-packages/markdown/extensions/abbr.py
new file mode 100644
index 00000000..693c3bba
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/markdown/extensions/abbr.py
@@ -0,0 +1,185 @@
+# Abbreviation Extension for Python-Markdown
+# ==========================================
+
+# This extension adds abbreviation handling to Python-Markdown.
+
+# See https://Python-Markdown.github.io/extensions/abbreviations
+# for documentation.
+
+# Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/)
+# and [Seemant Kulleen](http://www.kulleen.org/)
+
+# All changes Copyright 2008-2014 The Python Markdown Project
+
+# License: [BSD](https://opensource.org/licenses/bsd-license.php)
+
+"""
+This extension adds abbreviation handling to Python-Markdown.
+
+See the [documentation](https://Python-Markdown.github.io/extensions/abbreviations)
+for details.
+"""
+
+from __future__ import annotations
+
+from . import Extension
+from ..blockprocessors import BlockProcessor
+from ..inlinepatterns import InlineProcessor
+from ..treeprocessors import Treeprocessor
+from ..util import AtomicString, deprecated
+from typing import TYPE_CHECKING
+import re
+import xml.etree.ElementTree as etree
+
+if TYPE_CHECKING: # pragma: no cover
+ from .. import Markdown
+ from ..blockparsers import BlockParser
+
+
class AbbrExtension(Extension):
    """ Abbreviation Extension for Python-Markdown. """

    def __init__(self, **kwargs):
        """ Initiate Extension and set up configs. """
        self.config = {
            'glossary': [
                {},
                'A dictionary where the `key` is the abbreviation and the `value` is the definition. '
                "Default: `{}`"
            ],
        }
        """ Default configuration options. """
        super().__init__(**kwargs)
        # Abbreviations harvested from the document, seeded from the glossary.
        self.abbrs = {}
        # User-supplied abbreviations that survive `reset()` between conversions.
        self.glossary = {}

    def reset(self):
        """ Clear all previously defined abbreviations. """
        self.abbrs.clear()
        if (self.glossary):
            self.abbrs.update(self.glossary)

    def reset_glossary(self):
        """ Clear all abbreviations from the glossary. """
        self.glossary.clear()

    def load_glossary(self, dictionary: dict[str, str]):
        """Adds `dictionary` to our glossary. Any abbreviations that already exist will be overwritten."""
        if dictionary:
            # NOTE(review): with this merge order, keys already present in
            # `self.glossary` take precedence over `dictionary`, which appears
            # to contradict the docstring -- confirm intended precedence.
            self.glossary = {**dictionary, **self.glossary}

    def extendMarkdown(self, md):
        """ Insert `AbbrTreeprocessor` and `AbbrBlockprocessor`. """
        if (self.config['glossary'][0]):
            self.load_glossary(self.config['glossary'][0])
        self.abbrs.update(self.glossary)
        md.registerExtension(self)
        # The tree processor replaces matched text late (priority 7); the block
        # processor collects `*[ABBR]: def` references early (priority 16).
        md.treeprocessors.register(AbbrTreeprocessor(md, self.abbrs), 'abbr', 7)
        md.parser.blockprocessors.register(AbbrBlockprocessor(md.parser, self.abbrs), 'abbr', 16)
+
+
class AbbrTreeprocessor(Treeprocessor):
    """ Replace abbreviation text with `<abbr>` elements. """

    def __init__(self, md: Markdown | None = None, abbrs: dict | None = None):
        # Shared, mutable mapping of abbreviation -> definition; the block
        # processor mutates this same dict as references are collected.
        self.abbrs: dict = abbrs if abbrs is not None else {}
        # Compiled alternation of all known abbreviations; built in `run()`.
        self.RE: re.Pattern | None = None
        super().__init__(md)

    def iter_element(self, el: etree.Element, parent: etree.Element | None = None) -> None:
        ''' Recursively iterate over elements, run regex on text and wrap matches in `abbr` tags. '''
        # Children are visited in reverse so insertions below do not shift the
        # positions of elements not yet processed.
        for child in reversed(el):
            self.iter_element(child, el)
        if text := el.text:
            # Matches are processed right-to-left so earlier match offsets in
            # `text` remain valid as the string is truncated.
            for m in reversed(list(self.RE.finditer(text))):
                if self.abbrs[m.group(0)]:
                    abbr = etree.Element('abbr', {'title': self.abbrs[m.group(0)]})
                    abbr.text = AtomicString(m.group(0))
                    abbr.tail = text[m.end():]
                    el.insert(0, abbr)
                    text = text[:m.start()]
            el.text = text
        if parent is not None and el.tail:
            # Same right-to-left strategy for the tail text, inserting new
            # siblings immediately after `el` in the parent.
            # NOTE(review): unlike the text branch above, this branch has no
            # truthiness guard on the definition -- confirm whether empty
            # definitions should also be skipped here.
            tail = el.tail
            index = list(parent).index(el) + 1
            for m in reversed(list(self.RE.finditer(tail))):
                abbr = etree.Element('abbr', {'title': self.abbrs[m.group(0)]})
                abbr.text = AtomicString(m.group(0))
                abbr.tail = tail[m.end():]
                parent.insert(index, abbr)
                tail = tail[:m.start()]
            el.tail = tail

    def run(self, root: etree.Element) -> etree.Element | None:
        ''' Step through tree to find known abbreviations. '''
        if not self.abbrs:
            # No abbreviations defined. Skip running processor.
            return
        # Build and compile regex. Longest keys first so a longer abbreviation
        # wins over any abbreviation that is a prefix of it.
        abbr_list = list(self.abbrs.keys())
        abbr_list.sort(key=len, reverse=True)
        self.RE = re.compile(f"\\b(?:{ '|'.join(re.escape(key) for key in abbr_list) })\\b")
        # Step through tree and modify on matches
        self.iter_element(root)
+
+
class AbbrBlockprocessor(BlockProcessor):
    """ Parse text for abbreviation references. """

    # Matches `*[ABBR]: definition`; the definition may be wrapped onto the
    # following line.
    RE = re.compile(r'^[*]\[(?P<abbr>[^\\]*?)\][ ]?:[ ]*\n?[ ]*(?P<title>.*)$', re.MULTILINE)

    def __init__(self, parser: BlockParser, abbrs: dict):
        # Shared abbreviation -> definition mapping, mutated in place so the
        # tree processor sees every reference collected here.
        self.abbrs: dict = abbrs
        super().__init__(parser)

    def test(self, parent: etree.Element, block: str) -> bool:
        # Always claim the block; `run()` restores it untouched on no match.
        return True

    def run(self, parent: etree.Element, blocks: list[str]) -> bool:
        """
        Find and remove all abbreviation references from the text.
        Each reference is added to the abbreviation collection.

        """
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            abbr = m.group('abbr').strip()
            title = m.group('title').strip()
            if title and abbr:
                if title == "''" or title == '""':
                    # An explicitly empty definition removes the abbreviation.
                    # Use a default so removing an abbreviation that was never
                    # defined is a no-op instead of raising `KeyError`.
                    self.abbrs.pop(abbr, None)
                else:
                    self.abbrs[abbr] = title
            if block[m.end():].strip():
                # Add any content after match back to blocks as separate block
                blocks.insert(0, block[m.end():].lstrip('\n'))
            if block[:m.start()].strip():
                # Add any content before match back to blocks as separate block
                blocks.insert(0, block[:m.start()].rstrip('\n'))
            return True
        # No match. Restore block.
        blocks.insert(0, block)
        return False
+
+
+AbbrPreprocessor = deprecated("This class has been renamed to `AbbrBlockprocessor`.")(AbbrBlockprocessor)
+
+
@deprecated("This class will be removed in the future; use `AbbrTreeprocessor` instead.")
class AbbrInlineProcessor(InlineProcessor):
    """ Abbreviation inline pattern. """

    def __init__(self, pattern: str, title: str):
        super().__init__(pattern)
        # Definition text used as the `title` attribute of generated elements.
        self.title = title

    def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element, int, int]:
        """ Wrap the matched abbreviation in an `<abbr title=...>` element. """
        element = etree.Element('abbr')
        element.set('title', self.title)
        element.text = AtomicString(m.group('abbr'))
        return element, m.start(0), m.end(0)
+
+
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an `AbbrExtension` configured with `kwargs`. """
    return AbbrExtension(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/markdown/extensions/admonition.py b/.venv/lib/python3.12/site-packages/markdown/extensions/admonition.py
new file mode 100644
index 00000000..01c2316d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/markdown/extensions/admonition.py
@@ -0,0 +1,183 @@
+# Admonition extension for Python-Markdown
+# ========================================
+
+# Adds rST-style admonitions. Inspired by [rST][] feature with the same name.
+
+# [rST]: http://docutils.sourceforge.net/docs/ref/rst/directives.html#specific-admonitions
+
+# See https://Python-Markdown.github.io/extensions/admonition
+# for documentation.
+
+# Original code Copyright [Tiago Serafim](https://www.tiagoserafim.com/).
+
+# All changes Copyright The Python Markdown Project
+
+# License: [BSD](https://opensource.org/licenses/bsd-license.php)
+
+
+"""
+Adds rST-style admonitions. Inspired by [rST][] feature with the same name.
+
+[rST]: http://docutils.sourceforge.net/docs/ref/rst/directives.html#specific-admonitions
+
+See the [documentation](https://Python-Markdown.github.io/extensions/admonition)
+for details.
+"""
+
+from __future__ import annotations
+
+from . import Extension
+from ..blockprocessors import BlockProcessor
+import xml.etree.ElementTree as etree
+import re
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING: # pragma: no cover
+ from markdown import blockparser
+
+
class AdmonitionExtension(Extension):
    """ Admonition extension for Python-Markdown. """

    def extendMarkdown(self, md):
        """ Register the admonition block processor on the Markdown instance. """
        md.registerExtension(self)
        processor = AdmonitionProcessor(md.parser)
        md.parser.blockprocessors.register(processor, 'admonition', 105)
+
+
class AdmonitionProcessor(BlockProcessor):
    """ Parse `!!! type "title"` admonition blocks and their indented content. """

    CLASSNAME = 'admonition'
    CLASSNAME_TITLE = 'admonition-title'
    # Matches `!!! type1 type2 "optional title"` on a line of its own.
    RE = re.compile(r'(?:^|\n)!!! ?([\w\-]+(?: +[\w\-]+)*)(?: +"(.*?)")? *(?:\n|$)')
    RE_SPACES = re.compile(' +')

    def __init__(self, parser: blockparser.BlockParser):
        """Initialization."""

        super().__init__(parser)

        # Sibling admonition found by `test()`, cached for the matching `run()`.
        self.current_sibling: etree.Element | None = None
        # Indent (in spaces) at which that sibling's content was detabbed.
        self.content_indent = 0

    def parse_content(self, parent: etree.Element, block: str) -> tuple[etree.Element | None, str, str]:
        """Get sibling admonition.

        Retrieve the appropriate sibling element. This can get tricky when
        dealing with lists.

        """

        old_block = block
        the_rest = ''

        # We already acquired the block via test
        if self.current_sibling is not None:
            sibling = self.current_sibling
            block, the_rest = self.detab(block, self.content_indent)
            self.current_sibling = None
            self.content_indent = 0
            return sibling, block, the_rest

        sibling = self.lastChild(parent)

        if sibling is None or sibling.tag != 'div' or sibling.get('class', '').find(self.CLASSNAME) == -1:
            sibling = None
        else:
            # If the last child is a list and the content is sufficiently indented
            # to be under it, then the content's sibling is in the list.
            last_child = self.lastChild(sibling)
            indent = 0
            while last_child is not None:
                if (
                    sibling is not None and block.startswith(' ' * self.tab_length * 2) and
                    last_child is not None and last_child.tag in ('ul', 'ol', 'dl')
                ):

                    # The expectation is that we'll find an `<li>` or `<dt>`.
                    # We should get its last child as well.
                    sibling = self.lastChild(last_child)
                    last_child = self.lastChild(sibling) if sibling is not None else None

                    # Context has been lost at this point, so we must adjust the
                    # text's indentation level so it will be evaluated correctly
                    # under the list.
                    block = block[self.tab_length:]
                    indent += self.tab_length
                else:
                    last_child = None

            if not block.startswith(' ' * self.tab_length):
                sibling = None

            if sibling is not None:
                indent += self.tab_length
                block, the_rest = self.detab(old_block, indent)
                self.current_sibling = sibling
                self.content_indent = indent

        return sibling, block, the_rest

    def test(self, parent: etree.Element, block: str) -> bool:
        # Either this block starts a new admonition, or it is indented content
        # continuing the admonition found by `parse_content` (which caches the
        # sibling for the subsequent `run()` call).
        if self.RE.search(block):
            return True
        else:
            return self.parse_content(parent, block)[0] is not None

    def run(self, parent: etree.Element, blocks: list[str]) -> None:
        """ Build (or extend) the admonition `<div>` and parse its content. """
        block = blocks.pop(0)
        m = self.RE.search(block)

        if m:
            if m.start() > 0:
                self.parser.parseBlocks(parent, [block[:m.start()]])
            block = block[m.end():]  # removes the first line
            block, theRest = self.detab(block)
        else:
            sibling, block, theRest = self.parse_content(parent, block)

        if m:
            klass, title = self.get_class_and_title(m)
            div = etree.SubElement(parent, 'div')
            div.set('class', '{} {}'.format(self.CLASSNAME, klass))
            if title:
                p = etree.SubElement(div, 'p')
                p.text = title
                p.set('class', self.CLASSNAME_TITLE)
        else:
            # The sibling is a list item; its bare text content must be moved
            # into a `<p>` before more children are appended to it.
            if sibling.tag in ('li', 'dd') and sibling.text:
                text = sibling.text
                sibling.text = ''
                p = etree.SubElement(sibling, 'p')
                p.text = text

            div = sibling

        self.parser.parseChunk(div, block)

        if theRest:
            # This block contained unindented line(s) after the first indented
            # line. Insert these lines as the first block of the master blocks
            # list for future processing.
            blocks.insert(0, theRest)

    def get_class_and_title(self, match: re.Match[str]) -> tuple[str, str | None]:
        """ Extract the CSS class(es) and the (possibly implied) title. """
        klass, title = match.group(1).lower(), match.group(2)
        klass = self.RE_SPACES.sub(' ', klass)
        if title is None:
            # no title was provided, use the capitalized class name as title
            # e.g.: `!!! note` will render
            # `<p class="admonition-title">Note</p>`
            title = klass.split(' ', 1)[0].capitalize()
        elif title == '':
            # an explicit blank title should not be rendered
            # e.g.: `!!! warning ""` will *not* render `p` with a title
            title = None
        return klass, title
+
+
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an `AdmonitionExtension` configured with `kwargs`. """
    return AdmonitionExtension(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/markdown/extensions/attr_list.py b/.venv/lib/python3.12/site-packages/markdown/extensions/attr_list.py
new file mode 100644
index 00000000..9206d11e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/markdown/extensions/attr_list.py
@@ -0,0 +1,203 @@
+# Attribute List Extension for Python-Markdown
+# ============================================
+
+# Adds attribute list syntax. Inspired by
+# [Maruku](http://maruku.rubyforge.org/proposal.html#attribute_lists)'s
+# feature of the same name.
+
+# See https://Python-Markdown.github.io/extensions/attr_list
+# for documentation.
+
+# Original code Copyright 2011 [Waylan Limberg](http://achinghead.com/).
+
+# All changes Copyright 2011-2014 The Python Markdown Project
+
+# License: [BSD](https://opensource.org/licenses/bsd-license.php)
+
+"""
+Adds attribute list syntax. Inspired by
+[Maruku](http://maruku.rubyforge.org/proposal.html#attribute_lists)'s
+feature of the same name.
+
+See the [documentation](https://Python-Markdown.github.io/extensions/attr_list)
+for details.
+"""
+
+from __future__ import annotations
+from typing import TYPE_CHECKING
+
+from . import Extension
+from ..treeprocessors import Treeprocessor
+import re
+
+if TYPE_CHECKING: # pragma: no cover
+ from xml.etree.ElementTree import Element
+
+
+def _handle_double_quote(s, t):
+ k, v = t.split('=', 1)
+ return k, v.strip('"')
+
+
+def _handle_single_quote(s, t):
+ k, v = t.split('=', 1)
+ return k, v.strip("'")
+
+
+def _handle_key_value(s, t):
+ return t.split('=', 1)
+
+
+def _handle_word(s, t):
+ if t.startswith('.'):
+ return '.', t[1:]
+ if t.startswith('#'):
+ return 'id', t[1:]
+ return t, t
+
+
# Token scanner for an attribute-list body: quoted key/value pairs are tried
# first (so quoted values may contain spaces), then bare `key=value`, then
# single words (`.class`, `#id`, or a lone key); lone spaces are discarded.
_scanner = re.Scanner([
    (r'[^ =}]+=".*?"', _handle_double_quote),
    (r"[^ =}]+='.*?'", _handle_single_quote),
    (r'[^ =}]+=[^ =}]+', _handle_key_value),
    (r'[^ =}]+', _handle_word),
    (r' ', None)
])
+
+
def get_attrs_and_remainder(attrs_string: str) -> tuple[list[tuple[str, str]], str]:
    """ Parse attribute list and return a list of attribute tuples.

    Additionally, return any text that remained after a curly brace. In typical cases, its presence
    should mean that the input does not match the intended attribute list syntax.
    """
    parsed, leftover = _scanner.scan(attrs_string)
    # To keep historic behavior, discard all unparsable text prior to '}'.
    brace_pos = leftover.find('}')
    if brace_pos == -1:
        return parsed, ''
    return parsed, leftover[brace_pos:]
+
+
def get_attrs(str: str) -> list[tuple[str, str]]:  # pragma: no cover
    """ Soft-deprecated. Prefer `get_attrs_and_remainder`. """
    # NOTE: the parameter name shadows the `str` builtin; kept for
    # backward compatibility with callers passing it by keyword.
    return get_attrs_and_remainder(str)[0]
+
+
def isheader(elem: Element) -> bool:
    """ Return `True` if `elem` is an `h1`-`h6` heading element. """
    return elem.tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6')
+
+
class AttrListTreeprocessor(Treeprocessor):
    """ Find `{: ...}` attribute lists in the tree and assign them to the
    appropriate elements. """

    BASE_RE = r'\{\:?[ ]*([^\}\n ][^\n]*)[ ]*\}'
    # Attribute list at the end of a heading/def-term/table-cell's text.
    HEADER_RE = re.compile(r'[ ]+{}[ ]*$'.format(BASE_RE))
    # Attribute list on the last line of a block-level element's text.
    BLOCK_RE = re.compile(r'\n[ ]*{}[ ]*$'.format(BASE_RE))
    # Attribute list immediately following an inline element.
    INLINE_RE = re.compile(r'^{}'.format(BASE_RE))
    # Characters invalid in an XML name (NCName minus `:`); used by
    # `sanitize_name` to replace them with `_`.
    NAME_RE = re.compile(r'[^A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff'
                         r'\u0370-\u037d\u037f-\u1fff\u200c-\u200d'
                         r'\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff'
                         r'\uf900-\ufdcf\ufdf0-\ufffd'
                         r'\:\-\.0-9\u00b7\u0300-\u036f\u203f-\u2040]+')

    def run(self, doc: Element) -> None:
        for elem in doc.iter():
            if self.md.is_block_level(elem.tag):
                # Block level: check for `attrs` on last line of text
                RE = self.BLOCK_RE
                if isheader(elem) or elem.tag in ['dt', 'td', 'th']:
                    # header, def-term, or table cell: check for attributes at end of element
                    RE = self.HEADER_RE
                if len(elem) and elem.tag == 'li':
                    # special case list items. children may include a `ul` or `ol`.
                    pos = None
                    # find the `ul` or `ol` position
                    for i, child in enumerate(elem):
                        if child.tag in ['ul', 'ol']:
                            pos = i
                            break
                    if pos is None and elem[-1].tail:
                        # use tail of last child. no `ul` or `ol`.
                        m = RE.search(elem[-1].tail)
                        if m:
                            # `strict=True`: only strip the matched text when the
                            # attribute list parsed with nothing left over.
                            if not self.assign_attrs(elem, m.group(1), strict=True):
                                elem[-1].tail = elem[-1].tail[:m.start()]
                    elif pos is not None and pos > 0 and elem[pos-1].tail:
                        # use tail of last child before `ul` or `ol`
                        m = RE.search(elem[pos-1].tail)
                        if m:
                            if not self.assign_attrs(elem, m.group(1), strict=True):
                                elem[pos-1].tail = elem[pos-1].tail[:m.start()]
                    elif elem.text:
                        # use text. `ul` is first child.
                        m = RE.search(elem.text)
                        if m:
                            if not self.assign_attrs(elem, m.group(1), strict=True):
                                elem.text = elem.text[:m.start()]
                elif len(elem) and elem[-1].tail:
                    # has children. Get from tail of last child
                    m = RE.search(elem[-1].tail)
                    if m:
                        if not self.assign_attrs(elem, m.group(1), strict=True):
                            elem[-1].tail = elem[-1].tail[:m.start()]
                        if isheader(elem):
                            # clean up trailing #s
                            elem[-1].tail = elem[-1].tail.rstrip('#').rstrip()
                elif elem.text:
                    # no children. Get from text.
                    m = RE.search(elem.text)
                    if m:
                        if not self.assign_attrs(elem, m.group(1), strict=True):
                            elem.text = elem.text[:m.start()]
                        if isheader(elem):
                            # clean up trailing #s
                            elem.text = elem.text.rstrip('#').rstrip()
            else:
                # inline: check for `attrs` at start of tail
                if elem.tail:
                    m = self.INLINE_RE.match(elem.tail)
                    if m:
                        remainder = self.assign_attrs(elem, m.group(1))
                        elem.tail = elem.tail[m.end():] + remainder

    def assign_attrs(self, elem: Element, attrs_string: str, *, strict: bool = False) -> str:
        """ Assign `attrs` to element.

        If the `attrs_string` has an extra closing curly brace, the remaining text is returned.

        The `strict` argument controls whether to still assign `attrs` if there is a remaining `}`.
        """
        attrs, remainder = get_attrs_and_remainder(attrs_string)
        if strict and remainder:
            return remainder

        for k, v in attrs:
            if k == '.':
                # add to class
                cls = elem.get('class')
                if cls:
                    elem.set('class', '{} {}'.format(cls, v))
                else:
                    elem.set('class', v)
            else:
                # assign attribute `k` with `v`
                elem.set(self.sanitize_name(k), v)
        # The text that we initially over-matched will be put back.
        return remainder

    def sanitize_name(self, name: str) -> str:
        """
        Sanitize name as 'an XML Name, minus the `:`.'
        See <https://www.w3.org/TR/REC-xml-names/#NT-NCName>.
        """
        return self.NAME_RE.sub('_', name)
+
+
class AttrListExtension(Extension):
    """ Attribute List extension for Python-Markdown """

    def extendMarkdown(self, md):
        """ Register the attribute-list tree processor and this extension. """
        md.registerExtension(self)
        md.treeprocessors.register(AttrListTreeprocessor(md), 'attr_list', 8)
+
+
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an `AttrListExtension` configured with `kwargs`. """
    return AttrListExtension(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/markdown/extensions/codehilite.py b/.venv/lib/python3.12/site-packages/markdown/extensions/codehilite.py
new file mode 100644
index 00000000..92e7d8f2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/markdown/extensions/codehilite.py
@@ -0,0 +1,347 @@
+# CodeHilite Extension for Python-Markdown
+# ========================================
+
+# Adds code/syntax highlighting to standard Python-Markdown code blocks.
+
+# See https://Python-Markdown.github.io/extensions/code_hilite
+# for documentation.
+
+# Original code Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/).
+
+# All changes Copyright 2008-2014 The Python Markdown Project
+
+# License: [BSD](https://opensource.org/licenses/bsd-license.php)
+
+"""
+Adds code/syntax highlighting to standard Python-Markdown code blocks.
+
+See the [documentation](https://Python-Markdown.github.io/extensions/code_hilite)
+for details.
+"""
+
+from __future__ import annotations
+
+from . import Extension
+from ..treeprocessors import Treeprocessor
+from ..util import parseBoolValue
+from typing import TYPE_CHECKING, Callable, Any
+
+if TYPE_CHECKING: # pragma: no cover
+ import xml.etree.ElementTree as etree
+
+try: # pragma: no cover
+ from pygments import highlight
+ from pygments.lexers import get_lexer_by_name, guess_lexer
+ from pygments.formatters import get_formatter_by_name
+ from pygments.util import ClassNotFound
+ pygments = True
+except ImportError: # pragma: no cover
+ pygments = False
+
+
def parse_hl_lines(expr: str) -> list[int]:
    """Support our syntax for emphasizing certain lines of code.

    `expr` should be like '1 2' to emphasize lines 1 and 2 of a code block.
    Returns a list of integers, the line numbers to emphasize.
    """
    if not expr:
        return []
    try:
        return [int(token) for token in expr.split()]
    except ValueError:  # pragma: no cover
        # Any non-numeric token invalidates the whole list.
        return []
+
+
+# ------------------ The Main CodeHilite Class ----------------------
+class CodeHilite:
+ """
+ Determine language of source code, and pass it on to the Pygments highlighter.
+
+ Usage:
+
+ ```python
+ code = CodeHilite(src=some_code, lang='python')
+ html = code.hilite()
+ ```
+
+ Arguments:
+ src: Source string or any object with a `.readline` attribute.
+
+ Keyword arguments:
+ lang (str): String name of Pygments lexer to use for highlighting. Default: `None`.
+ guess_lang (bool): Auto-detect which lexer to use.
+ Ignored if `lang` is set to a valid value. Default: `True`.
+ use_pygments (bool): Pass code to Pygments for code highlighting. If `False`, the code is
+ instead wrapped for highlighting by a JavaScript library. Default: `True`.
+ pygments_formatter (str): The name of a Pygments formatter or a formatter class used for
+ highlighting the code blocks. Default: `html`.
+ linenums (bool): An alias to Pygments `linenos` formatter option. Default: `None`.
+ css_class (str): An alias to Pygments `cssclass` formatter option. Default: 'codehilite'.
+ lang_prefix (str): Prefix prepended to the language. Default: "language-".
+
+ Other Options:
+
+ Any other options are accepted and passed on to the lexer and formatter. Therefore,
+ valid options include any options which are accepted by the `html` formatter or
+ whichever lexer the code's language uses. Note that most lexers do not have any
+ options. However, a few have very useful options, such as PHP's `startinline` option.
+ Any invalid options are ignored without error.
+
+ * **Formatter options**: <https://pygments.org/docs/formatters/#HtmlFormatter>
+ * **Lexer Options**: <https://pygments.org/docs/lexers/>
+
+ Additionally, when Pygments is enabled, the code's language is passed to the
+ formatter as an extra option `lang_str`, whose value being `{lang_prefix}{lang}`.
+ This option has no effect to the Pygments' builtin formatters.
+
+ Advanced Usage:
+
+ ```python
+ code = CodeHilite(
+ src = some_code,
+ lang = 'php',
+ startinline = True, # Lexer option. Snippet does not start with `<?php`.
+ linenostart = 42, # Formatter option. Snippet starts on line 42.
+ hl_lines = [45, 49, 50], # Formatter option. Highlight lines 45, 49, and 50.
+ linenos = 'inline' # Formatter option. Avoid alignment problems.
+ )
+ html = code.hilite()
+ ```
+
+ """
+
+ def __init__(self, src: str, **options):
+ self.src = src
+ self.lang: str | None = options.pop('lang', None)
+ self.guess_lang: bool = options.pop('guess_lang', True)
+ self.use_pygments: bool = options.pop('use_pygments', True)
+ self.lang_prefix: str = options.pop('lang_prefix', 'language-')
+ self.pygments_formatter: str | Callable = options.pop('pygments_formatter', 'html')
+
+ if 'linenos' not in options:
+ options['linenos'] = options.pop('linenums', None)
+ if 'cssclass' not in options:
+ options['cssclass'] = options.pop('css_class', 'codehilite')
+ if 'wrapcode' not in options:
+ # Override Pygments default
+ options['wrapcode'] = True
+ # Disallow use of `full` option
+ options['full'] = False
+
+ self.options = options
+
    def hilite(self, shebang: bool = True) -> str:
        """
        Pass code to the [Pygments](https://pygments.org/) highlighter with
        optional line numbers. The output should then be styled with CSS to
        your liking. No styles are applied by default - only styling hooks
        (i.e.: `<span class="k">`).

        Arguments:
            shebang: When `True`, and `self.lang` is not already set, examine
                the first source line for a shebang/colon header to detect the
                language (see `_parseHeader`).

        Returns:
            A string of html.
        """

        self.src = self.src.strip('\n')

        if self.lang is None and shebang:
            self._parseHeader()

        if pygments and self.use_pygments:
            try:
                lexer = get_lexer_by_name(self.lang, **self.options)
            except ValueError:
                # No lexer for the given language name. Either guess one from
                # the source, or fall back to the plain-text lexer.
                try:
                    if self.guess_lang:
                        lexer = guess_lexer(self.src, **self.options)
                    else:
                        lexer = get_lexer_by_name('text', **self.options)
                except ValueError:  # pragma: no cover
                    lexer = get_lexer_by_name('text', **self.options)
            if not self.lang:
                # Use the guessed lexer's language instead
                self.lang = lexer.aliases[0]
            lang_str = f'{self.lang_prefix}{self.lang}'
            if isinstance(self.pygments_formatter, str):
                try:
                    formatter = get_formatter_by_name(self.pygments_formatter, **self.options)
                except ClassNotFound:
                    # Unknown formatter name: fall back to the HTML formatter.
                    formatter = get_formatter_by_name('html', **self.options)
            else:
                # A formatter class/callable was supplied directly; it also
                # receives the prefixed language string.
                formatter = self.pygments_formatter(lang_str=lang_str, **self.options)
            return highlight(self.src, lexer, formatter)
        else:
            # just escape and build markup usable by JavaScript highlighting libraries
            txt = self.src.replace('&', '&amp;')
            txt = txt.replace('<', '&lt;')
            txt = txt.replace('>', '&gt;')
            txt = txt.replace('"', '&quot;')
            classes = []
            if self.lang:
                classes.append('{}{}'.format(self.lang_prefix, self.lang))
            if self.options['linenos']:
                classes.append('linenums')
            class_str = ''
            if classes:
                class_str = ' class="{}"'.format(' '.join(classes))
            return '<pre class="{}"><code{}>{}\n</code></pre>\n'.format(
                self.options['cssclass'],
                class_str,
                txt
            )
+
    def _parseHeader(self) -> None:
        """
        Determines language of a code block from shebang line and whether the
        said line should be removed or left in place. If the shebang line
        contains a path (even a single /) then it is assumed to be a real
        shebang line and left alone. However, if no path is given
        (e.g.: `#!python` or `:::python`) then it is assumed to be a mock shebang
        for language identification of a code fragment and removed from the
        code block prior to processing for code highlighting. When a mock
        shebang (e.g.: `#!python`) is found, line numbering is turned on. When
        colons are found in place of a shebang (e.g.: `:::python`), line
        numbering is left in the current state - off by default.

        Also parses optional list of highlight lines, like:

            :::python hl_lines="1 3"

        Mutates `self.lang`, `self.src` and `self.options` in place.
        """

        import re

        # split text into lines
        lines = self.src.split("\n")
        # pull first line to examine
        fl = lines.pop(0)

        c = re.compile(r'''
            (?:(?:^::+)|(?P<shebang>^[#]!))  # Shebang or 2 or more colons
            (?P<path>(?:/\w+)*[/ ])?         # Zero or 1 path
            (?P<lang>[\w#.+-]*)              # The language
            \s*                              # Arbitrary whitespace
            # Optional highlight lines, single- or double-quote-delimited
            (hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot))?
            ''', re.VERBOSE)
        # search first line for shebang
        m = c.search(fl)
        if m:
            # we have a match
            try:
                self.lang = m.group('lang').lower()
            except IndexError:  # pragma: no cover
                self.lang = None
            if m.group('path'):
                # path exists - restore first line
                lines.insert(0, fl)
            if self.options['linenos'] is None and m.group('shebang'):
                # Overridable and Shebang exists - use line numbers
                self.options['linenos'] = True

            # `hl_lines` group may be None when absent; `parse_hl_lines` is
            # defined elsewhere in this module and is expected to handle that.
            self.options['hl_lines'] = parse_hl_lines(m.group('hl_lines'))
        else:
            # No match: the first line is ordinary code - restore it.
            lines.insert(0, fl)

        self.src = "\n".join(lines).strip("\n")
+
+
+# ------------------ The Markdown Extension -------------------------------
+
+
class HiliteTreeprocessor(Treeprocessor):
    """ Highlight source code in code blocks. """

    # Populated by `CodeHiliteExtension.extendMarkdown` with the resolved
    # extension configuration.
    config: dict[str, Any]

    def code_unescape(self, text: str) -> str:
        """Undo the basic HTML escaping applied to code block text."""
        # '&amp;' must be handled last so already-unescaped '<'/'>' text
        # containing '&amp;lt;' is not double-unescaped.
        for entity, char in (("&lt;", "<"), ("&gt;", ">"), ("&amp;", "&")):
            text = text.replace(entity, char)
        return text

    def run(self, root: etree.Element) -> None:
        """ Find code blocks and store in `htmlStash`. """
        for pre in root.iter('pre'):
            # Only handle the canonical `<pre><code>...</code></pre>` shape.
            if len(pre) != 1 or pre[0].tag != 'code':
                continue
            raw = pre[0].text
            if raw is None:
                continue
            conf = self.config.copy()
            highlighter = CodeHilite(
                self.code_unescape(raw),
                tab_length=self.md.tab_length,
                style=conf.pop('pygments_style', 'default'),
                **conf
            )
            placeholder = self.md.htmlStash.store(highlighter.hilite())
            # Replace the code block with a `p` element carrying the stash
            # placeholder; the `p` is dropped when raw HTML is re-inserted.
            pre.clear()
            pre.tag = 'p'
            pre.text = placeholder
+
+
class CodeHiliteExtension(Extension):
    """ Add source code highlighting to markdown code blocks. """

    def __init__(self, **kwargs):
        # define default configs
        self.config = {
            'linenums': [
                None, "Use lines numbers. True|table|inline=yes, False=no, None=auto. Default: `None`."
            ],
            'guess_lang': [
                True, "Automatic language detection - Default: `True`."
            ],
            'css_class': [
                "codehilite", "Set class name for wrapper <div> - Default: `codehilite`."
            ],
            'pygments_style': [
                'default', 'Pygments HTML Formatter Style (Colorscheme). Default: `default`.'
            ],
            'noclasses': [
                False, 'Use inline styles instead of CSS classes - Default `False`.'
            ],
            'use_pygments': [
                True, 'Highlight code blocks with pygments. Disable if using a JavaScript library. Default: `True`.'
            ],
            'lang_prefix': [
                'language-', 'Prefix prepended to the language when `use_pygments` is false. Default: `language-`.'
            ],
            'pygments_formatter': [
                'html', 'Use a specific formatter for Pygments highlighting. Default: `html`.'
            ],
        }
        """ Default configuration options. """

        for key, value in kwargs.items():
            if key in self.config:
                self.setConfig(key, value)
                continue
            # Unknown keyword: store it so it is passed through to the
            # highlighter. String booleans ("True"/"False"/"None") are
            # normalized first; anything else is kept verbatim.
            if isinstance(value, str):
                try:
                    value = parseBoolValue(value, preserve_none=True)
                except ValueError:
                    pass  # Not a boolean string. Use as-is.
            self.config[key] = [value, '']

    def extendMarkdown(self, md):
        """ Register `HiliteTreeprocessor` with the Markdown instance. """
        processor = HiliteTreeprocessor(md)
        processor.config = self.getConfigs()
        md.treeprocessors.register(processor, 'hilite', 30)

        md.registerExtension(self)
+
+
def makeExtension(**kwargs):  # pragma: no cover
    """ Build and return a `CodeHiliteExtension` configured with `kwargs`. """
    return CodeHiliteExtension(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/markdown/extensions/def_list.py b/.venv/lib/python3.12/site-packages/markdown/extensions/def_list.py
new file mode 100644
index 00000000..5324bf19
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/markdown/extensions/def_list.py
@@ -0,0 +1,119 @@
+# Definition List Extension for Python-Markdown
+# =============================================
+
+# Adds parsing of Definition Lists to Python-Markdown.
+
+# See https://Python-Markdown.github.io/extensions/definition_lists
+# for documentation.
+
+# Original code Copyright 2008 [Waylan Limberg](http://achinghead.com)
+
+# All changes Copyright 2008-2014 The Python Markdown Project
+
+# License: [BSD](https://opensource.org/licenses/bsd-license.php)
+
+"""
+Adds parsing of Definition Lists to Python-Markdown.
+
+See the [documentation](https://Python-Markdown.github.io/extensions/definition_lists)
+for details.
+"""
+
+from __future__ import annotations
+
+from . import Extension
+from ..blockprocessors import BlockProcessor, ListIndentProcessor
+import xml.etree.ElementTree as etree
+import re
+
+
class DefListProcessor(BlockProcessor):
    """ Process Definition Lists. """

    # A definition line: up to 3 spaces of indent, a colon, 1-3 spaces,
    # then the definition text.
    RE = re.compile(r'(^|\n)[ ]{0,3}:[ ]{1,3}(.*?)(\n|$)')
    # Text following the definition that is NOT indented (and not another
    # colon line) terminates the definition.
    NO_INDENT_RE = re.compile(r'^[ ]{0,3}[^ :]')

    def test(self, parent: etree.Element, block: str) -> bool:
        return bool(self.RE.search(block))

    def run(self, parent: etree.Element, blocks: list[str]) -> bool | None:
        """ Parse a definition list item; returns `False` when the block is
        not actually a definition item (so other processors get a chance). """

        raw_block = blocks.pop(0)
        m = self.RE.search(raw_block)
        # Everything before the first `: definition` line is the term list.
        terms = [term.strip() for term in
                 raw_block[:m.start()].split('\n') if term.strip()]
        block = raw_block[m.end():]
        no_indent = self.NO_INDENT_RE.match(block)
        if no_indent:
            d, theRest = (block, None)
        else:
            d, theRest = self.detab(block)
        if d:
            d = '{}\n{}'.format(m.group(2), d)
        else:
            d = m.group(2)
        sibling = self.lastChild(parent)
        if not terms and sibling is None:
            # This is not a definition item. Most likely a paragraph that
            # starts with a colon at the beginning of a document or list.
            blocks.insert(0, raw_block)
            return False
        if not terms and sibling.tag == 'p':
            # The previous paragraph contains the terms
            state = 'looselist'
            # NOTE(review): assumes the sibling `<p>` has text — confirm a
            # text-less paragraph cannot reach this branch.
            terms = sibling.text.split('\n')
            parent.remove(sibling)
            # Acquire new sibling
            sibling = self.lastChild(parent)
        else:
            state = 'list'

        if sibling is not None and sibling.tag == 'dl':
            # This is another item on an existing list
            dl = sibling
            if not terms and len(dl) and dl[-1].tag == 'dd' and len(dl[-1]):
                state = 'looselist'
        else:
            # This is a new list
            dl = etree.SubElement(parent, 'dl')
        # Add terms
        for term in terms:
            dt = etree.SubElement(dl, 'dt')
            dt.text = term
        # Add definition
        self.parser.state.set(state)
        dd = etree.SubElement(dl, 'dd')
        self.parser.parseBlocks(dd, [d])
        self.parser.state.reset()

        if theRest:
            blocks.insert(0, theRest)
        # No explicit return on success; only the "not a definition item"
        # path above returns False.
+
+
class DefListIndentProcessor(ListIndentProcessor):
    """ Process indented children of definition list items. """

    # Definition lists need to be aware of all list types
    ITEM_TYPES = ['dd', 'li']
    """ Include `dd` in list item types. """
    LIST_TYPES = ['dl', 'ol', 'ul']
    """ Include `dl` in list types. """

    def create_item(self, parent: etree.Element, block: str) -> None:
        """ Create a new `dd` and parse the block with it as the parent. """
        item = etree.SubElement(parent, 'dd')
        self.parser.parseBlocks(item, [block])
+
+
class DefListExtension(Extension):
    """ Add definition lists to Markdown. """

    def extendMarkdown(self, md):
        """ Register the definition-list processors with `BlockParser`. """
        parser = md.parser
        parser.blockprocessors.register(DefListIndentProcessor(parser), 'defindent', 85)
        parser.blockprocessors.register(DefListProcessor(parser), 'deflist', 25)
+
+
def makeExtension(**kwargs):  # pragma: no cover
    """ Build and return a `DefListExtension` configured with `kwargs`. """
    return DefListExtension(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/markdown/extensions/extra.py b/.venv/lib/python3.12/site-packages/markdown/extensions/extra.py
new file mode 100644
index 00000000..74ebc192
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/markdown/extensions/extra.py
@@ -0,0 +1,66 @@
+# Python-Markdown Extra Extension
+# ===============================
+
+# A compilation of various Python-Markdown extensions that imitates
+# [PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/).
+
+# See https://Python-Markdown.github.io/extensions/extra
+# for documentation.
+
+# Copyright The Python Markdown Project
+
+# License: [BSD](https://opensource.org/licenses/bsd-license.php)
+
+"""
+A compilation of various Python-Markdown extensions that imitates
+[PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/).
+
+Note that each of the individual extensions still need to be available
+on your `PYTHONPATH`. This extension simply wraps them all up as a
+convenience so that only one extension needs to be listed when
+initiating Markdown. See the documentation for each individual
+extension for specifics about that extension.
+
+There may be additional extensions that are distributed with
+Python-Markdown that are not included here in Extra. Those extensions
+are not part of PHP Markdown Extra, and therefore, not part of
+Python-Markdown Extra. If you really would like Extra to include
+additional extensions, we suggest creating your own clone of Extra
+under a different name. You could also edit the `extensions` global
+variable defined below, but be aware that such changes may be lost
+when you upgrade to any future version of Python-Markdown.
+
+See the [documentation](https://Python-Markdown.github.io/extensions/extra)
+for details.
+"""
+
+from __future__ import annotations
+
+from . import Extension
+
# Extension names forwarded to `Markdown.registerExtensions` by
# `ExtraExtension.extendMarkdown` below.
extensions = [
    'fenced_code',
    'footnotes',
    'attr_list',
    'def_list',
    'tables',
    'abbr',
    'md_in_html'
]
""" The list of included extensions. """
+
+
class ExtraExtension(Extension):
    """ Add various extensions to Markdown class."""

    def __init__(self, **kwargs):
        """ `config` is a dumb holder which gets passed to the actual extension later. """
        # The keyword arguments are not validated here; they are forwarded
        # wholesale to the wrapped extensions on registration.
        self.config = kwargs

    def extendMarkdown(self, md):
        """ Register extension instances. """
        md.registerExtensions(extensions, self.config)
+
+
def makeExtension(**kwargs):  # pragma: no cover
    """ Build and return an `ExtraExtension` configured with `kwargs`. """
    return ExtraExtension(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/markdown/extensions/fenced_code.py b/.venv/lib/python3.12/site-packages/markdown/extensions/fenced_code.py
new file mode 100644
index 00000000..bae7330a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/markdown/extensions/fenced_code.py
@@ -0,0 +1,193 @@
+# Fenced Code Extension for Python Markdown
+# =========================================
+
+# This extension adds Fenced Code Blocks to Python-Markdown.
+
+# See https://Python-Markdown.github.io/extensions/fenced_code_blocks
+# for documentation.
+
+# Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
+
+# All changes Copyright 2008-2014 The Python Markdown Project
+
+# License: [BSD](https://opensource.org/licenses/bsd-license.php)
+
+"""
+This extension adds Fenced Code Blocks to Python-Markdown.
+
+See the [documentation](https://Python-Markdown.github.io/extensions/fenced_code_blocks)
+for details.
+"""
+
+from __future__ import annotations
+
+from textwrap import dedent
+from . import Extension
+from ..preprocessors import Preprocessor
+from .codehilite import CodeHilite, CodeHiliteExtension, parse_hl_lines
+from .attr_list import get_attrs_and_remainder, AttrListExtension
+from ..util import parseBoolValue
+from ..serializers import _escape_attrib_html
+import re
+from typing import TYPE_CHECKING, Any, Iterable
+
+if TYPE_CHECKING: # pragma: no cover
+ from markdown import Markdown
+
+
class FencedCodeExtension(Extension):
    """ Add fenced code blocks to Markdown. """

    def __init__(self, **kwargs):
        self.config = {
            'lang_prefix': ['language-', 'Prefix prepended to the language. Default: "language-"']
        }
        """ Default configuration options. """
        super().__init__(**kwargs)

    def extendMarkdown(self, md):
        """ Add `FencedBlockPreprocessor` to the Markdown instance. """
        md.registerExtension(self)
        preprocessor = FencedBlockPreprocessor(md, self.getConfigs())
        md.preprocessors.register(preprocessor, 'fenced_code_block', 25)
+
+
class FencedBlockPreprocessor(Preprocessor):
    """ Find and extract fenced code blocks.

    Matched blocks are rendered (via Pygments when the `codehilite`
    extension is active, otherwise as escaped HTML) and stored in the
    `htmlStash`, leaving only a placeholder in the text.
    """

    FENCED_BLOCK_RE = re.compile(
        dedent(r'''
            (?P<fence>^(?:~{3,}|`{3,}))[ ]*                          # opening fence
            ((\{(?P<attrs>[^\n]*)\})|                                # (optional {attrs} or
            (\.?(?P<lang>[\w#.+-]*)[ ]*)?                            # optional (.)lang
            (hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot)[ ]*)?) # optional hl_lines)
            \n                                                       # newline (end of opening fence)
            (?P<code>.*?)(?<=\n)                                     # the code block
            (?P=fence)[ ]*$                                          # closing fence
        '''),
        re.MULTILINE | re.DOTALL | re.VERBOSE
    )

    def __init__(self, md: Markdown, config: dict[str, Any]):
        super().__init__(md)
        self.config = config
        # Dependent-extension lookup is deferred to the first `run()` call,
        # as other extensions may register after this one is constructed.
        self.checked_for_deps = False
        self.codehilite_conf: dict[str, Any] = {}
        self.use_attr_list = False
        # List of options to convert to boolean values
        self.bool_options = [
            'linenums',
            'guess_lang',
            'noclasses',
            'use_pygments'
        ]

    def run(self, lines: list[str]) -> list[str]:
        """ Match and store Fenced Code Blocks in the `HtmlStash`. """

        # Check for dependent extensions
        if not self.checked_for_deps:
            for ext in self.md.registeredExtensions:
                if isinstance(ext, CodeHiliteExtension):
                    self.codehilite_conf = ext.getConfigs()
                if isinstance(ext, AttrListExtension):
                    self.use_attr_list = True

            self.checked_for_deps = True

        text = "\n".join(lines)
        # `index` tracks where the next regex search resumes so replaced
        # text (and invalid attr spans) are never rescanned.
        index = 0
        while 1:
            m = self.FENCED_BLOCK_RE.search(text, index)
            if m:
                lang, id, classes, config = None, '', [], {}
                if m.group('attrs'):
                    attrs, remainder = get_attrs_and_remainder(m.group('attrs'))
                    if remainder:  # Does not have correctly matching curly braces, so the syntax is invalid.
                        index = m.end('attrs')  # Explicitly skip over this, to prevent an infinite loop.
                        continue
                    id, classes, config = self.handle_attrs(attrs)
                    if len(classes):
                        # First class doubles as the language name.
                        lang = classes.pop(0)
                else:
                    if m.group('lang'):
                        lang = m.group('lang')
                    if m.group('hl_lines'):
                        # Support `hl_lines` outside of `attrs` for backward-compatibility
                        config['hl_lines'] = parse_hl_lines(m.group('hl_lines'))

                # If `config` is not empty, then the `codehighlite` extension
                # is enabled, so we call it to highlight the code
                if self.codehilite_conf and self.codehilite_conf['use_pygments'] and config.get('use_pygments', True):
                    local_config = self.codehilite_conf.copy()
                    local_config.update(config)
                    # Combine classes with `cssclass`. Ensure `cssclass` is at end
                    # as Pygments appends a suffix under certain circumstances.
                    # Ignore ID as Pygments does not offer an option to set it.
                    if classes:
                        local_config['css_class'] = '{} {}'.format(
                            ' '.join(classes),
                            local_config['css_class']
                        )
                    highliter = CodeHilite(
                        m.group('code'),
                        lang=lang,
                        style=local_config.pop('pygments_style', 'default'),
                        **local_config
                    )

                    code = highliter.hilite(shebang=False)
                else:
                    id_attr = lang_attr = class_attr = kv_pairs = ''
                    if lang:
                        prefix = self.config.get('lang_prefix', 'language-')
                        lang_attr = f' class="{prefix}{_escape_attrib_html(lang)}"'
                    if classes:
                        class_attr = f' class="{_escape_attrib_html(" ".join(classes))}"'
                    if id:
                        id_attr = f' id="{_escape_attrib_html(id)}"'
                    if self.use_attr_list and config and not config.get('use_pygments', False):
                        # Only assign key/value pairs to code element if `attr_list` extension is enabled, key/value
                        # pairs were defined on the code block, and the `use_pygments` key was not set to `True`. The
                        # `use_pygments` key could be either set to `False` or not defined. It is omitted from output.
                        kv_pairs = ''.join(
                            f' {k}="{_escape_attrib_html(v)}"' for k, v in config.items() if k != 'use_pygments'
                        )
                    code = self._escape(m.group('code'))
                    code = f'<pre{id_attr}{class_attr}><code{lang_attr}{kv_pairs}>{code}</code></pre>'

                placeholder = self.md.htmlStash.store(code)
                text = f'{text[:m.start()]}\n{placeholder}\n{text[m.end():]}'
                # Continue from after the replaced text in the next iteration.
                index = m.start() + 1 + len(placeholder)
            else:
                break
        return text.split("\n")

    def handle_attrs(self, attrs: Iterable[tuple[str, str]]) -> tuple[str, list[str], dict[str, Any]]:
        """ Return tuple: `(id, [list, of, classes], {configs})` """
        id = ''
        classes = []
        configs = {}
        for k, v in attrs:
            if k == 'id':
                id = v
            elif k == '.':
                classes.append(v)
            elif k == 'hl_lines':
                configs[k] = parse_hl_lines(v)
            elif k in self.bool_options:
                configs[k] = parseBoolValue(v, fail_on_errors=False, preserve_none=True)
            else:
                configs[k] = v
        return id, classes, configs

    def _escape(self, txt: str) -> str:
        """ basic html escaping """
        txt = txt.replace('&', '&amp;')
        txt = txt.replace('<', '&lt;')
        txt = txt.replace('>', '&gt;')
        txt = txt.replace('"', '&quot;')
        return txt
+
+
def makeExtension(**kwargs):  # pragma: no cover
    """ Build and return a `FencedCodeExtension` configured with `kwargs`. """
    return FencedCodeExtension(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/markdown/extensions/footnotes.py b/.venv/lib/python3.12/site-packages/markdown/extensions/footnotes.py
new file mode 100644
index 00000000..30c08113
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/markdown/extensions/footnotes.py
@@ -0,0 +1,418 @@
+# Footnotes Extension for Python-Markdown
+# =======================================
+
+# Adds footnote handling to Python-Markdown.
+
+# See https://Python-Markdown.github.io/extensions/footnotes
+# for documentation.
+
+# Copyright The Python Markdown Project
+
+# License: [BSD](https://opensource.org/licenses/bsd-license.php)
+
+"""
+Adds footnote handling to Python-Markdown.
+
+See the [documentation](https://Python-Markdown.github.io/extensions/footnotes)
+for details.
+"""
+
+from __future__ import annotations
+
+from . import Extension
+from ..blockprocessors import BlockProcessor
+from ..inlinepatterns import InlineProcessor
+from ..treeprocessors import Treeprocessor
+from ..postprocessors import Postprocessor
+from .. import util
+from collections import OrderedDict
+import re
+import copy
+import xml.etree.ElementTree as etree
+
# Sentinel strings wrapped in STX/ETX control characters, which are unlikely
# to appear in document text; `FootnotePostprocessor` later swaps them for
# the configured backlink text and a `&#160;` entity respectively.
FN_BACKLINK_TEXT = util.STX + "zz1337820767766393qq" + util.ETX
NBSP_PLACEHOLDER = util.STX + "qq3936677670287331zz" + util.ETX
# Splits a back-reference id such as `fnref2` into its prefix and number so
# duplicate references can be renumbered (see `FootnoteExtension.unique_ref`).
RE_REF_ID = re.compile(r'(fnref)(\d+)')
+
+
class FootnoteExtension(Extension):
    """ Footnote Extension.

    Collects footnote definitions during block parsing, replaces footnote
    references with superscript links during inline processing, and appends
    a `div.footnote` containing the ordered definitions to the document.
    """

    def __init__(self, **kwargs):
        """ Setup configs. """

        self.config = {
            'PLACE_MARKER': [
                '///Footnotes Go Here///', 'The text string that marks where the footnotes go'
            ],
            'UNIQUE_IDS': [
                False, 'Avoid name collisions across multiple calls to `reset()`.'
            ],
            'BACKLINK_TEXT': [
                '&#8617;', "The text string that links from the footnote to the reader's place."
            ],
            'SUPERSCRIPT_TEXT': [
                '{}', "The text string that links from the reader's place to the footnote."
            ],
            'BACKLINK_TITLE': [
                'Jump back to footnote %d in the text',
                'The text string used for the title HTML attribute of the backlink. '
                '%d will be replaced by the footnote number.'
            ],
            'SEPARATOR': [
                ':', 'Footnote separator.'
            ]
        }
        """ Default configuration options. """
        super().__init__(**kwargs)

        # In multiple invocations, emit links that don't get tangled.
        self.unique_prefix = 0
        self.found_refs: dict[str, int] = {}  # original ref id -> times seen
        self.used_refs: set[str] = set()      # all emitted (uniquified) ref ids

        self.reset()

    def extendMarkdown(self, md):
        """ Add pieces to Markdown. """
        md.registerExtension(self)
        self.parser = md.parser
        self.md = md
        # Insert a `blockprocessor` before `ReferencePreprocessor`
        md.parser.blockprocessors.register(FootnoteBlockProcessor(self), 'footnote', 17)

        # Insert an inline pattern before `ImageReferencePattern`
        FOOTNOTE_RE = r'\[\^([^\]]*)\]'  # blah blah [^1] blah
        md.inlinePatterns.register(FootnoteInlineProcessor(FOOTNOTE_RE, self), 'footnote', 175)
        # Insert a tree-processor that would actually add the footnote div
        # This must be before all other tree-processors (i.e., `inline` and
        # `codehilite`) so they can run on the the contents of the div.
        md.treeprocessors.register(FootnoteTreeprocessor(self), 'footnote', 50)

        # Insert a tree-processor that will run after inline is done.
        # In this tree-processor we want to check our duplicate footnote tracker
        # And add additional `backrefs` to the footnote pointing back to the
        # duplicated references.
        md.treeprocessors.register(FootnotePostTreeprocessor(self), 'footnote-duplicate', 15)

        # Insert a postprocessor after amp_substitute processor
        md.postprocessors.register(FootnotePostprocessor(self), 'footnote', 25)

    def reset(self) -> None:
        """ Clear footnotes on reset, and prepare for distinct document. """
        self.footnotes: OrderedDict[str, str] = OrderedDict()
        # Bump the prefix so ids from a previous document never collide
        # when `UNIQUE_IDS` is enabled.
        self.unique_prefix += 1
        self.found_refs = {}
        self.used_refs = set()

    def unique_ref(self, reference: str, found: bool = False) -> str:
        """ Get a unique reference if there are duplicates.

        When `found` is `True`, renumber the reference (`fnref` -> `fnref2`,
        `fnref2` -> `fnref3`, ...) until it no longer collides with an
        already-used reference, and record the duplicate count.
        """
        if not found:
            return reference

        original_ref = reference
        while reference in self.used_refs:
            ref, rest = reference.split(self.get_separator(), 1)
            m = RE_REF_ID.match(ref)
            if m:
                reference = '%s%d%s%s' % (m.group(1), int(m.group(2))+1, self.get_separator(), rest)
            else:
                reference = '%s%d%s%s' % (ref, 2, self.get_separator(), rest)

        self.used_refs.add(reference)
        if original_ref in self.found_refs:
            self.found_refs[original_ref] += 1
        else:
            self.found_refs[original_ref] = 1
        return reference

    def findFootnotesPlaceholder(
        self, root: etree.Element
    ) -> tuple[etree.Element, etree.Element, bool] | None:
        """ Return ElementTree Element that contains Footnote placeholder.

        Returns `(child, parent, isText)` where `isText` tells whether the
        marker was found in the child's `text` (True) or `tail` (False),
        or `None` when no marker exists.
        """
        def finder(element):
            for child in element:
                if child.text:
                    if child.text.find(self.getConfig("PLACE_MARKER")) > -1:
                        return child, element, True
                if child.tail:
                    if child.tail.find(self.getConfig("PLACE_MARKER")) > -1:
                        return child, element, False
                child_res = finder(child)
                if child_res is not None:
                    return child_res
            return None

        res = finder(root)
        return res

    def setFootnote(self, id: str, text: str) -> None:
        """ Store a footnote for later retrieval. """
        self.footnotes[id] = text

    def get_separator(self) -> str:
        """ Get the footnote separator. """
        return self.getConfig("SEPARATOR")

    def makeFootnoteId(self, id: str) -> str:
        """ Return footnote link id. """
        if self.getConfig("UNIQUE_IDS"):
            return 'fn%s%d-%s' % (self.get_separator(), self.unique_prefix, id)
        else:
            return 'fn{}{}'.format(self.get_separator(), id)

    def makeFootnoteRefId(self, id: str, found: bool = False) -> str:
        """ Return footnote back-link id. """
        if self.getConfig("UNIQUE_IDS"):
            return self.unique_ref('fnref%s%d-%s' % (self.get_separator(), self.unique_prefix, id), found)
        else:
            return self.unique_ref('fnref{}{}'.format(self.get_separator(), id), found)

    def makeFootnotesDiv(self, root: etree.Element) -> etree.Element | None:
        """ Return `div` of footnotes as `etree` Element.

        Returns `None` when no footnotes were collected.
        """

        if not list(self.footnotes.keys()):
            return None

        div = etree.Element("div")
        div.set('class', 'footnote')
        etree.SubElement(div, "hr")
        ol = etree.SubElement(div, "ol")
        surrogate_parent = etree.Element("div")

        # Backward compatibility with old '%d' placeholder
        backlink_title = self.getConfig("BACKLINK_TITLE").replace("%d", "{}")

        for index, id in enumerate(self.footnotes.keys(), start=1):
            li = etree.SubElement(ol, "li")
            li.set("id", self.makeFootnoteId(id))
            # Parse footnote with surrogate parent as `li` cannot be used.
            # List block handlers have special logic to deal with `li`.
            # When we are done parsing, we will copy everything over to `li`.
            self.parser.parseChunk(surrogate_parent, self.footnotes[id])
            for el in list(surrogate_parent):
                li.append(el)
                surrogate_parent.remove(el)
            backlink = etree.Element("a")
            backlink.set("href", "#" + self.makeFootnoteRefId(id))
            backlink.set("class", "footnote-backref")
            backlink.set(
                "title",
                backlink_title.format(index)
            )
            backlink.text = FN_BACKLINK_TEXT

            if len(li):
                node = li[-1]
                if node.tag == "p":
                    # NOTE(review): assumes the final `<p>` has text — confirm
                    # a text-less paragraph cannot occur here.
                    node.text = node.text + NBSP_PLACEHOLDER
                    node.append(backlink)
                else:
                    # Last child is not a paragraph (e.g. a list or code
                    # block): put the backlink in its own paragraph.
                    p = etree.SubElement(li, "p")
                    p.append(backlink)
        return div
+
+
class FootnoteBlockProcessor(BlockProcessor):
    """ Find all footnote references and store for later use. """

    # A footnote definition: `[^id]:` at up to 3 spaces of indent, with the
    # first line of the definition on the same line.
    RE = re.compile(r'^[ ]{0,3}\[\^([^\]]*)\]:[ ]*(.*)$', re.MULTILINE)

    def __init__(self, footnotes: FootnoteExtension):
        super().__init__(footnotes.parser)
        self.footnotes = footnotes

    def test(self, parent: etree.Element, block: str) -> bool:
        # Always claim the block; `run()` restores it when no definition is
        # actually present.
        return True

    def run(self, parent: etree.Element, blocks: list[str]) -> bool:
        """ Find, set, and remove footnote definitions. """
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            id = m.group(1)
            fn_blocks = [m.group(2)]

            # Handle rest of block
            therest = block[m.end():].lstrip('\n')
            m2 = self.RE.search(therest)
            if m2:
                # Another footnote exists in the rest of this block.
                # Any content before match is continuation of this footnote, which may be lazily indented.
                before = therest[:m2.start()].rstrip('\n')
                fn_blocks[0] = '\n'.join([fn_blocks[0], self.detab(before)]).lstrip('\n')
                # Add back to blocks everything from beginning of match forward for next iteration.
                blocks.insert(0, therest[m2.start():])
            else:
                # All remaining lines of block are continuation of this footnote, which may be lazily indented.
                fn_blocks[0] = '\n'.join([fn_blocks[0], self.detab(therest)]).strip('\n')

            # Check for child elements in remaining blocks.
            fn_blocks.extend(self.detectTabbed(blocks))

            footnote = "\n\n".join(fn_blocks)
            self.footnotes.setFootnote(id, footnote.rstrip())

            if block[:m.start()].strip():
                # Add any content before match back to blocks as separate block
                blocks.insert(0, block[:m.start()].rstrip('\n'))
            return True
        # No match. Restore block.
        blocks.insert(0, block)
        return False

    def detectTabbed(self, blocks: list[str]) -> list[str]:
        """ Find indented text and remove indent before further processing.

        Consumes leading 4-space-indented blocks from `blocks` (they belong
        to the current footnote) and stops at the first non-indented block
        or new footnote definition.

        Returns:
            A list of blocks with indentation removed.
        """
        fn_blocks = []
        while blocks:
            if blocks[0].startswith(' '*4):
                block = blocks.pop(0)
                # Check for new footnotes within this block and split at new footnote.
                m = self.RE.search(block)
                if m:
                    # Another footnote exists in this block.
                    # Any content before match is continuation of this footnote, which may be lazily indented.
                    before = block[:m.start()].rstrip('\n')
                    fn_blocks.append(self.detab(before))
                    # Add back to blocks everything from beginning of match forward for next iteration.
                    blocks.insert(0, block[m.start():])
                    # End of this footnote.
                    break
                else:
                    # Entire block is part of this footnote.
                    fn_blocks.append(self.detab(block))
            else:
                # End of this footnote.
                break
        return fn_blocks

    def detab(self, block: str) -> str:
        """ Remove one level of indent from a block.

        Preserve lazily indented blocks by only removing indent from indented lines.

        NOTE(review): shadows the base-class `detab` with a different return
        type (plain string) — confirm this is intentional before unifying.
        """
        lines = block.split('\n')
        for i, line in enumerate(lines):
            if line.startswith(' '*4):
                lines[i] = line[4:]
        return '\n'.join(lines)
+
+
class FootnoteInlineProcessor(InlineProcessor):
    """ `InlineProcessor` for footnote markers in a document's body text. """

    def __init__(self, pattern: str, footnotes: FootnoteExtension):
        super().__init__(pattern)
        self.footnotes = footnotes

    def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element | None, int | None, int | None]:
        id = m.group(1)
        if id not in self.footnotes.footnotes:
            # Reference without a matching definition: leave the text alone.
            return None, None, None
        sup = etree.Element("sup")
        a = etree.SubElement(sup, "a")
        sup.set('id', self.footnotes.makeFootnoteRefId(id, found=True))
        a.set('href', '#' + self.footnotes.makeFootnoteId(id))
        a.set('class', 'footnote-ref')
        # 1-based position of this footnote in definition order.
        position = list(self.footnotes.footnotes.keys()).index(id) + 1
        a.text = self.footnotes.getConfig("SUPERSCRIPT_TEXT").format(position)
        return sup, m.start(0), m.end(0)
+
+
class FootnotePostTreeprocessor(Treeprocessor):
    """ Amend footnote div with duplicates.

    Runs after inline processing; for each footnote referenced more than
    once, appends extra backlinks (`fnref2`, `fnref3`, ...) to its list item.
    """

    def __init__(self, footnotes: FootnoteExtension):
        self.footnotes = footnotes

    def add_duplicates(self, li: etree.Element, duplicates: int) -> None:
        """ Adjust current `li` and add the duplicates: `fnref2`, `fnref3`, etc. """
        for link in li.iter('a'):
            # Find the link that needs to be duplicated.
            if link.attrib.get('class', '') == 'footnote-backref':
                ref, rest = link.attrib['href'].split(self.footnotes.get_separator(), 1)
                # Duplicate link the number of times we need to
                # and point the to the appropriate references.
                links = []
                for index in range(2, duplicates + 1):
                    sib_link = copy.deepcopy(link)
                    sib_link.attrib['href'] = '%s%d%s%s' % (ref, index, self.footnotes.get_separator(), rest)
                    links.append(sib_link)
                    # NOTE(review): `self.offset` is incremented here but
                    # never read within this class — confirm before removing.
                    self.offset += 1
                # Add all the new duplicate links.
                el = list(li)[-1]
                for link in links:
                    el.append(link)
                break

    def get_num_duplicates(self, li: etree.Element) -> int:
        """ Get the number of duplicate refs of the footnote. """
        fn, rest = li.attrib.get('id', '').split(self.footnotes.get_separator(), 1)
        # Reconstruct the corresponding reference id (`fn:x` -> `fnref:x`).
        link_id = '{}ref{}{}'.format(fn, self.footnotes.get_separator(), rest)
        return self.footnotes.found_refs.get(link_id, 0)

    def handle_duplicates(self, parent: etree.Element) -> None:
        """ Find duplicate footnotes and format and add the duplicates. """
        for li in list(parent):
            # Check number of duplicates footnotes and insert
            # additional links if needed.
            count = self.get_num_duplicates(li)
            if count > 1:
                self.add_duplicates(li, count)

    def run(self, root: etree.Element) -> None:
        """ Crawl the footnote div and add missing duplicate footnotes. """
        self.offset = 0
        for div in root.iter('div'):
            if div.attrib.get('class', '') == 'footnote':
                # Footnotes should be under the first ordered list under
                # the footnote div. So once we find it, quit.
                for ol in div.iter('ol'):
                    self.handle_duplicates(ol)
                    break
+
+
class FootnoteTreeprocessor(Treeprocessor):
    """ Build and append footnote div to end of document. """

    def __init__(self, footnotes: FootnoteExtension):
        self.footnotes = footnotes

    def run(self, root: etree.Element) -> None:
        div = self.footnotes.makeFootnotesDiv(root)
        if div is None:
            # No footnotes were collected; nothing to insert.
            return
        placeholder = self.footnotes.findFootnotesPlaceholder(root)
        if placeholder is None:
            # No placeholder marker in the document: append at the end.
            root.append(div)
            return
        child, parent, isText = placeholder
        index = list(parent).index(child)
        if isText:
            # Marker was the element's text: replace the element itself.
            parent.remove(child)
            parent.insert(index, div)
        else:
            # Marker was in the tail: insert after the element, dropping the tail.
            parent.insert(index + 1, div)
            child.tail = None
+
+
class FootnotePostprocessor(Postprocessor):
    """ Replace placeholders with html entities. """

    def __init__(self, footnotes: FootnoteExtension):
        self.footnotes = footnotes

    def run(self, text: str) -> str:
        """ Swap backlink and non-breaking-space placeholders for output text. """
        backlink = self.footnotes.getConfig("BACKLINK_TEXT")
        return text.replace(FN_BACKLINK_TEXT, backlink).replace(NBSP_PLACEHOLDER, "&#160;")
+
+
def makeExtension(**kwargs):  # pragma: no cover
    """ Build and return a `FootnoteExtension` configured with `kwargs`. """
    return FootnoteExtension(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/markdown/extensions/legacy_attrs.py b/.venv/lib/python3.12/site-packages/markdown/extensions/legacy_attrs.py
new file mode 100644
index 00000000..6641e6ea
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/markdown/extensions/legacy_attrs.py
@@ -0,0 +1,71 @@
+# Python Markdown
+
+# A Python implementation of John Gruber's Markdown.
+
+# Documentation: https://python-markdown.github.io/
+# GitHub: https://github.com/Python-Markdown/markdown/
+# PyPI: https://pypi.org/project/Markdown/
+
+# Started by Manfred Stienstra (http://www.dwerg.net/).
+# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
+# Currently maintained by Waylan Limberg (https://github.com/waylan),
+# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
+
+# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later)
+# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
+# Copyright 2004 Manfred Stienstra (the original version)
+
+# License: BSD (see LICENSE.md for details).
+
+"""
+An extension to Python Markdown which implements legacy attributes.
+
+Prior to Python-Markdown version 3.0, the Markdown class had an `enable_attributes`
+keyword which was on by default and provided for attributes to be defined for elements
+using the format `{@key=value}`. This extension is provided as a replacement for
+backward compatibility. New documents should be authored using `attr_lists`. However,
+numerous documents exist which have been using the old attribute format for many
+years. This extension can be used to continue to render those documents correctly.
+"""
+
+from __future__ import annotations
+
+import re
+from markdown.treeprocessors import Treeprocessor, isString
+from markdown.extensions import Extension
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING: # pragma: no cover
+ import xml.etree.ElementTree as etree
+
+
+ATTR_RE = re.compile(r'\{@([^\}]*)=([^\}]*)}') # {@id=123}
+
+
class LegacyAttrs(Treeprocessor):
    """ Scan the tree for legacy `{@key=value}` attribute definitions. """

    def run(self, doc: etree.Element) -> None:
        """Find and set values of attributes ({@key=value}). """
        for el in doc.iter():
            # `alt` attribute values (e.g. on images) may carry definitions too.
            alt = el.get('alt', None)
            if alt is not None:
                el.set('alt', self.handleAttributes(el, alt))
            if el.text and isString(el.text):
                el.text = self.handleAttributes(el, el.text)
            if el.tail and isString(el.tail):
                el.tail = self.handleAttributes(el, el.tail)

    def handleAttributes(self, el: etree.Element, txt: str) -> str:
        """ Set attributes and return text without definitions.

        Each `{@key=value}` match sets `key` on `el` (newlines in the value
        collapsed to spaces) and is stripped from the returned text.
        """
        def attributeCallback(match: re.Match[str]) -> str:
            el.set(match.group(1), match.group(2).replace('\n', ' '))
            # Bug fix: `re.sub` requires a replacement callable to return a
            # string. Without this `return ''`, the first matching attribute
            # raised `TypeError` instead of being removed from the text.
            return ''
        return ATTR_RE.sub(attributeCallback, txt)
+
+
class LegacyAttrExtension(Extension):
    def extendMarkdown(self, md):
        """ Add `LegacyAttrs` to Markdown instance. """
        processor = LegacyAttrs(md)
        md.treeprocessors.register(processor, 'legacyattrs', 15)
+
+
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an instance of `LegacyAttrExtension`; `kwargs` are its options. """
    return LegacyAttrExtension(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/markdown/extensions/legacy_em.py b/.venv/lib/python3.12/site-packages/markdown/extensions/legacy_em.py
new file mode 100644
index 00000000..a6f67b7e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/markdown/extensions/legacy_em.py
@@ -0,0 +1,52 @@
+# Legacy Em Extension for Python-Markdown
+# =======================================
+
+# This extension provides legacy behavior for _connected_words_.
+
+# Copyright 2015-2018 The Python Markdown Project
+
+# License: [BSD](https://opensource.org/licenses/bsd-license.php)
+
+"""
+This extension provides legacy behavior for _connected_words_.
+"""
+
+from __future__ import annotations
+
+from . import Extension
+from ..inlinepatterns import UnderscoreProcessor, EmStrongItem, EM_STRONG2_RE, STRONG_EM2_RE
+import re
+
+# _emphasis_
+EMPHASIS_RE = r'(_)([^_]+)\1'
+
+# __strong__
+STRONG_RE = r'(_{2})(.+?)\1'
+
+# __strong_em___
+STRONG_EM_RE = r'(_)\1(?!\1)([^_]+?)\1(?!\1)(.+?)\1{3}'
+
+
class LegacyUnderscoreProcessor(UnderscoreProcessor):
    """Emphasis processor for handling strong and em matches inside underscores."""

    # NOTE(review): order appears significant — the combined strong+em forms
    # are listed before the plain single forms; confirm ordering semantics
    # against `UnderscoreProcessor.PATTERNS` before rearranging.
    PATTERNS = [
        EmStrongItem(re.compile(EM_STRONG2_RE, re.DOTALL | re.UNICODE), 'double', 'strong,em'),
        EmStrongItem(re.compile(STRONG_EM2_RE, re.DOTALL | re.UNICODE), 'double', 'em,strong'),
        EmStrongItem(re.compile(STRONG_EM_RE, re.DOTALL | re.UNICODE), 'double2', 'strong,em'),
        EmStrongItem(re.compile(STRONG_RE, re.DOTALL | re.UNICODE), 'single', 'strong'),
        EmStrongItem(re.compile(EMPHASIS_RE, re.DOTALL | re.UNICODE), 'single', 'em')
    ]
+
+
class LegacyEmExtension(Extension):
    """ Add legacy_em extension to Markdown class."""

    def extendMarkdown(self, md):
        """ Modify inline patterns. """
        underscore = LegacyUnderscoreProcessor(r'_')
        md.inlinePatterns.register(underscore, 'em_strong2', 50)
+
+
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an instance of the `LegacyEmExtension`.

    `kwargs` are forwarded unchanged as extension configuration options.
    """
    return LegacyEmExtension(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/markdown/extensions/md_in_html.py b/.venv/lib/python3.12/site-packages/markdown/extensions/md_in_html.py
new file mode 100644
index 00000000..64b84a5f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/markdown/extensions/md_in_html.py
@@ -0,0 +1,376 @@
+# Python-Markdown Markdown in HTML Extension
+# ===============================
+
+# An implementation of [PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/)'s
+# parsing of Markdown syntax in raw HTML.
+
+# See https://Python-Markdown.github.io/extensions/raw_html
+# for documentation.
+
+# Copyright The Python Markdown Project
+
+# License: [BSD](https://opensource.org/licenses/bsd-license.php)
+
+"""
+An implementation of [PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/)'s
+parsing of Markdown syntax in raw HTML.
+
+See the [documentation](https://Python-Markdown.github.io/extensions/raw_html)
+for details.
+"""
+
+from __future__ import annotations
+
+from . import Extension
+from ..blockprocessors import BlockProcessor
+from ..preprocessors import Preprocessor
+from ..postprocessors import RawHtmlPostprocessor
+from .. import util
+from ..htmlparser import HTMLExtractor, blank_line_re
+import xml.etree.ElementTree as etree
+from typing import TYPE_CHECKING, Literal, Mapping
+
+if TYPE_CHECKING: # pragma: no cover
+ from markdown import Markdown
+
+
class HTMLExtractorExtra(HTMLExtractor):
    """
    Override `HTMLExtractor` and create `etree` `Elements` for any elements which should have content parsed as
    Markdown.
    """

    def __init__(self, md: Markdown, *args, **kwargs):
        # All block-level tags.
        self.block_level_tags = set(md.block_level_elements.copy())
        # Block-level tags in which the content only gets span level parsing
        self.span_tags = set(
            ['address', 'dd', 'dt', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'legend', 'li', 'p', 'summary', 'td', 'th']
        )
        # Block-level tags which never get their content parsed.
        self.raw_tags = set(['canvas', 'math', 'option', 'pre', 'script', 'style', 'textarea'])

        # `super().__init__` must run after the sets above (it may call
        # `reset`), but before `block_tags`, which needs `self.empty_tags`.
        super().__init__(md, *args, **kwargs)

        # Block-level tags in which the content gets parsed as blocks
        self.block_tags = set(self.block_level_tags) - (self.span_tags | self.raw_tags | self.empty_tags)
        self.span_and_blocks_tags = self.block_tags | self.span_tags

    def reset(self):
        """Reset this instance. Loses all unprocessed data."""
        self.mdstack: list[str] = []  # When markdown=1, stack contains a list of tags
        self.treebuilder = etree.TreeBuilder()
        # Parallel to `mdstack`: the parsing state for each open tag.
        self.mdstate: list[Literal['block', 'span', 'off', None]] = []
        super().reset()

    def close(self):
        """Handle any buffered data."""
        super().close()
        # Handle any unclosed tags.
        if self.mdstack:
            # Close the outermost parent. `handle_endtag` will close all unclosed children.
            self.handle_endtag(self.mdstack[0])

    def get_element(self) -> etree.Element:
        """ Return element from `treebuilder` and reset `treebuilder` for later use. """
        element = self.treebuilder.close()
        self.treebuilder = etree.TreeBuilder()
        return element

    def get_state(self, tag, attrs: Mapping[str, str]) -> Literal['block', 'span', 'off', None]:
        """ Return state from tag and `markdown` attribute. One of 'block', 'span', or 'off'. """
        md_attr = attrs.get('markdown', '0')
        if md_attr == 'markdown':
            # `<tag markdown>` is the same as `<tag markdown='1'>`.
            md_attr = '1'
        parent_state = self.mdstate[-1] if self.mdstate else None
        if parent_state == 'off' or (parent_state == 'span' and md_attr != '0'):
            # Only use the parent state if it is more restrictive than the markdown attribute.
            md_attr = parent_state
        if ((md_attr == '1' and tag in self.block_tags) or
                (md_attr == 'block' and tag in self.span_and_blocks_tags)):
            return 'block'
        elif ((md_attr == '1' and tag in self.span_tags) or
                (md_attr == 'span' and tag in self.span_and_blocks_tags)):
            return 'span'
        elif tag in self.block_level_tags:
            return 'off'
        else:  # pragma: no cover
            return None

    def handle_starttag(self, tag, attrs):
        """ Route a start tag to the treebuilder, the stash, or the parent class. """
        # Handle tags that should always be empty and do not specify a closing tag
        if tag in self.empty_tags and (self.at_line_start() or self.intail):
            attrs = {key: value if value is not None else key for key, value in attrs}
            if "markdown" in attrs:
                # Strip the `markdown` attribute and re-serialize the tag.
                attrs.pop('markdown')
                element = etree.Element(tag, attrs)
                data = etree.tostring(element, encoding='unicode', method='html')
            else:
                data = self.get_starttag_text()
            self.handle_empty_tag(data, True)
            return

        if tag in self.block_level_tags and (self.at_line_start() or self.intail):
            # Valueless attribute (ex: `<tag checked>`) results in `[('checked', None)]`.
            # Convert to `{'checked': 'checked'}`.
            attrs = {key: value if value is not None else key for key, value in attrs}
            state = self.get_state(tag, attrs)
            if self.inraw or (state in [None, 'off'] and not self.mdstack):
                # fall back to default behavior
                attrs.pop('markdown', None)
                super().handle_starttag(tag, attrs)
            else:
                if 'p' in self.mdstack and tag in self.block_level_tags:
                    # Close unclosed 'p' tag
                    self.handle_endtag('p')
                self.mdstate.append(state)
                self.mdstack.append(tag)
                attrs['markdown'] = state
                self.treebuilder.start(tag, attrs)
        else:
            # Span level tag
            if self.inraw:
                super().handle_starttag(tag, attrs)
            else:
                text = self.get_starttag_text()
                if self.mdstate and self.mdstate[-1] == "off":
                    # Inside a markdown='off' region: keep the raw tag opaque.
                    self.handle_data(self.md.htmlStash.store(text))
                else:
                    self.handle_data(text)
                if tag in self.CDATA_CONTENT_ELEMENTS:
                    # This is presumably a standalone tag in a code span (see #1036).
                    self.clear_cdata_mode()

    def handle_endtag(self, tag):
        """ Close tracked elements, stashing a finished top-level element. """
        if tag in self.block_level_tags:
            if self.inraw:
                super().handle_endtag(tag)
            elif tag in self.mdstack:
                # Close element and any unclosed children
                while self.mdstack:
                    item = self.mdstack.pop()
                    self.mdstate.pop()
                    self.treebuilder.end(item)
                    if item == tag:
                        break
                if not self.mdstack:
                    # Last item in stack is closed. Stash it
                    element = self.get_element()
                    # Get last entry to see if it ends in newlines
                    # If it is an element, assume there is no newlines
                    item = self.cleandoc[-1] if self.cleandoc else ''
                    # If we only have one newline before block element, add another
                    if not item.endswith('\n\n') and item.endswith('\n'):
                        self.cleandoc.append('\n')
                    self.cleandoc.append(self.md.htmlStash.store(element))
                    self.cleandoc.append('\n\n')
                    self.state = []
                    # Check if element has a tail
                    if not blank_line_re.match(
                            self.rawdata[self.line_offset + self.offset + len(self.get_endtag_text(tag)):]):
                        # More content exists after `endtag`.
                        self.intail = True
            else:
                # Treat orphan closing tag as a span level tag.
                text = self.get_endtag_text(tag)
                if self.mdstate and self.mdstate[-1] == "off":
                    self.handle_data(self.md.htmlStash.store(text))
                else:
                    self.handle_data(text)
        else:
            # Span level tag
            if self.inraw:
                super().handle_endtag(tag)
            else:
                text = self.get_endtag_text(tag)
                if self.mdstate and self.mdstate[-1] == "off":
                    self.handle_data(self.md.htmlStash.store(text))
                else:
                    self.handle_data(text)

    def handle_startendtag(self, tag, attrs):
        """ Handle XHTML-style self-closing tags (`<tag ... />`). """
        if tag in self.empty_tags:
            attrs = {key: value if value is not None else key for key, value in attrs}
            if "markdown" in attrs:
                # Strip the `markdown` attribute and re-serialize the tag.
                attrs.pop('markdown')
                element = etree.Element(tag, attrs)
                data = etree.tostring(element, encoding='unicode', method='html')
            else:
                data = self.get_starttag_text()
        else:
            data = self.get_starttag_text()
        self.handle_empty_tag(data, is_block=self.md.is_block_level(tag))

    def handle_data(self, data):
        """ Feed text to the treebuilder while inside a tracked element. """
        if self.intail and '\n' in data:
            self.intail = False
        if self.inraw or not self.mdstack:
            super().handle_data(data)
        else:
            self.treebuilder.data(data)

    def handle_empty_tag(self, data, is_block):
        """ Stash an empty tag; inside tracked elements it becomes a placeholder. """
        if self.inraw or not self.mdstack:
            super().handle_empty_tag(data, is_block)
        else:
            if self.at_line_start() and is_block:
                self.handle_data('\n' + self.md.htmlStash.store(data) + '\n\n')
            else:
                self.handle_data(self.md.htmlStash.store(data))

    def parse_pi(self, i: int) -> int:
        if self.at_line_start() or self.intail or self.mdstack:
            # The same override exists in `HTMLExtractor` without the check
            # for `mdstack`. Therefore, use parent of `HTMLExtractor` instead.
            return super(HTMLExtractor, self).parse_pi(i)
        # This is not the beginning of a raw block so treat as plain data
        # and avoid consuming any tags which may follow (see #1066).
        self.handle_data('<?')
        return i + 2

    def parse_html_declaration(self, i: int) -> int:
        if self.at_line_start() or self.intail or self.mdstack:
            # The same override exists in `HTMLExtractor` without the check
            # for `mdstack`. Therefore, use parent of `HTMLExtractor` instead.
            return super(HTMLExtractor, self).parse_html_declaration(i)
        # This is not the beginning of a raw block so treat as plain data
        # and avoid consuming any tags which may follow (see #1066).
        self.handle_data('<!')
        return i + 2
+
+
class HtmlBlockPreprocessor(Preprocessor):
    """Remove html blocks from the text and store them for later retrieval."""

    def run(self, lines: list[str]) -> list[str]:
        """ Feed the joined source through the extractor; return what remains. """
        parser = HTMLExtractorExtra(self.md)
        parser.feed('\n'.join(lines))
        parser.close()
        cleaned = ''.join(parser.cleandoc)
        return cleaned.split('\n')
+
+
class MarkdownInHtmlProcessor(BlockProcessor):
    """Process Markdown Inside HTML Blocks which have been stored in the `HtmlStash`."""

    def test(self, parent: etree.Element, block: str) -> bool:
        # Always return True. `run` will return `False` if it is not a valid match.
        return True

    def parse_element_content(self, element: etree.Element) -> None:
        """
        Recursively parse the text content of an `etree` Element as Markdown.

        Any block level elements generated from the Markdown will be inserted as children of the element in place
        of the text content. All `markdown` attributes are removed. For any elements in which Markdown parsing has
        been disabled, the text content of it and its children are wrapped in an `AtomicString`.
        """

        # Elements with no `markdown` attribute default to disabled ('off').
        md_attr = element.attrib.pop('markdown', 'off')

        if md_attr == 'block':
            # Parse content as block level
            # The order in which the different parts are parsed (text, children, tails) is important here as the
            # order of elements needs to be preserved. We can't be inserting items at a later point in the current
            # iteration as we don't want to do raw processing on elements created from parsing Markdown text (for
            # example). Therefore, the order of operations is children, tails, text.

            # Recursively parse existing children from raw HTML
            for child in list(element):
                self.parse_element_content(child)

            # Parse Markdown text in tail of children. Do this separate to avoid raw HTML parsing.
            # Save the position of each item to be inserted later in reverse.
            tails = []
            for pos, child in enumerate(element):
                if child.tail:
                    block = child.tail.rstrip('\n')
                    child.tail = ''
                    # Use a dummy placeholder element.
                    dummy = etree.Element('div')
                    self.parser.parseBlocks(dummy, block.split('\n\n'))
                    children = list(dummy)
                    children.reverse()
                    tails.append((pos + 1, children))

            # Insert the elements created from the tails in reverse.
            tails.reverse()
            for pos, tail in tails:
                for item in tail:
                    element.insert(pos, item)

            # Parse Markdown text content. Do this last to avoid raw HTML parsing.
            if element.text:
                block = element.text.rstrip('\n')
                element.text = ''
                # Use a dummy placeholder element as the content needs to get inserted before existing children.
                dummy = etree.Element('div')
                self.parser.parseBlocks(dummy, block.split('\n\n'))
                children = list(dummy)
                children.reverse()
                for child in children:
                    element.insert(0, child)

        elif md_attr == 'span':
            # Span level parsing will be handled by inline processors.
            # Walk children here to remove any `markdown` attributes.
            for child in list(element):
                self.parse_element_content(child)

        else:
            # Disable inline parsing for everything else
            if element.text is None:
                element.text = ''
            element.text = util.AtomicString(element.text)
            for child in list(element):
                self.parse_element_content(child)
                if child.tail:
                    child.tail = util.AtomicString(child.tail)

    def run(self, parent: etree.Element, blocks: list[str]) -> bool:
        """ Replace a stash placeholder block with its fully parsed element. """
        m = util.HTML_PLACEHOLDER_RE.match(blocks[0])
        if m:
            index = int(m.group(1))
            element = self.parser.md.htmlStash.rawHtmlBlocks[index]
            if isinstance(element, etree.Element):
                # We have a matched element. Process it.
                blocks.pop(0)
                self.parse_element_content(element)
                parent.append(element)
                # Cleanup stash. Replace element with empty string to avoid confusing postprocessor.
                self.parser.md.htmlStash.rawHtmlBlocks.pop(index)
                self.parser.md.htmlStash.rawHtmlBlocks.insert(index, '')
                # Confirm the match to the `blockparser`.
                return True
        # No match found.
        return False
+
+
class MarkdownInHTMLPostprocessor(RawHtmlPostprocessor):
    def stash_to_string(self, text: str | etree.Element) -> str:
        """ Override default to handle any `etree` elements still in the stash. """
        if isinstance(text, etree.Element):
            # Serialize un-consumed elements back to raw HTML.
            return self.md.serializer(text)
        return str(text)
+
+
class MarkdownInHtmlExtension(Extension):
    """Add Markdown parsing in HTML to Markdown class."""

    def extendMarkdown(self, md):
        """ Register extension instances. """
        # Replace the stock raw HTML preprocessor with the extractor that
        # builds `etree` elements for `markdown`-annotated tags.
        preprocessor = HtmlBlockPreprocessor(md)
        md.preprocessors.register(preprocessor, 'html_block', 20)
        # Handle the placeholders left in the text for those elements.
        block_processor = MarkdownInHtmlProcessor(md.parser)
        md.parser.blockprocessors.register(block_processor, 'markdown_block', 105)
        # Replace the stock raw HTML postprocessor so any stashed elements
        # are serialized on the way out.
        postprocessor = MarkdownInHTMLPostprocessor(md)
        md.postprocessors.register(postprocessor, 'raw_html', 30)
+
+
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an instance of `MarkdownInHtmlExtension`; `kwargs` are its options. """
    return MarkdownInHtmlExtension(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/markdown/extensions/meta.py b/.venv/lib/python3.12/site-packages/markdown/extensions/meta.py
new file mode 100644
index 00000000..cb703399
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/markdown/extensions/meta.py
@@ -0,0 +1,86 @@
+# Meta Data Extension for Python-Markdown
+# =======================================
+
+# This extension adds Meta Data handling to markdown.
+
+# See https://Python-Markdown.github.io/extensions/meta_data
+# for documentation.
+
+# Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
+
+# All changes Copyright 2008-2014 The Python Markdown Project
+
+# License: [BSD](https://opensource.org/licenses/bsd-license.php)
+
+"""
+This extension adds Meta Data handling to markdown.
+
+See the [documentation](https://Python-Markdown.github.io/extensions/meta_data)
+for details.
+"""
+
+from __future__ import annotations
+
+from . import Extension
+from ..preprocessors import Preprocessor
+import re
+import logging
+from typing import Any
+
+log = logging.getLogger('MARKDOWN')
+
+# Global Vars
+META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
+META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')
+BEGIN_RE = re.compile(r'^-{3}(\s.*)?')
+END_RE = re.compile(r'^(-{3}|\.{3})(\s.*)?')
+
+
class MetaExtension(Extension):
    """ Meta-Data extension for Python-Markdown. """

    def extendMarkdown(self, md):
        """ Add `MetaPreprocessor` to Markdown instance. """
        md.registerExtension(self)
        # Keep a handle on the instance so `reset` can clear its `Meta`.
        self.md = md
        preprocessor = MetaPreprocessor(md)
        md.preprocessors.register(preprocessor, 'meta', 27)

    def reset(self) -> None:
        """ Discard meta data collected by a previous run. """
        self.md.Meta = {}
+
+
class MetaPreprocessor(Preprocessor):
    """ Get Meta-Data. """

    def run(self, lines: list[str]) -> list[str]:
        """ Parse Meta-Data and store in Markdown.Meta. """
        meta: dict[str, Any] = {}
        key = None
        pos = 0
        if lines and BEGIN_RE.match(lines[0]):
            pos = 1  # Skip the opening `---` fence.
        while pos < len(lines):
            line = lines[pos]
            if line.strip() == '' or END_RE.match(line):
                pos += 1  # Consume the blank line / closing fence.
                break
            kv = META_RE.match(line)
            if kv:
                # New key; values are always accumulated as a list.
                key = kv.group('key').lower().strip()
                meta.setdefault(key, []).append(kv.group('value').strip())
            else:
                more = META_MORE_RE.match(line)
                if not (more and key):
                    break  # Not meta data; leave this line for the document.
                # Indented continuation of the previous key.
                meta[key].append(more.group('value').strip())
            pos += 1
        # Drop the consumed header lines in place, as the original did.
        del lines[:pos]
        self.md.Meta = meta
        return lines
+
+
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an instance of `MetaExtension`; `kwargs` are its options. """
    return MetaExtension(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/markdown/extensions/nl2br.py b/.venv/lib/python3.12/site-packages/markdown/extensions/nl2br.py
new file mode 100644
index 00000000..177df1ee
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/markdown/extensions/nl2br.py
@@ -0,0 +1,41 @@
+# `NL2BR` Extension
+# ===============
+
+# A Python-Markdown extension to treat newlines as hard breaks; like
+# GitHub-flavored Markdown does.
+
+# See https://Python-Markdown.github.io/extensions/nl2br
+# for documentation.
+
+# Original code Copyright 2011 [Brian Neal](https://deathofagremmie.com/)
+
+# All changes Copyright 2011-2014 The Python Markdown Project
+
+# License: [BSD](https://opensource.org/licenses/bsd-license.php)
+
+"""
+A Python-Markdown extension to treat newlines as hard breaks; like
+GitHub-flavored Markdown does.
+
+See the [documentation](https://Python-Markdown.github.io/extensions/nl2br)
+for details.
+"""
+
+from __future__ import annotations
+
+from . import Extension
+from ..inlinepatterns import SubstituteTagInlineProcessor
+
+BR_RE = r'\n'
+
+
class Nl2BrExtension(Extension):

    def extendMarkdown(self, md):
        """ Add a `SubstituteTagInlineProcessor` to Markdown. """
        # Every bare newline becomes a `<br>` element, as on GitHub.
        processor = SubstituteTagInlineProcessor(BR_RE, 'br')
        md.inlinePatterns.register(processor, 'nl', 5)
+
+
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an instance of `Nl2BrExtension`; `kwargs` are its options. """
    return Nl2BrExtension(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/markdown/extensions/sane_lists.py b/.venv/lib/python3.12/site-packages/markdown/extensions/sane_lists.py
new file mode 100644
index 00000000..be421f94
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/markdown/extensions/sane_lists.py
@@ -0,0 +1,69 @@
+# Sane List Extension for Python-Markdown
+# =======================================
+
# Modify the behavior of Lists in Python-Markdown to act in a sane manner.
+
+# See https://Python-Markdown.github.io/extensions/sane_lists
+# for documentation.
+
+# Original code Copyright 2011 [Waylan Limberg](http://achinghead.com)
+
+# All changes Copyright 2011-2014 The Python Markdown Project
+
+# License: [BSD](https://opensource.org/licenses/bsd-license.php)
+
+"""
Modify the behavior of Lists in Python-Markdown to act in a sane manner.
+
+See [documentation](https://Python-Markdown.github.io/extensions/sane_lists)
+for details.
+"""
+
+from __future__ import annotations
+
+from . import Extension
+from ..blockprocessors import OListProcessor, UListProcessor
+import re
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING: # pragma: no cover
+ from .. import blockparser
+
+
class SaneOListProcessor(OListProcessor):
    """ Override `SIBLING_TAGS` to not include `ul` and set `LAZY_OL` to `False`. """

    SIBLING_TAGS = ['ol']
    """ Exclude `ul` from list of siblings. """
    LAZY_OL = False
    """ Disable lazy list behavior. """

    def __init__(self, parser: blockparser.BlockParser):
        super().__init__(parser)
        # Child items of a sane ordered list must themselves be numbered.
        max_indent = self.tab_length - 1
        self.CHILD_RE = re.compile(r'^[ ]{0,%d}((\d+\.))[ ]+(.*)' % max_indent)
+
+
class SaneUListProcessor(UListProcessor):
    """ Override `SIBLING_TAGS` to not include `ol`. """

    SIBLING_TAGS = ['ul']
    """ Exclude `ol` from list of siblings. """

    def __init__(self, parser: blockparser.BlockParser):
        super().__init__(parser)
        # Child items of a sane unordered list must use a bullet marker.
        max_indent = self.tab_length - 1
        self.CHILD_RE = re.compile(r'^[ ]{0,%d}(([*+-]))[ ]+(.*)' % max_indent)
+
+
class SaneListExtension(Extension):
    """ Add sane lists to Markdown. """

    def extendMarkdown(self, md):
        """ Override existing Processors. """
        olist = SaneOListProcessor(md.parser)
        ulist = SaneUListProcessor(md.parser)
        md.parser.blockprocessors.register(olist, 'olist', 40)
        md.parser.blockprocessors.register(ulist, 'ulist', 30)
+
+
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an instance of `SaneListExtension`; `kwargs` are its options. """
    return SaneListExtension(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/markdown/extensions/smarty.py b/.venv/lib/python3.12/site-packages/markdown/extensions/smarty.py
new file mode 100644
index 00000000..7a7c952d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/markdown/extensions/smarty.py
@@ -0,0 +1,277 @@
+# Smarty extension for Python-Markdown
+# ====================================
+
+# Adds conversion of ASCII dashes, quotes and ellipses to their HTML
+# entity equivalents.
+
+# See https://Python-Markdown.github.io/extensions/smarty
+# for documentation.
+
+# Author: 2013, Dmitry Shachnev <mitya57@gmail.com>
+
+# All changes Copyright 2013-2014 The Python Markdown Project
+
+# License: [BSD](https://opensource.org/licenses/bsd-license.php)
+
+# SmartyPants license:
+
+# Copyright (c) 2003 John Gruber <https://daringfireball.net/>
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+
+# * Neither the name "SmartyPants" nor the names of its contributors
+# may be used to endorse or promote products derived from this
+# software without specific prior written permission.
+
+# This software is provided by the copyright holders and contributors "as
+# is" and any express or implied warranties, including, but not limited
+# to, the implied warranties of merchantability and fitness for a
+# particular purpose are disclaimed. In no event shall the copyright
+# owner or contributors be liable for any direct, indirect, incidental,
+# special, exemplary, or consequential damages (including, but not
+# limited to, procurement of substitute goods or services; loss of use,
+# data, or profits; or business interruption) however caused and on any
+# theory of liability, whether in contract, strict liability, or tort
+# (including negligence or otherwise) arising in any way out of the use
+# of this software, even if advised of the possibility of such damage.
+
+
+# `smartypants.py` license:
+
+# `smartypants.py` is a derivative work of SmartyPants.
+# Copyright (c) 2004, 2007 Chad Miller <http://web.chad.org/>
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+
+# This software is provided by the copyright holders and contributors "as
+# is" and any express or implied warranties, including, but not limited
+# to, the implied warranties of merchantability and fitness for a
+# particular purpose are disclaimed. In no event shall the copyright
+# owner or contributors be liable for any direct, indirect, incidental,
+# special, exemplary, or consequential damages (including, but not
+# limited to, procurement of substitute goods or services; loss of use,
+# data, or profits; or business interruption) however caused and on any
+# theory of liability, whether in contract, strict liability, or tort
+# (including negligence or otherwise) arising in any way out of the use
+# of this software, even if advised of the possibility of such damage.
+
+"""
+Adds conversion of ASCII dashes, quotes and ellipses to their HTML
+entity equivalents.
+
+See the [documentation](https://Python-Markdown.github.io/extensions/smarty)
+for details.
+"""
+
+from __future__ import annotations
+
+from . import Extension
+from ..inlinepatterns import HtmlInlineProcessor, HTML_RE
+from ..treeprocessors import InlineProcessor
+from ..util import Registry
+from typing import TYPE_CHECKING, Sequence
+
+if TYPE_CHECKING: # pragma: no cover
+ from markdown import Markdown
+ from .. import inlinepatterns
+ import re
+ import xml.etree.ElementTree as etree
+
+# Constants for quote education.
+punctClass = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]"""
+endOfWordClass = r"[\s.,;:!?)]"
+closeClass = r"[^\ \t\r\n\[\{\(\-\u0002\u0003]"
+
+openingQuotesBase = (
+ r'(\s' # a whitespace char
+ r'|&nbsp;' # or a non-breaking space entity
+ r'|--' # or dashes
+ r'|–|—' # or Unicode
+ r'|&[mn]dash;' # or named dash entities
+ r'|&#8211;|&#8212;' # or decimal entities
+ r')'
+)
+
+substitutions = {
+ 'mdash': '&mdash;',
+ 'ndash': '&ndash;',
+ 'ellipsis': '&hellip;',
+ 'left-angle-quote': '&laquo;',
+ 'right-angle-quote': '&raquo;',
+ 'left-single-quote': '&lsquo;',
+ 'right-single-quote': '&rsquo;',
+ 'left-double-quote': '&ldquo;',
+ 'right-double-quote': '&rdquo;',
+}
+
+
+# Special case if the very first character is a quote
+# followed by punctuation at a non-word-break. Close the quotes by brute force:
+singleQuoteStartRe = r"^'(?=%s\B)" % punctClass
+doubleQuoteStartRe = r'^"(?=%s\B)' % punctClass
+
+# Special case for double sets of quotes, e.g.:
+# <p>He said, "'Quoted' words in a larger quote."</p>
+doubleQuoteSetsRe = r""""'(?=\w)"""
+singleQuoteSetsRe = r"""'"(?=\w)"""
+
+# Special case for decade abbreviations (the '80s):
+decadeAbbrRe = r"(?<!\w)'(?=\d{2}s)"
+
+# Get most opening double quotes:
+openingDoubleQuotesRegex = r'%s"(?=\w)' % openingQuotesBase
+
+# Double closing quotes:
+closingDoubleQuotesRegex = r'"(?=\s)'
+closingDoubleQuotesRegex2 = '(?<=%s)"' % closeClass
+
+# Get most opening single quotes:
+openingSingleQuotesRegex = r"%s'(?=\w)" % openingQuotesBase
+
+# Single closing quotes:
+closingSingleQuotesRegex = r"(?<=%s)'(?!\s|s\b|\d)" % closeClass
+closingSingleQuotesRegex2 = r"'(\s|s\b)"
+
+# All remaining quotes should be opening ones
+remainingSingleQuotesRegex = r"'"
+remainingDoubleQuotesRegex = r'"'
+
+HTML_STRICT_RE = HTML_RE + r'(?!\>)'
+
+
class SubstituteTextPattern(HtmlInlineProcessor):
    def __init__(self, pattern: str, replace: Sequence[int | str | etree.Element], md: Markdown):
        """ Replaces matches with some text. """
        HtmlInlineProcessor.__init__(self, pattern)
        self.replace = replace
        self.md = md

    def handleMatch(self, m: re.Match[str], data: str) -> tuple[str, int, int]:
        """ Assemble the replacement from group refs and stashed literals. """
        pieces = []
        for part in self.replace:
            if isinstance(part, int):
                # An int names a capture group to copy through verbatim.
                pieces.append(m.group(part))
            else:
                # Literal replacement text is stashed so later passes
                # leave it untouched.
                pieces.append(self.md.htmlStash.store(part))
        return ''.join(pieces), m.start(0), m.end(0)
+
+
class SmartyExtension(Extension):
    """ Add Smarty to Markdown. """

    def __init__(self, **kwargs):
        self.config = {
            'smart_quotes': [True, 'Educate quotes'],
            'smart_angled_quotes': [False, 'Educate angled quotes'],
            'smart_dashes': [True, 'Educate dashes'],
            'smart_ellipses': [True, 'Educate ellipses'],
            'substitutions': [{}, 'Overwrite default substitutions'],
        }
        """ Default configuration options. """
        super().__init__(**kwargs)
        # Start from the module defaults, then layer any user overrides.
        self.substitutions: dict[str, str] = dict(substitutions)
        self.substitutions.update(self.getConfig('substitutions', default={}))

    def _addPatterns(
        self,
        md: Markdown,
        patterns: Sequence[tuple[str, Sequence[int | str | etree.Element]]],
        serie: str,
        priority: int,
    ):
        """ Register one `SubstituteTextPattern` per `(regex, replacement)` pair.

        Priorities descend by one per pattern, so earlier entries win.
        """
        for offset, (regex, replacement) in enumerate(patterns):
            processor = SubstituteTextPattern(regex, replacement, md)
            name = 'smarty-%s-%d' % (serie, offset)
            self.inlinePatterns.register(processor, name, priority - offset)

    def educateDashes(self, md: Markdown) -> None:
        """ Convert `---` to an em dash and `--` to an en dash. """
        em = SubstituteTextPattern(
            r'(?<!-)---(?!-)', (self.substitutions['mdash'],), md
        )
        en = SubstituteTextPattern(
            r'(?<!-)--(?!-)', (self.substitutions['ndash'],), md
        )
        self.inlinePatterns.register(em, 'smarty-em-dashes', 50)
        self.inlinePatterns.register(en, 'smarty-en-dashes', 45)

    def educateEllipses(self, md: Markdown) -> None:
        """ Convert `...` to an ellipsis entity. """
        ellipses = SubstituteTextPattern(
            r'(?<!\.)\.{3}(?!\.)', (self.substitutions['ellipsis'],), md
        )
        self.inlinePatterns.register(ellipses, 'smarty-ellipses', 10)

    def educateAngledQuotes(self, md: Markdown) -> None:
        """ Convert `<<` / `>>` to guillemet entities. """
        left = SubstituteTextPattern(
            r'\<\<', (self.substitutions['left-angle-quote'],), md
        )
        right = SubstituteTextPattern(
            r'\>\>', (self.substitutions['right-angle-quote'],), md
        )
        self.inlinePatterns.register(left, 'smarty-left-angle-quotes', 40)
        self.inlinePatterns.register(right, 'smarty-right-angle-quotes', 35)

    def educateQuotes(self, md: Markdown) -> None:
        """ Register the quote-education patterns, most specific first. """
        subs = self.substitutions
        lsquo, rsquo = subs['left-single-quote'], subs['right-single-quote']
        ldquo, rdquo = subs['left-double-quote'], subs['right-double-quote']
        patterns = (
            (singleQuoteStartRe, (rsquo,)),
            (doubleQuoteStartRe, (rdquo,)),
            (doubleQuoteSetsRe, (ldquo + lsquo,)),
            (singleQuoteSetsRe, (lsquo + ldquo,)),
            (decadeAbbrRe, (rsquo,)),
            (openingSingleQuotesRegex, (1, lsquo)),
            (closingSingleQuotesRegex, (rsquo,)),
            (closingSingleQuotesRegex2, (rsquo, 1)),
            (remainingSingleQuotesRegex, (lsquo,)),
            (openingDoubleQuotesRegex, (1, ldquo)),
            (closingDoubleQuotesRegex, (rdquo,)),
            (closingDoubleQuotesRegex2, (rdquo,)),
            (remainingDoubleQuotesRegex, (ldquo,))
        )
        self._addPatterns(md, patterns, 'quotes', 30)

    def extendMarkdown(self, md):
        """ Wire up the configured educators on a private pattern registry. """
        configs = self.getConfigs()
        self.inlinePatterns: Registry[inlinepatterns.InlineProcessor] = Registry()
        if configs['smart_ellipses']:
            self.educateEllipses(md)
        if configs['smart_quotes']:
            self.educateQuotes(md)
        if configs['smart_angled_quotes']:
            self.educateAngledQuotes(md)
            # Override `HTML_RE` from `inlinepatterns.py` so that it does not
            # process tags with duplicate closing quotes.
            md.inlinePatterns.register(HtmlInlineProcessor(HTML_STRICT_RE, md), 'html', 90)
        if configs['smart_dashes']:
            self.educateDashes(md)
        # Run the substitutions from a dedicated tree processor so they
        # apply on their own pass, after regular inline processing.
        inlineProcessor = InlineProcessor(md)
        inlineProcessor.inlinePatterns = self.inlinePatterns
        md.treeprocessors.register(inlineProcessor, 'smarty', 6)
        md.ESCAPED_CHARS.extend(['"', "'"])
+
+
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an instance of `SmartyExtension`; `kwargs` are its options. """
    return SmartyExtension(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/markdown/extensions/tables.py b/.venv/lib/python3.12/site-packages/markdown/extensions/tables.py
new file mode 100644
index 00000000..6e2fa174
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/markdown/extensions/tables.py
@@ -0,0 +1,248 @@
+# Tables Extension for Python-Markdown
+# ====================================
+
+# Added parsing of tables to Python-Markdown.
+
+# See https://Python-Markdown.github.io/extensions/tables
+# for documentation.
+
+# Original code Copyright 2009 [Waylan Limberg](http://achinghead.com)
+
+# All changes Copyright 2008-2014 The Python Markdown Project
+
+# License: [BSD](https://opensource.org/licenses/bsd-license.php)
+
+"""
+Added parsing of tables to Python-Markdown.
+
+See the [documentation](https://Python-Markdown.github.io/extensions/tables)
+for details.
+"""
+
+from __future__ import annotations
+
+from . import Extension
+from ..blockprocessors import BlockProcessor
+import xml.etree.ElementTree as etree
+import re
+from typing import TYPE_CHECKING, Any, Sequence
+
+if TYPE_CHECKING: # pragma: no cover
+ from .. import blockparser
+
+PIPE_NONE = 0
+PIPE_LEFT = 1
+PIPE_RIGHT = 2
+
+
class TableProcessor(BlockProcessor):
    """ Process Tables. """

    # Tokenizes a row into escaped backslashes, (escaped) backtick runs,
    # escaped pipes, and bare pipes -- one capture group per kind.
    RE_CODE_PIPES = re.compile(r'(?:(\\\\)|(\\`+)|(`+)|(\\\|)|(\|))')
    # Matches a trailing `|` border preceded only by an even number of
    # backslashes (i.e. the pipe itself is not escaped).
    RE_END_BORDER = re.compile(r'(?<!\\)(?:\\\\)*\|$')

    def __init__(self, parser: blockparser.BlockParser, config: dict[str, Any]):
        # `border` and `separator` are computed in `test()` and reused by
        # `run()` on the same block, avoiding duplicate parsing work.
        self.border: bool | int = False
        self.separator: Sequence[str] = ''
        self.config = config

        super().__init__(parser)

    def test(self, parent: etree.Element, block: str) -> bool:
        """
        Ensure first two rows (column header and separator row) are valid table rows.

        Keep border check and separator row to avoid repeating the work.
        """
        is_table = False
        rows = [row.strip(' ') for row in block.split('\n')]
        if len(rows) > 1:
            header0 = rows[0]
            # Record which sides of the header carry a `|` border.
            self.border = PIPE_NONE
            if header0.startswith('|'):
                self.border |= PIPE_LEFT
            if self.RE_END_BORDER.search(header0) is not None:
                self.border |= PIPE_RIGHT
            row = self._split_row(header0)
            row0_len = len(row)
            is_table = row0_len > 1

            # Each row in a single column table needs at least one pipe.
            if not is_table and row0_len == 1 and self.border:
                for index in range(1, len(rows)):
                    is_table = rows[index].startswith('|')
                    if not is_table:
                        is_table = self.RE_END_BORDER.search(rows[index]) is not None
                    if not is_table:
                        break

            if is_table:
                # The separator row must match the header's column count and
                # contain only pipes, colons, dashes and spaces.
                row = self._split_row(rows[1])
                is_table = (len(row) == row0_len) and set(''.join(row)) <= set('|:- ')
                if is_table:
                    self.separator = row

        return is_table

    def run(self, parent: etree.Element, blocks: list[str]) -> None:
        """ Parse a table block and build table. """
        block = blocks.pop(0).split('\n')
        header = block[0].strip(' ')
        # Row 1 is the separator; body rows start at index 2 (if any).
        rows = [] if len(block) < 3 else block[2:]

        # Get alignment of columns
        align: list[str | None] = []
        for c in self.separator:
            c = c.strip(' ')
            if c.startswith(':') and c.endswith(':'):
                align.append('center')
            elif c.startswith(':'):
                align.append('left')
            elif c.endswith(':'):
                align.append('right')
            else:
                align.append(None)

        # Build table
        table = etree.SubElement(parent, 'table')
        thead = etree.SubElement(table, 'thead')
        self._build_row(header, thead, align)
        tbody = etree.SubElement(table, 'tbody')
        if len(rows) == 0:
            # Handle empty table
            self._build_empty_row(tbody, align)
        else:
            for row in rows:
                self._build_row(row.strip(' '), tbody, align)

    def _build_empty_row(self, parent: etree.Element, align: Sequence[str | None]) -> None:
        """Build an empty row."""
        tr = etree.SubElement(parent, 'tr')
        count = len(align)
        while count:
            etree.SubElement(tr, 'td')
            count -= 1

    def _build_row(self, row: str, parent: etree.Element, align: Sequence[str | None]) -> None:
        """ Given a row of text, build table cells. """
        tr = etree.SubElement(parent, 'tr')
        tag = 'td'
        if parent.tag == 'thead':
            tag = 'th'
        cells = self._split_row(row)
        # We use align here rather than cells to ensure every row
        # contains the same number of columns.
        for i, a in enumerate(align):
            c = etree.SubElement(tr, tag)
            try:
                c.text = cells[i].strip(' ')
            except IndexError:  # pragma: no cover
                c.text = ""
            if a:
                if self.config['use_align_attribute']:
                    c.set('align', a)
                else:
                    c.set('style', f'text-align: {a};')

    def _split_row(self, row: str) -> list[str]:
        """ split a row of text into list of cells. """
        # Strip the outer border pipes (detected in `test()`) before splitting.
        if self.border:
            if row.startswith('|'):
                row = row[1:]
            row = self.RE_END_BORDER.sub('', row)
        return self._split(row)

    def _split(self, row: str) -> list[str]:
        """ split a row of text with some code into a list of cells. """
        elements = []
        pipes = []
        tics = []
        tic_points = []
        tic_region = []
        good_pipes = []

        # Parse row
        # Throw out \\, and \|
        for m in self.RE_CODE_PIPES.finditer(row):
            # Store ` data (len, start_pos, end_pos)
            if m.group(2):
                # \`+
                # Store length of each tic group: subtract \
                tics.append(len(m.group(2)) - 1)
                # Store start of group, end of group, and escape length
                tic_points.append((m.start(2), m.end(2) - 1, 1))
            elif m.group(3):
                # `+
                # Store length of each tic group
                tics.append(len(m.group(3)))
                # Store start of group, end of group, and escape length
                tic_points.append((m.start(3), m.end(3) - 1, 0))
            # Store pipe location
            elif m.group(5):
                pipes.append(m.start(5))

        # Pair up tics according to size if possible
        # Subtract the escape length *only* from the opening.
        # Walk through tic list and see if tic has a close.
        # Store the tic region (start of region, end of region).
        pos = 0
        tic_len = len(tics)
        while pos < tic_len:
            try:
                tic_size = tics[pos] - tic_points[pos][2]
                if tic_size == 0:
                    # A lone escaped backtick cannot open a code span.
                    raise ValueError
                index = tics[pos + 1:].index(tic_size) + 1
                tic_region.append((tic_points[pos][0], tic_points[pos + index][1]))
                pos += index + 1
            except ValueError:
                pos += 1

        # Resolve pipes. Check if they are within a tic pair region.
        # Walk through pipes comparing them to each region.
        # - If pipe position is less that a region, it isn't in a region
        # - If it is within a region, we don't want it, so throw it out
        # - If we didn't throw it out, it must be a table pipe
        for pipe in pipes:
            throw_out = False
            for region in tic_region:
                if pipe < region[0]:
                    # Pipe is not in a region
                    break
                elif region[0] <= pipe <= region[1]:
                    # Pipe is within a code region. Throw it out.
                    throw_out = True
                    break
            if not throw_out:
                good_pipes.append(pipe)

        # Split row according to table delimiters.
        pos = 0
        for pipe in good_pipes:
            elements.append(row[pos:pipe])
            pos = pipe + 1
        elements.append(row[pos:])
        return elements
+
+
class TableExtension(Extension):
    """ Add tables to Markdown. """

    def __init__(self, **kwargs):
        self.config = {
            'use_align_attribute': [False, 'True to use align attribute instead of style.'],
        }
        """ Default configuration options. """

        super().__init__(**kwargs)

    def extendMarkdown(self, md):
        """ Add an instance of `TableProcessor` to `BlockParser`. """
        # `|` must be escapable so literal pipes can appear inside cells.
        escaped = md.ESCAPED_CHARS
        if '|' not in escaped:
            escaped.append('|')
        table_processor = TableProcessor(md.parser, self.getConfigs())
        md.parser.blockprocessors.register(table_processor, 'table', 75)
+
+
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an instance of the `TableExtension` with the given config options. """
    return TableExtension(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/markdown/extensions/toc.py b/.venv/lib/python3.12/site-packages/markdown/extensions/toc.py
new file mode 100644
index 00000000..5462a906
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/markdown/extensions/toc.py
@@ -0,0 +1,488 @@
+# Table of Contents Extension for Python-Markdown
+# ===============================================
+
+# See https://Python-Markdown.github.io/extensions/toc
+# for documentation.
+
+# Original code Copyright 2008 [Jack Miller](https://codezen.org/)
+
+# All changes Copyright 2008-2024 The Python Markdown Project
+
+# License: [BSD](https://opensource.org/licenses/bsd-license.php)
+
+"""
+Add table of contents support to Python-Markdown.
+
+See the [documentation](https://Python-Markdown.github.io/extensions/toc)
+for details.
+"""
+
+from __future__ import annotations
+
+from . import Extension
+from ..treeprocessors import Treeprocessor
+from ..util import parseBoolValue, AMP_SUBSTITUTE, deprecated, HTML_PLACEHOLDER_RE, AtomicString
+from ..treeprocessors import UnescapeTreeprocessor
+from ..serializers import RE_AMP
+import re
+import html
+import unicodedata
+from copy import deepcopy
+import xml.etree.ElementTree as etree
+from typing import TYPE_CHECKING, Any, Iterator, MutableSet
+
+if TYPE_CHECKING: # pragma: no cover
+ from markdown import Markdown
+
+
def slugify(value: str, separator: str, unicode: bool = False) -> str:
    """ Slugify a string, to make it URL friendly. """
    if not unicode:
        # Decompose accented characters and drop the non-ASCII marks,
        # e.g. `žlutý` => `zluty`.
        decomposed = unicodedata.normalize('NFKD', value)
        value = decomposed.encode('ascii', 'ignore').decode('ascii')
    # Keep only word characters, whitespace and hyphens, then lowercase.
    cleaned = re.sub(r'[^\w\s-]', '', value).strip().lower()
    # Collapse whitespace and repeated separators into single separators.
    return re.sub(r'[{}\s]+'.format(separator), separator, cleaned)
+
+
def slugify_unicode(value: str, separator: str) -> str:
    """ Slugify a string, to make it URL friendly while preserving Unicode characters. """
    # Thin wrapper: identical to `slugify` with the ASCII fold disabled.
    return slugify(value, separator, True)
+
+
# Matches an id that already carries a numeric `_N` suffix.
IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')


def unique(id: str, ids: MutableSet[str]) -> str:
    """ Ensure id is unique in set of ids. Append '_1', '_2'... if not """
    while not id or id in ids:
        suffixed = IDCOUNT_RE.match(id)
        if suffixed:
            # Bump the existing numeric suffix.
            id = '%s_%d' % (suffixed.group(1), int(suffixed.group(2)) + 1)
        else:
            id = '%s_%d' % (id, 1)
    ids.add(id)
    return id
+
+
@deprecated('Use `render_inner_html` and `striptags` instead.')
def get_name(el: etree.Element) -> str:
    """Get title name."""

    pieces = []
    for chunk in el.itertext():
        # `AtomicString` chunks are unescaped; plain strings are kept as-is.
        pieces.append(html.unescape(chunk) if isinstance(chunk, AtomicString) else chunk)
    return ''.join(pieces).strip()
+
+
@deprecated('Use `run_postprocessors`, `render_inner_html` and/or `striptags` instead.')
def stashedHTML2text(text: str, md: Markdown, strip_entities: bool = True) -> str:
    """ Extract raw HTML from stash, reduce to plain text and swap with placeholder. """
    def _html_sub(m: re.Match[str]) -> str:
        """ Substitute raw html with plain text. """
        try:
            raw = md.htmlStash.rawHtmlBlocks[int(m.group(1))]
        except (IndexError, TypeError):  # pragma: no cover
            # Unknown placeholder index: leave the placeholder untouched.
            return m.group(0)
        # Drop tags -- and optionally entities -- keeping only the text.
        plain = re.sub(r'(<[^>]+>)', '', raw)
        if strip_entities:
            plain = re.sub(r'(&[\#a-zA-Z0-9]+;)', '', plain)
        return plain

    return HTML_PLACEHOLDER_RE.sub(_html_sub, text)
+
+
def unescape(text: str) -> str:
    """ Unescape Markdown backslash escaped text. """
    # Delegate to the tree processor that owns the unescaping logic.
    return UnescapeTreeprocessor().unescape(text)
+
+
def strip_tags(text: str) -> str:
    """ Strip HTML tags and return plain text. Note: HTML entities are unaffected. """
    # A comment could contain a tag, so strip comments first.
    while True:
        open_pos = text.find('<!--')
        if open_pos == -1:
            break
        close_pos = text.find('-->', open_pos)
        if close_pos == -1:
            break
        text = text[:open_pos] + text[close_pos + 3:]

    # Then drop every remaining `<...>` span.
    while True:
        lt = text.find('<')
        if lt == -1:
            break
        gt = text.find('>', lt)
        if gt == -1:
            break
        text = text[:lt] + text[gt + 1:]

    # Collapse runs of whitespace into single spaces.
    return ' '.join(text.split())
+
+
def escape_cdata(text: str) -> str:
    """ Escape character data. """
    if "&" in text:
        # Only replace & when not part of an entity
        text = RE_AMP.sub('&amp;', text)
    # `str.replace` is a no-op when the character is absent.
    return text.replace("<", "&lt;").replace(">", "&gt;")
+
+
def run_postprocessors(text: str, md: Markdown) -> str:
    """ Run postprocessors from Markdown instance on text. """
    # Feed the text through every registered postprocessor in order.
    for postprocessor in md.postprocessors:
        text = postprocessor.run(text)
    return text.strip()
+
+
def render_inner_html(el: etree.Element, md: Markdown) -> str:
    """ Fully render inner html of an `etree` element as a string. """
    # The `UnescapeTreeprocessor` runs after `toc` extension so run here.
    serialized = unescape(md.serializer(el))

    # Strip the wrapping parent tag, keeping only its inner content.
    inner = serialized[serialized.index('>') + 1:serialized.rindex('<')].strip()

    return run_postprocessors(inner, md)
+
+
def remove_fnrefs(root: etree.Element) -> etree.Element:
    """ Remove footnote references from a copy of the element, if any are present. """
    # Remove footnote references, which look like this: `<sup id="fnref:1">...</sup>`.
    # If there are no `sup` elements, then nothing to do.
    if next(root.iter('sup'), None) is None:
        return root
    # Work on a copy so the rendered document keeps its footnote refs.
    root = deepcopy(root)
    # Find parent elements that contain `sup` elements.
    for parent in root.findall('.//sup/..'):
        carry_text = ""
        for child in reversed(parent):  # Reversed for the ability to mutate during iteration.
            # Remove matching footnote references but carry any `tail` text to preceding elements.
            if child.tag == 'sup' and child.get('id', '').startswith('fnref'):
                carry_text = f'{child.tail or ""}{carry_text}'
                parent.remove(child)
            elif carry_text:
                child.tail = f'{child.tail or ""}{carry_text}'
                carry_text = ""
        if carry_text:
            # No preceding sibling was left to take the text; attach it to the parent.
            parent.text = f'{parent.text or ""}{carry_text}'
    return root
+
+
def nest_toc_tokens(toc_list):
    """Given an unsorted list with errors and skips, return a nested one.

    [{'level': 1}, {'level': 2}]
    =>
    [{'level': 1, 'children': [{'level': 2, 'children': []}]}]

    A wrong list is also converted:

    [{'level': 2}, {'level': 1}]
    =>
    [{'level': 2, 'children': []}, {'level': 1, 'children': []}]
    """

    ordered_list = []
    if len(toc_list):
        # Initialize everything by processing the first entry
        last = toc_list.pop(0)
        last['children'] = []
        levels = [last['level']]
        ordered_list.append(last)
        # `levels` is a stack of currently-open heading levels;
        # `parents` is the matching stack of open parent tokens.
        parents = []

        # Walk the rest nesting the entries properly
        while toc_list:
            t = toc_list.pop(0)
            current_level = t['level']
            t['children'] = []

            # Reduce depth if current level < last item's level
            if current_level < levels[-1]:
                # Pop last level since we know we are less than it
                levels.pop()

                # Pop parents and levels we are less than or equal to
                to_pop = 0
                for p in reversed(parents):
                    if current_level <= p['level']:
                        to_pop += 1
                    else:  # pragma: no cover
                        break
                if to_pop:
                    levels = levels[:-to_pop]
                    parents = parents[:-to_pop]

                # Note current level as last
                levels.append(current_level)

            # Level is the same, so append to
            # the current parent (if available)
            if current_level == levels[-1]:
                (parents[-1]['children'] if parents
                 else ordered_list).append(t)

            # Current level is > last item's level,
            # So make last item a parent and append current as child
            else:
                last['children'].append(t)
                parents.append(last)
                levels.append(current_level)
                last = t

    return ordered_list
+
+
class TocTreeprocessor(Treeprocessor):
    """ Step through document and build TOC. """

    def __init__(self, md: Markdown, config: dict[str, Any]):
        super().__init__(md)

        self.marker: str = config["marker"]
        self.title: str = config["title"]
        self.base_level = int(config["baselevel"]) - 1
        self.slugify = config["slugify"]
        self.sep = config["separator"]
        self.toc_class = config["toc_class"]
        self.title_class: str = config["title_class"]
        self.use_anchors: bool = parseBoolValue(config["anchorlink"])
        self.anchorlink_class: str = config["anchorlink_class"]
        # `permalink` may be a bool or the literal link text to use.
        self.use_permalinks = parseBoolValue(config["permalink"], False)
        if self.use_permalinks is None:
            self.use_permalinks = config["permalink"]
        self.permalink_class: str = config["permalink_class"]
        self.permalink_title: str = config["permalink_title"]
        self.permalink_leading: bool | None = parseBoolValue(config["permalink_leading"], False)
        self.header_rgx = re.compile("[Hh][123456]")
        # `toc_depth` is either a single int (bottom level only) or a
        # `top-bottom` string range such as `2-5`.
        if isinstance(config["toc_depth"], str) and '-' in config["toc_depth"]:
            self.toc_top, self.toc_bottom = [int(x) for x in config["toc_depth"].split('-')]
        else:
            self.toc_top = 1
            self.toc_bottom = int(config["toc_depth"])

    def iterparent(self, node: etree.Element) -> Iterator[tuple[etree.Element, etree.Element]]:
        """ Iterator wrapper to get allowed parent and child all at once. """

        # We do not allow the marker inside a header as that
        # would causes an endless loop of placing a new TOC
        # inside previously generated TOC.
        for child in node:
            if not self.header_rgx.match(child.tag) and child.tag not in ['pre', 'code']:
                yield node, child
                yield from self.iterparent(child)

    def replace_marker(self, root: etree.Element, elem: etree.Element) -> None:
        """ Replace marker with elem. """
        for (p, c) in self.iterparent(root):
            text = ''.join(c.itertext()).strip()
            if not text:
                continue

            # To keep the output from screwing up the
            # validation by putting a `<div>` inside of a `<p>`
            # we actually replace the `<p>` in its entirety.

            # The `<p>` element may contain more than a single text content
            # (`nl2br` can introduce a `<br>`). In this situation, `c.text` returns
            # the very first content, ignore children contents or tail content.
            # `len(c) == 0` is here to ensure there is only text in the `<p>`.
            if c.text and c.text.strip() == self.marker and len(c) == 0:
                for i in range(len(p)):
                    if p[i] == c:
                        p[i] = elem
                        break

    def set_level(self, elem: etree.Element) -> None:
        """ Adjust header level according to base level. """
        level = int(elem.tag[-1]) + self.base_level
        if level > 6:
            # `h6` is the deepest heading HTML allows; clamp anything lower.
            level = 6
        elem.tag = 'h%d' % level

    def add_anchor(self, c: etree.Element, elem_id: str) -> None:
        """ Wrap the header's entire content in an anchor linking to `elem_id`. """
        anchor = etree.Element("a")
        anchor.text = c.text
        anchor.attrib["href"] = "#" + elem_id
        anchor.attrib["class"] = self.anchorlink_class
        c.text = ""
        # Move all existing children inside the anchor.
        for elem in c:
            anchor.append(elem)
        while len(c):
            c.remove(c[0])
        c.append(anchor)

    def add_permalink(self, c: etree.Element, elem_id: str) -> None:
        """ Add a permalink anchor to the header, either leading or trailing. """
        permalink = etree.Element("a")
        # Use a pilcrow-like `&para;` entity unless custom link text was configured.
        permalink.text = ("%spara;" % AMP_SUBSTITUTE
                          if self.use_permalinks is True
                          else self.use_permalinks)
        permalink.attrib["href"] = "#" + elem_id
        permalink.attrib["class"] = self.permalink_class
        if self.permalink_title:
            permalink.attrib["title"] = self.permalink_title
        if self.permalink_leading:
            permalink.tail = c.text
            c.text = ""
            c.insert(0, permalink)
        else:
            c.append(permalink)

    def build_toc_div(self, toc_list: list) -> etree.Element:
        """ Return a string div given a toc list. """
        div = etree.Element("div")
        div.attrib["class"] = self.toc_class

        # Add title to the div
        if self.title:
            header = etree.SubElement(div, "span")
            if self.title_class:
                header.attrib["class"] = self.title_class
            header.text = self.title

        def build_etree_ul(toc_list: list, parent: etree.Element) -> etree.Element:
            ul = etree.SubElement(parent, "ul")
            for item in toc_list:
                # List item link, to be inserted into the toc div
                li = etree.SubElement(ul, "li")
                link = etree.SubElement(li, "a")
                link.text = item.get('name', '')
                link.attrib["href"] = '#' + item.get('id', '')
                if item['children']:
                    build_etree_ul(item['children'], li)
            return ul

        build_etree_ul(toc_list, div)

        if 'prettify' in self.md.treeprocessors:
            self.md.treeprocessors['prettify'].run(div)

        return div

    def run(self, doc: etree.Element) -> None:
        """ Assign header ids, collect `toc_tokens` and swap any marker for the TOC. """
        # Get a list of id attributes
        used_ids = set()
        for el in doc.iter():
            if "id" in el.attrib:
                used_ids.add(el.attrib["id"])

        toc_tokens = []
        for el in doc.iter():
            if isinstance(el.tag, str) and self.header_rgx.match(el.tag):
                self.set_level(el)
                innerhtml = render_inner_html(remove_fnrefs(el), self.md)
                name = strip_tags(innerhtml)

                # Do not override pre-existing ids
                if "id" not in el.attrib:
                    el.attrib["id"] = unique(self.slugify(html.unescape(name), self.sep), used_ids)

                data_toc_label = ''
                if 'data-toc-label' in el.attrib:
                    data_toc_label = run_postprocessors(unescape(el.attrib['data-toc-label']), self.md)
                    # Overwrite name with sanitized value of `data-toc-label`.
                    name = escape_cdata(strip_tags(data_toc_label))
                    # Remove the data-toc-label attribute as it is no longer needed
                    del el.attrib['data-toc-label']

                # Only record headers that fall within the configured depth range.
                if int(el.tag[-1]) >= self.toc_top and int(el.tag[-1]) <= self.toc_bottom:
                    toc_tokens.append({
                        'level': int(el.tag[-1]),
                        'id': el.attrib["id"],
                        'name': name,
                        'html': innerhtml,
                        'data-toc-label': data_toc_label
                    })

                if self.use_anchors:
                    self.add_anchor(el, el.attrib["id"])
                if self.use_permalinks not in [False, None]:
                    self.add_permalink(el, el.attrib["id"])

        toc_tokens = nest_toc_tokens(toc_tokens)
        div = self.build_toc_div(toc_tokens)
        if self.marker:
            self.replace_marker(doc, div)

        # serialize and attach to markdown instance.
        toc = self.md.serializer(div)
        for pp in self.md.postprocessors:
            toc = pp.run(toc)
        self.md.toc_tokens = toc_tokens
        self.md.toc = toc
+
+
class TocExtension(Extension):
    """ Add table of contents support to Markdown. """

    # Subclasses may swap in a customized tree processor.
    TreeProcessorClass = TocTreeprocessor

    def __init__(self, **kwargs):
        self.config = {
            'marker': [
                '[TOC]',
                'Text to find and replace with Table of Contents. Set to an empty string to disable. '
                'Default: `[TOC]`.'
            ],
            'title': [
                '', 'Title to insert into TOC `<div>`. Default: an empty string.'
            ],
            'title_class': [
                'toctitle', 'CSS class used for the title. Default: `toctitle`.'
            ],
            'toc_class': [
                # Description fixed: this option styles the TOC `<div>` and its
                # actual default is `toc`, not `toclink`.
                'toc', 'CSS class(es) used for the `<div>`. Default: `toc`.'
            ],
            'anchorlink': [
                False, 'True if header should be a self link. Default: `False`.'
            ],
            'anchorlink_class': [
                'toclink', 'CSS class(es) used for the link. Default: `toclink`.'
            ],
            'permalink': [
                0, 'True or link text if a Sphinx-style permalink should be added. Default: `False`.'
            ],
            'permalink_class': [
                'headerlink', 'CSS class(es) used for the link. Default: `headerlink`.'
            ],
            'permalink_title': [
                'Permanent link', 'Title attribute of the permalink. Default: `Permanent link`.'
            ],
            'permalink_leading': [
                False,
                'True if permalinks should be placed at start of the header, rather than end. Default: False.'
            ],
            'baselevel': ['1', 'Base level for headers. Default: `1`.'],
            'slugify': [
                slugify, 'Function to generate anchors based on header text. Default: `slugify`.'
            ],
            'separator': ['-', 'Word separator. Default: `-`.'],
            'toc_depth': [
                6,
                'Define the range of section levels to include in the Table of Contents. A single integer '
                '(b) defines the bottom section level (<h1>..<hb>) only. A string consisting of two digits '
                'separated by a hyphen in between (`2-5`) defines the top (t) and the bottom (b) (<ht>..<hb>). '
                'Default: `6` (bottom).'
            ],
        }
        """ Default configuration options. """

        super().__init__(**kwargs)

    def extendMarkdown(self, md):
        """ Add TOC tree processor to Markdown. """
        md.registerExtension(self)
        self.md = md
        self.reset()
        tocext = self.TreeProcessorClass(md, self.getConfigs())
        md.treeprocessors.register(tocext, 'toc', 5)

    def reset(self) -> None:
        """ Clear the TOC attributes on the `Markdown` instance between documents. """
        self.md.toc = ''
        self.md.toc_tokens = []
+
+
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an instance of the `TocExtension` with the given config options. """
    return TocExtension(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/markdown/extensions/wikilinks.py b/.venv/lib/python3.12/site-packages/markdown/extensions/wikilinks.py
new file mode 100644
index 00000000..3f3cbe2d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/markdown/extensions/wikilinks.py
@@ -0,0 +1,97 @@
+# WikiLinks Extension for Python-Markdown
+# ======================================
+
+# Converts [[WikiLinks]] to relative links.
+
+# See https://Python-Markdown.github.io/extensions/wikilinks
+# for documentation.
+
+# Original code Copyright [Waylan Limberg](http://achinghead.com/).
+
+# All changes Copyright The Python Markdown Project
+
+# License: [BSD](https://opensource.org/licenses/bsd-license.php)
+
+"""
+Converts `[[WikiLinks]]` to relative links.
+
+See the [documentation](https://Python-Markdown.github.io/extensions/wikilinks)
+for details.
+"""
+
+from __future__ import annotations
+
+from . import Extension
+from ..inlinepatterns import InlineProcessor
+import xml.etree.ElementTree as etree
+import re
+from typing import Any
+
+
def build_url(label: str, base: str, end: str) -> str:
    """ Build a URL from the label, a base, and an end. """
    # Collapse runs of spaces (optionally hugging an underscore) into one `_`.
    clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
    return f'{base}{clean_label}{end}'
+
+
class WikiLinkExtension(Extension):
    """ Add inline processor to Markdown. """

    def __init__(self, **kwargs):
        self.config = {
            # Help-string typo fixed: "beginning or URL" -> "beginning of URL".
            'base_url': ['/', 'String to append to beginning of URL.'],
            'end_url': ['/', 'String to append to end of URL.'],
            'html_class': ['wikilink', 'CSS hook. Leave blank for none.'],
            'build_url': [build_url, 'Callable formats URL from label.'],
        }
        """ Default configuration options. """
        super().__init__(**kwargs)

    def extendMarkdown(self, md):
        """ Register the `[[WikiLink]]` inline processor with Markdown. """
        self.md = md

        # append to end of inline patterns
        WIKILINK_RE = r'\[\[([\w0-9_ -]+)\]\]'
        wikilinkPattern = WikiLinksInlineProcessor(WIKILINK_RE, self.getConfigs())
        wikilinkPattern.md = md
        md.inlinePatterns.register(wikilinkPattern, 'wikilink', 75)
+
+
class WikiLinksInlineProcessor(InlineProcessor):
    """ Build link from `wikilink`. """

    def __init__(self, pattern: str, config: dict[str, Any]):
        super().__init__(pattern)
        self.config = config

    def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element | str, int, int]:
        """ Turn a matched `[[label]]` into an `<a>` element (or drop it if blank). """
        label = m.group(1).strip()
        if not label:
            # A whitespace-only label produces no link at all.
            return '', m.start(0), m.end(0)
        base_url, end_url, html_class = self._getMeta()
        url = self.config['build_url'](label, base_url, end_url)
        anchor = etree.Element('a')
        anchor.text = label
        anchor.set('href', url)
        if html_class:
            anchor.set('class', html_class)
        return anchor, m.start(0), m.end(0)

    def _getMeta(self) -> tuple[str, str, str]:
        """ Return meta data or `config` data. """
        base_url = self.config['base_url']
        end_url = self.config['end_url']
        html_class = self.config['html_class']
        # Document-level metadata (from the `meta` extension) overrides config.
        if hasattr(self.md, 'Meta'):
            meta = self.md.Meta
            if 'wiki_base_url' in meta:
                base_url = meta['wiki_base_url'][0]
            if 'wiki_end_url' in meta:
                end_url = meta['wiki_end_url'][0]
            if 'wiki_html_class' in meta:
                html_class = meta['wiki_html_class'][0]
        return base_url, end_url, html_class
+
+
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an instance of the `WikiLinkExtension` with the given config options. """
    return WikiLinkExtension(**kwargs)