Diffstat (limited to '.venv/lib/python3.12/site-packages/strictyaml/ruamel')
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/__init__.py        |   63
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/anchor.py          |   19
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/comments.py        | 1158
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/compat.py          |  333
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/composer.py        |  243
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/configobjwalker.py |   16
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/constructor.py     | 1891
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/cyaml.py           |  192
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/dumper.py          |  221
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/emitter.py         | 1738
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/error.py           |  321
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/events.py          |  159
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/loader.py          |   76
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/main.py            | 1581
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/nodes.py           |  142
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/parser.py          |  844
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/reader.py          |  325
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/representer.py     | 1335
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/resolver.py        |  410
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/scalarbool.py      |   51
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/scalarfloat.py     |  137
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/scalarint.py       |  140
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/scalarstring.py    |  156
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/scanner.py         | 2017
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/serializer.py      |  256
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/timestamp.py       |   66
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/tokens.py          |  288
 .venv/lib/python3.12/site-packages/strictyaml/ruamel/util.py            |  190
 28 files changed, 14368 insertions(+), 0 deletions(-)
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/__init__.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/__init__.py
new file mode 100644
index 00000000..4ec67b39
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/__init__.py
@@ -0,0 +1,63 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+if False:  # MYPY
+    from typing import Dict, Any  # NOQA
+
+_package_data = dict(
+    full_package_name="strictyaml.ruamel",
+    version_info=(0, 16, 13),
+    __version__="0.16.13",
+    author="Anthon van der Neut",
+    author_email="a.van.der.neut@ruamel.eu",
+    description="strictyaml.ruamel is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order",  # NOQA
+    entry_points=None,
+    since=2014,
+    extras_require={
+        ':platform_python_implementation=="CPython" and python_version<="2.7"': [
+            "ruamel.ordereddict"
+        ],  # NOQA
+        ':platform_python_implementation=="CPython" and python_version<"3.10"': [
+            "strictyaml.ruamel.clib>=0.1.2"
+        ],  # NOQA
+        "jinja2": ["strictyaml.ruamel.jinja2>=0.2"],
+        "docs": ["ryd"],
+    },
+    classifiers=[
+        "Programming Language :: Python :: 2.7",
+        "Programming Language :: Python :: 3.5",
+        "Programming Language :: Python :: 3.6",
+        "Programming Language :: Python :: 3.7",
+        "Programming Language :: Python :: 3.8",
+        "Programming Language :: Python :: Implementation :: CPython",
+        "Programming Language :: Python :: Implementation :: PyPy",
+        "Programming Language :: Python :: Implementation :: Jython",
+        "Topic :: Software Development :: Libraries :: Python Modules",
+        "Topic :: Text Processing :: Markup",
+        "Typing :: Typed",
+    ],
+    keywords="yaml 1.2 parser round-trip preserve quotes order config",
+    read_the_docs="yaml",
+    supported=[(2, 7), (3, 5)],  # minimum
+    tox=dict(
+        env="*",  # remove 'pn', no longer test narrow Python 2.7 for unicode patterns and PyPy
+        deps="ruamel.std.pathlib",
+        fl8excl="_test/lib",
+    ),
+    universal=True,
+    rtfd="yaml",
+)  # type: Dict[Any, Any]
+
+
+version_info = _package_data["version_info"]
+__version__ = _package_data["__version__"]
+
+try:
+    from .cyaml import *  # NOQA
+
+    __with_libyaml__ = True
+except (ImportError, ValueError):  # for Jython
+    __with_libyaml__ = False
+
+from strictyaml.ruamel.main import *  # NOQA
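+
+# Usage sketch: the package exposes its version data and whether the
+# libyaml-based cyaml extension was importable (environment dependent):
+#
+#     >>> import strictyaml.ruamel
+#     >>> strictyaml.ruamel.__version__
+#     '0.16.13'
+#     >>> strictyaml.ruamel.version_info
+#     (0, 16, 13)
+#     >>> strictyaml.ruamel.__with_libyaml__   # doctest: +SKIP
+#     False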
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/anchor.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/anchor.py
new file mode 100644
index 00000000..361958ca
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/anchor.py
@@ -0,0 +1,19 @@
+if False:  # MYPY
+    from typing import Any, Dict, Optional, List, Union, Iterator  # NOQA
+
+anchor_attrib = "_yaml_anchor"
+
+
+class Anchor(object):
+    __slots__ = "value", "always_dump"
+    attrib = anchor_attrib
+
+    def __init__(self):
+        # type: () -> None
+        self.value = None
+        self.always_dump = False
+
+    def __repr__(self):
+        # type: () -> Any
+        ad = ", (always dump)" if self.always_dump else ""
+        return "Anchor({!r}{})".format(self.value, ad)
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/comments.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/comments.py
new file mode 100644
index 00000000..4e855517
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/comments.py
@@ -0,0 +1,1158 @@
+# coding: utf-8
+
+from __future__ import absolute_import, print_function
+
+"""
+stuff to deal with comments and formatting on dict/list/ordereddict/set
+these are not really related, formatting could be factored out as
+a separate base
+"""
+
+import sys
+import copy
+
+
+from strictyaml.ruamel.compat import ordereddict  # type: ignore
+from strictyaml.ruamel.compat import PY2, string_types, MutableSliceableSequence
+from strictyaml.ruamel.scalarstring import ScalarString
+from strictyaml.ruamel.anchor import Anchor
+
+if PY2:
+    from collections import MutableSet, Sized, Set, Mapping
+else:
+    from collections.abc import MutableSet, Sized, Set, Mapping
+
+if False:  # MYPY
+    from typing import Any, Dict, Optional, List, Union, Iterator  # NOQA
+
+# fmt: off
+__all__ = ['CommentedSeq', 'CommentedKeySeq',
+           'CommentedMap', 'CommentedOrderedMap',
+           'CommentedSet', 'comment_attrib', 'merge_attrib']
+# fmt: on
+
+comment_attrib = "_yaml_comment"
+format_attrib = "_yaml_format"
+line_col_attrib = "_yaml_line_col"
+merge_attrib = "_yaml_merge"
+tag_attrib = "_yaml_tag"
+
+
+class Comment(object):
+    # sys.getsizeof() tested on Comment objects: __slots__ makes them bigger,
+    # and adding self.end did not matter
+    __slots__ = "comment", "_items", "_end", "_start"
+    attrib = comment_attrib
+
+    def __init__(self):
+        # type: () -> None
+        self.comment = None  # [post, [pre]]
+        # map key (mapping/omap/dict) or index (sequence/list) to a list of
+        # dict: post_key, pre_key, post_value, pre_value
+        # list: pre item, post item
+        self._items = {}  # type: Dict[Any, Any]
+        # self._start = [] # should not put these on first item
+        self._end = []  # type: List[Any] # end of document comments
+
+    def __str__(self):
+        # type: () -> str
+        if bool(self._end):
+            end = ",\n  end=" + str(self._end)
+        else:
+            end = ""
+        return "Comment(comment={0},\n  items={1}{2})".format(
+            self.comment, self._items, end
+        )
+
+    @property
+    def items(self):
+        # type: () -> Any
+        return self._items
+
+    @property
+    def end(self):
+        # type: () -> Any
+        return self._end
+
+    @end.setter
+    def end(self, value):
+        # type: (Any) -> None
+        self._end = value
+
+    @property
+    def start(self):
+        # type: () -> Any
+        return self._start
+
+    @start.setter
+    def start(self, value):
+        # type: (Any) -> None
+        self._start = value
+
+
+# to distinguish key from None
+def NoComment():
+    # type: () -> None
+    pass
+
+
+class Format(object):
+    __slots__ = ("_flow_style",)
+    attrib = format_attrib
+
+    def __init__(self):
+        # type: () -> None
+        self._flow_style = None  # type: Any
+
+    def set_flow_style(self):
+        # type: () -> None
+        self._flow_style = True
+
+    def set_block_style(self):
+        # type: () -> None
+        self._flow_style = False
+
+    def flow_style(self, default=None):
+        # type: (Optional[Any]) -> Any
+        """if default (the flow_style) is None, the flow style tacked on to
+        the object explicitly will be taken. If that is None as well the
+        default flow style rules the format down the line, or the type
+        of the constituent values (simple -> flow, map/list -> block)"""
+        if self._flow_style is None:
+            return default
+        return self._flow_style
+
+
+class LineCol(object):
+    attrib = line_col_attrib
+
+    def __init__(self):
+        # type: () -> None
+        self.line = None
+        self.col = None
+        self.data = None  # type: Optional[Dict[Any, Any]]
+
+    def add_kv_line_col(self, key, data):
+        # type: (Any, Any) -> None
+        if self.data is None:
+            self.data = {}
+        self.data[key] = data
+
+    def key(self, k):
+        # type: (Any) -> Any
+        return self._kv(k, 0, 1)
+
+    def value(self, k):
+        # type: (Any) -> Any
+        return self._kv(k, 2, 3)
+
+    def _kv(self, k, x0, x1):
+        # type: (Any, Any, Any) -> Any
+        if self.data is None:
+            return None
+        data = self.data[k]
+        return data[x0], data[x1]
+
+    def item(self, idx):
+        # type: (Any) -> Any
+        if self.data is None:
+            return None
+        return self.data[idx][0], self.data[idx][1]
+
+    def add_idx_line_col(self, key, data):
+        # type: (Any, Any) -> None
+        if self.data is None:
+            self.data = {}
+        self.data[key] = data
+
+
+class Tag(object):
+    """store tag information for roundtripping"""
+
+    __slots__ = ("value",)
+    attrib = tag_attrib
+
+    def __init__(self):
+        # type: () -> None
+        self.value = None
+
+    def __repr__(self):
+        # type: () -> Any
+        return "{0.__class__.__name__}({0.value!r})".format(self)
+
+
+class CommentedBase(object):
+    @property
+    def ca(self):
+        # type: () -> Any
+        if not hasattr(self, Comment.attrib):
+            setattr(self, Comment.attrib, Comment())
+        return getattr(self, Comment.attrib)
+
+    def yaml_end_comment_extend(self, comment, clear=False):
+        # type: (Any, bool) -> None
+        if comment is None:
+            return
+        if clear or self.ca.end is None:
+            self.ca.end = []
+        self.ca.end.extend(comment)
+
+    def yaml_key_comment_extend(self, key, comment, clear=False):
+        # type: (Any, Any, bool) -> None
+        r = self.ca._items.setdefault(key, [None, None, None, None])
+        if clear or r[1] is None:
+            if comment[1] is not None:
+                assert isinstance(comment[1], list)
+            r[1] = comment[1]
+        else:
+            r[1].extend(comment[0])
+        r[0] = comment[0]
+
+    def yaml_value_comment_extend(self, key, comment, clear=False):
+        # type: (Any, Any, bool) -> None
+        r = self.ca._items.setdefault(key, [None, None, None, None])
+        if clear or r[3] is None:
+            if comment[1] is not None:
+                assert isinstance(comment[1], list)
+            r[3] = comment[1]
+        else:
+            r[3].extend(comment[0])
+        r[2] = comment[0]
+
+    def yaml_set_start_comment(self, comment, indent=0):
+        # type: (Any, Any) -> None
+        """overwrites any preceding comment lines on an object
+        expects comment to be without `#` and possibly have multiple lines
+        """
+        from .error import CommentMark
+        from .tokens import CommentToken
+
+        pre_comments = self._yaml_get_pre_comment()
+        if comment[-1] == "\n":
+            comment = comment[:-1]  # strip final newline if there
+        start_mark = CommentMark(indent)
+        for com in comment.split("\n"):
+            c = com.strip()
+            if len(c) > 0 and c[0] != "#":
+                com = "# " + com
+            pre_comments.append(CommentToken(com + "\n", start_mark, None))
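+
+    # Usage sketch, assuming the round_trip_load/round_trip_dump helpers
+    # from strictyaml.ruamel.main:
+    #
+    #     >>> data = round_trip_load("a: 1\nb: 2\n")
+    #     >>> data.yaml_set_start_comment("header one\nheader two")
+    #     >>> print(round_trip_dump(data), end="")
+    #     # header one
+    #     # header two
+    #     a: 1
+    #     b: 2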
+
+    def yaml_set_comment_before_after_key(
+        self, key, before=None, indent=0, after=None, after_indent=None
+    ):
+        # type: (Any, Any, Any, Any, Any) -> None
+        """
+        expects comment (before/after) to be without `#` and possibly have multiple lines
+        """
+        from strictyaml.ruamel.error import CommentMark
+        from strictyaml.ruamel.tokens import CommentToken
+
+        def comment_token(s, mark):
+            # type: (Any, Any) -> Any
+            # handle empty lines as having no comment
+            return CommentToken(("# " if s else "") + s + "\n", mark, None)
+
+        if after_indent is None:
+            after_indent = indent + 2
+        if before and (len(before) > 1) and before[-1] == "\n":
+            before = before[:-1]  # strip final newline if there
+        if after and after[-1] == "\n":
+            after = after[:-1]  # strip final newline if there
+        start_mark = CommentMark(indent)
+        c = self.ca.items.setdefault(key, [None, [], None, None])
+        if before == "\n":
+            c[1].append(comment_token("", start_mark))
+        elif before:
+            for com in before.split("\n"):
+                c[1].append(comment_token(com, start_mark))
+        if after:
+            start_mark = CommentMark(after_indent)
+            if c[3] is None:
+                c[3] = []
+            for com in after.split("\n"):
+                c[3].append(comment_token(com, start_mark))  # type: ignore
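+
+    # Usage sketch: add comment lines before (and optionally after) a key,
+    # again assuming the round-trip helpers from strictyaml.ruamel.main:
+    #
+    #     >>> data = round_trip_load("a: 1\nb: 2\n")
+    #     >>> data.yaml_set_comment_before_after_key("b", before="about b")
+    #     >>> print(round_trip_dump(data), end="")
+    #     a: 1
+    #     # about b
+    #     b: 2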
+
+    @property
+    def fa(self):
+        # type: () -> Any
+        """format attribute
+
+        set_flow_style()/set_block_style()"""
+        if not hasattr(self, Format.attrib):
+            setattr(self, Format.attrib, Format())
+        return getattr(self, Format.attrib)
+
+    def yaml_add_eol_comment(self, comment, key=NoComment, column=None):
+        # type: (Any, Optional[Any], Optional[Any]) -> None
+        """
+        there is a problem, as eol comments should start with ' #'
+        (but at the beginning of the line the space doesn't have to precede
+        the '#'). The column index is for the '#' mark
+        """
+        from .tokens import CommentToken
+        from .error import CommentMark
+
+        if column is None:
+            try:
+                column = self._yaml_get_column(key)
+            except AttributeError:
+                column = 0
+        if comment[0] != "#":
+            comment = "# " + comment
+        if column is None:
+            if comment[0] == "#":
+                comment = " " + comment
+                column = 0
+        start_mark = CommentMark(column)
+        ct = [CommentToken(comment, start_mark, None), None]
+        self._yaml_add_eol_comment(ct, key=key)
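+
+    # Usage sketch: attach an end-of-line comment to the line holding `key`;
+    # exact spacing depends on the computed column (output approximate):
+    #
+    #     >>> data = round_trip_load("a: 1\n")
+    #     >>> data.yaml_add_eol_comment("one", key="a", column=8)
+    #     >>> round_trip_dump(data)
+    #     'a: 1    # one\n'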
+
+    @property
+    def lc(self):
+        # type: () -> Any
+        if not hasattr(self, LineCol.attrib):
+            setattr(self, LineCol.attrib, LineCol())
+        return getattr(self, LineCol.attrib)
+
+    def _yaml_set_line_col(self, line, col):
+        # type: (Any, Any) -> None
+        self.lc.line = line
+        self.lc.col = col
+
+    def _yaml_set_kv_line_col(self, key, data):
+        # type: (Any, Any) -> None
+        self.lc.add_kv_line_col(key, data)
+
+    def _yaml_set_idx_line_col(self, key, data):
+        # type: (Any, Any) -> None
+        self.lc.add_idx_line_col(key, data)
+
+    @property
+    def anchor(self):
+        # type: () -> Any
+        if not hasattr(self, Anchor.attrib):
+            setattr(self, Anchor.attrib, Anchor())
+        return getattr(self, Anchor.attrib)
+
+    def yaml_anchor(self):
+        # type: () -> Any
+        if not hasattr(self, Anchor.attrib):
+            return None
+        return self.anchor
+
+    def yaml_set_anchor(self, value, always_dump=False):
+        # type: (Any, bool) -> None
+        self.anchor.value = value
+        self.anchor.always_dump = always_dump
+
+    @property
+    def tag(self):
+        # type: () -> Any
+        if not hasattr(self, Tag.attrib):
+            setattr(self, Tag.attrib, Tag())
+        return getattr(self, Tag.attrib)
+
+    def yaml_set_tag(self, value):
+        # type: (Any) -> None
+        self.tag.value = value
+
+    def copy_attributes(self, t, memo=None):
+        # type: (Any, Any) -> None
+        # fmt: off
+        for a in [Comment.attrib, Format.attrib, LineCol.attrib, Anchor.attrib,
+                  Tag.attrib, merge_attrib]:
+            if hasattr(self, a):
+                if memo is not None:
+                    setattr(t, a, copy.deepcopy(getattr(self, a), memo))
+                else:
+                    setattr(t, a, getattr(self, a))
+        # fmt: on
+
+    def _yaml_add_eol_comment(self, comment, key):
+        # type: (Any, Any) -> None
+        raise NotImplementedError
+
+    def _yaml_get_pre_comment(self):
+        # type: () -> Any
+        raise NotImplementedError
+
+    def _yaml_get_column(self, key):
+        # type: (Any) -> Any
+        raise NotImplementedError
+
+
+class CommentedSeq(MutableSliceableSequence, list, CommentedBase):  # type: ignore
+    __slots__ = (Comment.attrib, "_lst")
+
+    def __init__(self, *args, **kw):
+        # type: (Any, Any) -> None
+        list.__init__(self, *args, **kw)
+
+    def __getsingleitem__(self, idx):
+        # type: (Any) -> Any
+        return list.__getitem__(self, idx)
+
+    def __setsingleitem__(self, idx, value):
+        # type: (Any, Any) -> None
+        # try to preserve the scalarstring type if setting an existing key to a new value
+        if idx < len(self):
+            if (
+                isinstance(value, string_types)
+                and not isinstance(value, ScalarString)
+                and isinstance(self[idx], ScalarString)
+            ):
+                value = type(self[idx])(value)
+        list.__setitem__(self, idx, value)
+
+    def __delsingleitem__(self, idx=None):
+        # type: (Any) -> Any
+        list.__delitem__(self, idx)
+        self.ca.items.pop(idx, None)  # might not be there -> default value
+        for list_index in sorted(self.ca.items):
+            if list_index < idx:
+                continue
+            self.ca.items[list_index - 1] = self.ca.items.pop(list_index)
+
+    def __len__(self):
+        # type: () -> int
+        return list.__len__(self)
+
+    def insert(self, idx, val):
+        # type: (Any, Any) -> None
+        """the comments after the insertion have to move forward"""
+        list.insert(self, idx, val)
+        for list_index in sorted(self.ca.items, reverse=True):
+            if list_index < idx:
+                break
+            self.ca.items[list_index + 1] = self.ca.items.pop(list_index)
+
+    def extend(self, val):
+        # type: (Any) -> None
+        list.extend(self, val)
+
+    def __eq__(self, other):
+        # type: (Any) -> bool
+        return list.__eq__(self, other)
+
+    def _yaml_add_comment(self, comment, key=NoComment):
+        # type: (Any, Optional[Any]) -> None
+        if key is not NoComment:
+            self.yaml_key_comment_extend(key, comment)
+        else:
+            self.ca.comment = comment
+
+    def _yaml_add_eol_comment(self, comment, key):
+        # type: (Any, Any) -> None
+        self._yaml_add_comment(comment, key=key)
+
+    def _yaml_get_columnX(self, key):
+        # type: (Any) -> Any
+        return self.ca.items[key][0].start_mark.column
+
+    def _yaml_get_column(self, key):
+        # type: (Any) -> Any
+        column = None
+        sel_idx = None
+        pre, post = key - 1, key + 1
+        if pre in self.ca.items:
+            sel_idx = pre
+        elif post in self.ca.items:
+            sel_idx = post
+        else:
+            # self.ca.items is not ordered
+            for row_idx, _k1 in enumerate(self):
+                if row_idx >= key:
+                    break
+                if row_idx not in self.ca.items:
+                    continue
+                sel_idx = row_idx
+        if sel_idx is not None:
+            column = self._yaml_get_columnX(sel_idx)
+        return column
+
+    def _yaml_get_pre_comment(self):
+        # type: () -> Any
+        pre_comments = []  # type: List[Any]
+        if self.ca.comment is None:
+            self.ca.comment = [None, pre_comments]
+        else:
+            self.ca.comment[1] = pre_comments
+        return pre_comments
+
+    def __deepcopy__(self, memo):
+        # type: (Any) -> Any
+        res = self.__class__()
+        memo[id(self)] = res
+        for k in self:
+            res.append(copy.deepcopy(k, memo))
+        self.copy_attributes(res, memo=memo)
+        return res
+
+    def __add__(self, other):
+        # type: (Any) -> Any
+        return list.__add__(self, other)
+
+    def sort(self, key=None, reverse=False):  # type: ignore
+        # type: (Any, bool) -> None
+        if key is None:
+            tmp_lst = sorted(zip(self, range(len(self))), reverse=reverse)
+            list.__init__(self, [x[0] for x in tmp_lst])
+        else:
+            tmp_lst = sorted(
+                zip(map(key, list.__iter__(self)), range(len(self))), reverse=reverse
+            )
+            list.__init__(self, [list.__getitem__(self, x[1]) for x in tmp_lst])
+        itm = self.ca.items
+        self.ca._items = {}
+        for idx, x in enumerate(tmp_lst):
+            old_index = x[1]
+            if old_index in itm:
+                self.ca.items[idx] = itm[old_index]
+
+    def __repr__(self):
+        # type: () -> Any
+        return list.__repr__(self)
+
+
+class CommentedKeySeq(tuple, CommentedBase):  # type: ignore
+    """This primarily exists to be able to roundtrip keys that are sequences"""
+
+    def _yaml_add_comment(self, comment, key=NoComment):
+        # type: (Any, Optional[Any]) -> None
+        if key is not NoComment:
+            self.yaml_key_comment_extend(key, comment)
+        else:
+            self.ca.comment = comment
+
+    def _yaml_add_eol_comment(self, comment, key):
+        # type: (Any, Any) -> None
+        self._yaml_add_comment(comment, key=key)
+
+    def _yaml_get_columnX(self, key):
+        # type: (Any) -> Any
+        return self.ca.items[key][0].start_mark.column
+
+    def _yaml_get_column(self, key):
+        # type: (Any) -> Any
+        column = None
+        sel_idx = None
+        pre, post = key - 1, key + 1
+        if pre in self.ca.items:
+            sel_idx = pre
+        elif post in self.ca.items:
+            sel_idx = post
+        else:
+            # self.ca.items is not ordered
+            for row_idx, _k1 in enumerate(self):
+                if row_idx >= key:
+                    break
+                if row_idx not in self.ca.items:
+                    continue
+                sel_idx = row_idx
+        if sel_idx is not None:
+            column = self._yaml_get_columnX(sel_idx)
+        return column
+
+    def _yaml_get_pre_comment(self):
+        # type: () -> Any
+        pre_comments = []  # type: List[Any]
+        if self.ca.comment is None:
+            self.ca.comment = [None, pre_comments]
+        else:
+            self.ca.comment[1] = pre_comments
+        return pre_comments
+
+
+class CommentedMapView(Sized):
+    __slots__ = ("_mapping",)
+
+    def __init__(self, mapping):
+        # type: (Any) -> None
+        self._mapping = mapping
+
+    def __len__(self):
+        # type: () -> int
+        count = len(self._mapping)
+        return count
+
+
+class CommentedMapKeysView(CommentedMapView, Set):  # type: ignore
+    __slots__ = ()
+
+    @classmethod
+    def _from_iterable(self, it):
+        # type: (Any) -> Any
+        return set(it)
+
+    def __contains__(self, key):
+        # type: (Any) -> Any
+        return key in self._mapping
+
+    def __iter__(self):
+        # type: () -> Any  # yield from self._mapping  # not in py27, pypy
+        # for x in self._mapping._keys():
+        for x in self._mapping:
+            yield x
+
+
+class CommentedMapItemsView(CommentedMapView, Set):  # type: ignore
+    __slots__ = ()
+
+    @classmethod
+    def _from_iterable(self, it):
+        # type: (Any) -> Any
+        return set(it)
+
+    def __contains__(self, item):
+        # type: (Any) -> Any
+        key, value = item
+        try:
+            v = self._mapping[key]
+        except KeyError:
+            return False
+        else:
+            return v == value
+
+    def __iter__(self):
+        # type: () -> Any
+        for key in self._mapping._keys():
+            yield (key, self._mapping[key])
+
+
+class CommentedMapValuesView(CommentedMapView):
+    __slots__ = ()
+
+    def __contains__(self, value):
+        # type: (Any) -> Any
+        for key in self._mapping:
+            if value == self._mapping[key]:
+                return True
+        return False
+
+    def __iter__(self):
+        # type: () -> Any
+        for key in self._mapping._keys():
+            yield self._mapping[key]
+
+
+class CommentedMap(ordereddict, CommentedBase):  # type: ignore
+    __slots__ = (Comment.attrib, "_ok", "_ref")
+
+    def __init__(self, *args, **kw):
+        # type: (Any, Any) -> None
+        self._ok = set()  # type: MutableSet[Any]  #  own keys
+        self._ref = []  # type: List[CommentedMap]
+        ordereddict.__init__(self, *args, **kw)
+
+    def _yaml_add_comment(self, comment, key=NoComment, value=NoComment):
+        # type: (Any, Optional[Any], Optional[Any]) -> None
+        """values is set to key to indicate a value attachment of comment"""
+        if key is not NoComment:
+            self.yaml_key_comment_extend(key, comment)
+            return
+        if value is not NoComment:
+            self.yaml_value_comment_extend(value, comment)
+        else:
+            self.ca.comment = comment
+
+    def _yaml_add_eol_comment(self, comment, key):
+        # type: (Any, Any) -> None
+        """add on the value line, with value specified by the key"""
+        self._yaml_add_comment(comment, value=key)
+
+    def _yaml_get_columnX(self, key):
+        # type: (Any) -> Any
+        return self.ca.items[key][2].start_mark.column
+
+    def _yaml_get_column(self, key):
+        # type: (Any) -> Any
+        column = None
+        sel_idx = None
+        pre, post, last = None, None, None
+        for x in self:
+            if pre is not None and x != key:
+                post = x
+                break
+            if x == key:
+                pre = last
+            last = x
+        if pre in self.ca.items:
+            sel_idx = pre
+        elif post in self.ca.items:
+            sel_idx = post
+        else:
+            # self.ca.items is not ordered
+            for k1 in self:
+                if k1 >= key:
+                    break
+                if k1 not in self.ca.items:
+                    continue
+                sel_idx = k1
+        if sel_idx is not None:
+            column = self._yaml_get_columnX(sel_idx)
+        return column
+
+    def _yaml_get_pre_comment(self):
+        # type: () -> Any
+        pre_comments = []  # type: List[Any]
+        if self.ca.comment is None:
+            self.ca.comment = [None, pre_comments]
+        else:
+            self.ca.comment[1] = pre_comments
+        return pre_comments
+
+    def update(self, *vals, **kw):
+        # type: (Any, Any) -> None
+        try:
+            ordereddict.update(self, *vals, **kw)
+        except TypeError:
+            # probably a dict that is used
+            for x in vals[0]:
+                self[x] = vals[0][x]
+        try:
+            self._ok.update(vals.keys())  # type: ignore
+        except AttributeError:
+            # assume one argument that is a list/tuple of two element lists/tuples
+            for x in vals[0]:
+                self._ok.add(x[0])
+        if kw:
+            self._ok.add(*kw.keys())
+
+    def insert(self, pos, key, value, comment=None):
+        # type: (Any, Any, Any, Optional[Any]) -> None
+        """insert key value into given position
+        attach comment if provided
+        """
+        ordereddict.insert(self, pos, key, value)
+        self._ok.add(key)
+        if comment is not None:
+            self.yaml_add_eol_comment(comment, key=key)
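+
+    # Usage sketch: insert preserves key order and can attach an eol comment:
+    #
+    #     >>> cm = CommentedMap([("a", 1), ("c", 3)])
+    #     >>> cm.insert(1, "b", 2, comment="added later")
+    #     >>> list(cm)
+    #     ['a', 'b', 'c']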
+
+    def mlget(self, key, default=None, list_ok=False):
+        # type: (Any, Any, Any) -> Any
+        """multi-level get that expects dicts within dicts"""
+        if not isinstance(key, list):
+            return self.get(key, default)
+        # assume that the key is a list of recursively accessible dicts
+
+        def get_one_level(key_list, level, d):
+            # type: (Any, Any, Any) -> Any
+            if not list_ok:
+                assert isinstance(d, dict)
+            if level >= len(key_list):
+                if level > len(key_list):
+                    raise IndexError
+                return d[key_list[level - 1]]
+            return get_one_level(key_list, level + 1, d[key_list[level - 1]])
+
+        try:
+            return get_one_level(key, 1, self)
+        except KeyError:
+            return default
+        except (TypeError, IndexError):
+            if not list_ok:
+                raise
+            return default
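+
+    # Usage sketch for the multi-level get:
+    #
+    #     >>> cm = CommentedMap({"a": {"b": {"c": 3}}})
+    #     >>> cm.mlget(["a", "b", "c"])
+    #     3
+    #     >>> cm.mlget(["a", "x"], default=-1)
+    #     -1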
+
+    def __getitem__(self, key):
+        # type: (Any) -> Any
+        try:
+            return ordereddict.__getitem__(self, key)
+        except KeyError:
+            for merged in getattr(self, merge_attrib, []):
+                if key in merged[1]:
+                    return merged[1][key]
+            raise
+
+    def __setitem__(self, key, value):
+        # type: (Any, Any) -> None
+        # try to preserve the scalarstring type if setting an existing key to a new value
+        if key in self:
+            if (
+                isinstance(value, string_types)
+                and not isinstance(value, ScalarString)
+                and isinstance(self[key], ScalarString)
+            ):
+                value = type(self[key])(value)
+        ordereddict.__setitem__(self, key, value)
+        self._ok.add(key)
+
+    def _unmerged_contains(self, key):
+        # type: (Any) -> Any
+        if key in self._ok:
+            return True
+        return None
+
+    def __contains__(self, key):
+        # type: (Any) -> bool
+        return bool(ordereddict.__contains__(self, key))
+
+    def get(self, key, default=None):
+        # type: (Any, Any) -> Any
+        try:
+            return self.__getitem__(key)
+        except:  # NOQA
+            return default
+
+    def __repr__(self):
+        # type: () -> Any
+        return ordereddict.__repr__(self).replace("CommentedMap", "ordereddict")
+
+    def non_merged_items(self):
+        # type: () -> Any
+        for x in ordereddict.__iter__(self):
+            if x in self._ok:
+                yield x, ordereddict.__getitem__(self, x)
+
+    def __delitem__(self, key):
+        # type: (Any) -> None
+        # for merged in getattr(self, merge_attrib, []):
+        #     if key in merged[1]:
+        #         value = merged[1][key]
+        #         break
+        # else:
+        #     # not found in merged in stuff
+        #     ordereddict.__delitem__(self, key)
+        #    for referer in self._ref:
+        #        referer.update_key_value(key)
+        #    return
+        #
+        # ordereddict.__setitem__(self, key, value)  # merge might have different value
+        # self._ok.discard(key)
+        self._ok.discard(key)
+        ordereddict.__delitem__(self, key)
+        for referer in self._ref:
+            referer.update_key_value(key)
+
+    def __iter__(self):
+        # type: () -> Any
+        for x in ordereddict.__iter__(self):
+            yield x
+
+    def _keys(self):
+        # type: () -> Any
+        for x in ordereddict.__iter__(self):
+            yield x
+
+    def __len__(self):
+        # type: () -> int
+        return int(ordereddict.__len__(self))
+
+    def __eq__(self, other):
+        # type: (Any) -> bool
+        return bool(dict(self) == other)
+
+    if PY2:
+
+        def keys(self):
+            # type: () -> Any
+            return list(self._keys())
+
+        def iterkeys(self):
+            # type: () -> Any
+            return self._keys()
+
+        def viewkeys(self):
+            # type: () -> Any
+            return CommentedMapKeysView(self)
+
+    else:
+
+        def keys(self):
+            # type: () -> Any
+            return CommentedMapKeysView(self)
+
+    if PY2:
+
+        def _values(self):
+            # type: () -> Any
+            for x in ordereddict.__iter__(self):
+                yield ordereddict.__getitem__(self, x)
+
+        def values(self):
+            # type: () -> Any
+            return list(self._values())
+
+        def itervalues(self):
+            # type: () -> Any
+            return self._values()
+
+        def viewvalues(self):
+            # type: () -> Any
+            return CommentedMapValuesView(self)
+
+    else:
+
+        def values(self):
+            # type: () -> Any
+            return CommentedMapValuesView(self)
+
+    def _items(self):
+        # type: () -> Any
+        for x in ordereddict.__iter__(self):
+            yield x, ordereddict.__getitem__(self, x)
+
+    if PY2:
+
+        def items(self):
+            # type: () -> Any
+            return list(self._items())
+
+        def iteritems(self):
+            # type: () -> Any
+            return self._items()
+
+        def viewitems(self):
+            # type: () -> Any
+            return CommentedMapItemsView(self)
+
+    else:
+
+        def items(self):
+            # type: () -> Any
+            return CommentedMapItemsView(self)
+
+    @property
+    def merge(self):
+        # type: () -> Any
+        if not hasattr(self, merge_attrib):
+            setattr(self, merge_attrib, [])
+        return getattr(self, merge_attrib)
+
+    def copy(self):
+        # type: () -> Any
+        x = type(self)()  # update doesn't work
+        for k, v in self._items():
+            x[k] = v
+        self.copy_attributes(x)
+        return x
+
+    def add_referent(self, cm):
+        # type: (Any) -> None
+        if cm not in self._ref:
+            self._ref.append(cm)
+
+    def add_yaml_merge(self, value):
+        # type: (Any) -> None
+        for v in value:
+            v[1].add_referent(self)
+            for k, v in v[1].items():
+                if ordereddict.__contains__(self, k):
+                    continue
+                ordereddict.__setitem__(self, k, v)
+        self.merge.extend(value)
+
+    def update_key_value(self, key):
+        # type: (Any) -> None
+        if key in self._ok:
+            return
+        for v in self.merge:
+            if key in v[1]:
+                ordereddict.__setitem__(self, key, v[1][key])
+                return
+        ordereddict.__delitem__(self, key)
+
+    def __deepcopy__(self, memo):
+        # type: (Any) -> Any
+        res = self.__class__()
+        memo[id(self)] = res
+        for k in self:
+            res[k] = copy.deepcopy(self[k], memo)
+        self.copy_attributes(res, memo=memo)
+        return res
+
+
+# based on brownie mappings
+@classmethod  # type: ignore
+def raise_immutable(cls, *args, **kwargs):
+    # type: (Any, *Any, **Any) -> None
+    raise TypeError("{} objects are immutable".format(cls.__name__))
+
+
+class CommentedKeyMap(CommentedBase, Mapping):  # type: ignore
+    """This primarily exists to be able to roundtrip keys that are mappings"""
+
+    __slots__ = Comment.attrib, "_od"
+
+    def __init__(self, *args, **kw):
+        # type: (Any, Any) -> None
+        if hasattr(self, "_od"):
+            raise_immutable(self)
+        try:
+            self._od = ordereddict(*args, **kw)
+        except TypeError:
+            if PY2:
+                self._od = ordereddict(args[0].items())
+            else:
+                raise
+
+    __delitem__ = (
+        __setitem__
+    ) = clear = pop = popitem = setdefault = update = raise_immutable
+
+    # need to implement __getitem__, __iter__ and __len__
+    def __getitem__(self, index):
+        # type: (Any) -> Any
+        return self._od[index]
+
+    def __iter__(self):
+        # type: () -> Iterator[Any]
+        for x in self._od.__iter__():
+            yield x
+
+    def __len__(self):
+        # type: () -> int
+        return len(self._od)
+
+    def __hash__(self):
+        # type: () -> Any
+        return hash(tuple(self.items()))
+
+    def __repr__(self):
+        # type: () -> Any
+        if not hasattr(self, merge_attrib):
+            return self._od.__repr__()
+        return "ordereddict(" + repr(list(self._od.items())) + ")"
+
+    @classmethod
+    def fromkeys(cls, keys, v=None):
+        # type: (Any, Any, Any) -> Any
+        return CommentedKeyMap(dict.fromkeys(keys, v))
+
+    def _yaml_add_comment(self, comment, key=NoComment):
+        # type: (Any, Optional[Any]) -> None
+        if key is not NoComment:
+            self.yaml_key_comment_extend(key, comment)
+        else:
+            self.ca.comment = comment
+
+    def _yaml_add_eol_comment(self, comment, key):
+        # type: (Any, Any) -> None
+        self._yaml_add_comment(comment, key=key)
+
+    def _yaml_get_columnX(self, key):
+        # type: (Any) -> Any
+        return self.ca.items[key][0].start_mark.column
+
+    def _yaml_get_column(self, key):
+        # type: (Any) -> Any
+        column = None
+        sel_idx = None
+        pre, post = key - 1, key + 1
+        if pre in self.ca.items:
+            sel_idx = pre
+        elif post in self.ca.items:
+            sel_idx = post
+        else:
+            # self.ca.items is not ordered
+            for row_idx, _k1 in enumerate(self):
+                if row_idx >= key:
+                    break
+                if row_idx not in self.ca.items:
+                    continue
+                sel_idx = row_idx
+        if sel_idx is not None:
+            column = self._yaml_get_columnX(sel_idx)
+        return column
+
+    def _yaml_get_pre_comment(self):
+        # type: () -> Any
+        pre_comments = []  # type: List[Any]
+        if self.ca.comment is None:
+            self.ca.comment = [None, pre_comments]
+        else:
+            self.ca.comment[1] = pre_comments
+        return pre_comments
+
+
+class CommentedOrderedMap(CommentedMap):
+    __slots__ = (Comment.attrib,)
+
+
+class CommentedSet(MutableSet, CommentedBase):  # type: ignore  # NOQA
+    __slots__ = Comment.attrib, "odict"
+
+    def __init__(self, values=None):
+        # type: (Any) -> None
+        self.odict = ordereddict()
+        MutableSet.__init__(self)
+        if values is not None:
+            self |= values  # type: ignore
+
+    def _yaml_add_comment(self, comment, key=NoComment, value=NoComment):
+        # type: (Any, Optional[Any], Optional[Any]) -> None
+        """values is set to key to indicate a value attachment of comment"""
+        if key is not NoComment:
+            self.yaml_key_comment_extend(key, comment)
+            return
+        if value is not NoComment:
+            self.yaml_value_comment_extend(value, comment)
+        else:
+            self.ca.comment = comment
+
+    def _yaml_add_eol_comment(self, comment, key):
+        # type: (Any, Any) -> None
+        """add on the value line, with value specified by the key"""
+        self._yaml_add_comment(comment, value=key)
+
+    def add(self, value):
+        # type: (Any) -> None
+        """Add an element."""
+        self.odict[value] = None
+
+    def discard(self, value):
+        # type: (Any) -> None
+        """Remove an element.  Do not raise an exception if absent."""
+        if value in self.odict:
+            del self.odict[value]
+
+    def __contains__(self, x):
+        # type: (Any) -> Any
+        return x in self.odict
+
+    def __iter__(self):
+        # type: () -> Any
+        for x in self.odict:
+            yield x
+
+    def __len__(self):
+        # type: () -> int
+        return len(self.odict)
+
+    def __repr__(self):
+        # type: () -> str
+        return "set({0!r})".format(self.odict.keys())
+
+
+class TaggedScalar(CommentedBase):
+    # the value and style attributes are set during roundtrip construction
+    def __init__(self, value=None, style=None, tag=None):
+        # type: (Any, Any, Any) -> None
+        self.value = value
+        self.style = style
+        if tag is not None:
+            self.yaml_set_tag(tag)
+
+    def __str__(self):
+        # type: () -> Any
+        return self.value
+
+
+def dump_comments(d, name="", sep=".", out=sys.stdout):
+    # type: (Any, str, str, Any) -> None
+    """
+    recursively dump comments, all but the toplevel preceded by the path
+    in dotted form x.0.a
+    """
+    if isinstance(d, dict) and hasattr(d, "ca"):
+        if name:
+            out.write("{}\n".format(name))
+        out.write("{}\n".format(d.ca))  # type: ignore
+        for k in d:
+            dump_comments(d[k], name=(name + sep + k) if name else k, sep=sep, out=out)
+    elif isinstance(d, list) and hasattr(d, "ca"):
+        if name:
+            out.write("{}\n".format(name))
+        out.write("{}\n".format(d.ca))  # type: ignore
+        for idx, k in enumerate(d):
+            dump_comments(
+                k, name=(name + sep + str(idx)) if name else str(idx), sep=sep, out=out
+            )
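+
+# Usage sketch (output abbreviated; the exact Comment repr depends on the
+# comments found while round-trip loading):
+#
+#     >>> data = round_trip_load("a: 1  # one\nb:\n- 2\n")
+#     >>> dump_comments(data)   # doctest: +SKIP
+#     Comment(comment=None,
+#       items={'a': [...]})
+#     b
+#     Comment(comment=None,
+#       items={...})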
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/compat.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/compat.py
new file mode 100644
index 00000000..a44e00dd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/compat.py
@@ -0,0 +1,333 @@
+# coding: utf-8
+
+from __future__ import print_function
+
+# partially from package six by Benjamin Peterson
+
+import sys
+import os
+import types
+import traceback
+from abc import abstractmethod
+
+
+# fmt: off
+if False:  # MYPY
+    from typing import Any, Dict, Optional, List, Union, BinaryIO, IO, Text, Tuple  # NOQA
+# fmt: on
+
+_DEFAULT_YAML_VERSION = (1, 2)
+
+try:
+    from ruamel.ordereddict import ordereddict
+except:  # NOQA
+    try:
+        from collections import OrderedDict
+    except ImportError:
+        from ordereddict import OrderedDict  # type: ignore
+    # subclassing is needed to get the right class name;
+    # 'import ... as ordereddict' would not rename the type
+
+    class ordereddict(OrderedDict):  # type: ignore
+        if not hasattr(OrderedDict, "insert"):
+
+            def insert(self, pos, key, value):
+                # type: (int, Any, Any) -> None
+                if pos >= len(self):
+                    self[key] = value
+                    return
+                od = ordereddict()
+                od.update(self)
+                for k in od:
+                    del self[k]
+                for index, old_key in enumerate(od):
+                    if pos == index:
+                        self[key] = value
+                    self[old_key] = od[old_key]
+
+
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+
+if PY3:
+
+    def utf8(s):
+        # type: (str) -> str
+        return s
+
+    def to_str(s):
+        # type: (str) -> str
+        return s
+
+    def to_unicode(s):
+        # type: (str) -> str
+        return s
+
+
+else:
+    if False:
+        unicode = str
+
+    def utf8(s):
+        # type: (unicode) -> str
+        return s.encode("utf-8")
+
+    def to_str(s):
+        # type: (str) -> str
+        return str(s)
+
+    def to_unicode(s):
+        # type: (str) -> unicode
+        return unicode(s)  # NOQA
+
+
+if PY3:
+    string_types = str
+    integer_types = int
+    class_types = type
+    text_type = str
+    binary_type = bytes
+
+    MAXSIZE = sys.maxsize
+    unichr = chr
+    import io
+
+    StringIO = io.StringIO
+    BytesIO = io.BytesIO
+    # have unlimited precision
+    no_limit_int = int
+    from collections.abc import (
+        Hashable,
+        MutableSequence,
+        MutableMapping,
+        Mapping,
+    )  # NOQA
+
+else:
+    string_types = basestring  # NOQA
+    integer_types = (int, long)  # NOQA
+    class_types = (type, types.ClassType)
+    text_type = unicode  # NOQA
+    binary_type = str
+
+    # to allow importing
+    unichr = unichr
+    from StringIO import StringIO as _StringIO
+
+    StringIO = _StringIO
+    import cStringIO
+
+    BytesIO = cStringIO.StringIO
+    # have unlimited precision
+    no_limit_int = long  # NOQA not available on Python 3
+    from collections import Hashable, MutableSequence, MutableMapping, Mapping  # NOQA
+
+if False:  # MYPY
+    # StreamType = Union[BinaryIO, IO[str], IO[unicode],  StringIO]
+    # StreamType = Union[BinaryIO, IO[str], StringIO]  # type: ignore
+    StreamType = Any
+
+    StreamTextType = StreamType  # Union[Text, StreamType]
+    VersionType = Union[List[int], str, Tuple[int, int]]
+
+if PY3:
+    builtins_module = "builtins"
+else:
+    builtins_module = "__builtin__"
+
+UNICODE_SIZE = 4 if sys.maxunicode > 65535 else 2
+
+
+def with_metaclass(meta, *bases):
+    # type: (Any, Any) -> Any
+    """Create a base class with a metaclass."""
+    return meta("NewBase", bases, {})
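+
+# Usage sketch: classes derived from the returned base get `meta` as their
+# metaclass, which works on both Python 2 and 3:
+#
+#     >>> class Meta(type):
+#     ...     pass
+#     >>> class C(with_metaclass(Meta)):
+#     ...     pass
+#     >>> type(C) is Meta
+#     True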
+
+
+DBG_TOKEN = 1
+DBG_EVENT = 2
+DBG_NODE = 4
+
+
+_debug = None  # type: Optional[int]
+if "RUAMELDEBUG" in os.environ:
+    _debugx = os.environ.get("RUAMELDEBUG")
+    if _debugx is None:
+        _debug = 0
+    else:
+        _debug = int(_debugx)
+
+
+if bool(_debug):
+
+    class ObjectCounter(object):
+        def __init__(self):
+            # type: () -> None
+            self.map = {}  # type: Dict[Any, Any]
+
+        def __call__(self, k):
+            # type: (Any) -> None
+            self.map[k] = self.map.get(k, 0) + 1
+
+        def dump(self):
+            # type: () -> None
+            for k in sorted(self.map):
+                sys.stdout.write("{} -> {}\n".format(k, self.map[k]))
+
+    object_counter = ObjectCounter()
+
+
+# used from yaml util when testing
+def dbg(val=None):
+    # type: (Any) -> Any
+    global _debug
+    if _debug is None:
+        # set to true or false
+        _debugx = os.environ.get("YAMLDEBUG")
+        if _debugx is None:
+            _debug = 0
+        else:
+            _debug = int(_debugx)
+    if val is None:
+        return _debug
+    return _debug & val
+
+
+class Nprint(object):
+    def __init__(self, file_name=None):
+        # type: (Any) -> None
+        self._max_print = None  # type: Any
+        self._count = None  # type: Any
+        self._file_name = file_name
+
+    def __call__(self, *args, **kw):
+        # type: (Any, Any) -> None
+        if not bool(_debug):
+            return
+        out = sys.stdout if self._file_name is None else open(self._file_name, "a")
+        dbgprint = print  # to fool checking for print statements by dv utility
+        kw1 = kw.copy()
+        kw1["file"] = out
+        dbgprint(*args, **kw1)
+        out.flush()
+        if self._max_print is not None:
+            if self._count is None:
+                self._count = self._max_print
+            self._count -= 1
+            if self._count == 0:
+                dbgprint("forced exit\n")
+                traceback.print_stack()
+                out.flush()
+                sys.exit(0)
+        if self._file_name:
+            out.close()
+
+    def set_max_print(self, i):
+        # type: (int) -> None
+        self._max_print = i
+        self._count = None
+
+
+nprint = Nprint()
+nprintf = Nprint("/var/tmp/strictyaml.ruamel.log")
+
+# char checkers following production rules
+
+
+def check_namespace_char(ch):
+    # type: (Any) -> bool
+    if u"\x21" <= ch <= u"\x7E":  # ! to ~
+        return True
+    if u"\xA0" <= ch <= u"\uD7FF":
+        return True
+    if (u"\uE000" <= ch <= u"\uFFFD") and ch != u"\uFEFF":  # excl. byte order mark
+        return True
+    if u"\U00010000" <= ch <= u"\U0010FFFF":
+        return True
+    return False
+
+
+def check_anchorname_char(ch):
+    # type: (Any) -> bool
+    if ch in u",[]{}":
+        return False
+    return check_namespace_char(ch)
+
+
+def version_tnf(t1, t2=None):
+    # type: (Any, Any) -> Any
+    """
+    return True if strictyaml.ruamel version_info < t1; None if t2 is specified and version_info < t2; else False
+    """
+    from strictyaml.ruamel import version_info  # NOQA
+
+    if version_info < t1:
+        return True
+    if t2 is not None and version_info < t2:
+        return None
+    return False
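+
+# Usage sketch, with version_info == (0, 16, 13) as set in __init__.py:
+#
+#     >>> version_tnf((0, 17))           # 0.16.13 < 0.17 -> True
+#     True
+#     >>> version_tnf((0, 15), (0, 17))  # not < 0.15, but < 0.17 -> None
+#     >>> version_tnf((0, 15))           # not < 0.15 -> False
+#     False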
+
+
+class MutableSliceableSequence(MutableSequence):  # type: ignore
+    __slots__ = ()
+
+    def __getitem__(self, index):
+        # type: (Any) -> Any
+        if not isinstance(index, slice):
+            return self.__getsingleitem__(index)
+        return type(self)([self[i] for i in range(*index.indices(len(self)))])  # type: ignore
+
+    def __setitem__(self, index, value):
+        # type: (Any, Any) -> None
+        if not isinstance(index, slice):
+            return self.__setsingleitem__(index, value)
+        assert iter(value)
+        # nprint(index.start, index.stop, index.step, index.indices(len(self)))
+        if index.step is None:
+            del self[index.start : index.stop]
+            for elem in reversed(value):
+                self.insert(0 if index.start is None else index.start, elem)
+        else:
+            range_parms = index.indices(len(self))
+            nr_assigned_items = (range_parms[1] - range_parms[0] - 1) // range_parms[
+                2
+            ] + 1
+            # need to test before changing, in case TypeError is caught
+            if nr_assigned_items < len(value):
+                raise TypeError(
+                    "too many elements in value {} < {}".format(
+                        nr_assigned_items, len(value)
+                    )
+                )
+            elif nr_assigned_items > len(value):
+                raise TypeError(
+                    "not enough elements in value {} > {}".format(
+                        nr_assigned_items, len(value)
+                    )
+                )
+            for idx, i in enumerate(range(*range_parms)):
+                self[i] = value[idx]
+
+    def __delitem__(self, index):
+        # type: (Any) -> None
+        if not isinstance(index, slice):
+            return self.__delsingleitem__(index)
+        # nprint(index.start, index.stop, index.step, index.indices(len(self)))
+        for i in reversed(range(*index.indices(len(self)))):
+            del self[i]
+
+    @abstractmethod
+    def __getsingleitem__(self, index):
+        # type: (Any) -> Any
+        raise IndexError
+
+    @abstractmethod
+    def __setsingleitem__(self, index, value):
+        # type: (Any, Any) -> None
+        raise IndexError
+
+    @abstractmethod
+    def __delsingleitem__(self, index):
+        # type: (Any) -> None
+        raise IndexError
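+
+# Usage sketch: a subclass only supplies the three single-item methods and
+# gets slice handling from this mixin; CommentedSeq in comments.py builds on
+# exactly this pattern:
+#
+#     >>> from strictyaml.ruamel.comments import CommentedSeq
+#     >>> cs = CommentedSeq([0, 1, 2, 3])
+#     >>> cs[1:3]
+#     [1, 2]
+#     >>> cs[1:3] = [9, 9, 9]   # step-less slices may resize the sequence
+#     >>> list(cs)
+#     [0, 9, 9, 9, 3]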
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/composer.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/composer.py
new file mode 100644
index 00000000..1bfd8018
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/composer.py
@@ -0,0 +1,243 @@
+# coding: utf-8
+
+from __future__ import absolute_import, print_function
+
+import warnings
+
+from strictyaml.ruamel.error import MarkedYAMLError, ReusedAnchorWarning
+from strictyaml.ruamel.compat import utf8, nprint, nprintf  # NOQA
+
+from strictyaml.ruamel.events import (
+    StreamStartEvent,
+    StreamEndEvent,
+    MappingStartEvent,
+    MappingEndEvent,
+    SequenceStartEvent,
+    SequenceEndEvent,
+    AliasEvent,
+    ScalarEvent,
+)
+from strictyaml.ruamel.nodes import MappingNode, ScalarNode, SequenceNode
+
+if False:  # MYPY
+    from typing import Any, Dict, Optional, List  # NOQA
+
+__all__ = ["Composer", "ComposerError"]
+
+
+class ComposerError(MarkedYAMLError):
+    pass
+
+
+class Composer(object):
+    def __init__(self, loader=None):
+        # type: (Any) -> None
+        self.loader = loader
+        if self.loader is not None and getattr(self.loader, "_composer", None) is None:
+            self.loader._composer = self
+        self.anchors = {}  # type: Dict[Any, Any]
+
+    @property
+    def parser(self):
+        # type: () -> Any
+        if hasattr(self.loader, "typ"):
+            self.loader.parser  # side effect: instantiate the parser
+        return self.loader._parser
+
+    @property
+    def resolver(self):
+        # type: () -> Any
+        # assert self.loader._resolver is not None
+        if hasattr(self.loader, "typ"):
+            self.loader.resolver  # side effect: instantiate the resolver
+        return self.loader._resolver
+
+    def check_node(self):
+        # type: () -> Any
+        # Drop the STREAM-START event.
+        if self.parser.check_event(StreamStartEvent):
+            self.parser.get_event()
+
+        # are there more documents available?
+        return not self.parser.check_event(StreamEndEvent)
+
+    def get_node(self):
+        # type: () -> Any
+        # Get the root node of the next document.
+        if not self.parser.check_event(StreamEndEvent):
+            return self.compose_document()
+
+    def get_single_node(self):
+        # type: () -> Any
+        # Drop the STREAM-START event.
+        self.parser.get_event()
+
+        # Compose a document if the stream is not empty.
+        document = None  # type: Any
+        if not self.parser.check_event(StreamEndEvent):
+            document = self.compose_document()
+
+        # Ensure that the stream contains no more documents.
+        if not self.parser.check_event(StreamEndEvent):
+            event = self.parser.get_event()
+            raise ComposerError(
+                "expected a single document in the stream",
+                document.start_mark,
+                "but found another document",
+                event.start_mark,
+            )
+
+        # Drop the STREAM-END event.
+        self.parser.get_event()
+
+        return document
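+
+    # Usage sketch (assuming the module-level compose() wrapper exported
+    # from strictyaml.ruamel.main, which drives this method):
+    #
+    #     >>> import strictyaml.ruamel
+    #     >>> node = strictyaml.ruamel.compose("a: [1, 2]")
+    #     >>> node.tag
+    #     'tag:yaml.org,2002:map'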
+
+    def compose_document(self):
+        # type: (Any) -> Any
+        # Drop the DOCUMENT-START event.
+        self.parser.get_event()
+
+        # Compose the root node.
+        node = self.compose_node(None, None)
+
+        # Drop the DOCUMENT-END event.
+        self.parser.get_event()
+
+        self.anchors = {}
+        return node
+
+    def compose_node(self, parent, index):
+        # type: (Any, Any) -> Any
+        if self.parser.check_event(AliasEvent):
+            event = self.parser.get_event()
+            alias = event.anchor
+            if alias not in self.anchors:
+                raise ComposerError(
+                    None,
+                    None,
+                    "found undefined alias %r" % utf8(alias),
+                    event.start_mark,
+                )
+            return self.anchors[alias]
+        event = self.parser.peek_event()
+        anchor = event.anchor
+        if anchor is not None:  # have an anchor
+            if anchor in self.anchors:
+                # raise ComposerError(
+                #     "found duplicate anchor %r; first occurrence"
+                #     % utf8(anchor), self.anchors[anchor].start_mark,
+                #     "second occurrence", event.start_mark)
+                ws = (
+                    "\nfound duplicate anchor {!r}\nfirst occurrence {}\nsecond occurrence "
+                    "{}".format(
+                        (anchor), self.anchors[anchor].start_mark, event.start_mark
+                    )
+                )
+                warnings.warn(ws, ReusedAnchorWarning)
+        self.resolver.descend_resolver(parent, index)
+        if self.parser.check_event(ScalarEvent):
+            node = self.compose_scalar_node(anchor)
+        elif self.parser.check_event(SequenceStartEvent):
+            node = self.compose_sequence_node(anchor)
+        elif self.parser.check_event(MappingStartEvent):
+            node = self.compose_mapping_node(anchor)
+        self.resolver.ascend_resolver()
+        return node
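+    # Anchor/alias sketch (illustrative): for
+    #
+    #     base: &b {x: 1}
+    #     copy: *b
+    #
+    # the AliasEvent for *b returns self.anchors['b'], so both values are the
+    # *same* node object; a second &b definition only emits a
+    # ReusedAnchorWarning instead of raising.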
+
+    def compose_scalar_node(self, anchor):
+        # type: (Any) -> Any
+        event = self.parser.get_event()
+        tag = event.tag
+        if tag is None or tag == u"!":
+            tag = self.resolver.resolve(ScalarNode, event.value, event.implicit)
+        node = ScalarNode(
+            tag,
+            event.value,
+            event.start_mark,
+            event.end_mark,
+            style=event.style,
+            comment=event.comment,
+            anchor=anchor,
+        )
+        if anchor is not None:
+            self.anchors[anchor] = node
+        return node
+
+    def compose_sequence_node(self, anchor):
+        # type: (Any) -> Any
+        start_event = self.parser.get_event()
+        tag = start_event.tag
+        if tag is None or tag == u"!":
+            tag = self.resolver.resolve(SequenceNode, None, start_event.implicit)
+        node = SequenceNode(
+            tag,
+            [],
+            start_event.start_mark,
+            None,
+            flow_style=start_event.flow_style,
+            comment=start_event.comment,
+            anchor=anchor,
+        )
+        if anchor is not None:
+            self.anchors[anchor] = node
+        index = 0
+        while not self.parser.check_event(SequenceEndEvent):
+            node.value.append(self.compose_node(node, index))
+            index += 1
+        end_event = self.parser.get_event()
+        if node.flow_style is True and end_event.comment is not None:
+            if node.comment is not None:
+                nprint(
+                    "Warning: unexpected end_event commment in sequence "
+                    "node {}".format(node.flow_style)
+                )
+            node.comment = end_event.comment
+        node.end_mark = end_event.end_mark
+        self.check_end_doc_comment(end_event, node)
+        return node
+
+    def compose_mapping_node(self, anchor):
+        # type: (Any) -> Any
+        start_event = self.parser.get_event()
+        tag = start_event.tag
+        if tag is None or tag == u"!":
+            tag = self.resolver.resolve(MappingNode, None, start_event.implicit)
+        node = MappingNode(
+            tag,
+            [],
+            start_event.start_mark,
+            None,
+            flow_style=start_event.flow_style,
+            comment=start_event.comment,
+            anchor=anchor,
+        )
+        if anchor is not None:
+            self.anchors[anchor] = node
+        while not self.parser.check_event(MappingEndEvent):
+            # key_event = self.parser.peek_event()
+            item_key = self.compose_node(node, None)
+            # if item_key in node.value:
+            #     raise ComposerError("while composing a mapping",
+            #             start_event.start_mark,
+            #             "found duplicate key", key_event.start_mark)
+            item_value = self.compose_node(node, item_key)
+            # node.value[item_key] = item_value
+            node.value.append((item_key, item_value))
+        end_event = self.parser.get_event()
+        if node.flow_style is True and end_event.comment is not None:
+            node.comment = end_event.comment
+        node.end_mark = end_event.end_mark
+        self.check_end_doc_comment(end_event, node)
+        return node
+
+    def check_end_doc_comment(self, end_event, node):
+        # type: (Any, Any) -> None
+        if end_event.comment and end_event.comment[1]:
+            # pre comments on an end_event, no following to move to
+            if node.comment is None:
+                node.comment = [None, None]
+            assert not isinstance(node, ScalarNode)
+            # this is a post comment on a mapping node, add as third element
+            # in the list
+            node.comment.append(end_event.comment[1])
+            end_event.comment[1] = None
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/configobjwalker.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/configobjwalker.py
new file mode 100644
index 00000000..3456c161
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/configobjwalker.py
@@ -0,0 +1,16 @@
+# coding: utf-8
+
+import warnings
+
+from strictyaml.ruamel.util import configobj_walker as new_configobj_walker
+
+if False:  # MYPY
+    from typing import Any  # NOQA
+
+
+def configobj_walker(cfg):
+    # type: (Any) -> Any
+    warnings.warn(
+        "configobj_walker has moved to strictyaml.ruamel.util, please update your code"
+    )
+    return new_configobj_walker(cfg)
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/constructor.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/constructor.py
new file mode 100644
index 00000000..1853af34
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/constructor.py
@@ -0,0 +1,1891 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division
+
+import datetime
+import base64
+import binascii
+import re
+import sys
+import types
+import warnings
+
+# fmt: off
+from strictyaml.ruamel.error import (MarkedYAMLError, MarkedYAMLFutureWarning,
+                               MantissaNoDotYAML1_1Warning)
+from strictyaml.ruamel.nodes import *                               # NOQA
+from strictyaml.ruamel.nodes import (SequenceNode, MappingNode, ScalarNode)
+from strictyaml.ruamel.compat import (utf8, builtins_module, to_str, PY2, PY3,  # NOQA
+                                text_type, nprint, nprintf, version_tnf)
+from strictyaml.ruamel.compat import ordereddict, Hashable, MutableSequence  # type: ignore
+from strictyaml.ruamel.compat import MutableMapping  # type: ignore
+
+from strictyaml.ruamel.comments import *                               # NOQA
+from strictyaml.ruamel.comments import (CommentedMap, CommentedOrderedMap, CommentedSet,
+                                  CommentedKeySeq, CommentedSeq, TaggedScalar,
+                                  CommentedKeyMap)
+from strictyaml.ruamel.scalarstring import (SingleQuotedScalarString, DoubleQuotedScalarString,
+                                      LiteralScalarString, FoldedScalarString,
+                                      PlainScalarString, ScalarString,)
+from strictyaml.ruamel.scalarint import ScalarInt, BinaryInt, OctalInt, HexInt, HexCapsInt
+from strictyaml.ruamel.scalarfloat import ScalarFloat
+from strictyaml.ruamel.scalarbool import ScalarBoolean
+from strictyaml.ruamel.timestamp import TimeStamp
+from strictyaml.ruamel.util import RegExp
+
+if False:  # MYPY
+    from typing import Any, Dict, List, Set, Generator, Union, Optional  # NOQA
+
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+           'ConstructorError', 'RoundTripConstructor']
+# fmt: on
+
+
+class ConstructorError(MarkedYAMLError):
+    pass
+
+
+class DuplicateKeyFutureWarning(MarkedYAMLFutureWarning):
+    pass
+
+
+class DuplicateKeyError(MarkedYAMLFutureWarning):
+    pass
+
+
+class BaseConstructor(object):
+
+    yaml_constructors = {}  # type: Dict[Any, Any]
+    yaml_multi_constructors = {}  # type: Dict[Any, Any]
+
+    def __init__(self, preserve_quotes=None, loader=None):
+        # type: (Optional[bool], Any) -> None
+        self.loader = loader
+        if (
+            self.loader is not None
+            and getattr(self.loader, "_constructor", None) is None
+        ):
+            self.loader._constructor = self
+        self.yaml_base_dict_type = dict
+        self.yaml_base_list_type = list
+        self.constructed_objects = {}  # type: Dict[Any, Any]
+        self.recursive_objects = {}  # type: Dict[Any, Any]
+        self.state_generators = []  # type: List[Any]
+        self.deep_construct = False
+        self._preserve_quotes = preserve_quotes
+        self.allow_duplicate_keys = version_tnf((0, 15, 1), (0, 16))
+
+    @property
+    def composer(self):
+        # type: () -> Any
+        if hasattr(self.loader, "typ"):
+            return self.loader.composer
+        try:
+            return self.loader._composer
+        except AttributeError:
+            sys.stdout.write("slt {}\n".format(type(self)))
+            sys.stdout.write("slc {}\n".format(self.loader._composer))
+            sys.stdout.write("{}\n".format(dir(self)))
+            raise
+
+    @property
+    def resolver(self):
+        # type: () -> Any
+        if hasattr(self.loader, "typ"):
+            return self.loader.resolver
+        return self.loader._resolver
+
+    def check_data(self):
+        # type: () -> Any
+        # Are there more documents available?
+        return self.composer.check_node()
+
+    def get_data(self):
+        # type: () -> Any
+        # Construct and return the next document.
+        if self.composer.check_node():
+            return self.construct_document(self.composer.get_node())
+
+    def get_single_data(self):
+        # type: () -> Any
+        # Ensure that the stream contains a single document and construct it.
+        node = self.composer.get_single_node()
+        if node is not None:
+            return self.construct_document(node)
+        return None
+
+    def construct_document(self, node):
+        # type: (Any) -> Any
+        data = self.construct_object(node)
+        while bool(self.state_generators):
+            state_generators = self.state_generators
+            self.state_generators = []
+            for generator in state_generators:
+                for _dummy in generator:
+                    pass
+        self.constructed_objects = {}
+        self.recursive_objects = {}
+        self.deep_construct = False
+        return data
+
+    def construct_object(self, node, deep=False):
+        # type: (Any, bool) -> Any
+        """deep is True when creating an object/mapping recursively,
+        in that case want the underlying elements available during construction
+        """
+        if node in self.constructed_objects:
+            return self.constructed_objects[node]
+        if deep:
+            old_deep = self.deep_construct
+            self.deep_construct = True
+        if node in self.recursive_objects:
+            return self.recursive_objects[node]
+            # raise ConstructorError(
+            #     None, None, 'found unconstructable recursive node', node.start_mark
+            # )
+        self.recursive_objects[node] = None
+        data = self.construct_non_recursive_object(node)
+
+        self.constructed_objects[node] = data
+        del self.recursive_objects[node]
+        if deep:
+            self.deep_construct = old_deep
+        return data
+
+    def construct_non_recursive_object(self, node, tag=None):
+        # type: (Any, Optional[str]) -> Any
+        constructor = None  # type: Any
+        tag_suffix = None
+        if tag is None:
+            tag = node.tag
+        if tag in self.yaml_constructors:
+            constructor = self.yaml_constructors[tag]
+        else:
+            for tag_prefix in self.yaml_multi_constructors:
+                if tag.startswith(tag_prefix):
+                    tag_suffix = tag[len(tag_prefix) :]
+                    constructor = self.yaml_multi_constructors[tag_prefix]
+                    break
+            else:
+                if None in self.yaml_multi_constructors:
+                    tag_suffix = tag
+                    constructor = self.yaml_multi_constructors[None]
+                elif None in self.yaml_constructors:
+                    constructor = self.yaml_constructors[None]
+                elif isinstance(node, ScalarNode):
+                    constructor = self.__class__.construct_scalar
+                elif isinstance(node, SequenceNode):
+                    constructor = self.__class__.construct_sequence
+                elif isinstance(node, MappingNode):
+                    constructor = self.__class__.construct_mapping
+        if tag_suffix is None:
+            data = constructor(self, node)
+        else:
+            data = constructor(self, tag_suffix, node)
+        if isinstance(data, types.GeneratorType):
+            generator = data
+            data = next(generator)
+            if self.deep_construct:
+                for _dummy in generator:
+                    pass
+            else:
+                self.state_generators.append(generator)
+        return data
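+    # Why constructors may be generators (editor note, hedged): two-step
+    # constructors such as construct_yaml_map yield the empty container first
+    # and fill it when resumed. The container is therefore registered in
+    # constructed_objects before its values exist, which is what lets a
+    # self-referencing alias like
+    #
+    #     a: &a {self: *a}
+    #
+    # resolve to the dict that is still being built.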
+
+    def construct_scalar(self, node):
+        # type: (Any) -> Any
+        if not isinstance(node, ScalarNode):
+            raise ConstructorError(
+                None,
+                None,
+                "expected a scalar node, but found %s" % node.id,
+                node.start_mark,
+            )
+        return node.value
+
+    def construct_sequence(self, node, deep=False):
+        # type: (Any, bool) -> Any
+        """deep is True when creating an object/mapping recursively,
+        in that case want the underlying elements available during construction
+        """
+        if not isinstance(node, SequenceNode):
+            raise ConstructorError(
+                None,
+                None,
+                "expected a sequence node, but found %s" % node.id,
+                node.start_mark,
+            )
+        return [self.construct_object(child, deep=deep) for child in node.value]
+
+    def construct_mapping(self, node, deep=False):
+        # type: (Any, bool) -> Any
+        """deep is True when creating an object/mapping recursively,
+        in that case want the underlying elements available during construction
+        """
+        if not isinstance(node, MappingNode):
+            raise ConstructorError(
+                None,
+                None,
+                "expected a mapping node, but found %s" % node.id,
+                node.start_mark,
+            )
+        total_mapping = self.yaml_base_dict_type()
+        if getattr(node, "merge", None) is not None:
+            todo = [(node.merge, False), (node.value, False)]
+        else:
+            todo = [(node.value, True)]
+        for values, check in todo:
+            mapping = self.yaml_base_dict_type()  # type: Dict[Any, Any]
+            for key_node, value_node in values:
+                # keys can be list -> deep
+                key = self.construct_object(key_node, deep=True)
+                # lists are not hashable, but tuples are
+                if not isinstance(key, Hashable):
+                    if isinstance(key, list):
+                        key = tuple(key)
+                if PY2:
+                    try:
+                        hash(key)
+                    except TypeError as exc:
+                        raise ConstructorError(
+                            "while constructing a mapping",
+                            node.start_mark,
+                            "found unacceptable key (%s)" % exc,
+                            key_node.start_mark,
+                        )
+                else:
+                    if not isinstance(key, Hashable):
+                        raise ConstructorError(
+                            "while constructing a mapping",
+                            node.start_mark,
+                            "found unhashable key",
+                            key_node.start_mark,
+                        )
+
+                value = self.construct_object(value_node, deep=deep)
+                if check:
+                    if self.check_mapping_key(node, key_node, mapping, key, value):
+                        mapping[key] = value
+                else:
+                    mapping[key] = value
+            total_mapping.update(mapping)
+        return total_mapping
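+    # Complex-key sketch (assumption): a sequence used as a key, e.g.
+    #
+    #     ? [a, b]
+    #     : 1
+    #
+    # is constructed deep as the list ['a', 'b'] and then converted to the
+    # hashable tuple ('a', 'b') by the isinstance check above.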
+
+    def check_mapping_key(self, node, key_node, mapping, key, value):
+        # type: (Any, Any, Any, Any, Any) -> bool
+        """return True if key is unique"""
+        if key in mapping:
+            if not self.allow_duplicate_keys:
+                mk = mapping.get(key)
+                if PY2:
+                    if isinstance(key, unicode):
+                        key = key.encode("utf-8")
+                    if isinstance(value, unicode):
+                        value = value.encode("utf-8")
+                    if isinstance(mk, unicode):
+                        mk = mk.encode("utf-8")
+                args = [
+                    "while constructing a mapping",
+                    node.start_mark,
+                    'found duplicate key "{}" with value "{}" '
+                    '(original value: "{}")'.format(key, value, mk),
+                    key_node.start_mark,
+                    """
+                    To suppress this check see:
+                        http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
+                    """,
+                    """\
+                    Duplicate keys will become an error in future releases, and are errors
+                    by default when using the new API.
+                    """,
+                ]
+                if self.allow_duplicate_keys is None:
+                    warnings.warn(DuplicateKeyFutureWarning(*args))
+                else:
+                    raise DuplicateKeyError(*args)
+            return False
+        return True
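+    # Duplicate-key sketch (assumption): with allow_duplicate_keys false,
+    # loading "a: 1\na: 2" raises DuplicateKeyError; with the transitional
+    # None setting it only warns (DuplicateKeyFutureWarning). When it does not
+    # raise, it returns False for the repeated key, so the first value is kept.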
+
+    def check_set_key(self, node, key_node, setting, key):
+        # type: (Any, Any, Any, Any) -> None
+        if key in setting:
+            if not self.allow_duplicate_keys:
+                if PY2:
+                    if isinstance(key, unicode):
+                        key = key.encode("utf-8")
+                args = [
+                    "while constructing a set",
+                    node.start_mark,
+                    'found duplicate key "{}"'.format(key),
+                    key_node.start_mark,
+                    """
+                    To suppress this check see:
+                        http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
+                    """,
+                    """\
+                    Duplicate keys will become an error in future releases, and are errors
+                    by default when using the new API.
+                    """,
+                ]
+                if self.allow_duplicate_keys is None:
+                    warnings.warn(DuplicateKeyFutureWarning(*args))
+                else:
+                    raise DuplicateKeyError(*args)
+
+    def construct_pairs(self, node, deep=False):
+        # type: (Any, bool) -> Any
+        if not isinstance(node, MappingNode):
+            raise ConstructorError(
+                None,
+                None,
+                "expected a mapping node, but found %s" % node.id,
+                node.start_mark,
+            )
+        pairs = []
+        for key_node, value_node in node.value:
+            key = self.construct_object(key_node, deep=deep)
+            value = self.construct_object(value_node, deep=deep)
+            pairs.append((key, value))
+        return pairs
+
+    @classmethod
+    def add_constructor(cls, tag, constructor):
+        # type: (Any, Any) -> None
+        if "yaml_constructors" not in cls.__dict__:
+            cls.yaml_constructors = cls.yaml_constructors.copy()
+        cls.yaml_constructors[tag] = constructor
+
+    @classmethod
+    def add_multi_constructor(cls, tag_prefix, multi_constructor):
+        # type: (Any, Any) -> None
+        if "yaml_multi_constructors" not in cls.__dict__:
+            cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+        cls.yaml_multi_constructors[tag_prefix] = multi_constructor
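+    # Registration sketch (illustrative; `!point` is a hypothetical tag):
+    #
+    #     def construct_point(constructor, node):
+    #         x, y = constructor.construct_sequence(node)
+    #         return (x, y)
+    #
+    #     SafeConstructor.add_constructor(u'!point', construct_point)
+    #
+    # The copy() above gives each subclass its own registry, so the
+    # registration does not leak into sibling constructor classes.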
+
+
+class SafeConstructor(BaseConstructor):
+    def construct_scalar(self, node):
+        # type: (Any) -> Any
+        if isinstance(node, MappingNode):
+            for key_node, value_node in node.value:
+                if key_node.tag == u"tag:yaml.org,2002:value":
+                    return self.construct_scalar(value_node)
+        return BaseConstructor.construct_scalar(self, node)
+
+    def flatten_mapping(self, node):
+        # type: (Any) -> Any
+        """
+        This implements the merge key feature http://yaml.org/type/merge.html
+        by inserting keys from the merge dict/list of dicts if not yet
+        available in this node
+        """
+        merge = []  # type: List[Any]
+        index = 0
+        while index < len(node.value):
+            key_node, value_node = node.value[index]
+            if key_node.tag == u"tag:yaml.org,2002:merge":
+                if merge:  # double << key
+                    if self.allow_duplicate_keys:
+                        del node.value[index]
+                        index += 1
+                        continue
+                    args = [
+                        "while constructing a mapping",
+                        node.start_mark,
+                        'found duplicate key "{}"'.format(key_node.value),
+                        key_node.start_mark,
+                        """
+                        To suppress this check see:
+                           http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
+                        """,
+                        """\
+                        Duplicate keys will become an error in future releases, and are errors
+                        by default when using the new API.
+                        """,
+                    ]
+                    if self.allow_duplicate_keys is None:
+                        warnings.warn(DuplicateKeyFutureWarning(*args))
+                    else:
+                        raise DuplicateKeyError(*args)
+                del node.value[index]
+                if isinstance(value_node, MappingNode):
+                    self.flatten_mapping(value_node)
+                    merge.extend(value_node.value)
+                elif isinstance(value_node, SequenceNode):
+                    submerge = []
+                    for subnode in value_node.value:
+                        if not isinstance(subnode, MappingNode):
+                            raise ConstructorError(
+                                "while constructing a mapping",
+                                node.start_mark,
+                                "expected a mapping for merging, but found %s"
+                                % subnode.id,
+                                subnode.start_mark,
+                            )
+                        self.flatten_mapping(subnode)
+                        submerge.append(subnode.value)
+                    submerge.reverse()
+                    for value in submerge:
+                        merge.extend(value)
+                else:
+                    raise ConstructorError(
+                        "while constructing a mapping",
+                        node.start_mark,
+                        "expected a mapping or list of mappings for merging, "
+                        "but found %s" % value_node.id,
+                        value_node.start_mark,
+                    )
+            elif key_node.tag == u"tag:yaml.org,2002:value":
+                key_node.tag = u"tag:yaml.org,2002:str"
+                index += 1
+            else:
+                index += 1
+        if bool(merge):
+            # keep the merged pairs separate so that construct_mapping can
+            # apply them without triggering the duplicate-key check
+            node.merge = merge
+            node.value = merge + node.value
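+    # Merge-key sketch (assumption): given
+    #
+    #     defaults: &d {a: 1, b: 2}
+    #     node:
+    #       <<: *d
+    #       b: 3
+    #
+    # the (a, b) pairs from *d are moved into node.merge and prepended to
+    # node.value, so construct_mapping applies them first and the explicit
+    # b: 3 overrides the merged value.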
+
+    def construct_mapping(self, node, deep=False):
+        # type: (Any, bool) -> Any
+        """deep is True when creating an object/mapping recursively,
+        in that case want the underlying elements available during construction
+        """
+        if isinstance(node, MappingNode):
+            self.flatten_mapping(node)
+        return BaseConstructor.construct_mapping(self, node, deep=deep)
+
+    def construct_yaml_null(self, node):
+        # type: (Any) -> Any
+        self.construct_scalar(node)
+        return None
+
+    # YAML 1.2 spec doesn't mention yes/no etc any more, 1.1 does
+    bool_values = {
+        u"yes": True,
+        u"no": False,
+        u"y": True,
+        u"n": False,
+        u"true": True,
+        u"false": False,
+        u"on": True,
+        u"off": False,
+    }
+
+    def construct_yaml_bool(self, node):
+        # type: (Any) -> bool
+        value = self.construct_scalar(node)
+        return self.bool_values[value.lower()]
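+    # YAML 1.1 bool sketch: "yes"/"y"/"on" construct True and "no"/"n"/"off"
+    # construct False, case-insensitively via .lower(); under YAML 1.2 only
+    # true/false are resolved to the bool tag (see the note above bool_values).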
+
+    def construct_yaml_int(self, node):
+        # type: (Any) -> int
+        value_s = to_str(self.construct_scalar(node))
+        value_s = value_s.replace("_", "")
+        sign = +1
+        if value_s[0] == "-":
+            sign = -1
+        if value_s[0] in "+-":
+            value_s = value_s[1:]
+        if value_s == "0":
+            return 0
+        elif value_s.startswith("0b"):
+            return sign * int(value_s[2:], 2)
+        elif value_s.startswith("0x"):
+            return sign * int(value_s[2:], 16)
+        elif value_s.startswith("0o"):
+            return sign * int(value_s[2:], 8)
+        elif self.resolver.processing_version == (1, 1) and value_s[0] == "0":
+            return sign * int(value_s, 8)
+        elif self.resolver.processing_version == (1, 1) and ":" in value_s:
+            digits = [int(part) for part in value_s.split(":")]
+            digits.reverse()
+            base = 1
+            value = 0
+            for digit in digits:
+                value += digit * base
+                base *= 60
+            return sign * value
+        else:
+            return sign * int(value_s)
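+    # Sexagesimal sketch (YAML 1.1): "1:30:00" splits into [1, 30, 0],
+    # reversed to [0, 30, 1], and accumulates 0*1 + 30*60 + 1*3600 = 5400.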
+
+    inf_value = 1e300
+    while inf_value != inf_value * inf_value:
+        inf_value *= inf_value
+    nan_value = -inf_value / inf_value  # Trying to make a quiet NaN (like C99).
+
+    def construct_yaml_float(self, node):
+        # type: (Any) -> float
+        value_so = to_str(self.construct_scalar(node))
+        value_s = value_so.replace("_", "").lower()
+        sign = +1
+        if value_s[0] == "-":
+            sign = -1
+        if value_s[0] in "+-":
+            value_s = value_s[1:]
+        if value_s == ".inf":
+            return sign * self.inf_value
+        elif value_s == ".nan":
+            return self.nan_value
+        elif self.resolver.processing_version != (1, 2) and ":" in value_s:
+            digits = [float(part) for part in value_s.split(":")]
+            digits.reverse()
+            base = 1
+            value = 0.0
+            for digit in digits:
+                value += digit * base
+                base *= 60
+            return sign * value
+        else:
+            if self.resolver.processing_version != (1, 2) and "e" in value_s:
+                # value_s is lower case independent of input
+                mantissa, exponent = value_s.split("e")
+                if "." not in mantissa:
+                    warnings.warn(MantissaNoDotYAML1_1Warning(node, value_so))
+            return sign * float(value_s)
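+    # Mantissa sketch (assumption): under YAML 1.1 a value like "1e3" has no
+    # dot in the mantissa, so MantissaNoDotYAML1_1Warning is emitted; the
+    # value is still parsed as 1000.0.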
+
+    if PY3:
+
+        def construct_yaml_binary(self, node):
+            # type: (Any) -> Any
+            try:
+                value = self.construct_scalar(node).encode("ascii")
+            except UnicodeEncodeError as exc:
+                raise ConstructorError(
+                    None,
+                    None,
+                    "failed to convert base64 data into ascii: %s" % exc,
+                    node.start_mark,
+                )
+            try:
+                if hasattr(base64, "decodebytes"):
+                    return base64.decodebytes(value)
+                else:
+                    return base64.decodestring(value)
+            except binascii.Error as exc:
+                raise ConstructorError(
+                    None,
+                    None,
+                    "failed to decode base64 data: %s" % exc,
+                    node.start_mark,
+                )
+
+    else:
+
+        def construct_yaml_binary(self, node):
+            # type: (Any) -> Any
+            value = self.construct_scalar(node)
+            try:
+                return to_str(value).decode("base64")
+            except (binascii.Error, UnicodeEncodeError) as exc:
+                raise ConstructorError(
+                    None,
+                    None,
+                    "failed to decode base64 data: %s" % exc,
+                    node.start_mark,
+                )
+
+    timestamp_regexp = RegExp(
+        u"""^(?P<year>[0-9][0-9][0-9][0-9])
+          -(?P<month>[0-9][0-9]?)
+          -(?P<day>[0-9][0-9]?)
+          (?:((?P<t>[Tt])|[ \\t]+)   # explictly not retaining extra spaces
+          (?P<hour>[0-9][0-9]?)
+          :(?P<minute>[0-9][0-9])
+          :(?P<second>[0-9][0-9])
+          (?:\\.(?P<fraction>[0-9]*))?
+          (?:[ \\t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+          (?::(?P<tz_minute>[0-9][0-9]))?))?)?$""",
+        re.X,
+    )
+
+    def construct_yaml_timestamp(self, node, values=None):
+        # type: (Any, Any) -> Any
+        if values is None:
+            try:
+                match = self.timestamp_regexp.match(node.value)
+            except TypeError:
+                match = None
+            if match is None:
+                raise ConstructorError(
+                    None,
+                    None,
+                    'failed to construct timestamp from "{}"'.format(node.value),
+                    node.start_mark,
+                )
+            values = match.groupdict()
+        year = int(values["year"])
+        month = int(values["month"])
+        day = int(values["day"])
+        if not values["hour"]:
+            return datetime.date(year, month, day)
+        hour = int(values["hour"])
+        minute = int(values["minute"])
+        second = int(values["second"])
+        fraction = 0
+        if values["fraction"]:
+            fraction_s = values["fraction"][:6]
+            while len(fraction_s) < 6:
+                fraction_s += "0"
+            fraction = int(fraction_s)
+            if len(values["fraction"]) > 6 and int(values["fraction"][6]) > 4:
+                fraction += 1
+        delta = None
+        if values["tz_sign"]:
+            tz_hour = int(values["tz_hour"])
+            minutes = values["tz_minute"]
+            tz_minute = int(minutes) if minutes else 0
+            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+            if values["tz_sign"] == "-":
+                delta = -delta
+        # should do something else instead (or hook this up to the preceding
+        # if statement in reverse):
+        #  if delta is None:
+        #      return datetime.datetime(year, month, day, hour, minute, second, fraction)
+        #  return datetime.datetime(year, month, day, hour, minute, second, fraction,
+        #                           datetime.timezone.utc)
+        # The above is still not good enough though: it should attach real tzinfo.
+        # In Python 3 that is easily doable; drop that kind of support for
+        # Python 2, which has no native tzinfo.
+        data = datetime.datetime(year, month, day, hour, minute, second, fraction)
+        if delta:
+            data -= delta
+        return data
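+    # Timestamp sketch (worked example): "2001-12-14 21:59:43.10 -5" matches
+    # the regexp above; fraction "10" pads to 100000 microseconds, and the
+    # -5:00 offset is subtracted, giving the naive UTC value
+    # datetime.datetime(2001, 12, 15, 2, 59, 43, 100000).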
+
+    def construct_yaml_omap(self, node):
+        # type: (Any) -> Any
+        # Note: we now check for duplicate keys (see the assert below)
+        omap = ordereddict()
+        yield omap
+        if not isinstance(node, SequenceNode):
+            raise ConstructorError(
+                "while constructing an ordered map",
+                node.start_mark,
+                "expected a sequence, but found %s" % node.id,
+                node.start_mark,
+            )
+        for subnode in node.value:
+            if not isinstance(subnode, MappingNode):
+                raise ConstructorError(
+                    "while constructing an ordered map",
+                    node.start_mark,
+                    "expected a mapping of length 1, but found %s" % subnode.id,
+                    subnode.start_mark,
+                )
+            if len(subnode.value) != 1:
+                raise ConstructorError(
+                    "while constructing an ordered map",
+                    node.start_mark,
+                    "expected a single mapping item, but found %d items"
+                    % len(subnode.value),
+                    subnode.start_mark,
+                )
+            key_node, value_node = subnode.value[0]
+            key = self.construct_object(key_node)
+            assert key not in omap
+            value = self.construct_object(value_node)
+            omap[key] = value
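+    # omap sketch: "!!omap [a: 1, b: 2]" constructs an ordereddict that keeps
+    # the entry order; each item must be a single-pair mapping, and a repeated
+    # key trips the assert above.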
+
+    def construct_yaml_pairs(self, node):
+        # type: (Any) -> Any
+        # Note: like `construct_yaml_omap`, but without the duplicate-key check.
+        pairs = []  # type: List[Any]
+        yield pairs
+        if not isinstance(node, SequenceNode):
+            raise ConstructorError(
+                "while constructing pairs",
+                node.start_mark,
+                "expected a sequence, but found %s" % node.id,
+                node.start_mark,
+            )
+        for subnode in node.value:
+            if not isinstance(subnode, MappingNode):
+                raise ConstructorError(
+                    "while constructing pairs",
+                    node.start_mark,
+                    "expected a mapping of length 1, but found %s" % subnode.id,
+                    subnode.start_mark,
+                )
+            if len(subnode.value) != 1:
+                raise ConstructorError(
+                    "while constructing pairs",
+                    node.start_mark,
+                    "expected a single mapping item, but found %d items"
+                    % len(subnode.value),
+                    subnode.start_mark,
+                )
+            key_node, value_node = subnode.value[0]
+            key = self.construct_object(key_node)
+            value = self.construct_object(value_node)
+            pairs.append((key, value))
+
+    def construct_yaml_set(self, node):
+        # type: (Any) -> Any
+        data = set()  # type: Set[Any]
+        yield data
+        value = self.construct_mapping(node)
+        data.update(value)
+
+    def construct_yaml_str(self, node):
+        # type: (Any) -> Any
+        value = self.construct_scalar(node)
+        if PY3:
+            return value
+        try:
+            return value.encode("ascii")
+        except UnicodeEncodeError:
+            return value
+
+    def construct_yaml_seq(self, node):
+        # type: (Any) -> Any
+        data = self.yaml_base_list_type()  # type: List[Any]
+        yield data
+        data.extend(self.construct_sequence(node))
+
+    def construct_yaml_map(self, node):
+        # type: (Any) -> Any
+        data = self.yaml_base_dict_type()  # type: Dict[Any, Any]
+        yield data
+        value = self.construct_mapping(node)
+        data.update(value)
+
+    def construct_yaml_object(self, node, cls):
+        # type: (Any, Any) -> Any
+        data = cls.__new__(cls)
+        yield data
+        if hasattr(data, "__setstate__"):
+            state = self.construct_mapping(node, deep=True)
+            data.__setstate__(state)
+        else:
+            state = self.construct_mapping(node)
+            data.__dict__.update(state)
+
+    def construct_undefined(self, node):
+        # type: (Any) -> None
+        raise ConstructorError(
+            None,
+            None,
+            "could not determine a constructor for the tag %r" % utf8(node.tag),
+            node.start_mark,
+        )
+
+
+SafeConstructor.add_constructor(
+    u"tag:yaml.org,2002:null", SafeConstructor.construct_yaml_null
+)
+
+SafeConstructor.add_constructor(
+    u"tag:yaml.org,2002:bool", SafeConstructor.construct_yaml_bool
+)
+
+SafeConstructor.add_constructor(
+    u"tag:yaml.org,2002:int", SafeConstructor.construct_yaml_int
+)
+
+SafeConstructor.add_constructor(
+    u"tag:yaml.org,2002:float", SafeConstructor.construct_yaml_float
+)
+
+SafeConstructor.add_constructor(
+    u"tag:yaml.org,2002:binary", SafeConstructor.construct_yaml_binary
+)
+
+SafeConstructor.add_constructor(
+    u"tag:yaml.org,2002:timestamp", SafeConstructor.construct_yaml_timestamp
+)
+
+SafeConstructor.add_constructor(
+    u"tag:yaml.org,2002:omap", SafeConstructor.construct_yaml_omap
+)
+
+SafeConstructor.add_constructor(
+    u"tag:yaml.org,2002:pairs", SafeConstructor.construct_yaml_pairs
+)
+
+SafeConstructor.add_constructor(
+    u"tag:yaml.org,2002:set", SafeConstructor.construct_yaml_set
+)
+
+SafeConstructor.add_constructor(
+    u"tag:yaml.org,2002:str", SafeConstructor.construct_yaml_str
+)
+
+SafeConstructor.add_constructor(
+    u"tag:yaml.org,2002:seq", SafeConstructor.construct_yaml_seq
+)
+
+SafeConstructor.add_constructor(
+    u"tag:yaml.org,2002:map", SafeConstructor.construct_yaml_map
+)
+
+SafeConstructor.add_constructor(None, SafeConstructor.construct_undefined)
+
+if PY2:
+
+    class classobj:
+        pass
+
+
+class Constructor(SafeConstructor):
+    def construct_python_str(self, node):
+        # type: (Any) -> Any
+        return utf8(self.construct_scalar(node))
+
+    def construct_python_unicode(self, node):
+        # type: (Any) -> Any
+        return self.construct_scalar(node)
+
+    if PY3:
+
+        def construct_python_bytes(self, node):
+            # type: (Any) -> Any
+            try:
+                value = self.construct_scalar(node).encode("ascii")
+            except UnicodeEncodeError as exc:
+                raise ConstructorError(
+                    None,
+                    None,
+                    "failed to convert base64 data into ascii: %s" % exc,
+                    node.start_mark,
+                )
+            try:
+                if hasattr(base64, "decodebytes"):
+                    return base64.decodebytes(value)
+                else:
+                    return base64.decodestring(value)
+            except binascii.Error as exc:
+                raise ConstructorError(
+                    None,
+                    None,
+                    "failed to decode base64 data: %s" % exc,
+                    node.start_mark,
+                )
+
+    def construct_python_long(self, node):
+        # type: (Any) -> int
+        val = self.construct_yaml_int(node)
+        if PY3:
+            return val
+        return int(val)
+
+    def construct_python_complex(self, node):
+        # type: (Any) -> Any
+        return complex(self.construct_scalar(node))
+
+    def construct_python_tuple(self, node):
+        # type: (Any) -> Any
+        return tuple(self.construct_sequence(node))
+
+    def find_python_module(self, name, mark):
+        # type: (Any, Any) -> Any
+        if not name:
+            raise ConstructorError(
+                "while constructing a Python module",
+                mark,
+                "expected non-empty name appended to the tag",
+                mark,
+            )
+        try:
+            __import__(name)
+        except ImportError as exc:
+            raise ConstructorError(
+                "while constructing a Python module",
+                mark,
+                "cannot find module %r (%s)" % (utf8(name), exc),
+                mark,
+            )
+        return sys.modules[name]
+
+    def find_python_name(self, name, mark):
+        # type: (Any, Any) -> Any
+        if not name:
+            raise ConstructorError(
+                "while constructing a Python object",
+                mark,
+                "expected non-empty name appended to the tag",
+                mark,
+            )
+        if u"." in name:
+            lname = name.split(".")
+            lmodule_name = lname
+            lobject_name = []  # type: List[Any]
+            while len(lmodule_name) > 1:
+                lobject_name.insert(0, lmodule_name.pop())
+                module_name = ".".join(lmodule_name)
+                try:
+                    __import__(module_name)
+                    # object_name = '.'.join(object_name)
+                    break
+                except ImportError:
+                    continue
+        else:
+            module_name = builtins_module
+            lobject_name = [name]
+        try:
+            __import__(module_name)
+        except ImportError as exc:
+            raise ConstructorError(
+                "while constructing a Python object",
+                mark,
+                "cannot find module %r (%s)" % (utf8(module_name), exc),
+                mark,
+            )
+        module = sys.modules[module_name]
+        object_name = ".".join(lobject_name)
+        obj = module
+        while lobject_name:
+            if not hasattr(obj, lobject_name[0]):
+                raise ConstructorError(
+                    "while constructing a Python object",
+                    mark,
+                    "cannot find %r in the module %r"
+                    % (utf8(object_name), module.__name__),
+                    mark,
+                )
+            obj = getattr(obj, lobject_name.pop(0))
+        return obj
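+    # Name-resolution sketch (illustrative): for "collections.OrderedDict" the
+    # loop peels "OrderedDict" off, imports "collections", then walks
+    # getattr() down to the class object; a bare name is looked up in the
+    # builtins module instead.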
+
+    def construct_python_name(self, suffix, node):
+        # type: (Any, Any) -> Any
+        value = self.construct_scalar(node)
+        if value:
+            raise ConstructorError(
+                "while constructing a Python name",
+                node.start_mark,
+                "expected the empty value, but found %r" % utf8(value),
+                node.start_mark,
+            )
+        return self.find_python_name(suffix, node.start_mark)
+
+    def construct_python_module(self, suffix, node):
+        # type: (Any, Any) -> Any
+        value = self.construct_scalar(node)
+        if value:
+            raise ConstructorError(
+                "while constructing a Python module",
+                node.start_mark,
+                "expected the empty value, but found %r" % utf8(value),
+                node.start_mark,
+            )
+        return self.find_python_module(suffix, node.start_mark)
+
+    def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False):
+        # type: (Any, Any, Any, Any, bool) -> Any
+        if not args:
+            args = []
+        if not kwds:
+            kwds = {}
+        cls = self.find_python_name(suffix, node.start_mark)
+        if PY3:
+            if newobj and isinstance(cls, type):
+                return cls.__new__(cls, *args, **kwds)
+            else:
+                return cls(*args, **kwds)
+        else:
+            if newobj and isinstance(cls, type(classobj)) and not args and not kwds:
+                instance = classobj()
+                instance.__class__ = cls
+                return instance
+            elif newobj and isinstance(cls, type):
+                return cls.__new__(cls, *args, **kwds)
+            else:
+                return cls(*args, **kwds)
+
+    def set_python_instance_state(self, instance, state):
+        # type: (Any, Any) -> None
+        if hasattr(instance, "__setstate__"):
+            instance.__setstate__(state)
+        else:
+            slotstate = {}  # type: Dict[Any, Any]
+            if isinstance(state, tuple) and len(state) == 2:
+                state, slotstate = state
+            if hasattr(instance, "__dict__"):
+                instance.__dict__.update(state)
+            elif state:
+                slotstate.update(state)
+            for key, value in slotstate.items():
+                setattr(instance, key, value)
+
+    def construct_python_object(self, suffix, node):
+        # type: (Any, Any) -> Any
+        # Format:
+        #   !!python/object:module.name { ... state ... }
+        instance = self.make_python_instance(suffix, node, newobj=True)
+        self.recursive_objects[node] = instance
+        yield instance
+        deep = hasattr(instance, "__setstate__")
+        state = self.construct_mapping(node, deep=deep)
+        self.set_python_instance_state(instance, state)
+
+    def construct_python_object_apply(self, suffix, node, newobj=False):
+        # type: (Any, Any, bool) -> Any
+        # Format:
+        #   !!python/object/apply       # (or !!python/object/new)
+        #   args: [ ... arguments ... ]
+        #   kwds: { ... keywords ... }
+        #   state: ... state ...
+        #   listitems: [ ... listitems ... ]
+        #   dictitems: { ... dictitems ... }
+        # or short format:
+        #   !!python/object/apply [ ... arguments ... ]
+        # The difference between !!python/object/apply and !!python/object/new
+        # is how an object is created, check make_python_instance for details.
+        if isinstance(node, SequenceNode):
+            args = self.construct_sequence(node, deep=True)
+            kwds = {}  # type: Dict[Any, Any]
+            state = {}  # type: Dict[Any, Any]
+            listitems = []  # type: List[Any]
+            dictitems = {}  # type: Dict[Any, Any]
+        else:
+            value = self.construct_mapping(node, deep=True)
+            args = value.get("args", [])
+            kwds = value.get("kwds", {})
+            state = value.get("state", {})
+            listitems = value.get("listitems", [])
+            dictitems = value.get("dictitems", {})
+        instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+        if bool(state):
+            self.set_python_instance_state(instance, state)
+        if bool(listitems):
+            instance.extend(listitems)
+        if bool(dictitems):
+            for key in dictitems:
+                instance[key] = dictitems[key]
+        return instance
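+    # Apply sketch (unsafe tags, illustrative only):
+    #
+    #     !!python/object/apply:collections.OrderedDict [[[a, 1], [b, 2]]]
+    #
+    # constructs the args list deeply and calls
+    # OrderedDict([['a', 1], ['b', 2]]).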
+
+    def construct_python_object_new(self, suffix, node):
+        # type: (Any, Any) -> Any
+        return self.construct_python_object_apply(suffix, node, newobj=True)
+
+
+Constructor.add_constructor(
+    u"tag:yaml.org,2002:python/none", Constructor.construct_yaml_null
+)
+
+Constructor.add_constructor(
+    u"tag:yaml.org,2002:python/bool", Constructor.construct_yaml_bool
+)
+
+Constructor.add_constructor(
+    u"tag:yaml.org,2002:python/str", Constructor.construct_python_str
+)
+
+Constructor.add_constructor(
+    u"tag:yaml.org,2002:python/unicode", Constructor.construct_python_unicode
+)
+
+if PY3:
+    Constructor.add_constructor(
+        u"tag:yaml.org,2002:python/bytes", Constructor.construct_python_bytes
+    )
+
+Constructor.add_constructor(
+    u"tag:yaml.org,2002:python/int", Constructor.construct_yaml_int
+)
+
+Constructor.add_constructor(
+    u"tag:yaml.org,2002:python/long", Constructor.construct_python_long
+)
+
+Constructor.add_constructor(
+    u"tag:yaml.org,2002:python/float", Constructor.construct_yaml_float
+)
+
+Constructor.add_constructor(
+    u"tag:yaml.org,2002:python/complex", Constructor.construct_python_complex
+)
+
+Constructor.add_constructor(
+    u"tag:yaml.org,2002:python/list", Constructor.construct_yaml_seq
+)
+
+Constructor.add_constructor(
+    u"tag:yaml.org,2002:python/tuple", Constructor.construct_python_tuple
+)
+
+Constructor.add_constructor(
+    u"tag:yaml.org,2002:python/dict", Constructor.construct_yaml_map
+)
+
+Constructor.add_multi_constructor(
+    u"tag:yaml.org,2002:python/name:", Constructor.construct_python_name
+)
+
+Constructor.add_multi_constructor(
+    u"tag:yaml.org,2002:python/module:", Constructor.construct_python_module
+)
+
+Constructor.add_multi_constructor(
+    u"tag:yaml.org,2002:python/object:", Constructor.construct_python_object
+)
+
+Constructor.add_multi_constructor(
+    u"tag:yaml.org,2002:python/object/apply:", Constructor.construct_python_object_apply
+)
+
+Constructor.add_multi_constructor(
+    u"tag:yaml.org,2002:python/object/new:", Constructor.construct_python_object_new
+)
+
+
+class RoundTripConstructor(SafeConstructor):
+    """need to store the comments on the node itself,
+    as well as on the items
+    """
+
+    def construct_scalar(self, node):
+        # type: (Any) -> Any
+        if not isinstance(node, ScalarNode):
+            raise ConstructorError(
+                None,
+                None,
+                "expected a scalar node, but found %s" % node.id,
+                node.start_mark,
+            )
+
+        if node.style == "|" and isinstance(node.value, text_type):
+            lss = LiteralScalarString(node.value, anchor=node.anchor)
+            if node.comment and node.comment[1]:
+                lss.comment = node.comment[1][0]  # type: ignore
+            return lss
+        if node.style == ">" and isinstance(node.value, text_type):
+            fold_positions = []  # type: List[int]
+            idx = -1
+            while True:
+                idx = node.value.find("\a", idx + 1)
+                if idx < 0:
+                    break
+                fold_positions.append(idx - len(fold_positions))
+            fss = FoldedScalarString(node.value.replace("\a", ""), anchor=node.anchor)
+            if node.comment and node.comment[1]:
+                fss.comment = node.comment[1][0]  # type: ignore
+            if fold_positions:
+                fss.fold_pos = fold_positions  # type: ignore
+            return fss
+        elif bool(self._preserve_quotes) and isinstance(node.value, text_type):
+            if node.style == "'":
+                return SingleQuotedScalarString(node.value, anchor=node.anchor)
+            if node.style == '"':
+                return DoubleQuotedScalarString(node.value, anchor=node.anchor)
+        if node.anchor:
+            return PlainScalarString(node.value, anchor=node.anchor)
+        return node.value
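+    # Round-trip scalar sketch (assumption): | blocks always come back as
+    # LiteralScalarString and > blocks as FoldedScalarString (with "\a"
+    # markers recording fold positions); 'x' and "x" are preserved as
+    # Single/DoubleQuotedScalarString only when preserve_quotes is set.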
+
+    def construct_yaml_int(self, node):
+        # type: (Any) -> Any
+        width = None  # type: Any
+        value_su = to_str(self.construct_scalar(node))
+        try:
+            sx = value_su.rstrip("_")
+            underscore = [len(sx) - sx.rindex("_") - 1, False, False]  # type: Any
+        except ValueError:
+            underscore = None
+        except IndexError:
+            underscore = None
+        value_s = value_su.replace("_", "")
+        sign = +1
+        if value_s[0] == "-":
+            sign = -1
+        if value_s[0] in "+-":
+            value_s = value_s[1:]
+        if value_s == "0":
+            return 0
+        elif value_s.startswith("0b"):
+            if self.resolver.processing_version > (1, 1) and value_s[2] == "0":
+                width = len(value_s[2:])
+            if underscore is not None:
+                underscore[1] = value_su[2] == "_"
+                underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == "_"
+            return BinaryInt(
+                sign * int(value_s[2:], 2),
+                width=width,
+                underscore=underscore,
+                anchor=node.anchor,
+            )
+        elif value_s.startswith("0x"):
+            # default to lower-case (HexInt); switch to HexCapsInt only when
+            # the first hex letter encountered is upper-case
+            if self.resolver.processing_version > (1, 1) and value_s[2] == "0":
+                width = len(value_s[2:])
+            hex_fun = HexInt  # type: Any
+            for ch in value_s[2:]:
+                if ch in "ABCDEF":  # first non-digit is capital
+                    hex_fun = HexCapsInt
+                    break
+                if ch in "abcdef":
+                    break
+            if underscore is not None:
+                underscore[1] = value_su[2] == "_"
+                underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == "_"
+            return hex_fun(
+                sign * int(value_s[2:], 16),
+                width=width,
+                underscore=underscore,
+                anchor=node.anchor,
+            )
+        elif value_s.startswith("0o"):
+            if self.resolver.processing_version > (1, 1) and value_s[2] == "0":
+                width = len(value_s[2:])
+            if underscore is not None:
+                underscore[1] = value_su[2] == "_"
+                underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == "_"
+            return OctalInt(
+                sign * int(value_s[2:], 8),
+                width=width,
+                underscore=underscore,
+                anchor=node.anchor,
+            )
+        elif self.resolver.processing_version != (1, 2) and value_s[0] == "0":
+            return sign * int(value_s, 8)
+        elif self.resolver.processing_version != (1, 2) and ":" in value_s:
+            digits = [int(part) for part in value_s.split(":")]
+            digits.reverse()
+            base = 1
+            value = 0
+            for digit in digits:
+                value += digit * base
+                base *= 60
+            return sign * value
+        elif self.resolver.processing_version > (1, 1) and value_s[0] == "0":
+            # not an octal, an integer with leading zero(s)
+            if underscore is not None:
+                # cannot have a leading underscore
+                underscore[2] = len(value_su) > 1 and value_su[-1] == "_"
+            return ScalarInt(
+                sign * int(value_s), width=len(value_s), underscore=underscore
+            )
+        elif underscore:
+            # cannot have a leading underscore
+            underscore[2] = len(value_su) > 1 and value_su[-1] == "_"
+            return ScalarInt(
+                sign * int(value_s),
+                width=None,
+                underscore=underscore,
+                anchor=node.anchor,
+            )
+        elif node.anchor:
+            return ScalarInt(sign * int(value_s), width=None, anchor=node.anchor)
+        else:
+            return sign * int(value_s)
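+    # Int round-trip sketch: 0b10 -> BinaryInt(2), 0o17 -> OctalInt(15),
+    # 0x1f -> HexInt(31) and 0x1F -> HexCapsInt(31); zero-padded width and
+    # underscore placement (e.g. 1_000) are stored so dumping can reproduce
+    # the original spelling.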
+
+    def construct_yaml_float(self, node):
+        # type: (Any) -> Any
+        def leading_zeros(v):
+            # type: (Any) -> int
+            lead0 = 0
+            idx = 0
+            while idx < len(v) and v[idx] in "0.":
+                if v[idx] == "0":
+                    lead0 += 1
+                idx += 1
+            return lead0
+
+        # underscore = None
+        m_sign = False  # type: Any
+        value_so = to_str(self.construct_scalar(node))
+        value_s = value_so.replace("_", "").lower()
+        sign = +1
+        if value_s[0] == "-":
+            sign = -1
+        if value_s[0] in "+-":
+            m_sign = value_s[0]
+            value_s = value_s[1:]
+        if value_s == ".inf":
+            return sign * self.inf_value
+        if value_s == ".nan":
+            return self.nan_value
+        if self.resolver.processing_version != (1, 2) and ":" in value_s:
+            digits = [float(part) for part in value_s.split(":")]
+            digits.reverse()
+            base = 1
+            value = 0.0
+            for digit in digits:
+                value += digit * base
+                base *= 60
+            return sign * value
+        if "e" in value_s:
+            try:
+                mantissa, exponent = value_so.split("e")
+                exp = "e"
+            except ValueError:
+                mantissa, exponent = value_so.split("E")
+                exp = "E"
+            if self.resolver.processing_version != (1, 2):
+                # value_s is lower case independent of input
+                if "." not in mantissa:
+                    warnings.warn(MantissaNoDotYAML1_1Warning(node, value_so))
+            lead0 = leading_zeros(mantissa)
+            width = len(mantissa)
+            prec = mantissa.find(".")
+            if m_sign:
+                width -= 1
+            e_width = len(exponent)
+            e_sign = exponent[0] in "+-"
+            # nprint('sf', width, prec, m_sign, exp, e_width, e_sign)
+            return ScalarFloat(
+                sign * float(value_s),
+                width=width,
+                prec=prec,
+                m_sign=m_sign,
+                m_lead0=lead0,
+                exp=exp,
+                e_width=e_width,
+                e_sign=e_sign,
+                anchor=node.anchor,
+            )
+        width = len(value_so)
+        # .index() is safe here: this would not have resolved as a float
+        # without a dot
+        prec = value_so.index(".")
+        lead0 = leading_zeros(value_so)
+        return ScalarFloat(
+            sign * float(value_s),
+            width=width,
+            prec=prec,
+            m_sign=m_sign,
+            m_lead0=lead0,
+            anchor=node.anchor,
+        )
+
+    def construct_yaml_str(self, node):
+        # type: (Any) -> Any
+        value = self.construct_scalar(node)
+        if isinstance(value, ScalarString):
+            return value
+        if PY3:
+            return value
+        try:
+            return value.encode("ascii")
+        except AttributeError:
+            # in case you replace the node dynamically e.g. with a dict
+            return value
+        except UnicodeEncodeError:
+            return value
+
+    def construct_rt_sequence(self, node, seqtyp, deep=False):
+        # type: (Any, Any, bool) -> Any
+        if not isinstance(node, SequenceNode):
+            raise ConstructorError(
+                None,
+                None,
+                "expected a sequence node, but found %s" % node.id,
+                node.start_mark,
+            )
+        ret_val = []
+        if node.comment:
+            seqtyp._yaml_add_comment(node.comment[:2])
+            if len(node.comment) > 2:
+                seqtyp.yaml_end_comment_extend(node.comment[2], clear=True)
+        if node.anchor:
+            from strictyaml.ruamel.serializer import templated_id
+
+            if not templated_id(node.anchor):
+                seqtyp.yaml_set_anchor(node.anchor)
+        for idx, child in enumerate(node.value):
+            if child.comment:
+                seqtyp._yaml_add_comment(child.comment, key=idx)
+                child.comment = None  # if moved to sequence remove from child
+            ret_val.append(self.construct_object(child, deep=deep))
+            seqtyp._yaml_set_idx_line_col(
+                idx, [child.start_mark.line, child.start_mark.column]
+            )
+        return ret_val
+
+    def flatten_mapping(self, node):
+        # type: (Any) -> Any
+        """
+        This implements the merge key feature http://yaml.org/type/merge.html
+        by inserting keys from the merge dict/list of dicts if not yet
+        available in this node
+        """
+
+        def constructed(value_node):
+            # type: (Any) -> Any
+            # If the contents of a merge are defined within the
+            # merge marker, then they won't have been constructed
+            # yet. But if they were already constructed, we need to use
+            # the existing object.
+            if value_node in self.constructed_objects:
+                value = self.constructed_objects[value_node]
+            else:
+                value = self.construct_object(value_node, deep=False)
+            return value
+
+        # merge = []
+        merge_map_list = []  # type: List[Any]
+        index = 0
+        while index < len(node.value):
+            key_node, value_node = node.value[index]
+            if key_node.tag == u"tag:yaml.org,2002:merge":
+                if merge_map_list:  # double << key
+                    if self.allow_duplicate_keys:
+                        del node.value[index]
+                        index += 1
+                        continue
+                    args = [
+                        "while constructing a mapping",
+                        node.start_mark,
+                        'found duplicate key "{}"'.format(key_node.value),
+                        key_node.start_mark,
+                        """
+                        To suppress this check see:
+                           http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
+                        """,
+                        """\
+                        Duplicate keys will become an error in future releases, and are errors
+                        by default when using the new API.
+                        """,
+                    ]
+                    if self.allow_duplicate_keys is None:
+                        warnings.warn(DuplicateKeyFutureWarning(*args))
+                    else:
+                        raise DuplicateKeyError(*args)
+                del node.value[index]
+                if isinstance(value_node, MappingNode):
+                    merge_map_list.append((index, constructed(value_node)))
+                    # self.flatten_mapping(value_node)
+                    # merge.extend(value_node.value)
+                elif isinstance(value_node, SequenceNode):
+                    # submerge = []
+                    for subnode in value_node.value:
+                        if not isinstance(subnode, MappingNode):
+                            raise ConstructorError(
+                                "while constructing a mapping",
+                                node.start_mark,
+                                "expected a mapping for merging, but found %s"
+                                % subnode.id,
+                                subnode.start_mark,
+                            )
+                        merge_map_list.append((index, constructed(subnode)))
+                    #     self.flatten_mapping(subnode)
+                    #     submerge.append(subnode.value)
+                    # submerge.reverse()
+                    # for value in submerge:
+                    #     merge.extend(value)
+                else:
+                    raise ConstructorError(
+                        "while constructing a mapping",
+                        node.start_mark,
+                        "expected a mapping or list of mappings for merging, "
+                        "but found %s" % value_node.id,
+                        value_node.start_mark,
+                    )
+            elif key_node.tag == u"tag:yaml.org,2002:value":
+                key_node.tag = u"tag:yaml.org,2002:str"
+                index += 1
+            else:
+                index += 1
+        return merge_map_list
+        # if merge:
+        #     node.value = merge + node.value
+
+    def _sentinel(self):
+        # type: () -> None
+        pass
+
+    def construct_mapping(self, node, maptyp, deep=False):  # type: ignore
+        # type: (Any, Any, bool) -> Any
+        if not isinstance(node, MappingNode):
+            raise ConstructorError(
+                None,
+                None,
+                "expected a mapping node, but found %s" % node.id,
+                node.start_mark,
+            )
+        merge_map = self.flatten_mapping(node)
+        # mapping = {}
+        if node.comment:
+            maptyp._yaml_add_comment(node.comment[:2])
+            if len(node.comment) > 2:
+                maptyp.yaml_end_comment_extend(node.comment[2], clear=True)
+        if node.anchor:
+            from strictyaml.ruamel.serializer import templated_id
+
+            if not templated_id(node.anchor):
+                maptyp.yaml_set_anchor(node.anchor)
+        last_key, last_value = None, self._sentinel
+        for key_node, value_node in node.value:
+            # keys can be list -> deep
+            key = self.construct_object(key_node, deep=True)
+            # lists are not hashable, but tuples are
+            if not isinstance(key, Hashable):
+                if isinstance(key, MutableSequence):
+                    key_s = CommentedKeySeq(key)
+                    if key_node.flow_style is True:
+                        key_s.fa.set_flow_style()
+                    elif key_node.flow_style is False:
+                        key_s.fa.set_block_style()
+                    key = key_s
+                elif isinstance(key, MutableMapping):
+                    key_m = CommentedKeyMap(key)
+                    if key_node.flow_style is True:
+                        key_m.fa.set_flow_style()
+                    elif key_node.flow_style is False:
+                        key_m.fa.set_block_style()
+                    key = key_m
+            if PY2:
+                try:
+                    hash(key)
+                except TypeError as exc:
+                    raise ConstructorError(
+                        "while constructing a mapping",
+                        node.start_mark,
+                        "found unacceptable key (%s)" % exc,
+                        key_node.start_mark,
+                    )
+            else:
+                if not isinstance(key, Hashable):
+                    raise ConstructorError(
+                        "while constructing a mapping",
+                        node.start_mark,
+                        "found unhashable key",
+                        key_node.start_mark,
+                    )
+            value = self.construct_object(value_node, deep=deep)
+            if self.check_mapping_key(node, key_node, maptyp, key, value):
+                if (
+                    key_node.comment
+                    and len(key_node.comment) > 4
+                    and key_node.comment[4]
+                ):
+                    if last_value is None:
+                        key_node.comment[0] = key_node.comment.pop(4)
+                        maptyp._yaml_add_comment(key_node.comment, value=last_key)
+                    else:
+                        key_node.comment[2] = key_node.comment.pop(4)
+                        maptyp._yaml_add_comment(key_node.comment, key=key)
+                    key_node.comment = None
+                if key_node.comment:
+                    maptyp._yaml_add_comment(key_node.comment, key=key)
+                if value_node.comment:
+                    maptyp._yaml_add_comment(value_node.comment, value=key)
+                maptyp._yaml_set_kv_line_col(
+                    key,
+                    [
+                        key_node.start_mark.line,
+                        key_node.start_mark.column,
+                        value_node.start_mark.line,
+                        value_node.start_mark.column,
+                    ],
+                )
+                maptyp[key] = value
+                last_key, last_value = key, value  # could use indexing
+        # do this last, or <<: before a key will prevent insertion in instances
+        # of collections.OrderedDict (as they have no __contains__)
+        if merge_map:
+            maptyp.add_yaml_merge(merge_map)
+
+    def construct_setting(self, node, typ, deep=False):
+        # type: (Any, Any, bool) -> Any
+        if not isinstance(node, MappingNode):
+            raise ConstructorError(
+                None,
+                None,
+                "expected a mapping node, but found %s" % node.id,
+                node.start_mark,
+            )
+        if node.comment:
+            typ._yaml_add_comment(node.comment[:2])
+            if len(node.comment) > 2:
+                typ.yaml_end_comment_extend(node.comment[2], clear=True)
+        if node.anchor:
+            from strictyaml.ruamel.serializer import templated_id
+
+            if not templated_id(node.anchor):
+                typ.yaml_set_anchor(node.anchor)
+        for key_node, value_node in node.value:
+            # keys can be list -> deep
+            key = self.construct_object(key_node, deep=True)
+            # lists are not hashable, but tuples are
+            if not isinstance(key, Hashable):
+                if isinstance(key, list):
+                    key = tuple(key)
+            if PY2:
+                try:
+                    hash(key)
+                except TypeError as exc:
+                    raise ConstructorError(
+                        "while constructing a mapping",
+                        node.start_mark,
+                        "found unacceptable key (%s)" % exc,
+                        key_node.start_mark,
+                    )
+            else:
+                if not isinstance(key, Hashable):
+                    raise ConstructorError(
+                        "while constructing a mapping",
+                        node.start_mark,
+                        "found unhashable key",
+                        key_node.start_mark,
+                    )
+            # construct the value, even though it should be null (set
+            # members carry no value); the result is discarded
+            value = self.construct_object(value_node, deep=deep)  # NOQA
+            self.check_set_key(node, key_node, typ, key)
+            if key_node.comment:
+                typ._yaml_add_comment(key_node.comment, key=key)
+            if value_node.comment:
+                typ._yaml_add_comment(value_node.comment, value=key)
+            typ.add(key)
+
+    def construct_yaml_seq(self, node):
+        # type: (Any) -> Any
+        data = CommentedSeq()
+        data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+        if node.comment:
+            data._yaml_add_comment(node.comment)
+        yield data
+        data.extend(self.construct_rt_sequence(node, data))
+        self.set_collection_style(data, node)
+
+    def construct_yaml_map(self, node):
+        # type: (Any) -> Any
+        data = CommentedMap()
+        data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+        yield data
+        self.construct_mapping(node, data, deep=True)
+        self.set_collection_style(data, node)
+
+    def set_collection_style(self, data, node):
+        # type: (Any, Any) -> None
+        if len(data) == 0:
+            return
+        if node.flow_style is True:
+            data.fa.set_flow_style()
+        elif node.flow_style is False:
+            data.fa.set_block_style()
+
+    def construct_yaml_object(self, node, cls):
+        # type: (Any, Any) -> Any
+        data = cls.__new__(cls)
+        yield data
+        if hasattr(data, "__setstate__"):
+            state = SafeConstructor.construct_mapping(self, node, deep=True)
+            data.__setstate__(state)
+        else:
+            state = SafeConstructor.construct_mapping(self, node)
+            data.__dict__.update(state)
+
+    def construct_yaml_omap(self, node):
+        # type: (Any) -> Any
+        # Note: we now check for duplicate keys (see the assert below)
+        omap = CommentedOrderedMap()
+        omap._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+        if node.flow_style is True:
+            omap.fa.set_flow_style()
+        elif node.flow_style is False:
+            omap.fa.set_block_style()
+        yield omap
+        if node.comment:
+            omap._yaml_add_comment(node.comment[:2])
+            if len(node.comment) > 2:
+                omap.yaml_end_comment_extend(node.comment[2], clear=True)
+        if not isinstance(node, SequenceNode):
+            raise ConstructorError(
+                "while constructing an ordered map",
+                node.start_mark,
+                "expected a sequence, but found %s" % node.id,
+                node.start_mark,
+            )
+        for subnode in node.value:
+            if not isinstance(subnode, MappingNode):
+                raise ConstructorError(
+                    "while constructing an ordered map",
+                    node.start_mark,
+                    "expected a mapping of length 1, but found %s" % subnode.id,
+                    subnode.start_mark,
+                )
+            if len(subnode.value) != 1:
+                raise ConstructorError(
+                    "while constructing an ordered map",
+                    node.start_mark,
+                    "expected a single mapping item, but found %d items"
+                    % len(subnode.value),
+                    subnode.start_mark,
+                )
+            key_node, value_node = subnode.value[0]
+            key = self.construct_object(key_node)
+            assert key not in omap
+            value = self.construct_object(value_node)
+            if key_node.comment:
+                omap._yaml_add_comment(key_node.comment, key=key)
+            if subnode.comment:
+                omap._yaml_add_comment(subnode.comment, key=key)
+            if value_node.comment:
+                omap._yaml_add_comment(value_node.comment, value=key)
+            omap[key] = value
+
+    def construct_yaml_set(self, node):
+        # type: (Any) -> Any
+        data = CommentedSet()
+        data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+        yield data
+        self.construct_setting(node, data)
+
+    def construct_undefined(self, node):
+        # type: (Any) -> Any
+        try:
+            if isinstance(node, MappingNode):
+                data = CommentedMap()
+                data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+                if node.flow_style is True:
+                    data.fa.set_flow_style()
+                elif node.flow_style is False:
+                    data.fa.set_block_style()
+                data.yaml_set_tag(node.tag)
+                yield data
+                if node.anchor:
+                    data.yaml_set_anchor(node.anchor)
+                self.construct_mapping(node, data)
+                return
+            elif isinstance(node, ScalarNode):
+                data2 = TaggedScalar()
+                data2.value = self.construct_scalar(node)
+                data2.style = node.style
+                data2.yaml_set_tag(node.tag)
+                yield data2
+                if node.anchor:
+                    data2.yaml_set_anchor(node.anchor, always_dump=True)
+                return
+            elif isinstance(node, SequenceNode):
+                data3 = CommentedSeq()
+                data3._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+                if node.flow_style is True:
+                    data3.fa.set_flow_style()
+                elif node.flow_style is False:
+                    data3.fa.set_block_style()
+                data3.yaml_set_tag(node.tag)
+                yield data3
+                if node.anchor:
+                    data3.yaml_set_anchor(node.anchor)
+                data3.extend(self.construct_sequence(node))
+                return
+        except:  # NOQA
+            pass
+        raise ConstructorError(
+            None,
+            None,
+            "could not determine a constructor for the tag %r" % utf8(node.tag),
+            node.start_mark,
+        )
+
+    def construct_yaml_timestamp(self, node, values=None):
+        # type: (Any, Any) -> Any
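+        # handles YAML 1.1 timestamps that carry a time plus a T or
+        # timezone marker, e.g.
+        #   2001-12-15T02:59:43.1Z
+        #   2001-12-14 21:59:43.10 -5
+        # other forms are delegated to SafeConstructor below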
+        try:
+            match = self.timestamp_regexp.match(node.value)
+        except TypeError:
+            match = None
+        if match is None:
+            raise ConstructorError(
+                None,
+                None,
+                'failed to construct timestamp from "{}"'.format(node.value),
+                node.start_mark,
+            )
+        values = match.groupdict()
+        if not values["hour"]:
+            return SafeConstructor.construct_yaml_timestamp(self, node, values)
+        for part in ["t", "tz_sign", "tz_hour", "tz_minute"]:
+            if values[part]:
+                break
+        else:
+            return SafeConstructor.construct_yaml_timestamp(self, node, values)
+        year = int(values["year"])
+        month = int(values["month"])
+        day = int(values["day"])
+        hour = int(values["hour"])
+        minute = int(values["minute"])
+        second = int(values["second"])
+        fraction = 0
+        if values["fraction"]:
+            fraction_s = values["fraction"][:6]
+            while len(fraction_s) < 6:
+                fraction_s += "0"
+            fraction = int(fraction_s)
+            if len(values["fraction"]) > 6 and int(values["fraction"][6]) > 4:
+                fraction += 1
+        delta = None
+        if values["tz_sign"]:
+            tz_hour = int(values["tz_hour"])
+            minutes = values["tz_minute"]
+            tz_minute = int(minutes) if minutes else 0
+            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+            if values["tz_sign"] == "-":
+                delta = -delta
+        # should check for None and solve issue 366: should be tzinfo=delta
+        if delta:
+            dt = datetime.datetime(year, month, day, hour, minute)
+            dt -= delta
+            data = TimeStamp(
+                dt.year, dt.month, dt.day, dt.hour, dt.minute, second, fraction
+            )
+            data._yaml["delta"] = delta
+            tz = values["tz_sign"] + values["tz_hour"]
+            if values["tz_minute"]:
+                tz += ":" + values["tz_minute"]
+            data._yaml["tz"] = tz
+        else:
+            data = TimeStamp(year, month, day, hour, minute, second, fraction)
+            if values["tz"]:  # no delta
+                data._yaml["tz"] = values["tz"]
+
+        if values["t"]:
+            data._yaml["t"] = True
+        return data
+
+    def construct_yaml_bool(self, node):
+        # type: (Any) -> Any
+        b = SafeConstructor.construct_yaml_bool(self, node)
+        if node.anchor:
+            return ScalarBoolean(b, anchor=node.anchor)
+        return b
+
+
+RoundTripConstructor.add_constructor(
+    u"tag:yaml.org,2002:null", RoundTripConstructor.construct_yaml_null
+)
+
+RoundTripConstructor.add_constructor(
+    u"tag:yaml.org,2002:bool", RoundTripConstructor.construct_yaml_bool
+)
+
+RoundTripConstructor.add_constructor(
+    u"tag:yaml.org,2002:int", RoundTripConstructor.construct_yaml_int
+)
+
+RoundTripConstructor.add_constructor(
+    u"tag:yaml.org,2002:float", RoundTripConstructor.construct_yaml_float
+)
+
+RoundTripConstructor.add_constructor(
+    u"tag:yaml.org,2002:binary", RoundTripConstructor.construct_yaml_binary
+)
+
+RoundTripConstructor.add_constructor(
+    u"tag:yaml.org,2002:timestamp", RoundTripConstructor.construct_yaml_timestamp
+)
+
+RoundTripConstructor.add_constructor(
+    u"tag:yaml.org,2002:omap", RoundTripConstructor.construct_yaml_omap
+)
+
+RoundTripConstructor.add_constructor(
+    u"tag:yaml.org,2002:pairs", RoundTripConstructor.construct_yaml_pairs
+)
+
+RoundTripConstructor.add_constructor(
+    u"tag:yaml.org,2002:set", RoundTripConstructor.construct_yaml_set
+)
+
+RoundTripConstructor.add_constructor(
+    u"tag:yaml.org,2002:str", RoundTripConstructor.construct_yaml_str
+)
+
+RoundTripConstructor.add_constructor(
+    u"tag:yaml.org,2002:seq", RoundTripConstructor.construct_yaml_seq
+)
+
+RoundTripConstructor.add_constructor(
+    u"tag:yaml.org,2002:map", RoundTripConstructor.construct_yaml_map
+)
+
+RoundTripConstructor.add_constructor(None, RoundTripConstructor.construct_undefined)
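+
+# a custom tag could be registered the same way; hypothetical sketch:
+#
+#     RoundTripConstructor.add_constructor(
+#         u"!point",
+#         lambda self, node: tuple(self.construct_sequence(node)),
+#     )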
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/cyaml.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/cyaml.py
new file mode 100644
index 00000000..46592649
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/cyaml.py
@@ -0,0 +1,192 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+from _ruamel_yaml import CParser, CEmitter  # type: ignore
+
+from strictyaml.ruamel.constructor import Constructor, BaseConstructor, SafeConstructor
+from strictyaml.ruamel.representer import Representer, SafeRepresenter, BaseRepresenter
+from strictyaml.ruamel.resolver import Resolver, BaseResolver
+
+if False:  # MYPY
+    from typing import Any, Union, Optional  # NOQA
+    from strictyaml.ruamel.compat import StreamTextType, StreamType, VersionType  # NOQA
+
+__all__ = [
+    "CBaseLoader",
+    "CSafeLoader",
+    "CLoader",
+    "CBaseDumper",
+    "CSafeDumper",
+    "CDumper",
+]
+
+
+# this includes some hacks to work around the use of the resolver by
+# lower level parts of the parser
+
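+# minimal usage sketch (assumes the compiled _ruamel_yaml extension is
+# importable; loader.py/dumper.py hold the pure Python equivalents):
+#
+#     from strictyaml.ruamel.main import load
+#     from strictyaml.ruamel.cyaml import CSafeLoader
+#     data = load("a: 1\n", Loader=CSafeLoader)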
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):  # type: ignore
+    def __init__(self, stream, version=None, preserve_quotes=None):
+        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
+        CParser.__init__(self, stream)
+        self._parser = self._composer = self
+        BaseConstructor.__init__(self, loader=self)
+        BaseResolver.__init__(self, loadumper=self)
+        # self.descend_resolver = self._resolver.descend_resolver
+        # self.ascend_resolver = self._resolver.ascend_resolver
+        # self.resolve = self._resolver.resolve
+
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):  # type: ignore
+    def __init__(self, stream, version=None, preserve_quotes=None):
+        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
+        CParser.__init__(self, stream)
+        self._parser = self._composer = self
+        SafeConstructor.__init__(self, loader=self)
+        Resolver.__init__(self, loadumper=self)
+        # self.descend_resolver = self._resolver.descend_resolver
+        # self.ascend_resolver = self._resolver.ascend_resolver
+        # self.resolve = self._resolver.resolve
+
+
+class CLoader(CParser, Constructor, Resolver):  # type: ignore
+    def __init__(self, stream, version=None, preserve_quotes=None):
+        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
+        CParser.__init__(self, stream)
+        self._parser = self._composer = self
+        Constructor.__init__(self, loader=self)
+        Resolver.__init__(self, loadumper=self)
+        # self.descend_resolver = self._resolver.descend_resolver
+        # self.ascend_resolver = self._resolver.ascend_resolver
+        # self.resolve = self._resolver.resolve
+
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):  # type: ignore
+    def __init__(
+        self,
+        stream,
+        default_style=None,
+        default_flow_style=None,
+        canonical=None,
+        indent=None,
+        width=None,
+        allow_unicode=None,
+        line_break=None,
+        encoding=None,
+        explicit_start=None,
+        explicit_end=None,
+        version=None,
+        tags=None,
+        block_seq_indent=None,
+        top_level_colon_align=None,
+        prefix_colon=None,
+    ):
+        # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None   # NOQA
+        CEmitter.__init__(
+            self,
+            stream,
+            canonical=canonical,
+            indent=indent,
+            width=width,
+            encoding=encoding,
+            allow_unicode=allow_unicode,
+            line_break=line_break,
+            explicit_start=explicit_start,
+            explicit_end=explicit_end,
+            version=version,
+            tags=tags,
+        )
+        self._emitter = self._serializer = self._representer = self
+        BaseRepresenter.__init__(
+            self,
+            default_style=default_style,
+            default_flow_style=default_flow_style,
+            dumper=self,
+        )
+        BaseResolver.__init__(self, loadumper=self)
+
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):  # type: ignore
+    def __init__(
+        self,
+        stream,
+        default_style=None,
+        default_flow_style=None,
+        canonical=None,
+        indent=None,
+        width=None,
+        allow_unicode=None,
+        line_break=None,
+        encoding=None,
+        explicit_start=None,
+        explicit_end=None,
+        version=None,
+        tags=None,
+        block_seq_indent=None,
+        top_level_colon_align=None,
+        prefix_colon=None,
+    ):
+        # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None   # NOQA
+        CEmitter.__init__(
+            self,
+            stream,
+            canonical=canonical,
+            indent=indent,
+            width=width,
+            encoding=encoding,
+            allow_unicode=allow_unicode,
+            line_break=line_break,
+            explicit_start=explicit_start,
+            explicit_end=explicit_end,
+            version=version,
+            tags=tags,
+        )
+        self._emitter = self._serializer = self._representer = self
+        SafeRepresenter.__init__(
+            self, default_style=default_style, default_flow_style=default_flow_style
+        )
+        Resolver.__init__(self)
+
+
+class CDumper(CEmitter, Representer, Resolver):  # type: ignore
+    def __init__(
+        self,
+        stream,
+        default_style=None,
+        default_flow_style=None,
+        canonical=None,
+        indent=None,
+        width=None,
+        allow_unicode=None,
+        line_break=None,
+        encoding=None,
+        explicit_start=None,
+        explicit_end=None,
+        version=None,
+        tags=None,
+        block_seq_indent=None,
+        top_level_colon_align=None,
+        prefix_colon=None,
+    ):
+        # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None   # NOQA
+        CEmitter.__init__(
+            self,
+            stream,
+            canonical=canonical,
+            indent=indent,
+            width=width,
+            encoding=encoding,
+            allow_unicode=allow_unicode,
+            line_break=line_break,
+            explicit_start=explicit_start,
+            explicit_end=explicit_end,
+            version=version,
+            tags=tags,
+        )
+        self._emitter = self._serializer = self._representer = self
+        Representer.__init__(
+            self, default_style=default_style, default_flow_style=default_flow_style
+        )
+        Resolver.__init__(self)
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/dumper.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/dumper.py
new file mode 100644
index 00000000..a91dd1b1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/dumper.py
@@ -0,0 +1,221 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+from strictyaml.ruamel.emitter import Emitter
+from strictyaml.ruamel.serializer import Serializer
+from strictyaml.ruamel.representer import (
+    Representer,
+    SafeRepresenter,
+    BaseRepresenter,
+    RoundTripRepresenter,
+)
+from strictyaml.ruamel.resolver import Resolver, BaseResolver, VersionedResolver
+
+if False:  # MYPY
+    from typing import Any, Dict, List, Union, Optional  # NOQA
+    from strictyaml.ruamel.compat import StreamType, VersionType  # NOQA
+
+__all__ = ["BaseDumper", "SafeDumper", "Dumper", "RoundTripDumper"]
+
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+    def __init__(
+        self,
+        stream,
+        default_style=None,
+        default_flow_style=None,
+        canonical=None,
+        indent=None,
+        width=None,
+        allow_unicode=None,
+        line_break=None,
+        encoding=None,
+        explicit_start=None,
+        explicit_end=None,
+        version=None,
+        tags=None,
+        block_seq_indent=None,
+        top_level_colon_align=None,
+        prefix_colon=None,
+    ):
+        # type: (Any, StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None   # NOQA
+        Emitter.__init__(
+            self,
+            stream,
+            canonical=canonical,
+            indent=indent,
+            width=width,
+            allow_unicode=allow_unicode,
+            line_break=line_break,
+            block_seq_indent=block_seq_indent,
+            dumper=self,
+        )
+        Serializer.__init__(
+            self,
+            encoding=encoding,
+            explicit_start=explicit_start,
+            explicit_end=explicit_end,
+            version=version,
+            tags=tags,
+            dumper=self,
+        )
+        BaseRepresenter.__init__(
+            self,
+            default_style=default_style,
+            default_flow_style=default_flow_style,
+            dumper=self,
+        )
+        BaseResolver.__init__(self, loadumper=self)
+
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+    def __init__(
+        self,
+        stream,
+        default_style=None,
+        default_flow_style=None,
+        canonical=None,
+        indent=None,
+        width=None,
+        allow_unicode=None,
+        line_break=None,
+        encoding=None,
+        explicit_start=None,
+        explicit_end=None,
+        version=None,
+        tags=None,
+        block_seq_indent=None,
+        top_level_colon_align=None,
+        prefix_colon=None,
+    ):
+        # type: (StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None  # NOQA
+        Emitter.__init__(
+            self,
+            stream,
+            canonical=canonical,
+            indent=indent,
+            width=width,
+            allow_unicode=allow_unicode,
+            line_break=line_break,
+            block_seq_indent=block_seq_indent,
+            dumper=self,
+        )
+        Serializer.__init__(
+            self,
+            encoding=encoding,
+            explicit_start=explicit_start,
+            explicit_end=explicit_end,
+            version=version,
+            tags=tags,
+            dumper=self,
+        )
+        SafeRepresenter.__init__(
+            self,
+            default_style=default_style,
+            default_flow_style=default_flow_style,
+            dumper=self,
+        )
+        Resolver.__init__(self, loadumper=self)
+
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+    def __init__(
+        self,
+        stream,
+        default_style=None,
+        default_flow_style=None,
+        canonical=None,
+        indent=None,
+        width=None,
+        allow_unicode=None,
+        line_break=None,
+        encoding=None,
+        explicit_start=None,
+        explicit_end=None,
+        version=None,
+        tags=None,
+        block_seq_indent=None,
+        top_level_colon_align=None,
+        prefix_colon=None,
+    ):
+        # type: (StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None   # NOQA
+        Emitter.__init__(
+            self,
+            stream,
+            canonical=canonical,
+            indent=indent,
+            width=width,
+            allow_unicode=allow_unicode,
+            line_break=line_break,
+            block_seq_indent=block_seq_indent,
+            dumper=self,
+        )
+        Serializer.__init__(
+            self,
+            encoding=encoding,
+            explicit_start=explicit_start,
+            explicit_end=explicit_end,
+            version=version,
+            tags=tags,
+            dumper=self,
+        )
+        Representer.__init__(
+            self,
+            default_style=default_style,
+            default_flow_style=default_flow_style,
+            dumper=self,
+        )
+        Resolver.__init__(self, loadumper=self)
+
+
+class RoundTripDumper(Emitter, Serializer, RoundTripRepresenter, VersionedResolver):
+    def __init__(
+        self,
+        stream,
+        default_style=None,
+        default_flow_style=None,
+        canonical=None,
+        indent=None,
+        width=None,
+        allow_unicode=None,
+        line_break=None,
+        encoding=None,
+        explicit_start=None,
+        explicit_end=None,
+        version=None,
+        tags=None,
+        block_seq_indent=None,
+        top_level_colon_align=None,
+        prefix_colon=None,
+    ):
+        # type: (StreamType, Any, Optional[bool], Optional[int], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None  # NOQA
+        Emitter.__init__(
+            self,
+            stream,
+            canonical=canonical,
+            indent=indent,
+            width=width,
+            allow_unicode=allow_unicode,
+            line_break=line_break,
+            block_seq_indent=block_seq_indent,
+            top_level_colon_align=top_level_colon_align,
+            prefix_colon=prefix_colon,
+            dumper=self,
+        )
+        Serializer.__init__(
+            self,
+            encoding=encoding,
+            explicit_start=explicit_start,
+            explicit_end=explicit_end,
+            version=version,
+            tags=tags,
+            dumper=self,
+        )
+        RoundTripRepresenter.__init__(
+            self,
+            default_style=default_style,
+            default_flow_style=default_flow_style,
+            dumper=self,
+        )
+        VersionedResolver.__init__(self, loader=self)
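+
+
+# minimal usage sketch (dump() here is strictyaml.ruamel.main.dump, which
+# returns a string when no stream is given):
+#
+#     from strictyaml.ruamel.main import dump
+#     from strictyaml.ruamel.dumper import RoundTripDumper
+#     text = dump({"a": 1}, Dumper=RoundTripDumper)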
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/emitter.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/emitter.py
new file mode 100644
index 00000000..efda98bd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/emitter.py
@@ -0,0 +1,1738 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+from __future__ import print_function
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
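+#
+# e.g. emitting the mapping {a: 1} corresponds to the event sequence:
+#   STREAM-START DOCUMENT-START MAPPING-START SCALAR(a) SCALAR(1)
+#   MAPPING-END DOCUMENT-END STREAM-END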
+
+import sys
+from strictyaml.ruamel.error import YAMLError, YAMLStreamError
+from strictyaml.ruamel.events import *  # NOQA
+
+# fmt: off
+from strictyaml.ruamel.compat import utf8, text_type, PY2, nprint, dbg, DBG_EVENT, \
+    check_anchorname_char
+# fmt: on
+
+if False:  # MYPY
+    from typing import Any, Dict, List, Union, Text, Tuple, Optional  # NOQA
+    from strictyaml.ruamel.compat import StreamType  # NOQA
+
+__all__ = ["Emitter", "EmitterError"]
+
+
+class EmitterError(YAMLError):
+    pass
+
+
+class ScalarAnalysis(object):
+    def __init__(
+        self,
+        scalar,
+        empty,
+        multiline,
+        allow_flow_plain,
+        allow_block_plain,
+        allow_single_quoted,
+        allow_double_quoted,
+        allow_block,
+    ):
+        # type: (Any, Any, Any, bool, bool, bool, bool, bool) -> None
+        self.scalar = scalar
+        self.empty = empty
+        self.multiline = multiline
+        self.allow_flow_plain = allow_flow_plain
+        self.allow_block_plain = allow_block_plain
+        self.allow_single_quoted = allow_single_quoted
+        self.allow_double_quoted = allow_double_quoted
+        self.allow_block = allow_block
+
+
+class Indents(object):
+    # replacement for the list based stack of None/int
+    def __init__(self):
+        # type: () -> None
+        self.values = []  # type: List[Tuple[int, bool]]
+
+    def append(self, val, seq):
+        # type: (Any, Any) -> None
+        self.values.append((val, seq))
+
+    def pop(self):
+        # type: () -> Any
+        return self.values.pop()[0]
+
+    def last_seq(self):
+        # type: () -> bool
+        # return the seq(uence) value for the element added before the last one
+        # in increase_indent()
+        try:
+            return self.values[-2][1]
+        except IndexError:
+            return False
+
+    def seq_flow_align(self, seq_indent, column):
+        # type: (int, int) -> int
+        # extra spaces because of dash
+        if len(self.values) < 2 or not self.values[-1][1]:
+            return 0
+        # -1 for the dash
+        base = self.values[-1][0] if self.values[-1][0] is not None else 0
+        return base + seq_indent - column - 1
+
+    def __len__(self):
+        # type: () -> int
+        return len(self.values)
+
+
+class Emitter(object):
+    # fmt: off
+    DEFAULT_TAG_PREFIXES = {
+        u'!': u'!',
+        u'tag:yaml.org,2002:': u'!!',
+    }
+    # fmt: on
+
+    MAX_SIMPLE_KEY_LENGTH = 128
+
+    def __init__(
+        self,
+        stream,
+        canonical=None,
+        indent=None,
+        width=None,
+        allow_unicode=None,
+        line_break=None,
+        block_seq_indent=None,
+        top_level_colon_align=None,
+        prefix_colon=None,
+        brace_single_entry_mapping_in_flow_sequence=None,
+        dumper=None,
+    ):
+        # type: (StreamType, Any, Optional[int], Optional[int], Optional[bool], Any, Optional[int], Optional[bool], Any, Optional[bool], Any) -> None  # NOQA
+        self.dumper = dumper
+        if self.dumper is not None and getattr(self.dumper, "_emitter", None) is None:
+            self.dumper._emitter = self
+        self.stream = stream
+
+        # Encoding can be overridden by STREAM-START.
+        self.encoding = None  # type: Optional[Text]
+        self.allow_space_break = None
+
+        # Emitter is a state machine with a stack of states to handle nested
+        # structures.
+        self.states = []  # type: List[Any]
+        self.state = self.expect_stream_start  # type: Any
+
+        # Current event and the event queue.
+        self.events = []  # type: List[Any]
+        self.event = None  # type: Any
+
+        # The current indentation level and the stack of previous indents.
+        self.indents = Indents()
+        self.indent = None  # type: Optional[int]
+
+        # flow_context is an expanding/shrinking list consisting of '{' and '['
+        # for each unclosed flow context; an empty list means block context
+        self.flow_context = []  # type: List[Text]
+
+        # Contexts.
+        self.root_context = False
+        self.sequence_context = False
+        self.mapping_context = False
+        self.simple_key_context = False
+
+        # Characteristics of the last emitted character:
+        #  - current position.
+        #  - is it a whitespace?
+        #  - is it an indention character
+        #    (indentation space, '-', '?', or ':')?
+        self.line = 0
+        self.column = 0
+        self.whitespace = True
+        self.indention = True
+        self.compact_seq_seq = True  # dash after dash
+        self.compact_seq_map = True  # key after dash
+        # self.compact_ms = False   # dash after key, only when explicit key with ?
+        self.no_newline = None  # type: Optional[bool]  # set if directly after `- `
+
+        # Whether the document requires an explicit document end indicator
+        self.open_ended = False
+
+        # colon handling
+        self.colon = u":"
+        self.prefixed_colon = (
+            self.colon if prefix_colon is None else prefix_colon + self.colon
+        )
+        # single entry mappings in flow sequence
+        self.brace_single_entry_mapping_in_flow_sequence = (
+            brace_single_entry_mapping_in_flow_sequence  # NOQA
+        )
+
+        # Formatting details.
+        self.canonical = canonical
+        self.allow_unicode = allow_unicode
+        # set to False to get "\Uxxxxxxxx" for non-basic unicode like emojis
+        self.unicode_supplementary = sys.maxunicode > 0xFFFF
+        self.sequence_dash_offset = block_seq_indent if block_seq_indent else 0
+        self.top_level_colon_align = top_level_colon_align
+        self.best_sequence_indent = 2
+        self.requested_indent = indent  # specific for literal zero indent
+        if indent and 1 < indent < 10:
+            self.best_sequence_indent = indent
+        self.best_map_indent = self.best_sequence_indent
+        # if self.best_sequence_indent < self.sequence_dash_offset + 1:
+        #     self.best_sequence_indent = self.sequence_dash_offset + 1
+        self.best_width = 80
+        if width and width > self.best_sequence_indent * 2:
+            self.best_width = width
+        self.best_line_break = u"\n"  # type: Any
+        if line_break in [u"\r", u"\n", u"\r\n"]:
+            self.best_line_break = line_break
+
+        # Tag prefixes.
+        self.tag_prefixes = None  # type: Any
+
+        # Prepared anchor and tag.
+        self.prepared_anchor = None  # type: Any
+        self.prepared_tag = None  # type: Any
+
+        # Scalar analysis and style.
+        self.analysis = None  # type: Any
+        self.style = None  # type: Any
+
+        self.scalar_after_indicator = True  # write a scalar on the same line as `---`
+
+        self.alt_null = "null"
+
+    @property
+    def stream(self):
+        # type: () -> Any
+        try:
+            return self._stream
+        except AttributeError:
+            raise YAMLStreamError("output stream needs to be specified")
+
+    @stream.setter
+    def stream(self, val):
+        # type: (Any) -> None
+        if val is None:
+            return
+        if not hasattr(val, "write"):
+            raise YAMLStreamError("stream argument needs to have a write() method")
+        self._stream = val
+
+    @property
+    def serializer(self):
+        # type: () -> Any
+        try:
+            if hasattr(self.dumper, "typ"):
+                return self.dumper.serializer
+            return self.dumper._serializer
+        except AttributeError:
+            return self  # cyaml
+
+    @property
+    def flow_level(self):
+        # type: () -> int
+        return len(self.flow_context)
+
+    def dispose(self):
+        # type: () -> None
+        # Reset the state attributes (to clear self-references)
+        self.states = []
+        self.state = None
+
+    def emit(self, event):
+        # type: (Any) -> None
+        if dbg(DBG_EVENT):
+            nprint(event)
+        self.events.append(event)
+        while not self.need_more_events():
+            self.event = self.events.pop(0)
+            self.state()
+            self.event = None
+
+    # In some cases, we wait for a few next events before emitting.
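+    # (a collection start event, for instance, is held back until enough
+    # subsequent events are buffered to decide whether the collection is
+    # empty or can start on the current line)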
+
+    def need_more_events(self):
+        # type: () -> bool
+        if not self.events:
+            return True
+        event = self.events[0]
+        if isinstance(event, DocumentStartEvent):
+            return self.need_events(1)
+        elif isinstance(event, SequenceStartEvent):
+            return self.need_events(2)
+        elif isinstance(event, MappingStartEvent):
+            return self.need_events(3)
+        else:
+            return False
+
+    def need_events(self, count):
+        # type: (int) -> bool
+        level = 0
+        for event in self.events[1:]:
+            if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+                level += 1
+            elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+                level -= 1
+            elif isinstance(event, StreamEndEvent):
+                level = -1
+            if level < 0:
+                return False
+        return len(self.events) < count + 1
+
+    def increase_indent(self, flow=False, sequence=None, indentless=False):
+        # type: (bool, Optional[bool], bool) -> None
+        self.indents.append(self.indent, sequence)
+        if self.indent is None:  # top level
+            if flow:
+                # self.indent = self.best_sequence_indent if self.indents.last_seq() else \
+                #              self.best_map_indent
+                # self.indent = self.best_sequence_indent
+                self.indent = self.requested_indent
+            else:
+                self.indent = 0
+        elif not indentless:
+            self.indent += (
+                self.best_sequence_indent
+                if self.indents.last_seq()
+                else self.best_map_indent
+            )
+            # if self.indents.last_seq():
+            #     if self.indent == 0: # top level block sequence
+            #         self.indent = self.best_sequence_indent - self.sequence_dash_offset
+            #     else:
+            #         self.indent += self.best_sequence_indent
+            # else:
+            #     self.indent += self.best_map_indent
+
+    # States.
+
+    # Stream handlers.
+
+    def expect_stream_start(self):
+        # type: () -> None
+        if isinstance(self.event, StreamStartEvent):
+            if PY2:
+                if self.event.encoding and not getattr(self.stream, "encoding", None):
+                    self.encoding = self.event.encoding
+            else:
+                if self.event.encoding and not hasattr(self.stream, "encoding"):
+                    self.encoding = self.event.encoding
+            self.write_stream_start()
+            self.state = self.expect_first_document_start
+        else:
+            raise EmitterError("expected StreamStartEvent, but got %s" % (self.event,))
+
+    def expect_nothing(self):
+        # type: () -> None
+        raise EmitterError("expected nothing, but got %s" % (self.event,))
+
+    # Document handlers.
+
+    def expect_first_document_start(self):
+        # type: () -> Any
+        return self.expect_document_start(first=True)
+
+    def expect_document_start(self, first=False):
+        # type: (bool) -> None
+        if isinstance(self.event, DocumentStartEvent):
+            if (self.event.version or self.event.tags) and self.open_ended:
+                self.write_indicator(u"...", True)
+                self.write_indent()
+            if self.event.version:
+                version_text = self.prepare_version(self.event.version)
+                self.write_version_directive(version_text)
+            self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+            if self.event.tags:
+                handles = sorted(self.event.tags.keys())
+                for handle in handles:
+                    prefix = self.event.tags[handle]
+                    self.tag_prefixes[prefix] = handle
+                    handle_text = self.prepare_tag_handle(handle)
+                    prefix_text = self.prepare_tag_prefix(prefix)
+                    self.write_tag_directive(handle_text, prefix_text)
+            implicit = (
+                first
+                and not self.event.explicit
+                and not self.canonical
+                and not self.event.version
+                and not self.event.tags
+                and not self.check_empty_document()
+            )
+            if not implicit:
+                self.write_indent()
+                self.write_indicator(u"---", True)
+                if self.canonical:
+                    self.write_indent()
+            self.state = self.expect_document_root
+        elif isinstance(self.event, StreamEndEvent):
+            if self.open_ended:
+                self.write_indicator(u"...", True)
+                self.write_indent()
+            self.write_stream_end()
+            self.state = self.expect_nothing
+        else:
+            raise EmitterError(
+                "expected DocumentStartEvent, but got %s" % (self.event,)
+            )
+
+    def expect_document_end(self):
+        # type: () -> None
+        if isinstance(self.event, DocumentEndEvent):
+            self.write_indent()
+            if self.event.explicit:
+                self.write_indicator(u"...", True)
+                self.write_indent()
+            self.flush_stream()
+            self.state = self.expect_document_start
+        else:
+            raise EmitterError("expected DocumentEndEvent, but got %s" % (self.event,))
+
+    def expect_document_root(self):
+        # type: () -> None
+        self.states.append(self.expect_document_end)
+        self.expect_node(root=True)
+
+    # Node handlers.
+
+    def expect_node(self, root=False, sequence=False, mapping=False, simple_key=False):
+        # type: (bool, bool, bool, bool) -> None
+        self.root_context = root
+        self.sequence_context = sequence  # not used in PyYAML
+        self.mapping_context = mapping
+        self.simple_key_context = simple_key
+        if isinstance(self.event, AliasEvent):
+            self.expect_alias()
+        elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+            if (
+                self.process_anchor(u"&")
+                and isinstance(self.event, ScalarEvent)
+                and self.sequence_context
+            ):
+                self.sequence_context = False
+            if (
+                root
+                and isinstance(self.event, ScalarEvent)
+                and not self.scalar_after_indicator
+            ):
+                self.write_indent()
+            self.process_tag()
+            if isinstance(self.event, ScalarEvent):
+                # nprint('@', self.indention, self.no_newline, self.column)
+                self.expect_scalar()
+            elif isinstance(self.event, SequenceStartEvent):
+                # nprint('@', self.indention, self.no_newline, self.column)
+                i2, n2 = self.indention, self.no_newline  # NOQA
+                if self.event.comment:
+                    if self.event.flow_style is False and self.event.comment:
+                        if self.write_post_comment(self.event):
+                            self.indention = False
+                            self.no_newline = True
+                    if self.write_pre_comment(self.event):
+                        self.indention = i2
+                        self.no_newline = not self.indention
+                if (
+                    self.flow_level
+                    or self.canonical
+                    or self.event.flow_style
+                    or self.check_empty_sequence()
+                ):
+                    self.expect_flow_sequence()
+                else:
+                    self.expect_block_sequence()
+            elif isinstance(self.event, MappingStartEvent):
+                if self.event.flow_style is False and self.event.comment:
+                    self.write_post_comment(self.event)
+                if self.event.comment and self.event.comment[1]:
+                    self.write_pre_comment(self.event)
+                if (
+                    self.flow_level
+                    or self.canonical
+                    or self.event.flow_style
+                    or self.check_empty_mapping()
+                ):
+                    self.expect_flow_mapping(single=self.event.nr_items == 1)
+                else:
+                    self.expect_block_mapping()
+        else:
+            raise EmitterError("expected NodeEvent, but got %s" % (self.event,))
+
+    def expect_alias(self):
+        # type: () -> None
+        if self.event.anchor is None:
+            raise EmitterError("anchor is not specified for alias")
+        self.process_anchor(u"*")
+        self.state = self.states.pop()
+
+    def expect_scalar(self):
+        # type: () -> None
+        self.increase_indent(flow=True)
+        self.process_scalar()
+        self.indent = self.indents.pop()
+        self.state = self.states.pop()
+
+    # Flow sequence handlers.
+
+    def expect_flow_sequence(self):
+        # type: () -> None
+        ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column)
+        self.write_indicator(u" " * ind + u"[", True, whitespace=True)
+        self.increase_indent(flow=True, sequence=True)
+        self.flow_context.append("[")
+        self.state = self.expect_first_flow_sequence_item
+
+    def expect_first_flow_sequence_item(self):
+        # type: () -> None
+        if isinstance(self.event, SequenceEndEvent):
+            self.indent = self.indents.pop()
+            popped = self.flow_context.pop()
+            assert popped == "["
+            self.write_indicator(u"]", False)
+            if self.event.comment and self.event.comment[0]:
+                # eol comment on empty flow sequence
+                self.write_post_comment(self.event)
+            elif self.flow_level == 0:
+                self.write_line_break()
+            self.state = self.states.pop()
+        else:
+            if self.canonical or self.column > self.best_width:
+                self.write_indent()
+            self.states.append(self.expect_flow_sequence_item)
+            self.expect_node(sequence=True)
+
+    def expect_flow_sequence_item(self):
+        # type: () -> None
+        if isinstance(self.event, SequenceEndEvent):
+            self.indent = self.indents.pop()
+            popped = self.flow_context.pop()
+            assert popped == "["
+            if self.canonical:
+                self.write_indicator(u",", False)
+                self.write_indent()
+            self.write_indicator(u"]", False)
+            if self.event.comment and self.event.comment[0]:
+                # eol comment on flow sequence
+                self.write_post_comment(self.event)
+            else:
+                self.no_newline = False
+            self.state = self.states.pop()
+        else:
+            self.write_indicator(u",", False)
+            if self.canonical or self.column > self.best_width:
+                self.write_indent()
+            self.states.append(self.expect_flow_sequence_item)
+            self.expect_node(sequence=True)
+
+    # Flow mapping handlers.
+
+    def expect_flow_mapping(self, single=False):
+        # type: (Optional[bool]) -> None
+        ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column)
+        map_init = u"{"
+        if (
+            single
+            and self.flow_level
+            and self.flow_context[-1] == "["
+            and not self.canonical
+            and not self.brace_single_entry_mapping_in_flow_sequence
+        ):
+            # single map item with flow context, no curly braces necessary
+            map_init = u""
+        self.write_indicator(u" " * ind + map_init, True, whitespace=True)
+        self.flow_context.append(map_init)
+        self.increase_indent(flow=True, sequence=False)
+        self.state = self.expect_first_flow_mapping_key
+
+    def expect_first_flow_mapping_key(self):
+        # type: () -> None
+        if isinstance(self.event, MappingEndEvent):
+            self.indent = self.indents.pop()
+            popped = self.flow_context.pop()
+            assert popped == "{"  # empty flow mapping
+            self.write_indicator(u"}", False)
+            if self.event.comment and self.event.comment[0]:
+                # eol comment on empty mapping
+                self.write_post_comment(self.event)
+            elif self.flow_level == 0:
+                self.write_line_break()
+            self.state = self.states.pop()
+        else:
+            if self.canonical or self.column > self.best_width:
+                self.write_indent()
+            if not self.canonical and self.check_simple_key():
+                self.states.append(self.expect_flow_mapping_simple_value)
+                self.expect_node(mapping=True, simple_key=True)
+            else:
+                self.write_indicator(u"?", True)
+                self.states.append(self.expect_flow_mapping_value)
+                self.expect_node(mapping=True)
+
+    def expect_flow_mapping_key(self):
+        # type: () -> None
+        if isinstance(self.event, MappingEndEvent):
+            # if self.event.comment and self.event.comment[1]:
+            #     self.write_pre_comment(self.event)
+            self.indent = self.indents.pop()
+            popped = self.flow_context.pop()
+            assert popped in [u"{", u""]
+            if self.canonical:
+                self.write_indicator(u",", False)
+                self.write_indent()
+            if popped != u"":
+                self.write_indicator(u"}", False)
+            if self.event.comment and self.event.comment[0]:
+                # eol comment on flow mapping, never reached on empty mappings
+                self.write_post_comment(self.event)
+            else:
+                self.no_newline = False
+            self.state = self.states.pop()
+        else:
+            self.write_indicator(u",", False)
+            if self.canonical or self.column > self.best_width:
+                self.write_indent()
+            if not self.canonical and self.check_simple_key():
+                self.states.append(self.expect_flow_mapping_simple_value)
+                self.expect_node(mapping=True, simple_key=True)
+            else:
+                self.write_indicator(u"?", True)
+                self.states.append(self.expect_flow_mapping_value)
+                self.expect_node(mapping=True)
+
+    def expect_flow_mapping_simple_value(self):
+        # type: () -> None
+        self.write_indicator(self.prefixed_colon, False)
+        self.states.append(self.expect_flow_mapping_key)
+        self.expect_node(mapping=True)
+
+    def expect_flow_mapping_value(self):
+        # type: () -> None
+        if self.canonical or self.column > self.best_width:
+            self.write_indent()
+        self.write_indicator(self.prefixed_colon, True)
+        self.states.append(self.expect_flow_mapping_key)
+        self.expect_node(mapping=True)
+
+    # Block sequence handlers.
+
+    def expect_block_sequence(self):
+        # type: () -> None
+        if self.mapping_context:
+            indentless = not self.indention
+        else:
+            indentless = False
+            if not self.compact_seq_seq and self.column != 0:
+                self.write_line_break()
+        self.increase_indent(flow=False, sequence=True, indentless=indentless)
+        self.state = self.expect_first_block_sequence_item
+
+    def expect_first_block_sequence_item(self):
+        # type: () -> Any
+        return self.expect_block_sequence_item(first=True)
+
+    def expect_block_sequence_item(self, first=False):
+        # type: (bool) -> None
+        if not first and isinstance(self.event, SequenceEndEvent):
+            if self.event.comment and self.event.comment[1]:
+                # final comments on a block list e.g. empty line
+                self.write_pre_comment(self.event)
+            self.indent = self.indents.pop()
+            self.state = self.states.pop()
+            self.no_newline = False
+        else:
+            if self.event.comment and self.event.comment[1]:
+                self.write_pre_comment(self.event)
+            nonl = self.no_newline if self.column == 0 else False
+            self.write_indent()
+            ind = self.sequence_dash_offset  # if  len(self.indents) > 1 else 0
+            self.write_indicator(u" " * ind + u"-", True, indention=True)
+            if nonl or self.sequence_dash_offset + 2 > self.best_sequence_indent:
+                self.no_newline = True
+            self.states.append(self.expect_block_sequence_item)
+            self.expect_node(sequence=True)
+
+    # Block mapping handlers.
+
+    def expect_block_mapping(self):
+        # type: () -> None
+        if not self.mapping_context and not (self.compact_seq_map or self.column == 0):
+            self.write_line_break()
+        self.increase_indent(flow=False, sequence=False)
+        self.state = self.expect_first_block_mapping_key
+
+    def expect_first_block_mapping_key(self):
+        # type: () -> None
+        return self.expect_block_mapping_key(first=True)
+
+    def expect_block_mapping_key(self, first=False):
+        # type: (Any) -> None
+        if not first and isinstance(self.event, MappingEndEvent):
+            if self.event.comment and self.event.comment[1]:
+                # final comments from a doc
+                self.write_pre_comment(self.event)
+            self.indent = self.indents.pop()
+            self.state = self.states.pop()
+        else:
+            if self.event.comment and self.event.comment[1]:
+                # final comments from a doc
+                self.write_pre_comment(self.event)
+            self.write_indent()
+            if self.check_simple_key():
+                if not isinstance(
+                    self.event, (SequenceStartEvent, MappingStartEvent)
+                ):  # sequence keys
+                    try:
+                        if self.event.style == "?":
+                            self.write_indicator(u"?", True, indention=True)
+                    except AttributeError:  # aliases have no style
+                        pass
+                self.states.append(self.expect_block_mapping_simple_value)
+                self.expect_node(mapping=True, simple_key=True)
+                if isinstance(self.event, AliasEvent):
+                    self.stream.write(u" ")
+            else:
+                self.write_indicator(u"?", True, indention=True)
+                self.states.append(self.expect_block_mapping_value)
+                self.expect_node(mapping=True)
+
+    def expect_block_mapping_simple_value(self):
+        # type: () -> None
+        if getattr(self.event, "style", None) != "?":
+            # prefix = u''
+            if self.indent == 0 and self.top_level_colon_align is not None:
+                # write non-prefixed colon
+                c = u" " * (self.top_level_colon_align - self.column) + self.colon
+            else:
+                c = self.prefixed_colon
+            self.write_indicator(c, False)
+        self.states.append(self.expect_block_mapping_key)
+        self.expect_node(mapping=True)
+
+    def expect_block_mapping_value(self):
+        # type: () -> None
+        self.write_indent()
+        self.write_indicator(self.prefixed_colon, True, indention=True)
+        self.states.append(self.expect_block_mapping_key)
+        self.expect_node(mapping=True)
+
+    # Checkers.
+
+    def check_empty_sequence(self):
+        # type: () -> bool
+        return (
+            isinstance(self.event, SequenceStartEvent)
+            and bool(self.events)
+            and isinstance(self.events[0], SequenceEndEvent)
+        )
+
+    def check_empty_mapping(self):
+        # type: () -> bool
+        return (
+            isinstance(self.event, MappingStartEvent)
+            and bool(self.events)
+            and isinstance(self.events[0], MappingEndEvent)
+        )
+
+    def check_empty_document(self):
+        # type: () -> bool
+        if not isinstance(self.event, DocumentStartEvent) or not self.events:
+            return False
+        event = self.events[0]
+        return (
+            isinstance(event, ScalarEvent)
+            and event.anchor is None
+            and event.tag is None
+            and event.implicit
+            and event.value == ""
+        )
+
+    def check_simple_key(self):
+        # type: () -> bool
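+        # A node can be emitted as a simple (single-line) key when its
+        # rendered anchor, tag and scalar together stay below
+        # MAX_SIMPLE_KEY_LENGTH and the scalar is not multiline; aliases,
+        # flow collections and empty collections qualify as well.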
+        length = 0
+        if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+            if self.prepared_anchor is None:
+                self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+            length += len(self.prepared_anchor)
+        if (
+            isinstance(self.event, (ScalarEvent, CollectionStartEvent))
+            and self.event.tag is not None
+        ):
+            if self.prepared_tag is None:
+                self.prepared_tag = self.prepare_tag(self.event.tag)
+            length += len(self.prepared_tag)
+        if isinstance(self.event, ScalarEvent):
+            if self.analysis is None:
+                self.analysis = self.analyze_scalar(self.event.value)
+            length += len(self.analysis.scalar)
+        return length < self.MAX_SIMPLE_KEY_LENGTH and (
+            isinstance(self.event, AliasEvent)
+            or (
+                isinstance(self.event, SequenceStartEvent)
+                and self.event.flow_style is True
+            )
+            or (
+                isinstance(self.event, MappingStartEvent)
+                and self.event.flow_style is True
+            )
+            or (
+                isinstance(self.event, ScalarEvent)
+                # if there is an explicit style for an empty string, it is a simple key
+                and not (self.analysis.empty and self.style and self.style not in "'\"")
+                and not self.analysis.multiline
+            )
+            or self.check_empty_sequence()
+            or self.check_empty_mapping()
+        )
+
+    # Anchor, Tag, and Scalar processors.
+
+    def process_anchor(self, indicator):
+        # type: (Any) -> bool
+        if self.event.anchor is None:
+            self.prepared_anchor = None
+            return False
+        if self.prepared_anchor is None:
+            self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+        if self.prepared_anchor:
+            self.write_indicator(indicator + self.prepared_anchor, True)
+            # issue 288
+            self.no_newline = False
+        self.prepared_anchor = None
+        return True
+
+    def process_tag(self):
+        # type: () -> None
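+        # The tag is omitted when the resolver would re-derive it from the
+        # chosen style (implicit[0] for plain scalars, implicit[1] for quoted
+        # ones, event.implicit for collections); otherwise it is prepared and
+        # written out before the node.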
+        tag = self.event.tag
+        if isinstance(self.event, ScalarEvent):
+            if self.style is None:
+                self.style = self.choose_scalar_style()
+                if (
+                    self.event.value == ""
+                    and self.style == "'"
+                    and tag == "tag:yaml.org,2002:null"
+                    and self.alt_null is not None
+                ):
+                    self.event.value = self.alt_null
+                    self.analysis = None
+                    self.style = self.choose_scalar_style()
+            if (not self.canonical or tag is None) and (
+                (self.style == "" and self.event.implicit[0])
+                or (self.style != "" and self.event.implicit[1])
+            ):
+                self.prepared_tag = None
+                return
+            if self.event.implicit[0] and tag is None:
+                tag = u"!"
+                self.prepared_tag = None
+        else:
+            if (not self.canonical or tag is None) and self.event.implicit:
+                self.prepared_tag = None
+                return
+        if tag is None:
+            raise EmitterError("tag is not specified")
+        if self.prepared_tag is None:
+            self.prepared_tag = self.prepare_tag(tag)
+        if self.prepared_tag:
+            self.write_indicator(self.prepared_tag, True)
+            if (
+                self.sequence_context
+                and not self.flow_level
+                and isinstance(self.event, ScalarEvent)
+            ):
+                self.no_newline = True
+        self.prepared_tag = None
+
+    def choose_scalar_style(self):
+        # type: () -> Any
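+        # Prefer plain style whenever context and analysis allow it; honour a
+        # requested block (| or >) or single-quoted style when the value can
+        # be represented that way; double quotes are the fallback that can
+        # encode any value.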
+        if self.analysis is None:
+            self.analysis = self.analyze_scalar(self.event.value)
+        if self.event.style == '"' or self.canonical:
+            return '"'
+        if (not self.event.style or self.event.style == "?") and (
+            self.event.implicit[0] or not self.event.implicit[2]
+        ):
+            if not (
+                self.simple_key_context
+                and (self.analysis.empty or self.analysis.multiline)
+            ) and (
+                self.flow_level
+                and self.analysis.allow_flow_plain
+                or (not self.flow_level and self.analysis.allow_block_plain)
+            ):
+                return ""
+        self.analysis.allow_block = True
+        if self.event.style and self.event.style in "|>":
+            if (
+                not self.flow_level
+                and not self.simple_key_context
+                and self.analysis.allow_block
+            ):
+                return self.event.style
+        if not self.event.style and self.analysis.allow_double_quoted:
+            if "'" in self.event.value or "\n" in self.event.value:
+                return '"'
+        if not self.event.style or self.event.style == "'":
+            if self.analysis.allow_single_quoted and not (
+                self.simple_key_context and self.analysis.multiline
+            ):
+                return "'"
+        return '"'
+
+    def process_scalar(self):
+        # type: () -> None
+        if self.analysis is None:
+            self.analysis = self.analyze_scalar(self.event.value)
+        if self.style is None:
+            self.style = self.choose_scalar_style()
+        split = not self.simple_key_context
+        # if self.analysis.multiline and split    \
+        #         and (not self.style or self.style in '\'\"'):
+        #     self.write_indent()
+        # nprint('xx', self.sequence_context, self.flow_level)
+        if self.sequence_context and not self.flow_level:
+            self.write_indent()
+        if self.style == '"':
+            self.write_double_quoted(self.analysis.scalar, split)
+        elif self.style == "'":
+            self.write_single_quoted(self.analysis.scalar, split)
+        elif self.style == ">":
+            self.write_folded(self.analysis.scalar)
+        elif self.style == "|":
+            self.write_literal(self.analysis.scalar, self.event.comment)
+        else:
+            self.write_plain(self.analysis.scalar, split)
+        self.analysis = None
+        self.style = None
+        if self.event.comment:
+            self.write_post_comment(self.event)
+
+    # Analyzers.
+
+    def prepare_version(self, version):
+        # type: (Any) -> Any
+        major, minor = version
+        if major != 1:
+            raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+        return u"%d.%d" % (major, minor)
+
+    def prepare_tag_handle(self, handle):
+        # type: (Any) -> Any
+        if not handle:
+            raise EmitterError("tag handle must not be empty")
+        if handle[0] != u"!" or handle[-1] != u"!":
+            raise EmitterError(
+                "tag handle must start and end with '!': %r" % (utf8(handle))
+            )
+        for ch in handle[1:-1]:
+            if not (
+                u"0" <= ch <= u"9"
+                or u"A" <= ch <= u"Z"
+                or u"a" <= ch <= u"z"
+                or ch in u"-_"
+            ):
+                raise EmitterError(
+                    "invalid character %r in the tag handle: %r"
+                    % (utf8(ch), utf8(handle))
+                )
+        return handle
+
+    def prepare_tag_prefix(self, prefix):
+        # type: (Any) -> Any
+        if not prefix:
+            raise EmitterError("tag prefix must not be empty")
+        chunks = []  # type: List[Any]
+        start = end = 0
+        if prefix[0] == u"!":
+            end = 1
+        ch_set = u"-;/?:@&=+$,_.~*'()[]"
+        if self.dumper:
+            version = getattr(self.dumper, "version", (1, 2))
+            if version is None or version >= (1, 2):
+                ch_set += u"#"
+        while end < len(prefix):
+            ch = prefix[end]
+            if (
+                u"0" <= ch <= u"9"
+                or u"A" <= ch <= u"Z"
+                or u"a" <= ch <= u"z"
+                or ch in ch_set
+            ):
+                end += 1
+            else:
+                if start < end:
+                    chunks.append(prefix[start:end])
+                start = end = end + 1
+                data = utf8(ch)
+                for ch in data:
+                    chunks.append(u"%%%02X" % ord(ch))
+        if start < end:
+            chunks.append(prefix[start:end])
+        return "".join(chunks)
+
+    def prepare_tag(self, tag):
+        # type: (Any) -> Any
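+        # Split the tag into a registered handle plus suffix where possible;
+        # characters outside the allowed set are percent-encoded, and tags
+        # without a matching prefix are emitted verbatim as !<...>.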
+        if not tag:
+            raise EmitterError("tag must not be empty")
+        if tag == u"!":
+            return tag
+        handle = None
+        suffix = tag
+        prefixes = sorted(self.tag_prefixes.keys())
+        for prefix in prefixes:
+            if tag.startswith(prefix) and (prefix == u"!" or len(prefix) < len(tag)):
+                handle = self.tag_prefixes[prefix]
+                suffix = tag[len(prefix) :]
+        chunks = []  # type: List[Any]
+        start = end = 0
+        ch_set = u"-;/?:@&=+$,_.~*'()[]"
+        if self.dumper:
+            version = getattr(self.dumper, "version", (1, 2))
+            if version is None or version >= (1, 2):
+                ch_set += u"#"
+        while end < len(suffix):
+            ch = suffix[end]
+            if (
+                u"0" <= ch <= u"9"
+                or u"A" <= ch <= u"Z"
+                or u"a" <= ch <= u"z"
+                or ch in ch_set
+                or (ch == u"!" and handle != u"!")
+            ):
+                end += 1
+            else:
+                if start < end:
+                    chunks.append(suffix[start:end])
+                start = end = end + 1
+                data = utf8(ch)
+                for ch in data:
+                    chunks.append(u"%%%02X" % ord(ch))
+        if start < end:
+            chunks.append(suffix[start:end])
+        suffix_text = "".join(chunks)
+        if handle:
+            return u"%s%s" % (handle, suffix_text)
+        else:
+            return u"!<%s>" % suffix_text
+
+    def prepare_anchor(self, anchor):
+        # type: (Any) -> Any
+        if not anchor:
+            raise EmitterError("anchor must not be empty")
+        for ch in anchor:
+            if not check_anchorname_char(ch):
+                raise EmitterError(
+                    "invalid character %r in the anchor: %r" % (utf8(ch), utf8(anchor))
+                )
+        return anchor
+
+    def analyze_scalar(self, scalar):
+        # type: (Any) -> Any
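+        # Scan the scalar once, recording indicator characters, line breaks,
+        # special characters and significant whitespace combinations, then
+        # derive which styles (plain, quoted, block) may represent it.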
+        # Empty scalar is a special case.
+        if not scalar:
+            return ScalarAnalysis(
+                scalar=scalar,
+                empty=True,
+                multiline=False,
+                allow_flow_plain=False,
+                allow_block_plain=True,
+                allow_single_quoted=True,
+                allow_double_quoted=True,
+                allow_block=False,
+            )
+
+        # Indicators and special characters.
+        block_indicators = False
+        flow_indicators = False
+        line_breaks = False
+        special_characters = False
+
+        # Important whitespace combinations.
+        leading_space = False
+        leading_break = False
+        trailing_space = False
+        trailing_break = False
+        break_space = False
+        space_break = False
+
+        # Check document indicators.
+        if scalar.startswith(u"---") or scalar.startswith(u"..."):
+            block_indicators = True
+            flow_indicators = True
+
+        # First character or preceded by a whitespace.
+        preceded_by_whitespace = True
+
+        # Last character or followed by a whitespace.
+        followed_by_whitespace = (
+            len(scalar) == 1 or scalar[1] in u"\0 \t\r\n\x85\u2028\u2029"
+        )
+
+        # The previous character is a space.
+        previous_space = False
+
+        # The previous character is a break.
+        previous_break = False
+
+        index = 0
+        while index < len(scalar):
+            ch = scalar[index]
+
+            # Check for indicators.
+            if index == 0:
+                # Leading indicators are special characters.
+                if ch in u"#,[]{}&*!|>'\"%@`":
+                    flow_indicators = True
+                    block_indicators = True
+                if ch in u"?:":  # ToDo
+                    if self.serializer.use_version == (1, 1):
+                        flow_indicators = True
+                    elif len(scalar) == 1:  # single character
+                        flow_indicators = True
+                    if followed_by_whitespace:
+                        block_indicators = True
+                if ch == u"-" and followed_by_whitespace:
+                    flow_indicators = True
+                    block_indicators = True
+            else:
+                # Some indicators cannot appear within a scalar as well.
+                if ch in u",[]{}":  # http://yaml.org/spec/1.2/spec.html#id2788859
+                    flow_indicators = True
+                if ch == u"?" and self.serializer.use_version == (1, 1):
+                    flow_indicators = True
+                if ch == u":":
+                    if followed_by_whitespace:
+                        flow_indicators = True
+                        block_indicators = True
+                if ch == u"#" and preceeded_by_whitespace:
+                    flow_indicators = True
+                    block_indicators = True
+
+            # Check for line breaks, special, and unicode characters.
+            if ch in u"\n\x85\u2028\u2029":
+                line_breaks = True
+            if not (ch == u"\n" or u"\x20" <= ch <= u"\x7E"):
+                if (
+                    ch == u"\x85"
+                    or u"\xA0" <= ch <= u"\uD7FF"
+                    or u"\uE000" <= ch <= u"\uFFFD"
+                    or (
+                        self.unicode_supplementary
+                        and (u"\U00010000" <= ch <= u"\U0010FFFF")
+                    )
+                ) and ch != u"\uFEFF":
+                    # unicode_characters = True
+                    if not self.allow_unicode:
+                        special_characters = True
+                else:
+                    special_characters = True
+
+            # Detect important whitespace combinations.
+            if ch == u" ":
+                if index == 0:
+                    leading_space = True
+                if index == len(scalar) - 1:
+                    trailing_space = True
+                if previous_break:
+                    break_space = True
+                previous_space = True
+                previous_break = False
+            elif ch in u"\n\x85\u2028\u2029":
+                if index == 0:
+                    leading_break = True
+                if index == len(scalar) - 1:
+                    trailing_break = True
+                if previous_space:
+                    space_break = True
+                previous_space = False
+                previous_break = True
+            else:
+                previous_space = False
+                previous_break = False
+
+            # Prepare for the next character.
+            index += 1
+            preceded_by_whitespace = ch in u"\0 \t\r\n\x85\u2028\u2029"
+            followed_by_whitespace = (
+                index + 1 >= len(scalar)
+                or scalar[index + 1] in u"\0 \t\r\n\x85\u2028\u2029"
+            )
+
+        # Let's decide what styles are allowed.
+        allow_flow_plain = True
+        allow_block_plain = True
+        allow_single_quoted = True
+        allow_double_quoted = True
+        allow_block = True
+
+        # Leading and trailing whitespaces are bad for plain scalars.
+        if leading_space or leading_break or trailing_space or trailing_break:
+            allow_flow_plain = allow_block_plain = False
+
+        # We do not permit trailing spaces for block scalars.
+        if trailing_space:
+            allow_block = False
+
+        # Spaces at the beginning of a new line are only acceptable for block
+        # scalars.
+        if break_space:
+            allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+        # Spaces followed by breaks, as well as special character are only
+        # allowed for double quoted scalars.
+        if special_characters:
+            allow_flow_plain = (
+                allow_block_plain
+            ) = allow_single_quoted = allow_block = False
+        elif space_break:
+            allow_flow_plain = allow_block_plain = allow_single_quoted = False
+            if not self.allow_space_break:
+                allow_block = False
+
+        # Although the plain scalar writer supports breaks, we never emit
+        # multiline plain scalars.
+        if line_breaks:
+            allow_flow_plain = allow_block_plain = False
+
+        # Flow indicators are forbidden for flow plain scalars.
+        if flow_indicators:
+            allow_flow_plain = False
+
+        # Block indicators are forbidden for block plain scalars.
+        if block_indicators:
+            allow_block_plain = False
+
+        return ScalarAnalysis(
+            scalar=scalar,
+            empty=False,
+            multiline=line_breaks,
+            allow_flow_plain=allow_flow_plain,
+            allow_block_plain=allow_block_plain,
+            allow_single_quoted=allow_single_quoted,
+            allow_double_quoted=allow_double_quoted,
+            allow_block=allow_block,
+        )
+
+    # Writers.
+
+    def flush_stream(self):
+        # type: () -> None
+        if hasattr(self.stream, "flush"):
+            self.stream.flush()
+
+    def write_stream_start(self):
+        # type: () -> None
+        # Write BOM if needed.
+        if self.encoding and self.encoding.startswith("utf-16"):
+            self.stream.write(u"\uFEFF".encode(self.encoding))
+
+    def write_stream_end(self):
+        # type: () -> None
+        self.flush_stream()
+
+    def write_indicator(
+        self, indicator, need_whitespace, whitespace=False, indention=False
+    ):
+        # type: (Any, Any, bool, bool) -> None
+        if self.whitespace or not need_whitespace:
+            data = indicator
+        else:
+            data = u" " + indicator
+        self.whitespace = whitespace
+        self.indention = self.indention and indention
+        self.column += len(data)
+        self.open_ended = False
+        if bool(self.encoding):
+            data = data.encode(self.encoding)
+        self.stream.write(data)
+
+    def write_indent(self):
+        # type: () -> None
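+        # Emit a line break (unless suppressed via no_newline) and pad with
+        # spaces up to the current indentation level.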
+        indent = self.indent or 0
+        if (
+            not self.indention
+            or self.column > indent
+            or (self.column == indent and not self.whitespace)
+        ):
+            if bool(self.no_newline):
+                self.no_newline = False
+            else:
+                self.write_line_break()
+        if self.column < indent:
+            self.whitespace = True
+            data = u" " * (indent - self.column)
+            self.column = indent
+            if self.encoding:
+                data = data.encode(self.encoding)
+            self.stream.write(data)
+
+    def write_line_break(self, data=None):
+        # type: (Any) -> None
+        if data is None:
+            data = self.best_line_break
+        self.whitespace = True
+        self.indention = True
+        self.line += 1
+        self.column = 0
+        if bool(self.encoding):
+            data = data.encode(self.encoding)
+        self.stream.write(data)
+
+    def write_version_directive(self, version_text):
+        # type: (Any) -> None
+        data = u"%%YAML %s" % version_text
+        if self.encoding:
+            data = data.encode(self.encoding)
+        self.stream.write(data)
+        self.write_line_break()
+
+    def write_tag_directive(self, handle_text, prefix_text):
+        # type: (Any, Any) -> None
+        data = u"%%TAG %s %s" % (handle_text, prefix_text)
+        if self.encoding:
+            data = data.encode(self.encoding)
+        self.stream.write(data)
+        self.write_line_break()
+
+    # Scalar streams.
+
+    def write_single_quoted(self, text, split=True):
+        # type: (Any, Any) -> None
+        if self.root_context:
+            if self.requested_indent is not None:
+                self.write_line_break()
+                if self.requested_indent != 0:
+                    self.write_indent()
+        self.write_indicator(u"'", True)
+        spaces = False
+        breaks = False
+        start = end = 0
+        while end <= len(text):
+            ch = None
+            if end < len(text):
+                ch = text[end]
+            if spaces:
+                if ch is None or ch != u" ":
+                    if (
+                        start + 1 == end
+                        and self.column > self.best_width
+                        and split
+                        and start != 0
+                        and end != len(text)
+                    ):
+                        self.write_indent()
+                    else:
+                        data = text[start:end]
+                        self.column += len(data)
+                        if bool(self.encoding):
+                            data = data.encode(self.encoding)
+                        self.stream.write(data)
+                    start = end
+            elif breaks:
+                if ch is None or ch not in u"\n\x85\u2028\u2029":
+                    if text[start] == u"\n":
+                        self.write_line_break()
+                    for br in text[start:end]:
+                        if br == u"\n":
+                            self.write_line_break()
+                        else:
+                            self.write_line_break(br)
+                    self.write_indent()
+                    start = end
+            else:
+                if ch is None or ch in u" \n\x85\u2028\u2029" or ch == u"'":
+                    if start < end:
+                        data = text[start:end]
+                        self.column += len(data)
+                        if bool(self.encoding):
+                            data = data.encode(self.encoding)
+                        self.stream.write(data)
+                        start = end
+            if ch == u"'":
+                data = u"''"
+                self.column += 2
+                if bool(self.encoding):
+                    data = data.encode(self.encoding)
+                self.stream.write(data)
+                start = end + 1
+            if ch is not None:
+                spaces = ch == u" "
+                breaks = ch in u"\n\x85\u2028\u2029"
+            end += 1
+        self.write_indicator(u"'", False)
+
+    ESCAPE_REPLACEMENTS = {
+        u"\0": u"0",
+        u"\x07": u"a",
+        u"\x08": u"b",
+        u"\x09": u"t",
+        u"\x0A": u"n",
+        u"\x0B": u"v",
+        u"\x0C": u"f",
+        u"\x0D": u"r",
+        u"\x1B": u"e",
+        u'"': u'"',
+        u"\\": u"\\",
+        u"\x85": u"N",
+        u"\xA0": u"_",
+        u"\u2028": u"L",
+        u"\u2029": u"P",
+    }
+
+    def write_double_quoted(self, text, split=True):
+        # type: (Any, Any) -> None
+        if self.root_context:
+            if self.requested_indent is not None:
+                self.write_line_break()
+                if self.requested_indent != 0:
+                    self.write_indent()
+        self.write_indicator(u'"', True)
+        start = end = 0
+        while end <= len(text):
+            ch = None
+            if end < len(text):
+                ch = text[end]
+            if (
+                ch is None
+                or ch in u'"\\\x85\u2028\u2029\uFEFF'
+                or not (
+                    u"\x20" <= ch <= u"\x7E"
+                    or (
+                        self.allow_unicode
+                        and (u"\xA0" <= ch <= u"\uD7FF" or u"\uE000" <= ch <= u"\uFFFD")
+                    )
+                )
+            ):
+                if start < end:
+                    data = text[start:end]
+                    self.column += len(data)
+                    if bool(self.encoding):
+                        data = data.encode(self.encoding)
+                    self.stream.write(data)
+                    start = end
+                if ch is not None:
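+                    # Pick the shortest escape form: a named escape if one
+                    # exists, else \xXX, \uXXXX or \UXXXXXXXX by code point.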
+                    if ch in self.ESCAPE_REPLACEMENTS:
+                        data = u"\\" + self.ESCAPE_REPLACEMENTS[ch]
+                    elif ch <= u"\xFF":
+                        data = u"\\x%02X" % ord(ch)
+                    elif ch <= u"\uFFFF":
+                        data = u"\\u%04X" % ord(ch)
+                    else:
+                        data = u"\\U%08X" % ord(ch)
+                    self.column += len(data)
+                    if bool(self.encoding):
+                        data = data.encode(self.encoding)
+                    self.stream.write(data)
+                    start = end + 1
+            if (
+                0 < end < len(text) - 1
+                and (ch == u" " or start >= end)
+                and self.column + (end - start) > self.best_width
+                and split
+            ):
+                data = text[start:end] + u"\\"
+                if start < end:
+                    start = end
+                self.column += len(data)
+                if bool(self.encoding):
+                    data = data.encode(self.encoding)
+                self.stream.write(data)
+                self.write_indent()
+                self.whitespace = False
+                self.indention = False
+                if text[start] == u" ":
+                    data = u"\\"
+                    self.column += len(data)
+                    if bool(self.encoding):
+                        data = data.encode(self.encoding)
+                    self.stream.write(data)
+            end += 1
+        self.write_indicator(u'"', False)
+
+    def determine_block_hints(self, text):
+        # type: (Any) -> Any
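+        # Derive the block scalar header: a digit (explicit indentation
+        # indicator) when the text begins with a space or break, and a
+        # chomping indicator: '-' when the text lacks a final break, '+'
+        # when trailing breaks must be kept. At the root, embedded document
+        # markers also force extra indentation without a header digit.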
+        indent = 0
+        indicator = u""
+        hints = u""
+        if text:
+            if text[0] in u" \n\x85\u2028\u2029":
+                indent = self.best_sequence_indent
+                hints += text_type(indent)
+            elif self.root_context:
+                for end in ["\n---", "\n..."]:
+                    pos = 0
+                    while True:
+                        pos = text.find(end, pos)
+                        if pos == -1:
+                            break
+                        try:
+                            if text[pos + 4] in " \r\n":
+                                break
+                        except IndexError:
+                            pass
+                        pos += 1
+                    if pos > -1:
+                        break
+                if pos > 0:
+                    indent = self.best_sequence_indent
+            if text[-1] not in u"\n\x85\u2028\u2029":
+                indicator = u"-"
+            elif len(text) == 1 or text[-2] in u"\n\x85\u2028\u2029":
+                indicator = u"+"
+        hints += indicator
+        return hints, indent, indicator
+
+    def write_folded(self, text):
+        # type: (Any) -> None
+        hints, _indent, _indicator = self.determine_block_hints(text)
+        self.write_indicator(u">" + hints, True)
+        if _indicator == u"+":
+            self.open_ended = True
+        self.write_line_break()
+        leading_space = True
+        spaces = False
+        breaks = True
+        start = end = 0
+        while end <= len(text):
+            ch = None
+            if end < len(text):
+                ch = text[end]
+            if breaks:
+                if ch is None or ch not in u"\n\x85\u2028\u2029\a":
+                    if (
+                        not leading_space
+                        and ch is not None
+                        and ch != u" "
+                        and text[start] == u"\n"
+                    ):
+                        self.write_line_break()
+                    leading_space = ch == u" "
+                    for br in text[start:end]:
+                        if br == u"\n":
+                            self.write_line_break()
+                        else:
+                            self.write_line_break(br)
+                    if ch is not None:
+                        self.write_indent()
+                    start = end
+            elif spaces:
+                if ch != u" ":
+                    if start + 1 == end and self.column > self.best_width:
+                        self.write_indent()
+                    else:
+                        data = text[start:end]
+                        self.column += len(data)
+                        if bool(self.encoding):
+                            data = data.encode(self.encoding)
+                        self.stream.write(data)
+                    start = end
+            else:
+                if ch is None or ch in u" \n\x85\u2028\u2029\a":
+                    data = text[start:end]
+                    self.column += len(data)
+                    if bool(self.encoding):
+                        data = data.encode(self.encoding)
+                    self.stream.write(data)
+                    if ch == u"\a":
+                        if end < (len(text) - 1) and not text[end + 2].isspace():
+                            self.write_line_break()
+                            self.write_indent()
+                            end += 2  # \a and the space that is inserted on the fold
+                        else:
+                            raise EmitterError(
+                                "unexcpected fold indicator \\a before space"
+                            )
+                    if ch is None:
+                        self.write_line_break()
+                    start = end
+            if ch is not None:
+                breaks = ch in u"\n\x85\u2028\u2029"
+                spaces = ch == u" "
+            end += 1
+
+    def write_literal(self, text, comment=None):
+        # type: (Any, Any) -> None
+        hints, _indent, _indicator = self.determine_block_hints(text)
+        self.write_indicator(u"|" + hints, True)
+        try:
+            comment = comment[1][0]
+            if comment:
+                self.stream.write(comment)
+        except (TypeError, IndexError):
+            pass
+        if _indicator == u"+":
+            self.open_ended = True
+        self.write_line_break()
+        breaks = True
+        start = end = 0
+        while end <= len(text):
+            ch = None
+            if end < len(text):
+                ch = text[end]
+            if breaks:
+                if ch is None or ch not in u"\n\x85\u2028\u2029":
+                    for br in text[start:end]:
+                        if br == u"\n":
+                            self.write_line_break()
+                        else:
+                            self.write_line_break(br)
+                    if ch is not None:
+                        if self.root_context:
+                            idnx = self.indent if self.indent is not None else 0
+                            self.stream.write(u" " * (_indent + idnx))
+                        else:
+                            self.write_indent()
+                    start = end
+            else:
+                if ch is None or ch in u"\n\x85\u2028\u2029":
+                    data = text[start:end]
+                    if bool(self.encoding):
+                        data = data.encode(self.encoding)
+                    self.stream.write(data)
+                    if ch is None:
+                        self.write_line_break()
+                    start = end
+            if ch is not None:
+                breaks = ch in u"\n\x85\u2028\u2029"
+            end += 1
+
+    def write_plain(self, text, split=True):
+        # type: (Any, Any) -> None
+        if self.root_context:
+            if self.requested_indent is not None:
+                self.write_line_break()
+                if self.requested_indent != 0:
+                    self.write_indent()
+            else:
+                self.open_ended = True
+        if not text:
+            return
+        if not self.whitespace:
+            data = u" "
+            self.column += len(data)
+            if self.encoding:
+                data = data.encode(self.encoding)
+            self.stream.write(data)
+        self.whitespace = False
+        self.indention = False
+        spaces = False
+        breaks = False
+        start = end = 0
+        while end <= len(text):
+            ch = None
+            if end < len(text):
+                ch = text[end]
+            if spaces:
+                if ch != u" ":
+                    if start + 1 == end and self.column > self.best_width and split:
+                        self.write_indent()
+                        self.whitespace = False
+                        self.indention = False
+                    else:
+                        data = text[start:end]
+                        self.column += len(data)
+                        if self.encoding:
+                            data = data.encode(self.encoding)
+                        self.stream.write(data)
+                    start = end
+            elif breaks:
+                if ch not in u"\n\x85\u2028\u2029":  # type: ignore
+                    if text[start] == u"\n":
+                        self.write_line_break()
+                    for br in text[start:end]:
+                        if br == u"\n":
+                            self.write_line_break()
+                        else:
+                            self.write_line_break(br)
+                    self.write_indent()
+                    self.whitespace = False
+                    self.indention = False
+                    start = end
+            else:
+                if ch is None or ch in u" \n\x85\u2028\u2029":
+                    data = text[start:end]
+                    self.column += len(data)
+                    if self.encoding:
+                        data = data.encode(self.encoding)
+                    try:
+                        self.stream.write(data)
+                    except:  # NOQA
+                        sys.stdout.write(repr(data) + "\n")
+                        raise
+                    start = end
+            if ch is not None:
+                spaces = ch == u" "
+                breaks = ch in u"\n\x85\u2028\u2029"
+            end += 1
+
+    def write_comment(self, comment, pre=False):
+        # type: (Any, bool) -> None
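+        # Re-emit a comment at (approximately) its original column, padding
+        # with spaces; fall back to one space past the current column when
+        # the recorded position cannot be honoured.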
+        value = comment.value
+        # nprintf('{:02d} {:02d} {!r}'.format(self.column, comment.start_mark.column, value))
+        if not pre and value[-1] == "\n":
+            value = value[:-1]
+        try:
+            # get original column position
+            col = comment.start_mark.column
+            if comment.value and comment.value.startswith("\n"):
+                # never inject extra spaces if the comment starts with a newline
+                # and is not a real comment (e.g. an empty line following a key-value pair)
+                col = self.column
+            elif col < self.column + 1:
+                raise ValueError
+        except ValueError:
+            col = self.column + 1
+        # nprint('post_comment', self.line, self.column, value)
+        try:
+            # at least one space if the current column >= the start column of the comment
+            # but not at the start of a line
+            nr_spaces = col - self.column
+            if self.column and value.strip() and nr_spaces < 1 and value[0] != "\n":
+                nr_spaces = 1
+            value = " " * nr_spaces + value
+            try:
+                if bool(self.encoding):
+                    value = value.encode(self.encoding)
+            except UnicodeDecodeError:
+                pass
+            self.stream.write(value)
+        except TypeError:
+            raise
+        if not pre:
+            self.write_line_break()
+
+    def write_pre_comment(self, event):
+        # type: (Any) -> bool
+        comments = event.comment[1]
+        if comments is None:
+            return False
+        try:
+            start_events = (MappingStartEvent, SequenceStartEvent)
+            for comment in comments:
+                if isinstance(event, start_events) and getattr(
+                    comment, "pre_done", None
+                ):
+                    continue
+                if self.column != 0:
+                    self.write_line_break()
+                self.write_comment(comment, pre=True)
+                if isinstance(event, start_events):
+                    comment.pre_done = True
+        except TypeError:
+            sys.stdout.write("eventtt {} {}".format(type(event), event))
+            raise
+        return True
+
+    def write_post_comment(self, event):
+        # type: (Any) -> bool
+        if event.comment[0] is None:
+            return False
+        comment = event.comment[0]
+        self.write_comment(comment)
+        return True
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/error.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/error.py
new file mode 100644
index 00000000..01db4f40
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/error.py
@@ -0,0 +1,321 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+import warnings
+import textwrap
+
+from strictyaml.ruamel.compat import utf8
+
+if False:  # MYPY
+    from typing import Any, Dict, Optional, List, Text  # NOQA
+
+
+__all__ = [
+    "FileMark",
+    "StringMark",
+    "CommentMark",
+    "YAMLError",
+    "MarkedYAMLError",
+    "ReusedAnchorWarning",
+    "UnsafeLoaderWarning",
+    "MarkedYAMLWarning",
+    "MarkedYAMLFutureWarning",
+]
+
+
+class StreamMark(object):
+    __slots__ = "name", "index", "line", "column"
+
+    def __init__(self, name, index, line, column):
+        # type: (Any, int, int, int) -> None
+        self.name = name
+        self.index = index
+        self.line = line
+        self.column = column
+
+    def __str__(self):
+        # type: () -> Any
+        where = '  in "%s", line %d, column %d' % (
+            self.name,
+            self.line + 1,
+            self.column + 1,
+        )
+        return where
+
+    def __eq__(self, other):
+        # type: (Any) -> bool
+        if self.line != other.line or self.column != other.column:
+            return False
+        if self.name != other.name or self.index != other.index:
+            return False
+        return True
+
+    def __ne__(self, other):
+        # type: (Any) -> bool
+        return not self.__eq__(other)
+
+
+class FileMark(StreamMark):
+    __slots__ = ()
+
+
+class StringMark(StreamMark):
+    __slots__ = "name", "index", "line", "column", "buffer", "pointer"
+
+    def __init__(self, name, index, line, column, buffer, pointer):
+        # type: (Any, int, int, int, Any, Any) -> None
+        StreamMark.__init__(self, name, index, line, column)
+        self.buffer = buffer
+        self.pointer = pointer
+
+    def get_snippet(self, indent=4, max_length=75):
+        # type: (int, int) -> Any
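+        # Build a one-line excerpt of the buffer around the error position,
+        # truncated to max_length with ' ... ' markers, followed by a caret
+        # line pointing at the offending column.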
+        if self.buffer is None:  # always False
+            return None
+        head = ""
+        start = self.pointer
+        while start > 0 and self.buffer[start - 1] not in u"\0\r\n\x85\u2028\u2029":
+            start -= 1
+            if self.pointer - start > max_length / 2 - 1:
+                head = " ... "
+                start += 5
+                break
+        tail = ""
+        end = self.pointer
+        while (
+            end < len(self.buffer) and self.buffer[end] not in u"\0\r\n\x85\u2028\u2029"
+        ):
+            end += 1
+            if end - self.pointer > max_length / 2 - 1:
+                tail = " ... "
+                end -= 5
+                break
+        snippet = utf8(self.buffer[start:end])
+        caret = "^"
+        caret = "^ (line: {})".format(self.line + 1)
+        return (
+            " " * indent
+            + head
+            + snippet
+            + tail
+            + "\n"
+            + " " * (indent + self.pointer - start + len(head))
+            + caret
+        )
+
+    def __str__(self):
+        # type: () -> Any
+        snippet = self.get_snippet()
+        where = '  in "%s", line %d, column %d' % (
+            self.name,
+            self.line + 1,
+            self.column + 1,
+        )
+        if snippet is not None:
+            where += ":\n" + snippet
+        return where
+
+
+class CommentMark(object):
+    __slots__ = ("column",)
+
+    def __init__(self, column):
+        # type: (Any) -> None
+        self.column = column
+
+
+class YAMLError(Exception):
+    pass
+
+
+class MarkedYAMLError(YAMLError):
+    def __init__(
+        self,
+        context=None,
+        context_mark=None,
+        problem=None,
+        problem_mark=None,
+        note=None,
+        warn=None,
+    ):
+        # type: (Any, Any, Any, Any, Any, Any) -> None
+        self.context = context
+        self.context_mark = context_mark
+        self.problem = problem
+        self.problem_mark = problem_mark
+        self.note = note
+        # warn is ignored
+
+    def __str__(self):
+        # type: () -> Any
+        lines = []  # type: List[str]
+        if self.context is not None:
+            lines.append(self.context)
+        if self.context_mark is not None and (
+            self.problem is None
+            or self.problem_mark is None
+            or self.context_mark.name != self.problem_mark.name
+            or self.context_mark.line != self.problem_mark.line
+            or self.context_mark.column != self.problem_mark.column
+        ):
+            lines.append(str(self.context_mark))
+        if self.problem is not None:
+            lines.append(self.problem)
+        if self.problem_mark is not None:
+            lines.append(str(self.problem_mark))
+        if self.note is not None and self.note:
+            note = textwrap.dedent(self.note)
+            lines.append(note)
+        return "\n".join(lines)
+
+
+class YAMLStreamError(Exception):
+    pass
+
+
+class YAMLWarning(Warning):
+    pass
+
+
+class MarkedYAMLWarning(YAMLWarning):
+    def __init__(
+        self,
+        context=None,
+        context_mark=None,
+        problem=None,
+        problem_mark=None,
+        note=None,
+        warn=None,
+    ):
+        # type: (Any, Any, Any, Any, Any, Any) -> None
+        self.context = context
+        self.context_mark = context_mark
+        self.problem = problem
+        self.problem_mark = problem_mark
+        self.note = note
+        self.warn = warn
+
+    def __str__(self):
+        # type: () -> Any
+        lines = []  # type: List[str]
+        if self.context is not None:
+            lines.append(self.context)
+        if self.context_mark is not None and (
+            self.problem is None
+            or self.problem_mark is None
+            or self.context_mark.name != self.problem_mark.name
+            or self.context_mark.line != self.problem_mark.line
+            or self.context_mark.column != self.problem_mark.column
+        ):
+            lines.append(str(self.context_mark))
+        if self.problem is not None:
+            lines.append(self.problem)
+        if self.problem_mark is not None:
+            lines.append(str(self.problem_mark))
+        if self.note is not None and self.note:
+            note = textwrap.dedent(self.note)
+            lines.append(note)
+        if self.warn is not None and self.warn:
+            warn = textwrap.dedent(self.warn)
+            lines.append(warn)
+        return "\n".join(lines)
+
+
+class ReusedAnchorWarning(YAMLWarning):
+    pass
+
+
+class UnsafeLoaderWarning(YAMLWarning):
+    text = """
+The default 'Loader' for 'load(stream)' without further arguments can be unsafe.
+Use 'load(stream, Loader=strictyaml.ruamel.Loader)' explicitly if that is OK.
+Alternatively include the following in your code:
+
+  import warnings
+  warnings.simplefilter('ignore', strictyaml.ruamel.error.UnsafeLoaderWarning)
+
+In most other cases you should consider using 'safe_load(stream)'"""
+
+
+warnings.simplefilter("once", UnsafeLoaderWarning)
+
+
+class MantissaNoDotYAML1_1Warning(YAMLWarning):
+    def __init__(self, node, flt_str):
+        # type: (Any, Any) -> None
+        self.node = node
+        self.flt = flt_str
+
+    def __str__(self):
+        # type: () -> Any
+        line = self.node.start_mark.line
+        col = self.node.start_mark.column
+        return """
+In YAML 1.1 floating point values should have a dot ('.') in their mantissa.
+See the Floating-Point Language-Independent Type for YAML™ Version 1.1 specification
+( http://yaml.org/type/float.html ). This dot is required neither for JSON nor for YAML 1.2
+
+Correct your float: "{}" on line: {}, column: {}
+
+or alternatively include the following in your code:
+
+  import warnings
+  warnings.simplefilter('ignore', strictyaml.ruamel.error.MantissaNoDotYAML1_1Warning)
+
+""".format(
+            self.flt, line, col
+        )
+
+
+warnings.simplefilter("once", MantissaNoDotYAML1_1Warning)
+
+
+class YAMLFutureWarning(Warning):
+    pass
+
+
+class MarkedYAMLFutureWarning(YAMLFutureWarning):
+    def __init__(
+        self,
+        context=None,
+        context_mark=None,
+        problem=None,
+        problem_mark=None,
+        note=None,
+        warn=None,
+    ):
+        # type: (Any, Any, Any, Any, Any, Any) -> None
+        self.context = context
+        self.context_mark = context_mark
+        self.problem = problem
+        self.problem_mark = problem_mark
+        self.note = note
+        self.warn = warn
+
+    def __str__(self):
+        # type: () -> Any
+        lines = []  # type: List[str]
+        if self.context is not None:
+            lines.append(self.context)
+
+        if self.context_mark is not None and (
+            self.problem is None
+            or self.problem_mark is None
+            or self.context_mark.name != self.problem_mark.name
+            or self.context_mark.line != self.problem_mark.line
+            or self.context_mark.column != self.problem_mark.column
+        ):
+            lines.append(str(self.context_mark))
+        if self.problem is not None:
+            lines.append(self.problem)
+        if self.problem_mark is not None:
+            lines.append(str(self.problem_mark))
+        if self.note is not None and self.note:
+            note = textwrap.dedent(self.note)
+            lines.append(note)
+        if self.warn is not None and self.warn:
+            warn = textwrap.dedent(self.warn)
+            lines.append(warn)
+        return "\n".join(lines)
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/events.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/events.py
new file mode 100644
index 00000000..24fd370e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/events.py
@@ -0,0 +1,159 @@
+# coding: utf-8
+
+# Abstract classes.
+
+if False:  # MYPY
+    from typing import Any, Dict, Optional, List  # NOQA
+
+
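+# Sentinel default for Event(comment=...); it lets Event distinguish "no
+# comment argument supplied" (normalised to None) from an explicit value.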
+def CommentCheck():
+    # type: () -> None
+    pass
+
+
+class Event(object):
+    __slots__ = "start_mark", "end_mark", "comment"
+
+    def __init__(self, start_mark=None, end_mark=None, comment=CommentCheck):
+        # type: (Any, Any, Any) -> None
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        # assert comment is not CommentCheck
+        if comment is CommentCheck:
+            comment = None
+        self.comment = comment
+
+    def __repr__(self):
+        # type: () -> Any
+        attributes = [
+            key
+            for key in ["anchor", "tag", "implicit", "value", "flow_style", "style"]
+            if hasattr(self, key)
+        ]
+        arguments = ", ".join(
+            ["%s=%r" % (key, getattr(self, key)) for key in attributes]
+        )
+        if self.comment not in [None, CommentCheck]:
+            arguments += ", comment={!r}".format(self.comment)
+        return "%s(%s)" % (self.__class__.__name__, arguments)
+
+
+class NodeEvent(Event):
+    __slots__ = ("anchor",)
+
+    def __init__(self, anchor, start_mark=None, end_mark=None, comment=None):
+        # type: (Any, Any, Any, Any) -> None
+        Event.__init__(self, start_mark, end_mark, comment)
+        self.anchor = anchor
+
+
+class CollectionStartEvent(NodeEvent):
+    __slots__ = "tag", "implicit", "flow_style", "nr_items"
+
+    def __init__(
+        self,
+        anchor,
+        tag,
+        implicit,
+        start_mark=None,
+        end_mark=None,
+        flow_style=None,
+        comment=None,
+        nr_items=None,
+    ):
+        # type: (Any, Any, Any, Any, Any, Any, Any, Optional[int]) -> None
+        NodeEvent.__init__(self, anchor, start_mark, end_mark, comment)
+        self.tag = tag
+        self.implicit = implicit
+        self.flow_style = flow_style
+        self.nr_items = nr_items
+
+
+class CollectionEndEvent(Event):
+    __slots__ = ()
+
+
+# Implementations.
+
+
+class StreamStartEvent(Event):
+    __slots__ = ("encoding",)
+
+    def __init__(self, start_mark=None, end_mark=None, encoding=None, comment=None):
+        # type: (Any, Any, Any, Any) -> None
+        Event.__init__(self, start_mark, end_mark, comment)
+        self.encoding = encoding
+
+
+class StreamEndEvent(Event):
+    __slots__ = ()
+
+
+class DocumentStartEvent(Event):
+    __slots__ = "explicit", "version", "tags"
+
+    def __init__(
+        self,
+        start_mark=None,
+        end_mark=None,
+        explicit=None,
+        version=None,
+        tags=None,
+        comment=None,
+    ):
+        # type: (Any, Any, Any, Any, Any, Any) -> None
+        Event.__init__(self, start_mark, end_mark, comment)
+        self.explicit = explicit
+        self.version = version
+        self.tags = tags
+
+
+class DocumentEndEvent(Event):
+    __slots__ = ("explicit",)
+
+    def __init__(self, start_mark=None, end_mark=None, explicit=None, comment=None):
+        # type: (Any, Any, Any, Any) -> None
+        Event.__init__(self, start_mark, end_mark, comment)
+        self.explicit = explicit
+
+
+class AliasEvent(NodeEvent):
+    __slots__ = ()
+
+
+class ScalarEvent(NodeEvent):
+    __slots__ = "tag", "implicit", "value", "style"
+
+    def __init__(
+        self,
+        anchor,
+        tag,
+        implicit,
+        value,
+        start_mark=None,
+        end_mark=None,
+        style=None,
+        comment=None,
+    ):
+        # type: (Any, Any, Any, Any, Any, Any, Any, Any) -> None
+        NodeEvent.__init__(self, anchor, start_mark, end_mark, comment)
+        self.tag = tag
+        self.implicit = implicit
+        self.value = value
+        self.style = style
+
+
+class SequenceStartEvent(CollectionStartEvent):
+    __slots__ = ()
+
+
+class SequenceEndEvent(CollectionEndEvent):
+    __slots__ = ()
+
+
+class MappingStartEvent(CollectionStartEvent):
+    __slots__ = ()
+
+
+class MappingEndEvent(CollectionEndEvent):
+    __slots__ = ()
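+
+
+# Usage sketch: the classes above are what the parser yields. An abbreviated
+# illustration of the event stream for a tiny block mapping, using the
+# top-level parse() helper from main.py (reprs shortened here):
+#
+#     from strictyaml.ruamel.main import parse
+#     for ev in parse("a: 1\n"):
+#         print(ev)
+#     # StreamStartEvent() -> DocumentStartEvent() -> MappingStartEvent(...)
+#     # -> ScalarEvent(value='a') -> ScalarEvent(value='1')
+#     # -> MappingEndEvent() -> DocumentEndEvent() -> StreamEndEvent()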
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/loader.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/loader.py
new file mode 100644
index 00000000..fcf13ea8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/loader.py
@@ -0,0 +1,76 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+
+from strictyaml.ruamel.reader import Reader
+from strictyaml.ruamel.scanner import Scanner, RoundTripScanner
+from strictyaml.ruamel.parser import Parser, RoundTripParser
+from strictyaml.ruamel.composer import Composer
+from strictyaml.ruamel.constructor import (
+    BaseConstructor,
+    SafeConstructor,
+    Constructor,
+    RoundTripConstructor,
+)
+from strictyaml.ruamel.resolver import VersionedResolver
+
+if False:  # MYPY
+    from typing import Any, Dict, List, Union, Optional  # NOQA
+    from strictyaml.ruamel.compat import StreamTextType, VersionType  # NOQA
+
+__all__ = ["BaseLoader", "SafeLoader", "Loader", "RoundTripLoader"]
+
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, VersionedResolver):
+    def __init__(self, stream, version=None, preserve_quotes=None):
+        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
+        Reader.__init__(self, stream, loader=self)
+        Scanner.__init__(self, loader=self)
+        Parser.__init__(self, loader=self)
+        Composer.__init__(self, loader=self)
+        BaseConstructor.__init__(self, loader=self)
+        VersionedResolver.__init__(self, version, loader=self)
+
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, VersionedResolver):
+    def __init__(self, stream, version=None, preserve_quotes=None):
+        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
+        Reader.__init__(self, stream, loader=self)
+        Scanner.__init__(self, loader=self)
+        Parser.__init__(self, loader=self)
+        Composer.__init__(self, loader=self)
+        SafeConstructor.__init__(self, loader=self)
+        VersionedResolver.__init__(self, version, loader=self)
+
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, VersionedResolver):
+    def __init__(self, stream, version=None, preserve_quotes=None):
+        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
+        Reader.__init__(self, stream, loader=self)
+        Scanner.__init__(self, loader=self)
+        Parser.__init__(self, loader=self)
+        Composer.__init__(self, loader=self)
+        Constructor.__init__(self, loader=self)
+        VersionedResolver.__init__(self, version, loader=self)
+
+
+class RoundTripLoader(
+    Reader,
+    RoundTripScanner,
+    RoundTripParser,
+    Composer,
+    RoundTripConstructor,
+    VersionedResolver,
+):
+    def __init__(self, stream, version=None, preserve_quotes=None):
+        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
+        # self.reader = Reader.__init__(self, stream)
+        Reader.__init__(self, stream, loader=self)
+        RoundTripScanner.__init__(self, loader=self)
+        RoundTripParser.__init__(self, loader=self)
+        Composer.__init__(self, loader=self)
+        RoundTripConstructor.__init__(
+            self, preserve_quotes=preserve_quotes, loader=self
+        )
+        VersionedResolver.__init__(self, version, loader=self)
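+
+
+# Usage sketch: each loader above is assembled from reader/scanner/parser/
+# composer/constructor/resolver mixins that register themselves on `loader`.
+# Driving a RoundTripLoader directly mirrors what main.round_trip_load() does:
+#
+#     loader = RoundTripLoader("a: 1  # keep me\n", preserve_quotes=True)
+#     try:
+#         data = loader.get_single_data()
+#     finally:
+#         loader._parser.dispose()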
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/main.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/main.py
new file mode 100644
index 00000000..3cbf9351
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/main.py
@@ -0,0 +1,1581 @@
+# coding: utf-8
+
+from __future__ import absolute_import, unicode_literals, print_function
+
+import sys
+import os
+import warnings
+import glob
+from importlib import import_module
+
+
+import strictyaml.ruamel
+from strictyaml.ruamel.error import UnsafeLoaderWarning, YAMLError  # NOQA
+
+from strictyaml.ruamel.tokens import *  # NOQA
+from strictyaml.ruamel.events import *  # NOQA
+from strictyaml.ruamel.nodes import *  # NOQA
+
+from strictyaml.ruamel.loader import (
+    BaseLoader,
+    SafeLoader,
+    Loader,
+    RoundTripLoader,
+)  # NOQA
+from strictyaml.ruamel.dumper import (
+    BaseDumper,
+    SafeDumper,
+    Dumper,
+    RoundTripDumper,
+)  # NOQA
+from strictyaml.ruamel.compat import StringIO, BytesIO, with_metaclass, PY3, nprint
+from strictyaml.ruamel.resolver import VersionedResolver, Resolver  # NOQA
+from strictyaml.ruamel.representer import (
+    BaseRepresenter,
+    SafeRepresenter,
+    Representer,
+    RoundTripRepresenter,
+)
+from strictyaml.ruamel.constructor import (
+    BaseConstructor,
+    SafeConstructor,
+    Constructor,
+    RoundTripConstructor,
+)
+from strictyaml.ruamel.loader import Loader as UnsafeLoader
+
+if False:  # MYPY
+    from typing import List, Set, Dict, Union, Any, Callable, Optional, Text  # NOQA
+    from strictyaml.ruamel.compat import StreamType, StreamTextType, VersionType  # NOQA
+
+    if PY3:
+        from pathlib import Path
+    else:
+        Path = Any
+
+try:
+    from _ruamel_yaml import CParser, CEmitter  # type: ignore
+except:  # NOQA
+    CParser = CEmitter = None
+
+# import io
+
+enforce = object()
+
+
+# YAML is an acronym (spoken, it rhymes with "camel") and thus a
+# subset of abbreviations, which should be all caps according to PEP8
+
+
+class YAML(object):
+    def __init__(
+        self,
+        _kw=enforce,
+        typ=None,
+        pure=False,
+        output=None,
+        plug_ins=None,  # input=None,
+    ):
+        # type: (Any, Optional[Text], Any, Any, Any) -> None
+        """
+        _kw: not used, forces keyword arguments in 2.7 (in 3 you can use (*, safe_load=..))
+        typ: 'rt'/None -> RoundTripLoader/RoundTripDumper,  (default)
+             'safe'    -> SafeLoader/SafeDumper,
+             'unsafe'  -> normal/unsafe Loader/Dumper
+             'base'    -> baseloader
+        pure: if True only use Python modules
+        input/output: needed to work as context manager
+        plug_ins: a list of plug-in files
+        """
+        if _kw is not enforce:
+            raise TypeError(
+                "{}.__init__() takes no positional argument but at least "
+                "one was given ({!r})".format(self.__class__.__name__, _kw)
+            )
+
+        self.typ = ["rt"] if typ is None else (typ if isinstance(typ, list) else [typ])
+        self.pure = pure
+
+        # self._input = input
+        self._output = output
+        self._context_manager = None  # type: Any
+
+        self.plug_ins = []  # type: List[Any]
+        for pu in ([] if plug_ins is None else plug_ins) + self.official_plug_ins():
+            file_name = pu.replace(os.sep, ".")
+            self.plug_ins.append(import_module(file_name))
+        self.Resolver = strictyaml.ruamel.resolver.VersionedResolver  # type: Any
+        self.allow_unicode = True
+        self.Reader = None  # type: Any
+        self.Representer = None  # type: Any
+        self.Constructor = None  # type: Any
+        self.Scanner = None  # type: Any
+        self.Serializer = None  # type: Any
+        self.default_flow_style = None  # type: Any
+        typ_found = 1
+        setup_rt = False
+        if "rt" in self.typ:
+            setup_rt = True
+        elif "safe" in self.typ:
+            self.Emitter = (
+                strictyaml.ruamel.emitter.Emitter
+                if pure or CEmitter is None
+                else CEmitter
+            )
+            self.Representer = strictyaml.ruamel.representer.SafeRepresenter
+            self.Parser = (
+                strictyaml.ruamel.parser.Parser if pure or CParser is None else CParser
+            )
+            self.Composer = strictyaml.ruamel.composer.Composer
+            self.Constructor = strictyaml.ruamel.constructor.SafeConstructor
+        elif "base" in self.typ:
+            self.Emitter = strictyaml.ruamel.emitter.Emitter
+            self.Representer = strictyaml.ruamel.representer.BaseRepresenter
+            self.Parser = (
+                strictyaml.ruamel.parser.Parser if pure or CParser is None else CParser
+            )
+            self.Composer = strictyaml.ruamel.composer.Composer
+            self.Constructor = strictyaml.ruamel.constructor.BaseConstructor
+        elif "unsafe" in self.typ:
+            self.Emitter = (
+                strictyaml.ruamel.emitter.Emitter
+                if pure or CEmitter is None
+                else CEmitter
+            )
+            self.Representer = strictyaml.ruamel.representer.Representer
+            self.Parser = (
+                strictyaml.ruamel.parser.Parser if pure or CParser is None else CParser
+            )
+            self.Composer = strictyaml.ruamel.composer.Composer
+            self.Constructor = strictyaml.ruamel.constructor.Constructor
+        else:
+            setup_rt = True
+            typ_found = 0
+        if setup_rt:
+            self.default_flow_style = False
+            # no optimized rt-dumper yet
+            self.Emitter = strictyaml.ruamel.emitter.Emitter
+            self.Serializer = strictyaml.ruamel.serializer.Serializer
+            self.Representer = strictyaml.ruamel.representer.RoundTripRepresenter
+            self.Scanner = strictyaml.ruamel.scanner.RoundTripScanner
+            # no optimized rt-parser yet
+            self.Parser = strictyaml.ruamel.parser.RoundTripParser
+            self.Composer = strictyaml.ruamel.composer.Composer
+            self.Constructor = strictyaml.ruamel.constructor.RoundTripConstructor
+        del setup_rt
+        self.stream = None
+        self.canonical = None
+        self.old_indent = None
+        self.width = None
+        self.line_break = None
+
+        self.map_indent = None
+        self.sequence_indent = None
+        self.sequence_dash_offset = 0
+        self.compact_seq_seq = None
+        self.compact_seq_map = None
+        self.sort_base_mapping_type_on_output = None  # default: sort
+
+        self.top_level_colon_align = None
+        self.prefix_colon = None
+        self.version = None
+        self.preserve_quotes = None
+        self.allow_duplicate_keys = False  # duplicate keys in map, set
+        self.encoding = "utf-8"
+        self.explicit_start = None
+        self.explicit_end = None
+        self.tags = None
+        self.default_style = None
+        self.top_level_block_style_scalar_no_indent_error_1_1 = False
+        # directives end indicator with single scalar document
+        self.scalar_after_indicator = None
+        # [a, b: 1, c: {d: 2}]  vs. [a, {b: 1}, {c: {d: 2}}]
+        self.brace_single_entry_mapping_in_flow_sequence = False
+        for module in self.plug_ins:
+            if getattr(module, "typ", None) in self.typ:
+                typ_found += 1
+                module.init_typ(self)
+                break
+        if typ_found == 0:
+            raise NotImplementedError(
+                'typ "{}"not recognised (need to install plug-in?)'.format(self.typ)
+            )
+
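+    # Usage sketch: the typ/pure combination above selects the implementation
+    # classes, e.g. YAML(typ='safe', pure=True) forces the pure-Python
+    # SafeConstructor pipeline, while the default YAML() wires up the
+    # round-trip Scanner/Parser/Constructor/Representer.
+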
+    @property
+    def reader(self):
+        # type: () -> Any
+        try:
+            return self._reader  # type: ignore
+        except AttributeError:
+            self._reader = self.Reader(None, loader=self)
+            return self._reader
+
+    @property
+    def scanner(self):
+        # type: () -> Any
+        try:
+            return self._scanner  # type: ignore
+        except AttributeError:
+            self._scanner = self.Scanner(loader=self)
+            return self._scanner
+
+    @property
+    def parser(self):
+        # type: () -> Any
+        attr = "_" + sys._getframe().f_code.co_name
+        if not hasattr(self, attr):
+            if self.Parser is not CParser:
+                setattr(self, attr, self.Parser(loader=self))
+            else:
+                if getattr(self, "_stream", None) is None:
+                    # wait for the stream
+                    return None
+                else:
+                    # if not hasattr(self._stream, 'read') and hasattr(self._stream, 'open'):
+                    #     # pathlib.Path() instance
+                    #     setattr(self, attr, CParser(self._stream))
+                    # else:
+                    setattr(self, attr, CParser(self._stream))
+                    # self._parser = self._composer = self
+                    # nprint('scanner', self.loader.scanner)
+
+        return getattr(self, attr)
+
+    @property
+    def composer(self):
+        # type: () -> Any
+        attr = "_" + sys._getframe().f_code.co_name
+        if not hasattr(self, attr):
+            setattr(self, attr, self.Composer(loader=self))
+        return getattr(self, attr)
+
+    @property
+    def constructor(self):
+        # type: () -> Any
+        attr = "_" + sys._getframe().f_code.co_name
+        if not hasattr(self, attr):
+            cnst = self.Constructor(preserve_quotes=self.preserve_quotes, loader=self)
+            cnst.allow_duplicate_keys = self.allow_duplicate_keys
+            setattr(self, attr, cnst)
+        return getattr(self, attr)
+
+    @property
+    def resolver(self):
+        # type: () -> Any
+        attr = "_" + sys._getframe().f_code.co_name
+        if not hasattr(self, attr):
+            setattr(self, attr, self.Resolver(version=self.version, loader=self))
+        return getattr(self, attr)
+
+    @property
+    def emitter(self):
+        # type: () -> Any
+        attr = "_" + sys._getframe().f_code.co_name
+        if not hasattr(self, attr):
+            if self.Emitter is not CEmitter:
+                _emitter = self.Emitter(
+                    None,
+                    canonical=self.canonical,
+                    indent=self.old_indent,
+                    width=self.width,
+                    allow_unicode=self.allow_unicode,
+                    line_break=self.line_break,
+                    prefix_colon=self.prefix_colon,
+                    brace_single_entry_mapping_in_flow_sequence=self.brace_single_entry_mapping_in_flow_sequence,  # NOQA
+                    dumper=self,
+                )
+                setattr(self, attr, _emitter)
+                if self.map_indent is not None:
+                    _emitter.best_map_indent = self.map_indent
+                if self.sequence_indent is not None:
+                    _emitter.best_sequence_indent = self.sequence_indent
+                if self.sequence_dash_offset is not None:
+                    _emitter.sequence_dash_offset = self.sequence_dash_offset
+                    # _emitter.block_seq_indent = self.sequence_dash_offset
+                if self.compact_seq_seq is not None:
+                    _emitter.compact_seq_seq = self.compact_seq_seq
+                if self.compact_seq_map is not None:
+                    _emitter.compact_seq_map = self.compact_seq_map
+            else:
+                if getattr(self, "_stream", None) is None:
+                    # wait for the stream
+                    return None
+                return None
+        return getattr(self, attr)
+
+    @property
+    def serializer(self):
+        # type: () -> Any
+        attr = "_" + sys._getframe().f_code.co_name
+        if not hasattr(self, attr):
+            setattr(
+                self,
+                attr,
+                self.Serializer(
+                    encoding=self.encoding,
+                    explicit_start=self.explicit_start,
+                    explicit_end=self.explicit_end,
+                    version=self.version,
+                    tags=self.tags,
+                    dumper=self,
+                ),
+            )
+        return getattr(self, attr)
+
+    @property
+    def representer(self):
+        # type: () -> Any
+        attr = "_" + sys._getframe().f_code.co_name
+        if not hasattr(self, attr):
+            repres = self.Representer(
+                default_style=self.default_style,
+                default_flow_style=self.default_flow_style,
+                dumper=self,
+            )
+            if self.sort_base_mapping_type_on_output is not None:
+                repres.sort_base_mapping_type_on_output = (
+                    self.sort_base_mapping_type_on_output
+                )
+            setattr(self, attr, repres)
+        return getattr(self, attr)
+
+    # separate output resolver?
+
+    # def load(self, stream=None):
+    #     if self._context_manager:
+    #        if not self._input:
+    #             raise TypeError("Missing input stream while dumping from context manager")
+    #         for data in self._context_manager.load():
+    #             yield data
+    #         return
+    #     if stream is None:
+    #         raise TypeError("Need a stream argument when not loading from context manager")
+    #     return self.load_one(stream)
+
+    def load(self, stream):
+        # type: (Union[Path, StreamTextType]) -> Any
+        """
+        at this point you either have the non-pure Parser (which has its own reader and
+        scanner) or you have the pure Parser.
+        If the pure Parser is set, then set the Reader and Scanner, if not already set.
+        If either the Scanner or Reader is set, you cannot use the non-pure Parser,
+        so reset it to the pure Parser and set the Reader resp. Scanner if necessary.
+        """
+        if not hasattr(stream, "read") and hasattr(stream, "open"):
+            # pathlib.Path() instance
+            with stream.open("rb") as fp:
+                return self.load(fp)
+        constructor, parser = self.get_constructor_parser(stream)
+        try:
+            return constructor.get_single_data()
+        finally:
+            parser.dispose()
+            try:
+                self._reader.reset_reader()
+            except AttributeError:
+                pass
+            try:
+                self._scanner.reset_scanner()
+            except AttributeError:
+                pass
+
+    def load_all(self, stream, _kw=enforce):  # , skip=None):
+        # type: (Union[Path, StreamTextType], Any) -> Any
+        if _kw is not enforce:
+            raise TypeError(
+                "{}.__init__() takes no positional argument but at least "
+                "one was given ({!r})".format(self.__class__.__name__, _kw)
+            )
+        if not hasattr(stream, "read") and hasattr(stream, "open"):
+            # pathlib.Path() instance
+            with stream.open("r") as fp:
+                for d in self.load_all(fp, _kw=enforce):
+                    yield d
+                return
+        # if skip is None:
+        #     skip = []
+        # elif isinstance(skip, int):
+        #     skip = [skip]
+        constructor, parser = self.get_constructor_parser(stream)
+        try:
+            while constructor.check_data():
+                yield constructor.get_data()
+        finally:
+            parser.dispose()
+            try:
+                self._reader.reset_reader()
+            except AttributeError:
+                pass
+            try:
+                self._scanner.reset_scanner()
+            except AttributeError:
+                pass
+
+    def get_constructor_parser(self, stream):
+        # type: (StreamTextType) -> Any
+        """
+        the old cyaml needs special setup, and therefore needs the stream
+        """
+        if self.Parser is not CParser:
+            if self.Reader is None:
+                self.Reader = strictyaml.ruamel.reader.Reader
+            if self.Scanner is None:
+                self.Scanner = strictyaml.ruamel.scanner.Scanner
+            self.reader.stream = stream
+        else:
+            if self.Reader is not None:
+                if self.Scanner is None:
+                    self.Scanner = strictyaml.ruamel.scanner.Scanner
+                self.Parser = strictyaml.ruamel.parser.Parser
+                self.reader.stream = stream
+            elif self.Scanner is not None:
+                if self.Reader is None:
+                    self.Reader = strictyaml.ruamel.reader.Reader
+                self.Parser = strictyaml.ruamel.parser.Parser
+                self.reader.stream = stream
+            else:
+                # combined C level reader>scanner>parser
+                # does some calls to the resolver, e.g. BaseResolver.descend_resolver
+                # if you just initialise the CParser, too much of resolver.py
+                # is actually used
+                rslvr = self.Resolver
+                # if rslvr is strictyaml.ruamel.resolver.VersionedResolver:
+                #     rslvr = strictyaml.ruamel.resolver.Resolver
+
+                class XLoader(self.Parser, self.Constructor, rslvr):  # type: ignore
+                    def __init__(
+                        selfx, stream, version=self.version, preserve_quotes=None
+                    ):
+                        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None  # NOQA
+                        CParser.__init__(selfx, stream)
+                        selfx._parser = selfx._composer = selfx
+                        self.Constructor.__init__(selfx, loader=selfx)
+                        selfx.allow_duplicate_keys = self.allow_duplicate_keys
+                        rslvr.__init__(selfx, version=version, loadumper=selfx)
+
+                self._stream = stream
+                loader = XLoader(stream)
+                return loader, loader
+        return self.constructor, self.parser
+
+    def dump(self, data, stream=None, _kw=enforce, transform=None):
+        # type: (Any, Union[Path, StreamType], Any, Any) -> Any
+        if self._context_manager:
+            if not self._output:
+                raise TypeError(
+                    "Missing output stream while dumping from context manager"
+                )
+            if _kw is not enforce:
+                raise TypeError(
+                    "{}.dump() takes one positional argument but at least "
+                    "two were given ({!r})".format(self.__class__.__name__, _kw)
+                )
+            if transform is not None:
+                raise TypeError(
+                    "{}.dump() in the context manager cannot have transform keyword "
+                    "".format(self.__class__.__name__)
+                )
+            self._context_manager.dump(data)
+        else:  # old style
+            if stream is None:
+                raise TypeError(
+                    "Need a stream argument when not dumping from context manager"
+                )
+            return self.dump_all([data], stream, _kw, transform=transform)
+
+    def dump_all(self, documents, stream, _kw=enforce, transform=None):
+        # type: (Any, Union[Path, StreamType], Any, Any) -> Any
+        if self._context_manager:
+            raise NotImplementedError
+        if _kw is not enforce:
+            raise TypeError(
+                "{}.dump(_all) takes two positional argument but at least "
+                "three were given ({!r})".format(self.__class__.__name__, _kw)
+            )
+        self._output = stream
+        self._context_manager = YAMLContextManager(self, transform=transform)
+        for data in documents:
+            self._context_manager.dump(data)
+        self._context_manager.teardown_output()
+        self._output = None
+        self._context_manager = None
+
+    def Xdump_all(self, documents, stream, _kw=enforce, transform=None):
+        # type: (Any, Union[Path, StreamType], Any, Any) -> Any
+        """
+        Serialize a sequence of Python objects into a YAML stream.
+        """
+        if not hasattr(stream, "write") and hasattr(stream, "open"):
+            # pathlib.Path() instance
+            with stream.open("w") as fp:
+                return self.dump_all(documents, fp, _kw, transform=transform)
+        if _kw is not enforce:
+            raise TypeError(
+                "{}.dump(_all) takes two positional argument but at least "
+                "three were given ({!r})".format(self.__class__.__name__, _kw)
+            )
+        # The stream should have the methods `write` and possibly `flush`.
+        if self.top_level_colon_align is True:
+            tlca = max([len(str(x)) for x in documents[0]])  # type: Any
+        else:
+            tlca = self.top_level_colon_align
+        if transform is not None:
+            fstream = stream
+            if self.encoding is None:
+                stream = StringIO()
+            else:
+                stream = BytesIO()
+        serializer, representer, emitter = self.get_serializer_representer_emitter(
+            stream, tlca
+        )
+        try:
+            self.serializer.open()
+            for data in documents:
+                try:
+                    self.representer.represent(data)
+                except AttributeError:
+                    # nprint(dir(dumper._representer))
+                    raise
+            self.serializer.close()
+        finally:
+            try:
+                self.emitter.dispose()
+            except AttributeError:
+                raise
+                # self.dumper.dispose()  # cyaml
+            delattr(self, "_serializer")
+            delattr(self, "_emitter")
+        if transform:
+            val = stream.getvalue()
+            if self.encoding:
+                val = val.decode(self.encoding)
+            if fstream is None:
+                transform(val)
+            else:
+                fstream.write(transform(val))
+        return None
+
+    def get_serializer_representer_emitter(self, stream, tlca):
+        # type: (StreamType, Any) -> Any
+        # we have only .Serializer to deal with (vs .Reader & .Scanner), much simpler
+        if self.Emitter is not CEmitter:
+            if self.Serializer is None:
+                self.Serializer = strictyaml.ruamel.serializer.Serializer
+            self.emitter.stream = stream
+            self.emitter.top_level_colon_align = tlca
+            if self.scalar_after_indicator is not None:
+                self.emitter.scalar_after_indicator = self.scalar_after_indicator
+            return self.serializer, self.representer, self.emitter
+        if self.Serializer is not None:
+            # cannot set serializer with CEmitter
+            self.Emitter = strictyaml.ruamel.emitter.Emitter
+            self.emitter.stream = stream
+            self.emitter.top_level_colon_align = tlca
+            if self.scalar_after_indicator is not None:
+                self.emitter.scalar_after_indicator = self.scalar_after_indicator
+            return self.serializer, self.representer, self.emitter
+        # C routines
+
+        rslvr = (
+            strictyaml.ruamel.resolver.BaseResolver
+            if "base" in self.typ
+            else strictyaml.ruamel.resolver.Resolver
+        )
+
+        class XDumper(CEmitter, self.Representer, rslvr):  # type: ignore
+            def __init__(
+                selfx,
+                stream,
+                default_style=None,
+                default_flow_style=None,
+                canonical=None,
+                indent=None,
+                width=None,
+                allow_unicode=None,
+                line_break=None,
+                encoding=None,
+                explicit_start=None,
+                explicit_end=None,
+                version=None,
+                tags=None,
+                block_seq_indent=None,
+                top_level_colon_align=None,
+                prefix_colon=None,
+            ):
+                # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None   # NOQA
+                CEmitter.__init__(
+                    selfx,
+                    stream,
+                    canonical=canonical,
+                    indent=indent,
+                    width=width,
+                    encoding=encoding,
+                    allow_unicode=allow_unicode,
+                    line_break=line_break,
+                    explicit_start=explicit_start,
+                    explicit_end=explicit_end,
+                    version=version,
+                    tags=tags,
+                )
+                selfx._emitter = selfx._serializer = selfx._representer = selfx
+                self.Representer.__init__(
+                    selfx,
+                    default_style=default_style,
+                    default_flow_style=default_flow_style,
+                )
+                rslvr.__init__(selfx)
+
+        self._stream = stream
+        dumper = XDumper(
+            stream,
+            default_style=self.default_style,
+            default_flow_style=self.default_flow_style,
+            canonical=self.canonical,
+            indent=self.old_indent,
+            width=self.width,
+            allow_unicode=self.allow_unicode,
+            line_break=self.line_break,
+            explicit_start=self.explicit_start,
+            explicit_end=self.explicit_end,
+            version=self.version,
+            tags=self.tags,
+        )
+        self._emitter = self._serializer = dumper
+        return dumper, dumper, dumper
+
+    # basic types
+    def map(self, **kw):
+        # type: (Any) -> Any
+        if "rt" in self.typ:
+            from strictyaml.ruamel.comments import CommentedMap
+
+            return CommentedMap(**kw)
+        else:
+            return dict(**kw)
+
+    def seq(self, *args):
+        # type: (Any) -> Any
+        if "rt" in self.typ:
+            from strictyaml.ruamel.comments import CommentedSeq
+
+            return CommentedSeq(*args)
+        else:
+            return list(*args)
+
+    # helpers
+    def official_plug_ins(self):
+        # type: () -> Any
+        bd = os.path.dirname(__file__)
+        gpbd = os.path.dirname(os.path.dirname(bd))
+        res = [x.replace(gpbd, "")[1:-3] for x in glob.glob(bd + "/*/__plug_in__.py")]
+        return res
+
+    def register_class(self, cls):
+        # type:(Any) -> Any
+        """
+        register a class for dumping/loading
+        - if it has an attribute yaml_tag, use that to register, else use '!' + the class name
+        - if it has to_yaml/from_yaml methods, use those to dump/load, else dump the
+          attributes as a mapping
+        """
+        tag = getattr(cls, "yaml_tag", "!" + cls.__name__)
+        try:
+            self.representer.add_representer(cls, cls.to_yaml)
+        except AttributeError:
+
+            def t_y(representer, data):
+                # type: (Any, Any) -> Any
+                return representer.represent_yaml_object(
+                    tag, data, cls, flow_style=representer.default_flow_style
+                )
+
+            self.representer.add_representer(cls, t_y)
+        try:
+            self.constructor.add_constructor(tag, cls.from_yaml)
+        except AttributeError:
+
+            def f_y(constructor, node):
+                # type: (Any, Any) -> Any
+                return constructor.construct_yaml_object(node, cls)
+
+            self.constructor.add_constructor(tag, f_y)
+        return cls
+
+    def parse(self, stream):
+        # type: (StreamTextType) -> Any
+        """
+        Parse a YAML stream and produce parsing events.
+        """
+        _, parser = self.get_constructor_parser(stream)
+        try:
+            while parser.check_event():
+                yield parser.get_event()
+        finally:
+            parser.dispose()
+            try:
+                self._reader.reset_reader()
+            except AttributeError:
+                pass
+            try:
+                self._scanner.reset_scanner()
+            except AttributeError:
+                pass
+
+    # ### context manager
+
+    def __enter__(self):
+        # type: () -> Any
+        self._context_manager = YAMLContextManager(self)
+        return self
+
+    def __exit__(self, typ, value, traceback):
+        # type: (Any, Any, Any) -> None
+        if typ:
+            nprint("typ", typ)
+        self._context_manager.teardown_output()
+        # self._context_manager.teardown_input()
+        self._context_manager = None
+
+    # ### backwards compatibility
+    def _indent(self, mapping=None, sequence=None, offset=None):
+        # type: (Any, Any, Any) -> None
+        if mapping is not None:
+            self.map_indent = mapping
+        if sequence is not None:
+            self.sequence_indent = sequence
+        if offset is not None:
+            self.sequence_dash_offset = offset
+
+    @property
+    def indent(self):
+        # type: () -> Any
+        return self._indent
+
+    @indent.setter
+    def indent(self, val):
+        # type: (Any) -> None
+        self.old_indent = val
+
+    @property
+    def block_seq_indent(self):
+        # type: () -> Any
+        return self.sequence_dash_offset
+
+    @block_seq_indent.setter
+    def block_seq_indent(self, val):
+        # type: (Any) -> None
+        self.sequence_dash_offset = val
+
+    def compact(self, seq_seq=None, seq_map=None):
+        # type: (Any, Any) -> None
+        self.compact_seq_seq = seq_seq
+        self.compact_seq_map = seq_map
+
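+# Usage sketch for the YAML object above (round-trip by default; assumes an
+# open text stream or a str/pathlib.Path input):
+#
+#     import sys
+#     yaml = YAML()
+#     data = yaml.load("a: 1  # one\nb: [2, 3]\n")
+#     data['a'] = 99
+#     yaml.dump(data, sys.stdout)   # the comment on 'a' is preserved
+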
+
+class YAMLContextManager(object):
+    def __init__(self, yaml, transform=None):
+        # type: (Any, Any) -> None  # used to be: (Any, Optional[Callable]) -> None
+        self._yaml = yaml
+        self._output_inited = False
+        self._output_path = None
+        self._output = self._yaml._output
+        self._transform = transform
+
+        # self._input_inited = False
+        # self._input = input
+        # self._input_path = None
+        # self._transform = yaml.transform
+        # self._fstream = None
+
+        if not hasattr(self._output, "write") and hasattr(self._output, "open"):
+            # pathlib.Path() instance, open with the same mode
+            self._output_path = self._output
+            self._output = self._output_path.open("w")
+
+        # if not hasattr(self._stream, 'write') and hasattr(stream, 'open'):
+        # if not hasattr(self._input, 'read') and hasattr(self._input, 'open'):
+        #    # pathlib.Path() instance, open with the same mode
+        #    self._input_path = self._input
+        #    self._input = self._input_path.open('r')
+
+        if self._transform is not None:
+            self._fstream = self._output
+            if self._yaml.encoding is None:
+                self._output = StringIO()
+            else:
+                self._output = BytesIO()
+
+    def teardown_output(self):
+        # type: () -> None
+        if self._output_inited:
+            self._yaml.serializer.close()
+        else:
+            return
+        try:
+            self._yaml.emitter.dispose()
+        except AttributeError:
+            raise
+            # self.dumper.dispose()  # cyaml
+        try:
+            delattr(self._yaml, "_serializer")
+            delattr(self._yaml, "_emitter")
+        except AttributeError:
+            raise
+        if self._transform:
+            val = self._output.getvalue()
+            if self._yaml.encoding:
+                val = val.decode(self._yaml.encoding)
+            if self._fstream is None:
+                self._transform(val)
+            else:
+                self._fstream.write(self._transform(val))
+                self._fstream.flush()
+                self._output = self._fstream  # maybe not necessary
+        if self._output_path is not None:
+            self._output.close()
+
+    def init_output(self, first_data):
+        # type: (Any) -> None
+        if self._yaml.top_level_colon_align is True:
+            tlca = max([len(str(x)) for x in first_data])  # type: Any
+        else:
+            tlca = self._yaml.top_level_colon_align
+        self._yaml.get_serializer_representer_emitter(self._output, tlca)
+        self._yaml.serializer.open()
+        self._output_inited = True
+
+    def dump(self, data):
+        # type: (Any) -> None
+        if not self._output_inited:
+            self.init_output(data)
+        try:
+            self._yaml.representer.represent(data)
+        except AttributeError:
+            # nprint(dir(dumper._representer))
+            raise
+
+    # def teardown_input(self):
+    #     pass
+    #
+    # def init_input(self):
+    #     # set the constructor and parser on YAML() instance
+    #     self._yaml.get_constructor_parser(stream)
+    #
+    # def load(self):
+    #     if not self._input_inited:
+    #         self.init_input()
+    #     try:
+    #         while self._yaml.constructor.check_data():
+    #             yield self._yaml.constructor.get_data()
+    #     finally:
+    #         parser.dispose()
+    #         try:
+    #             self._reader.reset_reader()  # type: ignore
+    #         except AttributeError:
+    #             pass
+    #         try:
+    #             self._scanner.reset_scanner()  # type: ignore
+    #         except AttributeError:
+    #             pass
+
+
+def yaml_object(yml):
+    # type: (Any) -> Any
+    """decorator for classes that needs to dump/load objects
+    The tag for such objects is taken from the class attribute yaml_tag (or the
+    class name in lowercase in case unavailable)
+    If methods to_yaml and/or from_yaml are available, these are called for dumping resp.
+    loading, default routines (dumping a mapping of the attributes) used otherwise.
+    """
+
+    def yo_deco(cls):
+        # type: (Any) -> Any
+        tag = getattr(cls, "yaml_tag", "!" + cls.__name__)
+        try:
+            yml.representer.add_representer(cls, cls.to_yaml)
+        except AttributeError:
+
+            def t_y(representer, data):
+                # type: (Any, Any) -> Any
+                return representer.represent_yaml_object(
+                    tag, data, cls, flow_style=representer.default_flow_style
+                )
+
+            yml.representer.add_representer(cls, t_y)
+        try:
+            yml.constructor.add_constructor(tag, cls.from_yaml)
+        except AttributeError:
+
+            def f_y(constructor, node):
+                # type: (Any, Any) -> Any
+                return constructor.construct_yaml_object(node, cls)
+
+            yml.constructor.add_constructor(tag, f_y)
+        return cls
+
+    return yo_deco
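+
+
+# Usage sketch for the decorator above (hypothetical User class and tag):
+#
+#     import sys
+#     yml = YAML()
+#
+#     @yaml_object(yml)
+#     class User(object):
+#         yaml_tag = '!User'
+#         def __init__(self, name):
+#             self.name = name
+#
+#     yml.dump(User('ann'), sys.stdout)   # dumps an attribute mapping tagged !User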
+
+
+########################################################################################
+
+
+def scan(stream, Loader=Loader):
+    # type: (StreamTextType, Any) -> Any
+    """
+    Scan a YAML stream and produce scanning tokens.
+    """
+    loader = Loader(stream)
+    try:
+        while loader.scanner.check_token():
+            yield loader.scanner.get_token()
+    finally:
+        loader._parser.dispose()
+
+
+def parse(stream, Loader=Loader):
+    # type: (StreamTextType, Any) -> Any
+    """
+    Parse a YAML stream and produce parsing events.
+    """
+    loader = Loader(stream)
+    try:
+        while loader._parser.check_event():
+            yield loader._parser.get_event()
+    finally:
+        loader._parser.dispose()
+
+
+def compose(stream, Loader=Loader):
+    # type: (StreamTextType, Any) -> Any
+    """
+    Parse the first YAML document in a stream
+    and produce the corresponding representation tree.
+    """
+    loader = Loader(stream)
+    try:
+        return loader.get_single_node()
+    finally:
+        loader.dispose()
+
+
+def compose_all(stream, Loader=Loader):
+    # type: (StreamTextType, Any) -> Any
+    """
+    Parse all YAML documents in a stream
+    and produce corresponding representation trees.
+    """
+    loader = Loader(stream)
+    try:
+        while loader.check_node():
+            yield loader._composer.get_node()
+    finally:
+        loader._parser.dispose()
+
+
+def load(stream, Loader=None, version=None, preserve_quotes=None):
+    # type: (StreamTextType, Any, Optional[VersionType], Any) -> Any
+    """
+    Parse the first YAML document in a stream
+    and produce the corresponding Python object.
+    """
+    if Loader is None:
+        warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2)
+        Loader = UnsafeLoader
+    loader = Loader(stream, version, preserve_quotes=preserve_quotes)
+    try:
+        return loader._constructor.get_single_data()
+    finally:
+        loader._parser.dispose()
+        try:
+            loader._reader.reset_reader()
+        except AttributeError:
+            pass
+        try:
+            loader._scanner.reset_scanner()
+        except AttributeError:
+            pass
+
+
+def load_all(stream, Loader=None, version=None, preserve_quotes=None):
+    # type: (Optional[StreamTextType], Any, Optional[VersionType], Optional[bool]) -> Any  # NOQA
+    """
+    Parse all YAML documents in a stream
+    and produce corresponding Python objects.
+    """
+    if Loader is None:
+        warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2)
+        Loader = UnsafeLoader
+    loader = Loader(stream, version, preserve_quotes=preserve_quotes)
+    try:
+        while loader._constructor.check_data():
+            yield loader._constructor.get_data()
+    finally:
+        loader._parser.dispose()
+        try:
+            loader._reader.reset_reader()
+        except AttributeError:
+            pass
+        try:
+            loader._scanner.reset_scanner()
+        except AttributeError:
+            pass
+
+
+def safe_load(stream, version=None):
+    # type: (StreamTextType, Optional[VersionType]) -> Any
+    """
+    Parse the first YAML document in a stream
+    and produce the corresponding Python object.
+    Resolve only basic YAML tags.
+    """
+    return load(stream, SafeLoader, version)
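+
+
+# Usage sketch: safe_load is the entry point to prefer for untrusted input;
+# calling load() without an explicit Loader triggers the UnsafeLoaderWarning above.
+#
+#     assert safe_load("a: 1\n") == {'a': 1}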
+
+
+def safe_load_all(stream, version=None):
+    # type: (StreamTextType, Optional[VersionType]) -> Any
+    """
+    Parse all YAML documents in a stream
+    and produce corresponding Python objects.
+    Resolve only basic YAML tags.
+    """
+    return load_all(stream, SafeLoader, version)
+
+
+def round_trip_load(stream, version=None, preserve_quotes=None):
+    # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any
+    """
+    Parse the first YAML document in a stream
+    and produce the corresponding Python object.
+    Resolve only basic YAML tags.
+    """
+    return load(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes)
+
+
+def round_trip_load_all(stream, version=None, preserve_quotes=None):
+    # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any
+    """
+    Parse all YAML documents in a stream
+    and produce corresponding Python objects.
+    Resolve only basic YAML tags.
+    """
+    return load_all(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes)
+
+
+def emit(
+    events,
+    stream=None,
+    Dumper=Dumper,
+    canonical=None,
+    indent=None,
+    width=None,
+    allow_unicode=None,
+    line_break=None,
+):
+    # type: (Any, Optional[StreamType], Any, Optional[bool], Union[int, None], Optional[int], Optional[bool], Any) -> Any  # NOQA
+    """
+    Emit YAML parsing events into a stream.
+    If stream is None, return the produced string instead.
+    """
+    getvalue = None
+    if stream is None:
+        stream = StringIO()
+        getvalue = stream.getvalue
+    dumper = Dumper(
+        stream,
+        canonical=canonical,
+        indent=indent,
+        width=width,
+        allow_unicode=allow_unicode,
+        line_break=line_break,
+    )
+    try:
+        for event in events:
+            dumper.emit(event)
+    finally:
+        try:
+            dumper._emitter.dispose()
+        except AttributeError:
+            raise
+            # dumper.dispose()  # cyaml
+    if getvalue is not None:
+        return getvalue()
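+
+
+# Usage sketch: parse() and emit() chain into a minimal identity pipeline,
+# re-emitting a document event-for-event:
+#
+#     print(emit(parse("a: 1\n")), end='')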
+
+
+enc = None if PY3 else "utf-8"
+
+
+def serialize_all(
+    nodes,
+    stream=None,
+    Dumper=Dumper,
+    canonical=None,
+    indent=None,
+    width=None,
+    allow_unicode=None,
+    line_break=None,
+    encoding=enc,
+    explicit_start=None,
+    explicit_end=None,
+    version=None,
+    tags=None,
+):
+    # type: (Any, Optional[StreamType], Any, Any, Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any) -> Any # NOQA
+    """
+    Serialize a sequence of representation trees into a YAML stream.
+    If stream is None, return the produced string instead.
+    """
+    getvalue = None
+    if stream is None:
+        if encoding is None:
+            stream = StringIO()
+        else:
+            stream = BytesIO()
+        getvalue = stream.getvalue
+    dumper = Dumper(
+        stream,
+        canonical=canonical,
+        indent=indent,
+        width=width,
+        allow_unicode=allow_unicode,
+        line_break=line_break,
+        encoding=encoding,
+        version=version,
+        tags=tags,
+        explicit_start=explicit_start,
+        explicit_end=explicit_end,
+    )
+    try:
+        dumper._serializer.open()
+        for node in nodes:
+            dumper.serialize(node)
+        dumper._serializer.close()
+    finally:
+        try:
+            dumper._emitter.dispose()
+        except AttributeError:
+            raise
+            # dumper.dispose()  # cyaml
+    if getvalue is not None:
+        return getvalue()
+
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+    # type: (Any, Optional[StreamType], Any, Any) -> Any
+    """
+    Serialize a representation tree into a YAML stream.
+    If stream is None, return the produced string instead.
+    """
+    return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+
+def dump_all(
+    documents,
+    stream=None,
+    Dumper=Dumper,
+    default_style=None,
+    default_flow_style=None,
+    canonical=None,
+    indent=None,
+    width=None,
+    allow_unicode=None,
+    line_break=None,
+    encoding=enc,
+    explicit_start=None,
+    explicit_end=None,
+    version=None,
+    tags=None,
+    block_seq_indent=None,
+    top_level_colon_align=None,
+    prefix_colon=None,
+):
+    # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> Optional[str]   # NOQA
+    """
+    Serialize a sequence of Python objects into a YAML stream.
+    If stream is None, return the produced string instead.
+    """
+    getvalue = None
+    if top_level_colon_align is True:
+        top_level_colon_align = max([len(str(x)) for x in documents[0]])
+    if stream is None:
+        if encoding is None:
+            stream = StringIO()
+        else:
+            stream = BytesIO()
+        getvalue = stream.getvalue
+    dumper = Dumper(
+        stream,
+        default_style=default_style,
+        default_flow_style=default_flow_style,
+        canonical=canonical,
+        indent=indent,
+        width=width,
+        allow_unicode=allow_unicode,
+        line_break=line_break,
+        encoding=encoding,
+        explicit_start=explicit_start,
+        explicit_end=explicit_end,
+        version=version,
+        tags=tags,
+        block_seq_indent=block_seq_indent,
+        top_level_colon_align=top_level_colon_align,
+        prefix_colon=prefix_colon,
+    )
+    try:
+        dumper._serializer.open()
+        for data in documents:
+            try:
+                dumper._representer.represent(data)
+            except AttributeError:
+                # nprint(dir(dumper._representer))
+                raise
+        dumper._serializer.close()
+    finally:
+        try:
+            dumper._emitter.dispose()
+        except AttributeError:
+            raise
+            # dumper.dispose()  # cyaml
+    if getvalue is not None:
+        return getvalue()
+    return None
+
+
+def dump(
+    data,
+    stream=None,
+    Dumper=Dumper,
+    default_style=None,
+    default_flow_style=None,
+    canonical=None,
+    indent=None,
+    width=None,
+    allow_unicode=None,
+    line_break=None,
+    encoding=enc,
+    explicit_start=None,
+    explicit_end=None,
+    version=None,
+    tags=None,
+    block_seq_indent=None,
+):
+    # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any) -> Optional[str]   # NOQA
+    """
+    Serialize a Python object into a YAML stream.
+    If stream is None, return the produced string instead.
+
+    default_style ∈ None, '', '"', "'", '|', '>'
+
+    """
+    return dump_all(
+        [data],
+        stream,
+        Dumper=Dumper,
+        default_style=default_style,
+        default_flow_style=default_flow_style,
+        canonical=canonical,
+        indent=indent,
+        width=width,
+        allow_unicode=allow_unicode,
+        line_break=line_break,
+        encoding=encoding,
+        explicit_start=explicit_start,
+        explicit_end=explicit_end,
+        version=version,
+        tags=tags,
+        block_seq_indent=block_seq_indent,
+    )
+
+
+def safe_dump_all(documents, stream=None, **kwds):
+    # type: (Any, Optional[StreamType], Any) -> Optional[str]
+    """
+    Serialize a sequence of Python objects into a YAML stream.
+    Produce only basic YAML tags.
+    If stream is None, return the produced string instead.
+    """
+    return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+
+def safe_dump(data, stream=None, **kwds):
+    # type: (Any, Optional[StreamType], Any) -> Optional[str]
+    """
+    Serialize a Python object into a YAML stream.
+    Produce only basic YAML tags.
+    If stream is None, return the produced string instead.
+    """
+    return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
+
+def round_trip_dump(
+    data,
+    stream=None,
+    Dumper=RoundTripDumper,
+    default_style=None,
+    default_flow_style=None,
+    canonical=None,
+    indent=None,
+    width=None,
+    allow_unicode=None,
+    line_break=None,
+    encoding=enc,
+    explicit_start=None,
+    explicit_end=None,
+    version=None,
+    tags=None,
+    block_seq_indent=None,
+    top_level_colon_align=None,
+    prefix_colon=None,
+):
+    # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any, Any, Any) -> Optional[str]   # NOQA
+    allow_unicode = True if allow_unicode is None else allow_unicode
+    return dump_all(
+        [data],
+        stream,
+        Dumper=Dumper,
+        default_style=default_style,
+        default_flow_style=default_flow_style,
+        canonical=canonical,
+        indent=indent,
+        width=width,
+        allow_unicode=allow_unicode,
+        line_break=line_break,
+        encoding=encoding,
+        explicit_start=explicit_start,
+        explicit_end=explicit_end,
+        version=version,
+        tags=tags,
+        block_seq_indent=block_seq_indent,
+        top_level_colon_align=top_level_colon_align,
+        prefix_colon=prefix_colon,
+    )
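+
+
+# Usage sketch: a load/modify/dump cycle with the round-trip helpers above
+# keeps comments and key order (and, with preserve_quotes, quoting style):
+#
+#     d = round_trip_load("a: 1  # one\nb: 2\n", preserve_quotes=True)
+#     d['b'] = 3
+#     print(round_trip_dump(d), end='')   # the comment on 'a' survives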
+
+
+# Loader/Dumper are no longer composites; to get to the associated
+# Resolver()/Representer(), etc., you need to instantiate the class
+
+
+def add_implicit_resolver(
+    tag, regexp, first=None, Loader=None, Dumper=None, resolver=Resolver
+):
+    # type: (Any, Any, Any, Any, Any, Any) -> None
+    """
+    Add an implicit scalar detector.
+    If an implicit scalar value matches the given regexp,
+    the corresponding tag is assigned to the scalar.
+    first is a sequence of possible initial characters or None.
+    """
+    if Loader is None and Dumper is None:
+        resolver.add_implicit_resolver(tag, regexp, first)
+        return
+    if Loader:
+        if hasattr(Loader, "add_implicit_resolver"):
+            Loader.add_implicit_resolver(tag, regexp, first)
+        elif issubclass(
+            Loader,
+            (BaseLoader, SafeLoader, strictyaml.ruamel.loader.Loader, RoundTripLoader),
+        ):
+            Resolver.add_implicit_resolver(tag, regexp, first)
+        else:
+            raise NotImplementedError
+    if Dumper:
+        if hasattr(Dumper, "add_implicit_resolver"):
+            Dumper.add_implicit_resolver(tag, regexp, first)
+        elif issubclass(
+            Dumper,
+            (BaseDumper, SafeDumper, strictyaml.ruamel.dumper.Dumper, RoundTripDumper),
+        ):
+            Resolver.add_implicit_resolver(tag, regexp, first)
+        else:
+            raise NotImplementedError
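+
+
+# Usage sketch (hypothetical '!triple' tag): with Loader/Dumper left as None
+# the resolver class itself is extended, so scalars matching the regexp get
+# the custom tag on both load and dump:
+#
+#     import re
+#     add_implicit_resolver(
+#         '!triple', re.compile(r'^\d+-\d+-\d+$'), first=list('0123456789')
+#     )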
+
+
+# this code currently not tested
+def add_path_resolver(
+    tag, path, kind=None, Loader=None, Dumper=None, resolver=Resolver
+):
+    # type: (Any, Any, Any, Any, Any, Any) -> None
+    """
+    Add a path based resolver for the given tag.
+    A path is a list of keys that forms a path
+    to a node in the representation tree.
+    Keys can be string values, integers, or None.
+    """
+    if Loader is None and Dumper is None:
+        resolver.add_path_resolver(tag, path, kind)
+        return
+    if Loader:
+        if hasattr(Loader, "add_path_resolver"):
+            Loader.add_path_resolver(tag, path, kind)
+        elif issubclass(
+            Loader,
+            (BaseLoader, SafeLoader, strictyaml.ruamel.loader.Loader, RoundTripLoader),
+        ):
+            Resolver.add_path_resolver(tag, path, kind)
+        else:
+            raise NotImplementedError
+    if Dumper:
+        if hasattr(Dumper, "add_path_resolver"):
+            Dumper.add_path_resolver(tag, path, kind)
+        elif issubclass(
+            Dumper,
+            (BaseDumper, SafeDumper, strictyaml.ruamel.dumper.Dumper, RoundTripDumper),
+        ):
+            Resolver.add_path_resolver(tag, path, kind)
+        else:
+            raise NotImplementedError
+
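+# Hedged sketch for add_path_resolver (note the untested warning above; the
+# !ports tag is an assumption): tag the node found under the top-level key
+# 'ports' in the representation tree.
+#
+#     add_path_resolver(u"!ports", ["ports"], kind=list)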
+
+def add_constructor(tag, object_constructor, Loader=None, constructor=Constructor):
+    # type: (Any, Any, Any, Any) -> None
+    """
+    Add an object constructor for the given tag.
+    object_constructor is a function that accepts a Loader instance
+    and a node object and produces the corresponding Python object.
+    """
+    if Loader is None:
+        constructor.add_constructor(tag, object_constructor)
+    else:
+        if hasattr(Loader, "add_constructor"):
+            Loader.add_constructor(tag, object_constructor)
+            return
+        if issubclass(Loader, BaseLoader):
+            BaseConstructor.add_constructor(tag, object_constructor)
+        elif issubclass(Loader, SafeLoader):
+            SafeConstructor.add_constructor(tag, object_constructor)
+        elif issubclass(Loader, strictyaml.ruamel.loader.Loader):
+            Constructor.add_constructor(tag, object_constructor)
+        elif issubclass(Loader, RoundTripLoader):
+            RoundTripConstructor.add_constructor(tag, object_constructor)
+        else:
+            raise NotImplementedError
+
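+# Hedged sketch for add_constructor (Point and !point are assumptions for
+# illustration): turn nodes tagged !point into Python objects.
+#
+#     class Point(object):
+#         def __init__(self, x, y):
+#             self.x, self.y = x, y
+#
+#     def construct_point(loader, node):
+#         x, y = loader.construct_sequence(node)
+#         return Point(x, y)
+#
+#     add_constructor(u"!point", construct_point, Loader=SafeLoader)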
+
+def add_multi_constructor(
+    tag_prefix, multi_constructor, Loader=None, constructor=Constructor
+):
+    # type: (Any, Any, Any, Any) -> None
+    """
+    Add a multi-constructor for the given tag prefix.
+    Multi-constructor is called for a node if its tag starts with tag_prefix.
+    Multi-constructor accepts a Loader instance, a tag suffix,
+    and a node object and produces the corresponding Python object.
+    """
+    if Loader is None:
+        constructor.add_multi_constructor(tag_prefix, multi_constructor)
+    else:
+        if False and hasattr(Loader, "add_multi_constructor"):  # intentionally disabled
+            Loader.add_multi_constructor(tag_prefix, multi_constructor)
+            return
+        if issubclass(Loader, BaseLoader):
+            BaseConstructor.add_multi_constructor(tag_prefix, multi_constructor)
+        elif issubclass(Loader, SafeLoader):
+            SafeConstructor.add_multi_constructor(tag_prefix, multi_constructor)
+        elif issubclass(Loader, strictyaml.ruamel.loader.Loader):
+            Constructor.add_multi_constructor(tag_prefix, multi_constructor)
+        elif issubclass(Loader, RoundTripLoader):
+            RoundTripConstructor.add_multi_constructor(tag_prefix, multi_constructor)
+        else:
+            raise NotImplementedError
+
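+# Hedged sketch for add_multi_constructor (the !env: prefix is an assumption):
+# one callback serves every tag starting with the prefix and receives the
+# remaining suffix as a separate argument.
+#
+#     import os
+#
+#     def construct_env(loader, tag_suffix, node):
+#         return os.environ.get(tag_suffix, loader.construct_scalar(node))
+#
+#     add_multi_constructor(u"!env:", construct_env, Loader=SafeLoader)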
+
+def add_representer(
+    data_type, object_representer, Dumper=None, representer=Representer
+):
+    # type: (Any, Any, Any, Any) -> None
+    """
+    Add a representer for the given type.
+    object_representer is a function accepting a Dumper instance
+    and an instance of the given data type
+    and producing the corresponding representation node.
+    """
+    if Dumper is None:
+        representer.add_representer(data_type, object_representer)
+    else:
+        if hasattr(Dumper, "add_representer"):
+            Dumper.add_representer(data_type, object_representer)
+            return
+        if issubclass(Dumper, BaseDumper):
+            BaseRepresenter.add_representer(data_type, object_representer)
+        elif issubclass(Dumper, SafeDumper):
+            SafeRepresenter.add_representer(data_type, object_representer)
+        elif issubclass(Dumper, strictyaml.ruamel.dumper.Dumper):
+            Representer.add_representer(data_type, object_representer)
+        elif issubclass(Dumper, RoundTripDumper):
+            RoundTripRepresenter.add_representer(data_type, object_representer)
+        else:
+            raise NotImplementedError
+
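+# Hedged sketch for add_representer, the dumping counterpart of the Point
+# constructor sketched above (Point and !point remain assumptions):
+#
+#     def represent_point(dumper, data):
+#         return dumper.represent_sequence(u"!point", [data.x, data.y])
+#
+#     add_representer(Point, represent_point, Dumper=SafeDumper)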
+
+# this code currently not tested
+def add_multi_representer(
+    data_type, multi_representer, Dumper=None, representer=Representer
+):
+    # type: (Any, Any, Any, Any) -> None
+    """
+    Add a representer for the given type.
+    multi_representer is a function accepting a Dumper instance
+    and an instance of the given data type or subtype
+    and producing the corresponding representation node.
+    """
+    if Dumper is None:
+        representer.add_multi_representer(data_type, multi_representer)
+    else:
+        if hasattr(Dumper, "add_multi_representer"):
+            Dumper.add_multi_representer(data_type, multi_representer)
+            return
+        if issubclass(Dumper, BaseDumper):
+            BaseRepresenter.add_multi_representer(data_type, multi_representer)
+        elif issubclass(Dumper, SafeDumper):
+            SafeRepresenter.add_multi_representer(data_type, multi_representer)
+        elif issubclass(Dumper, strictyaml.ruamel.dumper.Dumper):
+            Representer.add_multi_representer(data_type, multi_representer)
+        elif issubclass(Dumper, RoundTripDumper):
+            RoundTripRepresenter.add_multi_representer(data_type, multi_representer)
+        else:
+            raise NotImplementedError
+
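+# Hedged sketch for add_multi_representer (Shape is an assumed base class):
+# unlike add_representer, the registration also matches subclasses, so one
+# entry can cover a whole hierarchy.
+#
+#     def represent_shape(dumper, data):
+#         return dumper.represent_mapping(u"!shape", data.__dict__)
+#
+#     add_multi_representer(Shape, represent_shape, Dumper=SafeDumper)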
+
+class YAMLObjectMetaclass(type):
+    """
+    The metaclass for YAMLObject.
+    """
+
+    def __init__(cls, name, bases, kwds):
+        # type: (Any, Any, Any) -> None
+        super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+        if "yaml_tag" in kwds and kwds["yaml_tag"] is not None:
+            cls.yaml_constructor.add_constructor(cls.yaml_tag, cls.from_yaml)  # type: ignore
+            cls.yaml_representer.add_representer(cls, cls.to_yaml)  # type: ignore
+
+
+class YAMLObject(with_metaclass(YAMLObjectMetaclass)):  # type: ignore
+    """
+    An object that can dump itself to a YAML stream
+    and load itself from a YAML stream.
+    """
+
+    __slots__ = ()  # no direct instantiation, so allow immutable subclasses
+
+    yaml_constructor = Constructor
+    yaml_representer = Representer
+
+    yaml_tag = None  # type: Any
+    yaml_flow_style = None  # type: Any
+
+    @classmethod
+    def from_yaml(cls, constructor, node):
+        # type: (Any, Any) -> Any
+        """
+        Convert a representation node to a Python object.
+        """
+        return constructor.construct_yaml_object(node, cls)
+
+    @classmethod
+    def to_yaml(cls, representer, data):
+        # type: (Any, Any) -> Any
+        """
+        Convert a Python object to a representation node.
+        """
+        return representer.represent_yaml_object(
+            cls.yaml_tag, data, cls, flow_style=cls.yaml_flow_style
+        )
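+
+
+# Hedged sketch of a YAMLObject subclass (the Monster class and its fields
+# are assumptions for illustration): assigning yaml_tag makes the metaclass
+# register from_yaml/to_yaml with the default Constructor/Representer.
+#
+#     class Monster(YAMLObject):
+#         yaml_tag = u"!Monster"
+#
+#         def __init__(self, name, hp):
+#             self.name = name
+#             self.hp = hp
+#
+#     # "!Monster {name: Dragon, hp: 42}" now loads as a Monster instance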
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/nodes.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/nodes.py
new file mode 100644
index 00000000..c47a7b67
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/nodes.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+from __future__ import print_function
+
+import sys
+from .compat import string_types
+
+if False:  # MYPY
+    from typing import Dict, Any, Text  # NOQA
+
+
+class Node(object):
+    __slots__ = "tag", "value", "start_mark", "end_mark", "comment", "anchor"
+
+    def __init__(self, tag, value, start_mark, end_mark, comment=None, anchor=None):
+        # type: (Any, Any, Any, Any, Any, Any) -> None
+        self.tag = tag
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.comment = comment
+        self.anchor = anchor
+
+    def __repr__(self):
+        # type: () -> str
+        value = self.value
+        # if isinstance(value, list):
+        #     if len(value) == 0:
+        #         value = '<empty>'
+        #     elif len(value) == 1:
+        #         value = '<1 item>'
+        #     else:
+        #         value = '<%d items>' % len(value)
+        # else:
+        #     if len(value) > 75:
+        #         value = repr(value[:70]+u' ... ')
+        #     else:
+        #         value = repr(value)
+        value = repr(value)
+        return "%s(tag=%r, value=%s)" % (self.__class__.__name__, self.tag, value)
+
+    def dump(self, indent=0):
+        # type: (int) -> None
+        if isinstance(self.value, string_types):
+            sys.stdout.write(
+                "{}{}(tag={!r}, value={!r})\n".format(
+                    "  " * indent, self.__class__.__name__, self.tag, self.value
+                )
+            )
+            if self.comment:
+                sys.stdout.write(
+                    "    {}comment: {})\n".format("  " * indent, self.comment)
+                )
+            return
+        sys.stdout.write(
+            "{}{}(tag={!r})\n".format("  " * indent, self.__class__.__name__, self.tag)
+        )
+        if self.comment:
+            sys.stdout.write("    {}comment: {})\n".format("  " * indent, self.comment))
+        for v in self.value:
+            if isinstance(v, tuple):
+                for v1 in v:
+                    v1.dump(indent + 1)
+            elif isinstance(v, Node):
+                v.dump(indent + 1)
+            else:
+                sys.stdout.write("Node value type? {}\n".format(type(v)))
+
+
+class ScalarNode(Node):
+    """
+    styles:
+      ? -> set() ? key, no value
+      " -> double quoted
+      ' -> single quoted
+      | -> literal style
+      > -> folding style
+    """
+
+    __slots__ = ("style",)
+    id = "scalar"
+
+    def __init__(
+        self,
+        tag,
+        value,
+        start_mark=None,
+        end_mark=None,
+        style=None,
+        comment=None,
+        anchor=None,
+    ):
+        # type: (Any, Any, Any, Any, Any, Any, Any) -> None
+        Node.__init__(
+            self, tag, value, start_mark, end_mark, comment=comment, anchor=anchor
+        )
+        self.style = style
+
+
+class CollectionNode(Node):
+    __slots__ = ("flow_style",)
+
+    def __init__(
+        self,
+        tag,
+        value,
+        start_mark=None,
+        end_mark=None,
+        flow_style=None,
+        comment=None,
+        anchor=None,
+    ):
+        # type: (Any, Any, Any, Any, Any, Any, Any) -> None
+        Node.__init__(self, tag, value, start_mark, end_mark, comment=comment)
+        self.flow_style = flow_style
+        self.anchor = anchor
+
+
+class SequenceNode(CollectionNode):
+    __slots__ = ()
+    id = "sequence"
+
+
+class MappingNode(CollectionNode):
+    __slots__ = ("merge",)
+    id = "mapping"
+
+    def __init__(
+        self,
+        tag,
+        value,
+        start_mark=None,
+        end_mark=None,
+        flow_style=None,
+        comment=None,
+        anchor=None,
+    ):
+        # type: (Any, Any, Any, Any, Any, Any, Any) -> None
+        CollectionNode.__init__(
+            self, tag, value, start_mark, end_mark, flow_style, comment, anchor
+        )
+        self.merge = None
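+
+
+# Hedged construction sketch (marks omitted, which the defaults above allow):
+#
+#     node = MappingNode(
+#         u"tag:yaml.org,2002:map",
+#         [(ScalarNode(u"tag:yaml.org,2002:str", u"a"),
+#           ScalarNode(u"tag:yaml.org,2002:int", u"1"))],
+#     )
+#     node.dump()  # prints the indented node tree to stdout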
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/parser.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/parser.py
new file mode 100644
index 00000000..8c004b9f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/parser.py
@@ -0,0 +1,844 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream            ::= STREAM-START implicit_document? explicit_document*
+#                                                                   STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+#                       ALIAS
+#                       | properties (block_content |
+#                                                   indentless_block_sequence)?
+#                       | block_content
+#                       | indentless_block_sequence
+# block_node        ::= ALIAS
+#                       | properties block_content?
+#                       | block_content
+# flow_node         ::= ALIAS
+#                       | properties flow_content?
+#                       | flow_content
+# properties        ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content     ::= block_collection | flow_collection | SCALAR
+# flow_content      ::= flow_collection | SCALAR
+# block_collection  ::= block_sequence | block_mapping
+# flow_collection   ::= flow_sequence | flow_mapping
+# block_sequence    ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)*
+#                                                                   BLOCK-END
+# indentless_sequence   ::= (BLOCK-ENTRY block_node?)+
+# block_mapping     ::= BLOCK-MAPPING_START
+#                       ((KEY block_node_or_indentless_sequence?)?
+#                       (VALUE block_node_or_indentless_sequence?)?)*
+#                       BLOCK-END
+# flow_sequence     ::= FLOW-SEQUENCE-START
+#                       (flow_sequence_entry FLOW-ENTRY)*
+#                       flow_sequence_entry?
+#                       FLOW-SEQUENCE-END
+# flow_sequence_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping      ::= FLOW-MAPPING-START
+#                       (flow_mapping_entry FLOW-ENTRY)*
+#                       flow_mapping_entry?
+#                       FLOW-MAPPING-END
+# flow_mapping_entry    ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START
+#                  BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START
+#                               FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR
+#               BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START
+#               FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START
+#                                                    FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START
+#                                                    FLOW-MAPPING-START KEY }
+
+# the imports below need the full package path: pkg_resources tries to load
+# parser.py from __init__.py without doing anything with the package
+# afterwards, and Jython needs the full path as well
+
+
+from strictyaml.ruamel.error import MarkedYAMLError
+from strictyaml.ruamel.tokens import *  # NOQA
+from strictyaml.ruamel.events import *  # NOQA
+from strictyaml.ruamel.scanner import Scanner, RoundTripScanner, ScannerError  # NOQA
+from strictyaml.ruamel.compat import utf8, nprint, nprintf  # NOQA
+
+if False:  # MYPY
+    from typing import Any, Dict, Optional, List  # NOQA
+
+__all__ = ["Parser", "RoundTripParser", "ParserError"]
+
+
+class ParserError(MarkedYAMLError):
+    pass
+
+
+class Parser(object):
+    # Since writing a recursive descent parser is a straightforward task, we
+    # do not give many comments here.
+
+    DEFAULT_TAGS = {u"!": u"!", u"!!": u"tag:yaml.org,2002:"}
+
+    def __init__(self, loader):
+        # type: (Any) -> None
+        self.loader = loader
+        if self.loader is not None and getattr(self.loader, "_parser", None) is None:
+            self.loader._parser = self
+        self.reset_parser()
+
+    def reset_parser(self):
+        # type: () -> None
+        # Reset the state attributes (to clear self-references)
+        self.current_event = None
+        self.tag_handles = {}  # type: Dict[Any, Any]
+        self.states = []  # type: List[Any]
+        self.marks = []  # type: List[Any]
+        self.state = self.parse_stream_start  # type: Any
+
+    def dispose(self):
+        # type: () -> None
+        self.reset_parser()
+
+    @property
+    def scanner(self):
+        # type: () -> Any
+        if hasattr(self.loader, "typ"):
+            return self.loader.scanner
+        return self.loader._scanner
+
+    @property
+    def resolver(self):
+        # type: () -> Any
+        if hasattr(self.loader, "typ"):
+            return self.loader.resolver
+        return self.loader._resolver
+
+    def check_event(self, *choices):
+        # type: (Any) -> bool
+        # Check the type of the next event.
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        if self.current_event is not None:
+            if not choices:
+                return True
+            for choice in choices:
+                if isinstance(self.current_event, choice):
+                    return True
+        return False
+
+    def peek_event(self):
+        # type: () -> Any
+        # Get the next event.
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        return self.current_event
+
+    def get_event(self):
+        # type: () -> Any
+        # Get the next event and proceed further.
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        value = self.current_event
+        self.current_event = None
+        return value
+
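+    # Hedged usage sketch of the event interface (driver code, not part of
+    # this class; handle() is a hypothetical callback):
+    #
+    #     while parser.check_event():
+    #         event = parser.get_event()  # e.g. ScalarEvent, MappingStartEvent
+    #         handle(event)
+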
+    # stream    ::= STREAM-START implicit_document? explicit_document*
+    #                                                               STREAM-END
+    # implicit_document ::= block_node DOCUMENT-END*
+    # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+    def parse_stream_start(self):
+        # type: () -> Any
+        # Parse the stream start.
+        token = self.scanner.get_token()
+        token.move_comment(self.scanner.peek_token())
+        event = StreamStartEvent(
+            token.start_mark, token.end_mark, encoding=token.encoding
+        )
+
+        # Prepare the next state.
+        self.state = self.parse_implicit_document_start
+
+        return event
+
+    def parse_implicit_document_start(self):
+        # type: () -> Any
+        # Parse an implicit document.
+        if not self.scanner.check_token(
+            DirectiveToken, DocumentStartToken, StreamEndToken
+        ):
+            self.tag_handles = self.DEFAULT_TAGS
+            token = self.scanner.peek_token()
+            start_mark = end_mark = token.start_mark
+            event = DocumentStartEvent(start_mark, end_mark, explicit=False)
+
+            # Prepare the next state.
+            self.states.append(self.parse_document_end)
+            self.state = self.parse_block_node
+
+            return event
+
+        else:
+            return self.parse_document_start()
+
+    def parse_document_start(self):
+        # type: () -> Any
+        # Parse any extra document end indicators.
+        while self.scanner.check_token(DocumentEndToken):
+            self.scanner.get_token()
+        # Parse an explicit document.
+        if not self.scanner.check_token(StreamEndToken):
+            token = self.scanner.peek_token()
+            start_mark = token.start_mark
+            version, tags = self.process_directives()
+            if not self.scanner.check_token(DocumentStartToken):
+                raise ParserError(
+                    None,
+                    None,
+                    "expected '<document start>', but found %r"
+                    % self.scanner.peek_token().id,
+                    self.scanner.peek_token().start_mark,
+                )
+            token = self.scanner.get_token()
+            end_mark = token.end_mark
+            # if self.loader is not None and \
+            #    end_mark.line != self.scanner.peek_token().start_mark.line:
+            #     self.loader.scalar_after_indicator = False
+            event = DocumentStartEvent(
+                start_mark, end_mark, explicit=True, version=version, tags=tags
+            )  # type: Any
+            self.states.append(self.parse_document_end)
+            self.state = self.parse_document_content
+        else:
+            # Parse the end of the stream.
+            token = self.scanner.get_token()
+            event = StreamEndEvent(
+                token.start_mark, token.end_mark, comment=token.comment
+            )
+            assert not self.states
+            assert not self.marks
+            self.state = None
+        return event
+
+    def parse_document_end(self):
+        # type: () -> Any
+        # Parse the document end.
+        token = self.scanner.peek_token()
+        start_mark = end_mark = token.start_mark
+        explicit = False
+        if self.scanner.check_token(DocumentEndToken):
+            token = self.scanner.get_token()
+            end_mark = token.end_mark
+            explicit = True
+        event = DocumentEndEvent(start_mark, end_mark, explicit=explicit)
+
+        # Prepare the next state.
+        if self.resolver.processing_version == (1, 1):
+            self.state = self.parse_document_start
+        else:
+            self.state = self.parse_implicit_document_start
+
+        return event
+
+    def parse_document_content(self):
+        # type: () -> Any
+        if self.scanner.check_token(
+            DirectiveToken, DocumentStartToken, DocumentEndToken, StreamEndToken
+        ):
+            event = self.process_empty_scalar(self.scanner.peek_token().start_mark)
+            self.state = self.states.pop()
+            return event
+        else:
+            return self.parse_block_node()
+
+    def process_directives(self):
+        # type: () -> Any
+        yaml_version = None
+        self.tag_handles = {}
+        while self.scanner.check_token(DirectiveToken):
+            token = self.scanner.get_token()
+            if token.name == u"YAML":
+                if yaml_version is not None:
+                    raise ParserError(
+                        None, None, "found duplicate YAML directive", token.start_mark
+                    )
+                major, minor = token.value
+                if major != 1:
+                    raise ParserError(
+                        None,
+                        None,
+                        "found incompatible YAML document (version 1.* is required)",
+                        token.start_mark,
+                    )
+                yaml_version = token.value
+            elif token.name == u"TAG":
+                handle, prefix = token.value
+                if handle in self.tag_handles:
+                    raise ParserError(
+                        None,
+                        None,
+                        "duplicate tag handle %r" % utf8(handle),
+                        token.start_mark,
+                    )
+                self.tag_handles[handle] = prefix
+        if bool(self.tag_handles):
+            value = yaml_version, self.tag_handles.copy()  # type: Any
+        else:
+            value = yaml_version, None
+        if self.loader is not None and hasattr(self.loader, "tags"):
+            self.loader.version = yaml_version
+            if self.loader.tags is None:
+                self.loader.tags = {}
+            for k in self.tag_handles:
+                self.loader.tags[k] = self.tag_handles[k]
+        for key in self.DEFAULT_TAGS:
+            if key not in self.tag_handles:
+                self.tag_handles[key] = self.DEFAULT_TAGS[key]
+        return value
+
+    # block_node_or_indentless_sequence ::= ALIAS
+    #               | properties (block_content | indentless_block_sequence)?
+    #               | block_content
+    #               | indentless_block_sequence
+    # block_node    ::= ALIAS
+    #                   | properties block_content?
+    #                   | block_content
+    # flow_node     ::= ALIAS
+    #                   | properties flow_content?
+    #                   | flow_content
+    # properties    ::= TAG ANCHOR? | ANCHOR TAG?
+    # block_content     ::= block_collection | flow_collection | SCALAR
+    # flow_content      ::= flow_collection | SCALAR
+    # block_collection  ::= block_sequence | block_mapping
+    # flow_collection   ::= flow_sequence | flow_mapping
+
+    def parse_block_node(self):
+        # type: () -> Any
+        return self.parse_node(block=True)
+
+    def parse_flow_node(self):
+        # type: () -> Any
+        return self.parse_node()
+
+    def parse_block_node_or_indentless_sequence(self):
+        # type: () -> Any
+        return self.parse_node(block=True, indentless_sequence=True)
+
+    def transform_tag(self, handle, suffix):
+        # type: (Any, Any) -> Any
+        return self.tag_handles[handle] + suffix
+
+    def parse_node(self, block=False, indentless_sequence=False):
+        # type: (bool, bool) -> Any
+        if self.scanner.check_token(AliasToken):
+            token = self.scanner.get_token()
+            event = AliasEvent(
+                token.value, token.start_mark, token.end_mark
+            )  # type: Any
+            self.state = self.states.pop()
+            return event
+
+        anchor = None
+        tag = None
+        start_mark = end_mark = tag_mark = None
+        if self.scanner.check_token(AnchorToken):
+            token = self.scanner.get_token()
+            start_mark = token.start_mark
+            end_mark = token.end_mark
+            anchor = token.value
+            if self.scanner.check_token(TagToken):
+                token = self.scanner.get_token()
+                tag_mark = token.start_mark
+                end_mark = token.end_mark
+                tag = token.value
+        elif self.scanner.check_token(TagToken):
+            token = self.scanner.get_token()
+            start_mark = tag_mark = token.start_mark
+            end_mark = token.end_mark
+            tag = token.value
+            if self.scanner.check_token(AnchorToken):
+                token = self.scanner.get_token()
+                start_mark = tag_mark = token.start_mark
+                end_mark = token.end_mark
+                anchor = token.value
+        if tag is not None:
+            handle, suffix = tag
+            if handle is not None:
+                if handle not in self.tag_handles:
+                    raise ParserError(
+                        "while parsing a node",
+                        start_mark,
+                        "found undefined tag handle %r" % utf8(handle),
+                        tag_mark,
+                    )
+                tag = self.transform_tag(handle, suffix)
+            else:
+                tag = suffix
+        # if tag == u'!':
+        #     raise ParserError("while parsing a node", start_mark,
+        #             "found non-specific tag '!'", tag_mark,
+        #      "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag'
+        #     and share your opinion.")
+        if start_mark is None:
+            start_mark = end_mark = self.scanner.peek_token().start_mark
+        event = None
+        implicit = tag is None or tag == u"!"
+        if indentless_sequence and self.scanner.check_token(BlockEntryToken):
+            comment = None
+            pt = self.scanner.peek_token()
+            if pt.comment and pt.comment[0]:
+                comment = [pt.comment[0], []]
+                pt.comment[0] = None
+            end_mark = self.scanner.peek_token().end_mark
+            event = SequenceStartEvent(
+                anchor,
+                tag,
+                implicit,
+                start_mark,
+                end_mark,
+                flow_style=False,
+                comment=comment,
+            )
+            self.state = self.parse_indentless_sequence_entry
+            return event
+
+        if self.scanner.check_token(ScalarToken):
+            token = self.scanner.get_token()
+            # self.scanner.peek_token_same_line_comment(token)
+            end_mark = token.end_mark
+            if (token.plain and tag is None) or tag == u"!":
+                implicit = (True, False)
+            elif tag is None:
+                implicit = (False, True)
+            else:
+                implicit = (False, False)
+            # nprint('se', token.value, token.comment)
+            event = ScalarEvent(
+                anchor,
+                tag,
+                implicit,
+                token.value,
+                start_mark,
+                end_mark,
+                style=token.style,
+                comment=token.comment,
+            )
+            self.state = self.states.pop()
+        elif self.scanner.check_token(FlowSequenceStartToken):
+            pt = self.scanner.peek_token()
+            end_mark = pt.end_mark
+            event = SequenceStartEvent(
+                anchor,
+                tag,
+                implicit,
+                start_mark,
+                end_mark,
+                flow_style=True,
+                comment=pt.comment,
+            )
+            self.state = self.parse_flow_sequence_first_entry
+        elif self.scanner.check_token(FlowMappingStartToken):
+            pt = self.scanner.peek_token()
+            end_mark = pt.end_mark
+            event = MappingStartEvent(
+                anchor,
+                tag,
+                implicit,
+                start_mark,
+                end_mark,
+                flow_style=True,
+                comment=pt.comment,
+            )
+            self.state = self.parse_flow_mapping_first_key
+        elif block and self.scanner.check_token(BlockSequenceStartToken):
+            end_mark = self.scanner.peek_token().start_mark
+            # should inserting the comment be dependent on the
+            # indentation?
+            pt = self.scanner.peek_token()
+            comment = pt.comment
+            # nprint('pt0', type(pt))
+            if comment is None or comment[1] is None:
+                comment = pt.split_comment()
+            # nprint('pt1', comment)
+            event = SequenceStartEvent(
+                anchor,
+                tag,
+                implicit,
+                start_mark,
+                end_mark,
+                flow_style=False,
+                comment=comment,
+            )
+            self.state = self.parse_block_sequence_first_entry
+        elif block and self.scanner.check_token(BlockMappingStartToken):
+            end_mark = self.scanner.peek_token().start_mark
+            comment = self.scanner.peek_token().comment
+            event = MappingStartEvent(
+                anchor,
+                tag,
+                implicit,
+                start_mark,
+                end_mark,
+                flow_style=False,
+                comment=comment,
+            )
+            self.state = self.parse_block_mapping_first_key
+        elif anchor is not None or tag is not None:
+            # Empty scalars are allowed even if a tag or an anchor is
+            # specified.
+            event = ScalarEvent(
+                anchor, tag, (implicit, False), "", start_mark, end_mark
+            )
+            self.state = self.states.pop()
+        else:
+            if block:
+                node = "block"
+            else:
+                node = "flow"
+            token = self.scanner.peek_token()
+            raise ParserError(
+                "while parsing a %s node" % node,
+                start_mark,
+                "expected the node content, but found %r" % token.id,
+                token.start_mark,
+            )
+        return event
+
+    # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)*
+    #                                                               BLOCK-END
+
+    def parse_block_sequence_first_entry(self):
+        # type: () -> Any
+        token = self.scanner.get_token()
+        # move any comment from start token
+        # token.move_comment(self.scanner.peek_token())
+        self.marks.append(token.start_mark)
+        return self.parse_block_sequence_entry()
+
+    def parse_block_sequence_entry(self):
+        # type: () -> Any
+        if self.scanner.check_token(BlockEntryToken):
+            token = self.scanner.get_token()
+            token.move_comment(self.scanner.peek_token())
+            if not self.scanner.check_token(BlockEntryToken, BlockEndToken):
+                self.states.append(self.parse_block_sequence_entry)
+                return self.parse_block_node()
+            else:
+                self.state = self.parse_block_sequence_entry
+                return self.process_empty_scalar(token.end_mark)
+        if not self.scanner.check_token(BlockEndToken):
+            token = self.scanner.peek_token()
+            raise ParserError(
+                "while parsing a block collection",
+                self.marks[-1],
+                "expected <block end>, but found %r" % token.id,
+                token.start_mark,
+            )
+        token = self.scanner.get_token()  # BlockEndToken
+        event = SequenceEndEvent(
+            token.start_mark, token.end_mark, comment=token.comment
+        )
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+    # indentless_sequence?
+    # sequence:
+    # - entry
+    #  - nested
+
+    def parse_indentless_sequence_entry(self):
+        # type: () -> Any
+        if self.scanner.check_token(BlockEntryToken):
+            token = self.scanner.get_token()
+            token.move_comment(self.scanner.peek_token())
+            if not self.scanner.check_token(
+                BlockEntryToken, KeyToken, ValueToken, BlockEndToken
+            ):
+                self.states.append(self.parse_indentless_sequence_entry)
+                return self.parse_block_node()
+            else:
+                self.state = self.parse_indentless_sequence_entry
+                return self.process_empty_scalar(token.end_mark)
+        token = self.scanner.peek_token()
+        event = SequenceEndEvent(
+            token.start_mark, token.start_mark, comment=token.comment
+        )
+        self.state = self.states.pop()
+        return event
+
+    # block_mapping     ::= BLOCK-MAPPING_START
+    #                       ((KEY block_node_or_indentless_sequence?)?
+    #                       (VALUE block_node_or_indentless_sequence?)?)*
+    #                       BLOCK-END
+
+    def parse_block_mapping_first_key(self):
+        # type: () -> Any
+        token = self.scanner.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_block_mapping_key()
+
+    def parse_block_mapping_key(self):
+        # type: () -> Any
+        if self.scanner.check_token(KeyToken):
+            token = self.scanner.get_token()
+            token.move_comment(self.scanner.peek_token())
+            if not self.scanner.check_token(KeyToken, ValueToken, BlockEndToken):
+                self.states.append(self.parse_block_mapping_value)
+                return self.parse_block_node_or_indentless_sequence()
+            else:
+                self.state = self.parse_block_mapping_value
+                return self.process_empty_scalar(token.end_mark)
+        if self.resolver.processing_version > (1, 1) and self.scanner.check_token(
+            ValueToken
+        ):
+            self.state = self.parse_block_mapping_value
+            return self.process_empty_scalar(self.scanner.peek_token().start_mark)
+        if not self.scanner.check_token(BlockEndToken):
+            token = self.scanner.peek_token()
+            raise ParserError(
+                "while parsing a block mapping",
+                self.marks[-1],
+                "expected <block end>, but found %r" % token.id,
+                token.start_mark,
+            )
+        token = self.scanner.get_token()
+        token.move_comment(self.scanner.peek_token())
+        event = MappingEndEvent(token.start_mark, token.end_mark, comment=token.comment)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    def parse_block_mapping_value(self):
+        # type: () -> Any
+        if self.scanner.check_token(ValueToken):
+            token = self.scanner.get_token()
+            # the value token might carry a post comment; move it to e.g. a block
+            if self.scanner.check_token(ValueToken):
+                token.move_comment(self.scanner.peek_token())
+            else:
+                if not self.scanner.check_token(KeyToken):
+                    token.move_comment(self.scanner.peek_token(), empty=True)
+                # else: empty value for this key; cannot move token.comment
+            if not self.scanner.check_token(KeyToken, ValueToken, BlockEndToken):
+                self.states.append(self.parse_block_mapping_key)
+                return self.parse_block_node_or_indentless_sequence()
+            else:
+                self.state = self.parse_block_mapping_key
+                comment = token.comment
+                if comment is None:
+                    token = self.scanner.peek_token()
+                    comment = token.comment
+                    if comment:
+                        token._comment = [None, comment[1]]
+                        comment = [comment[0], None]
+                return self.process_empty_scalar(token.end_mark, comment=comment)
+        else:
+            self.state = self.parse_block_mapping_key
+            token = self.scanner.peek_token()
+            return self.process_empty_scalar(token.start_mark)
+
+    # flow_sequence     ::= FLOW-SEQUENCE-START
+    #                       (flow_sequence_entry FLOW-ENTRY)*
+    #                       flow_sequence_entry?
+    #                       FLOW-SEQUENCE-END
+    # flow_sequence_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+    #
+    # Note that while production rules for both flow_sequence_entry and
+    # flow_mapping_entry are equal, their interpretations are different.
+    # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+    # generates an inline mapping (set syntax).
+
+    def parse_flow_sequence_first_entry(self):
+        # type: () -> Any
+        token = self.scanner.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_flow_sequence_entry(first=True)
+
+    def parse_flow_sequence_entry(self, first=False):
+        # type: (bool) -> Any
+        if not self.scanner.check_token(FlowSequenceEndToken):
+            if not first:
+                if self.scanner.check_token(FlowEntryToken):
+                    self.scanner.get_token()
+                else:
+                    token = self.scanner.peek_token()
+                    raise ParserError(
+                        "while parsing a flow sequence",
+                        self.marks[-1],
+                        "expected ',' or ']', but got %r" % token.id,
+                        token.start_mark,
+                    )
+
+            if self.scanner.check_token(KeyToken):
+                token = self.scanner.peek_token()
+                event = MappingStartEvent(
+                    None, None, True, token.start_mark, token.end_mark, flow_style=True
+                )  # type: Any
+                self.state = self.parse_flow_sequence_entry_mapping_key
+                return event
+            elif not self.scanner.check_token(FlowSequenceEndToken):
+                self.states.append(self.parse_flow_sequence_entry)
+                return self.parse_flow_node()
+        token = self.scanner.get_token()
+        event = SequenceEndEvent(
+            token.start_mark, token.end_mark, comment=token.comment
+        )
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    def parse_flow_sequence_entry_mapping_key(self):
+        # type: () -> Any
+        token = self.scanner.get_token()
+        if not self.scanner.check_token(
+            ValueToken, FlowEntryToken, FlowSequenceEndToken
+        ):
+            self.states.append(self.parse_flow_sequence_entry_mapping_value)
+            return self.parse_flow_node()
+        else:
+            self.state = self.parse_flow_sequence_entry_mapping_value
+            return self.process_empty_scalar(token.end_mark)
+
+    def parse_flow_sequence_entry_mapping_value(self):
+        # type: () -> Any
+        if self.scanner.check_token(ValueToken):
+            token = self.scanner.get_token()
+            if not self.scanner.check_token(FlowEntryToken, FlowSequenceEndToken):
+                self.states.append(self.parse_flow_sequence_entry_mapping_end)
+                return self.parse_flow_node()
+            else:
+                self.state = self.parse_flow_sequence_entry_mapping_end
+                return self.process_empty_scalar(token.end_mark)
+        else:
+            self.state = self.parse_flow_sequence_entry_mapping_end
+            token = self.scanner.peek_token()
+            return self.process_empty_scalar(token.start_mark)
+
+    def parse_flow_sequence_entry_mapping_end(self):
+        # type: () -> Any
+        self.state = self.parse_flow_sequence_entry
+        token = self.scanner.peek_token()
+        return MappingEndEvent(token.start_mark, token.start_mark)
+
+    # flow_mapping  ::= FLOW-MAPPING-START
+    #                   (flow_mapping_entry FLOW-ENTRY)*
+    #                   flow_mapping_entry?
+    #                   FLOW-MAPPING-END
+    # flow_mapping_entry    ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+    def parse_flow_mapping_first_key(self):
+        # type: () -> Any
+        token = self.scanner.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_flow_mapping_key(first=True)
+
+    def parse_flow_mapping_key(self, first=False):
+        # type: (Any) -> Any
+        if not self.scanner.check_token(FlowMappingEndToken):
+            if not first:
+                if self.scanner.check_token(FlowEntryToken):
+                    self.scanner.get_token()
+                else:
+                    token = self.scanner.peek_token()
+                    raise ParserError(
+                        "while parsing a flow mapping",
+                        self.marks[-1],
+                        "expected ',' or '}', but got %r" % token.id,
+                        token.start_mark,
+                    )
+            if self.scanner.check_token(KeyToken):
+                token = self.scanner.get_token()
+                if not self.scanner.check_token(
+                    ValueToken, FlowEntryToken, FlowMappingEndToken
+                ):
+                    self.states.append(self.parse_flow_mapping_value)
+                    return self.parse_flow_node()
+                else:
+                    self.state = self.parse_flow_mapping_value
+                    return self.process_empty_scalar(token.end_mark)
+            elif self.resolver.processing_version > (1, 1) and self.scanner.check_token(
+                ValueToken
+            ):
+                self.state = self.parse_flow_mapping_value
+                return self.process_empty_scalar(self.scanner.peek_token().end_mark)
+            elif not self.scanner.check_token(FlowMappingEndToken):
+                self.states.append(self.parse_flow_mapping_empty_value)
+                return self.parse_flow_node()
+        token = self.scanner.get_token()
+        event = MappingEndEvent(token.start_mark, token.end_mark, comment=token.comment)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    def parse_flow_mapping_value(self):
+        # type: () -> Any
+        if self.scanner.check_token(ValueToken):
+            token = self.scanner.get_token()
+            if not self.scanner.check_token(FlowEntryToken, FlowMappingEndToken):
+                self.states.append(self.parse_flow_mapping_key)
+                return self.parse_flow_node()
+            else:
+                self.state = self.parse_flow_mapping_key
+                return self.process_empty_scalar(token.end_mark)
+        else:
+            self.state = self.parse_flow_mapping_key
+            token = self.scanner.peek_token()
+            return self.process_empty_scalar(token.start_mark)
+
+    def parse_flow_mapping_empty_value(self):
+        # type: () -> Any
+        self.state = self.parse_flow_mapping_key
+        return self.process_empty_scalar(self.scanner.peek_token().start_mark)
+
+    def process_empty_scalar(self, mark, comment=None):
+        # type: (Any, Any) -> Any
+        return ScalarEvent(None, None, (True, False), "", mark, mark, comment=comment)
+
+
+class RoundTripParser(Parser):
+    """roundtrip is a safe loader that wants to see the unmangled tag"""
+
+    def transform_tag(self, handle, suffix):
+        # type: (Any, Any) -> Any
+        # return self.tag_handles[handle]+suffix
+        if handle == "!!" and suffix in (
+            u"null",
+            u"bool",
+            u"int",
+            u"float",
+            u"binary",
+            u"timestamp",
+            u"omap",
+            u"pairs",
+            u"set",
+            u"str",
+            u"seq",
+            u"map",
+        ):
+            return Parser.transform_tag(self, handle, suffix)
+        return handle + suffix
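+
+
+# Hedged illustration of transform_tag above (rtp is an assumed
+# RoundTripParser instance):
+#
+#     rtp.transform_tag("!!", u"str")          # -> u"tag:yaml.org,2002:str"
+#     rtp.transform_tag("!!", u"python/none")  # -> u"!!python/none"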
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/reader.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/reader.py
new file mode 100644
index 00000000..7bca3e8d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/reader.py
@@ -0,0 +1,325 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+# This module contains abstractions for the input stream. You don't have to
+# look further; there is no pretty code.
+#
+# We define two classes here.
+#
+#   Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+#   Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+#   reader.peek(length=1) - return the next `length` characters
+#   reader.forward(length=1) - move the current position `length`
+#      characters forward.
+#   reader.index - the number of the current character.
+#   reader.line, reader.column - the line and the column of the current
+#      character.
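+#
+# A hedged usage sketch (string input, so marks come from StringMark):
+#
+#     reader = Reader("abc")
+#     reader.peek()      # -> 'a', without advancing
+#     reader.forward(2)
+#     reader.peek()      # -> 'c'
+#     reader.get_mark()  # mark for error messages at the current position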
+
+import codecs
+
+from strictyaml.ruamel.error import YAMLError, FileMark, StringMark, YAMLStreamError
+from strictyaml.ruamel.compat import text_type, binary_type, PY3, UNICODE_SIZE
+from strictyaml.ruamel.util import RegExp
+
+if False:  # MYPY
+    from typing import Any, Dict, Optional, List, Union, Text, Tuple  # NOQA
+#    from strictyaml.ruamel.compat import StreamTextType  # NOQA
+
+__all__ = ["Reader", "ReaderError"]
+
+
+class ReaderError(YAMLError):
+    def __init__(self, name, position, character, encoding, reason):
+        # type: (Any, Any, Any, Any, Any) -> None
+        self.name = name
+        self.character = character
+        self.position = position
+        self.encoding = encoding
+        self.reason = reason
+
+    def __str__(self):
+        # type: () -> str
+        if isinstance(self.character, binary_type):
+            return (
+                "'%s' codec can't decode byte #x%02x: %s\n"
+                '  in "%s", position %d'
+                % (
+                    self.encoding,
+                    ord(self.character),
+                    self.reason,
+                    self.name,
+                    self.position,
+                )
+            )
+        else:
+            return "unacceptable character #x%04x: %s\n" '  in "%s", position %d' % (
+                self.character,
+                self.reason,
+                self.name,
+                self.position,
+            )
+
+
+class Reader(object):
+    # Reader:
+    # - determines the data encoding and converts it to a unicode string,
+    # - checks if characters are in allowed range,
+    # - adds '\0' to the end.
+
+    # Reader accepts
+    #  - a `str` object (PY2) / a `bytes` object (PY3),
+    #  - a `unicode` object (PY2) / a `str` object (PY3),
+    #  - a file-like object with its `read` method returning `str`,
+    #  - a file-like object with its `read` method returning `unicode`.
+
+    # Yeah, it's ugly and slow.
+
+    def __init__(self, stream, loader=None):
+        # type: (Any, Any) -> None
+        self.loader = loader
+        if self.loader is not None and getattr(self.loader, "_reader", None) is None:
+            self.loader._reader = self
+        self.reset_reader()
+        self.stream = stream  # type: Any  # as .read is called
+
+    def reset_reader(self):
+        # type: () -> None
+        self.name = None  # type: Any
+        self.stream_pointer = 0
+        self.eof = True
+        self.buffer = ""
+        self.pointer = 0
+        self.raw_buffer = None  # type: Any
+        self.raw_decode = None
+        self.encoding = None  # type: Optional[Text]
+        self.index = 0
+        self.line = 0
+        self.column = 0
+
+    @property
+    def stream(self):
+        # type: () -> Any
+        try:
+            return self._stream
+        except AttributeError:
+            raise YAMLStreamError("input stream needs to be specified")
+
+    @stream.setter
+    def stream(self, val):
+        # type: (Any) -> None
+        if val is None:
+            return
+        self._stream = None
+        if isinstance(val, text_type):
+            self.name = "<unicode string>"
+            self.check_printable(val)
+            self.buffer = val + u"\0"  # type: ignore
+        elif isinstance(val, binary_type):
+            self.name = "<byte string>"
+            self.raw_buffer = val
+            self.determine_encoding()
+        else:
+            if not hasattr(val, "read"):
+                raise YAMLStreamError("stream argument needs to have a read() method")
+            self._stream = val
+            self.name = getattr(self.stream, "name", "<file>")
+            self.eof = False
+            self.raw_buffer = None
+            self.determine_encoding()
+
+    def peek(self, index=0):
+        # type: (int) -> Text
+        try:
+            return self.buffer[self.pointer + index]
+        except IndexError:
+            self.update(index + 1)
+            return self.buffer[self.pointer + index]
+
+    def prefix(self, length=1):
+        # type: (int) -> Any
+        if self.pointer + length >= len(self.buffer):
+            self.update(length)
+        return self.buffer[self.pointer : self.pointer + length]
+
+    def forward_1_1(self, length=1):
+        # type: (int) -> None
+        if self.pointer + length + 1 >= len(self.buffer):
+            self.update(length + 1)
+        while length != 0:
+            ch = self.buffer[self.pointer]
+            self.pointer += 1
+            self.index += 1
+            if ch in u"\n\x85\u2028\u2029" or (
+                ch == u"\r" and self.buffer[self.pointer] != u"\n"
+            ):
+                self.line += 1
+                self.column = 0
+            elif ch != u"\uFEFF":
+                self.column += 1
+            length -= 1
+
+    def forward(self, length=1):
+        # type: (int) -> None
+        if self.pointer + length + 1 >= len(self.buffer):
+            self.update(length + 1)
+        while length != 0:
+            ch = self.buffer[self.pointer]
+            self.pointer += 1
+            self.index += 1
+            if ch == u"\n" or (ch == u"\r" and self.buffer[self.pointer] != u"\n"):
+                self.line += 1
+                self.column = 0
+            elif ch != u"\uFEFF":
+                self.column += 1
+            length -= 1
+
+    def get_mark(self):
+        # type: () -> Any
+        if self.stream is None:
+            return StringMark(
+                self.name, self.index, self.line, self.column, self.buffer, self.pointer
+            )
+        else:
+            return FileMark(self.name, self.index, self.line, self.column)
+
+    def determine_encoding(self):
+        # type: () -> None
+        while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
+            self.update_raw()
+        if isinstance(self.raw_buffer, binary_type):
+            if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+                self.raw_decode = codecs.utf_16_le_decode  # type: ignore
+                self.encoding = "utf-16-le"
+            elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+                self.raw_decode = codecs.utf_16_be_decode  # type: ignore
+                self.encoding = "utf-16-be"
+            else:
+                self.raw_decode = codecs.utf_8_decode  # type: ignore
+                self.encoding = "utf-8"
+        self.update(1)
+
+    if UNICODE_SIZE == 2:
+        NON_PRINTABLE = RegExp(
+            u"[^\x09\x0A\x0D\x20-\x7E\x85" u"\xA0-\uD7FF" u"\uE000-\uFFFD" u"]"
+        )
+    else:
+        NON_PRINTABLE = RegExp(
+            u"[^\x09\x0A\x0D\x20-\x7E\x85"
+            u"\xA0-\uD7FF"
+            u"\uE000-\uFFFD"
+            u"\U00010000-\U0010FFFF"
+            u"]"
+        )
+
+    _printable_ascii = ("\x09\x0A\x0D" + "".join(map(chr, range(0x20, 0x7F)))).encode(
+        "ascii"
+    )
+
+    @classmethod
+    def _get_non_printable_ascii(cls, data):  # type: ignore
+        # type: (Text, bytes) -> Optional[Tuple[int, Text]]
+        ascii_bytes = data.encode("ascii")
+        non_printables = ascii_bytes.translate(None, cls._printable_ascii)  # type: ignore
+        if not non_printables:
+            return None
+        non_printable = non_printables[:1]
+        return ascii_bytes.index(non_printable), non_printable.decode("ascii")
+
+    @classmethod
+    def _get_non_printable_regex(cls, data):
+        # type: (Text) -> Optional[Tuple[int, Text]]
+        match = cls.NON_PRINTABLE.search(data)
+        if not bool(match):
+            return None
+        return match.start(), match.group()
+
+    @classmethod
+    def _get_non_printable(cls, data):
+        # type: (Text) -> Optional[Tuple[int, Text]]
+        try:
+            return cls._get_non_printable_ascii(data)  # type: ignore
+        except UnicodeEncodeError:
+            return cls._get_non_printable_regex(data)
+
+    def check_printable(self, data):
+        # type: (Any) -> None
+        non_printable_match = self._get_non_printable(data)
+        if non_printable_match is not None:
+            start, character = non_printable_match
+            position = self.index + (len(self.buffer) - self.pointer) + start
+            raise ReaderError(
+                self.name,
+                position,
+                ord(character),
+                "unicode",
+                "special characters are not allowed",
+            )
+
+    def update(self, length):
+        # type: (int) -> None
+        if self.raw_buffer is None:
+            return
+        self.buffer = self.buffer[self.pointer :]
+        self.pointer = 0
+        while len(self.buffer) < length:
+            if not self.eof:
+                self.update_raw()
+            if self.raw_decode is not None:
+                try:
+                    data, converted = self.raw_decode(
+                        self.raw_buffer, "strict", self.eof
+                    )
+                except UnicodeDecodeError as exc:
+                    if PY3:
+                        character = self.raw_buffer[exc.start]
+                    else:
+                        character = exc.object[exc.start]
+                    if self.stream is not None:
+                        position = (
+                            self.stream_pointer - len(self.raw_buffer) + exc.start
+                        )
+                    else:
+                        position = exc.start
+                    raise ReaderError(
+                        self.name, position, character, exc.encoding, exc.reason
+                    )
+            else:
+                data = self.raw_buffer
+                converted = len(data)
+            self.check_printable(data)
+            self.buffer += data
+            self.raw_buffer = self.raw_buffer[converted:]
+            if self.eof:
+                self.buffer += "\0"
+                self.raw_buffer = None
+                break
+
+    def update_raw(self, size=None):
+        # type: (Optional[int]) -> None
+        if size is None:
+            size = 4096 if PY3 else 1024
+        data = self.stream.read(size)
+        if self.raw_buffer is None:
+            self.raw_buffer = data
+        else:
+            self.raw_buffer += data
+        self.stream_pointer += len(data)
+        if not data:
+            self.eof = True
+
+
+# try:
+#     import psyco
+#     psyco.bind(Reader)
+# except ImportError:
+#     pass
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/representer.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/representer.py
new file mode 100644
index 00000000..0553994e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/representer.py
@@ -0,0 +1,1335 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division
+
+
+from strictyaml.ruamel.error import *  # NOQA
+from strictyaml.ruamel.nodes import *  # NOQA
+from strictyaml.ruamel.compat import text_type, binary_type, to_unicode, PY2, PY3
+from strictyaml.ruamel.compat import ordereddict  # type: ignore
+from strictyaml.ruamel.compat import nprint, nprintf  # NOQA
+from strictyaml.ruamel.scalarstring import (
+    LiteralScalarString,
+    FoldedScalarString,
+    SingleQuotedScalarString,
+    DoubleQuotedScalarString,
+    PlainScalarString,
+)
+from strictyaml.ruamel.comments import (
+    CommentedMap,
+    CommentedOrderedMap,
+    CommentedSeq,
+    CommentedKeySeq,
+    CommentedKeyMap,
+    CommentedSet,
+    comment_attrib,
+    merge_attrib,
+    TaggedScalar,
+)
+from strictyaml.ruamel.scalarint import (
+    ScalarInt,
+    BinaryInt,
+    OctalInt,
+    HexInt,
+    HexCapsInt,
+)
+from strictyaml.ruamel.scalarfloat import ScalarFloat
+from strictyaml.ruamel.scalarbool import ScalarBoolean
+from strictyaml.ruamel.timestamp import TimeStamp
+
+import datetime
+import sys
+import types
+
+if PY3:
+    import copyreg
+    import base64
+else:
+    import copy_reg as copyreg  # type: ignore
+
+if False:  # MYPY
+    from typing import Dict, List, Any, Union, Text, Optional  # NOQA
+
+# fmt: off
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+           'RepresenterError', 'RoundTripRepresenter']
+# fmt: on
+
+
+class RepresenterError(YAMLError):
+    pass
+
+
+if PY2:
+
+    def get_classobj_bases(cls):
+        # type: (Any) -> Any
+        bases = [cls]
+        for base in cls.__bases__:
+            bases.extend(get_classobj_bases(base))
+        return bases
+
+
+class BaseRepresenter(object):
+
+    yaml_representers = {}  # type: Dict[Any, Any]
+    yaml_multi_representers = {}  # type: Dict[Any, Any]
+
+    def __init__(self, default_style=None, default_flow_style=None, dumper=None):
+        # type: (Any, Any, Any) -> None
+        self.dumper = dumper
+        if self.dumper is not None:
+            self.dumper._representer = self
+        self.default_style = default_style
+        self.default_flow_style = default_flow_style
+        self.represented_objects = {}  # type: Dict[Any, Any]
+        self.object_keeper = []  # type: List[Any]
+        self.alias_key = None  # type: Optional[int]
+        self.sort_base_mapping_type_on_output = True
+
+    @property
+    def serializer(self):
+        # type: () -> Any
+        try:
+            if hasattr(self.dumper, "typ"):
+                return self.dumper.serializer
+            return self.dumper._serializer
+        except AttributeError:
+            return self  # cyaml
+
+    def represent(self, data):
+        # type: (Any) -> None
+        node = self.represent_data(data)
+        self.serializer.serialize(node)
+        self.represented_objects = {}
+        self.object_keeper = []
+        self.alias_key = None
+
+    def represent_data(self, data):
+        # type: (Any) -> Any
+        if self.ignore_aliases(data):
+            self.alias_key = None
+        else:
+            self.alias_key = id(data)
+        if self.alias_key is not None:
+            if self.alias_key in self.represented_objects:
+                node = self.represented_objects[self.alias_key]
+                # if node is None:
+                #     raise RepresenterError(
+                #          "recursive objects are not allowed: %r" % data)
+                return node
+            # self.represented_objects[alias_key] = None
+            self.object_keeper.append(data)
+        data_types = type(data).__mro__
+        if PY2:
+            # if type(data) is types.InstanceType:
+            if isinstance(data, types.InstanceType):
+                data_types = get_classobj_bases(data.__class__) + list(data_types)
+        if data_types[0] in self.yaml_representers:
+            node = self.yaml_representers[data_types[0]](self, data)
+        else:
+            for data_type in data_types:
+                if data_type in self.yaml_multi_representers:
+                    node = self.yaml_multi_representers[data_type](self, data)
+                    break
+            else:
+                if None in self.yaml_multi_representers:
+                    node = self.yaml_multi_representers[None](self, data)
+                elif None in self.yaml_representers:
+                    node = self.yaml_representers[None](self, data)
+                else:
+                    node = ScalarNode(None, text_type(data))
+        # if alias_key is not None:
+        #     self.represented_objects[alias_key] = node
+        return node
+
+    def represent_key(self, data):
+        # type: (Any) -> Any
+        """
+        David Fraser: Extract a method to represent keys in mappings, so that
+        a subclass can choose not to quote them (for example);
+        used in represent_mapping.
+        https://bitbucket.org/davidfraser/pyyaml/commits/d81df6eb95f20cac4a79eed95ae553b5c6f77b8c
+        """
+        return self.represent_data(data)
+
+    @classmethod
+    def add_representer(cls, data_type, representer):
+        # type: (Any, Any) -> None
+        if "yaml_representers" not in cls.__dict__:
+            cls.yaml_representers = cls.yaml_representers.copy()
+        cls.yaml_representers[data_type] = representer
+
+    @classmethod
+    def add_multi_representer(cls, data_type, representer):
+        # type: (Any, Any) -> None
+        if "yaml_multi_representers" not in cls.__dict__:
+            cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+        cls.yaml_multi_representers[data_type] = representer
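+    # A minimal sketch of how these registration hooks are used from client
+    # code (decimal.Decimal and represent_decimal are assumed examples, not
+    # part of this module):
+    #
+    #     import decimal
+    #
+    #     def represent_decimal(representer, data):
+    #         return representer.represent_scalar(
+    #             u'tag:yaml.org,2002:str', text_type(data))
+    #
+    #     SafeRepresenter.add_representer(decimal.Decimal, represent_decimal)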
+
+    def represent_scalar(self, tag, value, style=None, anchor=None):
+        # type: (Any, Any, Any, Any) -> Any
+        if style is None:
+            style = self.default_style
+        comment = None
+        if style and style[0] in "|>":
+            comment = getattr(value, "comment", None)
+            if comment:
+                comment = [None, [comment]]
+        node = ScalarNode(tag, value, style=style, comment=comment, anchor=anchor)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        return node
+
+    def represent_sequence(self, tag, sequence, flow_style=None):
+        # type: (Any, Any, Any) -> Any
+        value = []  # type: List[Any]
+        node = SequenceNode(tag, value, flow_style=flow_style)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        for item in sequence:
+            node_item = self.represent_data(item)
+            if not (isinstance(node_item, ScalarNode) and not node_item.style):
+                best_style = False
+            value.append(node_item)
+        if flow_style is None:
+            if self.default_flow_style is not None:
+                node.flow_style = self.default_flow_style
+            else:
+                node.flow_style = best_style
+        return node
+
+    def represent_omap(self, tag, omap, flow_style=None):
+        # type: (Any, Any, Any) -> Any
+        value = []  # type: List[Any]
+        node = SequenceNode(tag, value, flow_style=flow_style)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        for item_key in omap:
+            item_val = omap[item_key]
+            node_item = self.represent_data({item_key: item_val})
+            # if not (isinstance(node_item, ScalarNode) \
+            #    and not node_item.style):
+            #     best_style = False
+            value.append(node_item)
+        if flow_style is None:
+            if self.default_flow_style is not None:
+                node.flow_style = self.default_flow_style
+            else:
+                node.flow_style = best_style
+        return node
+
+    def represent_mapping(self, tag, mapping, flow_style=None):
+        # type: (Any, Any, Any) -> Any
+        value = []  # type: List[Any]
+        node = MappingNode(tag, value, flow_style=flow_style)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        if hasattr(mapping, "items"):
+            mapping = list(mapping.items())
+            if self.sort_base_mapping_type_on_output:
+                try:
+                    mapping = sorted(mapping)
+                except TypeError:
+                    pass
+        for item_key, item_value in mapping:
+            node_key = self.represent_key(item_key)
+            node_value = self.represent_data(item_value)
+            if not (isinstance(node_key, ScalarNode) and not node_key.style):
+                best_style = False
+            if not (isinstance(node_value, ScalarNode) and not node_value.style):
+                best_style = False
+            value.append((node_key, node_value))
+        if flow_style is None:
+            if self.default_flow_style is not None:
+                node.flow_style = self.default_flow_style
+            else:
+                node.flow_style = best_style
+        return node
+
+    def ignore_aliases(self, data):
+        # type: (Any) -> bool
+        return False
+
+
+class SafeRepresenter(BaseRepresenter):
+    def ignore_aliases(self, data):
+        # type: (Any) -> bool
+        # https://docs.python.org/3/reference/expressions.html#parenthesized-forms :
+        # "i.e. two occurrences of the empty tuple may or may not yield the same object"
+        # so "data is ()" should not be used
+        if data is None or (isinstance(data, tuple) and data == ()):
+            return True
+        if isinstance(data, (binary_type, text_type, bool, int, float)):
+            return True
+        return False
+
+    def represent_none(self, data):
+        # type: (Any) -> Any
+        return self.represent_scalar(u"tag:yaml.org,2002:null", u"null")
+
+    if PY3:
+
+        def represent_str(self, data):
+            # type: (Any) -> Any
+            return self.represent_scalar(u"tag:yaml.org,2002:str", data)
+
+        def represent_binary(self, data):
+            # type: (Any) -> Any
+            if hasattr(base64, "encodebytes"):
+                data = base64.encodebytes(data).decode("ascii")
+            else:
+                data = base64.encodestring(data).decode("ascii")
+            return self.represent_scalar(u"tag:yaml.org,2002:binary", data, style="|")
+
+    else:
+
+        def represent_str(self, data):
+            # type: (Any) -> Any
+            tag = None
+            style = None
+            try:
+                data = unicode(data, "ascii")
+                tag = u"tag:yaml.org,2002:str"
+            except UnicodeDecodeError:
+                try:
+                    data = unicode(data, "utf-8")
+                    tag = u"tag:yaml.org,2002:str"
+                except UnicodeDecodeError:
+                    data = data.encode("base64")
+                    tag = u"tag:yaml.org,2002:binary"
+                    style = "|"
+            return self.represent_scalar(tag, data, style=style)
+
+        def represent_unicode(self, data):
+            # type: (Any) -> Any
+            return self.represent_scalar(u"tag:yaml.org,2002:str", data)
+
+    def represent_bool(self, data, anchor=None):
+        # type: (Any, Optional[Any]) -> Any
+        try:
+            value = self.dumper.boolean_representation[bool(data)]
+        except AttributeError:
+            if data:
+                value = u"true"
+            else:
+                value = u"false"
+        return self.represent_scalar(u"tag:yaml.org,2002:bool", value, anchor=anchor)
+
+    def represent_int(self, data):
+        # type: (Any) -> Any
+        return self.represent_scalar(u"tag:yaml.org,2002:int", text_type(data))
+
+    if PY2:
+
+        def represent_long(self, data):
+            # type: (Any) -> Any
+            return self.represent_scalar(u"tag:yaml.org,2002:int", text_type(data))
+
+    inf_value = 1e300
+    while repr(inf_value) != repr(inf_value * inf_value):
+        inf_value *= inf_value
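+    # The first squaring already pushes 1e300 past the IEEE-754 double
+    # maximum (~1.8e308), so the loop leaves inf_value == float('inf') and
+    # repr() stabilizes at 'inf'.  Illustrative:
+    #
+    #     >>> 1e300 * 1e300
+    #     inf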
+
+    def represent_float(self, data):
+        # type: (Any) -> Any
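+        # data != data is only true for NaN; the paradoxical equality check
+        # is a belt-and-braces guard for platforms where NaN compares equal
+        # to other floats.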
+        if data != data or (data == 0.0 and data == 1.0):
+            value = u".nan"
+        elif data == self.inf_value:
+            value = u".inf"
+        elif data == -self.inf_value:
+            value = u"-.inf"
+        else:
+            value = to_unicode(repr(data)).lower()
+            if getattr(self.serializer, "use_version", None) == (1, 1):
+                if u"." not in value and u"e" in value:
+                    # Note that in some cases `repr(data)` represents a float number
+                    # without the decimal parts.  For instance:
+                    #   >>> repr(1e17)
+                    #   '1e17'
+                    # Unfortunately, this is not a valid float representation according
+                    # to the definition of the `!!float` tag in YAML 1.1.  We fix
+                    # this by adding '.0' before the 'e' symbol.
+                    value = value.replace(u"e", u".0e", 1)
+        return self.represent_scalar(u"tag:yaml.org,2002:float", value)
+
+    def represent_list(self, data):
+        # type: (Any) -> Any
+        # pairs = (len(data) > 0 and isinstance(data, list))
+        # if pairs:
+        #     for item in data:
+        #         if not isinstance(item, tuple) or len(item) != 2:
+        #             pairs = False
+        #             break
+        # if not pairs:
+        return self.represent_sequence(u"tag:yaml.org,2002:seq", data)
+
+    # value = []
+    # for item_key, item_value in data:
+    #     value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+    #         [(item_key, item_value)]))
+    # return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+    def represent_dict(self, data):
+        # type: (Any) -> Any
+        return self.represent_mapping(u"tag:yaml.org,2002:map", data)
+
+    def represent_ordereddict(self, data):
+        # type: (Any) -> Any
+        return self.represent_omap(u"tag:yaml.org,2002:omap", data)
+
+    def represent_set(self, data):
+        # type: (Any) -> Any
+        value = {}  # type: Dict[Any, None]
+        for key in data:
+            value[key] = None
+        return self.represent_mapping(u"tag:yaml.org,2002:set", value)
+
+    def represent_date(self, data):
+        # type: (Any) -> Any
+        value = to_unicode(data.isoformat())
+        return self.represent_scalar(u"tag:yaml.org,2002:timestamp", value)
+
+    def represent_datetime(self, data):
+        # type: (Any) -> Any
+        value = to_unicode(data.isoformat(" "))
+        return self.represent_scalar(u"tag:yaml.org,2002:timestamp", value)
+
+    def represent_yaml_object(self, tag, data, cls, flow_style=None):
+        # type: (Any, Any, Any, Any) -> Any
+        if hasattr(data, "__getstate__"):
+            state = data.__getstate__()
+        else:
+            state = data.__dict__.copy()
+        return self.represent_mapping(tag, state, flow_style=flow_style)
+
+    def represent_undefined(self, data):
+        # type: (Any) -> None
+        raise RepresenterError("cannot represent an object: %s" % (data,))
+
+
+SafeRepresenter.add_representer(type(None), SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str, SafeRepresenter.represent_str)
+
+if PY2:
+    SafeRepresenter.add_representer(unicode, SafeRepresenter.represent_unicode)
+else:
+    SafeRepresenter.add_representer(bytes, SafeRepresenter.represent_binary)
+
+SafeRepresenter.add_representer(bool, SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int, SafeRepresenter.represent_int)
+
+if PY2:
+    SafeRepresenter.add_representer(long, SafeRepresenter.represent_long)
+
+SafeRepresenter.add_representer(float, SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list, SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple, SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict, SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set, SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(ordereddict, SafeRepresenter.represent_ordereddict)
+
+if sys.version_info >= (2, 7):
+    import collections
+
+    SafeRepresenter.add_representer(
+        collections.OrderedDict, SafeRepresenter.represent_ordereddict
+    )
+
+SafeRepresenter.add_representer(datetime.date, SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime, SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None, SafeRepresenter.represent_undefined)
+
+
+class Representer(SafeRepresenter):
+    if PY2:
+
+        def represent_str(self, data):
+            # type: (Any) -> Any
+            tag = None
+            style = None
+            try:
+                data = unicode(data, "ascii")
+                tag = u"tag:yaml.org,2002:str"
+            except UnicodeDecodeError:
+                try:
+                    data = unicode(data, "utf-8")
+                    tag = u"tag:yaml.org,2002:python/str"
+                except UnicodeDecodeError:
+                    data = data.encode("base64")
+                    tag = u"tag:yaml.org,2002:binary"
+                    style = "|"
+            return self.represent_scalar(tag, data, style=style)
+
+        def represent_unicode(self, data):
+            # type: (Any) -> Any
+            tag = None
+            try:
+                data.encode("ascii")
+                tag = u"tag:yaml.org,2002:python/unicode"
+            except UnicodeEncodeError:
+                tag = u"tag:yaml.org,2002:str"
+            return self.represent_scalar(tag, data)
+
+        def represent_long(self, data):
+            # type: (Any) -> Any
+            tag = u"tag:yaml.org,2002:int"
+            if int(data) is not data:
+                tag = u"tag:yaml.org,2002:python/long"
+            return self.represent_scalar(tag, to_unicode(data))
+
+    def represent_complex(self, data):
+        # type: (Any) -> Any
+        if data.imag == 0.0:
+            data = u"%r" % data.real
+        elif data.real == 0.0:
+            data = u"%rj" % data.imag
+        elif data.imag > 0:
+            data = u"%r+%rj" % (data.real, data.imag)
+        else:
+            data = u"%r%rj" % (data.real, data.imag)
+        return self.represent_scalar(u"tag:yaml.org,2002:python/complex", data)
+
+    def represent_tuple(self, data):
+        # type: (Any) -> Any
+        return self.represent_sequence(u"tag:yaml.org,2002:python/tuple", data)
+
+    def represent_name(self, data):
+        # type: (Any) -> Any
+        try:
+            name = u"%s.%s" % (data.__module__, data.__qualname__)
+        except AttributeError:
+            # probably PY2
+            name = u"%s.%s" % (data.__module__, data.__name__)
+        return self.represent_scalar(u"tag:yaml.org,2002:python/name:" + name, "")
+
+    def represent_module(self, data):
+        # type: (Any) -> Any
+        return self.represent_scalar(
+            u"tag:yaml.org,2002:python/module:" + data.__name__, ""
+        )
+
+    if PY2:
+
+        def represent_instance(self, data):
+            # type: (Any) -> Any
+            # For instances of classic classes, we use __getinitargs__ and
+            # __getstate__ to serialize the data.
+
+            # If data.__getinitargs__ exists, the object must be reconstructed
+            # by calling cls(**args), where args is a tuple returned by
+            # __getinitargs__. Otherwise, the cls.__init__ method should never
+            # be called and the class instance is created by instantiating a
+            # trivial class and assigning to the instance's __class__ variable.
+
+            # If data.__getstate__ exists, it returns the state of the object.
+            # Otherwise, the state of the object is data.__dict__.
+
+            # We produce either a !!python/object or !!python/object/new node.
+            # If data.__getinitargs__ does not exist and state is a dictionary,
+            # we produce a !!python/object node. Otherwise we produce a
+            # !!python/object/new node.
+
+            cls = data.__class__
+            class_name = u"%s.%s" % (cls.__module__, cls.__name__)
+            args = None
+            state = None
+            if hasattr(data, "__getinitargs__"):
+                args = list(data.__getinitargs__())
+            if hasattr(data, "__getstate__"):
+                state = data.__getstate__()
+            else:
+                state = data.__dict__
+            if args is None and isinstance(state, dict):
+                return self.represent_mapping(
+                    u"tag:yaml.org,2002:python/object:" + class_name, state
+                )
+            if isinstance(state, dict) and not state:
+                return self.represent_sequence(
+                    u"tag:yaml.org,2002:python/object/new:" + class_name, args
+                )
+            value = {}
+            if bool(args):
+                value["args"] = args
+            value["state"] = state  # type: ignore
+            return self.represent_mapping(
+                u"tag:yaml.org,2002:python/object/new:" + class_name, value
+            )
+
+    def represent_object(self, data):
+        # type: (Any) -> Any
+        # We use __reduce__ API to save the data. data.__reduce__ returns
+        # a tuple of length 2-5:
+        #   (function, args, state, listitems, dictitems)
+
+        # For reconstructing, we call function(*args), then set its state,
+        # listitems, and dictitems if they are not None.
+
+        # A special case is when function.__name__ == '__newobj__'. In this
+        # case we create the object with args[0].__new__(*args).
+
+        # Another special case is when __reduce__ returns a string - we don't
+        # support it.
+
+        # We produce a !!python/object, !!python/object/new or
+        # !!python/object/apply node.
+
+        cls = type(data)
+        if cls in copyreg.dispatch_table:
+            reduce = copyreg.dispatch_table[cls](data)
+        elif hasattr(data, "__reduce_ex__"):
+            reduce = data.__reduce_ex__(2)
+        elif hasattr(data, "__reduce__"):
+            reduce = data.__reduce__()
+        else:
+            raise RepresenterError("cannot represent object: %r" % (data,))
+        reduce = (list(reduce) + [None] * 5)[:5]
+        function, args, state, listitems, dictitems = reduce
+        args = list(args)
+        if state is None:
+            state = {}
+        if listitems is not None:
+            listitems = list(listitems)
+        if dictitems is not None:
+            dictitems = dict(dictitems)
+        if function.__name__ == "__newobj__":
+            function = args[0]
+            args = args[1:]
+            tag = u"tag:yaml.org,2002:python/object/new:"
+            newobj = True
+        else:
+            tag = u"tag:yaml.org,2002:python/object/apply:"
+            newobj = False
+        try:
+            function_name = u"%s.%s" % (function.__module__, function.__qualname__)
+        except AttributeError:
+            # probably PY2
+            function_name = u"%s.%s" % (function.__module__, function.__name__)
+        if (
+            not args
+            and not listitems
+            and not dictitems
+            and isinstance(state, dict)
+            and newobj
+        ):
+            return self.represent_mapping(
+                u"tag:yaml.org,2002:python/object:" + function_name, state
+            )
+        if not listitems and not dictitems and isinstance(state, dict) and not state:
+            return self.represent_sequence(tag + function_name, args)
+        value = {}
+        if args:
+            value["args"] = args
+        if state or not isinstance(state, dict):
+            value["state"] = state
+        if listitems:
+            value["listitems"] = listitems
+        if dictitems:
+            value["dictitems"] = dictitems
+        return self.represent_mapping(tag + function_name, value)
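+    # Illustrative sketch of the reduce tuples consumed above (Point is an
+    # assumed example class; pickle protocol 2 reduces via copyreg.__newobj__,
+    # with the remaining slots carrying state, listitems and dictitems):
+    #
+    #     >>> class Point(object):
+    #     ...     def __init__(self, x, y):
+    #     ...         self.x, self.y = x, y
+    #     >>> func, args = Point(1, 2).__reduce_ex__(2)[:2]
+    #     >>> func.__name__
+    #     '__newobj__'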
+
+
+if PY2:
+    Representer.add_representer(str, Representer.represent_str)
+
+    Representer.add_representer(unicode, Representer.represent_unicode)
+
+    Representer.add_representer(long, Representer.represent_long)
+
+Representer.add_representer(complex, Representer.represent_complex)
+
+Representer.add_representer(tuple, Representer.represent_tuple)
+
+Representer.add_representer(type, Representer.represent_name)
+
+if PY2:
+    Representer.add_representer(types.ClassType, Representer.represent_name)
+
+Representer.add_representer(types.FunctionType, Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType, Representer.represent_name)
+
+Representer.add_representer(types.ModuleType, Representer.represent_module)
+
+if PY2:
+    Representer.add_multi_representer(
+        types.InstanceType, Representer.represent_instance
+    )
+
+Representer.add_multi_representer(object, Representer.represent_object)
+
+Representer.add_multi_representer(type, Representer.represent_name)
+
+
+class RoundTripRepresenter(SafeRepresenter):
+    # need to add type here and write out the .comment
+    # in serializer and emitter
+
+    def __init__(self, default_style=None, default_flow_style=None, dumper=None):
+        # type: (Any, Any, Any) -> None
+        if not hasattr(dumper, "typ") and default_flow_style is None:
+            default_flow_style = False
+        SafeRepresenter.__init__(
+            self,
+            default_style=default_style,
+            default_flow_style=default_flow_style,
+            dumper=dumper,
+        )
+
+    def ignore_aliases(self, data):
+        # type: (Any) -> bool
+        try:
+            if data.anchor is not None and data.anchor.value is not None:
+                return False
+        except AttributeError:
+            pass
+        return SafeRepresenter.ignore_aliases(self, data)
+
+    def represent_none(self, data):
+        # type: (Any) -> Any
+        if (
+            len(self.represented_objects) == 0
+            and not self.serializer.use_explicit_start
+        ):
+            # this will be open ended (although it is not yet)
+            return self.represent_scalar(u"tag:yaml.org,2002:null", u"null")
+        return self.represent_scalar(u"tag:yaml.org,2002:null", "")
+
+    def represent_literal_scalarstring(self, data):
+        # type: (Any) -> Any
+        tag = None
+        style = "|"
+        anchor = data.yaml_anchor(any=True)
+        if PY2 and not isinstance(data, unicode):
+            data = unicode(data, "ascii")
+        tag = u"tag:yaml.org,2002:str"
+        return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+    represent_preserved_scalarstring = represent_literal_scalarstring
+
+    def represent_folded_scalarstring(self, data):
+        # type: (Any) -> Any
+        tag = None
+        style = ">"
+        anchor = data.yaml_anchor(any=True)
+        for fold_pos in reversed(getattr(data, "fold_pos", [])):
+            if (
+                data[fold_pos] == " "
+                and (fold_pos > 0 and not data[fold_pos - 1].isspace())
+                and (fold_pos < len(data) - 1 and not data[fold_pos + 1].isspace())
+            ):
+                data = data[:fold_pos] + "\a" + data[fold_pos:]
+        if PY2 and not isinstance(data, unicode):
+            data = unicode(data, "ascii")
+        tag = u"tag:yaml.org,2002:str"
+        return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+    def represent_single_quoted_scalarstring(self, data):
+        # type: (Any) -> Any
+        tag = None
+        style = "'"
+        anchor = data.yaml_anchor(any=True)
+        if PY2 and not isinstance(data, unicode):
+            data = unicode(data, "ascii")
+        tag = u"tag:yaml.org,2002:str"
+        return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+    def represent_double_quoted_scalarstring(self, data):
+        # type: (Any) -> Any
+        tag = None
+        style = '"'
+        anchor = data.yaml_anchor(any=True)
+        if PY2 and not isinstance(data, unicode):
+            data = unicode(data, "ascii")
+        tag = u"tag:yaml.org,2002:str"
+        return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+    def represent_plain_scalarstring(self, data):
+        # type: (Any) -> Any
+        tag = None
+        style = ""
+        anchor = data.yaml_anchor(any=True)
+        if PY2 and not isinstance(data, unicode):
+            data = unicode(data, "ascii")
+        tag = u"tag:yaml.org,2002:str"
+        return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+    def insert_underscore(self, prefix, s, underscore, anchor=None):
+        # type: (Any, Any, Any, Any) -> Any
+        if underscore is None:
+            return self.represent_scalar(
+                u"tag:yaml.org,2002:int", prefix + s, anchor=anchor
+            )
+        if underscore[0]:
+            sl = list(s)
+            pos = len(s) - underscore[0]
+            while pos > 0:
+                sl.insert(pos, "_")
+                pos -= underscore[0]
+            s = "".join(sl)
+        if underscore[1]:
+            s = "_" + s
+        if underscore[2]:
+            s += "_"
+        return self.represent_scalar(
+            u"tag:yaml.org,2002:int", prefix + s, anchor=anchor
+        )
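+    # Illustrative sketch, assuming the (gap, leading, trailing) layout of
+    # ScalarInt._underscore: with underscore == [3, False, False] the loop
+    # above regroups digits every three characters from the right, so
+    # '1000000' comes back out as '1_000_000'.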
+
+    def represent_scalar_int(self, data):
+        # type: (Any) -> Any
+        if data._width is not None:
+            s = "{:0{}d}".format(data, data._width)
+        else:
+            s = format(data, "d")
+        anchor = data.yaml_anchor(any=True)
+        return self.insert_underscore("", s, data._underscore, anchor=anchor)
+
+    def represent_binary_int(self, data):
+        # type: (Any) -> Any
+        if data._width is not None:
+            # cannot use '{:#0{}b}', that strips the zeros
+            s = "{:0{}b}".format(data, data._width)
+        else:
+            s = format(data, "b")
+        anchor = data.yaml_anchor(any=True)
+        return self.insert_underscore("0b", s, data._underscore, anchor=anchor)
+
+    def represent_octal_int(self, data):
+        # type: (Any) -> Any
+        if data._width is not None:
+            # cannot use '{:#0{}o}', that strips the zeros
+            s = "{:0{}o}".format(data, data._width)
+        else:
+            s = format(data, "o")
+        anchor = data.yaml_anchor(any=True)
+        return self.insert_underscore("0o", s, data._underscore, anchor=anchor)
+
+    def represent_hex_int(self, data):
+        # type: (Any) -> Any
+        if data._width is not None:
+            # cannot use '{:#0{}x}', that strips the zeros
+            s = "{:0{}x}".format(data, data._width)
+        else:
+            s = format(data, "x")
+        anchor = data.yaml_anchor(any=True)
+        return self.insert_underscore("0x", s, data._underscore, anchor=anchor)
+
+    def represent_hex_caps_int(self, data):
+        # type: (Any) -> Any
+        if data._width is not None:
+            # cannot use '{:#0{}X}', that strips the zeros
+            s = "{:0{}X}".format(data, data._width)
+        else:
+            s = format(data, "X")
+        anchor = data.yaml_anchor(any=True)
+        return self.insert_underscore("0x", s, data._underscore, anchor=anchor)
+
+    def represent_scalar_float(self, data):
+        # type: (Any) -> Any
+        """ this is way more complicated """
+        value = None
+        anchor = data.yaml_anchor(any=True)
+        if data != data or (data == 0.0 and data == 1.0):
+            value = u".nan"
+        elif data == self.inf_value:
+            value = u".inf"
+        elif data == -self.inf_value:
+            value = u"-.inf"
+        if value:
+            return self.represent_scalar(
+                u"tag:yaml.org,2002:float", value, anchor=anchor
+            )
+        if data._exp is None and data._prec > 0 and data._prec == data._width - 1:
+            # no exponent, but trailing dot
+            value = u"{}{:d}.".format(
+                data._m_sign if data._m_sign else "", abs(int(data))
+            )
+        elif data._exp is None:
+            # no exponent, "normal" dot
+            prec = data._prec
+            ms = data._m_sign if data._m_sign else ""
+            # -1 for the dot
+            value = u"{}{:0{}.{}f}".format(
+                ms, abs(data), data._width - len(ms), data._width - prec - 1
+            )
+            if prec == 0 or (prec == 1 and ms != ""):
+                value = value.replace(u"0.", u".")
+            while len(value) < data._width:
+                value += u"0"
+        else:
+            # exponent
+            m, es = u"{:{}.{}e}".format(
+                # data, data._width, data._width - data._prec + (1 if data._m_sign else 0)
+                data,
+                data._width,
+                data._width + (1 if data._m_sign else 0),
+            ).split("e")
+            w = data._width if data._prec > 0 else (data._width + 1)
+            if data < 0:
+                w += 1
+            m = m[:w]
+            e = int(es)
+            m1, m2 = m.split(".")  # always second?
+            while len(m1) + len(m2) < data._width - (1 if data._prec >= 0 else 0):
+                m2 += u"0"
+            if data._m_sign and data > 0:
+                m1 = "+" + m1
+            esgn = u"+" if data._e_sign else ""
+            if data._prec < 0:  # mantissa without dot
+                if m2 != u"0":
+                    e -= len(m2)
+                else:
+                    m2 = ""
+                while (len(m1) + len(m2) - (1 if data._m_sign else 0)) < data._width:
+                    m2 += u"0"
+                    e -= 1
+                value = (
+                    m1 + m2 + data._exp + u"{:{}0{}d}".format(e, esgn, data._e_width)
+                )
+            elif data._prec == 0:  # mantissa with trailing dot
+                e -= len(m2)
+                value = (
+                    m1
+                    + m2
+                    + u"."
+                    + data._exp
+                    + u"{:{}0{}d}".format(e, esgn, data._e_width)
+                )
+            else:
+                if data._m_lead0 > 0:
+                    m2 = u"0" * (data._m_lead0 - 1) + m1 + m2
+                    m1 = u"0"
+                    m2 = m2[: -data._m_lead0]  # these should be zeros
+                    e += data._m_lead0
+                while len(m1) < data._prec:
+                    m1 += m2[0]
+                    m2 = m2[1:]
+                    e -= 1
+                value = (
+                    m1
+                    + u"."
+                    + m2
+                    + data._exp
+                    + u"{:{}0{}d}".format(e, esgn, data._e_width)
+                )
+
+        if value is None:
+            value = to_unicode(repr(data)).lower()
+        return self.represent_scalar(u"tag:yaml.org,2002:float", value, anchor=anchor)
+
+    def represent_sequence(self, tag, sequence, flow_style=None):
+        # type: (Any, Any, Any) -> Any
+        value = []  # type: List[Any]
+        # if flow_style is None, the flow style explicitly attached to the
+        # object is used; if that is also None, the default flow style
+        # applies
+        try:
+            flow_style = sequence.fa.flow_style(flow_style)
+        except AttributeError:
+            pass
+        try:
+            anchor = sequence.yaml_anchor()
+        except AttributeError:
+            anchor = None
+        node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        try:
+            comment = getattr(sequence, comment_attrib)
+            node.comment = comment.comment
+            # reset any comment already printed information
+            if node.comment and node.comment[1]:
+                for ct in node.comment[1]:
+                    ct.reset()
+            item_comments = comment.items
+            for v in item_comments.values():
+                if v and v[1]:
+                    for ct in v[1]:
+                        ct.reset()
+            item_comments = comment.items
+            node.comment = comment.comment
+            try:
+                node.comment.append(comment.end)
+            except AttributeError:
+                pass
+        except AttributeError:
+            item_comments = {}
+        for idx, item in enumerate(sequence):
+            node_item = self.represent_data(item)
+            self.merge_comments(node_item, item_comments.get(idx))
+            if not (isinstance(node_item, ScalarNode) and not node_item.style):
+                best_style = False
+            value.append(node_item)
+        if flow_style is None:
+            if len(sequence) != 0 and self.default_flow_style is not None:
+                node.flow_style = self.default_flow_style
+            else:
+                node.flow_style = best_style
+        return node
+
+    def merge_comments(self, node, comments):
+        # type: (Any, Any) -> Any
+        if comments is None:
+            assert hasattr(node, "comment")
+            return node
+        if getattr(node, "comment", None) is not None:
+            for idx, val in enumerate(comments):
+                if idx >= len(node.comment):
+                    continue
+                nc = node.comment[idx]
+                if nc is not None:
+                    assert val is None or val == nc
+                    comments[idx] = nc
+        node.comment = comments
+        return node
+
+    def represent_key(self, data):
+        # type: (Any) -> Any
+        if isinstance(data, CommentedKeySeq):
+            self.alias_key = None
+            return self.represent_sequence(
+                u"tag:yaml.org,2002:seq", data, flow_style=True
+            )
+        if isinstance(data, CommentedKeyMap):
+            self.alias_key = None
+            return self.represent_mapping(
+                u"tag:yaml.org,2002:map", data, flow_style=True
+            )
+        return SafeRepresenter.represent_key(self, data)
+
+    def represent_mapping(self, tag, mapping, flow_style=None):
+        # type: (Any, Any, Any) -> Any
+        value = []  # type: List[Any]
+        try:
+            flow_style = mapping.fa.flow_style(flow_style)
+        except AttributeError:
+            pass
+        try:
+            anchor = mapping.yaml_anchor()
+        except AttributeError:
+            anchor = None
+        node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        # no sorting!
+        try:
+            comment = getattr(mapping, comment_attrib)
+            node.comment = comment.comment
+            if node.comment and node.comment[1]:
+                for ct in node.comment[1]:
+                    ct.reset()
+            item_comments = comment.items
+            for v in item_comments.values():
+                if v and v[1]:
+                    for ct in v[1]:
+                        ct.reset()
+            try:
+                node.comment.append(comment.end)
+            except AttributeError:
+                pass
+        except AttributeError:
+            item_comments = {}
+        merge_list = [m[1] for m in getattr(mapping, merge_attrib, [])]
+        try:
+            merge_pos = getattr(mapping, merge_attrib, [[0]])[0][0]
+        except IndexError:
+            merge_pos = 0
+        item_count = 0
+        if bool(merge_list):
+            items = mapping.non_merged_items()
+        else:
+            items = mapping.items()
+        for item_key, item_value in items:
+            item_count += 1
+            node_key = self.represent_key(item_key)
+            node_value = self.represent_data(item_value)
+            item_comment = item_comments.get(item_key)
+            if item_comment:
+                assert getattr(node_key, "comment", None) is None
+                node_key.comment = item_comment[:2]
+                nvc = getattr(node_value, "comment", None)
+                if nvc is not None:  # end comment already there
+                    nvc[0] = item_comment[2]
+                    nvc[1] = item_comment[3]
+                else:
+                    node_value.comment = item_comment[2:]
+            if not (isinstance(node_key, ScalarNode) and not node_key.style):
+                best_style = False
+            if not (isinstance(node_value, ScalarNode) and not node_value.style):
+                best_style = False
+            value.append((node_key, node_value))
+        if flow_style is None:
+            if (
+                (item_count != 0) or bool(merge_list)
+            ) and self.default_flow_style is not None:
+                node.flow_style = self.default_flow_style
+            else:
+                node.flow_style = best_style
+        if bool(merge_list):
+            # because of the call to represent_data here, the anchors
+            # are marked as being used and thereby created
+            if len(merge_list) == 1:
+                arg = self.represent_data(merge_list[0])
+            else:
+                arg = self.represent_data(merge_list)
+                arg.flow_style = True
+            value.insert(merge_pos, (ScalarNode(u"tag:yaml.org,2002:merge", "<<"), arg))
+        return node
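+    # A minimal round-trip sketch of the merge handling above (illustrative
+    # YAML, assuming a round-trip load/dump cycle):
+    #
+    #     base: &b {x: 1}
+    #     derived:
+    #       <<: *b
+    #       y: 2
+    #
+    # Re-dumping keeps the '<<' entry at merge_pos instead of flattening the
+    # merged keys into 'derived'.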
+
+    def represent_omap(self, tag, omap, flow_style=None):
+        # type: (Any, Any, Any) -> Any
+        value = []  # type: List[Any]
+        try:
+            flow_style = omap.fa.flow_style(flow_style)
+        except AttributeError:
+            pass
+        try:
+            anchor = omap.yaml_anchor()
+        except AttributeError:
+            anchor = None
+        node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        try:
+            comment = getattr(omap, comment_attrib)
+            node.comment = comment.comment
+            if node.comment and node.comment[1]:
+                for ct in node.comment[1]:
+                    ct.reset()
+            item_comments = comment.items
+            for v in item_comments.values():
+                if v and v[1]:
+                    for ct in v[1]:
+                        ct.reset()
+            try:
+                node.comment.append(comment.end)
+            except AttributeError:
+                pass
+        except AttributeError:
+            item_comments = {}
+        for item_key in omap:
+            item_val = omap[item_key]
+            node_item = self.represent_data({item_key: item_val})
+            # node_item.flow_style = False
+            # node item has two scalars in value: node_key and node_value
+            item_comment = item_comments.get(item_key)
+            if item_comment:
+                if item_comment[1]:
+                    node_item.comment = [None, item_comment[1]]
+                assert getattr(node_item.value[0][0], "comment", None) is None
+                node_item.value[0][0].comment = [item_comment[0], None]
+                nvc = getattr(node_item.value[0][1], "comment", None)
+                if nvc is not None:  # end comment already there
+                    nvc[0] = item_comment[2]
+                    nvc[1] = item_comment[3]
+                else:
+                    node_item.value[0][1].comment = item_comment[2:]
+            # if not (isinstance(node_item, ScalarNode) \
+            #    and not node_item.style):
+            #     best_style = False
+            value.append(node_item)
+        if flow_style is None:
+            if self.default_flow_style is not None:
+                node.flow_style = self.default_flow_style
+            else:
+                node.flow_style = best_style
+        return node
+
+    def represent_set(self, setting):
+        # type: (Any) -> Any
+        flow_style = False
+        tag = u"tag:yaml.org,2002:set"
+        # return self.represent_mapping(tag, value)
+        value = []  # type: List[Any]
+        flow_style = setting.fa.flow_style(flow_style)
+        try:
+            anchor = setting.yaml_anchor()
+        except AttributeError:
+            anchor = None
+        node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        # no sorting!
+        try:
+            comment = getattr(setting, comment_attrib)
+            node.comment = comment.comment
+            if node.comment and node.comment[1]:
+                for ct in node.comment[1]:
+                    ct.reset()
+            item_comments = comment.items
+            for v in item_comments.values():
+                if v and v[1]:
+                    for ct in v[1]:
+                        ct.reset()
+            try:
+                node.comment.append(comment.end)
+            except AttributeError:
+                pass
+        except AttributeError:
+            item_comments = {}
+        for item_key in setting.odict:
+            node_key = self.represent_key(item_key)
+            node_value = self.represent_data(None)
+            item_comment = item_comments.get(item_key)
+            if item_comment:
+                assert getattr(node_key, "comment", None) is None
+                node_key.comment = item_comment[:2]
+            node_key.style = node_value.style = "?"
+            if not (isinstance(node_key, ScalarNode) and not node_key.style):
+                best_style = False
+            if not (isinstance(node_value, ScalarNode) and not node_value.style):
+                best_style = False
+            value.append((node_key, node_value))
+        return node
+
+    def represent_dict(self, data):
+        # type: (Any) -> Any
+        """write out tag if saved on loading"""
+        try:
+            t = data.tag.value
+        except AttributeError:
+            t = None
+        if t:
+            if t.startswith("!!"):
+                tag = "tag:yaml.org,2002:" + t[2:]
+            else:
+                tag = t
+        else:
+            tag = u"tag:yaml.org,2002:map"
+        return self.represent_mapping(tag, data)
+
+    def represent_list(self, data):
+        # type: (Any) -> Any
+        try:
+            t = data.tag.value
+        except AttributeError:
+            t = None
+        if t:
+            if t.startswith("!!"):
+                tag = "tag:yaml.org,2002:" + t[2:]
+            else:
+                tag = t
+        else:
+            tag = u"tag:yaml.org,2002:seq"
+        return self.represent_sequence(tag, data)
+
+    def represent_datetime(self, data):
+        # type: (Any) -> Any
+        inter = "T" if data._yaml["t"] else " "
+        _yaml = data._yaml
+        if _yaml["delta"]:
+            data += _yaml["delta"]
+            value = data.isoformat(inter)
+        else:
+            value = data.isoformat(inter)
+        if _yaml["tz"]:
+            value += _yaml["tz"]
+        return self.represent_scalar(u"tag:yaml.org,2002:timestamp", to_unicode(value))
+
+    def represent_tagged_scalar(self, data):
+        # type: (Any) -> Any
+        try:
+            tag = data.tag.value
+        except AttributeError:
+            tag = None
+        try:
+            anchor = data.yaml_anchor()
+        except AttributeError:
+            anchor = None
+        return self.represent_scalar(tag, data.value, style=data.style, anchor=anchor)
+
+    def represent_scalar_bool(self, data):
+        # type: (Any) -> Any
+        try:
+            anchor = data.yaml_anchor()
+        except AttributeError:
+            anchor = None
+        return SafeRepresenter.represent_bool(self, data, anchor=anchor)
+
+
+RoundTripRepresenter.add_representer(type(None), RoundTripRepresenter.represent_none)
+
+RoundTripRepresenter.add_representer(
+    LiteralScalarString, RoundTripRepresenter.represent_literal_scalarstring
+)
+
+RoundTripRepresenter.add_representer(
+    FoldedScalarString, RoundTripRepresenter.represent_folded_scalarstring
+)
+
+RoundTripRepresenter.add_representer(
+    SingleQuotedScalarString, RoundTripRepresenter.represent_single_quoted_scalarstring
+)
+
+RoundTripRepresenter.add_representer(
+    DoubleQuotedScalarString, RoundTripRepresenter.represent_double_quoted_scalarstring
+)
+
+RoundTripRepresenter.add_representer(
+    PlainScalarString, RoundTripRepresenter.represent_plain_scalarstring
+)
+
+# RoundTripRepresenter.add_representer(tuple, Representer.represent_tuple)
+
+RoundTripRepresenter.add_representer(
+    ScalarInt, RoundTripRepresenter.represent_scalar_int
+)
+
+RoundTripRepresenter.add_representer(
+    BinaryInt, RoundTripRepresenter.represent_binary_int
+)
+
+RoundTripRepresenter.add_representer(OctalInt, RoundTripRepresenter.represent_octal_int)
+
+RoundTripRepresenter.add_representer(HexInt, RoundTripRepresenter.represent_hex_int)
+
+RoundTripRepresenter.add_representer(
+    HexCapsInt, RoundTripRepresenter.represent_hex_caps_int
+)
+
+RoundTripRepresenter.add_representer(
+    ScalarFloat, RoundTripRepresenter.represent_scalar_float
+)
+
+RoundTripRepresenter.add_representer(
+    ScalarBoolean, RoundTripRepresenter.represent_scalar_bool
+)
+
+RoundTripRepresenter.add_representer(CommentedSeq, RoundTripRepresenter.represent_list)
+
+RoundTripRepresenter.add_representer(CommentedMap, RoundTripRepresenter.represent_dict)
+
+RoundTripRepresenter.add_representer(
+    CommentedOrderedMap, RoundTripRepresenter.represent_ordereddict
+)
+
+if sys.version_info >= (2, 7):
+    import collections
+
+    RoundTripRepresenter.add_representer(
+        collections.OrderedDict, RoundTripRepresenter.represent_ordereddict
+    )
+
+RoundTripRepresenter.add_representer(CommentedSet, RoundTripRepresenter.represent_set)
+
+RoundTripRepresenter.add_representer(
+    TaggedScalar, RoundTripRepresenter.represent_tagged_scalar
+)
+
+RoundTripRepresenter.add_representer(TimeStamp, RoundTripRepresenter.represent_datetime)
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/resolver.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/resolver.py
new file mode 100644
index 00000000..c21d45af
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/resolver.py
@@ -0,0 +1,410 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+import re
+
+if False:  # MYPY
+    from typing import Any, Dict, List, Union, Text, Optional  # NOQA
+    from strictyaml.ruamel.compat import VersionType  # NOQA
+
+from strictyaml.ruamel.compat import string_types, _DEFAULT_YAML_VERSION  # NOQA
+from strictyaml.ruamel.error import *  # NOQA
+from strictyaml.ruamel.nodes import MappingNode, ScalarNode, SequenceNode  # NOQA
+from strictyaml.ruamel.util import RegExp  # NOQA
+
+__all__ = ["BaseResolver", "Resolver", "VersionedResolver"]
+
+
+# fmt: off
+# resolvers consist of
+# - a list of applicable versions
+# - a tag
+# - a regexp
+# - a list of first characters to match
+implicit_resolvers = [
+    ([(1, 2)],
+        u'tag:yaml.org,2002:bool',
+        RegExp(u'''^(?:true|True|TRUE|false|False|FALSE)$''', re.X),
+        list(u'tTfF')),
+    ([(1, 1)],
+        u'tag:yaml.org,2002:bool',
+        RegExp(u'''^(?:y|Y|yes|Yes|YES|n|N|no|No|NO
+        |true|True|TRUE|false|False|FALSE
+        |on|On|ON|off|Off|OFF)$''', re.X),
+        list(u'yYnNtTfFoO')),
+    ([(1, 2)],
+        u'tag:yaml.org,2002:float',
+        RegExp(u'''^(?:
+         [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
+        |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
+        |[-+]?\\.[0-9_]+(?:[eE][-+][0-9]+)?
+        |[-+]?\\.(?:inf|Inf|INF)
+        |\\.(?:nan|NaN|NAN))$''', re.X),
+        list(u'-+0123456789.')),
+    ([(1, 1)],
+        u'tag:yaml.org,2002:float',
+        RegExp(u'''^(?:
+         [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
+        |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
+        |\\.[0-9_]+(?:[eE][-+][0-9]+)?
+        |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*  # sexagesimal float
+        |[-+]?\\.(?:inf|Inf|INF)
+        |\\.(?:nan|NaN|NAN))$''', re.X),
+        list(u'-+0123456789.')),
+    ([(1, 2)],
+        u'tag:yaml.org,2002:int',
+        RegExp(u'''^(?:[-+]?0b[0-1_]+
+        |[-+]?0o?[0-7_]+
+        |[-+]?[0-9_]+
+        |[-+]?0x[0-9a-fA-F_]+)$''', re.X),
+        list(u'-+0123456789')),
+    ([(1, 1)],
+        u'tag:yaml.org,2002:int',
+        RegExp(u'''^(?:[-+]?0b[0-1_]+
+        |[-+]?0?[0-7_]+
+        |[-+]?(?:0|[1-9][0-9_]*)
+        |[-+]?0x[0-9a-fA-F_]+
+        |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),  # sexagesimal int
+        list(u'-+0123456789')),
+    ([(1, 2), (1, 1)],
+        u'tag:yaml.org,2002:merge',
+        RegExp(u'^(?:<<)$'),
+        [u'<']),
+    ([(1, 2), (1, 1)],
+        u'tag:yaml.org,2002:null',
+        RegExp(u'''^(?: ~
+        |null|Null|NULL
+        | )$''', re.X),
+        [u'~', u'n', u'N', u'']),
+    ([(1, 2), (1, 1)],
+        u'tag:yaml.org,2002:timestamp',
+        RegExp(u'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+        |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+        (?:[Tt]|[ \\t]+)[0-9][0-9]?
+        :[0-9][0-9] :[0-9][0-9] (?:\\.[0-9]*)?
+        (?:[ \\t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+        list(u'0123456789')),
+    ([(1, 2), (1, 1)],
+        u'tag:yaml.org,2002:value',
+        RegExp(u'^(?:=)$'),
+        [u'=']),
+    # The following resolver is only for documentation purposes. It cannot work
+    # because plain scalars cannot start with '!', '&', or '*'.
+    ([(1, 2), (1, 1)],
+        u'tag:yaml.org,2002:yaml',
+        RegExp(u'^(?:!|&|\\*)$'),
+        list(u'!&*')),
+]
+# fmt: on
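+# Version-dependent resolution in practice (illustrative): the plain scalar
+# 'yes' matches the YAML 1.1 bool pattern above but not the 1.2 one, so
+#
+#     under version (1, 1):  yes  ->  tag:yaml.org,2002:bool
+#     under version (1, 2):  yes  ->  tag:yaml.org,2002:str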
+
+
+class ResolverError(YAMLError):
+    pass
+
+
+class BaseResolver(object):
+
+    DEFAULT_SCALAR_TAG = u"tag:yaml.org,2002:str"
+    DEFAULT_SEQUENCE_TAG = u"tag:yaml.org,2002:seq"
+    DEFAULT_MAPPING_TAG = u"tag:yaml.org,2002:map"
+
+    yaml_implicit_resolvers = {}  # type: Dict[Any, Any]
+    yaml_path_resolvers = {}  # type: Dict[Any, Any]
+
+    def __init__(self, loadumper=None):
+        # type: (Any) -> None
+        self.loadumper = loadumper
+        if (
+            self.loadumper is not None
+            and getattr(self.loadumper, "_resolver", None) is None
+        ):
+            self.loadumper._resolver = self.loadumper
+        self._loader_version = None  # type: Any
+        self.resolver_exact_paths = []  # type: List[Any]
+        self.resolver_prefix_paths = []  # type: List[Any]
+
+    @property
+    def parser(self):
+        # type: () -> Any
+        if self.loadumper is not None:
+            if hasattr(self.loadumper, "typ"):
+                return self.loadumper.parser
+            return self.loadumper._parser
+        return None
+
+    @classmethod
+    def add_implicit_resolver_base(cls, tag, regexp, first):
+        # type: (Any, Any, Any) -> None
+        if "yaml_implicit_resolvers" not in cls.__dict__:
+            # deepcopy doesn't work here
+            cls.yaml_implicit_resolvers = dict(
+                (k, cls.yaml_implicit_resolvers[k][:])
+                for k in cls.yaml_implicit_resolvers
+            )
+        if first is None:
+            first = [None]
+        for ch in first:
+            cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+
+    @classmethod
+    def add_implicit_resolver(cls, tag, regexp, first):
+        # type: (Any, Any, Any) -> None
+        if "yaml_implicit_resolvers" not in cls.__dict__:
+            # deepcopy doesn't work here
+            cls.yaml_implicit_resolvers = dict(
+                (k, cls.yaml_implicit_resolvers[k][:])
+                for k in cls.yaml_implicit_resolvers
+            )
+        if first is None:
+            first = [None]
+        for ch in first:
+            cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+        implicit_resolvers.append(([(1, 2), (1, 1)], tag, regexp, first))
+
+    @classmethod
+    def add_path_resolver(cls, tag, path, kind=None):
+        # type: (Any, Any, Any) -> None
+        # Note: `add_path_resolver` is experimental.  The API could be changed.
+        # `path` is a pattern that is matched against the path from the
+        # root to the node that is being considered.  `path` elements are
+        # tuples `(node_check, index_check)`.  `node_check` is a node class:
+        # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`.  `None`
+        # matches any kind of node.  `index_check` can be `None`, a boolean
+        # value, a string value, or a number.  `None` and `False` match
+        # any _value_ of sequence and mapping nodes.  `True` matches
+        # any _key_ of a mapping node.  A string `index_check` matches a
+        # mapping value whose corresponding scalar key has content equal
+        # to the `index_check` value.  An integer `index_check` matches a
+        # sequence value whose index equals `index_check`.
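+        #
+        # Illustrative sketch (tag and path invented for this example):
+        #     Resolver.add_path_resolver(u'!employee',
+        #                                [(dict, 'employees'), (list, None)],
+        #                                dict)
+        # resolves every mapping item of the sequence under a top-level
+        # 'employees' key to the '!employee' tag.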
+        if "yaml_path_resolvers" not in cls.__dict__:
+            cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+        new_path = []  # type: List[Any]
+        for element in path:
+            if isinstance(element, (list, tuple)):
+                if len(element) == 2:
+                    node_check, index_check = element
+                elif len(element) == 1:
+                    node_check = element[0]
+                    index_check = True
+                else:
+                    raise ResolverError("Invalid path element: %s" % (element,))
+            else:
+                node_check = None
+                index_check = element
+            if node_check is str:
+                node_check = ScalarNode
+            elif node_check is list:
+                node_check = SequenceNode
+            elif node_check is dict:
+                node_check = MappingNode
+            elif (
+                node_check not in [ScalarNode, SequenceNode, MappingNode]
+                and not isinstance(node_check, string_types)
+                and node_check is not None
+            ):
+                raise ResolverError("Invalid node checker: %s" % (node_check,))
+            if (
+                not isinstance(index_check, (string_types, int))
+                and index_check is not None
+            ):
+                raise ResolverError("Invalid index checker: %s" % (index_check,))
+            new_path.append((node_check, index_check))
+        if kind is str:
+            kind = ScalarNode
+        elif kind is list:
+            kind = SequenceNode
+        elif kind is dict:
+            kind = MappingNode
+        elif kind not in [ScalarNode, SequenceNode, MappingNode] and kind is not None:
+            raise ResolverError("Invalid node kind: %s" % (kind,))
+        cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+
+    def descend_resolver(self, current_node, current_index):
+        # type: (Any, Any) -> None
+        if not self.yaml_path_resolvers:
+            return
+        exact_paths = {}
+        prefix_paths = []
+        if current_node:
+            depth = len(self.resolver_prefix_paths)
+            for path, kind in self.resolver_prefix_paths[-1]:
+                if self.check_resolver_prefix(
+                    depth, path, kind, current_node, current_index
+                ):
+                    if len(path) > depth:
+                        prefix_paths.append((path, kind))
+                    else:
+                        exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+        else:
+            for path, kind in self.yaml_path_resolvers:
+                if not path:
+                    exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+                else:
+                    prefix_paths.append((path, kind))
+        self.resolver_exact_paths.append(exact_paths)
+        self.resolver_prefix_paths.append(prefix_paths)
+
+    def ascend_resolver(self):
+        # type: () -> None
+        if not self.yaml_path_resolvers:
+            return
+        self.resolver_exact_paths.pop()
+        self.resolver_prefix_paths.pop()
+
+    def check_resolver_prefix(self, depth, path, kind, current_node, current_index):
+        # type: (int, Text, Any, Any, Any) -> bool
+        node_check, index_check = path[depth - 1]
+        if isinstance(node_check, string_types):
+            if current_node.tag != node_check:
+                return False
+        elif node_check is not None:
+            if not isinstance(current_node, node_check):
+                return False
+        if index_check is True and current_index is not None:
+            return False
+        if (index_check is False or index_check is None) and current_index is None:
+            return False
+        if isinstance(index_check, string_types):
+            if not (
+                isinstance(current_index, ScalarNode)
+                and index_check == current_index.value
+            ):
+                return False
+        elif isinstance(index_check, int) and not isinstance(index_check, bool):
+            if index_check != current_index:
+                return False
+        return True
+
+    def resolve(self, kind, value, implicit):
+        # type: (Any, Any, Any) -> Any
+        if kind is ScalarNode and implicit[0]:
+            if value == "":
+                resolvers = self.yaml_implicit_resolvers.get("", [])
+            else:
+                resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+            # use + rather than += so the list stored in the bucket is not
+            # mutated in place
+            resolvers = resolvers + self.yaml_implicit_resolvers.get(None, [])
+            for tag, regexp in resolvers:
+                if regexp.match(value):
+                    return tag
+            implicit = implicit[1]
+        if bool(self.yaml_path_resolvers):
+            exact_paths = self.resolver_exact_paths[-1]
+            if kind in exact_paths:
+                return exact_paths[kind]
+            if None in exact_paths:
+                return exact_paths[None]
+        if kind is ScalarNode:
+            return self.DEFAULT_SCALAR_TAG
+        elif kind is SequenceNode:
+            return self.DEFAULT_SEQUENCE_TAG
+        elif kind is MappingNode:
+            return self.DEFAULT_MAPPING_TAG
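+
+    # e.g. on the populated Resolver subclass below, resolving the plain
+    # scalar '3.14' with implicit[0] true tries the resolvers bucketed under
+    # its first character '3' (plus the catch-all None bucket) and returns
+    # tag:yaml.org,2002:float.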
+
+    @property
+    def processing_version(self):
+        # type: () -> Any
+        return None
+
+
+class Resolver(BaseResolver):
+    pass
+
+
+for ir in implicit_resolvers:
+    if (1, 2) in ir[0]:
+        Resolver.add_implicit_resolver_base(*ir[1:])
+
+
+class VersionedResolver(BaseResolver):
+    """
+    contrary to the "normal" resolver, the smart resolver delays loading
+    the pattern matching rules. That way it can decide to load 1.1 rules
+    or the (default) 1.2 rules, that no longer support octal without 0o, sexagesimals
+    and Yes/No/On/Off booleans.
+    """
+
+    def __init__(self, version=None, loader=None, loadumper=None):
+        # type: (Optional[VersionType], Any, Any) -> None
+        if loader is None and loadumper is not None:
+            loader = loadumper
+        BaseResolver.__init__(self, loader)
+        self._loader_version = self.get_loader_version(version)
+        self._version_implicit_resolver = {}  # type: Dict[Any, Any]
+
+    def add_version_implicit_resolver(self, version, tag, regexp, first):
+        # type: (VersionType, Any, Any, Any) -> None
+        if first is None:
+            first = [None]
+        impl_resolver = self._version_implicit_resolver.setdefault(version, {})
+        for ch in first:
+            impl_resolver.setdefault(ch, []).append((tag, regexp))
+
+    def get_loader_version(self, version):
+        # type: (Optional[VersionType]) -> Any
+        if version is None or isinstance(version, tuple):
+            return version
+        if isinstance(version, list):
+            return tuple(version)
+        # assume string
+        return tuple(map(int, version.split(u".")))
+
+    @property
+    def versioned_resolver(self):
+        # type: () -> Any
+        """
+        select the resolver based on the version we are parsing
+        """
+        version = self.processing_version
+        if version not in self._version_implicit_resolver:
+            for x in implicit_resolvers:
+                if version in x[0]:
+                    self.add_version_implicit_resolver(version, x[1], x[2], x[3])
+        return self._version_implicit_resolver[version]
+
+    def resolve(self, kind, value, implicit):
+        # type: (Any, Any, Any) -> Any
+        if kind is ScalarNode and implicit[0]:
+            if value == "":
+                resolvers = self.versioned_resolver.get("", [])
+            else:
+                resolvers = self.versioned_resolver.get(value[0], [])
+            # use + rather than += so the list stored in the bucket is not
+            # mutated in place
+            resolvers = resolvers + self.versioned_resolver.get(None, [])
+            for tag, regexp in resolvers:
+                if regexp.match(value):
+                    return tag
+            implicit = implicit[1]
+        if bool(self.yaml_path_resolvers):
+            exact_paths = self.resolver_exact_paths[-1]
+            if kind in exact_paths:
+                return exact_paths[kind]
+            if None in exact_paths:
+                return exact_paths[None]
+        if kind is ScalarNode:
+            return self.DEFAULT_SCALAR_TAG
+        elif kind is SequenceNode:
+            return self.DEFAULT_SEQUENCE_TAG
+        elif kind is MappingNode:
+            return self.DEFAULT_MAPPING_TAG
+
+    @property
+    def processing_version(self):
+        # type: () -> Any
+        try:
+            version = self.loadumper._scanner.yaml_version
+        except AttributeError:
+            try:
+                if hasattr(self.loadumper, "typ"):
+                    version = self.loadumper.version
+                else:
+                    version = self.loadumper._serializer.use_version  # dumping
+            except AttributeError:
+                version = None
+        if version is None:
+            version = self._loader_version
+            if version is None:
+                version = _DEFAULT_YAML_VERSION
+        return version
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/scalarbool.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/scalarbool.py
new file mode 100644
index 00000000..0e88c7d3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/scalarbool.py
@@ -0,0 +1,51 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+"""
+You cannot subclass bool, and this is necessary for round-tripping anchored
+bool values (and also if you want to preserve the original way of writing)
+
+bool.__bases__ is type 'int', so that is what is used as the basis for ScalarBoolean as well.
+
+You can use these in an if statement, but not when testing equivalence
+"""
+
+from strictyaml.ruamel.anchor import Anchor
+
+if False:  # MYPY
+    from typing import Text, Any, Dict, List  # NOQA
+
+__all__ = ["ScalarBoolean"]
+
+# no need for no_limit_int -> int
+
+
+class ScalarBoolean(int):
+    def __new__(cls, *args, **kw):
+        # type: (Any, Any, Any) -> Any
+        anchor = kw.pop("anchor", None)  # type: ignore
+        b = int.__new__(cls, *args, **kw)  # type: ignore
+        if anchor is not None:
+            b.yaml_set_anchor(anchor, always_dump=True)
+        return b
+
+    @property
+    def anchor(self):
+        # type: () -> Any
+        if not hasattr(self, Anchor.attrib):
+            setattr(self, Anchor.attrib, Anchor())
+        return getattr(self, Anchor.attrib)
+
+    def yaml_anchor(self, any=False):
+        # type: (bool) -> Any
+        if not hasattr(self, Anchor.attrib):
+            return None
+        if any or self.anchor.always_dump:
+            return self.anchor
+        return None
+
+    def yaml_set_anchor(self, value, always_dump=False):
+        # type: (Any, bool) -> None
+        self.anchor.value = value
+        self.anchor.always_dump = always_dump
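+
+
+# Illustrative usage (a sketch, not part of the module itself): a
+# ScalarBoolean can carry an anchor, so the emitter writes e.g. `&flag true`:
+#     b = ScalarBoolean(True, anchor='flag')
+#     bool(b)      # -> True, usable in an if statement
+#     b is True    # -> False: b is an int subclass, not a bool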
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/scalarfloat.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/scalarfloat.py
new file mode 100644
index 00000000..cda9189c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/scalarfloat.py
@@ -0,0 +1,137 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+import sys
+from .compat import no_limit_int  # NOQA
+from strictyaml.ruamel.anchor import Anchor
+
+if False:  # MYPY
+    from typing import Text, Any, Dict, List  # NOQA
+
+__all__ = ["ScalarFloat", "ExponentialFloat", "ExponentialCapsFloat"]
+
+
+class ScalarFloat(float):
+    def __new__(cls, *args, **kw):
+        # type: (Any, Any, Any) -> Any
+        width = kw.pop("width", None)  # type: ignore
+        prec = kw.pop("prec", None)  # type: ignore
+        m_sign = kw.pop("m_sign", None)  # type: ignore
+        m_lead0 = kw.pop("m_lead0", 0)  # type: ignore
+        exp = kw.pop("exp", None)  # type: ignore
+        e_width = kw.pop("e_width", None)  # type: ignore
+        e_sign = kw.pop("e_sign", None)  # type: ignore
+        underscore = kw.pop("underscore", None)  # type: ignore
+        anchor = kw.pop("anchor", None)  # type: ignore
+        v = float.__new__(cls, *args, **kw)  # type: ignore
+        v._width = width
+        v._prec = prec
+        v._m_sign = m_sign
+        v._m_lead0 = m_lead0
+        v._exp = exp
+        v._e_width = e_width
+        v._e_sign = e_sign
+        v._underscore = underscore
+        if anchor is not None:
+            v.yaml_set_anchor(anchor, always_dump=True)
+        return v
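+
+    # The keyword arguments popped above record how the scalar was written
+    # (overall width, precision, mantissa sign and leading zeros, exponent
+    # letter, width and sign, underscore positions) so the representer can
+    # re-emit the float in its original form.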
+
+    # The in-place operators intentionally return a plain float: the layout
+    # information of the original scalar (width, precision, signs, underscore
+    # positions) is not propagated to the result.
+    def __iadd__(self, a):  # type: ignore
+        # type: (Any) -> Any
+        return float(self) + a
+
+    def __ifloordiv__(self, a):  # type: ignore
+        # type: (Any) -> Any
+        return float(self) // a
+
+    def __imul__(self, a):  # type: ignore
+        # type: (Any) -> Any
+        return float(self) * a
+
+    def __ipow__(self, a):  # type: ignore
+        # type: (Any) -> Any
+        return float(self) ** a
+
+    def __isub__(self, a):  # type: ignore
+        # type: (Any) -> Any
+        return float(self) - a
+
+    @property
+    def anchor(self):
+        # type: () -> Any
+        if not hasattr(self, Anchor.attrib):
+            setattr(self, Anchor.attrib, Anchor())
+        return getattr(self, Anchor.attrib)
+
+    def yaml_anchor(self, any=False):
+        # type: (bool) -> Any
+        if not hasattr(self, Anchor.attrib):
+            return None
+        if any or self.anchor.always_dump:
+            return self.anchor
+        return None
+
+    def yaml_set_anchor(self, value, always_dump=False):
+        # type: (Any, bool) -> None
+        self.anchor.value = value
+        self.anchor.always_dump = always_dump
+
+    def dump(self, out=sys.stdout):
+        # type: (Any) -> Any
+        out.write(
+            "ScalarFloat({}| w:{}, p:{}, s:{}, lz:{}, _:{}|{}, w:{}, s:{})\n".format(
+                self,
+                self._width,  # type: ignore
+                self._prec,  # type: ignore
+                self._m_sign,  # type: ignore
+                self._m_lead0,  # type: ignore
+                self._underscore,  # type: ignore
+                self._exp,  # type: ignore
+                self._e_width,  # type: ignore
+                self._e_sign,  # type: ignore
+            )
+        )
+
+
+class ExponentialFloat(ScalarFloat):
+    def __new__(cls, value, width=None, underscore=None):
+        # type: (Any, Any, Any) -> Any
+        return ScalarFloat.__new__(cls, value, width=width, underscore=underscore)
+
+
+class ExponentialCapsFloat(ScalarFloat):
+    def __new__(cls, value, width=None, underscore=None):
+        # type: (Any, Any, Any) -> Any
+        return ScalarFloat.__new__(cls, value, width=width, underscore=underscore)
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/scalarint.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/scalarint.py
new file mode 100644
index 00000000..d3c78b44
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/scalarint.py
@@ -0,0 +1,140 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+from .compat import no_limit_int  # NOQA
+from strictyaml.ruamel.anchor import Anchor
+
+if False:  # MYPY
+    from typing import Text, Any, Dict, List  # NOQA
+
+__all__ = ["ScalarInt", "BinaryInt", "OctalInt", "HexInt", "HexCapsInt", "DecimalInt"]
+
+
+class ScalarInt(no_limit_int):
+    def __new__(cls, *args, **kw):
+        # type: (Any, Any, Any) -> Any
+        width = kw.pop("width", None)  # type: ignore
+        underscore = kw.pop("underscore", None)  # type: ignore
+        anchor = kw.pop("anchor", None)  # type: ignore
+        v = no_limit_int.__new__(cls, *args, **kw)  # type: ignore
+        v._width = width
+        v._underscore = underscore
+        if anchor is not None:
+            v.yaml_set_anchor(anchor, always_dump=True)
+        return v
+
+    def __iadd__(self, a):  # type: ignore
+        # type: (Any) -> Any
+        x = type(self)(self + a)
+        x._width = self._width  # type: ignore
+        x._underscore = (  # type: ignore
+            self._underscore[:] if self._underscore is not None else None  # type: ignore
+        )  # NOQA
+        return x
+
+    def __ifloordiv__(self, a):  # type: ignore
+        # type: (Any) -> Any
+        x = type(self)(self // a)
+        x._width = self._width  # type: ignore
+        x._underscore = (  # type: ignore
+            self._underscore[:] if self._underscore is not None else None  # type: ignore
+        )  # NOQA
+        return x
+
+    def __imul__(self, a):  # type: ignore
+        # type: (Any) -> Any
+        x = type(self)(self * a)
+        x._width = self._width  # type: ignore
+        x._underscore = (  # type: ignore
+            self._underscore[:] if self._underscore is not None else None  # type: ignore
+        )  # NOQA
+        return x
+
+    def __ipow__(self, a):  # type: ignore
+        # type: (Any) -> Any
+        x = type(self)(self ** a)
+        x._width = self._width  # type: ignore
+        x._underscore = (  # type: ignore
+            self._underscore[:] if self._underscore is not None else None  # type: ignore
+        )  # NOQA
+        return x
+
+    def __isub__(self, a):  # type: ignore
+        # type: (Any) -> Any
+        x = type(self)(self - a)
+        x._width = self._width  # type: ignore
+        x._underscore = (  # type: ignore
+            self._underscore[:] if self._underscore is not None else None  # type: ignore
+        )  # NOQA
+        return x
+
+    @property
+    def anchor(self):
+        # type: () -> Any
+        if not hasattr(self, Anchor.attrib):
+            setattr(self, Anchor.attrib, Anchor())
+        return getattr(self, Anchor.attrib)
+
+    def yaml_anchor(self, any=False):
+        # type: (bool) -> Any
+        if not hasattr(self, Anchor.attrib):
+            return None
+        if any or self.anchor.always_dump:
+            return self.anchor
+        return None
+
+    def yaml_set_anchor(self, value, always_dump=False):
+        # type: (Any, bool) -> None
+        self.anchor.value = value
+        self.anchor.always_dump = always_dump
+
+
+class BinaryInt(ScalarInt):
+    def __new__(cls, value, width=None, underscore=None, anchor=None):
+        # type: (Any, Any, Any, Any) -> Any
+        return ScalarInt.__new__(
+            cls, value, width=width, underscore=underscore, anchor=anchor
+        )
+
+
+class OctalInt(ScalarInt):
+    def __new__(cls, value, width=None, underscore=None, anchor=None):
+        # type: (Any, Any, Any, Any) -> Any
+        return ScalarInt.__new__(
+            cls, value, width=width, underscore=underscore, anchor=anchor
+        )
+
+
+# mixed casing of A-F is not supported; when loading, the first non-digit
+# determines the case
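+# (so, for example, `0x1a` round-trips via HexInt and `0x1A` via HexCapsInt)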
+
+
+class HexInt(ScalarInt):
+    """uses lower case (a-f)"""
+
+    def __new__(cls, value, width=None, underscore=None, anchor=None):
+        # type: (Any, Any, Any, Any) -> Any
+        return ScalarInt.__new__(
+            cls, value, width=width, underscore=underscore, anchor=anchor
+        )
+
+
+class HexCapsInt(ScalarInt):
+    """uses upper case (A-F)"""
+
+    def __new__(cls, value, width=None, underscore=None, anchor=None):
+        # type: (Any, Any, Any, Any) -> Any
+        return ScalarInt.__new__(
+            cls, value, width=width, underscore=underscore, anchor=anchor
+        )
+
+
+class DecimalInt(ScalarInt):
+    """needed if anchor"""
+
+    def __new__(cls, value, width=None, underscore=None, anchor=None):
+        # type: (Any, Any, Any, Any) -> Any
+        return ScalarInt.__new__(
+            cls, value, width=width, underscore=underscore, anchor=anchor
+        )
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/scalarstring.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/scalarstring.py
new file mode 100644
index 00000000..c4aa30c0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/scalarstring.py
@@ -0,0 +1,156 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+from strictyaml.ruamel.compat import text_type
+from strictyaml.ruamel.anchor import Anchor
+
+if False:  # MYPY
+    from typing import Text, Any, Dict, List  # NOQA
+
+__all__ = [
+    "ScalarString",
+    "LiteralScalarString",
+    "FoldedScalarString",
+    "SingleQuotedScalarString",
+    "DoubleQuotedScalarString",
+    "PlainScalarString",
+    # PreservedScalarString is the old name (it was the first scalar type to
+    # be preserved on round-trip); use LiteralScalarString instead
+    "PreservedScalarString",
+]
+
+
+class ScalarString(text_type):
+    __slots__ = Anchor.attrib
+
+    def __new__(cls, *args, **kw):
+        # type: (Any, Any) -> Any
+        anchor = kw.pop("anchor", None)  # type: ignore
+        ret_val = text_type.__new__(cls, *args, **kw)  # type: ignore
+        if anchor is not None:
+            ret_val.yaml_set_anchor(anchor, always_dump=True)
+        return ret_val
+
+    def replace(self, old, new, maxreplace=-1):
+        # type: (Any, Any, int) -> Any
+        return type(self)(text_type.replace(self, old, new, maxreplace))
+
+    @property
+    def anchor(self):
+        # type: () -> Any
+        if not hasattr(self, Anchor.attrib):
+            setattr(self, Anchor.attrib, Anchor())
+        return getattr(self, Anchor.attrib)
+
+    def yaml_anchor(self, any=False):
+        # type: (bool) -> Any
+        if not hasattr(self, Anchor.attrib):
+            return None
+        if any or self.anchor.always_dump:
+            return self.anchor
+        return None
+
+    def yaml_set_anchor(self, value, always_dump=False):
+        # type: (Any, bool) -> None
+        self.anchor.value = value
+        self.anchor.always_dump = always_dump
+
+
+class LiteralScalarString(ScalarString):
+    __slots__ = "comment"  # the comment after the | on the first line
+
+    style = "|"
+
+    def __new__(cls, value, anchor=None):
+        # type: (Text, Any) -> Any
+        return ScalarString.__new__(cls, value, anchor=anchor)
+
+
+PreservedScalarString = LiteralScalarString
+
+
+class FoldedScalarString(ScalarString):
+    __slots__ = ("fold_pos", "comment")  # the comment after the > on the first line
+
+    style = ">"
+
+    def __new__(cls, value, anchor=None):
+        # type: (Text, Any) -> Any
+        return ScalarString.__new__(cls, value, anchor=anchor)
+
+
+class SingleQuotedScalarString(ScalarString):
+    __slots__ = ()
+
+    style = "'"
+
+    def __new__(cls, value, anchor=None):
+        # type: (Text, Any) -> Any
+        return ScalarString.__new__(cls, value, anchor=anchor)
+
+
+class DoubleQuotedScalarString(ScalarString):
+    __slots__ = ()
+
+    style = '"'
+
+    def __new__(cls, value, anchor=None):
+        # type: (Text, Any) -> Any
+        return ScalarString.__new__(cls, value, anchor=anchor)
+
+
+class PlainScalarString(ScalarString):
+    __slots__ = ()
+
+    style = ""
+
+    def __new__(cls, value, anchor=None):
+        # type: (Text, Any) -> Any
+        return ScalarString.__new__(cls, value, anchor=anchor)
+
+
+def preserve_literal(s):
+    # type: (Text) -> Text
+    return LiteralScalarString(s.replace("\r\n", "\n").replace("\r", "\n"))
+
+
+def walk_tree(base, map=None):
+    # type: (Any, Any) -> None
+    """
+    the routine here walks over a simple yaml tree (recursing in
+    dict values and list items) and converts strings that
+    have multiple lines to literal scalars
+
+    You can also provide an explicit (ordered) mapping for multiple transforms
+    (first of which is executed):
+        map = strictyaml.ruamel.compat.ordereddict
+        map['\n'] = preserve_literal
+        map[':'] = SingleQuotedScalarString
+        walk_tree(data, map=map)
+    """
+    from strictyaml.ruamel.compat import string_types
+    from strictyaml.ruamel.compat import MutableMapping, MutableSequence  # type: ignore
+
+    if map is None:
+        map = {"\n": preserve_literal}
+
+    if isinstance(base, MutableMapping):
+        for k in base:
+            v = base[k]  # type: Text
+            if isinstance(v, string_types):
+                for ch in map:
+                    if ch in v:
+                        base[k] = map[ch](v)
+                        break
+            else:
+                walk_tree(v, map=map)
+    elif isinstance(base, MutableSequence):
+        for idx, elem in enumerate(base):
+            if isinstance(elem, string_types):
+                for ch in map:
+                    if ch in elem:  # type: ignore
+                        base[idx] = map[ch](elem)
+                        break
+            else:
+                walk_tree(elem, map=map)
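+
+
+# Illustrative usage (a sketch; `data` stands for any loaded YAML structure):
+#     data = {'msg': 'line one\nline two'}
+#     walk_tree(data)
+#     # data['msg'] is now a LiteralScalarString, emitted in `|` block style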
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/scanner.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/scanner.py
new file mode 100644
index 00000000..2f417051
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/scanner.py
@@ -0,0 +1,2017 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# RoundTripScanner
+# COMMENT(value)
+#
+# Read comments in the Scanner code for more details.
+#
+
+from strictyaml.ruamel.error import MarkedYAMLError
+from strictyaml.ruamel.tokens import *  # NOQA
+from strictyaml.ruamel.compat import (
+    utf8,
+    unichr,
+    PY3,
+    check_anchorname_char,
+    nprint,
+)  # NOQA
+
+if False:  # MYPY
+    from typing import Any, Dict, Optional, List, Union, Text  # NOQA
+    from strictyaml.ruamel.compat import VersionType  # NOQA
+
+__all__ = ["Scanner", "RoundTripScanner", "ScannerError"]
+
+
+_THE_END = "\n\0\r\x85\u2028\u2029"
+_THE_END_SPACE_TAB = " \n\0\t\r\x85\u2028\u2029"
+_SPACE_TAB = " \t"
+
+
+class ScannerError(MarkedYAMLError):
+    pass
+
+
+class SimpleKey(object):
+    # See below simple keys treatment.
+
+    def __init__(self, token_number, required, index, line, column, mark):
+        # type: (Any, Any, int, int, int, Any) -> None
+        self.token_number = token_number
+        self.required = required
+        self.index = index
+        self.line = line
+        self.column = column
+        self.mark = mark
+
+
+class Scanner(object):
+    def __init__(self, loader=None):
+        # type: (Any) -> None
+        """Initialize the scanner."""
+        # It is assumed that Scanner and Reader will have a common descendant.
+        # Reader does the dirty work of checking for BOM and converting the
+        # input data to Unicode. It also adds NUL to the end.
+        #
+        # Reader supports the following methods
+        #   self.peek(i=0)    # peek the next i-th character
+        #   self.prefix(l=1)  # peek the next l characters
+        #   self.forward(l=1) # read the next l characters and move the pointer
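+        #
+        # e.g. with input 'abc', self.peek() would return 'a',
+        # self.prefix(2) 'ab', and after self.forward(1) peek() returns 'b'.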
+
+        self.loader = loader
+        if self.loader is not None and getattr(self.loader, "_scanner", None) is None:
+            self.loader._scanner = self
+        self.reset_scanner()
+        self.first_time = False
+        self.yaml_version = None  # type: Any
+
+    @property
+    def flow_level(self):
+        # type: () -> int
+        return len(self.flow_context)
+
+    def reset_scanner(self):
+        # type: () -> None
+        # Have we reached the end of the stream?
+        self.done = False
+
+        # flow_context is an expanding/shrinking list consisting of '{' and '['
+        # for each unclosed flow context. If the list is empty, we are in
+        # block context.
+        self.flow_context = []  # type: List[Text]
+
+        # List of processed tokens that are not yet emitted.
+        self.tokens = []  # type: List[Any]
+
+        # Add the STREAM-START token.
+        self.fetch_stream_start()
+
+        # Number of tokens that were emitted through the `get_token` method.
+        self.tokens_taken = 0
+
+        # The current indentation level.
+        self.indent = -1
+
+        # Past indentation levels.
+        self.indents = []  # type: List[int]
+
+        # Variables related to simple keys treatment.
+
+        # A simple key is a key that is not denoted by the '?' indicator.
+        # Example of simple keys:
+        #   ---
+        #   block simple key: value
+        #   ? not a simple key:
+        #   : { flow simple key: value }
+        # We emit the KEY token before all keys, so when we find a potential
+        # simple key, we try to locate the corresponding ':' indicator.
+        # Simple keys should be limited to a single line and 1024 characters.
+
+        # Can a simple key start at the current position? A simple key may
+        # start:
+        # - at the beginning of the line, not counting indentation spaces
+        #       (in block context),
+        # - after '{', '[', ',' (in the flow context),
+        # - after '?', ':', '-' (in the block context).
+        # In the block context, this flag also signifies if a block collection
+        # may start at the current position.
+        self.allow_simple_key = True
+
+        # Keep track of possible simple keys. This is a dictionary. The key
+        # is `flow_level`; there can be no more than one possible simple key
+        # for each level. The value is a SimpleKey record:
+        #   (token_number, required, index, line, column, mark)
+        # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+        # '[', or '{' tokens.
+        self.possible_simple_keys = {}  # type: Dict[Any, Any]
+
+    @property
+    def reader(self):
+        # type: () -> Any
+        try:
+            return self._scanner_reader  # type: ignore
+        except AttributeError:
+            if hasattr(self.loader, "typ"):
+                self._scanner_reader = self.loader.reader
+            else:
+                self._scanner_reader = self.loader._reader
+            return self._scanner_reader
+
+    @property
+    def scanner_processing_version(self):  # prefix until un-composited
+        # type: () -> Any
+        if hasattr(self.loader, "typ"):
+            return self.loader.resolver.processing_version
+        return self.loader.processing_version
+
+    # Public methods.
+
+    def check_token(self, *choices):
+        # type: (Any) -> bool
+        # Check if the next token is one of the given types.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        if bool(self.tokens):
+            if not choices:
+                return True
+            for choice in choices:
+                if isinstance(self.tokens[0], choice):
+                    return True
+        return False
+
+    def peek_token(self):
+        # type: () -> Any
+        # Return the next token, but do not delete it from the queue.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        if bool(self.tokens):
+            return self.tokens[0]
+
+    def get_token(self):
+        # type: () -> Any
+        # Return the next token.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        if bool(self.tokens):
+            self.tokens_taken += 1
+            return self.tokens.pop(0)
+
+    # Private methods.
+
+    def need_more_tokens(self):
+        # type: () -> bool
+        if self.done:
+            return False
+        if not self.tokens:
+            return True
+        # The current token may be a potential simple key, so we
+        # need to look further.
+        self.stale_possible_simple_keys()
+        if self.next_possible_simple_key() == self.tokens_taken:
+            return True
+        return False
+
+    def fetch_comment(self, comment):
+        # type: (Any) -> None
+        raise NotImplementedError
+
+    def fetch_more_tokens(self):
+        # type: () -> Any
+        # Eat whitespaces and comments until we reach the next token.
+        comment = self.scan_to_next_token()
+        if comment is not None:  # never happens for base scanner
+            return self.fetch_comment(comment)
+        # Remove obsolete possible simple keys.
+        self.stale_possible_simple_keys()
+
+        # Compare the current indentation and column. It may add some tokens
+        # and decrease the current indentation level.
+        self.unwind_indent(self.reader.column)
+
+        # Peek the next character.
+        ch = self.reader.peek()
+
+        # Is it the end of stream?
+        if ch == "\0":
+            return self.fetch_stream_end()
+
+        # Is it a directive?
+        if ch == "%" and self.check_directive():
+            return self.fetch_directive()
+
+        # Is it the document start?
+        if ch == "-" and self.check_document_start():
+            return self.fetch_document_start()
+
+        # Is it the document end?
+        if ch == "." and self.check_document_end():
+            return self.fetch_document_end()
+
+        # TODO: support for BOM within a stream.
+        # if ch == u'\uFEFF':
+        #     return self.fetch_bom()    <-- issue BOMToken
+
+        # Note: the order of the following checks is NOT significant.
+
+        # Is it the flow sequence start indicator?
+        if ch == "[":
+            return self.fetch_flow_sequence_start()
+
+        # Is it the flow mapping start indicator?
+        if ch == "{":
+            return self.fetch_flow_mapping_start()
+
+        # Is it the flow sequence end indicator?
+        if ch == "]":
+            return self.fetch_flow_sequence_end()
+
+        # Is it the flow mapping end indicator?
+        if ch == "}":
+            return self.fetch_flow_mapping_end()
+
+        # Is it the flow entry indicator?
+        if ch == ",":
+            return self.fetch_flow_entry()
+
+        # Is it the block entry indicator?
+        if ch == "-" and self.check_block_entry():
+            return self.fetch_block_entry()
+
+        # Is it the key indicator?
+        if ch == "?" and self.check_key():
+            return self.fetch_key()
+
+        # Is it the value indicator?
+        if ch == ":" and self.check_value():
+            return self.fetch_value()
+
+        # Is it an alias?
+        if ch == "*":
+            return self.fetch_alias()
+
+        # Is it an anchor?
+        if ch == "&":
+            return self.fetch_anchor()
+
+        # Is it a tag?
+        if ch == "!":
+            return self.fetch_tag()
+
+        # Is it a literal scalar?
+        if ch == "|" and not self.flow_level:
+            return self.fetch_literal()
+
+        # Is it a folded scalar?
+        if ch == ">" and not self.flow_level:
+            return self.fetch_folded()
+
+        # Is it a single quoted scalar?
+        if ch == "'":
+            return self.fetch_single()
+
+        # Is it a double quoted scalar?
+        if ch == '"':
+            return self.fetch_double()
+
+        # It must be a plain scalar then.
+        if self.check_plain():
+            return self.fetch_plain()
+
+        # No? It's an error. Let's produce a nice error message.
+        raise ScannerError(
+            "while scanning for the next token",
+            None,
+            "found character %r that cannot start any token" % utf8(ch),
+            self.reader.get_mark(),
+        )
+
+    # Simple keys treatment.
+
+    def next_possible_simple_key(self):
+        # type: () -> Any
+        # Return the number of the nearest possible simple key. Actually we
+        # don't need to loop through the whole dictionary. We may replace it
+        # with the following code:
+        #   if not self.possible_simple_keys:
+        #       return None
+        #   return self.possible_simple_keys[
+        #           min(self.possible_simple_keys.keys())].token_number
+        min_token_number = None
+        for level in self.possible_simple_keys:
+            key = self.possible_simple_keys[level]
+            if min_token_number is None or key.token_number < min_token_number:
+                min_token_number = key.token_number
+        return min_token_number
+
+    def stale_possible_simple_keys(self):
+        # type: () -> None
+        # Remove entries that are no longer possible simple keys. According to
+        # the YAML specification, simple keys
+        # - should be limited to a single line,
+        # - should be no longer than 1024 characters.
+        # Disabling this procedure will allow simple keys of any length and
+        # height (may cause problems if indentation is broken though).
+        for level in list(self.possible_simple_keys):
+            key = self.possible_simple_keys[level]
+            if key.line != self.reader.line or self.reader.index - key.index > 1024:
+                if key.required:
+                    raise ScannerError(
+                        "while scanning a simple key",
+                        key.mark,
+                        "could not find expected ':'",
+                        self.reader.get_mark(),
+                    )
+                del self.possible_simple_keys[level]
+
+    def save_possible_simple_key(self):
+        # type: () -> None
+        # The next token may start a simple key. We check if it's possible
+        # and save its position. This function is called for
+        #   ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+        # Check if a simple key is required at the current position.
+        required = not self.flow_level and self.indent == self.reader.column
+
+        # The next token might be a simple key. Let's save its number and
+        # position.
+        if self.allow_simple_key:
+            self.remove_possible_simple_key()
+            token_number = self.tokens_taken + len(self.tokens)
+            key = SimpleKey(
+                token_number,
+                required,
+                self.reader.index,
+                self.reader.line,
+                self.reader.column,
+                self.reader.get_mark(),
+            )
+            self.possible_simple_keys[self.flow_level] = key
+
+    def remove_possible_simple_key(self):
+        # type: () -> None
+        # Remove the saved possible key position at the current flow level.
+        if self.flow_level in self.possible_simple_keys:
+            key = self.possible_simple_keys[self.flow_level]
+
+            if key.required:
+                raise ScannerError(
+                    "while scanning a simple key",
+                    key.mark,
+                    "could not find expected ':'",
+                    self.reader.get_mark(),
+                )
+
+            del self.possible_simple_keys[self.flow_level]
+
+    # Indentation functions.
+
+    def unwind_indent(self, column):
+        # type: (Any) -> None
+        # In flow context, tokens should respect indentation.
+        # Actually the condition should be `self.indent >= column` according to
+        # the spec. But this condition will prohibit intuitively correct
+        # constructions such as
+        # key : {
+        # }
+        # ####
+        # if self.flow_level and self.indent > column:
+        #     raise ScannerError(None, None,
+        #             "invalid intendation or unclosed '[' or '{'",
+        #             self.reader.get_mark())
+
+        # In the flow context, indentation is ignored. We make the scanner less
+        # restrictive than the specification requires.
+        if bool(self.flow_level):
+            return
+
+        # In block context, we may need to issue the BLOCK-END tokens.
+        while self.indent > column:
+            mark = self.reader.get_mark()
+            self.indent = self.indents.pop()
+            self.tokens.append(BlockEndToken(mark, mark))
+
+    def add_indent(self, column):
+        # type: (int) -> bool
+        # Check if we need to increase indentation.
+        if self.indent < column:
+            self.indents.append(self.indent)
+            self.indent = column
+            return True
+        return False
+
+    # Fetchers.
+
+    def fetch_stream_start(self):
+        # type: () -> None
+        # We always add STREAM-START as the first token and STREAM-END as the
+        # last token.
+        # Read the token.
+        mark = self.reader.get_mark()
+        # Add STREAM-START.
+        self.tokens.append(StreamStartToken(mark, mark, encoding=self.reader.encoding))
+
+    def fetch_stream_end(self):
+        # type: () -> None
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+        # Reset simple keys.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+        self.possible_simple_keys = {}
+        # Read the token.
+        mark = self.reader.get_mark()
+        # Add STREAM-END.
+        self.tokens.append(StreamEndToken(mark, mark))
+        # The stream is finished.
+        self.done = True
+
+    def fetch_directive(self):
+        # type: () -> None
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+
+        # Reset simple keys.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+
+        # Scan and add DIRECTIVE.
+        self.tokens.append(self.scan_directive())
+
+    def fetch_document_start(self):
+        # type: () -> None
+        self.fetch_document_indicator(DocumentStartToken)
+
+    def fetch_document_end(self):
+        # type: () -> None
+        self.fetch_document_indicator(DocumentEndToken)
+
+    def fetch_document_indicator(self, TokenClass):
+        # type: (Any) -> None
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+
+        # Reset simple keys. Note that there could not be a block collection
+        # after '---'.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+
+        # Add DOCUMENT-START or DOCUMENT-END.
+        start_mark = self.reader.get_mark()
+        self.reader.forward(3)
+        end_mark = self.reader.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+    def fetch_flow_sequence_start(self):
+        # type: () -> None
+        self.fetch_flow_collection_start(FlowSequenceStartToken, to_push="[")
+
+    def fetch_flow_mapping_start(self):
+        # type: () -> None
+        self.fetch_flow_collection_start(FlowMappingStartToken, to_push="{")
+
+    def fetch_flow_collection_start(self, TokenClass, to_push):
+        # type: (Any, Text) -> None
+        # '[' and '{' may start a simple key.
+        self.save_possible_simple_key()
+        # Increase the flow level.
+        self.flow_context.append(to_push)
+        # Simple keys are allowed after '[' and '{'.
+        self.allow_simple_key = True
+        # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+        start_mark = self.reader.get_mark()
+        self.reader.forward()
+        end_mark = self.reader.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+    def fetch_flow_sequence_end(self):
+        # type: () -> None
+        self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+    def fetch_flow_mapping_end(self):
+        # type: () -> None
+        self.fetch_flow_collection_end(FlowMappingEndToken)
+
+    def fetch_flow_collection_end(self, TokenClass):
+        # type: (Any) -> None
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+        # Decrease the flow level.
+        try:
+            popped = self.flow_context.pop()  # NOQA
+        except IndexError:
+            # We must not be in a list or object.
+            # Defer error handling to the parser.
+            pass
+        # No simple keys after ']' or '}'.
+        self.allow_simple_key = False
+        # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+        start_mark = self.reader.get_mark()
+        self.reader.forward()
+        end_mark = self.reader.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+    def fetch_flow_entry(self):
+        # type: () -> None
+        # Simple keys are allowed after ','.
+        self.allow_simple_key = True
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+        # Add FLOW-ENTRY.
+        start_mark = self.reader.get_mark()
+        self.reader.forward()
+        end_mark = self.reader.get_mark()
+        self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+    def fetch_block_entry(self):
+        # type: () -> None
+        # Block context needs additional checks.
+        if not self.flow_level:
+            # Are we allowed to start a new entry?
+            if not self.allow_simple_key:
+                raise ScannerError(
+                    None,
+                    None,
+                    "sequence entries are not allowed here",
+                    self.reader.get_mark(),
+                )
+            # We may need to add BLOCK-SEQUENCE-START.
+            if self.add_indent(self.reader.column):
+                mark = self.reader.get_mark()
+                self.tokens.append(BlockSequenceStartToken(mark, mark))
+        # It's an error for the block entry to occur in the flow context,
+        # but we let the parser detect this.
+        else:
+            pass
+        # Simple keys are allowed after '-'.
+        self.allow_simple_key = True
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Add BLOCK-ENTRY.
+        start_mark = self.reader.get_mark()
+        self.reader.forward()
+        end_mark = self.reader.get_mark()
+        self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+    def fetch_key(self):
+        # type: () -> None
+        # Block context needs additional checks.
+        if not self.flow_level:
+
+            # Are we allowed to start a key (not necessarily a simple one)?
+            if not self.allow_simple_key:
+                raise ScannerError(
+                    None,
+                    None,
+                    "mapping keys are not allowed here",
+                    self.reader.get_mark(),
+                )
+
+            # We may need to add BLOCK-MAPPING-START.
+            if self.add_indent(self.reader.column):
+                mark = self.reader.get_mark()
+                self.tokens.append(BlockMappingStartToken(mark, mark))
+
+        # Simple keys are allowed after '?' in the block context.
+        self.allow_simple_key = not self.flow_level
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Add KEY.
+        start_mark = self.reader.get_mark()
+        self.reader.forward()
+        end_mark = self.reader.get_mark()
+        self.tokens.append(KeyToken(start_mark, end_mark))
+
+    def fetch_value(self):
+        # type: () -> None
+        # Does this ':' complete a saved simple key?
+        if self.flow_level in self.possible_simple_keys:
+            # Add KEY.
+            key = self.possible_simple_keys[self.flow_level]
+            del self.possible_simple_keys[self.flow_level]
+            self.tokens.insert(
+                key.token_number - self.tokens_taken, KeyToken(key.mark, key.mark)
+            )
+
+            # If this key starts a new block mapping, we need to add
+            # BLOCK-MAPPING-START.
+            if not self.flow_level:
+                if self.add_indent(key.column):
+                    self.tokens.insert(
+                        key.token_number - self.tokens_taken,
+                        BlockMappingStartToken(key.mark, key.mark),
+                    )
+
+            # There cannot be two simple keys one after another.
+            self.allow_simple_key = False
+
+        # It must be a part of a complex key.
+        else:
+
+            # Block context needs additional checks.
+            # (Do we really need them? They will be caught by the parser
+            # anyway.)
+            if not self.flow_level:
+
+                # We are allowed to start a complex value if and only if
+                # we can start a simple key.
+                if not self.allow_simple_key:
+                    raise ScannerError(
+                        None,
+                        None,
+                        "mapping values are not allowed here",
+                        self.reader.get_mark(),
+                    )
+
+            # If this value starts a new block mapping, we need to add
+            # BLOCK-MAPPING-START.  It will be detected as an error later by
+            # the parser.
+            if not self.flow_level:
+                if self.add_indent(self.reader.column):
+                    mark = self.reader.get_mark()
+                    self.tokens.append(BlockMappingStartToken(mark, mark))
+
+            # Simple keys are allowed after ':' in the block context.
+            self.allow_simple_key = not self.flow_level
+
+            # Reset possible simple key on the current level.
+            self.remove_possible_simple_key()
+
+        # Add VALUE.
+        start_mark = self.reader.get_mark()
+        self.reader.forward()
+        end_mark = self.reader.get_mark()
+        self.tokens.append(ValueToken(start_mark, end_mark))
+
+    def fetch_alias(self):
+        # type: () -> None
+        # ALIAS could be a simple key.
+        self.save_possible_simple_key()
+        # No simple keys after ALIAS.
+        self.allow_simple_key = False
+        # Scan and add ALIAS.
+        self.tokens.append(self.scan_anchor(AliasToken))
+
+    def fetch_anchor(self):
+        # type: () -> None
+        # ANCHOR could start a simple key.
+        self.save_possible_simple_key()
+        # No simple keys after ANCHOR.
+        self.allow_simple_key = False
+        # Scan and add ANCHOR.
+        self.tokens.append(self.scan_anchor(AnchorToken))
+
+    def fetch_tag(self):
+        # type: () -> None
+        # TAG could start a simple key.
+        self.save_possible_simple_key()
+        # No simple keys after TAG.
+        self.allow_simple_key = False
+        # Scan and add TAG.
+        self.tokens.append(self.scan_tag())
+
+    def fetch_literal(self):
+        # type: () -> None
+        self.fetch_block_scalar(style="|")
+
+    def fetch_folded(self):
+        # type: () -> None
+        self.fetch_block_scalar(style=">")
+
+    def fetch_block_scalar(self, style):
+        # type: (Any) -> None
+        # A simple key may follow a block scalar.
+        self.allow_simple_key = True
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+        # Scan and add SCALAR.
+        self.tokens.append(self.scan_block_scalar(style))
+
+    def fetch_single(self):
+        # type: () -> None
+        self.fetch_flow_scalar(style="'")
+
+    def fetch_double(self):
+        # type: () -> None
+        self.fetch_flow_scalar(style='"')
+
+    def fetch_flow_scalar(self, style):
+        # type: (Any) -> None
+        # A flow scalar could be a simple key.
+        self.save_possible_simple_key()
+        # No simple keys after flow scalars.
+        self.allow_simple_key = False
+        # Scan and add SCALAR.
+        self.tokens.append(self.scan_flow_scalar(style))
+
+    def fetch_plain(self):
+        # type: () -> None
+        # A plain scalar could be a simple key.
+        self.save_possible_simple_key()
+        # No simple keys after plain scalars. But note that `scan_plain` will
+        # change this flag if the scan is finished at the beginning of the
+        # line.
+        self.allow_simple_key = False
+        # Scan and add SCALAR. May change `allow_simple_key`.
+        self.tokens.append(self.scan_plain())
+
+    # Checkers.
+
+    def check_directive(self):
+        # type: () -> Any
+        # DIRECTIVE:        ^ '%' ...
+        # The '%' indicator is already checked.
+        if self.reader.column == 0:
+            return True
+        return None
+
+    def check_document_start(self):
+        # type: () -> Any
+        # DOCUMENT-START:   ^ '---' (' '|'\n')
+        if self.reader.column == 0:
+            if (
+                self.reader.prefix(3) == "---"
+                and self.reader.peek(3) in _THE_END_SPACE_TAB
+            ):
+                return True
+        return None
+
+    def check_document_end(self):
+        # type: () -> Any
+        # DOCUMENT-END:     ^ '...' (' '|'\n')
+        if self.reader.column == 0:
+            if (
+                self.reader.prefix(3) == "..."
+                and self.reader.peek(3) in _THE_END_SPACE_TAB
+            ):
+                return True
+        return None
+
+    def check_block_entry(self):
+        # type: () -> Any
+        # BLOCK-ENTRY:      '-' (' '|'\n')
+        return self.reader.peek(1) in _THE_END_SPACE_TAB
+
+    def check_key(self):
+        # type: () -> Any
+        # KEY(flow context):    '?'
+        if bool(self.flow_level):
+            return True
+        # KEY(block context):   '?' (' '|'\n')
+        return self.reader.peek(1) in _THE_END_SPACE_TAB
+
+    def check_value(self):
+        # type: () -> Any
+        # VALUE(flow context):  ':'
+        if self.scanner_processing_version == (1, 1):
+            if bool(self.flow_level):
+                return True
+        else:
+            if bool(self.flow_level):
+                if self.flow_context[-1] == "[":
+                    if self.reader.peek(1) not in _THE_END_SPACE_TAB:
+                        return False
+                elif self.tokens and isinstance(self.tokens[-1], ValueToken):
+                    # mapping flow context scanning a value token
+                    if self.reader.peek(1) not in _THE_END_SPACE_TAB:
+                        return False
+                return True
+        # VALUE(block context): ':' (' '|'\n')
+        return self.reader.peek(1) in _THE_END_SPACE_TAB
+
+    def check_plain(self):
+        # type: () -> Any
+        # A plain scalar may start with any non-space character except:
+        #   '-', '?', ':', ',', '[', ']', '{', '}',
+        #   '#', '&', '*', '!', '|', '>', '\'', '\"',
+        #   '%', '@', '`'.
+        #
+        # It may also start with
+        #   '-', '?', ':'
+        # if it is followed by a non-space character.
+        #
+        # Note that we limit the last rule to the block context (except the
+        # '-' character) because we want the flow context to be space
+        # independent.
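+        # For example, "-1" and (in block context) "?key" start plain scalars,
+        # while "- 1" starts a block sequence entry instead.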
+        srp = self.reader.peek
+        ch = srp()
+        if self.scanner_processing_version == (1, 1):
+            return ch not in "\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>'\"%@`" or (
+                srp(1) not in _THE_END_SPACE_TAB
+                and (ch == "-" or (not self.flow_level and ch in "?:"))
+            )
+        # YAML 1.2
+        if ch not in "\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>'\"%@`":
+            return True
+        ch1 = srp(1)
+        if ch == "-" and ch1 not in _THE_END_SPACE_TAB:
+            return True
+        if ch == ":" and bool(self.flow_level) and ch1 not in _SPACE_TAB:
+            return True
+
+        return srp(1) not in _THE_END_SPACE_TAB and (
+            ch == "-" or (not self.flow_level and ch in "?:")
+        )
+
+    # Scanners.
+
+    def scan_to_next_token(self):
+        # type: () -> Any
+        # We ignore spaces, line breaks and comments.
+        # If we find a line break in the block context, we set the flag
+        # `allow_simple_key` on.
+        # The byte order mark is stripped if it's the first character in the
+        # stream. We do not yet support BOM inside the stream as the
+        # specification requires. Any such mark will be considered as a part
+        # of the document.
+        #
+        # TODO: We need to make tab handling rules more sane. A good rule is
+        #   Tabs cannot precede tokens
+        #   BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+        #   KEY(block), VALUE(block), BLOCK-ENTRY
+        # So the checking code is
+        #   if <TAB>:
+        #       self.allow_simple_keys = False
+        # We also need to add the check for `allow_simple_keys == True` to
+        # `unwind_indent` before issuing BLOCK-END.
+        # Scanners for block, flow, and plain scalars need to be modified.
+        srp = self.reader.peek
+        srf = self.reader.forward
+        if self.reader.index == 0 and srp() == "\uFEFF":
+            srf()
+        found = False
+        _the_end = _THE_END
+        while not found:
+            while srp() == " ":
+                srf()
+            if srp() == "#":
+                while srp() not in _the_end:
+                    srf()
+            if self.scan_line_break():
+                if not self.flow_level:
+                    self.allow_simple_key = True
+            else:
+                found = True
+        return None
+
+    def scan_directive(self):
+        # type: () -> Any
+        # See the specification for details.
+        srp = self.reader.peek
+        srf = self.reader.forward
+        start_mark = self.reader.get_mark()
+        srf()
+        name = self.scan_directive_name(start_mark)
+        value = None
+        if name == "YAML":
+            value = self.scan_yaml_directive_value(start_mark)
+            end_mark = self.reader.get_mark()
+        elif name == "TAG":
+            value = self.scan_tag_directive_value(start_mark)
+            end_mark = self.reader.get_mark()
+        else:
+            end_mark = self.reader.get_mark()
+            while srp() not in _THE_END:
+                srf()
+        self.scan_directive_ignored_line(start_mark)
+        return DirectiveToken(name, value, start_mark, end_mark)
+
+    def scan_directive_name(self, start_mark):
+        # type: (Any) -> Any
+        # See the specification for details.
+        length = 0
+        srp = self.reader.peek
+        ch = srp(length)
+        while "0" <= ch <= "9" or "A" <= ch <= "Z" or "a" <= ch <= "z" or ch in "-_:.":
+            length += 1
+            ch = srp(length)
+        if not length:
+            raise ScannerError(
+                "while scanning a directive",
+                start_mark,
+                "expected alphabetic or numeric character, but found %r" % utf8(ch),
+                self.reader.get_mark(),
+            )
+        value = self.reader.prefix(length)
+        self.reader.forward(length)
+        ch = srp()
+        if ch not in "\0 \r\n\x85\u2028\u2029":
+            raise ScannerError(
+                "while scanning a directive",
+                start_mark,
+                "expected alphabetic or numeric character, but found %r" % utf8(ch),
+                self.reader.get_mark(),
+            )
+        return value
+
+    def scan_yaml_directive_value(self, start_mark):
+        # type: (Any) -> Any
+        # See the specification for details.
+        srp = self.reader.peek
+        srf = self.reader.forward
+        while srp() == " ":
+            srf()
+        major = self.scan_yaml_directive_number(start_mark)
+        if srp() != ".":
+            raise ScannerError(
+                "while scanning a directive",
+                start_mark,
+                "expected a digit or '.', but found %r" % utf8(srp()),
+                self.reader.get_mark(),
+            )
+        srf()
+        minor = self.scan_yaml_directive_number(start_mark)
+        if srp() not in "\0 \r\n\x85\u2028\u2029":
+            raise ScannerError(
+                "while scanning a directive",
+                start_mark,
+                "expected a digit or ' ', but found %r" % utf8(srp()),
+                self.reader.get_mark(),
+            )
+        self.yaml_version = (major, minor)
+        return self.yaml_version
+
+    def scan_yaml_directive_number(self, start_mark):
+        # type: (Any) -> Any
+        # See the specification for details.
+        srp = self.reader.peek
+        srf = self.reader.forward
+        ch = srp()
+        if not ("0" <= ch <= "9"):
+            raise ScannerError(
+                "while scanning a directive",
+                start_mark,
+                "expected a digit, but found %r" % utf8(ch),
+                self.reader.get_mark(),
+            )
+        length = 0
+        while "0" <= srp(length) <= "9":
+            length += 1
+        value = int(self.reader.prefix(length))
+        srf(length)
+        return value
+
+    def scan_tag_directive_value(self, start_mark):
+        # type: (Any) -> Any
+        # See the specification for details.
+        srp = self.reader.peek
+        srf = self.reader.forward
+        while srp() == " ":
+            srf()
+        handle = self.scan_tag_directive_handle(start_mark)
+        while srp() == " ":
+            srf()
+        prefix = self.scan_tag_directive_prefix(start_mark)
+        return (handle, prefix)
+
+    def scan_tag_directive_handle(self, start_mark):
+        # type: (Any) -> Any
+        # See the specification for details.
+        value = self.scan_tag_handle("directive", start_mark)
+        ch = self.reader.peek()
+        if ch != " ":
+            raise ScannerError(
+                "while scanning a directive",
+                start_mark,
+                "expected ' ', but found %r" % utf8(ch),
+                self.reader.get_mark(),
+            )
+        return value
+
+    def scan_tag_directive_prefix(self, start_mark):
+        # type: (Any) -> Any
+        # See the specification for details.
+        value = self.scan_tag_uri("directive", start_mark)
+        ch = self.reader.peek()
+        if ch not in "\0 \r\n\x85\u2028\u2029":
+            raise ScannerError(
+                "while scanning a directive",
+                start_mark,
+                "expected ' ', but found %r" % utf8(ch),
+                self.reader.get_mark(),
+            )
+        return value
+
+    def scan_directive_ignored_line(self, start_mark):
+        # type: (Any) -> None
+        # See the specification for details.
+        srp = self.reader.peek
+        srf = self.reader.forward
+        while srp() == " ":
+            srf()
+        if srp() == "#":
+            while srp() not in _THE_END:
+                srf()
+        ch = srp()
+        if ch not in _THE_END:
+            raise ScannerError(
+                "while scanning a directive",
+                start_mark,
+                "expected a comment or a line break, but found %r" % utf8(ch),
+                self.reader.get_mark(),
+            )
+        self.scan_line_break()
+
+    def scan_anchor(self, TokenClass):
+        # type: (Any) -> Any
+        # The specification does not restrict characters for anchors and
+        # aliases. This may lead to problems, for instance, the document:
+        #   [ *alias, value ]
+        # can be interpreted in two ways, as
+        #   [ "value" ]
+        # and
+        #   [ *alias , "value" ]
+        # Therefore we restrict aliases to numbers and ASCII letters.
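+        # For example, "&a1 value" yields the anchor name "a1"; scanning stops
+        # at characters such as ',' so that "[ *a, b ]" parses as intended.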
+        srp = self.reader.peek
+        start_mark = self.reader.get_mark()
+        indicator = srp()
+        if indicator == "*":
+            name = "alias"
+        else:
+            name = "anchor"
+        self.reader.forward()
+        length = 0
+        ch = srp(length)
+        # while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+        #         or ch in u'-_':
+        while check_anchorname_char(ch):
+            length += 1
+            ch = srp(length)
+        if not length:
+            raise ScannerError(
+                "while scanning an %s" % (name,),
+                start_mark,
+                "expected alphabetic or numeric character, but found %r" % utf8(ch),
+                self.reader.get_mark(),
+            )
+        value = self.reader.prefix(length)
+        self.reader.forward(length)
+        # ch1 = ch
+        # ch = srp()   # no need to peek, ch is already set
+        # assert ch1 == ch
+        if ch not in "\0 \t\r\n\x85\u2028\u2029?:,[]{}%@`":
+            raise ScannerError(
+                "while scanning an %s" % (name,),
+                start_mark,
+                "expected alphabetic or numeric character, but found %r" % utf8(ch),
+                self.reader.get_mark(),
+            )
+        end_mark = self.reader.get_mark()
+        return TokenClass(value, start_mark, end_mark)
+
+    def scan_tag(self):
+        # type: () -> Any
+        # See the specification for details.
+        srp = self.reader.peek
+        start_mark = self.reader.get_mark()
+        ch = srp(1)
+        if ch == "<":
+            handle = None
+            self.reader.forward(2)
+            suffix = self.scan_tag_uri("tag", start_mark)
+            if srp() != ">":
+                raise ScannerError(
+                    "while parsing a tag",
+                    start_mark,
+                    "expected '>', but found %r" % utf8(srp()),
+                    self.reader.get_mark(),
+                )
+            self.reader.forward()
+        elif ch in _THE_END_SPACE_TAB:
+            handle = None
+            suffix = "!"
+            self.reader.forward()
+        else:
+            length = 1
+            use_handle = False
+            while ch not in "\0 \r\n\x85\u2028\u2029":
+                if ch == "!":
+                    use_handle = True
+                    break
+                length += 1
+                ch = srp(length)
+            if use_handle:
+                handle = self.scan_tag_handle("tag", start_mark)
+            else:
+                handle = "!"
+                self.reader.forward()
+            suffix = self.scan_tag_uri("tag", start_mark)
+        ch = srp()
+        if ch not in "\0 \r\n\x85\u2028\u2029":
+            raise ScannerError(
+                "while scanning a tag",
+                start_mark,
+                "expected ' ', but found %r" % utf8(ch),
+                self.reader.get_mark(),
+            )
+        value = (handle, suffix)
+        end_mark = self.reader.get_mark()
+        return TagToken(value, start_mark, end_mark)
+
+    def scan_block_scalar(self, style, rt=False):
+        # type: (Any, Optional[bool]) -> Any
+        # See the specification for details.
+        srp = self.reader.peek
+        if style == ">":
+            folded = True
+        else:
+            folded = False
+
+        chunks = []  # type: List[Any]
+        start_mark = self.reader.get_mark()
+
+        # Scan the header.
+        self.reader.forward()
+        chomping, increment = self.scan_block_scalar_indicators(start_mark)
+        # block scalar comment e.g. : |+  # comment text
+        block_scalar_comment = self.scan_block_scalar_ignored_line(start_mark)
+
+        # Determine the indentation level and go to the first non-empty line.
+        min_indent = self.indent + 1
+        if increment is None:
+            # no increment and top level, min_indent could be 0
+            if min_indent < 1 and (
+                style not in "|>"
+                or (self.scanner_processing_version == (1, 1))
+                and getattr(
+                    self.loader,
+                    "top_level_block_style_scalar_no_indent_error_1_1",
+                    False,
+                )
+            ):
+                min_indent = 1
+            breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+            indent = max(min_indent, max_indent)
+        else:
+            if min_indent < 1:
+                min_indent = 1
+            indent = min_indent + increment - 1
+            breaks, end_mark = self.scan_block_scalar_breaks(indent)
+        line_break = ""
+
+        # Scan the inner part of the block scalar.
+        while self.reader.column == indent and srp() != "\0":
+            chunks.extend(breaks)
+            leading_non_space = srp() not in " \t"
+            length = 0
+            while srp(length) not in _THE_END:
+                length += 1
+            chunks.append(self.reader.prefix(length))
+            self.reader.forward(length)
+            line_break = self.scan_line_break()
+            breaks, end_mark = self.scan_block_scalar_breaks(indent)
+            if style in "|>" and min_indent == 0:
+                # at the beginning of a line, if in block style see if
+                # end of document/start_new_document
+                if self.check_document_start() or self.check_document_end():
+                    break
+            if self.reader.column == indent and srp() != "\0":
+
+                # Unfortunately, folding rules are ambiguous.
+                #
+                # This is the folding according to the specification:
+
+                if rt and folded and line_break == "\n":
+                    chunks.append("\a")
+                if (
+                    folded
+                    and line_break == "\n"
+                    and leading_non_space
+                    and srp() not in " \t"
+                ):
+                    if not breaks:
+                        chunks.append(" ")
+                else:
+                    chunks.append(line_break)
+
+                # This is Clark Evans's interpretation (also in the spec
+                # examples):
+                #
+                # if folded and line_break == u'\n':
+                #     if not breaks:
+                #         if srp() not in ' \t':
+                #             chunks.append(u' ')
+                #         else:
+                #             chunks.append(line_break)
+                # else:
+                #     chunks.append(line_break)
+            else:
+                break
+
+        # Process trailing line breaks. The 'chomping' setting determines
+        # whether they are included in the value.
+        trailing = []  # type: List[Any]
+        if chomping in [None, True]:
+            chunks.append(line_break)
+        if chomping is True:
+            chunks.extend(breaks)
+        elif chomping in [None, False]:
+            trailing.extend(breaks)
+
+        # We are done.
+        token = ScalarToken("".join(chunks), False, start_mark, end_mark, style)
+        if block_scalar_comment is not None:
+            token.add_pre_comments([block_scalar_comment])
+        if len(trailing) > 0:
+            # nprint('trailing 1', trailing)  # XXXXX
+            # Eat whitespaces and comments until we reach the next token.
+            comment = self.scan_to_next_token()
+            while comment:
+                trailing.append(" " * comment[1].column + comment[0])
+                comment = self.scan_to_next_token()
+
+            # Keep track of the trailing whitespace and following comments
+            # as a comment token, if it isn't all included in the actual value.
+            comment_end_mark = self.reader.get_mark()
+            comment = CommentToken("".join(trailing), end_mark, comment_end_mark)
+            token.add_post_comment(comment)
+        return token
+
+    def scan_block_scalar_indicators(self, start_mark):
+        # type: (Any) -> Any
+        # See the specification for details.
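+        # For example, the header "+2" (as in "|+2") gives chomping=True and
+        # increment=2, "-" gives chomping=False, and an empty header leaves
+        # both at None (clip chomping, auto-detected indentation).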
+        srp = self.reader.peek
+        chomping = None
+        increment = None
+        ch = srp()
+        if ch in "+-":
+            if ch == "+":
+                chomping = True
+            else:
+                chomping = False
+            self.reader.forward()
+            ch = srp()
+            if ch in "0123456789":
+                increment = int(ch)
+                if increment == 0:
+                    raise ScannerError(
+                        "while scanning a block scalar",
+                        start_mark,
+                        "expected indentation indicator in the range 1-9, "
+                        "but found 0",
+                        self.reader.get_mark(),
+                    )
+                self.reader.forward()
+        elif ch in "0123456789":
+            increment = int(ch)
+            if increment == 0:
+                raise ScannerError(
+                    "while scanning a block scalar",
+                    start_mark,
+                    "expected indentation indicator in the range 1-9, " "but found 0",
+                    self.reader.get_mark(),
+                )
+            self.reader.forward()
+            ch = srp()
+            if ch in "+-":
+                if ch == "+":
+                    chomping = True
+                else:
+                    chomping = False
+                self.reader.forward()
+        ch = srp()
+        if ch not in "\0 \r\n\x85\u2028\u2029":
+            raise ScannerError(
+                "while scanning a block scalar",
+                start_mark,
+                "expected chomping or indentation indicators, but found %r" % utf8(ch),
+                self.reader.get_mark(),
+            )
+        return chomping, increment
+
+    def scan_block_scalar_ignored_line(self, start_mark):
+        # type: (Any) -> Any
+        # See the specification for details.
+        srp = self.reader.peek
+        srf = self.reader.forward
+        prefix = ""
+        comment = None
+        while srp() == " ":
+            prefix += srp()
+            srf()
+        if srp() == "#":
+            comment = prefix
+            while srp() not in _THE_END:
+                comment += srp()
+                srf()
+        ch = srp()
+        if ch not in _THE_END:
+            raise ScannerError(
+                "while scanning a block scalar",
+                start_mark,
+                "expected a comment or a line break, but found %r" % utf8(ch),
+                self.reader.get_mark(),
+            )
+        self.scan_line_break()
+        return comment
+
+    def scan_block_scalar_indentation(self):
+        # type: () -> Any
+        # See the specification for details.
+        srp = self.reader.peek
+        srf = self.reader.forward
+        chunks = []
+        max_indent = 0
+        end_mark = self.reader.get_mark()
+        while srp() in " \r\n\x85\u2028\u2029":
+            if srp() != " ":
+                chunks.append(self.scan_line_break())
+                end_mark = self.reader.get_mark()
+            else:
+                srf()
+                if self.reader.column > max_indent:
+                    max_indent = self.reader.column
+        return chunks, max_indent, end_mark
+
+    def scan_block_scalar_breaks(self, indent):
+        # type: (int) -> Any
+        # See the specification for details.
+        chunks = []
+        srp = self.reader.peek
+        srf = self.reader.forward
+        end_mark = self.reader.get_mark()
+        while self.reader.column < indent and srp() == " ":
+            srf()
+        while srp() in "\r\n\x85\u2028\u2029":
+            chunks.append(self.scan_line_break())
+            end_mark = self.reader.get_mark()
+            while self.reader.column < indent and srp() == " ":
+                srf()
+        return chunks, end_mark
+
+    def scan_flow_scalar(self, style):
+        # type: (Any) -> Any
+        # See the specification for details.
+        # Note that we loosen the indentation rules for quoted scalars. Quoted
+        # scalars don't need to adhere to indentation rules because " and '
+        # clearly mark their beginning and end. Therefore we are less
+        # restrictive than the specification requires. We only need to check
+        # that document separators are not included in scalars.
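+        # For example, a "---" or "..." at the start of a line inside a quoted
+        # scalar makes scan_flow_scalar_breaks below raise a ScannerError.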
+        if style == '"':
+            double = True
+        else:
+            double = False
+        srp = self.reader.peek
+        chunks = []  # type: List[Any]
+        start_mark = self.reader.get_mark()
+        quote = srp()
+        self.reader.forward()
+        chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+        while srp() != quote:
+            chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+            chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+        self.reader.forward()
+        end_mark = self.reader.get_mark()
+        return ScalarToken("".join(chunks), False, start_mark, end_mark, style)
+
+    ESCAPE_REPLACEMENTS = {
+        "0": "\0",
+        "a": "\x07",
+        "b": "\x08",
+        "t": "\x09",
+        "\t": "\x09",
+        "n": "\x0A",
+        "v": "\x0B",
+        "f": "\x0C",
+        "r": "\x0D",
+        "e": "\x1B",
+        " ": "\x20",
+        '"': '"',
+        "/": "/",  # as per http://www.json.org/
+        "\\": "\\",
+        "N": "\x85",
+        "_": "\xA0",
+        "L": "\u2028",
+        "P": "\u2029",
+    }
+
+    ESCAPE_CODES = {"x": 2, "u": 4, "U": 8}
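+    # For example, in a double-quoted scalar "\t" yields a TAB, "\n" a line
+    # feed, and "\x41" / "\u0041" the character "A" (2 resp. 4 hex digits).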
+
+    def scan_flow_scalar_non_spaces(self, double, start_mark):
+        # type: (Any, Any) -> Any
+        # See the specification for details.
+        chunks = []  # type: List[Any]
+        srp = self.reader.peek
+        srf = self.reader.forward
+        while True:
+            length = 0
+            while srp(length) not in " \n'\"\\\0\t\r\x85\u2028\u2029":
+                length += 1
+            if length != 0:
+                chunks.append(self.reader.prefix(length))
+                srf(length)
+            ch = srp()
+            if not double and ch == "'" and srp(1) == "'":
+                chunks.append("'")
+                srf(2)
+            elif (double and ch == "'") or (not double and ch in '"\\'):
+                chunks.append(ch)
+                srf()
+            elif double and ch == "\\":
+                srf()
+                ch = srp()
+                if ch in self.ESCAPE_REPLACEMENTS:
+                    chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+                    srf()
+                elif ch in self.ESCAPE_CODES:
+                    length = self.ESCAPE_CODES[ch]
+                    srf()
+                    for k in range(length):
+                        if srp(k) not in "0123456789ABCDEFabcdef":
+                            raise ScannerError(
+                                "while scanning a double-quoted scalar",
+                                start_mark,
+                                "expected escape sequence of %d hexdecimal "
+                                "numbers, but found %r" % (length, utf8(srp(k))),
+                                self.reader.get_mark(),
+                            )
+                    code = int(self.reader.prefix(length), 16)
+                    chunks.append(unichr(code))
+                    srf(length)
+                elif ch in "\n\r\x85\u2028\u2029":
+                    self.scan_line_break()
+                    chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+                else:
+                    raise ScannerError(
+                        "while scanning a double-quoted scalar",
+                        start_mark,
+                        "found unknown escape character %r" % utf8(ch),
+                        self.reader.get_mark(),
+                    )
+            else:
+                return chunks
+
+    def scan_flow_scalar_spaces(self, double, start_mark):
+        # type: (Any, Any) -> Any
+        # See the specification for details.
+        srp = self.reader.peek
+        chunks = []
+        length = 0
+        while srp(length) in " \t":
+            length += 1
+        whitespaces = self.reader.prefix(length)
+        self.reader.forward(length)
+        ch = srp()
+        if ch == "\0":
+            raise ScannerError(
+                "while scanning a quoted scalar",
+                start_mark,
+                "found unexpected end of stream",
+                self.reader.get_mark(),
+            )
+        elif ch in "\r\n\x85\u2028\u2029":
+            line_break = self.scan_line_break()
+            breaks = self.scan_flow_scalar_breaks(double, start_mark)
+            if line_break != "\n":
+                chunks.append(line_break)
+            elif not breaks:
+                chunks.append(" ")
+            chunks.extend(breaks)
+        else:
+            chunks.append(whitespaces)
+        return chunks
+
+    def scan_flow_scalar_breaks(self, double, start_mark):
+        # type: (Any, Any) -> Any
+        # See the specification for details.
+        chunks = []  # type: List[Any]
+        srp = self.reader.peek
+        srf = self.reader.forward
+        while True:
+            # Instead of checking indentation, we check for document
+            # separators.
+            prefix = self.reader.prefix(3)
+            if (prefix == "---" or prefix == "...") and srp(3) in _THE_END_SPACE_TAB:
+                raise ScannerError(
+                    "while scanning a quoted scalar",
+                    start_mark,
+                    "found unexpected document separator",
+                    self.reader.get_mark(),
+                )
+            while srp() in " \t":
+                srf()
+            if srp() in "\r\n\x85\u2028\u2029":
+                chunks.append(self.scan_line_break())
+            else:
+                return chunks
+
+    def scan_plain(self):
+        # type: () -> Any
+        # See the specification for details.
+        # We add an additional restriction for the flow context:
+        #   plain scalars in the flow context cannot contain ',', ': ' and '?'.
+        # We also keep track of the `allow_simple_key` flag here.
+        # Indentation rules are loosened for the flow context.
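+        # For example, in the flow collection "[a, b]" the plain scalar "a"
+        # ends at the ',', whereas in block context "a, b" is one scalar.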
+        srp = self.reader.peek
+        srf = self.reader.forward
+        chunks = []  # type: List[Any]
+        start_mark = self.reader.get_mark()
+        end_mark = start_mark
+        indent = self.indent + 1
+        # We allow zero indentation for scalars, but then we need to check for
+        # document separators at the beginning of the line.
+        # if indent == 0:
+        #     indent = 1
+        spaces = []  # type: List[Any]
+        while True:
+            length = 0
+            if srp() == "#":
+                break
+            while True:
+                ch = srp(length)
+                if ch == ":" and srp(length + 1) not in _THE_END_SPACE_TAB:
+                    pass
+                elif ch == "?" and self.scanner_processing_version != (1, 1):
+                    pass
+                elif (
+                    ch in _THE_END_SPACE_TAB
+                    or (
+                        not self.flow_level
+                        and ch == ":"
+                        and srp(length + 1) in _THE_END_SPACE_TAB
+                    )
+                    or (self.flow_level and ch in ",:?[]{}")
+                ):
+                    break
+                length += 1
+            # It's not clear what we should do with ':' in the flow context.
+            if (
+                self.flow_level
+                and ch == ":"
+                and srp(length + 1) not in "\0 \t\r\n\x85\u2028\u2029,[]{}"
+            ):
+                srf(length)
+                raise ScannerError(
+                    "while scanning a plain scalar",
+                    start_mark,
+                    "found unexpected ':'",
+                    self.reader.get_mark(),
+                    "Please check "
+                    "http://pyyaml.org/wiki/YAMLColonInFlowContext "
+                    "for details.",
+                )
+            if length == 0:
+                break
+            self.allow_simple_key = False
+            chunks.extend(spaces)
+            chunks.append(self.reader.prefix(length))
+            srf(length)
+            end_mark = self.reader.get_mark()
+            spaces = self.scan_plain_spaces(indent, start_mark)
+            if (
+                not spaces
+                or srp() == "#"
+                or (not self.flow_level and self.reader.column < indent)
+            ):
+                break
+
+        token = ScalarToken("".join(chunks), True, start_mark, end_mark)
+        if spaces and spaces[0] == "\n":
+            # Create a comment token to preserve the trailing line breaks.
+            comment = CommentToken("".join(spaces) + "\n", start_mark, end_mark)
+            token.add_post_comment(comment)
+        return token
+
+    def scan_plain_spaces(self, indent, start_mark):
+        # type: (Any, Any) -> Any
+        # See the specification for details.
+        # The specification is really confusing about tabs in plain scalars.
+        # We just forbid them completely. Do not use tabs in YAML!
+        srp = self.reader.peek
+        srf = self.reader.forward
+        chunks = []
+        length = 0
+        while srp(length) in " ":
+            length += 1
+        whitespaces = self.reader.prefix(length)
+        self.reader.forward(length)
+        ch = srp()
+        if ch in "\r\n\x85\u2028\u2029":
+            line_break = self.scan_line_break()
+            self.allow_simple_key = True
+            prefix = self.reader.prefix(3)
+            if (prefix == "---" or prefix == "...") and srp(3) in _THE_END_SPACE_TAB:
+                return
+            breaks = []
+            while srp() in " \r\n\x85\u2028\u2029":
+                if srp() == " ":
+                    srf()
+                else:
+                    breaks.append(self.scan_line_break())
+                    prefix = self.reader.prefix(3)
+                    if (prefix == "---" or prefix == "...") and srp(
+                        3
+                    ) in _THE_END_SPACE_TAB:
+                        return
+            if line_break != "\n":
+                chunks.append(line_break)
+            elif not breaks:
+                chunks.append(" ")
+            chunks.extend(breaks)
+        elif whitespaces:
+            chunks.append(whitespaces)
+        return chunks
+
+    def scan_tag_handle(self, name, start_mark):
+        # type: (Any, Any) -> Any
+        # See the specification for details.
+        # For some strange reason, the specification does not allow '_' in
+        # tag handles. I have allowed it anyway.
+        srp = self.reader.peek
+        ch = srp()
+        if ch != "!":
+            raise ScannerError(
+                "while scanning a %s" % (name,),
+                start_mark,
+                "expected '!', but found %r" % utf8(ch),
+                self.reader.get_mark(),
+            )
+        length = 1
+        ch = srp(length)
+        if ch != " ":
+            while (
+                "0" <= ch <= "9" or "A" <= ch <= "Z" or "a" <= ch <= "z" or ch in "-_"
+            ):
+                length += 1
+                ch = srp(length)
+            if ch != "!":
+                self.reader.forward(length)
+                raise ScannerError(
+                    "while scanning a %s" % (name,),
+                    start_mark,
+                    "expected '!', but found %r" % utf8(ch),
+                    self.reader.get_mark(),
+                )
+            length += 1
+        value = self.reader.prefix(length)
+        self.reader.forward(length)
+        return value
+
+    def scan_tag_uri(self, name, start_mark):
+        # type: (Any, Any) -> Any
+        # See the specification for details.
+        # Note: we do not check if URI is well-formed.
+        srp = self.reader.peek
+        chunks = []
+        length = 0
+        ch = srp(length)
+        while (
+            "0" <= ch <= "9"
+            or "A" <= ch <= "Z"
+            or "a" <= ch <= "z"
+            or ch in "-;/?:@&=+$,_.!~*'()[]%"
+            or ((self.scanner_processing_version > (1, 1)) and ch == "#")
+        ):
+            if ch == "%":
+                chunks.append(self.reader.prefix(length))
+                self.reader.forward(length)
+                length = 0
+                chunks.append(self.scan_uri_escapes(name, start_mark))
+            else:
+                length += 1
+            ch = srp(length)
+        if length != 0:
+            chunks.append(self.reader.prefix(length))
+            self.reader.forward(length)
+            length = 0
+        if not chunks:
+            raise ScannerError(
+                "while parsing a %s" % (name,),
+                start_mark,
+                "expected URI, but found %r" % utf8(ch),
+                self.reader.get_mark(),
+            )
+        return "".join(chunks)
+
+    def scan_uri_escapes(self, name, start_mark):
+        # type: (Any, Any) -> Any
+        # See the specification for details.
+        srp = self.reader.peek
+        srf = self.reader.forward
+        code_bytes = []  # type: List[Any]
+        mark = self.reader.get_mark()
+        while srp() == "%":
+            srf()
+            for k in range(2):
+                if srp(k) not in "0123456789ABCDEFabcdef":
+                    raise ScannerError(
+                        "while scanning a %s" % (name,),
+                        start_mark,
+                        "expected URI escape sequence of 2 hexdecimal numbers,"
+                        " but found %r" % utf8(srp(k)),
+                        self.reader.get_mark(),
+                    )
+            if PY3:
+                code_bytes.append(int(self.reader.prefix(2), 16))
+            else:
+                code_bytes.append(chr(int(self.reader.prefix(2), 16)))
+            srf(2)
+        try:
+            if PY3:
+                value = bytes(code_bytes).decode("utf-8")
+            else:
+                value = unicode(b"".join(code_bytes), "utf-8")
+        except UnicodeDecodeError as exc:
+            raise ScannerError(
+                "while scanning a %s" % (name,), start_mark, str(exc), mark
+            )
+        return value
+
+    def scan_line_break(self):
+        # type: () -> Any
+        # Transforms:
+        #   '\r\n'      :   '\n'
+        #   '\r'        :   '\n'
+        #   '\n'        :   '\n'
+        #   '\x85'      :   '\n'
+        #   '\u2028'    :   '\u2028'
+        #   '\u2029'    :   '\u2029'
+        #   default     :   ''
+        ch = self.reader.peek()
+        if ch in "\r\n\x85":
+            if self.reader.prefix(2) == "\r\n":
+                self.reader.forward(2)
+            else:
+                self.reader.forward()
+            return "\n"
+        elif ch in "\u2028\u2029":
+            self.reader.forward()
+            return ch
+        return ""
+
+
+class RoundTripScanner(Scanner):
+    def check_token(self, *choices):
+        # type: (Any) -> bool
+        # Check if the next token is one of the given types.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        self._gather_comments()
+        if bool(self.tokens):
+            if not choices:
+                return True
+            for choice in choices:
+                if isinstance(self.tokens[0], choice):
+                    return True
+        return False
+
+    def peek_token(self):
+        # type: () -> Any
+        # Return the next token, but do not delete it from the queue.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        self._gather_comments()
+        if bool(self.tokens):
+            return self.tokens[0]
+        return None
+
+    def _gather_comments(self):
+        # type: () -> Any
+        """combine multiple comment lines"""
+        comments = []  # type: List[Any]
+        if not self.tokens:
+            return comments
+        if isinstance(self.tokens[0], CommentToken):
+            comment = self.tokens.pop(0)
+            self.tokens_taken += 1
+            comments.append(comment)
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+            if not self.tokens:
+                return comments
+            if isinstance(self.tokens[0], CommentToken):
+                self.tokens_taken += 1
+                comment = self.tokens.pop(0)
+                # nprint('dropping2', comment)
+                comments.append(comment)
+        if len(comments) >= 1:
+            self.tokens[0].add_pre_comments(comments)
+        # pull in post comment on e.g. ':'
+        if not self.done and len(self.tokens) < 2:
+            self.fetch_more_tokens()
+
+    def get_token(self):
+        # type: () -> Any
+        # Return the next token.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        self._gather_comments()
+        if bool(self.tokens):
+            # nprint('tk', self.tokens)
+            # only add a post comment to single-line tokens: ScalarToken,
+            # ValueToken, FlowSequenceEndToken, FlowMappingEndToken; otherwise
+            # hidden stream tokens could get them (leave them and they will be
+            # pre comments for the next map/seq)
+            if (
+                len(self.tokens) > 1
+                and isinstance(
+                    self.tokens[0],
+                    (
+                        ScalarToken,
+                        ValueToken,
+                        FlowSequenceEndToken,
+                        FlowMappingEndToken,
+                    ),
+                )
+                and isinstance(self.tokens[1], CommentToken)
+                and self.tokens[0].end_mark.line == self.tokens[1].start_mark.line
+            ):
+                self.tokens_taken += 1
+                c = self.tokens.pop(1)
+                self.fetch_more_tokens()
+                while len(self.tokens) > 1 and isinstance(self.tokens[1], CommentToken):
+                    self.tokens_taken += 1
+                    c1 = self.tokens.pop(1)
+                    c.value = c.value + (" " * c1.start_mark.column) + c1.value
+                    self.fetch_more_tokens()
+                self.tokens[0].add_post_comment(c)
+            elif (
+                len(self.tokens) > 1
+                and isinstance(self.tokens[0], ScalarToken)
+                and isinstance(self.tokens[1], CommentToken)
+                and self.tokens[0].end_mark.line != self.tokens[1].start_mark.line
+            ):
+                self.tokens_taken += 1
+                c = self.tokens.pop(1)
+                c.value = (
+                    "\n" * (c.start_mark.line - self.tokens[0].end_mark.line)
+                    + (" " * c.start_mark.column)
+                    + c.value
+                )
+                self.tokens[0].add_post_comment(c)
+                self.fetch_more_tokens()
+                while len(self.tokens) > 1 and isinstance(self.tokens[1], CommentToken):
+                    self.tokens_taken += 1
+                    c1 = self.tokens.pop(1)
+                    c.value = c.value + (" " * c1.start_mark.column) + c1.value
+                    self.fetch_more_tokens()
+            self.tokens_taken += 1
+            return self.tokens.pop(0)
+        return None
+
+    def fetch_comment(self, comment):
+        # type: (Any) -> None
+        value, start_mark, end_mark = comment
+        while value and value[-1] == " ":
+            # empty line within indented key context
+            # no need to update end-mark, that is not used
+            value = value[:-1]
+        self.tokens.append(CommentToken(value, start_mark, end_mark))
+
+    # scanner
+
+    def scan_to_next_token(self):
+        # type: () -> Any
+        # Identical in purpose to Scanner.scan_to_next_token (see the comments
+        # and the TODO on tab handling there), except that comments and empty
+        # lines are returned as (value, start_mark, end_mark) tuples so they
+        # can be preserved on round-trip instead of being discarded.
+
+        srp = self.reader.peek
+        srf = self.reader.forward
+        if self.reader.index == 0 and srp() == "\uFEFF":
+            srf()
+        found = False
+        while not found:
+            while srp() == " ":
+                srf()
+            ch = srp()
+            if ch == "#":
+                start_mark = self.reader.get_mark()
+                comment = ch
+                srf()
+                while ch not in _THE_END:
+                    ch = srp()
+                    if ch == "\0":  # don't gobble the end-of-stream character
+                        # but add an explicit newline, as "YAML processors should
+                        # terminate the stream with an explicit line break"
+                        # https://yaml.org/spec/1.2/spec.html#id2780069
+                        comment += "\n"
+                        break
+                    comment += ch
+                    srf()
+                # gather any blank lines following the comment too
+                ch = self.scan_line_break()
+                while len(ch) > 0:
+                    comment += ch
+                    ch = self.scan_line_break()
+                end_mark = self.reader.get_mark()
+                if not self.flow_level:
+                    self.allow_simple_key = True
+                return comment, start_mark, end_mark
+            if bool(self.scan_line_break()):
+                start_mark = self.reader.get_mark()
+                if not self.flow_level:
+                    self.allow_simple_key = True
+                ch = srp()
+                if ch == "\n":  # empty toplevel lines
+                    start_mark = self.reader.get_mark()
+                    comment = ""
+                    while ch:
+                        ch = self.scan_line_break(empty_line=True)
+                        comment += ch
+                    if srp() == "#":
+                        # empty line followed by indented real comment
+                        comment = comment.rsplit("\n", 1)[0] + "\n"
+                    end_mark = self.reader.get_mark()
+                    return comment, start_mark, end_mark
+            else:
+                found = True
+        return None
+
+    def scan_line_break(self, empty_line=False):
+        # type: (bool) -> Text
+        # Transforms:
+        #   '\r\n'      :   '\n'
+        #   '\r'        :   '\n'
+        #   '\n'        :   '\n'
+        #   '\x85'      :   '\n'
+        #   '\u2028'    :   '\u2028'
+        #   '\u2029'    :   '\u2029'
+        #   default     :   ''
+        ch = self.reader.peek()  # type: Text
+        if ch in "\r\n\x85":
+            if self.reader.prefix(2) == "\r\n":
+                self.reader.forward(2)
+            else:
+                self.reader.forward()
+            return "\n"
+        elif ch in "\u2028\u2029":
+            self.reader.forward()
+            return ch
+        elif empty_line and ch in "\t ":
+            self.reader.forward()
+            return ch
+        return ""
+
+    def scan_block_scalar(self, style, rt=True):
+        # type: (Any, Optional[bool]) -> Any
+        return Scanner.scan_block_scalar(self, style, rt=rt)
+
+
+# try:
+#     import psyco
+#     psyco.bind(Scanner)
+# except ImportError:
+#     pass
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/serializer.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/serializer.py
new file mode 100644
index 00000000..a8e231ab
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/serializer.py
@@ -0,0 +1,256 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+from strictyaml.ruamel.error import YAMLError
+from strictyaml.ruamel.compat import (
+    nprint,
+    DBG_NODE,
+    dbg,
+    string_types,
+    nprintf,
+)  # NOQA
+from strictyaml.ruamel.util import RegExp
+
+from strictyaml.ruamel.events import (
+    StreamStartEvent,
+    StreamEndEvent,
+    MappingStartEvent,
+    MappingEndEvent,
+    SequenceStartEvent,
+    SequenceEndEvent,
+    AliasEvent,
+    ScalarEvent,
+    DocumentStartEvent,
+    DocumentEndEvent,
+)
+from strictyaml.ruamel.nodes import MappingNode, ScalarNode, SequenceNode
+
+if False:  # MYPY
+    from typing import Any, Dict, Union, Text, Optional  # NOQA
+    from strictyaml.ruamel.compat import VersionType  # NOQA
+
+__all__ = ["Serializer", "SerializerError"]
+
+
+class SerializerError(YAMLError):
+    pass
+
+
+class Serializer(object):
+
+    # generated anchors are 'id' followed by 3 or more digits, but not 'id000'
+    ANCHOR_TEMPLATE = u"id%03d"
+    ANCHOR_RE = RegExp(u"id(?!000$)\\d{3,}")
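+    # For example, generated anchors are "id001", "id002", ...; ANCHOR_RE is
+    # what templated_id() at the bottom of this module matches against.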
+
+    def __init__(
+        self,
+        encoding=None,
+        explicit_start=None,
+        explicit_end=None,
+        version=None,
+        tags=None,
+        dumper=None,
+    ):
+        # type: (Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any) -> None  # NOQA
+        self.dumper = dumper
+        if self.dumper is not None:
+            self.dumper._serializer = self
+        self.use_encoding = encoding
+        self.use_explicit_start = explicit_start
+        self.use_explicit_end = explicit_end
+        if isinstance(version, string_types):
+            self.use_version = tuple(map(int, version.split(".")))
+        else:
+            self.use_version = version  # type: ignore
+        self.use_tags = tags
+        self.serialized_nodes = {}  # type: Dict[Any, Any]
+        self.anchors = {}  # type: Dict[Any, Any]
+        self.last_anchor_id = 0
+        self.closed = None  # type: Optional[bool]
+        self._templated_id = None
+
+    @property
+    def emitter(self):
+        # type: () -> Any
+        if hasattr(self.dumper, "typ"):
+            return self.dumper.emitter
+        return self.dumper._emitter
+
+    @property
+    def resolver(self):
+        # type: () -> Any
+        if hasattr(self.dumper, "typ"):
+            return self.dumper.resolver
+        return self.dumper._resolver
+
+    def open(self):
+        # type: () -> None
+        if self.closed is None:
+            self.emitter.emit(StreamStartEvent(encoding=self.use_encoding))
+            self.closed = False
+        elif self.closed:
+            raise SerializerError("serializer is closed")
+        else:
+            raise SerializerError("serializer is already opened")
+
+    def close(self):
+        # type: () -> None
+        if self.closed is None:
+            raise SerializerError("serializer is not opened")
+        elif not self.closed:
+            self.emitter.emit(StreamEndEvent())
+            self.closed = True
+
+    # def __del__(self):
+    #     self.close()
+
+    def serialize(self, node):
+        # type: (Any) -> None
+        if dbg(DBG_NODE):
+            nprint("Serializing nodes")
+            node.dump()
+        if self.closed is None:
+            raise SerializerError("serializer is not opened")
+        elif self.closed:
+            raise SerializerError("serializer is closed")
+        self.emitter.emit(
+            DocumentStartEvent(
+                explicit=self.use_explicit_start,
+                version=self.use_version,
+                tags=self.use_tags,
+            )
+        )
+        self.anchor_node(node)
+        self.serialize_node(node, None, None)
+        self.emitter.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+        self.serialized_nodes = {}
+        self.anchors = {}
+        self.last_anchor_id = 0
+
+    def anchor_node(self, node):
+        # type: (Any) -> None
+        if node in self.anchors:
+            if self.anchors[node] is None:
+                self.anchors[node] = self.generate_anchor(node)
+        else:
+            anchor = None
+            try:
+                if node.anchor.always_dump:
+                    anchor = node.anchor.value
+            except:  # NOQA
+                pass
+            self.anchors[node] = anchor
+            if isinstance(node, SequenceNode):
+                for item in node.value:
+                    self.anchor_node(item)
+            elif isinstance(node, MappingNode):
+                for key, value in node.value:
+                    self.anchor_node(key)
+                    self.anchor_node(value)
+
+    def generate_anchor(self, node):
+        # type: (Any) -> Any
+        try:
+            anchor = node.anchor.value
+        except:  # NOQA
+            anchor = None
+        if anchor is None:
+            self.last_anchor_id += 1
+            return self.ANCHOR_TEMPLATE % self.last_anchor_id
+        return anchor
+
+    def serialize_node(self, node, parent, index):
+        # type: (Any, Any, Any) -> None
+        alias = self.anchors[node]
+        if node in self.serialized_nodes:
+            self.emitter.emit(AliasEvent(alias))
+        else:
+            self.serialized_nodes[node] = True
+            self.resolver.descend_resolver(parent, index)
+            if isinstance(node, ScalarNode):
+                # check whether node.tag equals the tag that would result from
+                # parsing the value back; if not, quoting is necessary for strings
+                detected_tag = self.resolver.resolve(
+                    ScalarNode, node.value, (True, False)
+                )
+                default_tag = self.resolver.resolve(
+                    ScalarNode, node.value, (False, True)
+                )
+                implicit = (
+                    (node.tag == detected_tag),
+                    (node.tag == default_tag),
+                    node.tag.startswith("tag:yaml.org,2002:"),
+                )
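+                # implicit[0]: node.tag matches what the value would resolve to
+                # when parsed back as a plain scalar; implicit[1]: the same for
+                # a non-plain (quoted) scalar; implicit[2]: the tag is from the
+                # core "tag:yaml.org,2002:" namespace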
+                self.emitter.emit(
+                    ScalarEvent(
+                        alias,
+                        node.tag,
+                        implicit,
+                        node.value,
+                        style=node.style,
+                        comment=node.comment,
+                    )
+                )
+            elif isinstance(node, SequenceNode):
+                implicit = node.tag == self.resolver.resolve(
+                    SequenceNode, node.value, True
+                )
+                comment = node.comment
+                end_comment = None
+                seq_comment = None
+                if node.flow_style is True:
+                    if comment:  # eol comment on flow style sequence
+                        seq_comment = comment[0]
+                        # comment[0] = None
+                if comment and len(comment) > 2:
+                    end_comment = comment[2]
+                self.emitter.emit(
+                    SequenceStartEvent(
+                        alias,
+                        node.tag,
+                        implicit,
+                        flow_style=node.flow_style,
+                        comment=node.comment,
+                    )
+                )
+                index = 0
+                for item in node.value:
+                    self.serialize_node(item, node, index)
+                    index += 1
+                self.emitter.emit(SequenceEndEvent(comment=[seq_comment, end_comment]))
+            elif isinstance(node, MappingNode):
+                implicit = node.tag == self.resolver.resolve(
+                    MappingNode, node.value, True
+                )
+                comment = node.comment
+                end_comment = None
+                map_comment = None
+                if node.flow_style is True:
+                    if comment:  # eol comment on flow style mapping
+                        map_comment = comment[0]
+                        # comment[0] = None
+                if comment and len(comment) > 2:
+                    end_comment = comment[2]
+                self.emitter.emit(
+                    MappingStartEvent(
+                        alias,
+                        node.tag,
+                        implicit,
+                        flow_style=node.flow_style,
+                        comment=node.comment,
+                        nr_items=len(node.value),
+                    )
+                )
+                for key, value in node.value:
+                    self.serialize_node(key, node, None)
+                    self.serialize_node(value, node, key)
+                self.emitter.emit(MappingEndEvent(comment=[map_comment, end_comment]))
+            self.resolver.ascend_resolver()
+
+
+def templated_id(s):
+    # type: (Text) -> Any
+    return Serializer.ANCHOR_RE.match(s)
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/timestamp.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/timestamp.py
new file mode 100644
index 00000000..8887ea7d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/timestamp.py
@@ -0,0 +1,66 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+import datetime
+import copy
+
+# ToDo: at least on PY3 you could probably attach the tzinfo correctly to the object
+#       a more complete datetime might be used by safe loading as well
+
+if False:  # MYPY
+    from typing import Any, Dict, Optional, List  # NOQA
+
+
+class TimeStamp(datetime.datetime):
+    def __init__(self, *args, **kw):
+        # type: (Any, Any) -> None
+        self._yaml = dict(t=False, tz=None, delta=0)  # type: Dict[Any, Any]
+
+    def __new__(cls, *args, **kw):  # datetime is immutable
+        # type: (Any, Any) -> Any
+        return datetime.datetime.__new__(cls, *args, **kw)  # type: ignore
+
+    def __deepcopy__(self, memo):
+        # type: (Any) -> Any
+        ts = TimeStamp(
+            self.year, self.month, self.day, self.hour, self.minute, self.second,
+            self.microsecond,  # pass microsecond along so deepcopy is lossless
+        )
+        ts._yaml = copy.deepcopy(self._yaml)
+        return ts
+
+    def replace(
+        self,
+        year=None,
+        month=None,
+        day=None,
+        hour=None,
+        minute=None,
+        second=None,
+        microsecond=None,
+        tzinfo=True,
+        fold=None,
+    ):
+        # type: (Any, Any, Any, Any, Any, Any, Any, Any, Any) -> Any
+        if year is None:
+            year = self.year
+        if month is None:
+            month = self.month
+        if day is None:
+            day = self.day
+        if hour is None:
+            hour = self.hour
+        if minute is None:
+            minute = self.minute
+        if second is None:
+            second = self.second
+        if microsecond is None:
+            microsecond = self.microsecond
+        if tzinfo is True:
+            tzinfo = self.tzinfo
+        if fold is None:
+            fold = self.fold
+        ts = type(self)(
+            year, month, day, hour, minute, second, microsecond, tzinfo, fold=fold
+        )
+        ts._yaml = copy.deepcopy(self._yaml)
+        return ts
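+
+
+# Editor's note -- a hypothetical usage sketch, not part of the original
+# module: both ``replace`` and ``__deepcopy__`` copy the private ``_yaml``
+# dict across, so round-trip metadata survives ordinary datetime manipulation:
+#
+#     ts = TimeStamp(2021, 1, 2, 3, 4, 5)
+#     ts._yaml['t'] = True            # e.g. timestamp was written with a 'T'
+#     later = ts.replace(hour=9)
+#     assert later._yaml['t'] is True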
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/tokens.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/tokens.py
new file mode 100644
index 00000000..ea457c76
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/tokens.py
@@ -0,0 +1,288 @@
+# coding: utf-8
+
+from __future__ import unicode_literals
+
+if False:  # MYPY
+    from typing import Text, Any, Dict, Optional, List  # NOQA
+    from .error import StreamMark  # NOQA
+
+SHOWLINES = True
+
+
+class Token(object):
+    __slots__ = "start_mark", "end_mark", "_comment"
+
+    def __init__(self, start_mark, end_mark):
+        # type: (StreamMark, StreamMark) -> None
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+
+    def __repr__(self):
+        # type: () -> Any
+        # attributes = [key for key in self.__slots__ if not key.endswith('_mark') and
+        #               hasattr('self', key)]
+        attributes = [key for key in self.__slots__ if not key.endswith("_mark")]
+        attributes.sort()
+        arguments = ", ".join(
+            ["%s=%r" % (key, getattr(self, key)) for key in attributes]
+        )
+        if SHOWLINES:
+            try:
+                arguments += ", line: " + str(self.start_mark.line)
+            except:  # NOQA
+                pass
+        try:
+            arguments += ", comment: " + str(self._comment)
+        except:  # NOQA
+            pass
+        return "{}({})".format(self.__class__.__name__, arguments)
+
+    def add_post_comment(self, comment):
+        # type: (Any) -> None
+        if not hasattr(self, "_comment"):
+            self._comment = [None, None]
+        self._comment[0] = comment
+
+    def add_pre_comments(self, comments):
+        # type: (Any) -> None
+        if not hasattr(self, "_comment"):
+            self._comment = [None, None]
+        assert self._comment[1] is None
+        self._comment[1] = comments
+
+    def get_comment(self):
+        # type: () -> Any
+        return getattr(self, "_comment", None)
+
+    @property
+    def comment(self):
+        # type: () -> Any
+        return getattr(self, "_comment", None)
+
+    def move_comment(self, target, empty=False):
+        # type: (Any, bool) -> Any
+        """move a comment from this token to target (normally next token)
+        used to combine e.g. comments before a BlockEntryToken to the
+        ScalarToken that follows it
+        empty is a special for empty values -> comment after key
+        """
+        c = self.comment
+        if c is None:
+            return
+        # don't push beyond last element
+        if isinstance(target, (StreamEndToken, DocumentStartToken)):
+            return
+        delattr(self, "_comment")
+        tc = target.comment
+        if not tc:  # no target comment yet, just insert
+            # special for empty value in key: value issue 25
+            if empty:
+                c = [c[0], c[1], None, None, c[0]]
+            target._comment = c
+            # nprint('mco2:', self, target, target.comment, empty)
+            return self
+        if c[0] and tc[0] or c[1] and tc[1]:
+            raise NotImplementedError("overlap in comment %r %r" % (c, tc))
+        if c[0]:
+            tc[0] = c[0]
+        if c[1]:
+            tc[1] = c[1]
+        return self
+
+    def split_comment(self):
+        # type: () -> Any
+        """split the post part of a comment, and return it
+        as comment to be added. Delete second part if [None, None]
+         abc:  # this goes to sequence
+           # this goes to first element
+           - first element
+        """
+        comment = self.comment
+        if comment is None or comment[0] is None:
+            return None  # nothing to do
+        ret_val = [comment[0], None]
+        if comment[1] is None:
+            delattr(self, "_comment")
+        return ret_val
+
+
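+# Editor's note -- a hypothetical sketch, not part of the original module:
+# ``_comment`` is a two-slot list, ``[post, pre]``: slot 0 holds the
+# end-of-line comment token that follows this token, slot 1 the list of
+# full-line comment tokens that precede it.  ``move_comment`` shifts both
+# slots onto the next token so the round-trip machinery finds them there
+# (all names below are made-up placeholders):
+#
+#     entry = BlockEntryToken(mark, mark)       # the '-' of a sequence item
+#     entry.add_post_comment(comment_token)     # '# ...' after the dash
+#     entry.move_comment(scalar_token)
+#     assert scalar_token.comment[0] is comment_token
+
+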
+# class BOMToken(Token):
+#     id = '<byte order mark>'
+
+
+class DirectiveToken(Token):
+    __slots__ = "name", "value"
+    id = "<directive>"
+
+    def __init__(self, name, value, start_mark, end_mark):
+        # type: (Any, Any, Any, Any) -> None
+        Token.__init__(self, start_mark, end_mark)
+        self.name = name
+        self.value = value
+
+
+class DocumentStartToken(Token):
+    __slots__ = ()
+    id = "<document start>"
+
+
+class DocumentEndToken(Token):
+    __slots__ = ()
+    id = "<document end>"
+
+
+class StreamStartToken(Token):
+    __slots__ = ("encoding",)
+    id = "<stream start>"
+
+    def __init__(self, start_mark=None, end_mark=None, encoding=None):
+        # type: (Any, Any, Any) -> None
+        Token.__init__(self, start_mark, end_mark)
+        self.encoding = encoding
+
+
+class StreamEndToken(Token):
+    __slots__ = ()
+    id = "<stream end>"
+
+
+class BlockSequenceStartToken(Token):
+    __slots__ = ()
+    id = "<block sequence start>"
+
+
+class BlockMappingStartToken(Token):
+    __slots__ = ()
+    id = "<block mapping start>"
+
+
+class BlockEndToken(Token):
+    __slots__ = ()
+    id = "<block end>"
+
+
+class FlowSequenceStartToken(Token):
+    __slots__ = ()
+    id = "["
+
+
+class FlowMappingStartToken(Token):
+    __slots__ = ()
+    id = "{"
+
+
+class FlowSequenceEndToken(Token):
+    __slots__ = ()
+    id = "]"
+
+
+class FlowMappingEndToken(Token):
+    __slots__ = ()
+    id = "}"
+
+
+class KeyToken(Token):
+    __slots__ = ()
+    id = "?"
+
+    # def x__repr__(self):
+    #     return 'KeyToken({})'.format(
+    #         self.start_mark.buffer[self.start_mark.index:].split(None, 1)[0])
+
+
+class ValueToken(Token):
+    __slots__ = ()
+    id = ":"
+
+
+class BlockEntryToken(Token):
+    __slots__ = ()
+    id = "-"
+
+
+class FlowEntryToken(Token):
+    __slots__ = ()
+    id = ","
+
+
+class AliasToken(Token):
+    __slots__ = ("value",)
+    id = "<alias>"
+
+    def __init__(self, value, start_mark, end_mark):
+        # type: (Any, Any, Any) -> None
+        Token.__init__(self, start_mark, end_mark)
+        self.value = value
+
+
+class AnchorToken(Token):
+    __slots__ = ("value",)
+    id = "<anchor>"
+
+    def __init__(self, value, start_mark, end_mark):
+        # type: (Any, Any, Any) -> None
+        Token.__init__(self, start_mark, end_mark)
+        self.value = value
+
+
+class TagToken(Token):
+    __slots__ = ("value",)
+    id = "<tag>"
+
+    def __init__(self, value, start_mark, end_mark):
+        # type: (Any, Any, Any) -> None
+        Token.__init__(self, start_mark, end_mark)
+        self.value = value
+
+
+class ScalarToken(Token):
+    __slots__ = "value", "plain", "style"
+    id = "<scalar>"
+
+    def __init__(self, value, plain, start_mark, end_mark, style=None):
+        # type: (Any, Any, Any, Any, Any) -> None
+        Token.__init__(self, start_mark, end_mark)
+        self.value = value
+        self.plain = plain
+        self.style = style
+
+
+class CommentToken(Token):
+    __slots__ = "value", "pre_done"
+    id = "<comment>"
+
+    def __init__(self, value, start_mark, end_mark):
+        # type: (Any, Any, Any) -> None
+        Token.__init__(self, start_mark, end_mark)
+        self.value = value
+
+    def reset(self):
+        # type: () -> None
+        if hasattr(self, "pre_done"):
+            delattr(self, "pre_done")
+
+    def __repr__(self):
+        # type: () -> Any
+        v = "{!r}".format(self.value)
+        if SHOWLINES:
+            try:
+                v += ", line: " + str(self.start_mark.line)
+                v += ", col: " + str(self.start_mark.column)
+            except:  # NOQA
+                pass
+        return "CommentToken({})".format(v)
+
+    def __eq__(self, other):
+        # type: (Any) -> bool
+        if self.start_mark != other.start_mark:
+            return False
+        if self.end_mark != other.end_mark:
+            return False
+        if self.value != other.value:
+            return False
+        return True
+
+    def __ne__(self, other):
+        # type: (Any) -> bool
+        return not self.__eq__(other)
diff --git a/.venv/lib/python3.12/site-packages/strictyaml/ruamel/util.py b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/util.py
new file mode 100644
index 00000000..f78947e8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/strictyaml/ruamel/util.py
@@ -0,0 +1,190 @@
+# coding: utf-8
+
+"""
+some helper functions that might be generally useful
+"""
+
+from __future__ import absolute_import, print_function
+
+from functools import partial
+import re
+
+from .compat import text_type, binary_type
+
+if False:  # MYPY
+    from typing import Any, Dict, Optional, List, Text  # NOQA
+    from .compat import StreamTextType  # NOQA
+
+
+class LazyEval(object):
+    """
+    Lightweight wrapper around lazily evaluated func(*args, **kwargs).
+
+    func is only evaluated when any attribute of its return value is accessed.
+    Every attribute access is passed through to the wrapped value.
+    (This only excludes special cases like method-wrappers, e.g., __hash__.)
+    The sole additional attribute is the lazy_self function, which holds the
+    return value (or, prior to evaluation, func and its arguments) in its closure.
+    """
+
+    def __init__(self, func, *args, **kwargs):
+        # type: (Any, Any, Any) -> None
+        def lazy_self():
+            # type: () -> Any
+            return_value = func(*args, **kwargs)
+            object.__setattr__(self, "lazy_self", lambda: return_value)
+            return return_value
+
+        object.__setattr__(self, "lazy_self", lazy_self)
+
+    def __getattribute__(self, name):
+        # type: (Any) -> Any
+        lazy_self = object.__getattribute__(self, "lazy_self")
+        if name == "lazy_self":
+            return lazy_self
+        return getattr(lazy_self(), name)
+
+    def __setattr__(self, name, value):
+        # type: (Any, Any) -> None
+        setattr(self.lazy_self(), name, value)
+
+
+RegExp = partial(LazyEval, re.compile)
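+
+# Editor's note -- a hypothetical usage sketch, not part of the original
+# module: ``RegExp`` defers ``re.compile`` until an attribute of the pattern
+# is first accessed, keeping import cheap for modules (such as the resolver)
+# that declare many patterns up front:
+#
+#     pattern = RegExp(u'[0-9]+')     # nothing is compiled yet
+#     pattern.match(u'123')           # compiles here, then delegates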
+
+
+# originally posted as a comment on
+# https://github.com/pre-commit/pre-commit/pull/211#issuecomment-186466605
+# if you use this in your code, I suggest adding a test to your test suite
+# that checks this routine's output against a known piece of your YAML,
+# so that upgrades to this code do not silently break your round-tripped YAML
+def load_yaml_guess_indent(stream, **kw):
+    # type: (StreamTextType, Any) -> Any
+    """guess the indent and block sequence indent of yaml stream/string
+
+    returns round_trip_loaded stream, indent level, block sequence indent
+    - block sequence indent is the number of spaces before a dash relative to previous indent
+    - if there are no block sequences, indent is taken from nested mappings, block sequence
+      indent is unset (None) in that case
+    """
+    from .main import round_trip_load
+
+    # load a YAML document, guess the indentation, if you use TABs you're on your own
+    def leading_spaces(line):
+        # type: (Any) -> int
+        idx = 0
+        while idx < len(line) and line[idx] == " ":
+            idx += 1
+        return idx
+
+    if isinstance(stream, text_type):
+        yaml_str = stream  # type: Any
+    elif isinstance(stream, binary_type):
+        # most likely, but the Reader checks BOM for this
+        yaml_str = stream.decode("utf-8")
+    else:
+        yaml_str = stream.read()
+    map_indent = None
+    indent = None  # default if not found for some reason
+    block_seq_indent = None
+    prev_line_key_only = None
+    key_indent = 0
+    for line in yaml_str.splitlines():
+        rline = line.rstrip()
+        lline = rline.lstrip()
+        if lline.startswith("- "):
+            l_s = leading_spaces(line)
+            block_seq_indent = l_s - key_indent
+            idx = l_s + 1
+            while line[idx] == " ":  # this will end as we rstripped
+                idx += 1
+            if line[idx] == "#":  # comment after -
+                continue
+            indent = idx - key_indent
+            break
+        if map_indent is None and prev_line_key_only is not None and rline:
+            idx = 0
+            while line[idx] in " -":
+                idx += 1
+            if idx > prev_line_key_only:
+                map_indent = idx - prev_line_key_only
+        if rline.endswith(":"):
+            key_indent = leading_spaces(line)
+            idx = 0
+            while line[idx] == " ":  # this will end on ':'
+                idx += 1
+            prev_line_key_only = idx
+            continue
+        prev_line_key_only = None
+    if indent is None and map_indent is not None:
+        indent = map_indent
+    return round_trip_load(yaml_str, **kw), indent, block_seq_indent
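+
+# Editor's note -- a hypothetical usage sketch, not part of the original
+# module: the guessed values can be fed back into a round-trip dump so the
+# output keeps the source file's indentation style:
+#
+#     data, indent, block_seq_indent = load_yaml_guess_indent(
+#         'a:\n    - b\n    - c\n'
+#     )
+#     # for this input: indent == 6, block_seq_indent == 4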
+
+
+def configobj_walker(cfg):
+    # type: (Any) -> Any
+    """
+    walks over a ConfigObj (INI file with comments) generating
+    corresponding YAML output (including comments
+    """
+    from configobj import ConfigObj  # type: ignore
+
+    assert isinstance(cfg, ConfigObj)
+    for c in cfg.initial_comment:
+        if c.strip():
+            yield c
+    for s in _walk_section(cfg):
+        if s.strip():
+            yield s
+    for c in cfg.final_comment:
+        if c.strip():
+            yield c
+
+
+def _walk_section(s, level=0):
+    # type: (Any, int) -> Any
+    from configobj import Section
+
+    assert isinstance(s, Section)
+    indent = u"  " * level
+    for name in s.scalars:
+        for c in s.comments[name]:
+            yield indent + c.strip()
+        x = s[name]
+        if u"\n" in x:
+            i = indent + u"  "
+            x = u"|\n" + i + x.strip().replace(u"\n", u"\n" + i)
+        elif ":" in x:
+            x = u"'" + x.replace(u"'", u"''") + u"'"
+        line = u"{0}{1}: {2}".format(indent, name, x)
+        c = s.inline_comments[name]
+        if c:
+            line += u" " + c
+        yield line
+    for name in s.sections:
+        for c in s.comments[name]:
+            yield indent + c.strip()
+        line = u"{0}{1}:".format(indent, name)
+        c = s.inline_comments[name]
+        if c:
+            line += u" " + c
+        yield line
+        for val in _walk_section(s[name], level=level + 1):
+            yield val
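+
+# Editor's note -- a hypothetical usage sketch, not part of the original
+# module: ``configobj_walker`` yields YAML line by line, so converting an
+# INI file (the filename below is made up) is a join over the generator:
+#
+#     from configobj import ConfigObj
+#     yaml_text = u'\n'.join(configobj_walker(ConfigObj('settings.ini')))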
+
+
+# def config_obj_2_rt_yaml(cfg):
+#     from .comments import CommentedMap, CommentedSeq
+#     from configobj import ConfigObj
+#     assert isinstance(cfg, ConfigObj)
+#     #for c in cfg.initial_comment:
+#     #    if c.strip():
+#     #        pass
+#     cm = CommentedMap()
+#     for name in s.sections:
+#         cm[name] = d = CommentedMap()
+#
+#
+#     #for c in cfg.final_comment:
+#     #    if c.strip():
+#     #        yield c
+#     return cm