| author | S. Solomon Darnell | 2025-03-28 21:52:21 -0500 |
|---|---|---|
| committer | S. Solomon Darnell | 2025-03-28 21:52:21 -0500 |
| commit | 4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch) | |
| tree | ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/bs4 | |
| parent | cc961e04ba734dd72309fb548a2f97d67d578813 (diff) | |
| download | gn-ai-master.tar.gz | |
Diffstat (limited to '.venv/lib/python3.12/site-packages/bs4')
51 files changed, 17020 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/bs4/__init__.py b/.venv/lib/python3.12/site-packages/bs4/__init__.py new file mode 100644 index 00000000..68a992a7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/__init__.py @@ -0,0 +1,1170 @@ +"""Beautiful Soup Elixir and Tonic - "The Screen-Scraper's Friend". + +http://www.crummy.com/software/BeautifulSoup/ + +Beautiful Soup uses a pluggable XML or HTML parser to parse a +(possibly invalid) document into a tree representation. Beautiful Soup +provides methods and Pythonic idioms that make it easy to navigate, +search, and modify the parse tree. + +Beautiful Soup works with Python 3.7 and up. It works better if lxml +and/or html5lib is installed, but they are not required. + +For more than you ever wanted to know about Beautiful Soup, see the +documentation: http://www.crummy.com/software/BeautifulSoup/bs4/doc/ +""" + +__author__ = "Leonard Richardson (leonardr@segfault.org)" +__version__ = "4.13.3" +__copyright__ = "Copyright (c) 2004-2025 Leonard Richardson" +# Use of this source code is governed by the MIT license. +__license__ = "MIT" + +__all__ = [ + "AttributeResemblesVariableWarning", + "BeautifulSoup", + "Comment", + "Declaration", + "ProcessingInstruction", + "ResultSet", + "CSS", + "Script", + "Stylesheet", + "Tag", + "TemplateString", + "ElementFilter", + "UnicodeDammit", + "CData", + "Doctype", + + # Exceptions + "FeatureNotFound", + "ParserRejectedMarkup", + "StopParsing", + + # Warnings + "AttributeResemblesVariableWarning", + "GuessedAtParserWarning", + "MarkupResemblesLocatorWarning", + "UnusualUsageWarning", + "XMLParsedAsHTMLWarning", +] + +from collections import Counter +import sys +import warnings + +# The very first thing we do is give a useful error if someone is +# running this code under Python 2. +if sys.version_info.major < 3: + raise ImportError( + "You are trying to use a Python 3-specific version of Beautiful Soup under Python 2. This will not work. The final version of Beautiful Soup to support Python 2 was 4.9.3." + ) + +from .builder import ( + builder_registry, + TreeBuilder, +) +from .builder._htmlparser import HTMLParserTreeBuilder +from .dammit import UnicodeDammit +from .css import CSS +from ._deprecation import ( + _deprecated, +) +from .element import ( + CData, + Comment, + DEFAULT_OUTPUT_ENCODING, + Declaration, + Doctype, + NavigableString, + PageElement, + ProcessingInstruction, + PYTHON_SPECIFIC_ENCODINGS, + ResultSet, + Script, + Stylesheet, + Tag, + TemplateString, +) +from .formatter import Formatter +from .filter import ( + ElementFilter, + SoupStrainer, +) +from typing import ( + Any, + cast, + Counter as CounterType, + Dict, + Iterator, + List, + Sequence, + Optional, + Type, + Union, +) + +from bs4._typing import ( + _Encoding, + _Encodings, + _IncomingMarkup, + _InsertableElement, + _RawAttributeValue, + _RawAttributeValues, + _RawMarkup, +) + +# Import all warnings and exceptions into the main package. +from bs4.exceptions import ( + FeatureNotFound, + ParserRejectedMarkup, + StopParsing, +) +from bs4._warnings import ( + AttributeResemblesVariableWarning, + GuessedAtParserWarning, + MarkupResemblesLocatorWarning, + UnusualUsageWarning, + XMLParsedAsHTMLWarning, +) + + +class BeautifulSoup(Tag): + """A data structure representing a parsed HTML or XML document. + + Most of the methods you'll call on a BeautifulSoup object are inherited from + PageElement or Tag. 
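Before the constructor details continue below, here is a minimal usage sketch of the workflow the module docstring describes (an editorial example, not part of the diff; it assumes only the stdlib `html.parser` backend):

```python
from bs4 import BeautifulSoup

# Name a parser explicitly so results are stable across environments,
# as the documentation above recommends.
html = '<html><body><p class="lead">Hello <b>world</b></p></body></html>'
soup = BeautifulSoup(html, "html.parser")

print(soup.p.get_text())      # Hello world
print(soup.find("b").string)  # world
print(soup.p["class"])        # ['lead'] -- "class" is a multi-valued attribute
```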
+ + Internally, this class defines the basic interface called by the + tree builders when converting an HTML/XML document into a data + structure. The interface abstracts away the differences between + parsers. To write a new tree builder, you'll need to understand + these methods as a whole. + + These methods will be called by the BeautifulSoup constructor: + * reset() + * feed(markup) + + The tree builder may call these methods from its feed() implementation: + * handle_starttag(name, attrs) # See note about return value + * handle_endtag(name) + * handle_data(data) # Appends to the current data node + * endData(containerClass) # Ends the current data node + + No matter how complicated the underlying parser is, you should be + able to build a tree using 'start tag' events, 'end tag' events, + 'data' events, and "done with data" events. + + If you encounter an empty-element tag (aka a self-closing tag, + like HTML's <br> tag), call handle_starttag and then + handle_endtag. + """ + + #: Since `BeautifulSoup` subclasses `Tag`, it's possible to treat it as + #: a `Tag` with a `Tag.name`. Hoever, this name makes it clear the + #: `BeautifulSoup` object isn't a real markup tag. + ROOT_TAG_NAME: str = "[document]" + + #: If the end-user gives no indication which tree builder they + #: want, look for one with these features. + DEFAULT_BUILDER_FEATURES: Sequence[str] = ["html", "fast"] + + #: A string containing all ASCII whitespace characters, used in + #: during parsing to detect data chunks that seem 'empty'. + ASCII_SPACES: str = "\x20\x0a\x09\x0c\x0d" + + # FUTURE PYTHON: + element_classes: Dict[Type[PageElement], Type[PageElement]] #: :meta private: + builder: TreeBuilder #: :meta private: + is_xml: bool + known_xml: Optional[bool] + parse_only: Optional[SoupStrainer] #: :meta private: + + # These members are only used while parsing markup. + markup: Optional[_RawMarkup] #: :meta private: + current_data: List[str] #: :meta private: + currentTag: Optional[Tag] #: :meta private: + tagStack: List[Tag] #: :meta private: + open_tag_counter: CounterType[str] #: :meta private: + preserve_whitespace_tag_stack: List[Tag] #: :meta private: + string_container_stack: List[Tag] #: :meta private: + _most_recent_element: Optional[PageElement] #: :meta private: + + #: Beautiful Soup's best guess as to the character encoding of the + #: original document. + original_encoding: Optional[_Encoding] + + #: The character encoding, if any, that was explicitly defined + #: in the original document. This may or may not match + #: `BeautifulSoup.original_encoding`. + declared_html_encoding: Optional[_Encoding] + + #: This is True if the markup that was parsed contains + #: U+FFFD REPLACEMENT_CHARACTER characters which were not present + #: in the original markup. These mark character sequences that + #: could not be represented in Unicode. + contains_replacement_characters: bool + + def __init__( + self, + markup: _IncomingMarkup = "", + features: Optional[Union[str, Sequence[str]]] = None, + builder: Optional[Union[TreeBuilder, Type[TreeBuilder]]] = None, + parse_only: Optional[SoupStrainer] = None, + from_encoding: Optional[_Encoding] = None, + exclude_encodings: Optional[_Encodings] = None, + element_classes: Optional[Dict[Type[PageElement], Type[PageElement]]] = None, + **kwargs: Any, + ): + """Constructor. + + :param markup: A string or a file-like object representing + markup to be parsed. + + :param features: Desirable features of the parser to be + used. 
This may be the name of a specific parser ("lxml", + "lxml-xml", "html.parser", or "html5lib") or it may be the + type of markup to be used ("html", "html5", "xml"). It's + recommended that you name a specific parser, so that + Beautiful Soup gives you the same results across platforms + and virtual environments. + + :param builder: A TreeBuilder subclass to instantiate (or + instance to use) instead of looking one up based on + `features`. You only need to use this if you've implemented a + custom TreeBuilder. + + :param parse_only: A SoupStrainer. Only parts of the document + matching the SoupStrainer will be considered. This is useful + when parsing part of a document that would otherwise be too + large to fit into memory. + + :param from_encoding: A string indicating the encoding of the + document to be parsed. Pass this in if Beautiful Soup is + guessing wrongly about the document's encoding. + + :param exclude_encodings: A list of strings indicating + encodings known to be wrong. Pass this in if you don't know + the document's encoding but you know Beautiful Soup's guess is + wrong. + + :param element_classes: A dictionary mapping BeautifulSoup + classes like Tag and NavigableString, to other classes you'd + like to be instantiated instead as the parse tree is + built. This is useful for subclassing Tag or NavigableString + to modify default behavior. + + :param kwargs: For backwards compatibility purposes, the + constructor accepts certain keyword arguments used in + Beautiful Soup 3. None of these arguments do anything in + Beautiful Soup 4; they will result in a warning and then be + ignored. + + Apart from this, any keyword arguments passed into the + BeautifulSoup constructor are propagated to the TreeBuilder + constructor. This makes it possible to configure a + TreeBuilder by passing in arguments, not just by saying which + one to use. + """ + if "convertEntities" in kwargs: + del kwargs["convertEntities"] + warnings.warn( + "BS4 does not respect the convertEntities argument to the " + "BeautifulSoup constructor. Entities are always converted " + "to Unicode characters." + ) + + if "markupMassage" in kwargs: + del kwargs["markupMassage"] + warnings.warn( + "BS4 does not respect the markupMassage argument to the " + "BeautifulSoup constructor. The tree builder is responsible " + "for any necessary markup massage." + ) + + if "smartQuotesTo" in kwargs: + del kwargs["smartQuotesTo"] + warnings.warn( + "BS4 does not respect the smartQuotesTo argument to the " + "BeautifulSoup constructor. Smart quotes are always converted " + "to Unicode characters." + ) + + if "selfClosingTags" in kwargs: + del kwargs["selfClosingTags"] + warnings.warn( + "Beautiful Soup 4 does not respect the selfClosingTags argument to the " + "BeautifulSoup constructor. The tree builder is responsible " + "for understanding self-closing tags." + ) + + if "isHTML" in kwargs: + del kwargs["isHTML"] + warnings.warn( + "Beautiful Soup 4 does not respect the isHTML argument to the " + "BeautifulSoup constructor. Suggest you use " + "features='lxml' for HTML and features='lxml-xml' for " + "XML." 
+ ) + + def deprecated_argument(old_name: str, new_name: str) -> Optional[Any]: + if old_name in kwargs: + warnings.warn( + 'The "%s" argument to the BeautifulSoup constructor ' + 'was renamed to "%s" in Beautiful Soup 4.0.0' + % (old_name, new_name), + DeprecationWarning, + stacklevel=3, + ) + return kwargs.pop(old_name) + return None + + parse_only = parse_only or deprecated_argument("parseOnlyThese", "parse_only") + if parse_only is not None: + # Issue a warning if we can tell in advance that + # parse_only will exclude the entire tree. + if parse_only.excludes_everything: + warnings.warn( + f"The given value for parse_only will exclude everything: {parse_only}", + UserWarning, + stacklevel=3, + ) + + from_encoding = from_encoding or deprecated_argument( + "fromEncoding", "from_encoding" + ) + + if from_encoding and isinstance(markup, str): + warnings.warn( + "You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored." + ) + from_encoding = None + + self.element_classes = element_classes or dict() + + # We need this information to track whether or not the builder + # was specified well enough that we can omit the 'you need to + # specify a parser' warning. + original_builder = builder + original_features = features + + builder_class: Type[TreeBuilder] + if isinstance(builder, type): + # A builder class was passed in; it needs to be instantiated. + builder_class = builder + builder = None + elif builder is None: + if isinstance(features, str): + features = [features] + if features is None or len(features) == 0: + features = self.DEFAULT_BUILDER_FEATURES + possible_builder_class = builder_registry.lookup(*features) + if possible_builder_class is None: + raise FeatureNotFound( + "Couldn't find a tree builder with the features you " + "requested: %s. Do you need to install a parser library?" + % ",".join(features) + ) + builder_class = possible_builder_class + + # At this point either we have a TreeBuilder instance in + # builder, or we have a builder_class that we can instantiate + # with the remaining **kwargs. + if builder is None: + builder = builder_class(**kwargs) + if ( + not original_builder + and not ( + original_features == builder.NAME + or ( + isinstance(original_features, str) + and original_features in builder.ALTERNATE_NAMES + ) + ) + and markup + ): + # The user did not tell us which TreeBuilder to use, + # and we had to guess. Issue a warning. + if builder.is_xml: + markup_type = "XML" + else: + markup_type = "HTML" + + # This code adapted from warnings.py so that we get the same line + # of code as our warnings.warn() call gets, even if the answer is wrong + # (as it may be in a multithreading situation). + caller = None + try: + caller = sys._getframe(1) + except ValueError: + pass + if caller: + globals = caller.f_globals + line_number = caller.f_lineno + else: + globals = sys.__dict__ + line_number = 1 + filename = globals.get("__file__") + if filename: + fnl = filename.lower() + if fnl.endswith((".pyc", ".pyo")): + filename = filename[:-1] + if filename: + # If there is no filename at all, the user is most likely in a REPL, + # and the warning is not necessary. + values = dict( + filename=filename, + line_number=line_number, + parser=builder.NAME, + markup_type=markup_type, + ) + warnings.warn( + GuessedAtParserWarning.MESSAGE % values, + GuessedAtParserWarning, + stacklevel=2, + ) + else: + if kwargs: + warnings.warn( + "Keyword arguments to the BeautifulSoup constructor will be ignored. 
These would normally be passed into the TreeBuilder constructor, but a TreeBuilder instance was passed in as `builder`." + ) + + self.builder = builder + self.is_xml = builder.is_xml + self.known_xml = self.is_xml + self._namespaces = dict() + self.parse_only = parse_only + + if hasattr(markup, "read"): # It's a file-type object. + markup = markup.read() + elif not isinstance(markup, (bytes, str)) and not hasattr(markup, "__len__"): + raise TypeError( + f"Incoming markup is of an invalid type: {markup!r}. Markup must be a string, a bytestring, or an open filehandle." + ) + elif len(markup) <= 256 and ( + (isinstance(markup, bytes) and b"<" not in markup and b"\n" not in markup) + or (isinstance(markup, str) and "<" not in markup and "\n" not in markup) + ): + # Issue warnings for a couple beginner problems + # involving passing non-markup to Beautiful Soup. + # Beautiful Soup will still parse the input as markup, + # since that is sometimes the intended behavior. + if not self._markup_is_url(markup): + self._markup_resembles_filename(markup) + + # At this point we know markup is a string or bytestring. If + # it was a file-type object, we've read from it. + markup = cast(_RawMarkup, markup) + + rejections = [] + success = False + for ( + self.markup, + self.original_encoding, + self.declared_html_encoding, + self.contains_replacement_characters, + ) in self.builder.prepare_markup( + markup, from_encoding, exclude_encodings=exclude_encodings + ): + self.reset() + self.builder.initialize_soup(self) + try: + self._feed() + success = True + break + except ParserRejectedMarkup as e: + rejections.append(e) + pass + + if not success: + other_exceptions = [str(e) for e in rejections] + raise ParserRejectedMarkup( + "The markup you provided was rejected by the parser. Trying a different parser or a different encoding may help.\n\nOriginal exception(s) from parser:\n " + + "\n ".join(other_exceptions) + ) + + # Clear out the markup and remove the builder's circular + # reference to this object. + self.markup = None + self.builder.soup = None + + def copy_self(self) -> "BeautifulSoup": + """Create a new BeautifulSoup object with the same TreeBuilder, + but not associated with any markup. + + This is the first step of the deepcopy process. + """ + clone = type(self)("", None, self.builder) + + # Keep track of the encoding of the original document, + # since we won't be parsing it again. + clone.original_encoding = self.original_encoding + return clone + + def __getstate__(self) -> Dict[str, Any]: + # Frequently a tree builder can't be pickled. + d = dict(self.__dict__) + if "builder" in d and d["builder"] is not None and not self.builder.picklable: + d["builder"] = type(self.builder) + # Store the contents as a Unicode string. + d["contents"] = [] + d["markup"] = self.decode() + + # If _most_recent_element is present, it's a Tag object left + # over from initial parse. It might not be picklable and we + # don't need it. + if "_most_recent_element" in d: + del d["_most_recent_element"] + return d + + def __setstate__(self, state: Dict[str, Any]) -> None: + # If necessary, restore the TreeBuilder by looking it up. + self.__dict__ = state + if isinstance(self.builder, type): + self.builder = self.builder() + elif not self.builder: + # We don't know which builder was used to build this + # parse tree, so use a default we know is always available. 
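The `__getstate__`/`__setstate__` pair above is what makes a soup picklable even when its tree builder is not: the tree is re-serialized to markup on pickling and re-parsed on load. A round-trip sketch under that assumption (illustrative only, not part of the diff):

```python
import pickle
from bs4 import BeautifulSoup

soup = BeautifulSoup("<p>cached</p>", "html.parser")

data = pickle.dumps(soup)      # builder may be stored as a class; contents as markup
restored = pickle.loads(data)  # markup is re-fed through a (possibly default) builder

assert restored.p is not None and restored.p.string == "cached"
```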
+ self.builder = HTMLParserTreeBuilder() + self.builder.soup = self + self.reset() + self._feed() + + @classmethod + @_deprecated( + replaced_by="nothing (private method, will be removed)", version="4.13.0" + ) + def _decode_markup(cls, markup: _RawMarkup) -> str: + """Ensure `markup` is Unicode so it's safe to send into warnings.warn. + + warnings.warn had this problem back in 2010 but fortunately + not anymore. This has not been used for a long time; I just + noticed that fact while working on 4.13.0. + """ + if isinstance(markup, bytes): + decoded = markup.decode("utf-8", "replace") + else: + decoded = markup + return decoded + + @classmethod + def _markup_is_url(cls, markup: _RawMarkup) -> bool: + """Error-handling method to raise a warning if incoming markup looks + like a URL. + + :param markup: A string of markup. + :return: Whether or not the markup resembled a URL + closely enough to justify issuing a warning. + """ + problem: bool = False + if isinstance(markup, bytes): + problem = ( + any(markup.startswith(prefix) for prefix in (b"http:", b"https:")) + and b" " not in markup + ) + elif isinstance(markup, str): + problem = ( + any(markup.startswith(prefix) for prefix in ("http:", "https:")) + and " " not in markup + ) + else: + return False + + if not problem: + return False + warnings.warn( + MarkupResemblesLocatorWarning.URL_MESSAGE % dict(what="URL"), + MarkupResemblesLocatorWarning, + stacklevel=3, + ) + return True + + @classmethod + def _markup_resembles_filename(cls, markup: _RawMarkup) -> bool: + """Error-handling method to issue a warning if incoming markup + resembles a filename. + + :param markup: A string of markup. + :return: Whether or not the markup resembled a filename + closely enough to justify issuing a warning. + """ + markup_b: bytes + + # We're only checking ASCII characters, so rather than write + # the same tests twice, convert Unicode to a bytestring and + # operate on the bytestring. + if isinstance(markup, str): + markup_b = markup.encode("utf8") + else: + markup_b = markup + + # Step 1: does it end with a common textual file extension? + filelike = False + lower = markup_b.lower() + extensions = [b".html", b".htm", b".xml", b".xhtml", b".txt"] + if any(lower.endswith(ext) for ext in extensions): + filelike = True + if not filelike: + return False + + # Step 2: it _might_ be a file, but there are a few things + # we can look for that aren't very common in filenames. + + # Characters that have special meaning to Unix shells. (< was + # excluded before this method was called.) + # + # Many of these are also reserved characters that cannot + # appear in Windows filenames. + for byte in markup_b: + if byte in b"?*#&;>$|": + return False + + # Two consecutive forward slashes (as seen in a URL) or two + # consecutive spaces (as seen in fixed-width data). + # + # (Paths to Windows network shares contain consecutive + # backslashes, so checking that doesn't seem as helpful.) + if b"//" in markup_b: + return False + if b" " in markup_b: + return False + + # A colon in any position other than position 1 (e.g. after a + # Windows drive letter). + if markup_b.startswith(b":"): + return False + colon_i = markup_b.rfind(b":") + if colon_i not in (-1, 1): + return False + + # Step 3: If it survived all of those checks, it's similar + # enough to a file to justify issuing a warning. 
+ warnings.warn( + MarkupResemblesLocatorWarning.FILENAME_MESSAGE % dict(what="filename"), + MarkupResemblesLocatorWarning, + stacklevel=3, + ) + return True + + def _feed(self) -> None: + """Internal method that parses previously set markup, creating a large + number of Tag and NavigableString objects. + """ + # Convert the document to Unicode. + self.builder.reset() + + if self.markup is not None: + self.builder.feed(self.markup) + # Close out any unfinished strings and close all the open tags. + self.endData() + while ( + self.currentTag is not None and self.currentTag.name != self.ROOT_TAG_NAME + ): + self.popTag() + + def reset(self) -> None: + """Reset this object to a state as though it had never parsed any + markup. + """ + Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME) + self.hidden = True + self.builder.reset() + self.current_data = [] + self.currentTag = None + self.tagStack = [] + self.open_tag_counter = Counter() + self.preserve_whitespace_tag_stack = [] + self.string_container_stack = [] + self._most_recent_element = None + self.pushTag(self) + + def new_tag( + self, + name: str, + namespace: Optional[str] = None, + nsprefix: Optional[str] = None, + attrs: Optional[_RawAttributeValues] = None, + sourceline: Optional[int] = None, + sourcepos: Optional[int] = None, + string: Optional[str] = None, + **kwattrs: _RawAttributeValue, + ) -> Tag: + """Create a new Tag associated with this BeautifulSoup object. + + :param name: The name of the new Tag. + :param namespace: The URI of the new Tag's XML namespace, if any. + :param prefix: The prefix for the new Tag's XML namespace, if any. + :param attrs: A dictionary of this Tag's attribute values; can + be used instead of ``kwattrs`` for attributes like 'class' + that are reserved words in Python. + :param sourceline: The line number where this tag was + (purportedly) found in its source document. + :param sourcepos: The character position within ``sourceline`` where this + tag was (purportedly) found. + :param string: String content for the new Tag, if any. + :param kwattrs: Keyword arguments for the new Tag's attribute values. + + """ + attr_container = self.builder.attribute_dict_class(**kwattrs) + if attrs is not None: + attr_container.update(attrs) + tag_class = self.element_classes.get(Tag, Tag) + + # Assume that this is either Tag or a subclass of Tag. If not, + # the user brought type-unsafety upon themselves. + tag_class = cast(Type[Tag], tag_class) + tag = tag_class( + None, + self.builder, + name, + namespace, + nsprefix, + attr_container, + sourceline=sourceline, + sourcepos=sourcepos, + ) + + if string is not None: + tag.string = string + return tag + + def string_container( + self, base_class: Optional[Type[NavigableString]] = None + ) -> Type[NavigableString]: + """Find the class that should be instantiated to hold a given kind of + string. + + This may be a built-in Beautiful Soup class or a custom class passed + in to the BeautifulSoup constructor. + """ + container = base_class or NavigableString + + # The user may want us to use some other class (hopefully a + # custom subclass) instead of the one we'd use normally. + container = cast( + Type[NavigableString], self.element_classes.get(container, container) + ) + + # On top of that, we may be inside a tag that needs a special + # container class. 
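`new_tag`, documented above, creates a detached `Tag` bound to this soup's builder; the `attrs` dictionary covers attribute names like `class` that are reserved words in Python. A short sketch (editorial example, using only the signature shown in this diff):

```python
from bs4 import BeautifulSoup

soup = BeautifulSoup("<div></div>", "html.parser")

link = soup.new_tag("a", href="https://example.com", string="docs")
note = soup.new_tag("p", attrs={"class": "note"})  # reserved word goes via attrs

soup.div.append(link)
soup.div.append(note)
print(soup.div)
# <div><a href="https://example.com">docs</a><p class="note"></p></div>
```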
+ if self.string_container_stack and container is NavigableString: + container = self.builder.string_containers.get( + self.string_container_stack[-1].name, container + ) + return container + + def new_string( + self, s: str, subclass: Optional[Type[NavigableString]] = None + ) -> NavigableString: + """Create a new `NavigableString` associated with this `BeautifulSoup` + object. + + :param s: The string content of the `NavigableString` + :param subclass: The subclass of `NavigableString`, if any, to + use. If a document is being processed, an appropriate + subclass for the current location in the document will + be determined automatically. + """ + container = self.string_container(subclass) + return container(s) + + def insert_before(self, *args: _InsertableElement) -> List[PageElement]: + """This method is part of the PageElement API, but `BeautifulSoup` doesn't implement + it because there is nothing before or after it in the parse tree. + """ + raise NotImplementedError( + "BeautifulSoup objects don't support insert_before()." + ) + + def insert_after(self, *args: _InsertableElement) -> List[PageElement]: + """This method is part of the PageElement API, but `BeautifulSoup` doesn't implement + it because there is nothing before or after it in the parse tree. + """ + raise NotImplementedError("BeautifulSoup objects don't support insert_after().") + + def popTag(self) -> Optional[Tag]: + """Internal method called by _popToTag when a tag is closed. + + :meta private: + """ + if not self.tagStack: + # Nothing to pop. This shouldn't happen. + return None + tag = self.tagStack.pop() + if tag.name in self.open_tag_counter: + self.open_tag_counter[tag.name] -= 1 + if ( + self.preserve_whitespace_tag_stack + and tag == self.preserve_whitespace_tag_stack[-1] + ): + self.preserve_whitespace_tag_stack.pop() + if self.string_container_stack and tag == self.string_container_stack[-1]: + self.string_container_stack.pop() + # print("Pop", tag.name) + if self.tagStack: + self.currentTag = self.tagStack[-1] + return self.currentTag + + def pushTag(self, tag: Tag) -> None: + """Internal method called by handle_starttag when a tag is opened. + + :meta private: + """ + # print("Push", tag.name) + if self.currentTag is not None: + self.currentTag.contents.append(tag) + self.tagStack.append(tag) + self.currentTag = self.tagStack[-1] + if tag.name != self.ROOT_TAG_NAME: + self.open_tag_counter[tag.name] += 1 + if tag.name in self.builder.preserve_whitespace_tags: + self.preserve_whitespace_tag_stack.append(tag) + if tag.name in self.builder.string_containers: + self.string_container_stack.append(tag) + + def endData(self, containerClass: Optional[Type[NavigableString]] = None) -> None: + """Method called by the TreeBuilder when the end of a data segment + occurs. + + :param containerClass: The class to use when incorporating the + data segment into the parse tree. + + :meta private: + """ + if self.current_data: + current_data = "".join(self.current_data) + # If whitespace is not preserved, and this string contains + # nothing but ASCII spaces, replace it with a single space + # or newline. + if not self.preserve_whitespace_tag_stack: + strippable = True + for i in current_data: + if i not in self.ASCII_SPACES: + strippable = False + break + if strippable: + if "\n" in current_data: + current_data = "\n" + else: + current_data = " " + + # Reset the data collector. + self.current_data = [] + + # Should we add this string to the tree at all? 
+ if ( + self.parse_only + and len(self.tagStack) <= 1 + and (not self.parse_only.allow_string_creation(current_data)) + ): + return + + containerClass = self.string_container(containerClass) + o = containerClass(current_data) + self.object_was_parsed(o) + + def object_was_parsed( + self, + o: PageElement, + parent: Optional[Tag] = None, + most_recent_element: Optional[PageElement] = None, + ) -> None: + """Method called by the TreeBuilder to integrate an object into the + parse tree. + + :meta private: + """ + if parent is None: + parent = self.currentTag + assert parent is not None + previous_element: Optional[PageElement] + if most_recent_element is not None: + previous_element = most_recent_element + else: + previous_element = self._most_recent_element + + next_element = previous_sibling = next_sibling = None + if isinstance(o, Tag): + next_element = o.next_element + next_sibling = o.next_sibling + previous_sibling = o.previous_sibling + if previous_element is None: + previous_element = o.previous_element + + fix = parent.next_element is not None + + o.setup(parent, previous_element, next_element, previous_sibling, next_sibling) + + self._most_recent_element = o + parent.contents.append(o) + + # Check if we are inserting into an already parsed node. + if fix: + self._linkage_fixer(parent) + + def _linkage_fixer(self, el: Tag) -> None: + """Make sure linkage of this fragment is sound.""" + + first = el.contents[0] + child = el.contents[-1] + descendant: PageElement = child + + if child is first and el.parent is not None: + # Parent should be linked to first child + el.next_element = child + # We are no longer linked to whatever this element is + prev_el = child.previous_element + if prev_el is not None and prev_el is not el: + prev_el.next_element = None + # First child should be linked to the parent, and no previous siblings. + child.previous_element = el + child.previous_sibling = None + + # We have no sibling as we've been appended as the last. + child.next_sibling = None + + # This index is a tag, dig deeper for a "last descendant" + if isinstance(child, Tag) and child.contents: + # _last_decendant is typed as returning Optional[PageElement], + # but the value can't be None here, because el is a Tag + # which we know has contents. + descendant = cast(PageElement, child._last_descendant(False)) + + # As the final step, link last descendant. It should be linked + # to the parent's next sibling (if found), else walk up the chain + # and find a parent with a sibling. It should have no next sibling. + descendant.next_element = None + descendant.next_sibling = None + + target: Optional[Tag] = el + while True: + if target is None: + break + elif target.next_sibling is not None: + descendant.next_element = target.next_sibling + target.next_sibling.previous_element = child + break + target = target.parent + + def _popToTag( + self, name: str, nsprefix: Optional[str] = None, inclusivePop: bool = True + ) -> Optional[Tag]: + """Pops the tag stack up to and including the most recent + instance of the given tag. + + If there are no open tags with the given name, nothing will be + popped. + + :param name: Pop up to the most recent tag with this name. + :param nsprefix: The namespace prefix that goes with `name`. + :param inclusivePop: It this is false, pops the tag stack up + to but *not* including the most recent instqance of the + given tag. + + :meta private: + """ + # print("Popping to %s" % name) + if name == self.ROOT_TAG_NAME: + # The BeautifulSoup object itself can never be popped. 
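The `parse_only` checks in `endData` (just above) and in `handle_starttag` (below) are what make `SoupStrainer` filtering take effect while the document is still being parsed, rather than after the fact. A user-level sketch:

```python
from bs4 import BeautifulSoup, SoupStrainer

only_links = SoupStrainer("a")
soup = BeautifulSoup(
    '<p>intro</p><a href="/one">one</a><a href="/two">two</a>',
    "html.parser",
    parse_only=only_links,
)

# Only the <a> tags were ever added to the tree.
print([a["href"] for a in soup.find_all("a")])  # ['/one', '/two']
print(soup.find("p"))                           # None
```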
+ return None + + most_recently_popped = None + + stack_size = len(self.tagStack) + for i in range(stack_size - 1, 0, -1): + if not self.open_tag_counter.get(name): + break + t = self.tagStack[i] + if name == t.name and nsprefix == t.prefix: + if inclusivePop: + most_recently_popped = self.popTag() + break + most_recently_popped = self.popTag() + + return most_recently_popped + + def handle_starttag( + self, + name: str, + namespace: Optional[str], + nsprefix: Optional[str], + attrs: _RawAttributeValues, + sourceline: Optional[int] = None, + sourcepos: Optional[int] = None, + namespaces: Optional[Dict[str, str]] = None, + ) -> Optional[Tag]: + """Called by the tree builder when a new tag is encountered. + + :param name: Name of the tag. + :param nsprefix: Namespace prefix for the tag. + :param attrs: A dictionary of attribute values. Note that + attribute values are expected to be simple strings; processing + of multi-valued attributes such as "class" comes later. + :param sourceline: The line number where this tag was found in its + source document. + :param sourcepos: The character position within `sourceline` where this + tag was found. + :param namespaces: A dictionary of all namespace prefix mappings + currently in scope in the document. + + If this method returns None, the tag was rejected by an active + `ElementFilter`. You should proceed as if the tag had not occurred + in the document. For instance, if this was a self-closing tag, + don't call handle_endtag. + + :meta private: + """ + # print("Start tag %s: %s" % (name, attrs)) + self.endData() + + if ( + self.parse_only + and len(self.tagStack) <= 1 + and not self.parse_only.allow_tag_creation(nsprefix, name, attrs) + ): + return None + + tag_class = self.element_classes.get(Tag, Tag) + # Assume that this is either Tag or a subclass of Tag. If not, + # the user brought type-unsafety upon themselves. + tag_class = cast(Type[Tag], tag_class) + tag = tag_class( + self, + self.builder, + name, + namespace, + nsprefix, + attrs, + self.currentTag, + self._most_recent_element, + sourceline=sourceline, + sourcepos=sourcepos, + namespaces=namespaces, + ) + if tag is None: + return tag + if self._most_recent_element is not None: + self._most_recent_element.next_element = tag + self._most_recent_element = tag + self.pushTag(tag) + return tag + + def handle_endtag(self, name: str, nsprefix: Optional[str] = None) -> None: + """Called by the tree builder when an ending tag is encountered. + + :param name: Name of the tag. + :param nsprefix: Namespace prefix for the tag. + + :meta private: + """ + # print("End tag: " + name) + self.endData() + self._popToTag(name, nsprefix) + + def handle_data(self, data: str) -> None: + """Called by the tree builder when a chunk of textual data is + encountered. + + :meta private: + """ + self.current_data.append(data) + + def decode( + self, + indent_level: Optional[int] = None, + eventual_encoding: _Encoding = DEFAULT_OUTPUT_ENCODING, + formatter: Union[Formatter, str] = "minimal", + iterator: Optional[Iterator[PageElement]] = None, + **kwargs: Any, + ) -> str: + """Returns a string representation of the parse tree + as a full HTML or XML document. + + :param indent_level: Each line of the rendering will be + indented this many levels. (The ``formatter`` decides what a + 'level' means, in terms of spaces or other characters + output.) This is used internally in recursive calls while + pretty-printing. + :param eventual_encoding: The encoding of the final document. 
+ If this is None, the document will be a Unicode string. + :param formatter: Either a `Formatter` object, or a string naming one of + the standard formatters. + :param iterator: The iterator to use when navigating over the + parse tree. This is only used by `Tag.decode_contents` and + you probably won't need to use it. + """ + if self.is_xml: + # Print the XML declaration + encoding_part = "" + declared_encoding: Optional[str] = eventual_encoding + if eventual_encoding in PYTHON_SPECIFIC_ENCODINGS: + # This is a special Python encoding; it can't actually + # go into an XML document because it means nothing + # outside of Python. + declared_encoding = None + if declared_encoding is not None: + encoding_part = ' encoding="%s"' % declared_encoding + prefix = '<?xml version="1.0"%s?>\n' % encoding_part + else: + prefix = "" + + # Prior to 4.13.0, the first argument to this method was a + # bool called pretty_print, which gave the method a different + # signature from its superclass implementation, Tag.decode. + # + # The signatures of the two methods now match, but just in + # case someone is still passing a boolean in as the first + # argument to this method (or a keyword argument with the old + # name), we can handle it and put out a DeprecationWarning. + warning: Optional[str] = None + if isinstance(indent_level, bool): + if indent_level is True: + indent_level = 0 + elif indent_level is False: + indent_level = None + warning = f"As of 4.13.0, the first argument to BeautifulSoup.decode has been changed from bool to int, to match Tag.decode. Pass in a value of {indent_level} instead." + else: + pretty_print = kwargs.pop("pretty_print", None) + assert not kwargs + if pretty_print is not None: + if pretty_print is True: + indent_level = 0 + elif pretty_print is False: + indent_level = None + warning = f"As of 4.13.0, the pretty_print argument to BeautifulSoup.decode has been removed, to match Tag.decode. Pass in a value of indent_level={indent_level} instead." + + if warning: + warnings.warn(warning, DeprecationWarning, stacklevel=2) + elif indent_level is False or pretty_print is False: + indent_level = None + return prefix + super(BeautifulSoup, self).decode( + indent_level, eventual_encoding, formatter, iterator + ) + + +# Aliases to make it easier to get started quickly, e.g. 'from bs4 import _soup' +_s = BeautifulSoup +_soup = BeautifulSoup + + +class BeautifulStoneSoup(BeautifulSoup): + """Deprecated interface to an XML parser.""" + + def __init__(self, *args: Any, **kwargs: Any): + kwargs["features"] = "xml" + warnings.warn( + "The BeautifulStoneSoup class was deprecated in version 4.0.0. Instead of using " + 'it, pass features="xml" into the BeautifulSoup constructor.', + DeprecationWarning, + stacklevel=2, + ) + super(BeautifulStoneSoup, self).__init__(*args, **kwargs) + + +# If this file is run as a script, act as an HTML pretty-printer. +if __name__ == "__main__": + import sys + + soup = BeautifulSoup(sys.stdin) + print((soup.prettify())) diff --git a/.venv/lib/python3.12/site-packages/bs4/_deprecation.py b/.venv/lib/python3.12/site-packages/bs4/_deprecation.py new file mode 100644 index 00000000..a0d7fdc0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/_deprecation.py @@ -0,0 +1,80 @@ +"""Helper functions for deprecation. + +This interface is itself unstable and may change without warning. Do +not use these functions yourself, even as a joke. The underscores are +there for a reason. No support will be given. 
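To round out `decode` and the `__main__` pretty-printer above, a sketch of the output-side API (the XML-declaration branch requires an XML builder such as lxml, which this example deliberately avoids):

```python
from bs4 import BeautifulSoup

soup = BeautifulSoup("<p>caf&eacute;</p>", "html.parser")

print(soup.decode())                  # <p>café</p> -- entities become Unicode
print(soup.decode(formatter="html"))  # <p>caf&eacute;</p> -- re-encoded on output
print(soup.prettify())                # indented rendering, one element per line
```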
+ +In particular, most of this will go away without warning once +Beautiful Soup drops support for Python 3.11, since Python 3.12 +defines a `@typing.deprecated() +decorator. <https://peps.python.org/pep-0702/>`_ +""" + +import functools +import warnings + +from typing import ( + Any, + Callable, +) + + +def _deprecated_alias(old_name: str, new_name: str, version: str): + """Alias one attribute name to another for backward compatibility + + :meta private: + """ + + @property + def alias(self) -> Any: + ":meta private:" + warnings.warn( + f"Access to deprecated property {old_name}. (Replaced by {new_name}) -- Deprecated since version {version}.", + DeprecationWarning, + stacklevel=2, + ) + return getattr(self, new_name) + + @alias.setter + def alias(self, value: str) -> None: + ":meta private:" + warnings.warn( + f"Write to deprecated property {old_name}. (Replaced by {new_name}) -- Deprecated since version {version}.", + DeprecationWarning, + stacklevel=2, + ) + return setattr(self, new_name, value) + + return alias + + +def _deprecated_function_alias( + old_name: str, new_name: str, version: str +) -> Callable[[Any], Any]: + def alias(self, *args: Any, **kwargs: Any) -> Any: + ":meta private:" + warnings.warn( + f"Call to deprecated method {old_name}. (Replaced by {new_name}) -- Deprecated since version {version}.", + DeprecationWarning, + stacklevel=2, + ) + return getattr(self, new_name)(*args, **kwargs) + + return alias + + +def _deprecated(replaced_by: str, version: str) -> Callable: + def deprecate(func: Callable) -> Callable: + @functools.wraps(func) + def with_warning(*args: Any, **kwargs: Any) -> Any: + ":meta private:" + warnings.warn( + f"Call to deprecated method {func.__name__}. (Replaced by {replaced_by}) -- Deprecated since version {version}.", + DeprecationWarning, + stacklevel=2, + ) + return func(*args, **kwargs) + + return with_warning + + return deprecate diff --git a/.venv/lib/python3.12/site-packages/bs4/_typing.py b/.venv/lib/python3.12/site-packages/bs4/_typing.py new file mode 100644 index 00000000..ac4ec340 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/_typing.py @@ -0,0 +1,196 @@ +# Custom type aliases used throughout Beautiful Soup to improve readability. + +# Notes on improvements to the type system in newer versions of Python +# that can be used once Beautiful Soup drops support for older +# versions: +# +# * ClassVar can be put on class variables now. +# * In 3.10, x|y is an accepted shorthand for Union[x,y]. +# * In 3.10, TypeAlias gains capabilities that can be used to +# improve the tree matching types (I don't remember what, exactly). +# * In 3.9 it's possible to specialize the re.Match type, +# e.g. re.Match[str]. In 3.8 there's a typing.re namespace for this, +# but it's removed in 3.12, so to support the widest possible set of +# versions I'm not using it. + +from typing_extensions import ( + runtime_checkable, + Protocol, + TypeAlias, +) +from typing import ( + Any, + Callable, + Dict, + IO, + Iterable, + Mapping, + Optional, + Pattern, + TYPE_CHECKING, + Union, +) + +if TYPE_CHECKING: + from bs4.element import ( + AttributeValueList, + NamespacedAttribute, + NavigableString, + PageElement, + ResultSet, + Tag, + ) + + +@runtime_checkable +class _RegularExpressionProtocol(Protocol): + """A protocol object which can accept either Python's built-in + `re.Pattern` objects, or the similar ``Regex`` objects defined by the + third-party ``regex`` package. + """ + + def search( + self, string: str, pos: int = ..., endpos: int = ... 
+ ) -> Optional[Any]: ... + + @property + def pattern(self) -> str: ... + + +# Aliases for markup in various stages of processing. +# +#: The rawest form of markup: either a string, bytestring, or an open filehandle. +_IncomingMarkup: TypeAlias = Union[str, bytes, IO[str], IO[bytes]] + +#: Markup that is in memory but has (potentially) yet to be converted +#: to Unicode. +_RawMarkup: TypeAlias = Union[str, bytes] + +# Aliases for character encodings +# + +#: A data encoding. +_Encoding: TypeAlias = str + +#: One or more data encodings. +_Encodings: TypeAlias = Iterable[_Encoding] + +# Aliases for XML namespaces +# + +#: The prefix for an XML namespace. +_NamespacePrefix: TypeAlias = str + +#: The URL of an XML namespace +_NamespaceURL: TypeAlias = str + +#: A mapping of prefixes to namespace URLs. +_NamespaceMapping: TypeAlias = Dict[_NamespacePrefix, _NamespaceURL] + +#: A mapping of namespace URLs to prefixes +_InvertedNamespaceMapping: TypeAlias = Dict[_NamespaceURL, _NamespacePrefix] + +# Aliases for the attribute values associated with HTML/XML tags. +# + +#: The value associated with an HTML or XML attribute. This is the +#: relatively unprocessed value Beautiful Soup expects to come from a +#: `TreeBuilder`. +_RawAttributeValue: TypeAlias = str + +#: A dictionary of names to `_RawAttributeValue` objects. This is how +#: Beautiful Soup expects a `TreeBuilder` to represent a tag's +#: attribute values. +_RawAttributeValues: TypeAlias = ( + "Mapping[Union[str, NamespacedAttribute], _RawAttributeValue]" +) + +#: An attribute value in its final form, as stored in the +# `Tag` class, after it has been processed and (in some cases) +# split into a list of strings. +_AttributeValue: TypeAlias = Union[str, "AttributeValueList"] + +#: A dictionary of names to :py:data:`_AttributeValue` objects. This is what +#: a tag's attributes look like after processing. +_AttributeValues: TypeAlias = Dict[str, _AttributeValue] + +#: The methods that deal with turning :py:data:`_RawAttributeValue` into +#: :py:data:`_AttributeValue` may be called several times, even after the values +#: are already processed (e.g. when cloning a tag), so they need to +#: be able to acommodate both possibilities. +_RawOrProcessedAttributeValues: TypeAlias = Union[_RawAttributeValues, _AttributeValues] + +#: A number of tree manipulation methods can take either a `PageElement` or a +#: normal Python string (which will be converted to a `NavigableString`). +_InsertableElement: TypeAlias = Union["PageElement", str] + +# Aliases to represent the many possibilities for matching bits of a +# parse tree. +# +# This is very complicated because we're applying a formal type system +# to some very DWIM code. The types we end up with will be the types +# of the arguments to the SoupStrainer constructor and (more +# familiarly to Beautiful Soup users) the find* methods. + +#: A function that takes a PageElement and returns a yes-or-no answer. +_PageElementMatchFunction: TypeAlias = Callable[["PageElement"], bool] + +#: A function that takes the raw parsed ingredients of a markup tag +#: and returns a yes-or-no answer. +# Not necessary at the moment. +# _AllowTagCreationFunction:TypeAlias = Callable[[Optional[str], str, Optional[_RawAttributeValues]], bool] + +#: A function that takes the raw parsed ingredients of a markup string node +#: and returns a yes-or-no answer. +# Not necessary at the moment. 
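The matching aliases defined around here (`_TagMatchFunction`, `_StringMatchFunction`, and the strainable unions below) correspond to the argument types the `find_*` methods accept. A sketch of the user-facing forms:

```python
import re
from bs4 import BeautifulSoup

soup = BeautifulSoup('<a class="x" href="/a">a</a><a href="/b">b</a>', "html.parser")

# A _TagMatchFunction: any callable taking a Tag and returning a bool.
print(soup.find_all(lambda tag: tag.name == "a" and not tag.has_attr("class")))

# The _StringMatchFunction counterpart: attribute values can also be matched
# with strings, regular expressions, booleans, or lists of these.
print(soup.find_all("a", href=re.compile(r"^/a")))
```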
+# _AllowStringCreationFunction:TypeAlias = Callable[[Optional[str]], bool] + +#: A function that takes a `Tag` and returns a yes-or-no answer. +#: A `TagNameMatchRule` expects this kind of function, if you're +#: going to pass it a function. +_TagMatchFunction: TypeAlias = Callable[["Tag"], bool] + +#: A function that takes a single string and returns a yes-or-no +#: answer. An `AttributeValueMatchRule` expects this kind of function, if +#: you're going to pass it a function. So does a `StringMatchRule`. +_StringMatchFunction: TypeAlias = Callable[[str], bool] + +#: Either a tag name, an attribute value or a string can be matched +#: against a string, bytestring, regular expression, or a boolean. +_BaseStrainable: TypeAlias = Union[str, bytes, Pattern[str], bool] + +#: A tag can be matched either with the `_BaseStrainable` options, or +#: using a function that takes the `Tag` as its sole argument. +_BaseStrainableElement: TypeAlias = Union[_BaseStrainable, _TagMatchFunction] + +#: A tag's attribute vgalue can be matched either with the +#: `_BaseStrainable` options, or using a function that takes that +#: value as its sole argument. +_BaseStrainableAttribute: TypeAlias = Union[_BaseStrainable, _StringMatchFunction] + +#: A tag can be matched using either a single criterion or a list of +#: criteria. +_StrainableElement: TypeAlias = Union[ + _BaseStrainableElement, Iterable[_BaseStrainableElement] +] + +#: An attribute value can be matched using either a single criterion +#: or a list of criteria. +_StrainableAttribute: TypeAlias = Union[ + _BaseStrainableAttribute, Iterable[_BaseStrainableAttribute] +] + +#: An string can be matched using the same techniques as +#: an attribute value. +_StrainableString: TypeAlias = _StrainableAttribute + +#: A dictionary may be used to match against multiple attribute vlaues at once. +_StrainableAttributes: TypeAlias = Dict[str, _StrainableAttribute] + +#: Many Beautiful soup methods return a PageElement or an ResultSet of +#: PageElements. A PageElement is either a Tag or a NavigableString. +#: These convenience aliases make it easier for IDE users to see which methods +#: are available on the objects they're dealing with. +_OneElement: TypeAlias = Union["PageElement", "Tag", "NavigableString"] +_AtMostOneElement: TypeAlias = Optional[_OneElement] +_QueryResults: TypeAlias = "ResultSet[_OneElement]" diff --git a/.venv/lib/python3.12/site-packages/bs4/_warnings.py b/.venv/lib/python3.12/site-packages/bs4/_warnings.py new file mode 100644 index 00000000..43094730 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/_warnings.py @@ -0,0 +1,98 @@ +"""Define some custom warnings.""" + + +class GuessedAtParserWarning(UserWarning): + """The warning issued when BeautifulSoup has to guess what parser to + use -- probably because no parser was specified in the constructor. + """ + + MESSAGE: str = """No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system ("%(parser)s"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently. + +The code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, pass the additional argument 'features="%(parser)s"' to the BeautifulSoup constructor. 
+""" + + +class UnusualUsageWarning(UserWarning): + """A superclass for warnings issued when Beautiful Soup sees + something that is typically the result of a mistake in the calling + code, but might be intentional on the part of the user. If it is + in fact intentional, you can filter the individual warning class + to get rid of the warning. If you don't like Beautiful Soup + second-guessing what you are doing, you can filter the + UnusualUsageWarningclass itself and get rid of these entirely. + """ + + +class MarkupResemblesLocatorWarning(UnusualUsageWarning): + """The warning issued when BeautifulSoup is given 'markup' that + actually looks like a resource locator -- a URL or a path to a file + on disk. + """ + + #: :meta private: + GENERIC_MESSAGE: str = """ + +However, if you want to parse some data that happens to look like a %(what)s, then nothing has gone wrong: you are using Beautiful Soup correctly, and this warning is spurious and can be filtered. To make this warning go away, run this code before calling the BeautifulSoup constructor: + + from bs4 import MarkupResemblesLocatorWarning + import warnings + + warnings.filterwarnings("ignore", category=MarkupResemblesLocatorWarning) + """ + + URL_MESSAGE: str = ( + """The input passed in on this line looks more like a URL than HTML or XML. + +If you meant to use Beautiful Soup to parse the web page found at a certain URL, then something has gone wrong. You should use an Python package like 'requests' to fetch the content behind the URL. Once you have the content as a string, you can feed that string into Beautiful Soup.""" + + GENERIC_MESSAGE + ) + + FILENAME_MESSAGE: str = ( + """The input passed in on this line looks more like a filename than HTML or XML. + +If you meant to use Beautiful Soup to parse the contents of a file on disk, then something has gone wrong. You should open the file first, using code like this: + + filehandle = open(your filename) + +You can then feed the open filehandle into Beautiful Soup instead of using the filename.""" + + GENERIC_MESSAGE + ) + + +class AttributeResemblesVariableWarning(UnusualUsageWarning, SyntaxWarning): + """The warning issued when Beautiful Soup suspects a provided + attribute name may actually be the misspelled name of a Beautiful + Soup variable. Generally speaking, this is only used in cases like + "_class" where it's very unlikely the user would be referencing an + XML attribute with that name. + """ + + MESSAGE: str = """%(original)r is an unusual attribute name and is a common misspelling for %(autocorrect)r. + +If you meant %(autocorrect)r, change your code to use it, and this warning will go away. + +If you really did mean to check the %(original)r attribute, this warning is spurious and can be filtered. To make it go away, run this code before creating your BeautifulSoup object: + + from bs4 import AttributeResemblesVariableWarning + import warnings + + warnings.filterwarnings("ignore", category=AttributeResemblesVariableWarning) +""" + + +class XMLParsedAsHTMLWarning(UnusualUsageWarning): + """The warning issued when an HTML parser is used to parse + XML that is not (as far as we can tell) XHTML. + """ + + MESSAGE: str = """It looks like you're using an HTML parser to parse an XML document. + +Assuming this really is an XML document, what you're doing might work, but you should know that using an XML parser will be more reliable. 
To parse this document as XML, make sure you have the Python package 'lxml' installed, and pass the keyword argument `features="xml"` into the BeautifulSoup constructor. + +If you want or need to use an HTML parser on this document, you can make this warning go away by filtering it. To do that, run this code before calling the BeautifulSoup constructor: + + from bs4 import XMLParsedAsHTMLWarning + import warnings + + warnings.filterwarnings("ignore", category=XMLParsedAsHTMLWarning) +""" diff --git a/.venv/lib/python3.12/site-packages/bs4/builder/__init__.py b/.venv/lib/python3.12/site-packages/bs4/builder/__init__.py new file mode 100644 index 00000000..5f2b38de --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/builder/__init__.py @@ -0,0 +1,848 @@ +from __future__ import annotations + +# Use of this source code is governed by the MIT license. +__license__ = "MIT" + +from collections import defaultdict +import re +from types import ModuleType +from typing import ( + Any, + cast, + Dict, + Iterable, + List, + Optional, + Pattern, + Set, + Tuple, + Type, + TYPE_CHECKING, +) +import warnings +import sys +from bs4.element import ( + AttributeDict, + AttributeValueList, + CharsetMetaAttributeValue, + ContentMetaAttributeValue, + RubyParenthesisString, + RubyTextString, + Stylesheet, + Script, + TemplateString, + nonwhitespace_re, +) + +# Exceptions were moved to their own module in 4.13. Import here for +# backwards compatibility. +from bs4.exceptions import ParserRejectedMarkup + +from bs4._typing import ( + _AttributeValues, + _RawAttributeValue, +) + +from bs4._warnings import XMLParsedAsHTMLWarning + +if TYPE_CHECKING: + from bs4 import BeautifulSoup + from bs4.element import ( + NavigableString, + Tag, + ) + from bs4._typing import ( + _AttributeValue, + _Encoding, + _Encodings, + _RawOrProcessedAttributeValues, + _RawMarkup, + ) + +__all__ = [ + "HTMLTreeBuilder", + "SAXTreeBuilder", + "TreeBuilder", + "TreeBuilderRegistry", +] + +# Some useful features for a TreeBuilder to have. +FAST = "fast" +PERMISSIVE = "permissive" +STRICT = "strict" +XML = "xml" +HTML = "html" +HTML_5 = "html5" + +__all__ = [ + "TreeBuilderRegistry", + "TreeBuilder", + "HTMLTreeBuilder", + "DetectsXMLParsedAsHTML", + + "ParserRejectedMarkup", # backwards compatibility only as of 4.13.0 +] + +class TreeBuilderRegistry(object): + """A way of looking up TreeBuilder subclasses by their name or by desired + features. + """ + + builders_for_feature: Dict[str, List[Type[TreeBuilder]]] + builders: List[Type[TreeBuilder]] + + def __init__(self) -> None: + self.builders_for_feature = defaultdict(list) + self.builders = [] + + def register(self, treebuilder_class: type[TreeBuilder]) -> None: + """Register a treebuilder based on its advertised features. + + :param treebuilder_class: A subclass of `TreeBuilder`. its + `TreeBuilder.features` attribute should list its features. + """ + for feature in treebuilder_class.features: + self.builders_for_feature[feature].insert(0, treebuilder_class) + self.builders.insert(0, treebuilder_class) + + def lookup(self, *features: str) -> Optional[Type[TreeBuilder]]: + """Look up a TreeBuilder subclass with the desired features. + + :param features: A list of features to look for. If none are + provided, the most recently registered TreeBuilder subclass + will be used. + :return: A TreeBuilder subclass, or None if there's no + registered subclass with all the requested features. + """ + if len(self.builders) == 0: + # There are no builders at all. 
+ return None + + if len(features) == 0: + # They didn't ask for any features. Give them the most + # recently registered builder. + return self.builders[0] + + # Go down the list of features in order, and eliminate any builders + # that don't match every feature. + feature_list = list(features) + feature_list.reverse() + candidates = None + candidate_set = None + while len(feature_list) > 0: + feature = feature_list.pop() + we_have_the_feature = self.builders_for_feature.get(feature, []) + if len(we_have_the_feature) > 0: + if candidates is None: + candidates = we_have_the_feature + candidate_set = set(candidates) + else: + # Eliminate any candidates that don't have this feature. + candidate_set = candidate_set.intersection(set(we_have_the_feature)) + + # The only valid candidates are the ones in candidate_set. + # Go through the original list of candidates and pick the first one + # that's in candidate_set. + if candidate_set is None or candidates is None: + return None + for candidate in candidates: + if candidate in candidate_set: + return candidate + return None + + +#: The `BeautifulSoup` constructor will take a list of features +#: and use it to look up `TreeBuilder` classes in this registry. +builder_registry: TreeBuilderRegistry = TreeBuilderRegistry() + + +class TreeBuilder(object): + """Turn a textual document into a Beautiful Soup object tree. + + This is an abstract superclass which smooths out the behavior of + different parser libraries into a single, unified interface. + + :param multi_valued_attributes: If this is set to None, the + TreeBuilder will not turn any values for attributes like + 'class' into lists. Setting this to a dictionary will + customize this behavior; look at :py:attr:`bs4.builder.HTMLTreeBuilder.DEFAULT_CDATA_LIST_ATTRIBUTES` + for an example. + + Internally, these are called "CDATA list attributes", but that + probably doesn't make sense to an end-user, so the argument name + is ``multi_valued_attributes``. + + :param preserve_whitespace_tags: A set of tags to treat + the way <pre> tags are treated in HTML. Tags in this set + are immune from pretty-printing; their contents will always be + output as-is. + + :param string_containers: A dictionary mapping tag names to + the classes that should be instantiated to contain the textual + contents of those tags. The default is to use NavigableString + for every tag, no matter what the name. You can override the + default by changing :py:attr:`DEFAULT_STRING_CONTAINERS`. + + :param store_line_numbers: If the parser keeps track of the line + numbers and positions of the original markup, that information + will, by default, be stored in each corresponding + :py:class:`bs4.element.Tag` object. You can turn this off by + passing store_line_numbers=False; then Tag.sourcepos and + Tag.sourceline will always be None. If the parser you're using + doesn't keep track of this information, then store_line_numbers + is irrelevant. + + :param attribute_dict_class: The value of a multi-valued attribute + (such as HTML's 'class') willl be stored in an instance of this + class. The default is Beautiful Soup's built-in + `AttributeValueList`, which is a normal Python list, and you + will probably never need to change it. 
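`TreeBuilderRegistry.lookup` intersects the builders registered for each requested feature, which is exactly how the constructor resolves `DEFAULT_BUILDER_FEATURES = ["html", "fast"]`. A quick sketch (assuming at least the bundled `html.parser` builder is registered):

```python
from bs4.builder import builder_registry

cls = builder_registry.lookup("html", "fast")
if cls is None:
    print("No builder with those features is installed.")
else:
    print(cls.NAME)  # e.g. "html.parser", or "lxml" if lxml is installed
```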
+ """ + + USE_DEFAULT: Any = object() #: :meta private: + + def __init__( + self, + multi_valued_attributes: Dict[str, Set[str]] = USE_DEFAULT, + preserve_whitespace_tags: Set[str] = USE_DEFAULT, + store_line_numbers: bool = USE_DEFAULT, + string_containers: Dict[str, Type[NavigableString]] = USE_DEFAULT, + empty_element_tags: Set[str] = USE_DEFAULT, + attribute_dict_class: Type[AttributeDict] = AttributeDict, + attribute_value_list_class: Type[AttributeValueList] = AttributeValueList, + ): + self.soup = None + if multi_valued_attributes is self.USE_DEFAULT: + multi_valued_attributes = self.DEFAULT_CDATA_LIST_ATTRIBUTES + self.cdata_list_attributes = multi_valued_attributes + if preserve_whitespace_tags is self.USE_DEFAULT: + preserve_whitespace_tags = self.DEFAULT_PRESERVE_WHITESPACE_TAGS + self.preserve_whitespace_tags = preserve_whitespace_tags + if empty_element_tags is self.USE_DEFAULT: + self.empty_element_tags = self.DEFAULT_EMPTY_ELEMENT_TAGS + else: + self.empty_element_tags = empty_element_tags + # TODO: store_line_numbers is probably irrelevant now that + # the behavior of sourceline and sourcepos has been made consistent + # everywhere. + if store_line_numbers == self.USE_DEFAULT: + store_line_numbers = self.TRACKS_LINE_NUMBERS + self.store_line_numbers = store_line_numbers + if string_containers == self.USE_DEFAULT: + string_containers = self.DEFAULT_STRING_CONTAINERS + self.string_containers = string_containers + self.attribute_dict_class = attribute_dict_class + self.attribute_value_list_class = attribute_value_list_class + + NAME: str = "[Unknown tree builder]" + ALTERNATE_NAMES: Iterable[str] = [] + features: Iterable[str] = [] + + is_xml: bool = False + picklable: bool = False + + soup: Optional[BeautifulSoup] #: :meta private: + + #: A tag will be considered an empty-element + #: tag when and only when it has no contents. + empty_element_tags: Optional[Set[str]] = None #: :meta private: + cdata_list_attributes: Dict[str, Set[str]] #: :meta private: + preserve_whitespace_tags: Set[str] #: :meta private: + string_containers: Dict[str, Type[NavigableString]] #: :meta private: + tracks_line_numbers: bool #: :meta private: + + #: A value for these tag/attribute combinations is a space- or + #: comma-separated list of CDATA, rather than a single CDATA. + DEFAULT_CDATA_LIST_ATTRIBUTES: Dict[str, Set[str]] = defaultdict(set) + + #: Whitespace should be preserved inside these tags. + DEFAULT_PRESERVE_WHITESPACE_TAGS: Set[str] = set() + + #: The textual contents of tags with these names should be + #: instantiated with some class other than `bs4.element.NavigableString`. + DEFAULT_STRING_CONTAINERS: Dict[str, Type[bs4.element.NavigableString]] = {} + + #: By default, tags are treated as empty-element tags if they have + #: no contents--that is, using XML rules. HTMLTreeBuilder + #: defines a different set of DEFAULT_EMPTY_ELEMENT_TAGS based on the + #: HTML 4 and HTML5 standards. + DEFAULT_EMPTY_ELEMENT_TAGS: Optional[Set[str]] = None + + #: Most parsers don't keep track of line numbers. + TRACKS_LINE_NUMBERS: bool = False + + def initialize_soup(self, soup: BeautifulSoup) -> None: + """The BeautifulSoup object has been initialized and is now + being associated with the TreeBuilder. + + :param soup: A BeautifulSoup object. + """ + self.soup = soup + + def reset(self) -> None: + """Do any work necessary to reset the underlying parser + for a new document. + + By default, this does nothing. 
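# The multi_valued_attributes argument documented above is normally
# supplied through the BeautifulSoup constructor, which forwards extra
# keyword arguments to the TreeBuilder; a minimal sketch:

from bs4 import BeautifulSoup

markup = '<a class="foo bar"></a>'
print(BeautifulSoup(markup, "html.parser").a["class"])
# ['foo', 'bar'] -- the default splits 'class' into a list
print(BeautifulSoup(markup, "html.parser", multi_valued_attributes=None).a["class"])
# 'foo bar' -- passing None keeps the raw string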
+ """ + pass + + def can_be_empty_element(self, tag_name: str) -> bool: + """Might a tag with this name be an empty-element tag? + + The final markup may or may not actually present this tag as + self-closing. + + For instance: an HTMLBuilder does not consider a <p> tag to be + an empty-element tag (it's not in + HTMLBuilder.empty_element_tags). This means an empty <p> tag + will be presented as "<p></p>", not "<p/>" or "<p>". + + The default implementation has no opinion about which tags are + empty-element tags, so a tag will be presented as an + empty-element tag if and only if it has no children. + "<foo></foo>" will become "<foo/>", and "<foo>bar</foo>" will + be left alone. + + :param tag_name: The name of a markup tag. + """ + if self.empty_element_tags is None: + return True + return tag_name in self.empty_element_tags + + def feed(self, markup: _RawMarkup) -> None: + """Run incoming markup through some parsing process.""" + raise NotImplementedError() + + def prepare_markup( + self, + markup: _RawMarkup, + user_specified_encoding: Optional[_Encoding] = None, + document_declared_encoding: Optional[_Encoding] = None, + exclude_encodings: Optional[_Encodings] = None, + ) -> Iterable[Tuple[_RawMarkup, Optional[_Encoding], Optional[_Encoding], bool]]: + """Run any preliminary steps necessary to make incoming markup + acceptable to the parser. + + :param markup: The markup that's about to be parsed. + :param user_specified_encoding: The user asked to try this encoding + to convert the markup into a Unicode string. + :param document_declared_encoding: The markup itself claims to be + in this encoding. NOTE: This argument is not used by the + calling code and can probably be removed. + :param exclude_encodings: The user asked *not* to try any of + these encodings. + + :yield: A series of 4-tuples: (markup, encoding, declared encoding, + has undergone character replacement) + + Each 4-tuple represents a strategy that the parser can try + to convert the document to Unicode and parse it. Each + strategy will be tried in turn. + + By default, the only strategy is to parse the markup + as-is. See `LXMLTreeBuilderForXML` and + `HTMLParserTreeBuilder` for implementations that take into + account the quirks of particular parsers. + + :meta private: + + """ + yield markup, None, None, False + + def test_fragment_to_document(self, fragment: str) -> str: + """Wrap an HTML fragment to make it look like a document. + + Different parsers do this differently. For instance, lxml + introduces an empty <head> tag, and html5lib + doesn't. Abstracting this away lets us write simple tests + which run HTML fragments through the parser and compare the + results against other HTML fragments. + + This method should not be used outside of unit tests. + + :param fragment: A fragment of HTML. + :return: A full HTML document. + :meta private: + """ + return fragment + + def set_up_substitutions(self, tag: Tag) -> bool: + """Set up any substitutions that will need to be performed on + a `Tag` when it's output as a string. + + By default, this does nothing. See `HTMLTreeBuilder` for a + case where this is used. + + :return: Whether or not a substitution was performed. + :meta private: + """ + return False + + def _replace_cdata_list_attribute_values( + self, tag_name: str, attrs: _RawOrProcessedAttributeValues + ) -> _AttributeValues: + """When an attribute value is associated with a tag that can + have multiple values for that attribute, convert the string + value to a list of strings. 
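# How can_be_empty_element() surfaces in output, assuming the stock
# html.parser builder: <br> is a known void tag, <p> is not, so an
# empty <p> keeps its separate closing tag.

from bs4 import BeautifulSoup

soup = BeautifulSoup("<br><p></p>", "html.parser")
print(soup.br.is_empty_element, soup.p.is_empty_element)  # True False
print(str(soup))                                          # <br/><p></p>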
+ + Basically, replaces class="foo bar" with class=["foo", "bar"] + + NOTE: This method modifies its input in place. + + :param tag_name: The name of a tag. + :param attrs: A dictionary containing the tag's attributes. + Any appropriate attribute values will be modified in place. + :return: The modified dictionary that was originally passed in. + """ + + # First, cast the attrs dict to _AttributeValues. This might + # not be accurate yet, but it will be by the time this method + # returns. + modified_attrs = cast(_AttributeValues, attrs) + if not modified_attrs or not self.cdata_list_attributes: + # Nothing to do. + return modified_attrs + + # There is at least a possibility that we need to modify one of + # the attribute values. + universal: Set[str] = self.cdata_list_attributes.get("*", set()) + tag_specific = self.cdata_list_attributes.get(tag_name.lower(), None) + for attr in list(modified_attrs.keys()): + modified_value: _AttributeValue + if attr in universal or (tag_specific and attr in tag_specific): + # We have a "class"-type attribute whose string + # value is a whitespace-separated list of + # values. Split it into a list. + original_value: _AttributeValue = modified_attrs[attr] + if isinstance(original_value, _RawAttributeValue): + # This is a _RawAttributeValue (a string) that + # needs to be split and converted to a + # AttributeValueList so it can be an + # _AttributeValue. + modified_value = self.attribute_value_list_class( + nonwhitespace_re.findall(original_value) + ) + else: + # html5lib calls setAttributes twice for the + # same tag when rearranging the parse tree. On + # the second call the attribute value here is + # already a list. This can also happen when a + # Tag object is cloned. If this happens, leave + # the value alone rather than trying to split + # it again. + modified_value = original_value + modified_attrs[attr] = modified_value + return modified_attrs + + +class SAXTreeBuilder(TreeBuilder): + """A Beautiful Soup treebuilder that listens for SAX events. + + This is not currently used for anything, and it will be removed + soon. It was a good idea, but it wasn't properly integrated into the + rest of Beautiful Soup, so there have been long stretches where it + hasn't worked properly. + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + warnings.warn( + "The SAXTreeBuilder class was deprecated in 4.13.0 and will be removed soon thereafter. It is completely untested and probably doesn't work; do not use it.", + DeprecationWarning, + stacklevel=2, + ) + super(SAXTreeBuilder, self).__init__(*args, **kwargs) + + def feed(self, markup: _RawMarkup) -> None: + raise NotImplementedError() + + def close(self) -> None: + pass + + def startElement(self, name: str, attrs: Dict[str, str]) -> None: + attrs = AttributeDict((key[1], value) for key, value in list(attrs.items())) + # print("Start %s, %r" % (name, attrs)) + assert self.soup is not None + self.soup.handle_starttag(name, None, None, attrs) + + def endElement(self, name: str) -> None: + # print("End %s" % name) + assert self.soup is not None + self.soup.handle_endtag(name) + + def startElementNS( + self, nsTuple: Tuple[str, str], nodeName: str, attrs: Dict[str, str] + ) -> None: + # Throw away (ns, nodeName) for now. + self.startElement(nodeName, attrs) + + def endElementNS(self, nsTuple: Tuple[str, str], nodeName: str) -> None: + # Throw away (ns, nodeName) for now. 
+ self.endElement(nodeName) + # handler.endElementNS((ns, node.nodeName), node.nodeName) + + def startPrefixMapping(self, prefix: str, nodeValue: str) -> None: + # Ignore the prefix for now. + pass + + def endPrefixMapping(self, prefix: str) -> None: + # Ignore the prefix for now. + # handler.endPrefixMapping(prefix) + pass + + def characters(self, content: str) -> None: + assert self.soup is not None + self.soup.handle_data(content) + + def startDocument(self) -> None: + pass + + def endDocument(self) -> None: + pass + + +class HTMLTreeBuilder(TreeBuilder): + """This TreeBuilder knows facts about HTML, such as which tags are treated + specially by the HTML standard. + """ + + #: Some HTML tags are defined as having no contents. Beautiful Soup + #: treats these specially. + DEFAULT_EMPTY_ELEMENT_TAGS: Set[str] = set( + [ + # These are from HTML5. + "area", + "base", + "br", + "col", + "embed", + "hr", + "img", + "input", + "keygen", + "link", + "menuitem", + "meta", + "param", + "source", + "track", + "wbr", + # These are from earlier versions of HTML and are removed in HTML5. + "basefont", + "bgsound", + "command", + "frame", + "image", + "isindex", + "nextid", + "spacer", + ] + ) + + #: The HTML standard defines these tags as block-level elements. Beautiful + #: Soup does not treat these elements differently from other elements, + #: but it may do so eventually, and this information is available if + #: you need to use it. + DEFAULT_BLOCK_ELEMENTS: Set[str] = set( + [ + "address", + "article", + "aside", + "blockquote", + "canvas", + "dd", + "div", + "dl", + "dt", + "fieldset", + "figcaption", + "figure", + "footer", + "form", + "h1", + "h2", + "h3", + "h4", + "h5", + "h6", + "header", + "hr", + "li", + "main", + "nav", + "noscript", + "ol", + "output", + "p", + "pre", + "section", + "table", + "tfoot", + "ul", + "video", + ] + ) + + #: These HTML tags need special treatment so they can be + #: represented by a string class other than `bs4.element.NavigableString`. + #: + #: For some of these tags, it's because the HTML standard defines + #: an unusual content model for them. I made this list by going + #: through the HTML spec + #: (https://html.spec.whatwg.org/#metadata-content) and looking for + #: "metadata content" elements that can contain strings. + #: + #: The Ruby tags (<rt> and <rp>) are here despite being normal + #: "phrasing content" tags, because the content they contain is + #: qualitatively different from other text in the document, and it + #: can be useful to be able to distinguish it. + #: + #: TODO: Arguably <noscript> could go here but it seems + #: qualitatively different from the other tags. + DEFAULT_STRING_CONTAINERS: Dict[str, Type[bs4.element.NavigableString]] = { + "rt": RubyTextString, + "rp": RubyParenthesisString, + "style": Stylesheet, + "script": Script, + "template": TemplateString, + } + + #: The HTML standard defines these attributes as containing a + #: space-separated list of values, not a single value. That is, + #: class="foo bar" means that the 'class' attribute has two values, + #: 'foo' and 'bar', not the single value 'foo bar'. When we + #: encounter one of these attributes, we will parse its value into + #: a list of values if possible. Upon output, the list will be + #: converted back into a string. 
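# A quick check of the DEFAULT_STRING_CONTAINERS mapping above: the text
# inside <script> and <style> comes back as dedicated NavigableString
# subclasses, so it can be told apart from ordinary prose.

from bs4 import BeautifulSoup

soup = BeautifulSoup("<script>x = 1</script><style>p {}</style>", "html.parser")
print(type(soup.script.string).__name__)  # Script
print(type(soup.style.string).__name__)   # Stylesheet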
+ DEFAULT_CDATA_LIST_ATTRIBUTES: Dict[str, Set[str]] = { + "*": {"class", "accesskey", "dropzone"}, + "a": {"rel", "rev"}, + "link": {"rel", "rev"}, + "td": {"headers"}, + "th": {"headers"}, + "form": {"accept-charset"}, + "object": {"archive"}, + # These are HTML5 specific, as are *.accesskey and *.dropzone above. + "area": {"rel"}, + "icon": {"sizes"}, + "iframe": {"sandbox"}, + "output": {"for"}, + } + + #: By default, whitespace inside these HTML tags will be + #: preserved rather than being collapsed. + DEFAULT_PRESERVE_WHITESPACE_TAGS: set[str] = set(["pre", "textarea"]) + + def set_up_substitutions(self, tag: Tag) -> bool: + """Replace the declared encoding in a <meta> tag with a placeholder, + to be substituted when the tag is output to a string. + + An HTML document may come in to Beautiful Soup as one + encoding, but exit in a different encoding, and the <meta> tag + needs to be changed to reflect this. + + :return: Whether or not a substitution was performed. + + :meta private: + """ + # We are only interested in <meta> tags + if tag.name != "meta": + return False + + # TODO: This cast will fail in the (very unlikely) scenario + # that the programmer who instantiates the TreeBuilder + # specifies meta['content'] or meta['charset'] as + # cdata_list_attributes. + content: Optional[str] = cast(Optional[str], tag.get("content")) + charset: Optional[str] = cast(Optional[str], tag.get("charset")) + + # But we can accommodate meta['http-equiv'] being made a + # cdata_list_attribute (again, very unlikely) without much + # trouble. + http_equiv: List[str] = tag.get_attribute_list("http-equiv") + + # We are interested in <meta> tags that say what encoding the + # document was originally in. This means HTML 5-style <meta> + # tags that provide the "charset" attribute. It also means + # HTML 4-style <meta> tags that provide the "content" + # attribute and have "http-equiv" set to "content-type". + # + # In both cases we will replace the value of the appropriate + # attribute with a standin object that can take on any + # encoding. + substituted = False + if charset is not None: + # HTML 5 style: + # <meta charset="utf8"> + tag["charset"] = CharsetMetaAttributeValue(charset) + substituted = True + + elif content is not None and any( + x.lower() == "content-type" for x in http_equiv + ): + # HTML 4 style: + # <meta http-equiv="content-type" content="text/html; charset=utf8"> + tag["content"] = ContentMetaAttributeValue(content) + substituted = True + + return substituted + + +class DetectsXMLParsedAsHTML(object): + """A mixin class for any class (a TreeBuilder, or some class used by a + TreeBuilder) that's in a position to detect whether an XML + document is being incorrectly parsed as HTML, and issue an + appropriate warning. + + This requires being able to observe an incoming processing + instruction that might be an XML declaration, and also able to + observe tags as they're opened. If you can't do that for a given + `TreeBuilder`, there's a less reliable implementation based on + examining the raw markup. + """ + + #: Regular expression for seeing if string markup has an <html> tag. + LOOKS_LIKE_HTML: Pattern[str] = re.compile("<[^ +]html", re.I) + + #: Regular expression for seeing if byte markup has an <html> tag. + LOOKS_LIKE_HTML_B: Pattern[bytes] = re.compile(b"<[^ +]html", re.I) + + #: The start of an XML document string. + XML_PREFIX: str = "<?xml" + + #: The start of an XML document bytestring. 
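# The <meta> substitution set up above in action: once the charset
# attribute is wrapped in a CharsetMetaAttributeValue, re-encoding the
# tag rewrites the declared encoding to match. A sketch:

from bs4 import BeautifulSoup

soup = BeautifulSoup('<meta charset="utf-8">', "html.parser")
print(soup.meta.encode("shift-jis"))  # expected: b'<meta charset="shift-jis"/>'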
+ XML_PREFIX_B: bytes = b"<?xml" + + # This is typed as str, not `ProcessingInstruction`, because this + # check may be run before any Beautiful Soup objects are created. + _first_processing_instruction: Optional[str] #: :meta private: + _root_tag_name: Optional[str] #: :meta private: + + @classmethod + def warn_if_markup_looks_like_xml( + cls, markup: Optional[_RawMarkup], stacklevel: int = 3 + ) -> bool: + """Perform a check on some markup to see if it looks like XML + that's not XHTML. If so, issue a warning. + + This is much less reliable than doing the check while parsing, + but some of the tree builders can't do that. + + :param stacklevel: The stacklevel of the code calling this\ + function. + + :return: True if the markup looks like non-XHTML XML, False + otherwise. + """ + if markup is None: + return False + markup = markup[:500] + if isinstance(markup, bytes): + markup_b: bytes = markup + looks_like_xml = markup_b.startswith( + cls.XML_PREFIX_B + ) and not cls.LOOKS_LIKE_HTML_B.search(markup) + else: + markup_s: str = markup + looks_like_xml = markup_s.startswith( + cls.XML_PREFIX + ) and not cls.LOOKS_LIKE_HTML.search(markup) + + if looks_like_xml: + cls._warn(stacklevel=stacklevel + 2) + return True + return False + + @classmethod + def _warn(cls, stacklevel: int = 5) -> None: + """Issue a warning about XML being parsed as HTML.""" + warnings.warn( + XMLParsedAsHTMLWarning.MESSAGE, + XMLParsedAsHTMLWarning, + stacklevel=stacklevel, + ) + + def _initialize_xml_detector(self) -> None: + """Call this method before parsing a document.""" + self._first_processing_instruction = None + self._root_tag_name = None + + def _document_might_be_xml(self, processing_instruction: str) -> None: + """Call this method when encountering an XML declaration, or a + "processing instruction" that might be an XML declaration. + + This helps Beautiful Soup detect potential issues later, if + the XML document turns out to be a non-XHTML document that's + being parsed as XML. + """ + if ( + self._first_processing_instruction is not None + or self._root_tag_name is not None + ): + # The document has already started. Don't bother checking + # anymore. + return + + self._first_processing_instruction = processing_instruction + + # We won't know until we encounter the first tag whether or + # not this is actually a problem. + + def _root_tag_encountered(self, name: str) -> None: + """Call this when you encounter the document's root tag. + + This is where we actually check whether an XML document is + being incorrectly parsed as HTML, and issue the warning. + """ + if self._root_tag_name is not None: + # This method was incorrectly called multiple times. Do + # nothing. + return + + self._root_tag_name = name + + if ( + name != "html" + and self._first_processing_instruction is not None + and self._first_processing_instruction.lower().startswith("xml ") + ): + # We encountered an XML declaration and then a tag other + # than 'html'. This is a reliable indicator that a + # non-XHTML document is being parsed as XML. + self._warn(stacklevel=10) + + +def register_treebuilders_from(module: ModuleType) -> None: + """Copy TreeBuilders from the given module into this module.""" + this_module = sys.modules[__name__] + for name in module.__all__: + obj = getattr(module, name) + + if issubclass(obj, TreeBuilder): + setattr(this_module, name, obj) + this_module.__all__.append(name) + # Register the builder while we're at it. 
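# Provoking the detector defined above: an XML declaration followed by a
# non-<html> root tag fires XMLParsedAsHTMLWarning when the document goes
# through an HTML parser.

import warnings
from bs4 import BeautifulSoup, XMLParsedAsHTMLWarning

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    BeautifulSoup('<?xml version="1.0"?><root><a/></root>', "html.parser")
print(any(w.category is XMLParsedAsHTMLWarning for w in caught))  # True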
+ this_module.builder_registry.register(obj) + + +# Builders are registered in reverse order of priority, so that custom +# builder registrations will take precedence. In general, we want lxml +# to take precedence over html5lib, because it's faster. And we only +# want to use HTMLParser as a last resort. +from . import _htmlparser # noqa: E402 + +register_treebuilders_from(_htmlparser) +try: + from . import _html5lib + + register_treebuilders_from(_html5lib) +except ImportError: + # They don't have html5lib installed. + pass +try: + from . import _lxml + + register_treebuilders_from(_lxml) +except ImportError: + # They don't have lxml installed. + pass diff --git a/.venv/lib/python3.12/site-packages/bs4/builder/_html5lib.py b/.venv/lib/python3.12/site-packages/bs4/builder/_html5lib.py new file mode 100644 index 00000000..c13439d0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/builder/_html5lib.py @@ -0,0 +1,594 @@ +# Use of this source code is governed by the MIT license. +__license__ = "MIT" + +__all__ = [ + "HTML5TreeBuilder", +] + +from typing import ( + Any, + cast, + Dict, + Iterable, + Optional, + Sequence, + TYPE_CHECKING, + Tuple, + Union, +) +from typing_extensions import TypeAlias +from bs4._typing import ( + _AttributeValue, + _AttributeValues, + _Encoding, + _Encodings, + _NamespaceURL, + _RawMarkup, +) + +import warnings +from bs4.builder import ( + DetectsXMLParsedAsHTML, + PERMISSIVE, + HTML, + HTML_5, + HTMLTreeBuilder, +) +from bs4.element import ( + NamespacedAttribute, + PageElement, + nonwhitespace_re, +) +import html5lib +from html5lib.constants import ( + namespaces, +) +from bs4.element import ( + Comment, + Doctype, + NavigableString, + Tag, +) + +if TYPE_CHECKING: + from bs4 import BeautifulSoup + +from html5lib.treebuilders import base as treebuilder_base + + +class HTML5TreeBuilder(HTMLTreeBuilder): + """Use `html5lib <https://github.com/html5lib/html5lib-python>`_ to + build a tree. + + Note that `HTML5TreeBuilder` does not support some common HTML + `TreeBuilder` features. Some of these features could theoretically + be implemented, but at the very least it's quite difficult, + because html5lib moves the parse tree around as it's being built. + + Specifically: + + * This `TreeBuilder` doesn't use different subclasses of + `NavigableString` (e.g. `Script`) based on the name of the tag + in which the string was found. + * You can't use a `SoupStrainer` to parse only part of a document. + """ + + NAME: str = "html5lib" + + features: Sequence[str] = [NAME, PERMISSIVE, HTML_5, HTML] + + #: html5lib can tell us which line number and position in the + #: original file is the source of an element. + TRACKS_LINE_NUMBERS: bool = True + + underlying_builder: "TreeBuilderForHtml5lib" #: :meta private: + user_specified_encoding: Optional[_Encoding] + + def prepare_markup( + self, + markup: _RawMarkup, + user_specified_encoding: Optional[_Encoding] = None, + document_declared_encoding: Optional[_Encoding] = None, + exclude_encodings: Optional[_Encodings] = None, + ) -> Iterable[Tuple[_RawMarkup, Optional[_Encoding], Optional[_Encoding], bool]]: + # Store the user-specified encoding for use later on. + self.user_specified_encoding = user_specified_encoding + + # document_declared_encoding and exclude_encodings aren't used + # ATM because the html5lib TreeBuilder doesn't use + # UnicodeDammit. 
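# A sketch of this builder in use, assuming the html5lib package is
# installed: html5lib repairs markup the way a browser would, adding the
# full <html>/<head>/<body> scaffolding.

from bs4 import BeautifulSoup

print(BeautifulSoup("<p>one<p>two", "html5lib"))
# <html><head></head><body><p>one</p><p>two</p></body></html>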
+ for variable, name in ( + (document_declared_encoding, "document_declared_encoding"), + (exclude_encodings, "exclude_encodings"), + ): + if variable: + warnings.warn( + f"You provided a value for {name}, but the html5lib tree builder doesn't support {name}.", + stacklevel=3, + ) + + # html5lib only parses HTML, so if it's given XML that's worth + # noting. + DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(markup, stacklevel=3) + + yield (markup, None, None, False) + + # These methods are defined by Beautiful Soup. + def feed(self, markup: _RawMarkup) -> None: + """Run some incoming markup through some parsing process, + populating the `BeautifulSoup` object in `HTML5TreeBuilder.soup`. + """ + if self.soup is not None and self.soup.parse_only is not None: + warnings.warn( + "You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.", + stacklevel=4, + ) + + # self.underlying_builder is probably None now, but it'll be set + # when html5lib calls self.create_treebuilder(). + parser = html5lib.HTMLParser(tree=self.create_treebuilder) + assert self.underlying_builder is not None + self.underlying_builder.parser = parser + extra_kwargs = dict() + if not isinstance(markup, str): + # kwargs, specifically override_encoding, will eventually + # be passed in to html5lib's + # HTMLBinaryInputStream.__init__. + extra_kwargs["override_encoding"] = self.user_specified_encoding + + doc = parser.parse(markup, **extra_kwargs) + + # Set the character encoding detected by the tokenizer. + if isinstance(markup, str): + # We need to special-case this because html5lib sets + # charEncoding to UTF-8 if it gets Unicode input. + doc.original_encoding = None + else: + original_encoding = parser.tokenizer.stream.charEncoding[0] + # The encoding is an html5lib Encoding object. We want to + # use a string for compatibility with other tree builders. + original_encoding = original_encoding.name + doc.original_encoding = original_encoding + self.underlying_builder.parser = None + + def create_treebuilder( + self, namespaceHTMLElements: bool + ) -> "TreeBuilderForHtml5lib": + """Called by html5lib to instantiate the kind of class it + calls a 'TreeBuilder'. + + :param namespaceHTMLElements: Whether or not to namespace HTML elements. + + :meta private: + """ + self.underlying_builder = TreeBuilderForHtml5lib( + namespaceHTMLElements, self.soup, store_line_numbers=self.store_line_numbers + ) + return self.underlying_builder + + def test_fragment_to_document(self, fragment: str) -> str: + """See `TreeBuilder`.""" + return "<html><head></head><body>%s</body></html>" % fragment + + +class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder): + soup: "BeautifulSoup" #: :meta private: + parser: Optional[html5lib.HTMLParser] #: :meta private: + + def __init__( + self, + namespaceHTMLElements: bool, + soup: Optional["BeautifulSoup"] = None, + store_line_numbers: bool = True, + **kwargs: Any, + ): + if soup: + self.soup = soup + else: + warnings.warn( + "The optionality of the 'soup' argument to the TreeBuilderForHtml5lib constructor is deprecated as of Beautiful Soup 4.13.0: 'soup' is now required. If you can't pass in a BeautifulSoup object here, or you get this warning and it seems mysterious to you, please contact the Beautiful Soup developer team for possible un-deprecation.", + DeprecationWarning, + stacklevel=2, + ) + from bs4 import BeautifulSoup + + # TODO: Why is the parser 'html.parser' here? 
Using + # html5lib doesn't cause an infinite loop and is more + # accurate. Best to get rid of this entire section, I think. + self.soup = BeautifulSoup( + "", "html.parser", store_line_numbers=store_line_numbers, **kwargs + ) + # TODO: What are **kwargs exactly? Should they be passed in + # here in addition to/instead of being passed to the BeautifulSoup + # constructor? + super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements) + + # This will be set later to a real html5lib HTMLParser object, + # which we can use to track the current line number. + self.parser = None + self.store_line_numbers = store_line_numbers + + def documentClass(self) -> "Element": + self.soup.reset() + return Element(self.soup, self.soup, None) + + def insertDoctype(self, token: Dict[str, Any]) -> None: + name: str = cast(str, token["name"]) + publicId: Optional[str] = cast(Optional[str], token["publicId"]) + systemId: Optional[str] = cast(Optional[str], token["systemId"]) + + doctype = Doctype.for_name_and_ids(name, publicId, systemId) + self.soup.object_was_parsed(doctype) + + def elementClass(self, name: str, namespace: str) -> "Element": + sourceline: Optional[int] = None + sourcepos: Optional[int] = None + if self.parser is not None and self.store_line_numbers: + # This represents the point immediately after the end of the + # tag. We don't know when the tag started, but we do know + # where it ended -- the character just before this one. + sourceline, sourcepos = self.parser.tokenizer.stream.position() + assert sourcepos is not None + sourcepos = sourcepos - 1 + tag = self.soup.new_tag( + name, namespace, sourceline=sourceline, sourcepos=sourcepos + ) + + return Element(tag, self.soup, namespace) + + def commentClass(self, data: str) -> "TextNode": + return TextNode(Comment(data), self.soup) + + def fragmentClass(self) -> "Element": + """This is only used by html5lib HTMLParser.parseFragment(), + which is never used by Beautiful Soup, only by the html5lib + unit tests. Since we don't currently hook into those tests, + the implementation is left blank. + """ + raise NotImplementedError() + + def getFragment(self) -> "Element": + """This is only used by the html5lib unit tests. Since we + don't currently hook into those tests, the implementation is + left blank. + """ + raise NotImplementedError() + + def appendChild(self, node: "Element") -> None: + # TODO: This code is not covered by the BS4 tests, and + # apparently not triggered by the html5lib test suite either. + # But it doesn't seem test-specific and there are calls to it + # (or a method with the same name) all over html5lib, so I'm + # leaving the implementation in place rather than replacing it + # with NotImplementedError() + self.soup.append(node.element) + + def getDocument(self) -> "BeautifulSoup": + return self.soup + + def testSerializer(self, element: "Element") -> str: + """This is only used by the html5lib unit tests. Since we + don't currently hook into those tests, the implementation is + left blank. 
+ """ + raise NotImplementedError() + + +class AttrList(object): + """Represents a Tag's attributes in a way compatible with html5lib.""" + + element: Tag + attrs: _AttributeValues + + def __init__(self, element: Tag): + self.element = element + self.attrs = dict(self.element.attrs) + + def __iter__(self) -> Iterable[Tuple[str, _AttributeValue]]: + return list(self.attrs.items()).__iter__() + + def __setitem__(self, name: str, value: _AttributeValue) -> None: + # If this attribute is a multi-valued attribute for this element, + # turn its value into a list. + list_attr = self.element.cdata_list_attributes or {} + if name in list_attr.get("*", []) or ( + self.element.name in list_attr + and name in list_attr.get(self.element.name, []) + ): + # A node that is being cloned may have already undergone + # this procedure. Check for this and skip it. + if not isinstance(value, list): + assert isinstance(value, str) + value = self.element.attribute_value_list_class( + nonwhitespace_re.findall(value) + ) + self.element[name] = value + + def items(self) -> Iterable[Tuple[str, _AttributeValue]]: + return list(self.attrs.items()) + + def keys(self) -> Iterable[str]: + return list(self.attrs.keys()) + + def __len__(self) -> int: + return len(self.attrs) + + def __getitem__(self, name: str) -> _AttributeValue: + return self.attrs[name] + + def __contains__(self, name: str) -> bool: + return name in list(self.attrs.keys()) + + +class BeautifulSoupNode(treebuilder_base.Node): + element: PageElement + soup: "BeautifulSoup" + namespace: Optional[_NamespaceURL] + + @property + def nodeType(self) -> int: + """Return the html5lib constant corresponding to the type of + the underlying DOM object. + + NOTE: This property is only accessed by the html5lib test + suite, not by Beautiful Soup proper. + """ + raise NotImplementedError() + + # TODO-TYPING: typeshed stubs are incorrect about this; + # cloneNode returns a new Node, not None. + def cloneNode(self) -> treebuilder_base.Node: + raise NotImplementedError() + + +class Element(BeautifulSoupNode): + element: Tag + namespace: Optional[_NamespaceURL] + + def __init__( + self, element: Tag, soup: "BeautifulSoup", namespace: Optional[_NamespaceURL] + ): + treebuilder_base.Node.__init__(self, element.name) + self.element = element + self.soup = soup + self.namespace = namespace + + def appendChild(self, node: "BeautifulSoupNode") -> None: + string_child: Optional[NavigableString] = None + child: PageElement + if type(node.element) is NavigableString: + string_child = child = node.element + else: + child = node.element + node.parent = self + + if ( + child is not None + and child.parent is not None + and not isinstance(child, str) + ): + node.element.extract() + + if ( + string_child is not None + and self.element.contents + and type(self.element.contents[-1]) is NavigableString + ): + # We are appending a string onto another string. + # TODO This has O(n^2) performance, for input like + # "a</a>a</a>a</a>..." + old_element = self.element.contents[-1] + new_element = self.soup.new_string(old_element + string_child) + old_element.replace_with(new_element) + self.soup._most_recent_element = new_element + else: + if isinstance(node, str): + # Create a brand new NavigableString from this string. + child = self.soup.new_string(node) + + # Tell Beautiful Soup to act as if it parsed this element + # immediately after the parent's last descendant. (Or + # immediately after the parent, if it has no children.) 
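# The sourceline/sourcepos bookkeeping used by elementClass() above is
# visible on every Tag after parsing; html.parser tracks positions too,
# so a quick sketch that works without html5lib installed:

from bs4 import BeautifulSoup

soup = BeautifulSoup("<html>\n<p>hi</p>\n</html>", "html.parser")
print(soup.p.sourceline, soup.p.sourcepos)  # 2 0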
+ if self.element.contents: + most_recent_element = self.element._last_descendant(False) + elif self.element.next_element is not None: + # Something from further ahead in the parse tree is + # being inserted into this earlier element. This is + # very annoying because it means an expensive search + # for the last element in the tree. + most_recent_element = self.soup._last_descendant() + else: + most_recent_element = self.element + + self.soup.object_was_parsed( + child, parent=self.element, most_recent_element=most_recent_element + ) + + def getAttributes(self) -> AttrList: + if isinstance(self.element, Comment): + return {} + return AttrList(self.element) + + # An HTML5lib attribute name may either be a single string, + # or a tuple (namespace, name). + _Html5libAttributeName: TypeAlias = Union[str, Tuple[str, str]] + # Now we can define the type this method accepts as a dictionary + # mapping those attribute names to single string values. + _Html5libAttributes: TypeAlias = Dict[_Html5libAttributeName, str] + + def setAttributes(self, attributes: Optional[_Html5libAttributes]) -> None: + if attributes is not None and len(attributes) > 0: + # Replace any namespaced attributes with + # NamespacedAttribute objects. + for name, value in list(attributes.items()): + if isinstance(name, tuple): + new_name = NamespacedAttribute(*name) + del attributes[name] + attributes[new_name] = value + + # We can now cast attributes to the type of Dict + # used by Beautiful Soup. + normalized_attributes = cast(_AttributeValues, attributes) + + # Values for tags like 'class' came in as single strings; + # replace them with lists of strings as appropriate. + self.soup.builder._replace_cdata_list_attribute_values( + self.name, normalized_attributes + ) + + # Then set the attributes on the Tag associated with this + # BeautifulSoupNode. + for name, value_or_values in list(normalized_attributes.items()): + self.element[name] = value_or_values + + # The attributes may contain variables that need substitution. + # Call set_up_substitutions manually. + # + # The Tag constructor called this method when the Tag was created, + # but we just set/changed the attributes, so call it again. 
+ self.soup.builder.set_up_substitutions(self.element) + + attributes = property(getAttributes, setAttributes) + + def insertText( + self, data: str, insertBefore: Optional["BeautifulSoupNode"] = None + ) -> None: + text = TextNode(self.soup.new_string(data), self.soup) + if insertBefore: + self.insertBefore(text, insertBefore) + else: + self.appendChild(text) + + def insertBefore( + self, node: "BeautifulSoupNode", refNode: "BeautifulSoupNode" + ) -> None: + index = self.element.index(refNode.element) + if ( + type(node.element) is NavigableString + and self.element.contents + and type(self.element.contents[index - 1]) is NavigableString + ): + # (See comments in appendChild) + old_node = self.element.contents[index - 1] + assert type(old_node) is NavigableString + new_str = self.soup.new_string(old_node + node.element) + old_node.replace_with(new_str) + else: + self.element.insert(index, node.element) + node.parent = self + + def removeChild(self, node: "Element") -> None: + node.element.extract() + + def reparentChildren(self, new_parent: "Element") -> None: + """Move all of this tag's children into another tag.""" + # print("MOVE", self.element.contents) + # print("FROM", self.element) + # print("TO", new_parent.element) + + element = self.element + new_parent_element = new_parent.element + # Determine what this tag's next_element will be once all the children + # are removed. + final_next_element = element.next_sibling + + new_parents_last_descendant = new_parent_element._last_descendant(False, False) + if len(new_parent_element.contents) > 0: + # The new parent already contains children. We will be + # appending this tag's children to the end. + + # We can make this assertion since we know new_parent has + # children. + assert new_parents_last_descendant is not None + new_parents_last_child = new_parent_element.contents[-1] + new_parents_last_descendant_next_element = ( + new_parents_last_descendant.next_element + ) + else: + # The new parent contains no children. + new_parents_last_child = None + new_parents_last_descendant_next_element = new_parent_element.next_element + + to_append = element.contents + if len(to_append) > 0: + # Set the first child's previous_element and previous_sibling + # to elements within the new parent + first_child = to_append[0] + if new_parents_last_descendant is not None: + first_child.previous_element = new_parents_last_descendant + else: + first_child.previous_element = new_parent_element + first_child.previous_sibling = new_parents_last_child + if new_parents_last_descendant is not None: + new_parents_last_descendant.next_element = first_child + else: + new_parent_element.next_element = first_child + if new_parents_last_child is not None: + new_parents_last_child.next_sibling = first_child + + # Find the very last element being moved. It is now the + # parent's last descendant. It has no .next_sibling and + # its .next_element is whatever the previous last + # descendant had. + last_childs_last_descendant = to_append[-1]._last_descendant( + is_initialized=False, accept_self=True + ) + + # Since we passed accept_self=True into _last_descendant, + # there's no possibility that the result is None. 
+            assert last_childs_last_descendant is not None
+            last_childs_last_descendant.next_element = (
+                new_parents_last_descendant_next_element
+            )
+            if new_parents_last_descendant_next_element is not None:
+                # TODO-COVERAGE: This code has no test coverage and
+                # I'm not sure how to get html5lib to go through this
+                # path, but it's just the other side of the previous
+                # line.
+                new_parents_last_descendant_next_element.previous_element = (
+                    last_childs_last_descendant
+                )
+            last_childs_last_descendant.next_sibling = None
+
+        for child in to_append:
+            child.parent = new_parent_element
+            new_parent_element.contents.append(child)
+
+        # Now that this element has no children, change its .next_element.
+        element.contents = []
+        element.next_element = final_next_element
+
+        # print("DONE WITH MOVE")
+        # print("FROM", self.element)
+        # print("TO", new_parent_element)
+
+    # TODO-TYPING: typeshed stubs are incorrect about this;
+    # hasContent returns a boolean, not None.
+    def hasContent(self) -> bool:
+        return len(self.element.contents) > 0
+
+    # TODO-TYPING: typeshed stubs are incorrect about this;
+    # cloneNode returns a new Node, not None.
+    def cloneNode(self) -> treebuilder_base.Node:
+        tag = self.soup.new_tag(self.element.name, self.namespace)
+        node = Element(tag, self.soup, self.namespace)
+        for key, value in self.attributes:
+            node.attributes[key] = value
+        return node
+
+    def getNameTuple(self) -> Tuple[Optional[_NamespaceURL], str]:
+        if self.namespace is None:
+            return namespaces["html"], self.name
+        else:
+            return self.namespace, self.name
+
+    nameTuple = property(getNameTuple)
+
+
+class TextNode(BeautifulSoupNode):
+    element: NavigableString
+
+    def __init__(self, element: NavigableString, soup: "BeautifulSoup"):
+        treebuilder_base.Node.__init__(self, None)
+        self.element = element
+        self.soup = soup
diff --git a/.venv/lib/python3.12/site-packages/bs4/builder/_htmlparser.py b/.venv/lib/python3.12/site-packages/bs4/builder/_htmlparser.py
new file mode 100644
index 00000000..417f7dc4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/bs4/builder/_htmlparser.py
@@ -0,0 +1,474 @@
+# encoding: utf-8
+"""Use the HTMLParser library to parse HTML files that aren't too bad."""
+from __future__ import annotations
+
+# Use of this source code is governed by the MIT license.
+__license__ = "MIT"
+
+__all__ = [
+    "HTMLParserTreeBuilder",
+]
+
+from html.parser import HTMLParser
+
+from typing import (
+    Any,
+    Callable,
+    cast,
+    Dict,
+    Iterable,
+    List,
+    Optional,
+    TYPE_CHECKING,
+    Tuple,
+    Type,
+    Union,
+)
+
+from bs4.element import (
+    AttributeDict,
+    CData,
+    Comment,
+    Declaration,
+    Doctype,
+    ProcessingInstruction,
+)
+from bs4.dammit import EntitySubstitution, UnicodeDammit
+
+from bs4.builder import (
+    DetectsXMLParsedAsHTML,
+    HTML,
+    HTMLTreeBuilder,
+    STRICT,
+)
+
+from bs4.exceptions import ParserRejectedMarkup
+
+if TYPE_CHECKING:
+    from bs4 import BeautifulSoup
+    from bs4.element import NavigableString
+    from bs4._typing import (
+        _Encoding,
+        _Encodings,
+        _RawMarkup,
+    )
+
+HTMLPARSER = "html.parser"
+
+_DuplicateAttributeHandler = Callable[[Dict[str, str], str, str], None]
+
+
+class BeautifulSoupHTMLParser(HTMLParser, DetectsXMLParsedAsHTML):
+    #: Constant to handle duplicate attributes by replacing earlier values
+    #: with later ones.
+    REPLACE: str = "replace"
+
+    #: Constant to handle duplicate attributes by ignoring later values
+    #: and keeping the earlier ones.
+ IGNORE: str = "ignore" + + """A subclass of the Python standard library's HTMLParser class, which + listens for HTMLParser events and translates them into calls + to Beautiful Soup's tree construction API. + + :param on_duplicate_attribute: A strategy for what to do if a + tag includes the same attribute more than once. Accepted + values are: REPLACE (replace earlier values with later + ones, the default), IGNORE (keep the earliest value + encountered), or a callable. A callable must take three + arguments: the dictionary of attributes already processed, + the name of the duplicate attribute, and the most recent value + encountered. + """ + + def __init__( + self, + soup: BeautifulSoup, + *args: Any, + on_duplicate_attribute: Union[str, _DuplicateAttributeHandler] = REPLACE, + **kwargs: Any, + ): + self.soup = soup + self.on_duplicate_attribute = on_duplicate_attribute + self.attribute_dict_class = soup.builder.attribute_dict_class + HTMLParser.__init__(self, *args, **kwargs) + + # Keep a list of empty-element tags that were encountered + # without an explicit closing tag. If we encounter a closing tag + # of this type, we'll associate it with one of those entries. + # + # This isn't a stack because we don't care about the + # order. It's a list of closing tags we've already handled and + # will ignore, assuming they ever show up. + self.already_closed_empty_element = [] + + self._initialize_xml_detector() + + on_duplicate_attribute: Union[str, _DuplicateAttributeHandler] + already_closed_empty_element: List[str] + soup: BeautifulSoup + + def error(self, message: str) -> None: + # NOTE: This method is required so long as Python 3.9 is + # supported. The corresponding code is removed from HTMLParser + # in 3.5, but not removed from ParserBase until 3.10. + # https://github.com/python/cpython/issues/76025 + # + # The original implementation turned the error into a warning, + # but in every case I discovered, this made HTMLParser + # immediately crash with an error message that was less + # helpful than the warning. The new implementation makes it + # more clear that html.parser just can't parse this + # markup. The 3.10 implementation does the same, though it + # raises AssertionError rather than calling a method. (We + # catch this error and wrap it in a ParserRejectedMarkup.) + raise ParserRejectedMarkup(message) + + def handle_startendtag( + self, name: str, attrs: List[Tuple[str, Optional[str]]] + ) -> None: + """Handle an incoming empty-element tag. + + html.parser only calls this method when the markup looks like + <tag/>. + """ + # `handle_empty_element` tells handle_starttag not to close the tag + # just because its name matches a known empty-element tag. We + # know that this is an empty-element tag, and we want to call + # handle_endtag ourselves. + self.handle_starttag(name, attrs, handle_empty_element=False) + self.handle_endtag(name) + + def handle_starttag( + self, + name: str, + attrs: List[Tuple[str, Optional[str]]], + handle_empty_element: bool = True, + ) -> None: + """Handle an opening tag, e.g. '<tag>' + + :param handle_empty_element: True if this tag is known to be + an empty-element tag (i.e. there is not expected to be any + closing tag). + """ + # TODO: handle namespaces here? + attr_dict: AttributeDict = self.attribute_dict_class() + for key, value in attrs: + # Change None attribute values to the empty string + # for consistency with the other tree builders. 
+            if value is None:
+                value = ""
+            if key in attr_dict:
+                # A single attribute shows up multiple times in this
+                # tag. How to handle it depends on the
+                # on_duplicate_attribute setting.
+                on_dupe = self.on_duplicate_attribute
+                if on_dupe == self.IGNORE:
+                    pass
+                elif on_dupe in (None, self.REPLACE):
+                    attr_dict[key] = value
+                else:
+                    on_dupe = cast(_DuplicateAttributeHandler, on_dupe)
+                    on_dupe(attr_dict, key, value)
+            else:
+                attr_dict[key] = value
+        # print("START", name)
+        sourceline: Optional[int]
+        sourcepos: Optional[int]
+        if self.soup.builder.store_line_numbers:
+            sourceline, sourcepos = self.getpos()
+        else:
+            sourceline = sourcepos = None
+        tag = self.soup.handle_starttag(
+            name, None, None, attr_dict, sourceline=sourceline, sourcepos=sourcepos
+        )
+        if tag and tag.is_empty_element and handle_empty_element:
+            # Unlike other parsers, html.parser doesn't send separate end tag
+            # events for empty-element tags. (It's handled in
+            # handle_startendtag, but only if the original markup looked like
+            # <tag/>.)
+            #
+            # So we need to call handle_endtag() ourselves. Since we
+            # know the start event is identical to the end event, we
+            # don't want handle_endtag() to cross off any previous end
+            # events for tags of this name.
+            self.handle_endtag(name, check_already_closed=False)
+
+            # But we might encounter an explicit closing tag for this tag
+            # later on. If so, we want to ignore it.
+            self.already_closed_empty_element.append(name)
+
+        if self._root_tag_name is None:
+            self._root_tag_encountered(name)
+
+    def handle_endtag(self, name: str, check_already_closed: bool = True) -> None:
+        """Handle a closing tag, e.g. '</tag>'
+
+        :param name: A tag name.
+        :param check_already_closed: True if this tag is expected to
+           be the closing portion of an empty-element tag,
+           e.g. '<tag></tag>'.
+        """
+        # print("END", name)
+        if check_already_closed and name in self.already_closed_empty_element:
+            # This is a redundant end tag for an empty-element tag.
+            # We've already called handle_endtag() for it, so just
+            # check it off the list.
+            # print("ALREADY CLOSED", name)
+            self.already_closed_empty_element.remove(name)
+        else:
+            self.soup.handle_endtag(name)
+
+    def handle_data(self, data: str) -> None:
+        """Handle some textual data that shows up between tags."""
+        self.soup.handle_data(data)
+
+    def handle_charref(self, name: str) -> None:
+        """Handle a numeric character reference by converting it to the
+        corresponding Unicode character and treating it as textual
+        data.
+
+        :param name: Character number, possibly in hexadecimal.
+        """
+        # TODO: This was originally a workaround for a bug in
+        # HTMLParser. (http://bugs.python.org/issue13633) The bug has
+        # been fixed, but removing this code still makes some
+        # Beautiful Soup tests fail. This needs investigation.
+        if name.startswith("x"):
+            real_name = int(name.lstrip("x"), 16)
+        elif name.startswith("X"):
+            real_name = int(name.lstrip("X"), 16)
+        else:
+            real_name = int(name)
+
+        data = None
+        if real_name < 256:
+            # HTML numeric entities are supposed to reference Unicode
+            # code points, but sometimes they reference code points in
+            # some other encoding (ahem, Windows-1252). E.g. &#147;
+            # instead of &#8220; for LEFT DOUBLE QUOTATION MARK. This
+            # code tries to detect this situation and compensate.
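# The Windows-1252 fallback described above, observed from the outside:
# &#147; is not a valid code point for a left double quotation mark, but
# the parser decodes byte 0x93 as Windows-1252 and recovers the quote.

from bs4 import BeautifulSoup

print(BeautifulSoup("<p>&#147;hi&#148;</p>", "html.parser").p.string)
# “hi”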
+            for encoding in (self.soup.original_encoding, "windows-1252"):
+                if not encoding:
+                    continue
+                try:
+                    data = bytearray([real_name]).decode(encoding)
+                except UnicodeDecodeError:
+                    pass
+            if not data:
+                try:
+                    data = chr(real_name)
+                except (ValueError, OverflowError):
+                    pass
+        data = data or "\N{REPLACEMENT CHARACTER}"
+        self.handle_data(data)
+
+    def handle_entityref(self, name: str) -> None:
+        """Handle a named entity reference by converting it to the
+        corresponding Unicode character(s) and treating it as textual
+        data.
+
+        :param name: Name of the entity reference.
+        """
+        character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
+        if character is not None:
+            data = character
+        else:
+            # If this were XML, it would be ambiguous whether "&foo"
+            # was a character entity reference with a missing
+            # semicolon or the literal string "&foo". Since this is
+            # HTML, we have a complete list of all character entity references,
+            # and this one wasn't found, so assume it's the literal string "&foo".
+            data = "&%s" % name
+        self.handle_data(data)
+
+    def handle_comment(self, data: str) -> None:
+        """Handle an HTML comment.
+
+        :param data: The text of the comment.
+        """
+        self.soup.endData()
+        self.soup.handle_data(data)
+        self.soup.endData(Comment)
+
+    def handle_decl(self, data: str) -> None:
+        """Handle a DOCTYPE declaration.
+
+        :param data: The text of the declaration.
+        """
+        self.soup.endData()
+        data = data[len("DOCTYPE ") :]
+        self.soup.handle_data(data)
+        self.soup.endData(Doctype)
+
+    def unknown_decl(self, data: str) -> None:
+        """Handle a declaration of unknown type -- probably a CDATA block.
+
+        :param data: The text of the declaration.
+        """
+        cls: Type[NavigableString]
+        if data.upper().startswith("CDATA["):
+            cls = CData
+            data = data[len("CDATA[") :]
+        else:
+            cls = Declaration
+        self.soup.endData()
+        self.soup.handle_data(data)
+        self.soup.endData(cls)
+
+    def handle_pi(self, data: str) -> None:
+        """Handle a processing instruction.
+
+        :param data: The text of the instruction.
+        """
+        self.soup.endData()
+        self.soup.handle_data(data)
+        self._document_might_be_xml(data)
+        self.soup.endData(ProcessingInstruction)
+
+
+class HTMLParserTreeBuilder(HTMLTreeBuilder):
+    """A Beautiful Soup `bs4.builder.TreeBuilder` that uses the
+    :py:class:`html.parser.HTMLParser` parser, found in the Python
+    standard library.
+
+    """
+
+    is_xml: bool = False
+    picklable: bool = True
+    NAME: str = HTMLPARSER
+    features: Iterable[str] = [NAME, HTML, STRICT]
+    parser_args: Tuple[Iterable[Any], Dict[str, Any]]
+
+    #: The html.parser knows which line number and position in the
+    #: original file is the source of an element.
+    TRACKS_LINE_NUMBERS: bool = True
+
+    def __init__(
+        self,
+        parser_args: Optional[Iterable[Any]] = None,
+        parser_kwargs: Optional[Dict[str, Any]] = None,
+        **kwargs: Any,
+    ):
+        """Constructor.
+
+        :param parser_args: Positional arguments to pass into
+            the BeautifulSoupHTMLParser constructor, once it's
+            invoked.
+        :param parser_kwargs: Keyword arguments to pass into
+            the BeautifulSoupHTMLParser constructor, once it's
+            invoked.
+        :param kwargs: Keyword arguments for the superclass constructor.
+        """
+        # Some keyword arguments will be pulled out of kwargs and placed
+        # into parser_kwargs.
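# The on_duplicate_attribute strategy pulled out of kwargs here is the
# same one documented on BeautifulSoupHTMLParser above; a sketch:

from bs4 import BeautifulSoup

markup = '<a href="first" href="second">x</a>'
print(BeautifulSoup(markup, "html.parser").a["href"])  # second (REPLACE default)
print(BeautifulSoup(markup, "html.parser",
                    on_duplicate_attribute="ignore").a["href"])  # first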
+ extra_parser_kwargs = dict() + for arg in ("on_duplicate_attribute",): + if arg in kwargs: + value = kwargs.pop(arg) + extra_parser_kwargs[arg] = value + super(HTMLParserTreeBuilder, self).__init__(**kwargs) + parser_args = parser_args or [] + parser_kwargs = parser_kwargs or {} + parser_kwargs.update(extra_parser_kwargs) + parser_kwargs["convert_charrefs"] = False + self.parser_args = (parser_args, parser_kwargs) + + def prepare_markup( + self, + markup: _RawMarkup, + user_specified_encoding: Optional[_Encoding] = None, + document_declared_encoding: Optional[_Encoding] = None, + exclude_encodings: Optional[_Encodings] = None, + ) -> Iterable[Tuple[str, Optional[_Encoding], Optional[_Encoding], bool]]: + """Run any preliminary steps necessary to make incoming markup + acceptable to the parser. + + :param markup: Some markup -- probably a bytestring. + :param user_specified_encoding: The user asked to try this encoding. + :param document_declared_encoding: The markup itself claims to be + in this encoding. + :param exclude_encodings: The user asked _not_ to try any of + these encodings. + + :yield: A series of 4-tuples: (markup, encoding, declared encoding, + has undergone character replacement) + + Each 4-tuple represents a strategy for parsing the document. + This TreeBuilder uses Unicode, Dammit to convert the markup + into Unicode, so the ``markup`` element of the tuple will + always be a string. + """ + if isinstance(markup, str): + # Parse Unicode as-is. + yield (markup, None, None, False) + return + + # Ask UnicodeDammit to sniff the most likely encoding. + + known_definite_encodings: List[_Encoding] = [] + if user_specified_encoding: + # This was provided by the end-user; treat it as a known + # definite encoding per the algorithm laid out in the + # HTML5 spec. (See the EncodingDetector class for + # details.) + known_definite_encodings.append(user_specified_encoding) + + user_encodings: List[_Encoding] = [] + if document_declared_encoding: + # This was found in the document; treat it as a slightly + # lower-priority user encoding. + user_encodings.append(document_declared_encoding) + + dammit = UnicodeDammit( + markup, + known_definite_encodings=known_definite_encodings, + user_encodings=user_encodings, + is_html=True, + exclude_encodings=exclude_encodings, + ) + + if dammit.unicode_markup is None: + # In every case I've seen, Unicode, Dammit is able to + # convert the markup into Unicode, even if it needs to use + # REPLACEMENT CHARACTER. But there is a code path that + # could result in unicode_markup being None, and + # HTMLParser can only parse Unicode, so here we handle + # that code path. + raise ParserRejectedMarkup( + "Could not convert input to Unicode, and html.parser will not accept bytestrings." + ) + else: + yield ( + dammit.unicode_markup, + dammit.original_encoding, + dammit.declared_html_encoding, + dammit.contains_replacement_characters, + ) + + def feed(self, markup: _RawMarkup) -> None: + args, kwargs = self.parser_args + + # HTMLParser.feed will only handle str, but + # BeautifulSoup.markup is allowed to be _RawMarkup, because + # it's set by the yield value of + # TreeBuilder.prepare_markup. Fortunately, + # HTMLParserTreeBuilder.prepare_markup always yields a str + # (UnicodeDammit.unicode_markup). + assert isinstance(markup, str) + + # We know BeautifulSoup calls TreeBuilder.initialize_soup + # before calling feed(), so we can assume self.soup + # is set. 
+ assert self.soup is not None + parser = BeautifulSoupHTMLParser(self.soup, *args, **kwargs) + + try: + parser.feed(markup) + parser.close() + except AssertionError as e: + # html.parser raises AssertionError in rare cases to + # indicate a fatal problem with the markup, especially + # when there's an error in the doctype declaration. + raise ParserRejectedMarkup(e) + parser.already_closed_empty_element = [] diff --git a/.venv/lib/python3.12/site-packages/bs4/builder/_lxml.py b/.venv/lib/python3.12/site-packages/bs4/builder/_lxml.py new file mode 100644 index 00000000..1f367da3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/builder/_lxml.py @@ -0,0 +1,490 @@ +# encoding: utf-8 +from __future__ import annotations + +# Use of this source code is governed by the MIT license. +__license__ = "MIT" + +__all__ = [ + "LXMLTreeBuilderForXML", + "LXMLTreeBuilder", +] + + +from typing import ( + Any, + Dict, + Iterable, + List, + Optional, + Set, + Tuple, + Type, + TYPE_CHECKING, + Union, +) +from typing_extensions import TypeAlias + +from io import BytesIO +from io import StringIO +from lxml import etree +from bs4.element import ( + AttributeDict, + XMLAttributeDict, + Comment, + Doctype, + NamespacedAttribute, + ProcessingInstruction, + XMLProcessingInstruction, +) +from bs4.builder import ( + DetectsXMLParsedAsHTML, + FAST, + HTML, + HTMLTreeBuilder, + PERMISSIVE, + TreeBuilder, + XML, +) +from bs4.dammit import EncodingDetector +from bs4.exceptions import ParserRejectedMarkup + +if TYPE_CHECKING: + from bs4._typing import ( + _Encoding, + _Encodings, + _NamespacePrefix, + _NamespaceURL, + _NamespaceMapping, + _InvertedNamespaceMapping, + _RawMarkup, + ) + from bs4 import BeautifulSoup + +LXML: str = "lxml" + + +def _invert(d: dict[Any, Any]) -> dict[Any, Any]: + "Invert a dictionary." + return dict((v, k) for k, v in list(d.items())) + + +_LXMLParser: TypeAlias = Union[etree.XMLParser, etree.HTMLParser] +_ParserOrParserClass: TypeAlias = Union[ + _LXMLParser, Type[etree.XMLParser], Type[etree.HTMLParser] +] + + +class LXMLTreeBuilderForXML(TreeBuilder): + DEFAULT_PARSER_CLASS: Type[etree.XMLParser] = etree.XMLParser + + is_xml: bool = True + + processing_instruction_class: Type[ProcessingInstruction] + + NAME: str = "lxml-xml" + ALTERNATE_NAMES: Iterable[str] = ["xml"] + + # Well, it's permissive by XML parser standards. + features: Iterable[str] = [NAME, LXML, XML, FAST, PERMISSIVE] + + CHUNK_SIZE: int = 512 + + # This namespace mapping is specified in the XML Namespace + # standard. + DEFAULT_NSMAPS: _NamespaceMapping = dict(xml="http://www.w3.org/XML/1998/namespace") + + DEFAULT_NSMAPS_INVERTED: _InvertedNamespaceMapping = _invert(DEFAULT_NSMAPS) + + nsmaps: List[Optional[_InvertedNamespaceMapping]] + empty_element_tags: Set[str] + parser: Any + _default_parser: Optional[etree.XMLParser] + + # NOTE: If we parsed Element objects and looked at .sourceline, + # we'd be able to see the line numbers from the original document. + # But instead we build an XMLParser or HTMLParser object to serve + # as the target of parse messages, and those messages don't include + # line numbers. + # See: https://bugs.launchpad.net/lxml/+bug/1846906 + + def initialize_soup(self, soup: BeautifulSoup) -> None: + """Let the BeautifulSoup object know about the standard namespace + mapping. + + :param soup: A `BeautifulSoup`. + """ + # Beyond this point, self.soup is set, so we can assume (and + # assert) it's not None whenever necessary. 
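# A sketch of this XML builder in use, assuming lxml is installed;
# features="xml" routes here through the registry.

from bs4 import BeautifulSoup

soup = BeautifulSoup('<?xml version="1.0"?><doc><child>text</child></doc>', "xml")
print(soup.child.string)  # text
print(soup)               # output is serialized with an XML declaration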
+ super(LXMLTreeBuilderForXML, self).initialize_soup(soup) + self._register_namespaces(self.DEFAULT_NSMAPS) + + def _register_namespaces(self, mapping: Dict[str, str]) -> None: + """Let the BeautifulSoup object know about namespaces encountered + while parsing the document. + + This might be useful later on when creating CSS selectors. + + This will track (almost) all namespaces, even ones that were + only in scope for part of the document. If two namespaces have + the same prefix, only the first one encountered will be + tracked. Un-prefixed namespaces are not tracked. + + :param mapping: A dictionary mapping namespace prefixes to URIs. + """ + assert self.soup is not None + for key, value in list(mapping.items()): + # This is 'if key' and not 'if key is not None' because we + # don't track un-prefixed namespaces. Soupselect will + # treat an un-prefixed namespace as the default, which + # causes confusion in some cases. + if key and key not in self.soup._namespaces: + # Let the BeautifulSoup object know about a new namespace. + # If there are multiple namespaces defined with the same + # prefix, the first one in the document takes precedence. + self.soup._namespaces[key] = value + + def default_parser(self, encoding: Optional[_Encoding]) -> _ParserOrParserClass: + """Find the default parser for the given encoding. + + :return: Either a parser object or a class, which + will be instantiated with default arguments. + """ + if self._default_parser is not None: + return self._default_parser + return self.DEFAULT_PARSER_CLASS(target=self, recover=True, encoding=encoding) + + def parser_for(self, encoding: Optional[_Encoding]) -> _LXMLParser: + """Instantiate an appropriate parser for the given encoding. + + :param encoding: A string. + :return: A parser object such as an `etree.XMLParser`. + """ + # Use the default parser. + parser = self.default_parser(encoding) + + if callable(parser): + # Instantiate the parser with default arguments + parser = parser(target=self, recover=True, encoding=encoding) + return parser + + def __init__( + self, + parser: Optional[etree.XMLParser] = None, + empty_element_tags: Optional[Set[str]] = None, + **kwargs: Any, + ): + # TODO: Issue a warning if parser is present but not a + # callable, since that means there's no way to create new + # parsers for different encodings. + self._default_parser = parser + self.soup = None + self.nsmaps = [self.DEFAULT_NSMAPS_INVERTED] + self.active_namespace_prefixes = [dict(self.DEFAULT_NSMAPS)] + if "attribute_dict_class" not in kwargs: + kwargs["attribute_dict_class"] = XMLAttributeDict + super(LXMLTreeBuilderForXML, self).__init__(**kwargs) + + def _getNsTag(self, tag: str) -> Tuple[Optional[str], str]: + # Split the namespace URL out of a fully-qualified lxml tag + # name. Copied from lxml's src/lxml/sax.py. + if tag[0] == "{": + namespace, name = tag[1:].split("}", 1) + return (namespace, name) + else: + return (None, tag) + + def prepare_markup( + self, + markup: _RawMarkup, + user_specified_encoding: Optional[_Encoding] = None, + document_declared_encoding: Optional[_Encoding] = None, + exclude_encodings: Optional[_Encodings] = None, + ) -> Iterable[ + Tuple[Union[str, bytes], Optional[_Encoding], Optional[_Encoding], bool] + ]: + """Run any preliminary steps necessary to make incoming markup + acceptable to the parser. + + lxml really wants to get a bytestring and convert it to + Unicode itself. 
So instead of using UnicodeDammit to convert + the bytestring to Unicode using different encodings, this + implementation uses EncodingDetector to iterate over the + encodings, and tell lxml to try to parse the document as each + one in turn. + + :param markup: Some markup -- hopefully a bytestring. + :param user_specified_encoding: The user asked to try this encoding. + :param document_declared_encoding: The markup itself claims to be + in this encoding. + :param exclude_encodings: The user asked _not_ to try any of + these encodings. + + :yield: A series of 4-tuples: (markup, encoding, declared encoding, + has undergone character replacement) + + Each 4-tuple represents a strategy for converting the + document to Unicode and parsing it. Each strategy will be tried + in turn. + """ + is_html = not self.is_xml + if is_html: + self.processing_instruction_class = ProcessingInstruction + # We're in HTML mode, so if we're given XML, that's worth + # noting. + DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(markup, stacklevel=3) + else: + self.processing_instruction_class = XMLProcessingInstruction + + if isinstance(markup, str): + # We were given Unicode. Maybe lxml can parse Unicode on + # this system? + + # TODO: This is a workaround for + # https://bugs.launchpad.net/lxml/+bug/1948551. + # We can remove it once the upstream issue is fixed. + if len(markup) > 0 and markup[0] == "\N{BYTE ORDER MARK}": + markup = markup[1:] + yield markup, None, document_declared_encoding, False + + if isinstance(markup, str): + # No, apparently not. Convert the Unicode to UTF-8 and + # tell lxml to parse it as UTF-8. + yield (markup.encode("utf8"), "utf8", document_declared_encoding, False) + + # Since the document was Unicode in the first place, there + # is no need to try any more strategies; we know this will + # work. + return + + known_definite_encodings: List[_Encoding] = [] + if user_specified_encoding: + # This was provided by the end-user; treat it as a known + # definite encoding per the algorithm laid out in the + # HTML5 spec. (See the EncodingDetector class for + # details.) + known_definite_encodings.append(user_specified_encoding) + + user_encodings: List[_Encoding] = [] + if document_declared_encoding: + # This was found in the document; treat it as a slightly + # lower-priority user encoding. + user_encodings.append(document_declared_encoding) + + detector = EncodingDetector( + markup, + known_definite_encodings=known_definite_encodings, + user_encodings=user_encodings, + is_html=is_html, + exclude_encodings=exclude_encodings, + ) + for encoding in detector.encodings: + yield (detector.markup, encoding, document_declared_encoding, False) + + def feed(self, markup: _RawMarkup) -> None: + io: Union[BytesIO, StringIO] + if isinstance(markup, bytes): + io = BytesIO(markup) + elif isinstance(markup, str): + io = StringIO(markup) + + # initialize_soup is called before feed, so we know this + # is not None. + assert self.soup is not None + + # Call feed() at least once, even if the markup is empty, + # or the parser won't be initialized. + data = io.read(self.CHUNK_SIZE) + try: + self.parser = self.parser_for(self.soup.original_encoding) + self.parser.feed(data) + while len(data) != 0: + # Now call feed() on the rest of the data, chunk by chunk. 
+ data = io.read(self.CHUNK_SIZE)
+ if len(data) != 0:
+ self.parser.feed(data)
+ self.parser.close()
+ except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
+ raise ParserRejectedMarkup(e)
+
+ def close(self) -> None:
+ self.nsmaps = [self.DEFAULT_NSMAPS_INVERTED]
+
+ def start(
+ self,
+ tag: str | bytes,
+ attrs: Dict[str | bytes, str | bytes],
+ nsmap: _NamespaceMapping = {},
+ ) -> None:
+ # This is called by lxml code as a result of calling
+ # BeautifulSoup.feed(), and we know self.soup is set by the time feed()
+ # is called.
+ assert self.soup is not None
+ assert isinstance(tag, str)
+
+ # We need to recreate the attribute dict for three
+ # reasons. First, for type checking, so we can assert there
+ # are no bytestrings in the keys or values. Second, because we
+ # need a mutable dict--lxml might send us an immutable
+ # dictproxy. Third, so we can handle namespaced attribute
+ # names by converting the keys to NamespacedAttributes.
+ new_attrs: Dict[Union[str, NamespacedAttribute], str] = (
+ self.attribute_dict_class()
+ )
+ for k, v in attrs.items():
+ assert isinstance(k, str)
+ assert isinstance(v, str)
+ new_attrs[k] = v
+
+ nsprefix: Optional[_NamespacePrefix] = None
+ namespace: Optional[_NamespaceURL] = None
+ # Invert each namespace map as it comes in.
+ if len(nsmap) == 0 and len(self.nsmaps) > 1:
+ # There are no new namespaces for this tag, but
+ # non-default namespaces are in play, so we need a
+ # separate tag stack to know when they end.
+ self.nsmaps.append(None)
+ elif len(nsmap) > 0:
+ # A new namespace mapping has come into play.
+
+ # First, let the BeautifulSoup object know about it.
+ self._register_namespaces(nsmap)
+
+ # Then, add it to our running list of inverted namespace
+ # mappings.
+ self.nsmaps.append(_invert(nsmap))
+
+ # The currently active namespace prefixes have
+ # changed. Calculate the new mapping so it can be stored
+ # with all Tag objects created while these prefixes are in
+ # scope.
+ current_mapping = dict(self.active_namespace_prefixes[-1])
+ current_mapping.update(nsmap)
+
+ # We should not track un-prefixed namespaces as we can only hold one
+ # and it will be recognized as the default namespace by soupsieve,
+ # which may be confusing in some situations.
+ if "" in current_mapping:
+ del current_mapping[""]
+ self.active_namespace_prefixes.append(current_mapping)
+
+ # Also treat the namespace mapping as a set of attributes on the
+ # tag, so we can recreate it later.
+ for prefix, namespace in list(nsmap.items()):
+ attribute = NamespacedAttribute(
+ "xmlns", prefix, "http://www.w3.org/2000/xmlns/"
+ )
+ new_attrs[attribute] = namespace
+
+ # Namespaces are in play. Find any attributes that came in
+ # from lxml with namespaces attached to their names, and
+ # turn them into NamespacedAttribute objects.
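+ # (For instance, lxml reports a namespaced attribute in Clark
+ # notation, e.g. "{http://www.w3.org/1999/xlink}href"; the loop
+ # below splits off the URL and yields something like
+ # NamespacedAttribute("xlink", "href", ...), assuming "xlink" is
+ # the active prefix for that URL.)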
+ final_attrs: AttributeDict = self.attribute_dict_class() + for attr, value in list(new_attrs.items()): + namespace, attr = self._getNsTag(attr) + if namespace is None: + final_attrs[attr] = value + else: + nsprefix = self._prefix_for_namespace(namespace) + attr = NamespacedAttribute(nsprefix, attr, namespace) + final_attrs[attr] = value + + namespace, tag = self._getNsTag(tag) + nsprefix = self._prefix_for_namespace(namespace) + self.soup.handle_starttag( + tag, + namespace, + nsprefix, + final_attrs, + namespaces=self.active_namespace_prefixes[-1], + ) + + def _prefix_for_namespace( + self, namespace: Optional[_NamespaceURL] + ) -> Optional[_NamespacePrefix]: + """Find the currently active prefix for the given namespace.""" + if namespace is None: + return None + for inverted_nsmap in reversed(self.nsmaps): + if inverted_nsmap is not None and namespace in inverted_nsmap: + return inverted_nsmap[namespace] + return None + + def end(self, name: str | bytes) -> None: + assert self.soup is not None + assert isinstance(name, str) + self.soup.endData() + namespace, name = self._getNsTag(name) + nsprefix = None + if namespace is not None: + for inverted_nsmap in reversed(self.nsmaps): + if inverted_nsmap is not None and namespace in inverted_nsmap: + nsprefix = inverted_nsmap[namespace] + break + self.soup.handle_endtag(name, nsprefix) + if len(self.nsmaps) > 1: + # This tag, or one of its parents, introduced a namespace + # mapping, so pop it off the stack. + out_of_scope_nsmap = self.nsmaps.pop() + + if out_of_scope_nsmap is not None: + # This tag introduced a namespace mapping which is no + # longer in scope. Recalculate the currently active + # namespace prefixes. + self.active_namespace_prefixes.pop() + + def pi(self, target: str, data: str) -> None: + assert self.soup is not None + self.soup.endData() + data = target + " " + data + self.soup.handle_data(data) + self.soup.endData(self.processing_instruction_class) + + def data(self, data: str | bytes) -> None: + assert self.soup is not None + assert isinstance(data, str) + self.soup.handle_data(data) + + def doctype(self, name: str, pubid: str, system: str) -> None: + assert self.soup is not None + self.soup.endData() + doctype_string = Doctype._string_for_name_and_ids(name, pubid, system) + self.soup.handle_data(doctype_string) + self.soup.endData(containerClass=Doctype) + + def comment(self, text: str | bytes) -> None: + "Handle comments as Comment objects." + assert self.soup is not None + assert isinstance(text, str) + self.soup.endData() + self.soup.handle_data(text) + self.soup.endData(Comment) + + def test_fragment_to_document(self, fragment: str) -> str: + """See `TreeBuilder`.""" + return '<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment + + +class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML): + NAME: str = LXML + ALTERNATE_NAMES: Iterable[str] = ["lxml-html"] + + features: Iterable[str] = list(ALTERNATE_NAMES) + [NAME, HTML, FAST, PERMISSIVE] + is_xml: bool = False + + def default_parser(self, encoding: Optional[_Encoding]) -> _ParserOrParserClass: + return etree.HTMLParser + + def feed(self, markup: _RawMarkup) -> None: + # We know self.soup is set by the time feed() is called. 
+ assert self.soup is not None + encoding = self.soup.original_encoding + try: + self.parser = self.parser_for(encoding) + self.parser.feed(markup) + self.parser.close() + except (UnicodeDecodeError, LookupError, etree.ParserError) as e: + raise ParserRejectedMarkup(e) + + def test_fragment_to_document(self, fragment: str) -> str: + """See `TreeBuilder`.""" + return "<html><body>%s</body></html>" % fragment diff --git a/.venv/lib/python3.12/site-packages/bs4/css.py b/.venv/lib/python3.12/site-packages/bs4/css.py new file mode 100644 index 00000000..c20850fc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/css.py @@ -0,0 +1,338 @@ +"""Integration code for CSS selectors using `Soup Sieve <https://facelessuser.github.io/soupsieve/>`_ (pypi: ``soupsieve``). + +Acquire a `CSS` object through the `element.Tag.css` attribute of +the starting point of your CSS selector, or (if you want to run a +selector against the entire document) of the `BeautifulSoup` object +itself. + +The main advantage of doing this instead of using ``soupsieve`` +functions is that you don't need to keep passing the `element.Tag` to be +selected against, since the `CSS` object is permanently scoped to that +`element.Tag`. + +""" + +from __future__ import annotations + +from types import ModuleType +from typing import ( + Any, + cast, + Iterable, + Iterator, + Optional, + TYPE_CHECKING, +) +import warnings +from bs4._typing import _NamespaceMapping + +if TYPE_CHECKING: + from soupsieve import SoupSieve + from bs4 import element + from bs4.element import ResultSet, Tag + +soupsieve: Optional[ModuleType] +try: + import soupsieve +except ImportError: + soupsieve = None + warnings.warn( + "The soupsieve package is not installed. CSS selectors cannot be used." + ) + + +class CSS(object): + """A proxy object against the ``soupsieve`` library, to simplify its + CSS selector API. + + You don't need to instantiate this class yourself; instead, use + `element.Tag.css`. + + :param tag: All CSS selectors run by this object will use this as + their starting point. + + :param api: An optional drop-in replacement for the ``soupsieve`` module, + intended for use in unit tests. + """ + + def __init__(self, tag: element.Tag, api: Optional[ModuleType] = None): + if api is None: + api = soupsieve + if api is None: + raise NotImplementedError( + "Cannot execute CSS selectors because the soupsieve package is not installed." + ) + self.api = api + self.tag = tag + + def escape(self, ident: str) -> str: + """Escape a CSS identifier. + + This is a simple wrapper around `soupsieve.escape() <https://facelessuser.github.io/soupsieve/api/#soupsieveescape>`_. See the + documentation for that function for more information. + """ + if soupsieve is None: + raise NotImplementedError( + "Cannot escape CSS identifiers because the soupsieve package is not installed." + ) + return cast(str, self.api.escape(ident)) + + def _ns( + self, ns: Optional[_NamespaceMapping], select: str + ) -> Optional[_NamespaceMapping]: + """Normalize a dictionary of namespaces.""" + if not isinstance(select, self.api.SoupSieve) and ns is None: + # If the selector is a precompiled pattern, it already has + # a namespace context compiled in, which cannot be + # replaced. + ns = self.tag._namespaces + return ns + + def _rs(self, results: Iterable[Tag]) -> ResultSet[Tag]: + """Normalize a list of results to a py:class:`ResultSet`. 
+ + A py:class:`ResultSet` is more consistent with the rest of + Beautiful Soup's API, and :py:meth:`ResultSet.__getattr__` has + a helpful error message if you try to treat a list of results + as a single result (a common mistake). + """ + # Import here to avoid circular import + from bs4 import ResultSet + + return ResultSet(None, results) + + def compile( + self, + select: str, + namespaces: Optional[_NamespaceMapping] = None, + flags: int = 0, + **kwargs: Any, + ) -> SoupSieve: + """Pre-compile a selector and return the compiled object. + + :param selector: A CSS selector. + + :param namespaces: A dictionary mapping namespace prefixes + used in the CSS selector to namespace URIs. By default, + Beautiful Soup will use the prefixes it encountered while + parsing the document. + + :param flags: Flags to be passed into Soup Sieve's + `soupsieve.compile() <https://facelessuser.github.io/soupsieve/api/#soupsievecompile>`_ method. + + :param kwargs: Keyword arguments to be passed into Soup Sieve's + `soupsieve.compile() <https://facelessuser.github.io/soupsieve/api/#soupsievecompile>`_ method. + + :return: A precompiled selector object. + :rtype: soupsieve.SoupSieve + """ + return self.api.compile(select, self._ns(namespaces, select), flags, **kwargs) + + def select_one( + self, + select: str, + namespaces: Optional[_NamespaceMapping] = None, + flags: int = 0, + **kwargs: Any, + ) -> element.Tag | None: + """Perform a CSS selection operation on the current Tag and return the + first result, if any. + + This uses the Soup Sieve library. For more information, see + that library's documentation for the `soupsieve.select_one() <https://facelessuser.github.io/soupsieve/api/#soupsieveselect_one>`_ method. + + :param selector: A CSS selector. + + :param namespaces: A dictionary mapping namespace prefixes + used in the CSS selector to namespace URIs. By default, + Beautiful Soup will use the prefixes it encountered while + parsing the document. + + :param flags: Flags to be passed into Soup Sieve's + `soupsieve.select_one() <https://facelessuser.github.io/soupsieve/api/#soupsieveselect_one>`_ method. + + :param kwargs: Keyword arguments to be passed into Soup Sieve's + `soupsieve.select_one() <https://facelessuser.github.io/soupsieve/api/#soupsieveselect_one>`_ method. + """ + return self.api.select_one( + select, self.tag, self._ns(namespaces, select), flags, **kwargs + ) + + def select( + self, + select: str, + namespaces: Optional[_NamespaceMapping] = None, + limit: int = 0, + flags: int = 0, + **kwargs: Any, + ) -> ResultSet[element.Tag]: + """Perform a CSS selection operation on the current `element.Tag`. + + This uses the Soup Sieve library. For more information, see + that library's documentation for the `soupsieve.select() <https://facelessuser.github.io/soupsieve/api/#soupsieveselect>`_ method. + + :param selector: A CSS selector. + + :param namespaces: A dictionary mapping namespace prefixes + used in the CSS selector to namespace URIs. By default, + Beautiful Soup will pass in the prefixes it encountered while + parsing the document. + + :param limit: After finding this number of results, stop looking. + + :param flags: Flags to be passed into Soup Sieve's + `soupsieve.select() <https://facelessuser.github.io/soupsieve/api/#soupsieveselect>`_ method. + + :param kwargs: Keyword arguments to be passed into Soup Sieve's + `soupsieve.select() <https://facelessuser.github.io/soupsieve/api/#soupsieveselect>`_ method. 
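+
+ Example (an illustrative sketch; assumes ``soup`` is a parsed
+ document)::
+
+ soup.css.select("p.story > a", limit=2)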
+ """ + if limit is None: + limit = 0 + + return self._rs( + self.api.select( + select, self.tag, self._ns(namespaces, select), limit, flags, **kwargs + ) + ) + + def iselect( + self, + select: str, + namespaces: Optional[_NamespaceMapping] = None, + limit: int = 0, + flags: int = 0, + **kwargs: Any, + ) -> Iterator[element.Tag]: + """Perform a CSS selection operation on the current `element.Tag`. + + This uses the Soup Sieve library. For more information, see + that library's documentation for the `soupsieve.iselect() + <https://facelessuser.github.io/soupsieve/api/#soupsieveiselect>`_ + method. It is the same as select(), but it returns a generator + instead of a list. + + :param selector: A string containing a CSS selector. + + :param namespaces: A dictionary mapping namespace prefixes + used in the CSS selector to namespace URIs. By default, + Beautiful Soup will pass in the prefixes it encountered while + parsing the document. + + :param limit: After finding this number of results, stop looking. + + :param flags: Flags to be passed into Soup Sieve's + `soupsieve.iselect() <https://facelessuser.github.io/soupsieve/api/#soupsieveiselect>`_ method. + + :param kwargs: Keyword arguments to be passed into Soup Sieve's + `soupsieve.iselect() <https://facelessuser.github.io/soupsieve/api/#soupsieveiselect>`_ method. + """ + return self.api.iselect( + select, self.tag, self._ns(namespaces, select), limit, flags, **kwargs + ) + + def closest( + self, + select: str, + namespaces: Optional[_NamespaceMapping] = None, + flags: int = 0, + **kwargs: Any, + ) -> Optional[element.Tag]: + """Find the `element.Tag` closest to this one that matches the given selector. + + This uses the Soup Sieve library. For more information, see + that library's documentation for the `soupsieve.closest() + <https://facelessuser.github.io/soupsieve/api/#soupsieveclosest>`_ + method. + + :param selector: A string containing a CSS selector. + + :param namespaces: A dictionary mapping namespace prefixes + used in the CSS selector to namespace URIs. By default, + Beautiful Soup will pass in the prefixes it encountered while + parsing the document. + + :param flags: Flags to be passed into Soup Sieve's + `soupsieve.closest() <https://facelessuser.github.io/soupsieve/api/#soupsieveclosest>`_ method. + + :param kwargs: Keyword arguments to be passed into Soup Sieve's + `soupsieve.closest() <https://facelessuser.github.io/soupsieve/api/#soupsieveclosest>`_ method. + + """ + return self.api.closest( + select, self.tag, self._ns(namespaces, select), flags, **kwargs + ) + + def match( + self, + select: str, + namespaces: Optional[_NamespaceMapping] = None, + flags: int = 0, + **kwargs: Any, + ) -> bool: + """Check whether or not this `element.Tag` matches the given CSS selector. + + This uses the Soup Sieve library. For more information, see + that library's documentation for the `soupsieve.match() + <https://facelessuser.github.io/soupsieve/api/#soupsievematch>`_ + method. + + :param: a CSS selector. + + :param namespaces: A dictionary mapping namespace prefixes + used in the CSS selector to namespace URIs. By default, + Beautiful Soup will pass in the prefixes it encountered while + parsing the document. + + :param flags: Flags to be passed into Soup Sieve's + `soupsieve.match() + <https://facelessuser.github.io/soupsieve/api/#soupsievematch>`_ + method. + + :param kwargs: Keyword arguments to be passed into SoupSieve's + `soupsieve.match() + <https://facelessuser.github.io/soupsieve/api/#soupsievematch>`_ + method. 
+ """ + return cast( + bool, + self.api.match( + select, self.tag, self._ns(namespaces, select), flags, **kwargs + ), + ) + + def filter( + self, + select: str, + namespaces: Optional[_NamespaceMapping] = None, + flags: int = 0, + **kwargs: Any, + ) -> ResultSet[element.Tag]: + """Filter this `element.Tag`'s direct children based on the given CSS selector. + + This uses the Soup Sieve library. It works the same way as + passing a `element.Tag` into that library's `soupsieve.filter() + <https://facelessuser.github.io/soupsieve/api/#soupsievefilter>`_ + method. For more information, see the documentation for + `soupsieve.filter() + <https://facelessuser.github.io/soupsieve/api/#soupsievefilter>`_. + + :param namespaces: A dictionary mapping namespace prefixes + used in the CSS selector to namespace URIs. By default, + Beautiful Soup will pass in the prefixes it encountered while + parsing the document. + + :param flags: Flags to be passed into Soup Sieve's + `soupsieve.filter() + <https://facelessuser.github.io/soupsieve/api/#soupsievefilter>`_ + method. + + :param kwargs: Keyword arguments to be passed into SoupSieve's + `soupsieve.filter() + <https://facelessuser.github.io/soupsieve/api/#soupsievefilter>`_ + method. + """ + return self._rs( + self.api.filter( + select, self.tag, self._ns(namespaces, select), flags, **kwargs + ) + ) diff --git a/.venv/lib/python3.12/site-packages/bs4/dammit.py b/.venv/lib/python3.12/site-packages/bs4/dammit.py new file mode 100644 index 00000000..c9f42446 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/dammit.py @@ -0,0 +1,1408 @@ +# -*- coding: utf-8 -*- +"""Beautiful Soup bonus library: Unicode, Dammit + +This library converts a bytestream to Unicode through any means +necessary. It is heavily based on code from Mark Pilgrim's `Universal +Feed Parser <https://pypi.org/project/feedparser/>`_, now maintained +by Kurt McKee. It does not rewrite the body of an XML or HTML document +to reflect a new encoding; that's the job of `TreeBuilder`. + +""" + +# Use of this source code is governed by the MIT license. +__license__ = "MIT" + +from html.entities import codepoint2name +from collections import defaultdict +import codecs +from html.entities import html5 +import re +from logging import Logger, getLogger +from types import ModuleType +from typing import ( + Dict, + Iterator, + List, + Optional, + Pattern, + Set, + Tuple, + Type, + Union, + cast, +) +from typing_extensions import Literal +from bs4._typing import ( + _Encoding, + _Encodings, +) +import warnings + +# Import a library to autodetect character encodings. We'll support +# any of a number of libraries that all support the same API: +# +# * cchardet +# * chardet +# * charset-normalizer +chardet_module: Optional[ModuleType] = None +try: + # PyPI package: cchardet + import cchardet + + chardet_module = cchardet +except ImportError: + try: + # Debian package: python-chardet + # PyPI package: chardet + import chardet + + chardet_module = chardet + except ImportError: + try: + # PyPI package: charset-normalizer + import charset_normalizer + + chardet_module = charset_normalizer + except ImportError: + # No chardet available. 
+ pass
+
+
+ def _chardet_dammit(s: bytes) -> Optional[str]:
+ """Try as hard as possible to detect the encoding of a bytestring."""
+ if chardet_module is None or isinstance(s, str):
+ return None
+ module = chardet_module
+ return module.detect(s)["encoding"]
+
+
+ # Build bytestring and Unicode versions of regular expressions for finding
+ # a declared encoding inside an XML or HTML document.
+ xml_encoding: str = "^\\s*<\\?.*encoding=['\"](.*?)['\"].*\\?>" #: :meta private:
+ html_meta: str = (
+ "<\\s*meta[^>]+charset\\s*=\\s*[\"']?([^>]*?)[ /;'\">]" #: :meta private:
+ )
+
+ # TODO-TYPING: The Pattern type here could use more refinement, but it's tricky.
+ encoding_res: Dict[Type, Dict[str, Pattern]] = dict()
+ encoding_res[bytes] = {
+ "html": re.compile(html_meta.encode("ascii"), re.I),
+ "xml": re.compile(xml_encoding.encode("ascii"), re.I),
+ }
+ encoding_res[str] = {
+ "html": re.compile(html_meta, re.I),
+ "xml": re.compile(xml_encoding, re.I),
+ }
+
+
+ class EntitySubstitution(object):
+ """The ability to substitute XML or HTML entities for certain characters."""
+
+ #: A map of named HTML entities to the corresponding Unicode string.
+ #:
+ #: :meta hide-value:
+ HTML_ENTITY_TO_CHARACTER: Dict[str, str]
+
+ #: A map of Unicode strings to the corresponding named HTML entities;
+ #: the inverse of HTML_ENTITY_TO_CHARACTER.
+ #:
+ #: :meta hide-value:
+ CHARACTER_TO_HTML_ENTITY: Dict[str, str]
+
+ #: A regular expression that matches any character (or, in rare
+ #: cases, pair of characters) that can be replaced with a named
+ #: HTML entity.
+ #:
+ #: :meta hide-value:
+ CHARACTER_TO_HTML_ENTITY_RE: Pattern[str]
+
+ #: A very similar regular expression to
+ #: CHARACTER_TO_HTML_ENTITY_RE, but which also matches unescaped
+ #: ampersands. This is used by the 'html' formatter to provide
+ #: backwards-compatibility, even though the HTML5 spec allows most
+ #: ampersands to go unescaped.
+ #:
+ #: :meta hide-value:
+ CHARACTER_TO_HTML_ENTITY_WITH_AMPERSAND_RE: Pattern[str]
+
+ @classmethod
+ def _populate_class_variables(cls) -> None:
+ """Initialize variables used by this class to manage the plethora of
+ HTML5 named entities.
+
+ This function sets the following class variables:
+
+ CHARACTER_TO_HTML_ENTITY - A mapping of Unicode strings like "⦨" to
+ entity names like "angmsdaa". When a single Unicode string has
+ multiple entity names, we try to choose the most commonly-used
+ name.
+
+ HTML_ENTITY_TO_CHARACTER: A mapping of entity names like "angmsdaa" to
+ Unicode strings like "⦨".
+
+ CHARACTER_TO_HTML_ENTITY_RE: A regular expression matching (almost) any
+ Unicode string that corresponds to an HTML5 named entity.
+
+ CHARACTER_TO_HTML_ENTITY_WITH_AMPERSAND_RE: A very similar
+ regular expression to CHARACTER_TO_HTML_ENTITY_RE, but which
+ also matches unescaped ampersands. This is used by the 'html'
+ formatter to provide backwards-compatibility, even though the HTML5
+ spec allows most ampersands to go unescaped.
+ """
+ unicode_to_name = {}
+ name_to_unicode = {}
+
+ short_entities = set()
+ long_entities_by_first_character = defaultdict(set)
+
+ for name_with_semicolon, character in sorted(html5.items()):
+ # "It is intentional, for legacy compatibility, that many
+ # code points have multiple character reference names. For
+ # example, some appear both with and without the trailing
+ # semicolon, or with different capitalizations."
+ # - https://html.spec.whatwg.org/multipage/named-characters.html#named-character-references
+ #
+ # The parsers are in charge of handling (or not) character
+ # references with no trailing semicolon, so we remove the
+ # semicolon whenever it appears.
+ if name_with_semicolon.endswith(";"):
+ name = name_with_semicolon[:-1]
+ else:
+ name = name_with_semicolon
+
+ # When parsing HTML, we want to recognize any known named
+ # entity and convert it to a sequence of Unicode
+ # characters.
+ if name not in name_to_unicode:
+ name_to_unicode[name] = character
+
+ # When _generating_ HTML, we want to recognize special
+ # character sequences that _could_ be converted to named
+ # entities.
+ unicode_to_name[character] = name
+
+ # We also need to build a regular expression that lets us
+ # _find_ those characters in output strings so we can
+ # replace them.
+ #
+ # This is tricky, for two reasons.
+
+ if len(character) == 1 and ord(character) < 128 and character not in "<>":
+ # First, it would be annoying to turn single ASCII
+ # characters like | into named entities like
+ # &verbar;. The exceptions are <>, which we _must_
+ # turn into named entities to produce valid HTML.
+ continue
+
+ if len(character) > 1 and all(ord(x) < 128 for x in character):
+ # We also do not want to turn _combinations_ of ASCII
+ # characters like 'fj' into named entities like '&fjlig;',
+ # though that's more debatable.
+ continue
+
+ # Second, some named entities have a Unicode value that's
+ # a subset of the Unicode value for some _other_ named
+ # entity. As an example, '\u2267' is ≧,
+ # but '\u2267\u0338' is ≧̸. Our regular
+ # expression needs to match the first two characters of
+ # "\u2267\u0338foo", but only the first character of
+ # "\u2267foo".
+ #
+ # In this step, we build two sets of characters that
+ # _eventually_ need to go into the regular expression. But
+ # we won't know exactly what the regular expression needs
+ # to look like until we've gone through the entire list of
+ # named entities.
+ if len(character) == 1 and character != "&":
+ short_entities.add(character)
+ else:
+ long_entities_by_first_character[character[0]].add(character)
+
+ # Now that we've been through the entire list of entities, we
+ # can create a regular expression that matches any of them.
+ particles = set()
+ for short in short_entities:
+ long_versions = long_entities_by_first_character[short]
+ if not long_versions:
+ particles.add(short)
+ else:
+ ignore = "".join([x[1] for x in long_versions])
+ # This finds, e.g. \u2267 but only if it is _not_
+ # followed by \u0338.
+ particles.add("%s(?![%s])" % (short, ignore))
+
+ for long_entities in list(long_entities_by_first_character.values()):
+ for long_entity in long_entities:
+ particles.add(long_entity)
+
+ re_definition = "(%s)" % "|".join(particles)
+
+ particles.add("&")
+ re_definition_with_ampersand = "(%s)" % "|".join(particles)
+
+ # If an entity shows up in both html5 and codepoint2name, it's
+ # likely that HTML5 gives it several different names, such as
+ # 'rsquo' and 'rsquor'. When converting Unicode characters to
+ # named entities, the codepoint2name name should take
+ # precedence where possible, since that's the more easily
+ # recognizable one.
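+ # (For example, U+2019 RIGHT SINGLE QUOTATION MARK appears in
+ # html5 under both "rsquo" and "rsquor", but codepoint2name calls
+ # it "rsquo", so the overwrite below makes "rsquo" win.)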
+ for codepoint, name in list(codepoint2name.items()):
+ character = chr(codepoint)
+ unicode_to_name[character] = name
+
+ cls.CHARACTER_TO_HTML_ENTITY = unicode_to_name
+ cls.HTML_ENTITY_TO_CHARACTER = name_to_unicode
+ cls.CHARACTER_TO_HTML_ENTITY_RE = re.compile(re_definition)
+ cls.CHARACTER_TO_HTML_ENTITY_WITH_AMPERSAND_RE = re.compile(
+ re_definition_with_ampersand
+ )
+
+ #: A map of Unicode strings to the corresponding named XML entities.
+ #:
+ #: :meta hide-value:
+ CHARACTER_TO_XML_ENTITY: Dict[str, str] = {
+ "'": "apos",
+ '"': "quot",
+ "&": "amp",
+ "<": "lt",
+ ">": "gt",
+ }
+
+ # Matches any named or numeric HTML entity.
+ ANY_ENTITY_RE = re.compile("&(#\\d+|#x[0-9a-fA-F]+|\\w+);", re.I)
+
+ #: A regular expression matching an angle bracket or an ampersand that
+ #: is not part of an XML or HTML entity.
+ #:
+ #: :meta hide-value:
+ BARE_AMPERSAND_OR_BRACKET: Pattern[str] = re.compile(
+ "([<>]|" "&(?!#\\d+;|#x[0-9a-fA-F]+;|\\w+;)" ")"
+ )
+
+ #: A regular expression matching an angle bracket or an ampersand.
+ #:
+ #: :meta hide-value:
+ AMPERSAND_OR_BRACKET: Pattern[str] = re.compile("([<>&])")
+
+ @classmethod
+ def _substitute_html_entity(cls, matchobj: re.Match) -> str:
+ """Used with a regular expression to substitute the
+ appropriate HTML entity for a special character string."""
+ original_entity = matchobj.group(0)
+ entity = cls.CHARACTER_TO_HTML_ENTITY.get(original_entity)
+ if entity is None:
+ return "&%s;" % original_entity
+ return "&%s;" % entity
+
+ @classmethod
+ def _substitute_xml_entity(cls, matchobj: re.Match) -> str:
+ """Used with a regular expression to substitute the
+ appropriate XML entity for a special character string."""
+ entity = cls.CHARACTER_TO_XML_ENTITY[matchobj.group(0)]
+ return "&%s;" % entity
+
+ @classmethod
+ def _escape_entity_name(cls, matchobj: re.Match) -> str:
+ return "&amp;%s;" % matchobj.group(1)
+
+ @classmethod
+ def _escape_unrecognized_entity_name(cls, matchobj: re.Match) -> str:
+ possible_entity = matchobj.group(1)
+ if possible_entity in cls.HTML_ENTITY_TO_CHARACTER:
+ return "&%s;" % possible_entity
+ return "&amp;%s;" % possible_entity
+
+ @classmethod
+ def quoted_attribute_value(cls, value: str) -> str:
+ """Make a value into a quoted XML attribute, possibly escaping it.
+
+ Most strings will be quoted using double quotes.
+
+ Bob's Bar -> "Bob's Bar"
+
+ If a string contains double quotes, it will be quoted using
+ single quotes.
+
+ Welcome to "my bar" -> 'Welcome to "my bar"'
+
+ If a string contains both single and double quotes, the
+ double quotes will be escaped, and the string will be quoted
+ using double quotes.
+
+ Welcome to "Bob's Bar" -> Welcome to &quot;Bob's bar&quot;
+
+ :param value: The XML attribute value to quote
+ :return: The quoted value
+ """
+ quote_with = '"'
+ if '"' in value:
+ if "'" in value:
+ # The string contains both single and double
+ # quotes. Turn the double quotes into
+ # entities. We quote the double quotes rather than
+ # the single quotes because the entity name is
+ # "&quot;" whether this is HTML or XML. If we
+ # quoted the single quotes, we'd have to decide
+ # between &apos; and &squot;.
+ replace_with = "&quot;"
+ value = value.replace('"', replace_with)
+ else:
+ # There are double quotes but no single quotes.
+ # We can use single quotes to quote the attribute.
+ quote_with = "'"
+ return quote_with + value + quote_with
+
+ @classmethod
+ def substitute_xml(cls, value: str, make_quoted_attribute: bool = False) -> str:
+ """Replace special XML characters with named XML entities.
+
+ The less-than sign will become &lt;, the greater-than sign
+ will become &gt;, and any ampersands will become &amp;. If you
+ want ampersands that seem to be part of an entity definition
+ to be left alone, use `substitute_xml_containing_entities`
+ instead.
+
+ :param value: A string to be substituted.
+
+ :param make_quoted_attribute: If True, then the string will be
+ quoted, as befits an attribute value.
+
+ :return: A version of ``value`` with special characters replaced
+ with named entities.
+ """
+ # Escape angle brackets and ampersands.
+ value = cls.AMPERSAND_OR_BRACKET.sub(cls._substitute_xml_entity, value)
+
+ if make_quoted_attribute:
+ value = cls.quoted_attribute_value(value)
+ return value
+
+ @classmethod
+ def substitute_xml_containing_entities(
+ cls, value: str, make_quoted_attribute: bool = False
+ ) -> str:
+ """Substitute XML entities for special XML characters.
+
+ :param value: A string to be substituted. The less-than sign will
+ become &lt;, the greater-than sign will become &gt;, and any
+ ampersands that are not part of an entity definition will
+ become &amp;.
+
+ :param make_quoted_attribute: If True, then the string will be
+ quoted, as befits an attribute value.
+ """
+ # Escape angle brackets, and ampersands that aren't part of
+ # entities.
+ value = cls.BARE_AMPERSAND_OR_BRACKET.sub(cls._substitute_xml_entity, value)
+
+ if make_quoted_attribute:
+ value = cls.quoted_attribute_value(value)
+ return value
+
+ @classmethod
+ def substitute_html(cls, s: str) -> str:
+ """Replace certain Unicode characters with named HTML entities.
+
+ This differs from ``data.encode(encoding, 'xmlcharrefreplace')``
+ in that the goal is to make the result more readable (to those
+ with ASCII displays) rather than to recover from
+ errors. There's absolutely nothing wrong with a UTF-8 string
+ containing a LATIN SMALL LETTER E WITH ACUTE, but replacing that
+ character with "&eacute;" will make it more readable to some
+ people.
+
+ :param s: The string to be modified.
+ :return: The string with some Unicode characters replaced with
+ HTML entities.
+ """
+ # Convert any appropriate characters to HTML entities.
+ return cls.CHARACTER_TO_HTML_ENTITY_WITH_AMPERSAND_RE.sub(
+ cls._substitute_html_entity, s
+ )
+
+ @classmethod
+ def substitute_html5(cls, s: str) -> str:
+ """Replace certain Unicode characters with named HTML entities
+ using HTML5 rules.
+
+ Specifically, this method is much less aggressive about
+ escaping ampersands than substitute_html. Only ambiguous
+ ampersands are escaped, per the HTML5 standard:
+
+ "An ambiguous ampersand is a U+0026 AMPERSAND character (&)
+ that is followed by one or more ASCII alphanumerics, followed
+ by a U+003B SEMICOLON character (;), where these characters do
+ not match any of the names given in the named character
+ references section."
+
+ Unlike substitute_html5_raw, this method assumes HTML entities
+ were converted to Unicode characters on the way in, as
+ Beautiful Soup does. By the time Beautiful Soup does its work,
+ the only ambiguous ampersands that need to be escaped are the
+ ones that were escaped in the original markup when mentioning
+ HTML entities.
+
+ :param s: The string to be modified.
+ :return: The string with some Unicode characters replaced with
+ HTML entities.
+ """
+ # First, escape any HTML entities found in the markup.
+ s = cls.ANY_ENTITY_RE.sub(cls._escape_entity_name, s)
+
+ # Next, convert any appropriate characters to unescaped HTML entities.
+ s = cls.CHARACTER_TO_HTML_ENTITY_RE.sub(cls._substitute_html_entity, s) + + return s + + @classmethod + def substitute_html5_raw(cls, s: str) -> str: + """Replace certain Unicode characters with named HTML entities + using HTML5 rules. + + substitute_html5_raw is similar to substitute_html5 but it is + designed for standalone use (whereas substitute_html5 is + designed for use with Beautiful Soup). + + :param s: The string to be modified. + :return: The string with some Unicode characters replaced with + HTML entities. + """ + # First, escape the ampersand for anything that looks like an + # entity but isn't in the list of recognized entities. All other + # ampersands can be left alone. + s = cls.ANY_ENTITY_RE.sub(cls._escape_unrecognized_entity_name, s) + + # Then, convert a range of Unicode characters to unescaped + # HTML entities. + s = cls.CHARACTER_TO_HTML_ENTITY_RE.sub(cls._substitute_html_entity, s) + + return s + + +EntitySubstitution._populate_class_variables() + + +class EncodingDetector: + """This class is capable of guessing a number of possible encodings + for a bytestring. + + Order of precedence: + + 1. Encodings you specifically tell EncodingDetector to try first + (the ``known_definite_encodings`` argument to the constructor). + + 2. An encoding determined by sniffing the document's byte-order mark. + + 3. Encodings you specifically tell EncodingDetector to try if + byte-order mark sniffing fails (the ``user_encodings`` argument to the + constructor). + + 4. An encoding declared within the bytestring itself, either in an + XML declaration (if the bytestring is to be interpreted as an XML + document), or in a <meta> tag (if the bytestring is to be + interpreted as an HTML document.) + + 5. An encoding detected through textual analysis by chardet, + cchardet, or a similar external library. + + 6. UTF-8. + + 7. Windows-1252. + + :param markup: Some markup in an unknown encoding. + + :param known_definite_encodings: When determining the encoding + of ``markup``, these encodings will be tried first, in + order. In HTML terms, this corresponds to the "known + definite encoding" step defined in `section 13.2.3.1 of the HTML standard <https://html.spec.whatwg.org/multipage/parsing.html#parsing-with-a-known-character-encoding>`_. + + :param user_encodings: These encodings will be tried after the + ``known_definite_encodings`` have been tried and failed, and + after an attempt to sniff the encoding by looking at a + byte order mark has failed. In HTML terms, this + corresponds to the step "user has explicitly instructed + the user agent to override the document's character + encoding", defined in `section 13.2.3.2 of the HTML standard <https://html.spec.whatwg.org/multipage/parsing.html#determining-the-character-encoding>`_. + + :param override_encodings: A **deprecated** alias for + ``known_definite_encodings``. Any encodings here will be tried + immediately after the encodings in + ``known_definite_encodings``. + + :param is_html: If True, this markup is considered to be + HTML. Otherwise it's assumed to be XML. + + :param exclude_encodings: These encodings will not be tried, + even if they otherwise would be. 
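+
+ A minimal usage sketch (illustrative; ``data`` is any bytestring)::
+
+ detector = EncodingDetector(data, is_html=True)
+ for encoding in detector.encodings:
+ ... # candidates arrive in the precedence order above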
+ + """ + + def __init__( + self, + markup: bytes, + known_definite_encodings: Optional[_Encodings] = None, + is_html: Optional[bool] = False, + exclude_encodings: Optional[_Encodings] = None, + user_encodings: Optional[_Encodings] = None, + override_encodings: Optional[_Encodings] = None, + ): + self.known_definite_encodings = list(known_definite_encodings or []) + if override_encodings: + warnings.warn( + "The 'override_encodings' argument was deprecated in 4.10.0. Use 'known_definite_encodings' instead.", + DeprecationWarning, + stacklevel=3, + ) + self.known_definite_encodings += override_encodings + self.user_encodings = user_encodings or [] + exclude_encodings = exclude_encodings or [] + self.exclude_encodings = set([x.lower() for x in exclude_encodings]) + self.chardet_encoding = None + self.is_html = False if is_html is None else is_html + self.declared_encoding: Optional[str] = None + + # First order of business: strip a byte-order mark. + self.markup, self.sniffed_encoding = self.strip_byte_order_mark(markup) + + known_definite_encodings: _Encodings + user_encodings: _Encodings + exclude_encodings: _Encodings + chardet_encoding: Optional[_Encoding] + is_html: bool + declared_encoding: Optional[_Encoding] + markup: bytes + sniffed_encoding: Optional[_Encoding] + + def _usable(self, encoding: Optional[_Encoding], tried: Set[_Encoding]) -> bool: + """Should we even bother to try this encoding? + + :param encoding: Name of an encoding. + :param tried: Encodings that have already been tried. This + will be modified as a side effect. + """ + if encoding is None: + return False + encoding = encoding.lower() + if encoding in self.exclude_encodings: + return False + if encoding not in tried: + tried.add(encoding) + return True + return False + + @property + def encodings(self) -> Iterator[_Encoding]: + """Yield a number of encodings that might work for this markup. + + :yield: A sequence of strings. Each is the name of an encoding + that *might* work to convert a bytestring into Unicode. + """ + tried: Set[_Encoding] = set() + + # First, try the known definite encodings + for e in self.known_definite_encodings: + if self._usable(e, tried): + yield e + + # Did the document originally start with a byte-order mark + # that indicated its encoding? + if self.sniffed_encoding is not None and self._usable( + self.sniffed_encoding, tried + ): + yield self.sniffed_encoding + + # Sniffing the byte-order mark did nothing; try the user + # encodings. + for e in self.user_encodings: + if self._usable(e, tried): + yield e + + # Look within the document for an XML or HTML encoding + # declaration. + if self.declared_encoding is None: + self.declared_encoding = self.find_declared_encoding( + self.markup, self.is_html + ) + if self.declared_encoding is not None and self._usable( + self.declared_encoding, tried + ): + yield self.declared_encoding + + # Use third-party character set detection to guess at the + # encoding. + if self.chardet_encoding is None: + self.chardet_encoding = _chardet_dammit(self.markup) + if self.chardet_encoding is not None and self._usable( + self.chardet_encoding, tried + ): + yield self.chardet_encoding + + # As a last-ditch effort, try utf-8 and windows-1252. + for e in ("utf-8", "windows-1252"): + if self._usable(e, tried): + yield e + + @classmethod + def strip_byte_order_mark(cls, data: bytes) -> Tuple[bytes, Optional[_Encoding]]: + """If a byte-order mark is present, strip it and return the encoding it implies. 
+
+ :param data: A bytestring that may or may not begin with a
+ byte-order mark.
+
+ :return: A 2-tuple (data stripped of byte-order mark, encoding implied by byte-order mark)
+ """
+ encoding = None
+ if isinstance(data, str):
+ # Unicode data cannot have a byte-order mark.
+ return data, encoding
+ if (
+ (len(data) >= 4)
+ and (data[:2] == b"\xfe\xff")
+ and (data[2:4] != b"\x00\x00")
+ ):
+ encoding = "utf-16be"
+ data = data[2:]
+ elif (
+ (len(data) >= 4)
+ and (data[:2] == b"\xff\xfe")
+ and (data[2:4] != b"\x00\x00")
+ ):
+ encoding = "utf-16le"
+ data = data[2:]
+ elif data[:3] == b"\xef\xbb\xbf":
+ encoding = "utf-8"
+ data = data[3:]
+ elif data[:4] == b"\x00\x00\xfe\xff":
+ encoding = "utf-32be"
+ data = data[4:]
+ elif data[:4] == b"\xff\xfe\x00\x00":
+ encoding = "utf-32le"
+ data = data[4:]
+ return data, encoding
+
+ @classmethod
+ def find_declared_encoding(
+ cls,
+ markup: Union[bytes, str],
+ is_html: bool = False,
+ search_entire_document: bool = False,
+ ) -> Optional[_Encoding]:
+ """Given a document, tries to find an encoding declared within the
+ text of the document itself.
+
+ An XML encoding is declared at the beginning of the document.
+
+ An HTML encoding is declared in a <meta> tag, hopefully near the
+ beginning of the document.
+
+ :param markup: Some markup.
+ :param is_html: If True, this markup is considered to be HTML. Otherwise
+ it's assumed to be XML.
+ :param search_entire_document: Since an encoding is supposed
+ to be declared near the beginning of the document, most of
+ the time it's only necessary to search a few kilobytes of
+ data. Set this to True to force this method to search the
+ entire document.
+ :return: The declared encoding, if one is found.
+ """
+ if search_entire_document:
+ xml_endpos = html_endpos = len(markup)
+ else:
+ xml_endpos = 1024
+ html_endpos = max(2048, int(len(markup) * 0.05))
+
+ if isinstance(markup, bytes):
+ res = encoding_res[bytes]
+ else:
+ res = encoding_res[str]
+
+ xml_re = res["xml"]
+ html_re = res["html"]
+ declared_encoding: Optional[_Encoding] = None
+ declared_encoding_match = xml_re.search(markup, endpos=xml_endpos)
+ if not declared_encoding_match and is_html:
+ declared_encoding_match = html_re.search(markup, endpos=html_endpos)
+ if declared_encoding_match is not None:
+ declared_encoding = declared_encoding_match.groups()[0]
+ if declared_encoding:
+ if isinstance(declared_encoding, bytes):
+ declared_encoding = declared_encoding.decode("ascii", "replace")
+ return declared_encoding.lower()
+ return None
+
+
+ class UnicodeDammit:
+ """A class for detecting the encoding of a bytestring containing an
+ HTML or XML document, and decoding it to Unicode. If the source
+ encoding is windows-1252, `UnicodeDammit` can also replace
+ Microsoft smart quotes with their HTML or XML equivalents.
+
+ :param markup: HTML or XML markup in an unknown encoding.
+
+ :param known_definite_encodings: When determining the encoding
+ of ``markup``, these encodings will be tried first, in
+ order. In HTML terms, this corresponds to the "known
+ definite encoding" step defined in `section 13.2.3.1 of the HTML standard <https://html.spec.whatwg.org/multipage/parsing.html#parsing-with-a-known-character-encoding>`_.
+
+ :param user_encodings: These encodings will be tried after the
+ ``known_definite_encodings`` have been tried and failed, and
+ after an attempt to sniff the encoding by looking at a
+ byte order mark has failed.
In HTML terms, this + corresponds to the step "user has explicitly instructed + the user agent to override the document's character + encoding", defined in `section 13.2.3.2 of the HTML standard <https://html.spec.whatwg.org/multipage/parsing.html#determining-the-character-encoding>`_. + + :param override_encodings: A **deprecated** alias for + ``known_definite_encodings``. Any encodings here will be tried + immediately after the encodings in + ``known_definite_encodings``. + + :param smart_quotes_to: By default, Microsoft smart quotes will, + like all other characters, be converted to Unicode + characters. Setting this to ``ascii`` will convert them to ASCII + quotes instead. Setting it to ``xml`` will convert them to XML + entity references, and setting it to ``html`` will convert them + to HTML entity references. + + :param is_html: If True, ``markup`` is treated as an HTML + document. Otherwise it's treated as an XML document. + + :param exclude_encodings: These encodings will not be considered, + even if the sniffing code thinks they might make sense. + + """ + + def __init__( + self, + markup: bytes, + known_definite_encodings: Optional[_Encodings] = [], + smart_quotes_to: Optional[Literal["ascii", "xml", "html"]] = None, + is_html: bool = False, + exclude_encodings: Optional[_Encodings] = [], + user_encodings: Optional[_Encodings] = None, + override_encodings: Optional[_Encodings] = None, + ): + self.smart_quotes_to = smart_quotes_to + self.tried_encodings = [] + self.contains_replacement_characters = False + self.is_html = is_html + self.log = getLogger(__name__) + self.detector = EncodingDetector( + markup, + known_definite_encodings, + is_html, + exclude_encodings, + user_encodings, + override_encodings, + ) + + # Short-circuit if the data is in Unicode to begin with. + if isinstance(markup, str) or markup == b"": + self.markup = markup + self.unicode_markup = str(markup) + self.original_encoding = None + return + + # The encoding detector may have stripped a byte-order mark. + # Use the stripped markup from this point on. + self.markup = self.detector.markup + + u = None + for encoding in self.detector.encodings: + markup = self.detector.markup + u = self._convert_from(encoding) + if u is not None: + break + + if not u: + # None of the encodings worked. As an absolute last resort, + # try them again with character replacement. + + for encoding in self.detector.encodings: + if encoding != "ascii": + u = self._convert_from(encoding, "replace") + if u is not None: + self.log.warning( + "Some characters could not be decoded, and were " + "replaced with REPLACEMENT CHARACTER." + ) + + self.contains_replacement_characters = True + break + + # If none of that worked, we could at this point force it to + # ASCII, but that would destroy so much data that I think + # giving up is better. + # + # Note that this is extremely unlikely, probably impossible, + # because the "replace" strategy is so powerful. Even running + # the Python binary through Unicode, Dammit gives you Unicode, + # albeit Unicode riddled with REPLACEMENT CHARACTER. + if u is None: + self.original_encoding = None + self.unicode_markup = None + else: + self.unicode_markup = u + + #: The original markup, before it was converted to Unicode. + #: This is not necessarily the same as what was passed in to the + #: constructor, since any byte-order mark will be stripped. + markup: bytes + + #: The Unicode version of the markup, following conversion. 
This + #: is set to None if there was simply no way to convert the + #: bytestring to Unicode (as with binary data). + unicode_markup: Optional[str] + + #: This is True if `UnicodeDammit.unicode_markup` contains + #: U+FFFD REPLACEMENT_CHARACTER characters which were not present + #: in `UnicodeDammit.markup`. These mark character sequences that + #: could not be represented in Unicode. + contains_replacement_characters: bool + + #: Unicode, Dammit's best guess as to the original character + #: encoding of `UnicodeDammit.markup`. + original_encoding: Optional[_Encoding] + + #: The strategy used to handle Microsoft smart quotes. + smart_quotes_to: Optional[str] + + #: The (encoding, error handling strategy) 2-tuples that were used to + #: try and convert the markup to Unicode. + tried_encodings: List[Tuple[_Encoding, str]] + + log: Logger #: :meta private: + + def _sub_ms_char(self, match: re.Match) -> bytes: + """Changes a MS smart quote character to an XML or HTML + entity, or an ASCII character. + + TODO: Since this is only used to convert smart quotes, it + could be simplified, and MS_CHARS_TO_ASCII made much less + parochial. + """ + orig: bytes = match.group(1) + sub: bytes + if self.smart_quotes_to == "ascii": + if orig in self.MS_CHARS_TO_ASCII: + sub = self.MS_CHARS_TO_ASCII[orig].encode() + else: + # Shouldn't happen; substitute the character + # with itself. + sub = orig + else: + if orig in self.MS_CHARS: + substitutions = self.MS_CHARS[orig] + if type(substitutions) is tuple: + if self.smart_quotes_to == "xml": + sub = b"&#x" + substitutions[1].encode() + b";" + else: + sub = b"&" + substitutions[0].encode() + b";" + else: + substitutions = cast(str, substitutions) + sub = substitutions.encode() + else: + # Shouldn't happen; substitute the character + # for itself. + sub = orig + return sub + + #: This dictionary maps commonly seen values for "charset" in HTML + #: meta tags to the corresponding Python codec names. It only covers + #: values that aren't in Python's aliases and can't be determined + #: by the heuristics in `find_codec`. + #: + #: :meta hide-value: + CHARSET_ALIASES: Dict[str, _Encoding] = { + "macintosh": "mac-roman", + "x-sjis": "shift-jis", + } + + #: A list of encodings that tend to contain Microsoft smart quotes. + #: + #: :meta hide-value: + ENCODINGS_WITH_SMART_QUOTES: _Encodings = [ + "windows-1252", + "iso-8859-1", + "iso-8859-2", + ] + + def _convert_from( + self, proposed: _Encoding, errors: str = "strict" + ) -> Optional[str]: + """Attempt to convert the markup to the proposed encoding. + + :param proposed: The name of a character encoding. + :param errors: An error handling strategy, used when calling `str`. + :return: The converted markup, or `None` if the proposed + encoding/error handling strategy didn't work. + """ + lookup_result = self.find_codec(proposed) + if lookup_result is None or (lookup_result, errors) in self.tried_encodings: + return None + proposed = lookup_result + self.tried_encodings.append((proposed, errors)) + markup = self.markup + # Convert smart quotes to HTML if coming from an encoding + # that might have them. 
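+ # (Illustrative example: with smart_quotes_to="html", the
+ # windows-1252 bytes b"\x93Hi\x94" are rewritten to
+ # b"&ldquo;Hi&rdquo;" before decoding.)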
+ if (
+ self.smart_quotes_to is not None
+ and proposed in self.ENCODINGS_WITH_SMART_QUOTES
+ ):
+ smart_quotes_re = b"([\x80-\x9f])"
+ smart_quotes_compiled = re.compile(smart_quotes_re)
+ markup = smart_quotes_compiled.sub(self._sub_ms_char, markup)
+
+ try:
+ # print("Trying to convert document to %s (errors=%s)" % (
+ # proposed, errors))
+ u = self._to_unicode(markup, proposed, errors)
+ self.unicode_markup = u
+ self.original_encoding = proposed
+ except Exception:
+ # print("That didn't work!")
+ # print(e)
+ return None
+ # print("Correct encoding: %s" % proposed)
+ return self.unicode_markup
+
+ def _to_unicode(
+ self, data: bytes, encoding: _Encoding, errors: str = "strict"
+ ) -> str:
+ """Given a bytestring and its encoding, decodes the string into Unicode.
+
+ :param encoding: The name of an encoding.
+ :param errors: An error handling strategy, used when calling `str`.
+ """
+ return str(data, encoding, errors)
+
+ @property
+ def declared_html_encoding(self) -> Optional[_Encoding]:
+ """If the markup is an HTML document, returns the encoding, if any,
+ declared *inside* the document.
+ """
+ if not self.is_html:
+ return None
+ return self.detector.declared_encoding
+
+ def find_codec(self, charset: _Encoding) -> Optional[str]:
+ """Look up the Python codec corresponding to a given character set.
+
+ :param charset: The name of a character set.
+ :return: The name of a Python codec.
+ """
+ value = (
+ self._codec(self.CHARSET_ALIASES.get(charset, charset))
+ or (charset and self._codec(charset.replace("-", "")))
+ or (charset and self._codec(charset.replace("-", "_")))
+ or (charset and charset.lower())
+ or charset
+ )
+ if value:
+ return value.lower()
+ return None
+
+ def _codec(self, charset: _Encoding) -> Optional[str]:
+ if not charset:
+ return charset
+ codec = None
+ try:
+ codecs.lookup(charset)
+ codec = charset
+ except (LookupError, ValueError):
+ pass
+ return codec
+
+ #: A partial mapping of ISO-Latin-1 to HTML entities/XML numeric entities.
+ #:
+ #: :meta hide-value:
+ MS_CHARS: Dict[bytes, Union[str, Tuple[str, str]]] = {
+ b"\x80": ("euro", "20AC"),
+ b"\x81": " ",
+ b"\x82": ("sbquo", "201A"),
+ b"\x83": ("fnof", "192"),
+ b"\x84": ("bdquo", "201E"),
+ b"\x85": ("hellip", "2026"),
+ b"\x86": ("dagger", "2020"),
+ b"\x87": ("Dagger", "2021"),
+ b"\x88": ("circ", "2C6"),
+ b"\x89": ("permil", "2030"),
+ b"\x8a": ("Scaron", "160"),
+ b"\x8b": ("lsaquo", "2039"),
+ b"\x8c": ("OElig", "152"),
+ b"\x8d": "?",
+ b"\x8e": ("#x17D", "17D"),
+ b"\x8f": "?",
+ b"\x90": "?",
+ b"\x91": ("lsquo", "2018"),
+ b"\x92": ("rsquo", "2019"),
+ b"\x93": ("ldquo", "201C"),
+ b"\x94": ("rdquo", "201D"),
+ b"\x95": ("bull", "2022"),
+ b"\x96": ("ndash", "2013"),
+ b"\x97": ("mdash", "2014"),
+ b"\x98": ("tilde", "2DC"),
+ b"\x99": ("trade", "2122"),
+ b"\x9a": ("scaron", "161"),
+ b"\x9b": ("rsaquo", "203A"),
+ b"\x9c": ("oelig", "153"),
+ b"\x9d": "?",
+ b"\x9e": ("#x17E", "17E"),
+ b"\x9f": ("Yuml", "178"),
+ }
+
+ #: A parochial partial mapping of ISO-Latin-1 to ASCII. Contains
+ #: horrors like stripping diacritical marks to turn á into a, but also
+ #: contains non-horrors like turning “ into ".
+ #:
+ #: Seriously, don't use this for anything other than removing smart
+ # quotes.
+ #: + #: :meta private: + MS_CHARS_TO_ASCII: Dict[bytes, str] = { + b"\x80": "EUR", + b"\x81": " ", + b"\x82": ",", + b"\x83": "f", + b"\x84": ",,", + b"\x85": "...", + b"\x86": "+", + b"\x87": "++", + b"\x88": "^", + b"\x89": "%", + b"\x8a": "S", + b"\x8b": "<", + b"\x8c": "OE", + b"\x8d": "?", + b"\x8e": "Z", + b"\x8f": "?", + b"\x90": "?", + b"\x91": "'", + b"\x92": "'", + b"\x93": '"', + b"\x94": '"', + b"\x95": "*", + b"\x96": "-", + b"\x97": "--", + b"\x98": "~", + b"\x99": "(TM)", + b"\x9a": "s", + b"\x9b": ">", + b"\x9c": "oe", + b"\x9d": "?", + b"\x9e": "z", + b"\x9f": "Y", + b"\xa0": " ", + b"\xa1": "!", + b"\xa2": "c", + b"\xa3": "GBP", + b"\xa4": "$", # This approximation is especially parochial--this is the + # generic currency symbol. + b"\xa5": "YEN", + b"\xa6": "|", + b"\xa7": "S", + b"\xa8": "..", + b"\xa9": "", + b"\xaa": "(th)", + b"\xab": "<<", + b"\xac": "!", + b"\xad": " ", + b"\xae": "(R)", + b"\xaf": "-", + b"\xb0": "o", + b"\xb1": "+-", + b"\xb2": "2", + b"\xb3": "3", + b"\xb4": "'", + b"\xb5": "u", + b"\xb6": "P", + b"\xb7": "*", + b"\xb8": ",", + b"\xb9": "1", + b"\xba": "(th)", + b"\xbb": ">>", + b"\xbc": "1/4", + b"\xbd": "1/2", + b"\xbe": "3/4", + b"\xbf": "?", + b"\xc0": "A", + b"\xc1": "A", + b"\xc2": "A", + b"\xc3": "A", + b"\xc4": "A", + b"\xc5": "A", + b"\xc6": "AE", + b"\xc7": "C", + b"\xc8": "E", + b"\xc9": "E", + b"\xca": "E", + b"\xcb": "E", + b"\xcc": "I", + b"\xcd": "I", + b"\xce": "I", + b"\xcf": "I", + b"\xd0": "D", + b"\xd1": "N", + b"\xd2": "O", + b"\xd3": "O", + b"\xd4": "O", + b"\xd5": "O", + b"\xd6": "O", + b"\xd7": "*", + b"\xd8": "O", + b"\xd9": "U", + b"\xda": "U", + b"\xdb": "U", + b"\xdc": "U", + b"\xdd": "Y", + b"\xde": "b", + b"\xdf": "B", + b"\xe0": "a", + b"\xe1": "a", + b"\xe2": "a", + b"\xe3": "a", + b"\xe4": "a", + b"\xe5": "a", + b"\xe6": "ae", + b"\xe7": "c", + b"\xe8": "e", + b"\xe9": "e", + b"\xea": "e", + b"\xeb": "e", + b"\xec": "i", + b"\xed": "i", + b"\xee": "i", + b"\xef": "i", + b"\xf0": "o", + b"\xf1": "n", + b"\xf2": "o", + b"\xf3": "o", + b"\xf4": "o", + b"\xf5": "o", + b"\xf6": "o", + b"\xf7": "/", + b"\xf8": "o", + b"\xf9": "u", + b"\xfa": "u", + b"\xfb": "u", + b"\xfc": "u", + b"\xfd": "y", + b"\xfe": "b", + b"\xff": "y", + } + + #: A map used when removing rogue Windows-1252/ISO-8859-1 + #: characters in otherwise UTF-8 documents. + #: + #: Note that \\x81, \\x8d, \\x8f, \\x90, and \\x9d are undefined in + #: Windows-1252. 
+    #:
+    #: :meta hide-value:
+    WINDOWS_1252_TO_UTF8: Dict[int, bytes] = {
+        0x80: b"\xe2\x82\xac",  # €
+        0x82: b"\xe2\x80\x9a",  # ‚
+        0x83: b"\xc6\x92",  # ƒ
+        0x84: b"\xe2\x80\x9e",  # „
+        0x85: b"\xe2\x80\xa6",  # …
+        0x86: b"\xe2\x80\xa0",  # †
+        0x87: b"\xe2\x80\xa1",  # ‡
+        0x88: b"\xcb\x86",  # ˆ
+        0x89: b"\xe2\x80\xb0",  # ‰
+        0x8A: b"\xc5\xa0",  # Š
+        0x8B: b"\xe2\x80\xb9",  # ‹
+        0x8C: b"\xc5\x92",  # Œ
+        0x8E: b"\xc5\xbd",  # Ž
+        0x91: b"\xe2\x80\x98",  # ‘
+        0x92: b"\xe2\x80\x99",  # ’
+        0x93: b"\xe2\x80\x9c",  # “
+        0x94: b"\xe2\x80\x9d",  # ”
+        0x95: b"\xe2\x80\xa2",  # •
+        0x96: b"\xe2\x80\x93",  # –
+        0x97: b"\xe2\x80\x94",  # —
+        0x98: b"\xcb\x9c",  # ˜
+        0x99: b"\xe2\x84\xa2",  # ™
+        0x9A: b"\xc5\xa1",  # š
+        0x9B: b"\xe2\x80\xba",  # ›
+        0x9C: b"\xc5\x93",  # œ
+        0x9E: b"\xc5\xbe",  # ž
+        0x9F: b"\xc5\xb8",  # Ÿ
+        0xA0: b"\xc2\xa0",  # non-breaking space
+        0xA1: b"\xc2\xa1",  # ¡
+        0xA2: b"\xc2\xa2",  # ¢
+        0xA3: b"\xc2\xa3",  # £
+        0xA4: b"\xc2\xa4",  # ¤
+        0xA5: b"\xc2\xa5",  # ¥
+        0xA6: b"\xc2\xa6",  # ¦
+        0xA7: b"\xc2\xa7",  # §
+        0xA8: b"\xc2\xa8",  # ¨
+        0xA9: b"\xc2\xa9",  # ©
+        0xAA: b"\xc2\xaa",  # ª
+        0xAB: b"\xc2\xab",  # «
+        0xAC: b"\xc2\xac",  # ¬
+        0xAD: b"\xc2\xad",  # soft hyphen
+        0xAE: b"\xc2\xae",  # ®
+        0xAF: b"\xc2\xaf",  # ¯
+        0xB0: b"\xc2\xb0",  # °
+        0xB1: b"\xc2\xb1",  # ±
+        0xB2: b"\xc2\xb2",  # ²
+        0xB3: b"\xc2\xb3",  # ³
+        0xB4: b"\xc2\xb4",  # ´
+        0xB5: b"\xc2\xb5",  # µ
+        0xB6: b"\xc2\xb6",  # ¶
+        0xB7: b"\xc2\xb7",  # ·
+        0xB8: b"\xc2\xb8",  # ¸
+        0xB9: b"\xc2\xb9",  # ¹
+        0xBA: b"\xc2\xba",  # º
+        0xBB: b"\xc2\xbb",  # »
+        0xBC: b"\xc2\xbc",  # ¼
+        0xBD: b"\xc2\xbd",  # ½
+        0xBE: b"\xc2\xbe",  # ¾
+        0xBF: b"\xc2\xbf",  # ¿
+        0xC0: b"\xc3\x80",  # À
+        0xC1: b"\xc3\x81",  # Á
+        0xC2: b"\xc3\x82",  # Â
+        0xC3: b"\xc3\x83",  # Ã
+        0xC4: b"\xc3\x84",  # Ä
+        0xC5: b"\xc3\x85",  # Å
+        0xC6: b"\xc3\x86",  # Æ
+        0xC7: b"\xc3\x87",  # Ç
+        0xC8: b"\xc3\x88",  # È
+        0xC9: b"\xc3\x89",  # É
+        0xCA: b"\xc3\x8a",  # Ê
+        0xCB: b"\xc3\x8b",  # Ë
+        0xCC: b"\xc3\x8c",  # Ì
+        0xCD: b"\xc3\x8d",  # Í
+        0xCE: b"\xc3\x8e",  # Î
+        0xCF: b"\xc3\x8f",  # Ï
+        0xD0: b"\xc3\x90",  # Ð
+        0xD1: b"\xc3\x91",  # Ñ
+        0xD2: b"\xc3\x92",  # Ò
+        0xD3: b"\xc3\x93",  # Ó
+        0xD4: b"\xc3\x94",  # Ô
+        0xD5: b"\xc3\x95",  # Õ
+        0xD6: b"\xc3\x96",  # Ö
+        0xD7: b"\xc3\x97",  # ×
+        0xD8: b"\xc3\x98",  # Ø
+        0xD9: b"\xc3\x99",  # Ù
+        0xDA: b"\xc3\x9a",  # Ú
+        0xDB: b"\xc3\x9b",  # Û
+        0xDC: b"\xc3\x9c",  # Ü
+        0xDD: b"\xc3\x9d",  # Ý
+        0xDE: b"\xc3\x9e",  # Þ
+        0xDF: b"\xc3\x9f",  # ß
+        0xE0: b"\xc3\xa0",  # à
+        0xE1: b"\xc3\xa1",  # á
+        0xE2: b"\xc3\xa2",  # â
+        0xE3: b"\xc3\xa3",  # ã
+        0xE4: b"\xc3\xa4",  # ä
+        0xE5: b"\xc3\xa5",  # å
+        0xE6: b"\xc3\xa6",  # æ
+        0xE7: b"\xc3\xa7",  # ç
+        0xE8: b"\xc3\xa8",  # è
+        0xE9: b"\xc3\xa9",  # é
+        0xEA: b"\xc3\xaa",  # ê
+        0xEB: b"\xc3\xab",  # ë
+        0xEC: b"\xc3\xac",  # ì
+        0xED: b"\xc3\xad",  # í
+        0xEE: b"\xc3\xae",  # î
+        0xEF: b"\xc3\xaf",  # ï
+        0xF0: b"\xc3\xb0",  # ð
+        0xF1: b"\xc3\xb1",  # ñ
+        0xF2: b"\xc3\xb2",  # ò
+        0xF3: b"\xc3\xb3",  # ó
+        0xF4: b"\xc3\xb4",  # ô
+        0xF5: b"\xc3\xb5",  # õ
+        0xF6: b"\xc3\xb6",  # ö
+        0xF7: b"\xc3\xb7",  # ÷
+        0xF8: b"\xc3\xb8",  # ø
+        0xF9: b"\xc3\xb9",  # ù
+        0xFA: b"\xc3\xba",  # ú
+        0xFB: b"\xc3\xbb",  # û
+        0xFC: b"\xc3\xbc",  # ü
+        0xFD: b"\xc3\xbd",  # ý
+        0xFE: b"\xc3\xbe",  # þ
+    }
+
+    #: :meta private:
+    MULTIBYTE_MARKERS_AND_SIZES: List[Tuple[int, int, int]] = [
+        (0xC2, 0xDF, 2),  # 2-byte characters start with a byte C2-DF
+        (0xE0, 0xEF, 3),  # 3-byte characters start with E0-EF
+        (0xF0, 0xF4, 4),  # 4-byte characters start with F0-F4
+    ]
+
+    #: :meta private:
+    FIRST_MULTIBYTE_MARKER: int = MULTIBYTE_MARKERS_AND_SIZES[0][0]
+
+
#: :meta private: + LAST_MULTIBYTE_MARKER: int = MULTIBYTE_MARKERS_AND_SIZES[-1][1] + + @classmethod + def detwingle( + cls, + in_bytes: bytes, + main_encoding: _Encoding = "utf8", + embedded_encoding: _Encoding = "windows-1252", + ) -> bytes: + """Fix characters from one encoding embedded in some other encoding. + + Currently the only situation supported is Windows-1252 (or its + subset ISO-8859-1), embedded in UTF-8. + + :param in_bytes: A bytestring that you suspect contains + characters from multiple encodings. Note that this *must* + be a bytestring. If you've already converted the document + to Unicode, you're too late. + :param main_encoding: The primary encoding of ``in_bytes``. + :param embedded_encoding: The encoding that was used to embed characters + in the main document. + :return: A bytestring similar to ``in_bytes``, in which + ``embedded_encoding`` characters have been converted to + their ``main_encoding`` equivalents. + """ + if embedded_encoding.replace("_", "-").lower() not in ( + "windows-1252", + "windows_1252", + ): + raise NotImplementedError( + "Windows-1252 and ISO-8859-1 are the only currently supported " + "embedded encodings." + ) + + if main_encoding.lower() not in ("utf8", "utf-8"): + raise NotImplementedError( + "UTF-8 is the only currently supported main encoding." + ) + + byte_chunks = [] + + chunk_start = 0 + pos = 0 + while pos < len(in_bytes): + byte = in_bytes[pos] + if byte >= cls.FIRST_MULTIBYTE_MARKER and byte <= cls.LAST_MULTIBYTE_MARKER: + # This is the start of a UTF-8 multibyte character. Skip + # to the end. + for start, end, size in cls.MULTIBYTE_MARKERS_AND_SIZES: + if byte >= start and byte <= end: + pos += size + break + elif byte >= 0x80 and byte in cls.WINDOWS_1252_TO_UTF8: + # We found a Windows-1252 character! + # Save the string up to this point as a chunk. + byte_chunks.append(in_bytes[chunk_start:pos]) + + # Now translate the Windows-1252 character into UTF-8 + # and add it as another, one-byte chunk. + byte_chunks.append(cls.WINDOWS_1252_TO_UTF8[byte]) + pos += 1 + chunk_start = pos + else: + # Go on to the next character. + pos += 1 + if chunk_start == 0: + # The string is unchanged. + return in_bytes + else: + # Store the final chunk. + byte_chunks.append(in_bytes[chunk_start:]) + return b"".join(byte_chunks) diff --git a/.venv/lib/python3.12/site-packages/bs4/diagnose.py b/.venv/lib/python3.12/site-packages/bs4/diagnose.py new file mode 100644 index 00000000..e0fbbc46 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/diagnose.py @@ -0,0 +1,268 @@ +"""Diagnostic functions, mainly for use when doing tech support.""" + +# Use of this source code is governed by the MIT license. +__license__ = "MIT" + +import cProfile +from io import BytesIO +from html.parser import HTMLParser +import bs4 +from bs4 import BeautifulSoup, __version__ +from bs4.builder import builder_registry +from typing import ( + Any, + IO, + List, + Optional, + Tuple, + TYPE_CHECKING, +) + +if TYPE_CHECKING: + from bs4._typing import _IncomingMarkup + +import pstats +import random +import tempfile +import time +import traceback +import sys + + +def diagnose(data: "_IncomingMarkup") -> None: + """Diagnostic suite for isolating common problems. + + :param data: Some markup that needs to be explained. + :return: None; diagnostics are printed to standard output. 
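+
+    Example (a sketch; pass any markup string, bytestring, or open
+    filehandle)::
+
+        from bs4.diagnose import diagnose
+        diagnose("<p>Some <b>unclosed markup")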
+    """
+    print(("Diagnostic running on Beautiful Soup %s" % __version__))
+    print(("Python version %s" % sys.version))
+
+    basic_parsers = ["html.parser", "html5lib", "lxml"]
+    # Iterate over a copy of the list, since entries may be removed
+    # from the original as we go.
+    for name in list(basic_parsers):
+        for builder in builder_registry.builders:
+            if name in builder.features:
+                break
+        else:
+            basic_parsers.remove(name)
+            print(
+                ("I noticed that %s is not installed. Installing it may help." % name)
+            )
+
+    if "lxml" in basic_parsers:
+        basic_parsers.append("lxml-xml")
+        try:
+            from lxml import etree
+
+            print(("Found lxml version %s" % ".".join(map(str, etree.LXML_VERSION))))
+        except ImportError:
+            print("lxml is not installed or couldn't be imported.")
+
+    if "html5lib" in basic_parsers:
+        try:
+            import html5lib
+
+            print(("Found html5lib version %s" % html5lib.__version__))
+        except ImportError:
+            print("html5lib is not installed or couldn't be imported.")
+
+    if hasattr(data, "read"):
+        data = data.read()
+
+    for parser in basic_parsers:
+        print(("Trying to parse your markup with %s" % parser))
+        success = False
+        try:
+            soup = BeautifulSoup(data, features=parser)
+            success = True
+        except Exception:
+            print(("%s could not parse the markup." % parser))
+            traceback.print_exc()
+        if success:
+            print(("Here's what %s did with the markup:" % parser))
+            print((soup.prettify()))
+
+        print(("-" * 80))
+
+
+def lxml_trace(data: "_IncomingMarkup", html: bool = True, **kwargs: Any) -> None:
+    """Print out the lxml events that occur during parsing.
+
+    This lets you see how lxml parses a document when no Beautiful
+    Soup code is running. You can use this to determine whether
+    an lxml-specific problem is in Beautiful Soup's lxml tree builders
+    or in lxml itself.
+
+    :param data: Some markup.
+    :param html: If True, markup will be parsed with lxml's HTML parser.
+        If False, lxml's XML parser will be used.
+    """
+    from lxml import etree
+
+    recover = kwargs.pop("recover", True)
+    if isinstance(data, str):
+        data = data.encode("utf8")
+    if isinstance(data, bytes):
+        reader = BytesIO(data)
+    else:
+        # Assume anything that isn't a bytestring is an open file or
+        # other file-like object.
+        reader = data
+    for event, element in etree.iterparse(
+        reader, html=html, recover=recover, **kwargs
+    ):
+        print(("%s, %4s, %s" % (event, element.tag, element.text)))
+
+
+class AnnouncingParser(HTMLParser):
+    """Subclass of HTMLParser that announces parse events, without doing
+    anything else.
+
+    You can use this to get a picture of how html.parser sees a given
+    document. The easiest way to do this is to call `htmlparser_trace`.
+    """
+
+    def _p(self, s: str) -> None:
+        print(s)
+
+    def handle_starttag(
+        self,
+        name: str,
+        attrs: List[Tuple[str, Optional[str]]],
+        handle_empty_element: bool = True,
+    ) -> None:
+        self._p(f"{name} {attrs} START")
+
+    def handle_endtag(self, name: str, check_already_closed: bool = True) -> None:
+        self._p("%s END" % name)
+
+    def handle_data(self, data: str) -> None:
+        self._p("%s DATA" % data)
+
+    def handle_charref(self, name: str) -> None:
+        self._p("%s CHARREF" % name)
+
+    def handle_entityref(self, name: str) -> None:
+        self._p("%s ENTITYREF" % name)
+
+    def handle_comment(self, data: str) -> None:
+        self._p("%s COMMENT" % data)
+
+    def handle_decl(self, data: str) -> None:
+        self._p("%s DECL" % data)
+
+    def unknown_decl(self, data: str) -> None:
+        self._p("%s UNKNOWN-DECL" % data)
+
+    def handle_pi(self, data: str) -> None:
+        self._p("%s PI" % data)
+
+
+def htmlparser_trace(data: str) -> None:
+    """Print out the HTMLParser events that occur during parsing.
+
+    This lets you see how HTMLParser parses a document when no
+    Beautiful Soup code is running.
+
+    :param data: Some markup.
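+
+    Example (a sketch)::
+
+        htmlparser_trace("<p>One <b>two</b></p>")
+        # prints event lines such as "p [] START", "One  DATA", "b [] START"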
+ """ + parser = AnnouncingParser() + parser.feed(data) + + +_vowels: str = "aeiou" +_consonants: str = "bcdfghjklmnpqrstvwxyz" + + +def rword(length: int = 5) -> str: + """Generate a random word-like string. + + :meta private: + """ + s = "" + for i in range(length): + if i % 2 == 0: + t = _consonants + else: + t = _vowels + s += random.choice(t) + return s + + +def rsentence(length: int = 4) -> str: + """Generate a random sentence-like string. + + :meta private: + """ + return " ".join(rword(random.randint(4, 9)) for i in range(length)) + + +def rdoc(num_elements: int = 1000) -> str: + """Randomly generate an invalid HTML document. + + :meta private: + """ + tag_names = ["p", "div", "span", "i", "b", "script", "table"] + elements = [] + for i in range(num_elements): + choice = random.randint(0, 3) + if choice == 0: + # New tag. + tag_name = random.choice(tag_names) + elements.append("<%s>" % tag_name) + elif choice == 1: + elements.append(rsentence(random.randint(1, 4))) + elif choice == 2: + # Close a tag. + tag_name = random.choice(tag_names) + elements.append("</%s>" % tag_name) + return "<html>" + "\n".join(elements) + "</html>" + + +def benchmark_parsers(num_elements: int = 100000) -> None: + """Very basic head-to-head performance benchmark.""" + print(("Comparative parser benchmark on Beautiful Soup %s" % __version__)) + data = rdoc(num_elements) + print(("Generated a large invalid HTML document (%d bytes)." % len(data))) + + for parser_name in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]: + success = False + try: + a = time.time() + BeautifulSoup(data, parser_name) + b = time.time() + success = True + except Exception: + print(("%s could not parse the markup." % parser_name)) + traceback.print_exc() + if success: + print(("BS4+%s parsed the markup in %.2fs." % (parser_name, b - a))) + + from lxml import etree + + a = time.time() + etree.HTML(data) + b = time.time() + print(("Raw lxml parsed the markup in %.2fs." % (b - a))) + + import html5lib + + parser = html5lib.HTMLParser() + a = time.time() + parser.parse(data) + b = time.time() + print(("Raw html5lib parsed the markup in %.2fs." % (b - a))) + + +def profile(num_elements: int = 100000, parser: str = "lxml") -> None: + """Use Python's profiler on a randomly generated document.""" + filehandle = tempfile.NamedTemporaryFile() + filename = filehandle.name + + data = rdoc(num_elements) + vars = dict(bs4=bs4, data=data, parser=parser) + cProfile.runctx("bs4.BeautifulSoup(data, parser)", vars, vars, filename) + + stats = pstats.Stats(filename) + # stats.strip_dirs() + stats.sort_stats("cumulative") + stats.print_stats("_html5lib|bs4", 50) + + +# If this file is run as a script, standard input is diagnosed. +if __name__ == "__main__": + diagnose(sys.stdin.read()) diff --git a/.venv/lib/python3.12/site-packages/bs4/element.py b/.venv/lib/python3.12/site-packages/bs4/element.py new file mode 100644 index 00000000..6276054b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/element.py @@ -0,0 +1,2886 @@ +from __future__ import annotations + +# Use of this source code is governed by the MIT license. 
+__license__ = "MIT" + +import re +import warnings + +from bs4.css import CSS +from bs4._deprecation import ( + _deprecated, + _deprecated_alias, + _deprecated_function_alias, +) +from bs4.formatter import ( + Formatter, + HTMLFormatter, + XMLFormatter, +) +from bs4._warnings import AttributeResemblesVariableWarning + +from typing import ( + Any, + Callable, + Dict, + Generic, + Iterable, + Iterator, + List, + Mapping, + Optional, + Pattern, + Set, + TYPE_CHECKING, + Tuple, + Type, + TypeVar, + Union, + cast, +) +from typing_extensions import ( + Self, + TypeAlias, +) + +if TYPE_CHECKING: + from bs4 import BeautifulSoup + from bs4.builder import TreeBuilder + from bs4.filter import ElementFilter + from bs4.formatter import ( + _EntitySubstitutionFunction, + _FormatterOrName, + ) + from bs4._typing import ( + _AtMostOneElement, + _AttributeValue, + _AttributeValues, + _Encoding, + _InsertableElement, + _OneElement, + _QueryResults, + _RawOrProcessedAttributeValues, + _StrainableElement, + _StrainableAttribute, + _StrainableAttributes, + _StrainableString, + ) + +_OneOrMoreStringTypes: TypeAlias = Union[ + Type["NavigableString"], Iterable[Type["NavigableString"]] +] + +_FindMethodName: TypeAlias = Optional[Union["_StrainableElement", "ElementFilter"]] + +# Deprecated module-level attributes. +# See https://peps.python.org/pep-0562/ +_deprecated_names = dict( + whitespace_re="The {name} attribute was deprecated in version 4.7.0. If you need it, make your own copy." +) +#: :meta private: +_deprecated_whitespace_re: Pattern[str] = re.compile(r"\s+") + + +def __getattr__(name: str) -> Any: + if name in _deprecated_names: + message = _deprecated_names[name] + warnings.warn(message.format(name=name), DeprecationWarning, stacklevel=2) + + return globals()[f"_deprecated_{name}"] + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") + + +#: Documents output by Beautiful Soup will be encoded with +#: this encoding unless you specify otherwise. +DEFAULT_OUTPUT_ENCODING: str = "utf-8" + +#: A regular expression that can be used to split on whitespace. +nonwhitespace_re: Pattern[str] = re.compile(r"\S+") + +#: These encodings are recognized by Python (so `Tag.encode` +#: could theoretically support them) but XML and HTML don't recognize +#: them (so they should not show up in an XML or HTML document as that +#: document's encoding). +#: +#: If an XML document is encoded in one of these encodings, no encoding +#: will be mentioned in the XML declaration. If an HTML document is +#: encoded in one of these encodings, and the HTML document has a +#: <meta> tag that mentions an encoding, the encoding will be given as +#: the empty string. +#: +#: Source: +#: Python documentation, `Python Specific Encodings <https://docs.python.org/3/library/codecs.html#python-specific-encodings>`_ +PYTHON_SPECIFIC_ENCODINGS: Set[_Encoding] = set( + [ + "idna", + "mbcs", + "oem", + "palmos", + "punycode", + "raw_unicode_escape", + "undefined", + "unicode_escape", + "raw-unicode-escape", + "unicode-escape", + "string-escape", + "string_escape", + ] +) + + +class NamespacedAttribute(str): + """A namespaced attribute (e.g. the 'xml:lang' in 'xml:lang="en"') + which remembers the namespace prefix ('xml') and the name ('lang') + that were used to create it. + """ + + prefix: Optional[str] + name: Optional[str] + namespace: Optional[str] + + def __new__( + cls, + prefix: Optional[str], + name: Optional[str] = None, + namespace: Optional[str] = None, + ) -> Self: + if not name: + # This is the default namespace. 
Its name "has no value"
+            # per https://www.w3.org/TR/xml-names/#defaulting
+            name = None
+
+        if not name:
+            obj = str.__new__(cls, prefix)
+        elif not prefix:
+            # Not really namespaced.
+            obj = str.__new__(cls, name)
+        else:
+            obj = str.__new__(cls, prefix + ":" + name)
+        obj.prefix = prefix
+        obj.name = name
+        obj.namespace = namespace
+        return obj
+
+
+class AttributeValueWithCharsetSubstitution(str):
+    """An abstract class standing in for a character encoding specified
+    inside an HTML ``<meta>`` tag.
+
+    Subclasses exist for each place such a character encoding might be
+    found: either inside the ``charset`` attribute
+    (`CharsetMetaAttributeValue`) or inside the ``content`` attribute
+    (`ContentMetaAttributeValue`)
+
+    This allows Beautiful Soup to replace that part of the HTML file
+    with a different encoding when outputting a tree as a string.
+    """
+
+    # The original, un-encoded value of the ``content`` attribute.
+    #: :meta private:
+    original_value: str
+
+    def substitute_encoding(self, eventual_encoding: str) -> str:
+        """Do whatever's necessary in this implementation-specific
+        portion of an HTML document to substitute in a specific encoding.
+        """
+        raise NotImplementedError()
+
+
+class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
+    """A generic stand-in for the value of a ``<meta>`` tag's ``charset``
+    attribute.
+
+    When Beautiful Soup parses the markup ``<meta charset="utf8">``, the
+    value of the ``charset`` attribute will become one of these objects.
+
+    If the document is later encoded to an encoding other than UTF-8, its
+    ``<meta>`` tag will mention the new encoding instead of ``utf8``.
+    """
+
+    def __new__(cls, original_value: str) -> Self:
+        # We don't need to use the original value for anything, but
+        # it might be useful for the user to know.
+        obj = str.__new__(cls, original_value)
+        obj.original_value = original_value
+        return obj
+
+    def substitute_encoding(self, eventual_encoding: _Encoding = "utf-8") -> str:
+        """When an HTML document is being encoded to a given encoding, the
+        value of a ``<meta>`` tag's ``charset`` becomes the name of
+        the encoding.
+        """
+        if eventual_encoding in PYTHON_SPECIFIC_ENCODINGS:
+            return ""
+        return eventual_encoding
+
+
+class AttributeValueList(List[str]):
+    """Class for the list used to hold the values of attributes which
+    have multiple values (such as HTML's 'class'). It's just a regular
+    list, but you can subclass it and pass it in to the TreeBuilder
+    constructor as attribute_value_list_class, to have your subclass
+    instantiated instead.
+    """
+
+
+class AttributeDict(Dict[Any, Any]):
+    """Superclass for the dictionary used to hold a tag's
+    attributes. You can use this, but it's just a regular dict with no
+    special logic.
+    """
+
+
+class XMLAttributeDict(AttributeDict):
+    """A dictionary for holding a Tag's attributes, which processes
+    incoming values for consistency with the XML spec.
+    """
+
+    def __setitem__(self, key: str, value: Any) -> None:
+        """Set an attribute value, possibly modifying it to comply with
+        the XML spec.
+
+        This just means converting common non-string values to
+        strings: XML attributes may have "any literal string as a
+        value."
+        """
+        if value is None:
+            value = ""
+        if isinstance(value, bool):
+            # XML does not define any rules for boolean attributes.
+            # Preserve the old Beautiful Soup behavior (a bool that
+            # gets converted to a string on output) rather than
+            # guessing what the value should be.
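+            # For example (a sketch): after d = XMLAttributeDict() and
+            # d["checked"] = True, d["checked"] is still the bool True;
+            # it only becomes a string when the tree is output.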
+            pass
+        elif isinstance(value, (int, float)):
+            # It's dangerous to convert _every_ attribute value into a
+            # plain string, since an attribute value may be a more
+            # sophisticated string-like object
+            # (e.g. CharsetMetaAttributeValue). But we can definitely
+            # convert numeric values and booleans, which are the most common.
+            value = str(value)
+
+        super().__setitem__(key, value)
+
+
+class HTMLAttributeDict(AttributeDict):
+    """A dictionary for holding a Tag's attributes, which processes
+    incoming values for consistency with the HTML spec, which says
+    'Attribute values are a mixture of text and character
+    references...'
+
+    Basically, this means converting common non-string values into
+    strings, like XMLAttributeDict, though HTML also has some rules
+    around boolean attributes that XML doesn't have.
+    """
+
+    def __setitem__(self, key: str, value: Any) -> None:
+        """Set an attribute value, possibly modifying it to comply
+        with the HTML spec.
+        """
+        # Check identity rather than equality, so that legitimate
+        # falsy values such as the integer 0 (which compares equal to
+        # False) are kept and converted to strings below.
+        if value is False or value is None:
+            # 'The values "true" and "false" are not allowed on
+            # boolean attributes. To represent a false value, the
+            # attribute has to be omitted altogether.'
+            if key in self:
+                del self[key]
+            return
+        if isinstance(value, bool):
+            # 'If the [boolean] attribute is present, its value must
+            # either be the empty string or a value that is an ASCII
+            # case-insensitive match for the attribute's canonical
+            # name, with no leading or trailing whitespace.'
+            #
+            # [fixme] It's not clear to me whether "canonical name"
+            # means fully-qualified name, unqualified name, or
+            # (probably not) name with namespace prefix. For now I'm
+            # going with unqualified name.
+            if isinstance(key, NamespacedAttribute):
+                value = key.name
+            else:
+                value = key
+        elif isinstance(value, (int, float)):
+            # See note in XMLAttributeDict for the reasoning why we
+            # only do this to numbers.
+            value = str(value)
+        super().__setitem__(key, value)
+
+
+class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
+    """A generic stand-in for the value of a ``<meta>`` tag's ``content``
+    attribute.
+
+    When Beautiful Soup parses the markup:
+    ``<meta http-equiv="content-type" content="text/html; charset=utf8">``
+
+    The value of the ``content`` attribute will become one of these objects.
+
+    If the document is later encoded to an encoding other than UTF-8, its
+    ``<meta>`` tag will mention the new encoding instead of ``utf8``.
+    """
+
+    #: Match the 'charset' argument inside the 'content' attribute
+    #: of a <meta> tag.
+    #: :meta private:
+    CHARSET_RE: Pattern[str] = re.compile(r"((^|;)\s*charset=)([^;]*)", re.M)
+
+    def __new__(cls, original_value: str) -> Self:
+        cls.CHARSET_RE.search(original_value)
+        obj = str.__new__(cls, original_value)
+        obj.original_value = original_value
+        return obj
+
+    def substitute_encoding(self, eventual_encoding: _Encoding = "utf-8") -> str:
+        """When an HTML document is being encoded to a given encoding, the
+        value of the ``charset=`` in a ``<meta>`` tag's ``content`` becomes
+        the name of the encoding.
+        """
+        if eventual_encoding in PYTHON_SPECIFIC_ENCODINGS:
+            return self.CHARSET_RE.sub("", self.original_value)
+
+        def rewrite(match: re.Match[str]) -> str:
+            return match.group(1) + eventual_encoding
+
+        return self.CHARSET_RE.sub(rewrite, self.original_value)
+
+
+class PageElement(object):
+    """An abstract class representing a single element in the parse tree.
+
+    `NavigableString`, `Tag`, etc. are all subclasses of
+    `PageElement`. For this reason you'll see a lot of methods that
+    return `PageElement`, but you'll never see an actual `PageElement`
+    object. For the most part you can think of `PageElement` as
+    meaning "a `Tag` or a `NavigableString`."
+    """
+
+    #: In general, we can't tell just by looking at an element whether
+    #: it's contained in an XML document or an HTML document. But for
+    #: `Tag` objects (q.v.) we can store this information at parse time.
+    #: :meta private:
+    known_xml: Optional[bool] = None
+
+    #: Whether or not this element has been decomposed from the tree
+    #: it was created in.
+    _decomposed: bool
+
+    parent: Optional[Tag]
+    next_element: _AtMostOneElement
+    previous_element: _AtMostOneElement
+    next_sibling: _AtMostOneElement
+    previous_sibling: _AtMostOneElement
+
+    #: Whether or not this element is hidden from generated output.
+    #: Only the `BeautifulSoup` object itself is hidden.
+    hidden: bool = False
+
+    def setup(
+        self,
+        parent: Optional[Tag] = None,
+        previous_element: _AtMostOneElement = None,
+        next_element: _AtMostOneElement = None,
+        previous_sibling: _AtMostOneElement = None,
+        next_sibling: _AtMostOneElement = None,
+    ) -> None:
+        """Sets up the initial relations between this element and
+        other elements.
+
+        :param parent: The parent of this element.
+
+        :param previous_element: The element parsed immediately before
+            this one.
+
+        :param next_element: The element parsed immediately after
+            this one.
+
+        :param previous_sibling: The most recently encountered element
+            on the same level of the parse tree as this one.
+
+        :param next_sibling: The next element to be encountered
+            on the same level of the parse tree as this one.
+        """
+        self.parent = parent
+
+        self.previous_element = previous_element
+        if self.previous_element is not None:
+            self.previous_element.next_element = self
+
+        self.next_element = next_element
+        if self.next_element is not None:
+            self.next_element.previous_element = self
+
+        self.next_sibling = next_sibling
+        if self.next_sibling is not None:
+            self.next_sibling.previous_sibling = self
+
+        if (
+            previous_sibling is None
+            and self.parent is not None
+            and self.parent.contents
+        ):
+            previous_sibling = self.parent.contents[-1]
+
+        self.previous_sibling = previous_sibling
+        if self.previous_sibling is not None:
+            self.previous_sibling.next_sibling = self
+
+    def format_string(self, s: str, formatter: Optional[_FormatterOrName]) -> str:
+        """Format the given string using the given formatter.
+
+        :param s: A string.
+        :param formatter: A Formatter object, or a string naming one of the standard formatters.
+        """
+        if formatter is None:
+            return s
+        if not isinstance(formatter, Formatter):
+            formatter = self.formatter_for_name(formatter)
+        output = formatter.substitute(s)
+        return output
+
+    def formatter_for_name(
+        self, formatter_name: Union[_FormatterOrName, _EntitySubstitutionFunction]
+    ) -> Formatter:
+        """Look up or create a Formatter for the given identifier,
+        if necessary.
+
+        :param formatter_name: Can be a `Formatter` object (used as-is), a
+            function (used as the entity substitution hook for an
+            `bs4.formatter.XMLFormatter` or
+            `bs4.formatter.HTMLFormatter`), or a string (used to look
+            up an `bs4.formatter.XMLFormatter` or
+            `bs4.formatter.HTMLFormatter` in the appropriate registry).
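+
+        Example (a sketch)::
+
+            f = tag.formatter_for_name("minimal")  # a standard formatter
+            f = tag.formatter_for_name(lambda s: s.upper())  # custom hook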
+
+        """
+        if isinstance(formatter_name, Formatter):
+            return formatter_name
+        c: type[Formatter]
+        registry: Mapping[Optional[str], Formatter]
+        if self._is_xml:
+            c = XMLFormatter
+            registry = XMLFormatter.REGISTRY
+        else:
+            c = HTMLFormatter
+            registry = HTMLFormatter.REGISTRY
+        if callable(formatter_name):
+            return c(entity_substitution=formatter_name)
+        return registry[formatter_name]
+
+    @property
+    def _is_xml(self) -> bool:
+        """Is this element part of an XML tree or an HTML tree?
+
+        This is used in formatter_for_name, when deciding whether an
+        XMLFormatter or HTMLFormatter is more appropriate. It can be
+        inefficient, but it should be called very rarely.
+        """
+        if self.known_xml is not None:
+            # Most of the time we will have determined this when the
+            # document is parsed.
+            return self.known_xml
+
+        # Otherwise, it's likely that this element was created by
+        # direct invocation of the constructor from within the user's
+        # Python code.
+        if self.parent is None:
+            # This is the top-level object. It should have .known_xml set
+            # from tree creation. If not, take a guess--BS is usually
+            # used on HTML markup.
+            return getattr(self, "is_xml", False)
+        return self.parent._is_xml
+
+    nextSibling = _deprecated_alias("nextSibling", "next_sibling", "4.0.0")
+    previousSibling = _deprecated_alias("previousSibling", "previous_sibling", "4.0.0")
+
+    def __deepcopy__(self, memo: Dict[Any, Any], recursive: bool = False) -> Self:
+        raise NotImplementedError()
+
+    def __copy__(self) -> Self:
+        """A copy of a PageElement can only be a deep copy, because
+        only one PageElement can occupy a given place in a parse tree.
+        """
+        return self.__deepcopy__({})
+
+    default: Iterable[type[NavigableString]] = tuple()  #: :meta private:
+
+    def _all_strings(
+        self, strip: bool = False, types: Iterable[type[NavigableString]] = default
+    ) -> Iterator[str]:
+        """Yield all strings of certain classes, possibly stripping them.
+
+        This is implemented differently in `Tag` and `NavigableString`.
+        """
+        raise NotImplementedError()
+
+    @property
+    def stripped_strings(self) -> Iterator[str]:
+        """Yield all interesting strings in this PageElement, stripping them
+        first.
+
+        See `Tag` for information on which strings are considered
+        interesting in a given context.
+        """
+        for string in self._all_strings(True):
+            yield string
+
+    def get_text(
+        self,
+        separator: str = "",
+        strip: bool = False,
+        types: Iterable[Type[NavigableString]] = default,
+    ) -> str:
+        """Get all child strings of this PageElement, concatenated using the
+        given separator.
+
+        :param separator: Strings will be concatenated using this separator.
+
+        :param strip: If True, strings will be stripped before being
+            concatenated.
+
+        :param types: A tuple of NavigableString subclasses. Any
+            strings of a subclass not found in this list will be
+            ignored. Although there are exceptions, the default
+            behavior in most cases is to consider only NavigableString
+            and CData objects. That means no comments, processing
+            instructions, etc.
+
+        :return: A string.
+        """
+        return separator.join([s for s in self._all_strings(strip, types=types)])
+
+    getText = get_text
+    text = property(get_text)
+
+    def replace_with(self, *args: PageElement) -> Self:
+        """Replace this `PageElement` with one or more other `PageElement`
+        objects, keeping the rest of the tree the same.
+
+        :return: This `PageElement`, no longer part of the tree.
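+
+        Example (a sketch)::
+
+            soup = BeautifulSoup("<p><b>bold</b></p>", "html.parser")
+            soup.b.replace_with(soup.new_tag("i"))
+            # soup now renders as <p><i></i></p>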
+ """ + if self.parent is None: + raise ValueError( + "Cannot replace one element with another when the " + "element to be replaced is not part of a tree." + ) + if len(args) == 1 and args[0] is self: + # Replacing an element with itself is a no-op. + return self + if any(x is self.parent for x in args): + raise ValueError("Cannot replace a Tag with its parent.") + old_parent = self.parent + my_index = self.parent.index(self) + self.extract(_self_index=my_index) + for idx, replace_with in enumerate(args, start=my_index): + old_parent.insert(idx, replace_with) + return self + + replaceWith = _deprecated_function_alias("replaceWith", "replace_with", "4.0.0") + + def wrap(self, wrap_inside: Tag) -> Tag: + """Wrap this `PageElement` inside a `Tag`. + + :return: ``wrap_inside``, occupying the position in the tree that used + to be occupied by this object, and with this object now inside it. + """ + me = self.replace_with(wrap_inside) + wrap_inside.append(me) + return wrap_inside + + def extract(self, _self_index: Optional[int] = None) -> Self: + """Destructively rips this element out of the tree. + + :param _self_index: The location of this element in its parent's + .contents, if known. Passing this in allows for a performance + optimization. + + :return: this `PageElement`, no longer part of the tree. + """ + if self.parent is not None: + if _self_index is None: + _self_index = self.parent.index(self) + del self.parent.contents[_self_index] + + # Find the two elements that would be next to each other if + # this element (and any children) hadn't been parsed. Connect + # the two. + last_child = self._last_descendant() + + # last_child can't be None because we passed accept_self=True + # into _last_descendant. Worst case, last_child will be + # self. Making this cast removes several mypy complaints later + # on as we manipulate last_child. + last_child = cast(PageElement, last_child) + next_element = last_child.next_element + + if self.previous_element is not None: + if self.previous_element is not next_element: + self.previous_element.next_element = next_element + if next_element is not None and next_element is not self.previous_element: + next_element.previous_element = self.previous_element + self.previous_element = None + last_child.next_element = None + + self.parent = None + if ( + self.previous_sibling is not None + and self.previous_sibling is not self.next_sibling + ): + self.previous_sibling.next_sibling = self.next_sibling + if ( + self.next_sibling is not None + and self.next_sibling is not self.previous_sibling + ): + self.next_sibling.previous_sibling = self.previous_sibling + self.previous_sibling = self.next_sibling = None + return self + + def decompose(self) -> None: + """Recursively destroys this `PageElement` and its children. + + The element will be removed from the tree and wiped out; so + will everything beneath it. + + The behavior of a decomposed `PageElement` is undefined and you + should never use one for anything, but if you need to *check* + whether an element has been decomposed, you can use the + `PageElement.decomposed` property. + """ + self.extract() + e: _AtMostOneElement = self + next_up: _AtMostOneElement = None + while e is not None: + next_up = e.next_element + e.__dict__.clear() + if isinstance(e, Tag): + e.contents = [] + e._decomposed = True + e = next_up + + def _last_descendant( + self, is_initialized: bool = True, accept_self: bool = True + ) -> _AtMostOneElement: + """Finds the last element beneath this object to be parsed. 
+
+        Special note to help you figure things out if your type
+        checking is tripped up by the fact that this method returns
+        _AtMostOneElement instead of PageElement: the only time
+        this method returns None is if `accept_self` is False and the
+        `PageElement` has no children--either it's a NavigableString
+        or an empty Tag.
+
+        :param is_initialized: Has `PageElement.setup` been called on
+            this `PageElement` yet?
+
+        :param accept_self: Is ``self`` an acceptable answer to the
+            question?
+        """
+        if is_initialized and self.next_sibling is not None:
+            last_child = self.next_sibling.previous_element
+        else:
+            last_child = self
+            while isinstance(last_child, Tag) and last_child.contents:
+                last_child = last_child.contents[-1]
+        if not accept_self and last_child is self:
+            last_child = None
+        return last_child
+
+    _lastRecursiveChild = _deprecated_alias(
+        "_lastRecursiveChild", "_last_descendant", "4.0.0"
+    )
+
+    def insert_before(self, *args: _InsertableElement) -> List[PageElement]:
+        """Makes the given element(s) the immediate predecessor of this one.
+
+        All the elements will have the same `PageElement.parent` as
+        this one, and the given elements will occur immediately before
+        this one.
+
+        :param args: One or more PageElements.
+
+        :return: The list of PageElements that were inserted.
+        """
+        parent = self.parent
+        if parent is None:
+            raise ValueError("Element has no parent, so 'before' has no meaning.")
+        if any(x is self for x in args):
+            raise ValueError("Can't insert an element before itself.")
+        results: List[PageElement] = []
+        for predecessor in args:
+            # Extract first so that the index won't be screwed up if they
+            # are siblings.
+            if isinstance(predecessor, PageElement):
+                predecessor.extract()
+            index = parent.index(self)
+            results.extend(parent.insert(index, predecessor))
+
+        return results
+
+    def insert_after(self, *args: _InsertableElement) -> List[PageElement]:
+        """Makes the given element(s) the immediate successor of this one.
+
+        The elements will have the same `PageElement.parent` as this
+        one, and the given elements will occur immediately after this
+        one.
+
+        :param args: One or more PageElements.
+
+        :return: The list of PageElements that were inserted.
+        """
+        # Do all error checking before modifying the tree.
+        parent = self.parent
+        if parent is None:
+            raise ValueError("Element has no parent, so 'after' has no meaning.")
+        if any(x is self for x in args):
+            raise ValueError("Can't insert an element after itself.")
+
+        offset = 0
+        results: List[PageElement] = []
+        for successor in args:
+            # Extract first so that the index won't be screwed up if they
+            # are siblings.
+            if isinstance(successor, PageElement):
+                successor.extract()
+            index = parent.index(self)
+            results.extend(parent.insert(index + 1 + offset, successor))
+            offset += 1
+
+        return results
+
+    def find_next(
+        self,
+        name: _FindMethodName = None,
+        attrs: _StrainableAttributes = {},
+        string: Optional[_StrainableString] = None,
+        **kwargs: _StrainableAttribute,
+    ) -> _AtMostOneElement:
+        """Find the first PageElement that matches the given criteria and
+        appears later in the document than this PageElement.
+
+        All find_* methods take a common set of arguments. See the online
+        documentation for detailed explanations.
+
+        :param name: A filter on tag name.
+        :param attrs: Additional filters on attribute values.
+        :param string: A filter for a NavigableString with specific text.
+        :kwargs: Additional filters on attribute values.
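+
+        Example (a sketch)::
+
+            soup = BeautifulSoup("<a>1</a><b>2</b><c>3</c>", "html.parser")
+            soup.a.find_next("c")  # the <c>3</c> tag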
+ """ + return self._find_one(self.find_all_next, name, attrs, string, **kwargs) + + findNext = _deprecated_function_alias("findNext", "find_next", "4.0.0") + + def find_all_next( + self, + name: _FindMethodName = None, + attrs: _StrainableAttributes = {}, + string: Optional[_StrainableString] = None, + limit: Optional[int] = None, + _stacklevel: int = 2, + **kwargs: _StrainableAttribute, + ) -> _QueryResults: + """Find all `PageElement` objects that match the given criteria and + appear later in the document than this `PageElement`. + + All find_* methods take a common set of arguments. See the online + documentation for detailed explanations. + + :param name: A filter on tag name. + :param attrs: Additional filters on attribute values. + :param string: A filter for a NavigableString with specific text. + :param limit: Stop looking after finding this many results. + :param _stacklevel: Used internally to improve warning messages. + :kwargs: Additional filters on attribute values. + """ + return self._find_all( + name, + attrs, + string, + limit, + self.next_elements, + _stacklevel=_stacklevel + 1, + **kwargs, + ) + + findAllNext = _deprecated_function_alias("findAllNext", "find_all_next", "4.0.0") + + def find_next_sibling( + self, + name: _FindMethodName = None, + attrs: _StrainableAttributes = {}, + string: Optional[_StrainableString] = None, + **kwargs: _StrainableAttribute, + ) -> _AtMostOneElement: + """Find the closest sibling to this PageElement that matches the + given criteria and appears later in the document. + + All find_* methods take a common set of arguments. See the + online documentation for detailed explanations. + + :param name: A filter on tag name. + :param attrs: Additional filters on attribute values. + :param string: A filter for a `NavigableString` with specific text. + :kwargs: Additional filters on attribute values. + """ + return self._find_one(self.find_next_siblings, name, attrs, string, **kwargs) + + findNextSibling = _deprecated_function_alias( + "findNextSibling", "find_next_sibling", "4.0.0" + ) + + def find_next_siblings( + self, + name: _FindMethodName = None, + attrs: _StrainableAttributes = {}, + string: Optional[_StrainableString] = None, + limit: Optional[int] = None, + _stacklevel: int = 2, + **kwargs: _StrainableAttribute, + ) -> _QueryResults: + """Find all siblings of this `PageElement` that match the given criteria + and appear later in the document. + + All find_* methods take a common set of arguments. See the online + documentation for detailed explanations. + + :param name: A filter on tag name. + :param attrs: Additional filters on attribute values. + :param string: A filter for a `NavigableString` with specific text. + :param limit: Stop looking after finding this many results. + :param _stacklevel: Used internally to improve warning messages. + :kwargs: Additional filters on attribute values. 
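+
+        Example (a sketch)::
+
+            soup = BeautifulSoup("<p>a</p><p>b</p><p>c</p>", "html.parser")
+            soup.p.find_next_siblings("p")  # [<p>b</p>, <p>c</p>]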
+ """ + return self._find_all( + name, + attrs, + string, + limit, + self.next_siblings, + _stacklevel=_stacklevel + 1, + **kwargs, + ) + + findNextSiblings = _deprecated_function_alias( + "findNextSiblings", "find_next_siblings", "4.0.0" + ) + fetchNextSiblings = _deprecated_function_alias( + "fetchNextSiblings", "find_next_siblings", "3.0.0" + ) + + def find_previous( + self, + name: _FindMethodName = None, + attrs: _StrainableAttributes = {}, + string: Optional[_StrainableString] = None, + **kwargs: _StrainableAttribute, + ) -> _AtMostOneElement: + """Look backwards in the document from this `PageElement` and find the + first `PageElement` that matches the given criteria. + + All find_* methods take a common set of arguments. See the online + documentation for detailed explanations. + + :param name: A filter on tag name. + :param attrs: Additional filters on attribute values. + :param string: A filter for a `NavigableString` with specific text. + :kwargs: Additional filters on attribute values. + """ + return self._find_one(self.find_all_previous, name, attrs, string, **kwargs) + + findPrevious = _deprecated_function_alias("findPrevious", "find_previous", "3.0.0") + + def find_all_previous( + self, + name: _FindMethodName = None, + attrs: _StrainableAttributes = {}, + string: Optional[_StrainableString] = None, + limit: Optional[int] = None, + _stacklevel: int = 2, + **kwargs: _StrainableAttribute, + ) -> _QueryResults: + """Look backwards in the document from this `PageElement` and find all + `PageElement` that match the given criteria. + + All find_* methods take a common set of arguments. See the online + documentation for detailed explanations. + + :param name: A filter on tag name. + :param attrs: Additional filters on attribute values. + :param string: A filter for a `NavigableString` with specific text. + :param limit: Stop looking after finding this many results. + :param _stacklevel: Used internally to improve warning messages. + :kwargs: Additional filters on attribute values. + """ + return self._find_all( + name, + attrs, + string, + limit, + self.previous_elements, + _stacklevel=_stacklevel + 1, + **kwargs, + ) + + findAllPrevious = _deprecated_function_alias( + "findAllPrevious", "find_all_previous", "4.0.0" + ) + fetchAllPrevious = _deprecated_function_alias( + "fetchAllPrevious", "find_all_previous", "3.0.0" + ) + + def find_previous_sibling( + self, + name: _FindMethodName = None, + attrs: _StrainableAttributes = {}, + string: Optional[_StrainableString] = None, + **kwargs: _StrainableAttribute, + ) -> _AtMostOneElement: + """Returns the closest sibling to this `PageElement` that matches the + given criteria and appears earlier in the document. + + All find_* methods take a common set of arguments. See the online + documentation for detailed explanations. + + :param name: A filter on tag name. + :param attrs: Additional filters on attribute values. + :param string: A filter for a `NavigableString` with specific text. + :kwargs: Additional filters on attribute values. 
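+
+        Example (a sketch)::
+
+            soup = BeautifulSoup("<p>a</p><p>b</p><p>c</p>", "html.parser")
+            soup("p")[-1].find_previous_sibling("p")  # <p>b</p>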
+        """
+        return self._find_one(
+            self.find_previous_siblings, name, attrs, string, **kwargs
+        )
+
+    findPreviousSibling = _deprecated_function_alias(
+        "findPreviousSibling", "find_previous_sibling", "4.0.0"
+    )
+
+    def find_previous_siblings(
+        self,
+        name: _FindMethodName = None,
+        attrs: _StrainableAttributes = {},
+        string: Optional[_StrainableString] = None,
+        limit: Optional[int] = None,
+        _stacklevel: int = 2,
+        **kwargs: _StrainableAttribute,
+    ) -> _QueryResults:
+        """Returns all siblings to this PageElement that match the
+        given criteria and appear earlier in the document.
+
+        All find_* methods take a common set of arguments. See the online
+        documentation for detailed explanations.
+
+        :param name: A filter on tag name.
+        :param attrs: Additional filters on attribute values.
+        :param string: A filter for a NavigableString with specific text.
+        :param limit: Stop looking after finding this many results.
+        :param _stacklevel: Used internally to improve warning messages.
+        :kwargs: Additional filters on attribute values.
+        """
+        return self._find_all(
+            name,
+            attrs,
+            string,
+            limit,
+            self.previous_siblings,
+            _stacklevel=_stacklevel + 1,
+            **kwargs,
+        )
+
+    findPreviousSiblings = _deprecated_function_alias(
+        "findPreviousSiblings", "find_previous_siblings", "4.0.0"
+    )
+    fetchPreviousSiblings = _deprecated_function_alias(
+        "fetchPreviousSiblings", "find_previous_siblings", "3.0.0"
+    )
+
+    def find_parent(
+        self,
+        name: _FindMethodName = None,
+        attrs: _StrainableAttributes = {},
+        **kwargs: _StrainableAttribute,
+    ) -> _AtMostOneElement:
+        """Find the closest parent of this PageElement that matches the given
+        criteria.
+
+        All find_* methods take a common set of arguments. See the online
+        documentation for detailed explanations.
+
+        :param name: A filter on tag name.
+        :param attrs: Additional filters on attribute values.
+        :kwargs: Additional filters on attribute values.
+        """
+        # NOTE: We can't use _find_one because findParents takes a different
+        # set of arguments.
+        r = None
+        results = self.find_parents(name, attrs, 1, _stacklevel=3, **kwargs)
+        if results:
+            r = results[0]
+        return r
+
+    findParent = _deprecated_function_alias("findParent", "find_parent", "4.0.0")
+
+    def find_parents(
+        self,
+        name: _FindMethodName = None,
+        attrs: _StrainableAttributes = {},
+        limit: Optional[int] = None,
+        _stacklevel: int = 2,
+        **kwargs: _StrainableAttribute,
+    ) -> _QueryResults:
+        """Find all parents of this `PageElement` that match the given criteria.
+
+        All find_* methods take a common set of arguments. See the online
+        documentation for detailed explanations.
+
+        :param name: A filter on tag name.
+        :param attrs: Additional filters on attribute values.
+        :param limit: Stop looking after finding this many results.
+        :param _stacklevel: Used internally to improve warning messages.
+        :kwargs: Additional filters on attribute values.
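+
+        Example (a sketch)::
+
+            soup = BeautifulSoup("<div><p><b>x</b></p></div>", "html.parser")
+            [t.name for t in soup.b.find_parents()]
+            # ['p', 'div', '[document]']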
+        """
+        iterator = self.parents
+        return self._find_all(
+            name, attrs, None, limit, iterator, _stacklevel=_stacklevel + 1, **kwargs
+        )
+
+    findParents = _deprecated_function_alias("findParents", "find_parents", "4.0.0")
+    fetchParents = _deprecated_function_alias("fetchParents", "find_parents", "3.0.0")
+
+    @property
+    def next(self) -> _AtMostOneElement:
+        """The `PageElement`, if any, that was parsed just after this one."""
+        return self.next_element
+
+    @property
+    def previous(self) -> _AtMostOneElement:
+        """The `PageElement`, if any, that was parsed just before this one."""
+        return self.previous_element
+
+    # These methods do the real heavy lifting.
+
+    def _find_one(
+        self,
+        # TODO-TYPING: "There is no syntax to indicate optional or
+        # keyword arguments; such function types are rarely used
+        # as callback types." - So, not sure how to get more
+        # specific here.
+        method: Callable,
+        name: _FindMethodName,
+        attrs: _StrainableAttributes,
+        string: Optional[_StrainableString],
+        **kwargs: _StrainableAttribute,
+    ) -> _AtMostOneElement:
+        r: _AtMostOneElement = None
+        results: _QueryResults = method(name, attrs, string, 1, _stacklevel=4, **kwargs)
+        if results:
+            r = results[0]
+        return r
+
+    def _find_all(
+        self,
+        name: _FindMethodName,
+        attrs: _StrainableAttributes,
+        string: Optional[_StrainableString],
+        limit: Optional[int],
+        generator: Iterator[PageElement],
+        _stacklevel: int = 3,
+        **kwargs: _StrainableAttribute,
+    ) -> _QueryResults:
+        """Iterates over a generator looking for things that match."""
+
+        if string is None and "text" in kwargs:
+            string = kwargs.pop("text")
+            warnings.warn(
+                "The 'text' argument to find()-type methods is deprecated. Use 'string' instead.",
+                DeprecationWarning,
+                stacklevel=_stacklevel,
+            )
+
+        if "_class" in kwargs:
+            warnings.warn(
+                AttributeResemblesVariableWarning.MESSAGE
+                % dict(
+                    original="_class",
+                    autocorrect="class_",
+                ),
+                AttributeResemblesVariableWarning,
+                stacklevel=_stacklevel,
+            )
+
+        # SoupStrainer is imported here, rather than at module level,
+        # because bs4.filter itself imports from this module.
+        from bs4.filter import ElementFilter, SoupStrainer
+
+        if isinstance(name, ElementFilter):
+            matcher = name
+        else:
+            matcher = SoupStrainer(name, attrs, string, **kwargs)
+
+        result: Iterable[_OneElement]
+        if string is None and not limit and not attrs and not kwargs:
+            if name is True or name is None:
+                # Optimization to find all tags.
+                result = (element for element in generator if isinstance(element, Tag))
+                return ResultSet(matcher, result)
+            elif isinstance(name, str):
+                # Optimization to find all tags with a given name.
+                if name.count(":") == 1:
+                    # This is a name with a prefix. If this is a namespace-aware document,
+                    # we need to match the local name against tag.name. If not,
+                    # we need to match the fully-qualified name against tag.name.
+                    prefix, local_name = name.split(":", 1)
+                else:
+                    prefix = None
+                    local_name = name
+                result = []
+                for element in generator:
+                    if not isinstance(element, Tag):
+                        continue
+                    if element.name == name or (
+                        element.name == local_name
+                        and (prefix is None or element.prefix == prefix)
+                    ):
+                        result.append(element)
+                return ResultSet(matcher, result)
+        return matcher.find_all(generator, limit)
+
+    # These generators can be used to navigate starting from both
+    # NavigableStrings and Tags.
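+    # For example (a sketch): given soup = BeautifulSoup("<a>1</a><b>2</b>",
+    # "html.parser"), list(soup.a.next_elements) yields the string "1",
+    # then the <b> tag, then the string "2".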
+ @property + def next_elements(self) -> Iterator[PageElement]: + """All PageElements that were parsed after this one.""" + i = self.next_element + while i is not None: + successor = i.next_element + yield i + i = successor + + @property + def self_and_next_elements(self) -> Iterator[PageElement]: + """This PageElement, then all PageElements that were parsed after it.""" + return self._self_and(self.next_elements) + + @property + def next_siblings(self) -> Iterator[PageElement]: + """All PageElements that are siblings of this one but were parsed + later. + """ + i = self.next_sibling + while i is not None: + successor = i.next_sibling + yield i + i = successor + + @property + def self_and_next_siblings(self) -> Iterator[PageElement]: + """This PageElement, then all of its siblings.""" + return self._self_and(self.next_siblings) + + @property + def previous_elements(self) -> Iterator[PageElement]: + """All PageElements that were parsed before this one. + + :yield: A sequence of PageElements. + """ + i = self.previous_element + while i is not None: + successor = i.previous_element + yield i + i = successor + + @property + def self_and_previous_elements(self) -> Iterator[PageElement]: + """This PageElement, then all elements that were parsed + earlier.""" + return self._self_and(self.previous_elements) + + @property + def previous_siblings(self) -> Iterator[PageElement]: + """All PageElements that are siblings of this one but were parsed + earlier. + + :yield: A sequence of PageElements. + """ + i = self.previous_sibling + while i is not None: + successor = i.previous_sibling + yield i + i = successor + + @property + def self_and_previous_siblings(self) -> Iterator[PageElement]: + """This PageElement, then all of its siblings that were parsed + earlier.""" + return self._self_and(self.previous_siblings) + + @property + def parents(self) -> Iterator[Tag]: + """All elements that are parents of this PageElement. + + :yield: A sequence of Tags, ending with a BeautifulSoup object. + """ + i = self.parent + while i is not None: + successor = i.parent + yield i + i = successor + + @property + def self_and_parents(self) -> Iterator[PageElement]: + """This element, then all of its parents. + + :yield: A sequence of PageElements, ending with a BeautifulSoup object. + """ + return self._self_and(self.parents) + + def _self_and(self, other_generator:Iterator[PageElement]) -> Iterator[PageElement]: + """Modify a generator by yielding this element, then everything + yielded by the other generator. 
+ """ + if not self.hidden: + yield self + for i in other_generator: + yield i + + @property + def decomposed(self) -> bool: + """Check whether a PageElement has been decomposed.""" + return getattr(self, "_decomposed", False) or False + + @_deprecated("next_elements", "4.0.0") + def nextGenerator(self) -> Iterator[PageElement]: + ":meta private:" + return self.next_elements + + @_deprecated("next_siblings", "4.0.0") + def nextSiblingGenerator(self) -> Iterator[PageElement]: + ":meta private:" + return self.next_siblings + + @_deprecated("previous_elements", "4.0.0") + def previousGenerator(self) -> Iterator[PageElement]: + ":meta private:" + return self.previous_elements + + @_deprecated("previous_siblings", "4.0.0") + def previousSiblingGenerator(self) -> Iterator[PageElement]: + ":meta private:" + return self.previous_siblings + + @_deprecated("parents", "4.0.0") + def parentGenerator(self) -> Iterator[PageElement]: + ":meta private:" + return self.parents + + +class NavigableString(str, PageElement): + """A Python string that is part of a parse tree. + + When Beautiful Soup parses the markup ``<b>penguin</b>``, it will + create a `NavigableString` for the string "penguin". + """ + + #: A string prepended to the body of the 'real' string + #: when formatting it as part of a document, such as the '<!--' + #: in an HTML comment. + PREFIX: str = "" + + #: A string appended to the body of the 'real' string + #: when formatting it as part of a document, such as the '-->' + #: in an HTML comment. + SUFFIX: str = "" + + def __new__(cls, value: Union[str, bytes]) -> Self: + """Create a new NavigableString. + + When unpickling a NavigableString, this method is called with + the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be + passed in to the superclass's __new__ or the superclass won't know + how to handle non-ASCII characters. + """ + if isinstance(value, str): + u = str.__new__(cls, value) + else: + u = str.__new__(cls, value, DEFAULT_OUTPUT_ENCODING) + u.hidden = False + u.setup() + return u + + def __deepcopy__(self, memo: Dict[Any, Any], recursive: bool = False) -> Self: + """A copy of a NavigableString has the same contents and class + as the original, but it is not connected to the parse tree. + + :param recursive: This parameter is ignored; it's only defined + so that NavigableString.__deepcopy__ implements the same + signature as Tag.__deepcopy__. + """ + return type(self)(self) + + def __getnewargs__(self) -> Tuple[str]: + return (str(self),) + + @property + def string(self) -> str: + """Convenience property defined to match `Tag.string`. + + :return: This property always returns the `NavigableString` it was + called on. + + :meta private: + """ + return self + + def output_ready(self, formatter: _FormatterOrName = "minimal") -> str: + """Run the string through the provided formatter, making it + ready for output as part of an HTML or XML document. + + :param formatter: A `Formatter` object, or a string naming one + of the standard formatters. + """ + output = self.format_string(self, formatter) + return self.PREFIX + output + self.SUFFIX + + @property + def name(self) -> None: + """Since a NavigableString is not a Tag, it has no .name. + + This property is implemented so that code like this doesn't crash + when run on a mixture of Tag and NavigableString objects: + [x.name for x in tag.children] + + :meta private: + """ + return None + + @name.setter + def name(self, name: str) -> None: + """Prevent NavigableString.name from ever being set. 
+
+        :meta private:
+        """
+        raise AttributeError("A NavigableString cannot be given a name.")
+
+    def _all_strings(
+        self, strip: bool = False, types: _OneOrMoreStringTypes = PageElement.default
+    ) -> Iterator[str]:
+        """Yield all strings of certain classes, possibly stripping them.
+
+        This makes it easy for NavigableString to implement methods
+        like get_text() as conveniences, creating a consistent
+        text-extraction API across all PageElements.
+
+        :param strip: If True, all strings will be stripped before being
+            yielded.
+
+        :param types: A tuple of NavigableString subclasses. If this
+            NavigableString isn't one of those subclasses, the
+            sequence will be empty. By default, the subclasses
+            considered are NavigableString and CData objects. That
+            means no comments, processing instructions, etc.
+
+        :yield: A sequence that either contains this string, or is empty.
+        """
+        if types is self.default:
+            # This is kept in Tag because it's full of subclasses of
+            # this class, which aren't defined until later in the file.
+            types = Tag.MAIN_CONTENT_STRING_TYPES
+
+        # Do nothing if the caller is looking for specific types of
+        # string, and we're of a different type.
+        #
+        # We check specific types instead of using isinstance(self,
+        # types) because all of these classes subclass
+        # NavigableString. Anyone who's using this feature probably
+        # wants generic NavigableStrings but not other stuff.
+        my_type = type(self)
+        if types is not None:
+            if isinstance(types, type):
+                # Looking for a single type.
+                if my_type is not types:
+                    return
+            elif my_type not in types:
+                # Looking for one of a list of types.
+                return
+
+        final_value = self.strip() if strip else self
+        if len(final_value) > 0:
+            yield final_value
+
+    @property
+    def strings(self) -> Iterator[str]:
+        """Yield this string, but only if it is interesting.
+
+        This is defined the way it is for compatibility with
+        `Tag.strings`. See `Tag` for information on which strings are
+        interesting in a given context.
+
+        :yield: A sequence that either contains this string, or is empty.
+        """
+        return self._all_strings()
+
+
+class PreformattedString(NavigableString):
+    """A `NavigableString` not subject to the normal formatting rules.
+
+    This is an abstract class used for special kinds of strings such
+    as comments (`Comment`) and CDATA blocks (`CData`).
+    """
+
+    PREFIX: str = ""
+    SUFFIX: str = ""
+
+    def output_ready(self, formatter: Optional[_FormatterOrName] = None) -> str:
+        """Make this string ready for output by adding any subclass-specific
+        prefix or suffix.
+
+        :param formatter: A `Formatter` object, or a string naming one
+            of the standard formatters. The string will be passed into the
+            `Formatter`, but only to trigger any side effects: the return
+            value is ignored.
+
+        :return: The string, with any subclass-specific prefix and
+            suffix added on.
+        """
+        if formatter is not None:
+            self.format_string(self, formatter)
+        return self.PREFIX + self + self.SUFFIX
+
+
+class CData(PreformattedString):
+    """A `CDATA section <https://dev.w3.org/html5/spec-LC/syntax.html#cdata-sections>`_."""
+
+    PREFIX: str = "<![CDATA["
+    SUFFIX: str = "]]>"
+
+
+class ProcessingInstruction(PreformattedString):
+    """An SGML processing instruction."""
+
+    PREFIX: str = "<?"
+    SUFFIX: str = ">"
+
+
+class XMLProcessingInstruction(ProcessingInstruction):
+    """An `XML processing instruction <https://www.w3.org/TR/REC-xml/#sec-pi>`_."""
+
+    PREFIX: str = "<?"
+    SUFFIX: str = "?>"
+
+
+class Comment(PreformattedString):
+    """An `HTML comment <https://dev.w3.org/html5/spec-LC/syntax.html#comments>`_ or `XML comment <https://www.w3.org/TR/REC-xml/#sec-comments>`_."""
+
+    PREFIX: str = "<!--"
+    SUFFIX: str = "-->"
+
+
+class Declaration(PreformattedString):
+    """An `XML declaration <https://www.w3.org/TR/REC-xml/#sec-prolog-dtd>`_."""
+
+    PREFIX: str = "<?"
+    SUFFIX: str = "?>"
+
+
+class Doctype(PreformattedString):
+    """A `document type declaration <https://www.w3.org/TR/REC-xml/#dt-doctype>`_."""
+
+    @classmethod
+    def for_name_and_ids(
+        cls, name: str, pub_id: Optional[str], system_id: Optional[str]
+    ) -> Doctype:
+        """Generate an appropriate document type declaration for a given
+        public ID and system ID.
+
+        :param name: The name of the document's root element, e.g. 'html'.
+        :param pub_id: The Formal Public Identifier for this document type,
+            e.g. '-//W3C//DTD XHTML 1.1//EN'
+        :param system_id: The system identifier for this document type,
+            e.g. 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'
+        """
+        return Doctype(cls._string_for_name_and_ids(name, pub_id, system_id))
+
+    @classmethod
+    def _string_for_name_and_ids(
+        cls, name: str, pub_id: Optional[str], system_id: Optional[str]
+    ) -> str:
+        """Generate a string to be used as the basis of a Doctype object.
+
+        This is a separate method from for_name_and_ids() because the lxml
+        TreeBuilder needs to call it.
+        """
+        value = name or ""
+        if pub_id is not None:
+            value += ' PUBLIC "%s"' % pub_id
+            if system_id is not None:
+                value += ' "%s"' % system_id
+        elif system_id is not None:
+            value += ' SYSTEM "%s"' % system_id
+        return value
+
+    PREFIX: str = "<!DOCTYPE "
+    SUFFIX: str = ">\n"
+
+
+class Stylesheet(NavigableString):
+    """A `NavigableString` representing the contents of a `<style> HTML
+    tag <https://dev.w3.org/html5/spec-LC/Overview.html#the-style-element>`_
+    (probably CSS).
+
+    Used to distinguish embedded stylesheets from textual content.
+    """
+
+
+class Script(NavigableString):
+    """A `NavigableString` representing the contents of a `<script>
+    HTML tag
+    <https://dev.w3.org/html5/spec-LC/Overview.html#the-script-element>`_
+    (probably Javascript).
+
+    Used to distinguish executable code from textual content.
+    """
+
+
+class TemplateString(NavigableString):
+    """A `NavigableString` representing a string found inside an `HTML
+    <template> tag <https://html.spec.whatwg.org/multipage/scripting.html#the-template-element>`_
+    embedded in a larger document.
+
+    Used to distinguish such strings from the main body of the document.
+    """
+
+
+class RubyTextString(NavigableString):
+    """A NavigableString representing the contents of an `<rt> HTML
+    tag <https://dev.w3.org/html5/spec-LC/text-level-semantics.html#the-rt-element>`_.
+
+    Can be used to distinguish such strings from the strings they're
+    annotating.
+    """
+
+
+class RubyParenthesisString(NavigableString):
+    """A NavigableString representing the contents of an `<rp> HTML
+    tag <https://dev.w3.org/html5/spec-LC/text-level-semantics.html#the-rp-element>`_.
+    """
+
+
+class Tag(PageElement):
+    """An HTML or XML tag that is part of a parse tree, along with its
+    attributes, contents, and relationships to other parts of the tree.
+
+    When Beautiful Soup parses the markup ``<b>penguin</b>``, it will
+    create a `Tag` object representing the ``<b>`` tag. You can
+    instantiate `Tag` objects directly, but it's not necessary unless
+    you're adding entirely new markup to a parsed document.
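+
+    A minimal sketch of direct instantiation (``new_tag`` is an
+    illustrative name, not part of the API)::
+
+        new_tag = Tag(name="b")
+        new_tag.string = "penguin"
+        # str(new_tag) == "<b>penguin</b>"
+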
Most of + the constructor arguments are intended for use by the `TreeBuilder` + that's parsing a document. + + :param parser: A `BeautifulSoup` object representing the parse tree this + `Tag` will be part of. + :param builder: The `TreeBuilder` being used to build the tree. + :param name: The name of the tag. + :param namespace: The URI of this tag's XML namespace, if any. + :param prefix: The prefix for this tag's XML namespace, if any. + :param attrs: A dictionary of attribute values. + :param parent: The `Tag` to use as the parent of this `Tag`. May be + the `BeautifulSoup` object itself. + :param previous: The `PageElement` that was parsed immediately before + parsing this tag. + :param is_xml: If True, this is an XML tag. Otherwise, this is an + HTML tag. + :param sourceline: The line number where this tag was found in its + source document. + :param sourcepos: The character position within ``sourceline`` where this + tag was found. + :param can_be_empty_element: If True, this tag should be + represented as <tag/>. If False, this tag should be represented + as <tag></tag>. + :param cdata_list_attributes: A dictionary of attributes whose values should + be parsed as lists of strings if they ever show up on this tag. + :param preserve_whitespace_tags: Names of tags whose contents + should have their whitespace preserved if they are encountered inside + this tag. + :param interesting_string_types: When iterating over this tag's + string contents in methods like `Tag.strings` or + `PageElement.get_text`, these are the types of strings that are + interesting enough to be considered. By default, + `NavigableString` (normal strings) and `CData` (CDATA + sections) are the only interesting string subtypes. + :param namespaces: A dictionary mapping currently active + namespace prefixes to URIs, as of the point in the parsing process when + this tag was encountered. This can be used later to + construct CSS selectors. + + """ + + def __init__( + self, + parser: Optional[BeautifulSoup] = None, + builder: Optional[TreeBuilder] = None, + name: Optional[str] = None, + namespace: Optional[str] = None, + prefix: Optional[str] = None, + attrs: Optional[_RawOrProcessedAttributeValues] = None, + parent: Optional[Union[BeautifulSoup, Tag]] = None, + previous: _AtMostOneElement = None, + is_xml: Optional[bool] = None, + sourceline: Optional[int] = None, + sourcepos: Optional[int] = None, + can_be_empty_element: Optional[bool] = None, + cdata_list_attributes: Optional[Dict[str, Set[str]]] = None, + preserve_whitespace_tags: Optional[Set[str]] = None, + interesting_string_types: Optional[Set[Type[NavigableString]]] = None, + namespaces: Optional[Dict[str, str]] = None, + # NOTE: Any new arguments here need to be mirrored in + # Tag.copy_self, and potentially BeautifulSoup.new_tag + # as well. + ): + if parser is None: + self.parser_class = None + else: + # We don't actually store the parser object: that lets extracted + # chunks be garbage-collected. 
+ self.parser_class = parser.__class__ + if name is None: + raise ValueError("No value provided for new tag's name.") + self.name = name + self.namespace = namespace + self._namespaces = namespaces or {} + self.prefix = prefix + if (not builder or builder.store_line_numbers) and ( + sourceline is not None or sourcepos is not None + ): + self.sourceline = sourceline + self.sourcepos = sourcepos + else: + self.sourceline = sourceline + self.sourcepos = sourcepos + + attr_dict_class: type[AttributeDict] + attribute_value_list_class: type[AttributeValueList] + if builder is None: + if is_xml: + attr_dict_class = XMLAttributeDict + else: + attr_dict_class = HTMLAttributeDict + attribute_value_list_class = AttributeValueList + else: + attr_dict_class = builder.attribute_dict_class + attribute_value_list_class = builder.attribute_value_list_class + self.attribute_value_list_class = attribute_value_list_class + + if attrs is None: + self.attrs = attr_dict_class() + else: + if builder is not None and builder.cdata_list_attributes: + self.attrs = builder._replace_cdata_list_attribute_values( + self.name, attrs + ) + else: + self.attrs = attr_dict_class() + # Make sure that the values of any multi-valued + # attributes (e.g. when a Tag is copied) are stored in + # new lists. + for k, v in attrs.items(): + if isinstance(v, list): + v = v.__class__(v) + self.attrs[k] = v + + # If possible, determine ahead of time whether this tag is an + # XML tag. + if builder: + self.known_xml = builder.is_xml + else: + self.known_xml = is_xml + self.contents: List[PageElement] = [] + self.setup(parent, previous) + self.hidden = False + + if builder is None: + # In the absence of a TreeBuilder, use whatever values were + # passed in here. They're probably None, unless this is a copy of some + # other tag. + self.can_be_empty_element = can_be_empty_element + self.cdata_list_attributes = cdata_list_attributes + self.preserve_whitespace_tags = preserve_whitespace_tags + self.interesting_string_types = interesting_string_types + else: + # Set up any substitutions for this tag, such as the charset in a META tag. + self.attribute_value_list_class = builder.attribute_value_list_class + builder.set_up_substitutions(self) + + # Ask the TreeBuilder whether this tag might be an empty-element tag. + self.can_be_empty_element = builder.can_be_empty_element(name) + + # Keep track of the list of attributes of this tag that + # might need to be treated as a list. + # + # For performance reasons, we store the whole data structure + # rather than asking the question of every tag. Asking would + # require building a new data structure every time, and + # (unlike can_be_empty_element), we almost never need + # to check this. + self.cdata_list_attributes = builder.cdata_list_attributes + + # Keep track of the names that might cause this tag to be treated as a + # whitespace-preserved tag. + self.preserve_whitespace_tags = builder.preserve_whitespace_tags + + if self.name in builder.string_containers: + # This sort of tag uses a special string container + # subclass for most of its strings. We need to be able + # to look up the proper container subclass. 
+                self.interesting_string_types = {builder.string_containers[self.name]}
+            else:
+                self.interesting_string_types = self.MAIN_CONTENT_STRING_TYPES
+
+    parser_class: Optional[type[BeautifulSoup]]
+    name: str
+    namespace: Optional[str]
+    prefix: Optional[str]
+    attrs: _AttributeValues
+    sourceline: Optional[int]
+    sourcepos: Optional[int]
+    known_xml: Optional[bool]
+    contents: List[PageElement]
+    hidden: bool
+    interesting_string_types: Optional[Set[Type[NavigableString]]]
+
+    can_be_empty_element: Optional[bool]
+    cdata_list_attributes: Optional[Dict[str, Set[str]]]
+    preserve_whitespace_tags: Optional[Set[str]]
+
+    #: :meta private:
+    parserClass = _deprecated_alias("parserClass", "parser_class", "4.0.0")
+
+    def __deepcopy__(self, memo: Dict[Any, Any], recursive: bool = True) -> Self:
+        """A deepcopy of a Tag is a new Tag, unconnected to the parse tree.
+        Its contents are a copy of the old Tag's contents.
+        """
+        clone = self.copy_self()
+
+        if recursive:
+            # Clone this tag's descendants recursively, but without
+            # making any recursive function calls.
+            tag_stack: List[Tag] = [clone]
+            for event, element in self._event_stream(self.descendants):
+                if event is Tag.END_ELEMENT_EVENT:
+                    # Stop appending incoming Tags to the Tag that was
+                    # just closed.
+                    tag_stack.pop()
+                else:
+                    descendant_clone = element.__deepcopy__(memo, recursive=False)
+                    # Add to its parent's .contents
+                    tag_stack[-1].append(descendant_clone)
+
+                    if event is Tag.START_ELEMENT_EVENT:
+                        # Add the Tag itself to the stack so that its
+                        # children will be .appended to it.
+                        tag_stack.append(cast(Tag, descendant_clone))
+        return clone
+
+    def copy_self(self) -> Self:
+        """Create a new Tag just like this one, but with no
+        contents and unattached to any parse tree.
+
+        This is the first step in the deepcopy process, but you can
+        call it on its own to create a copy of a Tag without copying its
+        contents.
+        """
+        clone = type(self)(
+            None,
+            None,
+            self.name,
+            self.namespace,
+            self.prefix,
+            self.attrs,
+            is_xml=self._is_xml,
+            sourceline=self.sourceline,
+            sourcepos=self.sourcepos,
+            can_be_empty_element=self.can_be_empty_element,
+            cdata_list_attributes=self.cdata_list_attributes,
+            preserve_whitespace_tags=self.preserve_whitespace_tags,
+            interesting_string_types=self.interesting_string_types,
+            namespaces=self._namespaces,
+        )
+        for attr in ("can_be_empty_element", "hidden"):
+            setattr(clone, attr, getattr(self, attr))
+        return clone
+
+    @property
+    def is_empty_element(self) -> bool:
+        """Is this tag an empty-element tag? (aka a self-closing tag)
+
+        A tag that has contents is never an empty-element tag.
+
+        A tag that has no contents may or may not be an empty-element
+        tag. It depends on the `TreeBuilder` used to create the
+        tag. If the builder has a designated list of empty-element
+        tags, then only a tag whose name shows up in that list is
+        considered an empty-element tag. This is usually the case
+        for HTML documents.
+
+        If the builder has no designated list of empty-element tags, then
+        any tag with no contents is an empty-element tag. This is usually
+        the case for XML documents.
+        """
+        return len(self.contents) == 0 and self.can_be_empty_element is True
+
+    @_deprecated("is_empty_element", "4.0.0")
+    def isSelfClosing(self) -> bool:
+        ": :meta private:"
+        return self.is_empty_element
+
+    @property
+    def string(self) -> Optional[str]:
+        """Convenience property to get the single string within this
+        `Tag`, assuming there is just one.
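+
+        A sketch of the common cases::
+
+            BeautifulSoup("<b>penguin</b>", "html.parser").b.string    # "penguin"
+            BeautifulSoup("<b>x<i>y</i></b>", "html.parser").b.string  # None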
+
+        :return: If this `Tag` has a single child that's a
+            `NavigableString`, the return value is that string. If this
+            element has one child `Tag`, the return value is that child's
+            `Tag.string`, recursively. If this `Tag` has no children,
+            or has more than one child, the return value is ``None``.
+
+            If this property is unexpectedly returning ``None`` for you,
+            it's probably because your `Tag` has more than one thing
+            inside it.
+        """
+        if len(self.contents) != 1:
+            return None
+        child = self.contents[0]
+        if isinstance(child, NavigableString):
+            return child
+        elif isinstance(child, Tag):
+            return child.string
+        return None
+
+    @string.setter
+    def string(self, string: str) -> None:
+        """Replace the `Tag.contents` of this `Tag` with a single string."""
+        self.clear()
+        if isinstance(string, NavigableString):
+            new_class = string.__class__
+        else:
+            new_class = NavigableString
+        self.append(new_class(string))
+
+    #: :meta private:
+    MAIN_CONTENT_STRING_TYPES = {NavigableString, CData}
+
+    def _all_strings(
+        self, strip: bool = False, types: _OneOrMoreStringTypes = PageElement.default
+    ) -> Iterator[str]:
+        """Yield all strings of certain classes, possibly stripping them.
+
+        :param strip: If True, all strings will be stripped before being
+            yielded.
+
+        :param types: A tuple of NavigableString subclasses. Any strings of
+            a subclass not found in this list will be ignored. By
+            default, the subclasses considered are the ones found in
+            self.interesting_string_types. If that's not specified,
+            only NavigableString and CData objects will be
+            considered. That means no comments, processing
+            instructions, etc.
+        """
+        if types is self.default:
+            if self.interesting_string_types is None:
+                types = self.MAIN_CONTENT_STRING_TYPES
+            else:
+                types = self.interesting_string_types
+
+        for descendant in self.descendants:
+            if not isinstance(descendant, NavigableString):
+                continue
+            descendant_type = type(descendant)
+            if isinstance(types, type):
+                if descendant_type is not types:
+                    # We're not interested in strings of this type.
+                    continue
+            elif types is not None and descendant_type not in types:
+                # We're not interested in strings of this type.
+                continue
+            if strip:
+                stripped = descendant.strip()
+                if len(stripped) == 0:
+                    continue
+                yield stripped
+            else:
+                yield descendant
+
+    strings = property(_all_strings)
+
+    def insert(self, position: int, *new_children: _InsertableElement) -> List[PageElement]:
+        """Insert one or more new PageElements as a child of this `Tag`.
+
+        This works similarly to :py:meth:`list.insert`, except you can insert
+        multiple elements at once.
+
+        :param position: The numeric position that should be occupied
+            in this Tag's `Tag.children` by the first new `PageElement`.
+
+        :param new_children: The PageElements to insert.
+
+        :return: The newly inserted PageElements.
+        """
+        inserted: List[PageElement] = []
+        for new_child in new_children:
+            inserted.extend(self._insert(position, new_child))
+            position += 1
+        return inserted
+
+    def _insert(self, position: int, new_child: _InsertableElement) -> List[PageElement]:
+        if new_child is None:
+            raise ValueError("Cannot insert None into a tag.")
+        if new_child is self:
+            raise ValueError("Cannot insert a tag into itself.")
+        if isinstance(new_child, str) and not isinstance(new_child, NavigableString):
+            new_child = NavigableString(new_child)
+
+        from bs4 import BeautifulSoup
+        if isinstance(new_child, BeautifulSoup):
+            # We don't want to end up with a situation where one BeautifulSoup
+            # object contains another.
Insert the BeautifulSoup's children and + # return them. + return self.insert(position, *list(new_child.contents)) + position = min(position, len(self.contents)) + if hasattr(new_child, "parent") and new_child.parent is not None: + # We're 'inserting' an element that's already one + # of this object's children. + if new_child.parent is self: + current_index = self.index(new_child) + if current_index < position: + # We're moving this element further down the list + # of this object's children. That means that when + # we extract this element, our target index will + # jump down one. + position -= 1 + elif current_index == position: + # We're 'inserting' an element into its current location. + # This is a no-op. + return [new_child] + new_child.extract() + + new_child.parent = self + previous_child = None + if position == 0: + new_child.previous_sibling = None + new_child.previous_element = self + else: + previous_child = self.contents[position - 1] + new_child.previous_sibling = previous_child + new_child.previous_sibling.next_sibling = new_child + new_child.previous_element = previous_child._last_descendant(False) + if new_child.previous_element is not None: + new_child.previous_element.next_element = new_child + + new_childs_last_element = new_child._last_descendant( + is_initialized=False, accept_self=True + ) + # new_childs_last_element can't be None because we passed + # accept_self=True into _last_descendant. Worst case, + # new_childs_last_element will be new_child itself. Making + # this cast removes several mypy complaints later on as we + # manipulate new_childs_last_element. + new_childs_last_element = cast(PageElement, new_childs_last_element) + + if position >= len(self.contents): + new_child.next_sibling = None + + parent: Optional[Tag] = self + parents_next_sibling = None + while parents_next_sibling is None and parent is not None: + parents_next_sibling = parent.next_sibling + parent = parent.parent + if parents_next_sibling is not None: + # We found the element that comes next in the document. + break + if parents_next_sibling is not None: + new_childs_last_element.next_element = parents_next_sibling + else: + # The last element of this tag is the last element in + # the document. + new_childs_last_element.next_element = None + else: + next_child = self.contents[position] + new_child.next_sibling = next_child + if new_child.next_sibling is not None: + new_child.next_sibling.previous_sibling = new_child + new_childs_last_element.next_element = next_child + + if new_childs_last_element.next_element is not None: + new_childs_last_element.next_element.previous_element = ( + new_childs_last_element + ) + self.contents.insert(position, new_child) + + return [new_child] + + def unwrap(self) -> Self: + """Replace this `PageElement` with its contents. + + :return: This object, no longer part of the tree. + """ + my_parent = self.parent + if my_parent is None: + raise ValueError( + "Cannot replace an element with its contents when that " + "element is not part of a tree." + ) + my_index = my_parent.index(self) + self.extract(_self_index=my_index) + for child in reversed(self.contents[:]): + my_parent.insert(my_index, child) + return self + + replace_with_children = unwrap + + @_deprecated("unwrap", "4.0.0") + def replaceWithChildren(self) -> _OneElement: + ": :meta private:" + return self.unwrap() + + def append(self, tag: _InsertableElement) -> PageElement: + """ + Appends the given `PageElement` to the contents of this `Tag`. + + :param tag: A PageElement. 
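+
+        A minimal usage sketch::
+
+            soup = BeautifulSoup("<p></p>", "html.parser")
+            soup.p.append("one")      # a plain str becomes a NavigableString
+            soup.p.insert(0, "zero")  # document is now <p>zeroone</p>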
+
+        :return: The newly appended PageElement.
+        """
+        return self.insert(len(self.contents), tag)[0]
+
+    def extend(self, tags: Union[Iterable[_InsertableElement], Tag]) -> List[PageElement]:
+        """Appends one or more objects to the contents of this
+        `Tag`.
+
+        :param tags: If a list of `PageElement` objects is provided,
+            they will be appended to this tag's contents, one at a time.
+            If a single `Tag` is provided, its `Tag.contents` will be
+            used to extend this object's `Tag.contents`.
+
+        :return: The list of PageElements that were appended.
+        """
+        tag_list: Iterable[_InsertableElement]
+
+        if isinstance(tags, Tag):
+            tag_list = list(tags.contents)
+        elif isinstance(tags, (PageElement, str)):
+            # The caller should really be using append() instead,
+            # but we can make it work.
+            warnings.warn(
+                "A single non-Tag item was passed into Tag.extend. Use Tag.append instead.",
+                UserWarning,
+                stacklevel=2,
+            )
+            if isinstance(tags, str) and not isinstance(tags, PageElement):
+                tags = NavigableString(tags)
+            tag_list = [tags]
+        elif isinstance(tags, Iterable):
+            # Moving items around the tree may change their position in
+            # the original list. Make a list that won't change.
+            tag_list = list(tags)
+
+        results: List[PageElement] = []
+        for tag in tag_list:
+            results.append(self.append(tag))
+
+        return results
+
+    def clear(self, decompose: bool = False) -> None:
+        """Destroy all children of this `Tag` by calling
+        `PageElement.extract` on them.
+
+        :param decompose: If this is True, `PageElement.decompose` (a
+            more destructive method) will be called instead of
+            `PageElement.extract`.
+        """
+        for element in self.contents[:]:
+            if decompose:
+                element.decompose()
+            else:
+                element.extract()
+
+    def smooth(self) -> None:
+        """Smooth out the children of this `Tag` by consolidating consecutive
+        strings.
+
+        If you perform a lot of operations that modify the tree,
+        calling this method afterwards can make pretty-printed output
+        look more natural.
+        """
+        # Mark the first position of every pair of children that need
+        # to be consolidated. Do this rather than making a copy of
+        # self.contents, since in most cases very few strings will be
+        # affected.
+        marked = []
+        for i, a in enumerate(self.contents):
+            if isinstance(a, Tag):
+                # Recursively smooth children.
+                a.smooth()
+            if i == len(self.contents) - 1:
+                # This is the last item in .contents; there is no
+                # following sibling to consolidate it with.
+                continue
+            b = self.contents[i + 1]
+            if (
+                isinstance(a, NavigableString)
+                and isinstance(b, NavigableString)
+                and not isinstance(a, PreformattedString)
+                and not isinstance(b, PreformattedString)
+            ):
+                marked.append(i)
+
+        # Go over the marked positions in reverse order, so that
+        # removing items from .contents won't affect the remaining
+        # positions.
+        for i in reversed(marked):
+            a = cast(NavigableString, self.contents[i])
+            b = cast(NavigableString, self.contents[i + 1])
+            b.extract()
+            n = NavigableString(a + b)
+            a.replace_with(n)
+
+    def index(self, element: PageElement) -> int:
+        """Find the index of a child of this `Tag` (by identity, not value).
+
+        Doing this by identity avoids issues when a `Tag` contains two
+        children that have string equality.
+
+        :param element: Look for this `PageElement` in this object's contents.
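+
+        A sketch of the identity-based matching::
+
+            p = BeautifulSoup("<p>a<b></b>a</p>", "html.parser").p
+            p.contents[0] == p.contents[2]  # True: the strings are equal
+            p.index(p.contents[2])          # 2: matched by identity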
+ """ + for i, child in enumerate(self.contents): + if child is element: + return i + raise ValueError("Tag.index: element not in tag") + + def get( + self, key: str, default: Optional[_AttributeValue] = None + ) -> Optional[_AttributeValue]: + """Returns the value of the 'key' attribute for the tag, or + the value given for 'default' if it doesn't have that + attribute. + + :param key: The attribute to look for. + :param default: Use this value if the attribute is not present + on this `Tag`. + """ + return self.attrs.get(key, default) + + def get_attribute_list( + self, key: str, default: Optional[AttributeValueList] = None + ) -> AttributeValueList: + """The same as get(), but always returns a (possibly empty) list. + + :param key: The attribute to look for. + :param default: Use this value if the attribute is not present + on this `Tag`. + :return: A list of strings, usually empty or containing only a single + value. + """ + list_value: AttributeValueList + value = self.get(key, default) + if value is None: + list_value = self.attribute_value_list_class() + elif isinstance(value, list): + list_value = value + else: + if not isinstance(value, str): + value = cast(str, value) + list_value = self.attribute_value_list_class([value]) + return list_value + + def has_attr(self, key: str) -> bool: + """Does this `Tag` have an attribute with the given name?""" + return key in self.attrs + + def __hash__(self) -> int: + return str(self).__hash__() + + def __getitem__(self, key: str) -> _AttributeValue: + """tag[key] returns the value of the 'key' attribute for the Tag, + and throws an exception if it's not there.""" + return self.attrs[key] + + def __iter__(self) -> Iterator[PageElement]: + "Iterating over a Tag iterates over its contents." + return iter(self.contents) + + def __len__(self) -> int: + "The length of a Tag is the length of its list of contents." + return len(self.contents) + + def __contains__(self, x: Any) -> bool: + return x in self.contents + + def __bool__(self) -> bool: + "A tag is non-None even if it has no contents." + return True + + def __setitem__(self, key: str, value: _AttributeValue) -> None: + """Setting tag[key] sets the value of the 'key' attribute for the + tag.""" + self.attrs[key] = value + + def __delitem__(self, key: str) -> None: + "Deleting tag[key] deletes all 'key' attributes for the tag." + self.attrs.pop(key, None) + + def __call__( + self, + name: Optional[_StrainableElement] = None, + attrs: _StrainableAttributes = {}, + recursive: bool = True, + string: Optional[_StrainableString] = None, + limit: Optional[int] = None, + _stacklevel: int = 2, + **kwargs: _StrainableAttribute, + ) -> _QueryResults: + """Calling a Tag like a function is the same as calling its + find_all() method. Eg. tag('a') returns a list of all the A tags + found within this tag.""" + return self.find_all( + name, attrs, recursive, string, limit, _stacklevel, **kwargs + ) + + def __getattr__(self, subtag: str) -> Optional[Tag]: + """Calling tag.subtag is the same as calling tag.find(name="subtag")""" + # print("Getattr %s.%s" % (self.__class__, tag)) + result: _AtMostOneElement + if len(subtag) > 3 and subtag.endswith("Tag"): + # BS3: soup.aTag -> "soup.find("a") + tag_name = subtag[:-3] + warnings.warn( + '.%(name)sTag is deprecated, use .find("%(name)s") instead. 
If you really were looking for a tag called %(name)sTag, use .find("%(name)sTag")'
+                % dict(name=tag_name),
+                DeprecationWarning,
+                stacklevel=2,
+            )
+            result = self.find(tag_name)
+        # We special case contents to avoid recursion.
+        elif not subtag.startswith("__") and not subtag == "contents":
+            result = self.find(subtag)
+        else:
+            raise AttributeError(
+                "'%s' object has no attribute '%s'" % (self.__class__, subtag)
+            )
+        return cast(Optional[Tag], result)
+
+    def __eq__(self, other: Any) -> bool:
+        """Returns true iff this Tag has the same name, the same attributes,
+        and the same contents (recursively) as `other`."""
+        if self is other:
+            return True
+        if not isinstance(other, Tag):
+            return False
+        if (
+            not hasattr(other, "name")
+            or not hasattr(other, "attrs")
+            or not hasattr(other, "contents")
+            or self.name != other.name
+            or self.attrs != other.attrs
+            or len(self) != len(other)
+        ):
+            return False
+        for i, my_child in enumerate(self.contents):
+            if my_child != other.contents[i]:
+                return False
+        return True
+
+    def __ne__(self, other: Any) -> bool:
+        """Returns true iff this Tag is not equal to `other`,
+        as defined in __eq__."""
+        return not self == other
+
+    def __repr__(self) -> str:
+        """Renders this `Tag` as a string."""
+        return self.decode()
+
+    __str__ = __unicode__ = __repr__
+
+    def encode(
+        self,
+        encoding: _Encoding = DEFAULT_OUTPUT_ENCODING,
+        indent_level: Optional[int] = None,
+        formatter: _FormatterOrName = "minimal",
+        errors: str = "xmlcharrefreplace",
+    ) -> bytes:
+        """Render this `Tag` and its contents as a bytestring.
+
+        :param encoding: The encoding to use when converting to
+            a bytestring. This may also affect the text of the document,
+            specifically any encoding declarations within the document.
+        :param indent_level: Each line of the rendering will be
+            indented this many levels. (The ``formatter`` decides what a
+            'level' means, in terms of spaces or other characters
+            output.) This is used internally in recursive calls while
+            pretty-printing.
+        :param formatter: Either a `Formatter` object, or a string naming one of
+            the standard formatters.
+        :param errors: An error handling strategy such as
+            'xmlcharrefreplace'. This value is passed along into
+            :py:meth:`str.encode` and its value should be one of the `error
+            handling constants defined by Python's codecs module
+            <https://docs.python.org/3/library/codecs.html#error-handlers>`_.
+        """
+        # Turn the data structure into Unicode, then encode the
+        # Unicode.
+        u = self.decode(indent_level, encoding, formatter)
+        return u.encode(encoding, errors)
+
+    def decode(
+        self,
+        indent_level: Optional[int] = None,
+        eventual_encoding: _Encoding = DEFAULT_OUTPUT_ENCODING,
+        formatter: _FormatterOrName = "minimal",
+        iterator: Optional[Iterator[PageElement]] = None,
+    ) -> str:
+        """Render this `Tag` and its contents as a Unicode string.
+
+        :param indent_level: Each line of the rendering will be
+            indented this many levels. (The ``formatter`` decides what a
+            'level' means, in terms of spaces or other characters
+            output.) This is used internally in recursive calls while
+            pretty-printing.
+        :param eventual_encoding: The encoding you intend to use when
+            converting the string to a bytestring. decode() is *not*
+            responsible for performing that encoding. This information
+            is needed so that a real encoding can be substituted in if
+            the document contains an encoding declaration (e.g. in a
+            <meta> tag).
+        :param formatter: Either a `Formatter` object, or a string
+            naming one of the standard formatters.
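+
+        A rendering sketch::
+
+            soup = BeautifulSoup("<b>x</b>", "html.parser")
+            soup.b.decode()         # '<b>x</b>'
+            soup.b.encode("utf-8")  # b'<b>x</b>'
+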
+ :param iterator: The iterator to use when navigating over the + parse tree. This is only used by `Tag.decode_contents` and + you probably won't need to use it. + """ + pieces = [] + # First off, turn a non-Formatter `formatter` into a Formatter + # object. This will stop the lookup from happening over and + # over again. + if not isinstance(formatter, Formatter): + formatter = self.formatter_for_name(formatter) + + if indent_level is True: + indent_level = 0 + + # The currently active tag that put us into string literal + # mode. Until this element is closed, children will be treated + # as string literals and not pretty-printed. String literal + # mode is turned on immediately after this tag begins, and + # turned off immediately before it's closed. This means there + # will be whitespace before and after the tag itself. + string_literal_tag = None + + for event, element in self._event_stream(iterator): + if event in (Tag.START_ELEMENT_EVENT, Tag.EMPTY_ELEMENT_EVENT): + element = cast(Tag, element) + piece = element._format_tag(eventual_encoding, formatter, opening=True) + elif event is Tag.END_ELEMENT_EVENT: + element = cast(Tag, element) + piece = element._format_tag(eventual_encoding, formatter, opening=False) + if indent_level is not None: + indent_level -= 1 + else: + element = cast(NavigableString, element) + piece = element.output_ready(formatter) + + # Now we need to apply the 'prettiness' -- extra + # whitespace before and/or after this tag. This can get + # complicated because certain tags, like <pre> and + # <script>, can't be prettified, since adding whitespace would + # change the meaning of the content. + + # The default behavior is to add whitespace before and + # after an element when string literal mode is off, and to + # leave things as they are when string literal mode is on. + if string_literal_tag: + indent_before = indent_after = False + else: + indent_before = indent_after = True + + # The only time the behavior is more complex than that is + # when we encounter an opening or closing tag that might + # put us into or out of string literal mode. + if ( + event is Tag.START_ELEMENT_EVENT + and not string_literal_tag + and not cast(Tag, element)._should_pretty_print() + ): + # We are about to enter string literal mode. Add + # whitespace before this tag, but not after. We + # will stay in string literal mode until this tag + # is closed. + indent_before = True + indent_after = False + string_literal_tag = element + elif event is Tag.END_ELEMENT_EVENT and element is string_literal_tag: + # We are about to exit string literal mode by closing + # the tag that sent us into that mode. Add whitespace + # after this tag, but not before. + indent_before = False + indent_after = True + string_literal_tag = None + + # Now we know whether to add whitespace before and/or + # after this element. + if indent_level is not None: + if indent_before or indent_after: + if isinstance(element, NavigableString): + piece = piece.strip() + if piece: + piece = self._indent_string( + piece, indent_level, formatter, indent_before, indent_after + ) + if event == Tag.START_ELEMENT_EVENT: + indent_level += 1 + pieces.append(piece) + return "".join(pieces) + + class _TreeTraversalEvent(object): + """An internal class representing an event in the process + of traversing a parse tree. 
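+
+        For example, traversing ``<b>x</b>`` yields START_ELEMENT_EVENT
+        for the <b> tag, STRING_ELEMENT_EVENT for the string "x", and
+        then END_ELEMENT_EVENT for the <b> tag.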
+ + :meta private: + """ + + # Stand-ins for the different events yielded by _event_stream + START_ELEMENT_EVENT = _TreeTraversalEvent() #: :meta private: + END_ELEMENT_EVENT = _TreeTraversalEvent() #: :meta private: + EMPTY_ELEMENT_EVENT = _TreeTraversalEvent() #: :meta private: + STRING_ELEMENT_EVENT = _TreeTraversalEvent() #: :meta private: + + def _event_stream( + self, iterator: Optional[Iterator[PageElement]] = None + ) -> Iterator[Tuple[_TreeTraversalEvent, PageElement]]: + """Yield a sequence of events that can be used to reconstruct the DOM + for this element. + + This lets us recreate the nested structure of this element + (e.g. when formatting it as a string) without using recursive + method calls. + + This is similar in concept to the SAX API, but it's a simpler + interface designed for internal use. The events are different + from SAX and the arguments associated with the events are Tags + and other Beautiful Soup objects. + + :param iterator: An alternate iterator to use when traversing + the tree. + """ + tag_stack: List[Tag] = [] + + iterator = iterator or self.self_and_descendants + + for c in iterator: + # If the parent of the element we're about to yield is not + # the tag currently on the stack, it means that the tag on + # the stack closed before this element appeared. + while tag_stack and c.parent != tag_stack[-1]: + now_closed_tag = tag_stack.pop() + yield Tag.END_ELEMENT_EVENT, now_closed_tag + + if isinstance(c, Tag): + if c.is_empty_element: + yield Tag.EMPTY_ELEMENT_EVENT, c + else: + yield Tag.START_ELEMENT_EVENT, c + tag_stack.append(c) + continue + else: + yield Tag.STRING_ELEMENT_EVENT, c + + while tag_stack: + now_closed_tag = tag_stack.pop() + yield Tag.END_ELEMENT_EVENT, now_closed_tag + + def _indent_string( + self, + s: str, + indent_level: int, + formatter: Formatter, + indent_before: bool, + indent_after: bool, + ) -> str: + """Add indentation whitespace before and/or after a string. + + :param s: The string to amend with whitespace. + :param indent_level: The indentation level; affects how much + whitespace goes before the string. + :param indent_before: Whether or not to add whitespace + before the string. + :param indent_after: Whether or not to add whitespace + (a newline) after the string. + """ + space_before = "" + if indent_before and indent_level: + space_before = formatter.indent * indent_level + + space_after = "" + if indent_after: + space_after = "\n" + + return space_before + s + space_after + + def _format_tag( + self, eventual_encoding: str, formatter: Formatter, opening: bool + ) -> str: + if self.hidden: + # A hidden tag is invisible, although its contents + # are visible. + return "" + + # A tag starts with the < character (see below). + + # Then the / character, if this is a closing tag. + closing_slash = "" + if not opening: + closing_slash = "/" + + # Then an optional namespace prefix. + prefix = "" + if self.prefix: + prefix = self.prefix + ":" + + # Then a list of attribute values, if this is an opening tag. 
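+        # For example, an opening tag with attrs {"href": "x",
+        # "class": ["y", "z"]} renders as <a href="x" class="y z">:
+        # multi-valued attributes are space-joined below.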
+ attribute_string = "" + if opening: + attributes = formatter.attributes(self) + attrs = [] + for key, val in attributes: + if val is None: + decoded = key + else: + if isinstance(val, list) or isinstance(val, tuple): + val = " ".join(val) + elif not isinstance(val, str): + val = str(val) + elif ( + isinstance(val, AttributeValueWithCharsetSubstitution) + and eventual_encoding is not None + ): + val = val.substitute_encoding(eventual_encoding) + + text = formatter.attribute_value(val) + decoded = str(key) + "=" + formatter.quoted_attribute_value(text) + attrs.append(decoded) + if attrs: + attribute_string = " " + " ".join(attrs) + + # Then an optional closing slash (for a void element in an + # XML document). + void_element_closing_slash = "" + if self.is_empty_element: + void_element_closing_slash = formatter.void_element_close_prefix or "" + + # Put it all together. + return ( + "<" + + closing_slash + + prefix + + self.name + + attribute_string + + void_element_closing_slash + + ">" + ) + + def _should_pretty_print(self, indent_level: int = 1) -> bool: + """Should this tag be pretty-printed? + + Most of them should, but some (such as <pre> in HTML + documents) should not. + """ + return indent_level is not None and ( + not self.preserve_whitespace_tags + or self.name not in self.preserve_whitespace_tags + ) + + def prettify( + self, + encoding: Optional[_Encoding] = None, + formatter: _FormatterOrName = "minimal", + ) -> Union[str, bytes]: + """Pretty-print this `Tag` as a string or bytestring. + + :param encoding: The encoding of the bytestring, or None if you want Unicode. + :param formatter: A Formatter object, or a string naming one of + the standard formatters. + :return: A string (if no ``encoding`` is provided) or a bytestring + (otherwise). + """ + if encoding is None: + return self.decode(indent_level=0, formatter=formatter) + else: + return self.encode(encoding=encoding, indent_level=0, formatter=formatter) + + def decode_contents( + self, + indent_level: Optional[int] = None, + eventual_encoding: _Encoding = DEFAULT_OUTPUT_ENCODING, + formatter: _FormatterOrName = "minimal", + ) -> str: + """Renders the contents of this tag as a Unicode string. + + :param indent_level: Each line of the rendering will be + indented this many levels. (The formatter decides what a + 'level' means in terms of spaces or other characters + output.) Used internally in recursive calls while + pretty-printing. + + :param eventual_encoding: The tag is destined to be + encoded into this encoding. decode_contents() is *not* + responsible for performing that encoding. This information + is needed so that a real encoding can be substituted in if + the document contains an encoding declaration (e.g. in a + <meta> tag). + + :param formatter: A `Formatter` object, or a string naming one of + the standard Formatters. + """ + return self.decode( + indent_level, eventual_encoding, formatter, iterator=self.descendants + ) + + def encode_contents( + self, + indent_level: Optional[int] = None, + encoding: _Encoding = DEFAULT_OUTPUT_ENCODING, + formatter: _FormatterOrName = "minimal", + ) -> bytes: + """Renders the contents of this PageElement as a bytestring. + + :param indent_level: Each line of the rendering will be + indented this many levels. (The ``formatter`` decides what a + 'level' means, in terms of spaces or other characters + output.) This is used internally in recursive calls while + pretty-printing. + :param formatter: Either a `Formatter` object, or a string naming one of + the standard formatters. 
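+
+        A sketch: ``BeautifulSoup("<p>a</p>", "html.parser").p.encode_contents()``
+        returns ``b'a'``.
+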
+        :param encoding: The bytestring will be in this encoding.
+        """
+        contents = self.decode_contents(indent_level, encoding, formatter)
+        return contents.encode(encoding)
+
+    @_deprecated("encode_contents", "4.0.0")
+    def renderContents(
+        self,
+        encoding: _Encoding = DEFAULT_OUTPUT_ENCODING,
+        prettyPrint: bool = False,
+        indentLevel: Optional[int] = 0,
+    ) -> bytes:
+        """Deprecated method for BS3 compatibility.
+
+        :meta private:
+        """
+        if not prettyPrint:
+            indentLevel = None
+        return self.encode_contents(indent_level=indentLevel, encoding=encoding)
+
+    # Soup methods
+
+    def find(
+        self,
+        name: _FindMethodName = None,
+        attrs: _StrainableAttributes = {},
+        recursive: bool = True,
+        string: Optional[_StrainableString] = None,
+        **kwargs: _StrainableAttribute,
+    ) -> _AtMostOneElement:
+        """Look in the children of this PageElement and find the first
+        PageElement that matches the given criteria.
+
+        All find_* methods take a common set of arguments. See the online
+        documentation for detailed explanations.
+
+        :param name: A filter on tag name.
+        :param attrs: Additional filters on attribute values.
+        :param recursive: If this is True, find() will perform a
+            recursive search of this Tag's children. Otherwise,
+            only the direct children will be considered.
+        :param string: A filter on the `Tag.string` attribute.
+        :kwargs: Additional filters on attribute values.
+        """
+        r = None
+        results = self.find_all(name, attrs, recursive, string, 1, _stacklevel=3, **kwargs)
+        if results:
+            r = results[0]
+        return r
+
+    findChild = _deprecated_function_alias("findChild", "find", "3.0.0")
+
+    def find_all(
+        self,
+        name: _FindMethodName = None,
+        attrs: _StrainableAttributes = {},
+        recursive: bool = True,
+        string: Optional[_StrainableString] = None,
+        limit: Optional[int] = None,
+        _stacklevel: int = 2,
+        **kwargs: _StrainableAttribute,
+    ) -> _QueryResults:
+        """Look in the children of this `PageElement` and find all
+        `PageElement` objects that match the given criteria.
+
+        All find_* methods take a common set of arguments. See the online
+        documentation for detailed explanations.
+
+        :param name: A filter on tag name.
+        :param attrs: Additional filters on attribute values.
+        :param recursive: If this is True, find_all() will perform a
+            recursive search of this PageElement's children. Otherwise,
+            only the direct children will be considered.
+        :param string: A filter on the `Tag.string` attribute.
+        :param limit: Stop looking after finding this many results.
+        :param _stacklevel: Used internally to improve warning messages.
+        :kwargs: Additional filters on attribute values.
+        """
+        generator = self.descendants
+        if not recursive:
+            generator = self.children
+        return self._find_all(
+            name, attrs, string, limit, generator, _stacklevel=_stacklevel + 1, **kwargs
+        )
+
+    findAll = _deprecated_function_alias("findAll", "find_all", "4.0.0")
+    findChildren = _deprecated_function_alias("findChildren", "find_all", "3.0.0")
+
+    # Generator methods
+    @property
+    def children(self) -> Iterator[PageElement]:
+        """Iterate over all direct children of this `PageElement`."""
+        return (x for x in self.contents)
+
+    @property
+    def self_and_descendants(self) -> Iterator[PageElement]:
+        """Iterate over this `Tag` and its children in document
+        order (a depth-first, pre-order traversal).
+        """
+        return self._self_and(self.descendants)
+
+    @property
+    def descendants(self) -> Iterator[PageElement]:
+        """Iterate over all children of this `Tag` in document
+        order (a depth-first, pre-order traversal).
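+
+        For ``<a><b>1</b></a>``, ``soup.a.descendants`` yields the <b>
+        Tag and then the string "1".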
+ """ + if not len(self.contents): + return + # _last_descendant() can't return None here because + # accept_self is True. Worst case, last_descendant will end up + # as self. + last_descendant = cast(PageElement, self._last_descendant(accept_self=True)) + stopNode = last_descendant.next_element + current: _AtMostOneElement = self.contents[0] + while current is not stopNode and current is not None: + successor = current.next_element + yield current + current = successor + + # CSS selector code + def select_one( + self, selector: str, namespaces: Optional[Dict[str, str]] = None, **kwargs: Any + ) -> Optional[Tag]: + """Perform a CSS selection operation on the current element. + + :param selector: A CSS selector. + + :param namespaces: A dictionary mapping namespace prefixes + used in the CSS selector to namespace URIs. By default, + Beautiful Soup will use the prefixes it encountered while + parsing the document. + + :param kwargs: Keyword arguments to be passed into Soup Sieve's + soupsieve.select() method. + """ + return self.css.select_one(selector, namespaces, **kwargs) + + def select( + self, + selector: str, + namespaces: Optional[Dict[str, str]] = None, + limit: int = 0, + **kwargs: Any, + ) -> ResultSet[Tag]: + """Perform a CSS selection operation on the current element. + + This uses the SoupSieve library. + + :param selector: A string containing a CSS selector. + + :param namespaces: A dictionary mapping namespace prefixes + used in the CSS selector to namespace URIs. By default, + Beautiful Soup will use the prefixes it encountered while + parsing the document. + + :param limit: After finding this number of results, stop looking. + + :param kwargs: Keyword arguments to be passed into SoupSieve's + soupsieve.select() method. + """ + return self.css.select(selector, namespaces, limit, **kwargs) + + @property + def css(self) -> CSS: + """Return an interface to the CSS selector API.""" + return CSS(self) + + # Old names for backwards compatibility + @_deprecated("children", "4.0.0") + def childGenerator(self) -> Iterator[PageElement]: + """Deprecated generator. + + :meta private: + """ + return self.children + + @_deprecated("descendants", "4.0.0") + def recursiveChildGenerator(self) -> Iterator[PageElement]: + """Deprecated generator. + + :meta private: + """ + return self.descendants + + @_deprecated("has_attr", "4.0.0") + def has_key(self, key: str) -> bool: + """Deprecated method. This was kind of misleading because has_key() + (attributes) was different from __in__ (contents). + + has_key() is gone in Python 3, anyway. + + :meta private: + """ + return self.has_attr(key) + + +_PageElementT = TypeVar("_PageElementT", bound=PageElement) + + +class ResultSet(List[_PageElementT], Generic[_PageElementT]): + """A ResultSet is a list of `PageElement` objects, gathered as the result + of matching an :py:class:`ElementFilter` against a parse tree. Basically, a list of + search results. + """ + + source: Optional[ElementFilter] + + def __init__( + self, source: Optional[ElementFilter], result: Iterable[_PageElementT] = () + ) -> None: + super(ResultSet, self).__init__(result) + self.source = source + + def __getattr__(self, key: str) -> None: + """Raise a helpful exception to explain a common code fix.""" + raise AttributeError( + f"""ResultSet object has no attribute "{key}". You're probably treating a list of elements like a single element. 
Did you call find_all() when you meant to call find()?""" + ) + + +# Now that all the classes used by SoupStrainer have been defined, +# import SoupStrainer itself into this module to preserve the +# backwards compatibility of anyone who imports +# bs4.element.SoupStrainer. +from bs4.filter import SoupStrainer # noqa: E402 diff --git a/.venv/lib/python3.12/site-packages/bs4/exceptions.py b/.venv/lib/python3.12/site-packages/bs4/exceptions.py new file mode 100644 index 00000000..1d1a8fb2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/exceptions.py @@ -0,0 +1,28 @@ +"""Exceptions defined by Beautiful Soup itself.""" + +from typing import Union + + +class StopParsing(Exception): + """Exception raised by a TreeBuilder if it's unable to continue parsing.""" + + +class FeatureNotFound(ValueError): + """Exception raised by the BeautifulSoup constructor if no parser with the + requested features is found. + """ + + +class ParserRejectedMarkup(Exception): + """An Exception to be raised when the underlying parser simply + refuses to parse the given markup. + """ + + def __init__(self, message_or_exception: Union[str, Exception]): + """Explain why the parser rejected the given markup, either + with a textual explanation or another exception. + """ + if isinstance(message_or_exception, Exception): + e = message_or_exception + message_or_exception = "%s: %s" % (e.__class__.__name__, str(e)) + super(ParserRejectedMarkup, self).__init__(message_or_exception) diff --git a/.venv/lib/python3.12/site-packages/bs4/filter.py b/.venv/lib/python3.12/site-packages/bs4/filter.py new file mode 100644 index 00000000..e3ce2e2f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/filter.py @@ -0,0 +1,755 @@ +from __future__ import annotations +from collections import defaultdict +import re +from typing import ( + Any, + Callable, + cast, + Dict, + Iterator, + Iterable, + List, + Optional, + Sequence, + Type, + Union, +) +import warnings + +from bs4._deprecation import _deprecated +from bs4.element import ( + AttributeDict, + NavigableString, + PageElement, + ResultSet, + Tag, +) +from bs4._typing import ( + _AtMostOneElement, + _AttributeValue, + _OneElement, + _PageElementMatchFunction, + _QueryResults, + _RawAttributeValues, + _RegularExpressionProtocol, + _StrainableAttribute, + _StrainableElement, + _StrainableString, + _StringMatchFunction, + _TagMatchFunction, +) + + +class ElementFilter(object): + """`ElementFilter` encapsulates the logic necessary to decide: + + 1. whether a `PageElement` (a `Tag` or a `NavigableString`) matches a + user-specified query. + + 2. whether a given sequence of markup found during initial parsing + should be turned into a `PageElement` at all, or simply discarded. + + The base class is the simplest `ElementFilter`. By default, it + matches everything and allows all markup to become `PageElement` + objects. You can make it more selective by passing in a + user-defined match function, or defining a subclass. + + Most users of Beautiful Soup will never need to use + `ElementFilter`, or its more capable subclass + `SoupStrainer`. Instead, they will use methods like + :py:meth:`Tag.find`, which will convert their arguments into + `SoupStrainer` objects and run them against the tree. + + However, if you find yourself wanting to treat the arguments to + Beautiful Soup's find_*() methods as first-class objects, those + objects will be `SoupStrainer` objects. You can create them + yourself and then make use of functions like + `ElementFilter.filter()`. 
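+
+    A minimal sketch of direct use (the parsed markup and the match
+    function here are illustrative)::
+
+        from bs4 import BeautifulSoup
+        from bs4.element import Tag
+        from bs4.filter import ElementFilter
+
+        soup = BeautifulSoup("<a>1</a><b>2</b>", "html.parser")
+        only_tags = ElementFilter(lambda el: isinstance(el, Tag))
+        [e.name for e in only_tags.filter(soup.descendants)]  # ['a', 'b']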
+ """ + + match_function: Optional[_PageElementMatchFunction] + + def __init__(self, match_function: Optional[_PageElementMatchFunction] = None): + """Pass in a match function to easily customize the behavior of + `ElementFilter.match` without needing to subclass. + + :param match_function: A function that takes a `PageElement` + and returns `True` if that `PageElement` matches some criteria. + """ + self.match_function = match_function + + @property + def includes_everything(self) -> bool: + """Does this `ElementFilter` obviously include everything? If so, + the filter process can be made much faster. + + The `ElementFilter` might turn out to include everything even + if this returns `False`, but it won't include everything in an + obvious way. + + The base `ElementFilter` implementation includes things based on + the match function, so includes_everything is only true if + there is no match function. + """ + return not self.match_function + + @property + def excludes_everything(self) -> bool: + """Does this `ElementFilter` obviously exclude everything? If + so, Beautiful Soup will issue a warning if you try to use it + when parsing a document. + + The `ElementFilter` might turn out to exclude everything even + if this returns `False`, but it won't exclude everything in an + obvious way. + + The base `ElementFilter` implementation excludes things based + on a match function we can't inspect, so excludes_everything + is always false. + """ + return False + + def match(self, element: PageElement, _known_rules:bool=False) -> bool: + """Does the given PageElement match the rules set down by this + ElementFilter? + + The base implementation delegates to the function passed in to + the constructor. + + :param _known_rules: Defined for compatibility with + SoupStrainer._match(). Used more for consistency than because + we need the performance optimization. + """ + if not _known_rules and self.includes_everything: + return True + if not self.match_function: + return True + return self.match_function(element) + + def filter(self, generator: Iterator[PageElement]) -> Iterator[_OneElement]: + """The most generic search method offered by Beautiful Soup. + + Acts like Python's built-in `filter`, using + `ElementFilter.match` as the filtering function. + """ + # If there are no rules at all, don't bother filtering. Let + # anything through. + if self.includes_everything: + for i in generator: + yield i + while True: + try: + i = next(generator) + except StopIteration: + break + if i: + if self.match(i, _known_rules=True): + yield cast("_OneElement", i) + + def find(self, generator: Iterator[PageElement]) -> _AtMostOneElement: + """A lower-level equivalent of :py:meth:`Tag.find`. + + You can pass in your own generator for iterating over + `PageElement` objects. The first one that matches this + `ElementFilter` will be returned. + + :param generator: A way of iterating over `PageElement` + objects. + """ + for match in self.filter(generator): + return match + return None + + def find_all( + self, generator: Iterator[PageElement], limit: Optional[int] = None + ) -> _QueryResults: + """A lower-level equivalent of :py:meth:`Tag.find_all`. + + You can pass in your own generator for iterating over + `PageElement` objects. Only elements that match this + `ElementFilter` will be returned in the :py:class:`ResultSet`. + + :param generator: A way of iterating over `PageElement` + objects. + + :param limit: Stop looking after finding this many results. 
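+
+        A sketch (assuming ``f`` is an `ElementFilter` and ``soup`` is a
+        parsed document)::
+
+            first_two = f.find_all(soup.descendants, limit=2)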
+ """ + results: _QueryResults = ResultSet(self) + for match in self.filter(generator): + results.append(match) + if limit is not None and len(results) >= limit: + break + return results + + def allow_tag_creation( + self, nsprefix: Optional[str], name: str, attrs: Optional[_RawAttributeValues] + ) -> bool: + """Based on the name and attributes of a tag, see whether this + `ElementFilter` will allow a `Tag` object to even be created. + + By default, all tags are parsed. To change this, subclass + `ElementFilter`. + + :param name: The name of the prospective tag. + :param attrs: The attributes of the prospective tag. + """ + return True + + def allow_string_creation(self, string: str) -> bool: + """Based on the content of a string, see whether this + `ElementFilter` will allow a `NavigableString` object based on + this string to be added to the parse tree. + + By default, all strings are processed into `NavigableString` + objects. To change this, subclass `ElementFilter`. + + :param str: The string under consideration. + """ + return True + + +class MatchRule(object): + """Each MatchRule encapsulates the logic behind a single argument + passed in to one of the Beautiful Soup find* methods. + """ + + string: Optional[str] + pattern: Optional[_RegularExpressionProtocol] + present: Optional[bool] + exclude_everything: Optional[bool] + # TODO-TYPING: All MatchRule objects also have an attribute + # ``function``, but the type of the function depends on the + # subclass. + + def __init__( + self, + string: Optional[Union[str, bytes]] = None, + pattern: Optional[_RegularExpressionProtocol] = None, + function: Optional[Callable] = None, + present: Optional[bool] = None, + exclude_everything: Optional[bool] = None + ): + if isinstance(string, bytes): + string = string.decode("utf8") + self.string = string + if isinstance(pattern, bytes): + self.pattern = re.compile(pattern.decode("utf8")) + elif isinstance(pattern, str): + self.pattern = re.compile(pattern) + else: + self.pattern = pattern + self.function = function + self.present = present + self.exclude_everything = exclude_everything + + values = [ + x + for x in (self.string, self.pattern, self.function, self.present, self.exclude_everything) + if x is not None + ] + if len(values) == 0: + raise ValueError( + "Either string, pattern, function, present, or exclude_everything must be provided." + ) + if len(values) > 1: + raise ValueError( + "At most one of string, pattern, function, present, and exclude_everything must be provided." + ) + + def _base_match(self, string: Optional[str]) -> Optional[bool]: + """Run the 'cheap' portion of a match, trying to get an answer without + calling a potentially expensive custom function. + + :return: True or False if we have a (positive or negative) + match; None if we need to keep trying. + """ + # self.exclude_everything matches nothing. + if self.exclude_everything: + return False + + # self.present==True matches everything except None. + if self.present is True: + return string is not None + + # self.present==False matches _only_ None. + if self.present is False: + return string is None + + # self.string does an exact string match. + if self.string is not None: + # print(f"{self.string} ?= {string}") + return self.string == string + + # self.pattern does a regular expression search. 
+ if self.pattern is not None: + # print(f"{self.pattern} ?~ {string}") + if string is None: + return False + return self.pattern.search(string) is not None + + return None + + def matches_string(self, string: Optional[str]) -> bool: + _base_result = self._base_match(string) + if _base_result is not None: + # No need to invoke the test function. + return _base_result + if self.function is not None and not self.function(string): + # print(f"{self.function}({string}) == False") + return False + return True + + def __repr__(self) -> str: + cls = type(self).__name__ + return f"<{cls} string={self.string} pattern={self.pattern} function={self.function} present={self.present}>" + + def __eq__(self, other: Any) -> bool: + return ( + isinstance(other, MatchRule) + and self.string == other.string + and self.pattern == other.pattern + and self.function == other.function + and self.present == other.present + ) + + +class TagNameMatchRule(MatchRule): + """A MatchRule implementing the rules for matches against tag name.""" + + function: Optional[_TagMatchFunction] + + def matches_tag(self, tag: Tag) -> bool: + base_value = self._base_match(tag.name) + if base_value is not None: + return base_value + + # The only remaining possibility is that the match is determined + # by a function call. Call the function. + function = cast(_TagMatchFunction, self.function) + if function(tag): + return True + return False + + +class AttributeValueMatchRule(MatchRule): + """A MatchRule implementing the rules for matches against attribute value.""" + + function: Optional[_StringMatchFunction] + + +class StringMatchRule(MatchRule): + """A MatchRule implementing the rules for matches against a NavigableString.""" + + function: Optional[_StringMatchFunction] + + +class SoupStrainer(ElementFilter): + """The `ElementFilter` subclass used internally by Beautiful Soup. + + A `SoupStrainer` encapsulates the logic necessary to perform the + kind of matches supported by methods such as + :py:meth:`Tag.find`. `SoupStrainer` objects are primarily created + internally, but you can create one yourself and pass it in as + ``parse_only`` to the `BeautifulSoup` constructor, to parse a + subset of a large document. + + Internally, `SoupStrainer` objects work by converting the + constructor arguments into `MatchRule` objects. Incoming + tags/markup are matched against those rules. + + :param name: One or more restrictions on the tags found in a document. + + :param attrs: A dictionary that maps attribute names to + restrictions on tags that use those attributes. + + :param string: One or more restrictions on the strings found in a + document. + + :param kwargs: A dictionary that maps attribute names to restrictions + on tags that use those attributes. These restrictions are additive to + any specified in ``attrs``. + + """ + + name_rules: List[TagNameMatchRule] + attribute_rules: Dict[str, List[AttributeValueMatchRule]] + string_rules: List[StringMatchRule] + + def __init__( + self, + name: Optional[_StrainableElement] = None, + attrs: Dict[str, _StrainableAttribute] = {}, + string: Optional[_StrainableString] = None, + **kwargs: _StrainableAttribute, + ): + if string is None and "text" in kwargs: + string = cast(Optional[_StrainableString], kwargs.pop("text")) + warnings.warn( + "As of version 4.11.0, the 'text' argument to the SoupStrainer constructor is deprecated. 
Use 'string' instead.", + DeprecationWarning, + stacklevel=2, + ) + + if name is None and not attrs and not string and not kwargs: + # Special case for backwards compatibility. Instantiating + # a SoupStrainer with no arguments whatsoever gets you one + # that matches all Tags, and only Tags. + self.name_rules = [TagNameMatchRule(present=True)] + else: + self.name_rules = cast( + List[TagNameMatchRule], list(self._make_match_rules(name, TagNameMatchRule)) + ) + self.attribute_rules = defaultdict(list) + + if not isinstance(attrs, dict): + # Passing something other than a dictionary as attrs is + # sugar for matching that thing against the 'class' + # attribute. + attrs = {"class": attrs} + + for attrdict in attrs, kwargs: + for attr, value in attrdict.items(): + if attr == "class_" and attrdict is kwargs: + # If you pass in 'class_' as part of kwargs, it's + # because class is a Python reserved word. If you + # pass it in as part of the attrs dict, it's + # because you really are looking for an attribute + # called 'class_'. + attr = "class" + + if value is None: + value = False + for rule_obj in self._make_match_rules(value, AttributeValueMatchRule): + self.attribute_rules[attr].append( + cast(AttributeValueMatchRule, rule_obj) + ) + + self.string_rules = cast( + List[StringMatchRule], list(self._make_match_rules(string, StringMatchRule)) + ) + + #: DEPRECATED 4.13.0: You shouldn't need to check this under + #: any name (.string or .text), and if you do, you're probably + #: not taking into account all of the types of values this + #: variable might have. Look at the .string_rules list instead. + self.__string = string + + @property + def includes_everything(self) -> bool: + """Check whether the provided rules will obviously include + everything. (They might include everything even if this returns `False`, + but not in an obvious way.) + """ + return not self.name_rules and not self.string_rules and not self.attribute_rules + + @property + def excludes_everything(self) -> bool: + """Check whether the provided rules will obviously exclude + everything. (They might exclude everything even if this returns `False`, + but not in an obvious way.) + """ + if (self.string_rules and (self.name_rules or self.attribute_rules)): + # This is self-contradictory, so the rules exclude everything. + return True + + # If there's a rule that ended up treated as an "exclude everything" + # rule due to creating a logical inconsistency, then the rules + # exclude everything. + if any(x.exclude_everything for x in self.string_rules): + return True + if any(x.exclude_everything for x in self.name_rules): + return True + for ruleset in self.attribute_rules.values(): + if any(x.exclude_everything for x in ruleset): + return True + return False + + @property + def string(self) -> Optional[_StrainableString]: + ":meta private:" + warnings.warn( + "Access to deprecated property string. (Look at .string_rules instead) -- Deprecated since version 4.13.0.", + DeprecationWarning, + stacklevel=2, + ) + return self.__string + + @property + def text(self) -> Optional[_StrainableString]: + ":meta private:" + warnings.warn( + "Access to deprecated property text. 
(Look at .string_rules instead) -- Deprecated since version 4.13.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        return self.__string
+
+    def __repr__(self) -> str:
+        return f"<{self.__class__.__name__} name={self.name_rules} attrs={self.attribute_rules} string={self.string_rules}>"
+
+    @classmethod
+    def _make_match_rules(
+        cls,
+        obj: Optional[Union[_StrainableElement, _StrainableAttribute]],
+        rule_class: Type[MatchRule],
+    ) -> Iterator[MatchRule]:
+        """Convert a vaguely-specified 'object' into one or more well-defined
+        `MatchRule` objects.
+
+        :param obj: Some kind of object that corresponds to one or more
+            matching rules.
+        :param rule_class: Create instances of this `MatchRule` subclass.
+        """
+        if obj is None:
+            return
+        if isinstance(obj, (str, bytes)):
+            yield rule_class(string=obj)
+        elif isinstance(obj, bool):
+            yield rule_class(present=obj)
+        elif callable(obj):
+            yield rule_class(function=obj)
+        elif isinstance(obj, _RegularExpressionProtocol):
+            yield rule_class(pattern=obj)
+        elif hasattr(obj, "__iter__"):
+            if not obj:
+                # The attribute is being matched against the null set,
+                # which means it should exclude everything.
+                yield rule_class(exclude_everything=True)
+            for o in obj:
+                if not isinstance(o, (bytes, str)) and hasattr(o, "__iter__"):
+                    # This is almost certainly the user's
+                    # mistake. This list contains another list, which
+                    # opens up the possibility of infinite
+                    # self-reference. In the interests of avoiding
+                    # infinite recursion, we'll treat this as an
+                    # impossible match and issue a rule that excludes
+                    # everything, rather than looking inside.
+                    warnings.warn(
+                        f"Ignoring nested list {o} to avoid the possibility of infinite recursion.",
+                        stacklevel=5,
+                    )
+                    yield rule_class(exclude_everything=True)
+                    continue
+                for x in cls._make_match_rules(o, rule_class):
+                    yield x
+        else:
+            yield rule_class(string=str(obj))
+
+    def matches_tag(self, tag: Tag) -> bool:
+        """Do the rules of this `SoupStrainer` trigger a match against the
+        given `Tag`?
+
+        If the `SoupStrainer` has any `TagNameMatchRule`, at least one
+        must match the `Tag` or its `Tag.name`.
+
+        If there are any `AttributeValueMatchRule` for a given
+        attribute, at least one of them must match the attribute
+        value.
+
+        If there are any `StringMatchRule`, at least one must match,
+        but a `SoupStrainer` that *only* contains `StringMatchRule`
+        cannot match a `Tag`, only a `NavigableString`.
+        """
+        # If there are no rules at all, let anything through.
+        #if self.includes_everything:
+        #    return True
+
+        # String rules cannot match a Tag on their own.
+        if not self.name_rules and not self.attribute_rules:
+            return False
+
+        # Optimization for a very common case where the user is
+        # searching for a tag with one specific name, and we're
+        # looking at a tag with a different name.
+        if (
+            not tag.prefix
+            and len(self.name_rules) == 1
+            and self.name_rules[0].string is not None
+            and tag.name != self.name_rules[0].string
+        ):
+            return False
+
+        # If there are name rules, at least one must match. It can
+        # match either the Tag object itself or the prefixed name of
+        # the tag.
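+        # For example (a sketch, not normative): a rule built from
+        # name="ns:tag" matches a Tag whose prefix is "ns" and whose
+        # name is "tag" via the prefixed form below, even though
+        # tag.name alone would not match.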
+ prefixed_name = None + if tag.prefix: + prefixed_name = f"{tag.prefix}:{tag.name}" + if self.name_rules: + name_matches = False + for rule in self.name_rules: + # attrs = " ".join( + # [f"{k}={v}" for k, v in sorted(tag.attrs.items())] + # ) + # print(f"Testing <{tag.name} {attrs}>{tag.string}</{tag.name}> against {rule}") + if rule.matches_tag(tag) or ( + prefixed_name is not None and rule.matches_string(prefixed_name) + ): + name_matches = True + break + + if not name_matches: + return False + + # If there are attribute rules for a given attribute, at least + # one of them must match. If there are rules for multiple + # attributes, each attribute must have at least one match. + for attr, rules in self.attribute_rules.items(): + attr_value = tag.get(attr, None) + this_attr_match = self._attribute_match(attr_value, rules) + if not this_attr_match: + return False + + # If there are string rules, at least one must match. + if self.string_rules: + _str = tag.string + if _str is None: + return False + if not self.matches_any_string_rule(_str): + return False + return True + + def _attribute_match( + self, + attr_value: Optional[_AttributeValue], + rules: Iterable[AttributeValueMatchRule], + ) -> bool: + attr_values: Sequence[Optional[str]] + if isinstance(attr_value, list): + attr_values = attr_value + else: + attr_values = [cast(str, attr_value)] + + def _match_attribute_value_helper(attr_values: Sequence[Optional[str]]) -> bool: + for rule in rules: + for attr_value in attr_values: + if rule.matches_string(attr_value): + return True + return False + + this_attr_match = _match_attribute_value_helper(attr_values) + if not this_attr_match and len(attr_values) > 1: + # This cast converts Optional[str] to plain str. + # + # We know if there's more than one value, there can't be + # any None in the list, because Beautiful Soup never uses + # None as a value of a multi-valued attribute, and if None + # is passed in as attr_value, it's turned into a list with + # a single element (thus len(attr_values) > 1 fails). + attr_values = cast(Sequence[str], attr_values) + + # Try again but treat the attribute value + # as a single string. + joined_attr_value = " ".join(attr_values) + this_attr_match = _match_attribute_value_helper([joined_attr_value]) + return this_attr_match + + def allow_tag_creation( + self, nsprefix: Optional[str], name: str, attrs: Optional[_RawAttributeValues] + ) -> bool: + """Based on the name and attributes of a tag, see whether this + `SoupStrainer` will allow a `Tag` object to even be created. + + :param name: The name of the prospective tag. + :param attrs: The attributes of the prospective tag. + """ + if self.string_rules: + # A SoupStrainer that has string rules can't be used to + # manage tag creation, because the string rule can't be + # evaluated until after the tag and all of its contents + # have been parsed. + return False + prefixed_name = None + if nsprefix: + prefixed_name = f"{nsprefix}:{name}" + if self.name_rules: + # At least one name rule must match. + name_match = False + for rule in self.name_rules: + for x in name, prefixed_name: + if x is not None: + if rule.matches_string(x): + name_match = True + break + if not name_match: + return False + + # For each attribute that has rules, at least one rule must + # match. 
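+        # Sketch of the intended use (hypothetical markup variable):
+        #   only_wide = SoupStrainer("img", width="100%")
+        #   BeautifulSoup(markup, "html.parser", parse_only=only_wide)
+        # Here allow_tag_creation(None, "img", {"width": "50%"}) -> False,
+        # so the non-matching tag is never turned into a Tag object.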
+ if attrs is None: + attrs = AttributeDict() + for attr, rules in self.attribute_rules.items(): + attr_value = attrs.get(attr) + if not self._attribute_match(attr_value, rules): + return False + + return True + + def allow_string_creation(self, string: str) -> bool: + """Based on the content of a markup string, see whether this + `SoupStrainer` will allow it to be instantiated as a + `NavigableString` object, or whether it should be ignored. + """ + if self.name_rules or self.attribute_rules: + # A SoupStrainer that has name or attribute rules won't + # match any strings; it's designed to match tags with + # certain properties. + return False + if not self.string_rules: + # A SoupStrainer with no string rules will match + # all strings. + return True + if not self.matches_any_string_rule(string): + return False + return True + + def matches_any_string_rule(self, string: str) -> bool: + """See whether the content of a string matches any of + this `SoupStrainer`'s string rules. + """ + if not self.string_rules: + return True + for string_rule in self.string_rules: + if string_rule.matches_string(string): + return True + return False + + def match(self, element: PageElement, _known_rules: bool=False) -> bool: + """Does the given `PageElement` match the rules set down by this + `SoupStrainer`? + + The find_* methods rely heavily on this method to find matches. + + :param element: A `PageElement`. + :param _known_rules: Set to true in the common case where + we already checked and found at least one rule in this SoupStrainer + that might exclude a PageElement. Without this, we need + to check .includes_everything every time, just to be safe. + :return: `True` if the element matches this `SoupStrainer`'s rules; `False` otherwise. + """ + # If there are no rules at all, let anything through. + if not _known_rules and self.includes_everything: + return True + if isinstance(element, Tag): + return self.matches_tag(element) + assert isinstance(element, NavigableString) + if not (self.name_rules or self.attribute_rules): + # A NavigableString can only match a SoupStrainer that + # does not define any name or attribute rules. + # Then it comes down to the string rules. + return self.matches_any_string_rule(element) + return False + + @_deprecated("allow_tag_creation", "4.13.0") + def search_tag(self, name: str, attrs: Optional[_RawAttributeValues]) -> bool: + """A less elegant version of `allow_tag_creation`. Deprecated as of 4.13.0""" + ":meta private:" + return self.allow_tag_creation(None, name, attrs) + + @_deprecated("match", "4.13.0") + def search(self, element: PageElement) -> Optional[PageElement]: + """A less elegant version of match(). Deprecated as of 4.13.0. + + :meta private: + """ + return element if self.match(element) else None diff --git a/.venv/lib/python3.12/site-packages/bs4/formatter.py b/.venv/lib/python3.12/site-packages/bs4/formatter.py new file mode 100644 index 00000000..bfa08764 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/formatter.py @@ -0,0 +1,276 @@ +from __future__ import annotations +from typing import Callable, Dict, Iterable, Optional, Set, Tuple, TYPE_CHECKING, Union +from typing_extensions import TypeAlias +from bs4.dammit import EntitySubstitution + +if TYPE_CHECKING: + from bs4._typing import _AttributeValue + + +class Formatter(EntitySubstitution): + """Describes a strategy to use when outputting a parse tree to a string. + + Some parts of this strategy come from the distinction between + HTML4, HTML5, and XML. Others are configurable by the user. 
+
+    Formatters are passed in as the `formatter` argument to methods
+    like `bs4.element.Tag.encode`. Most people won't need to
+    think about formatters, and most people who need to think about
+    them can pass in one of these predefined strings as `formatter`
+    rather than making a new Formatter object:
+
+    For HTML documents:
+     * 'html' - HTML entity substitution for generic HTML documents. (default)
+     * 'html5' - HTML entity substitution for HTML5 documents, as
+       well as some optimizations in the way tags are rendered.
+     * 'html5-4.12.0' - The version of the 'html5' formatter used prior to
+       Beautiful Soup 4.13.0.
+     * 'minimal' - Only make the substitutions necessary to guarantee
+       valid HTML.
+     * None - Do not perform any substitution. This will be faster
+       but may result in invalid markup.
+
+    For XML documents:
+     * 'html' - Entity substitution for XHTML documents.
+     * 'minimal' - Only make the substitutions necessary to guarantee
+       valid XML. (default)
+     * None - Do not perform any substitution. This will be faster
+       but may result in invalid markup.
+
+    """
+
+    #: Constant name denoting HTML markup
+    HTML: str = "html"
+
+    #: Constant name denoting XML markup
+    XML: str = "xml"
+
+    #: Default values for the various constructor options when the
+    #: markup language is HTML.
+    HTML_DEFAULTS: Dict[str, Set[str]] = dict(
+        cdata_containing_tags=set(["script", "style"]),
+    )
+
+    language: Optional[str]  #: :meta private:
+    entity_substitution: Optional[_EntitySubstitutionFunction]  #: :meta private:
+    void_element_close_prefix: str  #: :meta private:
+    cdata_containing_tags: Set[str]  #: :meta private:
+    indent: str  #: :meta private:
+
+    #: If this is set to true by the constructor, then attributes whose
+    #: values are set to the empty string will be treated as HTML
+    #: boolean attributes. (Attributes whose value is None are always
+    #: rendered this way.)
+    empty_attributes_are_booleans: bool
+
+    def _default(
+        self, language: str, value: Optional[Set[str]], kwarg: str
+    ) -> Set[str]:
+        if value is not None:
+            return value
+        if language == self.XML:
+            # When XML is the markup language in use, all of the
+            # defaults are the empty set.
+            return set()
+
+        # Otherwise, it depends on what's in HTML_DEFAULTS.
+        return self.HTML_DEFAULTS[kwarg]
+
+    def __init__(
+        self,
+        language: Optional[str] = None,
+        entity_substitution: Optional[_EntitySubstitutionFunction] = None,
+        void_element_close_prefix: str = "/",
+        cdata_containing_tags: Optional[Set[str]] = None,
+        empty_attributes_are_booleans: bool = False,
+        indent: Union[int, str] = 1,
+    ):
+        r"""Constructor.
+
+        :param language: This should be `Formatter.XML` if you are formatting
+           XML markup and `Formatter.HTML` if you are formatting HTML markup.
+
+        :param entity_substitution: A function to call to replace special
+           characters with XML/HTML entities. For examples, see
+           bs4.dammit.EntitySubstitution.substitute_html and substitute_xml.
+        :param void_element_close_prefix: By default, void elements
+           are represented as <tag/> (XML rules) rather than <tag>
+           (HTML rules). To get <tag>, pass in the empty string.
+        :param cdata_containing_tags: The set of tags that are defined
+           as containing CDATA in this dialect. For example, in HTML,
+           <script> and <style> tags are defined as containing CDATA,
+           and their contents should not be formatted.
+        :param empty_attributes_are_booleans: If this is set to true,
+           then attributes whose values are set to the empty string
+           will be treated as `HTML boolean
+           attributes<https://dev.w3.org/html5/spec-LC/common-microsyntaxes.html#boolean-attributes>`_. (Attributes
+           whose value is None are always rendered this way.)
+        :param indent: If indent is a non-negative integer or string,
+           then the contents of elements will be indented
+           appropriately when pretty-printing. An indent level of 0,
+           negative, or "" will only insert newlines. Using a
+           positive integer indent indents that many spaces per
+           level. If indent is a string (such as "\t"), that string
+           is used to indent each level. The default behavior is to
+           indent one space per level.
+        """
+        self.language = language or self.HTML
+        self.entity_substitution = entity_substitution
+        self.void_element_close_prefix = void_element_close_prefix
+        self.cdata_containing_tags = self._default(
+            self.language, cdata_containing_tags, "cdata_containing_tags"
+        )
+        self.empty_attributes_are_booleans = empty_attributes_are_booleans
+        if indent is None:
+            indent = 0
+        indent_str: str
+        if isinstance(indent, int):
+            if indent < 0:
+                indent = 0
+            indent_str = " " * indent
+        elif isinstance(indent, str):
+            indent_str = indent
+        else:
+            indent_str = " "
+        self.indent = indent_str
+
+    def substitute(self, ns: str) -> str:
+        """Process a string that needs to undergo entity substitution.
+        This may be a string encountered in an attribute value or as
+        text.
+
+        :param ns: A string.
+        :return: The same string but with certain characters replaced by named
+           or numeric entities.
+        """
+        if not self.entity_substitution:
+            return ns
+        from .element import NavigableString
+
+        if (
+            isinstance(ns, NavigableString)
+            and ns.parent is not None
+            and ns.parent.name in self.cdata_containing_tags
+        ):
+            # Do nothing.
+            return ns
+        # Substitute.
+        return self.entity_substitution(ns)
+
+    def attribute_value(self, value: str) -> str:
+        """Process the value of an attribute.
+
+        :param value: A string.
+        :return: A string with certain characters replaced by named
+           or numeric entities.
+        """
+        return self.substitute(value)
+
+    def attributes(
+        self, tag: bs4.element.Tag
+    ) -> Iterable[Tuple[str, Optional[_AttributeValue]]]:
+        """Reorder a tag's attributes however you want.
+
+        By default, attributes are sorted alphabetically. This makes
+        behavior consistent between Python 2 and Python 3, and preserves
+        backwards compatibility with older versions of Beautiful Soup.
+
+        If `empty_attributes_are_booleans` is True, then
+        attributes whose values are set to the empty string will be
+        treated as boolean attributes.
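+
+        An illustrative sketch of the boolean-attribute behavior (our
+        example, not an official doctest)::
+
+            from bs4 import BeautifulSoup
+            from bs4.formatter import HTMLFormatter
+
+            soup = BeautifulSoup('<option selected=""></option>', "html.parser")
+            formatter = HTMLFormatter(empty_attributes_are_booleans=True)
+            print(soup.option.decode(formatter=formatter))
+            # <option selected></option>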
+ """ + if tag.attrs is None: + return [] + + items: Iterable[Tuple[str, _AttributeValue]] = list(tag.attrs.items()) + return sorted( + (k, (None if self.empty_attributes_are_booleans and v == "" else v)) + for k, v in items + ) + + +class HTMLFormatter(Formatter): + """A generic Formatter for HTML.""" + + REGISTRY: Dict[Optional[str], HTMLFormatter] = {} + + def __init__( + self, + entity_substitution: Optional[_EntitySubstitutionFunction] = None, + void_element_close_prefix: str = "/", + cdata_containing_tags: Optional[Set[str]] = None, + empty_attributes_are_booleans: bool = False, + indent: Union[int,str] = 1, + ): + super(HTMLFormatter, self).__init__( + self.HTML, + entity_substitution, + void_element_close_prefix, + cdata_containing_tags, + empty_attributes_are_booleans, + indent=indent + ) + + +class XMLFormatter(Formatter): + """A generic Formatter for XML.""" + + REGISTRY: Dict[Optional[str], XMLFormatter] = {} + + def __init__( + self, + entity_substitution: Optional[_EntitySubstitutionFunction] = None, + void_element_close_prefix: str = "/", + cdata_containing_tags: Optional[Set[str]] = None, + empty_attributes_are_booleans: bool = False, + indent: Union[int,str] = 1, + ): + super(XMLFormatter, self).__init__( + self.XML, + entity_substitution, + void_element_close_prefix, + cdata_containing_tags, + empty_attributes_are_booleans, + indent=indent, + ) + + +# Set up aliases for the default formatters. +HTMLFormatter.REGISTRY["html"] = HTMLFormatter( + entity_substitution=EntitySubstitution.substitute_html +) + +HTMLFormatter.REGISTRY["html5"] = HTMLFormatter( + entity_substitution=EntitySubstitution.substitute_html5, + void_element_close_prefix="", + empty_attributes_are_booleans=True, +) +HTMLFormatter.REGISTRY["html5-4.12"] = HTMLFormatter( + entity_substitution=EntitySubstitution.substitute_html, + void_element_close_prefix="", + empty_attributes_are_booleans=True, +) +HTMLFormatter.REGISTRY["minimal"] = HTMLFormatter( + entity_substitution=EntitySubstitution.substitute_xml +) +HTMLFormatter.REGISTRY[None] = HTMLFormatter(entity_substitution=None) +XMLFormatter.REGISTRY["html"] = XMLFormatter( + entity_substitution=EntitySubstitution.substitute_html +) +XMLFormatter.REGISTRY["minimal"] = XMLFormatter( + entity_substitution=EntitySubstitution.substitute_xml +) + +XMLFormatter.REGISTRY[None] = XMLFormatter(entity_substitution=None) + +# Define type aliases to improve readability. +# + +#: A function to call to replace special characters with XML or HTML +#: entities. +_EntitySubstitutionFunction: TypeAlias = Callable[[str], str] + +# Many of the output-centered methods take an argument that can either +# be a Formatter object or the name of a Formatter to be looked up. +_FormatterOrName = Union[Formatter, str] diff --git a/.venv/lib/python3.12/site-packages/bs4/py.typed b/.venv/lib/python3.12/site-packages/bs4/py.typed new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/py.typed diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/__init__.py b/.venv/lib/python3.12/site-packages/bs4/tests/__init__.py new file mode 100644 index 00000000..b36f3f38 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/__init__.py @@ -0,0 +1,1305 @@ +# encoding: utf-8 +"""Helper classes for tests.""" + +# Use of this source code is governed by the MIT license. 
+__license__ = "MIT" + +import pickle +import importlib +import copy +import warnings +import pytest +from bs4 import BeautifulSoup +from bs4.element import ( + AttributeValueList, + CharsetMetaAttributeValue, + Comment, + ContentMetaAttributeValue, + Doctype, + PageElement, + PYTHON_SPECIFIC_ENCODINGS, + Script, + Stylesheet, + Tag, +) +from bs4.filter import SoupStrainer +from bs4.builder import ( + XMLParsedAsHTMLWarning, +) +from bs4._typing import _IncomingMarkup + +from bs4.builder import TreeBuilder +from bs4.builder._htmlparser import HTMLParserTreeBuilder + +from typing import ( + Any, + Iterable, + Optional, + Tuple, + Type, +) + +# Some tests depend on specific third-party libraries. We use +# @pytest.mark.skipIf on the following conditionals to skip them +# if the libraries are not installed. +try: + from soupsieve import SelectorSyntaxError + + SOUP_SIEVE_PRESENT = True +except ImportError: + SOUP_SIEVE_PRESENT = False + +HTML5LIB_PRESENT = importlib.util.find_spec("html5lib") is not None + +try: + import lxml.etree + LXML_PRESENT = True + LXML_VERSION = lxml.etree.LXML_VERSION +except ImportError: + LXML_PRESENT = False + LXML_VERSION = (0,) + +default_builder: Type[TreeBuilder] = HTMLParserTreeBuilder + +BAD_DOCUMENT: str = """A bare string +<!DOCTYPE xsl:stylesheet SYSTEM "htmlent.dtd"> +<!DOCTYPE xsl:stylesheet PUBLIC "htmlent.dtd"> +<div><![CDATA[A CDATA section where it doesn't belong]]></div> +<div><svg><![CDATA[HTML5 does allow CDATA sections in SVG]]></svg></div> +<div>A <meta> tag</div> +<div>A <br> tag that supposedly has contents.</br></div> +<div>AT&T</div> +<div><textarea>Within a textarea, markup like <b> tags and <&<& should be treated as literal</textarea></div> +<div><script>if (i < 2) { alert("<b>Markup within script tags should be treated as literal.</b>"); }</script></div> +<div>This numeric entity is missing the final semicolon: <x t="piñata"></div> +<div><a href="http://example.com/</a> that attribute value never got closed</div> +<div><a href="foo</a>, </a><a href="bar">that attribute value was closed by the subsequent tag</a></div> +<! This document starts with a bogus declaration ><div>a</div> +<div>This document contains <!an incomplete declaration <div>(do you see it?)</div> +<div>This document ends with <!an incomplete declaration +<div><a style={height:21px;}>That attribute value was bogus</a></div> +<! 
DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">The doctype is invalid because it contains extra whitespace +<div><table><td nowrap>That boolean attribute had no value</td></table></div> +<div>Here's a nonexistent entity: &#foo; (do you see it?)</div> +<div>This document ends before the entity finishes: > +<div><p>Paragraphs shouldn't contain block display elements, but this one does: <dl><dt>you see?</dt></p> +<b b="20" a="1" b="10" a="2" a="3" a="4">Multiple values for the same attribute.</b> +<div><table><tr><td>Here's a table</td></tr></table></div> +<div><table id="1"><tr><td>Here's a nested table:<table id="2"><tr><td>foo</td></tr></table></td></div> +<div>This tag contains nothing but whitespace: <b> </b></div> +<div><blockquote><p><b>This p tag is cut off by</blockquote></p>the end of the blockquote tag</div> +<div><table><div>This table contains bare markup</div></table></div> +<div><div id="1">\n <a href="link1">This link is never closed.\n</div>\n<div id="2">\n <div id="3">\n <a href="link2">This link is closed.</a>\n </div>\n</div></div> +<div>This document contains a <!DOCTYPE surprise>surprise doctype</div> +<div><a><B><Cd><EFG>Mixed case tags are folded to lowercase</efg></CD></b></A></div> +<div><our\u2603>Tag name contains Unicode characters</our\u2603></div> +<div><a \u2603="snowman">Attribute name contains Unicode characters</a></div> +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> +""" + + +class SoupTest(object): + @property + def default_builder(self) -> Type[TreeBuilder]: + return default_builder + + def soup(self, markup: _IncomingMarkup, **kwargs: Any) -> BeautifulSoup: + """Build a Beautiful Soup object from markup.""" + builder = kwargs.pop("builder", self.default_builder) + return BeautifulSoup(markup, builder=builder, **kwargs) + + def document_for(self, markup: str, **kwargs: Any) -> str: + """Turn an HTML fragment into a document. + + The details depend on the builder. + """ + return self.default_builder(**kwargs).test_fragment_to_document(markup) + + def assert_soup( + self, to_parse: _IncomingMarkup, compare_parsed_to: Optional[str] = None + ) -> None: + """Parse some markup using Beautiful Soup and verify that + the output markup is as expected. + """ + builder = self.default_builder + obj = BeautifulSoup(to_parse, builder=builder) + if compare_parsed_to is None: + assert isinstance(to_parse, str) + compare_parsed_to = to_parse + + # Verify that the documents come out the same. + assert obj.decode() == self.document_for(compare_parsed_to) + + # Also run some checks on the BeautifulSoup object itself: + + # Verify that every tag that was opened was eventually closed. + + # There are no tags in the open tag counter. + assert all(v == 0 for v in list(obj.open_tag_counter.values())) + + # The only tag in the tag stack is the one for the root + # document. + assert [obj.ROOT_TAG_NAME] == [x.name for x in obj.tagStack] + + assertSoupEquals = assert_soup + + def assertConnectedness(self, element: Tag) -> None: + """Ensure that next_element and previous_element are properly + set for all descendants of the given element. 
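+
+        For example (sketch): in ``<p>a<b>c</b></p>`` the chain runs
+        p -> "a" -> <b> -> "c", and every link must be reversible.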
+ """ + earlier = None + for e in element.descendants: + if earlier: + assert e == earlier.next_element + assert earlier == e.previous_element + earlier = e + + def linkage_validator( + self, el: Tag, _recursive_call: bool = False + ) -> Optional[PageElement]: + """Ensure proper linkage throughout the document.""" + descendant = None + # Document element should have no previous element or previous sibling. + # It also shouldn't have a next sibling. + if el.parent is None: + assert ( + el.previous_element is None + ), "Bad previous_element\nNODE: {}\nPREV: {}\nEXPECTED: {}".format( + el, el.previous_element, None + ) + assert ( + el.previous_sibling is None + ), "Bad previous_sibling\nNODE: {}\nPREV: {}\nEXPECTED: {}".format( + el, el.previous_sibling, None + ) + assert ( + el.next_sibling is None + ), "Bad next_sibling\nNODE: {}\nNEXT: {}\nEXPECTED: {}".format( + el, el.next_sibling, None + ) + + idx = 0 + child = None + last_child = None + last_idx = len(el.contents) - 1 + for child in el.contents: + descendant = None + + # Parent should link next element to their first child + # That child should have no previous sibling + if idx == 0: + if el.parent is not None: + assert ( + el.next_element is child + ), "Bad next_element\nNODE: {}\nNEXT: {}\nEXPECTED: {}".format( + el, el.next_element, child + ) + assert ( + child.previous_element is el + ), "Bad previous_element\nNODE: {}\nPREV: {}\nEXPECTED: {}".format( + child, child.previous_element, el + ) + assert ( + child.previous_sibling is None + ), "Bad previous_sibling\nNODE: {}\nPREV {}\nEXPECTED: {}".format( + child, child.previous_sibling, None + ) + + # If not the first child, previous index should link as sibling to this index + # Previous element should match the last index or the last bubbled up descendant + else: + assert ( + child.previous_sibling is el.contents[idx - 1] + ), "Bad previous_sibling\nNODE: {}\nPREV {}\nEXPECTED {}".format( + child, child.previous_sibling, el.contents[idx - 1] + ) + assert ( + el.contents[idx - 1].next_sibling is child + ), "Bad next_sibling\nNODE: {}\nNEXT {}\nEXPECTED {}".format( + el.contents[idx - 1], el.contents[idx - 1].next_sibling, child + ) + + if last_child is not None: + assert ( + child.previous_element is last_child + ), "Bad previous_element\nNODE: {}\nPREV {}\nEXPECTED {}\nCONTENTS {}".format( + child, child.previous_element, last_child, child.parent.contents + ) + assert ( + last_child.next_element is child + ), "Bad next_element\nNODE: {}\nNEXT {}\nEXPECTED {}".format( + last_child, last_child.next_element, child + ) + + if isinstance(child, Tag) and child.contents: + descendant = self.linkage_validator(child, True) + assert descendant is not None + # A bubbled up descendant should have no next siblings + assert ( + descendant.next_sibling is None + ), "Bad next_sibling\nNODE: {}\nNEXT {}\nEXPECTED {}".format( + descendant, descendant.next_sibling, None + ) + + # Mark last child as either the bubbled up descendant or the current child + if descendant is not None: + last_child = descendant + else: + last_child = child + + # If last child, there are non next siblings + if idx == last_idx: + assert ( + child.next_sibling is None + ), "Bad next_sibling\nNODE: {}\nNEXT {}\nEXPECTED {}".format( + child, child.next_sibling, None + ) + idx += 1 + + child = descendant if descendant is not None else child + if child is None: + child = el + + if not _recursive_call and child is not None: + target: Optional[Tag] = el + while True: + if target is None: + assert ( + child.next_element is None + ), 
"Bad next_element\nNODE: {}\nNEXT {}\nEXPECTED {}".format( + child, child.next_element, None + ) + break + elif target.next_sibling is not None: + assert ( + child.next_element is target.next_sibling + ), "Bad next_element\nNODE: {}\nNEXT {}\nEXPECTED {}".format( + child, child.next_element, target.next_sibling + ) + break + target = target.parent + + # We are done, so nothing to return + return None + else: + # Return the child to the recursive caller + return child + + def assert_selects(self, tags: Iterable[Tag], should_match: Iterable[str]) -> None: + """Make sure that the given tags have the correct text. + + This is used in tests that define a bunch of tags, each + containing a single string, and then select certain strings by + some mechanism. + """ + assert [tag.string for tag in tags] == should_match + + def assert_selects_ids( + self, tags: Iterable[Tag], should_match: Iterable[str] + ) -> None: + """Make sure that the given tags have the correct IDs. + + This is used in tests that define a bunch of tags, each + containing a single string, and then select certain strings by + some mechanism. + """ + assert [tag["id"] for tag in tags] == should_match + + +class TreeBuilderSmokeTest(SoupTest): + # Tests that are common to HTML and XML tree builders. + + @pytest.mark.parametrize( + "multi_valued_attributes", [None, {}, dict(b=["class"]), {"*": ["notclass"]}] + ) + def test_attribute_not_multi_valued(self, multi_valued_attributes): + markup = '<html xmlns="http://www.w3.org/1999/xhtml"><a class="a b c"></html>' + soup = self.soup(markup, multi_valued_attributes=multi_valued_attributes) + assert soup.a["class"] == "a b c" + + @pytest.mark.parametrize( + "multi_valued_attributes", [dict(a=["class"]), {"*": ["class"]}] + ) + def test_attribute_multi_valued(self, multi_valued_attributes): + markup = '<a class="a b c">' + soup = self.soup(markup, multi_valued_attributes=multi_valued_attributes) + assert soup.a["class"] == ["a", "b", "c"] + + def test_invalid_doctype(self): + # We don't have an official opinion on how these are parsed, + # but they shouldn't crash any of the parsers. + markup = "<![if word]>content<![endif]>" + self.soup(markup) + markup = "<!DOCTYPE html]ff>" + self.soup(markup) + + def test_doctype_filtered(self): + markup = "<!DOCTYPE html>\n<html>\n</html>" + soup = self.soup(markup, parse_only=SoupStrainer(name="html")) + assert not any(isinstance(x, Doctype) for x in soup.descendants) + + def test_custom_attribute_dict_class(self): + class MyAttributeDict(dict): + def __setitem__(self, key: str, value: Any): + # Ignore the provided value and substitute a + # hard-coded one. 
+ super().__setitem__(key, "OVERRIDDEN") + + markup = '<a attr1="val1" attr2="val2">f</a>' + builder = self.default_builder(attribute_dict_class=MyAttributeDict) + soup = self.soup(markup, builder=builder) + tag = soup.a + assert isinstance(tag.attrs, MyAttributeDict) + assert "OVERRIDDEN" == tag["attr1"] + tag["attr3"] = True + assert "OVERRIDDEN" == tag["attr3"] + + expect = '<a attr1="OVERRIDDEN" attr2="OVERRIDDEN" attr3="OVERRIDDEN">f</a>' + assert expect == tag.decode() + + def test_custom_attribute_value_list_class(self): + class MyCustomAttributeValueList(AttributeValueList): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.append("extra") + + builder = self.default_builder( + multi_valued_attributes={"*": set(["attr2"])}, + attribute_value_list_class=MyCustomAttributeValueList, + ) + markup = '<a attr1="val1" attr2="val2">f</a>' + soup = self.soup(markup, builder=builder) + tag = soup.a + assert tag["attr1"] == "val1" + assert tag["attr2"] == ["val2", "extra"] + assert isinstance(tag["attr2"], MyCustomAttributeValueList) + + +class HTMLTreeBuilderSmokeTest(TreeBuilderSmokeTest): + """A basic test of a treebuilder's competence. + + Any HTML treebuilder, present or future, should be able to pass + these tests. With invalid markup, there's room for interpretation, + and different parsers can handle it differently. But with the + markup in these tests, there's not much room for interpretation. + """ + + def test_empty_element_tags(self): + """Verify that all HTML4 and HTML5 empty element (aka void element) tags + are handled correctly. + """ + for name in [ + "area", + "base", + "br", + "col", + "embed", + "hr", + "img", + "input", + "keygen", + "link", + "menuitem", + "meta", + "param", + "source", + "track", + "wbr", + "spacer", + "frame", + ]: + soup = self.soup("") + new_tag = soup.new_tag(name) + assert new_tag.is_empty_element is True + + self.assert_soup("<br/><br/><br/>", "<br/><br/><br/>") + self.assert_soup("<br /><br /><br />", "<br/><br/><br/>") + + def test_special_string_containers(self): + soup = self.soup("<style>Some CSS</style><script>Some Javascript</script>") + assert isinstance(soup.style.string, Stylesheet) + assert isinstance(soup.script.string, Script) + + soup = self.soup("<style><!--Some CSS--></style>") + assert isinstance(soup.style.string, Stylesheet) + # The contents of the style tag resemble an HTML comment, but + # it's not treated as a comment. + assert soup.style.string == "<!--Some CSS-->" + assert isinstance(soup.style.string, Stylesheet) + + def test_pickle_and_unpickle_identity(self): + # Pickling a tree, then unpickling it, yields a tree identical + # to the original. + tree = self.soup("<a><b>foo</a>") + dumped = pickle.dumps(tree, 2) + loaded = pickle.loads(dumped) + assert loaded.__class__ == BeautifulSoup + assert loaded.decode() == tree.decode() + + def assertDoctypeHandled(self, doctype_fragment: str) -> None: + """Assert that a given doctype string is handled correctly.""" + doctype_str, soup = self._document_with_doctype(doctype_fragment) + + # Make sure a Doctype object was created. + doctype = soup.contents[0] + assert doctype.__class__ == Doctype + assert doctype == doctype_fragment + assert soup.encode("utf8")[: len(doctype_str)] == doctype_str + + # Make sure that the doctype was correctly associated with the + # parse tree and that the rest of the document parsed. 
+ assert soup.p is not None + assert soup.p.contents[0] == "foo" + + def _document_with_doctype( + self, doctype_fragment: str, doctype_string: str = "DOCTYPE" + ) -> Tuple[bytes, BeautifulSoup]: + """Generate and parse a document with the given doctype.""" + doctype = "<!%s %s>" % (doctype_string, doctype_fragment) + markup = doctype + "\n<p>foo</p>" + soup = self.soup(markup) + return doctype.encode("utf8"), soup + + def test_normal_doctypes(self): + """Make sure normal, everyday HTML doctypes are handled correctly.""" + self.assertDoctypeHandled("html") + self.assertDoctypeHandled( + 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"' + ) + + def test_empty_doctype(self): + soup = self.soup("<!DOCTYPE>") + doctype = soup.contents[0] + assert "" == doctype.strip() + + def test_mixed_case_doctype(self): + # A lowercase or mixed-case doctype becomes a Doctype. + for doctype_fragment in ("doctype", "DocType"): + doctype_str, soup = self._document_with_doctype("html", doctype_fragment) + + # Make sure a Doctype object was created and that the DOCTYPE + # is uppercase. + doctype = soup.contents[0] + assert doctype.__class__ == Doctype + assert doctype == "html" + assert soup.encode("utf8")[: len(doctype_str)] == b"<!DOCTYPE html>" + + # Make sure that the doctype was correctly associated with the + # parse tree and that the rest of the document parsed. + assert soup.p.contents[0] == "foo" + + def test_public_doctype_with_url(self): + doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"' + self.assertDoctypeHandled(doctype) + + def test_system_doctype(self): + self.assertDoctypeHandled('foo SYSTEM "http://www.example.com/"') + + def test_namespaced_system_doctype(self): + # We can handle a namespaced doctype with a system ID. + self.assertDoctypeHandled('xsl:stylesheet SYSTEM "htmlent.dtd"') + + def test_namespaced_public_doctype(self): + # Test a namespaced doctype with a public id. + self.assertDoctypeHandled('xsl:stylesheet PUBLIC "htmlent.dtd"') + + def test_real_xhtml_document(self): + """A real XHTML document should come out more or less the same as it went in.""" + markup = b"""<?xml version="1.0" encoding="utf-8"?> +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"> +<html xmlns="http://www.w3.org/1999/xhtml"> +<head><title>Hello.</title></head> +<body>Goodbye.</body> +</html>""" + with warnings.catch_warnings(record=True) as w: + soup = self.soup(markup) + assert soup.encode("utf-8").replace(b"\n", b"") == markup.replace(b"\n", b"") + + # No warning was issued about parsing an XML document as HTML, + # because XHTML is both. + assert w == [] + + def test_namespaced_html(self): + # When a namespaced XML document is parsed as HTML it should + # be treated as HTML with weird tag names. + markup = b"""<ns1:foo>content</ns1:foo><ns1:foo/><ns2:foo/>""" + with warnings.catch_warnings(record=True) as w: + soup = self.soup(markup) + + assert 2 == len(soup.find_all("ns1:foo")) + + # n.b. no "you're parsing XML as HTML" warning was given + # because there was no XML declaration. + assert [] == w + + def test_detect_xml_parsed_as_html(self): + # A warning is issued when parsing an XML document as HTML, + # but basic stuff should still work. 
+ markup = b"""<?xml version="1.0" encoding="utf-8"?><tag>string</tag>""" + with warnings.catch_warnings(record=True) as w: + soup = self.soup(markup) + assert soup.tag.string == "string" + [warning] = w + assert isinstance(warning.message, XMLParsedAsHTMLWarning) + assert str(warning.message) == XMLParsedAsHTMLWarning.MESSAGE + + # NOTE: the warning is not issued if the document appears to + # be XHTML (tested with test_real_xhtml_document in the + # superclass) or if there is no XML declaration (tested with + # test_namespaced_html in the superclass). + + def test_processing_instruction(self): + # We test both Unicode and bytestring to verify that + # process_markup correctly sets processing_instruction_class + # even when the markup is already Unicode and there is no + # need to process anything. + markup = """<?PITarget PIContent?>""" + soup = self.soup(markup) + assert markup == soup.decode() + + markup = b"""<?PITarget PIContent?>""" + soup = self.soup(markup) + assert markup == soup.encode("utf8") + + def test_deepcopy(self): + """Make sure you can copy the tree builder. + + This is important because the builder is part of a + BeautifulSoup object, and we want to be able to copy that. + """ + copy.deepcopy(self.default_builder) + + def test_p_tag_is_never_empty_element(self): + """A <p> tag is never designated as an empty-element tag. + + Even if the markup shows it as an empty-element tag, it + shouldn't be presented that way. + """ + soup = self.soup("<p/>") + assert not soup.p.is_empty_element + assert str(soup.p) == "<p></p>" + + def test_unclosed_tags_get_closed(self): + """A tag that's not closed by the end of the document should be closed. + + This applies to all tags except empty-element tags. + """ + self.assert_soup("<p>", "<p></p>") + self.assert_soup("<b>", "<b></b>") + + self.assert_soup("<br>", "<br/>") + + def test_br_is_always_empty_element_tag(self): + """A <br> tag is designated as an empty-element tag. + + Some parsers treat <br></br> as one <br/> tag, some parsers as + two tags, but it should always be an empty-element tag. + """ + soup = self.soup("<br></br>") + assert soup.br.is_empty_element + assert str(soup.br) == "<br/>" + + def test_nested_formatting_elements(self): + self.assert_soup("<em><em></em></em>") + + def test_double_head(self): + html = """<!DOCTYPE html> +<html> +<head> +<title>Ordinary HEAD element test</title> +</head> +<script type="text/javascript"> +alert("Help!"); +</script> +<body> +Hello, world! +</body> +</html> +""" + soup = self.soup(html) + assert "text/javascript" == soup.find("script")["type"] + + def test_comment(self): + # Comments are represented as Comment objects. + markup = "<p>foo<!--foobar-->baz</p>" + self.assert_soup(markup) + + soup = self.soup(markup) + comment = soup.find(string="foobar") + assert comment.__class__ == Comment + + # The comment is properly integrated into the tree. + foo = soup.find(string="foo") + assert comment == foo.next_element + baz = soup.find(string="baz") + assert comment == baz.previous_element + + def test_preserved_whitespace_in_pre_and_textarea(self): + """Whitespace must be preserved in <pre> and <textarea> tags, + even if that would mean not prettifying the markup. 
+ """ + pre_markup = "<pre>a z</pre>\n" + textarea_markup = "<textarea> woo\nwoo </textarea>\n" + self.assert_soup(pre_markup) + self.assert_soup(textarea_markup) + + soup = self.soup(pre_markup) + assert soup.pre.prettify() == pre_markup + + soup = self.soup(textarea_markup) + assert soup.textarea.prettify() == textarea_markup + + soup = self.soup("<textarea></textarea>") + assert soup.textarea.prettify() == "<textarea></textarea>\n" + + def test_nested_inline_elements(self): + """Inline elements can be nested indefinitely.""" + b_tag = "<b>Inside a B tag</b>" + self.assert_soup(b_tag) + + nested_b_tag = "<p>A <i>nested <b>tag</b></i></p>" + self.assert_soup(nested_b_tag) + + double_nested_b_tag = "<p>A <a>doubly <i>nested <b>tag</b></i></a></p>" + self.assert_soup(double_nested_b_tag) + + def test_nested_block_level_elements(self): + """Block elements can be nested.""" + soup = self.soup("<blockquote><p><b>Foo</b></p></blockquote>") + blockquote = soup.blockquote + assert blockquote.p.b.string == "Foo" + assert blockquote.b.string == "Foo" + + def test_correctly_nested_tables(self): + """One table can go inside another one.""" + markup = ( + '<table id="1">' + "<tr>" + "<td>Here's another table:" + '<table id="2">' + "<tr><td>foo</td></tr>" + "</table></td>" + ) + + self.assert_soup( + markup, + '<table id="1"><tr><td>Here\'s another table:' + '<table id="2"><tr><td>foo</td></tr></table>' + "</td></tr></table>", + ) + + self.assert_soup( + "<table><thead><tr><td>Foo</td></tr></thead>" + "<tbody><tr><td>Bar</td></tr></tbody>" + "<tfoot><tr><td>Baz</td></tr></tfoot></table>" + ) + + def test_multivalued_attribute_with_whitespace(self): + # Whitespace separating the values of a multi-valued attribute + # should be ignored. + + markup = '<div class=" foo bar "></a>' + soup = self.soup(markup) + assert ["foo", "bar"] == soup.div["class"] + + # If you search by the literal name of the class it's like the whitespace + # wasn't there. + assert soup.div == soup.find("div", class_="foo bar") + + def test_deeply_nested_multivalued_attribute(self): + # html5lib can set the attributes of the same tag many times + # as it rearranges the tree. This has caused problems with + # multivalued attributes. + markup = '<table><div><div class="css"></div></div></table>' + soup = self.soup(markup) + assert ["css"] == soup.div.div["class"] + + def test_multivalued_attribute_on_html(self): + # html5lib uses a different API to set the attributes ot the + # <html> tag. This has caused problems with multivalued + # attributes. + markup = '<html class="a b"></html>' + soup = self.soup(markup) + assert ["a", "b"] == soup.html["class"] + + def test_angle_brackets_in_attribute_values_are_escaped(self): + self.assert_soup('<a b="<a>"></a>', '<a b="<a>"></a>') + + def test_strings_resembling_character_entity_references(self): + # "&T" and "&p" look like incomplete character entities, but they are + # not. + self.assert_soup( + "<p>• AT&T is in the s&p 500</p>", + "<p>\u2022 AT&T is in the s&p 500</p>", + ) + + def test_apos_entity(self): + self.assert_soup( + "<p>Bob's Bar</p>", + "<p>Bob's Bar</p>", + ) + + def test_entities_in_foreign_document_encoding(self): + # “ and ” are invalid numeric entities referencing + # Windows-1252 characters. - references a character common + # to Windows-1252 and Unicode, and ☃ references a + # character only found in Unicode. + # + # All of these entities should be converted to Unicode + # characters. 
+ markup = "<p>“Hello” -☃</p>" + soup = self.soup(markup) + assert "“Hello†-☃" == soup.p.string + + def test_entities_in_attributes_converted_to_unicode(self): + expect = '<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>' + self.assert_soup('<p id="piñata"></p>', expect) + self.assert_soup('<p id="piñata"></p>', expect) + self.assert_soup('<p id="piñata"></p>', expect) + self.assert_soup('<p id="piñata"></p>', expect) + + def test_entities_in_text_converted_to_unicode(self): + expect = "<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>" + self.assert_soup("<p>piñata</p>", expect) + self.assert_soup("<p>piñata</p>", expect) + self.assert_soup("<p>piñata</p>", expect) + self.assert_soup("<p>piñata</p>", expect) + + def test_quot_entity_converted_to_quotation_mark(self): + self.assert_soup( + "<p>I said "good day!"</p>", '<p>I said "good day!"</p>' + ) + + def test_out_of_range_entity(self): + expect = "\N{REPLACEMENT CHARACTER}" + self.assert_soup("�", expect) + self.assert_soup("�", expect) + self.assert_soup("�", expect) + + def test_multipart_strings(self): + "Mostly to prevent a recurrence of a bug in the html5lib treebuilder." + soup = self.soup("<html><h2>\nfoo</h2><p></p></html>") + assert "p" == soup.h2.string.next_element.name + assert "p" == soup.p.name + self.assertConnectedness(soup) + + def test_invalid_html_entity(self): + # The html.parser treebuilder can't distinguish between an + # invalid HTML entity with a semicolon and an invalid HTML + # entity with no semicolon (see its subclass for the tested + # behavior). But the other treebuilders can. + markup = "<p>a &nosuchentity b</p>" + soup = self.soup(markup) + assert "<p>a &nosuchentity b</p>" == soup.p.decode() + + markup = "<p>a &nosuchentity; b</p>" + soup = self.soup(markup) + assert "<p>a &nosuchentity; b</p>" == soup.p.decode() + + def test_head_tag_between_head_and_body(self): + "Prevent recurrence of a bug in the html5lib treebuilder." + content = """<html><head></head> + <link></link> + <body>foo</body> +</html> +""" + soup = self.soup(content) + assert soup.html.body is not None + self.assertConnectedness(soup) + + def test_multiple_copies_of_a_tag(self): + "Prevent recurrence of a bug in the html5lib treebuilder." + content = """<!DOCTYPE html> +<html> + <body> + <article id="a" > + <div><a href="1"></div> + <footer> + <a href="2"></a> + </footer> + </article> + </body> +</html> +""" + soup = self.soup(content) + self.assertConnectedness(soup.article) + + def test_basic_namespaces(self): + """Parsers don't need to *understand* namespaces, but at the + very least they should not choke on namespaces or lose + data.""" + + markup = b'<html xmlns="http://www.w3.org/1999/xhtml" xmlns:mathml="http://www.w3.org/1998/Math/MathML" xmlns:svg="http://www.w3.org/2000/svg"><head></head><body><mathml:msqrt>4</mathml:msqrt><b svg:fill="red"></b></body></html>' + soup = self.soup(markup) + assert markup == soup.encode() + assert "http://www.w3.org/1999/xhtml" == soup.html["xmlns"] + assert "http://www.w3.org/1998/Math/MathML" == soup.html["xmlns:mathml"] + assert "http://www.w3.org/2000/svg" == soup.html["xmlns:svg"] + + def test_multivalued_attribute_value_becomes_list(self): + markup = b'<a class="foo bar">' + soup = self.soup(markup) + assert ["foo", "bar"] == soup.a["class"] + + # + # Generally speaking, tests below this point are more tests of + # Beautiful Soup than tests of the tree builders. But parsers are + # weird, so we run these tests separately for every tree builder + # to detect any differences between them. 
+    #
+
+    def test_can_parse_unicode_document(self):
+        # A seemingly innocuous document... but it's in Unicode! And
+        # it contains characters that can't be represented in the
+        # encoding found in the declaration! The horror!
+        markup = '<html><head><meta encoding="euc-jp"></head><body>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</body>'
+        soup = self.soup(markup)
+        assert "Sacr\xe9 bleu!" == soup.body.string
+
+    def test_soupstrainer(self):
+        """Parsers should be able to work with SoupStrainers."""
+        strainer = SoupStrainer("b")
+        soup = self.soup("A <b>bold</b> <meta/> <i>statement</i>", parse_only=strainer)
+        assert soup.decode() == "<b>bold</b>"
+
+    def test_single_quote_attribute_values_become_double_quotes(self):
+        self.assert_soup("<foo attr='bar'></foo>", '<foo attr="bar"></foo>')
+
+    def test_attribute_values_with_nested_quotes_are_left_alone(self):
+        text = """<foo attr='bar "brawls" happen'>a</foo>"""
+        self.assert_soup(text)
+
+    def test_attribute_values_with_double_nested_quotes_get_quoted(self):
+        text = """<foo attr='bar "brawls" happen'>a</foo>"""
+        soup = self.soup(text)
+        soup.foo["attr"] = 'Brawls happen at "Bob\'s Bar"'
+        self.assert_soup(
+            soup.foo.decode(),
+            """<foo attr="Brawls happen at &quot;Bob\'s Bar&quot;">a</foo>""",
+        )
+
+    def test_ampersand_in_attribute_value_gets_escaped(self):
+        self.assert_soup(
+            '<this is="really messed up & stuff"></this>',
+            '<this is="really messed up &amp; stuff"></this>',
+        )
+
+        self.assert_soup(
+            '<a href="http://example.org?a=1&b=2;3">foo</a>',
+            '<a href="http://example.org?a=1&amp;b=2;3">foo</a>',
+        )
+
+    def test_escaped_ampersand_in_attribute_value_is_left_alone(self):
+        self.assert_soup('<a href="http://example.org?a=1&amp;b=2;3"></a>')
+
+    def test_entities_in_strings_converted_during_parsing(self):
+        # Both XML and HTML entities are converted to Unicode characters
+        # during parsing.
+        text = "<p>&lt;&lt;sacr&eacute; bleu!&gt;&gt;</p>"
+        expected = (
+            "<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>"
+        )
+        self.assert_soup(text, expected)
+
+    def test_smart_quotes_converted_on_the_way_in(self):
+        # Microsoft smart quotes are converted to Unicode characters during
+        # parsing.
+        quote = b"<p>\x91Foo\x92</p>"
+        soup = self.soup(quote, from_encoding="windows-1252")
+        assert (
+            soup.p.string
+            == "\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}"
+        )
+
+    def test_non_breaking_spaces_converted_on_the_way_in(self):
+        soup = self.soup("<a>&nbsp;&nbsp;</a>")
+        assert soup.a.string == "\N{NO-BREAK SPACE}" * 2
+
+    def test_entities_converted_on_the_way_out(self):
+        text = "<p>&lt;&lt;sacr&eacute; bleu!&gt;&gt;</p>"
+        expected = "<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>".encode(
+            "utf-8"
+        )
+        soup = self.soup(text)
+        assert soup.p.encode("utf-8") == expected
+
+    def test_real_iso_8859_document(self):
+        # Smoke test of interrelated functionality, using an
+        # easy-to-understand document.
+
+        # Here it is in Unicode. Note that it claims to be in ISO-8859-1.
+        unicode_html = '<html><head><meta content="text/html; charset=ISO-8859-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>'
+
+        # That's because we're going to encode it into ISO-8859-1,
+        # and use that to test.
+        iso_latin_html = unicode_html.encode("iso-8859-1")
+
+        # Parse the ISO-8859-1 HTML.
+        soup = self.soup(iso_latin_html)
+
+        # Encode it to UTF-8.
+        result = soup.encode("utf-8")
+
+        # What do we expect the result to look like? Well, it would
+        # look like unicode_html, except that the META tag would say
+        # UTF-8 instead of ISO-8859-1.
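+        # (That is, encoding the tree also rewrites the charset named
+        # inside the <meta> tag, so the declaration stays truthful.)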
+        expected = unicode_html.replace("ISO-8859-1", "utf-8")
+
+        # And, of course, it would be in UTF-8, not Unicode.
+        expected = expected.encode("utf-8")
+
+        # Ta-da!
+        assert result == expected
+
+    def test_real_shift_jis_document(self):
+        # Smoke test to make sure the parser can handle a document in
+        # Shift-JIS encoding, without choking.
+        shift_jis_html = (
+            b"<html><head></head><body><pre>"
+            b"\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f"
+            b"\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c"
+            b"\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B"
+            b"</pre></body></html>"
+        )
+        unicode_html = shift_jis_html.decode("shift-jis")
+        soup = self.soup(unicode_html)
+
+        # Make sure the parse tree is correctly encoded to various
+        # encodings.
+        assert soup.encode("utf-8") == unicode_html.encode("utf-8")
+        assert soup.encode("euc_jp") == unicode_html.encode("euc_jp")
+
+    def test_real_hebrew_document(self):
+        # A real-world test to make sure we can convert ISO-8859-8 (a
+        # Hebrew encoding) to UTF-8.
+        hebrew_document = b"<html><head><title>Hebrew (ISO 8859-8) in Visual Directionality</title></head><body><h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\xed\xe5\xec\xf9</body></html>"
+        soup = self.soup(hebrew_document, from_encoding="iso8859-8")
+        # Some tree builders call it iso8859-8, others call it iso-8859-8.
+        # That's not a difference we really care about.
+        assert soup.original_encoding in ("iso8859-8", "iso-8859-8")
+        assert soup.encode("utf-8") == (
+            hebrew_document.decode("iso8859-8").encode("utf-8")
+        )
+
+    def test_meta_tag_reflects_current_encoding(self):
+        # Here's the <meta> tag saying that a document is
+        # encoded in Shift-JIS.
+        meta_tag = (
+            '<meta content="text/html; charset=x-sjis" ' 'http-equiv="Content-type"/>'
+        )
+
+        # Here's a document incorporating that meta tag.
+        shift_jis_html = (
+            "<html><head>\n%s\n"
+            '<meta http-equiv="Content-language" content="ja"/>'
+            "</head><body>Shift-JIS markup goes here."
+        ) % meta_tag
+        soup = self.soup(shift_jis_html)
+
+        # Parse the document, and the charset is seemingly unaffected.
+        parsed_meta = soup.find("meta", {"http-equiv": "Content-type"})
+        content = parsed_meta["content"]
+        assert "text/html; charset=x-sjis" == content
+
+        # But that value is actually a ContentMetaAttributeValue object.
+        assert isinstance(content, ContentMetaAttributeValue)
+
+        # And it will take on a value that reflects its current
+        # encoding.
+        assert "text/html; charset=utf8" == content.substitute_encoding("utf8")
+
+        # No matter how the <meta> tag is encoded, its charset attribute
+        # will always be accurate.
+        assert b"charset=utf8" in parsed_meta.encode("utf8")
+        assert b"charset=shift-jis" in parsed_meta.encode("shift-jis")
+
+        # For the rest of the story, see TestSubstitutions in
+        # test_tree.py.
+
+    def test_html5_style_meta_tag_reflects_current_encoding(self):
+        # Here's the <meta> tag saying that a document is
+        # encoded in Shift-JIS.
+        meta_tag = '<meta id="encoding" charset="x-sjis" />'
+
+        # Here's a document incorporating that meta tag.
+        shift_jis_html = (
+            "<html><head>\n%s\n"
+            '<meta http-equiv="Content-language" content="ja"/>'
+            "</head><body>Shift-JIS markup goes here."
+        ) % meta_tag
+        soup = self.soup(shift_jis_html)
+
+        # Parse the document, and the charset is seemingly unaffected.
+        parsed_meta = soup.find("meta", id="encoding")
+        charset = parsed_meta["charset"]
+        assert "x-sjis" == charset
+
+        # But that value is actually a CharsetMetaAttributeValue object.
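+        # (CharsetMetaAttributeValue is a str subclass, which is why the
+        # plain string comparison above still succeeds on "x-sjis".)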
+ assert isinstance(charset, CharsetMetaAttributeValue) + + # And it will take on a value that reflects its current + # encoding. + assert "utf8" == charset.substitute_encoding("utf8") + + # No matter how the <meta> tag is encoded, its charset attribute + # will always be accurate. + assert b'charset="utf8"' in parsed_meta.encode("utf8") + assert b'charset="shift-jis"' in parsed_meta.encode("shift-jis") + + def test_python_specific_encodings_not_used_in_charset(self): + # You can encode an HTML document using a Python-specific + # encoding, but that encoding won't be mentioned _inside_ the + # resulting document. Instead, the document will appear to + # have no encoding. + for markup in [ + b'<meta charset="utf8"></head>' b'<meta id="encoding" charset="utf-8" />' + ]: + soup = self.soup(markup) + for encoding in PYTHON_SPECIFIC_ENCODINGS: + if encoding in ( + "idna", + "mbcs", + "oem", + "undefined", + "string_escape", + "string-escape", + ): + # For one reason or another, these will raise an + # exception if we actually try to use them, so don't + # bother. + continue + encoded = soup.encode(encoding) + assert b'meta charset=""' in encoded + assert encoding.encode("ascii") not in encoded + + def test_tag_with_no_attributes_can_have_attributes_added(self): + data = self.soup("<a>text</a>") + data.a["foo"] = "bar" + assert '<a foo="bar">text</a>' == data.a.decode() + + def test_closing_tag_with_no_opening_tag(self): + # Without BeautifulSoup.open_tag_counter, the </span> tag will + # cause _popToTag to be called over and over again as we look + # for a <span> tag that wasn't there. The result is that 'text2' + # will show up outside the body of the document. + soup = self.soup("<body><div><p>text1</p></span>text2</div></body>") + assert "<body><div><p>text1</p>text2</div></body>" == soup.body.decode() + + def test_worst_case(self): + """Test the worst case (currently) for linking issues.""" + + soup = self.soup(BAD_DOCUMENT) + self.linkage_validator(soup) + + +class XMLTreeBuilderSmokeTest(TreeBuilderSmokeTest): + def test_pickle_and_unpickle_identity(self): + # Pickling a tree, then unpickling it, yields a tree identical + # to the original. + tree = self.soup("<a><b>foo</a>") + dumped = pickle.dumps(tree, 2) + loaded = pickle.loads(dumped) + assert loaded.__class__ == BeautifulSoup + assert loaded.decode() == tree.decode() + + def test_docstring_generated(self): + soup = self.soup("<root/>") + assert soup.encode() == b'<?xml version="1.0" encoding="utf-8"?>\n<root/>' + + def test_xml_declaration(self): + markup = b"""<?xml version="1.0" encoding="utf8"?>\n<foo/>""" + soup = self.soup(markup) + assert markup == soup.encode("utf8") + + def test_python_specific_encodings_not_used_in_xml_declaration(self): + # You can encode an XML document using a Python-specific + # encoding, but that encoding won't be mentioned _inside_ the + # resulting document. + markup = b"""<?xml version="1.0"?>\n<foo/>""" + soup = self.soup(markup) + for encoding in PYTHON_SPECIFIC_ENCODINGS: + if encoding in ( + "idna", + "mbcs", + "oem", + "undefined", + "string_escape", + "string-escape", + ): + # For one reason or another, these will raise an + # exception if we actually try to use them, so don't + # bother. 
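+                # ("undefined" raises by design, and "mbcs" exists
+                # only on Windows, for example.)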
+                continue
+            encoded = soup.encode(encoding)
+            assert b'<?xml version="1.0"?>' in encoded
+            assert encoding.encode("ascii") not in encoded
+
+    def test_processing_instruction(self):
+        markup = b"""<?xml version="1.0" encoding="utf8"?>\n<?PITarget PIContent?>"""
+        soup = self.soup(markup)
+        assert markup == soup.encode("utf8")
+
+    def test_real_xhtml_document(self):
+        """A real XHTML document should come out *exactly* the same as it went in."""
+        markup = b"""<?xml version="1.0" encoding="utf-8"?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head><title>Hello.</title></head>
+<body>Goodbye.</body>
+</html>"""
+        soup = self.soup(markup)
+        assert soup.encode("utf-8") == markup
+
+    def test_nested_namespaces(self):
+        doc = b"""<?xml version="1.0" encoding="utf-8"?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
+<parent xmlns="http://ns1/">
+<child xmlns="http://ns2/" xmlns:ns3="http://ns3/">
+<grandchild ns3:attr="value" xmlns="http://ns4/"/>
+</child>
+</parent>"""
+        soup = self.soup(doc)
+        assert doc == soup.encode()
+
+    def test_formatter_processes_script_tag_for_xml_documents(self):
+        doc = """
+  <script type="text/javascript">
+  </script>
+"""
+        soup = BeautifulSoup(doc, "lxml-xml")
+        # lxml would have stripped this while parsing, but we can add
+        # it later.
+        soup.script.string = 'console.log("< < hey > > ");'
+        encoded = soup.encode()
+        assert b"&lt; &lt; hey &gt; &gt;" in encoded
+
+    def test_can_parse_unicode_document(self):
+        markup = '<?xml version="1.0" encoding="euc-jp"><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>'
+        soup = self.soup(markup)
+        assert "Sacr\xe9 bleu!" == soup.root.string
+
+    def test_can_parse_unicode_document_beginning_with_bom(self):
+        markup = '\N{BYTE ORDER MARK}<?xml version="1.0" encoding="euc-jp"><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>'
+        soup = self.soup(markup)
+        assert "Sacr\xe9 bleu!" == soup.root.string
+
+    def test_popping_namespaced_tag(self):
+        markup = '<rss xmlns:dc="foo"><dc:creator>b</dc:creator><dc:date>2012-07-02T20:33:42Z</dc:date><dc:rights>c</dc:rights><image>d</image></rss>'
+        soup = self.soup(markup)
+        assert str(soup.rss) == markup
+
+    def test_docstring_includes_correct_encoding(self):
+        soup = self.soup("<root/>")
+        assert (
+            soup.encode("latin1") == b'<?xml version="1.0" encoding="latin1"?>\n<root/>'
+        )
+
+    def test_large_xml_document(self):
+        """A large XML document should come out the same as it went in."""
+        markup = (
+            b'<?xml version="1.0" encoding="utf-8"?>\n<root>'
+            + b"0" * (2**12)
+            + b"</root>"
+        )
+        soup = self.soup(markup)
+        assert soup.encode("utf-8") == markup
+
+    def test_tags_are_empty_element_if_and_only_if_they_are_empty(self):
+        self.assert_soup("<p>", "<p/>")
+        self.assert_soup("<p>foo</p>")
+
+    def test_namespaces_are_preserved(self):
+        markup = '<root xmlns:a="http://example.com/" xmlns:b="http://example.net/"><a:foo>This tag is in the a namespace</a:foo><b:foo>This tag is in the b namespace</b:foo></root>'
+        soup = self.soup(markup)
+        root = soup.root
+        assert "http://example.com/" == root["xmlns:a"]
+        assert "http://example.net/" == root["xmlns:b"]
+
+    def test_closing_namespaced_tag(self):
+        markup = '<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>'
+        soup = self.soup(markup)
+        assert str(soup.p) == markup
+
+    def test_namespaced_attributes(self):
+        markup = '<foo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><bar xsi:schemaLocation="http://www.example.com"/></foo>'
+        soup = self.soup(markup)
+        assert str(soup.foo) == markup
+
+    def test_namespaced_attributes_xml_namespace(self):
+        markup = '<foo xml:lang="fr">bar</foo>'
+        soup = self.soup(markup)
+        assert str(soup.foo) == markup
+
+    def test_find_by_prefixed_name(self):
+        doc = """<?xml version="1.0" encoding="utf-8"?>
+<Document xmlns="http://example.com/ns0"
+   xmlns:ns1="http://example.com/ns1"
+   xmlns:ns2="http://example.com/ns2">
+  <ns1:tag>foo</ns1:tag>
+  <ns1:tag>bar</ns1:tag>
+  <ns2:tag key="value">baz</ns2:tag>
+</Document>
+"""
+        soup = self.soup(doc)
+
+        # There are three <tag> tags.
+        assert 3 == len(soup.find_all("tag"))
+
+        # But two of them are ns1:tag and one of them is ns2:tag.
+        assert 2 == len(soup.find_all("ns1:tag"))
+        assert 1 == len(soup.find_all("ns2:tag"))
+
+        assert 1 == len(soup.find_all("ns2:tag", key="value"))
+        assert 3 == len(soup.find_all(["ns1:tag", "ns2:tag"]))
+
+    def test_copy_tag_preserves_namespace(self):
+        xml = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<w:document xmlns:w="http://example.com/ns0"/>"""
+
+        soup = self.soup(xml)
+        tag = soup.document
+        duplicate = copy.copy(tag)
+
+        # The two tags have the same namespace prefix.
+        assert tag.prefix == duplicate.prefix
+
+    def test_worst_case(self):
+        """Test the worst case (currently) for linking issues."""
+
+        soup = self.soup(BAD_DOCUMENT)
+        self.linkage_validator(soup)
+
+
+class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest):
+    """Smoke test for a tree builder that supports HTML5."""
+
+    def test_real_xhtml_document(self):
+        # Since XHTML is not HTML5, HTML5 parsers are not tested to handle
+        # XHTML documents in any particular way.
+ pass + + def test_html_tags_have_namespace(self): + markup = "<a>" + soup = self.soup(markup) + assert "http://www.w3.org/1999/xhtml" == soup.a.namespace + + def test_svg_tags_have_namespace(self): + markup = "<svg><circle/></svg>" + soup = self.soup(markup) + namespace = "http://www.w3.org/2000/svg" + assert namespace == soup.svg.namespace + assert namespace == soup.circle.namespace + + def test_mathml_tags_have_namespace(self): + markup = "<math><msqrt>5</msqrt></math>" + soup = self.soup(markup) + namespace = "http://www.w3.org/1998/Math/MathML" + assert namespace == soup.math.namespace + assert namespace == soup.msqrt.namespace + + def test_xml_declaration_becomes_comment(self): + markup = '<?xml version="1.0" encoding="utf-8"?><html></html>' + soup = self.soup(markup) + assert isinstance(soup.contents[0], Comment) + assert soup.contents[0] == '?xml version="1.0" encoding="utf-8"?' + assert "html" == soup.contents[0].next_element.name diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4670634698080256.testcase b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4670634698080256.testcase new file mode 100644 index 00000000..4828f8a4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4670634698080256.testcase @@ -0,0 +1 @@ + ÿÿ ÿ <css
\ No newline at end of file diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4818336571064320.testcase b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4818336571064320.testcase new file mode 100644 index 00000000..b34be8b1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4818336571064320.testcase @@ -0,0 +1 @@ +ÿ<!DOCTyPEV PUBLIC'''Ð'
\ No newline at end of file diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4999465949331456.testcase b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4999465949331456.testcase new file mode 100644 index 00000000..dbeed3f5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4999465949331456.testcase @@ -0,0 +1 @@ +)<a><math><TR><a><mI><a><p><a>
\ No newline at end of file diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5000587759190016.testcase b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5000587759190016.testcase Binary files differnew file mode 100644 index 00000000..8a585ce9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5000587759190016.testcase diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5167584867909632.testcase b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5167584867909632.testcase Binary files differnew file mode 100644 index 00000000..0fe66dd2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5167584867909632.testcase diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5270998950477824.testcase b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5270998950477824.testcase Binary files differnew file mode 100644 index 00000000..fd411427 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5270998950477824.testcase diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5375146639360000.testcase b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5375146639360000.testcase new file mode 100644 index 00000000..6248b2c5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5375146639360000.testcase @@ -0,0 +1 @@ +ÿ 
><applet></applet><applet></applet><apple|><applet><applet><appl›„><applet><applet></applet></applet></applet></applet><applet></applet><apple>t<applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet>et><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><azplet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><plet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><a
pplet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet></applet></applet></applet></applet></appt></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet><<meta charset=utf-8>
\ No newline at end of file diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5492400320282624.testcase b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5492400320282624.testcase Binary files differnew file mode 100644 index 00000000..107da539 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5492400320282624.testcase diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5703933063462912.testcase b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5703933063462912.testcase new file mode 100644 index 00000000..367106c7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5703933063462912.testcase @@ -0,0 +1,2 @@ + +<![
\ No newline at end of file diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5843991618256896.testcase b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5843991618256896.testcase new file mode 100644 index 00000000..b8536ef0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5843991618256896.testcase @@ -0,0 +1 @@ +-<math><sElect><mi><sElect><sElect>
\ No newline at end of file diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5984173902397440.testcase b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5984173902397440.testcase Binary files differnew file mode 100644 index 00000000..d8b549c5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5984173902397440.testcase diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6124268085182464.testcase b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6124268085182464.testcase new file mode 100644 index 00000000..123e56d4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6124268085182464.testcase @@ -0,0 +1 @@ +)<math><math><math><math><math><math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math>
<annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul
>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math>
<annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&
\ No newline at end of file diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6241471367348224.testcase b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6241471367348224.testcase new file mode 100644 index 00000000..2831c484 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6241471367348224.testcase @@ -0,0 +1 @@ +ñ<table><svg><html>
\ No newline at end of file diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6306874195312640.testcase b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6306874195312640.testcase new file mode 100644 index 00000000..b60a250c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6306874195312640.testcase @@ -0,0 +1 @@ +- ÿÿ <math><select><mi><select><select>t
\ No newline at end of file diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6450958476902400.testcase b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6450958476902400.testcase Binary files differnew file mode 100644 index 00000000..a823d557 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6450958476902400.testcase diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6600557255327744.testcase b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6600557255327744.testcase Binary files differnew file mode 100644 index 00000000..65af44d8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6600557255327744.testcase diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/crash-0d306a50c8ed8bcd0785b67000fcd5dea1d33f08.testcase b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/crash-0d306a50c8ed8bcd0785b67000fcd5dea1d33f08.testcase Binary files differnew file mode 100644 index 00000000..5559adbb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/crash-0d306a50c8ed8bcd0785b67000fcd5dea1d33f08.testcase diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/crash-ffbdfa8a2b26f13537b68d3794b0478a4090ee4a.testcase b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/crash-ffbdfa8a2b26f13537b68d3794b0478a4090ee4a.testcase Binary files differnew file mode 100644 index 00000000..88571155 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/fuzz/crash-ffbdfa8a2b26f13537b68d3794b0478a4090ee4a.testcase diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/test_builder.py b/.venv/lib/python3.12/site-packages/bs4/tests/test_builder.py new file mode 100644 index 00000000..87d67587 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/test_builder.py @@ -0,0 +1,28 @@ +import pytest +from unittest.mock import patch +from bs4.builder import DetectsXMLParsedAsHTML + + +class TestDetectsXMLParsedAsHTML: + @pytest.mark.parametrize( + "markup,looks_like_xml", + [ + ("No xml declaration", False), + ("<html>obviously HTML</html", False), + ("<?xml ><html>Actually XHTML</html>", False), + ("<?xml> < html>Tricky XHTML</html>", False), + ("<?xml ><no-html-tag>", True), + ], + ) + def test_warn_if_markup_looks_like_xml(self, markup, looks_like_xml): + # Test of our ability to guess at whether markup looks XML-ish + # _and_ not HTML-ish. + with patch("bs4.builder.DetectsXMLParsedAsHTML._warn") as mock: + for data in markup, markup.encode("utf8"): + result = DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(data) + assert result == looks_like_xml + if looks_like_xml: + assert mock.called + else: + assert not mock.called + mock.reset_mock() diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/test_builder_registry.py b/.venv/lib/python3.12/site-packages/bs4/tests/test_builder_registry.py new file mode 100644 index 00000000..ad4b5a9e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/test_builder_registry.py @@ -0,0 +1,139 @@ +"""Tests of the builder registry.""" + +import pytest +import warnings +from typing import Type + +from bs4 import BeautifulSoup +from bs4.builder import ( + builder_registry as registry, + TreeBuilder, + TreeBuilderRegistry, +) +from bs4.builder._htmlparser import HTMLParserTreeBuilder + +from . 
import (
+    HTML5LIB_PRESENT,
+    LXML_PRESENT,
+)
+
+if HTML5LIB_PRESENT:
+    from bs4.builder._html5lib import HTML5TreeBuilder
+
+if LXML_PRESENT:
+    from bs4.builder._lxml import (
+        LXMLTreeBuilderForXML,
+        LXMLTreeBuilder,
+    )
+
+
+# TODO: Split out the lxml and html5lib tests into their own classes
+# and gate with pytest.mark.skipIf.
+class TestBuiltInRegistry(object):
+    """Test the built-in registry with the default builders registered."""
+
+    def test_combination(self):
+        assert registry.lookup("strict", "html") == HTMLParserTreeBuilder
+        if LXML_PRESENT:
+            assert registry.lookup("fast", "html") == LXMLTreeBuilder
+            assert registry.lookup("permissive", "xml") == LXMLTreeBuilderForXML
+        if HTML5LIB_PRESENT:
+            assert registry.lookup("html5lib", "html") == HTML5TreeBuilder
+
+    def test_lookup_by_markup_type(self):
+        if LXML_PRESENT:
+            assert registry.lookup("html") == LXMLTreeBuilder
+            assert registry.lookup("xml") == LXMLTreeBuilderForXML
+        else:
+            assert registry.lookup("xml") is None
+            if HTML5LIB_PRESENT:
+                assert registry.lookup("html") == HTML5TreeBuilder
+            else:
+                assert registry.lookup("html") == HTMLParserTreeBuilder
+
+    def test_named_library(self):
+        if LXML_PRESENT:
+            assert registry.lookup("lxml", "xml") == LXMLTreeBuilderForXML
+            assert registry.lookup("lxml", "html") == LXMLTreeBuilder
+        if HTML5LIB_PRESENT:
+            assert registry.lookup("html5lib") == HTML5TreeBuilder
+
+        assert registry.lookup("html.parser") == HTMLParserTreeBuilder
+
+    def test_beautifulsoup_constructor_does_lookup(self):
+        with warnings.catch_warnings(record=True):
+            # This will create a warning about not explicitly
+            # specifying a parser, but we'll ignore it.
+
+            # You can pass in a string.
+            BeautifulSoup("", features="html")
+            # Or a list of strings.
+            BeautifulSoup("", features=["html", "fast"])
+
+        # You'll get an exception if BS can't find an appropriate
+        # builder.
+        with pytest.raises(ValueError):
+            BeautifulSoup("", features="no-such-feature")
+
+
+class TestRegistry(object):
+    """Test the TreeBuilderRegistry class in general."""
+
+    def setup_method(self):
+        self.registry = TreeBuilderRegistry()
+
+    def builder_for_features(self, *feature_list: str) -> Type[TreeBuilder]:
+        cls = type(
+            "Builder_" + "_".join(feature_list), (object,), {"features": feature_list}
+        )
+
+        self.registry.register(cls)
+        return cls
+
+    def test_register_with_no_features(self):
+        builder = self.builder_for_features()
+
+        # Since the builder advertises no features, you can't find it
+        # by looking up features.
+        assert self.registry.lookup("foo") is None
+
+        # But you can find it by doing a lookup with no features, if
+        # this happens to be the only registered builder.
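+        # (A featureless lookup falls back to the most recently
+        # registered builder; the "most recent" tests below pin
+        # down that behavior.)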
+ assert self.registry.lookup() == builder + + def test_register_with_features_makes_lookup_succeed(self): + builder = self.builder_for_features("foo", "bar") + assert self.registry.lookup("foo") is builder + assert self.registry.lookup("bar") is builder + + def test_lookup_fails_when_no_builder_implements_feature(self): + assert self.registry.lookup("baz") is None + + def test_lookup_gets_most_recent_registration_when_no_feature_specified(self): + self.builder_for_features("foo") + builder2 = self.builder_for_features("bar") + assert self.registry.lookup() == builder2 + + def test_lookup_fails_when_no_tree_builders_registered(self): + assert self.registry.lookup() is None + + def test_lookup_gets_most_recent_builder_supporting_all_features(self): + self.builder_for_features("foo") + self.builder_for_features("bar") + has_both_early = self.builder_for_features("foo", "bar", "baz") + has_both_late = self.builder_for_features("foo", "bar", "quux") + self.builder_for_features("bar") + self.builder_for_features("foo") + + # There are two builders featuring 'foo' and 'bar', but + # the one that also features 'quux' was registered later. + assert self.registry.lookup("foo", "bar") == has_both_late + + # There is only one builder featuring 'foo', 'bar', and 'baz'. + assert self.registry.lookup("foo", "bar", "baz") == has_both_early + + def test_lookup_fails_when_cannot_reconcile_requested_features(self): + self.builder_for_features("foo", "bar") + self.builder_for_features("foo", "baz") + assert self.registry.lookup("bar", "baz") is None diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/test_css.py b/.venv/lib/python3.12/site-packages/bs4/tests/test_css.py new file mode 100644 index 00000000..b1c42379 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/test_css.py @@ -0,0 +1,536 @@ +import pytest +import types + +from bs4 import ( + BeautifulSoup, + ResultSet, +) + +from typing import ( + Any, + List, + Tuple, + Type, +) + +from packaging.version import Version + +from . import ( + SoupTest, + SOUP_SIEVE_PRESENT, +) + +SOUPSIEVE_EXCEPTION_ON_UNSUPPORTED_PSEUDOCLASS: Type[Exception] +if SOUP_SIEVE_PRESENT: + from soupsieve import __version__, SelectorSyntaxError + + # Some behavior changes in soupsieve 2.6 that affects one of our + # tests. For the test to run under all versions of Python + # supported by Beautiful Soup (which includes versions of Python + # not supported by soupsieve 2.6) we need to check both behaviors. + SOUPSIEVE_EXCEPTION_ON_UNSUPPORTED_PSEUDOCLASS = SelectorSyntaxError + if Version(__version__) < Version("2.6"): + SOUPSIEVE_EXCEPTION_ON_UNSUPPORTED_PSEUDOCLASS = NotImplementedError + + +@pytest.mark.skipif(not SOUP_SIEVE_PRESENT, reason="Soup Sieve not installed") +class TestCSSSelectors(SoupTest): + """Test basic CSS selector functionality. + + This functionality is implemented in soupsieve, which has a much + more comprehensive test suite, so this is basically an extra check + that soupsieve works as expected. 
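+
+    A rough sketch of the API exercised here (hypothetical markup):
+
+        soup = BeautifulSoup('<div id="main"></div>', "html.parser")
+        soup.select("div#main")      # every match, as a ResultSet
+        soup.select_one("div#main")  # first match, or None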
+ """ + + HTML = """ +<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" +"http://www.w3.org/TR/html4/strict.dtd"> +<html> +<head> +<title>The title</title> +<link rel="stylesheet" href="blah.css" type="text/css" id="l1"> +</head> +<body> +<custom-dashed-tag class="dashed" id="dash1">Hello there.</custom-dashed-tag> +<div id="main" class="fancy"> +<div id="inner"> +<h1 id="header1">An H1</h1> +<p>Some text</p> +<p class="onep" id="p1">Some more text</p> +<h2 id="header2">An H2</h2> +<p class="class1 class2 class3" id="pmulti">Another</p> +<a href="http://bob.example.org/" rel="friend met" id="bob">Bob</a> +<h2 id="header3">Another H2</h2> +<a id="me" href="http://simonwillison.net/" rel="me">me</a> +<span class="s1"> +<a href="#" id="s1a1">span1a1</a> +<a href="#" id="s1a2">span1a2 <span id="s1a2s1">test</span></a> +<span class="span2"> +<a href="#" id="s2a1">span2a1</a> +</span> +<span class="span3"></span> +<custom-dashed-tag class="dashed" id="dash2"/> +<div data-tag="dashedvalue" id="data1"/> +</span> +</div> +<x id="xid"> +<z id="zida"/> +<z id="zidab"/> +<z id="zidac"/> +</x> +<y id="yid"> +<z id="zidb"/> +</y> +<p lang="en" id="lang-en">English</p> +<p lang="en-gb" id="lang-en-gb">English UK</p> +<p lang="en-us" id="lang-en-us">English US</p> +<p lang="fr" id="lang-fr">French</p> +</div> + +<div id="footer"> +</div> +""" + + def setup_method(self): + self._soup = BeautifulSoup(self.HTML, "html.parser") + + def assert_css_selects( + self, selector: str, expected_ids: List[str], **kwargs: Any + ) -> None: + results = self._soup.select(selector, **kwargs) + assert isinstance(results, ResultSet) + el_ids = [el["id"] for el in results] + el_ids.sort() + expected_ids.sort() + assert expected_ids == el_ids, "Selector %s, expected [%s], got [%s]" % ( + selector, + ", ".join(expected_ids), + ", ".join(el_ids), + ) + + assertSelect = assert_css_selects + + def assert_css_select_multiple(self, *tests: Tuple[str, List[str]]): + for selector, expected_ids in tests: + self.assert_css_selects(selector, expected_ids) + + def test_precompiled(self): + sel = self._soup.css.compile("div") + + els = self._soup.select(sel) + assert len(els) == 4 + for div in els: + assert div.name == "div" + + el = self._soup.select_one(sel) + assert "main" == el["id"] + + def test_one_tag_one(self): + els = self._soup.select("title") + assert len(els) == 1 + assert els[0].name == "title" + assert els[0].contents == ["The title"] + + def test_one_tag_many(self): + els = self._soup.select("div") + assert len(els) == 4 + for div in els: + assert div.name == "div" + + el = self._soup.select_one("div") + assert "main" == el["id"] + + def test_select_one_returns_none_if_no_match(self): + match = self._soup.select_one("nonexistenttag") + assert None is match + + def test_tag_in_tag_one(self): + self.assert_css_selects("div div", ["inner", "data1"]) + + def test_tag_in_tag_many(self): + for selector in ("html div", "html body div", "body div"): + self.assert_css_selects(selector, ["data1", "main", "inner", "footer"]) + + def test_limit(self): + self.assert_css_selects("html div", ["main"], limit=1) + self.assert_css_selects("html body div", ["inner", "main"], limit=2) + self.assert_css_selects( + "body div", ["data1", "main", "inner", "footer"], limit=10 + ) + + def test_tag_no_match(self): + assert len(self._soup.select("del")) == 0 + + def test_invalid_tag(self): + with pytest.raises(SelectorSyntaxError): + self._soup.select("tag%t") + + def test_select_dashed_tag_ids(self): + self.assert_css_selects("custom-dashed-tag", 
["dash1", "dash2"]) + + def test_select_dashed_by_id(self): + dashed = self._soup.select('custom-dashed-tag[id="dash2"]') + assert dashed[0].name == "custom-dashed-tag" + assert dashed[0]["id"] == "dash2" + + def test_dashed_tag_text(self): + assert self._soup.select("body > custom-dashed-tag")[0].text == "Hello there." + + def test_select_dashed_matches_find_all(self): + assert self._soup.select("custom-dashed-tag") == self._soup.find_all( + "custom-dashed-tag" + ) + + def test_header_tags(self): + self.assert_css_select_multiple( + ("h1", ["header1"]), + ("h2", ["header2", "header3"]), + ) + + def test_class_one(self): + for selector in (".onep", "p.onep", "html p.onep"): + els = self._soup.select(selector) + assert len(els) == 1 + assert els[0].name == "p" + assert els[0]["class"] == ["onep"] + + def test_class_mismatched_tag(self): + els = self._soup.select("div.onep") + assert len(els) == 0 + + def test_one_id(self): + for selector in ("div#inner", "#inner", "div div#inner"): + self.assert_css_selects(selector, ["inner"]) + + def test_bad_id(self): + els = self._soup.select("#doesnotexist") + assert len(els) == 0 + + def test_items_in_id(self): + els = self._soup.select("div#inner p") + assert len(els) == 3 + for el in els: + assert el.name == "p" + assert els[1]["class"] == ["onep"] + assert not els[0].has_attr("class") + + def test_a_bunch_of_emptys(self): + for selector in ("div#main del", "div#main div.oops", "div div#main"): + assert len(self._soup.select(selector)) == 0 + + def test_multi_class_support(self): + for selector in ( + ".class1", + "p.class1", + ".class2", + "p.class2", + ".class3", + "p.class3", + "html p.class2", + "div#inner .class2", + ): + self.assert_css_selects(selector, ["pmulti"]) + + def test_multi_class_selection(self): + for selector in (".class1.class3", ".class3.class2", ".class1.class2.class3"): + self.assert_css_selects(selector, ["pmulti"]) + + def test_child_selector(self): + self.assert_css_selects(".s1 > a", ["s1a1", "s1a2"]) + self.assert_css_selects(".s1 > a span", ["s1a2s1"]) + + def test_child_selector_id(self): + self.assert_css_selects(".s1 > a#s1a2 span", ["s1a2s1"]) + + def test_attribute_equals(self): + self.assert_css_select_multiple( + ('p[class="onep"]', ["p1"]), + ('p[id="p1"]', ["p1"]), + ('[class="onep"]', ["p1"]), + ('[id="p1"]', ["p1"]), + ('link[rel="stylesheet"]', ["l1"]), + ('link[type="text/css"]', ["l1"]), + ('link[href="blah.css"]', ["l1"]), + ('link[href="no-blah.css"]', []), + ('[rel="stylesheet"]', ["l1"]), + ('[type="text/css"]', ["l1"]), + ('[href="blah.css"]', ["l1"]), + ('[href="no-blah.css"]', []), + ('p[href="no-blah.css"]', []), + ('[href="no-blah.css"]', []), + ) + + def test_attribute_tilde(self): + self.assert_css_select_multiple( + ('p[class~="class1"]', ["pmulti"]), + ('p[class~="class2"]', ["pmulti"]), + ('p[class~="class3"]', ["pmulti"]), + ('[class~="class1"]', ["pmulti"]), + ('[class~="class2"]', ["pmulti"]), + ('[class~="class3"]', ["pmulti"]), + ('a[rel~="friend"]', ["bob"]), + ('a[rel~="met"]', ["bob"]), + ('[rel~="friend"]', ["bob"]), + ('[rel~="met"]', ["bob"]), + ) + + def test_attribute_startswith(self): + self.assert_css_select_multiple( + ('[rel^="style"]', ["l1"]), + ('link[rel^="style"]', ["l1"]), + ('notlink[rel^="notstyle"]', []), + ('[rel^="notstyle"]', []), + ('link[rel^="notstyle"]', []), + ('link[href^="bla"]', ["l1"]), + ('a[href^="http://"]', ["bob", "me"]), + ('[href^="http://"]', ["bob", "me"]), + ('[id^="p"]', ["pmulti", "p1"]), + ('[id^="m"]', ["me", "main"]), + ('div[id^="m"]', 
["main"]), + ('a[id^="m"]', ["me"]), + ('div[data-tag^="dashed"]', ["data1"]), + ) + + def test_attribute_endswith(self): + self.assert_css_select_multiple( + ('[href$=".css"]', ["l1"]), + ('link[href$=".css"]', ["l1"]), + ('link[id$="1"]', ["l1"]), + ( + '[id$="1"]', + ["data1", "l1", "p1", "header1", "s1a1", "s2a1", "s1a2s1", "dash1"], + ), + ('div[id$="1"]', ["data1"]), + ('[id$="noending"]', []), + ) + + def test_attribute_contains(self): + self.assert_css_select_multiple( + # From test_attribute_startswith + ('[rel*="style"]', ["l1"]), + ('link[rel*="style"]', ["l1"]), + ('notlink[rel*="notstyle"]', []), + ('[rel*="notstyle"]', []), + ('link[rel*="notstyle"]', []), + ('link[href*="bla"]', ["l1"]), + ('[href*="http://"]', ["bob", "me"]), + ('[id*="p"]', ["pmulti", "p1"]), + ('div[id*="m"]', ["main"]), + ('a[id*="m"]', ["me"]), + # From test_attribute_endswith + ('[href*=".css"]', ["l1"]), + ('link[href*=".css"]', ["l1"]), + ('link[id*="1"]', ["l1"]), + ( + '[id*="1"]', + [ + "data1", + "l1", + "p1", + "header1", + "s1a1", + "s1a2", + "s2a1", + "s1a2s1", + "dash1", + ], + ), + ('div[id*="1"]', ["data1"]), + ('[id*="noending"]', []), + # New for this test + ('[href*="."]', ["bob", "me", "l1"]), + ('a[href*="."]', ["bob", "me"]), + ('link[href*="."]', ["l1"]), + ('div[id*="n"]', ["main", "inner"]), + ('div[id*="nn"]', ["inner"]), + ('div[data-tag*="edval"]', ["data1"]), + ) + + def test_attribute_exact_or_hypen(self): + self.assert_css_select_multiple( + ('p[lang|="en"]', ["lang-en", "lang-en-gb", "lang-en-us"]), + ('[lang|="en"]', ["lang-en", "lang-en-gb", "lang-en-us"]), + ('p[lang|="fr"]', ["lang-fr"]), + ('p[lang|="gb"]', []), + ) + + def test_attribute_exists(self): + self.assert_css_select_multiple( + ("[rel]", ["l1", "bob", "me"]), + ("link[rel]", ["l1"]), + ("a[rel]", ["bob", "me"]), + ("[lang]", ["lang-en", "lang-en-gb", "lang-en-us", "lang-fr"]), + ("p[class]", ["p1", "pmulti"]), + ("[blah]", []), + ("p[blah]", []), + ("div[data-tag]", ["data1"]), + ) + + def test_quoted_space_in_selector_name(self): + html = """<div style="display: wrong">nope</div> + <div style="display: right">yes</div> + """ + soup = BeautifulSoup(html, "html.parser") + [chosen] = soup.select('div[style="display: right"]') + assert "yes" == chosen.string + + def test_unsupported_pseudoclass(self): + with pytest.raises(SOUPSIEVE_EXCEPTION_ON_UNSUPPORTED_PSEUDOCLASS): + self._soup.select("a:no-such-pseudoclass") + + with pytest.raises(SelectorSyntaxError): + self._soup.select("a:nth-of-type(a)") + + def test_nth_of_type(self): + # Try to select first paragraph + els = self._soup.select("div#inner p:nth-of-type(1)") + assert len(els) == 1 + assert els[0].string == "Some text" + + # Try to select third paragraph + els = self._soup.select("div#inner p:nth-of-type(3)") + assert len(els) == 1 + assert els[0].string == "Another" + + # Try to select (non-existent!) fourth paragraph + els = self._soup.select("div#inner p:nth-of-type(4)") + assert len(els) == 0 + + # Zero will select no tags. + els = self._soup.select("div p:nth-of-type(0)") + assert len(els) == 0 + + def test_nth_of_type_direct_descendant(self): + els = self._soup.select("div#inner > p:nth-of-type(1)") + assert len(els) == 1 + assert els[0].string == "Some text" + + def test_id_child_selector_nth_of_type(self): + self.assert_css_selects("#inner > p:nth-of-type(2)", ["p1"]) + + def test_select_on_element(self): + # Other tests operate on the tree; this operates on an element + # within the tree. 
+ inner = self._soup.find("div", id="main")
+ selected = inner.select("div")
+ # The <div id="inner"> tag was selected. The <div id="footer">
+ # tag was not.
+ self.assert_selects_ids(selected, ["inner", "data1"])
+
+ def test_overspecified_child_id(self):
+ self.assert_css_selects(".fancy #inner", ["inner"])
+ self.assert_css_selects(".normal #inner", [])
+
+ def test_adjacent_sibling_selector(self):
+ self.assert_css_selects("#p1 + h2", ["header2"])
+ self.assert_css_selects("#p1 + h2 + p", ["pmulti"])
+ self.assert_css_selects("#p1 + #header2 + .class1", ["pmulti"])
+ assert [] == self._soup.select("#p1 + p")
+
+ def test_general_sibling_selector(self):
+ self.assert_css_selects("#p1 ~ h2", ["header2", "header3"])
+ self.assert_css_selects("#p1 ~ #header2", ["header2"])
+ self.assert_css_selects("#p1 ~ h2 + a", ["me"])
+ self.assert_css_selects('#p1 ~ h2 + [rel="me"]', ["me"])
+ assert [] == self._soup.select("#inner ~ h2")
+
+ def test_dangling_combinator(self):
+ with pytest.raises(SelectorSyntaxError):
+ self._soup.select("h1 >")
+
+ def test_sibling_combinator_wont_select_same_tag_twice(self):
+ self.assert_css_selects("p[lang] ~ p", ["lang-en-gb", "lang-en-us", "lang-fr"])
+
+ # Test the selector grouping operator (the comma)
+ def test_multiple_select(self):
+ self.assert_css_selects("x, y", ["xid", "yid"])
+
+ def test_multiple_select_with_no_space(self):
+ self.assert_css_selects("x,y", ["xid", "yid"])
+
+ def test_multiple_select_with_more_space(self):
+ self.assert_css_selects("x,    y", ["xid", "yid"])
+
+ def test_multiple_select_duplicated(self):
+ self.assert_css_selects("x, x", ["xid"])
+
+ def test_multiple_select_sibling(self):
+ self.assert_css_selects("x, y ~ p[lang=fr]", ["xid", "lang-fr"])
+
+ def test_multiple_select_tag_and_direct_descendant(self):
+ self.assert_css_selects("x, y > z", ["xid", "zidb"])
+
+ def test_multiple_select_direct_descendant_and_tags(self):
+ self.assert_css_selects(
+ "div > x, y, z", ["xid", "yid", "zida", "zidb", "zidab", "zidac"]
+ )
+
+ def test_multiple_select_indirect_descendant(self):
+ self.assert_css_selects(
+ "div x,y, z", ["xid", "yid", "zida", "zidb", "zidab", "zidac"]
+ )
+
+ def test_invalid_multiple_select(self):
+ with pytest.raises(SelectorSyntaxError):
+ self._soup.select(",x, y")
+ with pytest.raises(SelectorSyntaxError):
+ self._soup.select("x,,y")
+
+ def test_multiple_select_attrs(self):
+ self.assert_css_selects("p[lang=en], p[lang=en-gb]", ["lang-en", "lang-en-gb"])
+
+ def test_multiple_select_ids(self):
+ self.assert_css_selects(
+ "x, y > z[id=zida], z[id=zidab], z[id=zidb]", ["xid", "zidb", "zidab"]
+ )
+
+ def test_multiple_select_nested(self):
+ self.assert_css_selects("body > div > x, y > z", ["xid", "zidb"])
+
+ def test_select_duplicate_elements(self):
+ # When markup contains duplicate elements, a multiple select
+ # will find all of them.
+ markup = '<div class="c1"/><div class="c2"/><div class="c1"/>'
+ soup = BeautifulSoup(markup, "html.parser")
+ selected = soup.select(".c1, .c2")
+ assert 3 == len(selected)
+
+ # Verify that find_all finds the same elements, though because
+ # of an implementation detail it finds them in a different
+ # order.
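+ # (Hence the membership check below rather than a direct
+ # list-to-list comparison.)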
+ for element in soup.find_all(class_=["c1", "c2"]):
+ assert element in selected
+
+ def test_closest(self):
+ inner = self._soup.find("div", id="inner")
+ closest = inner.css.closest("div[id=main]")
+ assert closest == self._soup.find("div", id="main")
+
+ def test_match(self):
+ inner = self._soup.find("div", id="inner")
+ main = self._soup.find("div", id="main")
+ assert inner.css.match("div[id=main]") is False
+ assert main.css.match("div[id=main]") is True
+
+ def test_iselect(self):
+ gen = self._soup.css.iselect("h2")
+ assert isinstance(gen, types.GeneratorType)
+ [header2, header3] = gen
+ assert header2["id"] == "header2"
+ assert header3["id"] == "header3"
+
+ def test_filter(self):
+ inner = self._soup.find("div", id="inner")
+ results = inner.css.filter("h2")
+ assert len(inner.css.filter("h2")) == 2
+
+ results = inner.css.filter("h2[id=header3]")
+ assert isinstance(results, ResultSet)
+ [result] = results
+ assert result["id"] == "header3"
+
+ def test_escape(self):
+ m = self._soup.css.escape
+ assert m(".foo#bar") == "\\.foo\\#bar"
+ assert m("()[]{}") == "\\(\\)\\[\\]\\{\\}"
+ assert m(".foo") == self._soup.css.escape(".foo")
diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/test_dammit.py b/.venv/lib/python3.12/site-packages/bs4/tests/test_dammit.py
new file mode 100644
index 00000000..ca554fea
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/bs4/tests/test_dammit.py
@@ -0,0 +1,433 @@
+# encoding: utf-8
+import pytest
+import logging
+import warnings
+import bs4
+from bs4 import BeautifulSoup
+from bs4.dammit import (
+ EntitySubstitution,
+ EncodingDetector,
+ UnicodeDammit,
+)
+
+
+class TestUnicodeDammit(object):
+ """Standalone tests of UnicodeDammit."""
+
+ def test_unicode_input(self):
+ markup = "I'm already Unicode! \N{SNOWMAN}"
+ dammit = UnicodeDammit(markup)
+ assert dammit.unicode_markup == markup
+
+ @pytest.mark.parametrize(
+ "smart_quotes_to,expect_converted",
+ [
+ (None, "\u2018\u2019\u201c\u201d"),
+ ("xml", "&#x2018;&#x2019;&#x201C;&#x201D;"),
+ ("html", "&lsquo;&rsquo;&ldquo;&rdquo;"),
+ ("ascii", "''" + '""'),
+ ],
+ )
+ def test_smart_quotes_to(self, smart_quotes_to, expect_converted):
+ """Verify the functionality of the smart_quotes_to argument
+ to the UnicodeDammit constructor."""
+ markup = b"<foo>\x91\x92\x93\x94</foo>"
+ converted = UnicodeDammit(
+ markup,
+ known_definite_encodings=["windows-1252"],
+ smart_quotes_to=smart_quotes_to,
+ ).unicode_markup
+ assert converted == "<foo>{}</foo>".format(expect_converted)
+
+ def test_detect_utf8(self):
+ utf8 = b"Sacr\xc3\xa9 bleu! \xe2\x98\x83"
+ dammit = UnicodeDammit(utf8)
+ assert dammit.original_encoding.lower() == "utf-8"
+ assert dammit.unicode_markup == "Sacr\xe9 bleu! \N{SNOWMAN}"
\N{SNOWMAN}" + + def test_convert_hebrew(self): + hebrew = b"\xed\xe5\xec\xf9" + dammit = UnicodeDammit(hebrew, ["iso-8859-8"]) + assert dammit.original_encoding.lower() == "iso-8859-8" + assert dammit.unicode_markup == "\u05dd\u05d5\u05dc\u05e9" + + def test_dont_see_smart_quotes_where_there_are_none(self): + utf_8 = b"\343\202\261\343\203\274\343\202\277\343\202\244 Watch" + dammit = UnicodeDammit(utf_8) + assert dammit.original_encoding.lower() == "utf-8" + assert dammit.unicode_markup.encode("utf-8") == utf_8 + + def test_ignore_inappropriate_codecs(self): + utf8_data = "RäksmörgÃ¥s".encode("utf-8") + dammit = UnicodeDammit(utf8_data, ["iso-8859-8"]) + assert dammit.original_encoding.lower() == "utf-8" + + def test_ignore_invalid_codecs(self): + utf8_data = "RäksmörgÃ¥s".encode("utf-8") + for bad_encoding in [".utf8", "...", "utF---16.!"]: + dammit = UnicodeDammit(utf8_data, [bad_encoding]) + assert dammit.original_encoding.lower() == "utf-8" + + def test_exclude_encodings(self): + # This is UTF-8. + utf8_data = "RäksmörgÃ¥s".encode("utf-8") + + # But if we exclude UTF-8 from consideration, the guess is + # Windows-1252. + dammit = UnicodeDammit(utf8_data, exclude_encodings=["utf-8"]) + assert dammit.original_encoding.lower() == "windows-1252" + + # And if we exclude that, there is no valid guess at all. + dammit = UnicodeDammit(utf8_data, exclude_encodings=["utf-8", "windows-1252"]) + assert dammit.original_encoding is None + + +class TestEncodingDetector(object): + def test_encoding_detector_replaces_junk_in_encoding_name_with_replacement_character( + self, + ): + detected = EncodingDetector(b'<?xml version="1.0" encoding="UTF-\xdb" ?>') + encodings = list(detected.encodings) + assert "utf-\N{REPLACEMENT CHARACTER}" in encodings + + def test_detect_html5_style_meta_tag(self): + for data in ( + b'<html><meta charset="euc-jp" /></html>', + b"<html><meta charset='euc-jp' /></html>", + b"<html><meta charset=euc-jp /></html>", + b"<html><meta charset=euc-jp/></html>", + ): + dammit = UnicodeDammit(data, is_html=True) + assert "euc-jp" == dammit.original_encoding + + def test_last_ditch_entity_replacement(self): + # This is a UTF-8 document that contains bytestrings + # completely incompatible with UTF-8 (ie. encoded with some other + # encoding). + # + # Since there is no consistent encoding for the document, + # Unicode, Dammit will eventually encode the document as UTF-8 + # and encode the incompatible characters as REPLACEMENT + # CHARACTER. + # + # If chardet is installed, it will detect that the document + # can be converted into ISO-8859-1 without errors. This happens + # to be the wrong encoding, but it is a consistent encoding, so the + # code we're testing here won't run. + # + # So we temporarily disable chardet if it's present. + doc = b"""\357\273\277<?xml version="1.0" encoding="UTF-8"?> +<html><b>\330\250\330\252\330\261</b> +<i>\310\322\321\220\312\321\355\344</i></html>""" + chardet = bs4.dammit._chardet_dammit + logging.disable(logging.WARNING) + try: + + def noop(str): + return None + + bs4.dammit._chardet_dammit = noop + dammit = UnicodeDammit(doc) + assert True is dammit.contains_replacement_characters + assert "\ufffd" in dammit.unicode_markup + + soup = BeautifulSoup(doc, "html.parser") + assert soup.contains_replacement_characters + finally: + logging.disable(logging.NOTSET) + bs4.dammit._chardet_dammit = chardet + + def test_byte_order_mark_removed(self): + # A document written in UTF-16LE will have its byte order marker stripped. 
+ data = b"\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00" + dammit = UnicodeDammit(data) + assert "<a>áé</a>" == dammit.unicode_markup + assert "utf-16le" == dammit.original_encoding + + def test_known_definite_versus_user_encodings(self): + # The known_definite_encodings are used before sniffing the + # byte-order mark; the user_encodings are used afterwards. + + # Here's a document in UTF-16LE. + data = b"\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00" + dammit = UnicodeDammit(data) + + # We can process it as UTF-16 by passing it in as a known + # definite encoding. + before = UnicodeDammit(data, known_definite_encodings=["utf-16"]) + assert "utf-16" == before.original_encoding + + # If we pass UTF-18 as a user encoding, it's not even + # tried--the encoding sniffed from the byte-order mark takes + # precedence. + after = UnicodeDammit(data, user_encodings=["utf-8"]) + assert "utf-16le" == after.original_encoding + assert ["utf-16le"] == [x[0] for x in dammit.tried_encodings] + + # Here's a document in ISO-8859-8. + hebrew = b"\xed\xe5\xec\xf9" + dammit = UnicodeDammit( + hebrew, known_definite_encodings=["utf-8"], user_encodings=["iso-8859-8"] + ) + + # The known_definite_encodings don't work, BOM sniffing does + # nothing (it only works for a few UTF encodings), but one of + # the user_encodings does work. + assert "iso-8859-8" == dammit.original_encoding + assert ["utf-8", "iso-8859-8"] == [x[0] for x in dammit.tried_encodings] + + def test_deprecated_override_encodings(self): + # override_encodings is a deprecated alias for + # known_definite_encodings. + hebrew = b"\xed\xe5\xec\xf9" + with warnings.catch_warnings(record=True) as w: + dammit = UnicodeDammit( + hebrew, + known_definite_encodings=["shift-jis"], + override_encodings=["utf-8"], + user_encodings=["iso-8859-8"], + ) + [warning] = w + message = warning.message + assert isinstance(message, DeprecationWarning) + assert warning.filename == __file__ + assert "iso-8859-8" == dammit.original_encoding + + # known_definite_encodings and override_encodings were tried + # before user_encodings. + assert ["shift-jis", "utf-8", "iso-8859-8"] == ( + [x[0] for x in dammit.tried_encodings] + ) + + def test_detwingle(self): + # Here's a UTF8 document. + utf8 = ("\N{SNOWMAN}" * 3).encode("utf8") + + # Here's a Windows-1252 document. + windows_1252 = ( + "\N{LEFT DOUBLE QUOTATION MARK}Hi, I like Windows!" + "\N{RIGHT DOUBLE QUOTATION MARK}" + ).encode("windows_1252") + + # Through some unholy alchemy, they've been stuck together. + doc = utf8 + windows_1252 + utf8 + + # The document can't be turned into UTF-8: + with pytest.raises(UnicodeDecodeError): + doc.decode("utf8") + + # Unicode, Dammit thinks the whole document is Windows-1252, + # and decodes it into "☃☃☃“Hi, I like Windows!â€Ã¢ËœÆ’☃☃" + + # But if we run it through fix_embedded_windows_1252, it's fixed: + fixed = UnicodeDammit.detwingle(doc) + assert "☃☃☃“Hi, I like Windows!â€â˜ƒâ˜ƒâ˜ƒ" == fixed.decode("utf8") + + def test_detwingle_ignores_multibyte_characters(self): + # Each of these characters has a UTF-8 representation ending + # in \x93. \x93 is a smart quote if interpreted as + # Windows-1252. But our code knows to skip over multibyte + # UTF-8 characters, so they'll survive the process unscathed. + for tricky_unicode_char in ( + "\N{LATIN SMALL LIGATURE OE}", # 2-byte char '\xc5\x93' + "\N{LATIN SUBSCRIPT SMALL LETTER X}", # 3-byte char '\xe2\x82\x93' + "\xf0\x90\x90\x93", # This is a CJK character, not sure which one. 
+ ):
+ input = tricky_unicode_char.encode("utf8")
+ assert input.endswith(b"\x93")
+ output = UnicodeDammit.detwingle(input)
+ assert output == input
+
+ def test_find_declared_encoding(self):
+ # Test our ability to find a declared encoding inside an
+ # XML or HTML document.
+ #
+ # Even if the document comes in as Unicode, it may be
+ # interesting to know what encoding was claimed
+ # originally.
+
+ html_unicode = '<html><head><meta charset="utf-8"></head></html>'
+ html_bytes = html_unicode.encode("ascii")
+
+ xml_unicode = '<?xml version="1.0" encoding="ISO-8859-1" ?>'
+ xml_bytes = xml_unicode.encode("ascii")
+
+ m = EncodingDetector.find_declared_encoding
+ assert m(html_unicode, is_html=False) is None
+ assert "utf-8" == m(html_unicode, is_html=True)
+ assert "utf-8" == m(html_bytes, is_html=True)
+
+ assert "iso-8859-1" == m(xml_unicode)
+ assert "iso-8859-1" == m(xml_bytes)
+
+ # Normally, only the first few kilobytes of a document are checked for
+ # an encoding.
+ spacer = b" " * 5000
+ assert m(spacer + html_bytes) is None
+ assert m(spacer + xml_bytes) is None
+
+ # But you can tell find_declared_encoding to search an entire
+ # HTML document.
+ assert (
+ m(spacer + html_bytes, is_html=True, search_entire_document=True) == "utf-8"
+ )
+
+ # The XML encoding declaration has to be the very first thing
+ # in the document. We'll allow whitespace before the document
+ # starts, but nothing else.
+ assert m(xml_bytes, search_entire_document=True) == "iso-8859-1"
+ assert m(b" " + xml_bytes, search_entire_document=True) == "iso-8859-1"
+ assert m(b"a" + xml_bytes, search_entire_document=True) is None
+
+
+class TestEntitySubstitution(object):
+ """Standalone tests of the EntitySubstitution class."""
+
+ def setup_method(self):
+ self.sub = EntitySubstitution
+
+ @pytest.mark.parametrize(
+ "original,substituted",
+ [
+ # Basic case. Unicode characters corresponding to named
+ # HTML entities are substituted; others are not.
+ ("foo\u2200\N{SNOWMAN}\u00f5bar", "foo&forall;\N{SNOWMAN}&otilde;bar"),
+ # MS smart quotes are a common source of frustration, so we
+ # give them a special test.
+ ("‘’foo“”", "&lsquo;&rsquo;foo&ldquo;&rdquo;"),
+ ],
+ )
+ def test_substitute_html(self, original, substituted):
+ assert self.sub.substitute_html(original) == substituted
+
+ def test_html5_entity(self):
+ for entity, u in (
+ # A few spot checks of our ability to recognize
+ # special character sequences and convert them
+ # to named entities.
+ ("&models;", "\u22a7"),
+ ("&Nfr;", "\U0001d511"),
+ ("&ngeqq;", "\u2267\u0338"),
+ ("&not;", "\xac"),
+ ("&Not;", "\u2aec"),
+ # We _could_ convert | to &verbarr;, but we don't, because
+ # | is an ASCII character.
+ ("|", "|"),
+ # Similarly for the fj ligature, which we could convert to
+ # &fjlig;, but we don't.
+ ("fj", "fj"),
+ # We do convert _these_ ASCII characters to HTML entities,
+ # because that's required to generate valid HTML.
+ ("&gt;", ">"),
+ ("&lt;", "<"),
+ ):
+ template = "3 %s 4"
+ raw = template % u
+ with_entities = template % entity
+ assert self.sub.substitute_html(raw) == with_entities
+
+ def test_html5_entity_with_variation_selector(self):
+ # Some HTML5 entities correspond either to a single-character
+ # Unicode sequence _or_ to the same character plus U+FE00,
+ # VARIATION SELECTOR 1. We can handle this.
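+ # For example, &sqcup; stands for U+2294 on its own, while
+ # &sqcups; stands for U+2294 followed by U+FE00 (as exercised
+ # below).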
+ data = "fjords \u2294 penguins" + markup = "fjords ⊔ penguins" + assert self.sub.substitute_html(data) == markup + + data = "fjords \u2294\ufe00 penguins" + markup = "fjords ⊔︀ penguins" + assert self.sub.substitute_html(data) == markup + + def test_xml_converstion_includes_no_quotes_if_make_quoted_attribute_is_false(self): + s = 'Welcome to "my bar"' + assert self.sub.substitute_xml(s, False) == s + + def test_xml_attribute_quoting_normally_uses_double_quotes(self): + assert self.sub.substitute_xml("Welcome", True) == '"Welcome"' + assert self.sub.substitute_xml("Bob's Bar", True) == '"Bob\'s Bar"' + + def test_xml_attribute_quoting_uses_single_quotes_when_value_contains_double_quotes( + self, + ): + s = 'Welcome to "my bar"' + assert self.sub.substitute_xml(s, True) == "'Welcome to \"my bar\"'" + + def test_xml_attribute_quoting_escapes_single_quotes_when_value_contains_both_single_and_double_quotes( + self, + ): + s = 'Welcome to "Bob\'s Bar"' + assert self.sub.substitute_xml(s, True) == '"Welcome to "Bob\'s Bar""' + + def test_xml_quotes_arent_escaped_when_value_is_not_being_quoted(self): + quoted = 'Welcome to "Bob\'s Bar"' + assert self.sub.substitute_xml(quoted) == quoted + + def test_xml_quoting_handles_angle_brackets(self): + assert self.sub.substitute_xml("foo<bar>") == "foo<bar>" + + def test_xml_quoting_handles_ampersands(self): + assert self.sub.substitute_xml("AT&T") == "AT&T" + + def test_xml_quoting_including_ampersands_when_they_are_part_of_an_entity(self): + assert self.sub.substitute_xml("ÁT&T") == "&Aacute;T&T" + + def test_xml_quoting_ignoring_ampersands_when_they_are_part_of_an_entity(self): + assert ( + self.sub.substitute_xml_containing_entities("ÁT&T") + == "ÁT&T" + ) + + def test_quotes_not_html_substituted(self): + """There's no need to do this except inside attribute values.""" + text = 'Bob\'s "bar"' + assert self.sub.substitute_html(text) == text + + @pytest.mark.parametrize( + "markup, old", + [ + ("foo & bar", "foo & bar"), + ("foo&", "foo&"), + ("foo&&& bar", "foo&&& bar"), + ("x=1&y=2", "x=1&y=2"), + ("&123", "&123"), + ("&abc", "&abc"), + ("foo &0 bar", "foo &0 bar"), + ("foo &lolwat bar", "foo &lolwat bar"), + ], + ) + def test_unambiguous_ampersands_not_escaped(self, markup, old): + assert self.sub.substitute_html(markup) == old + assert self.sub.substitute_html5_raw(markup) == markup + + @pytest.mark.parametrize( + "markup,html,html5,html5raw", + [ + ("÷", "&divide;", "&divide;", "÷"), + ("&nonesuch;", "&nonesuch;", "&nonesuch;", "&nonesuch;"), + ("÷", "&#247;", "&#247;", "&#247;"), + ("¡", "&#xa1;", "&#xa1;", "&#xa1;"), + ], + ) + def test_when_entity_ampersands_are_escaped(self, markup, html, html5, html5raw): + # The html and html5 formatters always escape the ampersand + # that begins an entity reference, because they assume + # Beautiful Soup has already converted any unescaped entity references + # to Unicode characters. + # + # The html5_raw formatter does not escape the ampersand that + # begins a recognized HTML entity, because it does not + # fit the HTML5 definition of an ambiguous ampersand. + # + # The html5_raw formatter does escape the ampersands in front + # of unrecognized named entities, as well as numeric and + # hexadecimal entities, because they do fit the definition. 
+ assert self.sub.substitute_html(markup) == html
+ assert self.sub.substitute_html5(markup) == html5
+ assert self.sub.substitute_html5_raw(markup) == html5raw
+
+ @pytest.mark.parametrize(
+ "markup,expect", [("&nosuchentity;", "&amp;nosuchentity;")]
+ )
+ def test_ambiguous_ampersands_escaped(self, markup, expect):
+ assert self.sub.substitute_html(markup) == expect
+ assert self.sub.substitute_html5_raw(markup) == expect
diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/test_element.py b/.venv/lib/python3.12/site-packages/bs4/tests/test_element.py
new file mode 100644
index 00000000..0861eb1c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/bs4/tests/test_element.py
@@ -0,0 +1,138 @@
+"""Tests of classes in element.py.
+
+The really big classes -- Tag, PageElement, and NavigableString --
+are tested in separate files.
+"""
+
+import pytest
+from bs4.element import (
+ HTMLAttributeDict,
+ XMLAttributeDict,
+ CharsetMetaAttributeValue,
+ ContentMetaAttributeValue,
+ NamespacedAttribute,
+ ResultSet,
+)
+
+class TestNamedspacedAttribute:
+ def test_name_may_be_none_or_missing(self):
+ a = NamespacedAttribute("xmlns", None)
+ assert a == "xmlns"
+
+ a = NamespacedAttribute("xmlns", "")
+ assert a == "xmlns"
+
+ a = NamespacedAttribute("xmlns")
+ assert a == "xmlns"
+
+ def test_namespace_may_be_none_or_missing(self):
+ a = NamespacedAttribute(None, "tag")
+ assert a == "tag"
+
+ a = NamespacedAttribute("", "tag")
+ assert a == "tag"
+
+ def test_attribute_is_equivalent_to_colon_separated_string(self):
+ a = NamespacedAttribute("a", "b")
+ assert "a:b" == a
+
+ def test_attributes_are_equivalent_if_prefix_and_name_identical(self):
+ a = NamespacedAttribute("a", "b", "c")
+ b = NamespacedAttribute("a", "b", "c")
+ assert a == b
+
+ # The actual namespace is not considered.
+ c = NamespacedAttribute("a", "b", None)
+ assert a == c
+
+ # But name and prefix are important.
+ d = NamespacedAttribute("a", "z", "c")
+ assert a != d
+
+ e = NamespacedAttribute("z", "b", "c")
+ assert a != e
+
+
+class TestAttributeValueWithCharsetSubstitution:
+ """Certain attributes are designed to have the charset of the
+ final document substituted into their value.
+ """
+
+ def test_charset_meta_attribute_value(self):
+ # The value of a CharsetMetaAttributeValue is whatever
+ # encoding the string is in.
+ value = CharsetMetaAttributeValue("euc-jp")
+ assert "euc-jp" == value
+ assert "euc-jp" == value.original_value
+ assert "utf8" == value.substitute_encoding("utf8")
+ assert "ascii" == value.substitute_encoding("ascii")
+
+ # If the target encoding is a Python internal encoding,
+ # no encoding will be mentioned in the output HTML.
+ assert "" == value.substitute_encoding("palmos")
+
+ def test_content_meta_attribute_value(self):
+ value = ContentMetaAttributeValue("text/html; charset=euc-jp")
+ assert "text/html; charset=euc-jp" == value
+ assert "text/html; charset=euc-jp" == value.original_value
+ assert "text/html; charset=utf8" == value.substitute_encoding("utf8")
+ assert "text/html; charset=ascii" == value.substitute_encoding("ascii")
+
+ # If the target encoding is a Python internal encoding, the
+ # charset argument will be omitted altogether.
+ assert "text/html" == value.substitute_encoding("palmos")
+
+
+class TestAttributeDicts:
+ def test_xml_attribute_value_handling(self):
+ # Verify that attribute values are processed according to the
+ # XML spec's rules.
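+ # (Presumably the dict class a tree builder attaches to tags
+ # parsed as XML; the point here is just that assignment
+ # normalizes the value, as the assertions below show.)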
+ d = XMLAttributeDict() + d["v"] = 100 + assert d["v"] == "100" + d["v"] = 100.123 + assert d["v"] == "100.123" + + # This preserves Beautiful Soup's old behavior in the absence of + # guidance from the spec. + d["v"] = False + assert d["v"] is False + + d["v"] = True + assert d["v"] is True + + d["v"] = None + assert d["v"] == "" + + def test_html_attribute_value_handling(self): + # Verify that attribute values are processed according to the + # HTML spec's rules. + d = HTMLAttributeDict() + d["v"] = 100 + assert d["v"] == "100" + d["v"] = 100.123 + assert d["v"] == "100.123" + + d["v"] = False + assert "v" not in d + + d["v"] = None + assert "v" not in d + + d["v"] = True + assert d["v"] == "v" + + attribute = NamespacedAttribute("prefix", "name", "namespace") + d[attribute] = True + assert d[attribute] == "name" + + +class TestResultSet: + def test_getattr_exception(self): + rs = ResultSet(None) + with pytest.raises(AttributeError) as e: + rs.name + assert ( + """ResultSet object has no attribute "name". You're probably treating a list of elements like a single element. Did you call find_all() when you meant to call find()?""" + == str(e.value) + ) diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/test_filter.py b/.venv/lib/python3.12/site-packages/bs4/tests/test_filter.py new file mode 100644 index 00000000..63b291ee --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/test_filter.py @@ -0,0 +1,674 @@ +import pytest +import re +import warnings + +from . import ( + SoupTest, +) +from typing import ( + Callable, + Optional, + Tuple, +) +from bs4.element import Tag +from bs4.filter import ( + AttributeValueMatchRule, + ElementFilter, + MatchRule, + SoupStrainer, + StringMatchRule, + TagNameMatchRule, +) +from bs4._typing import _RawAttributeValues + + +class TestElementFilter(SoupTest): + def test_default_behavior(self): + # An unconfigured ElementFilter matches absolutely everything. + selector = ElementFilter() + assert not selector.excludes_everything + assert selector.includes_everything + soup = self.soup("<a>text</a>") + tag = soup.a + string = tag.string + assert True is selector.match(soup) + assert True is selector.match(tag) + assert True is selector.match(string) + assert soup.find(selector).name == "a" + + # And allows any incoming markup to be turned into PageElements. + assert True is selector.allow_tag_creation(None, "tag", None) + assert True is selector.allow_string_creation("some string") + + def test_setup_with_match_function(self): + # Configure an ElementFilter with a match function and + # we can no longer state with certainty that it includes everything. + selector = ElementFilter(lambda x: False) + assert not selector.includes_everything + + def test_match(self): + def m(pe): + return pe.string == "allow" or (isinstance(pe, Tag) and pe.name == "allow") + + soup = self.soup("<allow>deny</allow>allow<deny>deny</deny>") + allow_tag = soup.allow + allow_string = soup.find(string="allow") + deny_tag = soup.deny + deny_string = soup.find(string="deny") + + selector = ElementFilter(match_function=m) + assert True is selector.match(allow_tag) + assert True is selector.match(allow_string) + assert False is selector.match(deny_tag) + assert False is selector.match(deny_string) + + # Since only the match function was provided, there is + # no effect on tag or string creation. 
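+ # That is: used as parse_only, this filter still lets every tag
+ # and string into the tree; its match function only matters for
+ # find-style searches.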
+ soup = self.soup("<a>text</a>", parse_only=selector) + assert "text" == soup.a.string + + def test_allow_tag_creation(self): + # By default, ElementFilter.allow_tag_creation allows everything. + filter = ElementFilter() + f = filter.allow_tag_creation + assert True is f("allow", "ignore", {}) + assert True is f("ignore", "allow", {}) + assert True is f(None, "ignore", {"allow": "1"}) + assert True is f("no", "no", {"no": "nope"}) + + # You can customize this behavior by overriding + # allow_tag_creation in a subclass. + class MyFilter(ElementFilter): + def allow_tag_creation( + self, + nsprefix: Optional[str], + name: str, + attrs: Optional[_RawAttributeValues], + ): + return ( + nsprefix == "allow" + or name == "allow" + or (attrs is not None and "allow" in attrs) + ) + + filter = MyFilter() + f = filter.allow_tag_creation + assert True is f("allow", "ignore", {}) + assert True is f("ignore", "allow", {}) + assert True is f(None, "ignore", {"allow": "1"}) + assert False is f("no", "no", {"no": "nope"}) + + # Test the customized ElementFilter as a value for parse_only. + soup = self.soup( + "<deny>deny</deny> <allow>deny</allow> allow", parse_only=filter + ) + + # The <deny> tag was filtered out, but there was no effect on + # the strings, since only allow_tag_creation_function was + # overridden. + assert "deny <allow>deny</allow> allow" == soup.decode() + + # Similarly, since match_function was not defined, this + # ElementFilter matches everything. + assert soup.find(filter) == "deny" + + def test_allow_string_creation(self): + # By default, ElementFilter.allow_string_creation allows everything. + filter = ElementFilter() + f = filter.allow_string_creation + assert True is f("allow") + assert True is f("deny") + assert True is f("please allow") + + # You can customize this behavior by overriding allow_string_creation + # in a subclass. + class MyFilter(ElementFilter): + def allow_string_creation(self, s: str): + return s == "allow" + + filter = MyFilter() + f = filter.allow_string_creation + assert True is f("allow") + assert False is f("deny") + assert False is f("please allow") + + # Test the customized ElementFilter as a value for parse_only. + soup = self.soup( + "<deny>deny</deny> <allow>deny</allow> allow", parse_only=filter + ) + + # All incoming strings other than "allow" (even whitespace) + # were filtered out, but there was no effect on the tags, + # since only allow_string_creation_function was defined. + assert "<deny>deny</deny><allow>deny</allow>" == soup.decode() + + # Similarly, since match_function was not defined, this + # ElementFilter matches everything. 
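+ # -- here, find() returns the first PageElement in the tree,
+ # the <deny> tag: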
+ assert soup.find(filter).name == "deny" + + +class TestMatchRule(SoupTest): + def _tuple( + self, rule: MatchRule + ) -> Tuple[Optional[str], Optional[str], Optional[Callable], Optional[bool]]: + return ( + rule.string, + rule.pattern.pattern if rule.pattern else None, + rule.function, + rule.present, + ) + + @staticmethod + def tag_function(x: Tag) -> bool: + return False + + @staticmethod + def string_function(x: str) -> bool: + return False + + @pytest.mark.parametrize( + "constructor_args, constructor_kwargs, result", + [ + # String + ([], dict(string="a"), ("a", None, None, None)), + ( + [], + dict(string="\N{SNOWMAN}".encode("utf8")), + ("\N{SNOWMAN}", None, None, None), + ), + # Regular expression + ([], dict(pattern=re.compile("a")), (None, "a", None, None)), + ([], dict(pattern="b"), (None, "b", None, None)), + ([], dict(pattern=b"c"), (None, "c", None, None)), + # Function + ([], dict(function=tag_function), (None, None, tag_function, None)), + ([], dict(function=string_function), (None, None, string_function, None)), + # Boolean + ([], dict(present=True), (None, None, None, True)), + # With positional arguments rather than keywords + (("a", None, None, None), {}, ("a", None, None, None)), + ((None, "b", None, None), {}, (None, "b", None, None)), + ((None, None, tag_function, None), {}, (None, None, tag_function, None)), + ((None, None, None, True), {}, (None, None, None, True)), + ], + ) + def test_constructor(self, constructor_args, constructor_kwargs, result): + rule = MatchRule(*constructor_args, **constructor_kwargs) + assert result == self._tuple(rule) + + def test_empty_match_not_allowed(self): + with pytest.raises( + ValueError, + match="Either string, pattern, function, present, or exclude_everything must be provided.", + ): + MatchRule() + + def test_full_match_not_allowed(self): + with pytest.raises( + ValueError, + match="At most one of string, pattern, function, present, and exclude_everything must be provided.", + ): + MatchRule("a", "b", self.tag_function, True) + + @pytest.mark.parametrize( + "rule_kwargs, match_against, result", + [ + (dict(string="a"), "a", True), + (dict(string="a"), "ab", False), + (dict(pattern="a"), "a", True), + (dict(pattern="a"), "ab", True), + (dict(pattern="^a$"), "a", True), + (dict(pattern="^a$"), "ab", False), + (dict(present=True), "any random value", True), + (dict(present=True), None, False), + (dict(present=False), "any random value", False), + (dict(present=False), None, True), + (dict(function=lambda x: x.upper() == x), "UPPERCASE", True), + (dict(function=lambda x: x.upper() == x), "lowercase", False), + (dict(function=lambda x: x.lower() == x), "UPPERCASE", False), + (dict(function=lambda x: x.lower() == x), "lowercase", True), + ], + ) + def test_matches_string(self, rule_kwargs, match_against, result): + rule = MatchRule(**rule_kwargs) + assert rule.matches_string(match_against) == result + + +class TestTagNameMatchRule(SoupTest): + @pytest.mark.parametrize( + "rule_kwargs, tag_kwargs, result", + [ + (dict(string="a"), dict(name="a"), True), + (dict(string="a"), dict(name="ab"), False), + (dict(pattern="a"), dict(name="a"), True), + (dict(pattern="a"), dict(name="ab"), True), + (dict(pattern="^a$"), dict(name="a"), True), + (dict(pattern="^a$"), dict(name="ab"), False), + # This isn't very useful, but it will work. 
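+ # (Every tag has a name, so present=True always matches and
+ # present=False never does.)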
+ (dict(present=True), dict(name="any random value"), True), + (dict(present=False), dict(name="any random value"), False), + ( + dict(function=lambda t: t.name in t.attrs), + dict(name="id", attrs=dict(id="a")), + True, + ), + ( + dict(function=lambda t: t.name in t.attrs), + dict(name="id", attrs={"class": "a"}), + False, + ), + ], + ) + def test_matches_tag(self, rule_kwargs, tag_kwargs, result): + rule = TagNameMatchRule(**rule_kwargs) + tag = Tag(**tag_kwargs) + assert rule.matches_tag(tag) == result + + +# AttributeValueMatchRule and StringMatchRule have the same +# logic as MatchRule. + + +class TestSoupStrainer(SoupTest): + + def test_constructor_string_deprecated_text_argument(self): + with warnings.catch_warnings(record=True) as w: + strainer = SoupStrainer(text="text") + assert strainer.text == "text" + [w1, w2] = w + msg = str(w1.message) + assert w1.filename == __file__ + assert ( + msg + == "As of version 4.11.0, the 'text' argument to the SoupStrainer constructor is deprecated. Use 'string' instead." + ) + + msg = str(w2.message) + assert w2.filename == __file__ + assert ( + msg + == "Access to deprecated property text. (Look at .string_rules instead) -- Deprecated since version 4.13.0." + ) + + def test_search_tag_deprecated(self): + strainer = SoupStrainer(name="a") + with warnings.catch_warnings(record=True) as w: + assert False is strainer.search_tag("b", {}) + [w1] = w + msg = str(w1.message) + assert w1.filename == __file__ + assert ( + msg + == "Call to deprecated method search_tag. (Replaced by allow_tag_creation) -- Deprecated since version 4.13.0." + ) + + def test_search_deprecated(self): + strainer = SoupStrainer(name="a") + soup = self.soup("<a></a><b></b>") + with warnings.catch_warnings(record=True) as w: + assert soup.a == strainer.search(soup.a) + assert None is strainer.search(soup.b) + [w1, w2] = w + msg = str(w1.message) + assert msg == str(w2.message) + assert w1.filename == __file__ + assert ( + msg + == "Call to deprecated method search. (Replaced by match) -- Deprecated since version 4.13.0." + ) + + # Dummy function used within tests. + def _match_function(x): + pass + + def test_constructor_default(self): + # The default SoupStrainer matches all tags, and only tags. + strainer = SoupStrainer() + [name_rule] = strainer.name_rules + assert True == name_rule.present + assert 0 == len(strainer.attribute_rules) + assert 0 == len(strainer.string_rules) + + def test_constructor(self): + strainer = SoupStrainer( + "tagname", + {"attr1": "value"}, + string=self._match_function, + attr2=["value1", False], + ) + [name_rule] = strainer.name_rules + assert name_rule == TagNameMatchRule(string="tagname") + + [attr1_rule] = strainer.attribute_rules.pop("attr1") + assert attr1_rule == AttributeValueMatchRule(string="value") + + [attr2_rule1, attr2_rule2] = strainer.attribute_rules.pop("attr2") + assert attr2_rule1 == AttributeValueMatchRule(string="value1") + assert attr2_rule2 == AttributeValueMatchRule(present=False) + + assert not strainer.attribute_rules + + [string_rule] = strainer.string_rules + assert string_rule == StringMatchRule(function=self._match_function) + + def test_scalar_attrs_becomes_class_restriction(self): + # For the sake of convenience, passing a scalar value as + # ``args`` results in a restriction on the 'class' attribute. 
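+ # In other words, SoupStrainer(attrs="mainbody") is shorthand
+ # for SoupStrainer(class_="mainbody"), as verified below.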
+ strainer = SoupStrainer(attrs="mainbody") + assert [] == strainer.name_rules + assert [] == strainer.string_rules + assert {"class": [AttributeValueMatchRule(string="mainbody")]} == ( + strainer.attribute_rules + ) + + def test_constructor_class_attribute(self): + # The 'class' HTML attribute is also treated specially because + # it's a Python reserved word. Passing in "class_" as a + # keyword argument results in a restriction on the 'class' + # attribute. + strainer = SoupStrainer(class_="mainbody") + assert [] == strainer.name_rules + assert [] == strainer.string_rules + assert {"class": [AttributeValueMatchRule(string="mainbody")]} == ( + strainer.attribute_rules + ) + + # But if you pass in "class_" as part of the ``attrs`` dict + # it's not changed. (Otherwise there'd be no way to actually put + # a restriction on an attribute called "class_".) + strainer = SoupStrainer(attrs=dict(class_="mainbody")) + assert [] == strainer.name_rules + assert [] == strainer.string_rules + assert {"class_": [AttributeValueMatchRule(string="mainbody")]} == ( + strainer.attribute_rules + ) + + def test_constructor_with_overlapping_attributes(self): + # If you specify the same attribute in args and **kwargs, you end up + # with two different AttributeValueMatchRule objects. + + # This happens whether you use the 'class' shortcut on attrs... + strainer = SoupStrainer(attrs="class1", class_="class2") + rule1, rule2 = strainer.attribute_rules["class"] + assert rule1.string == "class1" + assert rule2.string == "class2" + + # Or explicitly specify the same attribute twice. + strainer = SoupStrainer(attrs={"id": "id1"}, id="id2") + rule1, rule2 = strainer.attribute_rules["id"] + assert rule1.string == "id1" + assert rule2.string == "id2" + + @pytest.mark.parametrize( + "obj, result", + [ + ("a", MatchRule(string="a")), + (b"a", MatchRule(string="a")), + (True, MatchRule(present=True)), + (False, MatchRule(present=False)), + (re.compile("a"), MatchRule(pattern=re.compile("a"))), + (_match_function, MatchRule(function=_match_function)), + # Pass in a list and get back a list of rules. + (["a", b"b"], [MatchRule(string="a"), MatchRule(string="b")]), + ( + [re.compile("a"), _match_function], + [ + MatchRule(pattern=re.compile("a")), + MatchRule(function=_match_function), + ], + ), + # Anything that doesn't fit is converted to a string. + (100, MatchRule(string="100")), + ], + ) + def test__make_match_rules(self, obj, result): + actual = list(SoupStrainer._make_match_rules(obj, MatchRule)) + # Helper to reduce the number of single-item lists in the + # parameters. + if len(actual) == 1: + [actual] = actual + assert result == actual + + @pytest.mark.parametrize( + "cls, result", + [ + (AttributeValueMatchRule, AttributeValueMatchRule(string="a")), + (StringMatchRule, StringMatchRule(string="a")), + ], + ) + def test__make_match_rules_different_classes(self, cls, result): + actual = cls(string="a") + assert actual == result + + def test__make_match_rules_nested_list(self): + # If you pass a nested list into _make_match_rules, it's + # turned into a restriction that excludes everything, to avoid the + # possibility of an infinite recursion. + + # Create a self-referential object. 
+ selfref = [] + selfref.append(selfref) + + with warnings.catch_warnings(record=True) as w: + rules = SoupStrainer._make_match_rules(["a", selfref, "b"], MatchRule) + assert list(rules) == [MatchRule(string="a"), MatchRule(exclude_everything=True), MatchRule(string="b")] + + [warning] = w + # Don't check the filename because the stacklevel is + # designed for normal use and we're testing the private + # method directly. + msg = str(warning.message) + assert ( + msg + == "Ignoring nested list [[...]] to avoid the possibility of infinite recursion." + ) + + def tag_matches( + self, + strainer: SoupStrainer, + name: str, + attrs: Optional[_RawAttributeValues] = None, + string: Optional[str] = None, + prefix: Optional[str] = None, + ) -> bool: + # Create a Tag with the given prefix, name and attributes, + # then make sure that strainer.matches_tag and allow_tag_creation + # both approve it. + tag = Tag(prefix=prefix, name=name, attrs=attrs) + if string: + tag.string = string + return strainer.matches_tag(tag) and strainer.allow_tag_creation( + prefix, name, attrs + ) + + def test_matches_tag_with_only_string(self): + # A SoupStrainer that only has StringMatchRules won't ever + # match a Tag. + strainer = SoupStrainer(string=["a string", re.compile("string")]) + tag = Tag(name="b", attrs=dict(id="1")) + tag.string = "a string" + assert not strainer.matches_tag(tag) + + # There has to be a TagNameMatchRule or an + # AttributeValueMatchRule as well. + strainer.name_rules.append(TagNameMatchRule(string="b")) + assert strainer.matches_tag(tag) + + strainer.name_rules = [] + strainer.attribute_rules["id"] = [AttributeValueMatchRule("1")] + assert strainer.matches_tag(tag) + + def test_matches_tag_with_prefix(self): + # If a tag has an attached namespace prefix, the tag's name is + # tested both with and without the prefix. + kwargs = dict(name="a", prefix="ns") + + assert self.tag_matches(SoupStrainer(name="a"), **kwargs) + assert self.tag_matches(SoupStrainer(name="ns:a"), **kwargs) + assert not self.tag_matches(SoupStrainer(name="ns2:a"), **kwargs) + + def test_one_name_rule_must_match(self): + # If there are TagNameMatchRule, at least one must match. + kwargs = dict(name="b") + + assert self.tag_matches(SoupStrainer(name="b"), **kwargs) + assert not self.tag_matches(SoupStrainer(name="c"), **kwargs) + assert self.tag_matches(SoupStrainer(name=["c", "d", "d", "b"]), **kwargs) + assert self.tag_matches( + SoupStrainer(name=[re.compile("c-f"), re.compile("[ab]$")]), **kwargs + ) + + def test_one_attribute_rule_must_match_for_each_attribute(self): + # If there is one or more AttributeValueMatchRule for a given + # attribute, at least one must match that attribute's + # value. This is true for *every* attribute -- just matching one + # attribute isn't enough. + kwargs = dict(name="b", attrs={"class": "main", "id": "1"}) + + # 'class' and 'id' match + assert self.tag_matches( + SoupStrainer( + class_=["other", "main"], id=["20", "a", re.compile("^[0-9]")] + ), + **kwargs, + ) + + # 'class' and 'id' are present and 'data' attribute is missing + assert self.tag_matches( + SoupStrainer(class_=True, id=True, data=False), **kwargs + ) + + # 'id' matches, 'class' does not. 
+ assert not self.tag_matches(SoupStrainer(class_=["other"], id=["2"]), **kwargs) + + # 'class' matches, 'id' does not + assert not self.tag_matches(SoupStrainer(class_=["main"], id=["2"]), **kwargs) + + # 'class' and 'id' match but 'data' attribute is missing + assert not self.tag_matches( + SoupStrainer(class_=["main"], id=["1"], data=True), **kwargs + ) + + def test_match_against_multi_valued_attribute(self): + # If an attribute has multiple values, only one of them + # has to match the AttributeValueMatchRule. + kwargs = dict(name="b", attrs={"class": ["main", "big"]}) + assert self.tag_matches(SoupStrainer(attrs="main"), **kwargs) + assert self.tag_matches(SoupStrainer(attrs="big"), **kwargs) + assert self.tag_matches(SoupStrainer(attrs=["main", "big"]), **kwargs) + assert self.tag_matches(SoupStrainer(attrs=["big", "small"]), **kwargs) + assert not self.tag_matches(SoupStrainer(attrs=["small", "smaller"]), **kwargs) + + def test_match_against_multi_valued_attribute_as_string(self): + # If an attribute has multiple values, you can treat the entire + # thing as one string during a match. + kwargs = dict(name="b", attrs={"class": ["main", "big"]}) + assert self.tag_matches(SoupStrainer(attrs="main big"), **kwargs) + + # But you can't put them in any order; it's got to be the + # order they are present in the Tag, which basically means the + # order they were originally present in the document. + assert not self.tag_matches(SoupStrainer(attrs=["big main"]), **kwargs) + + def test_one_string_rule_must_match(self): + # If there's a TagNameMatchRule and/or an + # AttributeValueMatchRule, then the StringMatchRule is _not_ + # ignored, and must match as well. + tag = Tag(name="b", attrs=dict(id="1")) + tag.string = "A string" + + assert SoupStrainer(name="b", string="A string").matches_tag(tag) + assert not SoupStrainer(name="a", string="A string").matches_tag(tag) + assert not SoupStrainer(name="a", string="Wrong string").matches_tag(tag) + assert SoupStrainer(id="1", string="A string").matches_tag(tag) + assert not SoupStrainer(id="2", string="A string").matches_tag(tag) + assert not SoupStrainer(id="1", string="Wrong string").matches_tag(tag) + + assert SoupStrainer(name="b", id="1", string="A string").matches_tag(tag) + + # If there are multiple string rules, only one needs to match. + assert SoupStrainer( + name="b", + id="1", + string=["Wrong string", "Also wrong", re.compile("string")], + ).matches_tag(tag) + + def test_allowing_tag_implies_allowing_its_contents(self): + markup = "<a><b>one string<div>another string</div></b></a>" + + # Letting the <b> tag through implies parsing the <div> tag + # and both strings, even though they wouldn't match the + # SoupStrainer on their own. + assert ( + "<b>one string<div>another string</div></b>" + == self.soup(markup, parse_only=SoupStrainer(name="b")).decode() + ) + + @pytest.mark.parametrize( + "soupstrainer", + [ + SoupStrainer(name="b", string="one string"), + SoupStrainer(name="div", string="another string"), + ], + ) + def test_parse_only_combining_tag_and_string(self, soupstrainer): + # If you pass parse_only a SoupStrainer that contains both tag + # restrictions and string restrictions, you get no results, + # because the string restrictions can't be evaluated during + # the parsing process, and the tag restrictions eliminate + # any strings from consideration. 
+ #
+ # We can detect this ahead of time, and warn about it,
+ # thanks to SoupStrainer.excludes_everything
+ markup = "<a><b>one string<div>another string</div></b></a>"
+
+ with warnings.catch_warnings(record=True) as w:
+ assert True is soupstrainer.excludes_everything
+ assert "" == self.soup(markup, parse_only=soupstrainer).decode()
+ [warning] = w
+ str(warning.message)
+ assert warning.filename == __file__
+ assert str(warning.message).startswith(
+ "The given value for parse_only will exclude everything:"
+ )
+
+ # The average SoupStrainer has excludes_everything=False
+ assert not SoupStrainer().excludes_everything
+
+ def test_documentation_examples(self):
+ """Medium-weight real-world tests based on the Beautiful Soup
+ documentation.
+ """
+ html_doc = """<html><head><title>The Dormouse's story</title></head>
+<body>
+<p class="title"><b>The Dormouse's story</b></p>
+
+<p class="story">Once upon a time there were three little sisters; and their names were
+<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
+<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
+<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
+and they lived at the bottom of a well.</p>

+<p class="story">...</p>
+"""
+ only_a_tags = SoupStrainer("a")
+ only_tags_with_id_link2 = SoupStrainer(id="link2")
+
+ def is_short_string(string):
+ return string is not None and len(string) < 10
+
+ only_short_strings = SoupStrainer(string=is_short_string)
+
+ a_soup = self.soup(html_doc, parse_only=only_a_tags)
+ assert (
+ '<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a><a class="sister" href="http://example.com/lacie" id="link2">Lacie</a><a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>'
+ == a_soup.decode()
+ )
+
+ id_soup = self.soup(html_doc, parse_only=only_tags_with_id_link2)
+ assert (
+ '<a class="sister" href="http://example.com/lacie" id="link2">Lacie</a>'
+ == id_soup.decode()
+ )
+ string_soup = self.soup(html_doc, parse_only=only_short_strings)
+ assert "\n\n\nElsie,\nLacie and\nTillie\n...\n" == string_soup.decode()
diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/test_formatter.py b/.venv/lib/python3.12/site-packages/bs4/tests/test_formatter.py
new file mode 100644
index 00000000..0b840c58
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/bs4/tests/test_formatter.py
@@ -0,0 +1,170 @@
+import pytest
+
+from bs4.element import Tag
+from bs4.formatter import (
+ Formatter,
+ HTMLFormatter,
+ XMLFormatter,
+)
+from . import SoupTest
+
+
+class TestFormatter(SoupTest):
+ def test_default_attributes(self):
+ # Test the default behavior of Formatter.attributes().
+ formatter = Formatter()
+ tag = Tag(name="tag")
+ tag["b"] = "1"
+ tag["a"] = "2"
+
+ # Attributes come out sorted by name. In Python 3, attributes
+ # normally come out of a dictionary in the order they were
+ # added.
+ assert [("a", "2"), ("b", "1")] == formatter.attributes(tag)
+
+ # This works even if Tag.attrs is None, though this shouldn't
+ # normally happen.
+ tag.attrs = None
+ assert [] == formatter.attributes(tag)
+
+ assert " " == formatter.indent
+
+ def test_sort_attributes(self):
+ # Test the ability to override Formatter.attributes() to,
+ # e.g., disable the normal sorting of attributes.
+ class UnsortedFormatter(Formatter): + def attributes(self, tag): + self.called_with = tag + for k, v in sorted(tag.attrs.items()): + if k == "ignore": + continue + yield k, v + + soup = self.soup('<p cval="1" aval="2" ignore="ignored"></p>') + formatter = UnsortedFormatter() + decoded = soup.decode(formatter=formatter) + + # attributes() was called on the <p> tag. It filtered out one + # attribute and sorted the other two. + assert formatter.called_with == soup.p + assert '<p aval="2" cval="1"></p>' == decoded + + def test_empty_attributes_are_booleans(self): + # Test the behavior of empty_attributes_are_booleans as well + # as which Formatters have it enabled. + + for name in ("html", "minimal", None): + formatter = HTMLFormatter.REGISTRY[name] + assert False is formatter.empty_attributes_are_booleans + + formatter = XMLFormatter.REGISTRY[None] + assert False is formatter.empty_attributes_are_booleans + + formatter = HTMLFormatter.REGISTRY["html5"] + assert True is formatter.empty_attributes_are_booleans + + # Verify that the constructor sets the value. + formatter = Formatter(empty_attributes_are_booleans=True) + assert True is formatter.empty_attributes_are_booleans + + # Now demonstrate what it does to markup. + for markup in ("<option selected></option>", '<option selected=""></option>'): + soup = self.soup(markup) + for formatter in ("html", "minimal", "xml", None): + assert b'<option selected=""></option>' == soup.option.encode( + formatter="html" + ) + assert b"<option selected></option>" == soup.option.encode( + formatter="html5" + ) + + @pytest.mark.parametrize( + "indent,expect", + [ + (None, "<a>\n<b>\ntext\n</b>\n</a>\n"), + (-1, "<a>\n<b>\ntext\n</b>\n</a>\n"), + (0, "<a>\n<b>\ntext\n</b>\n</a>\n"), + ("", "<a>\n<b>\ntext\n</b>\n</a>\n"), + (1, "<a>\n <b>\n text\n </b>\n</a>\n"), + (2, "<a>\n <b>\n text\n </b>\n</a>\n"), + ("\t", "<a>\n\t<b>\n\t\ttext\n\t</b>\n</a>\n"), + ("abc", "<a>\nabc<b>\nabcabctext\nabc</b>\n</a>\n"), + # Some invalid inputs -- the default behavior is used. + (object(), "<a>\n <b>\n text\n </b>\n</a>\n"), + (b"bytes", "<a>\n <b>\n text\n </b>\n</a>\n"), + ], + ) + def test_indent(self, indent, expect): + # Pretty-print a tree with a Formatter set to + # indent in a certain way and verify the results. + soup = self.soup("<a><b>text</b></a>") + formatter = Formatter(indent=indent) + assert soup.prettify(formatter=formatter) == expect + + # Pretty-printing only happens with prettify(), not + # encode(). + assert soup.encode(formatter=formatter) != expect + + def test_default_indent_value(self): + formatter = Formatter() + assert formatter.indent == " " + + @pytest.mark.parametrize("formatter,expect", + [ + (HTMLFormatter(indent=1), "<p>\n a\n</p>\n"), + (HTMLFormatter(indent=2), "<p>\n a\n</p>\n"), + (XMLFormatter(indent=1), "<p>\n a\n</p>\n"), + (XMLFormatter(indent="\t"), "<p>\n\ta\n</p>\n"), + ] ) + def test_indent_subclasses(self, formatter, expect): + soup = self.soup("<p>a</p>") + assert expect == soup.p.prettify(formatter=formatter) + + @pytest.mark.parametrize( + "s,expect_html,expect_html5", + [ + # The html5 formatter is much less aggressive about escaping ampersands + # than the html formatter. 
+ ("foo & bar", "foo & bar", "foo & bar"), + ("foo&", "foo&", "foo&"), + ("foo&&& bar", "foo&&& bar", "foo&&& bar"), + ("x=1&y=2", "x=1&y=2", "x=1&y=2"), + ("&123", "&123", "&123"), + ("&abc", "&abc", "&abc"), + ("foo &0 bar", "foo &0 bar", "foo &0 bar"), + ("foo &lolwat bar", "foo &lolwat bar", "foo &lolwat bar"), + # But both formatters escape what the HTML5 spec considers ambiguous ampersands. + ("&nosuchentity;", "&nosuchentity;", "&nosuchentity;"), + ], + ) + def test_entity_substitution(self, s, expect_html, expect_html5): + assert HTMLFormatter.REGISTRY["html"].substitute(s) == expect_html + assert HTMLFormatter.REGISTRY["html5"].substitute(s) == expect_html5 + assert HTMLFormatter.REGISTRY["html5-4.12"].substitute(s) == expect_html + + def test_entity_round_trip(self): + # This is more an explanatory test and a way to avoid regressions than a test of functionality. + + markup = "<p>Some division signs: ÷ ÷ ÷ ÷. These are made with: ÷ &divide; &#247;</p>" + soup = self.soup(markup) + assert ( + "Some division signs: ÷ ÷ ÷ ÷. These are made with: ÷ ÷ ÷" + == soup.p.string + ) + + # Oops, I forgot to mention the entity. + soup.p.string = soup.p.string + " ÷" + + assert ( + "Some division signs: ÷ ÷ ÷ ÷. These are made with: ÷ ÷ ÷ ÷" + == soup.p.string + ) + + expect = "<p>Some division signs: ÷ ÷ ÷ ÷. These are made with: ÷ &divide; &#247; &#xf7;</p>" + assert expect == soup.p.decode(formatter="html") + assert expect == soup.p.decode(formatter="html5") + + markup = "<p>a & b</p>" + soup = self.soup(markup) + assert "<p>a & b</p>" == soup.p.decode(formatter="html") + assert "<p>a & b</p>" == soup.p.decode(formatter="html5") diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/test_fuzz.py b/.venv/lib/python3.12/site-packages/bs4/tests/test_fuzz.py new file mode 100644 index 00000000..f5b0990d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/test_fuzz.py @@ -0,0 +1,181 @@ +"""This file contains test cases reported by third parties using +fuzzing tools, primarily from Google's oss-fuzz project. Some of these +represent real problems with Beautiful Soup, but many are problems in +libraries that Beautiful Soup depends on, and many of the test cases +represent different ways of triggering the same problem. + +Grouping these test cases together makes it easy to see which test +cases represent the same problem, and puts the test cases in close +proximity to code that can trigger the problems. +""" + +import os +import importlib +import pytest +from bs4 import ( + BeautifulSoup, + ParserRejectedMarkup, +) + +try: + from soupsieve.util import SelectorSyntaxError + has_lxml = importlib.util.find_spec("lxml") + has_html5lib = importlib.util.find_spec("html5lib") + fully_fuzzable = has_lxml != None and has_html5lib != None +except ImportError: + fully_fuzzable = False + + +@pytest.mark.skipif( + not fully_fuzzable, reason="Prerequisites for fuzz tests are not installed." +) +class TestFuzz(object): + # Test case markup files from fuzzers are given this extension so + # they can be included in builds. + TESTCASE_SUFFIX = ".testcase" + + # Copied 20230512 from + # https://github.com/google/oss-fuzz/blob/4ac6a645a197a695fe76532251feb5067076b3f3/projects/bs4/bs4_fuzzer.py + # + # Copying the code lets us precisely duplicate the behavior of + # oss-fuzz. The downside is that this code changes over time, so + # multiple copies of the code must be kept around to run against + # older tests. I'm not sure what to do about this, but I may + # retire old tests after a time. 
+ def fuzz_test_with_css(self, filename: str) -> None: + data = self.__markup(filename) + parsers = ["lxml-xml", "html5lib", "html.parser", "lxml"] + try: + idx = int(data[0]) % len(parsers) + except ValueError: + return + + css_selector, data = data[1:10], data[10:] + + try: + soup = BeautifulSoup(data[1:], features=parsers[idx]) + except ParserRejectedMarkup: + return + except ValueError: + return + + list(soup.find_all(True)) + try: + soup.css.select(css_selector.decode("utf-8", "replace")) + except SelectorSyntaxError: + return + soup.prettify() + + # This class of error has been fixed by catching a less helpful + # exception from html.parser and raising ParserRejectedMarkup + # instead. + @pytest.mark.parametrize( + "filename", + [ + "clusterfuzz-testcase-minimized-bs4_fuzzer-5703933063462912", + "crash-ffbdfa8a2b26f13537b68d3794b0478a4090ee4a", + ], + ) + def test_rejected_markup(self, filename): + markup = self.__markup(filename) + with pytest.raises(ParserRejectedMarkup): + BeautifulSoup(markup, "html.parser") + + # This class of error has to do with very deeply nested documents + # which overflow the Python call stack when the tree is converted + # to a string. This is an issue with Beautiful Soup which was fixed + # as part of [bug=1471755]. + # + # These test cases are in the older format that doesn't specify + # which parser to use or give a CSS selector. + @pytest.mark.parametrize( + "filename", + [ + "clusterfuzz-testcase-minimized-bs4_fuzzer-5984173902397440", + "clusterfuzz-testcase-minimized-bs4_fuzzer-5167584867909632", + "clusterfuzz-testcase-minimized-bs4_fuzzer-6124268085182464", + "clusterfuzz-testcase-minimized-bs4_fuzzer-6450958476902400", + ], + ) + def test_deeply_nested_document_without_css(self, filename): + # Parsing the document and encoding it back to a string is + # sufficient to demonstrate that the overflow problem has + # been fixed. + markup = self.__markup(filename) + BeautifulSoup(markup, "html.parser").encode() + + # This class of error has to do with very deeply nested documents + # which overflow the Python call stack when the tree is converted + # to a string. This is an issue with Beautiful Soup which was fixed + # as part of [bug=1471755]. + @pytest.mark.parametrize( + "filename", + [ + "clusterfuzz-testcase-minimized-bs4_fuzzer-5000587759190016", + "clusterfuzz-testcase-minimized-bs4_fuzzer-5375146639360000", + "clusterfuzz-testcase-minimized-bs4_fuzzer-5492400320282624", + ], + ) + def test_deeply_nested_document(self, filename): + self.fuzz_test_with_css(filename) + + @pytest.mark.parametrize( + "filename", + [ + "clusterfuzz-testcase-minimized-bs4_fuzzer-4670634698080256", + "clusterfuzz-testcase-minimized-bs4_fuzzer-5270998950477824", + ], + ) + def test_soupsieve_errors(self, filename): + self.fuzz_test_with_css(filename) + + # This class of error represents problems with html5lib's parser, + # not Beautiful Soup. I use + # https://github.com/html5lib/html5lib-python/issues/568 to notify + # the html5lib developers of these issues. + # + # These test cases are in the older format that doesn't specify + # which parser to use or give a CSS selector. 
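+ # (Hence these are skipped by default and, being old-format
+ # cases, are fed straight to the html5lib builder below rather
+ # than through fuzz_test_with_css.)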
+ @pytest.mark.skip(reason="html5lib-specific problems")
+ @pytest.mark.parametrize(
+ "filename",
+ [
+ # b"""ÿ<!DOCTyPEV PUBLIC'''Ã'"""
+ "clusterfuzz-testcase-minimized-bs4_fuzzer-4818336571064320",
+ # b')<a><math><TR><a><mI><a><p><a>'
+ "clusterfuzz-testcase-minimized-bs4_fuzzer-4999465949331456",
+ # b'-<math><sElect><mi><sElect><sElect>'
+ "clusterfuzz-testcase-minimized-bs4_fuzzer-5843991618256896",
+ # b'ñ<table><svg><html>'
+ "clusterfuzz-testcase-minimized-bs4_fuzzer-6241471367348224",
+ # <TABLE>, some ^@ characters, some <math> tags.
+ "clusterfuzz-testcase-minimized-bs4_fuzzer-6600557255327744",
+ # Nested table
+ "crash-0d306a50c8ed8bcd0785b67000fcd5dea1d33f08",
+ ],
+ )
+ def test_html5lib_parse_errors_without_css(self, filename):
+ markup = self.__markup(filename)
+ print(BeautifulSoup(markup, "html5lib").encode())
+
+ # This class of error represents problems with html5lib's parser,
+ # not Beautiful Soup. I use
+ # https://github.com/html5lib/html5lib-python/issues/568 to notify
+ # the html5lib developers of these issues.
+ @pytest.mark.skip(reason="html5lib-specific problems")
+ @pytest.mark.parametrize(
+ "filename",
+ [
+ # b'- \xff\xff <math>\x10<select><mi><select><select>t'
+ "clusterfuzz-testcase-minimized-bs4_fuzzer-6306874195312640",
+ ],
+ )
+ def test_html5lib_parse_errors(self, filename):
+ self.fuzz_test_with_css(filename)
+
+ def __markup(self, filename: str) -> bytes:
+ if not filename.endswith(self.TESTCASE_SUFFIX):
+ filename += self.TESTCASE_SUFFIX
+ this_dir = os.path.split(__file__)[0]
+ path = os.path.join(this_dir, "fuzz", filename)
+ # Close the file handle promptly instead of relying on garbage collection.
+ with open(path, "rb") as f:
+ return f.read()
diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/test_html5lib.py b/.venv/lib/python3.12/site-packages/bs4/tests/test_html5lib.py
new file mode 100644
index 00000000..593c12bd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/bs4/tests/test_html5lib.py
@@ -0,0 +1,264 @@
+"""Tests to ensure that the html5lib tree builder generates good trees."""
+
+import pytest
+import warnings
+
+from bs4 import BeautifulSoup
+from bs4.filter import SoupStrainer
+from . import (
+ HTML5LIB_PRESENT,
+ HTML5TreeBuilderSmokeTest,
+)
+
+
+@pytest.mark.skipif(
+ not HTML5LIB_PRESENT,
+ reason="html5lib seems not to be present, not testing its tree builder.",
+)
+class TestHTML5LibBuilder(HTML5TreeBuilderSmokeTest):
+ """See ``HTML5TreeBuilderSmokeTest``."""
+
+ @property
+ def default_builder(self):
+ from bs4.builder import HTML5TreeBuilder
+
+ return HTML5TreeBuilder
+
+ def test_soupstrainer(self):
+ # The html5lib tree builder does not support parse_only.
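For contrast with the warning this test asserts below: when a tree builder does support parse_only (html.parser, for example), the SoupStrainer prunes the tree at parse time. A minimal sketch of the supported behavior, using the same strainer as this test:

    from bs4 import BeautifulSoup
    from bs4.filter import SoupStrainer

    strainer = SoupStrainer("b")
    soup = BeautifulSoup("<p>A <b>bold</b> statement.</p>", "html.parser", parse_only=strainer)
    print(soup.decode())  # only the <b> subtree survives: <b>bold</b>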
+ strainer = SoupStrainer("b") + markup = "<p>A <b>bold</b> statement.</p>" + with warnings.catch_warnings(record=True) as w: + soup = BeautifulSoup(markup, "html5lib", parse_only=strainer) + assert soup.decode() == self.document_for(markup) + + [warning] = w + assert warning.filename == __file__ + assert "the html5lib tree builder doesn't support parse_only" in str( + warning.message + ) + + def test_correctly_nested_tables(self): + """html5lib inserts <tbody> tags where other parsers don't.""" + markup = ( + '<table id="1">' + "<tr>" + "<td>Here's another table:" + '<table id="2">' + "<tr><td>foo</td></tr>" + "</table></td>" + ) + + self.assert_soup( + markup, + '<table id="1"><tbody><tr><td>Here\'s another table:' + '<table id="2"><tbody><tr><td>foo</td></tr></tbody></table>' + "</td></tr></tbody></table>", + ) + + self.assert_soup( + "<table><thead><tr><td>Foo</td></tr></thead>" + "<tbody><tr><td>Bar</td></tr></tbody>" + "<tfoot><tr><td>Baz</td></tr></tfoot></table>" + ) + + def test_xml_declaration_followed_by_doctype(self): + markup = """<?xml version="1.0" encoding="utf-8"?> +<!DOCTYPE html> +<html> + <head> + </head> + <body> + <p>foo</p> + </body> +</html>""" + soup = self.soup(markup) + # Verify that we can reach the <p> tag; this means the tree is connected. + assert b"<p>foo</p>" == soup.p.encode() + + def test_reparented_markup(self): + markup = "<p><em>foo</p>\n<p>bar<a></a></em></p>" + soup = self.soup(markup) + assert ( + "<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p></body>" + == soup.body.decode() + ) + assert 2 == len(soup.find_all("p")) + + def test_reparented_markup_ends_with_whitespace(self): + markup = "<p><em>foo</p>\n<p>bar<a></a></em></p>\n" + soup = self.soup(markup) + assert ( + "<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p>\n</body>" + == soup.body.decode() + ) + assert 2 == len(soup.find_all("p")) + + def test_reparented_markup_containing_identical_whitespace_nodes(self): + """Verify that we keep the two whitespace nodes in this + document distinct when reparenting the adjacent <tbody> tags. + """ + markup = "<table> <tbody><tbody><ims></tbody> </table>" + soup = self.soup(markup) + space1, space2 = soup.find_all(string=" ") + tbody1, tbody2 = soup.find_all("tbody") + assert space1.next_element is tbody1 + assert tbody2.next_element is space2 + + def test_reparented_markup_containing_children(self): + markup = ( + "<div><a>aftermath<p><noscript>target</noscript>aftermath</a></p></div>" + ) + soup = self.soup(markup) + noscript = soup.noscript + assert "target" == noscript.next_element + target = soup.find(string="target") + + # The 'aftermath' string was duplicated; we want the second one. + final_aftermath = soup.find_all(string="aftermath")[-1] + + # The <noscript> tag was moved beneath a copy of the <a> tag, + # but the 'target' string within is still connected to the + # (second) 'aftermath' string. 
+ assert final_aftermath == target.next_element
+ assert target == final_aftermath.previous_element
+
+ def test_processing_instruction(self):
+ """Processing instructions become comments."""
+ markup = b"""<?PITarget PIContent?>"""
+ soup = self.soup(markup)
+ assert str(soup).startswith("<!--?PITarget PIContent?-->")
+
+ def test_cloned_multivalue_node(self):
+ markup = b"""<a class="my_class"><p></a>"""
+ soup = self.soup(markup)
+ a1, a2 = soup.find_all("a")
+ assert a1 == a2
+ assert a1 is not a2
+
+ def test_foster_parenting(self):
+ markup = b"""<table><td></tbody>A"""
+ soup = self.soup(markup)
+ assert (
+ "<body>A<table><tbody><tr><td></td></tr></tbody></table></body>"
+ == soup.body.decode()
+ )
+
+ def test_extraction(self):
+ """
+ Test that extraction does not destroy the tree.
+
+ https://bugs.launchpad.net/beautifulsoup/+bug/1782928
+ """
+
+ markup = """
+<html><head></head>
+<style>
+</style><script></script><body><p>hello</p></body></html>
+"""
+ soup = self.soup(markup)
+ [s.extract() for s in soup("script")]
+ [s.extract() for s in soup("style")]
+
+ assert len(soup.find_all("p")) == 1
+
+ def test_empty_comment(self):
+ """
+ Test that empty comment does not break structure.
+
+ https://bugs.launchpad.net/beautifulsoup/+bug/1806598
+ """
+
+ markup = """
+<html>
+<body>
+<form>
+<!----><input type="text">
+</form>
+</body>
+</html>
+"""
+ soup = self.soup(markup)
+ inputs = []
+ for form in soup.find_all("form"):
+ inputs.extend(form.find_all("input"))
+ assert len(inputs) == 1
+
+ def test_tracking_line_numbers(self):
+ # The html5lib TreeBuilder keeps track of the line number and
+ # position of each element.
+ markup = "\n   <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>"
+ soup = self.soup(markup)
+ assert 2 == soup.p.sourceline
+ assert 5 == soup.p.sourcepos
+ assert "sourceline" == soup.p.find("sourceline").name
+
+ # You can deactivate this behavior.
+ soup = self.soup(markup, store_line_numbers=False)
+ assert None is soup.p.sourceline
+ assert None is soup.p.sourcepos
+
+ def test_special_string_containers(self):
+ # The html5lib tree builder doesn't support this standard feature,
+ # because there's no way of knowing, when a string is created,
+ # where in the tree it will eventually end up.
+ pass
+
+ def test_html5_attributes(self):
+ # The html5lib TreeBuilder can convert any entity named in
+ # the HTML5 spec to a sequence of Unicode characters, and
+ # convert those Unicode characters to a (potentially
+ # different) named entity on the way out.
+ #
+ # This is a copy of the same test from
+ # HTMLParserTreeBuilderSmokeTest. It's not in the superclass
+ # because the lxml HTML TreeBuilder _doesn't_ work this way.
+ for input_element, output_unicode, output_element in (
+ ("&RightLeftArrows;", "\u21c4", b"&rlarr;"),
+ ("&models;", "\u22a7", b"&models;"),
+ ("&Nfr;", "\U0001d511", b"&Nfr;"),
+ ("&ngeqq;", "\u2267\u0338", b"&ngeqq;"),
+ ("&not;", "\xac", b"&not;"),
+ ("&Not;", "\u2aec", b"&Not;"),
+ ("&quot;", '"', b'"'),
+ ("&there4;", "\u2234", b"&there4;"),
+ ("&Therefore;", "\u2234", b"&there4;"),
+ ("&therefore;", "\u2234", b"&there4;"),
+ ("&fjlig;", "fj", b"fj"),
+ ("&sqcup;", "\u2294", b"&sqcup;"),
+ ("&sqcups;", "\u2294\ufe00", b"&sqcups;"),
+ ("&apos;", "'", b"'"),
+ ("&verbar;", "|", b"|"),
+ ):
+ markup = "<div>%s</div>" % input_element
+ div = self.soup(markup).div
+ without_element = div.encode()
+ expect = b"<div>%s</div>" % output_unicode.encode("utf8")
+ assert without_element == expect
+
+ with_element = div.encode(formatter="html")
+ expect = b"<div>%s</div>" % output_element
+ assert with_element == expect
+
+ @pytest.mark.parametrize(
+ "name,value",
+ [("document_declared_encoding", "utf8"), ("exclude_encodings", ["utf8"])],
+ )
+ def test_prepare_markup_warnings(self, name, value):
+ # html5lib doesn't support a couple of the common arguments to
+ # prepare_markup.
+ builder = self.default_builder()
+ kwargs = {name: value}
+ with warnings.catch_warnings(record=True) as w:
+ list(builder.prepare_markup("a", **kwargs))
+ [warning] = w
+ msg = str(warning.message)
+ assert (
+ msg
+ == f"You provided a value for {name}, but the html5lib tree builder doesn't support {name}."
+ )
+
+ def test_doctype_filtered(self):
+ # Since the html5lib parser doesn't support parse_only, this
+ # standard smoke test can't be run.
+ pass
diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/test_htmlparser.py b/.venv/lib/python3.12/site-packages/bs4/tests/test_htmlparser.py
new file mode 100644
index 00000000..b2bd07fc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/bs4/tests/test_htmlparser.py
@@ -0,0 +1,161 @@
+"""Tests to ensure that the html.parser tree builder generates good
+trees."""
+
+import pickle
+import pytest
+from bs4.builder._htmlparser import (
+ _DuplicateAttributeHandler,
+ BeautifulSoupHTMLParser,
+ HTMLParserTreeBuilder,
+)
+from bs4.exceptions import ParserRejectedMarkup
+from typing import Any
+from . import HTMLTreeBuilderSmokeTest
+
+
+class TestHTMLParserTreeBuilder(HTMLTreeBuilderSmokeTest):
+ default_builder = HTMLParserTreeBuilder
+
+ def test_rejected_input(self):
+ # Python's html.parser will occasionally reject markup,
+ # especially when there is a problem with the initial DOCTYPE
+ # declaration. Different versions of Python sound the alarm in
+ # different ways, but Beautiful Soup consistently raises
+ # errors as ParserRejectedMarkup exceptions.
+ bad_markup = [
+ # https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=28873
+ # https://github.com/guidovranken/python-library-fuzzers/blob/master/corp-html/519e5b4269a01185a0d5e76295251921da2f0700
+ # https://github.com/python/cpython/issues/81928
+ b"\n<![\xff\xfe\xfe\xcd\x00",
+ # https://github.com/guidovranken/python-library-fuzzers/blob/master/corp-html/de32aa55785be29bbc72a1a8e06b00611fb3d9f8
+ # https://github.com/python/cpython/issues/78661
+ #
+ b"<![n\x00",
+ b"<![UNKNOWN[]]>",
+ ]
+ for markup in bad_markup:
+ with pytest.raises(ParserRejectedMarkup):
+ self.soup(markup)
+
+ def test_namespaced_system_doctype(self):
+ # html.parser can't handle namespaced doctypes, so skip this one.
+ pass
+
+ def test_namespaced_public_doctype(self):
+ # html.parser can't handle namespaced doctypes, so skip this one.
+ pass
+
+ def test_builder_is_pickled(self):
+ """Unlike most tree builders, HTMLParserTreeBuilder will be
+ restored after pickling.
+ """ + tree = self.soup("<a><b>foo</a>") + dumped = pickle.dumps(tree, 2) + loaded = pickle.loads(dumped) + assert isinstance(loaded.builder, type(tree.builder)) + + def test_redundant_empty_element_closing_tags(self): + self.assert_soup("<br></br><br></br><br></br>", "<br/><br/><br/>") + self.assert_soup("</br></br></br>", "") + + def test_empty_element(self): + # This verifies that any buffered data present when the parser + # finishes working is handled. + self.assert_soup("foo &# bar", "foo &# bar") + + def test_tracking_line_numbers(self): + # The html.parser TreeBuilder keeps track of line number and + # position of each element. + markup = "\n <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>" + soup = self.soup(markup) + assert 2 == soup.p.sourceline + assert 3 == soup.p.sourcepos + assert "sourceline" == soup.p.find("sourceline").name + + # You can deactivate this behavior. + soup = self.soup(markup, store_line_numbers=False) + assert None is soup.p.sourceline + assert None is soup.p.sourcepos + + def test_on_duplicate_attribute(self): + # The html.parser tree builder has a variety of ways of + # handling a tag that contains the same attribute multiple times. + + markup = '<a class="cls" href="url1" href="url2" href="url3" id="id">' + + # If you don't provide any particular value for + # on_duplicate_attribute, later values replace earlier values. + soup = self.soup(markup) + assert "url3" == soup.a["href"] + assert ["cls"] == soup.a["class"] + assert "id" == soup.a["id"] + + # You can also get this behavior explicitly. + def assert_attribute( + on_duplicate_attribute: _DuplicateAttributeHandler, expected: Any + ) -> None: + soup = self.soup(markup, on_duplicate_attribute=on_duplicate_attribute) + assert soup.a is not None + assert expected == soup.a["href"] + + # Verify that non-duplicate attributes are treated normally. + assert ["cls"] == soup.a["class"] + assert "id" == soup.a["id"] + + assert_attribute(None, "url3") + assert_attribute(BeautifulSoupHTMLParser.REPLACE, "url3") + + # You can ignore subsequent values in favor of the first. + assert_attribute(BeautifulSoupHTMLParser.IGNORE, "url1") + + # And you can pass in a callable that does whatever you want. + def accumulate(attrs, key, value): + if not isinstance(attrs[key], list): + attrs[key] = [attrs[key]] + attrs[key].append(value) + + assert_attribute(accumulate, ["url1", "url2", "url3"]) + + def test_html5_attributes(self): + # The html.parser TreeBuilder can convert any entity named in + # the HTML5 spec to a sequence of Unicode characters, and + # convert those Unicode characters to a (potentially + # different) named entity on the way out. 
+ for input_element, output_unicode, output_element in (
+ ("&RightLeftArrows;", "\u21c4", b"&rlarr;"),
+ ("&models;", "\u22a7", b"&models;"),
+ ("&Nfr;", "\U0001d511", b"&Nfr;"),
+ ("&ngeqq;", "\u2267\u0338", b"&ngeqq;"),
+ ("&not;", "\xac", b"&not;"),
+ ("&Not;", "\u2aec", b"&Not;"),
+ ("&quot;", '"', b'"'),
+ ("&there4;", "\u2234", b"&there4;"),
+ ("&Therefore;", "\u2234", b"&there4;"),
+ ("&therefore;", "\u2234", b"&there4;"),
+ ("&fjlig;", "fj", b"fj"),
+ ("&sqcup;", "\u2294", b"&sqcup;"),
+ ("&sqcups;", "\u2294\ufe00", b"&sqcups;"),
+ ("&apos;", "'", b"'"),
+ ("&verbar;", "|", b"|"),
+ ):
+ markup = "<div>%s</div>" % input_element
+ div = self.soup(markup).div
+ without_element = div.encode()
+ expect = b"<div>%s</div>" % output_unicode.encode("utf8")
+ assert without_element == expect
+
+ with_element = div.encode(formatter="html")
+ expect = b"<div>%s</div>" % output_element
+ assert with_element == expect
+
+ def test_invalid_html_entity(self):
+ # The html.parser treebuilder can't distinguish between an invalid
+ # HTML entity with a semicolon and an invalid HTML entity with no
+ # semicolon.
+ markup = "<p>a &nosuchentity b</p>"
+ soup = self.soup(markup)
+ assert "<p>a &amp;nosuchentity b</p>" == soup.p.decode()
+
+ markup = "<p>a &nosuchentity; b</p>"
+ soup = self.soup(markup)
+ assert "<p>a &amp;nosuchentity b</p>" == soup.p.decode()
diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/test_lxml.py b/.venv/lib/python3.12/site-packages/bs4/tests/test_lxml.py
new file mode 100644
index 00000000..04a0ee88
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/bs4/tests/test_lxml.py
@@ -0,0 +1,196 @@
+"""Tests to ensure that the lxml tree builder generates good trees."""
+
+import pickle
+import pytest
+import warnings
+from . import LXML_PRESENT, LXML_VERSION
+
+if LXML_PRESENT:
+ from bs4.builder._lxml import LXMLTreeBuilder, LXMLTreeBuilderForXML
+
+from bs4 import (
+ BeautifulStoneSoup,
+)
+from . import (
+ HTMLTreeBuilderSmokeTest,
+ XMLTreeBuilderSmokeTest,
+ SOUP_SIEVE_PRESENT,
+)
+
+
+@pytest.mark.skipif(
+ not LXML_PRESENT,
+ reason="lxml seems not to be present, not testing its tree builder.",
+)
+class TestLXMLTreeBuilder(HTMLTreeBuilderSmokeTest):
+ """See ``HTMLTreeBuilderSmokeTest``."""
+
+ @property
+ def default_builder(self):
+ return LXMLTreeBuilder
+
+ def test_out_of_range_entity(self):
+ self.assert_soup("<p>foo&#10000000000000;bar</p>", "<p>foobar</p>")
+ self.assert_soup("<p>foo&#x10000000000000;bar</p>", "<p>foobar</p>")
+ self.assert_soup("<p>foo&#1000000000;bar</p>", "<p>foobar</p>")
+
+ def test_entities_in_foreign_document_encoding(self):
+ # We can't implement this case correctly because by the time we
+ # hear about markup like "&#147;", it's been (incorrectly) converted into
+ # a string like u'\x93'
+ pass
+
+ # In lxml < 2.3.5, an empty doctype causes a segfault. Skip this
+ # test if an old version of lxml is installed.
+
+ @pytest.mark.skipif(
+ not LXML_PRESENT or LXML_VERSION < (2, 3, 5, 0),
+ reason="Skipping doctype test for old version of lxml to avoid segfault.",
+ )
+ def test_empty_doctype(self):
+ soup = self.soup("<!DOCTYPE>")
+ doctype = soup.contents[0]
+ assert "" == doctype.strip()
+
+ def test_beautifulstonesoup_is_xml_parser(self):
+ # Make sure that the deprecated BSS class uses an xml builder
+ # if one is installed.
+ with warnings.catch_warnings(record=True) as w:
+ soup = BeautifulStoneSoup("<b />")
+ assert "<b/>" == str(soup.b)
+ [warning] = w
+ assert warning.filename == __file__
+ assert "The BeautifulStoneSoup class was deprecated" in str(warning.message)
+
+ def test_tracking_line_numbers(self):
+ # The lxml TreeBuilder cannot keep track of line numbers from
+ # the original markup. Even if you ask for line numbers, we
+ # don't have 'em.
+ #
+ # However, for consistency with other parsers, Tag.sourceline
+ # and Tag.sourcepos are always set to None, rather than letting
+ # attribute access fall through to find().
+ soup = self.soup(
+ "\n   <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>",
+ store_line_numbers=True,
+ )
+ assert None is soup.p.sourceline
+ assert None is soup.p.sourcepos
+
+
+@pytest.mark.skipif(
+ not LXML_PRESENT,
+ reason="lxml seems not to be present, not testing its XML tree builder.",
+)
+class TestLXMLXMLTreeBuilder(XMLTreeBuilderSmokeTest):
+ """See ``XMLTreeBuilderSmokeTest``."""
+
+ @property
+ def default_builder(self):
+ return LXMLTreeBuilderForXML
+
+ def test_namespace_indexing(self):
+ soup = self.soup(
+ '<?xml version="1.1"?>\n'
+ "<root>"
+ '<tag xmlns="http://unprefixed-namespace.com">content</tag>'
+ '<prefix:tag2 xmlns:prefix="http://prefixed-namespace.com">content</prefix:tag2>'
+ '<prefix2:tag3 xmlns:prefix2="http://another-namespace.com">'
+ '<subtag xmlns="http://another-unprefixed-namespace.com">'
+ '<subsubtag xmlns="http://yet-another-unprefixed-namespace.com">'
+ "</prefix2:tag3>"
+ "</root>"
+ )
+
+ # The BeautifulSoup object includes every namespace prefix
+ # defined in the entire document. This is the default set of
+ # namespaces used by soupsieve.
+ #
+ # Un-prefixed namespaces are not included, and if a given
+ # prefix is defined twice, only the first prefix encountered
+ # in the document shows up here.
+ assert soup._namespaces == {
+ "xml": "http://www.w3.org/XML/1998/namespace",
+ "prefix": "http://prefixed-namespace.com",
+ "prefix2": "http://another-namespace.com",
+ }
+
+ # A Tag object includes only the namespace prefixes
+ # that were in scope when it was parsed.
+
+ # We do not track un-prefixed namespaces as we can only hold
+ # one (the first one), and it will be recognized as the
+ # default namespace by soupsieve, even when operating from a
+ # tag with a different un-prefixed namespace.
+ assert soup.tag._namespaces == {
+ "xml": "http://www.w3.org/XML/1998/namespace",
+ }
+
+ assert soup.tag2._namespaces == {
+ "prefix": "http://prefixed-namespace.com",
+ "xml": "http://www.w3.org/XML/1998/namespace",
+ }
+
+ assert soup.subtag._namespaces == {
+ "prefix2": "http://another-namespace.com",
+ "xml": "http://www.w3.org/XML/1998/namespace",
+ }
+
+ assert soup.subsubtag._namespaces == {
+ "prefix2": "http://another-namespace.com",
+ "xml": "http://www.w3.org/XML/1998/namespace",
+ }
+
+ @pytest.mark.skipif(not SOUP_SIEVE_PRESENT, reason="Soup Sieve not installed")
+ def test_namespace_interaction_with_select_and_find(self):
+ # Demonstrate how namespaces interact with select* and
+ # find* methods.
+
+ soup = self.soup(
+ '<?xml version="1.1"?>\n'
+ "<root>"
+ '<tag xmlns="http://unprefixed-namespace.com">content</tag>'
+ '<prefix:tag2 xmlns:prefix="http://prefixed-namespace.com">content</prefix:tag2>'
+ '<subtag xmlns:prefix="http://another-namespace-same-prefix.com">'
+ "<prefix:tag3>"
+ "</subtag>"
+ "</root>"
+ )
+
+ # soupsieve uses namespace URIs.
+ assert soup.select_one("tag").name == "tag"
+ assert soup.select_one("prefix|tag2").name == "tag2"
+
+ # If a prefix is declared more than once, only the first usage
+ # is registered with the BeautifulSoup object.
+ assert soup.select_one("prefix|tag3") is None
+
+ # But you can always explicitly specify a namespace dictionary.
+ assert (
+ soup.select_one("prefix|tag3", namespaces=soup.subtag._namespaces).name
+ == "tag3"
+ )
+
+ # And a Tag (as opposed to the BeautifulSoup object) will
+ # have a set of default namespaces scoped to that Tag.
+ assert soup.subtag.select_one("prefix|tag3").name == "tag3"
+
+ # The find() methods aren't fully namespace-aware; they just
+ # look at prefixes.
+ assert soup.find("tag").name == "tag"
+ assert soup.find("prefix:tag2").name == "tag2"
+ assert soup.find("prefix:tag3").name == "tag3"
+ assert soup.subtag.find("prefix:tag3").name == "tag3"
+
+ def test_pickle_restores_builder(self):
+ # The lxml TreeBuilder is not picklable, so when unpickling
+ # a document created with it, a new TreeBuilder of the
+ # appropriate class is created.
+ soup = self.soup("<a>some markup</a>")
+ assert isinstance(soup.builder, self.default_builder)
+ pickled = pickle.dumps(soup)
+ unpickled = pickle.loads(pickled)
+
+ assert "some markup" == unpickled.a.string
+ assert unpickled.builder != soup.builder
+ assert isinstance(unpickled.builder, self.default_builder)
diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/test_navigablestring.py b/.venv/lib/python3.12/site-packages/bs4/tests/test_navigablestring.py
new file mode 100644
index 00000000..3e33469f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/bs4/tests/test_navigablestring.py
@@ -0,0 +1,144 @@
+import pytest
+
+from bs4.element import (
+ CData,
+ Comment,
+ Declaration,
+ Doctype,
+ NavigableString,
+ RubyParenthesisString,
+ RubyTextString,
+ Script,
+ Stylesheet,
+ TemplateString,
+)
+
+from . import SoupTest
+
+
+class TestNavigableString(SoupTest):
+ def test_text_acquisition_methods(self):
+ # These methods are intended for use against Tag, but they
+ # work on NavigableString as well.
+
+ s = NavigableString("fee ")
+ cdata = CData("fie ")
+ comment = Comment("foe ")
+
+ assert "fee " == s.get_text()
+ assert "fee " == s.string
+ assert "fee" == s.get_text(strip=True)
+ assert ["fee "] == list(s.strings)
+ assert ["fee"] == list(s.stripped_strings)
+ assert ["fee "] == list(s._all_strings())
+
+ assert "fie " == cdata.get_text()
+ assert "fie " == cdata.string
+ assert "fie" == cdata.get_text(strip=True)
+ assert ["fie "] == list(cdata.strings)
+ assert ["fie"] == list(cdata.stripped_strings)
+ assert ["fie "] == list(cdata._all_strings())
+
+ # Since a Comment isn't normally considered 'text',
+ # these methods generally do nothing.
+ assert "" == comment.get_text()
+ assert [] == list(comment.strings)
+ assert [] == list(comment.stripped_strings)
+ assert [] == list(comment._all_strings())
+
+ # Unless you specifically say that comments are okay.
+ assert "foe" == comment.get_text(strip=True, types=Comment)
+ assert "foe " == comment.get_text(types=(Comment, NavigableString))
+
+ def test_string_has_immutable_name_property(self):
+ # string.name is defined as None and can't be modified.
+ string = self.soup("s").string
+ assert None is string.name
+ with pytest.raises(AttributeError):
+ string.name = "foo"
+
+
+class TestNavigableStringSubclasses(SoupTest):
+ def test_cdata(self):
+ # None of the current builders turn CDATA sections into CData
+ # objects, but you can create them manually.
+ soup = self.soup("")
+ cdata = CData("foo")
+ soup.insert(1, cdata)
+ assert str(soup) == "<![CDATA[foo]]>"
+ assert soup.find(string="foo") == "foo"
+ assert soup.contents[0] == "foo"
+
+ def test_cdata_is_never_formatted(self):
+ """Text inside a CData object is passed into the formatter.
+
+ But the return value is ignored.
+ """ + + self.count = 0 + + def increment(*args): + self.count += 1 + return "BITTER FAILURE" + + soup = self.soup("") + cdata = CData("<><><>") + soup.insert(1, cdata) + assert b"<![CDATA[<><><>]]>" == soup.encode(formatter=increment) + assert 1 == self.count + + def test_doctype_ends_in_newline(self): + # Unlike other NavigableString subclasses, a DOCTYPE always ends + # in a newline. + doctype = Doctype("foo") + soup = self.soup("") + soup.insert(1, doctype) + assert soup.encode() == b"<!DOCTYPE foo>\n" + + def test_declaration(self): + d = Declaration("foo") + assert "<?foo?>" == d.output_ready() + + def test_default_string_containers(self): + # In some cases, we use different NavigableString subclasses for + # the same text in different tags. + soup = self.soup("<div>text</div><script>text</script><style>text</style>") + assert [NavigableString, Script, Stylesheet] == [ + x.__class__ for x in soup.find_all(string=True) + ] + + # The TemplateString is a little unusual because it's generally found + # _inside_ children of a <template> element, not a direct child of the + # <template> element. + soup = self.soup( + "<template>Some text<p>In a tag</p></template>Some text outside" + ) + assert all( + isinstance(x, TemplateString) + for x in soup.template._all_strings(types=None) + ) + + # Once the <template> tag closed, we went back to using + # NavigableString. + outside = soup.template.next_sibling + assert isinstance(outside, NavigableString) + assert not isinstance(outside, TemplateString) + + # The TemplateString is also unusual because it can contain + # NavigableString subclasses of _other_ types, such as + # Comment. + markup = b"<template>Some text<p>In a tag</p><!--with a comment--></template>" + soup = self.soup(markup) + assert markup == soup.template.encode("utf8") + + def test_ruby_strings(self): + markup = "<ruby>æ¼¢ <rp>(</rp><rt>kan</rt><rp>)</rp> å— <rp>(</rp><rt>ji</rt><rp>)</rp></ruby>" + soup = self.soup(markup) + assert isinstance(soup.rp.string, RubyParenthesisString) + assert isinstance(soup.rt.string, RubyTextString) + + # Just as a demo, here's what this means for get_text usage. + assert "æ¼¢å—" == soup.get_text(strip=True) + assert "æ¼¢(kan)å—(ji)" == soup.get_text( + strip=True, types=(NavigableString, RubyTextString, RubyParenthesisString) + ) diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/test_pageelement.py b/.venv/lib/python3.12/site-packages/bs4/tests/test_pageelement.py new file mode 100644 index 00000000..91d57792 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/test_pageelement.py @@ -0,0 +1,437 @@ +"""Tests of the bs4.element.PageElement class""" + +import copy +import pickle +import pytest +import sys +import warnings + +from bs4 import BeautifulSoup +from bs4.element import ( + AttributeValueList, + Comment, +) +from bs4.filter import SoupStrainer +from . 
+ SoupTest,
+)
+
+
+class TestEncoding(SoupTest):
+ """Test the ability to encode objects into strings."""
+
+ def test_unicode_string_can_be_encoded(self):
+ html = "<b>\N{SNOWMAN}</b>"
+ soup = self.soup(html)
+ assert soup.b.string.encode("utf-8") == "\N{SNOWMAN}".encode("utf-8")
+
+ def test_tag_containing_unicode_string_can_be_encoded(self):
+ html = "<b>\N{SNOWMAN}</b>"
+ soup = self.soup(html)
+ assert soup.b.encode("utf-8") == html.encode("utf-8")
+
+ def test_encoding_substitutes_unrecognized_characters_by_default(self):
+ html = "<b>\N{SNOWMAN}</b>"
+ soup = self.soup(html)
+ assert soup.b.encode("ascii") == b"<b>&#9731;</b>"
+
+ def test_encoding_can_be_made_strict(self):
+ html = "<b>\N{SNOWMAN}</b>"
+ soup = self.soup(html)
+ with pytest.raises(UnicodeEncodeError):
+ soup.encode("ascii", errors="strict")
+
+ def test_decode_contents(self):
+ html = "<b>\N{SNOWMAN}</b>"
+ soup = self.soup(html)
+ assert "\N{SNOWMAN}" == soup.b.decode_contents()
+
+ def test_encode_contents(self):
+ html = "<b>\N{SNOWMAN}</b>"
+ soup = self.soup(html)
+ assert "\N{SNOWMAN}".encode("utf8") == soup.b.encode_contents(encoding="utf8")
+
+ def test_encode_deeply_nested_document(self):
+ # This test verifies that encoding a string doesn't involve
+ # any recursive function calls. If it did, this test would
+ # overflow the Python interpreter stack.
+ limit = sys.getrecursionlimit() + 1
+ markup = "<span>" * limit
+ soup = self.soup(markup)
+ encoded = soup.encode()
+ assert limit == encoded.count(b"<span>")
+
+ def test_deprecated_renderContents(self):
+ html = "<b>\N{SNOWMAN}</b>"
+ soup = self.soup(html)
+ with warnings.catch_warnings(record=True) as w:
+ soup.renderContents()
+ assert "\N{SNOWMAN}".encode("utf8") == soup.b.renderContents()
+ msgs = [str(warning.message) for warning in w]
+ assert all(
+ x
+ == "Call to deprecated method renderContents. (Replaced by encode_contents) -- Deprecated since version 4.0.0."
+ for x in msgs
+ )
+
+ def test_repr(self):
+ html = "<b>\N{SNOWMAN}</b>"
+ soup = self.soup(html)
+ assert html == repr(soup)
+
+
+class TestFormatters(SoupTest):
+ """Test the formatting feature, used by methods like decode() and
+ prettify(), and the formatters themselves.
+ """
+
+ def test_default_formatter_is_minimal(self):
+ markup = "<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"
+ soup = self.soup(markup)
+ decoded = soup.decode(formatter="minimal")
+ # The < is converted back into &lt; but the e-with-acute is left alone.
+ assert decoded == self.document_for(
+ "<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"
+ )
+
+ def test_formatter_html(self):
+ markup = (
+ "<br><b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"
+ )
+ soup = self.soup(markup)
+ decoded = soup.decode(formatter="html")
+ assert decoded == self.document_for(
+ "<br/><b>&lt;&lt;Sacr&eacute; bleu!&gt;&gt;</b>"
+ )
+
+ def test_formatter_html5(self):
+ markup = (
+ "<br><b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"
+ )
+ soup = self.soup(markup)
+ decoded = soup.decode(formatter="html5")
+ assert decoded == self.document_for(
+ "<br><b>&lt;&lt;Sacr&eacute; bleu!&gt;&gt;</b>"
+ )
+
+ def test_formatter_minimal(self):
+ markup = "<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"
+ soup = self.soup(markup)
+ decoded = soup.decode(formatter="minimal")
+ # The < is converted back into &lt; but the e-with-acute is left alone.
+ assert decoded == self.document_for(
+ "<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"
+ )
+
+ def test_formatter_null(self):
+ markup = "<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"
+ soup = self.soup(markup)
+ decoded = soup.decode(formatter=None)
+ # Neither the angle brackets nor the e-with-acute are converted.
+ # This is not valid HTML, but it's what the user wanted.
+ assert decoded == self.document_for(
+ "<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
+ )
+
+ def test_formatter_custom(self):
+ markup = "<b>&lt;foo&gt;</b><b>bar</b><br/>"
+ soup = self.soup(markup)
+ decoded = soup.decode(formatter=lambda x: x.upper())
+ # Instead of normal entity conversion code, the custom
+ # callable is called on every string.
+ assert decoded == self.document_for("<b><FOO></b><b>BAR</b><br/>")
+
+ def test_formatter_is_run_on_attribute_values(self):
+ markup = '<a href="http://a.com?a=b&c=é">e</a>'
+ soup = self.soup(markup)
+ a = soup.a
+
+ expect_minimal = '<a href="http://a.com?a=b&amp;c=é">e</a>'
+
+ assert expect_minimal == a.decode()
+ assert expect_minimal == a.decode(formatter="minimal")
+
+ expect_html = '<a href="http://a.com?a=b&amp;c=&eacute;">e</a>'
+ assert expect_html == a.decode(formatter="html")
+
+ assert markup == a.decode(formatter=None)
+ expect_upper = '<a href="HTTP://A.COM?A=B&C=É">E</a>'
+ assert expect_upper == a.decode(formatter=lambda x: x.upper())
+
+ def test_formatter_skips_script_tag_for_html_documents(self):
+ doc = """
+ <script type="text/javascript">
+ console.log("< < hey > > ");
+ </script>
+"""
+ encoded = BeautifulSoup(doc, "html.parser").encode()
+ assert b"< < hey > >" in encoded
+
+ def test_formatter_skips_style_tag_for_html_documents(self):
+ doc = """
+ <style type="text/css">
+ console.log("< < hey > > ");
+ </style>
+"""
+ encoded = BeautifulSoup(doc, "html.parser").encode()
+ assert b"< < hey > >" in encoded
+
+ def test_prettify_leaves_preformatted_text_alone(self):
+ soup = self.soup(
+ "<div>  foo  <pre>  \tbar\n  \n  </pre>  baz  <textarea> eee\nfff\t</textarea></div>"
+ )
+ # Everything outside the <pre> tag is reformatted, but everything
+ # inside is left alone.
+ assert (
+ "<div>\n foo\n <pre>  \tbar\n  \n  </pre>\n baz\n <textarea> eee\nfff\t</textarea>\n</div>\n"
+ == soup.div.prettify()
+ )
+
+ def test_prettify_handles_nested_string_literal_tags(self):
+ # Most of this markup is inside a <pre> tag, so prettify()
+ # only does three things to it:
+ # 1. Add a newline and a space between the <div> and the <pre>
+ # 2. Add a newline after the </pre>
+ # 3. Add a newline at the end.
+ #
+ # The contents of the <pre> tag are left completely alone. In
+ # particular, we don't start adding whitespace again once we
+ # encounter the first </pre> tag, because we know it's not
+ # the one that put us into string literal mode.
+ markup = """<div><pre><code>some +<script><pre>code</pre></script> for you +</code></pre></div>""" + + expect = """<div> + <pre><code>some +<script><pre>code</pre></script> for you +</code></pre> +</div> +""" + soup = self.soup(markup) + assert expect == soup.div.prettify() + + def test_prettify_accepts_formatter_function(self): + soup = BeautifulSoup("<html><body>foo</body></html>", "html.parser") + pretty = soup.prettify(formatter=lambda x: x.upper()) + assert "FOO" in pretty + + def test_prettify_outputs_unicode_by_default(self): + soup = self.soup("<a></a>") + assert str is type(soup.prettify()) + + def test_prettify_can_encode_data(self): + soup = self.soup("<a></a>") + assert bytes is type(soup.prettify("utf-8")) + + def test_html_entity_substitution_off_by_default(self): + markup = "<b>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</b>" + soup = self.soup(markup) + encoded = soup.b.encode("utf-8") + assert encoded == markup.encode("utf-8") + + def test_encoding_substitution(self): + # Here's the <meta> tag saying that a document is + # encoded in Shift-JIS. + meta_tag = ( + '<meta content="text/html; charset=x-sjis" ' 'http-equiv="Content-type"/>' + ) + soup = self.soup(meta_tag) + + # Parse the document, and the charset apprears unchanged. + assert soup.meta["content"] == "text/html; charset=x-sjis" + + # Encode the document into some encoding, and the encoding is + # substituted into the meta tag. + utf_8 = soup.encode("utf-8") + assert b"charset=utf-8" in utf_8 + + euc_jp = soup.encode("euc_jp") + assert b"charset=euc_jp" in euc_jp + + shift_jis = soup.encode("shift-jis") + assert b"charset=shift-jis" in shift_jis + + utf_16_u = soup.encode("utf-16").decode("utf-16") + assert "charset=utf-16" in utf_16_u + + def test_encoding_substitution_doesnt_happen_if_tag_is_strained(self): + markup = ( + '<head><meta content="text/html; charset=x-sjis" ' + 'http-equiv="Content-type"/></head><pre>foo</pre>' + ) + + # Beautiful Soup used to try to rewrite the meta tag even if the + # meta tag got filtered out by the strainer. This test makes + # sure that doesn't happen. + strainer = SoupStrainer("pre") + soup = self.soup(markup, parse_only=strainer) + assert soup.contents[0].name == "pre" + + +class TestPersistence(SoupTest): + "Testing features like pickle and deepcopy." + + def setup_method(self): + self.page = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" +"http://www.w3.org/TR/REC-html40/transitional.dtd"> +<html> +<head> +<meta http-equiv="Content-Type" content="text/html; charset=utf-8"> +<title>Beautiful Soup: We called him Tortoise because he taught us.</title> +<link rev="made" href="mailto:leonardr@segfault.org"> +<meta name="Description" content="Beautiful Soup: an HTML parser optimized for screen-scraping."> +<meta name="generator" content="Markov Approximation 1.4 (module: leonardr)"> +<meta name="author" content="Leonard Richardson"> +</head> +<body> +<a href="foo">foo</a> +<a href="foo"><b>bar</b></a> +</body> +</html>""" + self.tree = self.soup(self.page) + + def test_pickle_and_unpickle_identity(self): + # Pickling a tree, then unpickling it, yields a tree identical + # to the original. + dumped = pickle.dumps(self.tree, 2) + loaded = pickle.loads(dumped) + assert loaded.__class__ == BeautifulSoup + assert loaded.decode() == self.tree.decode() + + def test_deepcopy_identity(self): + # Making a deepcopy of a tree yields an identical tree. 
+ copied = copy.deepcopy(self.tree)
+ assert copied.decode() == self.tree.decode()
+
+ def test_copy_deeply_nested_document(self):
+ # This test verifies that copy and deepcopy don't involve any
+ # recursive function calls. If they did, this test would
+ # overflow the Python interpreter stack.
+ limit = sys.getrecursionlimit() + 1
+ markup = "<span>" * limit
+
+ soup = self.soup(markup)
+
+ copy.copy(soup)
+ copy.deepcopy(soup)
+
+ def test_copy_preserves_encoding(self):
+ soup = BeautifulSoup(b"<p>&nbsp;</p>", "html.parser")
+ encoding = soup.original_encoding
+ copy = soup.__copy__()
+ # The &nbsp; was converted to a literal non-breaking space at parse time.
+ assert "<p>\N{NO-BREAK SPACE}</p>" == str(copy)
+ assert encoding == copy.original_encoding
+
+ def test_copy_preserves_builder_information(self):
+ tag = self.soup("<p></p>").p
+
+ # Simulate a tag obtained from a source file.
+ tag.sourceline = 10
+ tag.sourcepos = 33
+
+ copied = tag.__copy__()
+
+ # The TreeBuilder object is no longer available, but information
+ # obtained from it gets copied over to the new Tag object.
+ assert tag.sourceline == copied.sourceline
+ assert tag.sourcepos == copied.sourcepos
+ assert tag.can_be_empty_element == copied.can_be_empty_element
+ assert tag.cdata_list_attributes == copied.cdata_list_attributes
+ assert tag.preserve_whitespace_tags == copied.preserve_whitespace_tags
+ assert tag.interesting_string_types == copied.interesting_string_types
+
+ def test_unicode_pickle(self):
+ # A tree containing Unicode characters can be pickled.
+ html = "<b>\N{SNOWMAN}</b>"
+ soup = self.soup(html)
+ dumped = pickle.dumps(soup, pickle.HIGHEST_PROTOCOL)
+ loaded = pickle.loads(dumped)
+ assert loaded.decode() == soup.decode()
+
+ def test_copy_navigablestring_is_not_attached_to_tree(self):
+ html = "<b>Foo<a></a></b><b>Bar</b>"
+ soup = self.soup(html)
+ s1 = soup.find(string="Foo")
+ s2 = copy.copy(s1)
+ assert s1 == s2
+ assert None is s2.parent
+ assert None is s2.next_element
+ assert None is not s1.next_sibling
+ assert None is s2.next_sibling
+ assert None is s2.previous_element
+
+ def test_copy_navigablestring_subclass_has_same_type(self):
+ html = "<b><!--Foo--></b>"
+ soup = self.soup(html)
+ s1 = soup.string
+ s2 = copy.copy(s1)
+ assert s1 == s2
+ assert isinstance(s2, Comment)
+
+ def test_copy_entire_soup(self):
+ html = "<div><b>Foo<a></a></b><b>Bar</b></div>end"
+ soup = self.soup(html)
+ soup_copy = copy.copy(soup)
+ assert soup == soup_copy
+
+ def test_copy_tag_copies_contents(self):
+ html = "<div class='a b c'><b>Foo<a></a></b><b>Bar</b></div>end"
+ soup = self.soup(html)
+ div = soup.div
+ div_copy = copy.copy(div)
+
+ # The two tags look the same, and evaluate to equal.
+ assert str(div) == str(div_copy)
+ assert div == div_copy
+
+ # But they're not the same object.
+ assert div is not div_copy
+
+ # And they don't have the same relation to the parse tree. The
+ # copy is not associated with a parse tree at all.
+ assert None is div_copy.parent
+ assert None is div_copy.previous_element
+ assert None is div_copy.find(string="Bar").next_element
+ assert None is not div.find(string="Bar").next_element
+
+ # Modifying one of the tag's multi-valued attributes
+ # doesn't modify the other.
+ assert div["class"] is not div_copy["class"] + div["class"].append("d") + assert "a b c d".split() == div["class"] + assert "a b c".split() == div_copy["class"] + assert isinstance(div_copy["class"], AttributeValueList) + + +class TestEquality(SoupTest): + + def test_comparison(self): + soup = self.soup("<a>string</a> <a>string</a>") + first_a, second_a = soup.find_all('a') + first_string, second_string = soup.find_all(string='string') + + # Tags with the same markup are equal. + assert first_a == second_a + + # NavigableStrings with the same content are equal, and also + # equal to a Python string with the same content... + assert first_string == second_string == "string" + + # ...but not equivalent to a bytestring with the same content. + assert first_string != b"string" + + def test_hash(self): + soup = self.soup("<a>string</a> <a>string</a>") + first_a, second_a = soup.find_all('a') + first_string, second_string = soup.find_all(string='string') + + # Tags with the same markup hash to the same value. + assert hash(first_a) == hash(second_a) + + # But they're not the same object. + assert id(first_a) != id(second_a) + + # NavigableStrings with the same contents hash to the value of + # the contents. + assert hash(first_string) == hash(second_string) == hash("string") diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/test_soup.py b/.venv/lib/python3.12/site-packages/bs4/tests/test_soup.py new file mode 100644 index 00000000..5f771a40 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/test_soup.py @@ -0,0 +1,602 @@ +# -*- coding: utf-8 -*- +"""Tests of Beautiful Soup as a whole.""" + +import logging +import pickle +import pytest +from typing import Iterable + +from bs4 import ( + BeautifulSoup, + GuessedAtParserWarning, + dammit, +) +from bs4.builder import ( + TreeBuilder, +) +from bs4.element import ( + AttributeValueList, + XMLAttributeDict, + Comment, + PYTHON_SPECIFIC_ENCODINGS, + Tag, + NavigableString, +) +from bs4.filter import SoupStrainer +from bs4.exceptions import ( + ParserRejectedMarkup, +) +from bs4._warnings import ( + MarkupResemblesLocatorWarning, +) + + +from . import ( + default_builder, + LXML_PRESENT, + SoupTest, +) +import warnings +from typing import Type + + +class TestConstructor(SoupTest): + def test_short_unicode_input(self): + data = "<h1>éé</h1>" + soup = self.soup(data) + assert "éé" == soup.h1.string + + def test_embedded_null(self): + data = "<h1>foo\0bar</h1>" + soup = self.soup(data) + assert "foo\0bar" == soup.h1.string + + def test_exclude_encodings(self): + utf8_data = "RäksmörgÃ¥s".encode("utf-8") + soup = self.soup(utf8_data, exclude_encodings=["utf-8"]) + assert "windows-1252" == soup.original_encoding + + def test_custom_builder_class(self): + # Verify that you can pass in a custom Builder class and + # it'll be instantiated with the appropriate keyword arguments. 
+ class Mock(object): + def __init__(self, **kwargs): + self.called_with = kwargs + self.is_xml = True + self.store_line_numbers = False + self.cdata_list_attributes = [] + self.preserve_whitespace_tags = [] + self.string_containers = {} + self.attribute_dict_class = XMLAttributeDict + self.attribute_value_list_class = AttributeValueList + + def initialize_soup(self, soup): + pass + + def feed(self, markup): + self.fed = markup + + def reset(self): + pass + + def ignore(self, ignore): + pass + + set_up_substitutions = can_be_empty_element = ignore + + def prepare_markup(self, *args, **kwargs): + yield ( + "prepared markup", + "original encoding", + "declared encoding", + "contains replacement characters", + ) + + kwargs = dict( + var="value", + # This is a deprecated BS3-era keyword argument, which + # will be stripped out. + convertEntities=True, + ) + with warnings.catch_warnings(record=True): + soup = BeautifulSoup("", builder=Mock, **kwargs) + assert isinstance(soup.builder, Mock) + assert dict(var="value") == soup.builder.called_with + assert "prepared markup" == soup.builder.fed + + # You can also instantiate the TreeBuilder yourself. In this + # case, that specific object is used and any keyword arguments + # to the BeautifulSoup constructor are ignored. + builder = Mock(**kwargs) + with warnings.catch_warnings(record=True) as w: + soup = BeautifulSoup( + "", + builder=builder, + ignored_value=True, + ) + msg = str(w[0].message) + assert msg.startswith( + "Keyword arguments to the BeautifulSoup constructor will be ignored." + ) + assert builder == soup.builder + assert kwargs == builder.called_with + + def test_parser_markup_rejection(self): + # If markup is completely rejected by the parser, an + # explanatory ParserRejectedMarkup exception is raised. + class Mock(TreeBuilder): + def feed(self, *args, **kwargs): + raise ParserRejectedMarkup("Nope.") + + def prepare_markup(self, markup, *args, **kwargs): + # We're going to try two different ways of preparing this markup, + # but feed() will reject both of them. + yield markup, None, None, False + yield markup, None, None, False + + + with pytest.raises(ParserRejectedMarkup) as exc_info: + BeautifulSoup("", builder=Mock) + assert ( + "The markup you provided was rejected by the parser. Trying a different parser or a different encoding may help." + in str(exc_info.value) + ) + + def test_cdata_list_attributes(self): + # Most attribute values are represented as scalars, but the + # HTML standard says that some attributes, like 'class' have + # space-separated lists as values. + markup = '<a id=" an id " class=" a class "></a>' + soup = self.soup(markup) + + # Note that the spaces are stripped for 'class' but not for 'id'. + a = soup.a + assert " an id " == a["id"] + assert ["a", "class"] == a["class"] + + # TreeBuilder takes an argument called 'multi_valued_attributes' which lets + # you customize or disable this. As always, you can customize the TreeBuilder + # by passing in a keyword argument to the BeautifulSoup constructor. + soup = self.soup(markup, builder=default_builder, multi_valued_attributes=None) + assert " a class " == soup.a["class"] + + # Here are two ways of saying that `id` is a multi-valued + # attribute in this context, but 'class' is not. + for switcheroo in ({"*": "id"}, {"a": "id"}): + with warnings.catch_warnings(record=True): + # This will create a warning about not explicitly + # specifying a parser, but we'll ignore it. 
+ soup = self.soup( + markup, builder=None, multi_valued_attributes=switcheroo + ) + a = soup.a + assert ["an", "id"] == a["id"] + assert " a class " == a["class"] + + def test_replacement_classes(self): + # Test the ability to pass in replacements for element classes + # which will be used when building the tree. + class TagPlus(Tag): + pass + + class StringPlus(NavigableString): + pass + + class CommentPlus(Comment): + pass + + soup = self.soup( + "<a><b>foo</b>bar</a><!--whee-->", + element_classes={ + Tag: TagPlus, + NavigableString: StringPlus, + Comment: CommentPlus, + }, + ) + + # The tree was built with TagPlus, StringPlus, and CommentPlus objects, + # rather than Tag, String, and Comment objects. + assert all( + isinstance(x, (TagPlus, StringPlus, CommentPlus)) for x in soup.descendants + ) + + def test_alternate_string_containers(self): + # Test the ability to customize the string containers for + # different types of tags. + class PString(NavigableString): + pass + + class BString(NavigableString): + pass + + soup = self.soup( + "<div>Hello.<p>Here is <b>some <i>bolded</i></b> text", + string_containers={ + "b": BString, + "p": PString, + }, + ) + + # The string before the <p> tag is a regular NavigableString. + assert isinstance(soup.div.contents[0], NavigableString) + + # The string inside the <p> tag, but not inside the <i> tag, + # is a PString. + assert isinstance(soup.p.contents[0], PString) + + # Every string inside the <b> tag is a BString, even the one that + # was also inside an <i> tag. + for s in soup.b.strings: + assert isinstance(s, BString) + + # Now that parsing was complete, the string_container_stack + # (where this information was kept) has been cleared out. + assert [] == soup.string_container_stack + + @pytest.mark.parametrize("bad_markup", [1, False, lambda x: False]) + def test_invalid_markup_type(self, bad_markup): + with pytest.raises(TypeError) as exc_info: + BeautifulSoup(bad_markup, "html.parser") + assert ( + f"Incoming markup is of an invalid type: {bad_markup!r}. Markup must be a string, a bytestring, or an open filehandle." + in str(exc_info.value) + ) + + +class TestOutput(SoupTest): + @pytest.mark.parametrize( + "eventual_encoding,actual_encoding", + [ + ("utf-8", "utf-8"), + ("utf-16", "utf-16"), + ], + ) + def test_decode_xml_declaration(self, eventual_encoding, actual_encoding): + # Most of the time, calling decode() on an XML document will + # give you a document declaration that mentions the encoding + # you intend to use when encoding the document as a + # bytestring. + soup = self.soup("<tag></tag>") + soup.is_xml = True + assert ( + f'<?xml version="1.0" encoding="{actual_encoding}"?>\n<tag></tag>' + == soup.decode(eventual_encoding=eventual_encoding) + ) + + @pytest.mark.parametrize( + "eventual_encoding", [x for x in PYTHON_SPECIFIC_ENCODINGS] + [None] + ) + def test_decode_xml_declaration_with_missing_or_python_internal_eventual_encoding( + self, eventual_encoding + ): + # But if you pass a Python internal encoding into decode(), or + # omit the eventual_encoding altogether, the document + # declaration won't mention any particular encoding. + soup = BeautifulSoup("<tag></tag>", "html.parser") + soup.is_xml = True + assert '<?xml version="1.0"?>\n<tag></tag>' == soup.decode( + eventual_encoding=eventual_encoding + ) + + def test(self): + # BeautifulSoup subclasses Tag and extends the decode() method. + # Make sure the other Tag methods which call decode() call + # it correctly. 
+ soup = self.soup("<tag></tag>") + assert b"<tag></tag>" == soup.encode(encoding="utf-8") + assert b"<tag></tag>" == soup.encode_contents(encoding="utf-8") + assert "<tag></tag>" == soup.decode_contents() + assert "<tag>\n</tag>\n" == soup.prettify() + + +class TestWarnings(SoupTest): + # Note that some of the tests in this class create BeautifulSoup + # objects directly rather than using self.soup(). That's + # because SoupTest.soup is defined in a different file, + # which will throw off the assertion in _assert_warning + # that the code that triggered the warning is in the same + # file as the test. + + def _assert_warning( + self, warnings: Iterable[warnings.WarningMessage], cls: Type[Warning] + ) -> warnings.WarningMessage: + for w in warnings: + if isinstance(w.message, cls): + assert w.filename == __file__ + return w + raise Exception("%s warning not found in %r" % (cls, warnings)) + + def _assert_no_parser_specified(self, w: Iterable[warnings.WarningMessage]) -> None: + warning = self._assert_warning(w, GuessedAtParserWarning) + message = str(warning.message) + assert message.startswith(GuessedAtParserWarning.MESSAGE[:60]) + + def test_warning_if_no_parser_specified(self): + with warnings.catch_warnings(record=True) as w: + BeautifulSoup("<a><b></b></a>") + self._assert_no_parser_specified(w) + + def test_warning_if_parser_specified_too_vague(self): + with warnings.catch_warnings(record=True) as w: + BeautifulSoup("<a><b></b></a>", "html") + self._assert_no_parser_specified(w) + + def test_no_warning_if_explicit_parser_specified(self): + with warnings.catch_warnings(record=True) as w: + self.soup("<a><b></b></a>") + assert [] == w + + def test_warning_if_strainer_filters_everything(self): + strainer = SoupStrainer(name="a", string="b") + with warnings.catch_warnings(record=True) as w: + self.soup("<a><b></b></a>", parse_only=strainer) + warning = self._assert_warning(w, UserWarning) + msg = str(warning.message) + assert msg.startswith("The given value for parse_only will exclude everything:") + + def test_parseOnlyThese_renamed_to_parse_only(self): + with warnings.catch_warnings(record=True) as w: + soup = BeautifulSoup( + "<a><b></b></a>", + "html.parser", + parseOnlyThese=SoupStrainer("b"), + ) + warning = self._assert_warning(w, DeprecationWarning) + msg = str(warning.message) + assert "parseOnlyThese" in msg + assert "parse_only" in msg + assert b"<b></b>" == soup.encode() + + def test_fromEncoding_renamed_to_from_encoding(self): + with warnings.catch_warnings(record=True) as w: + utf8 = b"\xc3\xa9" + soup = BeautifulSoup(utf8, "html.parser", fromEncoding="utf8") + warning = self._assert_warning(w, DeprecationWarning) + msg = str(warning.message) + assert "fromEncoding" in msg + assert "from_encoding" in msg + assert "utf8" == soup.original_encoding + + def test_unrecognized_keyword_argument(self): + with pytest.raises(TypeError): + self.soup("<a>", no_such_argument=True) + + @pytest.mark.parametrize( + "markup", + [ + "markup.html", + "markup.htm", + "markup.HTML", + "markup.txt", + "markup.xhtml", + "markup.xml", + "/home/user/file.txt", + r"c:\user\file.html" r"\\server\share\path\file.XhTml", + ], + ) + def test_resembles_filename_warning(self, markup): + # A warning is issued if the "markup" looks like the name of + # an HTML or text file, or a full path to a file on disk. 
+ with warnings.catch_warnings(record=True) as w:
+ BeautifulSoup(markup, "html.parser")
+ warning = self._assert_warning(w, MarkupResemblesLocatorWarning)
+ assert "looks more like a filename" in str(warning.message)
+
+ @pytest.mark.parametrize(
+ "markup",
+ [
+ "filename",
+ "markuphtml",
+ "markup.com",
+ "",
+ # Excluded due to an irrelevant file extension.
+ "markup.js",
+ "markup.jpg",
+ "markup.markup",
+ # Excluded due to the lack of any file extension.
+ "/home/user/file",
+ r"c:\user\file",
+ r"\\server\share\path\file",
+ # Excluded because of two consecutive slashes _and_ the
+ # colon.
+ "log message containing a url http://www.url.com/ right there.html",
+ # Excluded for containing various characters or combinations
+ # not usually found in filenames.
+ "two consecutive spaces.html",
+ "two//consecutive//slashes.html",
+ "looks/like/a/filename/but/oops/theres/a#comment.html",
+ "two\nlines.html",
+ "contains?.html",
+ "contains*.html",
+ "contains#.html",
+ "contains&.html",
+ "contains;.html",
+ "contains>.html",
+ "contains<.html",
+ "contains$.html",
+ "contains|.html",
+ "contains:.html",
+ ":-at-the-front.html",
+ ],
+ )
+ def test_resembles_filename_no_warning(self, markup):
+ # The 'looks more like a filename' warning is not issued if
+ # the markup looks like a bare string, a domain name, or a
+ # file that's not an HTML file.
+ with warnings.catch_warnings(record=True) as w:
+ self.soup(markup)
+ assert [] == w
+
+ def test_url_warning_with_bytes_url(self):
+ url = b"http://www.crummybytes.com/"
+ with warnings.catch_warnings(record=True) as warning_list:
+ BeautifulSoup(url, "html.parser")
+ warning = self._assert_warning(warning_list, MarkupResemblesLocatorWarning)
+ assert "looks more like a URL" in str(warning.message)
+ assert url not in str(warning.message).encode("utf8")
+
+ def test_url_warning_with_unicode_url(self):
+ url = "http://www.crummyunicode.com/"
+ with warnings.catch_warnings(record=True) as warning_list:
+ # Note: this url must differ from the bytes one, otherwise
+ # Python's warnings system swallows the second warning.
+ BeautifulSoup(url, "html.parser")
+ warning = self._assert_warning(warning_list, MarkupResemblesLocatorWarning)
+ assert "looks more like a URL" in str(warning.message)
+ assert url not in str(warning.message)
+
+ def test_url_warning_with_bytes_and_space(self):
+ # Here the markup contains something besides a URL, so no warning
+ # is issued.
+ with warnings.catch_warnings(record=True) as warning_list:
+ self.soup(b"http://www.crummybytes.com/ is great")
+ assert not any("looks more like a URL" in str(w.message) for w in warning_list)
+
+ def test_url_warning_with_unicode_and_space(self):
+ with warnings.catch_warnings(record=True) as warning_list:
+ self.soup("http://www.crummyunicode.com/ is great")
+ assert not any("looks more like a URL" in str(w.message) for w in warning_list)
+
+
+class TestSelectiveParsing(SoupTest):
+ def test_parse_with_soupstrainer(self):
+ markup = "No<b>Yes</b><a>No<b>Yes <c>Yes</c></b>"
+ strainer = SoupStrainer("b")
+ soup = self.soup(markup, parse_only=strainer)
+ assert soup.encode() == b"<b>Yes</b><b>Yes <c>Yes</c></b>"
+
+
+class TestNewTag(SoupTest):
+ """Test the BeautifulSoup.new_tag() method."""
+
+ def test_new_tag(self):
+ soup = self.soup("")
+ new_tag = soup.new_tag("foo", string="txt", bar="baz", attrs={"name": "a name"})
+ assert isinstance(new_tag, Tag)
+ assert "foo" == new_tag.name
+ assert new_tag.string == "txt"
+ assert dict(bar="baz", name="a name") == new_tag.attrs
+ assert None is new_tag.parent
+
+ # string can be None
+ new_tag = soup.new_tag("foo")
+ assert None is new_tag.string
+ new_tag = soup.new_tag("foo", string=None)
+ assert None is new_tag.string
+
+ # Or the empty string
+ new_tag = soup.new_tag("foo", string="")
+ assert "" == new_tag.string
+
+ @pytest.mark.skipif(
+ not LXML_PRESENT, reason="lxml not installed, cannot parse XML document"
+ )
+ def test_xml_tag_inherits_self_closing_rules_from_builder(self):
+ xml_soup = BeautifulSoup("", "xml")
+ xml_br = xml_soup.new_tag("br")
+ xml_p = xml_soup.new_tag("p")
+
+ # Both the <br> and <p> tags are empty-element, just because
+ # they have no contents.
+ assert b"<br/>" == xml_br.encode()
+ assert b"<p/>" == xml_p.encode()
+
+ def test_tag_inherits_self_closing_rules_from_builder(self):
+ html_soup = BeautifulSoup("", "html.parser")
+ html_br = html_soup.new_tag("br")
+ html_p = html_soup.new_tag("p")
+
+ # The HTML builder uses HTML's rules about which tags are
+ # empty-element tags, and the new tags reflect these rules.
+ assert b"<br/>" == html_br.encode()
+ assert b"<p></p>" == html_p.encode()
+
+
+class TestNewString(SoupTest):
+ """Test the BeautifulSoup.new_string() method."""
+
+ def test_new_string_creates_navigablestring(self):
+ soup = self.soup("")
+ s = soup.new_string("foo")
+ assert "foo" == s
+ assert isinstance(s, NavigableString)
+
+ def test_new_string_can_create_navigablestring_subclass(self):
+ soup = self.soup("")
+ s = soup.new_string("foo", Comment)
+ assert "foo" == s
+ assert isinstance(s, Comment)
+
+
+class TestPickle(SoupTest):
+ # Test our ability to pickle the BeautifulSoup object itself.
+
+ def test_normal_pickle(self):
+ soup = self.soup("<a>some markup</a>")
+ pickled = pickle.dumps(soup)
+ unpickled = pickle.loads(pickled)
+ assert "some markup" == unpickled.a.string
+
+ def test_pickle_with_no_builder(self):
+ # We had a bug that prevented pickling from working if
+ # the builder wasn't set.
+ soup = self.soup("some markup")
+ soup.builder = None
+ pickled = pickle.dumps(soup)
+ unpickled = pickle.loads(pickled)
+ assert "some markup" == unpickled.string
+
+
+class TestEncodingConversion(SoupTest):
+ # Test Beautiful Soup's ability to decode and encode from various
+ # encodings.
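The fixtures below pin down the basic decode/encode round trip. The same behavior in miniature (a sketch; original_encoding detection can vary depending on whether optional charset-detection libraries are installed):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup(b"<foo>Sacr\xc3\xa9 bleu!</foo>", "html.parser")
    print(soup.original_encoding)  # typically 'utf-8'
    print(soup.foo.string)         # 'Sacré bleu!'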
+ + def setup_method(self): + self.unicode_data = '<html><head><meta charset="utf-8"/></head><body><foo>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</foo></body></html>' + self.utf8_data = self.unicode_data.encode("utf-8") + # Just so you know what it looks like. + assert ( + self.utf8_data + == b'<html><head><meta charset="utf-8"/></head><body><foo>Sacr\xc3\xa9 bleu!</foo></body></html>' + ) + + def test_ascii_in_unicode_out(self): + # ASCII input is converted to Unicode. The original_encoding + # attribute is set to 'utf-8', a superset of ASCII. + chardet = dammit._chardet_dammit + logging.disable(logging.WARNING) + try: + + def noop(str): + return None + + # Disable chardet, which will realize that the ASCII is ASCII. + dammit._chardet_dammit = noop + ascii = b"<foo>a</foo>" + soup_from_ascii = self.soup(ascii) + unicode_output = soup_from_ascii.decode() + assert isinstance(unicode_output, str) + assert unicode_output == self.document_for(ascii.decode()) + assert soup_from_ascii.original_encoding.lower() == "utf-8" + finally: + logging.disable(logging.NOTSET) + dammit._chardet_dammit = chardet + + def test_unicode_in_unicode_out(self): + # Unicode input is left alone. The original_encoding attribute + # is not set. + soup_from_unicode = self.soup(self.unicode_data) + assert soup_from_unicode.decode() == self.unicode_data + assert soup_from_unicode.foo.string == "Sacr\xe9 bleu!" + assert soup_from_unicode.original_encoding is None + + def test_utf8_in_unicode_out(self): + # UTF-8 input is converted to Unicode. The original_encoding + # attribute is set. + soup_from_utf8 = self.soup(self.utf8_data) + assert soup_from_utf8.decode() == self.unicode_data + assert soup_from_utf8.foo.string == "Sacr\xe9 bleu!" + + def test_utf8_out(self): + # The internal data structures can be encoded as UTF-8. + soup_from_unicode = self.soup(self.unicode_data) + assert soup_from_unicode.encode("utf-8") == self.utf8_data diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/test_tag.py b/.venv/lib/python3.12/site-packages/bs4/tests/test_tag.py new file mode 100644 index 00000000..b83e829b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/test_tag.py @@ -0,0 +1,241 @@ +import warnings +from bs4.element import ( + Comment, + NavigableString, +) +from . import SoupTest + + +class TestTag(SoupTest): + """Test various methods of Tag which aren't so complicated they + need their own classes. + """ + + def test__should_pretty_print(self): + # Test the rules about when a tag should be pretty-printed. + tag = self.soup("").new_tag("a_tag") + + # No list of whitespace-preserving tags -> pretty-print + tag._preserve_whitespace_tags = None + assert True is tag._should_pretty_print(0) + + # List exists but tag is not on the list -> pretty-print + tag.preserve_whitespace_tags = ["some_other_tag"] + assert True is tag._should_pretty_print(1) + + # Indent level is None -> don't pretty-print + assert False is tag._should_pretty_print(None) + + # Tag is on the whitespace-preserving list -> don't pretty-print + tag.preserve_whitespace_tags = ["some_other_tag", "a_tag"] + assert False is tag._should_pretty_print(1) + + def test_len(self): + """The length of a Tag is its number of children.""" + soup = self.soup("<top>1<b>2</b>3</top>") + + # The BeautifulSoup object itself contains one element: the + # <top> tag. + assert len(soup.contents) == 1 + assert len(soup) == 1 + + # The <top> tag contains three elements: the text node "1", the + # <b> tag, and the text node "3". 
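+ # (Rough sketch with made-up markup: len() counts direct children
+ # only, not descendants, so
+ # >>> len(BeautifulSoup("<a><b>1</b><b>2</b></a>", "html.parser").a)
+ # is 2 -- the two <b> children -- even though <a> has four
+ # descendants in total.)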
+ assert len(soup.top) == 3
+ assert len(soup.top.contents) == 3
+
+ def test_member_access_invokes_find(self):
+ """Accessing a Python member .foo invokes find('foo')"""
+ soup = self.soup("<b><i></i></b>")
+ assert soup.b == soup.find("b")
+ assert soup.b.i == soup.find("b").find("i")
+ assert soup.a is None
+
+ def test_deprecated_member_access(self):
+ soup = self.soup("<b><i></i></b>")
+ with warnings.catch_warnings(record=True) as w:
+ tag = soup.bTag
+ assert soup.b == tag
+ assert (
+ '.bTag is deprecated, use .find("b") instead. If you really were looking for a tag called bTag, use .find("bTag")'
+ == str(w[0].message)
+ )
+
+ def test_has_attr(self):
+ """has_attr() checks for the presence of an attribute.
+
+ Please note: has_attr() is different from
+ __in__. has_attr() checks the tag's attributes and __in__
+ checks the tag's children.
+ """
+ soup = self.soup("<foo attr='bar'>")
+ assert soup.foo.has_attr("attr")
+ assert not soup.foo.has_attr("attr2")
+
+ def test_attributes_come_out_in_alphabetical_order(self):
+ markup = '<b a="1" z="5" m="3" f="2" y="4"></b>'
+ self.assertSoupEquals(markup, '<b a="1" f="2" m="3" y="4" z="5"></b>')
+
+ def test_string(self):
+ # A Tag that contains only a text node makes that node
+ # available as .string.
+ soup = self.soup("<b>foo</b>")
+ assert soup.b.string == "foo"
+
+ def test_empty_tag_has_no_string(self):
+ # A Tag with no children has no .string.
+ soup = self.soup("<b></b>")
+ assert soup.b.string is None
+
+ def test_tag_with_multiple_children_has_no_string(self):
+ # A Tag with multiple children has no .string.
+ soup = self.soup("<a>foo<b></b><b></b></b>")
+ assert soup.b.string is None
+
+ soup = self.soup("<a>foo<b></b>bar</b>")
+ assert soup.b.string is None
+
+ # Even if all the children are strings, due to trickery,
+ # it won't work--but this would be a good optimization.
+ soup = self.soup("<a>foo</b>")
+ soup.a.insert(1, "bar")
+ assert soup.a.string is None
+
+ def test_tag_with_recursive_string_has_string(self):
+ # A Tag with a single child which has a .string inherits that
+ # .string.
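+ # (Rough sketch with made-up markup: the delegation chains through
+ # any number of single-child tags, so
+ # >>> BeautifulSoup("<a><b><c>deep</c></b></a>", "html.parser").a.string
+ # gives 'deep'; it stops as soon as some tag on the path has more
+ # than one child.)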
+ soup = self.soup("<a><b>foo</b></a>") + assert soup.a.string == "foo" + assert soup.string == "foo" + + def test_lack_of_string(self): + """Only a Tag containing a single text node has a .string.""" + soup = self.soup("<b>f<i>e</i>o</b>") + assert soup.b.string is None + + soup = self.soup("<b></b>") + assert soup.b.string is None + + def test_all_text(self): + """Tag.text and Tag.get_text(sep=u"") -> all child text, concatenated""" + soup = self.soup("<a>a<b>r</b> <r> t </r></a>") + assert soup.a.text == "ar t " + assert soup.a.get_text(strip=True) == "art" + assert soup.a.get_text(",") == "a,r, , t " + assert soup.a.get_text(",", strip=True) == "a,r,t" + + def test_get_text_ignores_special_string_containers(self): + soup = self.soup("foo<!--IGNORE-->bar") + assert soup.get_text() == "foobar" + + assert soup.get_text(types=(NavigableString, Comment)) == "fooIGNOREbar" + assert soup.get_text(types=None) == "fooIGNOREbar" + + soup = self.soup("foo<style>CSS</style><script>Javascript</script>bar") + assert soup.get_text() == "foobar" + + def test_all_strings_ignores_special_string_containers(self): + soup = self.soup("foo<!--IGNORE-->bar") + assert ["foo", "bar"] == list(soup.strings) + + soup = self.soup("foo<style>CSS</style><script>Javascript</script>bar") + assert ["foo", "bar"] == list(soup.strings) + + def test_string_methods_inside_special_string_container_tags(self): + # Strings inside tags like <script> are generally ignored by + # methods like get_text, because they're not what humans + # consider 'text'. But if you call get_text on the <script> + # tag itself, those strings _are_ considered to be 'text', + # because there's nothing else you might be looking for. + + style = self.soup("<div>a<style>Some CSS</style></div>") + template = self.soup( + "<div>a<template><p>Templated <b>text</b>.</p><!--With a comment.--></template></div>" + ) + script = self.soup("<div>a<script><!--a comment-->Some text</script></div>") + + assert style.div.get_text() == "a" + assert list(style.div.strings) == ["a"] + assert style.div.style.get_text() == "Some CSS" + assert list(style.div.style.strings) == ["Some CSS"] + + # The comment is not picked up here. That's because it was + # parsed into a Comment object, which is not considered + # interesting by template.strings. + assert template.div.get_text() == "a" + assert list(template.div.strings) == ["a"] + assert template.div.template.get_text() == "Templated text." + assert list(template.div.template.strings) == ["Templated ", "text", "."] + + # The comment is included here, because it didn't get parsed + # into a Comment object--it's part of the Script string. + assert script.div.get_text() == "a" + assert list(script.div.strings) == ["a"] + assert script.div.script.get_text() == "<!--a comment-->Some text" + assert list(script.div.script.strings) == ["<!--a comment-->Some text"] + + +class TestMultiValuedAttributes(SoupTest): + """Test the behavior of multi-valued attributes like 'class'. + + The values of such attributes are always presented as lists. 
+ """ + + def test_single_value_becomes_list(self): + soup = self.soup("<a class='foo'>") + assert ["foo"] == soup.a["class"] + + def test_multiple_values_becomes_list(self): + soup = self.soup("<a class='foo bar'>") + assert ["foo", "bar"] == soup.a["class"] + + def test_multiple_values_separated_by_weird_whitespace(self): + soup = self.soup("<a class='foo\tbar\nbaz'>") + assert ["foo", "bar", "baz"] == soup.a["class"] + + def test_attributes_joined_into_string_on_output(self): + soup = self.soup("<a class='foo\tbar'>") + assert b'<a class="foo bar"></a>' == soup.a.encode() + + def test_get_attribute_list(self): + soup = self.soup("<a id='abc def'>") + assert ["abc def"] == soup.a.get_attribute_list("id") + assert [] == soup.a.get_attribute_list("no such attribute") + + def test_accept_charset(self): + soup = self.soup('<form accept-charset="ISO-8859-1 UTF-8">') + assert ["ISO-8859-1", "UTF-8"] == soup.form["accept-charset"] + + def test_cdata_attribute_applying_only_to_one_tag(self): + data = '<a accept-charset="ISO-8859-1 UTF-8"></a>' + soup = self.soup(data) + # We saw in another test that accept-charset is a cdata-list + # attribute for the <form> tag. But it's not a cdata-list + # attribute for any other tag. + assert "ISO-8859-1 UTF-8" == soup.a["accept-charset"] + + def test_customization(self): + # It's possible to change which attributes of which tags + # are treated as multi-valued attributes. + # + # Here, 'id' is a multi-valued attribute and 'class' is not. + # + # TODO: This code is in the builder and should be tested there. + soup = self.soup( + '<a class="foo" id="bar">', multi_valued_attributes={"*": "id"} + ) + assert soup.a["class"] == "foo" + assert soup.a["id"] == ["bar"] + + def test_hidden_tag_is_invisible(self): + # Setting .hidden on a tag makes it invisible in output, but + # leaves its contents visible. + # + # This is not a documented or supported feature of Beautiful + # Soup (e.g. NavigableString doesn't support .hidden even + # though it could), but some people use it and it's not + # hurting anything to verify that it keeps working. + # + soup = self.soup('<div id="1"><span id="2">a string</span></div>') + soup.span.hidden = True + assert '<div id="1">a string</div>' == str(soup.div) diff --git a/.venv/lib/python3.12/site-packages/bs4/tests/test_tree.py b/.venv/lib/python3.12/site-packages/bs4/tests/test_tree.py new file mode 100644 index 00000000..06d62981 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/bs4/tests/test_tree.py @@ -0,0 +1,1452 @@ +# -*- coding: utf-8 -*- +"""Tests for Beautiful Soup's tree traversal methods. + +The tree traversal methods are the main advantage of using Beautiful +Soup over just using a parser. + +Different parsers will build different Beautiful Soup trees given the +same markup, but all Beautiful Soup trees can be traversed with the +methods tested here. +""" + +import pytest +import re +import warnings +from bs4 import BeautifulSoup +from bs4.builder import builder_registry +from bs4.element import ( + AttributeResemblesVariableWarning, + CData, + Comment, + NavigableString, + Tag, +) +from bs4.filter import SoupStrainer +from . import ( + SoupTest, +) + + +class TestFind(SoupTest): + """Basic tests of the find() method. 
+ """ + + def test_find_tag(self): + soup = self.soup("<a>1</a><b>2</b><a>3</a><b>4</b>") + assert soup.find("b").string == "2" + + def test_unicode_text_find(self): + soup = self.soup("<h1>RäksmörgÃ¥s</h1>") + assert soup.find(string="RäksmörgÃ¥s") == "RäksmörgÃ¥s" + + def test_unicode_attribute_find(self): + soup = self.soup('<h1 id="RäksmörgÃ¥s">here it is</h1>') + str(soup) + assert "here it is" == soup.find(id="RäksmörgÃ¥s").text + + def test_find_everything(self): + """Test an optimization that finds all tags.""" + soup = self.soup("<a>foo</a><b>bar</b>") + assert 2 == len(soup.find_all()) + + def test_find_everything_with_name(self): + """Test an optimization that finds all tags with a given name.""" + soup = self.soup("<a>foo</a><b>bar</b><a>baz</a>") + assert 2 == len(soup.find_all("a")) + + def test_find_with_no_arguments(self): + soup = self.soup("<div></div><p></p>") + assert "div" == soup.find().name + assert "div" == soup.find("p").find_previous_sibling().name + assert "p" == soup.find("div").find_next_sibling().name + + def test_find_with_no_arguments_only_finds_tags(self): + soup = self.soup("text<div>text</div>text<p>text</p>") + assert "div" == soup.find().name + assert "div" == soup.find("p").find_previous_sibling().name + assert "p" == soup.find("div").find_next_sibling().name + + +class TestFindAll(SoupTest): + """Basic tests of the find_all() method.""" + + def test_find_all_with_no_arguments_only_finds_tags(self): + soup = self.soup("<body>text<div>text</div>text<p>text</p></body>") + assert 2 == len(soup.body.find_all()) + assert 1 == len(soup.find("p").find_previous_siblings()) + assert 1 == len(soup.find("div").find_next_siblings()) + + def test_find_all_text_nodes(self): + """You can search the tree for text nodes.""" + soup = self.soup("<html>Foo<b>bar</b>\xbb</html>") + # Exact match. + assert soup.find_all(string="bar") == ["bar"] + + # Match any of a number of strings. + assert soup.find_all(string=["Foo", "bar"]) == ["Foo", "bar"] + # Match a regular expression. + assert soup.find_all(string=re.compile(".*")) == ["Foo", "bar", "\xbb"] + # Match anything. + assert soup.find_all(string=True) == ["Foo", "bar", "\xbb"] + + def test_find_all_limit(self): + """You can limit the number of items returned by find_all.""" + soup = self.soup("<a>1</a><a>2</a><a>3</a><a>4</a><a>5</a>") + self.assert_selects(soup.find_all("a", limit=3), ["1", "2", "3"]) + self.assert_selects(soup.find_all("a", limit=1), ["1"]) + self.assert_selects(soup.find_all("a", limit=10), ["1", "2", "3", "4", "5"]) + + # A limit of 0 means no limit. + self.assert_selects(soup.find_all("a", limit=0), ["1", "2", "3", "4", "5"]) + + def test_calling_a_tag_is_calling_findall(self): + soup = self.soup("<a>1</a><b>2<a id='foo'>3</a></b>") + self.assert_selects(soup("a", limit=1), ["1"]) + self.assert_selects(soup.b(id="foo"), ["3"]) + + def test_find_all_with_self_referential_data_structure_does_not_cause_infinite_recursion( + self, + ): + soup = self.soup("<a></a>") + # Create a self-referential list. + selfref = [] + selfref.append(selfref) + + # Without special code in SoupStrainer, this would cause infinite + # recursion. + with warnings.catch_warnings(record=True) as w: + assert [] == soup.find_all(selfref) + [warning] = w + assert warning.filename == __file__ + msg = str(warning.message) + assert ( + msg + == "Ignoring nested list [[...]] to avoid the possibility of infinite recursion." 
+ ) + + def test_find_all_resultset(self): + """All find_all calls return a ResultSet""" + soup = self.soup("<a></a>") + result = soup.find_all("a") + assert hasattr(result, "source") + + result = soup.find_all(True) + assert hasattr(result, "source") + + result = soup.find_all(string="foo") + assert hasattr(result, "source") + + +class TestFindAllBasicNamespaces(SoupTest): + def test_find_by_namespaced_name(self): + soup = self.soup('<mathml:msqrt>4</mathml:msqrt><a svg:fill="red">') + assert "4" == soup.find("mathml:msqrt").string + assert "a" == soup.find(attrs={"svg:fill": "red"}).name + + +class TestFindAllByName(SoupTest): + """Test ways of finding tags by tag name.""" + + def setup_method(self) -> None: + self.tree = self.soup("""<a>First tag.</a> + <b>Second tag.</b> + <c>Third <a>Nested tag.</a> tag.</c>""") + + def test_find_all_by_tag_name(self): + # Find all the <a> tags. + self.assert_selects(self.tree.find_all("a"), ["First tag.", "Nested tag."]) + + def test_find_all_by_name_and_text(self): + self.assert_selects( + self.tree.find_all("a", string="First tag."), ["First tag."] + ) + + self.assert_selects( + self.tree.find_all("a", string=True), ["First tag.", "Nested tag."] + ) + + self.assert_selects( + self.tree.find_all("a", string=re.compile("tag")), + ["First tag.", "Nested tag."], + ) + + def test_find_all_on_non_root_element(self): + # You can call find_all on any node, not just the root. + self.assert_selects(self.tree.c.find_all("a"), ["Nested tag."]) + + def test_calling_element_invokes_find_all(self): + self.assert_selects(self.tree("a"), ["First tag.", "Nested tag."]) + + def test_find_all_by_tag_strainer(self): + self.assert_selects( + self.tree.find_all(SoupStrainer("a")), ["First tag.", "Nested tag."] + ) + + def test_find_all_by_tag_names(self): + self.assert_selects( + self.tree.find_all(["a", "b"]), ["First tag.", "Second tag.", "Nested tag."] + ) + + def test_find_all_by_tag_dict(self): + self.assert_selects( + self.tree.find_all({"a": True, "b": True}), + ["First tag.", "Second tag.", "Nested tag."], + ) + + def test_find_all_by_tag_re(self): + self.assert_selects( + self.tree.find_all(re.compile("^[ab]$")), + ["First tag.", "Second tag.", "Nested tag."], + ) + + def test_find_all_with_tags_matching_method(self): + # You can define an oracle method that determines whether + # a tag matches the search. + def id_matches_name(tag): + return tag.name == tag.get("id") + + tree = self.soup("""<a id="a">Match 1.</a> + <a id="1">Does not match.</a> + <b id="b">Match 2.</a>""") + + self.assert_selects(tree.find_all(id_matches_name), ["Match 1.", "Match 2."]) + + def test_find_with_multi_valued_attribute(self): + soup = self.soup( + "<div class='a b'>1</div><div class='a c'>2</div><div class='a d'>3</div>" + ) + r1 = soup.find("div", "a d") + r2 = soup.find("div", re.compile(r"a d")) + r3, r4 = soup.find_all("div", ["a b", "a d"]) + assert "3" == r1.string + assert "3" == r2.string + assert "1" == r3.string + assert "3" == r4.string + + +class TestFindAllByAttribute(SoupTest): + def test_find_all_by_attribute_name(self): + # You can pass in keyword arguments to find_all to search by + # attribute. + tree = self.soup(""" + <a id="first">Matching a.</a> + <a id="second"> + Non-matching <b id="first">Matching b.</b>a. 
+ </a>""") + self.assert_selects(tree.find_all(id="first"), ["Matching a.", "Matching b."]) + + def test_find_all_by_utf8_attribute_value(self): + peace = "×ולש".encode("utf8") + data = '<a title="×ולש"></a>'.encode("utf8") + soup = self.soup(data) + assert [soup.a] == soup.find_all(title=peace) + assert [soup.a] == soup.find_all(title=peace.decode("utf8")) + assert [soup.a], soup.find_all(title=[peace, "something else"]) + + def test_find_all_by_attribute_dict(self): + # You can pass in a dictionary as the argument 'attrs'. This + # lets you search for attributes like 'name' (a fixed argument + # to find_all) and 'class' (a reserved word in Python.) + tree = self.soup(""" + <a name="name1" class="class1">Name match.</a> + <a name="name2" class="class2">Class match.</a> + <a name="name3" class="class3">Non-match.</a> + <name1>A tag called 'name1'.</name1> + """) + + # This doesn't do what you want. + self.assert_selects(tree.find_all(name="name1"), ["A tag called 'name1'."]) + # This does what you want. + self.assert_selects(tree.find_all(attrs={"name": "name1"}), ["Name match."]) + + self.assert_selects(tree.find_all(attrs={"class": "class2"}), ["Class match."]) + + def test_find_all_by_class(self): + tree = self.soup(""" + <a class="1">Class 1.</a> + <a class="2">Class 2.</a> + <b class="1">Class 1.</b> + <c class="3 4">Class 3 and 4.</c> + """) + + # Passing in the class_ keyword argument will search against + # the 'class' attribute. + self.assert_selects(tree.find_all("a", class_="1"), ["Class 1."]) + self.assert_selects(tree.find_all("c", class_="3"), ["Class 3 and 4."]) + self.assert_selects(tree.find_all("c", class_="4"), ["Class 3 and 4."]) + + # Passing in a string to 'attrs' will also search the CSS class. + self.assert_selects(tree.find_all("a", "1"), ["Class 1."]) + self.assert_selects(tree.find_all(attrs="1"), ["Class 1.", "Class 1."]) + self.assert_selects(tree.find_all("c", "3"), ["Class 3 and 4."]) + self.assert_selects(tree.find_all("c", "4"), ["Class 3 and 4."]) + + def test_find_by_class_when_multiple_classes_present(self): + tree = self.soup("<gar class='foo bar'>Found it</gar>") + + f = tree.find_all("gar", class_=re.compile("o")) + self.assert_selects(f, ["Found it"]) + + f = tree.find_all("gar", class_=re.compile("a")) + self.assert_selects(f, ["Found it"]) + + # If the search fails to match the individual strings "foo" and "bar", + # it will be tried against the combined string "foo bar". + f = tree.find_all("gar", class_=re.compile("o b")) + self.assert_selects(f, ["Found it"]) + + def test_find_all_with_non_dictionary_for_attrs_finds_by_class(self): + soup = self.soup("<a class='bar'>Found it</a>") + + self.assert_selects(soup.find_all("a", re.compile("ba")), ["Found it"]) + + def big_attribute_value(value): + return len(value) > 3 + + self.assert_selects(soup.find_all("a", big_attribute_value), []) + + def small_attribute_value(value): + return len(value) <= 3 + + self.assert_selects(soup.find_all("a", small_attribute_value), ["Found it"]) + + def test_find_all_with_string_for_attrs_finds_multiple_classes(self): + soup = self.soup('<a class="foo bar"></a><a class="foo"></a>') + a, a2 = soup.find_all("a") + assert [a, a2], soup.find_all("a", "foo") + assert [a], soup.find_all("a", "bar") + + # If you specify the class as a string that contains a + # space, only that specific value will be found. 
+ assert [a] == soup.find_all("a", class_="foo bar")
+ assert [a] == soup.find_all("a", "foo bar")
+ assert [] == soup.find_all("a", "bar foo")
+
+ def test_find_all_by_attribute_soupstrainer(self):
+ tree = self.soup("""
+ <a id="first">Match.</a>
+ <a id="second">Non-match.</a>""")
+
+ strainer = SoupStrainer(attrs={"id": "first"})
+ self.assert_selects(tree.find_all(strainer), ["Match."])
+
+ def test_find_all_with_missing_attribute(self):
+ # You can pass in None as the value of an attribute to find_all.
+ # This will match tags that do not have that attribute set.
+ tree = self.soup("""<a id="1">ID present.</a>
+ <a>No ID present.</a>
+ <a id="">ID is empty.</a>""")
+ self.assert_selects(tree.find_all("a", id=None), ["No ID present."])
+
+ def test_find_all_with_defined_attribute(self):
+ # You can pass in True as the value of an attribute to find_all.
+ # This will match tags that have that attribute set to any value.
+ tree = self.soup("""<a id="1">ID present.</a>
+ <a>No ID present.</a>
+ <a id="">ID is empty.</a>""")
+ self.assert_selects(tree.find_all(id=True), ["ID present.", "ID is empty."])
+
+ def test_find_all_with_numeric_attribute(self):
+ # If you search for a number, it's treated as a string.
+ tree = self.soup("""<a id=1>Unquoted attribute.</a>
+ <a id="1">Quoted attribute.</a>""")
+
+ expected = ["Unquoted attribute.", "Quoted attribute."]
+ self.assert_selects(tree.find_all(id=1), expected)
+ self.assert_selects(tree.find_all(id="1"), expected)
+
+ def test_find_all_with_list_attribute_values(self):
+ # You can pass a list of attribute values instead of just one,
+ # and you'll get tags that match any of the values.
+ tree = self.soup("""<a id="1">1</a>
+ <a id="2">2</a>
+ <a id="3">3</a>
+ <a>No ID.</a>""")
+ self.assert_selects(tree.find_all(id=["1", "3", "4"]), ["1", "3"])
+
+ # If you pass in an empty list, you get nothing.
+ self.assert_selects(tree.find_all(id=[]), [])
+
+ def test_find_all_with_regular_expression_attribute_value(self):
+ # You can pass a regular expression as an attribute value, and
+ # you'll get tags whose values for that attribute match the
+ # regular expression.
+ tree = self.soup("""<a id="a">One a.</a>
+ <a id="aa">Two as.</a>
+ <a id="ab">Mixed as and bs.</a>
+ <a id="b">One b.</a>
+ <a>No ID.</a>""")
+
+ self.assert_selects(tree.find_all(id=re.compile("^a+$")), ["One a.", "Two as."])
+
+ def test_find_by_name_and_containing_string(self):
+ soup = self.soup("<b>foo</b><b>bar</b><a>foo</a>")
+ a = soup.a
+
+ assert [a] == soup.find_all("a", string="foo")
+ assert [] == soup.find_all("a", string="bar")
+
+ def test_find_by_name_and_containing_string_when_string_is_buried(self):
+ soup = self.soup("<a>foo</a><a><b><c>foo</c></b></a>")
+ assert soup.find_all("a") == soup.find_all("a", string="foo")
+
+ def test_find_by_attribute_and_containing_string(self):
+ soup = self.soup('<b id="1">foo</b><a id="2">foo</a>')
+ a = soup.a
+
+ assert [a] == soup.find_all(id=2, string="foo")
+ assert [] == soup.find_all(id=1, string="bar")
+
+
+class TestSmooth(SoupTest):
+ """Test Tag.smooth."""
+
+ def test_smooth(self):
+ soup = self.soup("<div>a</div>")
+ div = soup.div
+ div.append("b")
+ div.append("c")
+ div.append(Comment("Comment 1"))
+ div.append(Comment("Comment 2"))
+ div.append("d")
+ builder = self.default_builder()
+ span = Tag(soup, builder, "span")
+ span.append("1")
+ span.append("2")
+ div.append(span)
+
+ # At this point the tree has a bunch of adjacent
+ # NavigableStrings. This is normal, but it has no meaning in
+ # terms of HTML, so we may want to smooth things out for
+ # output.
+
+ # Since the <span> tag has two children, its .string is None.
+ assert None is div.span.string
+
+ assert 7 == len(div.contents)
+ div.smooth()
+ assert 5 == len(div.contents)
+
+ # The three strings at the beginning of div.contents have been
+ # merged into one string.
+ #
+ assert "abc" == div.contents[0]
+
+ # The call is recursive -- the <span> tag was also smoothed.
+ assert "12" == div.span.string
+
+ # The two comments have _not_ been merged, even though
+ # comments are strings. Merging comments would change the
+ # meaning of the HTML.
+ assert "Comment 1" == div.contents[1]
+ assert "Comment 2" == div.contents[2]
+
+
+class TestIndex(SoupTest):
+ """Test Tag.index"""
+
+ def test_index(self):
+ tree = self.soup("""<div>
+ <a>Identical</a>
+ <b>Not identical</b>
+ <a>Identical</a>
+
+ <c><d>Identical with child</d></c>
+ <b>Also not identical</b>
+ <c><d>Identical with child</d></c>
+ </div>""")
+ div = tree.div
+ for i, element in enumerate(div.contents):
+ assert i == div.index(element)
+ with pytest.raises(ValueError):
+ tree.index(1)
+
+
+class TestParentOperations(SoupTest):
+ """Test navigation and searching through an element's parents."""
+
+ def setup_method(self) -> None:
+ self.tree = self.soup("""<ul id="empty"></ul>
+ <ul id="top">
+ <ul id="middle">
+ <ul id="bottom">
+ <b id="start">Start here</b>
+ </ul>
+ </ul>""")
+ self.start = self.tree.b
+
+ def test_parent(self):
+ assert self.start.parent["id"] == "bottom"
+ assert self.start.parent.parent["id"] == "middle"
+ assert self.start.parent.parent.parent["id"] == "top"
+
+ def test_parent_of_top_tag_is_soup_object(self):
+ top_tag = self.tree.contents[0]
+ assert top_tag.parent == self.tree
+
+ def test_soup_object_has_no_parent(self):
+ assert None is self.tree.parent
+
+ def test_find_parents(self):
+ self.assert_selects_ids(
+ self.start.find_parents("ul"), ["bottom", "middle", "top"]
+ )
+ self.assert_selects_ids(self.start.find_parents("ul", id="middle"), ["middle"])
+ assert self.start.find_parents(id="start") == []
+
+ def test_find_parent(self):
+ # assert self.start.find_parent('ul')['id'] == 'bottom'
+ assert self.start.find_parent("ul", id="top")["id"] == "top"
+
+ assert self.start.find_parent(id="start") is None
+
+ def test_parent_of_text_element(self):
+ text = self.tree.find(string="Start here")
+ assert text.parent.name == "b"
+
+ def test_text_element_find_parent(self):
+ text = self.tree.find(string="Start here")
+ assert text.find_parent("ul")["id"] == "bottom"
+
+ def test_parent_generator(self):
+ parents = [
+ parent["id"]
+ for parent in self.start.parents
+ if parent is not None and "id" in parent.attrs
+ ]
+ assert parents == ["bottom", "middle", "top"]
+
+ def test_self_and_parent_generator(self):
+ results = [
+ parent["id"]
+ for parent in self.start.self_and_parents
+ if parent is not None and "id" in parent.attrs
+ ]
+ assert results == ["start", "bottom", "middle", "top"]
+
+
+class ProximityTest(SoupTest):
+ def setup_method(self) -> None:
+ self.tree = self.soup(
+ '<html id="start"><head id="headtag"></head><body id="bodytag"><b id="1">One</b><b id="2">Two</b><b id="3">Three</b></body></html>'
+ )
+
+
+class TestNextOperations(ProximityTest):
+ def setup_method(self) -> None:
+ super(TestNextOperations, self).setup_method()
+ self.start = self.tree.b
+
+ def test_next(self):
+ assert self.start.next_element == "One"
+ assert self.start.next_element.next_element["id"] == "2"
== "2" + + def test_next_of_last_item_is_none(self): + last = self.tree.find(string="Three") + assert last.next_element is None + + def test_next_of_root_is_none(self): + # The document root is outside the next/previous chain. + assert self.tree.next_element is None + + def test_find_all_next(self): + self.assert_selects(self.start.find_all_next("b"), ["Two", "Three"]) + self.start.find_all_next(id=3) + self.assert_selects(self.start.find_all_next(id=3), ["Three"]) + + def test_find_next(self): + assert self.start.find_next("b")["id"] == "2" + assert self.start.find_next(string="Three") == "Three" + + def test_find_next_for_text_element(self): + text = self.tree.find(string="One") + assert text.find_next("b").string == "Two" + self.assert_selects(text.find_all_next("b"), ["Two", "Three"]) + + def test_next_generators(self): + start = self.tree.find(string="Two") + successors = [node for node in start.next_elements] + # There are two successors: the final <b> tag and its text contents. + tag, contents = successors + assert tag["id"] == "3" + assert contents == "Three" + + successors2 = [node for node in start.self_and_next_elements] + assert successors2[1:] == successors + assert successors2[0] == start + + +class TestPreviousOperations(ProximityTest): + def setup_method(self) -> None: + super(TestPreviousOperations, self).setup_method() + self.end = self.tree.find(string="Three") + + def test_previous(self): + assert self.end.previous_element["id"] == "3" + assert self.end.previous_element.previous_element == "Two" + + def test_previous_of_first_item_is_none(self): + first = self.tree.find("html") + assert first.previous_element is None + + def test_previous_of_root_is_none(self): + # The document root is outside the next/previous chain. + assert self.tree.previous_element is None + + def test_find_all_previous(self): + # The <b> tag containing the "Three" node is the predecessor + # of the "Three" node itself, which is why "Three" shows up + # here. + self.assert_selects(self.end.find_all_previous("b"), ["Three", "Two", "One"]) + self.assert_selects(self.end.find_all_previous(id=1), ["One"]) + + def test_find_previous(self): + assert self.end.find_previous("b")["id"] == "3" + assert self.end.find_previous(string="One") == "One" + + def test_find_previous_for_text_element(self): + text = self.tree.find(string="Three") + assert text.find_previous("b").string == "Three" + self.assert_selects(text.find_all_previous("b"), ["Three", "Two", "One"]) + + def test_previous_generators(self): + start = self.tree.find("b", string="One") + self.assert_selects_ids(start.previous_elements, ["bodytag", "headtag", 'start']) + self.assert_selects_ids(start.self_and_previous_elements, ["1", "bodytag", "headtag", "start"]) + + +class SiblingTest(SoupTest): + def setup_method(self) -> None: + markup = """<html> + <span id="1"> + <span id="1.1"></span> + </span> + <span id="2"> + <span id="2.1"></span> + </span> + <span id="3"> + <span id="3.1"></span> + </span> + <span id="4"></span> + </html>""" + # All that whitespace looks good but makes the tests more + # difficult. Get rid of it. 
+ markup = re.compile(r"\n\s*").sub("", markup) + self.tree = self.soup(markup) + + +class TestNextSibling(SiblingTest): + def setup_method(self) -> None: + super(TestNextSibling, self).setup_method() + self.start = self.tree.find(id="1") + + def test_next_sibling_of_root_is_none(self): + assert self.tree.next_sibling is None + + def test_next_sibling(self): + assert self.start.next_sibling["id"] == "2" + assert self.start.next_sibling.next_sibling["id"] == "3" + + # Note the difference between next_sibling and next_element. + assert self.start.next_element["id"] == "1.1" + + def test_next_sibling_may_not_exist(self): + assert self.tree.html.next_sibling is None + + nested_span = self.tree.find(id="1.1") + assert nested_span.next_sibling is None + + last_span = self.tree.find(id="4") + assert last_span.next_sibling is None + + def test_find_next_sibling(self): + assert self.start.find_next_sibling("span")["id"] == "2" + + def test_next_siblings(self): + self.assert_selects_ids(self.start.find_next_siblings("span"), ["2", "3", "4"]) + + self.assert_selects_ids(self.start.find_next_siblings(id="3"), ["3"]) + + def test_next_siblings_generators(self): + self.assert_selects_ids(self.start.next_siblings, ["2", "3", "4"]) + self.assert_selects_ids(self.start.self_and_next_siblings, ["1", "2", "3", "4"]) + + def test_next_sibling_for_text_element(self): + soup = self.soup("Foo<b>bar</b>baz") + start = soup.find(string="Foo") + assert start.next_sibling.name == "b" + assert start.next_sibling.next_sibling == "baz" + + self.assert_selects(start.find_next_siblings("b"), ["bar"]) + assert start.find_next_sibling(string="baz") == "baz" + assert start.find_next_sibling(string="nonesuch") is None + + +class TestPreviousSibling(SiblingTest): + def setup_method(self) -> None: + super(TestPreviousSibling, self).setup_method() + self.end = self.tree.find(id="4") + + def test_previous_sibling_of_root_is_none(self): + assert self.tree.previous_sibling is None + + def test_previous_sibling(self): + assert self.end.previous_sibling["id"] == "3" + assert self.end.previous_sibling.previous_sibling["id"] == "2" + + # Note the difference between previous_sibling and previous_element. 
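+ # (Rough sketch with made-up markup: in "<a><b>1</b></a><c></c>",
+ # c.previous_sibling is the <a> tag, but c.previous_element is the
+ # text node "1" -- the last node in document order before <c>.)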
+ assert self.end.previous_element["id"] == "3.1" + + def test_previous_sibling_may_not_exist(self): + assert self.tree.html.previous_sibling is None + + nested_span = self.tree.find(id="1.1") + assert nested_span.previous_sibling is None + + first_span = self.tree.find(id="1") + assert first_span.previous_sibling is None + + def test_find_previous_sibling(self): + assert self.end.find_previous_sibling("span")["id"] == "3" + + def test_previous_siblings(self): + self.assert_selects_ids( + self.end.find_previous_siblings("span"), ["3", "2", "1"] + ) + + self.assert_selects_ids(self.end.find_previous_siblings(id="1"), ["1"]) + + def test_previous_siblings_generators(self): + self.assert_selects_ids(self.end.previous_siblings, ["3", "2", "1"]) + self.assert_selects_ids(self.end.self_and_previous_siblings, ["4", "3", "2", "1"]) + + def test_previous_sibling_for_text_element(self): + soup = self.soup("Foo<b>bar</b>baz") + start = soup.find(string="baz") + assert start.previous_sibling.name == "b" + assert start.previous_sibling.previous_sibling == "Foo" + + self.assert_selects(start.find_previous_siblings("b"), ["bar"]) + assert start.find_previous_sibling(string="Foo") == "Foo" + assert start.find_previous_sibling(string="nonesuch") is None + + +class TestTreeModification(SoupTest): + def test_attribute_modification(self): + soup = self.soup('<a id="1"></a>') + soup.a["id"] = 2 + assert soup.decode() == self.document_for('<a id="2"></a>') + del soup.a["id"] + assert soup.decode() == self.document_for("<a></a>") + soup.a["id2"] = "foo" + assert soup.decode() == self.document_for('<a id2="foo"></a>') + + def test_new_tag_creation(self): + builder = builder_registry.lookup("html")() + soup = self.soup("<body></body>", builder=builder) + a = Tag(soup, builder, "a") + ol = Tag(soup, builder, "ol") + a["href"] = "http://foo.com/" + soup.body.insert(0, a) + soup.body.insert(1, ol) + assert ( + soup.body.encode() + == b'<body><a href="http://foo.com/"></a><ol></ol></body>' + ) + + def test_append_to_contents_moves_tag(self): + doc = """<p id="1">Don't leave me <b>here</b>.</p> + <p id="2">Don\'t leave!</p>""" + soup = self.soup(doc) + second_para = soup.find(id="2") + bold = soup.b + + # Move the <b> tag to the end of the second paragraph. + soup.find(id="2").append(soup.b) + + # The <b> tag is now a child of the second paragraph. 
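+ # (Sketch: append() moves an element that already lives in the tree
+ # rather than copying it -- the tag is first detached from its old
+ # parent, so it ends up with exactly one parent afterwards.)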
+ assert bold.parent == second_para + + assert soup.decode() == self.document_for( + '<p id="1">Don\'t leave me .</p>\n' '<p id="2">Don\'t leave!<b>here</b></p>' + ) + + def test_insertion_returns_inserted_things(self): + soup = self.soup("<html></html>") + html = soup.find('html') + head = html.append(soup.new_tag('head')) + assert head.name == 'head' + + [title] = head.insert(0, soup.new_tag('title')) + assert title.name == 'title' + + text5 = title.append('5') + assert text5 == '5' + text34 = text5.insert_before('3', '4') + assert text34 == ['3', '4'] + text67 = text5.insert_after('6', '7') + assert text67 == ['6', '7'] + text89 = title.extend(['8', '9']) + assert text89 == ['8', '9'] + assert title.get_text() == '3456789' + + def test_replace_with_returns_thing_that_was_replaced(self): + text = "<a></a><b><c></c></b>" + soup = self.soup(text) + a = soup.a + new_a = a.replace_with(soup.c) + assert a == new_a + + def test_unwrap_returns_thing_that_was_replaced(self): + text = "<a><b></b><c></c></a>" + soup = self.soup(text) + a = soup.a + new_a = a.unwrap() + assert a == new_a + + def test_replace_with_and_unwrap_give_useful_exception_when_tag_has_no_parent(self): + soup = self.soup("<a><b>Foo</b></a><c>Bar</c>") + a = soup.a + a.extract() + assert None is a.parent + with pytest.raises(ValueError): + a.unwrap() + with pytest.raises(ValueError): + a.replace_with(soup.c) + + def test_replace_tag_with_itself(self): + text = "<a><b></b><c>Foo<d></d></c></a><a><e></e></a>" + soup = self.soup(text) + c = soup.c + result = soup.c.replace_with(c) + assert result == c + assert soup.decode() == self.document_for(text) + + def test_replace_tag_with_its_parent_raises_exception(self): + text = "<a><b></b></a>" + soup = self.soup(text) + with pytest.raises(ValueError): + soup.b.replace_with(soup.a) + + def test_insert_tag_into_itself_raises_exception(self): + text = "<a><b></b></a>" + soup = self.soup(text) + with pytest.raises(ValueError): + soup.a.insert(0, soup.a) + + def test_insert_multiple_elements(self): + soup = self.soup("<p>And now, a word:</p><p>And we're back.</p>") + p2, p3 = soup.insert(1, soup.new_tag("p", string="p2"), soup.new_tag("p", string="p3")) + assert "p2" == p2.string + assert "p3" == p3.string + + p1, p2, p3, p4 = list(soup.children) + assert "And now, a word:" == p1.string + assert "p2" == p2.string + assert "p3" == p3.string + assert "And we're back." == p4.string + + def test_insert_beautifulsoup_object_inserts_children(self): + """Inserting one BeautifulSoup object into another actually inserts all + of its children -- you'll never combine BeautifulSoup objects. + """ + soup = self.soup("<p>And now, a word:</p><p>And we're back.</p>") + + text = "<p>p2</p><p>p3</p>" + to_insert = self.soup(text) + p2, p3 = soup.insert(1, to_insert) + assert "p2" == p2.string + assert "p3" == p3.string + + for i in soup.descendants: + assert not isinstance(i, BeautifulSoup) + + p1, p2, p3, p4 = list(soup.children) + assert "And now, a word:" == p1.string + assert "p2" == p2.string + assert "p3" == p3.string + assert "And we're back." == p4.string + + def test_replace_with_maintains_next_element_throughout(self): + soup = self.soup("<p><a>one</a><b>three</b></p>") + a = soup.a + # Make it so the <a> tag has two text children. + a.insert(1, "two") + + # Now replace each one with the empty string. + left, right = a.contents + left.replace_with("") + right.replace_with("") + + # The <b> tag is still connected to the tree. 
+ assert "three" == soup.b.string + + def test_replace_final_node(self): + soup = self.soup("<b>Argh!</b>") + soup.find(string="Argh!").replace_with("Hooray!") + new_text = soup.find(string="Hooray!") + b = soup.b + assert new_text.previous_element == b + assert new_text.parent == b + assert new_text.previous_element.next_element == new_text + assert new_text.next_element is None + + def test_consecutive_text_nodes(self): + # A builder should never create two consecutive text nodes, + # but if you insert one next to another, Beautiful Soup will + # handle it correctly. + soup = self.soup("<a><b>Argh!</b><c></c></a>") + soup.b.insert(1, "Hooray!") + + assert soup.decode() == self.document_for("<a><b>Argh!Hooray!</b><c></c></a>") + + new_text = soup.find(string="Hooray!") + assert new_text.previous_element == "Argh!" + assert new_text.previous_element.next_element == new_text + + assert new_text.previous_sibling == "Argh!" + assert new_text.previous_sibling.next_sibling == new_text + + assert new_text.next_sibling is None + assert new_text.next_element == soup.c + + def test_insert_string(self): + soup = self.soup("<a></a>") + soup.a.insert(0, "bar") + soup.a.insert(0, "foo") + # The string were added to the tag. + assert ["foo", "bar"] == soup.a.contents + # And they were converted to NavigableStrings. + assert soup.a.contents[0].next_element == "bar" + + def test_append(self): + soup = self.soup("<b>1</b>") + result = soup.b.append("2") + assert result == "2" + assert soup.b.decode() == "<b>12</b>" + + def test_insert_tag(self): + builder = self.default_builder() + soup = self.soup("<a><b>Find</b><c>lady!</c><d></d></a>", builder=builder) + magic_tag = Tag(soup, builder, "magictag") + magic_tag.insert(0, "the") + soup.a.insert(1, magic_tag) + + assert soup.decode() == self.document_for( + "<a><b>Find</b><magictag>the</magictag><c>lady!</c><d></d></a>" + ) + + # Make sure all the relationships are hooked up correctly. 
+ b_tag = soup.b + assert b_tag.next_sibling == magic_tag + assert magic_tag.previous_sibling == b_tag + + find = b_tag.find(string="Find") + assert find.next_element == magic_tag + assert magic_tag.previous_element == find + + c_tag = soup.c + assert magic_tag.next_sibling == c_tag + assert c_tag.previous_sibling == magic_tag + + the = magic_tag.find(string="the") + assert the.parent == magic_tag + assert the.next_element == c_tag + assert c_tag.previous_element == the + + def test_insert_into_the_current_location(self): + data = "<a>b<c></c>d</a>" + soup = self.soup(data) + soup.a.insert(1, soup.c) + assert data == soup.decode() + + def test_append_child_thats_already_at_the_end(self): + data = "<a><b></b></a>" + soup = self.soup(data) + soup.a.append(soup.b) + assert data == soup.decode() + + def test_extend_with_a_list_of_elements(self): + data = "<a><b><c><d><e><f><g></g></f></e></d></c></b></a>" + soup = self.soup(data) + elements = [soup.g, soup.f, soup.e, soup.d, soup.c, soup.b] + soup.a.extend(elements) + assert "<a><g></g><f></f><e></e><d></d><c></c><b></b></a>" == soup.decode() + + def test_extend_with_a_list_of_strings(self): + data = "<a></a>" + soup = self.soup(data) + elements = ["b", "c", NavigableString("d"), "e"] + soup.a.extend(elements) + assert "<a>bcde</a>" == soup.decode() + + @pytest.mark.parametrize("get_tags", [lambda tag: tag, lambda tag: tag.contents]) + def test_extend_with_another_tags_contents(self, get_tags): + data = '<body><div id="d1"><a>1</a><a>2</a><a>3</a><a>4</a></div><div id="d2"></div></body>' + soup = self.soup(data) + d1 = soup.find("div", id="d1") + d2 = soup.find("div", id="d2") + tags = get_tags(d1) + d2.extend(tags) + assert '<div id="d1"></div>' == d1.decode() + assert '<div id="d2"><a>1</a><a>2</a><a>3</a><a>4</a></div>' == d2.decode() + + @pytest.mark.parametrize( + "string_source,result", + ( + [lambda soup: soup.a.string, "<a></a><b>1</b>"], + [lambda soup: "abcde", "<a>1</a><b>abcde</b>"], + ), + ) + def test_extend_with_a_single_non_tag_element(self, string_source, result): + data = "<div><a>1</a><b></b></div>" + soup = self.soup(data) + with warnings.catch_warnings(record=True) as w: + string = string_source(soup) + soup.b.extend(string) + assert soup.div.decode_contents() == result + [warning] = w + assert warning.filename == __file__ + msg = str(warning.message) + assert ( + msg + == "A single non-Tag item was passed into Tag.extend. Use Tag.append instead." + ) + + def test_move_tag_to_beginning_of_parent(self): + data = "<a><b></b><c></c><d></d></a>" + soup = self.soup(data) + soup.a.insert(0, soup.d) + assert "<a><d></d><b></b><c></c></a>" == soup.decode() + + def test_insert_works_on_empty_element_tag(self): + # This is a little strange, since most HTML parsers don't allow + # markup like this to come through. But in general, we don't + # know what the parser would or wouldn't have allowed, so + # I'm letting this succeed for now. + soup = self.soup("<br/>") + soup.br.insert(1, "Contents") + assert str(soup.br) == "<br>Contents</br>" + + def test_insert_before(self): + soup = self.soup("<a>foo</a><b>bar</b>") + soup.b.insert_before("BAZ") + soup.a.insert_before("QUUX") + assert soup.decode() == self.document_for("QUUX<a>foo</a>BAZ<b>bar</b>") + + soup.a.insert_before(soup.b) + assert soup.decode() == self.document_for("QUUX<b>bar</b><a>foo</a>BAZ") + + # Can't insert an element before itself. + b = soup.b + with pytest.raises(ValueError): + b.insert_before(b) + + # Can't insert before if an element has no parent. 
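+ # (Sketch: extract() both returns the element and severs its links,
+ # so immediately afterwards b.parent is None and there is no
+ # position for insert_before() to refer to.)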
+ b.extract()
+ with pytest.raises(ValueError):
+ b.insert_before("nope")
+
+ # Can insert an identical element
+ soup = self.soup("<a>")
+ soup.a.insert_before(soup.new_tag("a"))
+
+ # TODO: OK but what happens?
+
+ def test_insert_multiple_before(self):
+ soup = self.soup("<a>foo</a><b>bar</b>")
+ soup.b.insert_before("BAZ", " ", "QUUX")
+ soup.a.insert_before("QUUX", " ", "BAZ")
+ assert soup.decode() == self.document_for(
+ "QUUX BAZ<a>foo</a>BAZ QUUX<b>bar</b>"
+ )
+
+ soup.a.insert_before(soup.b, "FOO")
+ assert soup.decode() == self.document_for(
+ "QUUX BAZ<b>bar</b>FOO<a>foo</a>BAZ QUUX"
+ )
+
+ def test_insert_after(self):
+ soup = self.soup("<a>foo</a><b>bar</b>")
+ soup.b.insert_after("BAZ")
+ soup.a.insert_after("QUUX")
+ assert soup.decode() == self.document_for("<a>foo</a>QUUX<b>bar</b>BAZ")
+ soup.b.insert_after(soup.a)
+ assert soup.decode() == self.document_for("QUUX<b>bar</b><a>foo</a>BAZ")
+
+ # Can't insert an element after itself.
+ b = soup.b
+ with pytest.raises(ValueError):
+ b.insert_after(b)
+
+ # Can't insert after if an element has no parent.
+ b.extract()
+ with pytest.raises(ValueError):
+ b.insert_after("nope")
+
+ # Can insert an identical element
+ soup = self.soup("<a>")
+ soup.a.insert_after(soup.new_tag("a"))
+
+ # TODO: OK but what does it look like?
+
+ def test_insert_multiple_after(self):
+ soup = self.soup("<a>foo</a><b>bar</b>")
+ soup.b.insert_after("BAZ", " ", "QUUX")
+ soup.a.insert_after("QUUX", " ", "BAZ")
+ assert soup.decode() == self.document_for(
+ "<a>foo</a>QUUX BAZ<b>bar</b>BAZ QUUX"
+ )
+ soup.b.insert_after(soup.a, "FOO ")
+ assert soup.decode() == self.document_for(
+ "QUUX BAZ<b>bar</b><a>foo</a>FOO BAZ QUUX"
+ )
+
+ def test_insert_after_raises_exception_if_after_has_no_meaning(self):
+ soup = self.soup("")
+ tag = soup.new_tag("a")
+ string = soup.new_string("")
+ with pytest.raises(ValueError):
+ string.insert_after(tag)
+ with pytest.raises(NotImplementedError):
+ soup.insert_after(tag)
+ with pytest.raises(ValueError):
+ tag.insert_after(tag)
+
+ def test_insert_before_raises_notimplementederror_if_before_has_no_meaning(self):
+ soup = self.soup("")
+ tag = soup.new_tag("a")
+ string = soup.new_string("")
+ with pytest.raises(ValueError):
+ string.insert_before(tag)
+ with pytest.raises(NotImplementedError):
+ soup.insert_before(tag)
+ with pytest.raises(ValueError):
+ tag.insert_before(tag)
+
+ def test_replace_with(self):
+ soup = self.soup("<p>There's <b>no</b> business like <b>show</b> business</p>")
+ no, show = soup.find_all("b")
+ show.replace_with(no)
+ assert soup.decode() == self.document_for(
+ "<p>There's business like <b>no</b> business</p>"
+ )
+
+ assert show.parent is None
+ assert no.parent == soup.p
+ assert no.next_element == "no"
+ assert no.next_sibling == " business"
+
+ def test_replace_with_errors(self):
+ # Can't replace a tag that's not part of a tree.
+ a_tag = Tag(name="a")
+ with pytest.raises(ValueError):
+ a_tag.replace_with("won't work")
+
+ # Can't replace a tag with its parent.
+ a_tag = self.soup("<a><b></b></a>").a
+ with pytest.raises(ValueError):
+ a_tag.b.replace_with(a_tag)
+
+ # Or with a list that includes its parent.
+ with pytest.raises(ValueError): + a_tag.b.replace_with("string1", a_tag, "string2") + + def test_replace_with_multiple(self): + data = "<a><b></b><c></c></a>" + soup = self.soup(data) + d_tag = soup.new_tag("d") + d_tag.string = "Text In D Tag" + e_tag = soup.new_tag("e") + f_tag = soup.new_tag("f") + a_string = "Random Text" + soup.c.replace_with(d_tag, e_tag, a_string, f_tag) + assert ( + soup.decode() + == "<a><b></b><d>Text In D Tag</d><e></e>Random Text<f></f></a>" + ) + assert soup.b.next_element == d_tag + assert d_tag.string.next_element == e_tag + assert e_tag.next_element.string == a_string + assert e_tag.next_element.next_element == f_tag + + def test_replace_first_child(self): + data = "<a><b></b><c></c></a>" + soup = self.soup(data) + soup.b.replace_with(soup.c) + assert "<a><c></c></a>" == soup.decode() + + def test_replace_last_child(self): + data = "<a><b></b><c></c></a>" + soup = self.soup(data) + soup.c.replace_with(soup.b) + assert "<a><b></b></a>" == soup.decode() + + def test_nested_tag_replace_with(self): + soup = self.soup( + """<a>We<b>reserve<c>the</c><d>right</d></b></a><e>to<f>refuse</f><g>service</g></e>""" + ) + + # Replace the entire <b> tag and its contents ("reserve the + # right") with the <f> tag ("refuse"). + remove_tag = soup.b + move_tag = soup.f + remove_tag.replace_with(move_tag) + + assert soup.decode() == self.document_for( + "<a>We<f>refuse</f></a><e>to<g>service</g></e>" + ) + + # The <b> tag is now an orphan. + assert remove_tag.parent is None + assert remove_tag.find(string="right").next_element is None + assert remove_tag.previous_element is None + assert remove_tag.next_sibling is None + assert remove_tag.previous_sibling is None + + # The <f> tag is now connected to the <a> tag. + assert move_tag.parent == soup.a + assert move_tag.previous_element == "We" + assert move_tag.next_element.next_element == soup.e + assert move_tag.next_sibling is None + + # The gap where the <f> tag used to be has been mended, and + # the word "to" is now connected to the <g> tag. + to_text = soup.find(string="to") + g_tag = soup.g + assert to_text.next_element == g_tag + assert to_text.next_sibling == g_tag + assert g_tag.previous_element == to_text + assert g_tag.previous_sibling == to_text + + def test_unwrap(self): + tree = self.soup(""" + <p>Unneeded <em>formatting</em> is unneeded</p> + """) + tree.em.unwrap() + assert tree.em is None + assert tree.p.text == "Unneeded formatting is unneeded" + + def test_wrap(self): + soup = self.soup("I wish I was bold.") + value = soup.string.wrap(soup.new_tag("b")) + assert value.decode() == "<b>I wish I was bold.</b>" + assert soup.decode() == self.document_for("<b>I wish I was bold.</b>") + + def test_wrap_extracts_tag_from_elsewhere(self): + soup = self.soup("<b></b>I wish I was bold.") + soup.b.next_sibling.wrap(soup.b) + assert soup.decode() == self.document_for("<b>I wish I was bold.</b>") + + def test_wrap_puts_new_contents_at_the_end(self): + soup = self.soup("<b>I like being bold.</b>I wish I was bold.") + soup.b.next_sibling.wrap(soup.b) + assert 2 == len(soup.b.contents) + assert soup.decode() == self.document_for( + "<b>I like being bold.I wish I was bold.</b>" + ) + + def test_extract(self): + soup = self.soup( + '<html><body>Some content. <div id="nav">Nav crap</div> More content.</body></html>' + ) + + assert len(soup.body.contents) == 3 + extracted = soup.find(id="nav").extract() + + assert soup.decode() == "<html><body>Some content. 
More content.</body></html>"
+ assert extracted.decode() == '<div id="nav">Nav crap</div>'
+
+ # The extracted tag is now an orphan.
+ assert len(soup.body.contents) == 2
+ assert extracted.parent is None
+ assert extracted.previous_element is None
+ assert extracted.next_element.next_element is None
+
+ # The gap where the extracted tag used to be has been mended.
+ content_1 = soup.find(string="Some content. ")
+ content_2 = soup.find(string=" More content.")
+ assert content_1.next_element == content_2
+ assert content_1.next_sibling == content_2
+ assert content_2.previous_element == content_1
+ assert content_2.previous_sibling == content_1
+
+ def test_extract_distinguishes_between_identical_strings(self):
+ soup = self.soup("<a>foo</a><b>bar</b>")
+ foo_1 = soup.a.string
+ foo_2 = soup.new_string("foo")
+ bar_2 = soup.new_string("bar")
+ soup.a.append(foo_2)
+ soup.b.append(bar_2)
+
+ # Now there are two identical strings in the <a> tag, and two
+ # in the <b> tag. Let's remove the first "foo" and the second
+ # "bar".
+ foo_1.extract()
+ bar_2.extract()
+ assert foo_2 == soup.a.string
+ assert bar_2 == soup.b.string
+
+ def test_extract_multiples_of_same_tag(self):
+ soup = self.soup("""
+<html>
+<head>
+<script>foo</script>
+</head>
+<body>
+ <script>bar</script>
+ <a></a>
+</body>
+<script>baz</script>
+</html>""")
+ [soup.script.extract() for i in soup.find_all("script")]
+ assert "<body>\n\n<a></a>\n</body>" == str(soup.body)
+
+ def test_extract_works_when_element_is_surrounded_by_identical_strings(self):
+ soup = self.soup("<html>\n" "<body>hi</body>\n" "</html>")
+ soup.find("body").extract()
+ assert None is soup.find("body")
+
+ def test_clear(self):
+ """Tag.clear()"""
+ soup = self.soup("<p><a>String <em>Italicized</em></a> and another</p>")
+ # clear using extract()
+ a = soup.a
+ soup.p.clear()
+ assert len(soup.p.contents) == 0
+ assert hasattr(a, "contents")
+
+ # clear using decompose()
+ em = a.em
+ a.clear(decompose=True)
+ assert 0 == len(em.contents)
+
+ @pytest.mark.parametrize(
+ "method_name,expected_result",
+ [
+ (
+ "descendants",
+ '<div><em>child1</em><p id="start"></p><p>child3</p></div>',
+ ),
+ (
+ "next_siblings",
+ '<div><em>child1</em><p id="start"><a>Second <em>child</em></a></p></div>',
+ ),
+ # Confused about why child3 is still here in this test?
+ # It's because removing the <p id="start"> tag from the tree
+ # removes all of its children from the tree as well.
+ # 'child'.next_element becomes None, because 'child' is no
+ # longer in the tree, and iteration stops there. Don't do
+ # this kind of thing, is what I'm saying.
+ (
+ "next_elements",
+ '<div><em>child1</em><p id="start"></p><p>child3</p></div>',
+ ),
+ ("children", '<div><em>child1</em><p id="start"></p><p>child3</p></div>'),
+ ("previous_elements", ""),
+ (
+ "previous_siblings",
+ '<div><p id="start"><a>Second <em>child</em></a></p><p>child3</p></div>',
+ ),
+ ("parents", ""),
+ ],
+ )
+ def test_extract_during_iteration(self, method_name, expected_result):
+ # The iterators should be able to proceed even if the most
+ # recently yielded node got removed from the tree. This kind of code
+ # is a bad idea, but we should be able to run it without an exception.
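+ # (Sketch: roughly speaking, each generator locates the next node
+ # before yielding the current one, so extracting the yielded node
+ # does not strand the iterator; it continues from the node it had
+ # already located, or stops once the chain leads out of the tree.)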
+ soup = self.soup(
+ "<div><em>child1</em><p id='start'><a>Second <em>child</em></a></p><p>child3</p></div>"
+ )
+ iterator = getattr(soup.p, method_name)
+ for i in iterator:
+ i.extract()
+ assert expected_result == soup.decode()
+
+ def test_decompose(self):
+ # Test PageElement.decompose() and PageElement.decomposed
+ soup = self.soup("<p><a>String <em>Italicized</em></a></p><p>Another para</p>")
+ p1, p2 = soup.find_all("p")
+ a = p1.a
+ text = p1.em.string
+ for i in [p1, p2, a, text]:
+ assert False is i.decomposed
+
+ # This sets p1 and everything beneath it to decomposed.
+ p1.decompose()
+ for i in [p1, a, text]:
+ assert True is i.decomposed
+ # p2 is unaffected.
+ assert False is p2.decomposed
+
+ def test_decompose_string(self):
+ soup = self.soup("<div><p>String 1</p><p>String 2</p></p>")
+ div = soup.div
+ text = div.p.string
+ assert False is text.decomposed
+ text.decompose()
+ assert True is text.decomposed
+ assert "<div><p></p><p>String 2</p></div>" == div.decode()
+
+ def test_string_set(self):
+ """Tag.string = 'string'"""
+ soup = self.soup("<a></a> <b><c></c></b>")
+ soup.a.string = "foo"
+ assert soup.a.contents == ["foo"]
+ soup.b.string = "bar"
+ assert soup.b.contents == ["bar"]
+
+ def test_string_set_does_not_affect_original_string(self):
+ soup = self.soup("<a><b>foo</b><c>bar</c>")
+ soup.b.string = soup.c.string
+ assert soup.a.encode() == b"<a><b>bar</b><c>bar</c></a>"
+
+ def test_set_string_preserves_class_of_string(self):
+ soup = self.soup("<a></a>")
+ cdata = CData("foo")
+ soup.a.string = cdata
+ assert isinstance(soup.a.string, CData)
+
+
+all_find_type_methods = [
+ "find",
+ "find_all",
+ "find_parent",
+ "find_parents",
+ "find_next",
+ "find_all_next",
+ "find_previous",
+ "find_all_previous",
+ "find_next_sibling",
+ "find_next_siblings",
+ "find_previous_sibling",
+ "find_previous_siblings",
+]
+
+
+class TestDeprecatedArguments(SoupTest):
+ @pytest.mark.parametrize("method_name", all_find_type_methods)
+ def test_find_type_method_string(self, method_name):
+ soup = self.soup("<a>some</a><b>markup</b>")
+ method = getattr(soup.b, method_name)
+ with warnings.catch_warnings(record=True) as w:
+ method(text="markup")
+ [warning] = w
+ assert warning.filename == __file__
+ msg = str(warning.message)
+ assert (
+ msg
+ == "The 'text' argument to find()-type methods is deprecated. Use 'string' instead."
+ )
+
+
+class TestWarnings(SoupTest):
+ @pytest.mark.parametrize("method_name", all_find_type_methods)
+ def test_suspicious_syntax_warning(self, method_name):
+ soup = self.soup("<a>some</a><b>markup</b>")
+ method = getattr(soup.b, method_name)
+ with warnings.catch_warnings(record=True) as w:
+ method(_class="u")
+ [warning] = w
+ assert warning.filename == __file__
+ assert isinstance(warning.message, AttributeResemblesVariableWarning)
+ msg = str(warning.message)
+ assert (
+ "'_class' is an unusual attribute name and is a common misspelling for 'class_'"
+ in msg
+ )