Diffstat (limited to '.venv/lib/python3.12/site-packages/urllib3/contrib/emscripten')
-rw-r--r--  .venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/__init__.py                |  16
-rw-r--r--  .venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/connection.py              | 255
-rw-r--r--  .venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/emscripten_fetch_worker.js | 110
-rw-r--r--  .venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/fetch.py                   | 708
-rw-r--r--  .venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/request.py                 |  22
-rw-r--r--  .venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/response.py                | 285
6 files changed, 1396 insertions(+), 0 deletions(-)
diff --git a/.venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/__init__.py b/.venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/__init__.py
new file mode 100644
index 00000000..8a3c5beb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/__init__.py
@@ -0,0 +1,16 @@
+from __future__ import annotations
+
+import urllib3.connection
+
+from ...connectionpool import HTTPConnectionPool, HTTPSConnectionPool
+from .connection import EmscriptenHTTPConnection, EmscriptenHTTPSConnection
+
+
+def inject_into_urllib3() -> None:
+ # override connection classes to use emscripten-specific classes
+ # n.b. mypy complains about the overriding of classes below
+ # if it isn't ignored
+ HTTPConnectionPool.ConnectionCls = EmscriptenHTTPConnection
+ HTTPSConnectionPool.ConnectionCls = EmscriptenHTTPSConnection
+ urllib3.connection.HTTPConnection = EmscriptenHTTPConnection # type: ignore[misc,assignment]
+ urllib3.connection.HTTPSConnection = EmscriptenHTTPSConnection # type: ignore[misc,assignment]
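+
+
+# A minimal usage sketch (assumes a Pyodide/Emscripten runtime; urllib3's
+# top-level request() API is used here for brevity):
+#
+#     import urllib3
+#     from urllib3.contrib.emscripten import inject_into_urllib3
+#
+#     inject_into_urllib3()
+#     response = urllib3.request("GET", "https://example.org")
+#     print(response.status, len(response.data))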
diff --git a/.venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/connection.py b/.venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/connection.py
new file mode 100644
index 00000000..41bfd279
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/connection.py
@@ -0,0 +1,255 @@
+from __future__ import annotations
+
+import os
+import typing
+
+# use http.client.HTTPException for consistency with non-emscripten
+from http.client import HTTPException as HTTPException # noqa: F401
+from http.client import ResponseNotReady
+
+from ..._base_connection import _TYPE_BODY
+from ...connection import HTTPConnection, ProxyConfig, port_by_scheme
+from ...exceptions import TimeoutError
+from ...response import BaseHTTPResponse
+from ...util.connection import _TYPE_SOCKET_OPTIONS
+from ...util.timeout import _DEFAULT_TIMEOUT, _TYPE_TIMEOUT
+from ...util.url import Url
+from .fetch import _RequestError, _TimeoutError, send_request, send_streaming_request
+from .request import EmscriptenRequest
+from .response import EmscriptenHttpResponseWrapper, EmscriptenResponse
+
+if typing.TYPE_CHECKING:
+ from ..._base_connection import BaseHTTPConnection, BaseHTTPSConnection
+
+
+class EmscriptenHTTPConnection:
+ default_port: typing.ClassVar[int] = port_by_scheme["http"]
+ default_socket_options: typing.ClassVar[_TYPE_SOCKET_OPTIONS]
+
+ timeout: float | None
+
+ host: str
+ port: int
+ blocksize: int
+ source_address: tuple[str, int] | None
+ socket_options: _TYPE_SOCKET_OPTIONS | None
+
+ proxy: Url | None
+ proxy_config: ProxyConfig | None
+
+ is_verified: bool = False
+ proxy_is_verified: bool | None = None
+
+ _response: EmscriptenResponse | None
+
+ def __init__(
+ self,
+ host: str,
+ port: int = 0,
+ *,
+ timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
+ source_address: tuple[str, int] | None = None,
+ blocksize: int = 8192,
+ socket_options: _TYPE_SOCKET_OPTIONS | None = None,
+ proxy: Url | None = None,
+ proxy_config: ProxyConfig | None = None,
+ ) -> None:
+ self.host = host
+ self.port = port
+ self.timeout = timeout if isinstance(timeout, float) else 0.0
+ self.scheme = "http"
+ self._closed = True
+ self._response = None
+ # ignore these things because we don't
+ # have control over that stuff
+ self.proxy = None
+ self.proxy_config = None
+ self.blocksize = blocksize
+ self.source_address = None
+ self.socket_options = None
+ self.is_verified = False
+
+ def set_tunnel(
+ self,
+ host: str,
+ port: int | None = 0,
+ headers: typing.Mapping[str, str] | None = None,
+ scheme: str = "http",
+ ) -> None:
+ pass
+
+ def connect(self) -> None:
+ pass
+
+ def request(
+ self,
+ method: str,
+ url: str,
+ body: _TYPE_BODY | None = None,
+ headers: typing.Mapping[str, str] | None = None,
+ # We know *at least* botocore is depending on the order of the
+ # first 3 parameters so to be safe we only mark the later ones
+ # as keyword-only to ensure we have space to extend.
+ *,
+ chunked: bool = False,
+ preload_content: bool = True,
+ decode_content: bool = True,
+ enforce_content_length: bool = True,
+ ) -> None:
+ self._closed = False
+ if url.startswith("/"):
+ # no scheme / host / port included, make a full url
+ url = f"{self.scheme}://{self.host}:{self.port}" + url
+ request = EmscriptenRequest(
+ url=url,
+ method=method,
+ timeout=self.timeout if self.timeout else 0,
+ decode_content=decode_content,
+ )
+ request.set_body(body)
+ if headers:
+ for k, v in headers.items():
+ request.set_header(k, v)
+ self._response = None
+ try:
+ if not preload_content:
+ self._response = send_streaming_request(request)
+ if self._response is None:
+ self._response = send_request(request)
+ except _TimeoutError as e:
+ raise TimeoutError(e.message) from e
+ except _RequestError as e:
+ raise HTTPException(e.message) from e
+
+ def getresponse(self) -> BaseHTTPResponse:
+ if self._response is not None:
+ return EmscriptenHttpResponseWrapper(
+ internal_response=self._response,
+ url=self._response.request.url,
+ connection=self,
+ )
+ else:
+ raise ResponseNotReady()
+
+ def close(self) -> None:
+ self._closed = True
+ self._response = None
+
+ @property
+ def is_closed(self) -> bool:
+ """Whether the connection either is brand new or has been previously closed.
+ If this property is True then both ``is_connected`` and ``has_connected_to_proxy``
+ properties must be False.
+ """
+ return self._closed
+
+ @property
+ def is_connected(self) -> bool:
+ """Whether the connection is actively connected to any origin (proxy or target)"""
+ return True
+
+ @property
+ def has_connected_to_proxy(self) -> bool:
+ """Whether the connection has successfully connected to its proxy.
+ This returns False if no proxy is in use. Used to determine whether
+ errors are coming from the proxy layer or from tunnelling to the target origin.
+ """
+ return False
+
+
+class EmscriptenHTTPSConnection(EmscriptenHTTPConnection):
+ default_port = port_by_scheme["https"]
+ # all of this is basically ignored, as the browser handles HTTPS
+ cert_reqs: int | str | None = None
+ ca_certs: str | None = None
+ ca_cert_dir: str | None = None
+ ca_cert_data: None | str | bytes = None
+ cert_file: str | None
+ key_file: str | None
+ key_password: str | None
+ ssl_context: typing.Any | None
+ ssl_version: int | str | None = None
+ ssl_minimum_version: int | None = None
+ ssl_maximum_version: int | None = None
+ assert_hostname: None | str | typing.Literal[False]
+ assert_fingerprint: str | None = None
+
+ def __init__(
+ self,
+ host: str,
+ port: int = 0,
+ *,
+ timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
+ source_address: tuple[str, int] | None = None,
+ blocksize: int = 16384,
+ socket_options: (
+ None | _TYPE_SOCKET_OPTIONS
+ ) = HTTPConnection.default_socket_options,
+ proxy: Url | None = None,
+ proxy_config: ProxyConfig | None = None,
+ cert_reqs: int | str | None = None,
+ assert_hostname: None | str | typing.Literal[False] = None,
+ assert_fingerprint: str | None = None,
+ server_hostname: str | None = None,
+ ssl_context: typing.Any | None = None,
+ ca_certs: str | None = None,
+ ca_cert_dir: str | None = None,
+ ca_cert_data: None | str | bytes = None,
+ ssl_minimum_version: int | None = None,
+ ssl_maximum_version: int | None = None,
+ ssl_version: int | str | None = None, # Deprecated
+ cert_file: str | None = None,
+ key_file: str | None = None,
+ key_password: str | None = None,
+ ) -> None:
+ super().__init__(
+ host,
+ port=port,
+ timeout=timeout,
+ source_address=source_address,
+ blocksize=blocksize,
+ socket_options=socket_options,
+ proxy=proxy,
+ proxy_config=proxy_config,
+ )
+ self.scheme = "https"
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.key_password = key_password
+ self.ssl_context = ssl_context
+ self.server_hostname = server_hostname
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+ self.ssl_version = ssl_version
+ self.ssl_minimum_version = ssl_minimum_version
+ self.ssl_maximum_version = ssl_maximum_version
+ self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
+ self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
+ self.ca_cert_data = ca_cert_data
+
+ self.cert_reqs = None
+
+ # The browser will automatically verify all requests.
+ # We have no control over that setting.
+ self.is_verified = True
+
+ def set_cert(
+ self,
+ key_file: str | None = None,
+ cert_file: str | None = None,
+ cert_reqs: int | str | None = None,
+ key_password: str | None = None,
+ ca_certs: str | None = None,
+ assert_hostname: None | str | typing.Literal[False] = None,
+ assert_fingerprint: str | None = None,
+ ca_cert_dir: str | None = None,
+ ca_cert_data: None | str | bytes = None,
+ ) -> None:
+ pass
+
+
+# verify that this class implements BaseHTTP(s) connection correctly
+if typing.TYPE_CHECKING:
+ _supports_http_protocol: BaseHTTPConnection = EmscriptenHTTPConnection("", 0)
+ _supports_https_protocol: BaseHTTPSConnection = EmscriptenHTTPSConnection("", 0)
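+
+# A rough usage sketch (connections are normally created by urllib3's
+# connection pools rather than by hand; assumes a Pyodide runtime):
+#
+#     conn = EmscriptenHTTPSConnection("example.org", 443)
+#     conn.request("GET", "/")
+#     response = conn.getresponse()
+#     body = response.read()
+#     conn.close()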
diff --git a/.venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/emscripten_fetch_worker.js b/.venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/emscripten_fetch_worker.js
new file mode 100644
index 00000000..243b8622
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/emscripten_fetch_worker.js
@@ -0,0 +1,110 @@
+let Status = {
+ SUCCESS_HEADER: -1,
+ SUCCESS_EOF: -2,
+ ERROR_TIMEOUT: -3,
+ ERROR_EXCEPTION: -4,
+};
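+
+// Shared-buffer protocol (read by fetch.py): Int32Array[0] holds one of the
+// Status codes above, or a positive byte count when data is available;
+// Int32Array[1] holds the length of any header/error text; the payload bytes
+// start at offset 8 of the SharedArrayBuffer.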
+
+let connections = {};
+let nextConnectionID = 1;
+const encoder = new TextEncoder();
+
+self.addEventListener("message", async function (event) {
+ if (event.data.close) {
+ let connectionID = event.data.close;
+ delete connections[connectionID];
+ return;
+ } else if (event.data.getMore) {
+ let connectionID = event.data.getMore;
+ let { curOffset, value, reader, intBuffer, byteBuffer } =
+ connections[connectionID];
+ // if the current buffer still has unsent data, send it back straight away;
+ if (!value || curOffset >= value.length) {
+ // otherwise read another buffer first
+ try {
+ let readResponse = await reader.read();
+
+ if (readResponse.done) {
+ // read everything - clear connection and return
+ delete connections[connectionID];
+ Atomics.store(intBuffer, 0, Status.SUCCESS_EOF);
+ Atomics.notify(intBuffer, 0);
+ // finished reading successfully
+ // return from event handler
+ return;
+ }
+ curOffset = 0;
+ connections[connectionID].value = readResponse.value;
+ value = readResponse.value;
+ } catch (error) {
+ console.log("Request exception:", error);
+ let errorBytes = encoder.encode(error.message);
+ let written = errorBytes.length;
+ byteBuffer.set(errorBytes);
+ intBuffer[1] = written;
+ Atomics.store(intBuffer, 0, Status.ERROR_EXCEPTION);
+ Atomics.notify(intBuffer, 0);
+ // return here so the error status just stored isn't overwritten
+ // by the buffer-sending code below
+ return;
+ }
+ }
+
+ // send as much buffer as we can
+ let curLen = value.length - curOffset;
+ if (curLen > byteBuffer.length) {
+ curLen = byteBuffer.length;
+ }
+ byteBuffer.set(value.subarray(curOffset, curOffset + curLen), 0);
+
+ Atomics.store(intBuffer, 0, curLen); // store current length in bytes
+ Atomics.notify(intBuffer, 0);
+ curOffset += curLen;
+ connections[connectionID].curOffset = curOffset;
+
+ return;
+ } else {
+ // start fetch
+ let connectionID = nextConnectionID;
+ nextConnectionID += 1;
+ const intBuffer = new Int32Array(event.data.buffer);
+ const byteBuffer = new Uint8Array(event.data.buffer, 8);
+ try {
+ const response = await fetch(event.data.url, event.data.fetchParams);
+ // return the headers first via TextEncoder
+ var headers = [];
+ for (const pair of response.headers.entries()) {
+ headers.push([pair[0], pair[1]]);
+ }
+ let headerObj = {
+ headers: headers,
+ status: response.status,
+ connectionID,
+ };
+ const headerText = JSON.stringify(headerObj);
+ let headerBytes = encoder.encode(headerText);
+ let written = headerBytes.length;
+ byteBuffer.set(headerBytes);
+ intBuffer[1] = written;
+ // make a connection
+ connections[connectionID] = {
+ reader: response.body.getReader(),
+ intBuffer: intBuffer,
+ byteBuffer: byteBuffer,
+ value: undefined,
+ curOffset: 0,
+ };
+ // set header ready
+ Atomics.store(intBuffer, 0, Status.SUCCESS_HEADER);
+ Atomics.notify(intBuffer, 0);
+ // all fetching after this goes through a new postMessage call with getMore
+ // this allows for parallel requests
+ } catch (error) {
+ console.log("Request exception:", error);
+ let errorBytes = encoder.encode(error.message);
+ let written = errorBytes.length;
+ byteBuffer.set(errorBytes);
+ intBuffer[1] = written;
+ Atomics.store(intBuffer, 0, Status.ERROR_EXCEPTION);
+ Atomics.notify(intBuffer, 0);
+ }
+ }
+});
+self.postMessage({ inited: true });
diff --git a/.venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/fetch.py b/.venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/fetch.py
new file mode 100644
index 00000000..a514306e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/fetch.py
@@ -0,0 +1,708 @@
+"""
+Support for streaming http requests in emscripten.
+
+A few caveats:
+
+If your browser (or Node.js) has WebAssembly JavaScript Promise Integration enabled
+https://github.com/WebAssembly/js-promise-integration/blob/main/proposals/js-promise-integration/Overview.md
+*and* you launch pyodide using `pyodide.runPythonAsync`, this will fetch data using the
+JavaScript asynchronous fetch API (wrapped via `pyodide.ffi.call_sync`). In this case
+timeouts and streaming should just work.
+
+Otherwise, it uses XMLHttpRequest for non-streaming requests, plus a web worker for streaming.
+
+This approach has several caveats:
+
+Firstly, you can't do streaming HTTP in the main UI thread, because Atomics.wait isn't allowed.
+Streaming only works if you're running pyodide in a web worker.
+
+Secondly, this uses an extra web worker and SharedArrayBuffer to do the asynchronous fetch
+operation, so it requires that you have cross-origin isolation enabled, by serving over HTTPS
+(or from localhost) with the two headers below set:
+
+ Cross-Origin-Opener-Policy: same-origin
+ Cross-Origin-Embedder-Policy: require-corp
+
+You can tell whether cross-origin isolation is successfully enabled by looking at the global
+crossOriginIsolated variable in the JavaScript console. If it isn't, streaming requests will fall back
+to XMLHttpRequest, i.e. the whole response is read into a buffer and then returned, and a warning is shown in the JavaScript console.
+
+Finally, the web worker which does the streaming fetch is created on initial import, but will only be started
+once control is returned to JavaScript. Call `await wait_for_streaming_ready()` to wait until the streaming fetch worker is ready.
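+
+For example, a minimal sketch (assumes pyodide itself is running in a web worker):
+
+ from urllib3.contrib.emscripten.fetch import wait_for_streaming_ready
+ await wait_for_streaming_ready() # returns False if no worker was created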
+
+NB: in this code, there are a lot of JavaScript objects. They are named js_*
+to make it clear what type of object they are.
+"""
+
+from __future__ import annotations
+
+import io
+import json
+from email.parser import Parser
+from importlib.resources import files
+from typing import TYPE_CHECKING, Any
+
+import js # type: ignore[import-not-found]
+from pyodide.ffi import ( # type: ignore[import-not-found]
+ JsArray,
+ JsException,
+ JsProxy,
+ to_js,
+)
+
+if TYPE_CHECKING:
+ from typing_extensions import Buffer
+
+from .request import EmscriptenRequest
+from .response import EmscriptenResponse
+
+"""
+There are some headers that trigger unintended CORS preflight requests.
+See also https://github.com/koenvo/pyodide-http/issues/22
+"""
+HEADERS_TO_IGNORE = ("user-agent",)
+
+SUCCESS_HEADER = -1
+SUCCESS_EOF = -2
+ERROR_TIMEOUT = -3
+ERROR_EXCEPTION = -4
+
+_STREAMING_WORKER_CODE = (
+ files(__package__)
+ .joinpath("emscripten_fetch_worker.js")
+ .read_text(encoding="utf-8")
+)
+
+
+class _RequestError(Exception):
+ def __init__(
+ self,
+ message: str | None = None,
+ *,
+ request: EmscriptenRequest | None = None,
+ response: EmscriptenResponse | None = None,
+ ):
+ self.request = request
+ self.response = response
+ self.message = message
+ super().__init__(self.message)
+
+
+class _StreamingError(_RequestError):
+ pass
+
+
+class _TimeoutError(_RequestError):
+ pass
+
+
+def _obj_from_dict(dict_val: dict[str, Any]) -> JsProxy:
+ return to_js(dict_val, dict_converter=js.Object.fromEntries)
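+
+# For example, _obj_from_dict({"getMore": 2}) produces the JavaScript object
+# {getMore: 2}, ready to hand to the worker via postMessage.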
+
+
+class _ReadStream(io.RawIOBase):
+ def __init__(
+ self,
+ int_buffer: JsArray,
+ byte_buffer: JsArray,
+ timeout: float,
+ worker: JsProxy,
+ connection_id: int,
+ request: EmscriptenRequest,
+ ):
+ self.int_buffer = int_buffer
+ self.byte_buffer = byte_buffer
+ self.read_pos = 0
+ self.read_len = 0
+ self.connection_id = connection_id
+ self.worker = worker
+ self.timeout = int(1000 * timeout) if timeout > 0 else None
+ self.is_live = True
+ self._is_closed = False
+ self.request: EmscriptenRequest | None = request
+
+ def __del__(self) -> None:
+ self.close()
+
+ # this is compatible with _base_connection
+ def is_closed(self) -> bool:
+ return self._is_closed
+
+ # for compatibility with RawIOBase
+ @property
+ def closed(self) -> bool:
+ return self.is_closed()
+
+ def close(self) -> None:
+ if self.is_closed():
+ return
+ self.read_len = 0
+ self.read_pos = 0
+ self.int_buffer = None
+ self.byte_buffer = None
+ self._is_closed = True
+ self.request = None
+ if self.is_live:
+ self.worker.postMessage(_obj_from_dict({"close": self.connection_id}))
+ self.is_live = False
+ super().close()
+
+ def readable(self) -> bool:
+ return True
+
+ def writable(self) -> bool:
+ return False
+
+ def seekable(self) -> bool:
+ return False
+
+ def readinto(self, byte_obj: Buffer) -> int:
+ if not self.int_buffer:
+ raise _StreamingError(
+ "No buffer for stream in _ReadStream.readinto",
+ request=self.request,
+ response=None,
+ )
+ if self.read_len == 0:
+ # wait for the worker to send something
+ js.Atomics.store(self.int_buffer, 0, ERROR_TIMEOUT)
+ self.worker.postMessage(_obj_from_dict({"getMore": self.connection_id}))
+ if (
+ js.Atomics.wait(self.int_buffer, 0, ERROR_TIMEOUT, self.timeout)
+ == "timed-out"
+ ):
+ raise _TimeoutError
+ data_len = self.int_buffer[0]
+ if data_len > 0:
+ self.read_len = data_len
+ self.read_pos = 0
+ elif data_len == ERROR_EXCEPTION:
+ string_len = self.int_buffer[1]
+ # decode the error string
+ js_decoder = js.TextDecoder.new()
+ json_str = js_decoder.decode(self.byte_buffer.slice(0, string_len))
+ raise _StreamingError(
+ f"Exception thrown in fetch: {json_str}",
+ request=self.request,
+ response=None,
+ )
+ else:
+ # EOF, free the buffers and return zero
+ # and free the request
+ self.is_live = False
+ self.close()
+ return 0
+ # copy from int32array to python bytes
+ ret_length = min(self.read_len, len(memoryview(byte_obj)))
+ subarray = self.byte_buffer.subarray(
+ self.read_pos, self.read_pos + ret_length
+ ).to_py()
+ memoryview(byte_obj)[0:ret_length] = subarray
+ self.read_len -= ret_length
+ self.read_pos += ret_length
+ return ret_length
+
+
+class _StreamingFetcher:
+ def __init__(self) -> None:
+ # make web-worker and data buffer on startup
+ self.streaming_ready = False
+
+ js_data_blob = js.Blob.new(
+ to_js([_STREAMING_WORKER_CODE], create_pyproxies=False),
+ _obj_from_dict({"type": "application/javascript"}),
+ )
+
+ def promise_resolver(js_resolve_fn: JsProxy, js_reject_fn: JsProxy) -> None:
+ def onMsg(e: JsProxy) -> None:
+ self.streaming_ready = True
+ js_resolve_fn(e)
+
+ def onErr(e: JsProxy) -> None:
+ js_reject_fn(e) # Defensive: never happens in CI
+
+ self.js_worker.onmessage = onMsg
+ self.js_worker.onerror = onErr
+
+ js_data_url = js.URL.createObjectURL(js_data_blob)
+ self.js_worker = js.globalThis.Worker.new(js_data_url)
+ self.js_worker_ready_promise = js.globalThis.Promise.new(promise_resolver)
+
+ def send(self, request: EmscriptenRequest) -> EmscriptenResponse:
+ headers = {
+ k: v for k, v in request.headers.items() if k not in HEADERS_TO_IGNORE
+ }
+
+ body = request.body
+ fetch_data = {"headers": headers, "body": to_js(body), "method": request.method}
+ # start the request off in the worker
+ timeout = int(1000 * request.timeout) if request.timeout > 0 else None
+ js_shared_buffer = js.SharedArrayBuffer.new(1048576)
+ js_int_buffer = js.Int32Array.new(js_shared_buffer)
+ js_byte_buffer = js.Uint8Array.new(js_shared_buffer, 8)
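+ # Buffer layout (shared with emscripten_fetch_worker.js): int32[0] is the
+ # status flag, or a byte count once data arrives; int32[1] is the length
+ # of any header/error text; the payload bytes start at offset 8.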
+
+ js.Atomics.store(js_int_buffer, 0, ERROR_TIMEOUT)
+ js.Atomics.notify(js_int_buffer, 0)
+ js_absolute_url = js.URL.new(request.url, js.location).href
+ self.js_worker.postMessage(
+ _obj_from_dict(
+ {
+ "buffer": js_shared_buffer,
+ "url": js_absolute_url,
+ "fetchParams": fetch_data,
+ }
+ )
+ )
+ # wait for the worker to send something
+ js.Atomics.wait(js_int_buffer, 0, ERROR_TIMEOUT, timeout)
+ if js_int_buffer[0] == ERROR_TIMEOUT:
+ raise _TimeoutError(
+ "Timeout connecting to streaming request",
+ request=request,
+ response=None,
+ )
+ elif js_int_buffer[0] == SUCCESS_HEADER:
+ # got response
+ # header length is in second int of intBuffer
+ string_len = js_int_buffer[1]
+ # decode the rest to a JSON string
+ js_decoder = js.TextDecoder.new()
+ # this does a copy (the slice) because TextDecoder.decode() can't
+ # operate on a view of a SharedArrayBuffer
+ json_str = js_decoder.decode(js_byte_buffer.slice(0, string_len))
+ # get it as an object
+ response_obj = json.loads(json_str)
+ return EmscriptenResponse(
+ request=request,
+ status_code=response_obj["status"],
+ headers=response_obj["headers"],
+ body=_ReadStream(
+ js_int_buffer,
+ js_byte_buffer,
+ request.timeout,
+ self.js_worker,
+ response_obj["connectionID"],
+ request,
+ ),
+ )
+ elif js_int_buffer[0] == ERROR_EXCEPTION:
+ string_len = js_int_buffer[1]
+ # decode the error string
+ js_decoder = js.TextDecoder.new()
+ json_str = js_decoder.decode(js_byte_buffer.slice(0, string_len))
+ raise _StreamingError(
+ f"Exception thrown in fetch: {json_str}", request=request, response=None
+ )
+ else:
+ raise _StreamingError(
+ f"Unknown status from worker in fetch: {js_int_buffer[0]}",
+ request=request,
+ response=None,
+ )
+
+
+class _JSPIReadStream(io.RawIOBase):
+ """
+ A read stream that uses pyodide.ffi.run_sync to read from a JavaScript fetch
+ response. This requires support for WebAssembly JavaScript Promise Integration
+ in the containing browser, and for pyodide to be launched via runPythonAsync.
+
+ :param js_read_stream:
+ The JavaScript stream reader
+
+ :param timeout:
+ Timeout in seconds
+
+ :param request:
+ The request we're handling
+
+ :param response:
+ The response this stream relates to
+
+ :param js_abort_controller:
+ A JavaScript AbortController object, used for timeouts
+ """
+
+ def __init__(
+ self,
+ js_read_stream: Any,
+ timeout: float,
+ request: EmscriptenRequest,
+ response: EmscriptenResponse,
+ js_abort_controller: Any, # JavaScript AbortController for timeouts
+ ):
+ self.js_read_stream = js_read_stream
+ self.timeout = timeout
+ self._is_closed = False
+ self._is_done = False
+ self.request: EmscriptenRequest | None = request
+ self.response: EmscriptenResponse | None = response
+ self.current_buffer = None
+ self.current_buffer_pos = 0
+ self.js_abort_controller = js_abort_controller
+
+ def __del__(self) -> None:
+ self.close()
+
+ # this is compatible with _base_connection
+ def is_closed(self) -> bool:
+ return self._is_closed
+
+ # for compatibility with RawIOBase
+ @property
+ def closed(self) -> bool:
+ return self.is_closed()
+
+ def close(self) -> None:
+ if self.is_closed():
+ return
+ self.read_len = 0
+ self.read_pos = 0
+ self.js_read_stream.cancel()
+ self.js_read_stream = None
+ self._is_closed = True
+ self._is_done = True
+ self.request = None
+ self.response = None
+ super().close()
+
+ def readable(self) -> bool:
+ return True
+
+ def writable(self) -> bool:
+ return False
+
+ def seekable(self) -> bool:
+ return False
+
+ def _get_next_buffer(self) -> bool:
+ result_js = _run_sync_with_timeout(
+ self.js_read_stream.read(),
+ self.timeout,
+ self.js_abort_controller,
+ request=self.request,
+ response=self.response,
+ )
+ if result_js.done:
+ self._is_done = True
+ return False
+ else:
+ self.current_buffer = result_js.value.to_py()
+ self.current_buffer_pos = 0
+ return True
+
+ def readinto(self, byte_obj: Buffer) -> int:
+ if self.current_buffer is None:
+ if not self._get_next_buffer() or self.current_buffer is None:
+ self.close()
+ return 0
+ ret_length = min(
+ len(byte_obj), len(self.current_buffer) - self.current_buffer_pos
+ )
+ byte_obj[0:ret_length] = self.current_buffer[
+ self.current_buffer_pos : self.current_buffer_pos + ret_length
+ ]
+ self.current_buffer_pos += ret_length
+ if self.current_buffer_pos == len(self.current_buffer):
+ self.current_buffer = None
+ return ret_length
+
+
+# Detect whether we are running in the browser's main thread (as opposed to a worker)
+def is_in_browser_main_thread() -> bool:
+ return hasattr(js, "window") and hasattr(js, "self") and js.self == js.window
+
+
+def is_cross_origin_isolated() -> bool:
+ return hasattr(js, "crossOriginIsolated") and js.crossOriginIsolated
+
+
+def is_in_node() -> bool:
+ return (
+ hasattr(js, "process")
+ and hasattr(js.process, "release")
+ and hasattr(js.process.release, "name")
+ and js.process.release.name == "node"
+ )
+
+
+def is_worker_available() -> bool:
+ return hasattr(js, "Worker") and hasattr(js, "Blob")
+
+
+_fetcher: _StreamingFetcher | None = None
+
+if is_worker_available() and (
+ (is_cross_origin_isolated() and not is_in_browser_main_thread())
+ and (not is_in_node())
+):
+ _fetcher = _StreamingFetcher()
+else:
+ _fetcher = None
+
+
+NODE_JSPI_ERROR = (
+ "urllib3 only works in Node.js with pyodide.runPythonAsync"
+ " and requires the flag --experimental-wasm-stack-switching in "
+ " versions of node <24."
+)
+
+
+def send_streaming_request(request: EmscriptenRequest) -> EmscriptenResponse | None:
+ if has_jspi():
+ return send_jspi_request(request, True)
+ elif is_in_node():
+ raise _RequestError(
+ message=NODE_JSPI_ERROR,
+ request=request,
+ response=None,
+ )
+
+ if _fetcher and streaming_ready():
+ return _fetcher.send(request)
+ else:
+ _show_streaming_warning()
+ return None
+
+
+_SHOWN_TIMEOUT_WARNING = False
+
+
+def _show_timeout_warning() -> None:
+ global _SHOWN_TIMEOUT_WARNING
+ if not _SHOWN_TIMEOUT_WARNING:
+ _SHOWN_TIMEOUT_WARNING = True
+ message = "Warning: Timeout is not available on main browser thread"
+ js.console.warn(message)
+
+
+_SHOWN_STREAMING_WARNING = False
+
+
+def _show_streaming_warning() -> None:
+ global _SHOWN_STREAMING_WARNING
+ if not _SHOWN_STREAMING_WARNING:
+ _SHOWN_STREAMING_WARNING = True
+ message = "Can't stream HTTP requests because: \n"
+ if not is_cross_origin_isolated():
+ message += " Page is not cross-origin isolated\n"
+ if is_in_browser_main_thread():
+ message += " Python is running in main browser thread\n"
+ if not is_worker_available():
+ message += " Worker or Blob classes are not available in this environment." # Defensive: this is always False in browsers that we test in
+ if streaming_ready() is False:
+ message += """ Streaming fetch worker isn't ready. If you want to be sure that streaming fetch
+is working, you need to call: 'await urllib3.contrib.emscripten.fetch.wait_for_streaming_ready()`"""
+ from js import console
+
+ console.warn(message)
+
+
+def send_request(request: EmscriptenRequest) -> EmscriptenResponse:
+ if has_jspi():
+ return send_jspi_request(request, False)
+ elif is_in_node():
+ raise _RequestError(
+ message=NODE_JSPI_ERROR,
+ request=request,
+ response=None,
+ )
+ try:
+ js_xhr = js.XMLHttpRequest.new()
+
+ if not is_in_browser_main_thread():
+ js_xhr.responseType = "arraybuffer"
+ if request.timeout:
+ js_xhr.timeout = int(request.timeout * 1000)
+ else:
+ js_xhr.overrideMimeType("text/plain; charset=ISO-8859-15")
+ if request.timeout:
+ # timeout isn't available on the main thread - show a warning in console
+ # if it is set
+ _show_timeout_warning()
+
+ js_xhr.open(request.method, request.url, False)
+ for name, value in request.headers.items():
+ if name.lower() not in HEADERS_TO_IGNORE:
+ js_xhr.setRequestHeader(name, value)
+
+ js_xhr.send(to_js(request.body))
+
+ headers = dict(Parser().parsestr(js_xhr.getAllResponseHeaders()))
+
+ if not is_in_browser_main_thread():
+ body = js_xhr.response.to_py().tobytes()
+ else:
+ body = js_xhr.response.encode("ISO-8859-15")
+ return EmscriptenResponse(
+ status_code=js_xhr.status, headers=headers, body=body, request=request
+ )
+ except JsException as err:
+ if err.name == "TimeoutError":
+ raise _TimeoutError(err.message, request=request)
+ elif err.name == "NetworkError":
+ raise _RequestError(err.message, request=request)
+ else:
+ # general http error
+ raise _RequestError(err.message, request=request)
+
+
+def send_jspi_request(
+ request: EmscriptenRequest, streaming: bool
+) -> EmscriptenResponse:
+ """
+ Send a request using WebAssembly JavaScript Promise Integration
+ to wrap the asynchronous JavaScript fetch API (experimental).
+
+ :param request:
+ Request to send
+
+ :param streaming:
+ Whether to stream the response
+
+ :return: The response object
+ :rtype: EmscriptenResponse
+ """
+ timeout = request.timeout
+ js_abort_controller = js.AbortController.new()
+ headers = {k: v for k, v in request.headers.items() if k not in HEADERS_TO_IGNORE}
+ req_body = request.body
+ fetch_data = {
+ "headers": headers,
+ "body": to_js(req_body),
+ "method": request.method,
+ "signal": js_abort_controller.signal,
+ }
+ # Call JavaScript fetch (async api, returns a promise)
+ fetcher_promise_js = js.fetch(request.url, _obj_from_dict(fetch_data))
+ # Now suspend WebAssembly until we resolve that promise
+ # or time out.
+ response_js = _run_sync_with_timeout(
+ fetcher_promise_js,
+ timeout,
+ js_abort_controller,
+ request=request,
+ response=None,
+ )
+ headers = {}
+ header_iter = response_js.headers.entries()
+ while True:
+ iter_value_js = header_iter.next()
+ if getattr(iter_value_js, "done", False):
+ break
+ else:
+ headers[str(iter_value_js.value[0])] = str(iter_value_js.value[1])
+ status_code = response_js.status
+ body: bytes | io.RawIOBase = b""
+
+ response = EmscriptenResponse(
+ status_code=status_code, headers=headers, body=b"", request=request
+ )
+ if streaming:
+ # get via inputstream
+ if response_js.body is not None:
+ # get a reader from the fetch response
+ body_stream_js = response_js.body.getReader()
+ body = _JSPIReadStream(
+ body_stream_js, timeout, request, response, js_abort_controller
+ )
+ else:
+ # get directly via arraybuffer
+ # n.b. this is another async JavaScript call.
+ body = _run_sync_with_timeout(
+ response_js.arrayBuffer(),
+ timeout,
+ js_abort_controller,
+ request=request,
+ response=response,
+ ).to_py()
+ response.body = body
+ return response
+
+
+def _run_sync_with_timeout(
+ promise: Any,
+ timeout: float,
+ js_abort_controller: Any,
+ request: EmscriptenRequest | None,
+ response: EmscriptenResponse | None,
+) -> Any:
+ """
+ Await a JavaScript promise synchronously with a timeout which is implemented
+ via the AbortController
+
+ :param promise:
+ Javascript promise to await
+
+ :param timeout:
+ Timeout in seconds
+
+ :param js_abort_controller:
+ A JavaScript AbortController object, used on timeout
+
+ :param request:
+ The request being handled
+
+ :param response:
+ The response being handled (if it exists yet)
+
+ :raises _TimeoutError: If the request times out
+ :raises _RequestError: If the request raises a JavaScript exception
+
+ :return: The result of awaiting the promise.
+ """
+ timer_id = None
+ if timeout > 0:
+ timer_id = js.setTimeout(
+ js_abort_controller.abort.bind(js_abort_controller), int(timeout * 1000)
+ )
+ try:
+ from pyodide.ffi import run_sync
+
+ # run_sync here uses WebAssembly JavaScript Promise Integration to
+ # suspend python until the JavaScript promise resolves.
+ return run_sync(promise)
+ except JsException as err:
+ if err.name == "AbortError":
+ raise _TimeoutError(
+ message="Request timed out", request=request, response=response
+ )
+ else:
+ raise _RequestError(message=err.message, request=request, response=response)
+ finally:
+ if timer_id is not None:
+ js.clearTimeout(timer_id)
+
+
+def has_jspi() -> bool:
+ """
+ Return True if JSPI can be used.
+
+ This requires both browser support and also WebAssembly
+ to be in the correct state - i.e. that the JavaScript
+ call into Python was async, not sync.
+
+ :return: True if JSPI can be used.
+ :rtype: bool
+ """
+ try:
+ from pyodide.ffi import can_run_sync, run_sync # noqa: F401
+
+ return bool(can_run_sync())
+ except ImportError:
+ return False
+
+
+def streaming_ready() -> bool | None:
+ if _fetcher:
+ return _fetcher.streaming_ready
+ else:
+ return None # no fetcher, return None to signify that
+
+
+async def wait_for_streaming_ready() -> bool:
+ if _fetcher:
+ await _fetcher.js_worker_ready_promise
+ return True
+ else:
+ return False
diff --git a/.venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/request.py b/.venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/request.py
new file mode 100644
index 00000000..e692e692
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/request.py
@@ -0,0 +1,22 @@
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+
+from ..._base_connection import _TYPE_BODY
+
+
+@dataclass
+class EmscriptenRequest:
+ method: str
+ url: str
+ params: dict[str, str] | None = None
+ body: _TYPE_BODY | None = None
+ headers: dict[str, str] = field(default_factory=dict)
+ timeout: float = 0
+ decode_content: bool = True
+
+ def set_header(self, name: str, value: str) -> None:
+ self.headers[name.capitalize()] = value
+
+ def set_body(self, body: _TYPE_BODY | None) -> None:
+ self.body = body
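+
+
+# Example (illustrative only):
+#
+#     req = EmscriptenRequest(method="GET", url="https://example.org", timeout=5.0)
+#     req.set_header("accept", "application/json")  # stored as "Accept"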
diff --git a/.venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/response.py b/.venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/response.py
new file mode 100644
index 00000000..b32b4023
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/urllib3/contrib/emscripten/response.py
@@ -0,0 +1,285 @@
+from __future__ import annotations
+
+import json as _json
+import logging
+import typing
+from contextlib import contextmanager
+from dataclasses import dataclass
+from http.client import HTTPException as HTTPException
+from io import BytesIO, IOBase
+
+from ...exceptions import InvalidHeader, TimeoutError
+from ...response import BaseHTTPResponse
+from ...util.retry import Retry
+from .request import EmscriptenRequest
+
+if typing.TYPE_CHECKING:
+ from ..._base_connection import BaseHTTPConnection, BaseHTTPSConnection
+
+log = logging.getLogger(__name__)
+
+
+@dataclass
+class EmscriptenResponse:
+ status_code: int
+ headers: dict[str, str]
+ body: IOBase | bytes
+ request: EmscriptenRequest
+
+
+class EmscriptenHttpResponseWrapper(BaseHTTPResponse):
+ def __init__(
+ self,
+ internal_response: EmscriptenResponse,
+ url: str | None = None,
+ connection: BaseHTTPConnection | BaseHTTPSConnection | None = None,
+ ):
+ self._pool = None # set by pool class
+ self._body = None
+ self._response = internal_response
+ self._url = url
+ self._connection = connection
+ self._closed = False
+ super().__init__(
+ headers=internal_response.headers,
+ status=internal_response.status_code,
+ request_url=url,
+ version=0,
+ version_string="HTTP/?",
+ reason="",
+ decode_content=True,
+ )
+ self.length_remaining = self._init_length(self._response.request.method)
+ self.length_is_certain = False
+
+ @property
+ def url(self) -> str | None:
+ return self._url
+
+ @url.setter
+ def url(self, url: str | None) -> None:
+ self._url = url
+
+ @property
+ def connection(self) -> BaseHTTPConnection | BaseHTTPSConnection | None:
+ return self._connection
+
+ @property
+ def retries(self) -> Retry | None:
+ return self._retries
+
+ @retries.setter
+ def retries(self, retries: Retry | None) -> None:
+ # Override the request_url if retries has a redirect location.
+ self._retries = retries
+
+ def stream(
+ self, amt: int | None = 2**16, decode_content: bool | None = None
+ ) -> typing.Generator[bytes]:
+ """
+ A generator wrapper for the read() method. A call will block until
+ ``amt`` bytes have been read from the connection or until the
+ connection is closed.
+
+ :param amt:
+ How much of the content to read. The generator will return up to
+ that much data per iteration, but may return less. This is
+ particularly likely when using compressed data. However, an empty
+ byte string will never be returned.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+ """
+ while True:
+ data = self.read(amt=amt, decode_content=decode_content)
+
+ if data:
+ yield data
+ else:
+ break
+
+ def _init_length(self, request_method: str | None) -> int | None:
+ length: int | None
+ content_length: str | None = self.headers.get("content-length")
+
+ if content_length is not None:
+ try:
+ # RFC 7230 section 3.3.2 specifies multiple content lengths can
+ # be sent in a single Content-Length header
+ # (e.g. Content-Length: 42, 42). This line ensures the values
+ # are all valid ints and that as long as the `set` length is 1,
+ # all values are the same. Otherwise, the header is invalid.
+ lengths = {int(val) for val in content_length.split(",")}
+ if len(lengths) > 1:
+ raise InvalidHeader(
+ "Content-Length contained multiple "
+ "unmatching values (%s)" % content_length
+ )
+ length = lengths.pop()
+ except ValueError:
+ length = None
+ else:
+ if length < 0:
+ length = None
+
+ else: # if content_length is None
+ length = None
+
+ # Check for responses that shouldn't include a body
+ if (
+ self.status in (204, 304)
+ or 100 <= self.status < 200
+ or request_method == "HEAD"
+ ):
+ length = 0
+
+ return length
+
+ def read(
+ self,
+ amt: int | None = None,
+ decode_content: bool | None = None, # ignored because the browser always decodes
+ cache_content: bool = False,
+ ) -> bytes:
+ if (
+ self._closed
+ or self._response is None
+ or (isinstance(self._response.body, IOBase) and self._response.body.closed)
+ ):
+ return b""
+
+ with self._error_catcher():
+ # body has been preloaded as a string by XMLHttpRequest
+ if not isinstance(self._response.body, IOBase):
+ self.length_remaining = len(self._response.body)
+ self.length_is_certain = True
+ # wrap body in IOStream
+ self._response.body = BytesIO(self._response.body)
+ if amt is not None and amt >= 0:
+ # don't cache partial content
+ cache_content = False
+ data = self._response.body.read(amt)
+ if self.length_remaining is not None:
+ self.length_remaining = max(self.length_remaining - len(data), 0)
+ if (self.length_is_certain and self.length_remaining == 0) or len(
+ data
+ ) < amt:
+ # definitely finished reading, close response stream
+ self._response.body.close()
+ return typing.cast(bytes, data)
+ else: # read all we can (and cache it)
+ data = self._response.body.read()
+ if cache_content:
+ self._body = data
+ if self.length_remaining is not None:
+ self.length_remaining = max(self.length_remaining - len(data), 0)
+ if len(data) == 0 or (
+ self.length_is_certain and self.length_remaining == 0
+ ):
+ # definitely finished reading, close response stream
+ self._response.body.close()
+ return typing.cast(bytes, data)
+
+ def read_chunked(
+ self,
+ amt: int | None = None,
+ decode_content: bool | None = None,
+ ) -> typing.Generator[bytes]:
+ # chunked transfer encoding is handled by the browser
+ while True:
+ chunk = self.read(amt, decode_content)
+ if not chunk:
+ break
+ yield chunk
+
+ def release_conn(self) -> None:
+ if not self._pool or not self._connection:
+ return None
+
+ self._pool._put_conn(self._connection)
+ self._connection = None
+
+ def drain_conn(self) -> None:
+ self.close()
+
+ @property
+ def data(self) -> bytes:
+ if self._body:
+ return self._body
+ else:
+ return self.read(cache_content=True)
+
+ def json(self) -> typing.Any:
+ """
+ Deserializes the body of the HTTP response as a Python object.
+
+ The body of the HTTP response must be encoded using UTF-8, as per
+ `RFC 8259 Section 8.1 <https://www.rfc-editor.org/rfc/rfc8259#section-8.1>`_.
+
+ To use a custom JSON decoder pass the result of :attr:`HTTPResponse.data` to
+ your custom decoder instead.
+
+ If the body of the HTTP response is not decodable to UTF-8, a
+ `UnicodeDecodeError` will be raised. If the body of the HTTP response is not a
+ valid JSON document, a `json.JSONDecodeError` will be raised.
+
+ Read more :ref:`here <json_content>`.
+
+ :returns: The body of the HTTP response as a Python object.
+ """
+ data = self.data.decode("utf-8")
+ return _json.loads(data)
+
+ def close(self) -> None:
+ if not self._closed:
+ if isinstance(self._response.body, IOBase):
+ self._response.body.close()
+ if self._connection:
+ self._connection.close()
+ self._connection = None
+ self._closed = True
+
+ @contextmanager
+ def _error_catcher(self) -> typing.Generator[None]:
+ """
+ Catch Emscripten specific exceptions thrown by fetch.py,
+ instead re-raising urllib3 variants, so that low-level exceptions
+ are not leaked in the high-level api.
+
+ On exit, release the connection back to the pool.
+ """
+ from .fetch import _RequestError, _TimeoutError # avoid circular import
+
+ clean_exit = False
+
+ try:
+ yield
+ # If no exception is thrown, we should avoid cleaning up
+ # unnecessarily.
+ clean_exit = True
+ except _TimeoutError as e:
+ raise TimeoutError(str(e))
+ except _RequestError as e:
+ raise HTTPException(str(e))
+ finally:
+ # If we didn't terminate cleanly, we need to throw away our
+ # connection.
+ if not clean_exit:
+ # The response may not be closed but we're not going to use it
+ # anymore so close it now
+ if (
+ isinstance(self._response.body, IOBase)
+ and not self._response.body.closed
+ ):
+ self._response.body.close()
+ # release the connection back to the pool
+ self.release_conn()
+ else:
+ # If we have read everything from the response stream,
+ # return the connection back to the pool.
+ if (
+ isinstance(self._response.body, IOBase)
+ and self._response.body.closed
+ ):
+ self.release_conn()
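+
+
+# A rough usage sketch (the wrapper is normally obtained from
+# EmscriptenHTTPConnection.getresponse(); `conn` here is assumed to be such
+# a connection):
+#
+#     response = conn.getresponse()
+#     for chunk in response.stream(2**16):
+#         ...  # consume chunk
+#     response.release_conn()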