author     S. Solomon Darnell   2025-03-28 21:52:21 -0500
committer  S. Solomon Darnell   2025-03-28 21:52:21 -0500
commit     4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree       ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/setuptools/tests/config/downloads/__init__.py
parent     cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
two versions of R2R are here (HEAD, master)
Diffstat (limited to '.venv/lib/python3.12/site-packages/setuptools/tests/config/downloads/__init__.py')
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/tests/config/downloads/__init__.py  59
1 file changed, 59 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/setuptools/tests/config/downloads/__init__.py b/.venv/lib/python3.12/site-packages/setuptools/tests/config/downloads/__init__.py
new file mode 100644
index 00000000..00a16423
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/tests/config/downloads/__init__.py
@@ -0,0 +1,59 @@
+from __future__ import annotations
+
+import re
+import time
+from pathlib import Path
+from urllib.error import HTTPError
+from urllib.request import urlopen
+
+__all__ = ["DOWNLOAD_DIR", "retrieve_file", "output_file", "urls_from_file"]
+
+
+NAME_REMOVE = ("http://", "https://", "github.com/", "/raw/")
+DOWNLOAD_DIR = Path(__file__).parent
+
+
+# ----------------------------------------------------------------------
+# Please update ./preload.py accordingly when modifying this file
+# ----------------------------------------------------------------------
+
+
+def output_file(url: str, download_dir: Path = DOWNLOAD_DIR) -> Path:
+ file_name = url.strip()
+ for part in NAME_REMOVE:
+ file_name = file_name.replace(part, '').strip().strip('/:').strip()
+ return Path(download_dir, re.sub(r"[^\-_\.\w\d]+", "_", file_name))
+
+
+def retrieve_file(url: str, download_dir: Path = DOWNLOAD_DIR, wait: float = 5) -> Path:
+ path = output_file(url, download_dir)
+ if path.exists():
+ print(f"Skipping {url} (already exists: {path})")
+ else:
+ download_dir.mkdir(exist_ok=True, parents=True)
+ print(f"Downloading {url} to {path}")
+ try:
+ download(url, path)
+ except HTTPError:
+ time.sleep(wait) # wait a few seconds and try again.
+ download(url, path)
+ return path
+
+
+def urls_from_file(list_file: Path) -> list[str]:
+ """``list_file`` should be a text file where each line corresponds to a URL to
+ download.
+ """
+ print(f"file: {list_file}")
+ content = list_file.read_text(encoding="utf-8")
+ return [url for url in content.splitlines() if not url.startswith("#")]
+
+
+def download(url: str, dest: Path):
+ with urlopen(url) as f:
+ data = f.read()
+
+ with open(dest, "wb") as f:
+ f.write(data)
+
+ assert Path(dest).exists()
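
For reference, a minimal usage sketch of the helpers introduced above, assuming the package is importable as setuptools.tests.config.downloads and using a hypothetical urls.txt list file and example URL (neither is part of this commit); note that running it performs a real network download:

    from pathlib import Path

    from setuptools.tests.config.downloads import (
        DOWNLOAD_DIR,
        output_file,
        retrieve_file,
        urls_from_file,
    )

    # Hypothetical list file: one URL per line; lines starting with "#" are
    # skipped by urls_from_file.
    list_file = Path("urls.txt")
    list_file.write_text(
        "# files used by the config tests\n"
        "https://github.com/pypa/setuptools/raw/main/pyproject.toml\n",
        encoding="utf-8",
    )

    for url in urls_from_file(list_file):
        # output_file derives a flat, filesystem-safe file name from the URL.
        print(output_file(url).name)
        # retrieve_file downloads into DOWNLOAD_DIR (skipping files that
        # already exist there) and retries once after a short wait on HTTPError.
        path = retrieve_file(url)
        assert path.parent == DOWNLOAD_DIR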