author     Frederick Muriuki Muriithi  2025-02-28 12:44:55 -0600
committer  Frederick Muriuki Muriithi  2025-02-28 12:44:55 -0600
commit     ef1436bca390ee916b7119036ab2d3de8d1344fb (patch)
tree       8ed0197669d51d2f825080bfbda10e35fa2403aa /scripts
parent     4edc4450e6fe578212600ad9733f0fa6eb3ffc9f (diff)
download   genenetwork3-ef1436bca390ee916b7119036ab2d3de8d1344fb.tar.gz
Fix linting errors.
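The linting errors in question appear to be pylint's missing-timeout warnings (W3101), which flag requests calls made without a timeout argument: without one, a call to an unresponsive server can block indefinitely. A minimal sketch of the pattern this commit applies, with an illustrative URL that is not from the repository:

    import requests

    # requests.get("https://example.com/api")  # no timeout: pylint flags W3101
    # Passing a timeout (in seconds) makes the call fail fast instead:
    response = requests.get("https://example.com/api", timeout=300)
    response.raise_for_status()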
Diffstat (limited to 'scripts')
-rw-r--r--  scripts/pub_med.py          | 6 +++---
-rwxr-xr-x  scripts/update_rif_table.py | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/scripts/pub_med.py b/scripts/pub_med.py
index 82b1730..0a94355 100644
--- a/scripts/pub_med.py
+++ b/scripts/pub_med.py
@@ -155,8 +155,8 @@ def fetch_id_lossy_search(query, db_name, max_results):
 
     try:
         response = requests.get(f"http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db={db_name}&retmode=json&retmax={max_results}&term={query}",
-                                headers={"content-type": "application/json"}
-                                )
+                                headers={"content-type": "application/json"},
+                                timeout=300)
         return response["esearchresult"]["idlist"]
 
     except requests.exceptions.RequestException as error:
@@ -174,7 +174,7 @@ def search_pubmed_lossy(pubmed_id, db_name):
     - dict: Records fetched based on PubMed ID.
     """
     url = f'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db={db_name}&id={",".join(pubmed_id)}&retmode=json'
-    response = requests.get(url)
+    response = requests.get(url, timeout=300)
     response.raise_for_status()
     data = response.json()
     if db_name.lower() == "pmc":
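In passing, the context above shows a pre-existing issue that this commit does not touch: fetch_id_lossy_search subscripts the requests.Response object directly (response["esearchresult"]["idlist"]), but Response is not subscriptable, so the JSON body needs decoding first. A corrected return, sketched here as an assumption rather than anything in the repository, would look like:

    response.raise_for_status()
    return response.json()["esearchresult"]["idlist"]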
diff --git a/scripts/update_rif_table.py b/scripts/update_rif_table.py
index 24edf3d..f936f5b 100755
--- a/scripts/update_rif_table.py
+++ b/scripts/update_rif_table.py
@@ -35,7 +35,7 @@ VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
 
 def download_file(url: str, dest: pathlib.Path):
     """Saves the contents of url in dest"""
-    with requests.get(url, stream=True) as resp:
+    with requests.get(url, stream=True, timeout=300) as resp:
         resp.raise_for_status()
         with open(dest, "wb") as downloaded_file:
             for chunk in resp.iter_content(chunk_size=8192):
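One caveat on download_file: requests' timeout bounds the connection attempt and each individual read between bytes, not the total transfer, so a slow but live server may legitimately stream for longer than 300 seconds. A hedged usage sketch, where both the URL and the destination path are illustrative assumptions:

    import pathlib

    # Illustrative call; neither the URL nor the file name is taken
    # from the repository:
    download_file("https://example.com/generifs_basic.gz",
                  pathlib.Path("/tmp/generifs_basic.gz"))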