Diffstat (limited to 'uploader/phenotypes')
-rw-r--r--  uploader/phenotypes/views.py  | 41
 1 file changed, 37 insertions(+), 4 deletions(-)
diff --git a/uploader/phenotypes/views.py b/uploader/phenotypes/views.py
index 70f375e..b176890 100644
--- a/uploader/phenotypes/views.py
+++ b/uploader/phenotypes/views.py
@@ -8,9 +8,9 @@ import tempfile
 from typing import Any
 from pathlib import Path
 from zipfile import ZipFile
-from urllib.parse import urljoin
 from functools import wraps, reduce
 from logging import INFO, ERROR, DEBUG, FATAL, CRITICAL, WARNING
+from urllib.parse import urljoin, urlparse, ParseResult, urlunparse, urlencode
 
 import datetime
 from datetime import timedelta
@@ -1153,15 +1153,48 @@ def load_data_success(
         job_id: uuid.UUID,
         **kwargs
 ):# pylint: disable=[unused-argument]
-    with sqlite3.connection(app.config["ASYNCHRONOUS_JOBS_SQLITE_DB"]) as conn:
+    with (database_connection(app.config["SQL_URI"]) as conn,
+          sqlite3.connection(app.config["ASYNCHRONOUS_JOBS_SQLITE_DB"])
+          as jobsconn):
         try:
-            job = gnlibs_jobs.job(conn, job_id, fulldetails=True)
+            gn2_uri = urlparse(app.config["GN2_SERVER_URL"])
+            job = gnlibs_jobs.job(jobsconn, job_id, fulldetails=True)
             app.logger.debug("THE JOB: %s", job)
+            _xref_ids = tuple(str(item) for item in
+                              json.loads(job["metadata"].get("xref_ids", "[]")))
+            _publication = fetch_publication_by_id(
+                conn, int(job["metadata"].get("publication_id", "0")))
+            _search_terms = (item for item in
+                             (str(_publication["PubMed_ID"] or ""),
+                              (_publication["Authors"] or ""),
+                              (_publication["Title"] or ""))
+                             if item != "")
             return render_template("phenotypes/load-phenotypes-success.html",
                                    species=species,
                                    population=population,
                                    dataset=dataset,
                                    job=job,
-                                   gn2_server_url=app.config["GN2_SERVER_URL"])
+                                   search_page_uri=urlunparse(ParseResult(
+                                       scheme=gn2_uri.scheme,
+                                       netloc=gn2_uri.netloc,
+                                       path="/search",
+                                       params="",
+                                       query=urlencode({
+                                           "species": species["Name"],
+                                           "group": population["Name"],
+                                           "type": "Phenotypes",
+                                           "dataset": dataset["Name"],
+                                           "search_terms_or": (
+                                               # Very long URLs will cause
+                                               # errors.
+                                               " ".join(_xref_ids)
+                                               if len(_xref_ids) <= 100
+                                               else ""),
+                                           "search_terms_and": " ".join(
+                                               _search_terms).strip(),
+                                           "accession_id": "None",
+                                           "FormID": "searchResult"
+                                       }),
+                                       fragment="")))
         except JobNotFound as jnf:
             return render_template("jobs/job-not-found.html", job_id=job_id)
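For reference, the query construction introduced above can be exercised in isolation. The sketch below reproduces the urlparse/urlencode/urlunparse pattern with placeholder species, population and dataset names and a hypothetical build_search_uri helper; it is illustrative only and is not code from the uploader itself.

    """Minimal sketch of the search-URI construction used in load_data_success."""
    from urllib.parse import urlparse, urlunparse, urlencode, ParseResult

    def build_search_uri(gn2_server_url: str, xref_ids: tuple) -> str:
        # Placeholder helper (not part of the uploader) mirroring the view's logic.
        gn2_uri = urlparse(gn2_server_url)
        return urlunparse(ParseResult(
            scheme=gn2_uri.scheme,
            netloc=gn2_uri.netloc,
            path="/search",
            params="",
            query=urlencode({
                "species": "mouse",        # placeholder for species["Name"]
                "group": "BXD",            # placeholder for population["Name"]
                "type": "Phenotypes",
                "dataset": "BXDPublish",   # placeholder for dataset["Name"]
                # Omit the ID list when it is long, to keep the URL short.
                "search_terms_or": (" ".join(xref_ids)
                                    if len(xref_ids) <= 100 else ""),
                "search_terms_and": "",
                "accession_id": "None",
                "FormID": "searchResult"}),
            fragment=""))

    print(build_search_uri("https://genenetwork.org", ("10001", "10007")))
    # https://genenetwork.org/search?species=mouse&group=BXD&type=Phenotypes&...

Building the query with urlencode also percent-encodes spaces and punctuation in the author/title search terms, which plain string concatenation against GN2_SERVER_URL would not guarantee.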