about summary refs log tree commit diff
diff options
context:
space:
mode:
author	Frederick Muriuki Muriithi	2025-06-10 15:56:48 -0500
committer	Frederick Muriuki Muriithi	2025-06-10 17:19:43 -0500
commit	d2b2d2396fbaba55d6027f3a8574c166f4dede27 (patch)
tree	b5330ff8203869a09a7aef8dfe0befae5d875dea
parent	3ea81d0fbc5b93295f291315c1d05fe7a1911948 (diff)
download	gn-uploader-d2b2d2396fbaba55d6027f3a8574c166f4dede27.tar.gz
Provide handler for `load-new-phenotypes-data` jobs.
-rw-r--r--	uploader/phenotypes/views.py	41
1 file changed, 40 insertions, 1 deletion
diff --git a/uploader/phenotypes/views.py b/uploader/phenotypes/views.py
index 333a1e6..65d9371 100644
--- a/uploader/phenotypes/views.py
+++ b/uploader/phenotypes/views.py
@@ -23,6 +23,7 @@ from werkzeug.utils import secure_filename
from gn_libs import sqlite3
from gn_libs import jobs as gnlibs_jobs
+from gn_libs.jobs.jobs import JobNotFound
from gn_libs.mysqldb import database_connection
from gn_libs import monadic_requests as mrequests
@@ -613,6 +614,16 @@ def review_job_data(
activelink="add-phenotypes")
+def load_phenotypes_success_handler(job):
+ """Handle loading new phenotypes into the database successfully."""
+ return redirect(url_for(
+ "species.populations.phenotypes.load_data_success",
+ species_id=job["metadata"]["species_id"],
+ population_id=job["metadata"]["population_id"],
+ dataset_id=job["metadata"]["dataset_id"],
+ job_id=job["job_id"]))
+
+
@phenotypesbp.route(
"<int:species_id>/populations/<int:population_id>/phenotypes/datasets"
"/<int:dataset_id>/load-data-to-database",
@@ -699,7 +710,10 @@ def load_data_to_database(
"dataset_id": dataset["Id"],
"bundle_file": _meta["bundle"],
"authserver": oauth2client.authserver_uri(),
- "token": token["access_token"]
+ "token": token["access_token"],
+ "success_handler": (
+ "uploader.phenotypes.views"
+ ".load_phenotypes_success_handler")
})
).then(
lambda job: gnlibs_jobs.launch_job(
@@ -1116,3 +1130,28 @@ def edit_upload_phenotype_data(# pylint: disable=[unused-argument]
return redirect(url_for("background-jobs.job_status",
job_id=job_id,
job_type="phenotype-bulk-edit"))
+
+
+@phenotypesbp.route(
+ "<int:species_id>/populations/<int:population_id>/phenotypes/datasets"
+ "/<int:dataset_id>/load-data-success/<uuid:job_id>",
+ methods=["GET"])
+@require_login
+@with_dataset(
+ species_redirect_uri="species.populations.phenotypes.index",
+ population_redirect_uri="species.populations.phenotypes.select_population",
+ redirect_uri="species.populations.phenotypes.list_datasets")
+def load_data_success(
+ species: dict,
+ population: dict,
+ dataset: dict,
+ job_id: uuid.UUID,
+ **kwargs
+):# pylint: disable=[unused-argument]
+ with sqlite3.connection(app.config["ASYNCHRONOUS_JOBS_SQLITE_DB"]) as conn:
+ try:
+ job = gnlibs_jobs.job(conn, job_id, fulldetails=True)
+ app.logger.debug("THE JOB: %s", job)
+ return jsonify(job)
+ except JobNotFound as jnf:
+ return render_template("jobs/job-not-found.html", job_id=job_id)