Diffstat (limited to 'uploader/phenotypes/views.py')
-rw-r--r--  uploader/phenotypes/views.py  169
1 file changed, 153 insertions, 16 deletions
diff --git a/uploader/phenotypes/views.py b/uploader/phenotypes/views.py
index 2cf0ca0..23bc682 100644
--- a/uploader/phenotypes/views.py
+++ b/uploader/phenotypes/views.py
@@ -1,4 +1,6 @@
 """Views handling ('classical') phenotypes."""# pylint: disable=[too-many-lines]
+import io
+import csv
 import sys
 import uuid
 import json
@@ -21,12 +23,14 @@ from gn_libs import jobs as gnlibs_jobs
 from gn_libs.jobs.jobs import JobNotFound
 from gn_libs.mysqldb import database_connection
 
+from werkzeug.datastructures import Headers
 from flask import (flash,
                    request,
                    jsonify,
                    redirect,
                    Blueprint,
-                   current_app as app)
+                   current_app as app,
+                   Response as FlaskResponse)
 
 from r_qtl import r_qtl2_qc as rqc
 from r_qtl import exceptions as rqe
@@ -522,6 +526,65 @@ def job_status(
 
 @phenotypesbp.route(
     "<int:species_id>/populations/<int:population_id>/phenotypes/datasets"
+    "/<int:dataset_id>/job/<uuid:job_id>/download-errors",
+    methods=["GET"])
+@require_login
+@with_dataset(
+    species_redirect_uri="species.populations.phenotypes.index",
+    population_redirect_uri="species.populations.phenotypes.select_population",
+    redirect_uri="species.populations.phenotypes.list_datasets")
+def download_errors(
+        species: dict,
+        population: dict,
+        dataset: dict,
+        job_id: uuid.UUID,
+        **kwargs):# pylint: disable=[unused-argument]
+    """Download the list of errors as a CSV file."""
+    with Redis.from_url(app.config["REDIS_URL"], decode_responses=True) as rconn:
+        try:
+            job = jobs.job(rconn, jobs.jobsnamespace(), str(job_id))
+            _prefix_ = jobs.jobsnamespace()
+            _jobid_ = job['jobid']
+            def __generate_chunks__():
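+                """Yield the job's errors from Redis as CSV text in chunks of
+                up to 1000 rows, so large error lists stream out without being
+                held in memory all at once."""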
+                _errors_ = (
+                    json.loads(error)
+                    for key in rconn.keys(
+                            f"{_prefix_}:{str(_jobid_)}:*:errors:*")
+                    for error in rconn.lrange(key, 0, -1))
+                _chunk_no_ = 0
+                _all_errors_printed_ = False
+                while not _all_errors_printed_:
+                    _chunk_ = []
+                    try:
+                        for _ in range(0, 1000):
+                            _chunk_.append(next(_errors_))
+                    except StopIteration:
+                        _all_errors_printed_ = True
+                        if len(_chunk_) <= 0:
+                            # End the generator cleanly; re-raising StopIteration
+                            # here would become a RuntimeError (PEP 479).
+                            return
+
+                    _out_ = io.StringIO()
+                    _writer_ = csv.DictWriter(_out_, fieldnames=tuple(_chunk_[0].keys()))
+                    if _chunk_no_ == 0:
+                        _writer_.writeheader()
+                    _writer_.writerows(_chunk_)
+                    _chunk_no_ += 1
+                    yield _out_.getvalue()
+                    if _all_errors_printed_:
+                        return
+
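+            # Serve the generated CSV as a file download; the filename is
+            # derived from the job type and job id.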
+            headers = Headers()
+            headers.set("Content-Disposition",
+                        "attachment",
+                        filename=f"{job['job-type']}_{job['jobid']}.csv")
+            return FlaskResponse(
+                __generate_chunks__(), mimetype="text/csv", headers=headers)
+        except jobs.JobNotFound as _jnf:
+            return render_template("jobs/job-not-found.html", job_id=job_id)
+
+
+@phenotypesbp.route(
+    "<int:species_id>/populations/<int:population_id>/phenotypes/datasets"
     "/<int:dataset_id>/job/<uuid:job_id>/review",
     methods=["GET"])
 @require_login
@@ -612,6 +675,12 @@ def load_phenotypes_success_handler(job):
         job_id=job["job_id"]))
 
 
+def proceed_to_job_status(job):
+    """A generic 'job success' handler for asynchronous phenotype jobs."""
+    app.logger.debug("The new job: %s", job)
+    return redirect(url_for("background-jobs.job_status", job_id=job["job_id"]))
+
+
 @phenotypesbp.route(
     "<int:species_id>/populations/<int:population_id>/phenotypes/datasets"
     "/<int:dataset_id>/load-data-to-database",
@@ -654,11 +723,6 @@ def load_data_to_database(
         def __handle_error__(resp):
             return render_template("http-error.html", *resp.json())
 
-        def __handle_success__(load_job):
-            app.logger.debug("The phenotypes loading job: %s", load_job)
-            return redirect(url_for(
-                "background-jobs.job_status", job_id=load_job["job_id"]))
-
 
         return request_token(
             token_uri=urljoin(oauth2client.authserver_uri(), "auth/token"),
@@ -689,7 +753,7 @@ def load_data_to_database(
                 Path(f"{uploads_dir(app)}/job_errors"),
                 worker_manager="gn_libs.jobs.launcher",
                 loglevel=_loglevel)
-        ).either(__handle_error__, __handle_success__)
+        ).either(__handle_error__, proceed_to_job_status)
 
 
 def update_phenotype_metadata(conn, metadata: dict):
@@ -1158,6 +1222,12 @@ def rerun_qtlreaper_success_handler(job):
     return return_to_dataset_view_handler(job, "QTLReaper ran successfully!")
 
 
+def delete_phenotypes_success_handler(job):
+    """Handle success running the 'delete-phenotypes' script."""
+    return return_to_dataset_view_handler(
+        job, "Phenotypes deleted successfully.")
+
+
 @phenotypesbp.route(
     "<int:species_id>/populations/<int:population_id>/phenotypes/datasets"
     "/<int:dataset_id>/delete",
@@ -1167,14 +1237,29 @@ def rerun_qtlreaper_success_handler(job):
     species_redirect_uri="species.populations.phenotypes.index",
     population_redirect_uri="species.populations.phenotypes.select_population",
     redirect_uri="species.populations.phenotypes.list_datasets")
-def delete_phenotypes(# pylint: disable=[unused-argument]
+def delete_phenotypes(# pylint: disable=[unused-argument, too-many-locals]
         species: dict,
         population: dict,
         dataset: dict,
         **kwargs
 ):
     """Delete the specified phenotype data."""
-    with database_connection(app.config["SQL_URI"]) as conn:
+    _dataset_page = redirect(url_for(
+        "species.populations.phenotypes.view_dataset",
+        species_id=species["SpeciesId"],
+        population_id=population["Id"],
+        dataset_id=dataset["Id"]))
+
+    def __handle_error__(resp):
+        flash(
+            "Error retrieving authorisation token. Phenotype deletion "
+            "failed. Please try again later.",
+            "alert alert-danger")
+        return _dataset_page
+
+    _jobs_db = app.config["ASYNCHRONOUS_JOBS_SQLITE_DB"]
+    with (database_connection(app.config["SQL_URI"]) as conn,
+          sqlite3.connection(_jobs_db) as jobsconn):
         form = request.form
         xref_ids = tuple(int(item) for item in set(form.getlist("xref_ids")))
 
@@ -1186,16 +1271,68 @@ def delete_phenotypes(# pylint: disable=[unused-argument]
                     population_id=population["Id"],
                     dataset_id=dataset["Id"]))
             case "delete":
-                # delete everything
-                # python3 -m scripts.phenotypes.delete_phenotypes <mariadburi> <authdburi> <speciesid> <populationid>
-                #
-                # delete selected phenotypes
-                # python3 -m scripts.phenotypes.delete_phenotypes <mariadburi> <authdburi> <speciesid> <populationid> --xref-ids-file=/path/to/file.txt
-                return "Would actually delete the data!"
+                _loglevel = logging.getLevelName(
+                    app.logger.getEffectiveLevel()).lower()
+                if form.get("confirm_delete_all_phenotypes", "") == "on":
+                    _cmd = ["--delete-all"]
+                else:
+                    # Set up the file of phenotype xref IDs to pass to the script.
+                    _xref_ids_file = Path(
+                        app.config["SCRATCH_DIRECTORY"],
+                        f"delete-phenotypes-{uuid.uuid4()}.txt")
+                    with _xref_ids_file.open(mode="w", encoding="utf8") as ptr:
+                        ptr.write("\n".join(str(_id) for _id in xref_ids))
+
+                    _cmd = ["--xref_ids_file", str(_xref_ids_file)]
+
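+                # Run the deletion as a background job: request an access token
+                # for the logged-in user, record the job in the jobs database,
+                # then hand it off to the gn_libs job launcher.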
+                _job_id = uuid.uuid4()
+                return request_token(
+                    token_uri=urljoin(
+                        oauth2client.authserver_uri(), "auth/token"),
+                    user_id=session.user_details()["user_id"]
+                ).then(
+                    lambda token: gnlibs_jobs.initialise_job(
+                        jobsconn,
+                        _job_id,
+                        [
+                            sys.executable,
+                            "-u",
+                            "-m",
+                            "scripts.phenotypes.delete_phenotypes",
+                            "--log-level", _loglevel,
+                            app.config["SQL_URI"],
+                            str(species["SpeciesId"]),
+                            str(population["Id"]),
+                            str(dataset["Id"]),
+                            app.config["AUTH_SERVER_URL"],
+                            token["access_token"]] + _cmd,
+                        "delete-phenotypes",
+                        extra_meta={
+                            "species_id": species["SpeciesId"],
+                            "population_id": population["Id"],
+                            "dataset_id": dataset["Id"],
+                            "success_handler": (
+                                "uploader.phenotypes.views."
+                                "delete_phenotypes_success_handler")
+                        },
+                        external_id=session.logged_in_user_id())
+                ).then(
+                    lambda _job: gnlibs_jobs.launch_job(
+                        _job,
+                        _jobs_db,
+                        Path(f"{uploads_dir(app)}/job_errors"),
+                        worker_manager="gn_libs.jobs.launcher",
+                        loglevel=_loglevel)
+                ).either(__handle_error__, proceed_to_job_status)
             case _:
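+                # Default: show the confirmation page, listing details of the
+                # phenotypes selected for deletion.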
+                _phenos: tuple[dict, ...] = tuple()
+                if len(xref_ids) > 0:
+                    _phenos = dataset_phenotypes(
+                        conn, population["Id"], dataset["Id"], xref_ids=xref_ids)
+
                 return render_template(
                     "phenotypes/confirm-delete-phenotypes.html",
                     species=species,
                     population=population,
                     dataset=dataset,
-                    phenotypes=xref_ids)
+                    phenotypes=_phenos)