Diffstat (limited to 'uploader/phenotypes/views.py')
-rw-r--r-- | uploader/phenotypes/views.py | 87
1 files changed, 87 insertions, 0 deletions
diff --git a/uploader/phenotypes/views.py b/uploader/phenotypes/views.py
index dc2df8f..407493c 100644
--- a/uploader/phenotypes/views.py
+++ b/uploader/phenotypes/views.py
@@ -1,8 +1,10 @@
 """Views handling ('classical') phenotypes."""
 import sys
+import csv
 import uuid
 import json
 import datetime
+import tempfile
 from typing import Any
 from pathlib import Path
 from zipfile import ZipFile
@@ -13,6 +15,7 @@ from redis import Redis
 from pymonad.either import Left
 from requests.models import Response
 from MySQLdb.cursors import DictCursor
+from werkzeug.utils import secure_filename
 from gn_libs.mysqldb import database_connection
 from flask import (flash,
                    request,
@@ -20,6 +23,7 @@ from flask import (flash,
                    jsonify,
                    redirect,
                    Blueprint,
+                   send_file,
                    current_app as app)
 
 # from r_qtl import r_qtl2 as rqtl2
@@ -36,6 +40,7 @@ from uploader.datautils import safe_int, enumerate_sequence
 from uploader.species.models import all_species, species_by_id
 from uploader.monadic_requests import make_either_error_handler
 from uploader.request_checks import with_species, with_population
+from uploader.samples.models import samples_by_species_and_population
 from uploader.input_validation import (encode_errors,
                                        decode_errors,
                                        is_valid_representative_name)
@@ -46,6 +51,7 @@ from .models import (dataset_by_id,
                      save_new_dataset,
                      dataset_phenotypes,
                      datasets_by_population,
+                     phenotypes_data_by_ids,
                      phenotype_publication_data)
 
 phenotypesbp = Blueprint("phenotypes", __name__)
@@ -844,3 +850,84 @@ def edit_phenotype_data(# pylint: disable=[unused-argument]
         population_id=population["Id"],
         dataset_id=dataset["Id"],
         xref_id=xref_id))
+
+
+def process_phenotype_data_for_download(pheno: dict) -> dict:
+    """Sanitise data for download."""
+    return {
+        "UniqueIdentifier": f"phId:{pheno['Id']}::xrId:{pheno['xref_id']}",
+        **{
+            key: val for key, val in pheno.items()
+            if key not in ("Id", "xref_id", "data", "Units")
+        },
+        **{
+            data_item["StrainName"]: data_item["value"]
+            for data_item in pheno.get("data", {}).values()
+        }
+    }
+
+@phenotypesbp.route(
+    "<int:species_id>/populations/<int:population_id>/phenotypes/datasets"
+    "/<int:dataset_id>/download",
+    methods=["POST"])
+@require_login
+@with_dataset(
+    species_redirect_uri="species.populations.phenotypes.index",
+    population_redirect_uri="species.populations.phenotypes.select_population",
+    redirect_uri="species.populations.phenotypes.list_datasets")
+def download_phenotype_data(# pylint: disable=[unused-argument]
+        species: dict,
+        population: dict,
+        dataset: dict,
+        **kwargs
+):
+    formdata = request.json
+    with database_connection(app.config["SQL_URI"]) as conn:
+        samples_list = [
+            sample["Name"] for sample in samples_by_species_and_population(
+                conn, species["SpeciesId"], population["Id"])]
+        data = (
+            process_phenotype_data_for_download(pheno)
+            for pheno in phenotypes_data_by_ids(conn, tuple({
+                "population_id": population["Id"],
+                "phenoid": row["phenotype_id"],
+                "xref_id": row["xref_id"]
+            } for row in formdata)))
+
+        with (tempfile.TemporaryDirectory(
+                prefix=app.config["TEMPORARY_DIRECTORY"]) as tmpdir):
+            filename = Path(tmpdir).joinpath("tempfile.tsv")
+            with open(filename, mode="w") as outfile:
+                outfile.write(
+                    "# **DO NOT** delete the 'UniqueIdentifier' field. It is used "
+                    "by the system to identify and edit the correct row(s) in the "
+                    "database.\n")
+                outfile.write(
+                    "# The '…_description' fields are useful for you to figure out "
+                    "what row you are working on. Changing any of these fields will "
+                    "also update the database, so do be careful.\n")
+                outfile.write(
+                    "# Leave a field empty to delete the value in the database.\n")
+                outfile.write(
+                    "# Any line beginning with a '#' character is considered a "
+                    "comment line. This line, and all the lines above it, are "
+                    "all comment lines. Comment lines will be ignored.\n")
+                writer = csv.DictWriter(outfile,
+                                        fieldnames=[
+                                            "UniqueIdentifier",
+                                            "Post_publication_description",
+                                            "Pre_publication_abbreviation",
+                                            "Pre_publication_description",
+                                            "Original_description",
+                                            "Post_publication_abbreviation"
+                                        ] + samples_list,
+                                        dialect="excel-tab")
+                writer.writeheader()
+                writer.writerows(data)
+                outfile.flush()
+
+            return send_file(
+                filename,
+                mimetype="text/csv",
+                as_attachment=True,
+                download_name=secure_filename(f"{dataset['Name']}_data"))
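For orientation, below is a minimal sketch (not part of the commit) of how the new process_phenotype_data_for_download() helper flattens one phenotype record into a row of the downloaded TSV: the phenotype and cross-reference ids are combined into the UniqueIdentifier column, the Id, xref_id, data and Units keys are dropped, and each strain measurement becomes its own column. The record shape and values used here are assumptions for illustration only; real records come from phenotypes_data_by_ids(), and the POST body handled by the view is a list of {"phenotype_id": ..., "xref_id": ...} objects.

# Hypothetical example of the row-flattening helper added in this commit.
from uploader.phenotypes.views import process_phenotype_data_for_download

pheno = {
    "Id": 10007,                      # hypothetical phenotype id
    "xref_id": 7,                     # hypothetical cross-reference id
    "Post_publication_description": "Example description",
    "Units": "grams",                 # excluded from the download
    "data": {
        1: {"StrainName": "BXD1", "value": 12.3},
        2: {"StrainName": "BXD2", "value": 15.8},
    },
}

row = process_phenotype_data_for_download(pheno)
# row == {
#     "UniqueIdentifier": "phId:10007::xrId:7",
#     "Post_publication_description": "Example description",
#     "BXD1": 12.3,
#     "BXD2": 15.8,
# }

In the actual download, DictWriter pairs such rows with a header made of the description fields plus one column per sample from samples_by_species_and_population(), and the leading comment lines tell curators that the UniqueIdentifier column is what ties an edited row back to the database.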