Diffstat (limited to 'uploader/phenotypes/views.py')
-rw-r--r-- | uploader/phenotypes/views.py | 693
1 file changed, 596 insertions(+), 97 deletions(-)
diff --git a/uploader/phenotypes/views.py b/uploader/phenotypes/views.py index f10ba09..a50a8e7 100644 --- a/uploader/phenotypes/views.py +++ b/uploader/phenotypes/views.py @@ -1,21 +1,29 @@ """Views handling ('classical') phenotypes.""" import sys +import csv import uuid import json import datetime +import tempfile +from typing import Any from pathlib import Path -from functools import wraps +from zipfile import ZipFile +from functools import wraps, reduce from logging import INFO, ERROR, DEBUG, FATAL, CRITICAL, WARNING from redis import Redis +from pymonad.either import Left from requests.models import Response from MySQLdb.cursors import DictCursor +from werkzeug.utils import secure_filename from gn_libs.mysqldb import database_connection from flask import (flash, request, url_for, + jsonify, redirect, Blueprint, + send_file, current_app as app) # from r_qtl import r_qtl2 as rqtl2 @@ -27,12 +35,12 @@ from uploader.files import save_file#, fullpath from uploader.ui import make_template_renderer from uploader.oauth2.client import oauth2_post from uploader.authorisation import require_login +from uploader.route_utils import generic_select_population +from uploader.datautils import safe_int, enumerate_sequence from uploader.species.models import all_species, species_by_id from uploader.monadic_requests import make_either_error_handler from uploader.request_checks import with_species, with_population -from uploader.datautils import safe_int, order_by_family, enumerate_sequence -from uploader.population.models import (populations_by_species, - population_by_species_and_id) +from uploader.samples.models import samples_by_species_and_population from uploader.input_validation import (encode_errors, decode_errors, is_valid_representative_name) @@ -42,11 +50,16 @@ from .models import (dataset_by_id, phenotypes_count, save_new_dataset, dataset_phenotypes, - datasets_by_population) + datasets_by_population, + phenotypes_data_by_ids, + phenotype_publication_data) phenotypesbp = Blueprint("phenotypes", __name__) render_template = make_template_renderer("phenotypes") +_FAMILIES_WITH_SE_AND_N_ = ( + "Reference Populations (replicate average, SE, N)",) + @phenotypesbp.route("/phenotypes", methods=["GET"]) @require_login def index(): @@ -54,10 +67,16 @@ def index(): with database_connection(app.config["SQL_URI"]) as conn: if not bool(request.args.get("species_id")): return render_template("phenotypes/index.html", - species=order_by_family(all_species(conn)), + species=all_species(conn), activelink="phenotypes") - species = species_by_id(conn, request.args.get("species_id")) + species_id = request.args.get("species_id") + if species_id == "CREATE-SPECIES": + return redirect(url_for( + "species.create_species", + return_to="species.populations.phenotypes.select_population")) + + species = species_by_id(conn, species_id) if not bool(species): flash("No such species!", "alert-danger") return redirect(url_for("species.populations.phenotypes.index")) @@ -71,27 +90,14 @@ def index(): @with_species(redirect_uri="species.populations.phenotypes.index") def select_population(species: dict, **kwargs):# pylint: disable=[unused-argument] """Select the population for your phenotypes.""" - with database_connection(app.config["SQL_URI"]) as conn: - if not bool(request.args.get("population_id")): - return render_template("phenotypes/select-population.html", - species=species, - populations=order_by_family( - populations_by_species( - conn, species["SpeciesId"]), - order_key="FamilyOrder"), - activelink="phenotypes") - - 
population = population_by_species_and_id( - conn, species["SpeciesId"], int(request.args["population_id"])) - if not bool(population): - flash("No such population found!", "alert-danger") - return redirect(url_for( - "species.populations.phenotypes.select_population", - species_id=species["SpeciesId"])) - - return redirect(url_for("species.populations.phenotypes.list_datasets", - species_id=species["SpeciesId"], - population_id=population["Id"])) + return generic_select_population( + species, + "phenotypes/select-population.html", + request.args.get("population_id") or "", + "species.populations.phenotypes.select_population", + "species.populations.phenotypes.list_datasets", + "phenotypes", + "No such population found!") @@ -189,12 +195,10 @@ def view_dataset(# pylint: disable=[unused-argument] phenotype_count=phenotypes_count( conn, population["Id"], dataset["Id"]), phenotypes=enumerate_sequence( - dataset_phenotypes(conn, - population["Id"], - dataset["Id"], - offset=start_at, - limit=count), - start=start_at+1), + dataset_phenotypes( + conn, + population["Id"], + dataset["Id"])), start_from=start_at, count=count, activelink="view-dataset") @@ -218,16 +222,31 @@ def view_phenotype(# pylint: disable=[unused-argument] ): """View an individual phenotype from the dataset.""" def __render__(privileges): + phenotype = phenotype_by_id(conn, + species["SpeciesId"], + population["Id"], + dataset["Id"], + xref_id) + def __non_empty__(value) -> bool: + if isinstance(value, str): + return value.strip() != "" + return bool(value) + return render_template( "phenotypes/view-phenotype.html", species=species, population=population, dataset=dataset, - phenotype=phenotype_by_id(conn, - species["SpeciesId"], - population["Id"], - dataset["Id"], - xref_id), + xref_id=xref_id, + phenotype=phenotype, + has_se=any(bool(item.get("error")) for item in phenotype["data"]), + publish_data={ + key.replace("_", " "): val + for key,val in + (phenotype_publication_data(conn, phenotype["Id"]) or {}).items() + if (key in ("PubMed_ID", "Authors", "Title", "Journal") + and __non_empty__(val)) + }, privileges=(privileges ### For demo! 
Do not commit this part + ("group:resource:edit-resource", @@ -307,8 +326,7 @@ def create_dataset(species: dict, population: dict, **kwargs):# pylint: disable= population_id=population["Id"])) -def process_phenotypes_rqtl2_bundle( - rconn: Redis, species: dict, population: dict, dataset: dict): +def process_phenotypes_rqtl2_bundle(error_uri): """Process phenotypes from the uploaded R/qtl2 bundle.""" _redisuri = app.config["REDIS_URL"] _sqluri = app.config["SQL_URI"] @@ -317,64 +335,59 @@ def process_phenotypes_rqtl2_bundle( phenobundle = save_file(request.files["phenotypes-bundle"], Path(app.config["UPLOAD_FOLDER"])) rqc.validate_bundle(phenobundle) + return phenobundle except AssertionError as _aerr: app.logger.debug("File upload error!", exc_info=True) flash("Expected a zipped bundle of files with phenotypes' " "information.", "alert-danger") - return add_phenos_uri + return error_uri except rqe.RQTLError as rqtlerr: app.logger.debug("Bundle validation error!", exc_info=True) flash("R/qtl2 Error: " + " ".join(rqtlerr.args), "alert-danger") - return add_phenos_uri - - _jobid = uuid.uuid4() - _namespace = jobs.jobsnamespace() - _ttl_seconds = app.config["JOBS_TTL_SECONDS"] - _job = jobs.launch_job( - jobs.initialise_job( - rconn, - _namespace, - str(_jobid), - [sys.executable, "-m", "scripts.rqtl2.phenotypes_qc", _sqluri, - _redisuri, _namespace, str(_jobid), str(species["SpeciesId"]), - str(population["Id"]), - # str(dataset["Id"]), - str(phenobundle), - "--loglevel", - { - INFO: "INFO", - ERROR: "ERROR", - DEBUG: "DEBUG", - FATAL: "FATAL", - CRITICAL: "CRITICAL", - WARNING: "WARNING" - }[app.logger.getEffectiveLevel()], - "--redisexpiry", - str(_ttl_seconds)], "phenotype_qc", _ttl_seconds, - {"job-metadata": json.dumps({ - "speciesid": species["SpeciesId"], - "populationid": population["Id"], - "datasetid": dataset["Id"], - "bundle": str(phenobundle.absolute())})}), - _redisuri, - f"{app.config['UPLOAD_FOLDER']}/job_errors") - - app.logger.debug("JOB DETAILS: %s", _job) - - return redirect(url_for("species.populations.phenotypes.job_status", - species_id=species["SpeciesId"], - population_id=population["Id"], - dataset_id=dataset["Id"], - job_id=str(_job["jobid"]))) - - -def process_phenotypes_individual_files(rconn, species, population, dataset): + return error_uri + + +def process_phenotypes_individual_files(error_uri): """Process the uploaded individual files.""" - ## Handle huge file uploads here... - ## Convert files and settings to R/qtl2 bundle - ## Use same processing as R/qtl2 bundle (after some refactoring) - raise NotImplementedError("Implement this!") + form = request.form + cdata = { + "sep": form["file-separator"], + "comment.char": form["file-comment-character"], + "na.strings": form["file-na"].split(" "), + } + bundlepath = Path(app.config["UPLOAD_FOLDER"], + f"{str(uuid.uuid4()).replace('-', '')}.zip") + with ZipFile(bundlepath,mode="w") as zfile: + for rqtlkey, formkey in (("phenocovar", "phenotype-descriptions"), + ("pheno", "phenotype-data"), + ("phenose", "phenotype-se"), + ("phenonum", "phenotype-n")): + if form.get("resumable-upload", False): + # Chunked upload of large files was used + filedata = json.loads(form[formkey]) + zfile.write( + Path(app.config["UPLOAD_FOLDER"], filedata["uploaded-file"]), + arcname=filedata["original-name"]) + cdata[rqtlkey] = cdata.get(rqtlkey, []) + [filedata["original-name"]] + else: + # TODO: Check this path: fix any bugs. 
+ _sentfile = request.files[formkey] + if not bool(_sentfile): + flash(f"Expected file ('{formkey}') was not provided.", + "alert-danger") + return error_uri + + filepath = save_file( + _sentfile, Path(app.config["UPLOAD_FOLDER"]), hashed=False) + zfile.write( + Path(app.config["UPLOAD_FOLDER"], filepath), + arcname=filepath.name) + cdata[rqtlkey] = cdata.get(rqtlkey, []) + [filepath.name] + + zfile.writestr("control_data.json", data=json.dumps(cdata, indent=2)) + + return bundlepath @phenotypesbp.route( @@ -415,16 +428,61 @@ def add_phenotypes(species: dict, population: dict, dataset: dict, **kwargs):# p "December"), current_month=today.strftime("%B"), current_year=int(today.strftime("%Y")), - families_with_se_and_n=( - "Reference Populations (replicate average, SE, N)",), + families_with_se_and_n=_FAMILIES_WITH_SE_AND_N_, use_bundle=use_bundle, activelink="add-phenotypes") - if use_bundle: - return process_phenotypes_rqtl2_bundle( - rconn, species, population, dataset) - return process_phenotypes_individual_files( - rconn, species, population, dataset) + phenobundle = (process_phenotypes_rqtl2_bundle(add_phenos_uri) + if use_bundle else + process_phenotypes_individual_files(add_phenos_uri)) + + _jobid = uuid.uuid4() + _namespace = jobs.jobsnamespace() + _ttl_seconds = app.config["JOBS_TTL_SECONDS"] + _job = jobs.launch_job( + jobs.initialise_job( + rconn, + _namespace, + str(_jobid), + [sys.executable, "-m", "scripts.rqtl2.phenotypes_qc", _sqluri, + _redisuri, _namespace, str(_jobid), str(species["SpeciesId"]), + str(population["Id"]), + # str(dataset["Id"]), + str(phenobundle), + "--loglevel", + { + INFO: "INFO", + ERROR: "ERROR", + DEBUG: "DEBUG", + FATAL: "FATAL", + CRITICAL: "CRITICAL", + WARNING: "WARNING" + }[app.logger.getEffectiveLevel()], + "--redisexpiry", + str(_ttl_seconds)], "phenotype_qc", _ttl_seconds, + {"job-metadata": json.dumps({ + "speciesid": species["SpeciesId"], + "populationid": population["Id"], + "datasetid": dataset["Id"], + "bundle": str(phenobundle.absolute())})}), + _redisuri, + f"{app.config['UPLOAD_FOLDER']}/job_errors") + + app.logger.debug("JOB DETAILS: %s", _job) + jobstatusuri = url_for("species.populations.phenotypes.job_status", + species_id=species["SpeciesId"], + population_id=population["Id"], + dataset_id=dataset["Id"], + job_id=str(_job["jobid"])) + return ((jsonify({ + "redirect-to": jobstatusuri, + "statuscode": 200, + "message": ("Follow the 'redirect-to' URI to see the state " + "of the quality-control job started for your " + "uploaded files.") + }), 200) + if request.form.get("resumable-upload", False) else + redirect(jobstatusuri)) @phenotypesbp.route( @@ -460,3 +518,444 @@ def job_status( metadata=jobs.job_files_metadata( rconn, jobs.jobsnamespace(), job['jobid']), activelink="add-phenotypes") + + +@phenotypesbp.route( + "<int:species_id>/populations/<int:population_id>/phenotypes/datasets" + "/<int:dataset_id>/job/<uuid:job_id>/review", + methods=["GET"]) +@require_login +@with_dataset( + species_redirect_uri="species.populations.phenotypes.index", + population_redirect_uri="species.populations.phenotypes.select_population", + redirect_uri="species.populations.phenotypes.list_datasets") +def review_job_data( + species: dict, + population: dict, + dataset: dict, + job_id: uuid.UUID, + **kwargs +):# pylint: disable=[unused-argument] + """Review data one more time before entering it into the database.""" + with Redis.from_url(app.config["REDIS_URL"], decode_responses=True) as rconn: + try: + job = jobs.job(rconn, jobs.jobsnamespace(), 
str(job_id)) + except jobs.JobNotFound as _jnf: + job = None + + def __metadata_by_type__(by_type, item): + filetype = item[1]["filetype"] + return { + **by_type, + filetype: (by_type.get(filetype, tuple()) + + ({"filename": item[0], **item[1]},)) + } + metadata: dict[str, Any] = reduce( + __metadata_by_type__, + (jobs.job_files_metadata( + rconn, jobs.jobsnamespace(), job['jobid']) + if job else {}).items(), + {}) + + def __desc__(filetype): + match filetype: + case "phenocovar": + desc = "phenotypes" + case "pheno": + desc = "phenotypes data" + case "phenose": + desc = "phenotypes standard-errors" + case "phenonum": + desc = "phenotypes samples" + case _: + desc = f"unknown file type '{filetype}'." + + return desc + + def __summarise__(filetype, files): + return { + "filetype": filetype, + "number-of-files": len(files), + "total-data-rows": sum( + int(afile["linecount"]) - 1 for afile in files), + "description": __desc__(filetype) + } + + summary = { + filetype: __summarise__(filetype, meta) + for filetype,meta in metadata.items() + } + return render_template("phenotypes/review-job-data.html", + species=species, + population=population, + dataset=dataset, + job_id=job_id, + job=job, + summary=summary, + activelink="add-phenotypes") + + +def update_phenotype_metadata(conn, metadata: dict): + """Update a phenotype's basic metadata values.""" + with conn.cursor(cursorclass=DictCursor) as cursor: + cursor.execute("SELECT * FROM Phenotype WHERE Id=%(phenotype-id)s", + metadata) + res = { + **{ + _key: _val for _key,_val in { + key.lower().replace("_", "-"): value + for key, value in (cursor.fetchone() or {}).items() + }.items() + if _key in metadata.keys() + }, + "phenotype-id": metadata.get("phenotype-id") + } + if res == metadata: + return False + + cursor.execute( + "UPDATE Phenotype SET " + "Pre_publication_description=%(pre-publication-description)s, " + "Post_publication_description=%(post-publication-description)s, " + "Original_description=%(original-description)s, " + "Units=%(units)s, " + "Pre_publication_abbreviation=%(pre-publication-abbreviation)s, " + "Post_publication_abbreviation=%(post-publication-abbreviation)s " + "WHERE Id=%(phenotype-id)s", + metadata) + return cursor.rowcount + + +def update_phenotype_values(conn, values): + """Update a phenotype's data values.""" + with conn.cursor() as cursor: + cursor.executemany( + "UPDATE PublishData SET value=%(new)s " + "WHERE Id=%(data_id)s AND StrainId=%(strain_id)s", + tuple(item for item in values if item["new"] is not None)) + cursor.executemany( + "DELETE FROM PublishData " + "WHERE Id=%(data_id)s AND StrainId=%(strain_id)s", + tuple(item for item in values if item["new"] is None)) + return len(values) + return 0 + + +def update_phenotype_se(conn, serrs): + """Update a phenotype's standard-error values.""" + with conn.cursor() as cursor: + cursor.executemany( + "INSERT INTO PublishSE(DataId, StrainId, error) " + "VALUES(%(data_id)s, %(strain_id)s, %(new)s) " + "ON DUPLICATE KEY UPDATE error=VALUES(error)", + tuple(item for item in serrs if item["new"] is not None)) + cursor.executemany( + "DELETE FROM PublishSE " + "WHERE DataId=%(data_id)s AND StrainId=%(strain_id)s", + tuple(item for item in serrs if item["new"] is None)) + return len(serrs) + return 0 + + +def update_phenotype_n(conn, counts): + """Update a phenotype's strain counts.""" + with conn.cursor() as cursor: + cursor.executemany( + "INSERT INTO NStrain(DataId, StrainId, count) " + "VALUES(%(data_id)s, %(strain_id)s, %(new)s) " + "ON DUPLICATE KEY UPDATE 
count=VALUES(count)", + tuple(item for item in counts if item["new"] is not None)) + cursor.executemany( + "DELETE FROM NStrain " + "WHERE DataId=%(data_id)s AND StrainId=%(strain_id)s", + tuple(item for item in counts if item["new"] is None)) + return len(counts) + + return 0 + + +def update_phenotype_data(conn, data: dict): + """Update the numeric data for a phenotype.""" + def __organise_by_dataid_and_strainid__(acc, current): + _key, dataid, strainid = current[0].split("::") + _keysrc, _keytype = _key.split("-") + newkey = f"{dataid}::{strainid}" + newitem = acc.get(newkey, {}) + newitem[_keysrc] = newitem.get(_keysrc, {}) + newitem[_keysrc][_keytype] = current[1] + return {**acc, newkey: newitem} + + def __separate_items__(acc, row): + key, val = row + return ({ + **acc[0], + key: { + **val["value"], + "changed?": (not val["value"]["new"] == val["value"]["original"]) + } + }, { + **acc[1], + key: { + **val["se"], + "changed?": (not val["se"]["new"] == val["se"]["original"]) + } + },{ + **acc[2], + key: { + **val["n"], + "changed?": (not val["n"]["new"] == val["n"]["original"]) + } + }) + + values, serrs, counts = tuple( + tuple({ + "data_id": row[0].split("::")[0], + "strain_id": row[0].split("::")[1], + "new": row[1]["new"] + } for row in item) + for item in ( + filter(lambda val: val[1]["changed?"], item.items())# type: ignore[arg-type] + for item in reduce(# type: ignore[var-annotated] + __separate_items__, + reduce(__organise_by_dataid_and_strainid__, + data.items(), + {}).items(), + ({}, {}, {})))) + + return (update_phenotype_values(conn, values), + update_phenotype_se(conn, serrs), + update_phenotype_n(conn, counts)) + + +@phenotypesbp.route( + "<int:species_id>/populations/<int:population_id>/phenotypes/datasets" + "/<int:dataset_id>/phenotype/<int:xref_id>/edit", + methods=["GET", "POST"]) +@require_login +@with_dataset( + species_redirect_uri="species.populations.phenotypes.index", + population_redirect_uri="species.populations.phenotypes.select_population", + redirect_uri="species.populations.phenotypes.list_datasets") +def edit_phenotype_data(# pylint: disable=[unused-argument] + species: dict, + population: dict, + dataset: dict, + xref_id: int, + **kwargs +): + """Edit the data for a particular phenotype.""" + def __render__(**kwargs): + processed_kwargs = { + **kwargs, + "privileges": (kwargs.get("privileges", tuple()) + ### For demo! Do not commit this part + + ("group:resource:edit-resource", + "group:resource:delete-resource",) + ### END: For demo! 
Do not commit this part + ) + } + return render_template( + "phenotypes/edit-phenotype.html", + species=species, + population=population, + dataset=dataset, + xref_id=xref_id, + families_with_se_and_n=_FAMILIES_WITH_SE_AND_N_, + **processed_kwargs, + activelink="edit-phenotype") + + with database_connection(app.config["SQL_URI"]) as conn: + if request.method == "GET": + def __fetch_phenotype__(privileges): + phenotype = phenotype_by_id(conn, + species["SpeciesId"], + population["Id"], + dataset["Id"], + xref_id) + if phenotype is None: + msg = ("Could not find the phenotype with cross-reference ID" + f" '{xref_id}' from dataset '{dataset['FullName']}' " + f" from the '{population['FullName']}' population of " + f" species '{species['FullName']}'.") + return Left({"privileges": privileges, "phenotype-error": msg}) + return {"privileges": privileges, "phenotype": phenotype} + + def __fetch_publication_data__(**kwargs): + pheno = kwargs["phenotype"] + return { + **kwargs, + "publication_data": phenotype_publication_data( + conn, pheno["Id"]) + } + + def __fail__(failure_object): + # process the object + return __render__(failure_object=failure_object) + + return oauth2_post( + "/auth/resource/phenotypes/individual/linked-resource", + json={ + "species_id": species["SpeciesId"], + "population_id": population["Id"], + "dataset_id": dataset["Id"], + "xref_id": xref_id + } + ).then( + lambda resource: tuple( + privilege["privilege_id"] for role in resource["roles"] + for privilege in role["privileges"]) + ).then( + __fetch_phenotype__ + ).then( + lambda args: __fetch_publication_data__(**args) + ).either(__fail__, lambda args: __render__(**args)) + + ## POST + _change = False + match request.form.get("submit", "invalid-action"): + case "update basic metadata": + _change = update_phenotype_metadata(conn, { + key: value.strip() if bool(value.strip()) else None + for key, value in request.form.items() + if key not in ("submit",) + }) + msg = "Basic metadata was updated successfully." 
+ case "update data": + _update = update_phenotype_data(conn, { + key: value.strip() if bool(value.strip()) else None + for key, value in request.form.items() + if key not in ("submit",) + }) + msg = (f"{_update[0]} value rows, {_update[1]} standard-error " + f"rows and {_update[2]} 'N' rows were updated.") + _change = any(item != 0 for item in _update) + case "update publication": + flash("NOT IMPLEMENTED: Would update publication data.", "alert-success") + case _: + flash("Invalid phenotype editing action.", "alert-danger") + + if _change: + flash(msg, "alert-success") + return redirect(url_for( + "species.populations.phenotypes.view_phenotype", + species_id=species["SpeciesId"], + population_id=population["Id"], + dataset_id=dataset["Id"], + xref_id=xref_id)) + + flash("No change was made by the user.", "alert-info") + return redirect(url_for( + "species.populations.phenotypes.edit_phenotype_data", + species_id=species["SpeciesId"], + population_id=population["Id"], + dataset_id=dataset["Id"], + xref_id=xref_id)) + + +def process_phenotype_data_for_download(pheno: dict) -> dict: + """Sanitise data for download.""" + return { + "UniqueIdentifier": f"phId:{pheno['Id']}::xrId:{pheno['xref_id']}", + **{ + key: val for key, val in pheno.items() + if key not in ("Id", "xref_id", "data", "Units") + }, + **{ + data_item["StrainName"]: data_item["value"] + for data_item in pheno.get("data", {}).values() + } + } + + +@phenotypesbp.route( + "<int:species_id>/populations/<int:population_id>/phenotypes/datasets" + "/<int:dataset_id>/edit-download", + methods=["POST"]) +@require_login +@with_dataset( + species_redirect_uri="species.populations.phenotypes.index", + population_redirect_uri="species.populations.phenotypes.select_population", + redirect_uri="species.populations.phenotypes.list_datasets") +def edit_download_phenotype_data(# pylint: disable=[unused-argument] + species: dict, + population: dict, + dataset: dict, + **kwargs +): + formdata = request.json + with database_connection(app.config["SQL_URI"]) as conn: + samples_list = [ + sample["Name"] for sample in samples_by_species_and_population( + conn, species["SpeciesId"], population["Id"])] + data = ( + process_phenotype_data_for_download(pheno) + for pheno in phenotypes_data_by_ids(conn, tuple({ + "population_id": population["Id"], + "phenoid": row["phenotype_id"], + "xref_id": row["xref_id"] + } for row in formdata))) + + with (tempfile.TemporaryDirectory( + prefix=app.config["TEMPORARY_DIRECTORY"]) as tmpdir): + filename = Path(tmpdir).joinpath("tempfile.tsv") + with open(filename, mode="w") as outfile: + outfile.write( + "# **DO NOT** delete the 'UniqueIdentifier' field. It is used " + "by the system to identify and edit the correct row(s) in the " + "database.\n") + outfile.write( + "# The '…_description' fields are useful for you to figure out " + "what row you are working on. Changing any of this fields will " + "also update the database, so do be careful.\n") + outfile.write( + "# Leave a field empty to delete the value in the database.\n") + outfile.write( + "# Any line beginning with a '#' character is considered a " + "comment line. This line, and all the lines above it, are " + "all comment lines. 
Comment lines will be ignored.\n")             writer = csv.DictWriter(outfile,                                     fieldnames=[                                         "UniqueIdentifier",                                         "Post_publication_description",                                         "Pre_publication_abbreviation",                                         "Pre_publication_description",                                         "Original_description",                                         "Post_publication_abbreviation",                                         "PubMed_ID"                                     ] + samples_list,                                     dialect="excel-tab")             writer.writeheader()             writer.writerows(data)             outfile.flush()          return send_file(             filename,             mimetype="text/csv",             as_attachment=True,             download_name=secure_filename(f"{dataset['Name']}_data"))   @phenotypesbp.route(     "<int:species_id>/populations/<int:population_id>/phenotypes/datasets"     "/<int:dataset_id>/edit-upload",     methods=["GET", "POST"]) @require_login @with_dataset(     species_redirect_uri="species.populations.phenotypes.index",     population_redirect_uri="species.populations.phenotypes.select_population",     redirect_uri="species.populations.phenotypes.list_datasets") def edit_upload_phenotype_data(# pylint: disable=[unused-argument]         species: dict,         population: dict,         dataset: dict,         **kwargs ):     if request.method == "GET":         return render_template(             "phenotypes/bulk-edit-upload.html",             species=species,             population=population,             dataset=dataset,             activelink="edit-phenotype")      return "NOT Implemented: Would do actual edit."
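
Review note: select_population now delegates to generic_select_population from uploader.route_utils, whose body is outside this diff. The sketch below is an illustrative reconstruction only, inferred from the call site and from the inline code this commit deletes; the argument order and names are assumptions, not the shipped implementation.

from flask import flash, redirect, render_template, url_for, current_app as app
from gn_libs.mysqldb import database_connection
from uploader.population.models import (populations_by_species,
                                        population_by_species_and_id)

def generic_select_population(species, template, population_id,
                              select_uri, next_uri, activelink, error_message):
    """Render the selection page, or redirect once a valid population is picked."""
    with database_connection(app.config["SQL_URI"]) as conn:
        if not bool(population_id):
            # No selection yet: show the population picker.
            return render_template(template,
                                   species=species,
                                   populations=populations_by_species(
                                       conn, species["SpeciesId"]),
                                   activelink=activelink)
        population = population_by_species_and_id(
            conn, species["SpeciesId"], int(population_id))
        if not bool(population):
            flash(error_message, "alert-danger")
            return redirect(url_for(select_uri, species_id=species["SpeciesId"]))
        return redirect(url_for(next_uri,
                                species_id=species["SpeciesId"],
                                population_id=population["Id"]))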
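
Review note: view_phenotype now prunes the publication block before display: only four whitelisted fields survive, empty values are dropped via __non_empty__, and underscores in key names become spaces for the template. A runnable miniature (the record is invented; __non_empty__ is simplified to bool() here, losing only the whitespace-stripping of strings):

raw = {"PubMed_ID": 11438585, "Authors": "Smith J", "Title": "",
       "Journal": None, "Volume": "2"}
publish_data = {
    key.replace("_", " "): val
    for key, val in raw.items()
    if key in ("PubMed_ID", "Authors", "Title", "Journal") and bool(val)
}
# Empty "Title", null "Journal" and non-whitelisted "Volume" are all dropped.
assert publish_data == {"PubMed ID": 11438585, "Authors": "Smith J"}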
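
Review note: process_phenotypes_individual_files assembles the uploaded files into the same zip-bundle shape the R/qtl2 path produces, so both flows converge on one QC entry point. In the resumable (chunked) branch each form field carries a small JSON descriptor rather than the file itself. The file names and values below are invented for illustration; only the key names come from the code in the diff.

import json

# Shape of form["phenotype-data"] when "resumable-upload" is set: the file is
# already on disk in UPLOAD_FOLDER and the field carries only a descriptor.
descriptor = json.loads(
    '{"uploaded-file": "c3f279d7cc3efc2f", "original-name": "bxd_pheno.csv"}')

# The control file written into the zip as "control_data.json" then ends up
# shaped roughly like this (key names per the cdata assembly in the diff):
control = {
    "sep": ",",
    "comment.char": "#",
    "na.strings": ["NA", "-"],
    "phenocovar": ["bxd_phenocovar.csv"],
    "pheno": ["bxd_pheno.csv"],
    "phenose": ["bxd_phenose.csv"],
    "phenonum": ["bxd_phenonum.csv"],
}
print(json.dumps(control, indent=2))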
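
Review note: the literal level-to-name dict passed to the QC script deserves a second look. logging.FATAL is an alias of logging.CRITICAL (both are 50), so the FATAL: "FATAL" entry is silently overwritten by the CRITICAL one, and the whole mapping is equivalent to the stdlib helper:

import logging

logger = logging.getLogger(__name__)   # stands in for app.logger here
assert logging.getLevelName(logging.INFO) == "INFO"
assert logging.getLevelName(logging.FATAL) == "CRITICAL"   # FATAL aliases CRITICAL
loglevel_name = logging.getLevelName(logger.getEffectiveLevel())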
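
Review note: in review_job_data, __metadata_by_type__ folds the job-file metadata into a per-filetype dict of tuples via reduce. A loop over collections.defaultdict expresses the same grouping; shown only to document the intended shape (the "filetype" and "linecount" fields are from the code, the sample data is invented):

from collections import defaultdict

files_metadata = {   # invented sample of jobs.job_files_metadata() output
    "pheno1.csv": {"filetype": "pheno", "linecount": "101"},
    "pheno2.csv": {"filetype": "pheno", "linecount": "51"},
    "phenocovar.csv": {"filetype": "phenocovar", "linecount": "11"},
}

by_type: dict[str, list[dict]] = defaultdict(list)
for filename, meta in files_metadata.items():
    by_type[meta["filetype"]].append({"filename": filename, **meta})
assert len(by_type["pheno"]) == 2   # both pheno files grouped together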
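
Review note: the three update helpers (values, SE, N) share one pattern: executemany with %(name)s placeholders consumes sequences of dicts, rows whose "new" value is non-None are upserted via ON DUPLICATE KEY UPDATE, and None means "delete the cell". The splitting logic, with invented rows:

serrs = (
    {"data_id": 12345, "strain_id": 10, "new": 0.31},   # upserted into PublishSE
    {"data_id": 12345, "strain_id": 11, "new": None},   # cell deleted instead
)
upserts = tuple(item for item in serrs if item["new"] is not None)
deletes = tuple(item for item in serrs if item["new"] is None)
assert (len(upserts), len(deletes)) == (1, 1)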
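
Review note: update_phenotype_data is the densest part of the change. It relies on a naming convention for the posted form fields, "<source>-<type>::<DataId>::<StrainId>", where source is one of value/se/n and type is original/new. A hypothetical payload (all numbers invented) makes the two reduce passes easier to follow:

form = {   # hypothetical POST body following the key convention above
    "value-original::12345::10": "5.583",
    "value-new::12345::10": "5.60",     # edited
    "se-original::12345::10": "0.254",
    "se-new::12345::10": "0.254",       # unchanged
    "n-original::12345::10": "6",
    "n-new::12345::10": "6",            # unchanged
}
# __organise_by_dataid_and_strainid__ regroups this per (DataId, StrainId):
#   {"12345::10": {"value": {"original": "5.583", "new": "5.60"},
#                  "se":    {"original": "0.254", "new": "0.254"},
#                  "n":     {"original": "6",     "new": "6"}}}
# __separate_items__ then splits that into (values, serrs, counts), each cell
# flagged with "changed?", so only the edited value row reaches
# update_phenotype_values(); the unchanged SE and N rows are filtered out.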
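
Review note: the GET branch of edit_phenotype_data threads the auth lookup, phenotype fetch and publication fetch through a pymonad Either. The toy pipeline below illustrates the behaviour the code leans on: a Left returned from any .then() step skips the remaining steps and lands in the first argument of .either(). Only Left, Right, .then and .either are real pymonad API (Left, .then and .either already appear in the diff); the data is invented.

from pymonad.either import Left, Right

def fetch(privileges):
    # Mirrors __fetch_phenotype__: Left on failure, a plain dict on success.
    if not privileges:
        return Left({"privileges": privileges, "phenotype-error": "not found"})
    return {"privileges": privileges, "phenotype": {"Id": 4}}

result = (Right(tuple())         # stand-in for a successful oauth2_post()
          .then(fetch)           # produces a Left: no privileges in this toy run
          .then(lambda args: {**args, "publication_data": None})   # skipped
          .either(lambda failure: ("failed", failure),
                  lambda success: ("rendered", success)))
assert result[0] == "failed"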
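
Review note: the file served by edit_download_phenotype_data is a tab-separated sheet whose leading column is the "phId:<Id>::xrId:<xref_id>" identifier built by process_phenotype_data_for_download, followed by the metadata columns and one column per sample. A trimmed excerpt, with every value invented and middle columns elided:

# **DO NOT** delete the 'UniqueIdentifier' field. It is used by the system ...
UniqueIdentifier	Post_publication_description	...	PubMed_ID	BXD1	BXD2
phId:4::xrId:10001	Body weight [g]	...	11438585	21.3	19.8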