about summary refs log tree commit diff
path: root/wqflask
diff options
context:
space:
mode:
Diffstat (limited to 'wqflask')
-rw-r--r--wqflask/wqflask/metadata_edits.py136
-rw-r--r--wqflask/wqflask/views.py126
2 files changed, 135 insertions, 127 deletions
diff --git a/wqflask/wqflask/metadata_edits.py b/wqflask/wqflask/metadata_edits.py
index 94e2710b..bab1fa71 100644
--- a/wqflask/wqflask/metadata_edits.py
+++ b/wqflask/wqflask/metadata_edits.py
@@ -1,11 +1,13 @@
import MySQLdb
import os
import json
+import datetime
import difflib
from collections import namedtuple
-from flask import Blueprint, current_app, render_template, request
+from flask import (Blueprint, current_app, redirect,
+ flash, g, render_template, request)
from itertools import groupby
from wqflask.decorators import edit_access_required
@@ -21,6 +23,9 @@ from gn3.db.phenotypes import Probeset
from gn3.db.phenotypes import Publication
from gn3.db.phenotypes import PublishXRef
from gn3.db.phenotypes import probeset_mapping
+from gn3.commands import run_cmd
+from gn3.db.traits import get_trait_csv_sample_data
+from gn3.db.traits import update_sample_data
metadata_edit = Blueprint('metadata_edit', __name__)
@@ -125,6 +130,7 @@ def display_phenotype_metadata(dataset_id: str, name: str):
publish_xref=_d.get("publish_xref"),
phenotype=_d.get("phenotype"),
publication=_d.get("publication"),
+ dataset_id=dataset_id,
resource_id=request.args.get("resource-id"),
version=os.environ.get("GN_VERSION"),
)
@@ -145,3 +151,131 @@ def display_probeset_metadata(name: str):
resource_id=request.args.get("resource-id"),
version=os.environ.get("GN_VERSION"),
)
+
+
@metadata_edit.route("/<dataset_id>/traits/<name>/update", methods=("POST",))
@edit_access_required
def update_phenotype(dataset_id: str, name: str):
    """Update the metadata of trait NAME in dataset DATASET_ID.

    Expects a POST form carrying the editable phenotype and publication
    fields (with their pre-edit values duplicated under ``old_*`` keys)
    and an optional ``file`` upload containing replacement CSV sample
    data.  Uploaded sample data is saved under ``TMPDIR/sample-data`` and
    diffed (via the external ``csvdiff`` tool) against the current data
    fetched from the database; metadata changes are diffed field by field
    and recorded in the ``metadata_audit`` table.  Finally the user is
    redirected back to the trait's edit page.
    """
    conn = MySQLdb.Connect(db=current_app.config.get("DB_NAME"),
                           user=current_app.config.get("DB_USER"),
                           passwd=current_app.config.get("DB_PASS"),
                           host=current_app.config.get("DB_HOST"))
    try:
        data_ = request.form.to_dict()
        tmpdir = current_app.config.get("TMPDIR")
        # The session record may store keys/values as bytes or str
        # depending on how it was serialised; try bytes first.
        author = (g.user_session.record.get(b"user_id",
                                            b"").decode("utf-8") or
                  g.user_session.record.get("user_id", ""))
        phenotype_id = str(data_.get('phenotype-id'))
        if 'file' not in request.files:
            flash("No sample-data has been uploaded", "warning")
        else:
            file_ = request.files['file']
            sample_datadir = os.path.join(tmpdir, "sample-data")
            # exist_ok=True avoids the race between an existence check
            # and the directory creation under concurrent requests.
            os.makedirs(os.path.join(sample_datadir, "diffs"),
                        exist_ok=True)
            os.makedirs(os.path.join(sample_datadir, "updated"),
                        exist_ok=True)
            current_time = str(datetime.datetime.now().isoformat())
            new_file_name = (os.path.join(tmpdir,
                                          "sample-data/updated/",
                                          (f"{author}."
                                           f"{name}.{phenotype_id}."
                                           f"{current_time}.csv")))
            uploaded_file_name = (os.path.join(
                tmpdir,
                "sample-data/updated/",
                (f"updated.{author}."
                 f"{request.args.get('resource-id')}."
                 f"{current_time}.csv")))
            file_.save(new_file_name)
            # The uploaded CSV begins with a header block whose first
            # line carries "... Id: <publishdata-id>"; capture that id,
            # then strip everything up to the last blank line so only
            # the data rows are diffed.
            with open(new_file_name, "r") as f:
                lines = f.read()
            first_line = lines.split('\n', 1)[0]
            publishdata_id = first_line.split("Id:")[-1].strip()
            with open(new_file_name, "w") as f:
                f.write(lines.split("\n\n")[-1])
            csv_ = get_trait_csv_sample_data(conn=conn,
                                             trait_name=str(name),
                                             phenotype_id=phenotype_id)
            with open(uploaded_file_name, "w") as f_:
                f_.write(csv_.split("\n\n")[-1])
            r = run_cmd(cmd=("csvdiff "
                             f"'{uploaded_file_name}' '{new_file_name}' "
                             "--format json"))
            diff_output = (f"{tmpdir}/sample-data/diffs/"
                           f"{author}.{request.args.get('resource-id')}."
                           f"{current_time}.json")
            with open(diff_output, "w") as f:
                dict_ = json.loads(r.get("output"))
                dict_.update({
                    "author": author,
                    "publishdata_id": publishdata_id,
                    "dataset_id": data_.get("dataset-name"),
                    "timestamp": datetime.datetime.now().strftime(
                        "%Y-%m-%d %H:%M:%S")
                })
                f.write(json.dumps(dict_))
            flash("Sample-data has been successfully uploaded", "success")
        # Run updates on the Phenotype and Publication tables, diffing
        # each submitted field against its "old_*" counterpart.
        phenotype_ = {
            "pre_pub_description": data_.get("pre-pub-desc"),
            "post_pub_description": data_.get("post-pub-desc"),
            "original_description": data_.get("orig-desc"),
            "units": data_.get("units"),
            "pre_pub_abbreviation": data_.get("pre-pub-abbrev"),
            "post_pub_abbreviation": data_.get("post-pub-abbrev"),
            "lab_code": data_.get("labcode"),
            "submitter": data_.get("submitter"),
            "owner": data_.get("owner"),
            "authorized_users": data_.get("authorized-users"),
        }
        updated_phenotypes = update(
            conn, "Phenotype",
            data=Phenotype(**phenotype_),
            where=Phenotype(id_=data_.get("phenotype-id")))
        diff_data = {}
        if updated_phenotypes:
            diff_data.update({"Phenotype": diff_from_dict(old={
                k: data_.get(f"old_{k}") for k, v in phenotype_.items()
                if v is not None}, new=phenotype_)})
        publication_ = {
            "abstract": data_.get("abstract"),
            "authors": data_.get("authors"),
            "title": data_.get("title"),
            "journal": data_.get("journal"),
            "volume": data_.get("volume"),
            "pages": data_.get("pages"),
            "month": data_.get("month"),
            "year": data_.get("year")
        }
        # Fall back to the record's previous id when no pubmed-id was
        # submitted, so the existing publication row is still targeted.
        updated_publications = update(
            conn, "Publication",
            data=Publication(**publication_),
            where=Publication(id_=data_.get("pubmed-id",
                                            data_.get("old_id_"))))
        if updated_publications:
            diff_data.update({"Publication": diff_from_dict(old={
                k: data_.get(f"old_{k}") for k, v in publication_.items()
                if v is not None}, new=publication_)})
        if diff_data:
            # NOTE(review): the trait name is stored under "dataset_id"
            # here and in MetadataAudit below — confirm this is intended.
            diff_data.update({"dataset_id": name})
            diff_data.update(
                {"resource_id": request.args.get('resource-id')})
            diff_data.update({"author": author})
            diff_data.update({"timestamp": datetime.datetime.now().strftime(
                "%Y-%m-%d %H:%M:%S")})
            insert(conn,
                   table="metadata_audit",
                   data=MetadataAudit(dataset_id=name,
                                      editor=author,
                                      json_data=json.dumps(diff_data)))
            flash(f"Diff-data: \n{diff_data}\nhas been uploaded", "success")
        return redirect(f"/datasets/{dataset_id}/traits/{name}/edit"
                        f"?resource-id={request.args.get('resource-id')}")
    finally:
        # The original leaked the connection on every request; always
        # release it, even when an update or file operation raises.
        conn.close()
diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py
index 08c88a25..963e48b7 100644
--- a/wqflask/wqflask/views.py
+++ b/wqflask/wqflask/views.py
@@ -416,132 +416,6 @@ def submit_trait_form():
version=GN_VERSION)
-@app.route("/trait/update", methods=["POST"])
-@edit_access_required
-def update_phenotype():
- conn = MySQLdb.Connect(db=current_app.config.get("DB_NAME"),
- user=current_app.config.get("DB_USER"),
- passwd=current_app.config.get("DB_PASS"),
- host=current_app.config.get("DB_HOST"))
- data_ = request.form.to_dict()
- TMPDIR = current_app.config.get("TMPDIR")
- author = g.user_session.record.get(b'user_name')
- if 'file' not in request.files:
- flash("No sample-data has been uploaded", "warning")
- else:
- file_ = request.files['file']
- trait_name = str(data_.get('dataset-name'))
- phenotype_id = str(data_.get('phenotype-id', 35))
- SAMPLE_DATADIR = os.path.join(TMPDIR, "sample-data")
- if not os.path.exists(SAMPLE_DATADIR):
- os.makedirs(SAMPLE_DATADIR)
- if not os.path.exists(os.path.join(SAMPLE_DATADIR,
- "diffs")):
- os.makedirs(os.path.join(SAMPLE_DATADIR,
- "diffs"))
- if not os.path.exists(os.path.join(SAMPLE_DATADIR,
- "updated")):
- os.makedirs(os.path.join(SAMPLE_DATADIR,
- "updated"))
- current_time = str(datetime.datetime.now().isoformat())
- new_file_name = (os.path.join(TMPDIR,
- "sample-data/updated/",
- (f"{author.decode('utf-8')}."
- f"{trait_name}.{phenotype_id}."
- f"{current_time}.csv")))
- uploaded_file_name = (os.path.join(
- TMPDIR,
- "sample-data/updated/",
- (f"updated.{author.decode('utf-8')}."
- f"{trait_name}.{phenotype_id}."
- f"{current_time}.csv")))
- file_.save(new_file_name)
- publishdata_id = ""
- lines = []
- with open(new_file_name, "r") as f:
- lines = f.read()
- first_line = lines.split('\n', 1)[0]
- publishdata_id = first_line.split("Id:")[-1].strip()
- with open(new_file_name, "w") as f:
- f.write(lines.split("\n\n")[-1])
- csv_ = get_trait_csv_sample_data(conn=conn,
- trait_name=str(trait_name),
- phenotype_id=str(phenotype_id))
- with open(uploaded_file_name, "w") as f_:
- f_.write(csv_.split("\n\n")[-1])
- r = run_cmd(cmd=("csvdiff "
- f"'{uploaded_file_name}' '{new_file_name}' "
- "--format json"))
- diff_output = (f"{TMPDIR}/sample-data/diffs/"
- f"{trait_name}.{author.decode('utf-8')}."
- f"{phenotype_id}.{current_time}.json")
- with open(diff_output, "w") as f:
- dict_ = json.loads(r.get("output"))
- dict_.update({
- "author": author.decode('utf-8'),
- "publishdata_id": publishdata_id,
- "dataset_id": data_.get("dataset-name"),
- "timestamp": datetime.datetime.now().strftime(
- "%Y-%m-%d %H:%M:%S")
- })
- f.write(json.dumps(dict_))
- flash("Sample-data has been successfully uploaded", "success")
- # Run updates:
- phenotype_ = {
- "pre_pub_description": data_.get("pre-pub-desc"),
- "post_pub_description": data_.get("post-pub-desc"),
- "original_description": data_.get("orig-desc"),
- "units": data_.get("units"),
- "pre_pub_abbreviation": data_.get("pre-pub-abbrev"),
- "post_pub_abbreviation": data_.get("post-pub-abbrev"),
- "lab_code": data_.get("labcode"),
- "submitter": data_.get("submitter"),
- "owner": data_.get("owner"),
- "authorized_users": data_.get("authorized-users"),
- }
- updated_phenotypes = update(
- conn, "Phenotype",
- data=Phenotype(**phenotype_),
- where=Phenotype(id_=data_.get("phenotype-id")))
- diff_data = {}
- if updated_phenotypes:
- diff_data.update({"Phenotype": diff_from_dict(old={
- k: data_.get(f"old_{k}") for k, v in phenotype_.items()
- if v is not None}, new=phenotype_)})
- publication_ = {
- "abstract": data_.get("abstract"),
- "authors": data_.get("authors"),
- "title": data_.get("title"),
- "journal": data_.get("journal"),
- "volume": data_.get("volume"),
- "pages": data_.get("pages"),
- "month": data_.get("month"),
- "year": data_.get("year")
- }
- updated_publications = update(
- conn, "Publication",
- data=Publication(**publication_),
- where=Publication(id_=data_.get("pubmed-id",
- data_.get("old_id_"))))
- if updated_publications:
- diff_data.update({"Publication": diff_from_dict(old={
- k: data_.get(f"old_{k}") for k, v in publication_.items()
- if v is not None}, new=publication_)})
- if diff_data:
- diff_data.update({"dataset_id": data_.get("dataset-name")})
- diff_data.update({"author": author.decode('utf-8')})
- diff_data.update({"timestamp": datetime.datetime.now().strftime(
- "%Y-%m-%d %H:%M:%S")})
- insert(conn,
- table="metadata_audit",
- data=MetadataAudit(dataset_id=data_.get("dataset-name"),
- editor=author.decode("utf-8"),
- json_data=json.dumps(diff_data)))
- flash(f"Diff-data: \n{diff_data}\nhas been uploaded", "success")
- return redirect(f"/trait/{data_.get('dataset-name')}"
- f"/edit/inbredset-id/{data_.get('inbred-set-id')}")
-
-
@app.route("/probeset/update", methods=["POST"])
def update_probeset():
conn = MySQLdb.Connect(db=current_app.config.get("DB_NAME"),