aboutsummaryrefslogtreecommitdiff
path: root/wqflask
diff options
context:
space:
mode:
authorzsloan2023-08-11 21:41:01 +0000
committerzsloan2023-08-17 14:54:58 -0500
commit934fd5e272f27443955cc42453c8b89c1657ced0 (patch)
treee2ff5921caf7f02b74b5d75f11da29fee7337b43 /wqflask
parent51209f9351dc1d0d33c863f7836c40576b855877 (diff)
downloadgenenetwork2-934fd5e272f27443955cc42453c8b89c1657ced0.tar.gz
Implement ProbeSet sample data editing
Diffstat (limited to 'wqflask')
-rw-r--r--wqflask/wqflask/metadata_edits.py115
1 files changed, 80 insertions, 35 deletions
diff --git a/wqflask/wqflask/metadata_edits.py b/wqflask/wqflask/metadata_edits.py
index 60b2ffd6..fee80a5f 100644
--- a/wqflask/wqflask/metadata_edits.py
+++ b/wqflask/wqflask/metadata_edits.py
@@ -60,11 +60,13 @@ from gn3.db.phenotypes import (
fetch_publication_by_id,
fetch_publication_by_pubmed_id,
update_phenotype as _update_phenotype)
-from gn3.db.sample_data import delete_sample_data
from gn3.db.sample_data import (
- delete_sample_data,
- insert_sample_data,
- update_sample_data,
+ delete_mrna_sample_data,
+ delete_pheno_sample_data,
+ insert_mrna_sample_data,
+ insert_pheno_sample_data,
+ update_mrna_sample_data,
+ update_pheno_sample_data,
get_pheno_sample_data,
get_pheno_csv_sample_data,
get_mrna_sample_data,
@@ -497,7 +499,6 @@ def update_probeset(name: str):
url = url_for("metadata_edit.list_diffs")
flash(f"Sample-data has been successfully uploaded. \
View the diffs <a href='{url}' target='_blank'>here</a>", "success")
-
with database_connection(get_setting("SQL_URI")) as conn:
data_ = request.form.to_dict()
probeset_ = {
@@ -844,28 +845,51 @@ def approve_data(resource_id: str, file_name: str):
for modification in (
modifications := [d for d in sample_data.get("Modifications")]):
if modification.get("Current"):
- update_sample_data(
+ if sample_data.get("probeset_id"): # if trait is ProbeSet
+ update_mrna_sample_data(
+ conn=conn,
+ original_data=modification.get("Original"),
+ updated_data=modification.get("Current"),
+ csv_header=sample_data.get(
+ "Columns", "Strain Name,Value,SE,Count"
+ ),
+ probeset_id=int(sample_data.get("probeset_id")),
+ dataset_name=sample_data.get("dataset_name")
+ )
+ else:
+ update_pheno_sample_data(
+ conn=conn,
+ trait_name=sample_data.get("trait_name"),
+ original_data=modification.get("Original"),
+ updated_data=modification.get("Current"),
+ csv_header=sample_data.get(
+ "Columns", "Strain Name,Value,SE,Count"
+ ),
+ phenotype_id=int(sample_data.get("phenotype_id")),
+ )
+
+ # Deletions
+ for data in [d for d in sample_data.get("Deletions")]:
+ if sample_data.get("probeset_id"): # if trait is ProbeSet
+ __deletions = delete_mrna_sample_data(
+ conn=conn,
+ data=data,
+ csv_header=sample_data.get(
+ "Columns", "Strain Name,Value,SE,Count"
+ ),
+ probeset_id=int(sample_data.get("probeset_id")),
+ dataset_name=sample_data.get("dataset_name")
+ )
+ else:
+            __deletions = delete_pheno_sample_data(
conn=conn,
trait_name=sample_data.get("trait_name"),
- original_data=modification.get("Original"),
- updated_data=modification.get("Current"),
+ data=data,
csv_header=sample_data.get(
"Columns", "Strain Name,Value,SE,Count"
),
phenotype_id=int(sample_data.get("phenotype_id")),
)
-
- # Deletions
- for data in [d for d in sample_data.get("Deletions")]:
- __deletions = delete_sample_data(
- conn=conn,
- trait_name=sample_data.get("trait_name"),
- data=data,
- csv_header=sample_data.get(
- "Columns", "Strain Name,Value,SE,Count"
- ),
- phenotype_id=int(sample_data.get("phenotype_id")),
- )
if __deletions:
n_deletions += 1
# Remove any data that already exists from sample_data deletes
@@ -874,16 +898,30 @@ def approve_data(resource_id: str, file_name: str):
## Insertions
for data in [d for d in sample_data.get("Additions")]:
- if insert_sample_data(
- conn=conn,
- trait_name=sample_data.get("trait_name"),
- data=data,
- csv_header=sample_data.get(
- "Columns", "Strain Name,Value,SE,Count"
- ),
- phenotype_id=int(sample_data.get("phenotype_id")),
- ):
+ if sample_data.get("probeset_id"): # if trait is ProbeSet
+ __insertions = insert_mrna_sample_data(
+ conn=conn,
+ data=data,
+ csv_header=sample_data.get(
+ "Columns", "Strain Name,Value,SE,Count"
+ ),
+ probeset_id=int(sample_data.get("probeset_id")),
+ dataset_name=sample_data.get("dataset_name")
+ )
+ else:
+ __insertions = insert_pheno_sample_data(
+ conn=conn,
+ trait_name=sample_data.get("trait_name"),
+ data=data,
+ csv_header=sample_data.get(
+ "Columns", "Strain Name,Value,SE,Count"
+ ),
+ phenotype_id=int(sample_data.get("phenotype_id")),
+ )
+ if __insertions:
n_insertions += 1
+ else:
+ sample_data.get("Additions").remove(data)
if any(
[
sample_data.get("Additions"),
@@ -892,11 +930,18 @@ def approve_data(resource_id: str, file_name: str):
]
):
with database_connection(get_setting("SQL_URI")) as conn:
- create_metadata_audit(conn, {
- "dataset_id": sample_data.get("dataset_id"),
- "editor": sample_data.get("author"),
- "json_data": json.dumps(sample_data, cls=CustomJSONEncoder)
- })
+ if sample_data.get("dataset_id"): # if phenotype
+ create_metadata_audit(conn, {
+ "dataset_id": sample_data.get("dataset_id"),
+ "editor": sample_data.get("author"),
+ "json_data": json.dumps(sample_data, cls=CustomJSONEncoder)
+ })
+ else:
+ create_metadata_audit(conn, {
+ "dataset_id": sample_data.get("probeset_id"),
+ "editor": sample_data.get("author"),
+ "json_data": json.dumps(sample_data, cls=CustomJSONEncoder)
+ })
# Once data is approved, rename it!
os.rename(
os.path.join(f"{TMPDIR}/sample-data/diffs", file_name),
@@ -907,7 +952,7 @@ def approve_data(resource_id: str, file_name: str):
if n_deletions:
flash(f"# Deletions: {n_deletions}", "success")
if n_insertions:
- flash(f"# Additions: {len(modifications)}", "success")
+        flash(f"# Additions: {n_insertions}", "success")
if len(modifications):
flash(f"# Modifications: {len(modifications)}", "success")
else: # Edge case where you need to automatically reject the file