path: root/uploader/phenotypes/models.py
Diffstat (limited to 'uploader/phenotypes/models.py')
-rw-r--r--  uploader/phenotypes/models.py  44
1 file changed, 38 insertions(+), 6 deletions(-)
diff --git a/uploader/phenotypes/models.py b/uploader/phenotypes/models.py
index eb5a189..9324601 100644
--- a/uploader/phenotypes/models.py
+++ b/uploader/phenotypes/models.py
@@ -1,6 +1,7 @@
"""Database and utility functions for phenotypes."""
from typing import Optional
from functools import reduce
+from datetime import datetime
import MySQLdb as mdb
from MySQLdb.cursors import Cursor, DictCursor
@@ -78,7 +79,7 @@ def __phenotype_se__(cursor: Cursor,
xref_id: str) -> dict:
"""Fetch standard-error values (if they exist) for a phenotype."""
_sequery = (
- "SELECT pxr.Id AS xref_id, pxr.DataId, pse.error, nst.count "
+ "SELECT pxr.Id AS xref_id, pxr.DataId, str.Id AS StrainId, pse.error, nst.count "
"FROM Phenotype AS pheno "
"INNER JOIN PublishXRef AS pxr ON pheno.Id=pxr.PhenotypeId "
"INNER JOIN PublishSE AS pse ON pxr.DataId=pse.DataId "
@@ -90,7 +91,12 @@ def __phenotype_se__(cursor: Cursor,
"WHERE (str.SpeciesId, pxr.InbredSetId, pf.Id, pxr.Id)=(%s, %s, %s, %s)")
cursor.execute(_sequery,
(species_id, population_id, dataset_id, xref_id))
- return {row["xref_id"]: dict(row) for row in cursor.fetchall()}
+ return {(row["DataId"], row["StrainId"]): {
+ "xref_id": row["xref_id"],
+ "DataId": row["DataId"],
+ "error": row["error"],
+ "count": row["count"]
+ } for row in cursor.fetchall()}
def __organise_by_phenotype__(pheno, row):
"""Organise disparate data rows into phenotype 'objects'."""
@@ -105,11 +111,10 @@ def __organise_by_phenotype__(pheno, row):
"Units": row["Units"],
"Pre_publication_abbreviation": row["Pre_publication_abbreviation"],
"Post_publication_abbreviation": row["Post_publication_abbreviation"],
+ "xref_id": row["pxr.Id"],
"data": {
- #TOD0: organise these by DataId and StrainId
**(_pheno["data"] if bool(_pheno) else {}),
- row["pxr.Id"]: {
- "xref_id": row["pxr.Id"],
+ (row["DataId"], row["StrainId"]): {
"DataId": row["DataId"],
"mean": row["mean"],
"Locus": row["Locus"],
@@ -168,7 +173,7 @@ def phenotype_by_id(
species_id,
population_id,
dataset_id,
- xref_id)).items())
+ xref_id)).values())
}
if bool(_pheno) and len(_pheno.keys()) > 1:
raise Exception(
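
The switch from .items() to .values() in this hunk appears to follow from the new return shape of __phenotype_se__: with the dict keyed by (DataId, StrainId) tuples, only the record dicts themselves are wanted here. An illustration with assumed values:

    se = {(8967043, 4): {"xref_id": "10001", "DataId": 8967043, "error": 0.82, "count": 6}}
    tuple(se.items())   # (((8967043, 4), {...}),) -- the tuple keys leak into the result
    tuple(se.values())  # ({"xref_id": "10001", "DataId": 8967043, "error": 0.82, "count": 6},)
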
@@ -198,3 +203,30 @@ def phenotypes_data(conn: mdb.Connection,
cursor.execute(_query, (population_id, dataset_id))
debug_query(cursor)
return tuple(dict(row) for row in cursor.fetchall())
+
+
+def save_new_dataset(cursor: Cursor,
+ population_id: int,
+ dataset_name: str,
+ dataset_fullname: str,
+ dataset_shortname: str) -> dict:
+ """Create a new phenotype dataset."""
+ params = {
+ "population_id": population_id,
+ "dataset_name": dataset_name,
+ "dataset_fullname": dataset_fullname,
+ "dataset_shortname": dataset_shortname,
+ "created": datetime.now().date().isoformat(),
+ "public": 2,
+ "confidentiality": 0,
+ "users": None
+ }
+ cursor.execute(
+ "INSERT INTO PublishFreeze(Name, FullName, ShortName, CreateTime, "
+ "public, InbredSetId, confidentiality, AuthorisedUsers) "
+ "VALUES(%(dataset_name)s, %(dataset_fullname)s, %(dataset_shortname)s, "
+ "%(created)s, %(public)s, %(population_id)s, %(confidentiality)s, "
+ "%(users)s)",
+ params)
+ debug_query(cursor)
+ return {**params, "Id": cursor.lastrowid}
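
A minimal usage sketch for the new save_new_dataset helper. The connection parameters, dataset names, and the explicit commit are assumptions for illustration, not part of this patch:

    import MySQLdb as mdb

    from uploader.phenotypes.models import save_new_dataset

    # Placeholder connection details; use the application's own configuration.
    conn = mdb.connect(host="localhost", user="webqtlout", passwd="secret", db="db_webqtl")
    try:
        with conn.cursor() as cursor:
            dataset = save_new_dataset(
                cursor,
                population_id=1,
                dataset_name="TestPublish",
                dataset_fullname="Test Published Phenotypes",
                dataset_shortname="TestPublish")
        conn.commit()  # save_new_dataset itself does not commit
        print(dataset["Id"])  # new PublishFreeze Id, taken from cursor.lastrowid
    finally:
        conn.close()
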