path: root/wqflask/base
author     Arthur Centeno  2021-10-25 21:04:23 +0000
committer  Arthur Centeno  2021-10-25 21:04:23 +0000
commit     499a80f138030c4de1629c043c8f9401a99894ea (patch)
tree       449dcae965d13f966fb6d52625fbc86661c8c6a0 /wqflask/base
parent     6151faa9ea67af4bf4ea95fb681a9dc4319474b6 (diff)
parent     700802303e5e8221a9d591ba985d6607aa61e1ce (diff)
download   genenetwork2-499a80f138030c4de1629c043c8f9401a99894ea.tar.gz
Merge github.com:genenetwork/genenetwork2 into acenteno
Diffstat (limited to 'wqflask/base')
-rw-r--r--  wqflask/base/GeneralObject.py            78
-rw-r--r--  wqflask/base/data_set.py                683
-rw-r--r--  wqflask/base/mrna_assay_tissue_data.py   45
-rw-r--r--  wqflask/base/species.py                  96
-rw-r--r--  wqflask/base/trait.py                   501
-rw-r--r--  wqflask/base/webqtlCaseData.py           48
-rw-r--r--  wqflask/base/webqtlConfig.py             46
7 files changed, 766 insertions(+), 731 deletions(-)
diff --git a/wqflask/base/GeneralObject.py b/wqflask/base/GeneralObject.py
index 02a1ef06..ce8e60b8 100644
--- a/wqflask/base/GeneralObject.py
+++ b/wqflask/base/GeneralObject.py
@@ -25,44 +25,42 @@
# Last updated by GeneNetwork Core Team 2010/10/20
class GeneralObject:
- """
- Base class to define an Object.
- a = [Spam(1, 4), Spam(9, 3), Spam(4,6)]
- a.sort(lambda x, y: cmp(x.eggs, y.eggs))
- """
+ """
+ Base class to define an Object.
+ a = [Spam(1, 4), Spam(9, 3), Spam(4,6)]
+ a.sort(key = lambda x: x.eggs)
+ """
- def __init__(self, *args, **kw):
- self.contents = list(args)
- for name, value in kw.items():
- setattr(self, name, value)
-
- def __setitem__(self, key, value):
- setattr(self, key, value)
-
- def __getitem__(self, key):
- return getattr(self, key)
-
- def __getattr__(self, key):
- if key in self.__dict__.keys():
- return self.__dict__[key]
- else:
- return eval("self.__dict__.%s" % key)
-
- def __len__(self):
- return len(self.__dict__) - 1
-
- def __str__(self):
- s = ''
- for key in self.__dict__.keys():
- if key != 'contents':
- s += '%s = %s\n' % (key,self.__dict__[key])
- return s
-
- def __repr__(self):
- s = ''
- for key in self.__dict__.keys():
- s += '%s = %s\n' % (key,self.__dict__[key])
- return s
-
- def __cmp__(self,other):
- return len(self.__dict__.keys()).__cmp__(len(other.__dict__.keys())) \ No newline at end of file
+ def __init__(self, *args, **kw):
+ self.contents = list(args)
+ for name, value in list(kw.items()):
+ setattr(self, name, value)
+
+ def __setitem__(self, key, value):
+ setattr(self, key, value)
+
+ def __getitem__(self, key):
+ return getattr(self, key)
+
+ def __getattr__(self, key):
+ return eval("self.__dict__.%s" % key)
+
+ def __len__(self):
+ return len(self.__dict__) - 1
+
+ def __str__(self):
+ s = ''
+ for key in list(self.__dict__.keys()):
+ if key != 'contents':
+ s += '%s = %s\n' % (key, self.__dict__[key])
+ return s
+
+ def __repr__(self):
+ s = ''
+ for key in list(self.__dict__.keys()):
+ s += '%s = %s\n' % (key, self.__dict__[key])
+ return s
+
+ def __eq__(self, other):
+ return (len(list(self.__dict__.keys()))
+ == len(list(other.__dict__.keys())))
diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py
index 1457ba8d..8906ab69 100644
--- a/wqflask/base/data_set.py
+++ b/wqflask/base/data_set.py
@@ -17,8 +17,28 @@
# at rwilliams@uthsc.edu and xzhou15@uthsc.edu
#
# This module is used by GeneNetwork project (www.genenetwork.org)
-
-from __future__ import absolute_import, print_function, division
+from dataclasses import dataclass
+from dataclasses import field
+from dataclasses import InitVar
+from typing import Optional, Dict
+from db.call import fetchall, fetchone, fetch1
+from utility.logger import getLogger
+from utility.tools import USE_GN_SERVER, USE_REDIS, flat_files, flat_file_exists, GN2_BASE_URL
+from db.gn_server import menu_main
+from pprint import pformat as pf
+from utility.db_tools import escape
+from utility.db_tools import mescape
+from utility.db_tools import create_in_clause
+from maintenance import get_group_samplelists
+from utility.tools import locate, locate_ignore_error, flat_files
+from utility import gen_geno_ob
+from utility import chunks
+from utility.benchmark import Bench
+from utility import webqtlUtil
+from db import webqtlDatabaseFunction
+from base import species
+from base import webqtlConfig
+from flask import Flask, g
import os
import math
import string
@@ -28,43 +48,25 @@ import codecs
import json
import requests
import gzip
-import cPickle as pickle
+import pickle as pickle
import itertools
from redis import Redis
-Redis = Redis()
-from flask import Flask, g
+r = Redis()
-import reaper
-
-from base import webqtlConfig
-from base import species
-from db import webqtlDatabaseFunction
-from utility import webqtlUtil
-from utility.benchmark import Bench
-from utility import chunks
-from utility import gen_geno_ob
-from utility.tools import locate, locate_ignore_error, flat_files
-
-from wqflask.api import gen_menu
-
-from maintenance import get_group_samplelists
-
-from MySQLdb import escape_string as escape
-from pprint import pformat as pf
-from db.gn_server import menu_main
-from db.call import fetchall,fetchone,fetch1
-
-from utility.tools import USE_GN_SERVER, USE_REDIS, flat_files, flat_file_exists, GN2_BASE_URL
-from utility.logger import getLogger
-logger = getLogger(__name__ )
+logger = getLogger(__name__)
# Used by create_database to instantiate objects
# Each subclass will add to this
DS_NAME_MAP = {}
-def create_dataset(dataset_name, dataset_type = None, get_samplelist = True, group_name = None):
+
+def create_dataset(dataset_name, dataset_type=None,
+ get_samplelist=True, group_name=None):
+ if dataset_name == "Temp":
+ dataset_type = "Temp"
+
if not dataset_type:
dataset_type = Dataset_Getter(dataset_name)
@@ -75,11 +77,11 @@ def create_dataset(dataset_name, dataset_type = None, get_samplelist = True, gro
else:
return dataset_class(dataset_name, get_samplelist)
-class Dataset_Types(object):
- def __init__(self):
- """Create a dictionary of samples where the value is set to Geno,
-Publish or ProbeSet. E.g.
+@dataclass
+class DatasetType:
+ """Create a dictionary of samples where the value is set to Geno,
+ Publish or ProbeSet. E.g.
{'AD-cases-controls-MyersGeno': 'Geno',
'AD-cases-controls-MyersPublish': 'Publish',
@@ -90,20 +92,28 @@ Publish or ProbeSet. E.g.
'All Phenotypes': 'Publish',
'B139_K_1206_M': 'ProbeSet',
'B139_K_1206_R': 'ProbeSet' ...
-
+ }
"""
- self.datasets = {}
+ redis_instance: InitVar[Redis]
+ datasets: Optional[Dict] = field(init=False, default_factory=dict)
+ data: Optional[Dict] = field(init=False)
- data = Redis.get("dataset_structure")
+ def __post_init__(self, redis_instance):
+ self.redis_instance = redis_instance
+ data = redis_instance.get("dataset_structure")
if data:
self.datasets = json.loads(data)
- else: #ZS: I don't think this should ever run unless Redis is emptied
+ else:
+ # ZS: I don't think this should ever run unless Redis is
+ # emptied
try:
- data = json.loads(requests.get(GN2_BASE_URL + "/api/v_pre1/gen_dropdown", timeout = 5).content)
- for species in data['datasets']:
- for group in data['datasets'][species]:
- for dataset_type in data['datasets'][species][group]:
- for dataset in data['datasets'][species][group][dataset_type]:
+ data = json.loads(requests.get(
+ GN2_BASE_URL + "/api/v_pre1/gen_dropdown",
+ timeout=5).content)
+ for _species in data['datasets']:
+ for group in data['datasets'][_species]:
+ for dataset_type in data['datasets'][_species][group]:
+ for dataset in data['datasets'][_species][group][dataset_type]:
short_dataset_name = dataset[1]
if dataset_type == "Phenotypes":
new_type = "Publish"
@@ -112,84 +122,77 @@ Publish or ProbeSet. E.g.
else:
new_type = "ProbeSet"
self.datasets[short_dataset_name] = new_type
- except:
+ except Exception: # Do nothing
pass
- Redis.set("dataset_structure", json.dumps(self.datasets))
+ self.redis_instance.set("dataset_structure",
+ json.dumps(self.datasets))
+ self.data = data
- # Set LOG_LEVEL_DEBUG=5 to see the following:
- logger.debugf(5, "datasets",self.datasets)
+ def set_dataset_key(self, t, name):
+ """If name is not in the object's dataset dictionary, set it, and
+ update dataset_structure in Redis
+ args:
+ t: Type of dataset structure which can be: 'mrna_expr', 'pheno',
+ 'other_pheno', 'geno'
+ name: The name of the key to inserted in the datasets dictionary
- def __call__(self, name):
- if name not in self.datasets:
- mrna_expr_query = """
- SELECT
- ProbeSetFreeze.Id
- FROM
- ProbeSetFreeze
- WHERE
- ProbeSetFreeze.Name = "{0}"
- """.format(name)
+ """
+ sql_query_mapping = {
+ 'mrna_expr': ("SELECT ProbeSetFreeze.Id FROM "
+ "ProbeSetFreeze WHERE "
+ "ProbeSetFreeze.Name = \"%s\" "),
+ 'pheno': ("SELECT InfoFiles.GN_AccesionId "
+ "FROM InfoFiles, PublishFreeze, InbredSet "
+ "WHERE InbredSet.Name = '%s' AND "
+ "PublishFreeze.InbredSetId = InbredSet.Id AND "
+ "InfoFiles.InfoPageName = PublishFreeze.Name"),
+ 'other_pheno': ("SELECT PublishFreeze.Name "
+ "FROM PublishFreeze, InbredSet "
+ "WHERE InbredSet.Name = '%s' AND "
+ "PublishFreeze.InbredSetId = InbredSet.Id"),
+ 'geno': ("SELECT GenoFreeze.Id FROM GenoFreeze WHERE "
+ "GenoFreeze.Name = \"%s\" ")
+ }
+
+ dataset_name_mapping = {
+ "mrna_expr": "ProbeSet",
+ "pheno": "Publish",
+ "other_pheno": "Publish",
+ "geno": "Geno",
+ }
+
+ group_name = name
+ if t in ['pheno', 'other_pheno']:
+ group_name = name.replace("Publish", "")
- results = g.db.execute(mrna_expr_query).fetchall()
- if len(results):
- self.datasets[name] = "ProbeSet"
- Redis.set("dataset_structure", json.dumps(self.datasets))
- return self.datasets[name]
+ results = g.db.execute(sql_query_mapping[t] % group_name).fetchone()
+ if results:
+ self.datasets[name] = dataset_name_mapping[t]
+ self.redis_instance.set(
+ "dataset_structure", json.dumps(self.datasets))
+ return True
+ return None
- group_name = name.replace("Publish", "")
+ def __call__(self, name):
+ if name not in self.datasets:
+ for t in ["mrna_expr", "pheno", "other_pheno", "geno"]:
+ # This has side-effects, with the end result being a
+ # truth-y value
+ if(self.set_dataset_key(t, name)):
+ break
+ # Return None if name has not been set
+ return self.datasets.get(name, None)
- pheno_query = """SELECT InfoFiles.GN_AccesionId
- FROM InfoFiles, PublishFreeze, InbredSet
- WHERE InbredSet.Name = '{0}' AND
- PublishFreeze.InbredSetId = InbredSet.Id AND
- InfoFiles.InfoPageName = PublishFreeze.Name""".format(group_name)
-
- results = g.db.execute(pheno_query).fetchall()
- if len(results):
- self.datasets[name] = "Publish"
- Redis.set("dataset_structure", json.dumps(self.datasets))
- return self.datasets[name]
-
- #ZS: For when there isn't an InfoFiles ID; not sure if this and the preceding query are both necessary
- other_pheno_query = """SELECT PublishFreeze.Name
- FROM PublishFreeze, InbredSet
- WHERE InbredSet.Name = '{}' AND
- PublishFreeze.InbredSetId = InbredSet.Id""".format(group_name)
-
- results = g.db.execute(other_pheno_query).fetchall()
- if len(results):
- self.datasets[name] = "Publish"
- Redis.set("dataset_structure", json.dumps(self.datasets))
- return self.datasets[name]
-
- geno_query = """
- SELECT
- GenoFreeze.Id
- FROM
- GenoFreeze
- WHERE
- GenoFreeze.Name = "{0}"
- """.format(name)
-
- results = g.db.execute(geno_query).fetchall()
- if len(results):
- self.datasets[name] = "Geno"
- Redis.set("dataset_structure", json.dumps(self.datasets))
- return self.datasets[name]
-
- #ZS: It shouldn't ever reach this
- return None
- else:
- return self.datasets[name]
# Do the intensive work at startup one time only
-Dataset_Getter = Dataset_Types()
+Dataset_Getter = DatasetType(r)
+
def create_datasets_list():
if USE_REDIS:
key = "all_datasets"
- result = Redis.get(key)
+ result = r.get(key)
if result:
logger.debug("Redis cache hit")
@@ -205,38 +208,25 @@ def create_datasets_list():
for dataset_type in type_dict:
query = "SELECT Name FROM {}".format(type_dict[dataset_type])
for result in fetchall(query):
- #The query at the beginning of this function isn't
- #necessary here, but still would rather just reuse
- #it logger.debug("type: {}\tname:
- #{}".format(dataset_type, result.Name))
+ # The query at the beginning of this function isn't
+ # necessary here, but still would rather just reuse
+ # it logger.debug("type: {}\tname:
+ # {}".format(dataset_type, result.Name))
dataset = create_dataset(result.Name, dataset_type)
datasets.append(dataset)
if USE_REDIS:
- Redis.set(key, pickle.dumps(datasets, pickle.HIGHEST_PROTOCOL))
- Redis.expire(key, 60*60)
+ r.set(key, pickle.dumps(datasets, pickle.HIGHEST_PROTOCOL))
+ r.expire(key, 60 * 60)
return datasets
-def create_in_clause(items):
- """Create an in clause for mysql"""
- in_clause = ', '.join("'{}'".format(x) for x in mescape(*items))
- in_clause = '( {} )'.format(in_clause)
- return in_clause
-
-
-def mescape(*items):
- """Multiple escape"""
- escaped = [escape(str(item)) for item in items]
- #logger.debug("escaped is:", escaped)
- return escaped
-
-
-class Markers(object):
+class Markers:
"""Todo: Build in cacheing so it saves us reading the same file more than once"""
+
def __init__(self, name):
- json_data_fh = open(locate(name + ".json",'genotype/json'))
+ json_data_fh = open(locate(name + ".json", 'genotype/json'))
markers = []
with open("%s/%s_snps.txt" % (flat_files('genotype/bimbam'), name), 'r') as bimbam_fh:
@@ -251,7 +241,8 @@ class Markers(object):
for line in bimbam_fh:
marker = {}
marker['name'] = line.split(delimiter)[0].rstrip()
- marker['Mb'] = float(line.split(delimiter)[1].rstrip())/1000000
+ marker['Mb'] = float(line.split(delimiter)[
+ 1].rstrip()) / 1000000
marker['chr'] = line.split(delimiter)[2].rstrip()
markers.append(marker)
@@ -266,12 +257,12 @@ class Markers(object):
logger.debug("length of self.markers:", len(self.markers))
logger.debug("length of p_values:", len(p_values))
- if type(p_values) is list:
+ if isinstance(p_values, list):
# THIS IS only needed for the case when we are limiting the number of p-values calculated
- #if len(self.markers) > len(p_values):
+ # if len(self.markers) > len(p_values):
# self.markers = self.markers[:len(p_values)]
- for marker, p_value in itertools.izip(self.markers, p_values):
+ for marker, p_value in zip(self.markers, p_values):
if not p_value:
continue
marker['p_value'] = float(p_value)
@@ -280,38 +271,32 @@ class Markers(object):
marker['lrs_value'] = 0
else:
marker['lod_score'] = -math.log10(marker['p_value'])
- #Using -log(p) for the LRS; need to ask Rob how he wants to get LRS from p-values
+ # Using -log(p) for the LRS; need to ask Rob how he wants to get LRS from p-values
marker['lrs_value'] = -math.log10(marker['p_value']) * 4.61
- elif type(p_values) is dict:
+ elif isinstance(p_values, dict):
filtered_markers = []
for marker in self.markers:
- #logger.debug("marker[name]", marker['name'])
- #logger.debug("p_values:", p_values)
if marker['name'] in p_values:
- #logger.debug("marker {} IS in p_values".format(i))
marker['p_value'] = p_values[marker['name']]
if math.isnan(marker['p_value']) or (marker['p_value'] <= 0):
marker['lod_score'] = 0
marker['lrs_value'] = 0
else:
marker['lod_score'] = -math.log10(marker['p_value'])
- #Using -log(p) for the LRS; need to ask Rob how he wants to get LRS from p-values
- marker['lrs_value'] = -math.log10(marker['p_value']) * 4.61
+ # Using -log(p) for the LRS; need to ask Rob how he wants to get LRS from p-values
+ marker['lrs_value'] = - \
+ math.log10(marker['p_value']) * 4.61
filtered_markers.append(marker)
- #else:
- #logger.debug("marker {} NOT in p_values".format(i))
- #self.markers.remove(marker)
- #del self.markers[i]
self.markers = filtered_markers
+
class HumanMarkers(Markers):
- def __init__(self, name, specified_markers = []):
+ def __init__(self, name, specified_markers=[]):
marker_data_fh = open(flat_files('mapping') + '/' + name + '.bim')
self.markers = []
for line in marker_data_fh:
splat = line.strip().split()
- #logger.debug("splat:", splat)
if len(specified_markers) > 0:
if splat[1] in specified_markers:
marker = {}
@@ -327,14 +312,11 @@ class HumanMarkers(Markers):
marker['Mb'] = float(splat[3]) / 1000000
self.markers.append(marker)
- #logger.debug("markers is: ", pf(self.markers))
-
-
def add_pvalues(self, p_values):
super(HumanMarkers, self).add_pvalues(p_values)
-class DatasetGroup(object):
+class DatasetGroup:
"""
Each group has multiple datasets; each species has multiple groups.
@@ -342,12 +324,15 @@ class DatasetGroup(object):
has multiple datasets associated with it.
"""
+
def __init__(self, dataset, name=None):
"""This sets self.group and self.group_id"""
if name == None:
- self.name, self.id, self.genetic_type = fetchone(dataset.query_for_group)
+ self.name, self.id, self.genetic_type = fetchone(
+ dataset.query_for_group)
else:
- self.name, self.id, self.genetic_type = fetchone("SELECT InbredSet.Name, InbredSet.Id, InbredSet.GeneticType FROM InbredSet where Name='%s'" % name)
+ self.name, self.id, self.genetic_type = fetchone(
+ "SELECT InbredSet.Name, InbredSet.Id, InbredSet.GeneticType FROM InbredSet where Name='%s'" % name)
if self.name == 'BXD300':
self.name = "BXD"
@@ -366,7 +351,8 @@ class DatasetGroup(object):
def get_mapping_methods(self):
- mapping_id = g.db.execute("select MappingMethodId from InbredSet where Name= '%s'" % self.name).fetchone()[0]
+ mapping_id = g.db.execute(
+ "select MappingMethodId from InbredSet where Name= '%s'" % self.name).fetchone()[0]
if mapping_id == "1":
mapping_names = ["GEMMA", "QTLReaper", "R/qtl"]
elif mapping_id == "2":
@@ -383,8 +369,8 @@ class DatasetGroup(object):
def get_markers(self):
def check_plink_gemma():
if flat_file_exists("mapping"):
- MAPPING_PATH = flat_files("mapping")+"/"
- if os.path.isfile(MAPPING_PATH+self.name+".bed"):
+ MAPPING_PATH = flat_files("mapping") + "/"
+ if os.path.isfile(MAPPING_PATH + self.name + ".bed"):
return True
return False
@@ -410,6 +396,15 @@ class DatasetGroup(object):
if maternal and paternal:
self.parlist = [maternal, paternal]
+ def get_study_samplelists(self):
+ study_sample_file = locate_ignore_error(self.name + ".json", 'study_sample_lists')
+ try:
+ f = open(study_sample_file)
+ except:
+ return []
+ study_samples = json.load(f)
+ return study_samples
+
def get_genofiles(self):
jsonfile = "%s/%s.json" % (webqtlConfig.GENODIR, self.name)
try:
@@ -423,22 +418,23 @@ class DatasetGroup(object):
result = None
key = "samplelist:v3:" + self.name
if USE_REDIS:
- result = Redis.get(key)
+ result = r.get(key)
if result is not None:
self.samplelist = json.loads(result)
else:
logger.debug("Cache not hit")
- genotype_fn = locate_ignore_error(self.name+".geno",'genotype')
+ genotype_fn = locate_ignore_error(self.name + ".geno", 'genotype')
if genotype_fn:
- self.samplelist = get_group_samplelists.get_samplelist("geno", genotype_fn)
+ self.samplelist = get_group_samplelists.get_samplelist(
+ "geno", genotype_fn)
else:
self.samplelist = None
if USE_REDIS:
- Redis.set(key, json.dumps(self.samplelist))
- Redis.expire(key, 60*5)
+ r.set(key, json.dumps(self.samplelist))
+ r.expire(key, 60 * 5)
def all_samples_ordered(self):
result = []
@@ -448,32 +444,28 @@ class DatasetGroup(object):
def read_genotype_file(self, use_reaper=False):
'''Read genotype from .geno file instead of database'''
- #genotype_1 is Dataset Object without parents and f1
- #genotype_2 is Dataset Object with parents and f1 (not for intercross)
+ # genotype_1 is Dataset Object without parents and f1
+ # genotype_2 is Dataset Object with parents and f1 (not for intercross)
- #genotype_1 = reaper.Dataset()
# reaper barfs on unicode filenames, so here we ensure it's a string
if self.genofile:
- if "RData" in self.genofile: #ZS: This is a temporary fix; I need to change the way the JSON files that point to multiple genotype files are structured to point to other file types like RData
- full_filename = str(locate(self.genofile.split(".")[0] + ".geno", 'genotype'))
+ if "RData" in self.genofile: # ZS: This is a temporary fix; I need to change the way the JSON files that point to multiple genotype files are structured to point to other file types like RData
+ full_filename = str(
+ locate(self.genofile.split(".")[0] + ".geno", 'genotype'))
else:
full_filename = str(locate(self.genofile, 'genotype'))
else:
full_filename = str(locate(self.name + '.geno', 'genotype'))
-
- if use_reaper:
- genotype_1 = reaper.Dataset()
- genotype_1.read(full_filename)
- else:
- genotype_1 = gen_geno_ob.genotype(full_filename)
+ genotype_1 = gen_geno_ob.genotype(full_filename)
if genotype_1.type == "group" and self.parlist:
- genotype_2 = genotype_1.add(Mat=self.parlist[0], Pat=self.parlist[1]) #, F1=_f1)
+ genotype_2 = genotype_1.add(
+ Mat=self.parlist[0], Pat=self.parlist[1]) # , F1=_f1)
else:
genotype_2 = genotype_1
- #determine default genotype object
+ # determine default genotype object
if self.incparentsf1 and genotype_1.type != "intercross":
genotype = genotype_2
else:
@@ -484,27 +476,21 @@ class DatasetGroup(object):
return genotype
-def datasets(group_name, this_group = None):
+
+def datasets(group_name, this_group=None):
key = "group_dataset_menu:v2:" + group_name
- logger.debug("key is2:", key)
dataset_menu = []
- logger.debug("[tape4] webqtlConfig.PUBLICTHRESH:", webqtlConfig.PUBLICTHRESH)
- logger.debug("[tape4] type webqtlConfig.PUBLICTHRESH:", type(webqtlConfig.PUBLICTHRESH))
the_results = fetchall('''
(SELECT '#PublishFreeze',PublishFreeze.FullName,PublishFreeze.Name
FROM PublishFreeze,InbredSet
WHERE PublishFreeze.InbredSetId = InbredSet.Id
and InbredSet.Name = '%s'
- and PublishFreeze.public > %s
- and PublishFreeze.confidentiality < 1
ORDER BY PublishFreeze.Id ASC)
UNION
(SELECT '#GenoFreeze',GenoFreeze.FullName,GenoFreeze.Name
FROM GenoFreeze, InbredSet
WHERE GenoFreeze.InbredSetId = InbredSet.Id
- and InbredSet.Name = '%s'
- and GenoFreeze.public > %s
- and GenoFreeze.confidentiality < 1)
+ and InbredSet.Name = '%s')
UNION
(SELECT Tissue.Name, ProbeSetFreeze.FullName,ProbeSetFreeze.Name
FROM ProbeSetFreeze, ProbeFreeze, InbredSet, Tissue
@@ -512,16 +498,15 @@ def datasets(group_name, this_group = None):
and ProbeFreeze.TissueId = Tissue.Id
and ProbeFreeze.InbredSetId = InbredSet.Id
and InbredSet.Name like %s
- and ProbeSetFreeze.public > %s
- and ProbeSetFreeze.confidentiality < 1
ORDER BY Tissue.Name, ProbeSetFreeze.OrderList DESC)
- ''' % (group_name, webqtlConfig.PUBLICTHRESH,
- group_name, webqtlConfig.PUBLICTHRESH,
- "'" + group_name + "'", webqtlConfig.PUBLICTHRESH))
+ ''' % (group_name,
+ group_name,
+ "'" + group_name + "'"))
sorted_results = sorted(the_results, key=lambda kv: kv[0])
- pheno_inserted = False #ZS: This is kind of awkward, but need to ensure Phenotypes show up before Genotypes in dropdown
+ # ZS: This is kind of awkward, but need to ensure Phenotypes show up before Genotypes in dropdown
+ pheno_inserted = False
geno_inserted = False
for dataset_item in sorted_results:
tissue_name = dataset_item[0]
@@ -529,13 +514,16 @@ def datasets(group_name, this_group = None):
dataset_short = dataset_item[2]
if tissue_name in ['#PublishFreeze', '#GenoFreeze']:
if tissue_name == '#PublishFreeze' and (dataset_short == group_name + 'Publish'):
- dataset_menu.insert(0, dict(tissue=None, datasets=[(dataset, dataset_short)]))
+ dataset_menu.insert(
+ 0, dict(tissue=None, datasets=[(dataset, dataset_short)]))
pheno_inserted = True
elif pheno_inserted and tissue_name == '#GenoFreeze':
- dataset_menu.insert(1, dict(tissue=None, datasets=[(dataset, dataset_short)]))
+ dataset_menu.insert(
+ 1, dict(tissue=None, datasets=[(dataset, dataset_short)]))
geno_inserted = True
else:
- dataset_menu.append(dict(tissue=None, datasets=[(dataset, dataset_short)]))
+ dataset_menu.append(
+ dict(tissue=None, datasets=[(dataset, dataset_short)]))
else:
tissue_already_exists = False
for i, tissue_dict in enumerate(dataset_menu):
@@ -544,15 +532,14 @@ def datasets(group_name, this_group = None):
break
if tissue_already_exists:
- #logger.debug("dataset_menu:", dataset_menu[i]['datasets'])
dataset_menu[i]['datasets'].append((dataset, dataset_short))
else:
dataset_menu.append(dict(tissue=tissue_name,
- datasets=[(dataset, dataset_short)]))
+ datasets=[(dataset, dataset_short)]))
if USE_REDIS:
- Redis.set(key, pickle.dumps(dataset_menu, pickle.HIGHEST_PROTOCOL))
- Redis.expire(key, 60*5)
+ r.set(key, pickle.dumps(dataset_menu, pickle.HIGHEST_PROTOCOL))
+ r.expire(key, 60 * 5)
if this_group != None:
this_group._datasets = dataset_menu
@@ -560,14 +547,15 @@ def datasets(group_name, this_group = None):
else:
return dataset_menu
-class DataSet(object):
+
+class DataSet:
"""
DataSet class defines a dataset in webqtl, can be either Microarray,
Published phenotype, genotype, or user input dataset(temp)
"""
- def __init__(self, name, get_samplelist = True, group_name = None):
+ def __init__(self, name, get_samplelist=True, group_name=None):
assert name, "Need a name"
self.name = name
@@ -575,30 +563,34 @@ class DataSet(object):
self.shortname = None
self.fullname = None
self.type = None
- self.data_scale = None #ZS: For example log2
+ self.data_scale = None # ZS: For example log2
+ self.accession_id = None
self.setup()
- if self.type == "Temp": #Need to supply group name as input if temp trait
- self.group = DatasetGroup(self, name=group_name) # sets self.group and self.group_id and gets genotype
+ if self.type == "Temp": # Need to supply group name as input if temp trait
+ # sets self.group and self.group_id and gets genotype
+ self.group = DatasetGroup(self, name=group_name)
else:
self.check_confidentiality()
self.retrieve_other_names()
- self.group = DatasetGroup(self) # sets self.group and self.group_id and gets genotype
+ # sets self.group and self.group_id and gets genotype
+ self.group = DatasetGroup(self)
self.accession_id = self.get_accession_id()
if get_samplelist == True:
- self.group.get_samplelist()
+ self.group.get_samplelist()
self.species = species.TheSpecies(self)
-
- def get_desc(self):
- """Gets overridden later, at least for Temp...used by trait's get_given_name"""
- return None
-
- # Delete this eventually
- @property
- def riset():
- Weve_Renamed_This_As_Group
+ def as_dict(self):
+ return {
+ 'name': self.name,
+ 'shortname': self.shortname,
+ 'fullname': self.fullname,
+ 'type': self.type,
+ 'data_scale': self.data_scale,
+ 'group': self.group.name,
+ 'accession_id': self.accession_id
+ }
def get_accession_id(self):
if self.type == "Publish":
@@ -637,29 +629,26 @@ class DataSet(object):
"""
-
try:
if self.type == "ProbeSet":
query_args = tuple(escape(x) for x in (
- str(webqtlConfig.PUBLICTHRESH),
self.name,
self.name,
self.name))
self.id, self.name, self.fullname, self.shortname, self.data_scale, self.tissue = fetch1("""
-SELECT ProbeSetFreeze.Id, ProbeSetFreeze.Name, ProbeSetFreeze.FullName, ProbeSetFreeze.ShortName, ProbeSetFreeze.DataScale, Tissue.Name
-FROM ProbeSetFreeze, ProbeFreeze, Tissue
-WHERE ProbeSetFreeze.public > %s
-AND ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id
-AND ProbeFreeze.TissueId = Tissue.Id
-AND (ProbeSetFreeze.Name = '%s' OR ProbeSetFreeze.FullName = '%s' OR ProbeSetFreeze.ShortName = '%s')
- """ % (query_args),"/dataset/"+self.name+".json",
- lambda r: (r["id"],r["name"],r["full_name"],r["short_name"],r["data_scale"],r["tissue"])
+ SELECT ProbeSetFreeze.Id, ProbeSetFreeze.Name, ProbeSetFreeze.FullName, ProbeSetFreeze.ShortName, ProbeSetFreeze.DataScale, Tissue.Name
+ FROM ProbeSetFreeze, ProbeFreeze, Tissue
+ WHERE ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id
+ AND ProbeFreeze.TissueId = Tissue.Id
+ AND (ProbeSetFreeze.Name = '%s' OR ProbeSetFreeze.FullName = '%s' OR ProbeSetFreeze.ShortName = '%s')
+ """ % (query_args), "/dataset/" + self.name + ".json",
+ lambda r: (r["id"], r["name"], r["full_name"],
+ r["short_name"], r["data_scale"], r["tissue"])
)
else:
query_args = tuple(escape(x) for x in (
(self.type + "Freeze"),
- str(webqtlConfig.PUBLICTHRESH),
self.name,
self.name,
self.name))
@@ -668,14 +657,77 @@ AND (ProbeSetFreeze.Name = '%s' OR ProbeSetFreeze.FullName = '%s' OR ProbeSetFre
self.id, self.name, self.fullname, self.shortname = fetchone("""
SELECT Id, Name, FullName, ShortName
FROM %s
- WHERE public > %s AND
- (Name = '%s' OR FullName = '%s' OR ShortName = '%s')
- """ % (query_args))
+ WHERE (Name = '%s' OR FullName = '%s' OR ShortName = '%s')
+ """ % (query_args))
except TypeError:
- logger.debug("Dataset {} is not yet available in GeneNetwork.".format(self.name))
+ logger.debug(
+ "Dataset {} is not yet available in GeneNetwork.".format(self.name))
pass
+ def chunk_dataset(self, dataset, n):
+
+ results = {}
+
+ query = """
+ SELECT ProbeSetXRef.DataId,ProbeSet.Name
+ FROM ProbeSet, ProbeSetXRef, ProbeSetFreeze
+ WHERE ProbeSetFreeze.Name = '{}' AND
+ ProbeSetXRef.ProbeSetFreezeId = ProbeSetFreeze.Id AND
+ ProbeSetXRef.ProbeSetId = ProbeSet.Id
+ """.format(self.name)
+
+ # should cache this
+
+ traits_name_dict = dict(g.db.execute(query).fetchall())
+
+ for i in range(0, len(dataset), n):
+ matrix = list(dataset[i:i + n])
+ trait_name = traits_name_dict[matrix[0][0]]
+
+ my_values = [value for (trait_name, strain, value) in matrix]
+ results[trait_name] = my_values
+ return results
+
+ def get_probeset_data(self, sample_list=None, trait_ids=None):
+
+ # improvement of get trait data--->>>
+ if sample_list:
+ self.samplelist = sample_list
+
+ else:
+ self.samplelist = self.group.samplelist
+
+ if self.group.parlist != None and self.group.f1list != None:
+ if (self.group.parlist + self.group.f1list) in self.samplelist:
+ self.samplelist += self.group.parlist + self.group.f1list
+
+ query = """
+ SELECT Strain.Name, Strain.Id FROM Strain, Species
+ WHERE Strain.Name IN {}
+ and Strain.SpeciesId=Species.Id
+ and Species.name = '{}'
+ """.format(create_in_clause(self.samplelist), *mescape(self.group.species))
+ results = dict(g.db.execute(query).fetchall())
+ sample_ids = [results[item] for item in self.samplelist]
+
+ sorted_samplelist = [strain_name for strain_name, strain_id in sorted(
+ results.items(), key=lambda item: item[1])]
+
+ query = """SELECT * from ProbeSetData
+ where StrainID in {}
+ and id in (SELECT ProbeSetXRef.DataId
+ FROM (ProbeSet, ProbeSetXRef, ProbeSetFreeze)
+ WHERE ProbeSetXRef.ProbeSetFreezeId = ProbeSetFreeze.Id
+ and ProbeSetFreeze.Name = '{}'
+ and ProbeSet.Id = ProbeSetXRef.ProbeSetId)""".format(create_in_clause(sample_ids), self.name)
+
+ query_results = list(g.db.execute(query).fetchall())
+ data_results = self.chunk_dataset(query_results, len(sample_ids))
+ self.samplelist = sorted_samplelist
+ self.trait_data = data_results
+
+
def get_trait_data(self, sample_list=None):
if sample_list:
self.samplelist = sample_list
@@ -692,7 +744,6 @@ AND (ProbeSetFreeze.Name = '%s' OR ProbeSetFreeze.FullName = '%s' OR ProbeSetFre
and Strain.SpeciesId=Species.Id
and Species.name = '{}'
""".format(create_in_clause(self.samplelist), *mescape(self.group.species))
- logger.sql(query)
results = dict(g.db.execute(query).fetchall())
sample_ids = [results[item] for item in self.samplelist]
@@ -713,7 +764,7 @@ AND (ProbeSetFreeze.Name = '%s' OR ProbeSetFreeze.FullName = '%s' OR ProbeSetFre
else:
query = "SELECT {}.Name,".format(escape(dataset_type))
data_start_pos = 1
- query += string.join(temp, ', ')
+ query += ', '.join(temp)
query += ' FROM ({}, {}XRef, {}Freeze) '.format(*mescape(dataset_type,
self.type,
self.type))
@@ -731,7 +782,7 @@ AND (ProbeSetFreeze.Name = '%s' OR ProbeSetFreeze.FullName = '%s' OR ProbeSetFre
and {}.Id = {}XRef.{}Id
order by {}.Id
""".format(*mescape(self.type, self.type, self.type, self.name,
- dataset_type, self.type, dataset_type, dataset_type))
+ dataset_type, self.type, dataset_type, dataset_type))
else:
query += """
WHERE {}XRef.{}FreezeId = {}Freeze.Id
@@ -739,13 +790,9 @@ AND (ProbeSetFreeze.Name = '%s' OR ProbeSetFreeze.FullName = '%s' OR ProbeSetFre
and {}.Id = {}XRef.{}Id
order by {}.Id
""".format(*mescape(self.type, self.type, self.type, self.type,
- self.name, dataset_type, self.type, self.type, dataset_type))
+ self.name, dataset_type, self.type, self.type, dataset_type))
- #logger.debug("trait data query: ", query)
-
- logger.sql(query)
results = g.db.execute(query).fetchall()
- #logger.debug("query results:", results)
trait_sample_data.append(results)
trait_count = len(trait_sample_data[0])
@@ -759,25 +806,23 @@ AND (ProbeSetFreeze.Name = '%s' OR ProbeSetFreeze.FullName = '%s' OR ProbeSetFre
self.trait_data[trait_name] += (
trait_sample_data[chunk_counter][trait_counter][data_start_pos:])
+
class PhenotypeDataSet(DataSet):
DS_NAME_MAP['Publish'] = 'PhenotypeDataSet'
def setup(self):
-
- #logger.debug("IS A PHENOTYPEDATASET")
-
# Fields in the database table
self.search_fields = ['Phenotype.Post_publication_description',
- 'Phenotype.Pre_publication_description',
- 'Phenotype.Pre_publication_abbreviation',
- 'Phenotype.Post_publication_abbreviation',
- 'PublishXRef.mean',
- 'Phenotype.Lab_code',
- 'Publication.PubMed_ID',
- 'Publication.Abstract',
- 'Publication.Title',
- 'Publication.Authors',
- 'PublishXRef.Id']
+ 'Phenotype.Pre_publication_description',
+ 'Phenotype.Pre_publication_abbreviation',
+ 'Phenotype.Post_publication_abbreviation',
+ 'PublishXRef.mean',
+ 'Phenotype.Lab_code',
+ 'Publication.PubMed_ID',
+ 'Publication.Abstract',
+ 'Publication.Title',
+ 'Publication.Authors',
+ 'PublishXRef.Id']
# Figure out what display_fields is
self.display_fields = ['name', 'group_code',
@@ -799,13 +844,13 @@ class PhenotypeDataSet(DataSet):
# Fields displayed in the search results table header
self.header_fields = ['Index',
- 'Record',
- 'Description',
- 'Authors',
- 'Year',
- 'Max LRS',
- 'Max LRS Location',
- 'Additive Effect']
+ 'Record',
+ 'Description',
+ 'Authors',
+ 'Year',
+ 'Max LRS',
+ 'Max LRS Location',
+ 'Additive Effect']
self.type = 'Publish'
@@ -823,7 +868,7 @@ class PhenotypeDataSet(DataSet):
# (Urgently?) Need to write this
pass
- def get_trait_info(self, trait_list, species = ''):
+ def get_trait_info(self, trait_list, species=''):
for this_trait in trait_list:
if not this_trait.haveinfo:
@@ -831,9 +876,9 @@ class PhenotypeDataSet(DataSet):
description = this_trait.post_publication_description
- #If the dataset is confidential and the user has access to confidential
- #phenotype traits, then display the pre-publication description instead
- #of the post-publication description
+ # If the dataset is confidential and the user has access to confidential
+ # phenotype traits, then display the pre-publication description instead
+ # of the post-publication description
if this_trait.confidential:
this_trait.description_display = ""
continue # for now, because no authorization features
@@ -858,7 +903,7 @@ class PhenotypeDataSet(DataSet):
if this_trait.pubmed_id:
this_trait.pubmed_link = webqtlConfig.PUBMEDLINK_URL % this_trait.pubmed_id
- #LRS and its location
+ # LRS and its location
this_trait.LRS_score_repr = "N/A"
this_trait.LRS_location_repr = "N/A"
@@ -869,7 +914,6 @@ class PhenotypeDataSet(DataSet):
Geno.Name = '%s' and
Geno.SpeciesId = Species.Id
""" % (species, this_trait.locus)
- logger.sql(query)
result = g.db.execute(query).fetchone()
if result:
@@ -878,7 +922,8 @@ class PhenotypeDataSet(DataSet):
LRS_Mb = result[1]
this_trait.LRS_score_repr = LRS_score_repr = '%3.1f' % this_trait.lrs
- this_trait.LRS_location_repr = LRS_location_repr = 'Chr%s: %.6f' % (LRS_Chr, float(LRS_Mb))
+ this_trait.LRS_location_repr = LRS_location_repr = 'Chr%s: %.6f' % (
+ LRS_Chr, float(LRS_Mb))
def retrieve_sample_data(self, trait):
query = """
@@ -898,7 +943,6 @@ class PhenotypeDataSet(DataSet):
Order BY
Strain.Name
"""
- logger.sql(query)
results = g.db.execute(query, (trait, self.id)).fetchall()
return results
@@ -945,12 +989,13 @@ class GenotypeDataSet(DataSet):
this_trait.retrieveInfo()
if this_trait.chr and this_trait.mb:
- this_trait.location_repr = 'Chr%s: %.6f' % (this_trait.chr, float(this_trait.mb) )
+ this_trait.location_repr = 'Chr%s: %.6f' % (
+ this_trait.chr, float(this_trait.mb))
def retrieve_sample_data(self, trait):
query = """
SELECT
- Strain.Name, GenoData.value, GenoSE.error, GenoData.Id, Strain.Name2
+ Strain.Name, GenoData.value, GenoSE.error, "N/A", Strain.Name2
FROM
(GenoData, GenoFreeze, Strain, Geno, GenoXRef)
left join GenoSE on
@@ -964,7 +1009,6 @@ class GenotypeDataSet(DataSet):
Order BY
Strain.Name
"""
- logger.sql(query)
results = g.db.execute(query,
(webqtlDatabaseFunction.retrieve_species_id(self.group.name),
trait, self.name)).fetchall()
@@ -1014,14 +1058,14 @@ class MrnaAssayDataSet(DataSet):
# Fields displayed in the search results table header
self.header_fields = ['Index',
- 'Record',
- 'Symbol',
- 'Description',
- 'Location',
- 'Mean',
- 'Max LRS',
- 'Max LRS Location',
- 'Additive Effect']
+ 'Record',
+ 'Symbol',
+ 'Description',
+ 'Location',
+ 'Mean',
+ 'Max LRS',
+ 'Max LRS Location',
+ 'Additive Effect']
# Todo: Obsolete or rename this field
self.type = 'ProbeSet'
@@ -1037,7 +1081,6 @@ class MrnaAssayDataSet(DataSet):
ProbeSetFreeze.Name = "%s"
''' % escape(self.name)
-
def check_confidentiality(self):
return geno_mrna_confidentiality(self)
@@ -1055,37 +1098,37 @@ class MrnaAssayDataSet(DataSet):
if not this_trait.symbol:
this_trait.symbol = "N/A"
- #XZ, 12/08/2008: description
- #XZ, 06/05/2009: Rob asked to add probe target description
- description_string = unicode(str(this_trait.description).strip(codecs.BOM_UTF8), 'utf-8')
- target_string = unicode(str(this_trait.probe_target_description).strip(codecs.BOM_UTF8), 'utf-8')
+ # XZ, 12/08/2008: description
+ # XZ, 06/05/2009: Rob asked to add probe target description
+ description_string = str(
+ str(this_trait.description).strip(codecs.BOM_UTF8), 'utf-8')
+ target_string = str(
+ str(this_trait.probe_target_description).strip(codecs.BOM_UTF8), 'utf-8')
if len(description_string) > 1 and description_string != 'None':
description_display = description_string
else:
description_display = this_trait.symbol
- if (len(description_display) > 1 and description_display != 'N/A' and
- len(target_string) > 1 and target_string != 'None'):
+ if (len(description_display) > 1 and description_display != 'N/A'
+ and len(target_string) > 1 and target_string != 'None'):
description_display = description_display + '; ' + target_string.strip()
# Save it for the jinja2 template
this_trait.description_display = description_display
if this_trait.chr and this_trait.mb:
- this_trait.location_repr = 'Chr%s: %.6f' % (this_trait.chr, float(this_trait.mb))
+ this_trait.location_repr = 'Chr%s: %.6f' % (
+ this_trait.chr, float(this_trait.mb))
- #Get mean expression value
+ # Get mean expression value
query = (
- """select ProbeSetXRef.mean from ProbeSetXRef, ProbeSet
+ """select ProbeSetXRef.mean from ProbeSetXRef, ProbeSet
where ProbeSetXRef.ProbeSetFreezeId = %s and
ProbeSet.Id = ProbeSetXRef.ProbeSetId and
ProbeSet.Name = '%s'
""" % (escape(str(this_trait.dataset.id)),
escape(this_trait.name)))
-
- #logger.debug("query is:", pf(query))
- logger.sql(query)
result = g.db.execute(query).fetchone()
mean = result[0] if result else 0
@@ -1093,11 +1136,11 @@ class MrnaAssayDataSet(DataSet):
if mean:
this_trait.mean = "%2.3f" % mean
- #LRS and its location
+ # LRS and its location
this_trait.LRS_score_repr = 'N/A'
this_trait.LRS_location_repr = 'N/A'
- #Max LRS and its Locus location
+ # Max LRS and its Locus location
if this_trait.lrs and this_trait.locus:
query = """
select Geno.Chr, Geno.Mb from Geno, Species
@@ -1105,24 +1148,28 @@ class MrnaAssayDataSet(DataSet):
Geno.Name = '{}' and
Geno.SpeciesId = Species.Id
""".format(species, this_trait.locus)
- logger.sql(query)
result = g.db.execute(query).fetchone()
if result:
lrs_chr, lrs_mb = result
this_trait.LRS_score_repr = '%3.1f' % this_trait.lrs
- this_trait.LRS_location_repr = 'Chr%s: %.6f' % (lrs_chr, float(lrs_mb))
+ this_trait.LRS_location_repr = 'Chr%s: %.6f' % (
+ lrs_chr, float(lrs_mb))
return trait_list
def retrieve_sample_data(self, trait):
query = """
SELECT
- Strain.Name, ProbeSetData.value, ProbeSetSE.error, ProbeSetData.Id, Strain.Name2
+ Strain.Name, ProbeSetData.value, ProbeSetSE.error, NStrain.count, Strain.Name2
FROM
- (ProbeSetData, ProbeSetFreeze, Strain, ProbeSet, ProbeSetXRef)
+ (ProbeSetData, ProbeSetFreeze,
+ Strain, ProbeSet, ProbeSetXRef)
left join ProbeSetSE on
(ProbeSetSE.DataId = ProbeSetData.Id AND ProbeSetSE.StrainId = ProbeSetData.StrainId)
+ left join NStrain on
+ (NStrain.DataId = ProbeSetData.Id AND
+ NStrain.StrainId = ProbeSetData.StrainId)
WHERE
ProbeSet.Name = '%s' AND ProbeSetXRef.ProbeSetId = ProbeSet.Id AND
ProbeSetXRef.ProbeSetFreezeId = ProbeSetFreeze.Id AND
@@ -1132,9 +1179,7 @@ class MrnaAssayDataSet(DataSet):
Order BY
Strain.Name
""" % (escape(trait), escape(self.name))
- logger.sql(query)
results = g.db.execute(query).fetchall()
- #logger.debug("RETRIEVED RESULTS HERE:", results)
return results
def retrieve_genes(self, column_name):
@@ -1144,7 +1189,6 @@ class MrnaAssayDataSet(DataSet):
where ProbeSetXRef.ProbeSetFreezeId = %s and
ProbeSetXRef.ProbeSetId=ProbeSet.Id;
""" % (column_name, escape(str(self.id)))
- logger.sql(query)
results = g.db.execute(query).fetchall()
return dict(results)
@@ -1173,48 +1217,21 @@ class TempDataSet(DataSet):
self.shortname = 'Temp'
- @staticmethod
- def handle_pca(desc):
- if 'PCA' in desc:
- # Todo: Modernize below lines
- desc = desc[desc.rindex(':')+1:].strip()
- else:
- desc = desc[:desc.index('entered')].strip()
- return desc
-
- def get_desc(self):
- query = 'SELECT description FROM Temp WHERE Name=%s' % self.name
- logger.sql(query)
- g.db.execute(query)
- desc = g.db.fetchone()[0]
- desc = self.handle_pca(desc)
- return desc
-
- def retrieve_sample_data(self, trait):
- query = """
- SELECT
- Strain.Name, TempData.value, TempData.SE, TempData.NStrain, TempData.Id
- FROM
- TempData, Temp, Strain
- WHERE
- TempData.StrainId = Strain.Id AND
- TempData.Id = Temp.DataId AND
- Temp.name = '%s'
- Order BY
- Strain.Name
- """ % escape(trait.name)
-
- logger.sql(query)
- results = g.db.execute(query).fetchall()
-
-
def geno_mrna_confidentiality(ob):
dataset_table = ob.type + "Freeze"
- #logger.debug("dataset_table [%s]: %s" % (type(dataset_table), dataset_table))
query = '''SELECT Id, Name, FullName, confidentiality,
- AuthorisedUsers FROM %s WHERE Name = "%s"''' % (dataset_table,ob.name)
- logger.sql(query)
+ AuthorisedUsers FROM %s WHERE Name = "%s"''' % (dataset_table, ob.name)
+ result = g.db.execute(query)
+
+ (dataset_id,
+ name,
+ full_name,
+ confidential,
+ authorized_users) = result.fetchall()[0]
+
+ if confidential:
+ return True
result = g.db.execute(query)
(dataset_id,
diff --git a/wqflask/base/mrna_assay_tissue_data.py b/wqflask/base/mrna_assay_tissue_data.py
index 6fec5dcd..8f8e2b0a 100644
--- a/wqflask/base/mrna_assay_tissue_data.py
+++ b/wqflask/base/mrna_assay_tissue_data.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
import collections
from flask import g
@@ -7,14 +5,15 @@ from flask import g
from utility import db_tools
from utility import Bunch
-from MySQLdb import escape_string as escape
+from utility.db_tools import escape
+from gn3.db_utils import database_connector
-from pprint import pformat as pf
from utility.logger import getLogger
-logger = getLogger(__name__ )
+logger = getLogger(__name__)
+
-class MrnaAssayTissueData(object):
+class MrnaAssayTissueData:
def __init__(self, gene_symbols=None):
self.gene_symbols = gene_symbols
@@ -23,7 +22,7 @@ class MrnaAssayTissueData(object):
self.data = collections.defaultdict(Bunch)
- query = '''select t.Symbol, t.GeneId, t.DataId, t.Chr, t.Mb, t.description, t.Probe_Target_Description
+ query = '''select t.Symbol, t.GeneId, t.DataId, t.Chr, t.Mb, t.description, t.Probe_Target_Description
from (
select Symbol, max(Mean) as maxmean
from TissueProbeSetXRef
@@ -34,29 +33,31 @@ class MrnaAssayTissueData(object):
# Due to the limit size of TissueProbeSetFreezeId table in DB,
# performance of inner join is acceptable.MrnaAssayTissueData(gene_symbols=symbol_list)
if len(gene_symbols) == 0:
- query += '''Symbol!='' and Symbol Is Not Null group by Symbol)
+ query += '''Symbol!='' and Symbol Is Not Null group by Symbol)
as x inner join TissueProbeSetXRef as t on t.Symbol = x.Symbol
and t.Mean = x.maxmean;
'''
else:
in_clause = db_tools.create_in_clause(gene_symbols)
- #ZS: This was in the query, not sure why: http://docs.python.org/2/library/string.html?highlight=lower#string.lower
+ # ZS: This was in the query, not sure why: http://docs.python.org/2/library/string.html?highlight=lower#string.lower
query += ''' Symbol in {} group by Symbol)
as x inner join TissueProbeSetXRef as t on t.Symbol = x.Symbol
and t.Mean = x.maxmean;
'''.format(in_clause)
- results = g.db.execute(query).fetchall()
- lower_symbols = []
+ # lower_symbols = []
+ lower_symbols = {}
for gene_symbol in gene_symbols:
+ # lower_symbols[gene_symbol.lower()] = True
if gene_symbol != None:
- lower_symbols.append(gene_symbol.lower())
-
+ lower_symbols[gene_symbol.lower()] = True
+ results = list(g.db.execute(query).fetchall())
for result in results:
symbol = result[0]
- if symbol.lower() in lower_symbols:
+ if symbol is not None and lower_symbols.get(symbol.lower()):
+
symbol = symbol.lower()
self.data[symbol].gene_id = result.GeneId
@@ -67,16 +68,16 @@ class MrnaAssayTissueData(object):
self.data[symbol].probe_target_description = result.Probe_Target_Description
###########################################################################
- #Input: cursor, symbolList (list), dataIdDict(Dict)
- #output: symbolValuepairDict (dictionary):one dictionary of Symbol and Value Pair,
+ # Input: cursor, symbolList (list), dataIdDict(Dict)
+ # output: symbolValuepairDict (dictionary):one dictionary of Symbol and Value Pair,
# key is symbol, value is one list of expression values of one probeSet;
- #function: get one dictionary whose key is gene symbol and value is tissue expression data (list type).
- #Attention! All keys are lower case!
+ # function: get one dictionary whose key is gene symbol and value is tissue expression data (list type).
+ # Attention! All keys are lower case!
###########################################################################
def get_symbol_values_pairs(self):
id_list = [self.data[symbol].data_id for symbol in self.data]
-
+
symbol_values_dict = {}
if len(id_list) > 0:
@@ -85,11 +86,13 @@ class MrnaAssayTissueData(object):
WHERE TissueProbeSetData.Id IN {} and
TissueProbeSetXRef.DataId = TissueProbeSetData.Id""".format(db_tools.create_in_clause(id_list))
+
results = g.db.execute(query).fetchall()
for result in results:
if result.Symbol.lower() not in symbol_values_dict:
symbol_values_dict[result.Symbol.lower()] = [result.value]
else:
- symbol_values_dict[result.Symbol.lower()].append(result.value)
+ symbol_values_dict[result.Symbol.lower()].append(
+ result.value)
- return symbol_values_dict \ No newline at end of file
+ return symbol_values_dict
diff --git a/wqflask/base/species.py b/wqflask/base/species.py
index 6d99af65..f303aabb 100644
--- a/wqflask/base/species.py
+++ b/wqflask/base/species.py
@@ -1,62 +1,66 @@
-from __future__ import absolute_import, print_function, division
+from collections import OrderedDict
+from dataclasses import dataclass
+from dataclasses import InitVar
+from typing import Optional, Dict
+from flask import g
-import collections
-from flask import Flask, g
+@dataclass
+class TheSpecies:
+ """Data related to species."""
+ dataset: Optional[Dict] = None
+ species_name: Optional[str] = None
-#from MySQLdb import escape_string as escape
-
-from utility import Bunch
-
-from pprint import pformat as pf
-
-from utility.logger import getLogger
-logger = getLogger(__name__ )
-
-class TheSpecies(object):
- def __init__(self, dataset=None, species_name=None):
- if species_name != None:
- self.name = species_name
+ def __post_init__(self):
+ if self.species_name is not None:
+ self.name = self.species_name
self.chromosomes = Chromosomes(species=self.name)
else:
- self.dataset = dataset
self.chromosomes = Chromosomes(dataset=self.dataset)
-class IndChromosome(object):
- def __init__(self, name, length):
- self.name = name
- self.length = length
+
+@dataclass
+class IndChromosome:
+ """Data related to IndChromosome"""
+ name: str
+ length: int
@property
def mb_length(self):
- """Chromosome length in megabases"""
+ """Chromosome length in mega-bases"""
return self.length / 1000000
-class Chromosomes(object):
- def __init__(self, dataset=None, species=None):
- self.chromosomes = collections.OrderedDict()
- if species != None:
- query = """
- Select
- Chr_Length.Name, Chr_Length.OrderId, Length from Chr_Length, Species
- where
- Chr_Length.SpeciesId = Species.SpeciesId AND
- Species.Name = '%s'
- Order by OrderId
- """ % species.capitalize()
- else:
+
+@dataclass
+class Chromosomes:
+ """Data related to a chromosome"""
+ dataset: InitVar[Dict] = None
+ species: Optional[str] = None
+
+ def __post_init__(self, dataset):
+ if self.species is None:
self.dataset = dataset
- query = """
- Select
- Chr_Length.Name, Chr_Length.OrderId, Length from Chr_Length, InbredSet
- where
- Chr_Length.SpeciesId = InbredSet.SpeciesId AND
- InbredSet.Name = '%s'
- Order by OrderId
- """ % self.dataset.group.name
- logger.sql(query)
+ @property
+ def chromosomes(self):
+ """Lazily fetch the chromosomes"""
+ chromosomes = OrderedDict()
+ if self.species is not None:
+ query = (
+ "SELECT Chr_Length.Name, Chr_Length.OrderId, Length "
+ "FROM Chr_Length, Species WHERE "
+ "Chr_Length.SpeciesId = Species.SpeciesId AND "
+ "Species.Name = "
+ "'%s' ORDER BY OrderId" % self.species.capitalize())
+ else:
+ query = (
+ "SELECT Chr_Length.Name, Chr_Length.OrderId, "
+ "Length FROM Chr_Length, InbredSet WHERE "
+ "Chr_Length.SpeciesId = InbredSet.SpeciesId AND "
+ "InbredSet.Name = "
+ "'%s' ORDER BY OrderId" % self.dataset.group.name)
results = g.db.execute(query).fetchall()
-
for item in results:
- self.chromosomes[item.OrderId] = IndChromosome(item.Name, item.Length) \ No newline at end of file
+ chromosomes[item.OrderId] = IndChromosome(
+ item.Name, item.Length)
+ return chromosomes
diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py
index e454c593..f0749858 100644
--- a/wqflask/base/trait.py
+++ b/wqflask/base/trait.py
@@ -1,33 +1,59 @@
-from __future__ import absolute_import, division, print_function
-
-import string
-import resource
-import codecs
import requests
-
-import redis
-Redis = redis.StrictRedis()
+import simplejson as json
+from wqflask import app
from base import webqtlConfig
from base.webqtlCaseData import webqtlCaseData
from base.data_set import create_dataset
-from db import webqtlDatabaseFunction
-from utility import webqtlUtil
from utility import hmac
-from utility.tools import GN2_BASE_URL
+from utility.authentication_tools import check_resource_availability
+from utility.tools import GN2_BASE_URL, GN_PROXY_URL
+from utility.redis_tools import get_redis_conn, get_resource_id
-from wqflask import app
-
-import simplejson as json
-from MySQLdb import escape_string as escape
-from pprint import pformat as pf
+from utility.db_tools import escape
-from flask import Flask, g, request, url_for
+from flask import g, request, url_for
from utility.logger import getLogger
-logger = getLogger(__name__ )
-class GeneralTrait(object):
+logger = getLogger(__name__)
+
+Redis = get_redis_conn()
+
+
+def create_trait(**kw):
+ assert bool(kw.get('dataset')) != bool(
+ kw.get('dataset_name')), "Needs dataset ob. or name"
+
+ assert bool(kw.get('name')), "Needs trait name"
+
+ if bool(kw.get('dataset')):
+ dataset = kw.get('dataset')
+ else:
+ if kw.get('dataset_name') != "Temp":
+ dataset = create_dataset(kw.get('dataset_name'))
+ else:
+ dataset = create_dataset("Temp", group_name=kw.get('group_name'))
+
+ if dataset.type == 'Publish':
+ permissions = check_resource_availability(
+ dataset, kw.get('name'))
+ else:
+ permissions = check_resource_availability(dataset)
+
+ if permissions['data'] != "no-access":
+ the_trait = GeneralTrait(**kw)
+ if the_trait.dataset.type != "Temp":
+ the_trait = retrieve_trait_info(
+ the_trait,
+ the_trait.dataset,
+ get_qtl_info=kw.get('get_qtl_info'))
+ return the_trait
+ else:
+ return None
+
+
+class GeneralTrait:
"""
Trait class defines a trait in webqtl, can be either Microarray,
Published phenotype, genotype, or user input trait
@@ -36,12 +62,17 @@ class GeneralTrait(object):
def __init__(self, get_qtl_info=False, get_sample_info=True, **kw):
# xor assertion
- assert bool(kw.get('dataset')) != bool(kw.get('dataset_name')), "Needs dataset ob. or name";
- self.name = kw.get('name') # Trait ID, ProbeSet ID, Published ID, etc.
+ assert bool(kw.get('dataset')) != bool(
+ kw.get('dataset_name')), "Needs dataset ob. or name"
+ # Trait ID, ProbeSet ID, Published ID, etc.
+ self.name = kw.get('name')
if kw.get('dataset_name'):
if kw.get('dataset_name') == "Temp":
temp_group = self.name.split("_")[2]
- self.dataset = create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = temp_group)
+ self.dataset = create_dataset(
+ dataset_name="Temp",
+ dataset_type="Temp",
+ group_name=temp_group)
else:
self.dataset = create_dataset(kw.get('dataset_name'))
else:
@@ -49,8 +80,10 @@ class GeneralTrait(object):
self.cellid = kw.get('cellid')
self.identification = kw.get('identification', 'un-named trait')
self.haveinfo = kw.get('haveinfo', False)
- self.sequence = kw.get('sequence') # Blat sequence, available for ProbeSet
+ # Blat sequence, available for ProbeSet
+ self.sequence = kw.get('sequence')
self.data = kw.get('data', {})
+ self.view = True
# Sets defaults
self.locus = None
@@ -74,11 +107,10 @@ class GeneralTrait(object):
elif len(name2) == 3:
self.dataset, self.name, self.cellid = name2
- # Todo: These two lines are necessary most of the time, but perhaps not all of the time
- # So we could add a simple if statement to short-circuit this if necessary
- if self.dataset.type != "Temp":
- self = retrieve_trait_info(self, self.dataset, get_qtl_info=get_qtl_info)
- if get_sample_info != False:
+ # Todo: These two lines are necessary most of the time, but
+ # perhaps not all of the time So we could add a simple if
+ # statement to short-circuit this if necessary
+ if get_sample_info is not False:
self = retrieve_sample_data(self, self.dataset)
def export_informative(self, include_variance=0):
@@ -91,18 +123,18 @@ class GeneralTrait(object):
vals = []
the_vars = []
sample_aliases = []
- for sample_name, sample_data in self.data.items():
- if sample_data.value != None:
- if not include_variance or sample_data.variance != None:
+ for sample_name, sample_data in list(self.data.items()):
+ if sample_data.value is not None:
+ if not include_variance or sample_data.variance is not None:
samples.append(sample_name)
vals.append(sample_data.value)
the_vars.append(sample_data.variance)
sample_aliases.append(sample_data.name2)
- return samples, vals, the_vars, sample_aliases
+ return samples, vals, the_vars, sample_aliases
@property
def description_fmt(self):
- '''Return a text formated description'''
+ """Return a text formated description"""
if self.dataset.type == 'ProbeSet':
if self.description:
formatted = self.description
@@ -117,31 +149,38 @@ class GeneralTrait(object):
formatted = self.post_publication_description
else:
formatted = "Not available"
+ if isinstance(formatted, bytes):
+ formatted = formatted.decode("utf-8")
return formatted
@property
def alias_fmt(self):
- '''Return a text formatted alias'''
+ """Return a text formatted alias"""
alias = 'Not available'
- if self.alias:
- alias = string.replace(self.alias, ";", " ")
- alias = string.join(string.split(alias), ", ")
+ if getattr(self, "alias", None):
+ alias = self.alias.replace(";", " ")
+ alias = ", ".join(alias.split())
return alias
@property
def wikidata_alias_fmt(self):
- '''Return a text formatted alias'''
+ """Return a text formatted alias"""
alias = 'Not available'
if self.symbol:
- human_response = requests.get(GN2_BASE_URL + "gn3/gene/aliases/" + self.symbol.upper())
- mouse_response = requests.get(GN2_BASE_URL + "gn3/gene/aliases/" + self.symbol.capitalize())
- other_response = requests.get(GN2_BASE_URL + "gn3/gene/aliases/" + self.symbol.lower())
+ human_response = requests.get(
+ GN2_BASE_URL + "gn3/gene/aliases/" + self.symbol.upper())
+ mouse_response = requests.get(
+ GN2_BASE_URL + "gn3/gene/aliases/" + self.symbol.capitalize())
+ other_response = requests.get(
+ GN2_BASE_URL + "gn3/gene/aliases/" + self.symbol.lower())
if human_response and mouse_response and other_response:
- alias_list = json.loads(human_response.content) + json.loads(mouse_response.content) + json.loads(other_response.content)
+ alias_list = json.loads(human_response.content) + json.loads(
+ mouse_response.content) + \
+ json.loads(other_response.content)
filtered_aliases = []
seen = set()
@@ -155,33 +194,34 @@ class GeneralTrait(object):
return alias
-
@property
def location_fmt(self):
- '''Return a text formatted location
+ """Return a text formatted location
- While we're at it we set self.location in case we need it later (do we?)
+ While we're at it we set self.location in case we need it
+ later (do we?)
- '''
+ """
if self.chr and self.mb:
- self.location = 'Chr %s @ %s Mb' % (self.chr,self.mb)
+ self.location = 'Chr %s @ %s Mb' % (self.chr, self.mb)
elif self.chr:
self.location = 'Chr %s @ Unknown position' % (self.chr)
else:
self.location = 'Not available'
fmt = self.location
- ##XZ: deal with direction
+ # XZ: deal with direction
if self.strand_probe == '+':
fmt += (' on the plus strand ')
elif self.strand_probe == '-':
fmt += (' on the minus strand ')
return fmt
-
+
+
def retrieve_sample_data(trait, dataset, samplelist=None):
- if samplelist == None:
+ if samplelist is None:
samplelist = []
if dataset.type == "Temp":
@@ -197,55 +237,71 @@ def retrieve_sample_data(trait, dataset, samplelist=None):
all_samples_ordered = dataset.group.all_samples_ordered()
for i, item in enumerate(results):
try:
- trait.data[all_samples_ordered[i]] = webqtlCaseData(all_samples_ordered[i], float(item))
+ trait.data[all_samples_ordered[i]] = webqtlCaseData(
+ all_samples_ordered[i], float(item))
except:
pass
else:
for item in results:
name, value, variance, num_cases, name2 = item
if not samplelist or (samplelist and name in samplelist):
- trait.data[name] = webqtlCaseData(*item) #name, value, variance, num_cases)
+ # name, value, variance, num_cases)
+ trait.data[name] = webqtlCaseData(*item)
return trait
+
@app.route("/trait/get_sample_data")
def get_sample_data():
params = request.args
trait = params['trait']
dataset = params['dataset']
- trait_ob = GeneralTrait(name=trait, dataset_name=dataset)
-
- trait_dict = {}
- trait_dict['name'] = trait
- trait_dict['db'] = dataset
- trait_dict['type'] = trait_ob.dataset.type
- trait_dict['group'] = trait_ob.dataset.group.name
- trait_dict['tissue'] = trait_ob.dataset.tissue
- trait_dict['species'] = trait_ob.dataset.group.species
- trait_dict['url'] = url_for('show_trait_page', trait_id = trait, dataset = dataset)
- trait_dict['description'] = trait_ob.description_display
- if trait_ob.dataset.type == "ProbeSet":
- trait_dict['symbol'] = trait_ob.symbol
- trait_dict['location'] = trait_ob.location_repr
- elif trait_ob.dataset.type == "Publish":
- if trait_ob.pubmed_id:
- trait_dict['pubmed_link'] = trait_ob.pubmed_link
- trait_dict['pubmed_text'] = trait_ob.pubmed_text
-
- return json.dumps([trait_dict, {key: value.value for key, value in trait_ob.data.iteritems() }])
-
-def jsonable(trait):
+ trait_ob = create_trait(name=trait, dataset_name=dataset)
+ if trait_ob:
+ trait_dict = {}
+ trait_dict['name'] = trait
+ trait_dict['db'] = dataset
+ trait_dict['type'] = trait_ob.dataset.type
+ trait_dict['group'] = trait_ob.dataset.group.name
+ trait_dict['tissue'] = trait_ob.dataset.tissue
+ trait_dict['species'] = trait_ob.dataset.group.species
+ trait_dict['url'] = url_for(
+ 'show_trait_page', trait_id=trait, dataset=dataset)
+ if trait_ob.dataset.type == "ProbeSet":
+ trait_dict['symbol'] = trait_ob.symbol
+ trait_dict['location'] = trait_ob.location_repr
+ trait_dict['description'] = trait_ob.description_display
+ elif trait_ob.dataset.type == "Publish":
+ trait_dict['description'] = trait_ob.description_display
+ if trait_ob.pubmed_id:
+ trait_dict['pubmed_link'] = trait_ob.pubmed_link
+ trait_dict['pubmed_text'] = trait_ob.pubmed_text
+ else:
+ trait_dict['location'] = trait_ob.location_repr
+
+ return json.dumps([trait_dict, {key: value.value for
+ key, value in list(
+ trait_ob.data.items())}])
+ else:
+ return None
+
+
+def jsonable(trait, dataset=None):
"""Return a dict suitable for using as json
Actual turning into json doesn't happen here though"""
- dataset = create_dataset(dataset_name = trait.dataset.name, dataset_type = trait.dataset.type, group_name = trait.dataset.group.name)
-
+ if not dataset:
+ dataset = create_dataset(dataset_name=trait.dataset.name,
+ dataset_type=trait.dataset.type,
+ group_name=trait.dataset.group.name)
+
if dataset.type == "ProbeSet":
return dict(name=trait.name,
+ view=trait.view,
symbol=trait.symbol,
dataset=dataset.name,
- dataset_name = dataset.shortname,
+ dataset_name=dataset.shortname,
description=trait.description_display,
mean=trait.mean,
location=trait.location_repr,
@@ -256,181 +312,142 @@ def jsonable(trait):
elif dataset.type == "Publish":
if trait.pubmed_id:
return dict(name=trait.name,
+ view=trait.view,
dataset=dataset.name,
- dataset_name = dataset.shortname,
+ dataset_name=dataset.shortname,
description=trait.description_display,
abbreviation=trait.abbreviation,
authors=trait.authors,
+ pubmed_id=trait.pubmed_id,
pubmed_text=trait.pubmed_text,
pubmed_link=trait.pubmed_link,
+ mean=trait.mean,
lrs_score=trait.LRS_score_repr,
lrs_location=trait.LRS_location_repr,
additive=trait.additive
)
else:
return dict(name=trait.name,
+ view=trait.view,
dataset=dataset.name,
- dataset_name = dataset.shortname,
+ dataset_name=dataset.shortname,
description=trait.description_display,
abbreviation=trait.abbreviation,
authors=trait.authors,
pubmed_text=trait.pubmed_text,
+ mean=trait.mean,
lrs_score=trait.LRS_score_repr,
lrs_location=trait.LRS_location_repr,
additive=trait.additive
)
elif dataset.type == "Geno":
return dict(name=trait.name,
+ view=trait.view,
dataset=dataset.name,
- dataset_name = dataset.shortname,
+ dataset_name=dataset.shortname,
location=trait.location_repr
)
+ elif dataset.name == "Temp":
+ return dict(name=trait.name,
+ view=trait.view,
+ dataset="Temp",
+ dataset_name="Temp")
else:
return dict()
-def jsonable_table_row(trait, dataset_name, index):
- """Return a list suitable for json and intended to be displayed in a table
-
- Actual turning into json doesn't happen here though"""
-
- dataset = create_dataset(dataset_name)
-
- if dataset.type == "ProbeSet":
- if trait.mean == "":
- mean = "N/A"
- else:
- mean = "%.3f" % round(float(trait.mean), 2)
- if trait.additive == "":
- additive = "N/A"
- else:
- additive = "%.3f" % round(float(trait.additive), 2)
- return ['<input type="checkbox" name="searchResult" class="checkbox trait_checkbox" value="' + hmac.data_hmac('{}:{}'.format(str(trait.name), dataset.name)) + '">',
- index,
- '<a href="/show_trait?trait_id='+str(trait.name)+'&dataset='+dataset.name+'">'+str(trait.name)+'</a>',
- trait.symbol,
- trait.description_display,
- trait.location_repr,
- mean,
- trait.LRS_score_repr,
- trait.LRS_location_repr,
- additive]
- elif dataset.type == "Publish":
- if trait.additive == "":
- additive = "N/A"
- else:
- additive = "%.2f" % round(float(trait.additive), 2)
- if trait.pubmed_id:
- return ['<input type="checkbox" name="searchResult" class="checkbox trait_checkbox" value="' + hmac.data_hmac('{}:{}'.format(str(trait.name), dataset.name)) + '">',
- index,
- '<a href="/show_trait?trait_id='+str(trait.name)+'&dataset='+dataset.name+'">'+str(trait.name)+'</a>',
- trait.description_display,
- trait.authors,
- '<a href="' + trait.pubmed_link + '">' + trait.pubmed_text + '</href>',
- trait.LRS_score_repr,
- trait.LRS_location_repr,
- additive]
- else:
- return ['<input type="checkbox" name="searchResult" class="checkbox trait_checkbox" value="' + hmac.data_hmac('{}:{}'.format(str(trait.name), dataset.name)) + '">',
- index,
- '<a href="/show_trait?trait_id='+str(trait.name)+'&dataset='+dataset.name+'">'+str(trait.name)+'</a>',
- trait.description_display,
- trait.authors,
- trait.pubmed_text,
- trait.LRS_score_repr,
- trait.LRS_location_repr,
- additive]
- elif dataset.type == "Geno":
- return ['<input type="checkbox" name="searchResult" class="checkbox trait_checkbox" value="' + hmac.data_hmac('{}:{}'.format(str(trait.name), dataset.name)) + '">',
- index,
- '<a href="/show_trait?trait_id='+str(trait.name)+'&dataset='+dataset.name+'">'+str(trait.name)+'</a>',
- trait.location_repr]
- else:
- return dict()
def retrieve_trait_info(trait, dataset, get_qtl_info=False):
assert dataset, "Dataset doesn't exist"
-
+
+ resource_id = get_resource_id(dataset, trait.name)
if dataset.type == 'Publish':
- query = """
- SELECT
- PublishXRef.Id, InbredSet.InbredSetCode, Publication.PubMed_ID,
- Phenotype.Pre_publication_description, Phenotype.Post_publication_description, Phenotype.Original_description,
- Phenotype.Pre_publication_abbreviation, Phenotype.Post_publication_abbreviation, PublishXRef.mean,
- Phenotype.Lab_code, Phenotype.Submitter, Phenotype.Owner, Phenotype.Authorized_Users,
- Publication.Authors, Publication.Title, Publication.Abstract,
- Publication.Journal, Publication.Volume, Publication.Pages,
- Publication.Month, Publication.Year, PublishXRef.Sequence,
- Phenotype.Units, PublishXRef.comments
- FROM
- PublishXRef, Publication, Phenotype, PublishFreeze, InbredSet
- WHERE
- PublishXRef.Id = %s AND
- Phenotype.Id = PublishXRef.PhenotypeId AND
- Publication.Id = PublishXRef.PublicationId AND
- PublishXRef.InbredSetId = PublishFreeze.InbredSetId AND
- PublishXRef.InbredSetId = InbredSet.Id AND
- PublishFreeze.Id = %s
- """ % (trait.name, dataset.id)
+ the_url = GN_PROXY_URL + "run-action?resource={}&user={}&branch=data&action=view".format(
+ resource_id, g.user_session.user_id)
+ else:
+ the_url = GN_PROXY_URL + "run-action?resource={}&user={}&branch=data&action=view&trait={}".format(
+ resource_id, g.user_session.user_id, trait.name)
- logger.sql(query)
- trait_info = g.db.execute(query).fetchone()
-
-
- #XZ, 05/08/2009: Xiaodong add this block to use ProbeSet.Id to find the probeset instead of just using ProbeSet.Name
- #XZ, 05/08/2009: to avoid the problem of same probeset name from different platforms.
- elif dataset.type == 'ProbeSet':
- display_fields_string = ', ProbeSet.'.join(dataset.display_fields)
- display_fields_string = 'ProbeSet.' + display_fields_string
- query = """
- SELECT %s
- FROM ProbeSet, ProbeSetFreeze, ProbeSetXRef
- WHERE
- ProbeSetXRef.ProbeSetFreezeId = ProbeSetFreeze.Id AND
- ProbeSetXRef.ProbeSetId = ProbeSet.Id AND
- ProbeSetFreeze.Name = '%s' AND
- ProbeSet.Name = '%s'
- """ % (escape(display_fields_string),
- escape(dataset.name),
- escape(str(trait.name)))
- logger.sql(query)
- trait_info = g.db.execute(query).fetchone()
- #XZ, 05/08/2009: We also should use Geno.Id to find marker instead of just using Geno.Name
- # to avoid the problem of same marker name from different species.
- elif dataset.type == 'Geno':
- display_fields_string = string.join(dataset.display_fields,',Geno.')
- display_fields_string = 'Geno.' + display_fields_string
- query = """
- SELECT %s
- FROM Geno, GenoFreeze, GenoXRef
- WHERE
- GenoXRef.GenoFreezeId = GenoFreeze.Id AND
- GenoXRef.GenoId = Geno.Id AND
- GenoFreeze.Name = '%s' AND
- Geno.Name = '%s'
- """ % (escape(display_fields_string),
- escape(dataset.name),
- escape(trait.name))
- logger.sql(query)
- trait_info = g.db.execute(query).fetchone()
- else: #Temp type
- query = """SELECT %s FROM %s WHERE Name = %s"""
- logger.sql(query)
- trait_info = g.db.execute(query,
- (string.join(dataset.display_fields,','),
- dataset.type, trait.name)).fetchone()
+ try:
+ response = requests.get(the_url).content
+ trait_info = json.loads(response)
+ except: # ZS: I'm assuming the trait is viewable if the try fails for some reason; it should never reach this point unless the user has privileges, since that's dealt with in create_trait
+ if dataset.type == 'Publish':
+ query = """
+ SELECT
+ PublishXRef.Id, InbredSet.InbredSetCode, Publication.PubMed_ID,
+ CAST(Phenotype.Pre_publication_description AS BINARY),
+ CAST(Phenotype.Post_publication_description AS BINARY),
+ CAST(Phenotype.Original_description AS BINARY),
+ CAST(Phenotype.Pre_publication_abbreviation AS BINARY),
+ CAST(Phenotype.Post_publication_abbreviation AS BINARY), PublishXRef.mean,
+ Phenotype.Lab_code, Phenotype.Submitter, Phenotype.Owner, Phenotype.Authorized_Users,
+ CAST(Publication.Authors AS BINARY), CAST(Publication.Title AS BINARY), CAST(Publication.Abstract AS BINARY),
+ CAST(Publication.Journal AS BINARY), Publication.Volume, Publication.Pages,
+ Publication.Month, Publication.Year, PublishXRef.Sequence,
+ Phenotype.Units, PublishXRef.comments
+ FROM
+ PublishXRef, Publication, Phenotype, PublishFreeze, InbredSet
+ WHERE
+ PublishXRef.Id = %s AND
+ Phenotype.Id = PublishXRef.PhenotypeId AND
+ Publication.Id = PublishXRef.PublicationId AND
+ PublishXRef.InbredSetId = PublishFreeze.InbredSetId AND
+ PublishXRef.InbredSetId = InbredSet.Id AND
+ PublishFreeze.Id = %s
+ """ % (trait.name, dataset.id)
+
+ logger.sql(query)
+ trait_info = g.db.execute(query).fetchone()
+
+ # XZ, 05/08/2009: Xiaodong add this block to use ProbeSet.Id to find the probeset instead of just using ProbeSet.Name
+ # XZ, 05/08/2009: to avoid the problem of same probeset name from different platforms.
+ elif dataset.type == 'ProbeSet':
+ display_fields_string = ', ProbeSet.'.join(dataset.display_fields)
+ display_fields_string = 'ProbeSet.' + display_fields_string
+ query = """
+ SELECT %s
+ FROM ProbeSet, ProbeSetFreeze, ProbeSetXRef
+ WHERE
+ ProbeSetXRef.ProbeSetFreezeId = ProbeSetFreeze.Id AND
+ ProbeSetXRef.ProbeSetId = ProbeSet.Id AND
+ ProbeSetFreeze.Name = '%s' AND
+ ProbeSet.Name = '%s'
+ """ % (escape(display_fields_string),
+ escape(dataset.name),
+ escape(str(trait.name)))
+ logger.sql(query)
+ trait_info = g.db.execute(query).fetchone()
+ # XZ, 05/08/2009: We also should use Geno.Id to find marker instead of just using Geno.Name
+ # to avoid the problem of same marker name from different species.
+ elif dataset.type == 'Geno':
+ display_fields_string = ',Geno.'.join(dataset.display_fields)
+ display_fields_string = 'Geno.' + display_fields_string
+ query = """
+ SELECT %s
+ FROM Geno, GenoFreeze, GenoXRef
+ WHERE
+ GenoXRef.GenoFreezeId = GenoFreeze.Id AND
+ GenoXRef.GenoId = Geno.Id AND
+ GenoFreeze.Name = '%s' AND
+ Geno.Name = '%s'
+ """ % (escape(display_fields_string),
+ escape(dataset.name),
+ escape(trait.name))
+ logger.sql(query)
+ trait_info = g.db.execute(query).fetchone()
+ else: # Temp type
+ query = """SELECT %s FROM %s WHERE Name = %s"""
+ logger.sql(query)
+ trait_info = g.db.execute(query,
+ (','.join(dataset.display_fields),
+ dataset.type, trait.name)).fetchone()
if trait_info:
trait.haveinfo = True
-
- #XZ: assign SQL query result to trait attributes.
for i, field in enumerate(dataset.display_fields):
holder = trait_info[i]
- # if isinstance(trait_info[i], basestring):
- # logger.debug("HOLDER:", holder)
- # logger.debug("HOLDER2:", holder.decode(encoding='latin1'))
- # holder = unicode(trait_info[i], "utf-8", "ignore")
- if isinstance(trait_info[i], basestring):
- holder = holder.encode('latin1')
+ if isinstance(holder, bytes):
+ holder = holder.decode("utf-8", errors="ignore")
setattr(trait, field, holder)
if dataset.type == 'Publish':
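The CAST(... AS BINARY) columns in the Publish query above come back from MySQL as bytes objects, which is why the field loop decodes each holder before calling setattr. The same pattern in isolation, with invented values:

    raw_row = (b"BXD", b"body weight [g]", 12.5)   # hypothetical fetchone() result
    fields = ("group", "description", "mean")
    decoded = {name: (val.decode("utf-8", errors="ignore") if isinstance(val, bytes) else val)
               for name, val in zip(fields, raw_row)}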
@@ -443,29 +460,18 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False):
description = trait.post_publication_description
- #If the dataset is confidential and the user has access to confidential
- #phenotype traits, then display the pre-publication description instead
- #of the post-publication description
- if trait.confidential:
+ # If the dataset is confidential and the user has access to confidential
+ # phenotype traits, then display the pre-publication description instead
+ # of the post-publication description
+ trait.description_display = "N/A"
+ if not trait.pubmed_id:
trait.abbreviation = trait.pre_publication_abbreviation
- trait.description_display = trait.pre_publication_description
-
- #if not webqtlUtil.hasAccessToConfidentialPhenotypeTrait(
- # privilege=self.dataset.privilege,
- # userName=self.dataset.userName,
- # authorized_users=self.authorized_users):
- #
- # description = self.pre_publication_description
+ if trait.pre_publication_description:
+ trait.description_display = trait.pre_publication_description
else:
trait.abbreviation = trait.post_publication_abbreviation
if description:
trait.description_display = description.strip()
- else:
- trait.description_display = ""
-
- trait.abbreviation = unicode(str(trait.abbreviation).strip(codecs.BOM_UTF8), 'utf-8', errors="replace")
- trait.description_display = unicode(str(trait.description_display).strip(codecs.BOM_UTF8), 'utf-8', errors="replace")
- trait.authors = unicode(str(trait.authors).strip(codecs.BOM_UTF8), 'utf-8', errors="replace")
if not trait.year.isdigit():
trait.pubmed_text = "N/A"
@@ -476,16 +482,17 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False):
trait.pubmed_link = webqtlConfig.PUBMEDLINK_URL % trait.pubmed_id
if dataset.type == 'ProbeSet' and dataset.group:
- description_string = unicode(str(trait.description).strip(codecs.BOM_UTF8), 'utf-8')
- target_string = unicode(str(trait.probe_target_description).strip(codecs.BOM_UTF8), 'utf-8')
+ description_string = trait.description
+ target_string = trait.probe_target_description
- if len(description_string) > 1 and description_string != 'None':
+ if str(description_string or "") != "" and description_string != 'None':
description_display = description_string
else:
description_display = trait.symbol
- if (len(description_display) > 1 and description_display != 'N/A' and
- len(target_string) > 1 and target_string != 'None'):
+ if (str(description_display or "") != ""
+ and description_display != 'N/A'
+ and str(target_string or "") != "" and target_string != 'None'):
description_display = description_display + '; ' + target_string.strip()
# Save it for the jinja2 template
@@ -493,15 +500,17 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False):
trait.location_repr = 'N/A'
if trait.chr and trait.mb:
- trait.location_repr = 'Chr%s: %.6f' % (trait.chr, float(trait.mb))
+ trait.location_repr = 'Chr%s: %.6f' % (
+ trait.chr, float(trait.mb))
elif dataset.type == "Geno":
trait.location_repr = 'N/A'
if trait.chr and trait.mb:
- trait.location_repr = 'Chr%s: %.6f' % (trait.chr, float(trait.mb))
+ trait.location_repr = 'Chr%s: %.6f' % (
+ trait.chr, float(trait.mb))
if get_qtl_info:
- #LRS and its location
+ # LRS and its location
trait.LRS_score_repr = "N/A"
trait.LRS_location_repr = "N/A"
trait.locus = trait.locus_chr = trait.locus_mb = trait.lrs = trait.pvalue = trait.additive = ""
@@ -571,12 +580,12 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False):
trait.locus = trait.locus_chr = trait.locus_mb = trait.additive = ""
else:
trait.locus = trait.lrs = trait.additive = ""
-
- if (dataset.type == 'Publish' or dataset.type == "ProbeSet") and trait.locus_chr != "" and trait.locus_mb != "":
- trait.LRS_location_repr = LRS_location_repr = 'Chr%s: %.6f' % (trait.locus_chr, float(trait.locus_mb))
- if trait.lrs != "":
+ if (dataset.type == 'Publish' or dataset.type == "ProbeSet") and str(trait.locus_chr or "") != "" and str(trait.locus_mb or "") != "":
+ trait.LRS_location_repr = LRS_location_repr = 'Chr%s: %.6f' % (
+ trait.locus_chr, float(trait.locus_mb))
+ if str(trait.lrs or "") != "":
trait.LRS_score_repr = LRS_score_repr = '%3.1f' % trait.lrs
else:
- raise KeyError, `trait.name`+' information is not found in the database.'
-
- return trait \ No newline at end of file
+ raise KeyError(repr(trait.name)
+ + ' information is not found in the database.')
+ return trait
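For context, the refactored create_trait/jsonable pair is typically driven along these lines (dataset and trait names here are only illustrative):

    trait_ob = create_trait(name="1443823_s_at", dataset_name="HC_M2_0606_P")
    if trait_ob:
        payload = json.dumps(jsonable(trait_ob, trait_ob.dataset))

Passing the trait's own dataset as the second argument skips the create_dataset fallback that jsonable otherwise performs.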
diff --git a/wqflask/base/webqtlCaseData.py b/wqflask/base/webqtlCaseData.py
index d8487f01..25b6cb8a 100644
--- a/wqflask/base/webqtlCaseData.py
+++ b/wqflask/base/webqtlCaseData.py
@@ -19,8 +19,7 @@
# This module is used by GeneNetwork project (www.genenetwork.org)
#
# Created by GeneNetwork Core Team 2010/08/10
-#
-# Last updated by GeneNetwork Core Team 2010/10/20
+
from utility.logger import getLogger
logger = getLogger(__name__)
@@ -29,58 +28,57 @@ import utility.tools
utility.tools.show_settings()
-class webqtlCaseData(object):
+
+class webqtlCaseData:
"""one case data in one trait"""
def __init__(self, name, value=None, variance=None, num_cases=None, name2=None):
self.name = name
- self.name2 = name2 # Other name (for traits like BXD65a)
+ # Other name (for traits like BXD65a)
+ self.name2 = name2
self.value = value # Trait Value
self.variance = variance # Trait Variance
self.num_cases = num_cases # Number of individuals/cases
self.extra_attributes = None
- self.this_id = None # Set a sane default (can't be just "id" cause that's a reserved word)
+ # Set a sane default (can't be just "id" cause that's a reserved word)
+ self.this_id = None
self.outlier = None # Not set to True/False until later
def __repr__(self):
- str = "<webqtlCaseData> "
- if self.value != None:
- str += "value=%2.3f" % self.value
- if self.variance != None:
- str += " variance=%2.3f" % self.variance
+ case_data_string = "<webqtlCaseData> "
+ if self.value is not None:
+ case_data_string += "value=%2.3f" % self.value
+ if self.variance is not None:
+ case_data_string += " variance=%2.3f" % self.variance
if self.num_cases:
- str += " ndata=%s" % self.num_cases
+ case_data_string += " ndata=%s" % self.num_cases
if self.name:
- str += " name=%s" % self.name
+ case_data_string += " name=%s" % self.name
if self.name2:
- str += " name2=%s" % self.name2
- return str
+ case_data_string += " name2=%s" % self.name2
+ return case_data_string
@property
def class_outlier(self):
"""Template helper"""
if self.outlier:
return "outlier"
- else:
- return ""
+ return ""
@property
def display_value(self):
- if self.value != None:
+ if self.value is not None:
return "%2.3f" % self.value
- else:
- return "x"
+ return "x"
@property
def display_variance(self):
- if self.variance != None:
+ if self.variance is not None:
return "%2.3f" % self.variance
- else:
- return "x"
+ return "x"
@property
def display_num_cases(self):
- if self.num_cases != None:
+ if self.num_cases is not None:
return "%s" % self.num_cases
- else:
- return "x"
+ return "x"
diff --git a/wqflask/base/webqtlConfig.py b/wqflask/base/webqtlConfig.py
index 55407123..39947158 100644
--- a/wqflask/base/webqtlConfig.py
+++ b/wqflask/base/webqtlConfig.py
@@ -1,4 +1,4 @@
-#########################################'
+# '
# Environment Variables - public
#
# Note: much of this needs to handled by the settings/environment
@@ -10,31 +10,35 @@
from utility.tools import valid_path, mk_dir, assert_dir, assert_writable_dir, flat_files, TEMPDIR
-#Debug Level
-#1 for debug, mod python will reload import each time
+# Debug Level
+# 1 for debug, mod python will reload import each time
DEBUG = 1
-#USER privilege
-USERDICT = {'guest':1,'user':2, 'admin':3, 'root':4}
+# USER privilege
+USERDICT = {'guest': 1, 'user': 2, 'admin': 3, 'root': 4}
-#minimum number of informative strains
+# Set privileges
+SUPER_PRIVILEGES = {'data': 'edit', 'metadata': 'edit', 'admin': 'edit-admins'}
+DEFAULT_PRIVILEGES = {'data': 'view', 'metadata': 'view', 'admin': 'not-admin'}
+
+# minimum number of informative strains
KMININFORMATIVE = 5
-#Daily download limit from one IP
+# Daily download limit from one IP
DAILYMAXIMUM = 1000
-#maximum LRS value
+# maximum LRS value
MAXLRS = 460.0
-#MINIMUM Database public value
+# MINIMUM Database public value
PUBLICTHRESH = 0
-#EXTERNAL LINK ADDRESSES
+# EXTERNAL LINK ADDRESSES
PUBMEDLINK_URL = "http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Retrieve&db=PubMed&list_uids=%s&dopt=Abstract"
UCSC_BLAT = 'http://genome.ucsc.edu/cgi-bin/hgBlat?org=%s&db=%s&type=0&sort=0&output=0&userSeq=%s'
UTHSC_BLAT = 'http://ucscbrowser.genenetwork.org/cgi-bin/hgBlat?org=%s&db=%s&type=0&sort=0&output=0&userSeq=%s'
UTHSC_BLAT2 = 'http://ucscbrowserbeta.genenetwork.org/cgi-bin/hgBlat?org=%s&db=%s&type=0&sort=0&output=0&userSeq=%s'
-GENOMEBROWSER_URL="https://genome.ucsc.edu/cgi-bin/hgTracks?db=%s&position=%s"
+GENOMEBROWSER_URL = "https://genome.ucsc.edu/cgi-bin/hgTracks?db=%s&position=%s"
NCBI_LOCUSID = "http://www.ncbi.nlm.nih.gov/gene?cmd=Retrieve&dopt=Graphics&list_uids=%s"
GENBANK_ID = "http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?db=Nucleotide&cmd=search&doptcmdl=DocSum&term=%s"
OMIM_ID = "http://www.ncbi.nlm.nih.gov/omim/%s"
@@ -52,26 +56,28 @@ GEMMA_URL = "http://www.chibi.ubc.ca/Gemma/gene/showGene.html?ncbiid=%s"
ABA_URL = "http://mouse.brain-map.org/search/show?search_type=gene&search_term=%s"
EBIGWAS_URL = "https://www.ebi.ac.uk/gwas/search?query=%s"
WIKI_PI_URL = "http://severus.dbmi.pitt.edu/wiki-pi/index.php/search?q=%s"
-ENSEMBLETRANSCRIPT_URL="http://useast.ensembl.org/Mus_musculus/Transcript/Idhistory?t=%s"
+ENSEMBLETRANSCRIPT_URL = "http://useast.ensembl.org/Mus_musculus/Transcript/Idhistory?t=%s"
DBSNP = 'http://ensembl.org/Mus_musculus/Variation/Population?v=%s'
PROTEIN_ATLAS_URL = "http://www.proteinatlas.org/search/%s"
OPEN_TARGETS_URL = "https://genetics.opentargets.org/gene/%s"
UNIPROT_URL = "https://www.uniprot.org/uniprot/%s"
RGD_URL = "https://rgd.mcw.edu/rgdweb/elasticResults.html?term=%s&category=Gene&species=%s"
PHENOGEN_URL = "https://phenogen.org/gene.jsp?speciesCB=Rn&auto=Y&geneTxt=%s&genomeVer=rn6&section=geneEQTL"
+RRID_MOUSE_URL = "https://www.jax.org/strain/%s"
+RRID_RAT_URL = "https://rgd.mcw.edu/rgdweb/report/strain/main.html?id=%s"
# Temporary storage (note that this TMPDIR can be set as an
# environment variable - use utility.tools.TEMPDIR when you
# want to reach this base dir
assert_writable_dir(TEMPDIR)
-TMPDIR = mk_dir(TEMPDIR+'/gn2/')
+TMPDIR = mk_dir(TEMPDIR + '/gn2/')
assert_writable_dir(TMPDIR)
-CACHEDIR = mk_dir(TMPDIR+'/cache/')
+CACHEDIR = mk_dir(TMPDIR + '/cache/')
# We can no longer write into the git tree:
-GENERATED_IMAGE_DIR = mk_dir(TMPDIR+'generated/')
-GENERATED_TEXT_DIR = mk_dir(TMPDIR+'generated_text/')
+GENERATED_IMAGE_DIR = mk_dir(TMPDIR + 'generated/')
+GENERATED_TEXT_DIR = mk_dir(TMPDIR + 'generated_text/')
# Make sure we have permissions to access these
assert_writable_dir(CACHEDIR)
@@ -79,12 +85,12 @@ assert_writable_dir(GENERATED_IMAGE_DIR)
assert_writable_dir(GENERATED_TEXT_DIR)
# Flat file directories
-GENODIR = flat_files('genotype')+'/'
+GENODIR = flat_files('genotype') + '/'
assert_dir(GENODIR)
-assert_dir(GENODIR+'bimbam') # for gemma
+# assert_dir(GENODIR+'bimbam') # for gemma
# JSON genotypes are OBSOLETE
-JSON_GENODIR = flat_files('genotype/json')+'/'
+JSON_GENODIR = flat_files('genotype/json') + '/'
if not valid_path(JSON_GENODIR):
# fall back on old location (move the dir, FIXME)
JSON_GENODIR = flat_files('json')
@@ -92,4 +98,4 @@ if not valid_path(JSON_GENODIR):
# Are we using the following...?
PORTADDR = "http://50.16.251.170"
INFOPAGEHREF = '/dbdoc/%s.html'
-CGIDIR = '/webqtl/' #XZ: The variable name 'CGIDIR' should be changed to 'PYTHONDIR'
+CGIDIR = '/webqtl/' # XZ: The variable name 'CGIDIR' should be changed to 'PYTHONDIR'
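The external-link constants in webqtlConfig are plain %-format templates, so callers interpolate identifiers directly, e.g. (illustrative values):

    pubmed_link = webqtlConfig.PUBMEDLINK_URL % "12345678"
    browser_link = webqtlConfig.GENOMEBROWSER_URL % ("mm10", "chr1:3000000-3500000")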