path: root/gn3/base/data_set.py
author     Alexander Kabui    2021-03-13 13:04:33 +0300
committer  GitHub             2021-03-13 13:04:33 +0300
commit     236ca06dc4c84baecb7b090b8724db997a5d988a (patch)
tree       7fce724ae007dacfe3cf0f7511756b6064026ea3 /gn3/base/data_set.py
parent     7f9a293929be021eb73aec35defe254351557dcb (diff)
download   genenetwork3-236ca06dc4c84baecb7b090b8724db997a5d988a.tar.gz
Correlation api (#2)
* add file for correlation api
* register initial correlation api
* add correlation package
* add function for getting page data
* delete loading page api
* modify code for correlation
* add tests folder for correlations
* fix error in correlation api
* add tests for correlation
* add tests for correlation loading data
* add module for correlation computations
* modify api to return json when computing correlation
* add tests for computing correlation
* modify code for loading correlation data
* modify tests for correlation computation
* test loading correlation data using api endpoint
* add tests for asserting error in creating Correlation object
* add do correlation method
* add dummy tests for do_correlation method
* delete unused modules
* add tests for creating trait and dataset
* add integration test for correlation api
* add tests for correlation api
* edit docorrelation method
* modify integration tests for correlation api
* modify tests for show_corr_results
* add create dataset function
* pep8 formatting and fix return value for api
* add more test data for doing correlation
* modify tests for correlation
* pep8 formatting
* add getting formatted corr type method
* import json library; add process samples method for correlation
* fix issue with sample_vals key_error
* create utility module for correlation
* refactor endpoint for /corr_compute
* add test and mocks for compute_correlation function
* add compute correlation function and pep8 formatting
* move get genofile samplelist to utility module
* refactor code for CorrelationResults object
* pep8 formatting for module
* remove CorrelationResults from Api
* add base package; initialize data_set module with create_dataset, redis and Dataset_Getter
* set dataset_structure if redis is empty
* add callable for DatasetType
* add set_dataset_key method: if name is not in the object's dataset dictionary
* add Dataset object and MrnaAssayDataSet
* add db_tools
* add mysql client
* add DatasetGroup object
* add species module
* get mapping method
* import helper functions and new dataset
* add connection to db before request
* add helper functions
* add logger module
* add get_group_samplelists module
* add logger for debug
* add code for adding sample_data
* pep8 formatting
* Add chunks module
* add correlation helper module
* add get_sample_r_and_p_values method; add get_header_fields function
* add generate corr json method
* add function to retrieve_trait_info
* remove comments and clean up code in show_corr_results
* remove comments and clean up code for data_set module
* pep8 formatting for helper_functions module
* pep8 formatting for trait module
* add module for species
* add Temp Dataset Object
* add Phenotype Dataset
* add Genotype Dataset
* add retrieve sample_sample_data method
* add webqtlUtil module
* add do lit correlation for all traits
* add webqtlCaseData (Settings not ported)
* return the_trait for create trait method
* add correlation_test json data
* add tests for show corr results
* add dictfier package
* add tests for show_corr_results
* add assertion for trait_id
* refactor code for show_corr_results
* add test file for compute_corr integration tests
* add scipy dependency
* refactor show_corr_results object; add do lit correlation for trait_list
* add hmac module
* add bunch module: Dictionary using object notation
* add correlation functions
* add rpy2 dependency
* add hmac module
* add MrnaAssayTissueData object and get_symbol_values_pairs function
* add config module
* add get json_results method
* pep8 formatting; remove comments
* add config file
* add db package
* refactor correlation computation module
* add do tissue correlation for trait list
* add do lit correlation for all traits
* add do tissue correlation for all traits
* add do_bicor for bicor method
* raise error for when initial start vars is None
* add support for both form and json data for correlation input
* remove print statement and pep8 formatting
* add default settings file
* add tools module for locate_ignore_error
* refactor code; remove comments for trait module
* Add new test data for computing correlation
* pep8 formatting and use pickle
* refactor function for filtering form/json data
* remove unused imports
* remove mock functions in correlation_utility module
* refactor tests for compute correlation and pep8 formatting
* add tests for show_correlation results
* modify tests for show_corr_results
* add json files for tests
* pep8 formatting for show_corr_results
* Todo: Lint base files
* pylint for integration tests
* add test module for test_corr_helpers
* Add test chunk module
* lint utility package
* refactoring and pep8 formatting
* implement simple metric for correlation
* add hmac utility file
* add correlation prefix
* fix merge conflict
* minor fixes for endpoints
* import python-scipy, python-sqlalchemy from guix
* add python mysqlclient
* remove pkg-resources from requirements
* add python-rpy3 from guix
* refactor code for species module
* pep8 formatting and refactor code
* add tests for generating correlation results
* lint correlation functions
* fix failing tests for show_corr_results
* add new correlation test data; fix errors
* fix issues related to getting group samplelists
* refactor integration tests for correlation
* add todo for refactoring wanted_inputs
* replace custom Attribute setter with SimpleNamespace
* comparison of sample r correlation results between genenetwork2 and genenetwork3
* delete AttributeSetter
* test request for /api/correlation/compute_correlation took 18.55710196495056 seconds
* refactor tests and show_correlation results
* remove unnecessary comments and print statements
* edit requirements.txt file
* api/correlation took 114.29814600944519 seconds for correlation results: 20000 - corr-type: lit - corr-method: pearson - corr-dataset: HC_M2_0606_P
* capture SQL_URI and GENENETWORK FILES path
* pep8 formatting edit && remove print statements
* delete filter_input function; update test and data for correlation
* add docstring for required correlation_input
* /api/correlation took 12.905632972717285 seconds * pearson * lit * dataset: HX_M2_0606_P trait_id: 1444666 p_range: (lower -> -0.60, upper -> 0.74) corr_return_results: 100
* update integration and unittest for correlation
* add simple markdown docs for correlation
* update docs
* add tests and catch for invalid correlation_input
* minor fix for api
* Remove jupyter from deps
* guix.scm: Remove duplicate entry
* guix.scm: Add extra action items as comments
* Trim requirements.txt file

Co-authored-by: BonfaceKilz <me@bonfacemunyoki.com>
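The commit message above mentions the new /api/correlation/compute_correlation endpoint and an example run against dataset HX_M2_0606_P, trait 1444666, with corr_return_results: 100. A minimal client-side sketch of such a request follows; the payload field names and the local server URL are illustrative assumptions taken from the commit message, not an API definition from this commit:

    # hypothetical client sketch; field names and URL are assumptions
    import requests

    payload = {
        "trait_id": "1444666",            # trait quoted in the commit message
        "dataset": "HX_M2_0606_P",        # dataset quoted in the commit message
        "corr_type": "lit",               # correlation type quoted above
        "corr_sample_method": "pearson",  # assumed name for the method field
        "corr_return_results": 100,
    }
    response = requests.post(
        "http://localhost:8080/api/correlation/compute_correlation",  # assumed local GN3 instance
        json=payload, timeout=120)
    print(response.json())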
Diffstat (limited to 'gn3/base/data_set.py')
-rw-r--r--   gn3/base/data_set.py   886
1 file changed, 886 insertions, 0 deletions
diff --git a/gn3/base/data_set.py b/gn3/base/data_set.py
new file mode 100644
index 0000000..e61e4eb
--- /dev/null
+++ b/gn3/base/data_set.py
@@ -0,0 +1,886 @@
+
+import json
+import math
+import codecs
+import logging
+import collections
+import requests
+from redis import Redis
+from flask import g
+from gn3.utility.db_tools import escape
+from gn3.utility.db_tools import mescape
+from gn3.utility.db_tools import create_in_clause
+from gn3.utility.tools import locate_ignore_error
+from gn3.db.calls import fetch1
+from gn3.db.calls import fetchone
+from gn3.db.webqtlDatabaseFunction import retrieve_species
+from gn3.db.webqtlDatabaseFunction import retrieve_species_id
+from gn3.utility import chunks
+
+from gn3.utility import get_group_samplelists
+from gn3.base.species import TheSpecies
+
+# module-level logger for the debug messages further down in this file
+logger = logging.getLogger(__name__)
+
+r = Redis()
+
+# should probably move this to its own configuration files
+
+USE_REDIS = True
+
+# todo move to config file
+GN2_BASE_URL = "https://genenetwork.org/"
+
+DS_NAME_MAP = {}
+
+# pylint: disable-all
+# todo: this file has not been linted yet
+# pylint: disable=C0103
+
+
+def create_dataset(dataset_name, dataset_type=None, get_samplelist=True, group_name=None):
+
+ if dataset_name == "Temp":
+ dataset_type = "Temp"
+
+ if dataset_type is None:
+ dataset_type = Dataset_Getter(dataset_name)
+ dataset_ob = DS_NAME_MAP[dataset_type]
+ dataset_class = globals()[dataset_ob]
+
+ if dataset_type == "Temp":
+ results = dataset_class(dataset_name, get_samplelist, group_name)
+
+ else:
+ results = dataset_class(dataset_name, get_samplelist)
+
+ return results
+
+
+class DatasetType:
+ def __init__(self, redis_instance):
+ self.redis_instance = redis_instance
+ self.datasets = {}
+
+ data = self.redis_instance.get("dataset_structure")
+ if data:
+ self.datasets = json.loads(data)
+
+ else:
+
+ try:
+
+ data = json.loads(requests.get(
+ GN2_BASE_URL + "/api/v_pre1/gen_dropdown", timeout=5).content)
+
+ # todo:Refactor code below n^4 loop
+
+ for species in data["datasets"]:
+ for group in data["datasets"][species]:
+ for dataset_type in data['datasets'][species][group]:
+ for dataset in data['datasets'][species][group][dataset_type]:
+
+ short_dataset_name = dataset[1]
+ if dataset_type == "Phenotypes":
+ new_type = "Publish"
+
+ elif dataset_type == "Genotypes":
+ new_type = "Geno"
+ else:
+ new_type = "ProbeSet"
+
+ self.datasets[short_dataset_name] = new_type
+
+ except Exception as e:
+ raise e
+
+ self.redis_instance.set(
+ "dataset_structure", json.dumps(self.datasets))
+
+ def set_dataset_key(self, t, name):
+ """If name is not in the object's dataset dictionary, set it, and update
+ dataset_structure in Redis
+
+ args:
+ t: Type of dataset structure which can be: 'mrna_expr', 'pheno',
+ 'other_pheno', 'geno'
+ name: The name of the key to be inserted in the datasets dictionary
+
+ """
+
+ sql_query_mapping = {
+ 'mrna_expr': ("""SELECT ProbeSetFreeze.Id FROM """ +
+ """ProbeSetFreeze WHERE ProbeSetFreeze.Name = "{}" """),
+ 'pheno': ("""SELECT InfoFiles.GN_AccesionId """ +
+ """FROM InfoFiles, PublishFreeze, InbredSet """ +
+ """WHERE InbredSet.Name = '{}' AND """ +
+ """PublishFreeze.InbredSetId = InbredSet.Id AND """ +
+ """InfoFiles.InfoPageName = PublishFreeze.Name"""),
+ 'other_pheno': ("""SELECT PublishFreeze.Name """ +
+ """FROM PublishFreeze, InbredSet """ +
+ """WHERE InbredSet.Name = '{}' AND """ +
+ """PublishFreeze.InbredSetId = InbredSet.Id"""),
+ 'geno': ("""SELECT GenoFreeze.Id FROM GenoFreeze WHERE """ +
+ """GenoFreeze.Name = "{}" """)
+ }
+
+ dataset_name_mapping = {
+ "mrna_expr": "ProbeSet",
+ "pheno": "Publish",
+ "other_pheno": "Publish",
+ "geno": "Geno",
+ }
+
+ group_name = name
+ if t in ['pheno', 'other_pheno']:
+ group_name = name.replace("Publish", "")
+
+ results = g.db.execute(
+ sql_query_mapping[t].format(group_name)).fetchone()
+ if results:
+ self.datasets[name] = dataset_name_mapping[t]
+ self.redis_instance.set(
+ "dataset_structure", json.dumps(self.datasets))
+
+ return True
+
+ return None
+
+ def __call__(self, name):
+ if name not in self.datasets:
+ for t in ["mrna_expr", "pheno", "other_pheno", "geno"]:
+
+ if(self.set_dataset_key(t, name)):
+ # This has side-effects, with the end result being a truth-y value
+ break
+
+ return self.datasets.get(name, None)
+
+
+# Do the intensive work at startup one time only
+# could replace the code below
+Dataset_Getter = DatasetType(r)
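+# Example (informal): Dataset_Getter("HC_M2_0606_P") looks the name up in the
+# cached dataset structure (or via the SQL fallbacks in set_dataset_key) and
+# returns its type, e.g. "ProbeSet"; unknown names return None.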
+
+
+class DatasetGroup:
+ """
+ Each group has multiple datasets; each species has multiple groups.
+
+ For example, Mouse has multiple groups (BXD, BXA, etc), and each group
+ has multiple datasets associated with it.
+
+ """
+
+ def __init__(self, dataset, name=None):
+ """This sets self.group and self.group_id"""
+ if name is None:
+ self.name, self.id, self.genetic_type = fetchone(
+ dataset.query_for_group)
+
+ else:
+ self.name, self.id, self.genetic_type = fetchone(
+ "SELECT InbredSet.Name, InbredSet.Id, InbredSet.GeneticType FROM InbredSet where Name='%s'" % name)
+
+ if self.name == 'BXD300':
+ self.name = "BXD"
+
+ self.f1list = None
+
+ self.parlist = None
+
+ self.get_f1_parent_strains()
+
+ # todo: remove the code below; it is not used in correlation
+
+ self.mapping_id, self.mapping_names = self.get_mapping_methods()
+
+ self.species = retrieve_species(self.name)
+
+ def get_f1_parent_strains(self):
+ try:
+ # should import ParInfo
+ # NL, 07/27/2010. ParInfo has been moved from webqtlForm.py to webqtlUtil.py;
+ # ParInfo is not available here yet, so force the fallback below for now
+ raise NotImplementedError("webqtlUtil.ParInfo is not available")
+ f1, f12, maternal, paternal = webqtlUtil.ParInfo[self.name]
+ except Exception:
+ f1 = f12 = maternal = paternal = None
+
+ if f1 and f12:
+ self.f1list = [f1, f12]
+
+ if maternal and paternal:
+ self.parlist = [maternal, paternal]
+
+ def get_mapping_methods(self):
+ mapping_id = g.db.execute(
+ "select MappingMethodId from InbredSet where Name= '%s'" % self.name).fetchone()[0]
+
+ if mapping_id == "1":
+ mapping_names = ["GEMMA", "QTLReaper", "R/qtl"]
+ elif mapping_id == "2":
+ mapping_names = ["GEMMA"]
+
+ elif mapping_id == "3":
+ mapping_names = ["R/qtl"]
+
+ elif mapping_id == "4":
+ mapping_names = ["GEMMA", "PLINK"]
+
+ else:
+ mapping_names = []
+
+ return mapping_id, mapping_names
+
+ def get_samplelist(self):
+ result = None
+ key = "samplelist:v3:" + self.name
+ if USE_REDIS:
+ result = r.get(key)
+
+ if result is not None:
+
+ self.samplelist = json.loads(result)
+
+ else:
+ # logger.debug("Cache not hit")
+ # should enable logger
+ genotype_fn = locate_ignore_error(self.name+".geno", 'genotype')
+ if genotype_fn:
+ self.samplelist = get_group_samplelists.get_samplelist(
+ "geno", genotype_fn)
+
+ else:
+ self.samplelist = None
+
+ if USE_REDIS:
+ r.set(key, json.dumps(self.samplelist))
+ r.expire(key, 60*5)
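+ # e.g. for the BXD group the cached entry lives under "samplelist:v3:BXD"
+ # and expires after five minutes (60 * 5 seconds)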
+
+
+class DataSet:
+ """
+ The DataSet class defines a dataset in WebQTL; it can be a microarray,
+ published phenotype, genotype, or user-input (temp) dataset.
+
+ """
+
+ def __init__(self, name, get_samplelist=True, group_name=None):
+
+ assert name, "Need a name"
+ self.name = name
+ self.id = None
+ self.shortname = None
+ self.fullname = None
+ self.type = None
+ self.data_scale = None # ZS: For example log2
+
+ self.setup()
+
+ if self.type == "Temp": # Need to supply group name as input if temp trait
+ # sets self.group and self.group_id and gets genotype
+ self.group = DatasetGroup(self, name=group_name)
+ else:
+ self.check_confidentiality()
+ self.retrieve_other_names()
+ # sets self.group and self.group_id and gets genotype
+ self.group = DatasetGroup(self)
+ self.accession_id = self.get_accession_id()
+ if get_samplelist:
+ self.group.get_samplelist()
+ self.species = TheSpecies(self)
+
+ def get_desc(self):
+ """Gets overridden later, at least for Temp...used by trait's get_given_name"""
+ return None
+
+ # Delete this eventually
+ @property
+ def riset(self):
+ # 'riset' has been renamed to 'group'; fail loudly if anything still uses it
+ raise AttributeError("riset has been renamed to group")
+
+ def get_accession_id(self):
+ if self.type == "Publish":
+ results = g.db.execute("""select InfoFiles.GN_AccesionId from InfoFiles, PublishFreeze, InbredSet where
+ InbredSet.Name = %s and
+ PublishFreeze.InbredSetId = InbredSet.Id and
+ InfoFiles.InfoPageName = PublishFreeze.Name and
+ PublishFreeze.public > 0 and
+ PublishFreeze.confidentiality < 1 order by
+ PublishFreeze.CreateTime desc""", (self.group.name)).fetchone()
+ elif self.type == "Geno":
+ results = g.db.execute("""select InfoFiles.GN_AccesionId from InfoFiles, GenoFreeze, InbredSet where
+ InbredSet.Name = %s and
+ GenoFreeze.InbredSetId = InbredSet.Id and
+ InfoFiles.InfoPageName = GenoFreeze.ShortName and
+ GenoFreeze.public > 0 and
+ GenoFreeze.confidentiality < 1 order by
+ GenoFreeze.CreateTime desc""", (self.group.name)).fetchone()
+ else:
+ results = None
+
+ if results is not None:
+ return str(results[0])
+ else:
+ return "None"
+
+ def retrieve_other_names(self):
+ """This method fetches the the dataset names in search_result.
+
+ If the data set name parameter is not found in the 'Name' field of
+ the data set table, check if it is actually the FullName or
+ ShortName instead.
+
+ This is not meant to retrieve the data set info if no name at
+ all is passed.
+
+ """
+
+ try:
+ if self.type == "ProbeSet":
+ query_args = tuple(escape(x) for x in (
+ self.name,
+ self.name,
+ self.name))
+
+ self.id, self.name, self.fullname, self.shortname, self.data_scale, self.tissue = fetch1("""
+ SELECT ProbeSetFreeze.Id, ProbeSetFreeze.Name, ProbeSetFreeze.FullName, ProbeSetFreeze.ShortName, ProbeSetFreeze.DataScale, Tissue.Name
+ FROM ProbeSetFreeze, ProbeFreeze, Tissue
+ WHERE ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id
+ AND ProbeFreeze.TissueId = Tissue.Id
+ AND (ProbeSetFreeze.Name = '%s' OR ProbeSetFreeze.FullName = '%s' OR ProbeSetFreeze.ShortName = '%s')
+ """ % (query_args), "/dataset/"+self.name+".json",
+ lambda r: (r["id"], r["name"], r["full_name"],
+ r["short_name"], r["data_scale"], r["tissue"])
+ )
+ else:
+ query_args = tuple(escape(x) for x in (
+ (self.type + "Freeze"),
+ self.name,
+ self.name,
+ self.name))
+
+ self.tissue = "N/A"
+ self.id, self.name, self.fullname, self.shortname = fetchone("""
+ SELECT Id, Name, FullName, ShortName
+ FROM %s
+ WHERE (Name = '%s' OR FullName = '%s' OR ShortName = '%s')
+ """ % (query_args))
+
+ except TypeError:
+ logger.debug(
+ "Dataset {} is not yet available in GeneNetwork.".format(self.name))
+
+ def get_trait_data(self, sample_list=None):
+ if sample_list:
+ self.samplelist = sample_list
+ else:
+ self.samplelist = self.group.samplelist
+
+ if self.group.parlist is not None and self.group.f1list is not None:
+ if (self.group.parlist + self.group.f1list) in self.samplelist:
+ self.samplelist += self.group.parlist + self.group.f1list
+
+ query = """
+ SELECT Strain.Name, Strain.Id FROM Strain, Species
+ WHERE Strain.Name IN {}
+ and Strain.SpeciesId=Species.Id
+ and Species.name = '{}'
+ """.format(create_in_clause(self.samplelist), *mescape(self.group.species))
+ # logger.sql(query)
+ results = dict(g.db.execute(query).fetchall())
+ sample_ids = [results[item] for item in self.samplelist]
+
+ # MySQL limits the number of tables that can be used in a join to 61,
+ # so we break the sample ids into smaller chunks
+ # Postgres doesn't have that limit, so we can get rid of this after we transition
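+ # e.g. 120 sample ids with chunk_size = 50 are split into ceil(120 / 50) = 3
+ # chunks, so each query below joins far fewer than MySQL's 61-table limit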
+ chunk_size = 50
+ number_chunks = int(math.ceil(len(sample_ids) / chunk_size))
+ trait_sample_data = []
+ for sample_ids_step in chunks.divide_into_chunks(sample_ids, number_chunks):
+ if self.type == "Publish":
+ dataset_type = "Phenotype"
+ else:
+ dataset_type = self.type
+ temp = ['T%s.value' % item for item in sample_ids_step]
+ if self.type == "Publish":
+ query = "SELECT {}XRef.Id,".format(escape(self.type))
+ else:
+ query = "SELECT {}.Name,".format(escape(dataset_type))
+ data_start_pos = 1
+ query += ', '.join(temp)
+ query += ' FROM ({}, {}XRef, {}Freeze) '.format(*mescape(dataset_type,
+ self.type,
+ self.type))
+
+ for item in sample_ids_step:
+ query += """
+ left join {}Data as T{} on T{}.Id = {}XRef.DataId
+ and T{}.StrainId={}\n
+ """.format(*mescape(self.type, item, item, self.type, item, item))
+
+ if self.type == "Publish":
+ query += """
+ WHERE {}XRef.InbredSetId = {}Freeze.InbredSetId
+ and {}Freeze.Name = '{}'
+ and {}.Id = {}XRef.{}Id
+ order by {}.Id
+ """.format(*mescape(self.type, self.type, self.type, self.name,
+ dataset_type, self.type, dataset_type, dataset_type))
+ else:
+ query += """
+ WHERE {}XRef.{}FreezeId = {}Freeze.Id
+ and {}Freeze.Name = '{}'
+ and {}.Id = {}XRef.{}Id
+ order by {}.Id
+ """.format(*mescape(self.type, self.type, self.type, self.type,
+ self.name, dataset_type, self.type, self.type, dataset_type))
+
+ results = g.db.execute(query).fetchall()
+ trait_sample_data.append(results)
+
+ trait_count = len(trait_sample_data[0])
+ self.trait_data = collections.defaultdict(list)
+
+ # put all of the separate data together into a dictionary where the keys are
+ # trait names and values are lists of sample values
+ for trait_counter in range(trait_count):
+ trait_name = trait_sample_data[0][trait_counter][0]
+ for chunk_counter in range(int(number_chunks)):
+ self.trait_data[trait_name] += (
+ trait_sample_data[chunk_counter][trait_counter][data_start_pos:])
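+ # the result maps each trait name to a list of sample values in the same
+ # order as self.samplelist, e.g. {"trait_name": [9.2, 8.9, ...]}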
+
+
+class MrnaAssayDataSet(DataSet):
+ '''
+ An mRNA Assay is a quantitative assessment (assay) associated with an mRNA trait
+
+ This used to be called ProbeSet, but that term only refers specifically to the Affymetrix
+ platform and is far too specific.
+
+ '''
+ DS_NAME_MAP['ProbeSet'] = 'MrnaAssayDataSet'
+
+ def setup(self):
+ # Fields in the database table
+ self.search_fields = ['Name',
+ 'Description',
+ 'Probe_Target_Description',
+ 'Symbol',
+ 'Alias',
+ 'GenbankId',
+ 'UniGeneId',
+ 'RefSeq_TranscriptId']
+
+ # Find out what display_fields is
+ self.display_fields = ['name', 'symbol',
+ 'description', 'probe_target_description',
+ 'chr', 'mb',
+ 'alias', 'geneid',
+ 'genbankid', 'unigeneid',
+ 'omim', 'refseq_transcriptid',
+ 'blatseq', 'targetseq',
+ 'chipid', 'comments',
+ 'strand_probe', 'strand_gene',
+ 'proteinid', 'uniprotid',
+ 'probe_set_target_region',
+ 'probe_set_specificity',
+ 'probe_set_blat_score',
+ 'probe_set_blat_mb_start',
+ 'probe_set_blat_mb_end',
+ 'probe_set_strand',
+ 'probe_set_note_by_rw',
+ 'flag']
+
+ # Fields displayed in the search results table header
+ self.header_fields = ['Index',
+ 'Record',
+ 'Symbol',
+ 'Description',
+ 'Location',
+ 'Mean',
+ 'Max LRS',
+ 'Max LRS Location',
+ 'Additive Effect']
+
+ # Todo: Obsolete or rename this field
+ self.type = 'ProbeSet'
+
+ self.query_for_group = '''
+ SELECT
+ InbredSet.Name, InbredSet.Id, InbredSet.GeneticType
+ FROM
+ InbredSet, ProbeSetFreeze, ProbeFreeze
+ WHERE
+ ProbeFreeze.InbredSetId = InbredSet.Id AND
+ ProbeFreeze.Id = ProbeSetFreeze.ProbeFreezeId AND
+ ProbeSetFreeze.Name = "%s"
+ ''' % escape(self.name)
+
+ def check_confidentiality(self):
+ return geno_mrna_confidentiality(self)
+
+ def get_trait_info(self, trait_list=None, species=''):
+
+ # Note: setting trait_list to [] is probably not a great idea.
+ if not trait_list:
+ trait_list = []
+
+ for this_trait in trait_list:
+
+ if not this_trait.haveinfo:
+ this_trait.retrieveInfo(QTL=1)
+
+ if not this_trait.symbol:
+ this_trait.symbol = "N/A"
+
+ # XZ, 12/08/2008: description
+ # XZ, 06/05/2009: Rob asked to add probe target description
+ # strip a leading/trailing UTF-8 byte-order mark, if present
+ description_string = str(
+ this_trait.description).strip(codecs.BOM_UTF8.decode("utf-8"))
+ target_string = str(
+ this_trait.probe_target_description).strip(codecs.BOM_UTF8.decode("utf-8"))
+
+ if len(description_string) > 1 and description_string != 'None':
+ description_display = description_string
+ else:
+ description_display = this_trait.symbol
+
+ if (len(description_display) > 1 and description_display != 'N/A' and
+ len(target_string) > 1 and target_string != 'None'):
+ description_display = description_display + '; ' + target_string.strip()
+
+ # Save it for the jinja2 template
+ this_trait.description_display = description_display
+
+ if this_trait.chr and this_trait.mb:
+ this_trait.location_repr = 'Chr%s: %.6f' % (
+ this_trait.chr, float(this_trait.mb))
+
+ # Get mean expression value
+ query = (
+ """select ProbeSetXRef.mean from ProbeSetXRef, ProbeSet
+ where ProbeSetXRef.ProbeSetFreezeId = %s and
+ ProbeSet.Id = ProbeSetXRef.ProbeSetId and
+ ProbeSet.Name = '%s'
+ """ % (escape(str(this_trait.dataset.id)),
+ escape(this_trait.name)))
+
+ #logger.debug("query is:", pf(query))
+ logger.sql(query)
+ result = g.db.execute(query).fetchone()
+
+ mean = result[0] if result else 0
+
+ if mean:
+ this_trait.mean = "%2.3f" % mean
+
+ # LRS and its location
+ this_trait.LRS_score_repr = 'N/A'
+ this_trait.LRS_location_repr = 'N/A'
+
+ # Max LRS and its Locus location
+ if this_trait.lrs and this_trait.locus:
+ query = """
+ select Geno.Chr, Geno.Mb from Geno, Species
+ where Species.Name = '{}' and
+ Geno.Name = '{}' and
+ Geno.SpeciesId = Species.Id
+ """.format(species, this_trait.locus)
+ logger.debug(query)
+ result = g.db.execute(query).fetchone()
+
+ if result:
+ lrs_chr, lrs_mb = result
+ this_trait.LRS_score_repr = '%3.1f' % this_trait.lrs
+ this_trait.LRS_location_repr = 'Chr%s: %.6f' % (
+ lrs_chr, float(lrs_mb))
+
+ return trait_list
+
+ def retrieve_sample_data(self, trait):
+ query = """
+ SELECT
+ Strain.Name, ProbeSetData.value, ProbeSetSE.error, NStrain.count, Strain.Name2
+ FROM
+ (ProbeSetData, ProbeSetFreeze, Strain, ProbeSet, ProbeSetXRef)
+ left join ProbeSetSE on
+ (ProbeSetSE.DataId = ProbeSetData.Id AND ProbeSetSE.StrainId = ProbeSetData.StrainId)
+ left join NStrain on
+ (NStrain.DataId = ProbeSetData.Id AND
+ NStrain.StrainId = ProbeSetData.StrainId)
+ WHERE
+ ProbeSet.Name = '%s' AND ProbeSetXRef.ProbeSetId = ProbeSet.Id AND
+ ProbeSetXRef.ProbeSetFreezeId = ProbeSetFreeze.Id AND
+ ProbeSetFreeze.Name = '%s' AND
+ ProbeSetXRef.DataId = ProbeSetData.Id AND
+ ProbeSetData.StrainId = Strain.Id
+ Order BY
+ Strain.Name
+ """ % (escape(trait), escape(self.name))
+ # logger.sql(query)
+ results = g.db.execute(query).fetchall()
+ #logger.debug("RETRIEVED RESULTS HERE:", results)
+ return results
+
+ def retrieve_genes(self, column_name):
+ query = """
+ select ProbeSet.Name, ProbeSet.%s
+ from ProbeSet,ProbeSetXRef
+ where ProbeSetXRef.ProbeSetFreezeId = %s and
+ ProbeSetXRef.ProbeSetId=ProbeSet.Id;
+ """ % (column_name, escape(str(self.id)))
+ # logger.sql(query)
+ results = g.db.execute(query).fetchall()
+
+ return dict(results)
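+ # e.g. retrieve_genes("Symbol") returns a {probeset name: symbol} dict for
+ # every ProbeSet in this dataset ("Symbol" is one of the search_fields above)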
+
+
+class TempDataSet(DataSet):
+ '''Temporary user-generated data set'''
+
+ DS_NAME_MAP['Temp'] = 'TempDataSet'
+
+ def setup(self):
+ self.search_fields = ['name',
+ 'description']
+
+ self.display_fields = ['name',
+ 'description']
+
+ self.header_fields = ['Name',
+ 'Description']
+
+ self.type = 'Temp'
+
+ # Need to double check later how these are used
+ self.id = 1
+ self.fullname = 'Temporary Storage'
+ self.shortname = 'Temp'
+
+
+class PhenotypeDataSet(DataSet):
+ DS_NAME_MAP['Publish'] = 'PhenotypeDataSet'
+
+ def setup(self):
+
+ #logger.debug("IS A PHENOTYPEDATASET")
+
+ # Fields in the database table
+ self.search_fields = ['Phenotype.Post_publication_description',
+ 'Phenotype.Pre_publication_description',
+ 'Phenotype.Pre_publication_abbreviation',
+ 'Phenotype.Post_publication_abbreviation',
+ 'PublishXRef.mean',
+ 'Phenotype.Lab_code',
+ 'Publication.PubMed_ID',
+ 'Publication.Abstract',
+ 'Publication.Title',
+ 'Publication.Authors',
+ 'PublishXRef.Id']
+
+ # Figure out what display_fields is
+ self.display_fields = ['name', 'group_code',
+ 'pubmed_id',
+ 'pre_publication_description',
+ 'post_publication_description',
+ 'original_description',
+ 'pre_publication_abbreviation',
+ 'post_publication_abbreviation',
+ 'mean',
+ 'lab_code',
+ 'submitter', 'owner',
+ 'authorized_users',
+ 'authors', 'title',
+ 'abstract', 'journal',
+ 'volume', 'pages',
+ 'month', 'year',
+ 'sequence', 'units', 'comments']
+
+ # Fields displayed in the search results table header
+ self.header_fields = ['Index',
+ 'Record',
+ 'Description',
+ 'Authors',
+ 'Year',
+ 'Max LRS',
+ 'Max LRS Location',
+ 'Additive Effect']
+
+ self.type = 'Publish'
+
+ self.query_for_group = '''
+ SELECT
+ InbredSet.Name, InbredSet.Id, InbredSet.GeneticType
+ FROM
+ InbredSet, PublishFreeze
+ WHERE
+ PublishFreeze.InbredSetId = InbredSet.Id AND
+ PublishFreeze.Name = "%s"
+ ''' % escape(self.name)
+
+ def check_confidentiality(self):
+ # (Urgently?) Need to write this
+ pass
+
+ def get_trait_info(self, trait_list, species=''):
+ for this_trait in trait_list:
+
+ if not this_trait.haveinfo:
+ this_trait.retrieve_info(get_qtl_info=True)
+
+ description = this_trait.post_publication_description
+
+ # If the dataset is confidential and the user has access to confidential
+ # phenotype traits, then display the pre-publication description instead
+ # of the post-publication description
+ if this_trait.confidential:
+ this_trait.description_display = ""
+ continue # todo for now, because no authorization features
+
+ if not webqtlUtil.has_access_to_confidentail_phenotype_trait(
+ privilege=self.privilege,
+ userName=self.userName,
+ authorized_users=this_trait.authorized_users):
+
+ description = this_trait.pre_publication_description
+
+ if len(description) > 0:
+ this_trait.description_display = description.strip()
+ else:
+ this_trait.description_display = ""
+
+ if not this_trait.year.isdigit():
+ this_trait.pubmed_text = "N/A"
+ else:
+ this_trait.pubmed_text = this_trait.year
+
+ if this_trait.pubmed_id:
+ this_trait.pubmed_link = webqtlConfig.PUBMEDLINK_URL % this_trait.pubmed_id
+
+ # LRS and its location
+ this_trait.LRS_score_repr = "N/A"
+ this_trait.LRS_location_repr = "N/A"
+
+ if this_trait.lrs:
+ query = """
+ select Geno.Chr, Geno.Mb from Geno, Species
+ where Species.Name = '%s' and
+ Geno.Name = '%s' and
+ Geno.SpeciesId = Species.Id
+ """ % (species, this_trait.locus)
+
+ result = g.db.execute(query).fetchone()
+
+ if result:
+ if result[0] and result[1]:
+ LRS_Chr = result[0]
+ LRS_Mb = result[1]
+
+ this_trait.LRS_score_repr = LRS_score_repr = '%3.1f' % this_trait.lrs
+ this_trait.LRS_location_repr = LRS_location_repr = 'Chr%s: %.6f' % (
+ LRS_Chr, float(LRS_Mb))
+
+ def retrieve_sample_data(self, trait):
+ query = """
+ SELECT
+ Strain.Name, PublishData.value, PublishSE.error, NStrain.count, Strain.Name2
+ FROM
+ (PublishData, Strain, PublishXRef, PublishFreeze)
+ left join PublishSE on
+ (PublishSE.DataId = PublishData.Id AND PublishSE.StrainId = PublishData.StrainId)
+ left join NStrain on
+ (NStrain.DataId = PublishData.Id AND
+ NStrain.StrainId = PublishData.StrainId)
+ WHERE
+ PublishXRef.InbredSetId = PublishFreeze.InbredSetId AND
+ PublishData.Id = PublishXRef.DataId AND PublishXRef.Id = %s AND
+ PublishFreeze.Id = %s AND PublishData.StrainId = Strain.Id
+ Order BY
+ Strain.Name
+ """
+
+ results = g.db.execute(query, (trait, self.id)).fetchall()
+ return results
+
+
+class GenotypeDataSet(DataSet):
+ DS_NAME_MAP['Geno'] = 'GenotypeDataSet'
+
+ def setup(self):
+ # Fields in the database table
+ self.search_fields = ['Name',
+ 'Chr']
+
+ # Find out what display_fields is
+ self.display_fields = ['name',
+ 'chr',
+ 'mb',
+ 'source2',
+ 'sequence']
+
+ # Fields displayed in the search results table header
+ self.header_fields = ['Index',
+ 'ID',
+ 'Location']
+
+ # Todo: Obsolete or rename this field
+ self.type = 'Geno'
+
+ self.query_for_group = '''
+ SELECT
+ InbredSet.Name, InbredSet.Id, InbredSet.GeneticType
+ FROM
+ InbredSet, GenoFreeze
+ WHERE
+ GenoFreeze.InbredSetId = InbredSet.Id AND
+ GenoFreeze.Name = "%s"
+ ''' % escape(self.name)
+
+ def check_confidentiality(self):
+ return geno_mrna_confidentiality(self)
+
+ def get_trait_info(self, trait_list, species=None):
+ for this_trait in trait_list:
+ if not this_trait.haveinfo:
+ this_trait.retrieveInfo()
+
+ if this_trait.chr and this_trait.mb:
+ this_trait.location_repr = 'Chr%s: %.6f' % (
+ this_trait.chr, float(this_trait.mb))
+
+ def retrieve_sample_data(self, trait):
+ query = """
+ SELECT
+ Strain.Name, GenoData.value, GenoSE.error, "N/A", Strain.Name2
+ FROM
+ (GenoData, GenoFreeze, Strain, Geno, GenoXRef)
+ left join GenoSE on
+ (GenoSE.DataId = GenoData.Id AND GenoSE.StrainId = GenoData.StrainId)
+ WHERE
+ Geno.SpeciesId = %s AND Geno.Name = %s AND GenoXRef.GenoId = Geno.Id AND
+ GenoXRef.GenoFreezeId = GenoFreeze.Id AND
+ GenoFreeze.Name = %s AND
+ GenoXRef.DataId = GenoData.Id AND
+ GenoData.StrainId = Strain.Id
+ Order BY
+ Strain.Name
+ """
+ results = g.db.execute(query,
+ (retrieve_species_id(self.group.name),
+ trait, self.name)).fetchall()
+ return results
+
+
+def geno_mrna_confidentiality(ob):
+ dataset_table = ob.type + "Freeze"
+ #logger.debug("dataset_table [%s]: %s" % (type(dataset_table), dataset_table))
+
+ query = '''SELECT Id, Name, FullName, confidentiality,
+ AuthorisedUsers FROM %s WHERE Name = "%s"''' % (dataset_table, ob.name)
+ #
+ result = g.db.execute(query)
+
+ (_dataset_id,
+ _name,
+ _full_name,
+ confidential,
+ _authorized_users) = result.fetchall()[0]
+
+ if confidential:
+ return True
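
The module's main entry point is create_dataset(), which expects a per-request database connection on flask.g as g.db and a local Redis instance. A rough usage sketch under those assumptions is shown below; the Flask/SQLAlchemy wiring is illustrative and not part of this commit, and it presumes the GeneNetwork MySQL database, Redis, and genotype files are available:

    # illustrative only; connection URI and dataset availability are assumptions
    from flask import Flask, g
    from sqlalchemy import create_engine
    from gn3.base.data_set import create_dataset

    app = Flask(__name__)
    engine = create_engine("mysql://user:password@localhost/db_webqtl")  # placeholder URI

    with app.app_context():
        g.db = engine.connect()  # data_set.py reads the connection from flask.g
        dataset = create_dataset("HC_M2_0606_P")  # resolves to MrnaAssayDataSet if the dataset exists
        dataset.get_trait_data()                  # sample values keyed by trait name
        print(len(dataset.trait_data))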