Diffstat (limited to 'gn3')
-rw-r--r--  gn3/api/correlation.py                       |   2
-rw-r--r--  gn3/base/__init__.py                         |   0
-rw-r--r--  gn3/base/data_set.py                         | 882
-rw-r--r--  gn3/base/mrna_assay_tissue_data.py           |  94
-rw-r--r--  gn3/base/species.py                          |  64
-rw-r--r--  gn3/base/trait.py                            | 366
-rw-r--r--  gn3/base/webqtlCaseData.py                   |  84
-rw-r--r--  gn3/correlation/__init__.py                  |   0
-rw-r--r--  gn3/correlation/correlation_computations.py  |  32
-rw-r--r--  gn3/correlation/correlation_functions.py     |  96
-rw-r--r--  gn3/correlation/correlation_utility.py       |  22
-rw-r--r--  gn3/correlation/show_corr_results.py         | 735
-rw-r--r--  gn3/db/__init__.py                           |   0
-rw-r--r--  gn3/db/calls.py                              |  51
-rw-r--r--  gn3/db/webqtlDatabaseFunction.py             |  52
-rw-r--r--  gn3/utility/__init__.py                      |   0
-rw-r--r--  gn3/utility/bunch.py                         |  16
-rw-r--r--  gn3/utility/chunks.py                        |  32
-rw-r--r--  gn3/utility/corr_result_helpers.py           |  45
-rw-r--r--  gn3/utility/db_tools.py                      |  19
-rw-r--r--  gn3/utility/get_group_samplelists.py         |  47
-rw-r--r--  gn3/utility/helper_functions.py              |  24
-rw-r--r--  gn3/utility/hmac.py                          |  50
-rw-r--r--  gn3/utility/logger.py                        | 163
-rw-r--r--  gn3/utility/species.py                       |  71
-rw-r--r--  gn3/utility/tools.py                         |  37
-rw-r--r--  gn3/utility/webqtlUtil.py                    |  66
27 files changed, 1 insertion(+), 3049 deletions(-)
diff --git a/gn3/api/correlation.py b/gn3/api/correlation.py
index 56b8381..53ea6a7 100644
--- a/gn3/api/correlation.py
+++ b/gn3/api/correlation.py
@@ -60,4 +60,4 @@ def compute_tissue_corr(corr_method="pearson"):
target_tissues_dict_list=target_tissues_dict_list,
corr_method=corr_method)
-    return jsonify(results)
\ No newline at end of file
+    return jsonify(results)
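
For context, a minimal, self-contained sketch of the endpoint shape kept by this hunk: a Flask view that ends with a plain jsonify(results) return. The real blueprint registration, request parsing and tissue-correlation call are not shown; the route and names below are illustrative only, not the module's actual wiring.

    # Hypothetical, stripped-down view mirroring the retained return style.
    from flask import Flask, jsonify, request

    app = Flask(__name__)

    @app.route("/api/correlation/tissue_corr/<corr_method>", methods=["POST"])
    def compute_tissue_corr(corr_method="pearson"):
        payload = request.get_json(silent=True) or {}        # stand-in for the real inputs
        results = {"corr_method": corr_method, "n_traits": len(payload)}
        return jsonify(results)
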
diff --git a/gn3/base/__init__.py b/gn3/base/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/gn3/base/__init__.py
+++ /dev/null
diff --git a/gn3/base/data_set.py b/gn3/base/data_set.py
deleted file mode 100644
index 01913f2..0000000
--- a/gn3/base/data_set.py
+++ /dev/null
@@ -1,882 +0,0 @@
-
-import json
-import math
-import collections
-import requests
-from redis import Redis
-from flask import g
-from gn3.utility.db_tools import escape
-from gn3.utility.db_tools import mescape
-from gn3.utility.db_tools import create_in_clause
-from gn3.utility.tools import locate_ignore_error
-from gn3.db.calls import fetch1
-from gn3.db.calls import fetchone
-from gn3.db.webqtlDatabaseFunction import retrieve_species
-from gn3.utility import chunks
-
-from gn3.utility import get_group_samplelists
-from gn3.base.species import TheSpecies
-r = Redis()
-
-# should probably move this to its own configuration files
-
-USE_REDIS = True
-
-# todo move to config file
-GN2_BASE_URL = "https://genenetwork.org/"
-
-DS_NAME_MAP = {}
-
-# pylint: disable-all
-#todo file not linted
-# pylint: disable=C0103
-
-
-
-def create_dataset(dataset_name, dataset_type=None, get_samplelist=True, group_name=None):
-
- if dataset_name == "Temp":
- dataset_type = "Temp"
-
- if dataset_type is None:
- dataset_type = Dataset_Getter(dataset_name)
- dataset_ob = DS_NAME_MAP[dataset_type]
- dataset_class = globals()[dataset_ob]
-
- if dataset_type == "Temp":
- results = dataset_class(dataset_name, get_samplelist, group_name)
-
- else:
- results = dataset_class(dataset_name, get_samplelist)
-
- return results
-
-
-class DatasetType:
- def __init__(self, redis_instance):
- self.redis_instance = redis_instance
- self.datasets = {}
-
- data = self.redis_instance.get("dataset_structure")
- if data:
- self.datasets = json.loads(data)
-
- else:
-
- try:
-
- data = json.loads(requests.get(
- GN2_BASE_URL + "/api/v_pre1/gen_dropdown", timeout=5).content)
-
- # todo:Refactor code below n^4 loop
-
- for species in data["datasets"]:
- for group in data["datasets"][species]:
- for dataset_type in data['datasets'][species][group]:
- for dataset in data['datasets'][species][group][dataset_type]:
-
- short_dataset_name = dataset[1]
- if dataset_type == "Phenotypes":
- new_type = "Publish"
-
- elif dataset_type == "Genotypes":
- new_type = "Geno"
- else:
- new_type = "ProbeSet"
-
- self.datasets[short_dataset_name] = new_type
-
- except Exception as e:
- raise e
-
- self.redis_instance.set(
- "dataset_structure", json.dumps(self.datasets))
-
- def set_dataset_key(self, t, name):
- """If name is not in the object's dataset dictionary, set it, and update
- dataset_structure in Redis
-
- args:
- t: Type of dataset structure which can be: 'mrna_expr', 'pheno',
- 'other_pheno', 'geno'
- name: The name of the key to inserted in the datasets dictionary
-
- """
-
- sql_query_mapping = {
- 'mrna_expr': ("""SELECT ProbeSetFreeze.Id FROM """ +
- """ProbeSetFreeze WHERE ProbeSetFreeze.Name = "{}" """),
- 'pheno': ("""SELECT InfoFiles.GN_AccesionId """ +
- """FROM InfoFiles, PublishFreeze, InbredSet """ +
- """WHERE InbredSet.Name = '{}' AND """ +
- """PublishFreeze.InbredSetId = InbredSet.Id AND """ +
- """InfoFiles.InfoPageName = PublishFreeze.Name"""),
- 'other_pheno': ("""SELECT PublishFreeze.Name """ +
- """FROM PublishFreeze, InbredSet """ +
- """WHERE InbredSet.Name = '{}' AND """ +
- """PublishFreeze.InbredSetId = InbredSet.Id"""),
- 'geno': ("""SELECT GenoFreeze.Id FROM GenoFreeze WHERE """ +
- """GenoFreeze.Name = "{}" """)
- }
-
- dataset_name_mapping = {
- "mrna_expr": "ProbeSet",
- "pheno": "Publish",
- "other_pheno": "Publish",
- "geno": "Geno",
- }
-
- group_name = name
- if t in ['pheno', 'other_pheno']:
- group_name = name.replace("Publish", "")
-
- results = g.db.execute(
- sql_query_mapping[t].format(group_name)).fetchone()
- if results:
- self.datasets[name] = dataset_name_mapping[t]
- self.redis_instance.set(
- "dataset_structure", json.dumps(self.datasets))
-
- return True
-
- return None
-
- def __call__(self, name):
- if name not in self.datasets:
- for t in ["mrna_expr", "pheno", "other_pheno", "geno"]:
-
- if(self.set_dataset_key(t, name)):
- # This has side-effects, with the end result being a truth-y value
- break
-
- return self.datasets.get(name, None)
-
-
-# Do the intensive work at startup one time only
-# could replace the code below
-Dataset_Getter = DatasetType(r)
-
-
-class DatasetGroup:
- """
- Each group has multiple datasets; each species has multiple groups.
-
- For example, Mouse has multiple groups (BXD, BXA, etc), and each group
- has multiple datasets associated with it.
-
- """
-
- def __init__(self, dataset, name=None):
- """This sets self.group and self.group_id"""
- if name == None:
- self.name, self.id, self.genetic_type = fetchone(
- dataset.query_for_group)
-
- else:
- self.name, self.id, self.genetic_type = fetchone(
- "SELECT InbredSet.Name, InbredSet.Id, InbredSet.GeneticType FROM InbredSet where Name='%s'" % name)
-
- if self.name == 'BXD300':
- self.name = "BXD"
-
- self.f1list = None
-
- self.parlist = None
-
- self.get_f1_parent_strains()
-
- # remove below not used in correlation
-
- self.mapping_id, self.mapping_names = self.get_mapping_methods()
-
- self.species = retrieve_species(self.name)
-
- def get_f1_parent_strains(self):
- try:
- # should import ParInfo
- raise e
- # NL, 07/27/2010. ParInfo has been moved from webqtlForm.py to webqtlUtil.py;
- f1, f12, maternal, paternal = webqtlUtil.ParInfo[self.name]
- except Exception as e:
- f1 = f12 = maternal = paternal = None
-
- if f1 and f12:
- self.f1list = [f1, f12]
-
- if maternal and paternal:
- self.parlist = [maternal, paternal]
-
- def get_mapping_methods(self):
- mapping_id = g.db.execute(
- "select MappingMethodId from InbredSet where Name= '%s'" % self.name).fetchone()[0]
-
- if mapping_id == "1":
- mapping_names = ["GEMMA", "QTLReaper", "R/qtl"]
- elif mapping_id == "2":
- mapping_names = ["GEMMA"]
-
- elif mapping_id == "3":
- mapping_names = ["R/qtl"]
-
- elif mapping_id == "4":
- mapping_names = ["GEMMA", "PLINK"]
-
- else:
- mapping_names = []
-
- return mapping_id, mapping_names
-
- def get_samplelist(self):
- result = None
- key = "samplelist:v3:" + self.name
- if USE_REDIS:
- result = r.get(key)
-
- if result is not None:
-
- self.samplelist = json.loads(result)
-
- else:
- # logger.debug("Cache not hit")
- # should enable logger
- genotype_fn = locate_ignore_error(self.name+".geno", 'genotype')
- if genotype_fn:
- self.samplelist = get_group_samplelists.get_samplelist(
- "geno", genotype_fn)
-
- else:
- self.samplelist = None
-
- if USE_REDIS:
- r.set(key, json.dumps(self.samplelist))
- r.expire(key, 60*5)
-
-
-class DataSet:
- """
- DataSet class defines a dataset in webqtl, can be either Microarray,
- Published phenotype, genotype, or user input dataset(temp)
-
- """
-
- def __init__(self, name, get_samplelist=True, group_name=None):
-
- assert name, "Need a name"
- self.name = name
- self.id = None
- self.shortname = None
- self.fullname = None
- self.type = None
- self.data_scale = None # ZS: For example log2
-
- self.setup()
-
- if self.type == "Temp": # Need to supply group name as input if temp trait
- # sets self.group and self.group_id and gets genotype
- self.group = DatasetGroup(self, name=group_name)
- else:
- self.check_confidentiality()
- self.retrieve_other_names()
- # sets self.group and self.group_id and gets genotype
- self.group = DatasetGroup(self)
- self.accession_id = self.get_accession_id()
- if get_samplelist == True:
- self.group.get_samplelist()
- self.species = TheSpecies(self)
-
- def get_desc(self):
- """Gets overridden later, at least for Temp...used by trait's get_given_name"""
- return None
-
-
- def get_accession_id(self):
- if self.type == "Publish":
- results = g.db.execute("""select InfoFiles.GN_AccesionId from InfoFiles, PublishFreeze, InbredSet where
- InbredSet.Name = %s and
- PublishFreeze.InbredSetId = InbredSet.Id and
- InfoFiles.InfoPageName = PublishFreeze.Name and
- PublishFreeze.public > 0 and
- PublishFreeze.confidentiality < 1 order by
- PublishFreeze.CreateTime desc""", (self.group.name)).fetchone()
- elif self.type == "Geno":
- results = g.db.execute("""select InfoFiles.GN_AccesionId from InfoFiles, GenoFreeze, InbredSet where
- InbredSet.Name = %s and
- GenoFreeze.InbredSetId = InbredSet.Id and
- InfoFiles.InfoPageName = GenoFreeze.ShortName and
- GenoFreeze.public > 0 and
- GenoFreeze.confidentiality < 1 order by
- GenoFreeze.CreateTime desc""", (self.group.name)).fetchone()
- else:
- results = None
-
- if results != None:
- return str(results[0])
- else:
- return "None"
-
- def retrieve_other_names(self):
- """This method fetches the the dataset names in search_result.
-
- If the data set name parameter is not found in the 'Name' field of
- the data set table, check if it is actually the FullName or
- ShortName instead.
-
- This is not meant to retrieve the data set info if no name at
- all is passed.
-
- """
-
- try:
- if self.type == "ProbeSet":
- query_args = tuple(escape(x) for x in (
- self.name,
- self.name,
- self.name))
-
- self.id, self.name, self.fullname, self.shortname, self.data_scale, self.tissue = fetch1("""
- SELECT ProbeSetFreeze.Id, ProbeSetFreeze.Name, ProbeSetFreeze.FullName, ProbeSetFreeze.ShortName, ProbeSetFreeze.DataScale, Tissue.Name
- FROM ProbeSetFreeze, ProbeFreeze, Tissue
- WHERE ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id
- AND ProbeFreeze.TissueId = Tissue.Id
- AND (ProbeSetFreeze.Name = '%s' OR ProbeSetFreeze.FullName = '%s' OR ProbeSetFreeze.ShortName = '%s')
- """ % (query_args), "/dataset/"+self.name+".json",
- lambda r: (r["id"], r["name"], r["full_name"],
- r["short_name"], r["data_scale"], r["tissue"])
- )
- else:
- query_args = tuple(escape(x) for x in (
- (self.type + "Freeze"),
- self.name,
- self.name,
- self.name))
-
- self.tissue = "N/A"
- self.id, self.name, self.fullname, self.shortname = fetchone("""
- SELECT Id, Name, FullName, ShortName
- FROM %s
- WHERE (Name = '%s' OR FullName = '%s' OR ShortName = '%s')
- """ % (query_args))
-
- except TypeError as e:
- logger.debug(
- "Dataset {} is not yet available in GeneNetwork.".format(self.name))
- pass
-
- def get_trait_data(self, sample_list=None):
- if sample_list:
- self.samplelist = sample_list
- else:
- self.samplelist = self.group.samplelist
-
- if self.group.parlist != None and self.group.f1list != None:
- if (self.group.parlist + self.group.f1list) in self.samplelist:
- self.samplelist += self.group.parlist + self.group.f1list
-
- query = """
- SELECT Strain.Name, Strain.Id FROM Strain, Species
- WHERE Strain.Name IN {}
- and Strain.SpeciesId=Species.Id
- and Species.name = '{}'
- """.format(create_in_clause(self.samplelist), *mescape(self.group.species))
- # logger.sql(query)
- results = dict(g.db.execute(query).fetchall())
- sample_ids = [results[item] for item in self.samplelist]
-
- # MySQL limits the number of tables that can be used in a join to 61,
- # so we break the sample ids into smaller chunks
- # Postgres doesn't have that limit, so we can get rid of this after we transition
- chunk_size = 50
- number_chunks = int(math.ceil(len(sample_ids) / chunk_size))
- trait_sample_data = []
- for sample_ids_step in chunks.divide_into_chunks(sample_ids, number_chunks):
- if self.type == "Publish":
- dataset_type = "Phenotype"
- else:
- dataset_type = self.type
- temp = ['T%s.value' % item for item in sample_ids_step]
- if self.type == "Publish":
- query = "SELECT {}XRef.Id,".format(escape(self.type))
- else:
- query = "SELECT {}.Name,".format(escape(dataset_type))
- data_start_pos = 1
- query += ', '.join(temp)
- query += ' FROM ({}, {}XRef, {}Freeze) '.format(*mescape(dataset_type,
- self.type,
- self.type))
-
- for item in sample_ids_step:
- query += """
- left join {}Data as T{} on T{}.Id = {}XRef.DataId
- and T{}.StrainId={}\n
- """.format(*mescape(self.type, item, item, self.type, item, item))
-
- if self.type == "Publish":
- query += """
- WHERE {}XRef.InbredSetId = {}Freeze.InbredSetId
- and {}Freeze.Name = '{}'
- and {}.Id = {}XRef.{}Id
- order by {}.Id
- """.format(*mescape(self.type, self.type, self.type, self.name,
- dataset_type, self.type, dataset_type, dataset_type))
- else:
- query += """
- WHERE {}XRef.{}FreezeId = {}Freeze.Id
- and {}Freeze.Name = '{}'
- and {}.Id = {}XRef.{}Id
- order by {}.Id
- """.format(*mescape(self.type, self.type, self.type, self.type,
- self.name, dataset_type, self.type, self.type, dataset_type))
-
- results = g.db.execute(query).fetchall()
- trait_sample_data.append(results)
-
- trait_count = len(trait_sample_data[0])
- self.trait_data = collections.defaultdict(list)
-
- # put all of the separate data together into a dictionary where the keys are
- # trait names and values are lists of sample values
- for trait_counter in range(trait_count):
- trait_name = trait_sample_data[0][trait_counter][0]
- for chunk_counter in range(int(number_chunks)):
- self.trait_data[trait_name] += (
- trait_sample_data[chunk_counter][trait_counter][data_start_pos:])
-
-
-class MrnaAssayDataSet(DataSet):
- '''
- An mRNA Assay is a quantitative assessment (assay) associated with an mRNA trait
-
- This used to be called ProbeSet, but that term only refers specifically to the Affymetrix
- platform and is far too specific.
-
- '''
- DS_NAME_MAP['ProbeSet'] = 'MrnaAssayDataSet'
-
- def setup(self):
- # Fields in the database table
- self.search_fields = ['Name',
- 'Description',
- 'Probe_Target_Description',
- 'Symbol',
- 'Alias',
- 'GenbankId',
- 'UniGeneId',
- 'RefSeq_TranscriptId']
-
- # Find out what display_fields is
- self.display_fields = ['name', 'symbol',
- 'description', 'probe_target_description',
- 'chr', 'mb',
- 'alias', 'geneid',
- 'genbankid', 'unigeneid',
- 'omim', 'refseq_transcriptid',
- 'blatseq', 'targetseq',
- 'chipid', 'comments',
- 'strand_probe', 'strand_gene',
- 'proteinid', 'uniprotid',
- 'probe_set_target_region',
- 'probe_set_specificity',
- 'probe_set_blat_score',
- 'probe_set_blat_mb_start',
- 'probe_set_blat_mb_end',
- 'probe_set_strand',
- 'probe_set_note_by_rw',
- 'flag']
-
- # Fields displayed in the search results table header
- self.header_fields = ['Index',
- 'Record',
- 'Symbol',
- 'Description',
- 'Location',
- 'Mean',
- 'Max LRS',
- 'Max LRS Location',
- 'Additive Effect']
-
- # Todo: Obsolete or rename this field
- self.type = 'ProbeSet'
-
- self.query_for_group = '''
- SELECT
- InbredSet.Name, InbredSet.Id, InbredSet.GeneticType
- FROM
- InbredSet, ProbeSetFreeze, ProbeFreeze
- WHERE
- ProbeFreeze.InbredSetId = InbredSet.Id AND
- ProbeFreeze.Id = ProbeSetFreeze.ProbeFreezeId AND
- ProbeSetFreeze.Name = "%s"
- ''' % escape(self.name)
-
- def check_confidentiality(self):
- return geno_mrna_confidentiality(self)
-
- def get_trait_info(self, trait_list=None, species=''):
-
- # Note: setting trait_list to [] is probably not a great idea.
- if not trait_list:
- trait_list = []
-
- for this_trait in trait_list:
-
- if not this_trait.haveinfo:
- this_trait.retrieveInfo(QTL=1)
-
- if not this_trait.symbol:
- this_trait.symbol = "N/A"
-
- # XZ, 12/08/2008: description
- # XZ, 06/05/2009: Rob asked to add probe target description
- description_string = str(
- str(this_trait.description).strip(codecs.BOM_UTF8), 'utf-8')
- target_string = str(
- str(this_trait.probe_target_description).strip(codecs.BOM_UTF8), 'utf-8')
-
- if len(description_string) > 1 and description_string != 'None':
- description_display = description_string
- else:
- description_display = this_trait.symbol
-
- if (len(description_display) > 1 and description_display != 'N/A' and
- len(target_string) > 1 and target_string != 'None'):
- description_display = description_display + '; ' + target_string.strip()
-
- # Save it for the jinja2 template
- this_trait.description_display = description_display
-
- if this_trait.chr and this_trait.mb:
- this_trait.location_repr = 'Chr%s: %.6f' % (
- this_trait.chr, float(this_trait.mb))
-
- # Get mean expression value
- query = (
- """select ProbeSetXRef.mean from ProbeSetXRef, ProbeSet
- where ProbeSetXRef.ProbeSetFreezeId = %s and
- ProbeSet.Id = ProbeSetXRef.ProbeSetId and
- ProbeSet.Name = '%s'
- """ % (escape(str(this_trait.dataset.id)),
- escape(this_trait.name)))
-
- #logger.debug("query is:", pf(query))
- logger.sql(query)
- result = g.db.execute(query).fetchone()
-
- mean = result[0] if result else 0
-
- if mean:
- this_trait.mean = "%2.3f" % mean
-
- # LRS and its location
- this_trait.LRS_score_repr = 'N/A'
- this_trait.LRS_location_repr = 'N/A'
-
- # Max LRS and its Locus location
- if this_trait.lrs and this_trait.locus:
- query = """
- select Geno.Chr, Geno.Mb from Geno, Species
- where Species.Name = '{}' and
- Geno.Name = '{}' and
- Geno.SpeciesId = Species.Id
- """.format(species, this_trait.locus)
- logger.sql(query)
- result = g.db.execute(query).fetchone()
-
- if result:
- lrs_chr, lrs_mb = result
- this_trait.LRS_score_repr = '%3.1f' % this_trait.lrs
- this_trait.LRS_location_repr = 'Chr%s: %.6f' % (
- lrs_chr, float(lrs_mb))
-
- return trait_list
-
- def retrieve_sample_data(self, trait):
- query = """
- SELECT
- Strain.Name, ProbeSetData.value, ProbeSetSE.error, NStrain.count, Strain.Name2
- FROM
- (ProbeSetData, ProbeSetFreeze, Strain, ProbeSet, ProbeSetXRef)
- left join ProbeSetSE on
- (ProbeSetSE.DataId = ProbeSetData.Id AND ProbeSetSE.StrainId = ProbeSetData.StrainId)
- left join NStrain on
- (NStrain.DataId = ProbeSetData.Id AND
- NStrain.StrainId = ProbeSetData.StrainId)
- WHERE
- ProbeSet.Name = '%s' AND ProbeSetXRef.ProbeSetId = ProbeSet.Id AND
- ProbeSetXRef.ProbeSetFreezeId = ProbeSetFreeze.Id AND
- ProbeSetFreeze.Name = '%s' AND
- ProbeSetXRef.DataId = ProbeSetData.Id AND
- ProbeSetData.StrainId = Strain.Id
- Order BY
- Strain.Name
- """ % (escape(trait), escape(self.name))
- # logger.sql(query)
- results = g.db.execute(query).fetchall()
- #logger.debug("RETRIEVED RESULTS HERE:", results)
- return results
-
- def retrieve_genes(self, column_name):
- query = """
- select ProbeSet.Name, ProbeSet.%s
- from ProbeSet,ProbeSetXRef
- where ProbeSetXRef.ProbeSetFreezeId = %s and
- ProbeSetXRef.ProbeSetId=ProbeSet.Id;
- """ % (column_name, escape(str(self.id)))
- # logger.sql(query)
- results = g.db.execute(query).fetchall()
-
- return dict(results)
-
-
-class TempDataSet(DataSet):
- '''Temporary user-generated data set'''
-
- DS_NAME_MAP['Temp'] = 'TempDataSet'
-
- def setup(self):
- self.search_fields = ['name',
- 'description']
-
- self.display_fields = ['name',
- 'description']
-
- self.header_fields = ['Name',
- 'Description']
-
- self.type = 'Temp'
-
- # Need to double check later how these are used
- self.id = 1
- self.fullname = 'Temporary Storage'
- self.shortname = 'Temp'
-
-
-class PhenotypeDataSet(DataSet):
- DS_NAME_MAP['Publish'] = 'PhenotypeDataSet'
-
- def setup(self):
-
- #logger.debug("IS A PHENOTYPEDATASET")
-
- # Fields in the database table
- self.search_fields = ['Phenotype.Post_publication_description',
- 'Phenotype.Pre_publication_description',
- 'Phenotype.Pre_publication_abbreviation',
- 'Phenotype.Post_publication_abbreviation',
- 'PublishXRef.mean',
- 'Phenotype.Lab_code',
- 'Publication.PubMed_ID',
- 'Publication.Abstract',
- 'Publication.Title',
- 'Publication.Authors',
- 'PublishXRef.Id']
-
- # Figure out what display_fields is
- self.display_fields = ['name', 'group_code',
- 'pubmed_id',
- 'pre_publication_description',
- 'post_publication_description',
- 'original_description',
- 'pre_publication_abbreviation',
- 'post_publication_abbreviation',
- 'mean',
- 'lab_code',
- 'submitter', 'owner',
- 'authorized_users',
- 'authors', 'title',
- 'abstract', 'journal',
- 'volume', 'pages',
- 'month', 'year',
- 'sequence', 'units', 'comments']
-
- # Fields displayed in the search results table header
- self.header_fields = ['Index',
- 'Record',
- 'Description',
- 'Authors',
- 'Year',
- 'Max LRS',
- 'Max LRS Location',
- 'Additive Effect']
-
- self.type = 'Publish'
-
- self.query_for_group = '''
- SELECT
- InbredSet.Name, InbredSet.Id, InbredSet.GeneticType
- FROM
- InbredSet, PublishFreeze
- WHERE
- PublishFreeze.InbredSetId = InbredSet.Id AND
- PublishFreeze.Name = "%s"
- ''' % escape(self.name)
-
- def check_confidentiality(self):
- # (Urgently?) Need to write this
- pass
-
- def get_trait_info(self, trait_list, species=''):
- for this_trait in trait_list:
-
- if not this_trait.haveinfo:
- this_trait.retrieve_info(get_qtl_info=True)
-
- description = this_trait.post_publication_description
-
- # If the dataset is confidential and the user has access to confidential
- # phenotype traits, then display the pre-publication description instead
- # of the post-publication description
- if this_trait.confidential:
- this_trait.description_display = ""
- continue # todo for now, because no authorization features
-
- if not webqtlUtil.has_access_to_confidentail_phenotype_trait(
- privilege=self.privilege,
- userName=self.userName,
- authorized_users=this_trait.authorized_users):
-
- description = this_trait.pre_publication_description
-
- if len(description) > 0:
- this_trait.description_display = description.strip()
- else:
- this_trait.description_display = ""
-
- if not this_trait.year.isdigit():
- this_trait.pubmed_text = "N/A"
- else:
- this_trait.pubmed_text = this_trait.year
-
- if this_trait.pubmed_id:
- this_trait.pubmed_link = webqtlConfig.PUBMEDLINK_URL % this_trait.pubmed_id
-
- # LRS and its location
- this_trait.LRS_score_repr = "N/A"
- this_trait.LRS_location_repr = "N/A"
-
- if this_trait.lrs:
- query = """
- select Geno.Chr, Geno.Mb from Geno, Species
- where Species.Name = '%s' and
- Geno.Name = '%s' and
- Geno.SpeciesId = Species.Id
- """ % (species, this_trait.locus)
-
- result = g.db.execute(query).fetchone()
-
- if result:
- if result[0] and result[1]:
- LRS_Chr = result[0]
- LRS_Mb = result[1]
-
- this_trait.LRS_score_repr = LRS_score_repr = '%3.1f' % this_trait.lrs
- this_trait.LRS_location_repr = LRS_location_repr = 'Chr%s: %.6f' % (
- LRS_Chr, float(LRS_Mb))
-
- def retrieve_sample_data(self, trait):
- query = """
- SELECT
- Strain.Name, PublishData.value, PublishSE.error, NStrain.count, Strain.Name2
- FROM
- (PublishData, Strain, PublishXRef, PublishFreeze)
- left join PublishSE on
- (PublishSE.DataId = PublishData.Id AND PublishSE.StrainId = PublishData.StrainId)
- left join NStrain on
- (NStrain.DataId = PublishData.Id AND
- NStrain.StrainId = PublishData.StrainId)
- WHERE
- PublishXRef.InbredSetId = PublishFreeze.InbredSetId AND
- PublishData.Id = PublishXRef.DataId AND PublishXRef.Id = %s AND
- PublishFreeze.Id = %s AND PublishData.StrainId = Strain.Id
- Order BY
- Strain.Name
- """
-
- results = g.db.execute(query, (trait, self.id)).fetchall()
- return results
-
-
-class GenotypeDataSet(DataSet):
- DS_NAME_MAP['Geno'] = 'GenotypeDataSet'
-
- def setup(self):
- # Fields in the database table
- self.search_fields = ['Name',
- 'Chr']
-
- # Find out what display_fields is
- self.display_fields = ['name',
- 'chr',
- 'mb',
- 'source2',
- 'sequence']
-
- # Fields displayed in the search results table header
- self.header_fields = ['Index',
- 'ID',
- 'Location']
-
- # Todo: Obsolete or rename this field
- self.type = 'Geno'
-
- self.query_for_group = '''
- SELECT
- InbredSet.Name, InbredSet.Id, InbredSet.GeneticType
- FROM
- InbredSet, GenoFreeze
- WHERE
- GenoFreeze.InbredSetId = InbredSet.Id AND
- GenoFreeze.Name = "%s"
- ''' % escape(self.name)
-
- def check_confidentiality(self):
- return geno_mrna_confidentiality(self)
-
- def get_trait_info(self, trait_list, species=None):
- for this_trait in trait_list:
- if not this_trait.haveinfo:
- this_trait.retrieveInfo()
-
- if this_trait.chr and this_trait.mb:
- this_trait.location_repr = 'Chr%s: %.6f' % (
- this_trait.chr, float(this_trait.mb))
-
- def retrieve_sample_data(self, trait):
- query = """
- SELECT
- Strain.Name, GenoData.value, GenoSE.error, "N/A", Strain.Name2
- FROM
- (GenoData, GenoFreeze, Strain, Geno, GenoXRef)
- left join GenoSE on
- (GenoSE.DataId = GenoData.Id AND GenoSE.StrainId = GenoData.StrainId)
- WHERE
- Geno.SpeciesId = %s AND Geno.Name = %s AND GenoXRef.GenoId = Geno.Id AND
- GenoXRef.GenoFreezeId = GenoFreeze.Id AND
- GenoFreeze.Name = %s AND
- GenoXRef.DataId = GenoData.Id AND
- GenoData.StrainId = Strain.Id
- Order BY
- Strain.Name
- """
- results = g.db.execute(query,
- (webqtlDatabaseFunction.retrieve_species_id(self.group.name),
- trait, self.name)).fetchall()
- return results
-
-
-def geno_mrna_confidentiality(ob):
- dataset_table = ob.type + "Freeze"
- #logger.debug("dataset_table [%s]: %s" % (type(dataset_table), dataset_table))
-
- query = '''SELECT Id, Name, FullName, confidentiality,
- AuthorisedUsers FROM %s WHERE Name = "%s"''' % (dataset_table, ob.name)
- #
- result = g.db.execute(query)
-
- (_dataset_id,
- _name,
- _full_name,
- confidential,
- _authorized_users) = result.fetchall()[0]
-
- if confidential:
- return True
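
The deleted get_trait_data above splits the strain ids into chunks of at most 50 so that each generated query stays under MySQL's 61-table join limit. Below is a minimal, self-contained sketch of that chunking step; gn3.utility.chunks.divide_into_chunks itself is not shown in this diff, so the helper here is an assumed equivalent, not the deleted implementation.

    import math

    def divide_into_chunks(items, number_chunks):
        """Split items into number_chunks lists of roughly equal size
        (assumed stand-in for gn3.utility.chunks.divide_into_chunks)."""
        number_chunks = max(1, number_chunks)
        chunk_size = max(1, int(math.ceil(len(items) / number_chunks)))
        return [items[i:i + chunk_size] for i in range(0, len(items), chunk_size)]

    sample_ids = list(range(1, 121))   # placeholder for the Strain.Id values
    chunk_size = 50                    # keeps each chunk's joins below MySQL's limit of 61 tables
    number_chunks = int(math.ceil(len(sample_ids) / chunk_size))

    for sample_ids_step in divide_into_chunks(sample_ids, number_chunks):
        # one "left join ...Data as T<id>" clause would be built per id in this chunk
        assert len(sample_ids_step) <= chunk_size
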
diff --git a/gn3/base/mrna_assay_tissue_data.py b/gn3/base/mrna_assay_tissue_data.py
deleted file mode 100644
index 0f51ade..0000000
--- a/gn3/base/mrna_assay_tissue_data.py
+++ /dev/null
@@ -1,94 +0,0 @@
-
-# pylint: disable-all
-import collections
-
-from flask import g
-
-from gn3.utility.db_tools import create_in_clause
-from gn3.utility.db_tools import escape
-from gn3.utility.bunch import Bunch
-
-
-# from utility.logger import getLogger
-# logger = getLogger(__name__ )
-
-class MrnaAssayTissueData(object):
-
- def __init__(self, gene_symbols=None):
- self.gene_symbols = gene_symbols
- if self.gene_symbols == None:
- self.gene_symbols = []
-
- self.data = collections.defaultdict(Bunch)
-
- query = '''select t.Symbol, t.GeneId, t.DataId, t.Chr, t.Mb, t.description, t.Probe_Target_Description
- from (
- select Symbol, max(Mean) as maxmean
- from TissueProbeSetXRef
- where TissueProbeSetFreezeId=1 and '''
-
- # Note that inner join is necessary in this query to get distinct record in one symbol group
- # with highest mean value
- # Due to the limit size of TissueProbeSetFreezeId table in DB,
- # performance of inner join is acceptable.MrnaAssayTissueData(gene_symbols=symbol_list)
- if len(gene_symbols) == 0:
- query += '''Symbol!='' and Symbol Is Not Null group by Symbol)
- as x inner join TissueProbeSetXRef as t on t.Symbol = x.Symbol
- and t.Mean = x.maxmean;
- '''
- else:
- in_clause = create_in_clause(gene_symbols)
-
- # ZS: This was in the query, not sure why: http://docs.python.org/2/library/string.html?highlight=lower#string.lower
- query += ''' Symbol in {} group by Symbol)
- as x inner join TissueProbeSetXRef as t on t.Symbol = x.Symbol
- and t.Mean = x.maxmean;
- '''.format(in_clause)
-
- results = g.db.execute(query).fetchall()
-
- lower_symbols = []
- for gene_symbol in gene_symbols:
- if gene_symbol != None:
- lower_symbols.append(gene_symbol.lower())
-
- for result in results:
- symbol = result[0]
- if symbol.lower() in lower_symbols:
- symbol = symbol.lower()
-
- self.data[symbol].gene_id = result.GeneId
- self.data[symbol].data_id = result.DataId
- self.data[symbol].chr = result.Chr
- self.data[symbol].mb = result.Mb
- self.data[symbol].description = result.description
- self.data[symbol].probe_target_description = result.Probe_Target_Description
-
- ###########################################################################
- # Input: cursor, symbolList (list), dataIdDict(Dict)
- # output: symbolValuepairDict (dictionary):one dictionary of Symbol and Value Pair,
- # key is symbol, value is one list of expression values of one probeSet;
- # function: get one dictionary whose key is gene symbol and value is tissue expression data (list type).
- # Attention! All keys are lower case!
- ###########################################################################
-
- def get_symbol_values_pairs(self):
- id_list = [self.data[symbol].data_id for symbol in self.data]
-
- symbol_values_dict = {}
-
- if len(id_list) > 0:
- query = """SELECT TissueProbeSetXRef.Symbol, TissueProbeSetData.value
- FROM TissueProbeSetXRef, TissueProbeSetData
- WHERE TissueProbeSetData.Id IN {} and
- TissueProbeSetXRef.DataId = TissueProbeSetData.Id""".format(create_in_clause(id_list))
-
- results = g.db.execute(query).fetchall()
- for result in results:
- if result.Symbol.lower() not in symbol_values_dict:
- symbol_values_dict[result.Symbol.lower()] = [result.value]
- else:
- symbol_values_dict[result.Symbol.lower()].append(
- result.value)
-
- return symbol_values_dict
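
get_symbol_values_pairs above folds (Symbol, value) rows into a dict keyed by the lower-cased symbol, one list of expression values per symbol. A rough, self-contained sketch of that grouping step follows; the rows are made-up placeholders, not real TissueProbeSetData.

    from collections import defaultdict

    # placeholder (Symbol, value) rows standing in for the SQL result set
    rows = [("Shh", 8.1), ("SHH", 7.9), ("Trp53", 10.2)]

    symbol_values_dict = defaultdict(list)
    for symbol, value in rows:
        # keys are lower-cased, matching the convention noted in the deleted docblock
        symbol_values_dict[symbol.lower()].append(value)

    assert symbol_values_dict["shh"] == [8.1, 7.9]
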
diff --git a/gn3/base/species.py b/gn3/base/species.py
deleted file mode 100644
index 9fb08fb..0000000
--- a/gn3/base/species.py
+++ /dev/null
@@ -1,64 +0,0 @@
-
-# pylint: disable-all
-import collections
-from flask import g
-from dataclasses import dataclass
-
-class TheSpecies:
- def __init__(self, dataset=None, species_name=None):
- if species_name is not None:
- self.name = species_name
-
- self.chromosomes = Chromosomes(species=self.name)
-
- else:
- self.dataset = dataset
- self.chromosomes = Chromosomes(dataset=self.dataset)
-
-
-class Chromosomes:
- def __init__(self, dataset=None, species=None):
- self.chromosomes = collections.OrderedDict()
-
- if species is not None:
- query = """
- Select
- Chr_Length.Name, Chr_Length.OrderId, Length from Chr_Length, Species
- where
- Chr_Length.SpeciesId = Species.SpeciesId AND
- Species.Name = '%s'
- Order by OrderId
- """ % species.capitalize()
-
- else:
- self.dataset = dataset
-
- query = """
- Select
- Chr_Length.Name, Chr_Length.OrderId, Length from Chr_Length, InbredSet
- where
- Chr_Length.SpeciesId = InbredSet.SpeciesId AND
- InbredSet.Name = '%s'
- Order by OrderId
- """ % self.dataset.group.name
-
- # logger.sql(query)
-
- results = g.db.execute(query).fetchall()
-
- for item in results:
- self.chromosomes[item.OrderId] = IndChromosome(
- item.Name, item.Length)
-
-
-# @dataclass
-class IndChromosome:
- def __init__(self,name,length):
- self.name= name
- self.length = length
-
- @property
- def mb_length(self):
- """Chromosome length in megabases"""
- return self.length/ 1000000
-
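
IndChromosome above imports dataclass and carries a commented-out @dataclass marker; here is a sketch of the dataclass form it points toward, with the same fields and mb_length property. This is an illustration, not the code that was actually deleted.

    from dataclasses import dataclass

    @dataclass
    class IndChromosome:
        name: str
        length: int  # chromosome length in base pairs

        @property
        def mb_length(self):
            """Chromosome length in megabases"""
            return self.length / 1000000

    assert IndChromosome("1", 195_000_000).mb_length == 195.0
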
diff --git a/gn3/base/trait.py b/gn3/base/trait.py
deleted file mode 100644
index f4be61c..0000000
--- a/gn3/base/trait.py
+++ /dev/null
@@ -1,366 +0,0 @@
-
-# pylint: disable-all
-from flask import g
-from redis import Redis
-from gn3.utility.db_tools import escape
-from gn3.base.webqtlCaseData import webqtlCaseData
-
-
-def check_resource_availability(dataset, name=None):
-
- # todo add code for this
- # should probably work on this has to do with authentication
- return {'data': ['no-access', 'view'], 'metadata': ['no-access', 'view'], 'admin': ['not-admin']}
-
-
-def create_trait(**kw):
- # work on check resource availability deals with authentication
- assert bool(kw.get("dataset")) != bool(
- kw.get('dataset_name')), "Needs dataset ob. or name"
-
- assert bool(kw.get("name")), "Need trait name"
-
- if kw.get('dataset_name'):
- if kw.get('dataset_name') != "Temp":
- dataset = create_dataset(kw.get('dataset_name'))
- else:
- dataset = kw.get('dataset')
-
- if dataset.type == 'Publish':
- permissions = check_resource_availability(
- dataset, kw.get('name'))
- else:
- permissions = check_resource_availability(dataset)
-
- if "view" in permissions['data']:
- the_trait = GeneralTrait(**kw)
- if the_trait.dataset.type != "Temp":
- the_trait = retrieve_trait_info(
- the_trait,
- the_trait.dataset,
- get_qtl_info=kw.get('get_qtl_info'))
-
-
- return the_trait
-
- return None
-
-
-class GeneralTrait:
- def __init__(self, get_qtl_info=False, get_sample_info=True, **kw):
- assert bool(kw.get('dataset')) != bool(
- kw.get('dataset_name')), "Needs dataset ob. or name"
- # Trait ID, ProbeSet ID, Published ID, etc.
- self.name = kw.get('name')
- if kw.get('dataset_name'):
- if kw.get('dataset_name') == "Temp":
- temp_group = self.name.split("_")[2]
- self.dataset = create_dataset(
- dataset_name="Temp",
- dataset_type="Temp",
- group_name=temp_group)
-
- else:
- self.dataset = create_dataset(kw.get('dataset_name'))
-
- else:
- self.dataset = kw.get("dataset")
-
- self.cellid = kw.get('cellid')
- self.identification = kw.get('identification', 'un-named trait')
- self.haveinfo = kw.get('haveinfo', False)
- self.sequence = kw.get('sequence')
- self.data = kw.get('data', {})
- self.view = True
-
- # Sets defaults
- self.locus = None
- self.lrs = None
- self.pvalue = None
- self.mean = None
- self.additive = None
- self.num_overlap = None
- self.strand_probe = None
- self.symbol = None
- self.display_name = self.name
- self.LRS_score_repr = "N/A"
- self.LRS_location_repr = "N/A"
-
- if kw.get('fullname'):
- name2 = value.split("::")
- if len(name2) == 2:
- self.dataset, self.name = name2
-
- elif len(name2) == 3:
- self.dataset, self.name, self.cellid = name2
-
- # Todo: These two lines are necessary most of the time, but
- # perhaps not all of the time So we could add a simple if
- # statement to short-circuit this if necessary
- if get_sample_info is not False:
- self = retrieve_sample_data(self, self.dataset)
-
-
-def retrieve_sample_data(trait, dataset, samplelist=None):
- if samplelist is None:
- samplelist = []
-
- if dataset.type == "Temp":
- results = Redis.get(trait.name).split()
-
- else:
- results = dataset.retrieve_sample_data(trait.name)
-
- # Todo: is this necessary? If not remove
- trait.data.clear()
-
- if results:
- if dataset.type == "Temp":
- all_samples_ordered = dataset.group.all_samples_ordered()
- for i, item in enumerate(results):
- try:
- trait.data[all_samples_ordered[i]] = webqtlCaseData(
- all_samples_ordered[i], float(item))
-
- except Exception as e:
- pass
-
-
- else:
- for item in results:
- name, value, variance, num_cases, name2 = item
- if not samplelist or (samplelist and name in samplelist):
- trait.data[name] = webqtlCaseData(*item)
-
- return trait
-
-def retrieve_trait_info(trait, dataset, get_qtl_info=False):
- assert dataset, "Dataset doesn't exist"
-
- the_url = None
- # some code should be added added here
-
- try:
- response = requests.get(the_url).content
- trait_info = json.loads(response)
- except: # ZS: I'm assuming the trait is viewable if the try fails for some reason; it should never reach this point unless the user has privileges, since that's dealt with in create_trait
- if dataset.type == 'Publish':
- query = """
- SELECT
- PublishXRef.Id, InbredSet.InbredSetCode, Publication.PubMed_ID,
- CAST(Phenotype.Pre_publication_description AS BINARY),
- CAST(Phenotype.Post_publication_description AS BINARY),
- CAST(Phenotype.Original_description AS BINARY),
- CAST(Phenotype.Pre_publication_abbreviation AS BINARY),
- CAST(Phenotype.Post_publication_abbreviation AS BINARY), PublishXRef.mean,
- Phenotype.Lab_code, Phenotype.Submitter, Phenotype.Owner, Phenotype.Authorized_Users,
- CAST(Publication.Authors AS BINARY), CAST(Publication.Title AS BINARY), CAST(Publication.Abstract AS BINARY),
- CAST(Publication.Journal AS BINARY), Publication.Volume, Publication.Pages,
- Publication.Month, Publication.Year, PublishXRef.Sequence,
- Phenotype.Units, PublishXRef.comments
- FROM
- PublishXRef, Publication, Phenotype, PublishFreeze, InbredSet
- WHERE
- PublishXRef.Id = %s AND
- Phenotype.Id = PublishXRef.PhenotypeId AND
- Publication.Id = PublishXRef.PublicationId AND
- PublishXRef.InbredSetId = PublishFreeze.InbredSetId AND
- PublishXRef.InbredSetId = InbredSet.Id AND
- PublishFreeze.Id = %s
- """ % (trait.name, dataset.id)
-
- trait_info = g.db.execute(query).fetchone()
-
- # XZ, 05/08/2009: Xiaodong add this block to use ProbeSet.Id to find the probeset instead of just using ProbeSet.Name
- # XZ, 05/08/2009: to avoid the problem of same probeset name from different platforms.
- elif dataset.type == 'ProbeSet':
- display_fields_string = ', ProbeSet.'.join(dataset.display_fields)
- display_fields_string = 'ProbeSet.' + display_fields_string
- query = """
- SELECT %s
- FROM ProbeSet, ProbeSetFreeze, ProbeSetXRef
- WHERE
- ProbeSetXRef.ProbeSetFreezeId = ProbeSetFreeze.Id AND
- ProbeSetXRef.ProbeSetId = ProbeSet.Id AND
- ProbeSetFreeze.Name = '%s' AND
- ProbeSet.Name = '%s'
- """ % (escape(display_fields_string),
- escape(dataset.name),
- escape(str(trait.name)))
-
- trait_info = g.db.execute(query).fetchone()
- # XZ, 05/08/2009: We also should use Geno.Id to find marker instead of just using Geno.Name
- # to avoid the problem of same marker name from different species.
- elif dataset.type == 'Geno':
- display_fields_string = ',Geno.'.join(dataset.display_fields)
- display_fields_string = 'Geno.' + display_fields_string
- query = """
- SELECT %s
- FROM Geno, GenoFreeze, GenoXRef
- WHERE
- GenoXRef.GenoFreezeId = GenoFreeze.Id AND
- GenoXRef.GenoId = Geno.Id AND
- GenoFreeze.Name = '%s' AND
- Geno.Name = '%s'
- """ % (escape(display_fields_string),
- escape(dataset.name),
- escape(trait.name))
-
- trait_info = g.db.execute(query).fetchone()
- else: # Temp type
- query = """SELECT %s FROM %s WHERE Name = %s"""
-
- trait_info = g.db.execute(query,
- ','.join(dataset.display_fields),
- dataset.type, trait.name).fetchone()
-
- if trait_info:
- trait.haveinfo = True
- for i, field in enumerate(dataset.display_fields):
- holder = trait_info[i]
- if isinstance(holder, bytes):
- holder = holder.decode("utf-8", errors="ignore")
- setattr(trait, field, holder)
-
- if dataset.type == 'Publish':
- if trait.group_code:
- trait.display_name = trait.group_code + "_" + str(trait.name)
-
- trait.confidential = 0
- if trait.pre_publication_description and not trait.pubmed_id:
- trait.confidential = 1
-
- description = trait.post_publication_description
-
- # If the dataset is confidential and the user has access to confidential
- # phenotype traits, then display the pre-publication description instead
- # of the post-publication description
- trait.description_display = ""
- if not trait.pubmed_id:
- trait.abbreviation = trait.pre_publication_abbreviation
- trait.description_display = trait.pre_publication_description
- else:
- trait.abbreviation = trait.post_publication_abbreviation
- if description:
- trait.description_display = description.strip()
-
- if not trait.year.isdigit():
- trait.pubmed_text = "N/A"
- else:
- trait.pubmed_text = trait.year
-
- # moved to config
-
- PUBMEDLINK_URL = "http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Retrieve&db=PubMed&list_uids=%s&dopt=Abstract"
-
- if trait.pubmed_id:
- trait.pubmed_link = PUBMEDLINK_URL % trait.pubmed_id
-
- if dataset.type == 'ProbeSet' and dataset.group:
- description_string = trait.description
- target_string = trait.probe_target_description
-
- if str(description_string or "") != "" and description_string != 'None':
- description_display = description_string
- else:
- description_display = trait.symbol
-
- if (str(description_display or "") != "" and
- description_display != 'N/A' and
- str(target_string or "") != "" and target_string != 'None'):
- description_display = description_display + '; ' + target_string.strip()
-
- # Save it for the jinja2 template
- trait.description_display = description_display
-
- trait.location_repr = 'N/A'
- if trait.chr and trait.mb:
- trait.location_repr = 'Chr%s: %.6f' % (
- trait.chr, float(trait.mb))
-
- elif dataset.type == "Geno":
- trait.location_repr = 'N/A'
- if trait.chr and trait.mb:
- trait.location_repr = 'Chr%s: %.6f' % (
- trait.chr, float(trait.mb))
-
- if get_qtl_info:
- # LRS and its location
- trait.LRS_score_repr = "N/A"
- trait.LRS_location_repr = "N/A"
- trait.locus = trait.locus_chr = trait.locus_mb = trait.lrs = trait.pvalue = trait.additive = ""
- if dataset.type == 'ProbeSet' and not trait.cellid:
- trait.mean = ""
- query = """
- SELECT
- ProbeSetXRef.Locus, ProbeSetXRef.LRS, ProbeSetXRef.pValue, ProbeSetXRef.mean, ProbeSetXRef.additive
- FROM
- ProbeSetXRef, ProbeSet
- WHERE
- ProbeSetXRef.ProbeSetId = ProbeSet.Id AND
- ProbeSet.Name = "{}" AND
- ProbeSetXRef.ProbeSetFreezeId ={}
- """.format(trait.name, dataset.id)
-
- trait_qtl = g.db.execute(query).fetchone()
- if trait_qtl:
- trait.locus, trait.lrs, trait.pvalue, trait.mean, trait.additive = trait_qtl
- if trait.locus:
- query = """
- select Geno.Chr, Geno.Mb from Geno, Species
- where Species.Name = '{}' and
- Geno.Name = '{}' and
- Geno.SpeciesId = Species.Id
- """.format(dataset.group.species, trait.locus)
-
- result = g.db.execute(query).fetchone()
- if result:
- trait.locus_chr = result[0]
- trait.locus_mb = result[1]
- else:
- trait.locus = trait.locus_chr = trait.locus_mb = trait.additive = ""
- else:
- trait.locus = trait.locus_chr = trait.locus_mb = trait.additive = ""
-
- if dataset.type == 'Publish':
- query = """
- SELECT
- PublishXRef.Locus, PublishXRef.LRS, PublishXRef.additive
- FROM
- PublishXRef, PublishFreeze
- WHERE
- PublishXRef.Id = %s AND
- PublishXRef.InbredSetId = PublishFreeze.InbredSetId AND
- PublishFreeze.Id =%s
- """ % (trait.name, dataset.id)
-
- trait_qtl = g.db.execute(query).fetchone()
- if trait_qtl:
- trait.locus, trait.lrs, trait.additive = trait_qtl
- if trait.locus:
- query = """
- select Geno.Chr, Geno.Mb from Geno, Species
- where Species.Name = '{}' and
- Geno.Name = '{}' and
- Geno.SpeciesId = Species.Id
- """.format(dataset.group.species, trait.locus)
-
- result = g.db.execute(query).fetchone()
- if result:
- trait.locus_chr = result[0]
- trait.locus_mb = result[1]
- else:
- trait.locus = trait.locus_chr = trait.locus_mb = trait.additive = ""
- else:
- trait.locus = trait.locus_chr = trait.locus_mb = trait.additive = ""
- else:
- trait.locus = trait.lrs = trait.additive = ""
- if (dataset.type == 'Publish' or dataset.type == "ProbeSet") and str(trait.locus_chr or "") != "" and str(trait.locus_mb or "") != "":
- trait.LRS_location_repr = LRS_location_repr = 'Chr%s: %.6f' % (
- trait.locus_chr, float(trait.locus_mb))
- if str(trait.lrs or "") != "":
- trait.LRS_score_repr = LRS_score_repr = '%3.1f' % trait.lrs
- else:
- raise KeyError(repr(trait.name) +
- ' information is not found in the database.')
- return trait
diff --git a/gn3/base/webqtlCaseData.py b/gn3/base/webqtlCaseData.py
deleted file mode 100644
index 8395af8..0000000
--- a/gn3/base/webqtlCaseData.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright (C) University of Tennessee Health Science Center, Memphis, TN.
-#
-# This program is free software: you can redistribute it and/or modify it
-# under the terms of the GNU Affero General Public License
-# as published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU Affero General Public License for more details.
-#
-# This program is available from Source Forge: at GeneNetwork Project
-# (sourceforge.net/projects/genenetwork/).
-#
-# Contact Drs. Robert W. Williams and Xiaodong Zhou (2010)
-# at rwilliams@uthsc.edu and xzhou15@uthsc.edu
-#
-# This module is used by GeneNetwork project (www.genenetwork.org)
-#
-# Created by GeneNetwork Core Team 2010/08/10
-
-
-# uncomment below
-
-# from utility.logger import getLogger
-# logger = getLogger(__name__)
-
-# import utility.tools
-
-# utility.tools.show_settings()
-# pylint: disable-all
-
-class webqtlCaseData:
- """one case data in one trait"""
-
- def __init__(self, name, value=None, variance=None, num_cases=None, name2=None):
- self.name = name
- self.name2 = name2 # Other name (for traits like BXD65a)
- self.value = value # Trait Value
- self.variance = variance # Trait Variance
- self.num_cases = num_cases # Number of individuals/cases
- self.extra_attributes = None
- self.this_id = None # Set a sane default (can't be just "id" cause that's a reserved word)
- self.outlier = None # Not set to True/False until later
-
- def __repr__(self):
- case_data_string = "<webqtlCaseData> "
- if self.value is not None:
- case_data_string += "value=%2.3f" % self.value
- if self.variance is not None:
- case_data_string += " variance=%2.3f" % self.variance
- if self.num_cases:
- case_data_string += " ndata=%s" % self.num_cases
- if self.name:
- case_data_string += " name=%s" % self.name
- if self.name2:
- case_data_string += " name2=%s" % self.name2
- return case_data_string
-
- @property
- def class_outlier(self):
- """Template helper"""
- if self.outlier:
- return "outlier"
- return ""
-
- @property
- def display_value(self):
- if self.value is not None:
- return "%2.3f" % self.value
- return "x"
-
- @property
- def display_variance(self):
- if self.variance is not None:
- return "%2.3f" % self.variance
- return "x"
-
- @property
- def display_num_cases(self):
- if self.num_cases is not None:
- return "%s" % self.num_cases
-        return "x"
\ No newline at end of file
diff --git a/gn3/correlation/__init__.py b/gn3/correlation/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/gn3/correlation/__init__.py
+++ /dev/null
diff --git a/gn3/correlation/correlation_computations.py b/gn3/correlation/correlation_computations.py
deleted file mode 100644
index 6a3f2bb..0000000
--- a/gn3/correlation/correlation_computations.py
+++ /dev/null
@@ -1,32 +0,0 @@
-"""module contains code for any computation in correlation"""
-
-import json
-from .show_corr_results import CorrelationResults
-
-def compute_correlation(correlation_input_data,
- correlation_results=CorrelationResults):
- """function that does correlation .creates Correlation results instance
-
- correlation_input_data structure is a dict with
-
- {
- "trait_id":"valid trait id",
- "dataset":"",
- "sample_vals":{},
- "primary_samples":"",
- "corr_type":"",
- corr_dataset:"",
- "corr_return_results":"",
-
-
- }
-
- """
-
- corr_object = correlation_results(
- start_vars=correlation_input_data)
-
- corr_results = corr_object.do_correlation(start_vars=correlation_input_data)
- # possibility of file being so large cause of the not sure whether to return a file
-
- return corr_results
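
compute_correlation above only forwards start_vars to CorrelationResults, whose assertions (in show_corr_results.py below) define the required keys. An illustrative input dict is sketched here; the trait and dataset identifiers and all values are placeholders, not taken from this commit.

    import json

    correlation_input_data = {
        "trait_id": "1425627_at",                 # placeholder trait id
        "dataset": "HC_M2_0606_P",                # placeholder dataset name
        "sample_vals": json.dumps({"BXD1": "8.123", "BXD2": "x"}),  # "x" marks a missing value
        "corr_type": "sample",                    # "sample", "tissue" or "lit"
        "corr_sample_method": "pearson",          # "pearson", "spearman" or "bicor"
        "corr_dataset": "HC_M2_0606_P",
        "corr_return_results": 100,               # how many results to keep
    }

    # corr_results = compute_correlation(correlation_input_data)
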
diff --git a/gn3/correlation/correlation_functions.py b/gn3/correlation/correlation_functions.py
deleted file mode 100644
index be08c96..0000000
--- a/gn3/correlation/correlation_functions.py
+++ /dev/null
@@ -1,96 +0,0 @@
-
-"""
-# Copyright (C) University of Tennessee Health Science Center, Memphis, TN.
-#
-# This program is free software: you can redistribute it and/or modify it
-# under the terms of the GNU Affero General Public License
-# as published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU Affero General Public License for more details.
-#
-# This program is available from Source Forge: at GeneNetwork Project
-# (sourceforge.net/projects/genenetwork/).
-#
-# Contact Drs. Robert W. Williams and Xiaodong Zhou (2010)
-# at rwilliams@uthsc.edu and xzhou15@uthsc.edu
-#
-#
-#
-# This module is used by GeneNetwork project (www.genenetwork.org)
-#
-# Created by GeneNetwork Core Team 2010/08/10
-#
-# Last updated by NL 2011/03/23
-
-
-"""
-
-import rpy2.robjects
-from gn3.base.mrna_assay_tissue_data import MrnaAssayTissueData
-
-
-#####################################################################################
-# Input: primaryValue(list): one list of expression values of one probeSet,
-# targetValue(list): one list of expression values of one probeSet,
-# method(string): indicate correlation method ('pearson' or 'spearman')
-# Output: corr_result(list): first item is Correlation Value, second item is tissue number,
-# third item is PValue
-# Function: get correlation value,Tissue quantity ,p value result by using R;
-# Note : This function is special case since both primaryValue and targetValue are from
-# the same dataset. So the length of these two parameters is the same. They are pairs.
-# Also, in the datatable TissueProbeSetData, all Tissue values are loaded based on
-# the same tissue order
-#####################################################################################
-
-def cal_zero_order_corr_for_tiss(primaryValue=[], targetValue=[], method='pearson'):
- """refer above for info on the function"""
- # pylint: disable = E, W, R, C
-
- #nb disabled pylint until tests are written for this function
-
- R_primary = rpy2.robjects.FloatVector(list(range(len(primaryValue))))
- N = len(primaryValue)
- for i in range(len(primaryValue)):
- R_primary[i] = primaryValue[i]
-
- R_target = rpy2.robjects.FloatVector(list(range(len(targetValue))))
- for i in range(len(targetValue)):
- R_target[i] = targetValue[i]
-
- R_corr_test = rpy2.robjects.r['cor.test']
- if method == 'spearman':
- R_result = R_corr_test(R_primary, R_target, method='spearman')
- else:
- R_result = R_corr_test(R_primary, R_target)
-
- corr_result = []
- corr_result.append(R_result[3][0])
- corr_result.append(N)
- corr_result.append(R_result[2][0])
-
- return corr_result
-
-
-####################################################
-####################################################
-# input: cursor, symbolList (list), dataIdDict(Dict): key is symbol
-# output: SymbolValuePairDict(dictionary):one dictionary of Symbol and Value Pair.
-# key is symbol, value is one list of expression values of one probeSet.
-# function: wrapper function for getSymbolValuePairDict function
-# build gene symbol list if necessary, cut it into small lists if necessary,
-# then call getSymbolValuePairDict function and merge the results.
-###################################################
-#####################################################
-
-def get_trait_symbol_and_tissue_values(symbol_list=None):
- """function to get trait symbol and tissues values refer above"""
- tissue_data = MrnaAssayTissueData(gene_symbols=symbol_list)
-
- if len(tissue_data.gene_symbols) >= 1:
- return tissue_data.get_symbol_values_pairs()
-
- return None
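
cal_zero_order_corr_for_tiss above goes through rpy2 and R's cor.test; for comparison, a scipy-based sketch that returns the same [correlation, N, p-value] shape. This is an illustration only, not the code GeneNetwork ships.

    import scipy.stats

    def zero_order_corr(primary_values, target_values, method="pearson"):
        """Return [correlation, N, p-value], mirroring cal_zero_order_corr_for_tiss."""
        if method == "spearman":
            corr, p_value = scipy.stats.spearmanr(primary_values, target_values)
        else:
            corr, p_value = scipy.stats.pearsonr(primary_values, target_values)
        return [corr, len(primary_values), p_value]

    print(zero_order_corr([1.0, 2.0, 3.0, 4.0], [1.1, 1.9, 3.2, 3.8]))
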
diff --git a/gn3/correlation/correlation_utility.py b/gn3/correlation/correlation_utility.py
deleted file mode 100644
index 7583bd7..0000000
--- a/gn3/correlation/correlation_utility.py
+++ /dev/null
@@ -1,22 +0,0 @@
-"""module contains utility functions for correlation"""
-
-
-class AttributeSetter:
- """class for setting Attributes"""
-
- def __init__(self, trait_obj):
- for key, value in trait_obj.items():
- setattr(self, key, value)
-
- def __str__(self):
- return self.__class__.__name__
-
- def get_dict(self):
- """dummy function to get dict object"""
- return self.__dict__
-
-
-def get_genofile_samplelist(dataset):
- """mock function to get genofile samplelist"""
-
- return ["C57BL/6J"]
diff --git a/gn3/correlation/show_corr_results.py b/gn3/correlation/show_corr_results.py
deleted file mode 100644
index 55d8366..0000000
--- a/gn3/correlation/show_corr_results.py
+++ /dev/null
@@ -1,735 +0,0 @@
-"""module contains code for doing correlation"""
-
-import json
-import collections
-import numpy
-import scipy.stats
-import rpy2.robjects as ro
-from flask import g
-from gn3.base.data_set import create_dataset
-from gn3.utility.db_tools import escape
-from gn3.utility.helper_functions import get_species_dataset_trait
-from gn3.utility.corr_result_helpers import normalize_values
-from gn3.base.trait import create_trait
-from gn3.utility import hmac
-from . import correlation_functions
-
-
-class CorrelationResults:
- """class for computing correlation"""
- # pylint: disable=too-many-instance-attributes
- # pylint:disable=attribute-defined-outside-init
-
- def __init__(self, start_vars):
- self.assertion_for_start_vars(start_vars)
-
- @staticmethod
- def assertion_for_start_vars(start_vars):
- # pylint: disable = E, W, R, C
-
- # should better ways to assert the variables
- # example includes sample
- assert("corr_type" in start_vars)
- assert(isinstance(start_vars['corr_type'], str))
- # example includes pearson
- assert('corr_sample_method' in start_vars)
- assert('corr_dataset' in start_vars)
- # means the limit
- assert('corr_return_results' in start_vars)
-
- if "loc_chr" in start_vars:
- assert('min_loc_mb' in start_vars)
- assert('max_loc_mb' in start_vars)
-
- def get_formatted_corr_type(self):
- """method to formatt corr_types"""
- self.formatted_corr_type = ""
- if self.corr_type == "lit":
- self.formatted_corr_type += "Literature Correlation "
- elif self.corr_type == "tissue":
- self.formatted_corr_type += "Tissue Correlation "
- elif self.corr_type == "sample":
- self.formatted_corr_type += "Genetic Correlation "
-
- if self.corr_method == "pearson":
- self.formatted_corr_type += "(Pearson's r)"
- elif self.corr_method == "spearman":
- self.formatted_corr_type += "(Spearman's rho)"
- elif self.corr_method == "bicor":
- self.formatted_corr_type += "(Biweight r)"
-
- def process_samples(self, start_vars, sample_names, excluded_samples=None):
- """method to process samples"""
-
-
- if not excluded_samples:
- excluded_samples = ()
-
- sample_val_dict = json.loads(start_vars["sample_vals"])
- print(sample_val_dict)
- if sample_names is None:
- raise NotImplementedError
-
- for sample in sample_names:
- if sample not in excluded_samples:
- value = sample_val_dict[sample]
-
- if not value.strip().lower() == "x":
- self.sample_data[str(sample)] = float(value)
-
- def do_tissue_correlation_for_trait_list(self, tissue_dataset_id=1):
- """Given a list of correlation results (self.correlation_results),\
- gets the tissue correlation value for each"""
- # pylint: disable = E, W, R, C
-
- # Gets tissue expression values for the primary trait
- primary_trait_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(
- symbol_list=[self.this_trait.symbol])
-
- if self.this_trait.symbol.lower() in primary_trait_tissue_vals_dict:
- primary_trait_tissue_values = primary_trait_tissue_vals_dict[self.this_trait.symbol.lower(
- )]
- gene_symbol_list = [
- trait.symbol for trait in self.correlation_results if trait.symbol]
-
- corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(
- symbol_list=gene_symbol_list)
-
- for trait in self.correlation_results:
- if trait.symbol and trait.symbol.lower() in corr_result_tissue_vals_dict:
- this_trait_tissue_values = corr_result_tissue_vals_dict[trait.symbol.lower(
- )]
-
- result = correlation_functions.cal_zero_order_corr_for_tiss(primary_trait_tissue_values,
- this_trait_tissue_values,
- self.corr_method)
-
- trait.tissue_corr = result[0]
- trait.tissue_pvalue = result[2]
-
- def do_lit_correlation_for_trait_list(self):
- # pylint: disable = E, W, R, C
-
- input_trait_mouse_gene_id = self.convert_to_mouse_gene_id(
- self.dataset.group.species.lower(), self.this_trait.geneid)
-
- for trait in self.correlation_results:
-
- if trait.geneid:
- trait.mouse_gene_id = self.convert_to_mouse_gene_id(
- self.dataset.group.species.lower(), trait.geneid)
- else:
- trait.mouse_gene_id = None
-
- if trait.mouse_gene_id and str(trait.mouse_gene_id).find(";") == -1:
- result = g.db.execute(
- """SELECT value
- FROM LCorrRamin3
- WHERE GeneId1='%s' and
- GeneId2='%s'
- """ % (escape(str(trait.mouse_gene_id)), escape(str(input_trait_mouse_gene_id)))
- ).fetchone()
- if not result:
- result = g.db.execute("""SELECT value
- FROM LCorrRamin3
- WHERE GeneId2='%s' and
- GeneId1='%s'
- """ % (escape(str(trait.mouse_gene_id)), escape(str(input_trait_mouse_gene_id)))
- ).fetchone()
-
- if result:
- lit_corr = result.value
- trait.lit_corr = lit_corr
- else:
- trait.lit_corr = 0
- else:
- trait.lit_corr = 0
-
- def do_lit_correlation_for_all_traits(self):
- """method for lit_correlation for all traits"""
- # pylint: disable = E, W, R, C
- input_trait_mouse_gene_id = self.convert_to_mouse_gene_id(
- self.dataset.group.species.lower(), self.this_trait.geneid)
-
- lit_corr_data = {}
- for trait, gene_id in list(self.trait_geneid_dict.items()):
- mouse_gene_id = self.convert_to_mouse_gene_id(
- self.dataset.group.species.lower(), gene_id)
-
- if mouse_gene_id and str(mouse_gene_id).find(";") == -1:
- #print("gene_symbols:", input_trait_mouse_gene_id + " / " + mouse_gene_id)
- result = g.db.execute(
- """SELECT value
- FROM LCorrRamin3
- WHERE GeneId1='%s' and
- GeneId2='%s'
- """ % (escape(mouse_gene_id), escape(input_trait_mouse_gene_id))
- ).fetchone()
- if not result:
- result = g.db.execute("""SELECT value
- FROM LCorrRamin3
- WHERE GeneId2='%s' and
- GeneId1='%s'
- """ % (escape(mouse_gene_id), escape(input_trait_mouse_gene_id))
- ).fetchone()
- if result:
- #print("result:", result)
- lit_corr = result.value
- lit_corr_data[trait] = [gene_id, lit_corr]
- else:
- lit_corr_data[trait] = [gene_id, 0]
- else:
- lit_corr_data[trait] = [gene_id, 0]
-
- lit_corr_data = collections.OrderedDict(sorted(list(lit_corr_data.items()),
- key=lambda t: -abs(t[1][1])))
-
- return lit_corr_data
-
- def do_tissue_correlation_for_all_traits(self, tissue_dataset_id=1):
- # Gets tissue expression values for the primary trait
- # pylint: disable = E, W, R, C
- primary_trait_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(
- symbol_list=[self.this_trait.symbol])
-
- if self.this_trait.symbol.lower() in primary_trait_tissue_vals_dict:
- primary_trait_tissue_values = primary_trait_tissue_vals_dict[self.this_trait.symbol.lower(
- )]
-
- #print("trait_gene_symbols: ", pf(trait_gene_symbols.values()))
- corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(
- symbol_list=list(self.trait_symbol_dict.values()))
-
- #print("corr_result_tissue_vals: ", pf(corr_result_tissue_vals_dict))
-
- #print("trait_gene_symbols: ", pf(trait_gene_symbols))
-
- tissue_corr_data = {}
- for trait, symbol in list(self.trait_symbol_dict.items()):
- if symbol and symbol.lower() in corr_result_tissue_vals_dict:
- this_trait_tissue_values = corr_result_tissue_vals_dict[symbol.lower(
- )]
-
- result = correlation_functions.cal_zero_order_corr_for_tiss(primary_trait_tissue_values,
- this_trait_tissue_values,
- self.corr_method)
-
- tissue_corr_data[trait] = [symbol, result[0], result[2]]
-
- tissue_corr_data = collections.OrderedDict(sorted(list(tissue_corr_data.items()),
- key=lambda t: -abs(t[1][1])))
-
- return tissue_corr_data
-
- def get_sample_r_and_p_values(self, trait, target_samples):
- """Calculates the sample r (or rho) and p-value
-
- Given a primary trait and a target trait's sample values,
- calculates either the pearson r or spearman rho and the p-value
- using the corresponding scipy functions.
-
- """
- # pylint: disable = E, W, R, C
- self.this_trait_vals = []
- target_vals = []
-
- for index, sample in enumerate(self.target_dataset.samplelist):
- if sample in self.sample_data:
- sample_value = self.sample_data[sample]
- target_sample_value = target_samples[index]
- self.this_trait_vals.append(sample_value)
- target_vals.append(target_sample_value)
-
- self.this_trait_vals, target_vals, num_overlap = normalize_values(
- self.this_trait_vals, target_vals)
-
- if num_overlap > 5:
- # ZS: 2015 could add biweight correlation, see http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3465711/
- if self.corr_method == 'bicor':
- sample_r, sample_p = do_bicor(
- self.this_trait_vals, target_vals)
-
- elif self.corr_method == 'pearson':
- sample_r, sample_p = scipy.stats.pearsonr(
- self.this_trait_vals, target_vals)
-
- else:
- sample_r, sample_p = scipy.stats.spearmanr(
- self.this_trait_vals, target_vals)
-
- if not numpy.isnan(sample_r):
- self.correlation_data[trait] = [
- sample_r, sample_p, num_overlap]
-
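For reference, a minimal standalone sketch of what get_sample_r_and_p_values computes; the sample names and values are invented for illustration, only scipy is assumed, and the real method additionally requires more than five overlapping samples:

import scipy.stats

primary = {"BXD1": 9.1, "BXD2": 8.7, "BXD5": 10.2, "BXD6": 9.9}   # this trait, keyed by sample
target_samplelist = ["BXD1", "BXD2", "BXD5", "BXD6", "BXD8"]
target_values = [9.4, 8.9, 10.0, None, 9.5]                        # one target trait

# keep only overlapping, non-missing sample pairs
pairs = [(primary[s], target_values[i])
         for i, s in enumerate(target_samplelist)
         if s in primary and target_values[i] is not None]
xs, ys = zip(*pairs)
sample_r, sample_p = scipy.stats.pearsonr(xs, ys)                  # or scipy.stats.spearmanr(xs, ys)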
- def convert_to_mouse_gene_id(self, species=None, gene_id=None):
- """If the species is rat or human, translate the gene_id to the mouse geneid
-
- If there is no input gene_id or there's no corresponding mouse gene_id, return None
-
- """
- if not gene_id:
- return None
-
- mouse_gene_id = None
- if "species" == "mouse":
- mouse_gene_id = gene_id
-
- elif species == 'rat':
- query = """SELECT mouse
- FROM GeneIDXRef
- WHERE rat='%s'""" % escape(gene_id)
-
- result = g.db.execute(query).fetchone()
- if result is not None:
- mouse_gene_id = result.mouse
-
- elif species == "human":
-
- query = """SELECT mouse
- FROM GeneIDXRef
- WHERE human='%s'""" % escape(gene_id)
-
- result = g.db.execute(query).fetchone()
- if result is not None:
- mouse_gene_id = result.mouse
-
- return mouse_gene_id
-
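The GeneIDXRef lookups above build SQL by string interpolation; an equivalent parameterized sketch is shown here, assuming a plain DB-API cursor rather than the flask g.db wrapper (table and column names are taken from the query above):

def convert_rat_to_mouse_gene_id(cursor, rat_gene_id):
    # same lookup as the 'rat' branch above, but with driver-side parameter binding
    cursor.execute("SELECT mouse FROM GeneIDXRef WHERE rat = %s", (rat_gene_id,))
    row = cursor.fetchone()
    return row[0] if row else None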
- def do_correlation(self, start_vars, create_dataset=create_dataset,
- create_trait=create_trait,
- get_species_dataset_trait=get_species_dataset_trait):
- # pylint: disable = E, W, R, C
- # todo: refactor so start_vars is not passed around twice
- # this method replaces the original do_correlation and adds dependency injection
- # to enable testing
-
- # the code below works much as before but could be refactored further
- if start_vars["dataset"] == "Temp":
- self.dataset = create_dataset(
- dataset_name="Temp", dataset_type="Temp", group_name=start_vars['group'])
-
- self.trait_id = start_vars["trait_id"]
-
- self.this_trait = create_trait(dataset=self.dataset,
- name=self.trait_id,
- cellid=None)
-
- else:
-
- get_species_dataset_trait(self, start_vars)
-
- corr_samples_group = start_vars['corr_samples_group']
- self.sample_data = {}
- self.corr_type = start_vars['corr_type']
- self.corr_method = start_vars['corr_sample_method']
- self.min_expr = float(
- start_vars["min_expr"]) if start_vars["min_expr"] != "" else None
- self.p_range_lower = float(
- start_vars["p_range_lower"]) if start_vars["p_range_lower"] != "" else -1.0
- self.p_range_upper = float(
- start_vars["p_range_upper"]) if start_vars["p_range_upper"] != "" else 1.0
-
- if ("loc_chr" in start_vars and "min_loc_mb" in start_vars and "max_loc_mb" in start_vars):
- self.location_type = str(start_vars['location_type'])
- self.location_chr = str(start_vars['loc_chr'])
-
- try:
-
- # the code below is basically a temporary fix
- self.min_location_mb = int(start_vars['min_loc_mb'])
- self.max_location_mb = int(start_vars['max_loc_mb'])
- except Exception:
- self.min_location_mb = None
- self.max_location_mb = None
-
- else:
- self.location_type = self.location_chr = self.min_location_mb = self.max_location_mb = None
-
- self.get_formatted_corr_type()
-
- self.return_number = int(start_vars['corr_return_results'])
-
- primary_samples = self.dataset.group.samplelist
-
-
- # The two if statements below append samples to the sample list based upon whether the user
- # selected Primary Samples Only, Other Samples Only, or All Samples
-
- if self.dataset.group.parlist != None:
- primary_samples += self.dataset.group.parlist
-
- if self.dataset.group.f1list != None:
-
- primary_samples += self.dataset.group.f1list
-
- # If either BXD/whatever Only or All Samples, append all of that group's samplelist
-
- if corr_samples_group != 'samples_other':
-
- # print("primary samples are *****",primary_samples)
-
- self.process_samples(start_vars, primary_samples)
-
- if corr_samples_group != 'samples_primary':
- if corr_samples_group == 'samples_other':
- primary_samples = [x for x in primary_samples if x not in (
- self.dataset.group.parlist + self.dataset.group.f1list)]
-
- self.process_samples(start_vars, list(self.this_trait.data.keys()), primary_samples)
-
- self.target_dataset = create_dataset(start_vars['corr_dataset'])
- # retrieving the trait_data for the target dataset is what makes this method slow
- import time
-
- init_time = time.time()
- self.target_dataset.get_trait_data(list(self.sample_data.keys()))
-
- aft_time = time.time() - init_time
-
- self.header_fields = get_header_fields(
- self.target_dataset.type, self.corr_method)
-
- if self.target_dataset.type == "ProbeSet":
- self.filter_cols = [7, 6]
-
- elif self.target_dataset.type == "Publish":
- self.filter_cols = [6, 0]
-
- else:
- self.filter_cols = [4, 0]
-
- self.correlation_results = []
-
- self.correlation_data = {}
-
- if self.corr_type == "tissue":
- self.trait_symbol_dict = self.dataset.retrieve_genes("Symbol")
-
- tissue_corr_data = self.do_tissue_correlation_for_all_traits()
- if tissue_corr_data != None:
- for trait in list(tissue_corr_data.keys())[:self.return_number]:
- self.get_sample_r_and_p_values(
- trait, self.target_dataset.trait_data[trait])
- else:
- for trait, values in list(self.target_dataset.trait_data.items()):
- self.get_sample_r_and_p_values(trait, values)
-
- elif self.corr_type == "lit":
- self.trait_geneid_dict = self.dataset.retrieve_genes("GeneId")
- lit_corr_data = self.do_lit_correlation_for_all_traits()
-
- for trait in list(lit_corr_data.keys())[:self.return_number]:
- self.get_sample_r_and_p_values(
- trait, self.target_dataset.trait_data[trait])
-
- elif self.corr_type == "sample":
- for trait, values in list(self.target_dataset.trait_data.items()):
- self.get_sample_r_and_p_values(trait, values)
-
- self.correlation_data = collections.OrderedDict(sorted(list(self.correlation_data.items()),
- key=lambda t: -abs(t[1][0])))
-
- # ZS: Convert min/max chromosome to an int for the location range option
-
- """
- took roughly 20.79 seconds to compute all of the above; the majority of the time is spent
- retrieving the target dataset trait info
- """
-
- initial_time_chr = time.time()
-
- range_chr_as_int = None
- for order_id, chr_info in list(self.dataset.species.chromosomes.chromosomes.items()):
- if 'loc_chr' in start_vars:
- if chr_info.name == self.location_chr:
- range_chr_as_int = order_id
-
- for _trait_counter, trait in enumerate(list(self.correlation_data.keys())[:self.return_number]):
- trait_object = create_trait(
- dataset=self.target_dataset, name=trait, get_qtl_info=True, get_sample_info=False)
- if not trait_object:
- continue
-
- chr_as_int = 0
- for order_id, chr_info in list(self.dataset.species.chromosomes.chromosomes.items()):
- if self.location_type == "highest_lod":
- if chr_info.name == trait_object.locus_chr:
- chr_as_int = order_id
- else:
- if chr_info.name == trait_object.chr:
- chr_as_int = order_id
-
- if (float(self.correlation_data[trait][0]) >= self.p_range_lower and
- float(self.correlation_data[trait][0]) <= self.p_range_upper):
-
- if (self.target_dataset.type == "ProbeSet" or self.target_dataset.type == "Publish") and bool(trait_object.mean):
- if (self.min_expr != None) and (float(trait_object.mean) < self.min_expr):
- continue
-
- if range_chr_as_int != None and (chr_as_int != range_chr_as_int):
- continue
- if self.location_type == "highest_lod":
- if (self.min_location_mb != None) and (float(trait_object.locus_mb) < float(self.min_location_mb)):
- continue
- if (self.max_location_mb != None) and (float(trait_object.locus_mb) > float(self.max_location_mb)):
- continue
- else:
- if (self.min_location_mb != None) and (float(trait_object.mb) < float(self.min_location_mb)):
- continue
- if (self.max_location_mb != None) and (float(trait_object.mb) > float(self.max_location_mb)):
- continue
-
- (trait_object.sample_r,
- trait_object.sample_p,
- trait_object.num_overlap) = self.correlation_data[trait]
-
- # Set some sane defaults
- trait_object.tissue_corr = 0
- trait_object.tissue_pvalue = 0
- trait_object.lit_corr = 0
- if self.corr_type == "tissue" and tissue_corr_data != None:
- trait_object.tissue_corr = tissue_corr_data[trait][1]
- trait_object.tissue_pvalue = tissue_corr_data[trait][2]
- elif self.corr_type == "lit":
- trait_object.lit_corr = lit_corr_data[trait][1]
-
- self.correlation_results.append(trait_object)
-
- """
- the loop above scales with the number of traits requested, i.e. n = 100, 500, ..., t_size
- """
-
- if self.corr_type != "lit" and self.dataset.type == "ProbeSet" and self.target_dataset.type == "ProbeSet":
- self.do_lit_correlation_for_trait_list()
-
- if self.corr_type != "tissue" and self.dataset.type == "ProbeSet" and self.target_dataset.type == "ProbeSet":
- self.do_tissue_correlation_for_trait_list()
-
- self.json_results = generate_corr_json(
- self.correlation_results, self.this_trait, self.dataset, self.target_dataset)
-
- # note: returning self here (instead of json_results) would slot into genenetwork2 unchanged;
- # the commented-out dict below is an alternative return payload
- return self.json_results
- # return {
- # # "Results": "succeess",
- # # "return_number": self.return_number,
- # # "primary_samples": primary_samples,
- # # "time_taken": 12,
- # # "correlation_data": self.correlation_data,
- # "correlation_json": self.json_results
- # }
-
-
-def do_bicor(this_trait_vals, target_trait_vals):
- """Compute the biweight midcorrelation and its p-value via R/WGCNA's bicorAndPvalue"""
- # pylint: disable = E, W, R, C
- r_library = ro.r["library"] # Map the library function
- r_options = ro.r["options"] # Map the options function
-
- r_library("WGCNA")
- r_bicor = ro.r["bicorAndPvalue"] # Map the bicorAndPvalue function
-
- r_options(stringsAsFactors=False)
-
- this_vals = ro.Vector(this_trait_vals)
- target_vals = ro.Vector(target_trait_vals)
-
- the_r, the_p, _fisher_transform, _the_t, _n_obs = [
- numpy.asarray(x) for x in r_bicor(x=this_vals, y=target_vals)]
-
- return the_r, the_p
-
-
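If R/WGCNA is not available, the biweight midcorrelation can be approximated in pure numpy/scipy; the following is a rough sketch, not the WGCNA implementation (no maxPOutliers handling, and it assumes a non-zero MAD and |r| < 1 for the Student-t p-value):

import numpy
import scipy.stats

def bicor_sketch(x, y):
    x = numpy.asarray(x, dtype=float)
    y = numpy.asarray(y, dtype=float)

    def weigh(values):
        # Tukey biweight: downweight points far from the median
        med = numpy.median(values)
        mad = numpy.median(numpy.abs(values - med))
        u = (values - med) / (9 * mad)
        w = (1 - u ** 2) ** 2 * (numpy.abs(u) < 1)
        return (values - med) * w

    a, b = weigh(x), weigh(y)
    r = numpy.sum(a * b) / numpy.sqrt(numpy.sum(a ** 2) * numpy.sum(b ** 2))
    n = len(x)
    t = r * numpy.sqrt((n - 2) / (1 - r ** 2))
    p = 2 * scipy.stats.t.sf(abs(t), n - 2)   # two-sided p-value, Student-t approximation
    return r, p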
-def get_header_fields(data_type, corr_method):
- """function to get header fields when doing correlation"""
- if data_type == "ProbeSet":
- if corr_method == "spearman":
-
- header_fields = ['Index',
- 'Record',
- 'Symbol',
- 'Description',
- 'Location',
- 'Mean',
- 'Sample rho',
- 'N',
- 'Sample p(rho)',
- 'Lit rho',
- 'Tissue rho',
- 'Tissue p(rho)',
- 'Max LRS',
- 'Max LRS Location',
- 'Additive Effect']
-
- else:
- header_fields = ['Index',
- 'Record',
- 'Symbol',
- 'Description',
- 'Location',
- 'Mean',
- 'Sample r',
- 'N',
- 'Sample p(r)',
- 'Lit r',
- 'Tissue r',
- 'Tissue p(r)',
- 'Max LRS',
- 'Max LRS Location',
- 'Additive Effect']
-
- elif data_type == "Publish":
- if corr_method == "spearman":
-
- header_fields = ['Index',
- 'Record',
- 'Abbreviation',
- 'Description',
- 'Mean',
- 'Authors',
- 'Year',
- 'Sample rho',
- 'N',
- 'Sample p(rho)',
- 'Max LRS',
- 'Max LRS Location',
- 'Additive Effect']
-
- else:
- header_fields = ['Index',
- 'Record',
- 'Abbreviation',
- 'Description',
- 'Mean',
- 'Authors',
- 'Year',
- 'Sample r',
- 'N',
- 'Sample p(r)',
- 'Max LRS',
- 'Max LRS Location',
- 'Additive Effect']
-
- else:
- if corr_method == "spearman":
- header_fields = ['Index',
- 'ID',
- 'Location',
- 'Sample rho',
- 'N',
- 'Sample p(rho)']
-
- else:
- header_fields = ['Index',
- 'ID',
- 'Location',
- 'Sample r',
- 'N',
- 'Sample p(r)']
-
- return header_fields
-
-
-def generate_corr_json(corr_results, this_trait, dataset, target_dataset, for_api=False):
- """function to generate corr json data"""
- #todo refactor this function
- results_list = []
- for i, trait in enumerate(corr_results):
- if trait.view == False:
- continue
- results_dict = {}
- results_dict['index'] = i + 1
- results_dict['trait_id'] = trait.name
- results_dict['dataset'] = trait.dataset.name
- results_dict['hmac'] = hmac.data_hmac(
- '{}:{}'.format(trait.name, trait.dataset.name))
- if target_dataset.type == "ProbeSet":
- results_dict['symbol'] = trait.symbol
- results_dict['description'] = "N/A"
- results_dict['location'] = trait.location_repr
- results_dict['mean'] = "N/A"
- results_dict['additive'] = "N/A"
- if bool(trait.description_display):
- results_dict['description'] = trait.description_display
- if bool(trait.mean):
- results_dict['mean'] = f"{float(trait.mean):.3f}"
- try:
- results_dict['lod_score'] = f"{float(trait.LRS_score_repr) / 4.61:.1f}"
- except (AttributeError, TypeError, ValueError):
- results_dict['lod_score'] = "N/A"
- results_dict['lrs_location'] = trait.LRS_location_repr
- if bool(trait.additive):
- results_dict['additive'] = f"{float(trait.additive):.3f}"
- results_dict['sample_r'] = f"{float(trait.sample_r):.3f}"
- results_dict['num_overlap'] = trait.num_overlap
- results_dict['sample_p'] = f"{float(trait.sample_p):.3e}"
- results_dict['lit_corr'] = "--"
- results_dict['tissue_corr'] = "--"
- results_dict['tissue_pvalue'] = "--"
- if bool(trait.lit_corr):
- results_dict['lit_corr'] = f"{float(trait.lit_corr):.3f}"
- if bool(trait.tissue_corr):
- results_dict['tissue_corr'] = f"{float(trait.tissue_corr):.3f}"
- results_dict['tissue_pvalue'] = f"{float(trait.tissue_pvalue):.3e}"
- elif target_dataset.type == "Publish":
- results_dict['abbreviation_display'] = "N/A"
- results_dict['description'] = "N/A"
- results_dict['mean'] = "N/A"
- results_dict['authors_display'] = "N/A"
- results_dict['additive'] = "N/A"
- if for_api:
- results_dict['pubmed_id'] = "N/A"
- results_dict['year'] = "N/A"
- else:
- results_dict['pubmed_link'] = "N/A"
- results_dict['pubmed_text'] = "N/A"
-
- if bool(trait.abbreviation):
- results_dict['abbreviation_display'] = trait.abbreviation
- if bool(trait.description_display):
- results_dict['description'] = trait.description_display
- if bool(trait.mean):
- results_dict['mean'] = f"{float(trait.mean):.3f}"
- if bool(trait.authors):
- authors_list = trait.authors.split(',')
- if len(authors_list) > 6:
- results_dict['authors_display'] = ", ".join(
- authors_list[:6]) + ", et al."
- else:
- results_dict['authors_display'] = trait.authors
- if bool(trait.pubmed_id):
- if for_api:
- results_dict['pubmed_id'] = trait.pubmed_id
- results_dict['year'] = trait.pubmed_text
- else:
- results_dict['pubmed_link'] = trait.pubmed_link
- results_dict['pubmed_text'] = trait.pubmed_text
- try:
- results_dict['lod_score'] = f"{float(trait.LRS_score_repr) / 4.61:.1f}"
- except (AttributeError, TypeError, ValueError):
- results_dict['lod_score'] = "N/A"
- results_dict['lrs_location'] = trait.LRS_location_repr
- if bool(trait.additive):
- results_dict['additive'] = f"{float(trait.additive):.3f}"
- results_dict['sample_r'] = f"{float(trait.sample_r):.3f}"
- results_dict['num_overlap'] = trait.num_overlap
- results_dict['sample_p'] = f"{float(trait.sample_p):.3e}"
- else:
- results_dict['location'] = trait.location_repr
- results_dict['sample_r'] = f"{float(trait.sample_r):.3f}"
- results_dict['num_overlap'] = trait.num_overlap
- results_dict['sample_p'] = f"{float(trait.sample_p):.3e}"
-
- results_list.append(results_dict)
-
- return json.dumps(results_list)
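For illustration only, a sketch of the fields this function reads when the target dataset is a genotype (the final 'else' branch); every object here is a types.SimpleNamespace stand-in with invented values, and the module-level json and hmac imports of this file are assumed:

from types import SimpleNamespace

trait = SimpleNamespace(view=True, name="rs31443144",
                        dataset=SimpleNamespace(name="BXDGeno"),
                        location_repr="Chr1: 3.010",
                        sample_r=0.821, sample_p=1.3e-05, num_overlap=28)
this_trait = SimpleNamespace(name="1448753_at")
dataset = SimpleNamespace(name="HC_M2_0606_P")
target_dataset = SimpleNamespace(type="Geno", name="BXDGeno")

print(generate_corr_json([trait], this_trait, dataset, target_dataset))
# -> a JSON list with one entry holding index, trait_id, dataset, hmac,
#    location, sample_r, num_overlap and sample_p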
diff --git a/gn3/db/__init__.py b/gn3/db/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/gn3/db/__init__.py
+++ /dev/null
diff --git a/gn3/db/calls.py b/gn3/db/calls.py
deleted file mode 100644
index 547bccf..0000000
--- a/gn3/db/calls.py
+++ /dev/null
@@ -1,51 +0,0 @@
-"""module contains calls method for db"""
-import json
-import urllib
-from flask import g
-from gn3.utility.logger import getLogger
-logger = getLogger(__name__)
-# should probably put this in the environment/config
-USE_GN_SERVER = False
-LOG_SQL = False
-
-GN_SERVER_URL = None
-
-
-def fetch1(query, path=None, func=None):
- """fetch1 method"""
- if USE_GN_SERVER and path:
- result = gn_server(path)
- if func is not None:
- res2 = func(result)
-
- else:
- res2 = result
-
- if LOG_SQL:
- pass
- # should probably add logging here, e.g.:
- # logger.debug("Replaced SQL call", query)
-
- # logger.debug(path,res2)
- return res2
-
- return fetchone(query)
-
-
-def gn_server(path):
- """Return JSON record by calling GN_SERVER
-
- """
- res = urllib.request.urlopen(GN_SERVER_URL+path)
- rest = res.read()
- res2 = json.loads(rest)
- return res2
-
-
-def fetchone(query):
- """method to fetchone item from db"""
- def helper(query):
- res = g.db.execute(query)
- return res.fetchone()
-
- return logger.sql(query, helper)
diff --git a/gn3/db/webqtlDatabaseFunction.py b/gn3/db/webqtlDatabaseFunction.py
deleted file mode 100644
index 9e9982b..0000000
--- a/gn3/db/webqtlDatabaseFunction.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""
-# Copyright (C) University of Tennessee Health Science Center, Memphis, TN.
-#
-# This program is free software: you can redistribute it and/or modify it
-# under the terms of the GNU Affero General Public License
-# as published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU Affero General Public License for more details.
-#
-# This program is available from Source Forge: at GeneNetwork Project
-# (sourceforge.net/projects/genenetwork/).
-#
-# Contact Drs. Robert W. Williams and Xiaodong Zhou (2010)
-# at rwilliams@uthsc.edu and xzhou15@uthsc.edu
-#
-#
-#
-# This module is used by GeneNetwork project (www.genenetwork.org)
-"""
-
-from gn3.db.calls import fetch1
-
-from gn3.utility.logger import getLogger
-logger = getLogger(__name__)
-
-###########################################################################
-# helpers to retrieve species information for a group,
-# via GN_SERVER or directly from the database
-###########################################################################
-
-
-def retrieve_species(group):
- """Get the species of a group (e.g. returns string "mouse" on "BXD"
-
- """
- result = fetch1("select Species.Name from Species, InbredSet where InbredSet.Name = '%s' and InbredSet.SpeciesId = Species.Id" % (
- group), "/cross/"+group+".json", lambda r: (r["species"],))[0]
- # logger.debug("retrieve_species result:", result)
- return result
-
-
-def retrieve_species_id(group):
- """retrieve species id method"""
-
- result = fetch1("select SpeciesId from InbredSet where Name = '%s'" % (
- group), "/cross/"+group+".json", lambda r: (r["species_id"],))[0]
- logger.debug("retrieve_species_id result:", result)
- return result
diff --git a/gn3/utility/__init__.py b/gn3/utility/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/gn3/utility/__init__.py
+++ /dev/null
diff --git a/gn3/utility/bunch.py b/gn3/utility/bunch.py
deleted file mode 100644
index c1fd907..0000000
--- a/gn3/utility/bunch.py
+++ /dev/null
@@ -1,16 +0,0 @@
-"""module contains Bunch class a dictionary like with object notation """
-
-from pprint import pformat as pf
-
-
-class Bunch:
- """Like a dictionary but using object notation"""
-
- def __init__(self, **kw):
- self.__dict__ = kw
-
- def __repr__(self):
- return pf(self.__dict__)
-
- def __str__(self):
- return self.__class__.__name__
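A quick usage sketch of Bunch (the values are invented):

group = Bunch(name="BXD", parlist=["C57BL/6J", "DBA/2J"], f1list=["B6D2F1", "D2B6F1"])
group.name          # -> 'BXD'
print(repr(group))  # pretty-printed dict of the keyword arguments
print(group)        # -> 'Bunch' (the class name, per __str__)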
diff --git a/gn3/utility/chunks.py b/gn3/utility/chunks.py
deleted file mode 100644
index fa27a39..0000000
--- a/gn3/utility/chunks.py
+++ /dev/null
@@ -1,32 +0,0 @@
-"""module for chunks functions"""
-
-import math
-
-
-def divide_into_chunks(the_list, number_chunks):
- """Divides a list into approximately number_chunks smaller lists
-
- >>> divide_into_chunks([1, 2, 7, 3, 22, 8, 5, 22, 333], 3)
- [[1, 2, 7], [3, 22, 8], [5, 22, 333]]
- >>> divide_into_chunks([1, 2, 7, 3, 22, 8, 5, 22, 333], 4)
- [[1, 2, 7], [3, 22, 8], [5, 22, 333]]
- >>> divide_into_chunks([1, 2, 7, 3, 22, 8, 5, 22, 333], 5)
- [[1, 2], [7, 3], [22, 8], [5, 22], [333]]
- >>>
-
- """
- length = len(the_list)
-
- if length == 0:
- return [[]]
-
- if length <= number_chunks:
- number_chunks = length
-
- chunksize = int(math.ceil(length / number_chunks))
-
- chunks = []
- for counter in range(0, length, chunksize):
- chunks.append(the_list[counter:counter+chunksize])
-
- return chunks
diff --git a/gn3/utility/corr_result_helpers.py b/gn3/utility/corr_result_helpers.py
deleted file mode 100644
index a68308e..0000000
--- a/gn3/utility/corr_result_helpers.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""module contains helper function for corr results"""
-
-#pylint:disable=C0103
-#above disable snake_case for variable tod refactor
-def normalize_values(a_values, b_values):
- """
- Trim two lists of values to contain only the values they both share
-
- Given two lists of sample values, trim each list so that it contains
- only the samples that contain a value in both lists. Also returns
- the number of such samples.
-
- >>> normalize_values([2.3, None, None, 3.2, 4.1, 5], [3.4, 7.2, 1.3, None, 6.2, 4.1])
- ([2.3, 4.1, 5], [3.4, 6.2, 4.1], 3)
-
- """
- a_new = []
- b_new = []
- for a, b in zip(a_values, b_values):
- if a is not None and b is not None:
- a_new.append(a)
- b_new.append(b)
- return a_new, b_new, len(a_new)
-
-
-def common_keys(a_samples, b_samples):
- """
- >>> a = dict(BXD1 = 9.113, BXD2 = 9.825, BXD14 = 8.985, BXD15 = 9.300)
- >>> b = dict(BXD1 = 9.723, BXD3 = 9.825, BXD14 = 9.124, BXD16 = 9.300)
- >>> sorted(common_keys(a, b))
- ['BXD1', 'BXD14']
- """
- return set(a_samples.keys()).intersection(set(b_samples.keys()))
-
-
-def normalize_values_with_samples(a_samples, b_samples):
- """function to normalize values with samples"""
- common_samples = common_keys(a_samples, b_samples)
- a_new = {}
- b_new = {}
- for sample in common_samples:
- a_new[sample] = a_samples[sample]
- b_new[sample] = b_samples[sample]
-
- return a_new, b_new, len(a_new)
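normalize_values_with_samples has no doctest above, so a usage sketch with invented values:

a = {"BXD1": 9.11, "BXD2": 9.82, "BXD14": 8.98}
b = {"BXD1": 9.72, "BXD3": 9.82, "BXD14": 9.12}
a_new, b_new, n = normalize_values_with_samples(a, b)
# a_new == {"BXD1": 9.11, "BXD14": 8.98}
# b_new == {"BXD1": 9.72, "BXD14": 9.12}
# n == 2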
diff --git a/gn3/utility/db_tools.py b/gn3/utility/db_tools.py
deleted file mode 100644
index 446acda..0000000
--- a/gn3/utility/db_tools.py
+++ /dev/null
@@ -1,19 +0,0 @@
-"""module for db_tools"""
-from MySQLdb import escape_string as escape_
-
-
-def create_in_clause(items):
- """Create an in clause for mysql"""
- in_clause = ', '.join("'{}'".format(x) for x in mescape(*items))
- in_clause = '( {} )'.format(in_clause)
- return in_clause
-
-
-def mescape(*items):
- """Multiple escape"""
- return [escape_(str(item)).decode('utf8') for item in items]
-
-
-def escape(string_):
- """escape function"""
- return escape_(string_).decode('utf8')
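A usage sketch (requires MySQLdb/mysqlclient, since escape_string comes from it; the table and column names are illustrative):

samples = ["BXD1", "BXD2", "BXD5"]
clause = create_in_clause(samples)          # -> "( 'BXD1', 'BXD2', 'BXD5' )"
query = "SELECT Name, Id FROM Strain WHERE Name IN " + clause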
diff --git a/gn3/utility/get_group_samplelists.py b/gn3/utility/get_group_samplelists.py
deleted file mode 100644
index 8fb322a..0000000
--- a/gn3/utility/get_group_samplelists.py
+++ /dev/null
@@ -1,47 +0,0 @@
-
-"""module for group samplelist"""
-import gzip
-import os
-
-# todo: close the files after opening (use context managers)
-def get_samplelist(file_type, geno_file):
- """get samplelist function"""
- if file_type == "geno":
- return get_samplelist_from_geno(geno_file)
- elif file_type == "plink":
- return get_samplelist_from_plink(geno_file)
-
-def get_samplelist_from_geno(genofilename):
- """get samplelist from a .geno file (or its gzipped version)"""
- if os.path.isfile(genofilename + '.gz'):
- genofilename += '.gz'
- genofile = gzip.open(genofilename)
- else:
- genofile = open(genofilename)
-
- for line in genofile:
- line = line.strip()
- if not line:
- continue
- if line.startswith(("#", "@")):
- continue
- break
-
- headers = line.split("\t")
-
- if headers[3] == "Mb":
- samplelist = headers[4:]
- else:
- samplelist = headers[3:]
- return samplelist
-
-
-
-def get_samplelist_from_plink(genofilename):
- """get samplelist from plink"""
- genofile = open(genofilename)
-
- samplelist = []
- for line in genofile:
- line = line.split(" ")
- samplelist.append(line[1])
-
- return samplelist
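A self-contained sketch of the .geno header layout this parser expects (the file contents are invented for illustration):

import os
import tempfile

content = ("# comment lines and @-metadata lines are skipped\n"
           "@type:riset\n"
           "Chr\tLocus\tcM\tMb\tBXD1\tBXD2\n"
           "1\trs31443144\t1.50\t3.010\tB\tD\n")
with tempfile.NamedTemporaryFile("w", suffix=".geno", delete=False) as tmp:
    tmp.write(content)
print(get_samplelist("geno", tmp.name))     # -> ['BXD1', 'BXD2']
os.remove(tmp.name)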
diff --git a/gn3/utility/helper_functions.py b/gn3/utility/helper_functions.py
deleted file mode 100644
index f5a8b80..0000000
--- a/gn3/utility/helper_functions.py
+++ /dev/null
@@ -1,24 +0,0 @@
-"""module contains general helper functions """
-from gn3.base.data_set import create_dataset
-from gn3.base.trait import create_trait
-from gn3.base.species import TheSpecies
-
-
-def get_species_dataset_trait(self, start_vars):
- """function to get species dataset and trait"""
- if "temp_trait" in list(start_vars.keys()):
- if start_vars['temp_trait'] == "True":
- self.dataset = create_dataset(
- dataset_name="Temp", dataset_type="Temp", group_name=start_vars['group'])
-
- else:
- self.dataset = create_dataset(start_vars['dataset'])
-
- else:
- self.dataset = create_dataset(start_vars['dataset'])
- self.species = TheSpecies(dataset=self.dataset)
-
- self.this_trait = create_trait(dataset=self.dataset,
- name=start_vars['trait_id'],
- cellid=None,
- get_qtl_info=True)
diff --git a/gn3/utility/hmac.py b/gn3/utility/hmac.py
deleted file mode 100644
index eb39e59..0000000
--- a/gn3/utility/hmac.py
+++ /dev/null
@@ -1,50 +0,0 @@
-"""module for hmac """
-
-# pylint: disable-all
-import hmac
-import hashlib
-
-from flask import url_for
-
-# todo: work on this file
-
-# from main import app
-
-
-def hmac_creation(stringy):
- """Helper function to create the actual hmac"""
-
- # secret = app.config['SECRET_HMAC_CODE']
- # put in config
- secret = "my secret"
- hmaced = hmac.new(bytearray(secret, "latin-1"),
- bytearray(stringy, "utf-8"),
- hashlib.sha1)
- hm = hmaced.hexdigest()
- # ZS: Leaving the below comment here to ask Pjotr about
- # "Conventional wisdom is that you don't lose much in terms of security if you throw away up to half of the output."
- # http://www.w3.org/QA/2009/07/hmac_truncation_in_xml_signatu.html
- hm = hm[:20]
- return hm
-
-
-def data_hmac(stringy):
- """Takes arbitrary data string and appends :hmac so we know data hasn't been tampered with"""
- return stringy + ":" + hmac_creation(stringy)
-
-
-def url_for_hmac(endpoint, **values):
- """Like url_for but adds an hmac at the end to insure the url hasn't been tampered with"""
-
- url = url_for(endpoint, **values)
-
- hm = hmac_creation(url)
- if '?' in url:
- combiner = "&"
- else:
- combiner = "?"
- return url + combiner + "hm=" + hm
-
-
-
-# todo
-# app.jinja_env.globals.update(url_for_hmac=url_for_hmac,
-# data_hmac=data_hmac)
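A usage sketch of the helpers above (the hard-coded placeholder secret is used, so the exact digest is not meaningful):

token = data_hmac("1448753_at:HC_M2_0606_P")
# token == "1448753_at:HC_M2_0606_P:" followed by a 20-character hex digest
payload, received = token.rsplit(":", 1)
assert hmac.compare_digest(received, hmac_creation(payload))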
diff --git a/gn3/utility/logger.py b/gn3/utility/logger.py
deleted file mode 100644
index 4245a02..0000000
--- a/gn3/utility/logger.py
+++ /dev/null
@@ -1,163 +0,0 @@
-"""
-# GeneNetwork logger
-#
-# The standard python logging module is very good. This logger adds a
-# few facilities on top of that. Main one being that it picks up
-# settings for log levels (global and by module) and (potentially)
-# offers some fine grained log levels for the standard levels.
-#
-# All behaviour is defined here. Global settings (defined in
-# default_settings.py).
-#
-# To use logging and settings put this at the top of a module:
-#
-# import utility.logger
-# logger = utility.logger.getLogger(__name__ )
-#
-# To override global behaviour set the LOG_LEVEL in default_settings.py
-# or use an environment variable, e.g.
-#
-# env LOG_LEVEL=INFO ./bin/genenetwork2
-#
-# To override log level for a module replace that with, for example,
-#
-# import logging
-# import utility.logger
-# logger = utility.logger.getLogger(__name__,level=logging.DEBUG)
-#
-# We'll add more overrides soon.
-"""
-# todo incomplete file
-
-# pylint: disable-all
-import logging
-import datetime
-from inspect import isfunction
-from inspect import stack
-
-from pprint import pformat as pf
-
-
-# from utility.tools import LOG_LEVEL, LOG_LEVEL_DEBUG, LOG_SQL
-
-LOG_SQL = True
-# stand-in default until LOG_LEVEL_DEBUG is read from the settings module (used by debugf below)
-LOG_LEVEL_DEBUG = 5
-
-
-class GNLogger:
- """A logger class with some additional functionality, such as
- multiple parameter logging, SQL logging, timing, colors, and lazy
- functions.
-
- """
-
- def __init__(self, name):
- self.logger = logging.getLogger(name)
-
- def setLevel(self, value):
- """Set the undelying log level"""
- self.logger.setLevel(value)
-
- def debug(self, *args):
- """Call logging.debug for multiple args. Use (lazy) debugf and
-level=num to filter on LOG_LEVEL_DEBUG.
-
- """
- self.collect(self.logger.debug, *args)
-
- def debug20(self, *args):
- """Call logging.debug for multiple args. Use level=num to filter on
-LOG_LEVEL_DEBUG (NYI).
-
- """
- if self.logger.getEffectiveLevel() < 20:
- self.collect(self.logger.debug, *args)
-
- def info(self, *args):
- """Call logging.info for multiple args"""
- self.collect(self.logger.info, *args)
-
- def warning(self, *args):
- """Call logging.warning for multiple args"""
- self.collect(self.logger.warning, *args)
- # self.logger.warning(self.collect(*args))
-
- def error(self, *args):
- """Call logging.error for multiple args"""
- now = datetime.datetime.utcnow()
- time_str = now.strftime('%H:%M:%S UTC %Y%m%d')
- args_with_time = [time_str] + list(args)
- self.collect(self.logger.error, *args_with_time)
-
- def infof(self, *args):
- """Call logging.info for multiple args lazily"""
- # only evaluate function when logging
- if self.logger.getEffectiveLevel() < 30:
- self.collectf(self.logger.info, *args)
-
- def debugf(self, level=0, *args):
- """Call logging.debug for multiple args lazily and handle
- LOG_LEVEL_DEBUG correctly
-
- """
- # only evaluate function when logging
- if level <= LOG_LEVEL_DEBUG:
- if self.logger.getEffectiveLevel() < 20:
- self.collectf(self.logger.debug, *args)
-
- def sql(self, sqlcommand, fun=None):
- """Log SQL command, optionally invoking a timed fun"""
- if LOG_SQL:
- caller = stack()[1][3]
- if caller in ['fetchone', 'fetch1', 'fetchall']:
- caller = stack()[2][3]
- self.info(caller, sqlcommand)
- if fun:
- result = fun(sqlcommand)
- if LOG_SQL:
- self.info(result)
- return result
-
- def collect(self, fun, *args):
- """Collect arguments and use fun to output"""
- out = "."+stack()[2][3]
- for a in args:
- if len(out) > 1:
- out += ": "
- if isinstance(a, str):
- out = out + a
- else:
- out = out + pf(a, width=160)
- fun(out)
-
- def collectf(self, fun, *args):
- """Collect arguments and use fun to output one by one"""
- out = "."+stack()[2][3]
- for a in args:
- if len(out) > 1:
- out += ": "
- if isfunction(a):
- out += a()
- else:
- if isinstance(a, str):
- out = out + a
- else:
- out = out + pf(a, width=160)
- fun(out)
-
-# Get the module logger. You can override log levels at the
-# module level
-
-
-def getLogger(name, level=None):
- """method to get logger"""
- gnlogger = GNLogger(name)
- _logger = gnlogger.logger
-
- # if level:
- # logger.setLevel(level)
- # else:
- # logger.setLevel(LOG_LEVEL)
-
- # logger.info("Log level of "+name+" set to "+logging.getLevelName(logger.getEffectiveLevel()))
- return gnlogger
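A usage sketch (assumes the standard logging machinery has been configured elsewhere, e.g. with logging.basicConfig):

import logging

logging.basicConfig(level=logging.INFO)
logger = getLogger(__name__)
logger.info("fetching trait", {"trait_id": "1448753_at"})   # multiple args are collected into one message
logger.sql("SELECT 1", lambda query: query)                  # logs the query and, with LOG_SQL, its result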
diff --git a/gn3/utility/species.py b/gn3/utility/species.py
deleted file mode 100644
index 0140d41..0000000
--- a/gn3/utility/species.py
+++ /dev/null
@@ -1,71 +0,0 @@
-"""module contains species and chromosomes classes"""
-import collections
-
-from flask import g
-
-
-from gn3.utility.logger import getLogger
-logger = getLogger(__name__)
-
- # pylint: disable=too-few-public-methods
- # intentionally disabled check for few public methods
-
-class TheSpecies:
- """class for Species"""
-
- def __init__(self, dataset=None, species_name=None):
- if species_name is not None:
- self.name = species_name
- self.chromosomes = Chromosomes(species=self.name)
- else:
- self.dataset = dataset
- self.chromosomes = Chromosomes(dataset=self.dataset)
-
-
-
-class IndChromosome:
- """class for IndChromosome"""
-
- def __init__(self, name, length):
- self.name = name
- self.length = length
-
- @property
- def mb_length(self):
- """Chromosome length in megabases"""
- return self.length / 1000000
-
-
-
-
-class Chromosomes:
- """class for Chromosomes"""
-
- def __init__(self, dataset=None, species=None):
- self.chromosomes = collections.OrderedDict()
- if species is not None:
- query = """
- Select
- Chr_Length.Name, Chr_Length.OrderId, Length from Chr_Length, Species
- where
- Chr_Length.SpeciesId = Species.SpeciesId AND
- Species.Name = '%s'
- Order by OrderId
- """ % species.capitalize()
- else:
- self.dataset = dataset
-
- query = """
- Select
- Chr_Length.Name, Chr_Length.OrderId, Length from Chr_Length, InbredSet
- where
- Chr_Length.SpeciesId = InbredSet.SpeciesId AND
- InbredSet.Name = '%s'
- Order by OrderId
- """ % self.dataset.group.name
- logger.sql(query)
- results = g.db.execute(query).fetchall()
-
- for item in results:
- self.chromosomes[item.OrderId] = IndChromosome(
- item.Name, item.Length)
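TheSpecies and Chromosomes need a database/app context, but IndChromosome can be sketched on its own (the length is illustrative):

chr1 = IndChromosome(name="1", length=195471971)
chr1.mb_length   # -> 195.471971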
diff --git a/gn3/utility/tools.py b/gn3/utility/tools.py
deleted file mode 100644
index 85df9f6..0000000
--- a/gn3/utility/tools.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""module contains general tools forgenenetwork"""
-
-import os
-
-from default_settings import GENENETWORK_FILES
-
-
-def valid_file(file_name):
- """check if file is valid"""
- if os.path.isfile(file_name):
- return file_name
- return None
-
-
-def valid_path(dir_name):
- """check if path is valid"""
- if os.path.isdir(dir_name):
- return dir_name
- return None
-
-
-def locate_ignore_error(name, subdir=None):
- """
- Locate a static flat file in the GENENETWORK_FILES environment.
-
- This function does not throw an error when the file is not found
- but returns None.
- """
- base = GENENETWORK_FILES
- if subdir:
- base = base+"/"+subdir
- if valid_path(base):
- lookfor = base + "/" + name
- if valid_file(lookfor):
- return lookfor
-
- return None
diff --git a/gn3/utility/webqtlUtil.py b/gn3/utility/webqtlUtil.py
deleted file mode 100644
index 1c76410..0000000
--- a/gn3/utility/webqtlUtil.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""
-# Copyright (C) University of Tennessee Health Science Center, Memphis, TN.
-#
-# This program is free software: you can redistribute it and/or modify it
-# under the terms of the GNU Affero General Public License
-# as published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU Affero General Public License for more details.
-#
-# This program is available from Source Forge: at GeneNetwork Project
-# (sourceforge.net/projects/genenetwork/).
-#
-# Contact Drs. Robert W. Williams and Xiaodong Zhou (2010)
-# at rwilliams@uthsc.edu and xzhou15@uthsc.edu
-#
-#
-#
-# This module is used by GeneNetwork project (www.genenetwork.org)
-#
-# Created by GeneNetwork Core Team 2010/08/10
-#
-# Last updated by GeneNetwork Core Team 2010/10/20
-
-# from base import webqtlConfig
-
-# NL, 07/27/2010. moved from webqtlForm.py
-# Dict of Parents and F1 information, in the order [F1, reciprocal F1, maternal strain, paternal strain]
-
-"""
-ParInfo = {
- 'BXH': ['BHF1', 'HBF1', 'C57BL/6J', 'C3H/HeJ'],
- 'AKXD': ['AKF1', 'KAF1', 'AKR/J', 'DBA/2J'],
- 'BXD': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'],
- 'C57BL-6JxC57BL-6NJF2': ['', '', 'C57BL/6J', 'C57BL/6NJ'],
- 'BXD300': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'],
- 'B6BTBRF2': ['B6BTBRF1', 'BTBRB6F1', 'C57BL/6J', 'BTBRT<+>tf/J'],
- 'BHHBF2': ['B6HF2', 'HB6F2', 'C57BL/6J', 'C3H/HeJ'],
- 'BHF2': ['B6HF2', 'HB6F2', 'C57BL/6J', 'C3H/HeJ'],
- 'B6D2F2': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'],
- 'BDF2-1999': ['B6D2F2', 'D2B6F2', 'C57BL/6J', 'DBA/2J'],
- 'BDF2-2005': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'],
- 'CTB6F2': ['CTB6F2', 'B6CTF2', 'C57BL/6J', 'Castaneous'],
- 'CXB': ['CBF1', 'BCF1', 'C57BL/6ByJ', 'BALB/cByJ'],
- 'AXBXA': ['ABF1', 'BAF1', 'C57BL/6J', 'A/J'],
- 'AXB': ['ABF1', 'BAF1', 'C57BL/6J', 'A/J'],
- 'BXA': ['BAF1', 'ABF1', 'C57BL/6J', 'A/J'],
- 'LXS': ['LSF1', 'SLF1', 'ISS', 'ILS'],
- 'HXBBXH': ['SHR_BNF1', 'BN_SHRF1', 'BN-Lx/Cub', 'SHR/OlaIpcv'],
- 'BayXSha': ['BayXShaF1', 'ShaXBayF1', 'Bay-0', 'Shahdara'],
- 'ColXBur': ['ColXBurF1', 'BurXColF1', 'Col-0', 'Bur-0'],
- 'ColXCvi': ['ColXCviF1', 'CviXColF1', 'Col-0', 'Cvi'],
- 'SXM': ['SMF1', 'MSF1', 'Steptoe', 'Morex'],
- 'HRDP': ['SHR_BNF1', 'BN_SHRF1', 'BN-Lx/Cub', 'SHR/OlaIpcv']
-}
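A lookup sketch for the table above; the four entries per group appear to be [F1, reciprocal F1, maternal strain, paternal strain]:

f1, f1_reciprocal, maternal, paternal = ParInfo["BXD"]
# -> ('B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J')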
-
-
-def has_access_to_confidentail_phenotype_trait(privilege, username, authorized_users):
- """function to access to confidential phenotype Traits further implementation needed"""
- access_to_confidential_phenotype_trait = 0
-
- results = (privilege, username, authorized_users)
- return access_to_confidential_phenotype_trait