path: root/gn3/correlation
author    Alexander Kabui    2021-03-13 13:04:33 +0300
committer GitHub             2021-03-13 13:04:33 +0300
commit    236ca06dc4c84baecb7b090b8724db997a5d988a (patch)
tree      7fce724ae007dacfe3cf0f7511756b6064026ea3 /gn3/correlation
parent    7f9a293929be021eb73aec35defe254351557dcb (diff)
download  genenetwork3-236ca06dc4c84baecb7b090b8724db997a5d988a.tar.gz
Correlation api (#2)
* add file for correlation api
* register initial correlation api
* add correlation package
* add function for getting page data
* delete loading page api
* modify code for correlation
* add tests folder for correlations
* fix error in correlation api
* add tests for correlation
* add tests for correlation loading data
* add module for correlation computations
* modify api to return json when computing correlation
* add tests for computing correlation
* modify code for loading correlation data
* modify tests for correlation computation
* test loading correlation data using api endpoint
* add tests for asserting error in creating Correlation object
* add do correlation method
* add dummy tests for do_correlation method
* delete unused modules
* add tests for creating trait and dataset
* add intergration test for correlation api
* add tests for correlation api
* edit docorrelation method
* modify integration tests for correlation api
* modify tests for show_corr_results
* add create dataset function
* pep8 formatting and fix return value for api
* add more test data for doing correlation
* modify tests for correlation
* pep8 formatting
* add getting formatted corr type method
* import json library add process samples method for correlation
* fix issue with sample_vals key_error
* create utility module for correlation
* refactor endpoint for /corr_compute
* add test and mocks for compute_correlation function
* add compute correlation function and pep8 formatting
* move get genofile samplelist to utility module
* refactor code for CorrelationResults object
* pep8 formatting for module
* remove CorrelationResults from Api
* add base package initialize data_set module with create_dataset,redis and Dataset_Getter
* set dataset_structure if redis is empty
* add callable for DatsetType
* add set_dataset_key method If name is not in the object's dataset dictionary
* add Dataset object and MrnaAssayDataSet
* add db_tools
* add mysql client
* add DatasetGroup object
* add species module
* get mapping method
* import helper functions and new dataset
* add connection to db before request
* add helper functions
* add logger module
* add get_group_samplelists module
* add logger for debug
* add code for adding sample_data
* pep8 formatting
* Add chunks module
* add correlation helper module
* add get_sample_r_and_p_values method add get_header_fields function
* add generate corr json method
* add function to retrieve_trait_info
* remove comments and clean up code in show_corr_results
* remove comments and clean up code for data_set module
* pep8 formatting for helper_functions module
* pep8 formatting for trait module
* add module for species
* add Temp Dataset Object
* add Phenotype Dataset
* add Genotype Dataset
* add rettrieve sample_sample_data method
* add webqtlUtil module
* add do lit correlation for all traits
* add webqtlCaseData:Settings not ported
* return the_trait for create trait method
* add correlation_test json data
* add tests fore show corr results
* add dictfier package
* add tests for show_corr_results
* add assertion for trait_id
* refactor code for show_corr_results
* add test file for compute_corr intergration tests
* add scipy dependency
* refactor show_corr_results object add do lit correlation for trait_list
* add hmac module
* add bunch module:Dictionary using object notation
* add correlation functions
* add rpy2 dependency
* add hmac module
* add MrnaAssayTissueData object and get_symbol_values_pairs function
* add config module
* add get json_results method
* pep8 formatting remove comments
* add config file
* add db package
* refactor correlatio compuatation module
* add do tissue correlation for trait list
* add do lit correlation for all traits
* add do tissue correlation for all traits
* add do_bicor for bicor method
* raise error for when initital start vars is None
* add support for both form and json data when for correlation input
* remove print statement and pep8 formatting
* add default settings file
* add tools module for locate_ignore_error
* refactor code remove comments for trait module
* Add new test data for computing correlation
* pep8 formatting and use pickle
* refactor function for filtering form/json data
* remove unused imports
* remove mock functions in correlation_utility module
* refactor tests for compute correlation and pep8 formatting
* add tests for show_correlation results
* modify tests for show_corr_results
* add json files for tests
* pep8 formatting for show_corr_results
* Todo:Lint base files
* pylint for intergration tests
* add test module for test_corr_helpers
* Add test chunk module
* lint utility package
* refactoring and pep8 formatting
* implement simple metric for correlation
* add hmac utility file
* add correlation prefix
* fix merge conflict
* minor fixes for endpoints
* import:python-scipy,python-sqlalchemy from guix
* add python mysqlclient
* remove pkg-resources from requirements
* add python-rpy3 from guix
* refactor code for species module
* pep8 formatting and refactor code
* add tests for genereating correlation results
* lint correlation functions
* fix failing tests for show_corr_results
* add new correlation test data fix errors
* fix issues related to getting group samplelists
* refactor intergration tests for correlation
* add todo for refactoring_wanted_inputs
* replace custom Attribute setter with SimpleNamespace
* comparison of sample r correlation results btwn genenenetwork2 and genenetwork3
* delete AttributeSetter
* test request for /api/correlation/compute_correlation took 18.55710196495056 Seconds
* refactor tests and show_correlation results
* remove unneccessary comments and print statements
* edit requirement txt file
* api/correlation took 114.29814600944519 Seconds for correlation resullts:20000 - corr-type:lit - corr-method:pearson corr-dataset:corr_dataset:HC_M2_0606_P
* capture SQL_URI and GENENETWORK FILES path
* pep8 formatting edit && remove print statements
* delete filter_input function update test and data for correlation
* add docstring for required correlation_input
* /api/correlation took 12.905632972717285 Seconds * pearson * lit *dataset:HX_M2_0606_P trait_id :1444666 p_range:(lower->-0.60,uppper->0.74) corr_return_results: 100
* update integration and unittest for correlation
* add simple markdown docs for correlation
* update docs
* add tests and catch for invalid correlation_input
* minor fix for api
* Remove jupyter from deps
* guix.scm: Remove duplicate entry
* guix.scm: Add extra action items as comments
* Trim requirements.txt file

Co-authored-by: BonfaceKilz <me@bonfacemunyoki.com>
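The commit log above quotes timings for the correlation endpoint (for example a lit/pearson run against HX_M2_0606_P with trait 1444666 and corr_return_results of 100). A minimal sketch of such a request is shown below; the host, port and sample_vals payload are assumptions, and only the field names follow the keys that CorrelationResults.assertion_for_start_vars and do_correlation read in this diff.

import json
import requests  # assumed to be available in the client environment

# Hypothetical request against a local GN3 instance; values are illustrative only.
correlation_input = {
    "trait_id": "1444666",                    # trait id quoted in the commit log
    "dataset": "HC_M2_0606_P",                # primary dataset (assumed)
    "sample_vals": json.dumps({"C57BL/6J": "7.20", "DBA/2J": "7.15"}),
    "corr_type": "lit",                       # "lit", "tissue" or "sample"
    "corr_sample_method": "pearson",          # "pearson", "spearman" or "bicor"
    "corr_dataset": "HC_M2_0606_P",           # target dataset to correlate against
    "corr_return_results": 100,               # limit on the number of returned results
    "corr_samples_group": "samples_primary",
    "min_expr": "",
    "p_range_lower": "",
    "p_range_upper": "",
}

response = requests.post("http://localhost:8080/api/correlation/compute_correlation",
                         json=correlation_input, timeout=300)
print(response.json())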
Diffstat (limited to 'gn3/correlation')
-rw-r--r--  gn3/correlation/__init__.py                   0
-rw-r--r--  gn3/correlation/correlation_computations.py  32
-rw-r--r--  gn3/correlation/correlation_functions.py     96
-rw-r--r--  gn3/correlation/correlation_utility.py       22
-rw-r--r--  gn3/correlation/show_corr_results.py         735
5 files changed, 885 insertions, 0 deletions
diff --git a/gn3/correlation/__init__.py b/gn3/correlation/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/gn3/correlation/__init__.py
diff --git a/gn3/correlation/correlation_computations.py b/gn3/correlation/correlation_computations.py
new file mode 100644
index 0000000..6a3f2bb
--- /dev/null
+++ b/gn3/correlation/correlation_computations.py
@@ -0,0 +1,32 @@
+"""module contains code for any computation in correlation"""
+
+import json
+from .show_corr_results import CorrelationResults
+
+def compute_correlation(correlation_input_data,
+ correlation_results=CorrelationResults):
+ """function that does the correlation; creates a CorrelationResults instance
+
+ correlation_input_data is a dict of the form:
+
+ {
+ "trait_id": "valid trait id",
+ "dataset": "",
+ "sample_vals": {},
+ "primary_samples": "",
+ "corr_type": "",
+ "corr_dataset": "",
+ "corr_return_results": "",
+ }
+
+ """
+
+ corr_object = correlation_results(
+ start_vars=correlation_input_data)
+
+ corr_results = corr_object.do_correlation(start_vars=correlation_input_data)
+ # the result set could be very large; not sure whether to return it inline or as a file
+
+ return corr_results
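The correlation_results keyword argument above exists for dependency injection, so tests can swap in a stub instead of the real CorrelationResults. A minimal sketch follows; FakeCorrelationResults is hypothetical and not part of this commit.

from gn3.correlation.correlation_computations import compute_correlation

class FakeCorrelationResults:
    """stand-in used only to illustrate the injection point"""

    def __init__(self, start_vars):
        self.start_vars = start_vars

    def do_correlation(self, start_vars):
        # return a canned payload instead of touching the database
        return {"correlation_json": []}

result = compute_correlation({"corr_type": "sample"},
                             correlation_results=FakeCorrelationResults)
assert result == {"correlation_json": []}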
diff --git a/gn3/correlation/correlation_functions.py b/gn3/correlation/correlation_functions.py
new file mode 100644
index 0000000..be08c96
--- /dev/null
+++ b/gn3/correlation/correlation_functions.py
@@ -0,0 +1,96 @@
+
+"""
+# Copyright (C) University of Tennessee Health Science Center, Memphis, TN.
+#
+# This program is free software: you can redistribute it and/or modify it
+# under the terms of the GNU Affero General Public License
+# as published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU Affero General Public License for more details.
+#
+# This program is available from Source Forge: at GeneNetwork Project
+# (sourceforge.net/projects/genenetwork/).
+#
+# Contact Drs. Robert W. Williams and Xiaodong Zhou (2010)
+# at rwilliams@uthsc.edu and xzhou15@uthsc.edu
+#
+#
+#
+# This module is used by GeneNetwork project (www.genenetwork.org)
+#
+# Created by GeneNetwork Core Team 2010/08/10
+#
+# Last updated by NL 2011/03/23
+
+
+"""
+
+import rpy2.robjects
+from gn3.base.mrna_assay_tissue_data import MrnaAssayTissueData
+
+
+#####################################################################################
+# Input: primaryValue(list): one list of expression values of one probeSet,
+# targetValue(list): one list of expression values of one probeSet,
+# method(string): indicate correlation method ('pearson' or 'spearman')
+# Output: corr_result(list): first item is Correlation Value, second item is tissue number,
+# third item is PValue
+# Function: get correlation value,Tissue quantity ,p value result by using R;
+# Note : This function is special case since both primaryValue and targetValue are from
+# the same dataset. So the length of these two parameters is the same. They are pairs.
+# Also, in the datatable TissueProbeSetData, all Tissue values are loaded based on
+# the same tissue order
+#####################################################################################
+
+def cal_zero_order_corr_for_tiss(primaryValue=[], targetValue=[], method='pearson'):
+ """refer above for info on the function"""
+ # pylint: disable = E, W, R, C
+
+ #nb disabled pylint until tests are written for this function
+
+ R_primary = rpy2.robjects.FloatVector(list(range(len(primaryValue))))
+ N = len(primaryValue)
+ for i in range(len(primaryValue)):
+ R_primary[i] = primaryValue[i]
+
+ R_target = rpy2.robjects.FloatVector(list(range(len(targetValue))))
+ for i in range(len(targetValue)):
+ R_target[i] = targetValue[i]
+
+ R_corr_test = rpy2.robjects.r['cor.test']
+ if method == 'spearman':
+ R_result = R_corr_test(R_primary, R_target, method='spearman')
+ else:
+ R_result = R_corr_test(R_primary, R_target)
+
+ corr_result = []
+ corr_result.append(R_result[3][0])
+ corr_result.append(N)
+ corr_result.append(R_result[2][0])
+
+ return corr_result
+
+
+####################################################
+####################################################
+# input: symbol_list (list): list of gene symbols
+# output: SymbolValuePairDict (dictionary): one dictionary of Symbol and Value pairs;
+# key is symbol, value is one list of expression values of one probeSet.
+# function: wrapper around MrnaAssayTissueData; builds the tissue data object for the
+# given symbols and returns its symbol/value pairs.
+###################################################
+#####################################################
+
+def get_trait_symbol_and_tissue_values(symbol_list=None):
+ """function to get trait symbol and tissue values; refer to the notes above"""
+ tissue_data = MrnaAssayTissueData(gene_symbols=symbol_list)
+
+ if len(tissue_data.gene_symbols) >= 1:
+ return tissue_data.get_symbol_values_pairs()
+
+ return None
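cal_zero_order_corr_for_tiss above calls R's cor.test through rpy2 and returns [correlation, N, p-value]. For sanity checking those numbers, an approximate pure-Python counterpart using scipy is sketched below; it is not part of this commit and assumes complete, paired value lists.

import scipy.stats

def zero_order_corr_sketch(primary_values, target_values, method="pearson"):
    """rough scipy equivalent of cal_zero_order_corr_for_tiss()"""
    if method == "spearman":
        corr, p_value = scipy.stats.spearmanr(primary_values, target_values)
    else:
        corr, p_value = scipy.stats.pearsonr(primary_values, target_values)
    # same return shape as the rpy2 version: [correlation, tissue count, p-value]
    return [corr, len(primary_values), p_value]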
diff --git a/gn3/correlation/correlation_utility.py b/gn3/correlation/correlation_utility.py
new file mode 100644
index 0000000..7583bd7
--- /dev/null
+++ b/gn3/correlation/correlation_utility.py
@@ -0,0 +1,22 @@
+"""module contains utility functions for correlation"""
+
+
+class AttributeSetter:
+ """class for setting Attributes"""
+
+ def __init__(self, trait_obj):
+ for key, value in trait_obj.items():
+ setattr(self, key, value)
+
+ def __str__(self):
+ return self.__class__.__name__
+
+ def get_dict(self):
+ """dummy function to get dict object"""
+ return self.__dict__
+
+
+def get_genofile_samplelist(dataset):
+ """mock function to get genofile samplelist"""
+
+ return ["C57BL/6J"]
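A short usage sketch for the utilities above: AttributeSetter wraps a plain dict so its keys can be read as attributes (the commit log notes it was later replaced with SimpleNamespace), and get_genofile_samplelist is an explicit mock. The dict contents below are illustrative only.

from gn3.correlation.correlation_utility import (AttributeSetter,
                                                 get_genofile_samplelist)

trait = AttributeSetter({"name": "1444666", "symbol": "Shh", "sample_r": 0.74})
assert trait.symbol == "Shh"
assert trait.get_dict()["sample_r"] == 0.74
assert get_genofile_samplelist(dataset=None) == ["C57BL/6J"]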
diff --git a/gn3/correlation/show_corr_results.py b/gn3/correlation/show_corr_results.py
new file mode 100644
index 0000000..55d8366
--- /dev/null
+++ b/gn3/correlation/show_corr_results.py
@@ -0,0 +1,735 @@
+"""module contains code for doing correlation"""
+
+import json
+import collections
+import numpy
+import scipy.stats
+import rpy2.robjects as ro
+from flask import g
+from gn3.base.data_set import create_dataset
+from gn3.utility.db_tools import escape
+from gn3.utility.helper_functions import get_species_dataset_trait
+from gn3.utility.corr_result_helpers import normalize_values
+from gn3.base.trait import create_trait
+from gn3.utility import hmac
+from . import correlation_functions
+
+
+class CorrelationResults:
+ """class for computing correlation"""
+ # pylint: disable=too-many-instance-attributes
+ # pylint:disable=attribute-defined-outside-init
+
+ def __init__(self, start_vars):
+ self.assertion_for_start_vars(start_vars)
+
+ @staticmethod
+ def assertion_for_start_vars(start_vars):
+ # pylint: disable = E, W, R, C
+
+ # there should be better ways to validate these variables
+ # corr_type example: "sample"
+ assert("corr_type" in start_vars)
+ assert(isinstance(start_vars['corr_type'], str))
+ # corr_sample_method example: "pearson"
+ assert('corr_sample_method' in start_vars)
+ assert('corr_dataset' in start_vars)
+ # corr_return_results is the limit on returned results
+ assert('corr_return_results' in start_vars)
+
+ if "loc_chr" in start_vars:
+ assert('min_loc_mb' in start_vars)
+ assert('max_loc_mb' in start_vars)
+
+ def get_formatted_corr_type(self):
+ """method to format corr_type for display"""
+ self.formatted_corr_type = ""
+ if self.corr_type == "lit":
+ self.formatted_corr_type += "Literature Correlation "
+ elif self.corr_type == "tissue":
+ self.formatted_corr_type += "Tissue Correlation "
+ elif self.corr_type == "sample":
+ self.formatted_corr_type += "Genetic Correlation "
+
+ if self.corr_method == "pearson":
+ self.formatted_corr_type += "(Pearson's r)"
+ elif self.corr_method == "spearman":
+ self.formatted_corr_type += "(Spearman's rho)"
+ elif self.corr_method == "bicor":
+ self.formatted_corr_type += "(Biweight r)"
+
+ def process_samples(self, start_vars, sample_names, excluded_samples=None):
+ """method to process samples"""
+
+
+ if not excluded_samples:
+ excluded_samples = ()
+
+ sample_val_dict = json.loads(start_vars["sample_vals"])
+ print(sample_val_dict)
+ if sample_names is None:
+ raise NotImplementedError
+
+ for sample in sample_names:
+ if sample not in excluded_samples:
+ value = sample_val_dict[sample]
+
+ if not value.strip().lower() == "x":
+ self.sample_data[str(sample)] = float(value)
+
+ def do_tissue_correlation_for_trait_list(self, tissue_dataset_id=1):
+ """Given a list of correlation results (self.correlation_results),\
+ gets the tissue correlation value for each"""
+ # pylint: disable = E, W, R, C
+
+ # Gets tissue expression values for the primary trait
+ primary_trait_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(
+ symbol_list=[self.this_trait.symbol])
+
+ if self.this_trait.symbol.lower() in primary_trait_tissue_vals_dict:
+ primary_trait_tissue_values = primary_trait_tissue_vals_dict[self.this_trait.symbol.lower(
+ )]
+ gene_symbol_list = [
+ trait.symbol for trait in self.correlation_results if trait.symbol]
+
+ corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(
+ symbol_list=gene_symbol_list)
+
+ for trait in self.correlation_results:
+ if trait.symbol and trait.symbol.lower() in corr_result_tissue_vals_dict:
+ this_trait_tissue_values = corr_result_tissue_vals_dict[trait.symbol.lower(
+ )]
+
+ result = correlation_functions.cal_zero_order_corr_for_tiss(primary_trait_tissue_values,
+ this_trait_tissue_values,
+ self.corr_method)
+
+ trait.tissue_corr = result[0]
+ trait.tissue_pvalue = result[2]
+
+ def do_lit_correlation_for_trait_list(self):
+ """Get the literature correlation value for each trait in self.correlation_results"""
+ # pylint: disable = E, W, R, C
+
+ input_trait_mouse_gene_id = self.convert_to_mouse_gene_id(
+ self.dataset.group.species.lower(), self.this_trait.geneid)
+
+ for trait in self.correlation_results:
+
+ if trait.geneid:
+ trait.mouse_gene_id = self.convert_to_mouse_gene_id(
+ self.dataset.group.species.lower(), trait.geneid)
+ else:
+ trait.mouse_gene_id = None
+
+ if trait.mouse_gene_id and str(trait.mouse_gene_id).find(";") == -1:
+ result = g.db.execute(
+ """SELECT value
+ FROM LCorrRamin3
+ WHERE GeneId1='%s' and
+ GeneId2='%s'
+ """ % (escape(str(trait.mouse_gene_id)), escape(str(input_trait_mouse_gene_id)))
+ ).fetchone()
+ if not result:
+ result = g.db.execute("""SELECT value
+ FROM LCorrRamin3
+ WHERE GeneId2='%s' and
+ GeneId1='%s'
+ """ % (escape(str(trait.mouse_gene_id)), escape(str(input_trait_mouse_gene_id)))
+ ).fetchone()
+
+ if result:
+ lit_corr = result.value
+ trait.lit_corr = lit_corr
+ else:
+ trait.lit_corr = 0
+ else:
+ trait.lit_corr = 0
+
+ def do_lit_correlation_for_all_traits(self):
+ """method for lit_correlation for all traits"""
+ # pylint: disable = E, W, R, C
+ input_trait_mouse_gene_id = self.convert_to_mouse_gene_id(
+ self.dataset.group.species.lower(), self.this_trait.geneid)
+
+ lit_corr_data = {}
+ for trait, gene_id in list(self.trait_geneid_dict.items()):
+ mouse_gene_id = self.convert_to_mouse_gene_id(
+ self.dataset.group.species.lower(), gene_id)
+
+ if mouse_gene_id and str(mouse_gene_id).find(";") == -1:
+ #print("gene_symbols:", input_trait_mouse_gene_id + " / " + mouse_gene_id)
+ result = g.db.execute(
+ """SELECT value
+ FROM LCorrRamin3
+ WHERE GeneId1='%s' and
+ GeneId2='%s'
+ """ % (escape(mouse_gene_id), escape(input_trait_mouse_gene_id))
+ ).fetchone()
+ if not result:
+ result = g.db.execute("""SELECT value
+ FROM LCorrRamin3
+ WHERE GeneId2='%s' and
+ GeneId1='%s'
+ """ % (escape(mouse_gene_id), escape(input_trait_mouse_gene_id))
+ ).fetchone()
+ if result:
+ #print("result:", result)
+ lit_corr = result.value
+ lit_corr_data[trait] = [gene_id, lit_corr]
+ else:
+ lit_corr_data[trait] = [gene_id, 0]
+ else:
+ lit_corr_data[trait] = [gene_id, 0]
+
+ lit_corr_data = collections.OrderedDict(sorted(list(lit_corr_data.items()),
+ key=lambda t: -abs(t[1][1])))
+
+ return lit_corr_data
+
+ def do_tissue_correlation_for_all_traits(self, tissue_dataset_id=1):
+ """Get tissue correlation values for all traits in the target dataset"""
+ # pylint: disable = E, W, R, C
+ # Gets tissue expression values for the primary trait
+ primary_trait_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(
+ symbol_list=[self.this_trait.symbol])
+
+ if self.this_trait.symbol.lower() in primary_trait_tissue_vals_dict:
+ primary_trait_tissue_values = primary_trait_tissue_vals_dict[self.this_trait.symbol.lower(
+ )]
+
+ #print("trait_gene_symbols: ", pf(trait_gene_symbols.values()))
+ corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(
+ symbol_list=list(self.trait_symbol_dict.values()))
+
+ #print("corr_result_tissue_vals: ", pf(corr_result_tissue_vals_dict))
+
+ #print("trait_gene_symbols: ", pf(trait_gene_symbols))
+
+ tissue_corr_data = {}
+ for trait, symbol in list(self.trait_symbol_dict.items()):
+ if symbol and symbol.lower() in corr_result_tissue_vals_dict:
+ this_trait_tissue_values = corr_result_tissue_vals_dict[symbol.lower(
+ )]
+
+ result = correlation_functions.cal_zero_order_corr_for_tiss(primary_trait_tissue_values,
+ this_trait_tissue_values,
+ self.corr_method)
+
+ tissue_corr_data[trait] = [symbol, result[0], result[2]]
+
+ tissue_corr_data = collections.OrderedDict(sorted(list(tissue_corr_data.items()),
+ key=lambda t: -abs(t[1][1])))
+
+ # do_correlation expects this value back (it checks the result against None)
+ return tissue_corr_data
+
+ def get_sample_r_and_p_values(self, trait, target_samples):
+ """Calculates the sample r (or rho) and p-value
+
+ Given a primary trait and a target trait's sample values,
+ calculates either the pearson r or spearman rho and the p-value
+ using the corresponding scipy functions.
+
+ """
+ # pylint: disable = E, W, R, C
+ self.this_trait_vals = []
+ target_vals = []
+
+ for index, sample in enumerate(self.target_dataset.samplelist):
+ if sample in self.sample_data:
+ sample_value = self.sample_data[sample]
+ target_sample_value = target_samples[index]
+ self.this_trait_vals.append(sample_value)
+ target_vals.append(target_sample_value)
+
+ self.this_trait_vals, target_vals, num_overlap = normalize_values(
+ self.this_trait_vals, target_vals)
+
+ if num_overlap > 5:
+ # ZS: 2015 could add biweight correlation, see http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3465711/
+ if self.corr_method == 'bicor':
+ sample_r, sample_p = do_bicor(
+ self.this_trait_vals, target_vals)
+
+ elif self.corr_method == 'pearson':
+ sample_r, sample_p = scipy.stats.pearsonr(
+ self.this_trait_vals, target_vals)
+
+ else:
+ sample_r, sample_p = scipy.stats.spearmanr(
+ self.this_trait_vals, target_vals)
+
+ if not numpy.isnan(sample_r):
+ self.correlation_data[trait] = [
+ sample_r, sample_p, num_overlap]
+
+ def convert_to_mouse_gene_id(self, species=None, gene_id=None):
+ """If the species is rat or human, translate the gene_id to the mouse geneid
+
+ If there is no input gene_id or there's no corresponding mouse gene_id, return None
+
+ """
+ if not gene_id:
+ return None
+
+ mouse_gene_id = None
+ if species == "mouse":
+ mouse_gene_id = gene_id
+
+ elif species == 'rat':
+ query = """SELECT mouse
+ FROM GeneIDXRef
+ WHERE rat='%s'""" % escape(gene_id)
+
+ result = g.db.execute(query).fetchone()
+ if result != None:
+ mouse_gene_id = result.mouse
+
+ elif species == "human":
+
+ query = """SELECT mouse
+ FROM GeneIDXRef
+ WHERE human='%s'""" % escape(gene_id)
+
+ result = g.db.execute(query).fetchone()
+ if result != None:
+ mouse_gene_id = result.mouse
+
+ return mouse_gene_id
+
+ def do_correlation(self, start_vars, create_dataset=create_dataset,
+ create_trait=create_trait,
+ get_species_dataset_trait=get_species_dataset_trait):
+ # pylint: disable = E, W, R, C
+ # probably refactor start_vars being passed twice
+ # this method aims to replace the original do_correlation but also adds dependency injection
+ # to enable testing
+
+ # the code below should maybe be refactored; it works more or less the same as before
+ if start_vars["dataset"] == "Temp":
+ self.dataset = create_dataset(
+ dataset_name="Temp", dataset_type="Temp", group_name=start_vars['group'])
+
+ self.trait_id = start_vars["trait_id"]
+
+ self.this_trait = create_trait(dataset=self.dataset,
+ name=self.trait_id,
+ cellid=None)
+
+ else:
+
+ get_species_dataset_trait(self, start_vars)
+
+ corr_samples_group = start_vars['corr_samples_group']
+ self.sample_data = {}
+ self.corr_type = start_vars['corr_type']
+ self.corr_method = start_vars['corr_sample_method']
+ self.min_expr = float(
+ start_vars["min_expr"]) if start_vars["min_expr"] != "" else None
+ self.p_range_lower = float(
+ start_vars["p_range_lower"]) if start_vars["p_range_lower"] != "" else -1.0
+ self.p_range_upper = float(
+ start_vars["p_range_upper"]) if start_vars["p_range_upper"] != "" else 1.0
+
+ if ("loc_chr" in start_vars and "min_loc_mb" in start_vars and "max_loc_mb" in start_vars):
+ self.location_type = str(start_vars['location_type'])
+ self.location_chr = str(start_vars['loc_chr'])
+
+ try:
+
+ # the code below is basically a temporary fix
+ self.min_location_mb = int(start_vars['min_loc_mb'])
+ self.max_location_mb = int(start_vars['max_loc_mb'])
+ except Exception:
+ self.min_location_mb = None
+ self.max_location_mb = None
+
+ else:
+ self.location_type = self.location_chr = self.min_location_mb = self.max_location_mb = None
+
+ self.get_formatted_corr_type()
+
+ self.return_number = int(start_vars['corr_return_results'])
+
+ primary_samples = self.dataset.group.samplelist
+
+
+ # The two if statements below append samples to the sample list based upon whether the user
+ # selected Primary Samples Only, Other Samples Only, or All Samples
+
+ if self.dataset.group.parlist != None:
+ primary_samples += self.dataset.group.parlist
+
+ if self.dataset.group.f1list != None:
+
+ primary_samples += self.dataset.group.f1list
+
+ # If either BXD/whatever Only or All Samples, append all of that group's samplelist
+
+ if corr_samples_group != 'samples_other':
+
+ # print("primary samples are *****",primary_samples)
+
+ self.process_samples(start_vars, primary_samples)
+
+ if corr_samples_group != 'samples_primary':
+ if corr_samples_group == 'samples_other':
+ primary_samples = [x for x in primary_samples if x not in (
+ self.dataset.group.parlist + self.dataset.group.f1list)]
+
+ self.process_samples(start_vars, list(self.this_trait.data.keys()), primary_samples)
+
+ self.target_dataset = create_dataset(start_vars['corr_dataset'])
+ # retrieving the trait_data for the target dataset is what makes this step slow
+ import time
+
+ init_time = time.time()
+ self.target_dataset.get_trait_data(list(self.sample_data.keys()))
+
+ aft_time = time.time() - init_time
+
+ self.header_fields = get_header_fields(
+ self.target_dataset.type, self.corr_method)
+
+ if self.target_dataset.type == "ProbeSet":
+ self.filter_cols = [7, 6]
+
+ elif self.target_dataset.type == "Publish":
+ self.filter_cols = [6, 0]
+
+ else:
+ self.filter_cols = [4, 0]
+
+ self.correlation_results = []
+
+ self.correlation_data = {}
+
+ if self.corr_type == "tissue":
+ self.trait_symbol_dict = self.dataset.retrieve_genes("Symbol")
+
+ tissue_corr_data = self.do_tissue_correlation_for_all_traits()
+ if tissue_corr_data != None:
+ for trait in list(tissue_corr_data.keys())[:self.return_number]:
+ self.get_sample_r_and_p_values(
+ trait, self.target_dataset.trait_data[trait])
+ else:
+ for trait, values in list(self.target_dataset.trait_data.items()):
+ self.get_sample_r_and_p_values(trait, values)
+
+ elif self.corr_type == "lit":
+ self.trait_geneid_dict = self.dataset.retrieve_genes("GeneId")
+ lit_corr_data = self.do_lit_correlation_for_all_traits()
+
+ for trait in list(lit_corr_data.keys())[:self.return_number]:
+ self.get_sample_r_and_p_values(
+ trait, self.target_dataset.trait_data[trait])
+
+ elif self.corr_type == "sample":
+ for trait, values in list(self.target_dataset.trait_data.items()):
+ self.get_sample_r_and_p_values(trait, values)
+
+ self.correlation_data = collections.OrderedDict(sorted(list(self.correlation_data.items()),
+ key=lambda t: -abs(t[1][0])))
+
+ # ZS: Convert min/max chromosome to an int for the location range option
+
+ # note: computing everything above took about 20.79 seconds; most of that time
+ # was spent retrieving the target dataset trait info
+
+ initial_time_chr = time.time()
+
+ range_chr_as_int = None
+ for order_id, chr_info in list(self.dataset.species.chromosomes.chromosomes.items()):
+ if 'loc_chr' in start_vars:
+ if chr_info.name == self.location_chr:
+ range_chr_as_int = order_id
+
+ for _trait_counter, trait in enumerate(list(self.correlation_data.keys())[:self.return_number]):
+ trait_object = create_trait(
+ dataset=self.target_dataset, name=trait, get_qtl_info=True, get_sample_info=False)
+ if not trait_object:
+ continue
+
+ chr_as_int = 0
+ for order_id, chr_info in list(self.dataset.species.chromosomes.chromosomes.items()):
+ if self.location_type == "highest_lod":
+ if chr_info.name == trait_object.locus_chr:
+ chr_as_int = order_id
+ else:
+ if chr_info.name == trait_object.chr:
+ chr_as_int = order_id
+
+ if (float(self.correlation_data[trait][0]) >= self.p_range_lower and
+ float(self.correlation_data[trait][0]) <= self.p_range_upper):
+
+ if (self.target_dataset.type == "ProbeSet" or self.target_dataset.type == "Publish") and bool(trait_object.mean):
+ if (self.min_expr != None) and (float(trait_object.mean) < self.min_expr):
+ continue
+
+ if range_chr_as_int != None and (chr_as_int != range_chr_as_int):
+ continue
+ if self.location_type == "highest_lod":
+ if (self.min_location_mb != None) and (float(trait_object.locus_mb) < float(self.min_location_mb)):
+ continue
+ if (self.max_location_mb != None) and (float(trait_object.locus_mb) > float(self.max_location_mb)):
+ continue
+ else:
+ if (self.min_location_mb != None) and (float(trait_object.mb) < float(self.min_location_mb)):
+ continue
+ if (self.max_location_mb != None) and (float(trait_object.mb) > float(self.max_location_mb)):
+ continue
+
+ (trait_object.sample_r,
+ trait_object.sample_p,
+ trait_object.num_overlap) = self.correlation_data[trait]
+
+ # Set some sane defaults
+ trait_object.tissue_corr = 0
+ trait_object.tissue_pvalue = 0
+ trait_object.lit_corr = 0
+ if self.corr_type == "tissue" and tissue_corr_data != None:
+ trait_object.tissue_corr = tissue_corr_data[trait][1]
+ trait_object.tissue_pvalue = tissue_corr_data[trait][2]
+ elif self.corr_type == "lit":
+ trait_object.lit_corr = lit_corr_data[trait][1]
+
+ self.correlation_results.append(trait_object)
+
+ # note: the loop above scales with the number of traits requested (n = 100, 500, ...)
+
+ if self.corr_type != "lit" and self.dataset.type == "ProbeSet" and self.target_dataset.type == "ProbeSet":
+ self.do_lit_correlation_for_trait_list()
+
+ if self.corr_type != "tissue" and self.dataset.type == "ProbeSet" and self.target_dataset.type == "ProbeSet":
+ self.do_tissue_correlation_for_trait_list()
+
+ self.json_results = generate_corr_json(
+ self.correlation_results, self.this_trait, self.dataset, self.target_dataset)
+
+
+ # this will fit with genenetwork2 without further changes if we return self instead
+
+ # an alternative return shape (commented out below)
+ return self.json_results
+ # return {
+ # # "Results": "succeess",
+ # # "return_number": self.return_number,
+ # # "primary_samples": primary_samples,
+ # # "time_taken": 12,
+ # # "correlation_data": self.correlation_data,
+ # "correlation_json": self.json_results
+ # }
+
+
+def do_bicor(this_trait_vals, target_trait_vals):
+ """compute the biweight midcorrelation and its p-value via R/WGCNA's bicorAndPvalue"""
+ # pylint: disable = E, W, R, C
+ r_library = ro.r["library"] # Map the library function
+ r_options = ro.r["options"] # Map the options function
+
+ r_library("WGCNA")
+ r_bicor = ro.r["bicorAndPvalue"] # Map the bicorAndPvalue function
+
+ r_options(stringsAsFactors=False)
+
+ this_vals = ro.Vector(this_trait_vals)
+ target_vals = ro.Vector(target_trait_vals)
+
+ the_r, the_p, _fisher_transform, _the_t, _n_obs = [
+ numpy.asarray(x) for x in r_bicor(x=this_vals, y=target_vals)]
+
+ return the_r, the_p
+
+
+def get_header_fields(data_type, corr_method):
+ """function to get header fields when doing correlation"""
+ if data_type == "ProbeSet":
+ if corr_method == "spearman":
+
+ header_fields = ['Index',
+ 'Record',
+ 'Symbol',
+ 'Description',
+ 'Location',
+ 'Mean',
+ 'Sample rho',
+ 'N',
+ 'Sample p(rho)',
+ 'Lit rho',
+ 'Tissue rho',
+ 'Tissue p(rho)',
+ 'Max LRS',
+ 'Max LRS Location',
+ 'Additive Effect']
+
+ else:
+ header_fields = ['Index',
+ 'Record',
+ 'Abbreviation',
+ 'Description',
+ 'Mean',
+ 'Authors',
+ 'Year',
+ 'Sample r',
+ 'N',
+ 'Sample p(r)',
+ 'Max LRS',
+ 'Max LRS Location',
+ 'Additive Effect']
+
+ elif data_type == "Publish":
+ if corr_method == "spearman":
+
+ header_fields = ['Index',
+ 'Record',
+ 'Abbreviation',
+ 'Description',
+ 'Mean',
+ 'Authors',
+ 'Year',
+ 'Sample rho',
+ 'N',
+ 'Sample p(rho)',
+ 'Max LRS',
+ 'Max LRS Location',
+ 'Additive Effect']
+
+ else:
+ header_fields = ['Index',
+ 'Record',
+ 'Abbreviation',
+ 'Description',
+ 'Mean',
+ 'Authors',
+ 'Year',
+ 'Sample r',
+ 'N',
+ 'Sample p(r)',
+ 'Max LRS',
+ 'Max LRS Location',
+ 'Additive Effect']
+
+ else:
+ if corr_method == "spearman":
+ header_fields = ['Index',
+ 'ID',
+ 'Location',
+ 'Sample rho',
+ 'N',
+ 'Sample p(rho)']
+
+ else:
+ header_fields = ['Index',
+ 'ID',
+ 'Location',
+ 'Sample r',
+ 'N',
+ 'Sample p(r)']
+
+ return header_fields
+
+
+def generate_corr_json(corr_results, this_trait, dataset, target_dataset, for_api=False):
+ """function to generate corr json data"""
+ # TODO: refactor this function
+ results_list = []
+ for i, trait in enumerate(corr_results):
+ if trait.view == False:
+ continue
+ results_dict = {}
+ results_dict['index'] = i + 1
+ results_dict['trait_id'] = trait.name
+ results_dict['dataset'] = trait.dataset.name
+ results_dict['hmac'] = hmac.data_hmac(
+ '{}:{}'.format(trait.name, trait.dataset.name))
+ if target_dataset.type == "ProbeSet":
+ results_dict['symbol'] = trait.symbol
+ results_dict['description'] = "N/A"
+ results_dict['location'] = trait.location_repr
+ results_dict['mean'] = "N/A"
+ results_dict['additive'] = "N/A"
+ if bool(trait.description_display):
+ results_dict['description'] = trait.description_display
+ if bool(trait.mean):
+ results_dict['mean'] = f"{float(trait.mean):.3f}"
+ try:
+ results_dict['lod_score'] = f"{float(trait.LRS_score_repr) / 4.61:.1f}"
+ except Exception:
+ results_dict['lod_score'] = "N/A"
+ results_dict['lrs_location'] = trait.LRS_location_repr
+ if bool(trait.additive):
+ results_dict['additive'] = f"{float(trait.additive):.3f}"
+ results_dict['sample_r'] = f"{float(trait.sample_r):.3f}"
+ results_dict['num_overlap'] = trait.num_overlap
+ results_dict['sample_p'] = f"{float(trait.sample_p):.3e}"
+ results_dict['lit_corr'] = "--"
+ results_dict['tissue_corr'] = "--"
+ results_dict['tissue_pvalue'] = "--"
+ if bool(trait.lit_corr):
+ results_dict['lit_corr'] = f"{float(trait.lit_corr):.3f}"
+ if bool(trait.tissue_corr):
+ results_dict['tissue_corr'] = f"{float(trait.tissue_corr):.3f}"
+ results_dict['tissue_pvalue'] = f"{float(trait.tissue_pvalue):.3e}"
+ elif target_dataset.type == "Publish":
+ results_dict['abbreviation_display'] = "N/A"
+ results_dict['description'] = "N/A"
+ results_dict['mean'] = "N/A"
+ results_dict['authors_display'] = "N/A"
+ results_dict['additive'] = "N/A"
+ if for_api:
+ results_dict['pubmed_id'] = "N/A"
+ results_dict['year'] = "N/A"
+ else:
+ results_dict['pubmed_link'] = "N/A"
+ results_dict['pubmed_text'] = "N/A"
+
+ if bool(trait.abbreviation):
+ results_dict['abbreviation_display'] = trait.abbreviation
+ if bool(trait.description_display):
+ results_dict['description'] = trait.description_display
+ if bool(trait.mean):
+ results_dict['mean'] = f"{float(trait.mean):.3f}"
+ if bool(trait.authors):
+ authors_list = trait.authors.split(',')
+ if len(authors_list) > 6:
+ results_dict['authors_display'] = ", ".join(
+ authors_list[:6]) + ", et al."
+ else:
+ results_dict['authors_display'] = trait.authors
+ if bool(trait.pubmed_id):
+ if for_api:
+ results_dict['pubmed_id'] = trait.pubmed_id
+ results_dict['year'] = trait.pubmed_text
+ else:
+ results_dict['pubmed_link'] = trait.pubmed_link
+ results_dict['pubmed_text'] = trait.pubmed_text
+ try:
+ results_dict['lod_score'] = f"{float(trait.LRS_score_repr) / 4.61:.1f}"
+ except Exception:
+ results_dict['lod_score'] = "N/A"
+ results_dict['lrs_location'] = trait.LRS_location_repr
+ if bool(trait.additive):
+ results_dict['additive'] = f"{float(trait.additive):.3f}"
+ results_dict['sample_r'] = f"{float(trait.sample_r):.3f}"
+ results_dict['num_overlap'] = trait.num_overlap
+ results_dict['sample_p'] = f"{float(trait.sample_p):.3e}"
+ else:
+ results_dict['location'] = trait.location_repr
+ results_dict['sample_r'] = f"{float(trait.sample_r):.3f}"
+ results_dict['num_overlap'] = trait.num_overlap
+ results_dict['sample_p'] = f"{float(trait.sample_p):.3e}"
+
+ results_list.append(results_dict)
+
+ return json.dumps(results_list)
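For reference, generate_corr_json returns json.dumps of a list with one dict per retained trait. For a ProbeSet target dataset a decoded entry has roughly the shape sketched below; the keys mirror the assignments above, while the values are illustrative and the hmac string is a placeholder.

example_probeset_entry = {
    "index": 1,
    "trait_id": "1427571_at",            # hypothetical record id
    "dataset": "HC_M2_0606_P",
    "hmac": "<data_hmac of 'trait:dataset'>",
    "symbol": "Shh",
    "description": "sonic hedgehog",
    "location": "Chr5: 28.46 Mb",
    "mean": "9.123",
    "additive": "N/A",
    "lod_score": "3.2",
    "lrs_location": "Chr5: 28.46 Mb",
    "sample_r": "0.740",
    "num_overlap": 35,
    "sample_p": "1.234e-05",
    "lit_corr": "--",
    "tissue_corr": "--",
    "tissue_pvalue": "--",
}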