Diffstat (limited to 'wqflask')
-rw-r--r--  wqflask/wqflask/do_search.py              |  67
-rw-r--r--  wqflask/wqflask/my_pylmm/pyLMM/input.py   | 181
-rw-r--r--  wqflask/wqflask/my_pylmm/pyLMM/lmm.py     |  79
-rw-r--r--  wqflask/wqflask/my_pylmm/pylmmKinship.py  | 108
-rw-r--r--  wqflask/wqflask/search_results.py         |  28
5 files changed, 267 insertions, 196 deletions
diff --git a/wqflask/wqflask/do_search.py b/wqflask/wqflask/do_search.py
index 2e74d991..1b1b56fb 100644
--- a/wqflask/wqflask/do_search.py
+++ b/wqflask/wqflask/do_search.py
@@ -58,6 +58,7 @@ class DoSearch(object):
@classmethod
def get_search(cls, search_type):
+ print("search_types are:", pf(cls.search_types))
return cls.search_types[search_type]
class QuickMrnaAssaySearch(DoSearch):
@@ -66,9 +67,10 @@ class QuickMrnaAssaySearch(DoSearch):
DoSearch.search_types['quick_mrna_assay'] = "QuickMrnaAssaySearch"
base_query = """SELECT ProbeSet.Name as ProbeSet_Name,
+ ProbeSet.Symbol as ProbeSet_Symbol,
+ ProbeSet.description as ProbeSet_Description,
ProbeSet.Chr_num as ProbeSet_Chr_Num,
ProbeSet.Mb as ProbeSet_Mb,
- ProbeSet.Symbol as ProbeSet_Symbol,
ProbeSet.name_num as ProbeSet_name_num
FROM ProbeSet """
@@ -76,14 +78,15 @@ class QuickMrnaAssaySearch(DoSearch):
'Record ID',
'Symbol',
'Location']
-
+
def run(self):
"""Generates and runs a search for assays across all mRNA expression datasets"""
print("Running ProbeSetSearch")
query = self.base_query + """WHERE (MATCH (ProbeSet.Name,
ProbeSet.description,
- ProbeSet.symbol)
+ ProbeSet.symbol,
+ ProbeSet.alias)
AGAINST ('%s' IN BOOLEAN MODE))
""" % (escape(self.search_term[0]))
@@ -156,9 +159,7 @@ class MrnaAssaySearch(DoSearch):
return self.execute(query)
-
-#class QuickPhenotypeSearch(DoSearch):
-
+
class PhenotypeSearch(DoSearch):
"""A search within a phenotype dataset"""
@@ -232,9 +233,57 @@ class PhenotypeSearch(DoSearch):
query = self.compile_final_query(where_clause = self.get_fields_clause())
- results = self.execute(query)
- print("in [df] run results are:", results)
- return results
+ return self.execute(query)
+
+class QuickPhenotypeSearch(PhenotypeSearch):
+ """A search across all phenotype datasets"""
+
+ DoSearch.search_types['quick_phenotype'] = "QuickPhenotypeSearch"
+
+ base_query = """SELECT Species.Name as Species_Name,
+ PublishFreeze.FullName as Dataset_Name,
+ PublishFreeze.Name,
+ PublishXRef.Id,
+ PublishFreeze.createtime as thistable,
+ Publication.PubMed_ID as Publication_PubMed_ID,
+ Phenotype.Post_publication_description as Phenotype_Name
+ FROM Phenotype,
+ PublishFreeze,
+ Publication,
+ PublishXRef,
+ InbredSet,
+ Species """
+
+ search_fields = ('Phenotype.Post_publication_description',
+ 'Phenotype.Pre_publication_description',
+ 'Phenotype.Pre_publication_abbreviation',
+ 'Phenotype.Post_publication_abbreviation',
+ 'Phenotype.Lab_code',
+ 'Publication.PubMed_ID',
+ 'Publication.Abstract',
+ 'Publication.Title',
+ 'Publication.Authors')
+
+ def compile_final_query(self, where_clause = ''):
+ """Generates the final query string"""
+
+ query = (self.base_query +
+ """WHERE %s
+ PublishXRef.PhenotypeId = Phenotype.Id and
+ PublishXRef.PublicationId = Publication.Id and
+ PublishXRef.InbredSetId = InbredSet.Id and
+ InbredSet.SpeciesId = Species.Id""" % where_clause)
+
+ print("query is:", pf(query))
+
+ return query
+
+ def run(self):
+ """Generates and runs a search across all phenotype datasets"""
+
+ query = self.compile_final_query(where_clause = self.get_fields_clause())
+
+ return self.execute(query)
class GenotypeSearch(DoSearch):
"""A search within a genotype dataset"""
diff --git a/wqflask/wqflask/my_pylmm/pyLMM/input.py b/wqflask/wqflask/my_pylmm/pyLMM/input.py
index 33666a0d..b8b76fd0 100644
--- a/wqflask/wqflask/my_pylmm/pyLMM/input.py
+++ b/wqflask/wqflask/my_pylmm/pyLMM/input.py
@@ -1,20 +1,25 @@
# pylmm is a python-based linear mixed-model solver with applications to GWAS
# Copyright (C) 2013 Nicholas A. Furlotte (nick.furlotte@gmail.com)
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#The program is free for academic use. Please contact Nick Furlotte
+#<nick.furlotte@gmail.com> if you are interested in using the software for
+#commercial purposes.
+
+#The software must not be modified and distributed without prior
+#permission of the author.
+
+#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+#CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+#EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+#PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+#PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+#NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
@@ -24,7 +29,6 @@ import pdb
class plink:
def __init__(self,fbase,kFile=None,phenoFile=None,type='b',normGenotype=True,readKFile=False):
-
self.fbase = fbase
self.type = type
self.indivs = self.getIndivs(self.fbase,type)
@@ -33,34 +37,33 @@ class plink:
self.normGenotype = normGenotype
self.phenoFile = phenoFile
# Originally I was using the fastLMM style that has indiv IDs embedded.
- # NOW I want to use this module to just read SNPs so I'm allowing
+ # NOW I want to use this module to just read SNPs so I'm allowing
# the programmer to turn off the kinship reading.
self.readKFile = readKFile
-
- if self.kFile:
- self.kFile = kFile
- if self.readKFile: self.K = self.readKinship(self.kFile)
- elif os.path.isfile("%s.kin" % fbase):
+
+ if self.kFile: self.K = self.readKinship(self.kFile)
+ elif os.path.isfile("%s.kin" % fbase):
self.kFile = "%s.kin" %fbase
- if self.readKFile: self.K = self.readKinship(self.kFile)
- else:
+ if self.readKFile:
+ self.K = self.readKinship(self.kFile)
+ else:
self.kFile = None
self.K = None
-
+
self.getPhenos(self.phenoFile)
-
+
self.fhandle = None
self.snpFileHandle = None
-
- def __del__(self):
+
+ def __del__(self):
if self.fhandle: self.fhandle.close()
if self.snpFileHandle: self.snpFileHandle.close()
-
+
def getSNPIterator(self):
- if not self.type == 'b':
+ if not self.type == 'b':
sys.stderr.write("Have only implemented this for binary plink files (bed)\n")
return
-
+
# get the number of snps
file = self.fbase + '.bim'
i = 0
@@ -70,30 +73,32 @@ class plink:
self.numSNPs = i
self.have_read = 0
self.snpFileHandle = open(file,'r')
-
+
self.BytestoRead = self.N / 4 + (self.N % 4 and 1 or 0)
self._formatStr = 'c'*self.BytestoRead
-
+
file = self.fbase + '.bed'
self.fhandle = open(file,'rb')
-
+
magicNumber = self.fhandle.read(2)
order = self.fhandle.read(1)
- if not order == '\x01':
+ if not order == '\x01':
sys.stderr.write("This is not in SNP major order - you did not handle this case\n")
raise StopIteration
-
+
return self
-
- def __iter__(self): return self.getSNPIterator()
-
+
+ def __iter__(self):
+ return self.getSNPIterator()
+
def next(self):
- if self.have_read == self.numSNPs: raise StopIteration
+ if self.have_read == self.numSNPs:
+ raise StopIteration
X = self.fhandle.read(self.BytestoRead)
XX = [bin(ord(x)) for x in struct.unpack(self._formatStr,X)]
self.have_read += 1
return self.formatBinaryGenotypes(XX,self.normGenotype),self.snpFileHandle.readline().strip().split()[1]
-
+
def formatBinaryGenotypes(self,X,norm=True):
D = { \
'00': 0.0, \
@@ -101,41 +106,45 @@ class plink:
'11': 1.0, \
'01': np.nan \
}
-
+
D_tped = { \
'00': '1 1', \
'10': '1 2', \
'11': '2 2', \
'01': '0 0' \
}
-
+
#D = D_tped
-
+
G = []
for x in X:
if not len(x) == 10:
xx = x[2:]
x = '0b' + '0'*(8 - len(xx)) + xx
- a,b,c,d = (x[8:],x[6:8],x[4:6],x[2:4])
+ a,b,c,d = (x[8:],x[6:8],x[4:6],x[2:4])
L = [D[y] for y in [a,b,c,d]]
G += L
# only take the leading values because whatever is left should be null
G = G[:self.N]
G = np.array(G)
- if norm: G = self.normalizeGenotype(G)
+ if norm:
+ G = self.normalizeGenotype(G)
return G
-
+
def normalizeGenotype(self,G):
x = True - np.isnan(G)
m = G[x].mean()
s = np.sqrt(G[x].var())
G[np.isnan(G)] = m
- G = (G - m) / s
+ if s == 0: G = G - m
+ else: G = (G - m) / s
+
return G
-
+
def getPhenos(self,phenoFile=None):
- if not phenoFile: self.phenoFile = phenoFile = self.fbase+".phenos"
- if not os.path.isfile(phenoFile):
+ if not phenoFile:
+ self.phenoFile = phenoFile = self.fbase+".phenos"
+ if not os.path.isfile(phenoFile):
sys.stderr.write("Could not find phenotype file: %s\n" % (phenoFile))
return
f = open(phenoFile,'r')
@@ -147,81 +156,86 @@ class plink:
P.append([(x == 'NA' or x == '-9') and np.nan or float(x) for x in v[2:]])
f.close()
P = np.array(P)
-
+
# reorder to match self.indivs
D = {}
L = []
for i in range(len(keys)): D[keys[i]] = i
for i in range(len(self.indivs)):
- if not D.has_key(self.indivs[i]): continue
+ if not D.has_key(self.indivs[i]):
+ continue
L.append(D[self.indivs[i]])
P = P[L,:]
-
+
self.phenos = P
return P
-
+
def getIndivs(self,base,type='b'):
- if type == 't': famFile = "%s.tfam" % base
- else: famFile = "%s.fam" % base
-
+ if type == 't':
+ famFile = "%s.tfam" % base
+ else:
+ famFile = "%s.fam" % base
keys = []
i = 0
f = open(famFile,'r')
for line in f:
- v = line.strip().split()
- famId = v[0]
- indivId = v[1]
- k = (famId.strip(),indivId.strip())
- keys.append(k)
- i += 1
+ v = line.strip().split()
+ famId = v[0]
+ indivId = v[1]
+ k = (famId.strip(),indivId.strip())
+ keys.append(k)
+ i += 1
f.close()
-
+
self.N = len(keys)
sys.stderr.write("Read %d individuals from %s\n" % (self.N, famFile))
-
+
return keys
-
+
def readKinship(self,kFile):
# Assume the fastLMM style
# This will read in the kinship matrix and then reorder it
- # according to self.indivs - additionally throwing out individuals
+ # according to self.indivs - additionally throwing out individuals
# that are not in both sets
if self.indivs == None or len(self.indivs) == 0:
sys.stderr.write("Did not read any individuals so can't load kinship\n")
- return
-
+ return
+
sys.stderr.write("Reading kinship matrix from %s\n" % (kFile) )
-
+
f = open(kFile,'r')
- # read indivs
+ # read indivs
v = f.readline().strip().split("\t")[1:]
keys = [tuple(y.split()) for y in v]
D = {}
for i in range(len(keys)): D[keys[i]] = i
-
+
# read matrix
K = []
- for line in f: K.append([float(x) for x in line.strip().split("\t")[1:]])
+ for line in f:
+ K.append([float(x) for x in line.strip().split("\t")[1:]])
f.close()
K = np.array(K)
-
+
# reorder to match self.indivs
L = []
KK = []
X = []
for i in range(len(self.indivs)):
- if not D.has_key(self.indivs[i]): X.append(self.indivs[i])
- else:
+ if not D.has_key(self.indivs[i]):
+ X.append(self.indivs[i])
+ else:
KK.append(self.indivs[i])
L.append(D[self.indivs[i]])
K = K[L,:][:,L]
self.indivs = KK
self.indivs_removed = X
- if len(self.indivs_removed): sys.stderr.write("Removed %d individuals that did not appear in Kinship\n" % (len(self.indivs_removed)))
- return K
-
+ if len(self.indivs_removed):
+ sys.stderr.write("Removed %d individuals that did not appear in Kinship\n" % (len(self.indivs_removed)))
+ return K
+
def getCovariates(self,covFile=None):
- if not os.path.isfile(covFile):
+ if not os.path.isfile(covFile):
sys.stderr.write("Could not find covariate file: %s\n" % (phenoFile))
return
f = open(covFile,'r')
@@ -233,14 +247,15 @@ class plink:
P.append([x == 'NA' and np.nan or float(x) for x in v[2:]])
f.close()
P = np.array(P)
-
+
# reorder to match self.indivs
D = {}
L = []
- for i in range(len(keys)): D[keys[i]] = i
+ for i in range(len(keys)):
+ D[keys[i]] = i
for i in range(len(self.indivs)):
- if not D.has_key(self.indivs[i]): continue
+ if not D.has_key(self.indivs[i]): continue
L.append(D[self.indivs[i]])
P = P[L,:]
-
+
+    return P
\ No newline at end of file
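The reworked normalizeGenotype() above imputes missing calls with the mean and, when the variance is zero (a monomorphic SNP), only centers the vector instead of dividing by zero. A small self-contained numpy sketch of that behaviour (function name chosen here for illustration):

import numpy as np

def normalize_genotype(G):
    G = np.array(G, dtype=float)
    observed = ~np.isnan(G)
    m = G[observed].mean()
    s = np.sqrt(G[observed].var())
    G[np.isnan(G)] = m            # impute missing genotypes with the mean
    if s == 0:
        return G - m              # zero variance: center only
    return (G - m) / s

print(normalize_genotype([0.0, 1.0, np.nan, 1.0]))
print(normalize_genotype([1.0, 1.0, np.nan]))    # monomorphic, zero-variance case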
diff --git a/wqflask/wqflask/my_pylmm/pyLMM/lmm.py b/wqflask/wqflask/my_pylmm/pyLMM/lmm.py
index 1e689e49..7ed0f3e5 100644
--- a/wqflask/wqflask/my_pylmm/pyLMM/lmm.py
+++ b/wqflask/wqflask/my_pylmm/pyLMM/lmm.py
@@ -26,46 +26,51 @@ from scipy import stats
from pprint import pformat as pf
-from utility.benchmark import Bench
-
-#np.seterr('raise')
-
-def run(pheno_vector,
- genotype_matrix,
- restricted_max_likelihood=True,
- refit=False,
- temp_data=None):
- """Takes the phenotype vector and genotype matrix and returns a set of p-values and t-statistics
-
- restricted_max_likelihood -- whether to use restricted max likelihood; True or False
- refit -- whether to refit the variance component for each marker
- temp_data -- TempData object that stores the progress for each major step of the
- calculations ("calculate_kinship" and "GWAS" take the majority of time)
-
- """
-
- with Bench("Calculate Kinship"):
- kinship_matrix = calculate_kinship(genotype_matrix, temp_data)
-
- with Bench("Create LMM object"):
- lmm_ob = LMM(pheno_vector, kinship_matrix)
-
- with Bench("LMM_ob fitting"):
- lmm_ob.fit()
-
- with Bench("Doing GWAS"):
- t_stats, p_values = GWAS(pheno_vector,
- genotype_matrix,
- kinship_matrix,
- restricted_max_likelihood=True,
- refit=False,
- temp_data=temp_data)
- Bench().report()
- return t_stats, p_values
+#from utility.benchmark import Bench
+#
+##np.seterr('raise')
+#
+#def run(pheno_vector,
+# genotype_matrix,
+# restricted_max_likelihood=True,
+# refit=False,
+# temp_data=None):
+# """Takes the phenotype vector and genotype matrix and returns a set of p-values and t-statistics
+#
+# restricted_max_likelihood -- whether to use restricted max likelihood; True or False
+# refit -- whether to refit the variance component for each marker
+# temp_data -- TempData object that stores the progress for each major step of the
+# calculations ("calculate_kinship" and "GWAS" take the majority of time)
+#
+# """
+#
+# with Bench("Calculate Kinship"):
+# kinship_matrix = calculate_kinship(genotype_matrix, temp_data)
+#
+# with Bench("Create LMM object"):
+# lmm_ob = LMM(pheno_vector, kinship_matrix)
+#
+# with Bench("LMM_ob fitting"):
+# lmm_ob.fit()
+#
+# with Bench("Doing GWAS"):
+# t_stats, p_values = GWAS(pheno_vector,
+# genotype_matrix,
+# kinship_matrix,
+# restricted_max_likelihood=True,
+# refit=False,
+# temp_data=temp_data)
+# Bench().report()
+# return t_stats, p_values
def matrixMult(A,B):
- #return np.dot(A,B)
+
+ # If there is no fblas then we will revert to np.dot()
+ try:
+ linalg.fblas
+ except AttributeError:
+ return np.dot(A,B)
print("A is:", pf(A.shape))
print("B is:", pf(B.shape))
diff --git a/wqflask/wqflask/my_pylmm/pylmmKinship.py b/wqflask/wqflask/my_pylmm/pylmmKinship.py
index db1449a4..cfba2936 100644
--- a/wqflask/wqflask/my_pylmm/pylmmKinship.py
+++ b/wqflask/wqflask/my_pylmm/pylmmKinship.py
@@ -3,19 +3,25 @@
# pylmm is a python-based linear mixed-model solver with applications to GWAS
# Copyright (C) 2013 Nicholas A. Furlotte (nick.furlotte@gmail.com)
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#The program is free for academic use. Please contact Nick Furlotte
+#<nick.furlotte@gmail.com> if you are interested in using the software for
+#commercial purposes.
+
+#The software must not be modified and distributed without prior
+#permission of the author.
+
+#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+#CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+#EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+#PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+#PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+#NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from optparse import OptionParser,OptionGroup
usage = """usage: %prog [options] --[t | b | p]file plinkFileBase outfile
@@ -28,38 +34,25 @@ parser = OptionParser(usage=usage)
basicGroup = OptionGroup(parser, "Basic Options")
#advancedGroup = OptionGroup(parser, "Advanced Options")
-basicGroup.add_option("--pfile",
- dest="pfile",
- help="The base for a PLINK ped file")
-basicGroup.add_option("--tfile",
- dest="tfile",
- help="The base for a PLINK tped file")
-basicGroup.add_option("--bfile",
- dest="bfile",
- help="The base for a PLINK binary ped file")
-
-basicGroup.add_option("-e",
- "--efile",
- dest="saveEig",
- help="Save eigendecomposition to this file.")
-basicGroup.add_option("-n",
- default=1000,
- dest="computeSize",
- type="int",
- help="""The maximum number of SNPs to read into memory at once (default 1000).
- This is important when there is a large number of SNPs, because memory could
- be an issue.""")
+basicGroup.add_option("--pfile", dest="pfile",
+ help="The base for a PLINK ped file")
+basicGroup.add_option("--tfile", dest="tfile",
+ help="The base for a PLINK tped file")
+basicGroup.add_option("--bfile", dest="bfile",
+ help="The base for a PLINK binary ped file")
+
+basicGroup.add_option("-e", "--efile", dest="saveEig", help="Save eigendecomposition to this file.")
+basicGroup.add_option("-n", default=1000,dest="computeSize", type="int", help="The maximum number of SNPs to read into memory at once (default 1000). This is important when there is a large number of SNPs, because memory could be an issue.")
basicGroup.add_option("-v", "--verbose",
- action="store_true", dest="verbose", default=False,
- help="Print extra info")
+ action="store_true", dest="verbose", default=False,
+ help="Print extra info")
parser.add_option_group(basicGroup)
#parser.add_option_group(advancedGroup)
(options, args) = parser.parse_args()
-if len(args) != 1:
- parser.error("Incorrect number of arguments")
+if len(args) != 1: parser.error("Incorrect number of arguments")
outFile = args[0]
import sys
@@ -72,16 +65,11 @@ from pyLMM import input
if not options.pfile and not options.tfile and not options.bfile:
parser.error("You must provide at least one PLINK input file base")
-if options.verbose:
- sys.stderr.write("Reading PLINK input...\n")
-if options.bfile:
- IN = input.plink(options.bfile,type='b')
-elif options.tfile:
- IN = input.plink(options.tfile,type='t')
-elif options.pfile:
- IN = input.plink(options.pfile,type='p')
-else:
- parser.error("You must provide at least one PLINK input file base")
+if options.verbose: sys.stderr.write("Reading PLINK input...\n")
+if options.bfile: IN = input.plink(options.bfile,type='b')
+elif options.tfile: IN = input.plink(options.tfile,type='t')
+elif options.pfile: IN = input.plink(options.pfile,type='p')
+else: parser.error("You must provide at least one PLINK input file base")
n = len(IN.indivs)
m = options.computeSize
@@ -98,32 +86,34 @@ while i < IN.numSNPs:
i += 1
continue
W[:,j] = snp
-
+
i += 1
j += 1
- if j < options.computeSize:
- W = W[:,range(0,j)]
+ if j < options.computeSize: W = W[:,range(0,j)]
if options.verbose:
sys.stderr.write("Processing first %d SNPs\n" % i)
- if K == None:
- K = linalg.fblas.dgemm(alpha=1.,a=W.T,b=W.T,trans_a=True,trans_b=False) # calculate_kinship(W) * j
- #if K == None:
- # K = np.dot(W,W.T) # calculate_kinship(W) * j
+ if K == None:
+ try:
+ K = linalg.fblas.dgemm(alpha=1.,a=W.T,b=W.T,trans_a=True,trans_b=False) # calculateKinship(W) * j
+ except AttributeError:
+ K = np.dot(W,W.T)
else:
- K_j = linalg.fblas.dgemm(alpha=1.,a=W.T,b=W.T,trans_a=True,trans_b=False) # calculate_kinship(W) * j
+ try:
+ K_j = linalg.fblas.dgemm(alpha=1.,a=W.T,b=W.T,trans_a=True,trans_b=False) # calculateKinship(W) * j
+ except AttributeError:
+ K_j = np.dot(W,W.T)
K = K + K_j
K = K / float(IN.numSNPs)
-if options.verbose:
- sys.stderr.write("Saving Kinship file to %s\n" % outFile)
+if options.verbose: sys.stderr.write("Saving Kinship file to %s\n" % outFile)
np.savetxt(outFile,K)
if options.saveEig:
if options.verbose:
sys.stderr.write("Obtaining Eigendecomposition\n")
- Kva, Kve = linalg.eigh(K)
+ Kva,Kve = linalg.eigh(K)
if options.verbose:
sys.stderr.write("Saving eigendecomposition to %s.[kva | kve]\n" % outFile)
np.savetxt(outFile+".kva",Kva)
-    np.savetxt(outFile+".kve",Kve)
\ No newline at end of file
+ np.savetxt(outFile+".kve",Kve)
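The kinship loop above accumulates W times its transpose over blocks of computeSize SNPs, preferring fblas.dgemm and falling back to np.dot() when fblas is unavailable, then divides by the total SNP count. A compact np.dot-only sketch of that blocked accumulation (all names here are illustrative):

import numpy as np

def blocked_kinship(snp_iter, n_indivs, n_snps, compute_size=1000):
    K = np.zeros((n_indivs, n_indivs))
    block = []
    for snp in snp_iter:                  # each snp is a length-n_indivs vector
        block.append(snp)
        if len(block) == compute_size:
            W = np.array(block).T         # n_indivs x compute_size
            K += np.dot(W, W.T)
            block = []
    if block:                             # leftover partial block
        W = np.array(block).T
        K += np.dot(W, W.T)
    return K / float(n_snps)

snps = [np.random.randn(10) for _ in range(25)]
print(blocked_kinship(iter(snps), n_indivs=10, n_snps=25, compute_size=8).shape)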
diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py
index 2d39a711..e1d81f37 100644
--- a/wqflask/wqflask/search_results.py
+++ b/wqflask/wqflask/search_results.py
@@ -104,15 +104,27 @@ class SearchResultPage():
self.search_terms = parser.parse(self.search_terms)
print("After parsing:", self.search_terms)
- for a_search in self.search_terms:
- search_term = a_search['search_term']
- #Do mRNA assay search
- search_ob = do_search.DoSearch.get_search("quick_mrna_assay")
+ search_types = ["quick_phenotype", "quick_mrna_assay"]
+
+ for search_category in search_types:
+ search_ob = do_search.DoSearch.get_search(search_category)
search_class = getattr(do_search, search_ob)
- the_search = search_class(search_term)
-
- self.results.extend(the_search.run())
- print("in the search results are:", self.results)
+ for a_search in self.search_terms:
+ search_term = a_search['search_term']
+ the_search = search_class(search_term)
+ self.results.extend(the_search.run())
+ print("in the search results are:", self.results)
+
+ #for a_search in self.search_terms:
+ # search_term = a_search['search_term']
+ #
+ # #Do mRNA assay search
+ # search_ob = do_search.DoSearch.get_search("quick_mrna_assay")
+ # search_class = getattr(do_search, search_ob)
+ # the_search = search_class(search_term)
+ #
+ # self.results.extend(the_search.run())
+ # print("in the search results are:", self.results)
#return True
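The rewritten loop above runs each quick search type against every parsed search term. Classes are resolved by name: DoSearch.search_types maps a type key to a class-name string, get_search() returns that string, and getattr() on the do_search module turns it into the class. A self-contained sketch of that registry pattern (the stub classes stand in for the real ones in do_search.py):

import sys

class DoSearch(object):
    search_types = {}

    @classmethod
    def get_search(cls, search_type):
        return cls.search_types[search_type]

class QuickPhenotypeSearch(DoSearch):
    DoSearch.search_types['quick_phenotype'] = "QuickPhenotypeSearch"

class QuickMrnaAssaySearch(DoSearch):
    DoSearch.search_types['quick_mrna_assay'] = "QuickMrnaAssaySearch"

do_search = sys.modules[__name__]     # stand-in for the real do_search module

for search_category in ["quick_phenotype", "quick_mrna_assay"]:
    search_ob = DoSearch.get_search(search_category)
    search_class = getattr(do_search, search_ob)
    print(search_class.__name__)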