Diffstat (limited to 'wqflask')
-rwxr-xr-x  wqflask/base/data_set.py                                  5
-rwxr-xr-x  wqflask/base/webqtlConfig.py                              1
-rw-r--r--  wqflask/wqflask/do_search.py                            144
-rwxr-xr-x  wqflask/wqflask/marker_regression/marker_regression.py  152
-rw-r--r--  wqflask/wqflask/my_pylmm/pyLMM/input.py                   8
-rw-r--r--  wqflask/wqflask/my_pylmm/pyLMM/lmm.py                   544
-rw-r--r--  wqflask/wqflask/my_pylmm/pylmmGWAS.py                    45
-rw-r--r--  wqflask/wqflask/search_results.py                        21
-rwxr-xr-x  wqflask/wqflask/show_trait/show_trait.py                110
9 files changed, 565 insertions, 465 deletions
diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py
index 71efc9b2..17881e53 100755
--- a/wqflask/base/data_set.py
+++ b/wqflask/base/data_set.py
@@ -323,6 +323,11 @@ class PhenotypeDataSet(DataSet):
description = this_trait.pre_publication_description
this_trait.description_display = description
+ try:
+ this_trait.description_display.decode('ascii')
+ except Exception:
+ this_trait.description_display = this_trait.description_display.decode('utf-8')
+
if not this_trait.year.isdigit():
this_trait.pubmed_text = "N/A"
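
The hunk above tolerates non-ASCII phenotype descriptions by trying an ASCII decode first and falling back to UTF-8. A minimal standalone sketch of the same idiom, assuming Python 2 byte strings as in this codebase (the function name and sample value are illustrative, not part of the patch):

    def display_description(description):
        # Mirrors the try/except above: leave pure-ASCII strings alone,
        # otherwise assume the bytes are UTF-8 and decode them.
        try:
            description.decode('ascii')
        except Exception:
            description = description.decode('utf-8')
        return description

    desc = display_description('corticosterone level (\xc2\xb5g/dl)')  # UTF-8 encoded micro sign
    print(repr(desc))
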
diff --git a/wqflask/base/webqtlConfig.py b/wqflask/base/webqtlConfig.py
index d05fa6e0..1845c749 100755
--- a/wqflask/base/webqtlConfig.py
+++ b/wqflask/base/webqtlConfig.py
@@ -52,6 +52,7 @@ ENSEMBLETRANSCRIPT_URL="http://useast.ensembl.org/Mus_musculus/Lucene/Details?sp
SECUREDIR = GNROOT + 'secure/'
COMMON_LIB = GNROOT + 'support/admin'
HTMLPATH = GNROOT + 'web/'
+PYLMM_PATH = HTMLPATH + 'plink/'
IMGDIR = HTMLPATH +'image/'
IMAGESPATH = HTMLPATH + 'images/'
UPLOADPATH = IMAGESPATH + 'upload/'
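
The new PYLMM_PATH points at the directory holding the plink-formatted genotype and pre-computed kinship files; gen_data() in marker_regression.py below joins it with the dataset group name to build the file base. A small usage sketch (the concrete path and group name are hypothetical, not taken from the config):

    import os

    PYLMM_PATH = '/gn/web/plink/'          # hypothetical value; built above as HTMLPATH + 'plink/'
    group_name = 'BXD'                     # hypothetical dataset group

    file_base = os.path.join(PYLMM_PATH, group_name)
    kin_file = file_base + '.kin'          # kinship matrix, read below with np.fromfile
    print(kin_file)
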
diff --git a/wqflask/wqflask/do_search.py b/wqflask/wqflask/do_search.py
index 4ba35d63..fc65eb49 100644
--- a/wqflask/wqflask/do_search.py
+++ b/wqflask/wqflask/do_search.py
@@ -13,7 +13,6 @@ sys.path.append("..")
from dbFunction import webqtlDatabaseFunction
-from utility.benchmark import Bench
class DoSearch(object):
"""Parent class containing parameters/functions used for all searches"""
@@ -64,25 +63,17 @@ class DoSearch(object):
class QuickMrnaAssaySearch(DoSearch):
"""A general search for mRNA assays"""
-
+
DoSearch.search_types['quick_mrna_assay'] = "QuickMrnaAssaySearch"
-
- base_query = """SELECT Species.Name as Species_Name,
- ProbeSetFreeze.Name as DataSet_Name,
- ProbeSetFreeze.FullName as DataSet_FullName,
- ProbeSet.Name as ProbeSet_Name,
+
+ base_query = """SELECT ProbeSet.Name as ProbeSet_Name,
ProbeSet.Symbol as ProbeSet_Symbol,
ProbeSet.description as ProbeSet_Description,
ProbeSet.Chr_num as ProbeSet_Chr_Num,
ProbeSet.Mb as ProbeSet_Mb,
ProbeSet.name_num as ProbeSet_name_num
- FROM ProbeSet,
- ProbeSetXRef,
- ProbeSetFreeze,
- ProbeFreeze,
- InbredSet,
- Species """
-
+ FROM ProbeSet """
+
header_fields = ['',
'Record ID',
'Symbol',
@@ -96,12 +87,7 @@ class QuickMrnaAssaySearch(DoSearch):
ProbeSet.description,
ProbeSet.symbol,
ProbeSet.alias)
- AGAINST ('%s' IN BOOLEAN MODE)) and
- ProbeSet.Id = ProbeSetXRef.ProbeSetId and
- ProbeSetXRef.ProbeSetFreezeId = ProbeSetFreeze.Id and
- ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id and
- ProbeFreeze.InbredSetId = InbredSet.Id and
- InbredSet.SpeciesId = Species.Id
+ AGAINST ('%s' IN BOOLEAN MODE))
""" % (escape(self.search_term[0]))
print("final query is:", pf(query))
@@ -172,7 +158,7 @@ class MrnaAssaySearch(DoSearch):
print("final query is:", pf(query))
return self.execute(query)
-
+
class PhenotypeSearch(DoSearch):
"""A search within a phenotype dataset"""
@@ -204,23 +190,6 @@ class PhenotypeSearch(DoSearch):
'Max LRS',
'Max LRS Location']
- #def get_fields_clause(self):
- # """Generate clause for WHERE portion of query"""
- #
- # #Todo: Zach will figure out exactly what both these lines mean
- # #and comment here
- # if "'" not in self.search_term[0]:
- # search_term = "[[:<:]]" + self.search_term[0] + "[[:>:]]"
- #
- # # This adds a clause to the query that matches the search term
- # # against each field in the search_fields tuple
- # fields_clause = []
- # for field in self.search_fields:
- # fields_clause.append('''%s REGEXP "%s"''' % (field, search_term))
- # fields_clause = "(%s) and " % ' OR '.join(fields_clause)
- #
- # return fields_clause
-
def get_fields_clause(self):
"""Generate clause for WHERE portion of query"""
@@ -231,13 +200,13 @@ class PhenotypeSearch(DoSearch):
# This adds a clause to the query that matches the search term
# against each field in the search_fields tuple
- fields_clause = "MATCH("
- fields_clause += ",".join(self.search_fields) + ") "
- fields_clause += "AGAINST('{}' IN BOOLEAN MODE)".format(self.search_term[0])
+ fields_clause = []
+ for field in self.search_fields:
+ fields_clause.append('''%s REGEXP "%s"''' % (field, search_term))
+ fields_clause = "(%s) and " % ' OR '.join(fields_clause)
return fields_clause
-
def compile_final_query(self, from_clause = '', where_clause = ''):
"""Generates the final query string"""
@@ -265,61 +234,56 @@ class PhenotypeSearch(DoSearch):
query = self.compile_final_query(where_clause = self.get_fields_clause())
return self.execute(query)
-
-
-class QuickPhenotypeSearch(PhenotypeSearch):
- """A search across all phenotype datasets"""
-
- DoSearch.search_types['quick_phenotype'] = "QuickPhenotypeSearch"
-
- base_query = """SELECT Species.Name as Species_Name,
- PublishFreeze.FullName as Dataset_Name,
- PublishFreeze.Name,
- PublishXRef.Id,
- PublishFreeze.createtime as thistable,
- Publication.PubMed_ID as Publication_PubMed_ID,
- Phenotype.Post_publication_description as Phenotype_Name
- FROM Phenotype,
- PublishFreeze,
- Publication,
- PublishXRef,
- InbredSet,
- Species """
-
- search_fields = ('Phenotype.Post_publication_description',
- 'Phenotype.Pre_publication_description',
- 'Phenotype.Pre_publication_abbreviation',
- 'Phenotype.Post_publication_abbreviation',
- 'Phenotype.Lab_code',
- 'Publication.PubMed_ID',
- 'Publication.Abstract',
- 'Publication.Title',
- 'Publication.Authors')
-
- def compile_final_query(self, where_clause = ''):
- """Generates the final query string"""
- query = (self.base_query +
- """WHERE (%s) and
- PublishXRef.PhenotypeId = Phenotype.Id and
- PublishXRef.PublicationId = Publication.Id and
- PublishXRef.InbredSetId = InbredSet.Id and
- InbredSet.SpeciesId = Species.Id""" % where_clause)
-
- print("query is:", pf(query))
-
- return query
+#class QuickPhenotypeSearch(PhenotypeSearch):
+# """A search across all phenotype datasets"""
+#
+# DoSearch.search_types['quick_phenotype'] = "QuickPhenotypeSearch"
+#
+# base_query = """SELECT Species.Name as Species_Name,
+# PublishFreeze.FullName as Dataset_Name,
+# PublishFreeze.Name,
+# PublishXRef.Id,
+# PublishFreeze.createtime as thistable,
+# Publication.PubMed_ID as Publication_PubMed_ID,
+# Phenotype.Post_publication_description as Phenotype_Name
+# FROM Phenotype,
+# PublishFreeze,
+# Publication,
+# PublishXRef,
+# InbredSet,
+# Species """
+#
+# search_fields = ('Phenotype.Post_publication_description',
+# 'Phenotype.Pre_publication_description',
+# 'Phenotype.Pre_publication_abbreviation',
+# 'Phenotype.Post_publication_abbreviation',
+# 'Phenotype.Lab_code',
+# 'Publication.PubMed_ID',
+# 'Publication.Abstract',
+# 'Publication.Title',
+# 'Publication.Authors')
+#
+# def compile_final_query(self, where_clause = ''):
+# """Generates the final query string"""
+#
+# query = (self.base_query +
+# """WHERE %s
+# PublishXRef.PhenotypeId = Phenotype.Id and
+# PublishXRef.PublicationId = Publication.Id and
+# PublishXRef.InbredSetId = InbredSet.Id and
+# InbredSet.SpeciesId = Species.Id""" % where_clause)
+#
+# print("query is:", pf(query))
+#
+# return query
def run(self):
"""Generates and runs a search across all phenotype datasets"""
query = self.compile_final_query(where_clause = self.get_fields_clause())
- with Bench("Doing quick phenotype search"):
- results = self.execute(query)
-
- return results
-
+ return self.execute(query)
class GenotypeSearch(DoSearch):
"""A search within a genotype dataset"""
diff --git a/wqflask/wqflask/marker_regression/marker_regression.py b/wqflask/wqflask/marker_regression/marker_regression.py
index 6c85afe9..c3555e8f 100755
--- a/wqflask/wqflask/marker_regression/marker_regression.py
+++ b/wqflask/wqflask/marker_regression/marker_regression.py
@@ -6,23 +6,30 @@ from base import data_set #import create_dataset
from pprint import pformat as pf
import string
+import sys
import os
import collections
import numpy as np
+from scipy import linalg
#from redis import Redis
-from utility import Plot, Bunch
+
from base.trait import GeneralTrait
from base import data_set
from base import species
-from utility import helper_functions
from base import webqtlConfig
from wqflask.my_pylmm.data import prep_data
from wqflask.my_pylmm.pyLMM import lmm
+from wqflask.my_pylmm.pyLMM import input
+from utility import helper_functions
+from utility import Plot, Bunch
from utility import temp_data
+from utility.benchmark import Bench
+
+
class MarkerRegression(object):
def __init__(self, start_vars, temp_uuid):
@@ -52,28 +59,135 @@ class MarkerRegression(object):
)
+
def gen_data(self, tempdata):
"""Generates p-values for each marker"""
- genotype_data = [marker['genotypes'] for marker in self.dataset.group.markers.markers]
-
- no_val_samples = self.identify_empty_samples()
- trimmed_genotype_data = self.trim_genotypes(genotype_data, no_val_samples)
-
- pheno_vector = np.array([float(val) for val in self.vals if val!="x"])
- genotype_matrix = np.array(trimmed_genotype_data).T
- print("pheno_vector is: ", pf(pheno_vector))
- print("genotype_matrix is: ", pf(genotype_matrix))
+ file_base = os.path.join(webqtlConfig.PYLMM_PATH, self.dataset.group.name)
+
+ plink_input = input.plink(file_base, type='b')
+
+
+ pheno_vector = np.array([val == "x" and np.nan or float(val) for val in self.vals])
+ pheno_vector = pheno_vector.reshape((len(pheno_vector), 1))
+ covariate_matrix = np.ones((pheno_vector.shape[0],1))
+ kinship_matrix = np.fromfile(open(file_base + '.kin','r'),sep=" ")
+ kinship_matrix.resize((len(plink_input.indivs),len(plink_input.indivs)))
+
+ refit = False
+
+ v = np.isnan(pheno_vector)
+ keep = True - v
+ keep = keep.reshape((len(keep),))
+ eigen_values = []
+ eigen_vectors = []
+
+
+ print("pheno_vector shape is: ", pf(pheno_vector.shape))
+
+ #print("pheno_vector is: ", pf(pheno_vector))
+ #print("kinship_matrix is: ", pf(kinship_matrix))
+
+ if v.sum():
+ pheno_vector = pheno_vector[keep]
+ print("pheno_vector shape is now: ", pf(pheno_vector.shape))
+ covariate_matrix = covariate_matrix[keep,:]
+ print("kinship_matrix shape is: ", pf(kinship_matrix.shape))
+ print("len(keep) is: ", pf(keep.shape))
+ kinship_matrix = kinship_matrix[keep,:][:,keep]
+
+ #if not v.sum():
+ # eigen_values = np.fromfile(file_base + ".kin.kva")
+ # eigen_vectors = np.fromfile(file_base + ".kin.kve")
+
+ #print("eigen_values is: ", pf(eigen_values))
+ #print("eigen_vectors is: ", pf(eigen_vectors))
+
+ n = kinship_matrix.shape[0]
+ lmm_ob = lmm.LMM(pheno_vector,
+ kinship_matrix,
+ eigen_values,
+ eigen_vectors,
+ covariate_matrix)
+ lmm_ob.fit()
+
+ # Buffers for pvalues and t-stats
+ p_values = []
+ t_statistics = []
+ count = 0
+
+ plink_input.getSNPIterator()
+ print("# snps is: ", pf(plink_input.numSNPs))
+ with Bench("snp iterator loop"):
+ for snp, this_id in plink_input:
+ #if count > 10000:
+ # break
+ count += 1
+
+ x = snp[keep].reshape((n,1))
+ #x[[1,50,100,200,3000],:] = np.nan
+ v = np.isnan(x).reshape((-1,))
+
+ # Check SNPs for missing values
+ if v.sum():
+ keeps = True - v
+ xs = x[keeps,:]
+ # If no variation at this snp or all genotypes missing
+ if keeps.sum() <= 1 or xs.var() <= 1e-6:
+ p_values.append(np.nan)
+ t_statistics.append(np.nan)
+ continue
+
+                    # It's ok to center the genotype - I used options.normalizeGenotype to
+ # force the removal of missing genotypes as opposed to replacing them with MAF.
+
+ #if not options.normalizeGenotype:
+ # xs = (xs - xs.mean()) / np.sqrt(xs.var())
+
+ filtered_pheno = pheno_vector[keeps]
+ filtered_covariate_matrix = covariate_matrix[keeps,:]
+ filtered_kinship_matrix = kinship_matrix[keeps,:][:,keeps]
+ filtered_lmm_ob = lmm.LMM(filtered_pheno,filtered_kinship_matrix,X0=filtered_covariate_matrix)
+ if refit:
+ filtered_lmm_ob.fit(X=xs)
+ else:
+ #try:
+ filtered_lmm_ob.fit()
+ #except: pdb.set_trace()
+                    ts,ps,beta,betaVar = filtered_lmm_ob.association(xs,returnBeta=True)
+ else:
+ if x.var() == 0:
+ p_values.append(np.nan)
+ t_statistics.append(np.nan)
+ continue
+
+ if refit:
+ lmm_ob.fit(X=x)
+ ts,ps,beta,betaVar = lmm_ob.association(x)
+ p_values.append(ps)
+ t_statistics.append(ts)
+
- t_stats, p_values = lmm.run(
- pheno_vector,
- genotype_matrix,
- restricted_max_likelihood=True,
- refit=False,
- temp_data=tempdata
- )
+ #genotype_data = [marker['genotypes'] for marker in self.dataset.group.markers.markers]
+ #
+ #no_val_samples = self.identify_empty_samples()
+ #trimmed_genotype_data = self.trim_genotypes(genotype_data, no_val_samples)
+ #
+ #genotype_matrix = np.array(trimmed_genotype_data).T
+ #
+ #print("pheno_vector is: ", pf(pheno_vector))
+ #print("genotype_matrix is: ", pf(genotype_matrix))
+ #
+ #t_stats, p_values = lmm.run(
+ # pheno_vector,
+ # genotype_matrix,
+ # restricted_max_likelihood=True,
+ # refit=False,
+ # temp_data=tempdata
+ #)
+ print("p_values is: ", pf(p_values))
self.dataset.group.markers.add_pvalues(p_values)
#self.lrs_values = [marker['lrs_value'] for marker in self.dataset.group.markers.markers]
@@ -118,3 +232,5 @@ class MarkerRegression(object):
new_genotypes.append(genotype)
trimmed_genotype_data.append(new_genotypes)
return trimmed_genotype_data
+
+
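
The rewritten gen_data() above fits one LMM against the full kinship matrix, then walks the plink SNP iterator; SNPs with missing genotypes have the affected individuals dropped and a reduced LMM refit before the association statistics are computed. A condensed sketch of that per-SNP branch, assuming numpy arrays shaped like the ones above and the pyLMM lmm module (the function name and error handling are mine, not the patch's):

    import numpy as np
    from wqflask.my_pylmm.pyLMM import lmm

    def snp_association(snp, pheno_vector, covariate_matrix, kinship_matrix, lmm_ob, refit=False):
        """Return (t_stat, p_value) for one SNP column, or (nan, nan) if it is unusable."""
        x = snp.reshape((-1, 1))
        missing = np.isnan(x).reshape((-1,))
        if missing.sum():
            keep = ~missing                                  # drop individuals with a missing genotype
            xs = x[keep, :]
            if keep.sum() <= 1 or xs.var() <= 1e-6:          # no usable variation left at this SNP
                return np.nan, np.nan
            sub = lmm.LMM(pheno_vector[keep],
                          kinship_matrix[keep, :][:, keep],
                          X0=covariate_matrix[keep, :])
            if refit:
                sub.fit(X=xs)
            else:
                sub.fit()
            ts, ps, beta, beta_var = sub.association(xs, returnBeta=True)
        else:
            if x.var() == 0:                                 # monomorphic SNP
                return np.nan, np.nan
            if refit:
                lmm_ob.fit(X=x)
            ts, ps, beta, beta_var = lmm_ob.association(x, returnBeta=True)
        return ts, ps
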
diff --git a/wqflask/wqflask/my_pylmm/pyLMM/input.py b/wqflask/wqflask/my_pylmm/pyLMM/input.py
index b8b76fd0..35662072 100644
--- a/wqflask/wqflask/my_pylmm/pyLMM/input.py
+++ b/wqflask/wqflask/my_pylmm/pyLMM/input.py
@@ -41,7 +41,8 @@ class plink:
# the programmer to turn off the kinship reading.
self.readKFile = readKFile
- if self.kFile: self.K = self.readKinship(self.kFile)
+ if self.kFile:
+ self.K = self.readKinship(self.kFile)
elif os.path.isfile("%s.kin" % fbase):
self.kFile = "%s.kin" %fbase
if self.readKFile:
@@ -54,7 +55,7 @@ class plink:
self.fhandle = None
self.snpFileHandle = None
-
+
def __del__(self):
if self.fhandle: self.fhandle.close()
if self.snpFileHandle: self.snpFileHandle.close()
@@ -160,7 +161,8 @@ class plink:
# reorder to match self.indivs
D = {}
L = []
- for i in range(len(keys)): D[keys[i]] = i
+ for i in range(len(keys)):
+ D[keys[i]] = i
for i in range(len(self.indivs)):
if not D.has_key(self.indivs[i]):
continue
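
The reordering loop above builds an index map from individual ID to its row in the kinship file so rows can be pulled out in self.indivs order. A tiny equivalent sketch with made-up IDs (using `in` where the Python 2 code uses has_key):

    keys = ['BXD1', 'BXD2', 'BXD5']              # order of individuals in the kinship file
    indivs = ['BXD5', 'BXD1', 'BXD40']           # order the rest of the pipeline expects

    D = {}
    for i in range(len(keys)):                   # same idiom as the loop above
        D[keys[i]] = i
    L = [D[ind] for ind in indivs if ind in D]   # kinship row index per wanted individual
    print(L)                                     # [2, 0]; BXD40 is skipped, as above
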
diff --git a/wqflask/wqflask/my_pylmm/pyLMM/lmm.py b/wqflask/wqflask/my_pylmm/pyLMM/lmm.py
index 163b876a..f1f195d6 100644
--- a/wqflask/wqflask/my_pylmm/pyLMM/lmm.py
+++ b/wqflask/wqflask/my_pylmm/pyLMM/lmm.py
@@ -26,42 +26,42 @@ from scipy import stats
from pprint import pformat as pf
-from utility.benchmark import Bench
-
-#np.seterr('raise')
-
-def run(pheno_vector,
- genotype_matrix,
- restricted_max_likelihood=True,
- refit=False,
- temp_data=None):
- """Takes the phenotype vector and genotype matrix and returns a set of p-values and t-statistics
-
- restricted_max_likelihood -- whether to use restricted max likelihood; True or False
- refit -- whether to refit the variance component for each marker
- temp_data -- TempData object that stores the progress for each major step of the
- calculations ("calculate_kinship" and "GWAS" take the majority of time)
-
- """
-
- with Bench("Calculate Kinship"):
- kinship_matrix = calculate_kinship(genotype_matrix, temp_data)
-
- with Bench("Create LMM object"):
- lmm_ob = LMM(pheno_vector, kinship_matrix)
-
- with Bench("LMM_ob fitting"):
- lmm_ob.fit()
-
- with Bench("Doing GWAS"):
- t_stats, p_values = GWAS(pheno_vector,
- genotype_matrix,
- kinship_matrix,
- restricted_max_likelihood=True,
- refit=False,
- temp_data=temp_data)
- Bench().report()
- return t_stats, p_values
+#from utility.benchmark import Bench
+#
+##np.seterr('raise')
+#
+#def run(pheno_vector,
+# genotype_matrix,
+# restricted_max_likelihood=True,
+# refit=False,
+# temp_data=None):
+# """Takes the phenotype vector and genotype matrix and returns a set of p-values and t-statistics
+#
+# restricted_max_likelihood -- whether to use restricted max likelihood; True or False
+# refit -- whether to refit the variance component for each marker
+# temp_data -- TempData object that stores the progress for each major step of the
+# calculations ("calculate_kinship" and "GWAS" take the majority of time)
+#
+# """
+#
+# with Bench("Calculate Kinship"):
+# kinship_matrix = calculate_kinship(genotype_matrix, temp_data)
+#
+# with Bench("Create LMM object"):
+# lmm_ob = LMM(pheno_vector, kinship_matrix)
+#
+# with Bench("LMM_ob fitting"):
+# lmm_ob.fit()
+#
+# with Bench("Doing GWAS"):
+# t_stats, p_values = GWAS(pheno_vector,
+# genotype_matrix,
+# kinship_matrix,
+# restricted_max_likelihood=True,
+# refit=False,
+# temp_data=temp_data)
+# Bench().report()
+# return t_stats, p_values
def matrixMult(A,B):
@@ -72,8 +72,8 @@ def matrixMult(A,B):
except AttributeError:
return np.dot(A,B)
- print("A is:", pf(A.shape))
- print("B is:", pf(B.shape))
+ #print("A is:", pf(A.shape))
+ #print("B is:", pf(B.shape))
# If the matrices are in Fortran order then the computations will be faster
# when using dgemm. Otherwise, the function will copy the matrix and that takes time.
@@ -234,237 +234,245 @@ def GWAS(pheno_vector,
class LMM:
- """
- This is a simple version of EMMA/fastLMM.
- The main purpose of this module is to take a phenotype vector (Y), a set of covariates (X) and a kinship matrix (K)
- and to optimize this model by finding the maximum-likelihood estimates for the model parameters.
- There are three model parameters: heritability (h), covariate coefficients (beta) and the total
- phenotypic variance (sigma).
- Heritability as defined here is the proportion of the total variance (sigma) that is attributed to
- the kinship matrix.
-
- For simplicity, we assume that everything being input is a numpy array.
- If this is not the case, the module may throw an error as conversion from list to numpy array
- is not done consistently.
-
- """
- def __init__(self,Y,K,Kva=[],Kve=[],X0=None,verbose=False):
-
- """
- The constructor takes a phenotype vector or array of size n.
- It takes a kinship matrix of size n x n. Kva and Kve can be computed as Kva,Kve = linalg.eigh(K) and cached.
- If they are not provided, the constructor will calculate them.
- X0 is an optional covariate matrix of size n x q, where there are q covariates.
- When this parameter is not provided, the constructor will set X0 to an n x 1 matrix of all ones to represent a mean effect.
- """
-
- if X0 == None: X0 = np.ones(len(Y)).reshape(len(Y),1)
- self.verbose = verbose
-
- #x = Y != -9
- x = True - np.isnan(Y)
- if not x.sum() == len(Y):
- if self.verbose: sys.stderr.write("Removing %d missing values from Y\n" % ((True - x).sum()))
- Y = Y[x]
- K = K[x,:][:,x]
- X0 = X0[x,:]
- Kva = []
- Kve = []
- self.nonmissing = x
-
- if len(Kva) == 0 or len(Kve) == 0:
- if self.verbose: sys.stderr.write("Obtaining eigendecomposition for %dx%d matrix\n" % (K.shape[0],K.shape[1]) )
- begin = time.time()
- Kva,Kve = linalg.eigh(K)
- end = time.time()
- if self.verbose: sys.stderr.write("Total time: %0.3f\n" % (end - begin))
-
- self.K = K
- self.Kva = Kva
- self.Kve = Kve
- print("self.Kva is: ", pf(self.Kva))
- print("self.Kve is: ", pf(self.Kve))
- self.Y = Y
- self.X0 = X0
- self.N = self.K.shape[0]
-
- if sum(self.Kva < 1e-6):
- if self.verbose: sys.stderr.write("Cleaning %d eigen values\n" % (sum(self.Kva < 0)))
- self.Kva[self.Kva < 1e-6] = 1e-6
-
- self.transform()
-
- def transform(self):
-
- """
- Computes a transformation on the phenotype vector and the covariate matrix.
- The transformation is obtained by left multiplying each parameter by the transpose of the
- eigenvector matrix of K (the kinship).
- """
+ """
+ This is a simple version of EMMA/fastLMM.
+ The main purpose of this module is to take a phenotype vector (Y), a set of covariates (X) and a kinship matrix (K)
+ and to optimize this model by finding the maximum-likelihood estimates for the model parameters.
+ There are three model parameters: heritability (h), covariate coefficients (beta) and the total
+ phenotypic variance (sigma).
+ Heritability as defined here is the proportion of the total variance (sigma) that is attributed to
+ the kinship matrix.
+
+ For simplicity, we assume that everything being input is a numpy array.
+ If this is not the case, the module may throw an error as conversion from list to numpy array
+ is not done consistently.
+
+ """
+ def __init__(self,Y,K,Kva=[],Kve=[],X0=None,verbose=False):
+
+ """
+ The constructor takes a phenotype vector or array of size n.
+ It takes a kinship matrix of size n x n. Kva and Kve can be computed as Kva,Kve = linalg.eigh(K) and cached.
+ If they are not provided, the constructor will calculate them.
+ X0 is an optional covariate matrix of size n x q, where there are q covariates.
+ When this parameter is not provided, the constructor will set X0 to an n x 1 matrix of all ones to represent a mean effect.
+ """
+
+ if X0 == None: X0 = np.ones(len(Y)).reshape(len(Y),1)
+ self.verbose = verbose
+
+ #x = Y != -9
+ x = True - np.isnan(Y)
+ if not x.sum() == len(Y):
+ if self.verbose: sys.stderr.write("Removing %d missing values from Y\n" % ((True - x).sum()))
+ Y = Y[x]
+ K = K[x,:][:,x]
+ X0 = X0[x,:]
+ Kva = []
+ Kve = []
+ self.nonmissing = x
+
+ if len(Kva) == 0 or len(Kve) == 0:
+ if self.verbose: sys.stderr.write("Obtaining eigendecomposition for %dx%d matrix\n" % (K.shape[0],K.shape[1]) )
+ begin = time.time()
+ Kva,Kve = linalg.eigh(K)
+ end = time.time()
+ if self.verbose: sys.stderr.write("Total time: %0.3f\n" % (end - begin))
+
+ self.K = K
+ self.Kva = Kva
+ self.Kve = Kve
+ print("self.Kva is: ", pf(self.Kva))
+ print("self.Kve is: ", pf(self.Kve))
+ self.Y = Y
+ self.X0 = X0
+ self.N = self.K.shape[0]
+
+ if sum(self.Kva < 1e-6):
+ if self.verbose: sys.stderr.write("Cleaning %d eigen values\n" % (sum(self.Kva < 0)))
+ self.Kva[self.Kva < 1e-6] = 1e-6
+
+ self.transform()
+
+ def transform(self):
+
+ """
+ Computes a transformation on the phenotype vector and the covariate matrix.
+ The transformation is obtained by left multiplying each parameter by the transpose of the
+ eigenvector matrix of K (the kinship).
+ """
+
+ self.Yt = matrixMult(self.Kve.T, self.Y)
+ self.X0t = matrixMult(self.Kve.T, self.X0)
+ self.X0t_stack = np.hstack([self.X0t, np.ones((self.N,1))])
+ self.q = self.X0t.shape[1]
+
+ def getMLSoln(self,h,X):
+
+ """
+ Obtains the maximum-likelihood estimates for the covariate coefficients (beta),
+ the total variance of the trait (sigma) and also passes intermediates that can
+ be utilized in other functions. The input parameter h is a value between 0 and 1 and represents
+ the heritability or the proportion of the total variance attributed to genetics. The X is the
+ covariate matrix.
+ """
- self.Yt = matrixMult(self.Kve.T, self.Y)
- self.X0t = matrixMult(self.Kve.T, self.X0)
- self.X0t_stack = np.hstack([self.X0t, np.ones((self.N,1))])
- self.q = self.X0t.shape[1]
-
- def getMLSoln(self,h,X):
-
- """
- Obtains the maximum-likelihood estimates for the covariate coefficients (beta),
- the total variance of the trait (sigma) and also passes intermediates that can
- be utilized in other functions. The input parameter h is a value between 0 and 1 and represents
- the heritability or the proportion of the total variance attributed to genetics. The X is the
- covariate matrix.
- """
-
- S = 1.0/(h*self.Kva + (1.0 - h))
- Xt = X.T*S
- XX = matrixMult(Xt,X)
- XX_i = linalg.inv(XX)
- beta = matrixMult(matrixMult(XX_i,Xt),self.Yt)
- Yt = self.Yt - matrixMult(X,beta)
- Q = np.dot(Yt.T*S,Yt)
- sigma = Q * 1.0 / (float(self.N) - float(X.shape[1]))
- return beta,sigma,Q,XX_i,XX
-
- def LL_brent(self,h,X=None,REML=False):
- #brent will not be bounded by the specified bracket.
- # I return a large number if we encounter h < 0 to avoid errors in LL computation during the search.
- if h < 0: return 1e6
- return -self.LL(h,X,stack=False,REML=REML)[0]
+ S = 1.0/(h*self.Kva + (1.0 - h))
+ Xt = X.T*S
+ XX = matrixMult(Xt,X)
+ XX_i = linalg.inv(XX)
+ beta = matrixMult(matrixMult(XX_i,Xt),self.Yt)
+ Yt = self.Yt - matrixMult(X,beta)
+ Q = np.dot(Yt.T*S,Yt)
+ sigma = Q * 1.0 / (float(self.N) - float(X.shape[1]))
+ return beta,sigma,Q,XX_i,XX
+
+ def LL_brent(self,h,X=None,REML=False):
+ #brent will not be bounded by the specified bracket.
+ # I return a large number if we encounter h < 0 to avoid errors in LL computation during the search.
+ if h < 0: return 1e6
+ return -self.LL(h,X,stack=False,REML=REML)[0]
- def LL(self,h,X=None,stack=True,REML=False):
-
- """
- Computes the log-likelihood for a given heritability (h). If X==None, then the
- default X0t will be used. If X is set and stack=True, then X0t will be matrix concatenated with
- the input X. If stack is false, then X is used in place of X0t in the LL calculation.
- REML is computed by adding additional terms to the standard LL and can be computed by setting REML=True.
- """
-
- if X == None: X = self.X0t
- elif stack:
- self.X0t_stack[:,(self.q)] = matrixMult(self.Kve.T,X)[:,0]
- X = self.X0t_stack
-
- n = float(self.N)
- q = float(X.shape[1])
- beta,sigma,Q,XX_i,XX = self.getMLSoln(h,X)
- LL = n*np.log(2*np.pi) + np.log(h*self.Kva + (1.0-h)).sum() + n + n*np.log(1.0/n * Q)
- LL = -0.5 * LL
-
- if REML:
- LL_REML_part = q*np.log(2.0*np.pi*sigma) + np.log(linalg.det(matrixMult(X.T,X))) - np.log(linalg.det(XX))
- LL = LL + 0.5*LL_REML_part
-
- return LL,beta,sigma,XX_i
-
- def getMax(self,H, X=None,REML=False):
-
- """
- Helper functions for .fit(...).
- This function takes a set of LLs computed over a grid and finds possible regions
- containing a maximum. Within these regions, a Brent search is performed to find the
- optimum.
-
- """
- n = len(self.LLs)
- HOpt = []
- for i in range(1,n-2):
- if self.LLs[i-1] < self.LLs[i] and self.LLs[i] > self.LLs[i+1]:
- HOpt.append(optimize.brent(self.LL_brent,args=(X,REML),brack=(H[i-1],H[i+1])))
- if np.isnan(HOpt[-1][0]): HOpt[-1][0] = [self.LLs[i-1]]
-
- if len(HOpt) > 1:
- if self.verbose: sys.stderr.write("NOTE: Found multiple optima. Returning first...\n")
- return HOpt[0]
- elif len(HOpt) == 1: return HOpt[0]
- elif self.LLs[0] > self.LLs[n-1]: return H[0]
- else: return H[n-1]
-
- def fit(self,X=None,ngrids=100,REML=True):
-
- """
- Finds the maximum-likelihood solution for the heritability (h) given the current parameters.
-        X can be passed and will be transformed and concatenated to X0t. Otherwise, X0t is used as
- the covariate matrix.
-
- This function calculates the LLs over a grid and then uses .getMax(...) to find the optimum.
- Given this optimum, the function computes the LL and associated ML solutions.
- """
-
- if X == None: X = self.X0t
- else:
- #X = np.hstack([self.X0t,matrixMult(self.Kve.T, X)])
- self.X0t_stack[:,(self.q)] = matrixMult(self.Kve.T,X)[:,0]
- X = self.X0t_stack
-
- H = np.array(range(ngrids)) / float(ngrids)
- L = np.array([self.LL(h,X,stack=False,REML=REML)[0] for h in H])
- self.LLs = L
-
- hmax = self.getMax(H,X,REML)
- L,beta,sigma,betaSTDERR = self.LL(hmax,X,stack=False,REML=REML)
-
- self.H = H
- self.optH = hmax
- self.optLL = L
- self.optBeta = beta
- self.optSigma = sigma
-
- return hmax,beta,sigma,L
-
- def association(self,X, h = None, stack=True,REML=True, returnBeta=False):
-
- """
-        Calculates association statistics for the SNPs encoded in the vector X of size n.
- If h == None, the optimal h stored in optH is used.
-
- """
- if stack:
- #X = np.hstack([self.X0t,matrixMult(self.Kve.T, X)])
- self.X0t_stack[:,(self.q)] = matrixMult(self.Kve.T,X)[:,0]
- X = self.X0t_stack
-
- if h == None: h = self.optH
-
- L,beta,sigma,betaVAR = self.LL(h,X,stack=False,REML=REML)
- q = len(beta)
- ts,ps = self.tstat(beta[q-1],betaVAR[q-1,q-1],sigma,q)
-
- if returnBeta: return ts,ps,beta[q-1].sum(),betaVAR[q-1,q-1].sum()*sigma
- return ts,ps
-
- def tstat(self,beta,var,sigma,q):
-
+ def LL(self,h,X=None,stack=True,REML=False):
+
+ """
+ Computes the log-likelihood for a given heritability (h). If X==None, then the
+ default X0t will be used. If X is set and stack=True, then X0t will be matrix concatenated with
+ the input X. If stack is false, then X is used in place of X0t in the LL calculation.
+ REML is computed by adding additional terms to the standard LL and can be computed by setting REML=True.
+ """
+
+ if X == None: X = self.X0t
+ elif stack:
+ self.X0t_stack[:,(self.q)] = matrixMult(self.Kve.T,X)[:,0]
+ X = self.X0t_stack
+
+ n = float(self.N)
+ q = float(X.shape[1])
+ beta,sigma,Q,XX_i,XX = self.getMLSoln(h,X)
+ LL = n*np.log(2*np.pi) + np.log(h*self.Kva + (1.0-h)).sum() + n + n*np.log(1.0/n * Q)
+ LL = -0.5 * LL
+
+ if REML:
+ LL_REML_part = q*np.log(2.0*np.pi*sigma) + np.log(linalg.det(matrixMult(X.T,X))) - np.log(linalg.det(XX))
+ LL = LL + 0.5*LL_REML_part
+
+ return LL,beta,sigma,XX_i
+
+ def getMax(self,H, X=None,REML=False):
+
"""
- Calculates a t-statistic and associated p-value given the estimate of beta and its standard error.
- This is actually an F-test, but when only one hypothesis is being performed, it reduces to a t-test.
+ Helper functions for .fit(...).
+ This function takes a set of LLs computed over a grid and finds possible regions
+ containing a maximum. Within these regions, a Brent search is performed to find the
+ optimum.
+
"""
-
- ts = beta / np.sqrt(var * sigma)
- ps = 2.0*(1.0 - stats.t.cdf(np.abs(ts), self.N-q))
- if not len(ts) == 1 or not len(ps) == 1: raise Exception("Something bad happened :(")
- return ts.sum(),ps.sum()
-
- def plotFit(self,color='b-',title=''):
-
- """
- Simple function to visualize the likelihood space. It takes the LLs
-        calculated over a grid and normalizes them by subtracting off the mean and exponentiating.
- The resulting "probabilities" are normalized to one and plotted against heritability.
-        This can be seen as an approximation to the posterior distribution of heritability.
-
- For diagnostic purposes this lets you see if there is one distinct maximum or multiple
- and what the variance of the parameter looks like.
- """
- import matplotlib.pyplot as pl
-
- mx = self.LLs.max()
- p = np.exp(self.LLs - mx)
- p = p/p.sum()
-
- pl.plot(self.H,p,color)
- pl.xlabel("Heritability")
- pl.ylabel("Probability of data")
- pl.title(title) \ No newline at end of file
+ n = len(self.LLs)
+ HOpt = []
+ for i in range(1,n-2):
+ if self.LLs[i-1] < self.LLs[i] and self.LLs[i] > self.LLs[i+1]:
+ HOpt.append(optimize.brent(self.LL_brent,args=(X,REML),brack=(H[i-1],H[i+1])))
+ if np.isnan(HOpt[-1][0]):
+ HOpt[-1][0] = [self.LLs[i-1]]
+
+ if len(HOpt) > 1:
+ if self.verbose:
+ sys.stderr.write("NOTE: Found multiple optima. Returning first...\n")
+ return HOpt[0]
+ elif len(HOpt) == 1:
+ return HOpt[0]
+ elif self.LLs[0] > self.LLs[n-1]:
+ return H[0]
+ else:
+ return H[n-1]
+
+ def fit(self,X=None,ngrids=100,REML=True):
+
+ """
+ Finds the maximum-likelihood solution for the heritability (h) given the current parameters.
+        X can be passed and will be transformed and concatenated to X0t. Otherwise, X0t is used as
+ the covariate matrix.
+
+ This function calculates the LLs over a grid and then uses .getMax(...) to find the optimum.
+ Given this optimum, the function computes the LL and associated ML solutions.
+ """
+
+ if X == None:
+ X = self.X0t
+ else:
+ #X = np.hstack([self.X0t,matrixMult(self.Kve.T, X)])
+ self.X0t_stack[:,(self.q)] = matrixMult(self.Kve.T,X)[:,0]
+ X = self.X0t_stack
+
+ H = np.array(range(ngrids)) / float(ngrids)
+ L = np.array([self.LL(h,X,stack=False,REML=REML)[0] for h in H])
+ self.LLs = L
+
+ hmax = self.getMax(H,X,REML)
+ L,beta,sigma,betaSTDERR = self.LL(hmax,X,stack=False,REML=REML)
+
+ self.H = H
+ self.optH = hmax
+ self.optLL = L
+ self.optBeta = beta
+ self.optSigma = sigma
+
+ return hmax,beta,sigma,L
+
+ def association(self,X, h = None, stack=True,REML=True, returnBeta=True):
+
+ """
+        Calculates association statistics for the SNPs encoded in the vector X of size n.
+ If h == None, the optimal h stored in optH is used.
+
+ """
+ if stack:
+ #X = np.hstack([self.X0t,matrixMult(self.Kve.T, X)])
+ self.X0t_stack[:,(self.q)] = matrixMult(self.Kve.T,X)[:,0]
+ X = self.X0t_stack
+
+ if h == None:
+ h = self.optH
+
+ L,beta,sigma,betaVAR = self.LL(h,X,stack=False,REML=REML)
+ q = len(beta)
+ ts,ps = self.tstat(beta[q-1],betaVAR[q-1,q-1],sigma,q)
+
+ if returnBeta:
+ return ts,ps,beta[q-1].sum(),betaVAR[q-1,q-1].sum()*sigma
+ return ts,ps
+
+ def tstat(self,beta,var,sigma,q):
+
+ """
+ Calculates a t-statistic and associated p-value given the estimate of beta and its standard error.
+ This is actually an F-test, but when only one hypothesis is being performed, it reduces to a t-test.
+ """
+
+ ts = beta / np.sqrt(var * sigma)
+ ps = 2.0*(1.0 - stats.t.cdf(np.abs(ts), self.N-q))
+ if not len(ts) == 1 or not len(ps) == 1: raise Exception("Something bad happened :(")
+ return ts.sum(),ps.sum()
+
+ def plotFit(self,color='b-',title=''):
+
+ """
+ Simple function to visualize the likelihood space. It takes the LLs
+        calculated over a grid and normalizes them by subtracting off the mean and exponentiating.
+ The resulting "probabilities" are normalized to one and plotted against heritability.
+        This can be seen as an approximation to the posterior distribution of heritability.
+
+ For diagnostic purposes this lets you see if there is one distinct maximum or multiple
+ and what the variance of the parameter looks like.
+ """
+ import matplotlib.pyplot as pl
+
+ mx = self.LLs.max()
+ p = np.exp(self.LLs - mx)
+ p = p/p.sum()
+
+ pl.plot(self.H,p,color)
+ pl.xlabel("Heritability")
+ pl.ylabel("Probability of data")
+ pl.title(title) \ No newline at end of file
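
The re-indented LMM class evaluates the (RE)ML log-likelihood over a heritability grid, Brent-refines local maxima in getMax(), and finally converts the SNP's beta into a t-statistic in tstat(). A self-contained numeric sketch of that last step, with made-up scalar values (the real method takes 1-element arrays):

    import numpy as np
    from scipy import stats

    def tstat(beta, var, sigma, q, n):
        # Same formula as LMM.tstat: t = beta / sqrt(var * sigma),
        # two-sided p-value from a t distribution with n - q degrees of freedom.
        ts = beta / np.sqrt(var * sigma)
        ps = 2.0 * (1.0 - stats.t.cdf(np.abs(ts), n - q))
        return ts, ps

    print(tstat(beta=0.42, var=0.03, sigma=1.1, q=2, n=30))
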
diff --git a/wqflask/wqflask/my_pylmm/pylmmGWAS.py b/wqflask/wqflask/my_pylmm/pylmmGWAS.py
index 487949f0..54a230de 100644
--- a/wqflask/wqflask/my_pylmm/pylmmGWAS.py
+++ b/wqflask/wqflask/my_pylmm/pylmmGWAS.py
@@ -20,7 +20,8 @@
import pdb
import time
-def printOutHead(): out.write("\t".join(["SNP_ID","BETA","BETA_SD","F_STAT","P_VALUE"]) + "\n")
+def printOutHead():
+ out.write("\t".join(["SNP_ID","BETA","BETA_SD","F_STAT","P_VALUE"]) + "\n")
def outputResult(id,beta,betaSD,ts,ps):
out.write("\t".join([str(x) for x in [id,beta,betaSD,ts,ps]]) + "\n")
@@ -88,7 +89,8 @@ from scipy import linalg
from pylmm.lmm import LMM
from pylmm import input
-if len(args) != 1: parser.error("Incorrect number of arguments")
+if len(args) != 1:
+ parser.error("Incorrect number of arguments")
outFile = args[0]
if not options.pfile and not options.tfile and not options.bfile:
@@ -97,30 +99,40 @@ if not options.kfile:
parser.error("Please provide a pre-computed kinship file")
# READING PLINK input
-if options.verbose: sys.stderr.write("Reading PLINK input...\n")
-if options.bfile: IN = input.plink(options.bfile,type='b', phenoFile=options.phenoFile,normGenotype=options.normalizeGenotype)
-elif options.tfile: IN = input.plink(options.tfile,type='t', phenoFile=options.phenoFile,normGenotype=options.normalizeGenotype)
-elif options.pfile: IN = input.plink(options.pfile,type='p', phenoFile=options.phenoFile,normGenotype=options.normalizeGenotype)
-else: parser.error("You must provide at least one PLINK input file base")
+if options.verbose:
+ sys.stderr.write("Reading PLINK input...\n")
+if options.bfile:
+ IN = input.plink(options.bfile,type='b', phenoFile=options.phenoFile,normGenotype=options.normalizeGenotype)
+elif options.tfile:
+ IN = input.plink(options.tfile,type='t', phenoFile=options.phenoFile,normGenotype=options.normalizeGenotype)
+elif options.pfile:
+ IN = input.plink(options.pfile,type='p', phenoFile=options.phenoFile,normGenotype=options.normalizeGenotype)
+else:
+ parser.error("You must provide at least one PLINK input file base")
if not os.path.isfile(options.phenoFile or IN.fbase + '.phenos'):
parser.error("No .pheno file exist for %s" % (options.phenoFile or IN.fbase + '.phenos'))
# READING Covariate File
if options.covfile:
- if options.verbose: sys.stderr.write("Reading covariate file...\n")
+ if options.verbose:
+ sys.stderr.write("Reading covariate file...\n")
# Read the covariate file -- write this into input.plink
P = IN.getCovariates(options.covfile)
- if options.noMean: X0 = P
- else: X0 = np.hstack([np.ones((IN.phenos.shape[0],1)),P])
+ if options.noMean:
+ X0 = P
+ else:
+ X0 = np.hstack([np.ones((IN.phenos.shape[0],1)),P])
if np.isnan(X0).sum():
parser.error("The covariate file %s contains missing values. At this time we are not dealing with this case. Either remove those individuals with missing values or replace them in some way.")
-else: X0 = np.ones((IN.phenos.shape[0],1))
+else:
+ X0 = np.ones((IN.phenos.shape[0],1))
# READING Kinship - major bottleneck for large datasets
-if options.verbose: sys.stderr.write("Reading kinship...\n")
+if options.verbose:
+ sys.stderr.write("Reading kinship...\n")
begin = time.time()
# This method seems to be the fastest and works if you already know the size of the matrix
if options.kfile[-3:] == '.gz':
@@ -129,13 +141,15 @@ if options.kfile[-3:] == '.gz':
F = f.read() # might exhaust mem if the file is huge
K = np.fromstring(F,sep=' ') # Assume that space separated
f.close()
-else: K = np.fromfile(open(options.kfile,'r'),sep=" ")
+else:
+ K = np.fromfile(open(options.kfile,'r'),sep=" ")
K.resize((len(IN.indivs),len(IN.indivs)))
end = time.time()
# Other slower ways
#K = np.loadtxt(options.kfile)
#K = np.genfromtxt(options.kfile)
-if options.verbose: sys.stderr.write("Read the %d x %d kinship matrix in %0.3fs \n" % (K.shape[0],K.shape[1],end-begin))
+if options.verbose:
+ sys.stderr.write("Read the %d x %d kinship matrix in %0.3fs \n" % (K.shape[0],K.shape[1],end-begin))
# PROCESS the phenotype data -- Remove missing phenotype values
@@ -144,7 +158,8 @@ Y = IN.phenos[:,options.pheno]
v = np.isnan(Y)
keep = True - v
if v.sum():
- if options.verbose: sys.stderr.write("Cleaning the phenotype vector by removing %d individuals...\n" % (v.sum()))
+ if options.verbose:
+ sys.stderr.write("Cleaning the phenotype vector by removing %d individuals...\n" % (v.sum()))
Y = Y[keep]
X0 = X0[keep,:]
K = K[keep,:][:,keep]
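
The reformatted script above reads a whitespace-separated kinship matrix with np.fromfile, resizes it to n x n, and removes individuals with missing phenotypes from Y, X0 and K together. A minimal sketch of that cleanup with a toy 3 x 3 kinship (values made up; the script's `True - v` idiom is written here as `~v`):

    import numpy as np

    K = np.array([[1.0, 0.5, 0.2],
                  [0.5, 1.0, 0.3],
                  [0.2, 0.3, 1.0]])          # kinship, as read from the .kin file
    Y = np.array([2.3, np.nan, 1.7])         # phenotype vector with one missing value
    X0 = np.ones((3, 1))                     # intercept-only covariate matrix

    keep = ~np.isnan(Y)
    Y, X0, K = Y[keep], X0[keep, :], K[keep, :][:, keep]
    print(K.shape)                           # (2, 2)
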
diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py
index 8942d2ff..43c68942 100644
--- a/wqflask/wqflask/search_results.py
+++ b/wqflask/wqflask/search_results.py
@@ -61,29 +61,17 @@ class SearchResultPage():
self.results = []
if 'q' in kw:
- self.quick_search = True
+ #self.quick_search = True
self.search_terms = kw['q']
print("self.search_terms is: ", self.search_terms)
- self.do_quick_search()
+ self.quick_search()
else:
- self.quick_search = False
+ #self.quick_search = False
self.search_terms = kw['search_terms']
self.dataset = create_dataset(kw['dataset'])
self.search()
self.gen_search_result()
- def gen_quick_search_result(self):
- self.trait_list = []
-
- species_list = []
-
- for result in self.results:
- if not result:
- continue
- if result[0] not in species_list:
- species_list.append(result[0])
-
-
def gen_search_result(self):
"""
@@ -112,7 +100,7 @@ class SearchResultPage():
self.dataset.get_trait_info(self.trait_list, species)
- def do_quick_search(self):
+ def quick_search(self):
self.search_terms = parser.parse(self.search_terms)
print("After parsing:", self.search_terms)
@@ -171,6 +159,7 @@ class SearchResultPage():
search_ob = do_search.DoSearch.get_search(search_type)
search_class = getattr(do_search, search_ob)
+ print("search_class is: ", pf(search_class))
the_search = search_class(search_term,
search_operator,
self.dataset,
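
search_results.py resolves a search class by name: each DoSearch subclass registers its class name in DoSearch.search_types, get_search() looks that name up, and getattr on the do_search module returns the class to instantiate. A stripped-down sketch of the same registry pattern (the classes, search term, and module lookup are illustrative, not the real ones):

    import sys

    class DoSearch(object):
        search_types = {}                    # search type name -> class name

        @classmethod
        def get_search(cls, search_type):
            return cls.search_types[search_type]

    class QuickMrnaAssaySearch(DoSearch):
        DoSearch.search_types['quick_mrna_assay'] = "QuickMrnaAssaySearch"

        def __init__(self, term):
            self.term = term

        def run(self):
            return "searching mRNA assays for %s" % self.term

    this_module = sys.modules[__name__]      # stands in for the do_search module
    search_class = getattr(this_module, DoSearch.get_search('quick_mrna_assay'))
    print(search_class("Shh").run())
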
diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py
index 5c064359..85e33595 100755
--- a/wqflask/wqflask/show_trait/show_trait.py
+++ b/wqflask/wqflask/show_trait/show_trait.py
@@ -679,61 +679,61 @@ class ShowTrait(object):
elif this_trait and this_trait.dataset and this_trait.dataset.type =='Publish': #Check if trait is phenotype
- if this_trait.confidential:
- pass
- #tbl.append(HT.TR(
- # HT.TD('Pre-publication Phenotype: ', Class="fs13 fwb", valign="top", nowrap="on", width=90),
- # HT.TD(width=10, valign="top"),
- # HT.TD(HT.Span(this_trait.pre_publication_description, Class="fs13"), valign="top", width=740)
- # ))
- if webqtlUtil.hasAccessToConfidentialPhenotypeTrait(privilege=self.privilege, userName=self.userName, authorized_users=this_trait.authorized_users):
- #tbl.append(HT.TR(
- # HT.TD('Post-publication Phenotype: ', Class="fs13 fwb", valign="top", nowrap="on", width=90),
- # HT.TD(width=10, valign="top"),
- # HT.TD(HT.Span(this_trait.post_publication_description, Class="fs13"), valign="top", width=740)
- # ))
- #tbl.append(HT.TR(
- # HT.TD('Pre-publication Abbreviation: ', Class="fs13 fwb", valign="top", nowrap="on", width=90),
- # HT.TD(width=10, valign="top"),
- # HT.TD(HT.Span(this_trait.pre_publication_abbreviation, Class="fs13"), valign="top", width=740)
- # ))
- #tbl.append(HT.TR(
- # HT.TD('Post-publication Abbreviation: ', Class="fs13 fwb", valign="top", nowrap="on", width=90),
- # HT.TD(width=10, valign="top"),
- # HT.TD(HT.Span(this_trait.post_publication_abbreviation, Class="fs13"), valign="top", width=740)
- # ))
- #tbl.append(HT.TR(
- # HT.TD('Lab code: ', Class="fs13 fwb", valign="top", nowrap="on", width=90),
- # HT.TD(width=10, valign="top"),
- # HT.TD(HT.Span(this_trait.lab_code, Class="fs13"), valign="top", width=740)
- # ))
- pass
- #tbl.append(HT.TR(
- # HT.TD('Owner: ', Class="fs13 fwb", valign="top", nowrap="on", width=90),
- # HT.TD(width=10, valign="top"),
- # HT.TD(HT.Span(this_trait.owner, Class="fs13"), valign="top", width=740)
- # ))
- else:
- pass
- #tbl.append(HT.TR(
- # HT.TD('Phenotype: ', Class="fs13 fwb", valign="top", nowrap="on", width=90),
- # HT.TD(width=10, valign="top"),
- # HT.TD(HT.Span(this_trait.post_publication_description, Class="fs13"), valign="top", width=740)
- # ))
- #tbl.append(HT.TR(
- # HT.TD('Authors: ', Class="fs13 fwb",
- # valign="top", nowrap="on", width=90),
- # HT.TD(width=10, valign="top"),
- # HT.TD(HT.Span(this_trait.authors, Class="fs13"),
- # valign="top", width=740)
- # ))
- #tbl.append(HT.TR(
- # HT.TD('Title: ', Class="fs13 fwb",
- # valign="top", nowrap="on", width=90),
- # HT.TD(width=10, valign="top"),
- # HT.TD(HT.Span(this_trait.title, Class="fs13"),
- # valign="top", width=740)
- # ))
+ #if this_trait.confidential:
+ # pass
+ # #tbl.append(HT.TR(
+ # # HT.TD('Pre-publication Phenotype: ', Class="fs13 fwb", valign="top", nowrap="on", width=90),
+ # # HT.TD(width=10, valign="top"),
+ # # HT.TD(HT.Span(this_trait.pre_publication_description, Class="fs13"), valign="top", width=740)
+ # # ))
+ # if webqtlUtil.hasAccessToConfidentialPhenotypeTrait(privilege=self.privilege, userName=self.userName, authorized_users=this_trait.authorized_users):
+ # #tbl.append(HT.TR(
+ # # HT.TD('Post-publication Phenotype: ', Class="fs13 fwb", valign="top", nowrap="on", width=90),
+ # # HT.TD(width=10, valign="top"),
+ # # HT.TD(HT.Span(this_trait.post_publication_description, Class="fs13"), valign="top", width=740)
+ # # ))
+ # #tbl.append(HT.TR(
+ # # HT.TD('Pre-publication Abbreviation: ', Class="fs13 fwb", valign="top", nowrap="on", width=90),
+ # # HT.TD(width=10, valign="top"),
+ # # HT.TD(HT.Span(this_trait.pre_publication_abbreviation, Class="fs13"), valign="top", width=740)
+ # # ))
+ # #tbl.append(HT.TR(
+ # # HT.TD('Post-publication Abbreviation: ', Class="fs13 fwb", valign="top", nowrap="on", width=90),
+ # # HT.TD(width=10, valign="top"),
+ # # HT.TD(HT.Span(this_trait.post_publication_abbreviation, Class="fs13"), valign="top", width=740)
+ # # ))
+ # #tbl.append(HT.TR(
+ # # HT.TD('Lab code: ', Class="fs13 fwb", valign="top", nowrap="on", width=90),
+ # # HT.TD(width=10, valign="top"),
+ # # HT.TD(HT.Span(this_trait.lab_code, Class="fs13"), valign="top", width=740)
+ # # ))
+ # pass
+ # #tbl.append(HT.TR(
+ # # HT.TD('Owner: ', Class="fs13 fwb", valign="top", nowrap="on", width=90),
+ # # HT.TD(width=10, valign="top"),
+ # # HT.TD(HT.Span(this_trait.owner, Class="fs13"), valign="top", width=740)
+ # # ))
+ #else:
+ # pass
+ # #tbl.append(HT.TR(
+ # # HT.TD('Phenotype: ', Class="fs13 fwb", valign="top", nowrap="on", width=90),
+ # # HT.TD(width=10, valign="top"),
+ # # HT.TD(HT.Span(this_trait.post_publication_description, Class="fs13"), valign="top", width=740)
+ # # ))
+ ##tbl.append(HT.TR(
+ ## HT.TD('Authors: ', Class="fs13 fwb",
+ ## valign="top", nowrap="on", width=90),
+ ## HT.TD(width=10, valign="top"),
+ ## HT.TD(HT.Span(this_trait.authors, Class="fs13"),
+ ## valign="top", width=740)
+ ## ))
+ ##tbl.append(HT.TR(
+ ## HT.TD('Title: ', Class="fs13 fwb",
+ ## valign="top", nowrap="on", width=90),
+ ## HT.TD(width=10, valign="top"),
+ ## HT.TD(HT.Span(this_trait.title, Class="fs13"),
+ ## valign="top", width=740)
+ ## ))
if this_trait.journal:
journal = this_trait.journal
if this_trait.year: