Diffstat (limited to 'wqflask/maintenance')
-rw-r--r--  wqflask/maintenance/dataset/.gitignore                                      2
-rw-r--r--  wqflask/maintenance/dataset/__init__.py                                     0
-rw-r--r--  wqflask/maintenance/dataset/calculate.py                                    7
-rw-r--r--  wqflask/maintenance/dataset/correlations.py                                47
-rw-r--r--  wqflask/maintenance/dataset/datasampledir/load_genotypes/config.ini         7
-rw-r--r--  wqflask/maintenance/dataset/datasampledir/load_genotypes/sample.geno       12
-rw-r--r--  wqflask/maintenance/dataset/datasampledir/load_phenotypes/config.ini        4
-rw-r--r--  wqflask/maintenance/dataset/datasampledir/load_phenotypes/sample_data.txt  10
-rw-r--r--  wqflask/maintenance/dataset/datasampledir/load_phenotypes/sample_meta.txt   4
-rw-r--r--  wqflask/maintenance/dataset/datastructure.py                              155
-rw-r--r--  wqflask/maintenance/dataset/fetch.py                                       11
-rw-r--r--  wqflask/maintenance/dataset/genotypes.py                                   48
-rw-r--r--  wqflask/maintenance/dataset/load_genotypes.py                             157
-rw-r--r--  wqflask/maintenance/dataset/load_phenotypes.py                            171
-rw-r--r--  wqflask/maintenance/dataset/phenotypes.py                                 100
-rw-r--r--  wqflask/maintenance/dataset/probesets.py                                   90
-rw-r--r--  wqflask/maintenance/dataset/specials1.py                                   53
-rw-r--r--  wqflask/maintenance/dataset/specials2.py                                  139
-rw-r--r--  wqflask/maintenance/dataset/specials3.py                                  117
-rw-r--r--  wqflask/maintenance/dataset/utilities.py                                   89
-rwxr-xr-x  wqflask/maintenance/gen_select_dataset.py                                  89
-rwxr-xr-x  wqflask/maintenance/quick_search_table.py                                 509
22 files changed, 56 insertions, 1765 deletions
diff --git a/wqflask/maintenance/dataset/.gitignore b/wqflask/maintenance/dataset/.gitignore
deleted file mode 100644
index 4910042e..00000000
--- a/wqflask/maintenance/dataset/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-*.pyc
-datadir/
diff --git a/wqflask/maintenance/dataset/__init__.py b/wqflask/maintenance/dataset/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/wqflask/maintenance/dataset/__init__.py
+++ /dev/null
diff --git a/wqflask/maintenance/dataset/calculate.py b/wqflask/maintenance/dataset/calculate.py
deleted file mode 100644
index 6aada827..00000000
--- a/wqflask/maintenance/dataset/calculate.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import scipy.stats
-
-def correlation(a1, a2):
- re = []
- re.append(scipy.stats.pearsonr(a1, a2))
- re.append(scipy.stats.spearmanr(a1, a2))
- return re
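For reference, the correlation() helper deleted above returned SciPy's Pearson and Spearman results, each as a (coefficient, p-value) pair. A minimal usage sketch with hypothetical numbers; the genotype codes follow the B=-1/H=0/D=1 convention used by genotypes.to_number further down in this diff:

# Usage sketch for the removed calculate.correlation() helper (hypothetical data).
# scipy.stats.pearsonr and scipy.stats.spearmanr each return (coefficient, p-value).
import scipy.stats

genotype_codes = [-1, -1, 1, 1, 1]            # B=-1, D=1 at one marker (hypothetical)
expression_vals = [7.2, 7.4, 9.1, 8.8, 9.3]   # hypothetical probeset values

pearson = scipy.stats.pearsonr(genotype_codes, expression_vals)
spearman = scipy.stats.spearmanr(genotype_codes, expression_vals)
print("Pearson (r, p):", pearson)
print("Spearman (rho, p):", spearman)
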
diff --git a/wqflask/maintenance/dataset/correlations.py b/wqflask/maintenance/dataset/correlations.py
deleted file mode 100644
index b089e446..00000000
--- a/wqflask/maintenance/dataset/correlations.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Author: Lei Yan
-# Create Date: 2014-01-21
-# Last Update Date: 2014-01-24
-
-# import
-import sys
-import os
-import re
-import MySQLdb
-import ConfigParser
-
-def main(argv):
-
- # load configuration from configuration file
- config = ConfigParser.ConfigParser()
- config.read(argv[1])
- genofile = config.get('configuration', 'genofile')
-
- # parse genofile
- genotypes = []
- file_geno = open(genofile, 'r')
- for line in file_geno:
- line = line.strip()
- if line.startswith('#'):
- continue
- if line.startswith('@'):
- continue
- cells = line.split()
- if line.startswith("Chr"):
- strains = cells[4:]
- continue
- genotype = {}
- genotype['chr'] = cells[0]
- genotype['locus'] = cells[1]
- genotype['cm'] = cells[2]
- genotype['mb'] = cells[3]
- genotype['values'] = cells[4:]
- genotypes.append(genotype)
- print "get %d strains:\t%s" % (len(strains), strains)
- print "load %d genotypes" % len(genotypes)
-
- # phenotypes
-
-# main
-if __name__ == "__main__":
- main(sys.argv)
- print "exit successfully"
diff --git a/wqflask/maintenance/dataset/datasampledir/load_genotypes/config.ini b/wqflask/maintenance/dataset/datasampledir/load_genotypes/config.ini
deleted file mode 100644
index abff371b..00000000
--- a/wqflask/maintenance/dataset/datasampledir/load_genotypes/config.ini
+++ /dev/null
@@ -1,7 +0,0 @@
-[config]
-inbredsetid = 1
-genofile = datasampledir/load_genotypes/sample.geno
-genovalue_U = x
-genovalue_H = 0
-genovalue_B = -1
-genovalue_D = 1
diff --git a/wqflask/maintenance/dataset/datasampledir/load_genotypes/sample.geno b/wqflask/maintenance/dataset/datasampledir/load_genotypes/sample.geno
deleted file mode 100644
index 0024ffd1..00000000
--- a/wqflask/maintenance/dataset/datasampledir/load_genotypes/sample.geno
+++ /dev/null
@@ -1,12 +0,0 @@
-@name:BXD
-@type:riset
-@mat:B
-@pat:D
-@het:H
-@unk:U
-Chr Locus cM Mb BXD1 BXD2 BXD5 BXD6 BXD8
-1 rs6269442 0 3.482275 D B D D D
-2 rs6365999 0.3 4.811062 B B D D D
-3 rs6376963 0.895 5.008089 B B D D D
-4 rs3677817 1.185 5.176058 B B D D D
-5 rs8236463 2.081 5.579193 B B D D D
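The [config] section above maps each allele letter appearing in this .geno file to a numeric genotype value (B = -1, D = 1, H = 0, U = unknown). A minimal sketch of the genovalue_<letter> lookup that load_genotypes.py (further down in this diff) performs, written against Python 3's configparser rather than the removed scripts' Python 2 ConfigParser; the config path is relative/hypothetical:

# Sketch of the genovalue_<letter> lookup driven by the config.ini shown above.
import configparser

config = configparser.ConfigParser()
config.read("config.ini")                 # the [config] section shown above

def geno_letter_to_number(letter):
    """Map a .geno allele letter (B, D, H, U) to its configured numeric code."""
    raw = config.get("config", "genovalue_" + letter)
    try:
        return int(raw)                   # -1, 0 or 1
    except ValueError:
        return None                       # genovalue_U = x -> unknown

print([geno_letter_to_number(c) for c in "BDHU"])   # [-1, 1, 0, None]
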
diff --git a/wqflask/maintenance/dataset/datasampledir/load_phenotypes/config.ini b/wqflask/maintenance/dataset/datasampledir/load_phenotypes/config.ini
deleted file mode 100644
index 638c3bd8..00000000
--- a/wqflask/maintenance/dataset/datasampledir/load_phenotypes/config.ini
+++ /dev/null
@@ -1,4 +0,0 @@
-[config]
-inbredsetid = 1
-datafile = datasampledir/load_phenotypes/sample_data.txt
-metafile = datasampledir/load_phenotypes/sample_meta.txt
diff --git a/wqflask/maintenance/dataset/datasampledir/load_phenotypes/sample_data.txt b/wqflask/maintenance/dataset/datasampledir/load_phenotypes/sample_data.txt
deleted file mode 100644
index a39fd3be..00000000
--- a/wqflask/maintenance/dataset/datasampledir/load_phenotypes/sample_data.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-CaseID BXD1 BXD2 BXD3 BXD4 BXD5
-Value 1 2 3 4 5
-SE 6 7 8 9 10
-N 11 12 13 14 15
-Sex 16 17 18 19 20
-SE 21 22 23 24 25
-N 26 27 28 29 30
-Age 31 32 33 34 35
-SE 36 37 38 39 40
-N 41 42 43 44 45
diff --git a/wqflask/maintenance/dataset/datasampledir/load_phenotypes/sample_meta.txt b/wqflask/maintenance/dataset/datasampledir/load_phenotypes/sample_meta.txt
deleted file mode 100644
index 5172223f..00000000
--- a/wqflask/maintenance/dataset/datasampledir/load_phenotypes/sample_meta.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-Pubmed ID Pre Publication Description Post Publication Description Original Description Pre Publication Abbreviation Post Publication Abbreviation Lab Code Submitter Owner Authorized Users Authors Title Abstract Journal Volume Pages Month Year Units
-x Pre Publication Description 1 Post Publication Description 1 Original Description 1 Pre Publication Abbreviation 1 Post Publication Abbreviation 1 Lab Code 1 Submitter 1 Owner 1 Authorized Users 1 Authors 1 Title 1 Abstract 1 Journal 1 Volume 1 Pages 1 Month 1 2001 unit 1
-x Pre Publication Description 2 Post Publication Description 2 Original Description 2 Pre Publication Abbreviation 2 Post Publication Abbreviation 2 Lab Code 2 Submitter 2 Owner 2 Authorized Users 2 Authors 2 Title 2 Abstract 2 Journal 2 Volume 2 Pages 2 Month 2 2002 unit 2
-19958391 Pre Publication Description 3 Post Publication Description 3 Original Description 3 Pre Publication Abbreviation 3 Post Publication Abbreviation 3 Lab Code 3 Submitter 3 Owner 3 Authorized Users 3 Authors 3 Title 3 Abstract 3 Journal 3 Volume 3 Pages 3 Month 3 2003 unit 3
diff --git a/wqflask/maintenance/dataset/datastructure.py b/wqflask/maintenance/dataset/datastructure.py
deleted file mode 100644
index 7139856f..00000000
--- a/wqflask/maintenance/dataset/datastructure.py
+++ /dev/null
@@ -1,155 +0,0 @@
-import utilities
-
-def get_probesetfreezes(inbredsetid):
- cursor, con = utilities.get_cursor()
- sql = """
- SELECT ProbeSetFreeze.`Id`, ProbeSetFreeze.`Name`, ProbeSetFreeze.`FullName`
- FROM ProbeSetFreeze, ProbeFreeze
- WHERE ProbeSetFreeze.`ProbeFreezeId`=ProbeFreeze.`Id`
- AND ProbeFreeze.`InbredSetId`=%s
- """
- cursor.execute(sql, (inbredsetid))
- return cursor.fetchall()
-
-def get_probesetfreeze(probesetfreezeid):
- cursor, con = utilities.get_cursor()
- sql = """
- SELECT ProbeSetFreeze.`Id`, ProbeSetFreeze.`Name`, ProbeSetFreeze.`FullName`
- FROM ProbeSetFreeze
- WHERE ProbeSetFreeze.`Id`=%s
- """
- cursor.execute(sql, (probesetfreezeid))
- return cursor.fetchone()
-
-def get_strains(inbredsetid):
- cursor, con = utilities.get_cursor()
- sql = """
- SELECT Strain.`Id`, Strain.`Name`
- FROM StrainXRef, Strain
- WHERE StrainXRef.`InbredSetId`=%s
- AND StrainXRef.`StrainId`=Strain.`Id`
- ORDER BY StrainXRef.`OrderId`
- """
- cursor.execute(sql, (inbredsetid))
- return cursor.fetchall()
-
-def get_inbredset(probesetfreezeid):
- cursor, con = utilities.get_cursor()
- sql = """
- SELECT InbredSet.`Id`, InbredSet.`Name`, InbredSet.`FullName`
- FROM InbredSet, ProbeFreeze, ProbeSetFreeze
- WHERE InbredSet.`Id`=ProbeFreeze.`InbredSetId`
- AND ProbeFreeze.`Id`=ProbeSetFreeze.`ProbeFreezeId`
- AND ProbeSetFreeze.`Id`=%s
- """
- cursor.execute(sql, (probesetfreezeid))
- return cursor.fetchone()
-
-def get_species(inbredsetid):
- cursor, con = utilities.get_cursor()
- sql = """
- SELECT Species.`Id`, Species.`Name`, Species.`MenuName`, Species.`FullName`
- FROM InbredSet, Species
- WHERE InbredSet.`Id`=%s
- AND InbredSet.`SpeciesId`=Species.`Id`
- """
- cursor.execute(sql, (inbredsetid))
- return cursor.fetchone()
-
-def get_genofreeze_byinbredsetid(inbredsetid):
- cursor, con = utilities.get_cursor()
- sql = """
- SELECT GenoFreeze.`Id`, GenoFreeze.`Name`, GenoFreeze.`FullName`, GenoFreeze.`InbredSetId`
- FROM GenoFreeze
- WHERE GenoFreeze.`InbredSetId`=%s
- """
- cursor.execute(sql, (inbredsetid))
- return cursor.fetchone()
-
-def get_nextdataid_genotype():
- cursor, con = utilities.get_cursor()
- sql = """
- SELECT GenoData.`Id`
- FROM GenoData
- ORDER BY GenoData.`Id` DESC
- LIMIT 1
- """
- cursor.execute(sql)
- re = cursor.fetchone()
- dataid = re[0]
- dataid += 1
- return dataid
-
-def get_nextdataid_phenotype():
- cursor, con = utilities.get_cursor()
- sql = """
- SELECT PublishData.`Id`
- FROM PublishData
- ORDER BY PublishData.`Id` DESC
- LIMIT 1
- """
- cursor.execute(sql)
- re = cursor.fetchone()
- dataid = re[0]
- dataid += 1
- return dataid
-
-def insert_strain(inbredsetid, strainname, updatestrainxref=None):
- speciesid = get_species(inbredsetid)[0]
- cursor, con = utilities.get_cursor()
- sql = """
- INSERT INTO Strain
- SET
- Strain.`Name`=%s,
- Strain.`Name2`=%s,
- Strain.`SpeciesId`=%s
- """
- cursor.execute(sql, (strainname, strainname, speciesid))
- strainid = con.insert_id()
- if updatestrainxref:
- sql = """
- SELECT StrainXRef.`OrderId`
- FROM StrainXRef
- where StrainXRef.`InbredSetId`=%s
- ORDER BY StrainXRef.`OrderId` DESC
- LIMIT 1
- """
- cursor.execute(sql, (inbredsetid))
- re = cursor.fetchone()
- orderid = re[0] + 1
- #
- sql = """
- INSERT INTO StrainXRef
- SET
- StrainXRef.`InbredSetId`=%s,
- StrainXRef.`StrainId`=%s,
- StrainXRef.`OrderId`=%s,
- StrainXRef.`Used_for_mapping`=%s,
- StrainXRef.`PedigreeStatus`=%s
- """
- cursor.execute(sql, (inbredsetid, strainid, orderid, "N", None))
-
-def get_strain(inbredsetid, strainname):
- speciesid = get_species(inbredsetid)[0]
- cursor, con = utilities.get_cursor()
- sql = """
- SELECT Strain.`Id`, Strain.`Name`
- FROM Strain
- WHERE Strain.`SpeciesId`=%s
- AND Strain.`Name` LIKE %s
- """
- cursor.execute(sql, (speciesid, strainname))
- return cursor.fetchone()
-
-def get_strain_sure(inbredsetid, strainname, updatestrainxref=None):
- strain = get_strain(inbredsetid, strainname)
- if not strain:
- insert_strain(inbredsetid, strainname, updatestrainxref)
- strain = get_strain(inbredsetid, strainname)
- return strain
-
-def get_strains_bynames(inbredsetid, strainnames, updatestrainxref=None):
- strains = []
- for strainname in strainnames:
- strains.append(get_strain_sure(inbredsetid, strainname, updatestrainxref))
- return strains
diff --git a/wqflask/maintenance/dataset/fetch.py b/wqflask/maintenance/dataset/fetch.py
deleted file mode 100644
index fcb2d2d8..00000000
--- a/wqflask/maintenance/dataset/fetch.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import sys
-
-inputfile = open(sys.argv[1], 'r')
-
-for line in inputfile:
- cells = line.split()
- #print cells[int(sys.argv[2])]
- i = len(cells)
- print i
-
-inputfile.close()
diff --git a/wqflask/maintenance/dataset/genotypes.py b/wqflask/maintenance/dataset/genotypes.py
deleted file mode 100644
index b57d4651..00000000
--- a/wqflask/maintenance/dataset/genotypes.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import utilities
-
-def get_geno(inbredsetid, name):
- cursor = utilities.get_cursor()
- sql = """
- SELECT Geno.`Id`, Geno.`Name`, Geno.`Chr`, Geno.`Mb`
- FROM (Geno, InbredSet)
- WHERE Geno.`SpeciesId`=InbredSet.`SpeciesId`
- AND InbredSet.`Id`=%s
- AND Geno.`Name` LIKE %s
- """
- cursor.execute(sql, (inbredsetid, name))
- return cursor.fetchone()
-
-def load_genos(file):
- genotypes = []
- file_geno = open(file, 'r')
- for line in file_geno:
- line = line.strip()
- if line.startswith('#'):
- continue
- if line.startswith('@'):
- continue
- cells = line.split()
- if line.startswith("Chr"):
- strains = cells[4:]
- strains = [strain.lower() for strain in strains]
- continue
- genotype = {}
- genotype['chr'] = cells[0]
- genotype['locus'] = cells[1]
- genotype['cm'] = cells[2]
- genotype['mb'] = cells[3]
- values = cells[4:]
- values = [to_number(value) for value in values]
- genotype['values'] = values
- genotype['dicvalues'] = utilities.to_dic(strains, values)
- genotypes.append(genotype)
- return strains, genotypes
-
-def to_number(char):
- dic = {
- 'b': -1,
- 'd': 1,
- 'h': 0,
- 'u': None,
- }
- return dic.get(char.lower(), None)
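A brief usage sketch of the load_genos()/to_number() pair deleted above, run against a file in the sample.geno format shown earlier (path hypothetical). Strain names are lowercased by the loader, so each marker's dicvalues dictionary is keyed by e.g. 'bxd1':

# Hypothetical usage of the removed genotypes.load_genos() loader.
strains, genos = load_genos("sample.geno")
print("%d strains, %d markers" % (len(strains), len(genos)))
first = genos[0]
print(first["locus"], first["chr"], first["mb"])
print(first["dicvalues"]["bxd1"])   # numeric code: -1 (B), 1 (D), 0 (H) or None (U)
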
diff --git a/wqflask/maintenance/dataset/load_genotypes.py b/wqflask/maintenance/dataset/load_genotypes.py
deleted file mode 100644
index 4697382b..00000000
--- a/wqflask/maintenance/dataset/load_genotypes.py
+++ /dev/null
@@ -1,157 +0,0 @@
-from __future__ import absolute_import, print_function, division
-
-import sys
-import re
-import argparse
-
-import utilities
-import datastructure
-
-def main(argv):
- config = utilities.get_config(argv[1])
- print("config file:")
- for item in config.items('config'):
- print("\t", str(item))
- parse_genofile(config, fetch_parameters(config))
-
-def fetch_parameters(config):
- config_dic = {}
- config_dic['inbredsetid'] = config.get('config', 'inbredsetid')
- config_dic["speciesid"] = datastructure.get_species(config_dic['inbredsetid'])[0]
- config_dic['genofreezeid'] = datastructure.get_genofreeze_byinbredsetid(config_dic['inbredsetid'])[0]
- config_dic['dataid'] = datastructure.get_nextdataid_genotype()
- config_dic['genofile'] = config.get('config', 'genofile')
- print("config dictionary:")
- for k, v in config_dic.items():
- print("\t%s: %s" % (k, v))
- return config_dic
-
-def parse_genofile(config, config_dic):
- genofile = open(config_dic['genofile'], 'r')
- meta_dic = {}
- for line in genofile:
- line = line.strip()
- if len(line) == 0:
- continue
- if line.startswith('#'):
- continue
- if line.startswith('@'):
- line = line.strip('@')
- items = line.split(';')
- for item in items:
- kv = re.split(':|=', item)
- meta_dic[kv[0].strip()] = kv[1].strip()
- continue
- if line.lower().startswith("chr"):
- #
- print("geno file meta dictionary:")
- for k, v in meta_dic.items():
- print("\t%s: %s" % (k, v))
- #
- print("geno file head:\n\t%s" % line)
- strainnames = line.split()[4:]
- config_dic['strains'] = datastructure.get_strains_bynames(inbredsetid=config_dic['inbredsetid'], strainnames=strainnames, updatestrainxref="yes")
- continue
- # geno file line, marker
- marker_dic = parse_marker(line)
- marker_dic['genoid'] = check_or_insert_geno(config_dic, marker_dic)
- if check_genoxref(config_dic, marker_dic):
- continue
- insert_genodata(config, config_dic, marker_dic)
- insert_genoxref(config_dic, marker_dic)
- config_dic['dataid'] += 1
- genofile.close()
-
-def parse_marker(line):
- marker_dic = {}
- cells = line.split()
- marker_dic['chromosome'] = cells[0]
- marker_dic['locus'] = cells[1]
- marker_dic['cm'] = cells[2]
- marker_dic['mb'] = cells[3]
- marker_dic['values'] = cells[4:]
- return marker_dic
-
-def check_or_insert_geno(config_dic, marker_dic):
- cursor, con = utilities.get_cursor()
- sql = """
- SELECT Geno.`Id`
- FROM Geno
- WHERE Geno.`SpeciesId`=%s
- AND Geno.`Name` like %s
- """
- cursor.execute(sql, (config_dic["speciesid"], marker_dic['locus']))
- result = cursor.fetchone()
- if result:
- genoid = result[0]
- print("get geno record: %d" % genoid)
- else:
- sql = """
- INSERT INTO Geno
- SET
- Geno.`SpeciesId`=%s,
- Geno.`Name`=%s,
- Geno.`Marker_Name`=%s,
- Geno.`Chr`=%s,
- Geno.`Mb`=%s
- """
- cursor.execute(sql, (config_dic['speciesid'], marker_dic['locus'], marker_dic['locus'], marker_dic['chromosome'], marker_dic['mb']))
- rowcount = cursor.rowcount
- genoid = con.insert_id()
- print("INSERT INTO Geno: %d record: %d" % (rowcount, genoid))
- return genoid
-
-def check_genoxref(config_dic, marker_dic):
- cursor, con = utilities.get_cursor()
- sql = """
- select GenoXRef.*
- from GenoXRef
- where GenoXRef.`GenoFreezeId`=%s
- AND GenoXRef.`GenoId`=%s
- """
- cursor.execute(sql, (config_dic['genofreezeid'], marker_dic['genoid']))
- rowcount = cursor.rowcount
- return rowcount
-
-def insert_genodata(config, config_dic, marker_dic):
- cursor, con = utilities.get_cursor()
- for index, strain in enumerate(config_dic['strains']):
- strainid = strain[0]
- value = utilities.to_db_string(marker_dic['values'][index], None)
- if not value:
- continue
- value = config.get('config', "genovalue_" + value)
- try:
- number = int(value)
- except:
- continue
- if not number in [-1, 0, 1]:
- continue
- sql = """
- INSERT INTO GenoData
- SET
- GenoData.`Id`=%s,
- GenoData.`StrainId`=%s,
- GenoData.`value`=%s
- """
- cursor.execute(sql, (config_dic['dataid'], strainid, number))
-
-def insert_genoxref(config_dic, marker_dic):
- cursor, con = utilities.get_cursor()
- sql = """
- INSERT INTO GenoXRef
- SET
- GenoXRef.`GenoFreezeId`=%s,
- GenoXRef.`GenoId`=%s,
- GenoXRef.`DataId`=%s,
- GenoXRef.`cM`=%s,
- GenoXRef.`Used_for_mapping`=%s
- """
- cursor.execute(sql, (config_dic['genofreezeid'], marker_dic['genoid'], config_dic['dataid'], marker_dic['cm'], 'N'))
- rowcount = cursor.rowcount
- print("INSERT INTO GenoXRef: %d record" % (rowcount))
-
-if __name__ == "__main__":
- print("command line arguments:\n\t%s" % sys.argv)
- main(sys.argv)
- print("exit successfully")
diff --git a/wqflask/maintenance/dataset/load_phenotypes.py b/wqflask/maintenance/dataset/load_phenotypes.py
deleted file mode 100644
index 27e340f8..00000000
--- a/wqflask/maintenance/dataset/load_phenotypes.py
+++ /dev/null
@@ -1,171 +0,0 @@
-import sys
-import csv
-
-import utilities
-import datastructure
-
-def main(argv):
- # config
- config = utilities.get_config(argv[1])
- print "config:"
- for item in config.items('config'):
- print "\t%s" % (str(item))
- # var
- inbredsetid = config.get('config', 'inbredsetid')
- print "inbredsetid: %s" % inbredsetid
- species = datastructure.get_species(inbredsetid)
- speciesid = species[0]
- print "speciesid: %s" % speciesid
- dataid = datastructure.get_nextdataid_phenotype()
- print "next data id: %s" % dataid
- cursor, con = utilities.get_cursor()
- # datafile
- datafile = open(config.get('config', 'datafile'), 'r')
- phenotypedata = csv.reader(datafile, delimiter='\t', quotechar='"')
- phenotypedata_head = phenotypedata.next()
- print "phenotypedata head:\n\t%s" % phenotypedata_head
- strainnames = phenotypedata_head[1:]
- strains = datastructure.get_strains_bynames(inbredsetid=inbredsetid, strainnames=strainnames, updatestrainxref=None)
- # metafile
- metafile = open(config.get('config', 'metafile'), 'r')
- phenotypemeta = csv.reader(metafile, delimiter='\t', quotechar='"')
- phenotypemeta_head = phenotypemeta.next()
- print "phenotypemeta head:\n\t%s" % phenotypemeta_head
- print
- # load
- for metarow in phenotypemeta:
- #
- datarow_value = phenotypedata.next()
- datarow_se = phenotypedata.next()
- datarow_n = phenotypedata.next()
- # Phenotype
- sql = """
- INSERT INTO Phenotype
- SET
- Phenotype.`Pre_publication_description`=%s,
- Phenotype.`Post_publication_description`=%s,
- Phenotype.`Original_description`=%s,
- Phenotype.`Pre_publication_abbreviation`=%s,
- Phenotype.`Post_publication_abbreviation`=%s,
- Phenotype.`Lab_code`=%s,
- Phenotype.`Submitter`=%s,
- Phenotype.`Owner`=%s,
- Phenotype.`Authorized_Users`=%s,
- Phenotype.`Units`=%s
- """
- cursor.execute(sql, (
- utilities.to_db_string(metarow[1], None),
- utilities.to_db_string(metarow[2], None),
- utilities.to_db_string(metarow[3], None),
- utilities.to_db_string(metarow[4], None),
- utilities.to_db_string(metarow[5], None),
- utilities.to_db_string(metarow[6], None),
- utilities.to_db_string(metarow[7], None),
- utilities.to_db_string(metarow[8], None),
- utilities.to_db_string(metarow[9], ""),
- utilities.to_db_string(metarow[18], None),
- ))
- rowcount = cursor.rowcount
- phenotypeid = con.insert_id()
- print "INSERT INTO Phenotype: %d record: %d" % (rowcount, phenotypeid)
- # Publication
- publicationid = None # reset
- pubmed_id = utilities.to_db_string(metarow[0], None)
- if pubmed_id:
- sql = """
- SELECT Publication.`Id`
- FROM Publication
- WHERE Publication.`PubMed_ID`=%s
- """
- cursor.execute(sql, (pubmed_id))
- re = cursor.fetchone()
- if re:
- publicationid = re[0]
- print "get Publication record: %d" % publicationid
- if not publicationid:
- sql = """
- INSERT INTO Publication
- SET
- Publication.`PubMed_ID`=%s,
- Publication.`Abstract`=%s,
- Publication.`Authors`=%s,
- Publication.`Title`=%s,
- Publication.`Journal`=%s,
- Publication.`Volume`=%s,
- Publication.`Pages`=%s,
- Publication.`Month`=%s,
- Publication.`Year`=%s
- """
- cursor.execute(sql, (
- utilities.to_db_string(metarow[0], None),
- utilities.to_db_string(metarow[12], None),
- utilities.to_db_string(metarow[10], ""),
- utilities.to_db_string(metarow[11], None),
- utilities.to_db_string(metarow[13], None),
- utilities.to_db_string(metarow[14], None),
- utilities.to_db_string(metarow[15], None),
- utilities.to_db_string(metarow[16], None),
- utilities.to_db_string(metarow[17], None),
- ))
- rowcount = cursor.rowcount
- publicationid = con.insert_id()
- print "INSERT INTO Publication: %d record: %d" % (rowcount, publicationid)
- # data
- for index, strain in enumerate(strains):
- #
- strainid = strain[0]
- value = utilities.to_db_float(datarow_value[index+1], None)
- se = utilities.to_db_float(datarow_se[index+1], None)
- n = utilities.to_db_int(datarow_n[index+1], None)
- #
- if value:
- sql = """
- INSERT INTO PublishData
- SET
- PublishData.`Id`=%s,
- PublishData.`StrainId`=%s,
- PublishData.`value`=%s
- """
- cursor.execute(sql, (dataid, strainid, value))
- if se:
- sql = """
- INSERT INTO PublishSE
- SET
- PublishSE.`DataId`=%s,
- PublishSE.`StrainId`=%s,
- PublishSE.`error`=%s
- """
- cursor.execute(sql, (dataid, strainid, se))
- if n:
- sql = """
- INSERT INTO NStrain
- SET
- NStrain.`DataId`=%s,
- NStrain.`StrainId`=%s,
- NStrain.`count`=%s
- """
- cursor.execute(sql, (dataid, strainid, n))
- # PublishXRef
- sql = """
- INSERT INTO PublishXRef
- SET
- PublishXRef.`InbredSetId`=%s,
- PublishXRef.`PhenotypeId`=%s,
- PublishXRef.`PublicationId`=%s,
- PublishXRef.`DataId`=%s,
- PublishXRef.`comments`=%s
- """
- cursor.execute(sql, (inbredsetid, phenotypeid, publicationid, dataid, ""))
- rowcount = cursor.rowcount
- publishxrefid = con.insert_id()
- print "INSERT INTO PublishXRef: %d record: %d" % (rowcount, publishxrefid)
- # for loop next
- dataid += 1
- print
- # release
- con.close()
-
-if __name__ == "__main__":
- print "command line arguments:\n\t%s" % sys.argv
- main(sys.argv)
- print "exit successfully"
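load_phenotypes.py above reads the two sample files shown earlier in lockstep: one metadata row per phenotype, matched with three data rows (value, SE, N) per phenotype, which is why sample_data.txt repeats that row pattern. A minimal Python 3 sketch of the pairing (the removed script used the Python 2 csv reader's .next(); file paths hypothetical):

# Sketch of the metadata/data row pairing used by the removed load_phenotypes.py.
import csv

with open("sample_data.txt") as datafile, open("sample_meta.txt") as metafile:
    data = csv.reader(datafile, delimiter="\t", quotechar='"')
    meta = csv.reader(metafile, delimiter="\t", quotechar='"')
    strains = next(data)[1:]        # data header: CaseID BXD1 BXD2 ...
    next(meta)                      # skip metadata header row
    for metarow in meta:
        values = next(data)[1:]     # phenotype values per strain
        ses = next(data)[1:]        # standard errors
        ns = next(data)[1:]         # N (number of animals)
        print(metarow[1], dict(zip(strains, values)))
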
diff --git a/wqflask/maintenance/dataset/phenotypes.py b/wqflask/maintenance/dataset/phenotypes.py
deleted file mode 100644
index ed30f33f..00000000
--- a/wqflask/maintenance/dataset/phenotypes.py
+++ /dev/null
@@ -1,100 +0,0 @@
-import utilities
-
-def fetch():
- # parameters
- inbredsetid = 1
- phenotypesfile = open('bxdphenotypes.txt', 'w+')
- #
- phenotypesfile.write("id\tAuthors\tOriginal_description\tPre_publication_description\tPost_publication_description\t")
- # open db
- cursor = utilities.get_cursor()
- # get strain list
- strains = []
- sql = """
- SELECT Strain.`Name`
- FROM StrainXRef, Strain
- WHERE StrainXRef.`StrainId`=Strain.`Id`
- AND StrainXRef.`InbredSetId`=%s
- ORDER BY StrainXRef.`OrderId`
- """
- cursor.execute(sql, (inbredsetid))
- results = cursor.fetchall()
- for row in results:
- strain = row[0]
- strain = strain.lower()
- strains.append(strain)
- print "get %d strains: %s" % (len(strains), strains)
- phenotypesfile.write('\t'.join([strain.upper() for strain in strains]))
- phenotypesfile.write('\n')
- phenotypesfile.flush()
- # phenotypes
- sql = """
- SELECT PublishXRef.`Id`, Publication.`Authors`, Phenotype.`Original_description`, Phenotype.`Pre_publication_description`, Phenotype.`Post_publication_description`
- FROM (PublishXRef, Phenotype, Publication)
- WHERE PublishXRef.`InbredSetId`=%s
- AND PublishXRef.`PhenotypeId`=Phenotype.`Id`
- AND PublishXRef.`PublicationId`=Publication.`Id`
- """
- cursor.execute(sql, (inbredsetid))
- results = cursor.fetchall()
- print "get %d phenotypes" % (len(results))
- for phenotyperow in results:
- publishxrefid = phenotyperow[0]
- authors = utilities.clearspaces(phenotyperow[1])
- original_description = utilities.clearspaces(phenotyperow[2])
- pre_publication_description = utilities.clearspaces(phenotyperow[3])
- post_publication_description = utilities.clearspaces(phenotyperow[4])
- phenotypesfile.write("%s\t%s\t%s\t%s\t%s\t" % (publishxrefid, authors, original_description, pre_publication_description, post_publication_description))
- sql = """
- SELECT Strain.Name, PublishData.value
- FROM (PublishXRef, PublishData, Strain)
- WHERE PublishXRef.`InbredSetId`=%s
- AND PublishXRef.Id=%s
- AND PublishXRef.DataId=PublishData.Id
- AND PublishData.StrainId=Strain.Id
- """
- cursor.execute(sql, (inbredsetid, publishxrefid))
- results = cursor.fetchall()
- print "get %d values" % (len(results))
- strainvaluedic = {}
- for strainvalue in results:
- strainname = strainvalue[0]
- strainname = strainname.lower()
- value = strainvalue[1]
- strainvaluedic[strainname] = value
- for strain in strains:
- if strain in strainvaluedic:
- phenotypesfile.write(str(strainvaluedic[strain]))
- else:
- phenotypesfile.write('x')
- phenotypesfile.write('\t')
- phenotypesfile.write('\n')
- phenotypesfile.flush()
- # release
- phenotypesfile.close()
-
-def delete(publishxrefid, inbredsetid):
- cursor = utilities.get_cursor()
- sql = """
- DELETE Phenotype
- FROM PublishXRef,Phenotype
- WHERE PublishXRef.`Id`=%s
- AND PublishXRef.`InbredSetId`=%s
- AND PublishXRef.`PhenotypeId`=Phenotype.`Id`
- """
- cursor.execute(sql, (publishxrefid, inbredsetid))
- sql = """
- DELETE PublishData
- FROM PublishXRef,PublishData
- WHERE PublishXRef.`Id`=%s
- AND PublishXRef.`InbredSetId`=%s
- AND PublishXRef.`DataId`=PublishData.`Id`
- """
- cursor.execute(sql, (publishxrefid, inbredsetid))
- sql = """
- DELETE PublishXRef
- FROM PublishXRef
- WHERE PublishXRef.`Id`=%s
- AND PublishXRef.`InbredSetId`=%s
- """
- cursor.execute(sql, (publishxrefid, inbredsetid))
diff --git a/wqflask/maintenance/dataset/probesets.py b/wqflask/maintenance/dataset/probesets.py
deleted file mode 100644
index 97bb5bdf..00000000
--- a/wqflask/maintenance/dataset/probesets.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import utilities
-import datastructure
-import genotypes
-
-def get_probesetxref(probesetfreezeid):
- cursor = utilities.get_cursor()
- sql = """
- SELECT ProbeSetXRef.`ProbeSetId`, ProbeSetXRef.`DataId`
- FROM ProbeSetXRef
- WHERE ProbeSetXRef.`ProbeSetFreezeId`=%s
- """
- cursor.execute(sql, (probesetfreezeid))
- return cursor.fetchall()
-
-def get_probeset(probesetid):
- cursor = utilities.get_cursor()
- sql = """
- SELECT ProbeSet.`Id`, ProbeSet.`Name`, ProbeSet.`Symbol`, ProbeSet.`description`, ProbeSet.`Probe_Target_Description`, ProbeSet.`Chr`, ProbeSet.`Mb`
- FROM ProbeSet
- WHERE ProbeSet.`Id`=%s
- """
- cursor.execute(sql, (probesetid))
- return cursor.fetchone()
-
-def get_probesetdata(probesetdataid):
- cursor = utilities.get_cursor()
- sql = """
- SELECT Strain.`Id`, Strain.`Name`, ProbeSetData.`value`
- FROM ProbeSetData, Strain
- WHERE ProbeSetData.`Id`=%s
- AND ProbeSetData.`StrainId`=Strain.`Id`;
- """
- cursor.execute(sql, (probesetdataid))
- return cursor.fetchall()
-
-def get_probesetxref_probesetfreezeid(locus, probesetfreezeid):
- cursor = utilities.get_cursor()
- sql = """
- SELECT ProbeSetXRef.`ProbeSetId`
- FROM ProbeSetXRef
- WHERE ProbeSetXRef.`ProbeSetFreezeId`=%s
- AND ProbeSetXRef.`Locus` LIKE %s
- """
- cursor.execute(sql, (probesetfreezeid, locus))
- return cursor.fetchall()
-
-def get_probesetxref_inbredsetid(locus, inbredsetid):
- cursor = utilities.get_cursor()
- sql = """
- SELECT ProbeSetXRef.`ProbeSetId`, ProbeSetXRef.`mean`, ProbeSetXRef.`LRS`, ProbeSetXRef.`Locus`, ProbeSetXRef.`ProbeSetFreezeId`
- FROM (ProbeSetXRef, ProbeSetFreeze, ProbeFreeze)
- WHERE ProbeSetXRef.`ProbeSetFreezeId`=ProbeSetFreeze.`Id`
- AND ProbeSetFreeze.`ProbeFreezeId`=ProbeFreeze.`Id`
- AND ProbeFreeze.`InbredSetId`=%s
- AND ProbeSetXRef.`Locus` LIKE %s
- """
- cursor.execute(sql, (inbredsetid, locus))
- return cursor.fetchall()
-
-def get_normalized_probeset(locus, inbredsetid):
- normalized_probesets = []
- probesetxrefs = get_probesetxref_inbredsetid(locus, inbredsetid)
- for probesetxref in probesetxrefs:
- normalized_probeset = []
- #
- probesetfreezeid = probesetxref[4]
- probesetfreeze = datastructure.get_probesetfreeze(probesetfreezeid)
- normalized_probeset.append(probesetfreeze[0])
- normalized_probeset.append(probesetfreeze[1])
- normalized_probeset.append(probesetfreeze[2])
- #
- probesetid = probesetxref[0]
- probeset = get_probeset(probesetid)
- normalized_probeset.append(probeset[1])
- normalized_probeset.append(probeset[2])
- normalized_probeset.append(probeset[3])
- normalized_probeset.append(probeset[4])
- normalized_probeset.append(probeset[5])
- normalized_probeset.append(probeset[6])
- #
- normalized_probeset.append(probesetxref[1])
- normalized_probeset.append(probesetxref[2])
- #
- locus = probesetxref[3]
- geno = genotypes.get_geno(inbredsetid=inbredsetid, name=locus)
- normalized_probeset.append(geno[2])
- normalized_probeset.append(geno[3])
- #
- normalized_probesets.append(normalized_probeset)
- return normalized_probesets
diff --git a/wqflask/maintenance/dataset/specials1.py b/wqflask/maintenance/dataset/specials1.py
deleted file mode 100644
index 9159fd7f..00000000
--- a/wqflask/maintenance/dataset/specials1.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import utilities
-import datastructure
-import genotypes
-import probesets
-import calculate
-
-"""
-For: Rob, GeneNetwork
-Date: 2014-02-04
-Function:
- For BXD group, fetch probesets with given locus (mapping info).
-
-locus="rs3663871"
-"""
-def bxd_probesets_locus(locus, inbredsetid):
- #
- file = open('probesets_%s.txt' % (locus), 'w+')
- file.write("GN Dataset ID\t")
- file.write("Dataset Full Name\t")
- file.write("ProbeSet Name\t")
- file.write("Symbol\t")
- file.write("ProbeSet Description\t")
- file.write("Probe Target Description\t")
- file.write("ProbeSet Chr\t")
- file.write("ProbeSet Mb\t")
- file.write("Mean\t")
- file.write("LRS\t")
- file.write("Geno Chr\t")
- file.write("Geno Mb\t")
- file.write("\n")
- file.flush()
- #
- results = probesets.get_normalized_probeset(locus=locus, inbredsetid=inbredsetid)
- for row in results:
- file.write("%s\t" % (row[0]))
- file.write("%s\t" % (utilities.clearspaces(row[2], default='')))
- file.write("%s\t" % (utilities.clearspaces(row[3], default='')))
- file.write("%s\t" % (utilities.clearspaces(row[4], default='')))
- file.write("%s\t" % (utilities.clearspaces(row[5], default='')))
- file.write("%s\t" % (utilities.clearspaces(row[6], default='')))
- file.write("%s\t" % (utilities.clearspaces(row[7], default='')))
- file.write("%s\t" % (row[8]))
- file.write("%s\t" % (row[9]))
- file.write("%s\t" % (row[10]))
- file.write("%s\t" % (utilities.clearspaces(row[11], default='')))
- file.write("%s\t" % (row[12]))
- file.write('\n')
- file.flush()
- file.close()
-
-locus='rs3663871'
-inbredsetid=1
-bxd_probesets_locus(locus=locus, inbredsetid=inbredsetid)
diff --git a/wqflask/maintenance/dataset/specials2.py b/wqflask/maintenance/dataset/specials2.py
deleted file mode 100644
index 2438af43..00000000
--- a/wqflask/maintenance/dataset/specials2.py
+++ /dev/null
@@ -1,139 +0,0 @@
-import utilities
-import datastructure
-import genotypes
-import probesets
-import calculate
-
-"""
-For: Ash
-Date: 2014-02-07
-Function:
- For BXD group, get a probesetfreeze name list.
-"""
-def probesetfreeze_list():
- #
- inbredsetid = 1
- outputdir = "/home/leiyan/gn2/wqflask/maintenance/dataset/datadir/20140205_Ash_correlations/output"
- #
- probesetfreezes = datastructure.get_probesetfreezes(inbredsetid)
- print "From DB, get %d probesetfreezes" % (len(probesetfreezes))
- file = open(outputdir + '/' + 'probesetfreezes.txt', 'w+')
- #
- for probesetfreeze in probesetfreezes:
- #
- print probesetfreeze
- probesetfreezeid = probesetfreeze[0]
- probesetfreezename = probesetfreeze[1]
- probesetfreezefullname = probesetfreeze[2]
- #
- file.write("%s\t" % probesetfreezeid)
- file.write("%s" % probesetfreezefullname)
- file.write("\n")
- file.flush()
- #
- file.close()
-
-"""
-For: Ash
-Date: 2014-02-05
-Function:
- For BXD group, calculate correlations with genotypes and probesets.
-"""
-def bxd_correlations():
- #
- inbredsetid = 1
- genofile = "/home/leiyan/gn/web/genotypes/BXD.geno"
- outputdir = "/home/leiyan/gn2/wqflask/maintenance/dataset/datadir/20140205_Ash_correlations/output"
- #
- t = genotypes.load_genos(genofile)
- genostrains = t[0]
- genos = t[1]
- print "From geno file, get %d strains" % (len(genostrains))
- print "From geno file, get %d genos" % (len(genos))
- #
- probesetfreezes = datastructure.get_probesetfreezes(inbredsetid)
- print "From DB, get %d probesetfreezes" % (len(probesetfreezes))
- for probesetfreeze in probesetfreezes:
- correlations(outputdir=outputdir, genos=genos, probesetfreeze=probesetfreeze)
-
-def correlations(outputdir, genos, probesetfreeze):
- print probesetfreeze
- probesetfreezeid = probesetfreeze[0]
- probesetfreezename = probesetfreeze[1]
- probesetfreezefullname = probesetfreeze[2]
- #
- outputfile = open("%s/%d_%s.txt" % (outputdir, probesetfreezeid, probesetfreezename), "w+")
- outputfile.write("%s\t" % "ProbeSet Id")
- outputfile.write("%s\t" % "ProbeSet Name")
- outputfile.write("%s\t" % "Geno Name")
- outputfile.write("%s\t" % "Overlap Number")
- outputfile.write("%s\t" % "Pearson r")
- outputfile.write("%s\t" % "Pearson p")
- outputfile.write("%s\t" % "Spearman r")
- outputfile.write("%s\t" % "Spearman p")
- outputfile.write("\n")
- outputfile.flush()
- #
- probesetxrefs = probesets.get_probesetxref(probesetfreezeid)
- print "Get %d probesetxrefs" % (len(probesetxrefs))
- #
- for probesetxref in probesetxrefs:
- #
- probesetid = probesetxref[0]
- probesetdataid = probesetxref[1]
- probeset = probesets.get_probeset(probesetid)
- probesetname = probeset[1]
- probesetdata = probesets.get_probesetdata(probesetdataid)
- probesetdata = zip(*probesetdata)
- probesetdata = utilities.to_dic([strain.lower() for strain in probesetdata[1]], probesetdata[2])
- #
- for geno in genos:
- genoname = geno['locus']
- outputfile.write("%s\t" % probesetid)
- outputfile.write("%s\t" % probesetname)
- outputfile.write("%s\t" % genoname)
- #
- dic1 = geno['dicvalues']
- dic2 = probesetdata
- keys, values1, values2 = utilities.overlap(dic1, dic2)
- rs = calculate.correlation(values1, values2)
- #
- outputfile.write("%s\t" % len(keys))
- outputfile.write("%s\t" % rs[0][0])
- outputfile.write("%s\t" % rs[0][1])
- outputfile.write("%s\t" % rs[1][0])
- outputfile.write("%s\t" % rs[1][1])
- outputfile.write("\n")
- outputfile.flush()
- #
- outputfile.close()
-
-"""
-For: Ash
-Date: 2014-02-10
-Function:
- For BXD group, calculate correlations with genotypes and probesets.
- given probesetfreezes
-"""
-def bxd_correlations_givenprobesetfreezes(probesetfreezesfile):
- #
- inbredsetid = 1
- genofile = "/home/leiyan/gn/web/genotypes/BXD.geno"
- outputdir = "/home/leiyan/gn2/wqflask/maintenance/dataset/datadir/20140205_Ash_correlations/output"
- #
- t = genotypes.load_genos(genofile)
- genostrains = t[0]
- genos = t[1]
- print "From geno file, get %d strains" % (len(genostrains))
- print "From geno file, get %d genos" % (len(genos))
- #
- file = open(probesetfreezesfile, 'r')
- for line in file:
- line = line.strip()
- cells = line.split()
- probesetfreezeid = cells[0]
- probesetfreeze = datastructure.get_probesetfreeze(probesetfreezeid)
- correlations(outputdir=outputdir, genos=genos, probesetfreeze=probesetfreeze)
- file.close()
-
-bxd_correlations_givenprobesetfreezes('/home/leiyan/gn2/wqflask/maintenance/dataset/datadir/20140205_Ash_correlations/output/probesetfreezes_filter.txt')
diff --git a/wqflask/maintenance/dataset/specials3.py b/wqflask/maintenance/dataset/specials3.py
deleted file mode 100644
index 237df27e..00000000
--- a/wqflask/maintenance/dataset/specials3.py
+++ /dev/null
@@ -1,117 +0,0 @@
-import utilities
-import datastructure
-import genotypes
-import probesets
-import calculate
-
-def correlations(outputdir, genos, probesetfreeze):
- print probesetfreeze
- probesetfreezeid = probesetfreeze[0]
- probesetfreezename = probesetfreeze[1]
- probesetfreezefullname = probesetfreeze[2]
- #
- outputfile = open("%s/%d_%s.txt" % (outputdir, probesetfreezeid, probesetfreezename), "w+")
- outputfile.write("%s\t" % "ProbeSet Id")
- outputfile.write("%s\t" % "ProbeSet Name")
- outputfile.write("%s\t" % "Geno Name")
- outputfile.write("%s\t" % "Overlap Number")
- outputfile.write("%s\t" % "Pearson r")
- outputfile.write("%s\t" % "Pearson p")
- outputfile.write("%s\t" % "Spearman r")
- outputfile.write("%s\t" % "Spearman p")
- outputfile.write("\n")
- outputfile.flush()
- #
- probesetxrefs = probesets.get_probesetxref(probesetfreezeid)
- print "Get %d probesetxrefs" % (len(probesetxrefs))
- #
- for probesetxref in probesetxrefs:
- #
- probesetid = probesetxref[0]
- probesetdataid = probesetxref[1]
- probeset = probesets.get_probeset(probesetid)
- probesetname = probeset[1]
- probesetdata = probesets.get_probesetdata(probesetdataid)
- probesetdata = zip(*probesetdata)
- probesetdata = utilities.to_dic([strain.lower() for strain in probesetdata[1]], probesetdata[2])
- #
- for geno in genos:
- genoname = geno['locus']
- outputfile.write("%s\t" % probesetid)
- outputfile.write("%s\t" % probesetname)
- outputfile.write("%s\t" % genoname)
- #
- dic1 = geno['dicvalues']
- dic2 = probesetdata
- keys, values1, values2 = utilities.overlap(dic1, dic2)
- rs = calculate.correlation(values1, values2)
- #
- outputfile.write("%s\t" % len(keys))
- outputfile.write("%s\t" % rs[0][0])
- outputfile.write("%s\t" % rs[0][1])
- outputfile.write("%s\t" % rs[1][0])
- outputfile.write("%s\t" % rs[1][1])
- outputfile.write("\n")
- outputfile.flush()
- #
- outputfile.close()
-
-"""
-For: Ash
-Date: 2014-02-12
-Function:
- Generate probeset data files.
- given probesetfreeze list.
-"""
-def generate_probesets(probesetfreezesfile, outputdir):
- file = open(probesetfreezesfile, 'r')
- for line in file:
- line = line.strip()
- cells = line.split()
- probesetfreezeid = cells[0]
- probesetfreeze = datastructure.get_probesetfreeze(probesetfreezeid)
- probesetfreezeid = probesetfreeze[0]
- probesetfreezename = probesetfreeze[1]
- inbredset = datastructure.get_inbredset(probesetfreezeid)
- inbredsetid = inbredset[0]
- strains = datastructure.get_strains(inbredsetid)
- #
- outputfile = open("%s/%d_%s.txt" % (outputdir, probesetfreezeid, probesetfreezename), "w+")
- outputfile.write("%s\t" % "ProbeSet Id")
- outputfile.write("%s\t" % "ProbeSet Name")
- outputfile.write('\t'.join([strain[1].upper() for strain in strains]))
- outputfile.write("\n")
- outputfile.flush()
- #
- probesetxrefs = probesets.get_probesetxref(probesetfreezeid)
- print probesetfreeze
- print len(probesetxrefs)
- for probesetxref in probesetxrefs:
- probesetid = probesetxref[0]
- probesetdataid = probesetxref[1]
- probeset = probesets.get_probeset(probesetid)
- probesetname = probeset[1]
- probesetdata = probesets.get_probesetdata(probesetdataid)
- probesetdata = zip(*probesetdata)
- probesetdata = utilities.to_dic([strain.lower() for strain in probesetdata[1]], probesetdata[2])
- #
- outputfile.write("%s\t" % probesetid)
- outputfile.write("%s\t" % probesetname)
- #
- for strain in strains:
- strainname = strain[1]
- strainname = strainname.lower()
- if strainname in probesetdata:
- value = probesetdata[strainname]
- else:
- value = 'x'
- outputfile.write("%s\t" % value)
- outputfile.write("\n")
- outputfile.flush()
- #
- outputfile.close()
- file.close()
-
-probesetfreezesfile = "/home/leiyan/gn2/wqflask/maintenance/dataset/datadir/20140205_Ash_correlations/output2/probesetfreezes_filter.txt"
-outputdir = "/home/leiyan/gn2/wqflask/maintenance/dataset/datadir/20140205_Ash_correlations/output2"
-generate_probesets(probesetfreezesfile, outputdir)
diff --git a/wqflask/maintenance/dataset/utilities.py b/wqflask/maintenance/dataset/utilities.py
deleted file mode 100644
index 787c9481..00000000
--- a/wqflask/maintenance/dataset/utilities.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import MySQLdb
-import re
-import ConfigParser
-
-def get_cursor():
- host = 'localhost'
- user = 'gn2'
- passwd = 'UhHJuiS6gC8hj4a'
- db = 'db_webqtl'
- con = MySQLdb.Connect(db=db, host=host, user=user, passwd=passwd)
- cursor = con.cursor()
- return cursor, con
-
-def clearspaces(s, default=None):
- if s:
- s = re.sub('\s+', ' ', s)
- s = s.strip()
- return s
- else:
- return default
-
-def to_dic(keys, values):
- dic = {}
- for i in range(len(keys)):
- key = keys[i]
- value = values[i]
- dic[key] = value
- return dic
-
-def overlap(dic1, dic2):
- keys = []
- values1 = []
- values2 = []
- for key in dic1.keys():
- if key in dic2:
- value1 = dic1[key]
- value2 = dic2[key]
- if value1 and value2:
- keys.append(key)
- values1.append(value1)
- values2.append(value2)
- return keys, values1, values2
-
-def to_db_string(s, default):
- if s:
- s = s.strip()
- if len(s) == 0:
- return default
- elif s == 'x':
- return default
- else:
- return s
- else:
- return default
-
-def to_db_float(s, default):
- if s:
- s = s.strip()
- if len(s) == 0:
- return default
- elif s == 'x':
- return default
- else:
- try:
- return float(s)
- except:
- return default
- else:
- return default
-
-def to_db_int(s, default):
- if s:
- s = s.strip()
- if len(s) == 0:
- return default
- elif s == 'x':
- return default
- else:
- try:
- return int(s)
- except:
- return default
- else:
- return default
-
-def get_config(configfile):
- config = ConfigParser.ConfigParser()
- config.read(configfile)
- return config
diff --git a/wqflask/maintenance/gen_select_dataset.py b/wqflask/maintenance/gen_select_dataset.py
index a2ad8c91..e080050e 100755
--- a/wqflask/maintenance/gen_select_dataset.py
+++ b/wqflask/maintenance/gen_select_dataset.py
@@ -76,7 +76,7 @@ def parse_db_uri(db_uri):
def get_species():
"""Build species list"""
- Cursor.execute("select Name, MenuName from Species order by OrderId")
+ Cursor.execute("select Name, MenuName from Species where Species.Name != 'macaque monkey' order by OrderId")
species = list(Cursor.fetchall())
return species
@@ -90,8 +90,8 @@ def get_groups(species):
ProbeFreeze, GenoFreeze, PublishFreeze where Species.Name = %s
and InbredSet.SpeciesId = Species.Id and InbredSet.Name != 'BXD300' and
(PublishFreeze.InbredSetId = InbredSet.Id
- or GenoFreeze.InbredSetId = InbredSet.Id
- or ProbeFreeze.InbredSetId = InbredSet.Id)
+ or GenoFreeze.InbredSetId = InbredSet.Id
+ or ProbeFreeze.InbredSetId = InbredSet.Id)
group by InbredSet.Name
order by InbredSet.Name""", (species_name))
groups[species_name] = list(Cursor.fetchall())
@@ -123,12 +123,12 @@ def get_types(groups):
def phenotypes_exist(group_name):
- print("group_name:", group_name)
+ #print("group_name:", group_name)
Cursor.execute("""select Name from PublishFreeze
where PublishFreeze.Name = %s""", (group_name+"Publish"))
results = Cursor.fetchone()
- print("RESULTS:", results)
+ #print("RESULTS:", results)
if results != None:
return True
@@ -136,12 +136,12 @@ def phenotypes_exist(group_name):
return False
def genotypes_exist(group_name):
- print("group_name:", group_name)
+ #print("group_name:", group_name)
Cursor.execute("""select Name from GenoFreeze
where GenoFreeze.Name = %s""", (group_name+"Geno"))
results = Cursor.fetchone()
- print("RESULTS:", results)
+ #print("RESULTS:", results)
if results != None:
return True
@@ -156,15 +156,6 @@ def build_types(species, group):
"""
- #print("""select distinct Tissue.Name
- # from ProbeFreeze, ProbeSetFreeze, InbredSet, Tissue, Species
- # where Species.Name = '{}' and Species.Id = InbredSet.SpeciesId and
- # InbredSet.Name = '{}' and
- # ProbeFreeze.TissueId = Tissue.Id and
- # ProbeFreeze.InbredSetId = InbredSet.Id and
- # ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id and
- # ProbeSetFreeze.public > 0
- # order by Tissue.Name""".format(species, group))
Cursor.execute("""select distinct Tissue.Name
from ProbeFreeze, ProbeSetFreeze, InbredSet, Tissue, Species
where Species.Name = %s and Species.Id = InbredSet.SpeciesId and
@@ -178,7 +169,9 @@ def build_types(species, group):
results = []
for result in Cursor.fetchall():
if len(result):
- results.append((result[0], result[0]))
+ these_datasets = build_datasets(species, group, result[0])
+ if len(these_datasets) > 0:
+ results.append((result[0], result[0]))
return results
@@ -191,7 +184,10 @@ def get_datasets(types):
#print("type_list: ", type_list)
datasets[species][group] = {}
for type_name in type_list:
- datasets[species][group][type_name[0]] = build_datasets(species, group, type_name[0])
+ these_datasets = build_datasets(species, group, type_name[0])
+ if len(these_datasets) > 0:
+ datasets[species][group][type_name[0]] = these_datasets
+
return datasets
@@ -199,6 +195,20 @@ def build_datasets(species, group, type_name):
"""Gets dataset names from database"""
dataset_text = dataset_value = None
if type_name == "Phenotypes":
+ print("GROUP:", group)
+ Cursor.execute("""select InfoFiles.GN_AccesionId from InfoFiles, PublishFreeze, InbredSet where
+ InbredSet.Name = %s and
+ PublishFreeze.InbredSetId = InbredSet.Id and
+ InfoFiles.InfoPageName = PublishFreeze.Name and
+ PublishFreeze.public > 0 and
+ PublishFreeze.confidentiality < 1 order by
+ PublishFreeze.CreateTime desc""", (group))
+
+ results = Cursor.fetchone()
+ if results != None:
+ dataset_id = str(results[0])
+ else:
+ dataset_id = "None"
dataset_value = "%sPublish" % group
if group == 'MDP':
dataset_text = "Mouse Phenome Database"
@@ -206,29 +216,43 @@ def build_datasets(species, group, type_name):
dataset_text = "%s Published Phenotypes" % group
elif type_name == "Genotypes":
+ Cursor.execute("""select InfoFiles.GN_AccesionId from InfoFiles, GenoFreeze, InbredSet where
+ InbredSet.Name = %s and
+ GenoFreeze.InbredSetId = InbredSet.Id and
+ InfoFiles.InfoPageName = GenoFreeze.ShortName and
+ GenoFreeze.public > 0 and
+ GenoFreeze.confidentiality < 1 order by
+ GenoFreeze.CreateTime desc""", (group))
+
+ results = Cursor.fetchone()
+ if results != None:
+ dataset_id = str(results[0])
+ else:
+ dataset_id = "None"
dataset_value = "%sGeno" % group
dataset_text = "%s Genotypes" % group
if dataset_value:
- return [(dataset_value, dataset_text)]
+ return [(dataset_id, dataset_value, dataset_text)]
else:
- #print("""select ProbeSetFreeze.Name, ProbeSetFreeze.FullName from
- # ProbeSetFreeze, ProbeFreeze, InbredSet, Tissue, Species where
- # Species.Name = '{}' and Species.Id = InbredSet.SpeciesId and
- # InbredSet.Name = '{}' and
- # ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id and Tissue.Name = '{}'
- # and ProbeFreeze.TissueId = Tissue.Id and ProbeFreeze.InbredSetId =
- # InbredSet.Id and ProbeSetFreeze.public > 0 order by
- # ProbeSetFreeze.CreateTime desc""".format(species, group, type_name))
- Cursor.execute("""select ProbeSetFreeze.Name, ProbeSetFreeze.FullName from
+ Cursor.execute("""select ProbeSetFreeze.Id, ProbeSetFreeze.Name, ProbeSetFreeze.FullName from
ProbeSetFreeze, ProbeFreeze, InbredSet, Tissue, Species where
Species.Name = %s and Species.Id = InbredSet.SpeciesId and
InbredSet.Name = %s and
- ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id and Tissue.Name = %s
- and ProbeFreeze.TissueId = Tissue.Id and ProbeFreeze.InbredSetId =
- InbredSet.Id and ProbeSetFreeze.public > 0 order by
+ ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id and Tissue.Name = %s and
+ ProbeFreeze.TissueId = Tissue.Id and ProbeFreeze.InbredSetId = InbredSet.Id and
+ ProbeSetFreeze.confidentiality < 1 and ProbeSetFreeze.public > 0 order by
ProbeSetFreeze.CreateTime desc""", (species, group, type_name))
- return Cursor.fetchall()
+
+ dataset_results = Cursor.fetchall()
+ datasets = []
+ for dataset_info in dataset_results:
+ this_dataset_info = []
+ for info in dataset_info:
+ this_dataset_info.append(str(info))
+ datasets.append(this_dataset_info)
+
+ return datasets
def main():
@@ -238,7 +262,6 @@ def main():
species = get_species()
groups = get_groups(species)
- print("groups:", groups)
types = get_types(groups)
datasets = get_datasets(types)
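With the changes above, each dataset entry gains a leading identifier (InfoFiles.GN_AccesionId for Phenotypes/Genotypes, ProbeSetFreeze.Id for mRNA datasets) and tissue types with no public datasets are dropped. The nested structure returned by get_datasets() therefore looks roughly like the sketch below; all names and ids here are hypothetical placeholders:

# Approximate shape of the datasets dictionary after this commit:
# species -> group -> tissue/type -> list of [accession_or_dataset_id, name, full_name]
datasets = {
    "mouse": {
        "BXD": {
            "Phenotypes": [["None", "BXDPublish", "BXD Published Phenotypes"]],
            "Genotypes": [["600", "BXDGeno", "BXD Genotypes"]],
            "Hippocampus": [
                ["112", "HC_M2_0606_P", "Hippocampus Example M430v2 (Jun06) PDNN"],
            ],
        },
    },
}
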
diff --git a/wqflask/maintenance/quick_search_table.py b/wqflask/maintenance/quick_search_table.py
deleted file mode 100755
index f0075df0..00000000
--- a/wqflask/maintenance/quick_search_table.py
+++ /dev/null
@@ -1,509 +0,0 @@
-"""Creates a table used for the quick search feature.
-
-One column contains the terms to match the user's search against. Another contains the result
-fields in json format
-
-Results will be returned for each of several trait types: mRNA assays, phenotypes, genotypes, and
-(maybe later) genes
-
-For each trait type, the results for each species should be given This table will then list
-each trait, its dataset, and several columns determined by its trait type (phenotype, genotype, etc)
-
-"""
-
-from __future__ import absolute_import, division, print_function
-
- # We do this here so we can use zach_settings
-# Not to avoid other absoulte_imports
-import sys
-sys.path.append("../../..")
-
-import simplejson as json
-
-import sqlalchemy as sa
-from sqlalchemy.orm import scoped_session, sessionmaker
-from sqlalchemy.ext.declarative import declarative_base
-
-#from pprint import pformat as pf
-
-import zach_settings as settings
-
-Engine = sa.create_engine(settings.SQLALCHEMY_DATABASE_URI,
- #encoding='utf-8',
- #client_encoding='utf-8',
- #echo="debug",w
- )
-
-Session = scoped_session(sessionmaker(bind=Engine)) #, extension=VersionedListener()))
-
-Base = declarative_base(bind=Engine)
-Metadata = sa.MetaData()
-Metadata.bind = Engine
-
-class PublishXRef(Base):
- """Class that corresponds with the PublishXRef table in the database.
-
- The PublishXRef table links phenotype traits and their publications.
-
- This class is used to add phenotype traits to the quick search table.
-
- """
-
- __tablename__ = 'PublishXRef'
-
- Id = sa.Column(sa.Integer, primary_key=True)
- InbredSetId = sa.Column(sa.Integer, primary_key=True)
- PhenotypeId = sa.Column(sa.Integer)
- PublicationId = sa.Column(sa.Integer)
- DataId = sa.Column(sa.Integer)
- Locus = sa.Column(sa.Text)
- LRS = sa.Column(sa.Float)
- additive = sa.Column(sa.Float)
- Sequence = sa.Column(sa.Integer)
- comments = sa.Column(sa.Text)
-
- @classmethod
- def run(cls):
- """Connects to database and inserts phenotype trait info into the Quicksearch table."""
- conn = Engine.connect()
- counter = 0
- for pub_row in page_query(Session.query(cls)): #all()
- values = {}
- values['table_name'] = cls.__tablename__
- values['the_key'] = json.dumps([pub_row.Id, pub_row.InbredSetId])
- values['terms'] = cls.get_unique_terms(pub_row.Id, pub_row.InbredSetId)
- print("terms is:", values['terms'])
- if values['terms']:
- values['result_fields'] = cls.get_result_fields(pub_row.Id, pub_row.InbredSetId)
- ins = QuickSearch.insert().values(**values)
- conn.execute(ins)
- counter += 1
- print("Done:", counter)
-
- @staticmethod
- def get_unique_terms(publishxref_id, inbredset_id):
- """Finds unique terms for each item in the PublishXRef table to match a query against"""
- results = Session.query(
- "pre_publication_description",
- "post_publication_description",
- "pre_publication_abbreviation",
- "post_publication_abbreviation",
- "publication_title"
- ).from_statement(
- "SELECT Phenotype.Pre_publication_description as pre_publication_description, "
- "Phenotype.Post_publication_description as post_publication_description, "
- "Phenotype.Pre_publication_abbreviation as pre_publication_abbreviation, "
- "Phenotype.Post_publication_abbreviation as post_publication_abbreviation, "
- "Publication.Title as publication_title "
- "FROM Phenotype, Publication, PublishXRef "
- "WHERE PublishXRef.Id = :publishxref_id and "
- "PublishXRef.InbredSetId = :inbredset_id and "
- "PublishXRef.PhenotypeId = Phenotype.Id and "
- "PublishXRef.PublicationId = Publication.Id ").params(publishxref_id=publishxref_id,
- inbredset_id=inbredset_id).all()
-
- unique = set()
- print("results: ", results)
- if len(results):
- for item in results[0]:
- #print("locals:", locals())
- if not item:
- continue
- for token in item.split():
- if token.startswith(('(','[')):
- token = token[1:]
- if token.endswith((')', ']')):
- token = token[:-1]
- if token.endswith(';'):
- token = token[:-1]
- if len(token) > 2:
- try:
- # This hopefully ensures that the token is utf-8
- token = token.encode('utf-8')
- print(" ->", token)
- except UnicodeDecodeError:
- print("\n-- UDE \n")
- # Can't get it into utf-8, we won't use it
- continue
-
- unique.add(token)
- #print("\nUnique terms are: {}\n".format(unique))
- return " ".join(unique)
-
- @staticmethod
- def get_result_fields(publishxref_id, inbredset_id):
- """Gets the result fields (columns) that appear on the result page as a json string"""
- results = Session.query(
- "phenotype_id",
- "species",
- "group_name",
- "description",
- "lrs",
- "publication_id",
- "pubmed_id",
- "year",
- "authors"
- ).from_statement(
- "SELECT PublishXRef.PhenotypeId as phenotype_id, "
- "Species.Name as species, "
- "InbredSet.Name as group_name, "
- "Phenotype.Original_description as description, "
- "PublishXRef.LRS as lrs, "
- "PublishXRef.PublicationId as publication_id, "
- "Publication.PubMed_ID as pubmed_id, "
- "Publication.Year as year, "
- "Publication.Authors as authors "
- "FROM PublishXRef, "
- "Phenotype, "
- "Publication, "
- "InbredSet, "
- "Species "
- "WHERE PublishXRef.Id = :publishxref_id and "
- "PublishXRef.InbredSetId = :inbredset_id and "
- "PublishXRef.PhenotypeId = Phenotype.Id and "
- "PublishXRef.PublicationId = Publication.Id and "
- "InbredSet.Id = :inbredset_id and "
- "Species.Id = InbredSet.SpeciesId ").params(publishxref_id=publishxref_id,
- inbredset_id=inbredset_id).all()
-
- assert len(set(result for result in results)) == 1, "Different results or no results"
-
- result = results[0]
- result = row2dict(result)
- try:
- json_results = json.dumps(result, sort_keys=True)
- except UnicodeDecodeError:
- print("\n\nTrying to massage unicode\n\n")
- for key, value in result.iteritems():
- print("\tkey is:", key)
- print("\tvalue is:", value)
- if isinstance(value, basestring):
- result[key] = value.decode('utf-8', errors='ignore')
- json_results = json.dumps(result, sort_keys=True)
-
- return json_results
-
-
-class GenoXRef(Base):
- """Class that corresponds with the GenoXRef table in the database.
-
- The GenoXRef table links genotype traits and their data.
-
- This class is used to add genotype traits to the quick search table.
-
- """
-
- __tablename__ = 'GenoXRef'
-
- GenoFreezeId = sa.Column(sa.Integer, primary_key=True)
- GenoId = sa.Column(sa.Integer, primary_key=True)
- DataId = sa.Column(sa.Integer)
- cM = sa.Column(sa.Float)
- Used_for_mapping = sa.Column(sa.Text)
-
- @classmethod
- def run(cls):
- """Connects to database and inserts genotype trait info into the Quicksearch table."""
- conn = Engine.connect()
- counter = 0
- for item in page_query(Session.query(cls)): #all()
- values = {}
- values['table_name'] = cls.__tablename__
- values['the_key'] = json.dumps([item.GenoId, item.GenoFreezeId])
- values['terms'] = cls.get_unique_terms(item.GenoId)
- print("terms is:", values['terms'])
- if values['terms']:
- values['result_fields'] = cls.get_result_fields(item.GenoId, item.GenoFreezeId)
- ins = QuickSearch.insert().values(**values)
- conn.execute(ins)
- counter += 1
- print("Done:", counter)
-
- @staticmethod
- def get_unique_terms(geno_id):
- """Finds unique terms for each item in the GenoXRef table to match a query against"""
- print("geno_id: ", geno_id)
- results = Session.query(
- "name",
- "marker_name"
- ).from_statement(
- "SELECT Geno.Name as name, "
- "Geno.Marker_Name as marker_name "
- "FROM Geno "
- "WHERE Geno.Id = :geno_id ").params(geno_id=geno_id).all()
-
- unique = set()
- if len(results):
- for item in results[0]:
- #print("locals:", locals())
- if not item:
- continue
- for token in item.split():
- if len(token) > 2:
- try:
- # Make sure the token can be represented as UTF-8; if it cannot, skip it below
- token = token.encode('utf-8')
- print(" ->", token)
- except UnicodeDecodeError:
- print("\n-- UDE \n")
- # Token is not valid UTF-8, so leave it out of the search terms
- continue
-
- unique.add(token)
- return " ".join(unique)
-
-
- @staticmethod
- def get_result_fields(geno_id, dataset_id):
- """Gets the result fields (columns) that appear on the result page as a json string"""
- results = Session.query(
- "name",
- "marker_name",
- "group_name",
- "species",
- "dataset",
- "dataset_name",
- "chr", "mb",
- "source"
- ).from_statement(
- "SELECT Geno.Name as name, "
- "Geno.Marker_Name as marker_name, "
- "InbredSet.Name as group_name, "
- "Species.Name as species, "
- "GenoFreeze.Name as dataset, "
- "GenoFreeze.FullName as dataset_name, "
- "Geno.Chr as chr, "
- "Geno.Mb as mb, "
- "Geno.Source as source "
- "FROM Geno, "
- "GenoXRef, "
- "GenoFreeze, "
- "InbredSet, "
- "Species "
- "WHERE Geno.Id = :geno_id and "
- "GenoXRef.GenoId = Geno.Id and "
- "GenoFreeze.Id = :dataset_id and "
- "GenoXRef.GenoFreezeId = GenoFreeze.Id and "
- "InbredSet.Id = GenoFreeze.InbredSetId and "
- "InbredSet.SpeciesId = Species.Id ").params(geno_id=geno_id,
- dataset_id=dataset_id).all()
- assert len(set(results)) == 1, "Expected exactly one distinct result row"
-
- result = results[0]
- result = row2dict(result)
- try:
- json_results = json.dumps(result, sort_keys=True)
- except UnicodeDecodeError:
- print("\n\nTrying to massage unicode\n\n")
- for key, value in result.iteritems():
- print("\tkey is:", key)
- print("\tvalue is:", value)
- if isinstance(value, basestring):
- result[key] = value.decode('utf-8', errors='ignore')
- json_results = json.dumps(result, sort_keys=True)
-
- return json_results
-
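The statement above joins five tables with the older comma syntax and expresses every join condition in the WHERE clause. For readers who find explicit JOINs easier to follow, an equivalent form is sketched below; the constant name GENO_RESULT_SQL is illustrative, and the result set is assumed, not verified, to be identical:

    GENO_RESULT_SQL = (
        "SELECT Geno.Name as name, Geno.Marker_Name as marker_name, "
        "InbredSet.Name as group_name, Species.Name as species, "
        "GenoFreeze.Name as dataset, GenoFreeze.FullName as dataset_name, "
        "Geno.Chr as chr, Geno.Mb as mb, Geno.Source as source "
        "FROM Geno "
        "JOIN GenoXRef ON GenoXRef.GenoId = Geno.Id "
        "JOIN GenoFreeze ON GenoFreeze.Id = GenoXRef.GenoFreezeId "
        "JOIN InbredSet ON InbredSet.Id = GenoFreeze.InbredSetId "
        "JOIN Species ON Species.Id = InbredSet.SpeciesId "
        "WHERE Geno.Id = :geno_id AND GenoFreeze.Id = :dataset_id")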
-class ProbeSetXRef(Base):
- """Class that corresponds with the ProbeSetXRef table in the database.
-
- The ProbeSetXRef table links mRNA expression traits and their sample data.
-
- This class is used to add mRNA expression traits to the quick search table.
-
- """
-
- __tablename__ = 'ProbeSetXRef'
-
- ProbeSetFreezeId = sa.Column(sa.Integer, primary_key=True)
- ProbeSetId = sa.Column(sa.Integer, primary_key=True)
- DataId = sa.Column(sa.Integer, unique=True)
- Locus_old = sa.Column(sa.Text)
- LRS_old = sa.Column(sa.Float)
- pValue_old = sa.Column(sa.Float)
- mean = sa.Column(sa.Float)
- se = sa.Column(sa.Float)
- Locus = sa.Column(sa.Text)
- LRS = sa.Column(sa.Float)
- pValue = sa.Column(sa.Float)
- additive = sa.Column(sa.Float)
- h2 = sa.Column(sa.Float)
-
- @classmethod
- def run(cls):
- """Connects to db and inserts mRNA expression trait info into the Quicksearch table."""
- conn = Engine.connect()
- counter = 0
- for ps_row in page_query(Session.query(cls)):  # paged to avoid loading the whole table at once
- values = {}
- values['table_name'] = cls.__tablename__
- values['the_key'] = json.dumps([ps_row.ProbeSetId, ps_row.ProbeSetFreezeId])
- values['terms'] = cls.get_unique_terms(ps_row.ProbeSetId)
- print("terms is:", values['terms'])
- values['result_fields'] = cls.get_result_fields(ps_row.ProbeSetId,
- ps_row.ProbeSetFreezeId)
- if values['result_fields'] is None:
- continue
- ins = QuickSearch.insert().values(**values)
- conn.execute(ins)
- counter += 1
- print("Done:", counter)
-
- @staticmethod
- def get_unique_terms(probeset_id):
- """Finds unique terms for each item in the ProbeSetXRef table to match a query against"""
- results = Session.query(
- "name",
- "symbol",
- "description",
- "alias"
- ).from_statement(
- "SELECT ProbeSet.Name as name, "
- "ProbeSet.Symbol as symbol, "
- "ProbeSet.description as description, "
- "ProbeSet.alias as alias "
- "FROM ProbeSet "
- "WHERE ProbeSet.Id = :probeset_id ").params(probeset_id=probeset_id).all()
-
- unique = set()
- if len(results):
- for item in results[0]:
- if not item:
- continue
- for token in item.split():
- if token.startswith(('(','[')):
- token = token[1:]
- if token.endswith((')', ']')):
- token = token[:-1]
- if token.endswith(';'):
- token = token[:-1]
- if len(token) > 2:
- try:
- # Make sure the token can be represented as UTF-8; if it cannot, skip it below
- token = token.encode('utf-8')
- print(" ->", token)
- except UnicodeDecodeError:
- print("\n-- UDE \n")
- # Token is not valid UTF-8, so leave it out of the search terms
- continue
-
- unique.add(token)
- return " ".join(unique)
-
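The token clean-up above (strip at most one leading bracket, one trailing bracket, a trailing semicolon, keep only tokens longer than two characters, and drop anything that is not valid UTF-8) also appears, with small variations, in the PublishXRef and GenoXRef term builders. A sketch of the same rules collected into one helper; the name extract_terms is illustrative only:

    def extract_terms(*fields):
        """Build a space-separated string of unique search terms from field values."""
        unique = set()
        for field in fields:
            if not field:
                continue
            for token in field.split():
                # Mirror the stripping rules used above: at most one enclosing
                # bracket on each side, then one trailing semicolon.
                if token.startswith(('(', '[')):
                    token = token[1:]
                if token.endswith((')', ']')):
                    token = token[:-1]
                if token.endswith(';'):
                    token = token[:-1]
                if len(token) <= 2:
                    continue
                try:
                    unique.add(token.encode('utf-8'))
                except UnicodeDecodeError:
                    continue  # not representable as UTF-8, leave it out
        return " ".join(unique)

For example, extract_terms(name, symbol, description, alias) would reproduce the ProbeSet terms and extract_terms(name, marker_name) the Geno terms, minus the debug printing.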
-
- @staticmethod
- def get_result_fields(probeset_id, dataset_id):
- """Gets the result fields (columns) that appear on the result page as a json string"""
- print("probeset_id: ", probeset_id)
- print("dataset_id: ", dataset_id)
- results = Session.query(
- "name",
- "species",
- "group_name",
- "dataset",
- "dataset_name",
- "symbol",
- "description",
- "chr", "mb",
- "lrs", "mean",
- "genbank_id",
- "gene_id",
- "chip_id",
- "chip_name"
- ).from_statement(
- "SELECT ProbeSet.Name as name, "
- "Species.Name as species, "
- "InbredSet.Name as group_name, "
- "ProbeSetFreeze.Name as dataset, "
- "ProbeSetFreeze.FullName as dataset_name, "
- "ProbeSet.Symbol as symbol, "
- "ProbeSet.description as description, "
- "ProbeSet.Chr as chr, "
- "ProbeSet.Mb as mb, "
- "ProbeSetXRef.LRS as lrs, "
- "ProbeSetXRef.mean as mean, "
- "ProbeSet.GenbankId as genbank_id, "
- "ProbeSet.GeneId as gene_id, "
- "ProbeSet.ChipId as chip_id, "
- "GeneChip.Name as chip_name "
- "FROM ProbeSet, "
- "ProbeSetXRef, "
- "ProbeSetFreeze, "
- "ProbeFreeze, "
- "InbredSet, "
- "Species, "
- "GeneChip "
- "WHERE ProbeSetXRef.ProbeSetId = :probeset_id and "
- "ProbeSetXRef.ProbeSetFreezeId = :dataset_id and "
- "ProbeSetXRef.ProbeSetId = ProbeSet.Id and "
- "ProbeSet.ChipId = GeneChip.Id and "
- "ProbeSetXRef.ProbeSetFreezeId = ProbeSetFreeze.Id and "
- "ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id and "
- "ProbeFreeze.InbredSetId = InbredSet.Id and "
- "InbredSet.SpeciesId = Species.Id ").params(probeset_id=probeset_id,
- dataset_id=dataset_id).all()
-
- if len(set(results)) != 1:
- return None
-
- result = results[0]
- result = row2dict(result)
- try:
- json_results = json.dumps(result, sort_keys=True)
- except UnicodeDecodeError:
- print("\n\nTrying to massage unicode\n\n")
- for key, value in result.iteritems():
- print("\tkey is:", key)
- print("\tvalue is:", value)
- if isinstance(value, basestring):
- result[key] = value.decode('utf-8', errors='ignore')
- json_results = json.dumps(result, sort_keys=True)
-
- return json_results
-
-QuickSearch = sa.Table("QuickSearch", Metadata,
- # table_name is the table that item is inserted from
- sa.Column('table_name', sa.String(15),
- primary_key=True, nullable=False, autoincrement=False),
- sa.Column('the_key', sa.String(30),
- primary_key=True, nullable=False, autoincrement=False), # key in database table
- sa.Column('terms', sa.Text), # terms to compare search string with
- sa.Column('result_fields', sa.Text), # json
- mysql_engine = 'MyISAM',
- )
-
-QuickSearch.drop(Engine, checkfirst=True)
-Metadata.create_all(Engine)
-
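The terms column holds the space-separated strings produced by the get_unique_terms methods, and result_fields holds the JSON produced by get_result_fields. The code that actually queries this table lives elsewhere in wqflask, so the lookup below is only an illustrative sketch, using the module-level sa, Engine, QuickSearch, and json names defined in this script, of how a search string could be matched against it:

    def lookup(search_string):
        """Illustrative only: substring-match a search string against QuickSearch.terms."""
        query = sa.select([QuickSearch.c.table_name,
                           QuickSearch.c.the_key,
                           QuickSearch.c.result_fields]).where(
                               QuickSearch.c.terms.like("%" + search_string + "%"))
        return [(row.table_name, json.loads(row.the_key), json.loads(row.result_fields))
                for row in Engine.execute(query)]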
-
-def row2dict(row):
- """From http://stackoverflow.com/a/2848519/1175849"""
- return dict(zip(row.keys(), row))
-
-
-def page_query(query):
- """From http://stackoverflow.com/a/1217947/1175849"""
- offset = 0
- while True:
- got_rows = False  # flips to True if this page returned any rows
- for elem in query.limit(1000).offset(offset):
- got_rows = True
- yield elem
- offset += 1000
- if not got_rows:
- break
-
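page_query walks a query 1000 rows at a time with LIMIT/OFFSET so that none of the XRef tables has to be loaded into memory at once. OFFSET paging is only guaranteed to visit every row exactly once when the query has a deterministic order, so one way of pinning the order down is sketched here; choosing DataId as the sort key is an assumption, not something the script does:

    # Page through GenoXRef in a fixed order; the loop body is the caller's.
    for xref in page_query(Session.query(GenoXRef).order_by(GenoXRef.DataId)):
        print(xref.GenoId, xref.GenoFreezeId)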
-
-def main():
- """Populate the QuickSearch table that is used with the quick search features.
-
- Add all items from the ProbeSetXRef, GenoXRef, and PublishXRef tables to the QuickSearch tables.
-
- """
-
- GenoXRef.run()
- PublishXRef.run()
- ProbeSetXRef.run()
-
-if __name__ == "__main__":
- main()