author     BonfaceKilz  2020-09-28 18:13:19 +0300
committer  BonfaceKilz  2020-09-28 18:13:19 +0300
commit     da6098574f8b410386e84f07fd0e8d0eed39e40d (patch)
tree       5da9a54c80ea4525aa2fb08f9dc3012c99626ed9
parent     d34258bed3ef13350499414100401df3bf08a105 (diff)
parent     367de7d8bd822a80cdc035a219b814f0b268b65f (diff)
download   genenetwork2-da6098574f8b410386e84f07fd0e8d0eed39e40d.tar.gz
Merge branch 'build/python3-migration' of github.com:BonfaceKilz/genenetwork2 into build/python3-migration
-rw-r--r--  .github/workflows/main.yml  2
-rwxr-xr-x  bin/genenetwork2  6
-rw-r--r--  doc/GUIX-Reproducible-from-source.org  6
-rw-r--r--  doc/README.org  2
-rw-r--r--  doc/development.org  4
-rw-r--r--  etc/default_settings.py  4
-rwxr-xr-x  scripts/maintenance/QTL_Reaper_v6.py  10
-rw-r--r--  scripts/maintenance/Update_Case_Attributes_MySQL_tab.py  2
-rwxr-xr-x  scripts/maintenance/delete_genotypes.py  16
-rwxr-xr-x  scripts/maintenance/delete_phenotypes.py  14
-rwxr-xr-x  scripts/maintenance/load_genotypes.py  20
-rwxr-xr-x  scripts/maintenance/load_phenotypes.py  40
-rwxr-xr-x  scripts/maintenance/readProbeSetMean_v7.py  292
-rwxr-xr-x  scripts/maintenance/readProbeSetSE_v7.py  508
-rw-r--r--  setup.py  2
-rw-r--r--  test/requests/link_checker.py  16
-rw-r--r--  test/requests/main_web_functionality.py  21
-rw-r--r--  test/requests/mapping_tests.py  1
-rw-r--r--  test/requests/navigation_tests.py  1
-rwxr-xr-x  test/requests/test-website.py  2
-rw-r--r--  webtests/browser_run.py  4
-rw-r--r--  webtests/correlation_matrix_test.py  2
-rw-r--r--  webtests/correlation_test.py  2
-rw-r--r--  webtests/marker_regression_test.py  2
-rw-r--r--  webtests/show_trait_js_test.py  2
-rw-r--r--  webtests/test_runner.py  4
-rw-r--r--  wqflask/base/GeneralObject.py  11
-rw-r--r--  wqflask/base/data_set.py  41
-rw-r--r--  wqflask/base/mrna_assay_tissue_data.py  7
-rw-r--r--  wqflask/base/species.py  9
-rw-r--r--  wqflask/base/trait.py  213
-rw-r--r--  wqflask/db/call.py  21
-rw-r--r--  wqflask/db/webqtlDatabaseFunction.py  8
-rw-r--r--  wqflask/maintenance/convert_dryad_to_bimbam.py  3
-rw-r--r--  wqflask/maintenance/convert_geno_to_bimbam.py  3
-rw-r--r--  wqflask/maintenance/gen_select_dataset.py  22
-rw-r--r--  wqflask/maintenance/generate_kinship_from_bimbam.py  3
-rw-r--r--  wqflask/maintenance/generate_probesetfreeze_file.py  4
-rw-r--r--  wqflask/maintenance/geno_to_json.py  3
-rw-r--r--  wqflask/maintenance/get_group_samplelists.py  2
-rw-r--r--  wqflask/maintenance/print_benchmark.py  4
-rw-r--r--  wqflask/maintenance/quantile_normalize.py  17
-rw-r--r--  wqflask/maintenance/set_resource_defaults.py  10
-rw-r--r--  wqflask/run_gunicorn.py  2
-rw-r--r--  wqflask/runserver.py  20
-rw-r--r--  wqflask/tests/base/test_data_set.py  71
-rw-r--r--  wqflask/tests/base/test_general_object.py  9
-rw-r--r--  wqflask/tests/base/test_trait.py  15
-rw-r--r--  wqflask/tests/utility/test_authentication_tools.py  10
-rw-r--r--  wqflask/tests/utility/test_hmac.py  2
-rw-r--r--  wqflask/tests/wqflask/api/test_gen_menu.py  8
-rw-r--r--  wqflask/tests/wqflask/marker_regression/test_display_mapping_results.py  91
-rw-r--r--  wqflask/utility/Plot.py  56
-rw-r--r--  wqflask/utility/__init__.py  4
-rw-r--r--  wqflask/utility/after.py  4
-rw-r--r--  wqflask/utility/authentication_tools.py  41
-rw-r--r--  wqflask/utility/benchmark.py  6
-rw-r--r--  wqflask/utility/chunks.py  2
-rw-r--r--  wqflask/utility/db_tools.py  12
-rw-r--r--  wqflask/utility/elasticsearch_tools.py  2
-rw-r--r--  wqflask/utility/gen_geno_ob.py  6
-rw-r--r--  wqflask/utility/genofile_parser.py  1
-rw-r--r--  wqflask/utility/helper_functions.py  6
-rw-r--r--  wqflask/utility/hmac.py  6
-rw-r--r--  wqflask/utility/logger.py  26
-rw-r--r--  wqflask/utility/pillow_utils.py  4
-rw-r--r--  wqflask/utility/redis_tools.py  2
-rw-r--r--  wqflask/utility/startup_config.py  4
-rw-r--r--  wqflask/utility/svg.py  692
-rw-r--r--  wqflask/utility/temp_data.py  3
-rw-r--r--  wqflask/utility/tools.py  38
-rw-r--r--  wqflask/utility/webqtlUtil.py  24
-rw-r--r--  wqflask/wqflask/__init__.py  2
-rw-r--r--  wqflask/wqflask/api/correlation.py  22
-rw-r--r--  wqflask/wqflask/api/gen_menu.py  8
-rw-r--r--  wqflask/wqflask/api/mapping.py  2
-rw-r--r--  wqflask/wqflask/api/router.py  41
-rw-r--r--  wqflask/wqflask/collect.py  9
-rw-r--r--  wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py  19
-rw-r--r--  wqflask/wqflask/correlation/corr_scatter_plot.py  10
-rw-r--r--  wqflask/wqflask/correlation/correlation_functions.py  8
-rw-r--r--  wqflask/wqflask/correlation/show_corr_results.py  71
-rw-r--r--  wqflask/wqflask/correlation_matrix/show_corr_matrix.py  39
-rw-r--r--  wqflask/wqflask/ctl/ctl_analysis.py  4
-rw-r--r--  wqflask/wqflask/db_info.py  265
-rw-r--r--  wqflask/wqflask/do_search.py  10
-rw-r--r--  wqflask/wqflask/docs.py  4
-rw-r--r--  wqflask/wqflask/export_traits.py  14
-rw-r--r--  wqflask/wqflask/external_tools/send_to_bnw.py  4
-rw-r--r--  wqflask/wqflask/external_tools/send_to_geneweaver.py  12
-rw-r--r--  wqflask/wqflask/external_tools/send_to_webgestalt.py  6
-rw-r--r--  wqflask/wqflask/group_manager.py  5
-rw-r--r--  wqflask/wqflask/gsearch.py  2
-rw-r--r--  wqflask/wqflask/heatmap/heatmap.py  43
-rw-r--r--  wqflask/wqflask/interval_analyst/GeneUtil.py  8
-rw-r--r--  wqflask/wqflask/marker_regression/display_mapping_results.py  290
-rw-r--r--  wqflask/wqflask/marker_regression/plink_mapping.py  20
-rw-r--r--  wqflask/wqflask/marker_regression/qtlreaper_mapping.py  2
-rw-r--r--  wqflask/wqflask/marker_regression/rqtl_mapping.py  2
-rw-r--r--  wqflask/wqflask/marker_regression/run_mapping.py  28
-rw-r--r--  wqflask/wqflask/model.py  10
-rw-r--r--  wqflask/wqflask/network_graph/network_graph.py  31
-rw-r--r--  wqflask/wqflask/news.py  4
-rw-r--r--  wqflask/wqflask/parser.py  18
-rw-r--r--  wqflask/wqflask/pbkdf2.py  28
-rw-r--r--  wqflask/wqflask/resource_manager.py  6
-rw-r--r--  wqflask/wqflask/search_results.py  8
-rw-r--r--  wqflask/wqflask/send_mail.py  2
-rw-r--r--  wqflask/wqflask/show_trait/SampleList.py  6
-rw-r--r--  wqflask/wqflask/show_trait/export_trait_data.py  6
-rw-r--r--  wqflask/wqflask/show_trait/show_trait.py  20
-rw-r--r--  wqflask/wqflask/snp_browser/snp_browser.py  24
-rw-r--r--  wqflask/wqflask/submit_bnw.py  4
-rw-r--r--  wqflask/wqflask/templates/admin/manage_resource.html  2
-rw-r--r--  wqflask/wqflask/templates/loading.html  4
-rw-r--r--  wqflask/wqflask/update_search_results.py  2
-rw-r--r--  wqflask/wqflask/user_login.py  6
-rw-r--r--  wqflask/wqflask/user_manager.py  8
-rw-r--r--  wqflask/wqflask/user_session.py  4
-rw-r--r--  wqflask/wqflask/views.py  38
-rw-r--r--  wqflask/wqflask/wgcna/wgcna_analysis.py  12
121 files changed, 1801 insertions, 1925 deletions
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 2fd9a886..f27feb5f 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -11,7 +11,7 @@ on:
jobs:
unittest:
runs-on: ubuntu-latest
- container: bonfacekilz/python2-genenetwork2:latest
+ container: bonfacekilz/python3-genenetwork2:latest
steps:
# First start with mariadb set then checkout. The checkout gives
diff --git a/bin/genenetwork2 b/bin/genenetwork2
index dd6db7d6..5f4e0f9a 100755
--- a/bin/genenetwork2
+++ b/bin/genenetwork2
@@ -60,7 +60,7 @@ GN2_ID=$(cat /etc/hostname):$(basename $GN2_BASE_DIR)
echo GN2_BASE_DIR=$GN2_BASE_DIR
-GUIX_SITE=$GN2_BASE_DIR/lib/python2.7/site-packages
+GUIX_SITE=$GN2_BASE_DIR/lib/python3.8/site-packages
if [ -d $GUIX_SITE ]; then
echo INFO: GN2 is running from GNU Guix
GN2_BASE_DIR=$GUIX_SITE
@@ -105,7 +105,7 @@ echo GN2_SETTINGS=$settings
# if [ -z $ELASTICSEARCH_PROFILE ]; then
# echo -e "WARNING: Elastic Search profile has not been set - use ELASTICSEARCH_PROFILE";
# else
-# PYTHONPATH="$PYTHONPATH${PYTHONPATH:+:}$ELASTICSEARCH_PROFILE/lib/python2.7/site-packages"
+# PYTHONPATH="$PYTHONPATH${PYTHONPATH:+:}$ELASTICSEARCH_PROFILE/lib/python3.8/site-packages"
# fi
if [ -z $GN2_PROFILE ] ; then
@@ -121,7 +121,7 @@ if [ -z $GN2_PROFILE ]; then
read -p "PRESS [ENTER] TO CONTINUE..."
else
export PATH=$GN2_PROFILE/bin:$PATH
- export PYTHONPATH="$PYTHON_GN_PATH:$GN2_PROFILE/lib/python2.7/site-packages" # never inject another PYTHONPATH!!
+ export PYTHONPATH="$GN2_PROFILE/lib/python3.8/site-packages" # never inject another PYTHONPATH!!
export R_LIBS_SITE=$GN2_PROFILE/site-library
export JS_GUIX_PATH=$GN2_PROFILE/share/genenetwork2/javascript
export GUIX_GTK3_PATH="$GN2_PROFILE/lib/gtk-3.0"
diff --git a/doc/GUIX-Reproducible-from-source.org b/doc/GUIX-Reproducible-from-source.org
index 19e4d14f..fffa9571 100644
--- a/doc/GUIX-Reproducible-from-source.org
+++ b/doc/GUIX-Reproducible-from-source.org
@@ -167,7 +167,7 @@ the Guix suggested environment vars. Check the output of
#+begin_src bash
guix package --search-paths
-export PYTHONPATH="/root/.guix-profile/lib/python2.7/site-packages"
+export PYTHONPATH="/root/.guix-profile/lib/python3.8/site-packages"
export R_LIBS_SITE="/root/.guix-profile/site-library/"
#+end_src
@@ -265,7 +265,7 @@ software.
If something is not working, take a hint from the settings file
that comes in the Guix installation. It sits in something like
-: cat ~/.guix-profile/lib/python2.7/site-packages/genenetwork2-2.0-py2.7.egg/etc/default_settings.py
+: cat ~/.guix-profile/lib/python3.8/site-packages/genenetwork2-2.0-py2.7.egg/etc/default_settings.py
** Set up nginx port forwarding
@@ -380,7 +380,7 @@ After setting the paths for the server
#+begin_src bash
export PATH=~/.guix-profile/bin:$PATH
-export PYTHONPATH="$HOME/.guix-profile/lib/python2.7/site-packages"
+export PYTHONPATH="$HOME/.guix-profile/lib/python3.8/site-packages"
export R_LIBS_SITE="$HOME/.guix-profile/site-library/"
export GUIX_GTK3_PATH="$HOME/.guix-profile/lib/gtk-3.0"
export GI_TYPELIB_PATH="$HOME/.guix-profile/lib/girepository-1.0"
diff --git a/doc/README.org b/doc/README.org
index 46df03c7..43c92e3c 100644
--- a/doc/README.org
+++ b/doc/README.org
@@ -218,7 +218,7 @@ information given by guix:
On one system:
-: export PYTHONPATH="$HOME/.guix-profile/lib/python2.7/site-packages"
+: export PYTHONPATH="$HOME/.guix-profile/lib/python3.8/site-packages"
: export R_LIBS_SITE="$HOME/.guix-profile/site-library/"
: export GEM_PATH="$HOME/.guix-profile/lib/ruby/gems/2.2.0"
diff --git a/doc/development.org b/doc/development.org
index e65ccd58..cd3beea3 100644
--- a/doc/development.org
+++ b/doc/development.org
@@ -67,11 +67,11 @@ You can install a Python package locally with pip, e.g.
pip install hjson
#+END_SRC
-This installed in ~$HOME/.local/lib/python2.7/site-packages~. To add
+This installed in ~$HOME/.local/lib/python3.8/site-packages~. To add
the search path for GeneNetwork use the environment variable
#+BEGIN_SRC sh
-export PYTHON_GN_PATH=$HOME/.local/lib/python2.7/site-packages
+export PYTHON_GN_PATH=$HOME/.local/lib/python3.8/site-packages
#+END_SRC
Now you should be able to do
diff --git a/etc/default_settings.py b/etc/default_settings.py
index f368237b..27522187 100644
--- a/etc/default_settings.py
+++ b/etc/default_settings.py
@@ -19,12 +19,12 @@
#
# For GNU Guix deployment also check the paths in
#
-# ~/.guix-profile/lib/python2.7/site-packages/genenetwork2-2.0-py2.7.egg/etc/default_settings.py
+# ~/.guix-profile/lib/python3.8/site-packages/genenetwork2-2.0-py2.7.egg/etc/default_settings.py
import os
import sys
-GN_VERSION = open("../etc/VERSION","r").read()
+GN_VERSION = open("../etc/VERSION", "r").read()
GN_SERVER_URL = "http://localhost:8880/" # REST API server
# ---- MySQL
diff --git a/scripts/maintenance/QTL_Reaper_v6.py b/scripts/maintenance/QTL_Reaper_v6.py
index e50dbd40..35f2d1a1 100755
--- a/scripts/maintenance/QTL_Reaper_v6.py
+++ b/scripts/maintenance/QTL_Reaper_v6.py
@@ -7,7 +7,7 @@ import reaper
import MySQLdb
import time
-con = MySQLdb.Connect(db='db_webqtl',user='username',passwd='', host="localhost")
+con = MySQLdb.Connect(db='db_webqtl', user='username', passwd='', host="localhost")
cursor = con.cursor()
genotypeDir = '/gnshare/gn/web/genotypes/'
@@ -23,7 +23,7 @@ for item in results:
ProbeSetFreezeIds=sys.argv[1:]
if ProbeSetFreezeIds:
#####convert the Ids to integer
- ProbeSetFreezeIds=map(int, ProbeSetFreezeIds)
+ ProbeSetFreezeIds=list(map(int, ProbeSetFreezeIds))
else:
#####get all of the dataset that need be updated
@@ -53,7 +53,7 @@ for ProbeSetFreezeId in ProbeSetFreezeIds:
#if InbredSetId==12:
# InbredSetId=2
- print ProbeSetFreezeId, InbredSets[InbredSetId]
+ print((ProbeSetFreezeId, InbredSets[InbredSetId]))
genotype_1.read(InbredSets[InbredSetId])
locuses = []
@@ -102,7 +102,7 @@ for ProbeSetFreezeId in ProbeSetFreezeIds:
kj += 1
if kj%1000==0:
- print ProbeSetFreezeId, InbredSets[InbredSetId],kj
+ print((ProbeSetFreezeId, InbredSets[InbredSetId], kj))
- print ProbeSetFreezeIds
+ print(ProbeSetFreezeIds)
diff --git a/scripts/maintenance/Update_Case_Attributes_MySQL_tab.py b/scripts/maintenance/Update_Case_Attributes_MySQL_tab.py
index 0f8602c9..bf796df4 100644
--- a/scripts/maintenance/Update_Case_Attributes_MySQL_tab.py
+++ b/scripts/maintenance/Update_Case_Attributes_MySQL_tab.py
@@ -24,4 +24,4 @@ for row in csv_data:
#close the connection to the database.
mydb.commit()
cursor.close()
-print "Done" \ No newline at end of file
+print("Done") \ No newline at end of file
diff --git a/scripts/maintenance/delete_genotypes.py b/scripts/maintenance/delete_genotypes.py
index fa693f0f..b7f83758 100755
--- a/scripts/maintenance/delete_genotypes.py
+++ b/scripts/maintenance/delete_genotypes.py
@@ -8,26 +8,26 @@ import genotypes
def main(argv):
# config
config = utilities.get_config(argv[1])
- print "config:"
+ print("config:")
for item in config.items('config'):
- print "\t%s" % (str(item))
+ print(("\t%s" % (str(item))))
# var
- print "variable:"
+ print("variable:")
inbredsetid = config.get('config', 'inbredsetid')
- print "\tinbredsetid: %s" % inbredsetid
+ print(("\tinbredsetid: %s" % inbredsetid))
# datafile
datafile = open(config.get('config', 'datafile'), 'r')
datafile = csv.reader(datafile, delimiter='\t', quotechar='"')
- datafile.next()
+ next(datafile)
delrowcount = 0
for row in datafile:
if len(row) == 0:
continue
genoname = row[0]
delrowcount += genotypes.delete(genoname, inbredsetid)
- print "deleted %d genotypes" % (delrowcount)
+ print(("deleted %d genotypes" % (delrowcount)))
if __name__ == "__main__":
- print "command line arguments:\n\t%s" % sys.argv
+ print(("command line arguments:\n\t%s" % sys.argv))
main(sys.argv)
- print "exit successfully"
+ print("exit successfully")
diff --git a/scripts/maintenance/delete_phenotypes.py b/scripts/maintenance/delete_phenotypes.py
index 326c466e..60dbec61 100755
--- a/scripts/maintenance/delete_phenotypes.py
+++ b/scripts/maintenance/delete_phenotypes.py
@@ -8,13 +8,13 @@ import phenotypes
def main(argv):
# config
config = utilities.get_config(argv[1])
- print "config:"
+ print("config:")
for item in config.items('config'):
- print "\t%s" % (str(item))
+ print(("\t%s" % (str(item))))
# var
- print "variable:"
+ print("variable:")
inbredsetid = config.get('config', 'inbredsetid')
- print "\tinbredsetid: %s" % inbredsetid
+ print(("\tinbredsetid: %s" % inbredsetid))
# datafile
datafile = open(config.get('config', 'datafile'), 'r')
datafile = csv.reader(datafile, delimiter='\t', quotechar='"')
@@ -27,9 +27,9 @@ def main(argv):
except:
continue
delrowcount += phenotypes.delete(publishxrefid=publishxrefid, inbredsetid=inbredsetid)
- print "deleted %d phenotypes" % (delrowcount)
+ print(("deleted %d phenotypes" % (delrowcount)))
if __name__ == "__main__":
- print "command line arguments:\n\t%s" % sys.argv
+ print(("command line arguments:\n\t%s" % sys.argv))
main(sys.argv)
- print "exit successfully"
+ print("exit successfully")
diff --git a/scripts/maintenance/load_genotypes.py b/scripts/maintenance/load_genotypes.py
index 338483f4..51278d48 100755
--- a/scripts/maintenance/load_genotypes.py
+++ b/scripts/maintenance/load_genotypes.py
@@ -8,7 +8,7 @@ def main(argv):
config = utilities.get_config(argv[1])
print("config file:")
for item in config.items('config'):
- print("\t%s" % str(item))
+ print(("\t%s" % str(item)))
parse_genofile(config, fetch_parameters(config))
def fetch_parameters(config):
@@ -19,8 +19,8 @@ def fetch_parameters(config):
config_dic['dataid'] = datastructure.get_nextdataid_genotype()
config_dic['genofile'] = config.get('config', 'genofile')
print("config dictionary:")
- for k, v in config_dic.items():
- print("\t%s: %s" % (k, v))
+ for k, v in list(config_dic.items()):
+ print(("\t%s: %s" % (k, v)))
return config_dic
def parse_genofile(config, config_dic):
@@ -42,10 +42,10 @@ def parse_genofile(config, config_dic):
if line.lower().startswith("chr"):
#
print("geno file meta dictionary:")
- for k, v in meta_dic.items():
- print("\t%s: %s" % (k, v))
+ for k, v in list(meta_dic.items()):
+ print(("\t%s: %s" % (k, v)))
#
- print("geno file head:\n\t%s" % line)
+ print(("geno file head:\n\t%s" % line))
strainnames = line.split()[4:]
config_dic['strains'] = datastructure.get_strains_bynames(inbredsetid=config_dic['inbredsetid'], strainnames=strainnames, updatestrainxref="yes")
continue
@@ -81,7 +81,7 @@ def check_or_insert_geno(config_dic, marker_dic):
result = cursor.fetchone()
if result:
genoid = result[0]
- print("get geno record: %d" % genoid)
+ print(("get geno record: %d" % genoid))
else:
sql = """
INSERT INTO Geno
@@ -95,7 +95,7 @@ def check_or_insert_geno(config_dic, marker_dic):
cursor.execute(sql, (config_dic['speciesid'], marker_dic['locus'], marker_dic['locus'], marker_dic['chromosome'], marker_dic['mb']))
rowcount = cursor.rowcount
genoid = con.insert_id()
- print("INSERT INTO Geno: %d record: %d" % (rowcount, genoid))
+ print(("INSERT INTO Geno: %d record: %d" % (rowcount, genoid)))
return genoid
def check_genoxref(config_dic, marker_dic):
@@ -146,9 +146,9 @@ def insert_genoxref(config_dic, marker_dic):
"""
cursor.execute(sql, (config_dic['genofreezeid'], marker_dic['genoid'], config_dic['dataid'], marker_dic['cm'], 'N'))
rowcount = cursor.rowcount
- print("INSERT INTO GenoXRef: %d record" % (rowcount))
+ print(("INSERT INTO GenoXRef: %d record" % (rowcount)))
if __name__ == "__main__":
- print("command line arguments:\n\t%s" % sys.argv)
+ print(("command line arguments:\n\t%s" % sys.argv))
main(sys.argv)
print("exit successfully")
diff --git a/scripts/maintenance/load_phenotypes.py b/scripts/maintenance/load_phenotypes.py
index c3c6570b..759d2eec 100755
--- a/scripts/maintenance/load_phenotypes.py
+++ b/scripts/maintenance/load_phenotypes.py
@@ -7,37 +7,37 @@ import datastructure
def main(argv):
# config
config = utilities.get_config(argv[1])
- print "config:"
+ print("config:")
for item in config.items('config'):
- print "\t%s" % (str(item))
+ print(("\t%s" % (str(item))))
# var
inbredsetid = config.get('config', 'inbredsetid')
- print "inbredsetid: %s" % inbredsetid
+ print(("inbredsetid: %s" % inbredsetid))
species = datastructure.get_species(inbredsetid)
speciesid = species[0]
- print "speciesid: %s" % speciesid
+ print(("speciesid: %s" % speciesid))
dataid = datastructure.get_nextdataid_phenotype()
- print "next data id: %s" % dataid
+ print(("next data id: %s" % dataid))
cursor, con = utilities.get_cursor()
# datafile
datafile = open(config.get('config', 'datafile'), 'r')
phenotypedata = csv.reader(datafile, delimiter='\t', quotechar='"')
- phenotypedata_head = phenotypedata.next()
- print "phenotypedata head:\n\t%s" % phenotypedata_head
+ phenotypedata_head = next(phenotypedata)
+ print(("phenotypedata head:\n\t%s" % phenotypedata_head))
strainnames = phenotypedata_head[1:]
strains = datastructure.get_strains_bynames(inbredsetid=inbredsetid, strainnames=strainnames, updatestrainxref="yes")
# metafile
metafile = open(config.get('config', 'metafile'), 'r')
phenotypemeta = csv.reader(metafile, delimiter='\t', quotechar='"')
- phenotypemeta_head = phenotypemeta.next()
- print "phenotypemeta head:\n\t%s" % phenotypemeta_head
- print
+ phenotypemeta_head = next(phenotypemeta)
+ print(("phenotypemeta head:\n\t%s" % phenotypemeta_head))
+ print()
# load
for metarow in phenotypemeta:
#
- datarow_value = phenotypedata.next()
- datarow_se = phenotypedata.next()
- datarow_n = phenotypedata.next()
+ datarow_value = next(phenotypedata)
+ datarow_se = next(phenotypedata)
+ datarow_n = next(phenotypedata)
# Phenotype
sql = """
INSERT INTO Phenotype
@@ -67,7 +67,7 @@ def main(argv):
))
rowcount = cursor.rowcount
phenotypeid = con.insert_id()
- print "INSERT INTO Phenotype: %d record: %d" % (rowcount, phenotypeid)
+ print(("INSERT INTO Phenotype: %d record: %d" % (rowcount, phenotypeid)))
# Publication
publicationid = None # reset
pubmed_id = utilities.to_db_string(metarow[0], None)
@@ -81,7 +81,7 @@ def main(argv):
re = cursor.fetchone()
if re:
publicationid = re[0]
- print "get Publication record: %d" % publicationid
+ print(("get Publication record: %d" % publicationid))
if not publicationid:
sql = """
INSERT INTO Publication
@@ -109,7 +109,7 @@ def main(argv):
))
rowcount = cursor.rowcount
publicationid = con.insert_id()
- print "INSERT INTO Publication: %d record: %d" % (rowcount, publicationid)
+ print(("INSERT INTO Publication: %d record: %d" % (rowcount, publicationid)))
# data
for index, strain in enumerate(strains):
#
@@ -158,14 +158,14 @@ def main(argv):
cursor.execute(sql, (inbredsetid, phenotypeid, publicationid, dataid, ""))
rowcount = cursor.rowcount
publishxrefid = con.insert_id()
- print "INSERT INTO PublishXRef: %d record: %d" % (rowcount, publishxrefid)
+ print(("INSERT INTO PublishXRef: %d record: %d" % (rowcount, publishxrefid)))
# for loop next
dataid += 1
- print
+ print()
# release
con.close()
if __name__ == "__main__":
- print "command line arguments:\n\t%s" % sys.argv
+ print(("command line arguments:\n\t%s" % sys.argv))
main(sys.argv)
- print "exit successfully"
+ print("exit successfully")
diff --git a/scripts/maintenance/readProbeSetMean_v7.py b/scripts/maintenance/readProbeSetMean_v7.py
index e9c8f25c..43f084f4 100755
--- a/scripts/maintenance/readProbeSetMean_v7.py
+++ b/scripts/maintenance/readProbeSetMean_v7.py
@@ -9,19 +9,17 @@ import sys
import MySQLdb
import getpass
import time
-#import pdb
-#pdb.set_trace()
########################################################################
def translateAlias(str):
- if str == "B6":
- return "C57BL/6J"
- elif str == "D2":
- return "DBA/2J"
- else:
- return str
+ if str == "B6":
+ return "C57BL/6J"
+ elif str == "D2":
+ return "DBA/2J"
+ else:
+ return str
########################################################################
#
@@ -29,23 +27,25 @@ def translateAlias(str):
#
########################################################################
+
dataStart = 1
-GeneChipId = int( raw_input("Enter GeneChipId:") )
-ProbeSetFreezeId = int( raw_input("Enter ProbeSetFreezeId:") )
-input_file_name = raw_input("Enter file name with suffix:")
+GeneChipId = int(input("Enter GeneChipId:"))
+ProbeSetFreezeId = int(input("Enter ProbeSetFreezeId:"))
+input_file_name = input("Enter file name with suffix:")
fp = open("%s" % input_file_name, 'rb')
try:
- passwd = getpass.getpass('Please enter mysql password here : ')
- con = MySQLdb.Connect(db='db_webqtl',host='localhost', user='username',passwd=passwd)
+ passwd = getpass.getpass('Please enter mysql password here : ')
+ con = MySQLdb.Connect(db='db_webqtl', host='localhost',
+ user='username', passwd=passwd)
- db = con.cursor()
- print "You have successfully connected to mysql.\n"
+ db = con.cursor()
+ print("You have successfully connected to mysql.\n")
except:
- print "You entered incorrect password.\n"
- sys.exit(0)
+ print("You entered incorrect password.\n")
+ sys.exit(0)
time0 = time.time()
@@ -55,163 +55,163 @@ time0 = time.time()
# generate the gene list of expression data here
#
#########################################################################
-print 'Checking if each line have same number of members'
+print('Checking if each line have same number of members')
GeneList = []
isCont = 1
header = fp.readline()
-header = string.split(string.strip(header),'\t')
-header = map(string.strip, header)
+header = header.strip().split('\t')
+header = [x.strip() for x in header]
nfield = len(header)
line = fp.readline()
-kj=0
+kj = 0
while line:
- line2 = string.split(string.strip(line),'\t')
- line2 = map(string.strip, line2)
- if len(line2) != nfield:
- print "Error : " + line
- isCont = 0
+ line2 = line.strip().split('\t')
+ line2 = [x.strip() for x in line2]
+ if len(line2) != nfield:
+ print(("Error : " + line))
+ isCont = 0
- GeneList.append(line2[0])
- line = fp.readline()
+ GeneList.append(line2[0])
+ line = fp.readline()
- kj+=1
- if kj%100000 == 0:
- print 'checked ',kj,' lines'
+ kj += 1
+ if kj % 100000 == 0:
+ print(('checked ', kj, ' lines'))
-GeneList = map(string.lower, GeneList)
-GeneList.sort()
-
-if isCont==0:
- sys.exit(0)
+GeneList = sorted(map(string.lower, GeneList))
+if isCont == 0:
+ sys.exit(0)
-print 'used ',time.time()-time0,' seconds'
+
+print(('used ', time.time()-time0, ' seconds'))
#########################################################################
#
# Check if each strain exist in database
# generate the string id list of expression data here
#
#########################################################################
-print 'Checking if each strain exist in database'
+print('Checking if each strain exist in database')
isCont = 1
fp.seek(0)
header = fp.readline()
-header = string.split(string.strip(header),'\t')
-header = map(string.strip, header)
-header = map(translateAlias, header)
+header = header.strip().split('\t')
+header = [x.strip() for x in header]
+header = list(map(translateAlias, header))
header = header[dataStart:]
Ids = []
for item in header:
- try:
- db.execute('select Id from Strain where Name = "%s"' % item)
- Ids.append(db.fetchall()[0][0])
- except:
- print item,'does not exist, check the if the strain name is correct'
- isCont=0
+ try:
+ db.execute('select Id from Strain where Name = "%s"' % item)
+ Ids.append(db.fetchall()[0][0])
+ except:
+ print((item, 'does not exist, check the if the strain name is correct'))
+ isCont = 0
-if isCont==0:
- sys.exit(0)
+if isCont == 0:
+ sys.exit(0)
-print 'used ',time.time()-time0,' seconds'
+print(('used ', time.time()-time0, ' seconds'))
########################################################################
#
# Check if each ProbeSet exist in database
#
########################################################################
-print 'Check if each ProbeSet exist in database'
+print('Check if each ProbeSet exist in database')
##---- find PID is name or target ----##
line = fp.readline()
line = fp.readline()
-line2 = string.split(string.strip(line),'\t')
-line2 = map(string.strip, line2)
+line2 = line.strip().split('\t')
+line2 = [x.strip() for x in line2]
PId = line2[0]
-db.execute('select Id from ProbeSet where Name="%s" and ChipId=%d' % (PId, GeneChipId) )
+db.execute('select Id from ProbeSet where Name="%s" and ChipId=%d' %
+ (PId, GeneChipId))
results = db.fetchall()
IdStr = 'TargetId'
-if len(results)>0:
- IdStr = 'Name'
+if len(results) > 0:
+ IdStr = 'Name'
##---- get Name/TargetId list from database ----##
-db.execute('select distinct(%s) from ProbeSet where ChipId=%d order by %s' % (IdStr, GeneChipId, IdStr))
+db.execute('select distinct(%s) from ProbeSet where ChipId=%d order by %s' % (
+ IdStr, GeneChipId, IdStr))
results = db.fetchall()
-
+
Names = []
for item in results:
- Names.append(item[0])
-
-print Names
+ Names.append(item[0])
-Names = map(string.lower, Names)
+print(Names)
-Names.sort() # -- Fixed the lower case problem of ProbeSets affx-mur_b2_at doesn't exist --#
+Names = sorted(map(string.lower, Names))
##---- compare genelist with names ----##
-x=y=0
-x1=-1
-GeneList2=[]
-while x<len(GeneList) and y<len(Names):
- if GeneList[x]==Names[y]:
- x += 1
- y += 1
- elif GeneList[x]<Names[y]:
- if x!=x1:
- GeneList2.append(GeneList[x])
- x1 = x
- x += 1
- elif GeneList[x]>Names[y]:
- y += 1
-
- if x%100000==0:
- print 'check Name, checked %d lines'%x
-
-while x<len(GeneList):
- GeneList2.append(GeneList[x])
- x += 1
-
-isCont=1
+x = y = 0
+x1 = -1
+GeneList2 = []
+while x < len(GeneList) and y < len(Names):
+ if GeneList[x] == Names[y]:
+ x += 1
+ y += 1
+ elif GeneList[x] < Names[y]:
+ if x != x1:
+ GeneList2.append(GeneList[x])
+ x1 = x
+ x += 1
+ elif GeneList[x] > Names[y]:
+ y += 1
+
+ if x % 100000 == 0:
+ print(('check Name, checked %d lines' % x))
+
+while x < len(GeneList):
+ GeneList2.append(GeneList[x])
+ x += 1
+
+isCont = 1
ferror = open("ProbeSetError.txt", "wb")
for item in GeneList2:
- ferror.write(item + " doesn't exist \n")
- print item, " doesn't exist, check if the ProbeSet name is correct"
- isCont = 0
-
-if isCont==0:
- sys.exit(0)
+ ferror.write(item + " doesn't exist \n")
+ print((item, " doesn't exist, check if the ProbeSet name is correct"))
+ isCont = 0
+
+if isCont == 0:
+ sys.exit(0)
-print 'used ',time.time()-time0,' seconds'
+print(('used ', time.time()-time0, ' seconds'))
#########################################################################
#
# Insert data into database
#
#########################################################################
-print 'getting ProbeSet/Id'
+print('getting ProbeSet/Id')
#---- get Name/Id map ----#
-db.execute('select %s, Id from ProbeSet where ChipId=%d order by %s' % (IdStr, GeneChipId, IdStr))
+db.execute('select %s, Id from ProbeSet where ChipId=%d order by %s' %
+ (IdStr, GeneChipId, IdStr))
results = db.fetchall()
NameIds = {}
for item in results:
- NameIds[item[0]] = item[1]
-print 'used ',time.time()-time0,' seconds'
+ NameIds[item[0]] = item[1]
+print(('used ', time.time()-time0, ' seconds'))
-print 'inserting data'
+print('inserting data')
##---- get old max dataId ----##
db.execute('select max(Id) from ProbeSetData')
maxDataId = int(db.fetchall()[0][0])
bmax = maxDataId
-print "old_max = %d\n" % bmax
+print(("old_max = %d\n" % bmax))
##---- insert data ----##
fp.seek(0)
@@ -222,53 +222,51 @@ kj = 0
values1 = []
values2 = []
while line:
- line2 = string.split(string.strip(line),'\t')
- line2 = map(string.strip, line2)
- PId = line2[0]
- recordId = NameIds[PId]
-
- maxDataId += 1
- datasorig = line2[dataStart:]
-
- ###### Data Table items ######
- i=0
- for item in datasorig:
- try:
- values1.append('(%d,%d,%s)' % (maxDataId, Ids[i], float(item)))
- except:
- pass
- i += 1
-
- values2.append("(%d,%d,%d)" % (ProbeSetFreezeId, recordId, maxDataId))
-
-
- ##---- insert into table ----##
- kj += 1
- if kj % 100 == 0:
- cmd = ','.join(values1)
- cmd = 'insert into ProbeSetData values %s' % cmd
- db.execute(cmd)
-
- cmd = ','.join(values2)
- cmd = 'insert into ProbeSetXRef(ProbeSetFreezeId, ProbeSetId, DataId) values %s' % cmd
- db.execute(cmd)
-
- values1=[]
- values2=[]
- print 'Inserted ', kj,' lines'
- print 'used ',time.time()-time0,' seconds'
-
- line = fp.readline()
-
-
-
-if len(values1)>0:
- cmd = ','.join(values1)
- cmd = 'insert into ProbeSetData values %s' % cmd
- db.execute(cmd)
-
- cmd = ','.join(values2)
- cmd = 'insert into ProbeSetXRef(ProbeSetFreezeId, ProbeSetId, DataId) values %s' % cmd
- db.execute(cmd)
+ line2 = line.strip().split('\t')
+ line2 = [x.strip() for x in line2]
+ PId = line2[0]
+ recordId = NameIds[PId]
+
+ maxDataId += 1
+ datasorig = line2[dataStart:]
+
+ ###### Data Table items ######
+ i = 0
+ for item in datasorig:
+ try:
+ values1.append('(%d,%d,%s)' % (maxDataId, Ids[i], float(item)))
+ except:
+ pass
+ i += 1
+
+ values2.append("(%d,%d,%d)" % (ProbeSetFreezeId, recordId, maxDataId))
+
+ ##---- insert into table ----##
+ kj += 1
+ if kj % 100 == 0:
+ cmd = ','.join(values1)
+ cmd = 'insert into ProbeSetData values %s' % cmd
+ db.execute(cmd)
+
+ cmd = ','.join(values2)
+ cmd = 'insert into ProbeSetXRef(ProbeSetFreezeId, ProbeSetId, DataId) values %s' % cmd
+ db.execute(cmd)
+
+ values1 = []
+ values2 = []
+ print(('Inserted ', kj, ' lines'))
+ print(('used ', time.time()-time0, ' seconds'))
+
+ line = fp.readline()
+
+
+if len(values1) > 0:
+ cmd = ','.join(values1)
+ cmd = 'insert into ProbeSetData values %s' % cmd
+ db.execute(cmd)
+
+ cmd = ','.join(values2)
+ cmd = 'insert into ProbeSetXRef(ProbeSetFreezeId, ProbeSetId, DataId) values %s' % cmd
+ db.execute(cmd)
con.close()
diff --git a/scripts/maintenance/readProbeSetSE_v7.py b/scripts/maintenance/readProbeSetSE_v7.py
index fd6f0bb8..edd9e7b0 100755
--- a/scripts/maintenance/readProbeSetSE_v7.py
+++ b/scripts/maintenance/readProbeSetSE_v7.py
@@ -1,254 +1,254 @@
-#!/usr/bin/python2
-"""This script use the nearest marker to the transcript as control, increasing permutation rounds according to the p-value"""
-########################################################################
-# Last Updated Sep 27, 2011 by Xiaodong
-# This version fix the bug that incorrectly exclude the first 2 probesetIDs
-########################################################################
-
-import string
-import sys
-import MySQLdb
-import getpass
-import time
-
-
-def translateAlias(str):
- if str == "B6":
- return "C57BL/6J"
- elif str == "D2":
- return "DBA/2J"
- else:
- return str
-
-########################################################################
-#
-# Indicate Data Start Position, ProbeFreezeId, GeneChipId, DataFile
-#
-########################################################################
-
-dataStart = 1
-
-GeneChipId = int( raw_input("Enter GeneChipId:") )
-ProbeSetFreezeId = int( raw_input("Enter ProbeSetFreezeId:") )
-input_file_name = raw_input("Enter file name with suffix:")
-
-fp = open("%s" % input_file_name, 'rb')
-
-
-try:
- passwd = getpass.getpass('Please enter mysql password here : ')
- con = MySQLdb.Connect(db='db_webqtl',host='localhost', user='username',passwd=passwd)
-
- db = con.cursor()
- print "You have successfully connected to mysql.\n"
-except:
- print "You entered incorrect password.\n"
- sys.exit(0)
-
-time0 = time.time()
-########################################################################
-#
-# Indicate Data Start Position, ProbeFreezeId, GeneChipId, DataFile
-#
-########################################################################
-
-#GeneChipId = 4
-#dataStart = 1
-#ProbeSetFreezeId = 359 #JAX Liver 6C Affy M430 2.0 (Jul11) MDP
-#fp = open("GSE10493_AllSamples_6C_Z_AvgSE.txt", 'rb')
-
-
-#########################################################################
-#
-# Check if each line have same number of members
-# generate the gene list of expression data here
-#
-#########################################################################
-print 'Checking if each line have same number of members'
-
-GeneList = []
-isCont = 1
-header = fp.readline()
-header = string.split(string.strip(header),'\t')
-header = map(string.strip, header)
-nfield = len(header)
-line = fp.readline()
-
-kj=0
-while line:
- line2 = string.split(string.strip(line),'\t')
- line2 = map(string.strip, line2)
- if len(line2) != nfield:
- print "Error : " + line
- isCont = 0
-
- GeneList.append(line2[0])
- line = fp.readline()
-
- kj+=1
- if kj%100000 == 0:
- print 'checked ',kj,' lines'
-
-GeneList = map(string.lower, GeneList)
-GeneList.sort()
-
-if isCont==0:
- sys.exit(0)
-
-
-print 'used ',time.time()-time0,' seconds'
-#########################################################################
-#
-# Check if each strain exist in database
-# generate the string id list of expression data here
-#
-#########################################################################
-print 'Checking if each strain exist in database'
-
-isCont = 1
-fp.seek(0)
-header = fp.readline()
-header = string.split(string.strip(header),'\t')
-header = map(string.strip, header)
-header = map(translateAlias, header)
-header = header[dataStart:]
-Ids = []
-for item in header:
- try:
- db.execute('select Id from Strain where Name = "%s"' % item)
- Ids.append(db.fetchall()[0][0])
- except:
- print item,'does not exist, check the if the strain name is correct'
- isCont=0
-
-if isCont==0:
- sys.exit(0)
-
-
-print 'used ',time.time()-time0,' seconds'
-########################################################################
-#
-# Check if each ProbeSet exist in database
-#
-########################################################################
-print 'Check if each ProbeSet exist in database'
-
-##---- find PID is name or target ----##
-line = fp.readline()
-line = fp.readline()
-line2 = string.split(string.strip(line),'\t')
-line2 = map(string.strip, line2)
-PId = line2[0]
-
-db.execute('select Id from ProbeSet where Name="%s" and ChipId=%d' % (PId, GeneChipId))
-results = db.fetchall()
-IdStr = 'TargetId'
-if len(results)>0:
- IdStr = 'Name'
-
-
-##---- get Name/TargetId list from database ----##
-db.execute('select distinct(%s) from ProbeSet where ChipId=%d order by %s' % (IdStr, GeneChipId, IdStr))
-results = db.fetchall()
-
-Names = []
-for item in results:
- Names.append(item[0])
-Names = map(string.lower, Names)
-Names.sort() # -- Fixed the lower case problem of ProbeSets affx-mur_b2_at doesn't exist --#
-
-##---- compare genelist with names ----##
-x=y=0
-x1=-1
-GeneList2=[]
-while x<len(GeneList) and y<len(Names):
- if GeneList[x]==Names[y]:
- x += 1
- y += 1
- elif GeneList[x]<Names[y]:
- if x!=x1:
- GeneList2.append(GeneList[x])
- x1 = x
- x += 1
- elif GeneList[x]>Names[y]:
- y += 1
-
- if x%100000==0:
- print 'check Name, checked %d lines'%x
-
-while x<len(GeneList):
- GeneList2.append(GeneList[x])
- x += 1
-
-isCont=1
-ferror = open("ProbeSetError.txt", "wb")
-for item in GeneList2:
- ferror.write(item + " doesn't exist \n")
- print item, " doesn't exist"
- isCont = 0
-
-if isCont==0:
- sys.exit(0)
-
-
-print 'used ',time.time()-time0,' seconds'
-#############################
-#Insert new Data into SE
-############################
-db.execute("""
- select ProbeSet.%s, ProbeSetXRef.DataId from ProbeSet, ProbeSetXRef
- where ProbeSet.Id=ProbeSetXRef.ProbeSetId and ProbeSetXRef.ProbeSetFreezeId=%d"""
- % (IdStr, ProbeSetFreezeId))
-results = db.fetchall()
-
-ProbeNameId = {}
-for Name, Id in results:
- ProbeNameId[Name] = Id
-
-ferror = open("ProbeError.txt", "wb")
-
-DataValues = []
-
-fp.seek(0) #XZ add this line
-line = fp.readline() #XZ add this line
-line = fp.readline()
-
-kj = 0
-while line:
- line2 = string.split(string.strip(line),'\t')
- line2 = map(string.strip, line2)
-
- CellId = line2[0]
- if not ProbeNameId.has_key(CellId):
- ferror.write(CellId + " doesn't exist\n")
- print CellId, " doesn't exist"
- else:
- DataId = ProbeNameId[CellId]
- datasorig = line2[dataStart:]
-
- i = 0
- for item in datasorig:
- if item != '':
- value = '('+str(DataId)+','+str(Ids[i])+','+str(item)+')'
- DataValues.append(value)
- i += 1
-
- kj += 1
- if kj % 100 == 0:
- Dataitems = ','.join(DataValues)
- cmd = 'insert ProbeSetSE values %s' % Dataitems
- db.execute(cmd)
-
- DataValues = []
- print 'inserted ',kj,' lines'
- print 'used ',time.time()-time0,' seconds'
- line = fp.readline()
-
-if len(DataValues)>0:
- DataValues = ','.join(DataValues)
- cmd = 'insert ProbeSetSE values %s' % DataValues
- db.execute(cmd)
-
-con.close()
-
-
+#!/usr/bin/python2
+"""This script use the nearest marker to the transcript as control, increasing permutation rounds according to the p-value"""
+########################################################################
+# Last Updated Sep 27, 2011 by Xiaodong
+# This version fix the bug that incorrectly exclude the first 2 probesetIDs
+########################################################################
+
+import string
+import sys
+import MySQLdb
+import getpass
+import time
+
+
+def translateAlias(str):
+ if str == "B6":
+ return "C57BL/6J"
+ elif str == "D2":
+ return "DBA/2J"
+ else:
+ return str
+
+########################################################################
+#
+# Indicate Data Start Position, ProbeFreezeId, GeneChipId, DataFile
+#
+########################################################################
+
+
+dataStart = 1
+
+GeneChipId = int(input("Enter GeneChipId:"))
+ProbeSetFreezeId = int(input("Enter ProbeSetFreezeId:"))
+input_file_name = input("Enter file name with suffix:")
+
+fp = open("%s" % input_file_name, 'rb')
+
+
+try:
+ passwd = getpass.getpass('Please enter mysql password here : ')
+ con = MySQLdb.Connect(db='db_webqtl', host='localhost',
+ user='username', passwd=passwd)
+
+ db = con.cursor()
+ print("You have successfully connected to mysql.\n")
+except:
+ print("You entered incorrect password.\n")
+ sys.exit(0)
+
+time0 = time.time()
+########################################################################
+#
+# Indicate Data Start Position, ProbeFreezeId, GeneChipId, DataFile
+#
+########################################################################
+
+#GeneChipId = 4
+#dataStart = 1
+# ProbeSetFreezeId = 359 #JAX Liver 6C Affy M430 2.0 (Jul11) MDP
+#fp = open("GSE10493_AllSamples_6C_Z_AvgSE.txt", 'rb')
+
+
+#########################################################################
+#
+# Check if each line have same number of members
+# generate the gene list of expression data here
+#
+#########################################################################
+print('Checking if each line have same number of members')
+
+GeneList = []
+isCont = 1
+header = fp.readline()
+header = header.strip().split('\t')
+header = list(map(string.strip, header))
+nfield = len(header)
+line = fp.readline()
+
+kj = 0
+while line:
+ line2 = line.strip().split('\t')
+ line2 = list(map(string.strip, line2))
+ if len(line2) != nfield:
+ isCont = 0
+ print(("Error : " + line))
+
+ GeneList.append(line2[0])
+ line = fp.readline()
+
+ kj += 1
+ if kj % 100000 == 0:
+ print(('checked ', kj, ' lines'))
+
+GeneList = sorted(map(string.lower, GeneList))
+
+if isCont == 0:
+ sys.exit(0)
+
+
+print(('used ', time.time()-time0, ' seconds'))
+#########################################################################
+#
+# Check if each strain exist in database
+# generate the string id list of expression data here
+#
+#########################################################################
+print('Checking if each strain exist in database')
+
+isCont = 1
+fp.seek(0)
+header = fp.readline()
+header = header.strip().split('\t')
+header = list(map(string.strip, header))
+header = list(map(translateAlias, header))
+header = header[dataStart:]
+Ids = []
+for item in header:
+ try:
+ db.execute('select Id from Strain where Name = "%s"' % item)
+ Ids.append(db.fetchall()[0][0])
+ except:
+ isCont = 0
+ print((item, 'does not exist, check the if the strain name is correct'))
+
+if isCont == 0:
+ sys.exit(0)
+
+
+print(('used ', time.time()-time0, ' seconds'))
+########################################################################
+#
+# Check if each ProbeSet exist in database
+#
+########################################################################
+print('Check if each ProbeSet exist in database')
+
+##---- find PID is name or target ----##
+line = fp.readline()
+line = fp.readline()
+line2 = line.strip().split('\t')
+line2 = [x.strip() for x in line2]
+PId = line2[0]
+
+db.execute('select Id from ProbeSet where Name="%s" and ChipId=%d' %
+ (PId, GeneChipId))
+results = db.fetchall()
+IdStr = 'TargetId'
+if len(results) > 0:
+ IdStr = 'Name'
+
+
+##---- get Name/TargetId list from database ----##
+db.execute('select distinct(%s) from ProbeSet where ChipId=%d order by %s' % (
+ IdStr, GeneChipId, IdStr))
+results = db.fetchall()
+
+Names = []
+for item in results:
+ Names.append(item[0])
+ Names = sorted(map(string.lower, Names))
+
+##---- compare genelist with names ----##
+x = y = 0
+x1 = -1
+GeneList2 = []
+while x < len(GeneList) and y < len(Names):
+ if GeneList[x] == Names[y]:
+ x += 1
+ y += 1
+ elif GeneList[x] < Names[y]:
+ if x != x1:
+ GeneList2.append(GeneList[x])
+ x1 = x
+ x += 1
+ elif GeneList[x] > Names[y]:
+ y += 1
+
+ if x % 100000 == 0:
+ print(('check Name, checked %d lines' % x))
+
+while x < len(GeneList):
+ GeneList2.append(GeneList[x])
+ x += 1
+
+isCont = 1
+ferror = open("ProbeSetError.txt", "wb")
+for item in GeneList2:
+ ferror.write(item + " doesn't exist \n")
+ isCont = 0
+
+ print((item, " doesn't exist"))
+if isCont == 0:
+ sys.exit(0)
+
+
+print(('used ', time.time()-time0, ' seconds'))
+#############################
+# Insert new Data into SE
+############################
+db.execute("""
+ select ProbeSet.%s, ProbeSetXRef.DataId from ProbeSet, ProbeSetXRef
+ where ProbeSet.Id=ProbeSetXRef.ProbeSetId and ProbeSetXRef.ProbeSetFreezeId=%d"""
+ % (IdStr, ProbeSetFreezeId))
+results = db.fetchall()
+
+ProbeNameId = {}
+for Name, Id in results:
+ ProbeNameId[Name] = Id
+
+ferror = open("ProbeError.txt", "wb")
+
+DataValues = []
+
+fp.seek(0) # XZ add this line
+line = fp.readline() # XZ add this line
+line = fp.readline()
+
+kj = 0
+while line:
+ line2 = line.strip().split('\t')
+ line2 = [x.strip() for x in line2]
+
+ CellId = line2[0]
+ if CellId not in ProbeNameId:
+ ferror.write(CellId + " doesn't exist\n")
+ else:
+ DataId = ProbeNameId[CellId]
+ datasorig = line2[dataStart:]
+
+ i = 0
+ for item in datasorig:
+ if item != '':
+ value = '('+str(DataId)+','+str(Ids[i])+','+str(item)+')'
+ DataValues.append(value)
+ i += 1
+
+ kj += 1
+ if kj % 100 == 0:
+ Dataitems = ','.join(DataValues)
+ cmd = 'insert ProbeSetSE values %s' % Dataitems
+ db.execute(cmd)
+
+ DataValues = []
+ line = fp.readline()
+ print((CellId, " doesn't exist"))
+ print(('inserted ', kj, ' lines'))
+ print(('used ', time.time()-time0, ' seconds'))
+
+if len(DataValues) > 0:
+ DataValues = ','.join(DataValues)
+ cmd = 'insert ProbeSetSE values %s' % DataValues
+ db.execute(cmd)
+
+con.close()
diff --git a/setup.py b/setup.py
index a9b71fab..8436dcd3 100644
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@ setup(name='genenetwork2',
url = "https://github.com/genenetwork/genenetwork2/blob/master/README.md",
description = 'Website and tools for genetics.',
include_package_data=True,
- packages=['wqflask','etc'],
+ packages=['wqflask', 'etc'],
scripts=['bin/genenetwork2'],
# package_data = {
# 'etc': ['*.py']
diff --git a/test/requests/link_checker.py b/test/requests/link_checker.py
index d040ba54..6ac26ba7 100644
--- a/test/requests/link_checker.py
+++ b/test/requests/link_checker.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
import re
import requests
from lxml.html import parse
@@ -23,12 +22,9 @@ def is_in_page_link(link):
return pattern.match(link)
def get_links(doc):
- return filter(
- lambda x: not (
+ return [x for x in [y.get("href") for y in doc.cssselect("a")] if not (
is_root_link(x)
- or is_mailto_link(x))
- , map(lambda y: y.get("href")
- , doc.cssselect("a")))
+ or is_mailto_link(x))]
def verify_link(link):
if link[0] == "#":
@@ -58,7 +54,7 @@ def verify_static_file(link):
try:
result = requests.get(link, timeout=20, verify=False)
if (result.status_code == 200 and
- result.content.find("Error: 404 Not Found") <= 0):
+ result.content.find(bytes("Error: 404 Not Found", "utf-8")) <= 0):
print(link+" ==> OK")
else:
print("ERROR: link {}".format(link))
@@ -72,9 +68,9 @@ def check_page(host, start_url):
print("Checking links host "+host+" in page `"+start_url+"`")
doc = parse(start_url).getroot()
links = get_links(doc)
- in_page_links = filter(is_in_page_link, links)
- internal_links = filter(is_internal_link, links)
- external_links = filter(lambda x: not (is_internal_link(x) or is_in_page_link(x)), links)
+ in_page_links = list(filter(is_in_page_link, links))
+ internal_links = list(filter(is_internal_link, links))
+ external_links = [x for x in links if not (is_internal_link(x) or is_in_page_link(x))]
for link in internal_links:
verify_link(host+link)
diff --git a/test/requests/main_web_functionality.py b/test/requests/main_web_functionality.py
index d4c3b1ad..28033ad5 100644
--- a/test/requests/main_web_functionality.py
+++ b/test/requests/main_web_functionality.py
@@ -1,9 +1,7 @@
-from __future__ import print_function
-import re
import requests
from lxml.html import parse
from link_checker import check_page
-from requests.exceptions import ConnectionError
+
def check_home(url):
doc = parse(url).getroot()
@@ -13,18 +11,20 @@ def check_home(url):
def check_search_page(host):
data = dict(
- species="mouse"
- , group="BXD"
- , type="Hippocampus mRNA"
- , dataset="HC_M2_0606_P"
- , search_terms_or=""
- , search_terms_and="MEAN=(15 16) LRS=(23 46)")
+ species="mouse",
+ group="BXD",
+ type="Hippocampus mRNA",
+ dataset="HC_M2_0606_P",
+ search_terms_or="",
+ search_terms_and="MEAN=(15 16) LRS=(23 46)")
result = requests.get(host+"/search", params=data)
found = result.text.find("records were found")
assert(found >= 0)
assert(result.status_code == 200)
print("OK")
- check_traits_page(host, "/show_trait?trait_id=1435395_s_at&dataset=HC_M2_0606_P")
+ check_traits_page(host, ("/show_trait?trait_id=1435395_"
+ "s_at&dataset=HC_M2_0606_P"))
+
def check_traits_page(host, traits_url):
doc = parse(host+traits_url).getroot()
@@ -33,6 +33,7 @@ def check_traits_page(host, traits_url):
print("OK")
check_page(host, host+traits_url)
+
def check_main_web_functionality(args_obj, parser):
print("")
print("Checking main web functionality...")
diff --git a/test/requests/mapping_tests.py b/test/requests/mapping_tests.py
index 5748a2a3..19b22c21 100644
--- a/test/requests/mapping_tests.py
+++ b/test/requests/mapping_tests.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
import re
import copy
import json
diff --git a/test/requests/navigation_tests.py b/test/requests/navigation_tests.py
index eda27324..6b91c1fd 100644
--- a/test/requests/navigation_tests.py
+++ b/test/requests/navigation_tests.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
import re
import requests
from lxml.html import parse
diff --git a/test/requests/test-website.py b/test/requests/test-website.py
index f90d1843..8bfb47c2 100755
--- a/test/requests/test-website.py
+++ b/test/requests/test-website.py
@@ -3,7 +3,7 @@
# env GN2_PROFILE=/home/wrk/opt/gn-latest ./bin/genenetwork2 ./etc/default_settings.py -c ../test/requests/test-website.py http://localhost:5003
#
# Mostly to pick up the Guix GN2_PROFILE and python modules
-from __future__ import print_function
+
import argparse
from link_checker import check_links
from link_checker import check_packaged_js_files
diff --git a/webtests/browser_run.py b/webtests/browser_run.py
index 2ec299c5..7ee540b7 100644
--- a/webtests/browser_run.py
+++ b/webtests/browser_run.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
__all__ = ('sleep', 'testmod', 'test')
from doctest import testmod
@@ -71,4 +69,4 @@ class Test(object):
-test = Test()
\ No newline at end of file
+test = Test()
diff --git a/webtests/correlation_matrix_test.py b/webtests/correlation_matrix_test.py
index 8529c265..97114890 100644
--- a/webtests/correlation_matrix_test.py
+++ b/webtests/correlation_matrix_test.py
@@ -65,8 +65,6 @@ text: 0.608\n71
"""
-from __future__ import absolute_import, division, print_function
-
from browser_run import *
testmod()
diff --git a/webtests/correlation_test.py b/webtests/correlation_test.py
index aad3a69f..311bb847 100644
--- a/webtests/correlation_test.py
+++ b/webtests/correlation_test.py
@@ -44,8 +44,6 @@ text: 1.000
"""
-from __future__ import absolute_import, division, print_function
-
from browser_run import *
testmod()
diff --git a/webtests/marker_regression_test.py b/webtests/marker_regression_test.py
index c4f76f53..9b4a4acb 100644
--- a/webtests/marker_regression_test.py
+++ b/webtests/marker_regression_test.py
@@ -48,8 +48,6 @@ text: 11.511
"""
-from __future__ import absolute_import, division, print_function
-
from browser_run import *
testmod()
diff --git a/webtests/show_trait_js_test.py b/webtests/show_trait_js_test.py
index 0fd2c16c..34ffd3b7 100644
--- a/webtests/show_trait_js_test.py
+++ b/webtests/show_trait_js_test.py
@@ -35,8 +35,6 @@ style: display: none;
"""
-from __future__ import absolute_import, division, print_function
-
from browser_run import *
testmod()
diff --git a/webtests/test_runner.py b/webtests/test_runner.py
index ef6d0d69..b5b590a6 100644
--- a/webtests/test_runner.py
+++ b/webtests/test_runner.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
import unittest
import doctest
import glob
@@ -25,4 +23,4 @@ def main():
runner.run(suite)
if __name__ == '__main__':
- main()
\ No newline at end of file
+ main()
diff --git a/wqflask/base/GeneralObject.py b/wqflask/base/GeneralObject.py
index 0fccaab3..0122ee32 100644
--- a/wqflask/base/GeneralObject.py
+++ b/wqflask/base/GeneralObject.py
@@ -33,7 +33,7 @@ class GeneralObject:
def __init__(self, *args, **kw):
self.contents = list(args)
- for name, value in kw.items():
+ for name, value in list(kw.items()):
setattr(self, name, value)
def __setitem__(self, key, value):
@@ -50,16 +50,17 @@ class GeneralObject:
def __str__(self):
s = ''
- for key in self.__dict__.keys():
+ for key in list(self.__dict__.keys()):
if key != 'contents':
s += '%s = %s\n' % (key, self.__dict__[key])
return s
def __repr__(self):
s = ''
- for key in self.__dict__.keys():
+ for key in list(self.__dict__.keys()):
s += '%s = %s\n' % (key, self.__dict__[key])
return s
- def __cmp__(self, other):
- return len(self.__dict__.keys()).__cmp__(len(other.__dict__.keys()))
+ def __eq__(self, other):
+ return (len(list(self.__dict__.keys())) ==
+ len(list(other.__dict__.keys())))
diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py
index afffe780..295f5c48 100644
--- a/wqflask/base/data_set.py
+++ b/wqflask/base/data_set.py
@@ -18,13 +18,14 @@
#
# This module is used by GeneNetwork project (www.genenetwork.org)
-from __future__ import absolute_import, print_function, division
from db.call import fetchall, fetchone, fetch1
from utility.logger import getLogger
from utility.tools import USE_GN_SERVER, USE_REDIS, flat_files, flat_file_exists, GN2_BASE_URL
from db.gn_server import menu_main
from pprint import pformat as pf
-from MySQLdb import escape_string as escape
+from utility.db_tools import escape
+from utility.db_tools import mescape
+from utility.db_tools import create_in_clause
from maintenance import get_group_samplelists
from utility.tools import locate, locate_ignore_error, flat_files
from utility import gen_geno_ob
@@ -34,7 +35,6 @@ from utility import webqtlUtil
from db import webqtlDatabaseFunction
from base import species
from base import webqtlConfig
-import reaper
from flask import Flask, g
import os
import math
@@ -45,7 +45,7 @@ import codecs
import json
import requests
import gzip
-import cPickle as pickle
+import pickle as pickle
import itertools
from redis import Redis
@@ -207,20 +207,6 @@ def create_datasets_list():
return datasets
-def create_in_clause(items):
- """Create an in clause for mysql"""
- in_clause = ', '.join("'{}'".format(x) for x in mescape(*items))
- in_clause = '( {} )'.format(in_clause)
- return in_clause
-
-
-def mescape(*items):
- """Multiple escape"""
- escaped = [escape(str(item)) for item in items]
- #logger.debug("escaped is:", escaped)
- return escaped
-
-
class Markers(object):
"""Todo: Build in cacheing so it saves us reading the same file more than once"""
@@ -255,12 +241,12 @@ class Markers(object):
logger.debug("length of self.markers:", len(self.markers))
logger.debug("length of p_values:", len(p_values))
- if type(p_values) is list:
+ if isinstance(p_values, list):
# THIS IS only needed for the case when we are limiting the number of p-values calculated
# if len(self.markers) > len(p_values):
# self.markers = self.markers[:len(p_values)]
- for marker, p_value in itertools.izip(self.markers, p_values):
+ for marker, p_value in zip(self.markers, p_values):
if not p_value:
continue
marker['p_value'] = float(p_value)
@@ -271,7 +257,7 @@ class Markers(object):
marker['lod_score'] = -math.log10(marker['p_value'])
# Using -log(p) for the LRS; need to ask Rob how he wants to get LRS from p-values
marker['lrs_value'] = -math.log10(marker['p_value']) * 4.61
- elif type(p_values) is dict:
+ elif isinstance(p_values, dict):
filtered_markers = []
for marker in self.markers:
#logger.debug("marker[name]", marker['name'])
@@ -457,12 +443,7 @@ class DatasetGroup(object):
full_filename = str(locate(self.genofile, 'genotype'))
else:
full_filename = str(locate(self.name + '.geno', 'genotype'))
-
- if use_reaper:
- genotype_1 = reaper.Dataset()
- genotype_1.read(full_filename)
- else:
- genotype_1 = gen_geno_ob.genotype(full_filename)
+ genotype_1 = gen_geno_ob.genotype(full_filename)
if genotype_1.type == "group" and self.parlist:
genotype_2 = genotype_1.add(
@@ -705,7 +686,7 @@ class DataSet(object):
else:
query = "SELECT {}.Name,".format(escape(dataset_type))
data_start_pos = 1
- query += string.join(temp, ', ')
+ query += ', '.join(temp)
query += ' FROM ({}, {}XRef, {}Freeze) '.format(*mescape(dataset_type,
self.type,
self.type))
@@ -1051,9 +1032,9 @@ class MrnaAssayDataSet(DataSet):
# XZ, 12/08/2008: description
# XZ, 06/05/2009: Rob asked to add probe target description
- description_string = unicode(
+ description_string = str(
str(this_trait.description).strip(codecs.BOM_UTF8), 'utf-8')
- target_string = unicode(
+ target_string = str(
str(this_trait.probe_target_description).strip(codecs.BOM_UTF8), 'utf-8')
if len(description_string) > 1 and description_string != 'None':
diff --git a/wqflask/base/mrna_assay_tissue_data.py b/wqflask/base/mrna_assay_tissue_data.py
index 6fec5dcd..f1929518 100644
--- a/wqflask/base/mrna_assay_tissue_data.py
+++ b/wqflask/base/mrna_assay_tissue_data.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
import collections
from flask import g
@@ -7,9 +5,8 @@ from flask import g
from utility import db_tools
from utility import Bunch
-from MySQLdb import escape_string as escape
+from utility.db_tools import escape
-from pprint import pformat as pf
from utility.logger import getLogger
logger = getLogger(__name__ )
@@ -92,4 +89,4 @@ class MrnaAssayTissueData(object):
else:
symbol_values_dict[result.Symbol.lower()].append(result.value)
- return symbol_values_dict
\ No newline at end of file
+ return symbol_values_dict
diff --git a/wqflask/base/species.py b/wqflask/base/species.py
index 6d99af65..2771d116 100644
--- a/wqflask/base/species.py
+++ b/wqflask/base/species.py
@@ -1,14 +1,7 @@
-from __future__ import absolute_import, print_function, division
-
import collections
from flask import Flask, g
-#from MySQLdb import escape_string as escape
-
-from utility import Bunch
-
-from pprint import pformat as pf
from utility.logger import getLogger
logger = getLogger(__name__ )
@@ -59,4 +52,4 @@ class Chromosomes(object):
results = g.db.execute(query).fetchall()
for item in results:
- self.chromosomes[item.OrderId] = IndChromosome(item.Name, item.Length)
\ No newline at end of file
+ self.chromosomes[item.OrderId] = IndChromosome(item.Name, item.Length)
diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py
index 8f8b5b70..d0c79119 100644
--- a/wqflask/base/trait.py
+++ b/wqflask/base/trait.py
@@ -1,39 +1,30 @@
-from __future__ import absolute_import, division, print_function
-
-import os
-import string
-import resource
-import codecs
import requests
-import random
-import urllib
+import simplejson as json
+from wqflask import app
from base import webqtlConfig
from base.webqtlCaseData import webqtlCaseData
from base.data_set import create_dataset
-from db import webqtlDatabaseFunction
-from utility import webqtlUtil
from utility import hmac
from utility.authentication_tools import check_resource_availability
-from utility.tools import GN2_BASE_URL, GN_VERSION
-from utility.redis_tools import get_redis_conn, get_resource_id, get_resource_info
-Redis = get_redis_conn()
+from utility.tools import GN2_BASE_URL
+from utility.redis_tools import get_redis_conn, get_resource_id
-from wqflask import app
+from utility.db_tools import escape
-import simplejson as json
-from MySQLdb import escape_string as escape
-from pprint import pformat as pf
-
-from flask import Flask, g, request, url_for, redirect, make_response, render_template
+from flask import g, request, url_for
from utility.logger import getLogger
-logger = getLogger(__name__ )
+
+logger = getLogger(__name__)
+
+Redis = get_redis_conn()
+
def create_trait(**kw):
- assert bool(kw.get('dataset')) != bool(kw.get('dataset_name')), "Needs dataset ob. or name";
+ assert bool(kw.get('dataset')) != bool(
+ kw.get('dataset_name')), "Needs dataset ob. or name"
- permitted = True
if kw.get('name'):
if kw.get('dataset_name'):
if kw.get('dataset_name') != "Temp":
@@ -43,18 +34,23 @@ def create_trait(**kw):
if kw.get('dataset_name') != "Temp":
if dataset.type == 'Publish':
- permissions = check_resource_availability(dataset, kw.get('name'))
+ permissions = check_resource_availability(
+ dataset, kw.get('name'))
else:
permissions = check_resource_availability(dataset)
if "view" in permissions['data']:
the_trait = GeneralTrait(**kw)
if the_trait.dataset.type != "Temp":
- the_trait = retrieve_trait_info(the_trait, the_trait.dataset, get_qtl_info=kw.get('get_qtl_info'))
+ the_trait = retrieve_trait_info(
+ the_trait,
+ the_trait.dataset,
+ get_qtl_info=kw.get('get_qtl_info'))
return the_trait
else:
return None
+
class GeneralTrait(object):
"""
Trait class defines a trait in webqtl, can be either Microarray,
@@ -64,12 +60,17 @@ class GeneralTrait(object):
def __init__(self, get_qtl_info=False, get_sample_info=True, **kw):
# xor assertion
- assert bool(kw.get('dataset')) != bool(kw.get('dataset_name')), "Needs dataset ob. or name";
- self.name = kw.get('name') # Trait ID, ProbeSet ID, Published ID, etc.
+ assert bool(kw.get('dataset')) != bool(
+ kw.get('dataset_name')), "Needs dataset ob. or name"
+ # Trait ID, ProbeSet ID, Published ID, etc.
+ self.name = kw.get('name')
if kw.get('dataset_name'):
if kw.get('dataset_name') == "Temp":
temp_group = self.name.split("_")[2]
- self.dataset = create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = temp_group)
+ self.dataset = create_dataset(
+ dataset_name="Temp",
+ dataset_type="Temp",
+ group_name=temp_group)
else:
self.dataset = create_dataset(kw.get('dataset_name'))
else:
@@ -77,7 +78,8 @@ class GeneralTrait(object):
self.cellid = kw.get('cellid')
self.identification = kw.get('identification', 'un-named trait')
self.haveinfo = kw.get('haveinfo', False)
- self.sequence = kw.get('sequence') # Blat sequence, available for ProbeSet
+ # Blat sequence, available for ProbeSet
+ self.sequence = kw.get('sequence')
self.data = kw.get('data', {})
self.view = True
@@ -103,9 +105,10 @@ class GeneralTrait(object):
elif len(name2) == 3:
self.dataset, self.name, self.cellid = name2
- # Todo: These two lines are necessary most of the time, but perhaps not all of the time
- # So we could add a simple if statement to short-circuit this if necessary
- if get_sample_info != False:
+ # Todo: These two lines are necessary most of the time, but
+ # perhaps not all of the time. So we could add a simple if
+ # statement to short-circuit this if necessary
+ if get_sample_info is not False:
self = retrieve_sample_data(self, self.dataset)
def export_informative(self, include_variance=0):
@@ -118,14 +121,14 @@ class GeneralTrait(object):
vals = []
the_vars = []
sample_aliases = []
- for sample_name, sample_data in self.data.items():
- if sample_data.value != None:
- if not include_variance or sample_data.variance != None:
+ for sample_name, sample_data in list(self.data.items()):
+ if sample_data.value is not None:
+ if not include_variance or sample_data.variance is not None:
samples.append(sample_name)
vals.append(sample_data.value)
the_vars.append(sample_data.variance)
sample_aliases.append(sample_data.name2)
- return samples, vals, the_vars, sample_aliases
+ return samples, vals, the_vars, sample_aliases
@property
def description_fmt(self):
@@ -153,8 +156,8 @@ class GeneralTrait(object):
alias = 'Not available'
if getattr(self, "alias", None):
- alias = string.replace(self.alias, ";", " ")
- alias = string.join(string.split(alias), ", ")
+ alias = self.alias.replace(";", " ")
+ alias = ", ".join(alias.split())
return alias
@@ -164,12 +167,17 @@ class GeneralTrait(object):
alias = 'Not available'
if self.symbol:
- human_response = requests.get(GN2_BASE_URL + "gn3/gene/aliases/" + self.symbol.upper())
- mouse_response = requests.get(GN2_BASE_URL + "gn3/gene/aliases/" + self.symbol.capitalize())
- other_response = requests.get(GN2_BASE_URL + "gn3/gene/aliases/" + self.symbol.lower())
+ human_response = requests.get(
+ GN2_BASE_URL + "gn3/gene/aliases/" + self.symbol.upper())
+ mouse_response = requests.get(
+ GN2_BASE_URL + "gn3/gene/aliases/" + self.symbol.capitalize())
+ other_response = requests.get(
+ GN2_BASE_URL + "gn3/gene/aliases/" + self.symbol.lower())
if human_response and mouse_response and other_response:
- alias_list = json.loads(human_response.content) + json.loads(mouse_response.content) + json.loads(other_response.content)
+ alias_list = json.loads(human_response.content) + json.loads(
+ mouse_response.content) + \
+ json.loads(other_response.content)
filtered_aliases = []
seen = set()
@@ -183,33 +191,34 @@ class GeneralTrait(object):
return alias
-
@property
def location_fmt(self):
'''Return a text formatted location
- While we're at it we set self.location in case we need it later (do we?)
+ While we're at it we set self.location in case we need it
+ later (do we?)
'''
if self.chr and self.mb:
- self.location = 'Chr %s @ %s Mb' % (self.chr,self.mb)
+ self.location = 'Chr %s @ %s Mb' % (self.chr, self.mb)
elif self.chr:
self.location = 'Chr %s @ Unknown position' % (self.chr)
else:
self.location = 'Not available'
fmt = self.location
- ##XZ: deal with direction
+ # XZ: deal with direction
if self.strand_probe == '+':
fmt += (' on the plus strand ')
elif self.strand_probe == '-':
fmt += (' on the minus strand ')
return fmt
-
+
+
def retrieve_sample_data(trait, dataset, samplelist=None):
- if samplelist == None:
+ if samplelist is None:
samplelist = []
if dataset.type == "Temp":
@@ -225,16 +234,19 @@ def retrieve_sample_data(trait, dataset, samplelist=None):
all_samples_ordered = dataset.group.all_samples_ordered()
for i, item in enumerate(results):
try:
- trait.data[all_samples_ordered[i]] = webqtlCaseData(all_samples_ordered[i], float(item))
+ trait.data[all_samples_ordered[i]] = webqtlCaseData(
+ all_samples_ordered[i], float(item))
except:
pass
else:
for item in results:
name, value, variance, num_cases, name2 = item
if not samplelist or (samplelist and name in samplelist):
- trait.data[name] = webqtlCaseData(*item) #name, value, variance, num_cases)
+ # name, value, variance, num_cases)
+ trait.data[name] = webqtlCaseData(*item)
return trait
+
@app.route("/trait/get_sample_data")
def get_sample_data():
params = request.args
@@ -250,7 +262,8 @@ def get_sample_data():
trait_dict['group'] = trait_ob.dataset.group.name
trait_dict['tissue'] = trait_ob.dataset.tissue
trait_dict['species'] = trait_ob.dataset.group.species
- trait_dict['url'] = url_for('show_trait_page', trait_id = trait, dataset = dataset)
+ trait_dict['url'] = url_for(
+ 'show_trait_page', trait_id=trait, dataset=dataset)
trait_dict['description'] = trait_ob.description_display
if trait_ob.dataset.type == "ProbeSet":
trait_dict['symbol'] = trait_ob.symbol
@@ -260,22 +273,27 @@ def get_sample_data():
trait_dict['pubmed_link'] = trait_ob.pubmed_link
trait_dict['pubmed_text'] = trait_ob.pubmed_text
- return json.dumps([trait_dict, {key: value.value for key, value in trait_ob.data.iteritems() }])
+ return json.dumps([trait_dict, {key: value.value for
+ key, value in list(
+ trait_ob.data.items())}])
else:
return None
-
+
+
def jsonable(trait):
"""Return a dict suitable for using as json
Actual turning into json doesn't happen here though"""
- dataset = create_dataset(dataset_name = trait.dataset.name, dataset_type = trait.dataset.type, group_name = trait.dataset.group.name)
-
+ dataset = create_dataset(dataset_name=trait.dataset.name,
+ dataset_type=trait.dataset.type,
+ group_name=trait.dataset.group.name)
+
if dataset.type == "ProbeSet":
return dict(name=trait.name,
symbol=trait.symbol,
dataset=dataset.name,
- dataset_name = dataset.shortname,
+ dataset_name=dataset.shortname,
description=trait.description_display,
mean=trait.mean,
location=trait.location_repr,
@@ -287,7 +305,7 @@ def jsonable(trait):
if trait.pubmed_id:
return dict(name=trait.name,
dataset=dataset.name,
- dataset_name = dataset.shortname,
+ dataset_name=dataset.shortname,
description=trait.description_display,
abbreviation=trait.abbreviation,
authors=trait.authors,
@@ -300,7 +318,7 @@ def jsonable(trait):
else:
return dict(name=trait.name,
dataset=dataset.name,
- dataset_name = dataset.shortname,
+ dataset_name=dataset.shortname,
description=trait.description_display,
abbreviation=trait.abbreviation,
authors=trait.authors,
@@ -312,19 +330,20 @@ def jsonable(trait):
elif dataset.type == "Geno":
return dict(name=trait.name,
dataset=dataset.name,
- dataset_name = dataset.shortname,
+ dataset_name=dataset.shortname,
location=trait.location_repr
)
else:
return dict()
+
def jsonable_table_row(trait, dataset_name, index):
"""Return a list suitable for json and intended to be displayed in a table
Actual turning into json doesn't happen here though"""
dataset = create_dataset(dataset_name)
-
+
if dataset.type == "ProbeSet":
if trait.mean == "":
mean = "N/A"
@@ -336,11 +355,13 @@ def jsonable_table_row(trait, dataset_name, index):
additive = "%.3f" % round(float(trait.additive), 2)
return ['<input type="checkbox" name="searchResult" class="checkbox trait_checkbox" value="' + hmac.data_hmac('{}:{}'.format(str(trait.name), dataset.name)) + '">',
index,
- '<a href="/show_trait?trait_id='+str(trait.name)+'&dataset='+dataset.name+'">'+str(trait.name)+'</a>',
+ '<a href="/show_trait?trait_id=' +
+ str(trait.name)+'&dataset='+dataset.name +
+ '">'+str(trait.name)+'</a>',
trait.symbol,
trait.description_display,
trait.location_repr,
- mean,
+ mean,
trait.LRS_score_repr,
trait.LRS_location_repr,
additive]
@@ -352,7 +373,9 @@ def jsonable_table_row(trait, dataset_name, index):
if trait.pubmed_id:
return ['<input type="checkbox" name="searchResult" class="checkbox trait_checkbox" value="' + hmac.data_hmac('{}:{}'.format(str(trait.name), dataset.name)) + '">',
index,
- '<a href="/show_trait?trait_id='+str(trait.name)+'&dataset='+dataset.name+'">'+str(trait.name)+'</a>',
+ '<a href="/show_trait?trait_id=' +
+ str(trait.name)+'&dataset='+dataset.name +
+ '">'+str(trait.name)+'</a>',
trait.description_display,
trait.authors,
'<a href="' + trait.pubmed_link + '">' + trait.pubmed_text + '</href>',
@@ -362,7 +385,9 @@ def jsonable_table_row(trait, dataset_name, index):
else:
return ['<input type="checkbox" name="searchResult" class="checkbox trait_checkbox" value="' + hmac.data_hmac('{}:{}'.format(str(trait.name), dataset.name)) + '">',
index,
- '<a href="/show_trait?trait_id='+str(trait.name)+'&dataset='+dataset.name+'">'+str(trait.name)+'</a>',
+ '<a href="/show_trait?trait_id=' +
+ str(trait.name)+'&dataset='+dataset.name +
+ '">'+str(trait.name)+'</a>',
trait.description_display,
trait.authors,
trait.pubmed_text,
@@ -372,7 +397,9 @@ def jsonable_table_row(trait, dataset_name, index):
elif dataset.type == "Geno":
return ['<input type="checkbox" name="searchResult" class="checkbox trait_checkbox" value="' + hmac.data_hmac('{}:{}'.format(str(trait.name), dataset.name)) + '">',
index,
- '<a href="/show_trait?trait_id='+str(trait.name)+'&dataset='+dataset.name+'">'+str(trait.name)+'</a>',
+ '<a href="/show_trait?trait_id=' +
+ str(trait.name)+'&dataset='+dataset.name +
+ '">'+str(trait.name)+'</a>',
trait.location_repr]
else:
return dict()
@@ -383,14 +410,16 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False):
resource_id = get_resource_id(dataset, trait.name)
if dataset.type == 'Publish':
- the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view".format(resource_id, g.user_session.user_id)
+ the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view".format(
+ resource_id, g.user_session.user_id)
else:
- the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view&trait={}".format(resource_id, g.user_session.user_id, trait.name)
+ the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view&trait={}".format(
+ resource_id, g.user_session.user_id, trait.name)
try:
response = requests.get(the_url).content
trait_info = json.loads(response)
- except: #ZS: I'm assuming the trait is viewable if the try fails for some reason; it should never reach this point unless the user has privileges, since that's dealt with in create_trait
+ except: # ZS: I'm assuming the trait is viewable if the try fails for some reason; it should never reach this point unless the user has privileges, since that's dealt with in create_trait
if dataset.type == 'Publish':
query = """
SELECT
@@ -419,8 +448,8 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False):
logger.sql(query)
trait_info = g.db.execute(query).fetchone()
- #XZ, 05/08/2009: Xiaodong add this block to use ProbeSet.Id to find the probeset instead of just using ProbeSet.Name
- #XZ, 05/08/2009: to avoid the problem of same probeset name from different platforms.
+ # XZ, 05/08/2009: Xiaodong add this block to use ProbeSet.Id to find the probeset instead of just using ProbeSet.Name
+ # XZ, 05/08/2009: to avoid the problem of same probeset name from different platforms.
elif dataset.type == 'ProbeSet':
display_fields_string = ', ProbeSet.'.join(dataset.display_fields)
display_fields_string = 'ProbeSet.' + display_fields_string
@@ -433,14 +462,14 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False):
ProbeSetFreeze.Name = '%s' AND
ProbeSet.Name = '%s'
""" % (escape(display_fields_string),
- escape(dataset.name),
- escape(str(trait.name)))
+ escape(dataset.name),
+ escape(str(trait.name)))
logger.sql(query)
trait_info = g.db.execute(query).fetchone()
- #XZ, 05/08/2009: We also should use Geno.Id to find marker instead of just using Geno.Name
+ # XZ, 05/08/2009: We also should use Geno.Id to find marker instead of just using Geno.Name
# to avoid the problem of same marker name from different species.
elif dataset.type == 'Geno':
- display_fields_string = string.join(dataset.display_fields,',Geno.')
+ display_fields_string = ',Geno.'.join(dataset.display_fields)
display_fields_string = 'Geno.' + display_fields_string
query = """
SELECT %s
@@ -451,21 +480,21 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False):
GenoFreeze.Name = '%s' AND
Geno.Name = '%s'
""" % (escape(display_fields_string),
- escape(dataset.name),
- escape(trait.name))
+ escape(dataset.name),
+ escape(trait.name))
logger.sql(query)
trait_info = g.db.execute(query).fetchone()
- else: #Temp type
+ else: # Temp type
query = """SELECT %s FROM %s WHERE Name = %s"""
logger.sql(query)
trait_info = g.db.execute(query,
- (string.join(dataset.display_fields,','),
- dataset.type, trait.name)).fetchone()
+ (','.join(dataset.display_fields),
+ dataset.type, trait.name)).fetchone()
if trait_info:
trait.haveinfo = True
for i, field in enumerate(dataset.display_fields):
- holder = trait_info[i]
+ holder = trait_info[i]
setattr(trait, field, holder)
if dataset.type == 'Publish':
@@ -478,9 +507,9 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False):
description = trait.post_publication_description
- #If the dataset is confidential and the user has access to confidential
- #phenotype traits, then display the pre-publication description instead
- #of the post-publication description
+ # If the dataset is confidential and the user has access to confidential
+ # phenotype traits, then display the pre-publication description instead
+ # of the post-publication description
if trait.confidential:
trait.abbreviation = trait.pre_publication_abbreviation
trait.description_display = trait.pre_publication_description
@@ -491,10 +520,6 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False):
else:
trait.description_display = ""
- trait.abbreviation = unicode(str(trait.abbreviation).strip(codecs.BOM_UTF8), 'utf-8', errors="replace")
- trait.description_display = unicode(str(trait.description_display).strip(codecs.BOM_UTF8), 'utf-8', errors="replace")
- trait.authors = unicode(str(trait.authors).strip(codecs.BOM_UTF8), 'utf-8', errors="replace")
-
if not trait.year.isdigit():
trait.pubmed_text = "N/A"
else:
@@ -504,8 +529,8 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False):
trait.pubmed_link = webqtlConfig.PUBMEDLINK_URL % trait.pubmed_id
if dataset.type == 'ProbeSet' and dataset.group:
- description_string = unicode(str(trait.description).strip(codecs.BOM_UTF8), 'utf-8')
- target_string = unicode(str(trait.probe_target_description).strip(codecs.BOM_UTF8), 'utf-8')
+ description_string = trait.description
+ target_string = trait.probe_target_description
if str(description_string or "") != "" and description_string != 'None':
description_display = description_string
@@ -522,15 +547,17 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False):
trait.location_repr = 'N/A'
if trait.chr and trait.mb:
- trait.location_repr = 'Chr%s: %.6f' % (trait.chr, float(trait.mb))
+ trait.location_repr = 'Chr%s: %.6f' % (
+ trait.chr, float(trait.mb))
elif dataset.type == "Geno":
trait.location_repr = 'N/A'
if trait.chr and trait.mb:
- trait.location_repr = 'Chr%s: %.6f' % (trait.chr, float(trait.mb))
+ trait.location_repr = 'Chr%s: %.6f' % (
+ trait.chr, float(trait.mb))
if get_qtl_info:
- #LRS and its location
+ # LRS and its location
trait.LRS_score_repr = "N/A"
trait.LRS_location_repr = "N/A"
trait.locus = trait.locus_chr = trait.locus_mb = trait.lrs = trait.pvalue = trait.additive = ""
@@ -605,6 +632,6 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False):
if str(trait.lrs or "") != "":
trait.LRS_score_repr = LRS_score_repr = '%3.1f' % trait.lrs
else:
- raise KeyError, `trait.name`+' information is not found in the database.'
-
+ raise KeyError(repr(trait.name) +
+ ' information is not found in the database.')
return trait
diff --git a/wqflask/db/call.py b/wqflask/db/call.py
index 1a1b3adc..0971d2a2 100644
--- a/wqflask/db/call.py
+++ b/wqflask/db/call.py
@@ -3,7 +3,10 @@
from flask import g
import string
-import urllib2
+try:  # Python 3 urllib; falls back to urllib2 on Python 2
+    import urllib.request, urllib.error, urllib.parse
+except ImportError:
+    import urllib2
import json
from utility.tools import USE_GN_SERVER, LOG_SQL, GN_SERVER_URL
from utility.benchmark import Bench
@@ -26,8 +29,8 @@ GN_SERVER result when set (which should return a Tuple)
else:
res2 = result,
if LOG_SQL:
- logger.debug("Replaced SQL call",query)
- logger.debug(path,res2)
+ logger.debug("Replaced SQL call", query)
+ logger.debug(path, res2)
return res2
else:
return fetchone(query)
@@ -37,7 +40,7 @@ def fetchone(query):
original fetchone, but with logging)
"""
- with Bench("SQL",LOG_SQL):
+ with Bench("SQL", LOG_SQL):
def helper(query):
res = g.db.execute(query)
return res.fetchone()
@@ -48,7 +51,7 @@ def fetchall(query):
original fetchall, but with logging)
"""
- with Bench("SQL",LOG_SQL):
+ with Bench("SQL", LOG_SQL):
def helper(query):
res = g.db.execute(query)
return res.fetchall()
@@ -58,8 +61,12 @@ def gn_server(path):
"""Return JSON record by calling GN_SERVER
"""
- with Bench("GN_SERVER",LOG_SQL):
- res = urllib2.urlopen(GN_SERVER_URL+path)
+ with Bench("GN_SERVER", LOG_SQL):
+ res = ()
+ try:
+ res = urllib.request.urlopen(GN_SERVER_URL+path)
+        except:  # Python 2 fallback to urllib2
+ res = urllib2.urlopen(GN_SERVER_URL+path)
rest = res.read()
res2 = json.loads(rest)
logger.debug(res2)
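
The hunk above keeps a urllib2 fallback during the transition. As a rough sketch, the
plain Python 3 idiom it moves toward looks like the following; the helper name and the
example URL are hypothetical, not part of wqflask/db/call.py.

import json
import urllib.request


def fetch_json(base_url, path):
    """Fetch a JSON record from GN_SERVER and decode it (sketch only)."""
    with urllib.request.urlopen(base_url + path) as res:
        return json.loads(res.read())


# Hypothetical usage:
# record = fetch_json("http://localhost:8880/api/", "cross/BXD.json")
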
diff --git a/wqflask/db/webqtlDatabaseFunction.py b/wqflask/db/webqtlDatabaseFunction.py
index 8a9dc79d..2805febd 100644
--- a/wqflask/db/webqtlDatabaseFunction.py
+++ b/wqflask/db/webqtlDatabaseFunction.py
@@ -35,13 +35,13 @@ def retrieve_species(group):
"""Get the species of a group (e.g. returns string "mouse" on "BXD"
"""
- result = fetch1("select Species.Name from Species, InbredSet where InbredSet.Name = '%s' and InbredSet.SpeciesId = Species.Id" % (group),"/cross/"+group+".json",lambda r: (r["species"],))[0]
- logger.debug("retrieve_species result:",result)
+ result = fetch1("select Species.Name from Species, InbredSet where InbredSet.Name = '%s' and InbredSet.SpeciesId = Species.Id" % (group), "/cross/"+group+".json", lambda r: (r["species"],))[0]
+ logger.debug("retrieve_species result:", result)
return result
def retrieve_species_id(group):
- result = fetch1("select SpeciesId from InbredSet where Name = '%s'" % (group),"/cross/"+group+".json",lambda r: (r["species_id"],))[0]
- logger.debug("retrieve_species_id result:",result)
+ result = fetch1("select SpeciesId from InbredSet where Name = '%s'" % (group), "/cross/"+group+".json", lambda r: (r["species_id"],))[0]
+ logger.debug("retrieve_species_id result:", result)
return result
diff --git a/wqflask/maintenance/convert_dryad_to_bimbam.py b/wqflask/maintenance/convert_dryad_to_bimbam.py
index e833b395..12ce35e9 100644
--- a/wqflask/maintenance/convert_dryad_to_bimbam.py
+++ b/wqflask/maintenance/convert_dryad_to_bimbam.py
@@ -6,7 +6,6 @@ Convert data dryad files to a BIMBAM _geno and _snps file
"""
-from __future__ import print_function, division, absolute_import
import sys
sys.path.append("..")
@@ -67,4 +66,4 @@ def convert_dryad_to_bimbam(filename):
if __name__=="__main__":
input_filename = "/home/zas1024/cfw_data/" + sys.argv[1] + ".txt"
- convert_dryad_to_bimbam(input_filename)
\ No newline at end of file
+ convert_dryad_to_bimbam(input_filename)
diff --git a/wqflask/maintenance/convert_geno_to_bimbam.py b/wqflask/maintenance/convert_geno_to_bimbam.py
index 528b98cf..d49742f2 100644
--- a/wqflask/maintenance/convert_geno_to_bimbam.py
+++ b/wqflask/maintenance/convert_geno_to_bimbam.py
@@ -9,7 +9,6 @@ code
"""
-from __future__ import print_function, division, absolute_import
import sys
sys.path.append("..")
import os
@@ -187,4 +186,4 @@ if __name__=="__main__":
#convertob = ConvertGenoFile("/home/zas1024/gene/genotype_files/genotypes/SRxSHRSPF2.geno", "/home/zas1024/gene/genotype_files/new_genotypes/SRxSHRSPF2.json")
#convertob.convert()
ConvertGenoFile.process_all(Old_Geno_Directory, New_Geno_Directory)
- #ConvertGenoFiles(Geno_Directory)
\ No newline at end of file
+ #ConvertGenoFiles(Geno_Directory)
diff --git a/wqflask/maintenance/gen_select_dataset.py b/wqflask/maintenance/gen_select_dataset.py
index 647e58a2..544e2fd1 100644
--- a/wqflask/maintenance/gen_select_dataset.py
+++ b/wqflask/maintenance/gen_select_dataset.py
@@ -30,18 +30,10 @@ It needs to be run manually when database has been changed. Run it as
#
# This module is used by GeneNetwork project (www.genenetwork.org)
-from __future__ import print_function, division
-
-#from flask import config
-#
-#cdict = {}
-#config = config.Config(cdict).from_envvar('WQFLASK_SETTINGS')
-#print("cdict is:", cdict)
-
import sys
# NEW: Note we prepend the current path - otherwise a guix instance of GN2 may be used instead
-sys.path.insert(0,'./')
+sys.path.insert(0, './')
# NEW: import app to avoid a circular dependency on utility.tools
from wqflask import app
@@ -50,7 +42,7 @@ from utility.tools import locate, locate_ignore_error, TEMPDIR, SQL_URI
import MySQLdb
import simplejson as json
-import urlparse
+import urllib.parse
#import sqlalchemy as sa
@@ -66,7 +58,7 @@ from pprint import pformat as pf
def parse_db_uri():
"""Converts a database URI to the db name, host name, user name, and password"""
- parsed_uri = urlparse.urlparse(SQL_URI)
+ parsed_uri = urllib.parse.urlparse(SQL_URI)
db_conn_info = dict(
db = parsed_uri.path[1:],
@@ -108,7 +100,7 @@ def get_types(groups):
"""Build types list"""
types = {}
#print("Groups: ", pf(groups))
- for species, group_dict in groups.iteritems():
+ for species, group_dict in list(groups.items()):
types[species] = {}
for group_name, _group_full_name in group_dict:
# make group an alias to shorten the code
@@ -195,9 +187,9 @@ def build_types(species, group):
def get_datasets(types):
"""Build datasets list"""
datasets = {}
- for species, group_dict in types.iteritems():
+ for species, group_dict in list(types.items()):
datasets[species] = {}
- for group, type_list in group_dict.iteritems():
+ for group, type_list in list(group_dict.items()):
datasets[species][group] = {}
for type_name in type_list:
these_datasets = build_datasets(species, group, type_name[0])
@@ -319,4 +311,4 @@ def _test_it():
if __name__ == '__main__':
Conn = MySQLdb.Connect(**parse_db_uri())
Cursor = Conn.cursor()
- main()
\ No newline at end of file
+ main()
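
For the iteritems()/urlparse changes in this file, a small illustrative sketch of the
Python 3 equivalents; the sample data and SQL URI below are made up, not values taken
from gen_select_dataset.py.

import urllib.parse

groups = {"mouse": [("BXD", "BXD Family")], "rat": [("HXB", "HXB Family")]}

# dict.items() returns a view in Python 3; wrapping it in list(), as the ported code
# does, is only required if the dict might change while it is being iterated.
for species, group_list in list(groups.items()):
    for group_name, group_full_name in group_list:
        print(species, group_name, group_full_name)

parsed_uri = urllib.parse.urlparse("mysql://user:password@localhost/db_webqtl")
print(parsed_uri.hostname, parsed_uri.path[1:])  # -> localhost db_webqtl
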
diff --git a/wqflask/maintenance/generate_kinship_from_bimbam.py b/wqflask/maintenance/generate_kinship_from_bimbam.py
index b53f5dda..60257b28 100644
--- a/wqflask/maintenance/generate_kinship_from_bimbam.py
+++ b/wqflask/maintenance/generate_kinship_from_bimbam.py
@@ -8,7 +8,6 @@ and uses GEMMA to generate their corresponding kinship/relatedness matrix file
"""
-from __future__ import print_function, division, absolute_import
import sys
sys.path.append("..")
import os
@@ -58,4 +57,4 @@ if __name__=="__main__":
Bimbam_Directory = """/export/local/home/zas1024/genotype_files/genotype/bimbam/"""
GenerateKinshipMatrices.process_all(Geno_Directory, Bimbam_Directory)
- #./gemma -g /home/zas1024/genotype_files/genotype/bimbam/BXD_geno.txt -p /home/zas1024/genotype_files/genotype/bimbam/BXD_pheno.txt -gk 1 -o BXD
\ No newline at end of file
+ #./gemma -g /home/zas1024/genotype_files/genotype/bimbam/BXD_geno.txt -p /home/zas1024/genotype_files/genotype/bimbam/BXD_pheno.txt -gk 1 -o BXD
diff --git a/wqflask/maintenance/generate_probesetfreeze_file.py b/wqflask/maintenance/generate_probesetfreeze_file.py
index b7b2dc8e..b1e41e9a 100644
--- a/wqflask/maintenance/generate_probesetfreeze_file.py
+++ b/wqflask/maintenance/generate_probesetfreeze_file.py
@@ -1,7 +1,5 @@
#!/usr/bin/python
-from __future__ import absolute_import, print_function, division
-
import sys
# sys.path.insert(0, "..") - why?
@@ -82,7 +80,7 @@ def get_probeset_vals(cursor, dataset_name):
def trim_strains(strains, probeset_vals):
trimmed_strains = []
#print("probeset_vals is:", pf(probeset_vals))
- first_probeset = list(probeset_vals.itervalues())[0]
+ first_probeset = list(probeset_vals.values())[0]
print("\n**** first_probeset is:", pf(first_probeset))
for strain in strains:
print("\n**** strain is:", pf(strain))
diff --git a/wqflask/maintenance/geno_to_json.py b/wqflask/maintenance/geno_to_json.py
index 9579812a..7e7fd241 100644
--- a/wqflask/maintenance/geno_to_json.py
+++ b/wqflask/maintenance/geno_to_json.py
@@ -9,7 +9,6 @@ code
"""
-from __future__ import print_function, division, absolute_import
import sys
sys.path.append("..")
import os
@@ -194,4 +193,4 @@ if __name__=="__main__":
ConvertGenoFile.process_all(Old_Geno_Directory, New_Geno_Directory)
#ConvertGenoFiles(Geno_Directory)
- #process_csv(Input_File, Output_File)
\ No newline at end of file
+ #process_csv(Input_File, Output_File)
diff --git a/wqflask/maintenance/get_group_samplelists.py b/wqflask/maintenance/get_group_samplelists.py
index fb22898a..3f9d0278 100644
--- a/wqflask/maintenance/get_group_samplelists.py
+++ b/wqflask/maintenance/get_group_samplelists.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
import os
import glob
import gzip
diff --git a/wqflask/maintenance/print_benchmark.py b/wqflask/maintenance/print_benchmark.py
index ae327cf3..b24ce4f1 100644
--- a/wqflask/maintenance/print_benchmark.py
+++ b/wqflask/maintenance/print_benchmark.py
@@ -1,7 +1,5 @@
#!/usr/bin/python
-from __future__ import absolute_import, print_function, division
-
import time
from pprint import pformat as pf
@@ -40,4 +38,4 @@ def new_main():
print(pf(TheCounter.Counters))
if __name__ == '__main__':
- new_main()
\ No newline at end of file
+ new_main()
diff --git a/wqflask/maintenance/quantile_normalize.py b/wqflask/maintenance/quantile_normalize.py
index 41a3aad8..701b2b50 100644
--- a/wqflask/maintenance/quantile_normalize.py
+++ b/wqflask/maintenance/quantile_normalize.py
@@ -1,12 +1,7 @@
-from __future__ import absolute_import, print_function, division
-
import sys
-sys.path.insert(0,'./')
-
-from itertools import izip
-
+sys.path.insert(0, './')
import MySQLdb
-import urlparse
+import urllib.parse
import numpy as np
import pandas as pd
@@ -22,7 +17,7 @@ from utility.tools import ELASTICSEARCH_HOST, ELASTICSEARCH_PORT, SQL_URI
def parse_db_uri():
"""Converts a database URI to the db name, host name, user name, and password"""
- parsed_uri = urlparse.urlparse(SQL_URI)
+ parsed_uri = urllib.parse.urlparse(SQL_URI)
db_conn_info = dict(
db = parsed_uri.path[1:],
@@ -37,7 +32,7 @@ def create_dataframe(input_file):
with open(input_file) as f:
ncols = len(f.readline().split("\t"))
- input_array = np.loadtxt(open(input_file, "rb"), delimiter="\t", skiprows=1, usecols=range(1, ncols))
+ input_array = np.loadtxt(open(input_file, "rb"), delimiter="\t", skiprows=1, usecols=list(range(1, ncols)))
return pd.DataFrame(input_array)
#This function taken from https://github.com/ShawnLYU/Quantile_Normalize
@@ -60,7 +55,7 @@ def set_data(dataset_name):
sample_list = []
with open(orig_file, 'r') as orig_fh, open('/home/zas1024/cfw_data/quant_norm.csv', 'r') as quant_fh:
- for i, (line1, line2) in enumerate(izip(orig_fh, quant_fh)):
+ for i, (line1, line2) in enumerate(zip(orig_fh, quant_fh)):
trait_dict = {}
sample_list = []
if i == 0:
@@ -126,4 +121,4 @@ if __name__ == '__main__':
}
)
- print(response)
\ No newline at end of file
+ print(response)
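
A short sketch of the izip-to-zip change above: the built-in zip() is already lazy in
Python 3, so pairing lines from two files needs no itertools import. The file names
below are placeholders, not the paths used by quantile_normalize.py.

with open("orig_expression.txt") as orig_fh, \
        open("quant_norm.csv") as quant_fh:
    for i, (line1, line2) in enumerate(zip(orig_fh, quant_fh)):
        if i == 0:
            continue  # header row; the real set_data() reads the sample list from it
        # first column of each paired line
        print(line1.split("\t")[0], line2.split("\t")[0])
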
diff --git a/wqflask/maintenance/set_resource_defaults.py b/wqflask/maintenance/set_resource_defaults.py
index 54fd8e7e..4177c124 100644
--- a/wqflask/maintenance/set_resource_defaults.py
+++ b/wqflask/maintenance/set_resource_defaults.py
@@ -16,13 +16,11 @@ To run:
"""
-from __future__ import print_function, division
-
import sys
import json
# NEW: Note we prepend the current path - otherwise a guix instance of GN2 may be used instead
-sys.path.insert(0,'./')
+sys.path.insert(0, './')
# NEW: import app to avoid a circular dependency on utility.tools
from wqflask import app
@@ -34,7 +32,7 @@ Redis = get_redis_conn()
import MySQLdb
-import urlparse
+import urllib.parse
from utility.logger import getLogger
logger = getLogger(__name__)
@@ -42,7 +40,7 @@ logger = getLogger(__name__)
def parse_db_uri():
"""Converts a database URI to the db name, host name, user name, and password"""
- parsed_uri = urlparse.urlparse(SQL_URI)
+ parsed_uri = urllib.parse.urlparse(SQL_URI)
db_conn_info = dict(
db = parsed_uri.path[1:],
@@ -163,4 +161,4 @@ def main():
if __name__ == '__main__':
Conn = MySQLdb.Connect(**parse_db_uri())
Cursor = Conn.cursor()
- main()
\ No newline at end of file
+ main()
diff --git a/wqflask/run_gunicorn.py b/wqflask/run_gunicorn.py
index adffdca3..58108e03 100644
--- a/wqflask/run_gunicorn.py
+++ b/wqflask/run_gunicorn.py
@@ -7,7 +7,7 @@
# from flask import Flask
# application = Flask(__name__)
-print "===> Starting up Gunicorn process"
+print("===> Starting up Gunicorn process")
from wqflask import app
from utility.startup_config import app_config
diff --git a/wqflask/runserver.py b/wqflask/runserver.py
index 15572d97..df957bd9 100644
--- a/wqflask/runserver.py
+++ b/wqflask/runserver.py
@@ -8,28 +8,20 @@
# /sbin/iptables -A INPUT -p tcp -i eth0 -s ! 71.236.239.43 --dport 5003 -j DROP
from wqflask import app
-
+from utility.startup_config import app_config
+from utility.tools import WEBSERVER_MODE, SERVER_PORT
import logging
-import utility.logger
-logger = utility.logger.getLogger(__name__ )
-
-import signal
-signal.signal(signal.SIGPIPE, signal.SIG_DFL)
-BLUE = '\033[94m'
+BLUE = '\033[94m'
GREEN = '\033[92m'
-BOLD = '\033[1m'
-ENDC = '\033[0m'
-
-from utility.startup_config import app_config
+BOLD = '\033[1m'
+ENDC = '\033[0m'
app_config()
werkzeug_logger = logging.getLogger('werkzeug')
-from utility.tools import WEBSERVER_MODE, SERVER_PORT
-
if WEBSERVER_MODE == 'DEBUG':
app.run(host='0.0.0.0',
port=SERVER_PORT,
@@ -47,7 +39,7 @@ elif WEBSERVER_MODE == 'DEV':
threaded=False,
processes=0,
use_reloader=True)
-else: # staging/production modes
+else: # staging/production modes
app.run(host='0.0.0.0',
port=SERVER_PORT,
debug=False,
diff --git a/wqflask/tests/base/test_data_set.py b/wqflask/tests/base/test_data_set.py
index dd7f5051..96563a16 100644
--- a/wqflask/tests/base/test_data_set.py
+++ b/wqflask/tests/base/test_data_set.py
@@ -1,10 +1,10 @@
"""Tests for wqflask/base/data_set.py"""
import unittest
-import mock
+from unittest import mock
from wqflask import app
-from data import gen_menu_json
+from .data import gen_menu_json
from base.data_set import DatasetType
@@ -59,9 +59,14 @@ class TestDataSetTypes(unittest.TestCase):
self.assertEqual(data_set("BXDGeno"), "Geno")
self.assertEqual(data_set("BXDPublish"), "Publish")
self.assertEqual(data_set("HLC_0311"), "ProbeSet")
+
redis_mock.set.assert_called_once_with(
"dataset_structure",
- '{"BXDGeno": "Geno", "BXDPublish": "Publish", "HLCPublish": "Publish", "HLC_0311": "ProbeSet", "HC_M2_0606_P": "ProbeSet"}')
+ ('{"HLC_0311": "ProbeSet", '
+ '"HLCPublish": "Publish", '
+ '"BXDGeno": "Geno", '
+ '"HC_M2_0606_P": "ProbeSet", '
+ '"BXDPublish": "Publish"}'))
@mock.patch('base.data_set.g')
def test_set_dataset_key_mrna(self, db_mock):
@@ -74,8 +79,17 @@ class TestDataSetTypes(unittest.TestCase):
self.assertEqual(data_set("Test"), "ProbeSet")
redis_mock.set.assert_called_once_with(
"dataset_structure",
- '{"Aging-Brain-UCIPublish": "Publish", "AKXDGeno": "Geno", "B139_K_1206_M": "ProbeSet", "AD-cases-controls-MyersGeno": "Geno", "AD-cases-controls-MyersPublish": "Publish", "All Phenotypes": "Publish", "Test": "ProbeSet", "AXBXAPublish": "Publish", "B139_K_1206_R": "ProbeSet", "AXBXAGeno": "Geno"}')
- expected_db_call = """"""
+ ('{"AD-cases-controls-MyersGeno": "Geno", '
+ '"AD-cases-controls-MyersPublish": "Publish", '
+ '"AKXDGeno": "Geno", '
+ '"AXBXAGeno": "Geno", '
+ '"AXBXAPublish": "Publish", '
+ '"Aging-Brain-UCIPublish": "Publish", '
+ '"All Phenotypes": "Publish", '
+ '"B139_K_1206_M": "ProbeSet", '
+ '"B139_K_1206_R": "ProbeSet", '
+ '"Test": "ProbeSet"}'))
+
db_mock.db.execute.assert_called_with(
("SELECT ProbeSetFreeze.Id FROM ProbeSetFreeze " +
"WHERE ProbeSetFreeze.Name = \"Test\" ")
@@ -92,12 +106,21 @@ class TestDataSetTypes(unittest.TestCase):
self.assertEqual(data_set("Test"), "Publish")
redis_mock.set.assert_called_once_with(
"dataset_structure",
- '{"Aging-Brain-UCIPublish": "Publish", "AKXDGeno": "Geno", "B139_K_1206_M": "ProbeSet", "AD-cases-controls-MyersGeno": "Geno", "AD-cases-controls-MyersPublish": "Publish", "All Phenotypes": "Publish", "Test": "Publish", "AXBXAPublish": "Publish", "B139_K_1206_R": "ProbeSet", "AXBXAGeno": "Geno"}')
+ ('{"AD-cases-controls-MyersGeno": "Geno", '
+ '"AD-cases-controls-MyersPublish": "Publish", '
+ '"AKXDGeno": "Geno", '
+ '"AXBXAGeno": "Geno", '
+ '"AXBXAPublish": "Publish", '
+ '"Aging-Brain-UCIPublish": "Publish", '
+ '"All Phenotypes": "Publish", '
+ '"B139_K_1206_M": "ProbeSet", '
+ '"B139_K_1206_R": "ProbeSet", '
+ '"Test": "Publish"}'))
db_mock.db.execute.assert_called_with(
- ("SELECT InfoFiles.GN_AccesionId " +
- "FROM InfoFiles, PublishFreeze, InbredSet " +
+ ("SELECT InfoFiles.GN_AccesionId "
+ "FROM InfoFiles, PublishFreeze, InbredSet "
"WHERE InbredSet.Name = 'Test' AND "
- "PublishFreeze.InbredSetId = InbredSet.Id AND " +
+ "PublishFreeze.InbredSetId = InbredSet.Id AND "
"InfoFiles.InfoPageName = PublishFreeze.Name")
)
@@ -110,9 +133,20 @@ class TestDataSetTypes(unittest.TestCase):
data_set = DatasetType(redis_mock)
data_set.set_dataset_key("other_pheno", "Test")
self.assertEqual(data_set("Test"), "Publish")
+
redis_mock.set.assert_called_once_with(
"dataset_structure",
- '{"Aging-Brain-UCIPublish": "Publish", "AKXDGeno": "Geno", "B139_K_1206_M": "ProbeSet", "AD-cases-controls-MyersGeno": "Geno", "AD-cases-controls-MyersPublish": "Publish", "All Phenotypes": "Publish", "Test": "Publish", "AXBXAPublish": "Publish", "B139_K_1206_R": "ProbeSet", "AXBXAGeno": "Geno"}')
+ ('{"AD-cases-controls-MyersGeno": "Geno", '
+ '"AD-cases-controls-MyersPublish": "Publish", '
+ '"AKXDGeno": "Geno", '
+ '"AXBXAGeno": "Geno", '
+ '"AXBXAPublish": "Publish", '
+ '"Aging-Brain-UCIPublish": "Publish", '
+ '"All Phenotypes": "Publish", '
+ '"B139_K_1206_M": "ProbeSet", '
+ '"B139_K_1206_R": "ProbeSet", '
+ '"Test": "Publish"}'))
+
db_mock.db.execute.assert_called_with(
("SELECT PublishFreeze.Name " +
"FROM PublishFreeze, InbredSet " +
@@ -131,8 +165,17 @@ class TestDataSetTypes(unittest.TestCase):
self.assertEqual(data_set("Test"), "Geno")
redis_mock.set.assert_called_once_with(
"dataset_structure",
- '{"Aging-Brain-UCIPublish": "Publish", "AKXDGeno": "Geno", "B139_K_1206_M": "ProbeSet", "AD-cases-controls-MyersGeno": "Geno", "AD-cases-controls-MyersPublish": "Publish", "All Phenotypes": "Publish", "Test": "Geno", "AXBXAPublish": "Publish", "B139_K_1206_R": "ProbeSet", "AXBXAGeno": "Geno"}')
- expected_db_call = """"""
+ ('{"AD-cases-controls-MyersGeno": "Geno", '
+ '"AD-cases-controls-MyersPublish": "Publish", '
+ '"AKXDGeno": "Geno", '
+ '"AXBXAGeno": "Geno", '
+ '"AXBXAPublish": "Publish", '
+ '"Aging-Brain-UCIPublish": "Publish", '
+ '"All Phenotypes": "Publish", '
+ '"B139_K_1206_M": "ProbeSet", '
+ '"B139_K_1206_R": "ProbeSet", '
+ '"Test": "Geno"}'))
+
db_mock.db.execute.assert_called_with(
- ("SELECT GenoFreeze.Id FROM GenoFreeze WHERE GenoFreeze.Name = \"Test\" ")
- )
+ ("SELECT GenoFreeze.Id FROM "
+ "GenoFreeze WHERE GenoFreeze.Name = \"Test\" "))
diff --git a/wqflask/tests/base/test_general_object.py b/wqflask/tests/base/test_general_object.py
index c7701021..00fd3c72 100644
--- a/wqflask/tests/base/test_general_object.py
+++ b/wqflask/tests/base/test_general_object.py
@@ -17,9 +17,9 @@ class TestGeneralObjectTests(unittest.TestCase):
def test_object_dict(self):
"""Test whether the base class is printed properly"""
test_obj = GeneralObject("a", name="test", value=1)
- self.assertEqual(str(test_obj), "value = 1\nname = test\n")
+ self.assertEqual(str(test_obj), "name = test\nvalue = 1\n")
self.assertEqual(
- repr(test_obj), "value = 1\nname = test\ncontents = ['a']\n")
+ repr(test_obj), "contents = ['a']\nname = test\nvalue = 1\n")
self.assertEqual(len(test_obj), 2)
self.assertEqual(test_obj["value"], 1)
test_obj["test"] = 1
@@ -36,6 +36,5 @@ class TestGeneralObjectTests(unittest.TestCase):
test_obj1 = GeneralObject("a", name="test", value=1)
test_obj2 = GeneralObject("b", name="test2", value=2)
test_obj3 = GeneralObject("a", name="test", x=1, y=2)
- self.assertTrue(test_obj1 == test_obj2 )
- self.assertFalse(test_obj1 == test_obj3 )
-
+ self.assertTrue(test_obj1 == test_obj2)
+ self.assertFalse(test_obj1 == test_obj3)
diff --git a/wqflask/tests/base/test_trait.py b/wqflask/tests/base/test_trait.py
index 1a3820f2..60ebaee0 100644
--- a/wqflask/tests/base/test_trait.py
+++ b/wqflask/tests/base/test_trait.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""Tests wqflask/base/trait.py"""
import unittest
-import mock
+from unittest import mock
from base.trait import GeneralTrait
from base.trait import retrieve_trait_info
@@ -38,9 +38,8 @@ class TestRetrieveTraitInfo(unittest.TestCase):
dataset={})
@mock.patch('base.trait.requests.get')
- @mock.patch('base.trait.g')
+ @mock.patch('base.trait.g', mock.Mock())
def test_retrieve_trait_info_with_empty_trait_info(self,
- g_mock,
requests_mock):
"""Empty trait info"""
requests_mock.return_value = TestNilResponse()
@@ -49,9 +48,8 @@ class TestRetrieveTraitInfo(unittest.TestCase):
dataset=mock.MagicMock())
@mock.patch('base.trait.requests.get')
- @mock.patch('base.trait.g')
+ @mock.patch('base.trait.g', mock.Mock())
def test_retrieve_trait_info_with_non_empty_trait_info(self,
- g_mock,
requests_mock):
"""Test that attributes are set"""
mock_dataset = mock.MagicMock()
@@ -66,9 +64,8 @@ class TestRetrieveTraitInfo(unittest.TestCase):
self.assertEqual(test_trait.d, 4)
@mock.patch('base.trait.requests.get')
- @mock.patch('base.trait.g')
+ @mock.patch('base.trait.g', mock.Mock())
def test_retrieve_trait_info_utf8_parsing(self,
- g_mock,
requests_mock):
"""Test that utf-8 strings are parsed correctly"""
utf_8_string = "test_string"
@@ -96,9 +93,9 @@ class TestRetrieveTraitInfo(unittest.TestCase):
test_trait = retrieve_trait_info(trait=mock_trait,
dataset=mock_dataset)
self.assertEqual(test_trait.abbreviation,
- "ファイルを画面毎に見て行くには、次のコマンドを使います。".decode('utf-8'))
+ "ファイルを画面毎に見て行くには、次のコマンドを使います。")
self.assertEqual(test_trait.authors,
- "Jane Doe かいと".decode('utf-8'))
+ "Jane Doe かいと")
@mock.patch('base.trait.requests.get')
@mock.patch('base.trait.g')
diff --git a/wqflask/tests/utility/test_authentication_tools.py b/wqflask/tests/utility/test_authentication_tools.py
index 99c74245..5c391be5 100644
--- a/wqflask/tests/utility/test_authentication_tools.py
+++ b/wqflask/tests/utility/test_authentication_tools.py
@@ -1,6 +1,6 @@
"""Tests for authentication tools"""
import unittest
-import mock
+from unittest import mock
from utility.authentication_tools import check_resource_availability
from utility.authentication_tools import add_new_resource
@@ -38,17 +38,15 @@ class TestCheckResourceAvailability(unittest.TestCase):
"""Test methods related to checking the resource availability"""
@mock.patch('utility.authentication_tools.add_new_resource')
@mock.patch('utility.authentication_tools.Redis')
- @mock.patch('utility.authentication_tools.g')
+ @mock.patch('utility.authentication_tools.g', mock.Mock())
@mock.patch('utility.authentication_tools.get_resource_id')
def test_check_resource_availability_default_mask(
self,
resource_id_mock,
- g_mock,
redis_mock,
add_new_resource_mock):
"""Test the resource availability with default mask"""
resource_id_mock.return_value = 1
- g_mock.return_value = mock.Mock()
redis_mock.smembers.return_value = []
test_dataset = mock.MagicMock()
type(test_dataset).type = mock.PropertyMock(return_value="Test")
@@ -58,18 +56,16 @@ class TestCheckResourceAvailability(unittest.TestCase):
@mock.patch('utility.authentication_tools.requests.get')
@mock.patch('utility.authentication_tools.add_new_resource')
@mock.patch('utility.authentication_tools.Redis')
- @mock.patch('utility.authentication_tools.g')
+ @mock.patch('utility.authentication_tools.g', TestUserSession())
@mock.patch('utility.authentication_tools.get_resource_id')
def test_check_resource_availability_non_default_mask(
self,
resource_id_mock,
- g_mock,
redis_mock,
add_new_resource_mock,
requests_mock):
"""Test the resource availability with default mask"""
resource_id_mock.return_value = 1
- g_mock.return_value = mock.Mock()
redis_mock.smembers.return_value = []
add_new_resource_mock.return_value = {"default_mask": 2}
requests_mock.return_value = TestResponse()
diff --git a/wqflask/tests/utility/test_hmac.py b/wqflask/tests/utility/test_hmac.py
index 16b50771..7c61c0a6 100644
--- a/wqflask/tests/utility/test_hmac.py
+++ b/wqflask/tests/utility/test_hmac.py
@@ -2,7 +2,7 @@
"""Test hmac utility functions"""
import unittest
-import mock
+from unittest import mock
from utility.hmac import data_hmac
from utility.hmac import url_for_hmac
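
The test modules above replace the external mock package with the standard-library
unittest.mock. A tiny self-contained sketch of the same patching pattern, including
passing a replacement object to @mock.patch so that no extra argument is injected into
the test method (as done for base.trait.g above); the patch target here is illustrative
only.

import json
import unittest
from unittest import mock


class ExamplePatchTest(unittest.TestCase):
    @mock.patch('json.loads', mock.Mock(return_value={"ok": True}))
    def test_patched_loads(self):
        # json.loads is replaced for the duration of this test only
        self.assertEqual(json.loads("ignored"), {"ok": True})


if __name__ == '__main__':
    unittest.main()
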
diff --git a/wqflask/tests/wqflask/api/test_gen_menu.py b/wqflask/tests/wqflask/api/test_gen_menu.py
index 239484aa..84898bd1 100644
--- a/wqflask/tests/wqflask/api/test_gen_menu.py
+++ b/wqflask/tests/wqflask/api/test_gen_menu.py
@@ -1,7 +1,8 @@
"""Test cases for wqflask.api.gen_menu"""
import unittest
-import mock
+from unittest import mock
+from wqflask import app
from wqflask.api.gen_menu import gen_dropdown_json
from wqflask.api.gen_menu import get_species
from wqflask.api.gen_menu import get_groups
@@ -17,6 +18,8 @@ class TestGenMenu(unittest.TestCase):
"""Tests for the gen_menu module"""
def setUp(self):
+ self.app_context = app.app_context()
+ self.app_context.push()
self.test_group = {
'mouse': [
['H_T1',
@@ -67,6 +70,9 @@ class TestGenMenu(unittest.TestCase):
}
}
+ def tearDown(self):
+ self.app_context.pop()
+
@mock.patch('wqflask.api.gen_menu.g')
def test_get_species(self, db_mock):
"""Test that assertion is raised when dataset and dataset_name
diff --git a/wqflask/tests/wqflask/marker_regression/test_display_mapping_results.py b/wqflask/tests/wqflask/marker_regression/test_display_mapping_results.py
index 6f791df1..8ae0f09f 100644
--- a/wqflask/tests/wqflask/marker_regression/test_display_mapping_results.py
+++ b/wqflask/tests/wqflask/marker_regression/test_display_mapping_results.py
@@ -1,6 +1,6 @@
import unittest
-from htmlgen import HTMLgen2 as HT
+import htmlgen as HT
from wqflask.marker_regression.display_mapping_results import (
DisplayMappingResults,
HtmlGenWrapper
@@ -26,9 +26,9 @@ class TestHtmlGenWrapper(unittest.TestCase):
width="10",
height="13",
usemap="#webqtlmap")),
- ("""<IMG src="test.png" height="13" width="10" """
- """alt="random" border="0" """
- """usemap="#webqtlmap">""")
+ ("""<img alt="random" border="0" height="13" """
+ """src="test.png" usemap="#webqtlmap" """
+ """width="10"/>""")
)
def test_create_form(self):
@@ -37,7 +37,7 @@ class TestHtmlGenWrapper(unittest.TestCase):
cgi="/testing/",
enctype='multipart/form-data',
name="formName",
- submit=HT.Input(type='hidden')
+ submit=HtmlGenWrapper.create_input_tag(type_='hidden', name='Default_Name')
)
test_image = HtmlGenWrapper.create_image_tag(
src="test.png",
@@ -49,10 +49,10 @@ class TestHtmlGenWrapper(unittest.TestCase):
)
self.assertEqual(
str(test_form).replace("\n", ""),
- ("""<FORM METHOD="POST" ACTION="/testing/" """
- """ENCTYPE="multipart/form-data" """
- """NAME="formName"><INPUT TYPE="hidden" """
- """NAME="Default_Name"></FORM>"""))
+ ("""<form action="/testing/" enctype="multipart/form-data" """
+ """method="POST" """
+ """name="formName"><input name="Default_Name" """
+ """type="hidden"/></form>"""))
hddn = {
'FormID': 'showDatabase',
'ProbeSetID': '_',
@@ -62,21 +62,26 @@ class TestHtmlGenWrapper(unittest.TestCase):
'incparentsf1': 'ON'
}
for key in hddn.keys():
- test_form.append(HT.Input(name=key, value=hddn[key],
- type='hidden'))
+ test_form.append(
+ HtmlGenWrapper.create_input_tag(
+ name=key,
+ value=hddn[key],
+ type_='hidden'))
test_form.append(test_image)
+
self.assertEqual(str(test_form).replace("\n", ""), (
- """<FORM METHOD="POST" ACTION="/testing/" """
- """ENCTYPE="multipart/form-data" NAME="formName">"""
- """<INPUT TYPE="hidden" NAME="database" VALUE="TestGeno">"""
- """<INPUT TYPE="hidden" NAME="incparentsf1" VALUE="ON">"""
- """<INPUT TYPE="hidden" NAME="FormID" VALUE="showDatabase">"""
- """<INPUT TYPE="hidden" NAME="ProbeSetID" VALUE="_">"""
- """<INPUT TYPE="hidden" NAME="RISet" VALUE="Test">"""
- """<INPUT TYPE="hidden" NAME="CellID" VALUE="_">"""
- """<IMG src="test.png" height="13" width="10" alt="random" """
- """border="0" usemap="#webqtlmap">"""
- """<INPUT TYPE="hidden" NAME="Default_Name"></FORM>"""))
+ """<form action="/testing/" enctype="multipart/form-data" """
+ """method="POST" name="formName">"""
+ """<input name="Default_Name" type="hidden"/>"""
+ """<input name="FormID" type="hidden" value="showDatabase"/>"""
+ """<input name="ProbeSetID" type="hidden" value="_"/>"""
+ """<input name="database" type="hidden" value="TestGeno"/>"""
+ """<input name="CellID" type="hidden" value="_"/>"""
+ """<input name="RISet" type="hidden" value="Test"/>"""
+ """<input name="incparentsf1" type="hidden" value="ON"/>"""
+ """<img alt="random" border="0" height="13" src="test.png" """
+ """usemap="#webqtlmap" width="10"/>"""
+ """</form>"""))
def test_create_paragraph(self):
"""Test HT.Paragraph method"""
@@ -89,48 +94,48 @@ class TestHtmlGenWrapper(unittest.TestCase):
)
self.assertEqual(
str(test_p_element),
- """<P id="smallSize"></P>"""
+ """<p id="smallSize"></p>"""
)
- test_p_element.append(HT.BR())
+ test_p_element.append(HtmlGenWrapper.create_br_tag())
test_p_element.append(par_text)
self.assertEqual(
str(test_p_element),
- """<P id="smallSize"><BR>{}</P>""".format(par_text)
+ """<p id="smallSize"><br/>{}</p>""".format(par_text)
)
def test_create_br_tag(self):
"""Test HT.BR() method"""
self.assertEqual(str(HtmlGenWrapper.create_br_tag()),
- "<BR>")
+ "<br/>")
def test_create_input_tag(self):
"""Test HT.Input method"""
self.assertEqual(
str(HtmlGenWrapper.create_input_tag(
- type="hidden",
+ type_="hidden",
name="name",
value="key",
Class="trait trait_")).replace("\n", ""),
- ("""<INPUT TYPE="hidden" NAME="name" """
- """class="trait trait_" VALUE="key">"""))
+ ("""<input class="trait trait_" name="name" """
+ """type="hidden" value="key"/>"""))
def test_create_map_tag(self):
"""Test HT.Map method"""
self.assertEqual(str(HtmlGenWrapper.create_map_tag(
name="WebqTLImageMap")).replace("\n", ""),
- """<MAP NAME="WebqTLImageMap"></MAP>""")
- gifmap = HtmlGenWrapper.create_map_tag(areas=[])
- gifmap.areas.append(HT.Area(shape="rect",
- coords='1 2 3', href='#area1'))
- gifmap.areas.append(HT.Area(shape="rect",
- coords='1 2 3', href='#area2'))
+ """<map name="WebqTLImageMap"></map>""")
+ gifmap = HtmlGenWrapper.create_map_tag(name="test")
+ gifmap.append(HtmlGenWrapper.create_area_tag(shape="rect",
+ coords='1 2 3', href='#area1'))
+ gifmap.append(HtmlGenWrapper.create_area_tag(shape="rect",
+ coords='1 2 3', href='#area2'))
self.assertEqual(
str(gifmap).replace("\n", ""),
- ("""<MAP NAME="">"""
- """<AREA coords="1 2 3" """
- """href="#area1" shape="rect">"""
- """<AREA coords="1 2 3" href="#area2" shape="rect">"""
- """</MAP>"""))
+ ("""<map name="test">"""
+ """<area coords="1 2 3" """
+ """href="#area1" shape="rect"/>"""
+ """<area coords="1 2 3" href="#area2" shape="rect"/>"""
+ """</map>"""))
def test_create_area_tag(self):
"""Test HT.Area method"""
@@ -140,12 +145,12 @@ class TestHtmlGenWrapper(unittest.TestCase):
coords="1 2",
href="http://test.com",
title="Some Title")).replace("\n", ""),
- ("""<AREA coords="1 2" href="http://test.com" """
- """shape="rect" title="Some Title">"""))
+ ("""<area coords="1 2" href="http://test.com" """
+ """shape="rect" title="Some Title"/>"""))
def test_create_link_tag(self):
"""Test HT.HREF method"""
self.assertEqual(
str(HtmlGenWrapper.create_link_tag(
"www.test.com", "test", target="_blank")).replace("\n", ""),
- """<A HREF="www.test.com" TARGET="_blank">test</A>""")
+ """<a href="www.test.com" target="_blank">test</a>""")
diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py
index b9b71129..61f408d2 100644
--- a/wqflask/utility/Plot.py
+++ b/wqflask/utility/Plot.py
@@ -24,15 +24,13 @@
#
# Last updated by GeneNetwork Core Team 2010/10/20
-from __future__ import print_function
-
from PIL import ImageColor
from PIL import ImageDraw
from PIL import ImageFont
from math import *
-import corestats
+import utility.corestats as corestats
from base import webqtlConfig
from utility.pillow_utils import draw_rotated_text
import utility.logger
@@ -54,7 +52,7 @@ def cformat(d, rank=0):
strD = "%2.6f" % d
if rank == 0:
- while strD[-1] in ('0','.'):
+ while strD[-1] in ('0', '.'):
if strD[-1] == '0' and strD[-2] == '.' and len(strD) <= 4:
break
elif strD[-1] == '.':
@@ -82,7 +80,7 @@ def frange(start, end=None, inc=1.0):
# Need to adjust the count. AFAICT, it always comes up one short.
count += 1
L = [start] * count
- for i in xrange(1, count):
+ for i in range(1, count):
L[i] = start + i * inc
return L
@@ -93,7 +91,7 @@ def find_outliers(vals):
>>> find_outliers([3.504, 5.234, 6.123, 7.234, 3.542, 5.341, 7.852, 4.555, 12.537])
(11.252500000000001, 0.5364999999999993)
- >>> >>> find_outliers([9,12,15,17,31,50,7,5,6,8])
+ >>> find_outliers([9,12,15,17,31,50,7,5,6,8])
(32.0, -8.0)
If there are no vals, returns None for the upper and lower bounds,
@@ -158,7 +156,7 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab
j = int((item-xLow)/step)
Count[j] += 1
- yLow, yTop, stepY=detScale(0,max(Count))
+ yLow, yTop, stepY=detScale(0, max(Count))
#draw data
xScale = plotWidth/(xTop-xLow)
@@ -170,7 +168,7 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab
xc = (dataXY[i]-xLow)*xScale+xLeftOffset
yc =-(count-yLow)*yScale+yTopOffset+plotHeight
im_drawer.rectangle(
- xy=((xc+2,yc),(xc+barWidth-2,yTopOffset+plotHeight)),
+ xy=((xc+2, yc), (xc+barWidth-2, yTopOffset+plotHeight)),
outline=barColor, fill=barColor)
#draw drawing region
@@ -179,81 +177,81 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab
)
#draw scale
- scaleFont=ImageFont.truetype(font=COUR_FILE,size=11)
+ scaleFont=ImageFont.truetype(font=COUR_FILE, size=11)
x=xLow
for i in range(int(stepX)+1):
xc=xLeftOffset+(x-xLow)*xScale
im_drawer.line(
- xy=((xc,yTopOffset+plotHeight),(xc,yTopOffset+plotHeight+5)),
+ xy=((xc, yTopOffset+plotHeight), (xc, yTopOffset+plotHeight+5)),
fill=axesColor)
strX = cformat(d=x, rank=0)
im_drawer.text(
text=strX,
- xy=(xc-im_drawer.textsize(strX,font=scaleFont)[0]/2,
- yTopOffset+plotHeight+14),font=scaleFont)
+ xy=(xc-im_drawer.textsize(strX, font=scaleFont)[0]/2,
+ yTopOffset+plotHeight+14), font=scaleFont)
x+= (xTop - xLow)/stepX
y=yLow
for i in range(int(stepY)+1):
yc=yTopOffset+plotHeight-(y-yLow)*yScale
- im_drawer.line(xy=((xLeftOffset,yc),(xLeftOffset-5,yc)), fill=axesColor)
+ im_drawer.line(xy=((xLeftOffset, yc), (xLeftOffset-5, yc)), fill=axesColor)
strY = "%d" %y
im_drawer.text(
text=strY,
- xy=(xLeftOffset-im_drawer.textsize(strY,font=scaleFont)[0]-6,yc+5),
+ xy=(xLeftOffset-im_drawer.textsize(strY, font=scaleFont)[0]-6, yc+5),
font=scaleFont)
y+= (yTop - yLow)/stepY
#draw label
- labelFont=ImageFont.truetype(font=TAHOMA_FILE,size=17)
+ labelFont=ImageFont.truetype(font=TAHOMA_FILE, size=17)
if XLabel:
im_drawer.text(
text=XLabel,
xy=(xLeftOffset+(
- plotWidth-im_drawer.textsize(XLabel,font=labelFont)[0])/2.0,
+ plotWidth-im_drawer.textsize(XLabel, font=labelFont)[0])/2.0,
yTopOffset+plotHeight+yBottomOffset-10),
- font=labelFont,fill=labelColor)
+ font=labelFont, fill=labelColor)
if YLabel:
draw_rotated_text(canvas, text=YLabel,
xy=(19,
yTopOffset+plotHeight-(
plotHeight-im_drawer.textsize(
- YLabel,font=labelFont)[0])/2.0),
+ YLabel, font=labelFont)[0])/2.0),
font=labelFont, fill=labelColor, angle=90)
- labelFont=ImageFont.truetype(font=VERDANA_FILE,size=16)
+ labelFont=ImageFont.truetype(font=VERDANA_FILE, size=16)
if title:
im_drawer.text(
text=title,
xy=(xLeftOffset+(plotWidth-im_drawer.textsize(
- title,font=labelFont)[0])/2.0,
+ title, font=labelFont)[0])/2.0,
20),
- font=labelFont,fill=labelColor)
+ font=labelFont, fill=labelColor)
# This function determines the scale of the plot
-def detScaleOld(min,max):
+def detScaleOld(min, max):
if min>=max:
return None
elif min == -1.0 and max == 1.0:
- return [-1.2,1.2,12]
+ return [-1.2, 1.2, 12]
else:
a=max-min
b=floor(log10(a))
- c=pow(10.0,b)
+ c=pow(10.0, b)
if a < c*5.0:
c/=2.0
#print a,b,c
low=c*floor(min/c)
high=c*ceil(max/c)
- return [low,high,round((high-low)/c)]
+ return [low, high, round((high-low)/c)]
def detScale(min=0,max=0):
if min>=max:
return None
elif min == -1.0 and max == 1.0:
- return [-1.2,1.2,12]
+ return [-1.2, 1.2, 12]
else:
a=max-min
if max != 0:
@@ -265,7 +263,7 @@ def detScale(min=0,max=0):
min -= 0.1*a
a=max-min
b=floor(log10(a))
- c=pow(10.0,b)
+ c=pow(10.0, b)
low=c*floor(min/c)
high=c*ceil(max/c)
n = round((high-low)/c)
@@ -283,7 +281,7 @@ def detScale(min=0,max=0):
high=c*ceil(max/c)
n = round((high-low)/c)
- return [low,high,n]
+ return [low, high, n]
def bluefunc(x):
return 1.0 / (1.0 + exp(-10*(x-0.6)))
@@ -292,7 +290,7 @@ def redfunc(x):
return 1.0 / (1.0 + exp(10*(x-0.5)))
def greenfunc(x):
- return 1 - pow(redfunc(x+0.2),2) - bluefunc(x-0.3)
+ return 1 - pow(redfunc(x+0.2), 2) - bluefunc(x-0.3)
def colorSpectrum(n=100):
multiple = 10
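The xrange-to-range switch above is the usual Python 3 change: range() already returns a lazy sequence, so the old xrange() spelling disappears. A minimal sketch of the frange() idea in Python 3 terms (illustrative, not the module's exact code):

    def frange(start, end=None, inc=1.0):
        # frange(stop) and frange(start, stop, inc) both return a list of floats
        if end is None:
            start, end = 0.0, start
        count = int((end - start) / inc) + 1  # one extra slot, as in the helper above
        return [start + i * inc for i in range(count)]

    frange(0, 1, 0.25)  # [0.0, 0.25, 0.5, 0.75, 1.0]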
diff --git a/wqflask/utility/__init__.py b/wqflask/utility/__init__.py
index d9856eed..204ff59a 100644
--- a/wqflask/utility/__init__.py
+++ b/wqflask/utility/__init__.py
@@ -19,7 +19,7 @@ class Struct(object):
'''
def __init__(self, obj):
- for k, v in obj.iteritems():
+ for k, v in list(obj.items()):
if isinstance(v, dict):
setattr(self, k, Struct(v))
else:
@@ -30,6 +30,6 @@ class Struct(object):
def __repr__(self):
return '{%s}' % str(', '.join('%s : %s' % (k, repr(v)) for
- (k, v) in self.__dict__.iteritems()))
+ (k, v) in list(self.__dict__.items())))
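In Python 3 dict.iteritems() is gone; items() returns a view that can be iterated directly (the list() wrapper is only required when the dict is mutated while iterating). A small sketch of the Struct pattern with illustrative data:

    class Struct:
        """Recursively expose a nested dict as attributes."""
        def __init__(self, obj):
            for k, v in obj.items():
                setattr(self, k, Struct(v) if isinstance(v, dict) else v)

    s = Struct({"group": {"name": "BXD"}, "type": "ProbeSet"})
    s.group.name  # 'BXD'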
diff --git a/wqflask/utility/after.py b/wqflask/utility/after.py
index b628a0a4..06091ecb 100644
--- a/wqflask/utility/after.py
+++ b/wqflask/utility/after.py
@@ -1,5 +1,3 @@
-from __future__ import print_function, division, absolute_import
-
"""
See: http://flask.pocoo.org/docs/patterns/deferredcallbacks/#deferred-callbacks
@@ -13,4 +11,4 @@ def after_this_request(f):
if not hasattr(g, 'after_request_callbacks'):
g.after_request_callbacks = []
g.after_request_callbacks.append(f)
- return f \ No newline at end of file
+ return f
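after_this_request() is the Flask deferred-callbacks pattern from the linked documentation: a view registers a callback on g and a single after_request hook replays the callbacks against the outgoing response. A self-contained sketch of how the two halves fit together (the hook shown here is the standard counterpart from that pattern, not code from this file):

    from flask import Flask, g

    app = Flask(__name__)

    def after_this_request(f):
        if not hasattr(g, 'after_request_callbacks'):
            g.after_request_callbacks = []
        g.after_request_callbacks.append(f)
        return f

    @app.after_request
    def call_after_request_callbacks(response):
        for callback in getattr(g, 'after_request_callbacks', ()):
            response = callback(response)
        return response

    @app.route("/ping")
    def ping():
        @after_this_request
        def add_header(response):
            response.headers['X-Answered'] = 'yes'
            return response
        return "pong"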
diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py
index 239b08e3..ce0c0749 100644
--- a/wqflask/utility/authentication_tools.py
+++ b/wqflask/utility/authentication_tools.py
@@ -1,34 +1,37 @@
-from __future__ import absolute_import, print_function, division
-import logging
-from flask import Flask, g, redirect, url_for
-
import json
import requests
-from base import data_set, webqtlConfig
-
-from utility import hmac
-from utility.redis_tools import get_redis_conn, get_resource_info, get_resource_id, add_resource
-Redis = get_redis_conn()
+from flask import g
+from base import webqtlConfig
-logger = logging.getLogger(__name__)
+from utility.redis_tools import (get_redis_conn,
+ get_resource_info,
+ get_resource_id,
+ add_resource)
+Redis = get_redis_conn()
def check_resource_availability(dataset, trait_id=None):
-
# At least for now assume temporary entered traits are accessible
if type(dataset) == str or dataset.type == "Temp":
return webqtlConfig.DEFAULT_PRIVILEGES
resource_id = get_resource_id(dataset, trait_id)
- if resource_id: # ZS: This should never be false, but it's technically possible if a non-Temp dataset somehow had a type other than Publish/ProbeSet/Geno
+ # ZS: This should never be false, but it's technically possible if
+ # a non-Temp dataset somehow had a type other than
+ # Publish/ProbeSet/Geno
+ if resource_id:
resource_info = get_resource_info(resource_id)
- if not resource_info: # ZS: If resource isn't already in redis, add it with default privileges
+
+ # ZS: If resource isn't already in redis, add it with default
+ # privileges
+ if not resource_info:
resource_info = add_new_resource(dataset, trait_id)
- # ZS: Check if super-user - we should probably come up with some way to integrate this into the proxy
+ # ZS: Check if super-user - we should probably come up with some
+ # way to integrate this into the proxy
if g.user_session.user_id in Redis.smembers("super_users"):
return webqtlConfig.SUPER_PRIVILEGES
@@ -52,7 +55,10 @@ def add_new_resource(dataset, trait_id=None):
}
if dataset.type == "Publish":
- resource_ob['name'] = get_group_code(dataset) + "_" + str(trait_id)
+ group_code = get_group_code(dataset)
+ if group_code is None:
+ group_code = ""
+ resource_ob['name'] = group_code + "_" + str(trait_id)
resource_ob['data'] = {
'dataset': dataset.id,
'trait': trait_id
@@ -77,8 +83,9 @@ def add_new_resource(dataset, trait_id=None):
def get_group_code(dataset):
- results = g.db.execute("SELECT InbredSetCode from InbredSet where Name='{}'".format(
- dataset.group.name)).fetchone()
+ results = g.db.execute(
+ "SELECT InbredSetCode from InbredSet where Name='{}'".format(
+ dataset.group.name)).fetchone()
if results[0]:
return results[0]
else:
diff --git a/wqflask/utility/benchmark.py b/wqflask/utility/benchmark.py
index 8f1c916b..ea5a0ab6 100644
--- a/wqflask/utility/benchmark.py
+++ b/wqflask/utility/benchmark.py
@@ -1,5 +1,3 @@
-from __future__ import print_function, division, absolute_import
-
import collections
import inspect
import time
@@ -38,9 +36,9 @@ class Bench(object):
@classmethod
def report(cls):
- total_time = sum((time_taken for time_taken in cls.entries.itervalues()))
+ total_time = sum((time_taken for time_taken in list(cls.entries.values())))
print("\nTiming report\n")
- for name, time_taken in cls.entries.iteritems():
+ for name, time_taken in list(cls.entries.items()):
percent = int(round((time_taken/total_time) * 100))
print("[{}%] {}: {}".format(percent, name, time_taken))
print()
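itervalues() and iteritems() have no Python 3 equivalents; values() and items() cover the same report loop. With illustrative timings:

    entries = {"load dataset": 0.8, "run mapping": 3.2}
    total_time = sum(entries.values())
    for name, time_taken in entries.items():
        percent = int(round((time_taken / total_time) * 100))
        print("[{}%] {}: {}".format(percent, name, time_taken))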
diff --git a/wqflask/utility/chunks.py b/wqflask/utility/chunks.py
index d91b9bf4..9a7db102 100644
--- a/wqflask/utility/chunks.py
+++ b/wqflask/utility/chunks.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
import math
import time
diff --git a/wqflask/utility/db_tools.py b/wqflask/utility/db_tools.py
index 4034f39c..6e19778f 100644
--- a/wqflask/utility/db_tools.py
+++ b/wqflask/utility/db_tools.py
@@ -1,6 +1,5 @@
-from __future__ import absolute_import, print_function, division
+from MySQLdb import escape_string as escape_
-from MySQLdb import escape_string as escape
def create_in_clause(items):
"""Create an in clause for mysql"""
@@ -8,8 +7,11 @@ def create_in_clause(items):
in_clause = '( {} )'.format(in_clause)
return in_clause
+
def mescape(*items):
"""Multiple escape"""
- escaped = [escape(str(item)) for item in items]
- #print("escaped is:", escaped)
- return escaped
+ return [escape_(str(item)).decode('utf8') for item in items]
+
+
+def escape(string_):
+ return escape_(string_).decode('utf8')
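Under Python 3 with the mysqlclient driver, escape_string() returns bytes rather than str, which is why the helpers above decode to UTF-8 before the value is spliced into SQL text. A usage sketch, assuming the driver accepts str input as these helpers do:

    from MySQLdb import escape_string as escape_

    def escape(string_):
        # bytes come back from the driver; hand str to the SQL-building code
        return escape_(string_).decode('utf8')

    escape("C57BL/6J")  # 'C57BL/6J' (quotes and backslashes would come back escaped)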
diff --git a/wqflask/utility/elasticsearch_tools.py b/wqflask/utility/elasticsearch_tools.py
index 15cdd0bc..a5580811 100644
--- a/wqflask/utility/elasticsearch_tools.py
+++ b/wqflask/utility/elasticsearch_tools.py
@@ -59,7 +59,7 @@ def get_elasticsearch_connection(for_user=True):
try:
assert(ELASTICSEARCH_HOST)
assert(ELASTICSEARCH_PORT)
- logger.info("ES HOST",ELASTICSEARCH_HOST)
+ logger.info("ES HOST", ELASTICSEARCH_HOST)
es = Elasticsearch([{
"host": ELASTICSEARCH_HOST, "port": ELASTICSEARCH_PORT
diff --git a/wqflask/utility/gen_geno_ob.py b/wqflask/utility/gen_geno_ob.py
index 23b0b650..81085ffe 100644
--- a/wqflask/utility/gen_geno_ob.py
+++ b/wqflask/utility/gen_geno_ob.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
import utility.logger
logger = utility.logger.getLogger(__name__ )
@@ -175,7 +173,7 @@ class Locus(object):
start_pos = 3
for allele in marker_row[start_pos:]:
- if allele in geno_table.keys():
+ if allele in list(geno_table.keys()):
self.genotype.append(geno_table[allele])
else: #ZS: A genotype appeared that isn't specified in the metadata; make it unknown
- self.genotype.append("U") \ No newline at end of file
+ self.genotype.append("U")
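The list(geno_table.keys()) wrapper is a mechanical 2to3 idiom; a dict supports membership tests directly, so the same lookup can also be written with in or dict.get(). With illustrative values:

    geno_table = {"B": -1, "D": 1, "H": 0}  # illustrative allele -> genotype mapping
    for allele in ("B", "D", "X"):
        genotype = geno_table.get(allele, "U")  # "U" for anything not in the metadata
        print(allele, genotype)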
diff --git a/wqflask/utility/genofile_parser.py b/wqflask/utility/genofile_parser.py
index af306731..0b736176 100644
--- a/wqflask/utility/genofile_parser.py
+++ b/wqflask/utility/genofile_parser.py
@@ -1,7 +1,6 @@
# CTL analysis for GN2
# Author / Maintainer: Danny Arends <Danny.Arends@gmail.com>
-from __future__ import print_function, division, absolute_import
import sys
import os
import glob
diff --git a/wqflask/utility/helper_functions.py b/wqflask/utility/helper_functions.py
index 9ce809b6..7eb7f013 100644
--- a/wqflask/utility/helper_functions.py
+++ b/wqflask/utility/helper_functions.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
from base import data_set
from base.trait import create_trait
from base.species import TheSpecies
@@ -13,7 +11,7 @@ logger = logging.getLogger(__name__ )
def get_species_dataset_trait(self, start_vars):
#assert type(read_genotype) == type(bool()), "Expecting boolean value for read_genotype"
- if "temp_trait" in start_vars.keys():
+ if "temp_trait" in list(start_vars.keys()):
if start_vars['temp_trait'] == "True":
self.dataset = data_set.create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = start_vars['group'])
else:
@@ -34,7 +32,7 @@ def get_species_dataset_trait(self, start_vars):
#self.genotype = self.dataset.group.genotype
def get_trait_db_obs(self, trait_db_list):
- if isinstance(trait_db_list, basestring):
+ if isinstance(trait_db_list, str):
trait_db_list = trait_db_list.split(",")
self.trait_list = []
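basestring no longer exists in Python 3; since all text is str, a plain isinstance(..., str) check covers the old str/unicode cases. A sketch of the same normalisation step (function name and trait ids are illustrative):

    def as_trait_db_list(trait_db_list):
        if isinstance(trait_db_list, str):  # a comma-joined string from the form
            trait_db_list = trait_db_list.split(",")
        return trait_db_list

    as_trait_db_list("1446110_at:HC_M2_0606_P,1427571_at:HC_M2_0606_P")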
diff --git a/wqflask/utility/hmac.py b/wqflask/utility/hmac.py
index fd75803e..6623f69a 100644
--- a/wqflask/utility/hmac.py
+++ b/wqflask/utility/hmac.py
@@ -1,5 +1,3 @@
-from __future__ import print_function, division, absolute_import
-
import hmac
import hashlib
@@ -12,7 +10,9 @@ def hmac_creation(stringy):
"""Helper function to create the actual hmac"""
secret = app.config['SECRET_HMAC_CODE']
- hmaced = hmac.new(secret, stringy, hashlib.sha1)
+ hmaced = hmac.new(bytearray(secret, "utf-8"),
+ bytearray(stringy, "utf-8"),
+ hashlib.sha1)
hm = hmaced.hexdigest()
# ZS: Leaving the below comment here to ask Pjotr about
# "Conventional wisdom is that you don't lose much in terms of security if you throw away up to half of the output."
diff --git a/wqflask/utility/logger.py b/wqflask/utility/logger.py
index 510b1041..e904eb94 100644
--- a/wqflask/utility/logger.py
+++ b/wqflask/utility/logger.py
@@ -42,10 +42,10 @@ class GNLogger:
"""
- def __init__(self,name):
+ def __init__(self, name):
self.logger = logging.getLogger(name)
- def setLevel(self,value):
+ def setLevel(self, value):
"""Set the undelying log level"""
self.logger.setLevel(value)
@@ -54,7 +54,7 @@ class GNLogger:
level=num to filter on LOG_LEVEL_DEBUG.
"""
- self.collect(self.logger.debug,*args)
+ self.collect(self.logger.debug, *args)
def debug20(self,*args):
"""Call logging.debug for multiple args. Use level=num to filter on
@@ -63,15 +63,15 @@ LOG_LEVEL_DEBUG (NYI).
"""
if level <= LOG_LEVEL_DEBUG:
if self.logger.getEffectiveLevel() < 20:
- self.collect(self.logger.debug,*args)
+ self.collect(self.logger.debug, *args)
def info(self,*args):
"""Call logging.info for multiple args"""
- self.collect(self.logger.info,*args)
+ self.collect(self.logger.info, *args)
def warning(self,*args):
"""Call logging.warning for multiple args"""
- self.collect(self.logger.warning,*args)
+ self.collect(self.logger.warning, *args)
# self.logger.warning(self.collect(*args))
def error(self,*args):
@@ -79,13 +79,13 @@ LOG_LEVEL_DEBUG (NYI).
now = datetime.datetime.utcnow()
time_str = now.strftime('%H:%M:%S UTC %Y%m%d')
l = [time_str]+list(args)
- self.collect(self.logger.error,*l)
+ self.collect(self.logger.error, *l)
def infof(self,*args):
"""Call logging.info for multiple args lazily"""
# only evaluate function when logging
if self.logger.getEffectiveLevel() < 30:
- self.collectf(self.logger.debug,*args)
+ self.collectf(self.logger.debug, *args)
def debugf(self,level=0,*args):
"""Call logging.debug for multiple args lazily and handle
@@ -95,15 +95,15 @@ LOG_LEVEL_DEBUG (NYI).
# only evaluate function when logging
if level <= LOG_LEVEL_DEBUG:
if self.logger.getEffectiveLevel() < 20:
- self.collectf(self.logger.debug,*args)
+ self.collectf(self.logger.debug, *args)
def sql(self, sqlcommand, fun = None):
"""Log SQL command, optionally invoking a timed fun"""
if LOG_SQL:
caller = stack()[1][3]
- if caller in ['fetchone','fetch1','fetchall']:
+ if caller in ['fetchone', 'fetch1', 'fetchall']:
caller = stack()[2][3]
- self.info(caller,sqlcommand)
+ self.info(caller, sqlcommand)
if fun:
result = fun(sqlcommand)
if LOG_SQL:
@@ -119,7 +119,7 @@ LOG_LEVEL_DEBUG (NYI).
if isinstance(a, str):
out = out + a
else:
- out = out + pf(a,width=160)
+ out = out + pf(a, width=160)
fun(out)
def collectf(self,fun,*args):
@@ -134,7 +134,7 @@ LOG_LEVEL_DEBUG (NYI).
if isinstance(a, str):
out = out + a
else:
- out = out + pf(a,width=160)
+ out = out + pf(a, width=160)
fun(out)
# Get the module logger. You can override log levels at the
diff --git a/wqflask/utility/pillow_utils.py b/wqflask/utility/pillow_utils.py
index 0c2ce7af..c486abba 100644
--- a/wqflask/utility/pillow_utils.py
+++ b/wqflask/utility/pillow_utils.py
@@ -12,9 +12,9 @@ WHITE = ImageColor.getrgb("white")
def draw_rotated_text(canvas, text, font, xy, fill=BLACK, angle=-90):
# type: (Image, str, ImageFont, tuple, ImageColor, int)
"""Utility function draw rotated text"""
- tmp_img = Image.new("RGBA", font.getsize(text), color=(0,0,0,0))
+ tmp_img = Image.new("RGBA", font.getsize(text), color=(0, 0, 0, 0))
draw_text = ImageDraw.Draw(tmp_img)
- draw_text.text(text=text, xy=(0,0), font=font, fill=fill)
+ draw_text.text(text=text, xy=(0, 0), font=font, fill=fill)
tmp_img2 = tmp_img.rotate(angle, expand=1)
tmp_img2.save("/{0}/{1}.png".format(TEMPDIR, text), format="png")
canvas.paste(im=tmp_img2, box=tuple([int(i) for i in xy]))
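draw_rotated_text() renders the text into a temporary RGBA image, rotates it by the given angle, and pastes the result onto the canvas at xy. A usage sketch (canvas size and font path are illustrative):

    from PIL import Image, ImageFont
    from utility.pillow_utils import draw_rotated_text

    canvas = Image.new("RGBA", (400, 300), (255, 255, 255, 255))
    font = ImageFont.truetype("/usr/share/fonts/truetype/verdana.ttf", 16)
    draw_rotated_text(canvas, "Expression (log2)", font=font, xy=(10, 280), angle=90)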
diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py
index ef02268e..d855a7fa 100644
--- a/wqflask/utility/redis_tools.py
+++ b/wqflask/utility/redis_tools.py
@@ -1,5 +1,3 @@
-from __future__ import print_function, division, absolute_import
-
import uuid
import simplejson as json
import datetime
diff --git a/wqflask/utility/startup_config.py b/wqflask/utility/startup_config.py
index 817284dd..f1aaebb6 100644
--- a/wqflask/utility/startup_config.py
+++ b/wqflask/utility/startup_config.py
@@ -27,7 +27,7 @@ def app_config():
port = get_setting_int("SERVER_PORT")
if get_setting_bool("USE_GN_SERVER"):
- print("GN2 API server URL is ["+BLUE+get_setting("GN_SERVER_URL")+ENDC+"]")
+ print(("GN2 API server URL is ["+BLUE+get_setting("GN_SERVER_URL")+ENDC+"]"))
import requests
page = requests.get(get_setting("GN_SERVER_URL"))
if page.status_code != 200:
@@ -36,4 +36,4 @@ def app_config():
# import utility.elasticsearch_tools as es
# es.test_elasticsearch_connection()
- print("GN2 is running. Visit %s[http://localhost:%s/%s](%s)" % (BLUE,str(port),ENDC,get_setting("WEBSERVER_URL")))
+ print(("GN2 is running. Visit %s[http://localhost:%s/%s](%s)" % (BLUE, str(port), ENDC, get_setting("WEBSERVER_URL"))))
diff --git a/wqflask/utility/svg.py b/wqflask/utility/svg.py
index db13b9d1..b92cc2d1 100644
--- a/wqflask/utility/svg.py
+++ b/wqflask/utility/svg.py
@@ -25,54 +25,56 @@
# Last updated by GeneNetwork Core Team 2010/10/20
#!/usr/bin/env python
-##Copyright (c) 2002, Fedor Baart & Hans de Wit (Stichting Farmaceutische Kengetallen)
-##All rights reserved.
+# Copyright (c) 2002, Fedor Baart & Hans de Wit (Stichting Farmaceutische Kengetallen)
+# All rights reserved.
##
-##Redistribution and use in source and binary forms, with or without modification,
-##are permitted provided that the following conditions are met:
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
##
-##Redistributions of source code must retain the above copyright notice, this
-##list of conditions and the following disclaimer.
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
##
-##Redistributions in binary form must reproduce the above copyright notice,
-##this list of conditions and the following disclaimer in the documentation and/or
-##other materials provided with the distribution.
+# Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation and/or
+# other materials provided with the distribution.
##
-##Neither the name of the Stichting Farmaceutische Kengetallen nor the names of
-##its contributors may be used to endorse or promote products derived from this
-##software without specific prior written permission.
+# Neither the name of the Stichting Farmaceutische Kengetallen nor the names of
+# its contributors may be used to endorse or promote products derived from this
+# software without specific prior written permission.
##
-##THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-##AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-##IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-##DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
-##FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-##DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-##SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-##CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-##OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-##OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Thanks to Gerald Rosennfellner for his help and useful comments.
-##Thanks to Gerald Rosennfellner for his help and useful comments.
-
-__doc__="""Use SVGdraw to generate your SVGdrawings.
+import sys
+import exceptions
+__doc__ = """Use SVGdraw to generate your SVGdrawings.
SVGdraw uses an object model drawing and a method toXML to create SVG graphics
by using easy-to-use classes and methods. Usually you start by creating a drawing, e.g.
d=drawing()
- #then you create a SVG root element
+ # then you create a SVG root element
s=svg()
- #then you add some elements eg a circle and add it to the svg root element
+ # then you add some elements eg a circle and add it to the svg root element
c=circle()
- #you can supply attributes by using named arguments.
+ # you can supply attributes by using named arguments.
c=circle(fill='red',stroke='blue')
- #or by updating the attributes attribute:
+ # or by updating the attributes attribute:
c.attributes['stroke-width']=1
s.addElement(c)
- #then you add the svg root element to the drawing
+ # then you add the svg root element to the drawing
d.setSVG(s)
- #and finaly you xmlify the drawing
+ # and finally you xmlify the drawing
d.toXml()
@@ -82,7 +84,7 @@ This module was created using the SVG specification of www.w3c.org and the
O'Reilly (www.oreilly.com) python books as information sources. A svg viewer
is available from www.adobe.com"""
-__version__="1.0"
+__version__ = "1.0"
# there are two possibilities to generate svg:
# via a dom implementation and directly using <element>text</element> strings
@@ -93,33 +95,34 @@ __version__="1.0"
# Note that PyXML is required for using the dom implementation.
# It is also possible to use the standard minidom. But I didn't try that one.
# Anyway the text based approach is about 60 times faster than using the full dom implementation.
-use_dom_implementation=0
+use_dom_implementation = 0
-import exceptions
-if use_dom_implementation<>0:
+if use_dom_implementation != 0:
try:
from xml.dom import implementation
from xml.dom.ext import PrettyPrint
except:
- raise exceptions.ImportError, "PyXML is required for using the dom implementation"
-#The implementation is used for the creating the XML document.
-#The prettyprint module is used for converting the xml document object to a xml file
+ raise exceptions.ImportError(
+ "PyXML is required for using the dom implementation")
+# The implementation is used for the creating the XML document.
+# The prettyprint module is used for converting the xml document object to a xml file
+
+assert sys.version_info[0] >= 2
+if sys.version_info[1] < 2:
+ True = 1
+ False = 0
+ file = open
+
+sys.setrecursionlimit = 50
+# The recursion limit is set conservative so mistakes like s=svg() s.addElement(s)
+# won't eat up too much processor time.
+
+# the following code is pasted form xml.sax.saxutils
+# it makes it possible to run the code without the xml sax package installed
+# To make it possible to have <rubbish> in your text elements, it is necessary to escape the texts
+
-import sys
-assert sys.version_info[0]>=2
-if sys.version_info[1]<2:
- True=1
- False=0
- file=open
-
-sys.setrecursionlimit=50
-#The recursion limit is set conservative so mistakes like s=svg() s.addElement(s)
-#won't eat up too much processor time.
-
-#the following code is pasted form xml.sax.saxutils
-#it makes it possible to run the code without the xml sax package installed
-#To make it possible to have <rubbish> in your text elements, it is necessary to escape the texts
def _escape(data, entities={}):
"""Escape &, <, and > in a string of data.
@@ -127,13 +130,14 @@ def _escape(data, entities={}):
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
- #data = data.replace("&", "&amp;")
+ # data = data.replace("&", "&amp;")
data = data.replace("<", "&lt;")
data = data.replace(">", "&gt;")
- for chars, entity in entities.items():
+ for chars, entity in list(entities.items()):
data = data.replace(chars, entity)
return data
+
def _quoteattr(data, entities={}):
"""Escape and quote an attribute value.
@@ -156,96 +160,121 @@ def _quoteattr(data, entities={}):
return data
-
def _xypointlist(a):
"""formats a list of xy pairs"""
- s=''
- for e in a: #this could be done more elegant
- s+=str(e)[1:-1] +' '
+ s = ''
+ for e in a: # this could be done more elegant
+ s += str(e)[1:-1] + ' '
return s
+
def _viewboxlist(a):
"""formats a tuple"""
- s=''
+ s = ''
for e in a:
- s+=str(e)+' '
+ s += str(e)+' '
return s
+
def _pointlist(a):
"""formats a list of numbers"""
return str(a)[1:-1]
+
class pathdata:
"""class used to create a pathdata object which can be used for a path.
although most methods are pretty straightforward, it might be useful to look at the SVG specification."""
- #I didn't test the methods below.
- def __init__(self,x=None,y=None):
- self.path=[]
+ # I didn't test the methods below.
+
+ def __init__(self, x=None, y=None):
+ self.path = []
if x is not None and y is not None:
self.path.append('M '+str(x)+' '+str(y))
+
def closepath(self):
"""ends the path"""
self.path.append('z')
- def move(self,x,y):
+
+ def move(self, x, y):
"""move to absolute"""
self.path.append('M '+str(x)+' '+str(y))
- def relmove(self,x,y):
+
+ def relmove(self, x, y):
"""move to relative"""
self.path.append('m '+str(x)+' '+str(y))
- def line(self,x,y):
+
+ def line(self, x, y):
"""line to absolute"""
self.path.append('L '+str(x)+' '+str(y))
- def relline(self,x,y):
+
+ def relline(self, x, y):
"""line to relative"""
self.path.append('l '+str(x)+' '+str(y))
- def hline(self,x):
+
+ def hline(self, x):
"""horizontal line to absolute"""
self.path.append('H'+str(x))
- def relhline(self,x):
+
+ def relhline(self, x):
"""horizontal line to relative"""
self.path.append('h'+str(x))
- def vline(self,y):
+
+ def vline(self, y):
"""verical line to absolute"""
self.path.append('V'+str(y))
- def relvline(self,y):
+
+ def relvline(self, y):
"""vertical line to relative"""
self.path.append('v'+str(y))
- def bezier(self,x1,y1,x2,y2,x,y):
+
+ def bezier(self, x1, y1, x2, y2, x, y):
"""bezier with xy1 and xy2 to xy absolut"""
- self.path.append('C'+str(x1)+','+str(y1)+' '+str(x2)+','+str(y2)+' '+str(x)+','+str(y))
- def relbezier(self,x1,y1,x2,y2,x,y):
+ self.path.append('C'+str(x1)+','+str(y1)+' '+str(x2) +
+ ','+str(y2)+' '+str(x)+','+str(y))
+
+ def relbezier(self, x1, y1, x2, y2, x, y):
"""bezier with xy1 and xy2 to xy relative"""
- self.path.append('c'+str(x1)+','+str(y1)+' '+str(x2)+','+str(y2)+' '+str(x)+','+str(y))
- def smbezier(self,x2,y2,x,y):
+ self.path.append('c'+str(x1)+','+str(y1)+' '+str(x2) +
+ ','+str(y2)+' '+str(x)+','+str(y))
+
+ def smbezier(self, x2, y2, x, y):
"""smooth bezier with xy2 to xy absolut"""
self.path.append('S'+str(x2)+','+str(y2)+' '+str(x)+','+str(y))
- def relsmbezier(self,x2,y2,x,y):
+
+ def relsmbezier(self, x2, y2, x, y):
"""smooth bezier with xy2 to xy relative"""
self.path.append('s'+str(x2)+','+str(y2)+' '+str(x)+','+str(y))
- def qbezier(self,x1,y1,x,y):
+
+ def qbezier(self, x1, y1, x, y):
"""quadratic bezier with xy1 to xy absolut"""
self.path.append('Q'+str(x1)+','+str(y1)+' '+str(x)+','+str(y))
- def relqbezier(self,x1,y1,x,y):
+
+ def relqbezier(self, x1, y1, x, y):
"""quadratic bezier with xy1 to xy relative"""
self.path.append('q'+str(x1)+','+str(y1)+' '+str(x)+','+str(y))
- def smqbezier(self,x,y):
+
+ def smqbezier(self, x, y):
"""smooth quadratic bezier to xy absolut"""
self.path.append('T'+str(x)+','+str(y))
- def relsmqbezier(self,x,y):
+
+ def relsmqbezier(self, x, y):
"""smooth quadratic bezier to xy relative"""
self.path.append('t'+str(x)+','+str(y))
- def ellarc(self,rx,ry,xrot,laf,sf,x,y):
+
+ def ellarc(self, rx, ry, xrot, laf, sf, x, y):
"""elliptival arc with rx and ry rotating with xrot using large-arc-flag and sweep-flag to xy absolut"""
- self.path.append('A'+str(rx)+','+str(ry)+' '+str(xrot)+' '+str(laf)+' '+str(sf)+' '+str(x)+' '+str(y))
- def relellarc(self,rx,ry,xrot,laf,sf,x,y):
+ self.path.append('A'+str(rx)+','+str(ry)+' '+str(xrot) +
+ ' '+str(laf)+' '+str(sf)+' '+str(x)+' '+str(y))
+
+ def relellarc(self, rx, ry, xrot, laf, sf, x, y):
"""elliptival arc with rx and ry rotating with xrot using large-arc-flag and sweep-flag to xy relative"""
- self.path.append('a'+str(rx)+','+str(ry)+' '+str(xrot)+' '+str(laf)+' '+str(sf)+' '+str(x)+' '+str(y))
+ self.path.append('a'+str(rx)+','+str(ry)+' '+str(xrot) +
+ ' '+str(laf)+' '+str(sf)+' '+str(x)+' '+str(y))
+
def __repr__(self):
return ' '.join(self.path)
-
-
class SVGelement:
"""SVGelement(type,attributes,elements,text,namespace,**args)
Creates an arbitrary svg element and is intended to be subclassed, not used on its own.
@@ -256,52 +285,56 @@ class SVGelement:
namespace. Note the elements==None, if elements = None:self.elements=[] construction.
This is done because if you default to elements=[] every object has a reference
to the same empty list."""
- def __init__(self,type='',attributes=None,elements=None,text='',namespace='',cdata=None, **args):
- self.type=type
- if attributes==None:
- self.attributes={}
+
+ def __init__(self, type='', attributes=None, elements=None, text='', namespace='', cdata=None, **args):
+ self.type = type
+ if attributes == None:
+ self.attributes = {}
else:
- self.attributes=attributes
- if elements==None:
- self.elements=[]
+ self.attributes = attributes
+ if elements == None:
+ self.elements = []
else:
- self.elements=elements
- self.text=text
- self.namespace=namespace
- self.cdata=cdata
- for arg in args.keys():
+ self.elements = elements
+ self.text = text
+ self.namespace = namespace
+ self.cdata = cdata
+ for arg in list(args.keys()):
arg2 = arg.replace("__", ":")
arg2 = arg2.replace("_", "-")
- self.attributes[arg2]=args[arg]
- def addElement(self,SVGelement):
+ self.attributes[arg2] = args[arg]
+
+ def addElement(self, SVGelement):
"""adds an element to a SVGelement
SVGelement.addElement(SVGelement)
"""
self.elements.append(SVGelement)
- def toXml(self,level,f):
+ def toXml(self, level, f):
f.write('\t'*level)
f.write('<'+self.type)
- for attkey in self.attributes.keys():
- f.write(' '+_escape(str(attkey))+'='+_quoteattr(str(self.attributes[attkey])))
+ for attkey in list(self.attributes.keys()):
+ f.write(' '+_escape(str(attkey))+'=' +
+ _quoteattr(str(self.attributes[attkey])))
if self.namespace:
- f.write(' xmlns="'+ _escape(str(self.namespace))+'" xmlns:xlink="http://www.w3.org/1999/xlink"')
+ f.write(' xmlns="' + _escape(str(self.namespace)) +
+ '" xmlns:xlink="http://www.w3.org/1999/xlink"')
if self.elements or self.text or self.cdata:
f.write('>')
if self.elements:
f.write('\n')
for element in self.elements:
- element.toXml(level+1,f)
+ element.toXml(level+1, f)
if self.cdata:
f.write('\n'+'\t'*(level+1)+'<![CDATA[')
for line in self.cdata.splitlines():
f.write('\n'+'\t'*(level+2)+line)
f.write('\n'+'\t'*(level+1)+']]>\n')
if self.text:
- if type(self.text)==type(''): #If the text is only text
+ if isinstance(self.text, type('')): # If the text is only text
f.write(_escape(str(self.text)))
- else: #If the text is a spannedtext class
+ else: # If the text is a spannedtext class
f.write(str(self.text))
if self.elements:
f.write('\t'*level+'</'+self.type+'>\n')
@@ -312,6 +345,7 @@ class SVGelement:
else:
f.write('/>\n')
+
class tspan(SVGelement):
"""ts=tspan(text='',**args)
@@ -323,19 +357,22 @@ class tspan(SVGelement):
st.addtspan(ts)
t=text(3,5,st)
"""
- def __init__(self,text=None,**args):
- SVGelement.__init__(self,'tspan',**args)
- if self.text<>None:
- self.text=text
+
+ def __init__(self, text=None, **args):
+ SVGelement.__init__(self, 'tspan', **args)
+ if self.text != None:
+ self.text = text
+
def __repr__(self):
- s="<tspan"
- for key,value in self.attributes.items():
- s+= ' %s="%s"' % (key,value)
- s+='>'
- s+=self.text
- s+='</tspan>'
+ s = "<tspan"
+ for key, value in list(self.attributes.items()):
+ s += ' %s="%s"' % (key, value)
+ s += '>'
+ s += self.text
+ s += '</tspan>'
return s
+
class tref(SVGelement):
"""tr=tref(link='',**args)
@@ -346,16 +383,19 @@ class tref(SVGelement):
st.addtref(tr)
t=text(3,5,st)
"""
- def __init__(self,link,**args):
- SVGelement.__init__(self,'tref',{'xlink:href':link},**args)
+
+ def __init__(self, link, **args):
+ SVGelement.__init__(self, 'tref', {'xlink:href': link}, **args)
+
def __repr__(self):
- s="<tref"
+ s = "<tref"
- for key,value in self.attributes.items():
- s+= ' %s="%s"' % (key,value)
- s+='/>'
+ for key, value in list(self.attributes.items()):
+ s += ' %s="%s"' % (key, value)
+ s += '/>'
return s
+
class spannedtext:
"""st=spannedtext(textlist=[])
@@ -374,46 +414,49 @@ class spannedtext:
st.addtext('This text is not bold')
t=text(3,5,st)
"""
- def __init__(self,textlist=None):
- if textlist==None:
- self.textlist=[]
+
+ def __init__(self, textlist=None):
+ if textlist == None:
+ self.textlist = []
else:
- self.textlist=textlist
- def addtext(self,text=''):
+ self.textlist = textlist
+
+ def addtext(self, text=''):
self.textlist.append(text)
- def addtspan(self,tspan):
+
+ def addtspan(self, tspan):
self.textlist.append(tspan)
- def addtref(self,tref):
+
+ def addtref(self, tref):
self.textlist.append(tref)
+
def __repr__(self):
- s=""
+ s = ""
for element in self.textlist:
- s+=str(element)
+ s += str(element)
return s
+
class rect(SVGelement):
"""r=rect(width,height,x,y,fill,stroke,stroke_width,**args)
a rectangle is defined by a width and height and a xy pair
"""
- def __init__(self,x=None,y=None,width=None,height=None,fill=None,stroke=None,stroke_width=None,**args):
- if width==None or height==None:
- if width<>None:
- raise ValueError, 'height is required'
- if height<>None:
- raise ValueError, 'width is required'
- else:
- raise ValueError, 'both height and width are required'
- SVGelement.__init__(self,'rect',{'width':width,'height':height},**args)
- if x<>None:
+
+ def __init__(self, x=None, y=None, width=None, height=None, fill=None, stroke=None, stroke_width=None, **args):
+ if width == None or height == None:
+ raise ValueError('both height and width are required')
+
+ SVGelement.__init__(self, 'rect', {'width':width,'height':height}, **args)
+ if x!=None:
self.attributes['x']=x
- if y<>None:
+ if y!=None:
self.attributes['y']=y
- if fill<>None:
+ if fill!=None:
self.attributes['fill']=fill
- if stroke<>None:
+ if stroke!=None:
self.attributes['stroke']=stroke
- if stroke_width<>None:
+ if stroke_width!=None:
self.attributes['stroke-width']=stroke_width
class ellipse(SVGelement):
@@ -423,22 +466,18 @@ class ellipse(SVGelement):
"""
def __init__(self,cx=None,cy=None,rx=None,ry=None,fill=None,stroke=None,stroke_width=None,**args):
if rx==None or ry== None:
- if rx<>None:
- raise ValueError, 'rx is required'
- if ry<>None:
- raise ValueError, 'ry is required'
- else:
- raise ValueError, 'both rx and ry are required'
- SVGelement.__init__(self,'ellipse',{'rx':rx,'ry':ry},**args)
- if cx<>None:
+ raise ValueError('both rx and ry are required')
+
+ SVGelement.__init__(self, 'ellipse', {'rx':rx,'ry':ry}, **args)
+ if cx!=None:
self.attributes['cx']=cx
- if cy<>None:
+ if cy!=None:
self.attributes['cy']=cy
- if fill<>None:
+ if fill!=None:
self.attributes['fill']=fill
- if stroke<>None:
+ if stroke!=None:
self.attributes['stroke']=stroke
- if stroke_width<>None:
+ if stroke_width!=None:
self.attributes['stroke-width']=stroke_width
@@ -449,17 +488,17 @@ class circle(SVGelement):
"""
def __init__(self,cx=None,cy=None,r=None,fill=None,stroke=None,stroke_width=None,**args):
if r==None:
- raise ValueError, 'r is required'
- SVGelement.__init__(self,'circle',{'r':r},**args)
- if cx<>None:
+ raise ValueError('r is required')
+ SVGelement.__init__(self, 'circle', {'r':r}, **args)
+ if cx!=None:
self.attributes['cx']=cx
- if cy<>None:
+ if cy!=None:
self.attributes['cy']=cy
- if fill<>None:
+ if fill!=None:
self.attributes['fill']=fill
- if stroke<>None:
+ if stroke!=None:
self.attributes['stroke']=stroke
- if stroke_width<>None:
+ if stroke_width!=None:
self.attributes['stroke-width']=stroke_width
class point(circle):
@@ -469,7 +508,7 @@ class point(circle):
very small rectangle if you use many points because a circle is difficult to render.
"""
def __init__(self,x,y,fill='black',**args):
- circle.__init__(self,x,y,1,fill,**args)
+ circle.__init__(self, x, y, 1, fill, **args)
class line(SVGelement):
"""l=line(x1,y1,x2,y2,stroke,stroke_width,**args)
@@ -477,18 +516,18 @@ class line(SVGelement):
A line is defined by a begin x,y pair and an end x,y pair
"""
def __init__(self,x1=None,y1=None,x2=None,y2=None,stroke=None,stroke_width=None,**args):
- SVGelement.__init__(self,'line',**args)
- if x1<>None:
+ SVGelement.__init__(self, 'line', **args)
+ if x1!=None:
self.attributes['x1']=x1
- if y1<>None:
+ if y1!=None:
self.attributes['y1']=y1
- if x2<>None:
+ if x2!=None:
self.attributes['x2']=x2
- if y2<>None:
+ if y2!=None:
self.attributes['y2']=y2
- if stroke_width<>None:
+ if stroke_width!=None:
self.attributes['stroke-width']=stroke_width
- if stroke<>None:
+ if stroke!=None:
self.attributes['stroke']=stroke
class polyline(SVGelement):
@@ -497,12 +536,12 @@ class polyline(SVGelement):
a polyline is defined by a list of xy pairs
"""
def __init__(self,points,fill=None,stroke=None,stroke_width=None,**args):
- SVGelement.__init__(self,'polyline',{'points':_xypointlist(points)},**args)
- if fill<>None:
+ SVGelement.__init__(self, 'polyline', {'points':_xypointlist(points)}, **args)
+ if fill!=None:
self.attributes['fill']=fill
- if stroke_width<>None:
+ if stroke_width!=None:
self.attributes['stroke-width']=stroke_width
- if stroke<>None:
+ if stroke!=None:
self.attributes['stroke']=stroke
class polygon(SVGelement):
@@ -511,12 +550,12 @@ class polygon(SVGelement):
a polygon is defined by a list of xy pairs
"""
def __init__(self,points,fill=None,stroke=None,stroke_width=None,**args):
- SVGelement.__init__(self,'polygon',{'points':_xypointlist(points)},**args)
- if fill<>None:
+ SVGelement.__init__(self, 'polygon', {'points':_xypointlist(points)}, **args)
+ if fill!=None:
self.attributes['fill']=fill
- if stroke_width<>None:
+ if stroke_width!=None:
self.attributes['stroke-width']=stroke_width
- if stroke<>None:
+ if stroke!=None:
self.attributes['stroke']=stroke
class path(SVGelement):
@@ -525,14 +564,14 @@ class path(SVGelement):
a path is defined by a path object and optional width, stroke and fillcolor
"""
def __init__(self,pathdata,fill=None,stroke=None,stroke_width=None,id=None,**args):
- SVGelement.__init__(self,'path',{'d':str(pathdata)},**args)
- if stroke<>None:
+ SVGelement.__init__(self, 'path', {'d':str(pathdata)}, **args)
+ if stroke!=None:
self.attributes['stroke']=stroke
- if fill<>None:
+ if fill!=None:
self.attributes['fill']=fill
- if stroke_width<>None:
+ if stroke_width!=None:
self.attributes['stroke-width']=stroke_width
- if id<>None:
+ if id!=None:
self.attributes['id']=id
@@ -542,18 +581,18 @@ class text(SVGelement):
a text element can be used for displaying text on the screen
"""
def __init__(self,x=None,y=None,text=None,font_size=None,font_family=None,text_anchor=None,**args):
- SVGelement.__init__(self,'text',**args)
- if x<>None:
+ SVGelement.__init__(self, 'text', **args)
+ if x!=None:
self.attributes['x']=x
- if y<>None:
+ if y!=None:
self.attributes['y']=y
- if font_size<>None:
+ if font_size!=None:
self.attributes['font-size']=font_size
- if font_family<>None:
+ if font_family!=None:
self.attributes['font-family']=font_family
- if text<>None:
+ if text!=None:
self.text=text
- if text_anchor<>None:
+ if text_anchor!=None:
self.attributes['text-anchor']=text_anchor
@@ -563,8 +602,8 @@ class textpath(SVGelement):
a textpath places a text on a path which is referenced by a link.
"""
def __init__(self,link,text=None,**args):
- SVGelement.__init__(self,'textPath',{'xlink:href':link},**args)
- if text<>None:
+ SVGelement.__init__(self, 'textPath', {'xlink:href':link}, **args)
+ if text!=None:
self.text=text
class pattern(SVGelement):
@@ -575,16 +614,16 @@ class pattern(SVGelement):
in x and y to cover the areas to be painted.
"""
def __init__(self,x=None,y=None,width=None,height=None,patternUnits=None,**args):
- SVGelement.__init__(self,'pattern',**args)
- if x<>None:
+ SVGelement.__init__(self, 'pattern', **args)
+ if x!=None:
self.attributes['x']=x
- if y<>None:
+ if y!=None:
self.attributes['y']=y
- if width<>None:
+ if width!=None:
self.attributes['width']=width
- if height<>None:
+ if height!=None:
self.attributes['height']=height
- if patternUnits<>None:
+ if patternUnits!=None:
self.attributes['patternUnits']=patternUnits
class title(SVGelement):
@@ -594,8 +633,8 @@ class title(SVGelement):
add at least one to the root svg element
"""
def __init__(self,text=None,**args):
- SVGelement.__init__(self,'title',**args)
- if text<>None:
+ SVGelement.__init__(self, 'title', **args)
+ if text!=None:
self.text=text
class description(SVGelement):
@@ -605,8 +644,8 @@ class description(SVGelement):
Add this element before adding other elements.
"""
def __init__(self,text=None,**args):
- SVGelement.__init__(self,'desc',**args)
- if text<>None:
+ SVGelement.__init__(self, 'desc', **args)
+ if text!=None:
self.text=text
class lineargradient(SVGelement):
@@ -616,16 +655,16 @@ class lineargradient(SVGelement):
stop elements can be added to define the gradient colors.
"""
def __init__(self,x1=None,y1=None,x2=None,y2=None,id=None,**args):
- SVGelement.__init__(self,'linearGradient',**args)
- if x1<>None:
+ SVGelement.__init__(self, 'linearGradient', **args)
+ if x1!=None:
self.attributes['x1']=x1
- if y1<>None:
+ if y1!=None:
self.attributes['y1']=y1
- if x2<>None:
+ if x2!=None:
self.attributes['x2']=x2
- if y2<>None:
+ if y2!=None:
self.attributes['y2']=y2
- if id<>None:
+ if id!=None:
self.attributes['id']=id
class radialgradient(SVGelement):
@@ -635,18 +674,18 @@ class radialgradient(SVGelement):
stop elements can be added to define the gradient colors.
"""
def __init__(self,cx=None,cy=None,r=None,fx=None,fy=None,id=None,**args):
- SVGelement.__init__(self,'radialGradient',**args)
- if cx<>None:
+ SVGelement.__init__(self, 'radialGradient', **args)
+ if cx!=None:
self.attributes['cx']=cx
- if cy<>None:
+ if cy!=None:
self.attributes['cy']=cy
- if r<>None:
+ if r!=None:
self.attributes['r']=r
- if fx<>None:
+ if fx!=None:
self.attributes['fx']=fx
- if fy<>None:
+ if fy!=None:
self.attributes['fy']=fy
- if id<>None:
+ if id!=None:
self.attributes['id']=id
class stop(SVGelement):
@@ -655,8 +694,8 @@ class stop(SVGelement):
Puts a stop color at the specified radius
"""
def __init__(self,offset,stop_color=None,**args):
- SVGelement.__init__(self,'stop',{'offset':offset},**args)
- if stop_color<>None:
+ SVGelement.__init__(self, 'stop', {'offset':offset}, **args)
+ if stop_color!=None:
self.attributes['stop-color']=stop_color
class style(SVGelement):
@@ -665,7 +704,7 @@ class style(SVGelement):
Add a CDATA element to this element for defining inline stylesheets etc.
"""
def __init__(self,type,cdata=None,**args):
- SVGelement.__init__(self,'style',{'type':type},cdata=cdata, **args)
+ SVGelement.__init__(self, 'style', {'type':type}, cdata=cdata, **args)
class image(SVGelement):
@@ -675,16 +714,11 @@ class image(SVGelement):
"""
def __init__(self,url,x=None,y=None,width=None,height=None,**args):
if width==None or height==None:
- if width<>None:
- raise ValueError, 'height is required'
- if height<>None:
- raise ValueError, 'width is required'
- else:
- raise ValueError, 'both height and width are required'
- SVGelement.__init__(self,'image',{'xlink:href':url,'width':width,'height':height},**args)
- if x<>None:
+ raise ValueError('both height and width are required')
+ SVGelement.__init__(self, 'image', {'xlink:href':url,'width':width,'height':height}, **args)
+ if x!=None:
self.attributes['x']=x
- if y<>None:
+ if y!=None:
self.attributes['y']=y
class cursor(SVGelement):
@@ -693,7 +727,7 @@ class cursor(SVGelement):
defines a custom cursor for a element or a drawing
"""
def __init__(self,url,**args):
- SVGelement.__init__(self,'cursor',{'xlink:href':url},**args)
+ SVGelement.__init__(self, 'cursor', {'xlink:href':url}, **args)
class marker(SVGelement):
@@ -703,18 +737,18 @@ class marker(SVGelement):
add an element to it which should be used as a marker.
"""
def __init__(self,id=None,viewBox=None,refx=None,refy=None,markerWidth=None,markerHeight=None,**args):
- SVGelement.__init__(self,'marker',**args)
- if id<>None:
+ SVGelement.__init__(self, 'marker', **args)
+ if id!=None:
self.attributes['id']=id
- if viewBox<>None:
+ if viewBox!=None:
self.attributes['viewBox']=_viewboxlist(viewBox)
- if refx<>None:
+ if refx!=None:
self.attributes['refX']=refx
- if refy<>None:
+ if refy!=None:
self.attributes['refY']=refy
- if markerWidth<>None:
+ if markerWidth!=None:
self.attributes['markerWidth']=markerWidth
- if markerHeight<>None:
+ if markerHeight!=None:
self.attributes['markerHeight']=markerHeight
class group(SVGelement):
@@ -724,8 +758,8 @@ class group(SVGelement):
g.addElement(SVGelement)
"""
def __init__(self,id=None,**args):
- SVGelement.__init__(self,'g',**args)
- if id<>None:
+ SVGelement.__init__(self, 'g', **args)
+ if id!=None:
self.attributes['id']=id
class symbol(SVGelement):
@@ -738,10 +772,10 @@ class symbol(SVGelement):
"""
def __init__(self,id=None,viewBox=None,**args):
- SVGelement.__init__(self,'symbol',**args)
- if id<>None:
+ SVGelement.__init__(self, 'symbol', **args)
+ if id!=None:
self.attributes['id']=id
- if viewBox<>None:
+ if viewBox!=None:
self.attributes['viewBox']=_viewboxlist(viewBox)
class defs(SVGelement):
@@ -750,7 +784,7 @@ class defs(SVGelement):
container for defining elements
"""
def __init__(self,**args):
- SVGelement.__init__(self,'defs',**args)
+ SVGelement.__init__(self, 'defs', **args)
class switch(SVGelement):
"""sw=switch(**args)
@@ -760,7 +794,7 @@ class switch(SVGelement):
Refer to the SVG specification for details.
"""
def __init__(self,**args):
- SVGelement.__init__(self,'switch',**args)
+ SVGelement.__init__(self, 'switch', **args)
class use(SVGelement):
@@ -769,15 +803,15 @@ class use(SVGelement):
references a symbol by linking to its id and its position, height and width
"""
def __init__(self,link,x=None,y=None,width=None,height=None,**args):
- SVGelement.__init__(self,'use',{'xlink:href':link},**args)
- if x<>None:
+ SVGelement.__init__(self, 'use', {'xlink:href':link}, **args)
+ if x!=None:
self.attributes['x']=x
- if y<>None:
+ if y!=None:
self.attributes['y']=y
- if width<>None:
+ if width!=None:
self.attributes['width']=width
- if height<>None:
+ if height!=None:
self.attributes['height']=height
@@ -788,15 +822,15 @@ class link(SVGelement):
a.addElement(SVGelement)
"""
def __init__(self,link='',**args):
- SVGelement.__init__(self,'a',{'xlink:href':link},**args)
+ SVGelement.__init__(self, 'a', {'xlink:href':link}, **args)
class view(SVGelement):
"""v=view(id,**args)
a view can be used to create a view with different attributes"""
def __init__(self,id=None,**args):
- SVGelement.__init__(self,'view',**args)
- if id<>None:
+ SVGelement.__init__(self, 'view', **args)
+ if id!=None:
self.attributes['id']=id
class script(SVGelement):
@@ -806,7 +840,7 @@ class script(SVGelement):
"""
def __init__(self,type,cdata=None,**args):
- SVGelement.__init__(self,'script',{'type':type},cdata=cdata,**args)
+ SVGelement.__init__(self, 'script', {'type':type}, cdata=cdata, **args)
class animate(SVGelement):
"""an=animate(attribute,from,to,during,**args)
@@ -814,12 +848,12 @@ class animate(SVGelement):
animates an attribute.
"""
def __init__(self,attribute,fr=None,to=None,dur=None,**args):
- SVGelement.__init__(self,'animate',{'attributeName':attribute},**args)
- if fr<>None:
+ SVGelement.__init__(self, 'animate', {'attributeName':attribute}, **args)
+ if fr!=None:
self.attributes['from']=fr
- if to<>None:
+ if to!=None:
self.attributes['to']=to
- if dur<>None:
+ if dur!=None:
self.attributes['dur']=dur
class animateMotion(SVGelement):
@@ -828,10 +862,10 @@ class animateMotion(SVGelement):
animates a SVGelement over the given path in dur seconds
"""
def __init__(self,pathdata,dur,**args):
- SVGelement.__init__(self,'animateMotion',**args)
- if pathdata<>None:
+ SVGelement.__init__(self, 'animateMotion', **args)
+ if pathdata!=None:
self.attributes['path']=str(pathdata)
- if dur<>None:
+ if dur!=None:
self.attributes['dur']=dur
class animateTransform(SVGelement):
@@ -840,15 +874,15 @@ class animateTransform(SVGelement):
transform an element from and to a value.
"""
def __init__(self,type=None,fr=None,to=None,dur=None,**args):
- SVGelement.__init__(self,'animateTransform',{'attributeName':'transform'},**args)
- #As far as I know the attributeName is always transform
- if type<>None:
+ SVGelement.__init__(self, 'animateTransform', {'attributeName':'transform'}, **args)
+ # As far as I know the attributeName is always transform
+ if type!=None:
self.attributes['type']=type
- if fr<>None:
+ if fr!=None:
self.attributes['from']=fr
- if to<>None:
+ if to!=None:
self.attributes['to']=to
- if dur<>None:
+ if dur!=None:
self.attributes['dur']=dur
class animateColor(SVGelement):
"""ac=animateColor(attribute,type,from,to,dur,**args)
@@ -856,14 +890,14 @@ class animateColor(SVGelement):
Animates the color of a element
"""
def __init__(self,attribute,type=None,fr=None,to=None,dur=None,**args):
- SVGelement.__init__(self,'animateColor',{'attributeName':attribute},**args)
- if type<>None:
+ SVGelement.__init__(self, 'animateColor', {'attributeName':attribute}, **args)
+ if type!=None:
self.attributes['type']=type
- if fr<>None:
+ if fr!=None:
self.attributes['from']=fr
- if to<>None:
+ if to!=None:
self.attributes['to']=to
- if dur<>None:
+ if dur!=None:
self.attributes['dur']=dur
class set(SVGelement):
"""st=set(attribute,to,during,**args)
@@ -871,10 +905,10 @@ class set(SVGelement):
sets an attribute to a value for a
"""
def __init__(self,attribute,to=None,dur=None,**args):
- SVGelement.__init__(self,'set',{'attributeName':attribute},**args)
- if to<>None:
+ SVGelement.__init__(self, 'set', {'attributeName':attribute}, **args)
+ if to!=None:
self.attributes['to']=to
- if dur<>None:
+ if dur!=None:
self.attributes['dur']=dur
@@ -895,12 +929,12 @@ class svg(SVGelement):
d.toXml()
"""
def __init__(self,viewBox=None, width=None, height=None,**args):
- SVGelement.__init__(self,'svg',**args)
- if viewBox<>None:
+ SVGelement.__init__(self, 'svg', **args)
+ if viewBox!=None:
self.attributes['viewBox']=_viewboxlist(viewBox)
- if width<>None:
+ if width!=None:
self.attributes['width']=width
- if height<>None:
+ if height!=None:
self.attributes['height']=height
self.namespace="http://www.w3.org/2000/svg"
@@ -918,27 +952,27 @@ class drawing:
def __init__(self, entity={}):
self.svg=None
self.entity = entity
- def setSVG(self,svg):
+ def setSVG(self, svg):
self.svg=svg
- #Voeg een element toe aan de grafiek toe.
+ # Voeg een element toe aan de grafiek toe.
if use_dom_implementation==0:
def toXml(self, filename='',compress=False):
- import cStringIO
- xml=cStringIO.StringIO()
+ import io
+ xml=io.StringIO()
xml.write("<?xml version='1.0' encoding='UTF-8'?>\n")
xml.write("<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.0//EN\" \"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd\"")
if self.entity:
xml.write(" [\n")
- for item in self.entity.keys():
+ for item in list(self.entity.keys()):
xml.write("<!ENTITY %s \"%s\">\n" % (item, self.entity[item]))
xml.write("]")
xml.write(">\n")
- self.svg.toXml(0,xml)
+ self.svg.toXml(0, xml)
if not filename:
if compress:
import gzip
- f=cStringIO.StringIO()
- zf=gzip.GzipFile(fileobj=f,mode='wb')
+ f=io.StringIO()
+ zf=gzip.GzipFile(fileobj=f, mode='wb')
zf.write(xml.getvalue())
zf.close()
f.seek(0)
@@ -948,11 +982,11 @@ class drawing:
else:
if filename[-4:]=='svgz':
import gzip
- f=gzip.GzipFile(filename=filename,mode="wb", compresslevel=9)
+ f=gzip.GzipFile(filename=filename, mode="wb", compresslevel=9)
f.write(xml.getvalue())
f.close()
else:
- f=file(filename,'w')
+ f=file(filename, 'w')
f.write(xml.getvalue())
f.close()
@@ -963,40 +997,40 @@ class drawing:
writes a svg drawing to the screen or to a file
compresses if filename ends with svgz or if compress is true
"""
- doctype = implementation.createDocumentType('svg',"-//W3C//DTD SVG 1.0//EN""",'http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd ')
+ doctype = implementation.createDocumentType('svg', "-//W3C//DTD SVG 1.0//EN""", 'http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd ')
global root
- #root is defined global so it can be used by the appender. Its also possible to use it as an arugument but
- #that is a bit messy.
- root=implementation.createDocument(None,None,doctype)
- #Create the xml document.
+ # root is defined global so it can be used by the appender. It's also possible to use it as an argument but
+ # that is a bit messy.
+ root=implementation.createDocument(None, None, doctype)
+ # Create the xml document.
global appender
- def appender(element,elementroot):
+ def appender(element, elementroot):
"""This recursive function appends elements to an element and sets the attributes
and type. It stops when all elements have been appended"""
if element.namespace:
- e=root.createElementNS(element.namespace,element.type)
+ e=root.createElementNS(element.namespace, element.type)
else:
e=root.createElement(element.type)
if element.text:
textnode=root.createTextNode(element.text)
e.appendChild(textnode)
- for attribute in element.attributes.keys(): #in element.attributes is supported from python 2.2
- e.setAttribute(attribute,str(element.attributes[attribute]))
+ for attribute in list(element.attributes.keys()): #in element.attributes is supported from python 2.2
+ e.setAttribute(attribute, str(element.attributes[attribute]))
if element.elements:
for el in element.elements:
- e=appender(el,e)
+ e=appender(el, e)
elementroot.appendChild(e)
return elementroot
- root=appender(self.svg,root)
+ root=appender(self.svg, root)
if not filename:
- import cStringIO
- xml=cStringIO.StringIO()
- PrettyPrint(root,xml)
+ import io
+ xml=io.StringIO()
+ PrettyPrint(root, xml)
if compress:
import gzip
- f=cStringIO.StringIO()
- zf=gzip.GzipFile(fileobj=f,mode='wb')
+ f=io.StringIO()
+ zf=gzip.GzipFile(fileobj=f, mode='wb')
zf.write(xml.getvalue())
zf.close()
f.seek(0)
@@ -1007,23 +1041,23 @@ class drawing:
try:
if filename[-4:]=='svgz':
import gzip
- import cStringIO
- xml=cStringIO.StringIO()
- PrettyPrint(root,xml)
- f=gzip.GzipFile(filename=filename,mode='wb',compresslevel=9)
+ import io
+ xml=io.StringIO()
+ PrettyPrint(root, xml)
+ f=gzip.GzipFile(filename=filename, mode='wb', compresslevel=9)
f.write(xml.getvalue())
f.close()
else:
- f=open(filename,'w')
- PrettyPrint(root,f)
+ f=open(filename, 'w')
+ PrettyPrint(root, f)
f.close()
except:
- print "Cannot write SVG file: " + filename
+ print(("Cannot write SVG file: " + filename))
def validate(self):
try:
import xml.parsers.xmlproc.xmlval
except:
- raise exceptions.ImportError,'PyXml is required for validating SVG'
+ raise exceptions.ImportError('PyXml is required for validating SVG')
svg=self.toXml()
xv=xml.parsers.xmlproc.xmlval.XMLValidator()
try:
@@ -1031,38 +1065,38 @@ class drawing:
except:
raise Exception("SVG is not well formed, see messages above")
else:
- print "SVG well formed"
+ print("SVG well formed")
if __name__=='__main__':
d=drawing()
- s=svg((0,0,100,100))
- r=rect(-100,-100,300,300,'cyan')
+ s=svg((0, 0, 100, 100))
+ r=rect(-100, -100, 300, 300, 'cyan')
s.addElement(r)
t=title('SVGdraw Demo')
s.addElement(t)
g=group('animations')
- e=ellipse(0,0,5,2)
+ e=ellipse(0, 0, 5, 2)
g.addElement(e)
- c=circle(0,0,1,'red')
+ c=circle(0, 0, 1, 'red')
g.addElement(c)
- pd=pathdata(0,-10)
+ pd=pathdata(0, -10)
for i in range(6):
- pd.relsmbezier(10,5,0,10)
- pd.relsmbezier(-10,5,0,10)
- an=animateMotion(pd,10)
+ pd.relsmbezier(10, 5, 0, 10)
+ pd.relsmbezier(-10, 5, 0, 10)
+ an=animateMotion(pd, 10)
an.attributes['rotate']='auto-reverse'
an.attributes['repeatCount']="indefinite"
g.addElement(an)
s.addElement(g)
- for i in range(20,120,20):
- u=use('#animations',i,0)
+ for i in range(20, 120, 20):
+ u=use('#animations', i, 0)
s.addElement(u)
- for i in range(0,120,20):
- for j in range(5,105,10):
- c=circle(i,j,1,'red','black',.5)
+ for i in range(0, 120, 20):
+ for j in range(5, 105, 10):
+ c=circle(i, j, 1, 'red', 'black', .5)
s.addElement(c)
d.setSVG(s)
- print d.toXml()
+ print((d.toXml()))
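When the gzip branch of toXml() runs under Python 3, GzipFile.write() expects bytes, so text has to be encoded before compression (io.BytesIO is the binary counterpart of io.StringIO). A minimal sketch of that compression step, independent of the drawing class:

    import gzip
    import io

    xml_text = "<?xml version='1.0' encoding='UTF-8'?>\n<svg/>"
    buf = io.BytesIO()  # binary buffer for the gzip output
    with gzip.GzipFile(fileobj=buf, mode='wb') as zf:
        zf.write(xml_text.encode('utf-8'))  # GzipFile takes bytes, not str
    compressed = buf.getvalue()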
diff --git a/wqflask/utility/temp_data.py b/wqflask/utility/temp_data.py
index 5bf700c9..4144ae00 100644
--- a/wqflask/utility/temp_data.py
+++ b/wqflask/utility/temp_data.py
@@ -1,4 +1,3 @@
-from __future__ import print_function, division, absolute_import
from redis import Redis
import simplejson as json
@@ -20,6 +19,6 @@ class TempData(object):
if __name__ == "__main__":
redis = Redis()
- for key in redis.keys():
+ for key in list(redis.keys()):
for field in redis.hkeys(key):
print("{}.{}={}".format(key, field, redis.hget(key, field)))
diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py
index 77db5d53..68ef0f04 100644
--- a/wqflask/utility/tools.py
+++ b/wqflask/utility/tools.py
@@ -15,7 +15,7 @@ OVERRIDES = {}
def app_set(command_id, value):
"""Set application wide value"""
- app.config.setdefault(command_id,value)
+ app.config.setdefault(command_id, value)
return value
def get_setting(command_id,guess=None):
@@ -45,7 +45,7 @@ def get_setting(command_id,guess=None):
def value(command):
if command:
# sys.stderr.write("Found "+command+"\n")
- app_set(command_id,command)
+ app_set(command_id, command)
return command
else:
return None
@@ -68,7 +68,7 @@ def get_setting(command_id,guess=None):
def get_setting_bool(id):
v = get_setting(id)
- if v not in [0,False,'False','FALSE',None]:
+ if v not in [0, False, 'False', 'FALSE', None]:
return True
return False
@@ -108,16 +108,16 @@ def js_path(module=None):
raise "No JS path found for "+module+" (if not in Guix check JS_GN_PATH)"
def reaper_command(guess=None):
- return get_setting("REAPER_COMMAND",guess)
+ return get_setting("REAPER_COMMAND", guess)
def gemma_command(guess=None):
- return assert_bin(get_setting("GEMMA_COMMAND",guess))
+ return assert_bin(get_setting("GEMMA_COMMAND", guess))
def gemma_wrapper_command(guess=None):
- return assert_bin(get_setting("GEMMA_WRAPPER_COMMAND",guess))
+ return assert_bin(get_setting("GEMMA_WRAPPER_COMMAND", guess))
def plink_command(guess=None):
- return assert_bin(get_setting("PLINK_COMMAND",guess))
+ return assert_bin(get_setting("PLINK_COMMAND", guess))
def flat_file_exists(subdir):
base = get_setting("GENENETWORK_FILES")
@@ -180,7 +180,7 @@ def locate(name, subdir=None):
raise Exception("Can not locate "+name+" in "+base)
def locate_phewas(name, subdir=None):
- return locate(name,'/phewas/'+subdir)
+ return locate(name, '/phewas/'+subdir)
def locate_ignore_error(name, subdir=None):
"""
@@ -204,7 +204,7 @@ def tempdir():
"""
Get UNIX TMPDIR by default
"""
- return valid_path(get_setting("TMPDIR","/tmp"))
+ return valid_path(get_setting("TMPDIR", "/tmp"))
BLUE = '\033[94m'
GREEN = '\033[92m'
@@ -214,20 +214,20 @@ ENDC = '\033[0m'
def show_settings():
from utility.tools import LOG_LEVEL
- print("Set global log level to "+BLUE+LOG_LEVEL+ENDC)
+ print(("Set global log level to "+BLUE+LOG_LEVEL+ENDC))
log_level = getattr(logging, LOG_LEVEL.upper())
logging.basicConfig(level=log_level)
logger.info(OVERRIDES)
logger.info(BLUE+"Mr. Mojo Risin 2"+ENDC)
- print "runserver.py: ****** Webserver configuration - k,v pairs from app.config ******"
- keylist = app.config.keys()
+ keylist = list(app.config.keys())
+ print("runserver.py: ****** Webserver configuration - k,v pairs from app.config ******")
keylist.sort()
for k in keylist:
try:
- print("%s: %s%s%s%s" % (k,BLUE,BOLD,get_setting(k),ENDC))
+ print(("%s: %s%s%s%s" % (k, BLUE, BOLD, get_setting(k), ENDC)))
except:
- print("%s: %s%s%s%s" % (k,GREEN,BOLD,app.config[k],ENDC))
+ print(("%s: %s%s%s%s" % (k, GREEN, BOLD, app.config[k], ENDC)))
# Cached values
@@ -279,10 +279,10 @@ SMTP_CONNECT = get_setting('SMTP_CONNECT')
SMTP_USERNAME = get_setting('SMTP_USERNAME')
SMTP_PASSWORD = get_setting('SMTP_PASSWORD')
-REAPER_COMMAND = app_set("REAPER_COMMAND",reaper_command())
-GEMMA_COMMAND = app_set("GEMMA_COMMAND",gemma_command())
+REAPER_COMMAND = app_set("REAPER_COMMAND", reaper_command())
+GEMMA_COMMAND = app_set("GEMMA_COMMAND", gemma_command())
assert(GEMMA_COMMAND is not None)
-PLINK_COMMAND = app_set("PLINK_COMMAND",plink_command())
+PLINK_COMMAND = app_set("PLINK_COMMAND", plink_command())
GEMMA_WRAPPER_COMMAND = gemma_wrapper_command()
TEMPDIR = tempdir() # defaults to UNIX TMPDIR
assert_dir(TEMPDIR)
@@ -295,11 +295,11 @@ assert_dir(JS_GUIX_PATH+'/cytoscape-panzoom')
CSS_PATH = JS_GUIX_PATH # The CSS is bundled together with the JS
# assert_dir(JS_PATH)
-JS_TWITTER_POST_FETCHER_PATH = get_setting("JS_TWITTER_POST_FETCHER_PATH",js_path("javascript-twitter-post-fetcher"))
+JS_TWITTER_POST_FETCHER_PATH = get_setting("JS_TWITTER_POST_FETCHER_PATH", js_path("javascript-twitter-post-fetcher"))
assert_dir(JS_TWITTER_POST_FETCHER_PATH)
assert_file(JS_TWITTER_POST_FETCHER_PATH+"/js/twitterFetcher_min.js")
-JS_CYTOSCAPE_PATH = get_setting("JS_CYTOSCAPE_PATH",js_path("cytoscape"))
+JS_CYTOSCAPE_PATH = get_setting("JS_CYTOSCAPE_PATH", js_path("cytoscape"))
assert_dir(JS_CYTOSCAPE_PATH)
assert_file(JS_CYTOSCAPE_PATH+'/cytoscape.min.js')
diff --git a/wqflask/utility/webqtlUtil.py b/wqflask/utility/webqtlUtil.py
index 53661ae4..5681fadf 100644
--- a/wqflask/utility/webqtlUtil.py
+++ b/wqflask/utility/webqtlUtil.py
@@ -41,22 +41,22 @@ ParInfo ={
'C57BL-6JxC57BL-6NJF2':['', '', 'C57BL/6J', 'C57BL/6NJ'],
'BXD300':['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'],
'B6BTBRF2':['B6BTBRF1', 'BTBRB6F1', 'C57BL/6J', 'BTBRT<+>tf/J'],
-'BHHBF2':['B6HF2','HB6F2','C57BL/6J','C3H/HeJ'],
-'BHF2':['B6HF2','HB6F2','C57BL/6J','C3H/HeJ'],
+'BHHBF2':['B6HF2', 'HB6F2', 'C57BL/6J', 'C3H/HeJ'],
+'BHF2':['B6HF2', 'HB6F2', 'C57BL/6J', 'C3H/HeJ'],
'B6D2F2':['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'],
'BDF2-1999':['B6D2F2', 'D2B6F2', 'C57BL/6J', 'DBA/2J'],
'BDF2-2005':['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'],
-'CTB6F2':['CTB6F2','B6CTF2','C57BL/6J','Castaneous'],
+'CTB6F2':['CTB6F2', 'B6CTF2', 'C57BL/6J', 'Castaneous'],
'CXB':['CBF1', 'BCF1', 'C57BL/6ByJ', 'BALB/cByJ'],
'AXBXA':['ABF1', 'BAF1', 'C57BL/6J', 'A/J'],
'AXB':['ABF1', 'BAF1', 'C57BL/6J', 'A/J'],
'BXA':['BAF1', 'ABF1', 'C57BL/6J', 'A/J'],
'LXS':['LSF1', 'SLF1', 'ISS', 'ILS'],
'HXBBXH':['SHR_BNF1', 'BN_SHRF1', 'BN-Lx/Cub', 'SHR/OlaIpcv'],
-'BayXSha':['BayXShaF1', 'ShaXBayF1', 'Bay-0','Shahdara'],
-'ColXBur':['ColXBurF1', 'BurXColF1', 'Col-0','Bur-0'],
-'ColXCvi':['ColXCviF1', 'CviXColF1', 'Col-0','Cvi'],
-'SXM':['SMF1', 'MSF1', 'Steptoe','Morex'],
+'BayXSha':['BayXShaF1', 'ShaXBayF1', 'Bay-0', 'Shahdara'],
+'ColXBur':['ColXBurF1', 'BurXColF1', 'Col-0', 'Bur-0'],
+'ColXCvi':['ColXCviF1', 'CviXColF1', 'Col-0', 'Cvi'],
+'SXM':['SMF1', 'MSF1', 'Steptoe', 'Morex'],
'HRDP':['SHR_BNF1', 'BN_SHRF1', 'BN-Lx/Cub', 'SHR/OlaIpcv']
}
@@ -64,7 +64,7 @@ ParInfo ={
# Accessory Functions
#########################################
-def genRandStr(prefix = "", length=8, chars=string.letters+string.digits):
+def genRandStr(prefix = "", length=8, chars=string.ascii_letters+string.digits):
from random import choice
_str = prefix[:]
for i in range(length):
@@ -91,7 +91,7 @@ def readLineCSV(line): ### dcrowell July 2008
returnList[0]=returnList[0][1:]
return returnList
-def cmpEigenValue(A,B):
+def cmpEigenValue(A, B):
try:
if A[0] > B[0]:
return -1
@@ -107,7 +107,7 @@ def hasAccessToConfidentialPhenotypeTrait(privilege, userName, authorized_users)
if webqtlConfig.USERDICT[privilege] > webqtlConfig.USERDICT['user']:
access_to_confidential_phenotype_trait = 1
else:
- AuthorizedUsersList=map(string.strip, string.split(authorized_users, ','))
- if AuthorizedUsersList.__contains__(userName):
+ AuthorizedUsersList=[x.strip() for x in authorized_users.split(',')]
+ if userName in AuthorizedUsersList:
access_to_confidential_phenotype_trait = 1
- return access_to_confidential_phenotype_trait \ No newline at end of file
+ return access_to_confidential_phenotype_trait
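Note on the pattern above: the Python 2 idiom map(string.strip, string.split(s, ',')) becomes a plain list comprehension over str methods in Python 3, and the __contains__ call becomes a normal membership test. A minimal standalone sketch, not part of the patch:

    # Python 3: split/strip with str methods, then an ordinary "in" test
    authorized_users = "alice, bob , carol"
    authorized_list = [x.strip() for x in authorized_users.split(',')]
    print("bob" in authorized_list)   # True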
diff --git a/wqflask/wqflask/__init__.py b/wqflask/wqflask/__init__.py
index 7ed9c7b8..d484e525 100644
--- a/wqflask/wqflask/__init__.py
+++ b/wqflask/wqflask/__init__.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
import sys
import jinja2
diff --git a/wqflask/wqflask/api/correlation.py b/wqflask/wqflask/api/correlation.py
index 7f5312c1..f5b50dcd 100644
--- a/wqflask/wqflask/api/correlation.py
+++ b/wqflask/wqflask/api/correlation.py
@@ -1,10 +1,8 @@
-from __future__ import absolute_import, division, print_function
-
import collections
import scipy
-from MySQLdb import escape_string as escape
+from utility.db_tools import escape
from flask import g
@@ -36,7 +34,7 @@ def do_correlation(start_vars):
#corr_results = collections.OrderedDict(sorted(corr_results.items(), key=lambda t: -abs(t[1][0])))
final_results = []
- for _trait_counter, trait in enumerate(corr_results.keys()[:corr_params['return_count']]):
+ for _trait_counter, trait in enumerate(list(corr_results.keys())[:corr_params['return_count']]):
if corr_params['type'] == "tissue":
[sample_r, num_overlap, sample_p, symbol] = corr_results[trait]
result_dict = {
@@ -76,20 +74,20 @@ def calculate_results(this_trait, this_dataset, target_dataset, corr_params):
if corr_params['type'] == "tissue":
trait_symbol_dict = this_dataset.retrieve_genes("Symbol")
corr_results = do_tissue_correlation_for_all_traits(this_trait, trait_symbol_dict, corr_params)
- sorted_results = collections.OrderedDict(sorted(corr_results.items(),
+ sorted_results = collections.OrderedDict(sorted(list(corr_results.items()),
key=lambda t: -abs(t[1][1])))
elif corr_params['type'] == "literature" or corr_params['type'] == "lit": #ZS: Just so a user can use either "lit" or "literature"
trait_geneid_dict = this_dataset.retrieve_genes("GeneId")
corr_results = do_literature_correlation_for_all_traits(this_trait, this_dataset, trait_geneid_dict, corr_params)
- sorted_results = collections.OrderedDict(sorted(corr_results.items(),
+ sorted_results = collections.OrderedDict(sorted(list(corr_results.items()),
key=lambda t: -abs(t[1][1])))
else:
- for target_trait, target_vals in target_dataset.trait_data.iteritems():
+ for target_trait, target_vals in list(target_dataset.trait_data.items()):
result = get_sample_r_and_p_values(this_trait, this_dataset, target_vals, target_dataset, corr_params['type'])
if result is not None:
corr_results[target_trait] = result
- sorted_results = collections.OrderedDict(sorted(corr_results.items(), key=lambda t: -abs(t[1][0])))
+ sorted_results = collections.OrderedDict(sorted(list(corr_results.items()), key=lambda t: -abs(t[1][0])))
return sorted_results
@@ -100,10 +98,10 @@ def do_tissue_correlation_for_all_traits(this_trait, trait_symbol_dict, corr_par
if this_trait.symbol.lower() in primary_trait_tissue_vals_dict:
primary_trait_tissue_values = primary_trait_tissue_vals_dict[this_trait.symbol.lower()]
- corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(symbol_list=trait_symbol_dict.values())
+ corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(symbol_list=list(trait_symbol_dict.values()))
tissue_corr_data = {}
- for trait, symbol in trait_symbol_dict.iteritems():
+ for trait, symbol in list(trait_symbol_dict.items()):
if symbol and symbol.lower() in corr_result_tissue_vals_dict:
this_trait_tissue_values = corr_result_tissue_vals_dict[symbol.lower()]
@@ -119,7 +117,7 @@ def do_literature_correlation_for_all_traits(this_trait, target_dataset, trait_g
input_trait_mouse_gene_id = convert_to_mouse_gene_id(target_dataset.group.species.lower(), this_trait.geneid)
lit_corr_data = {}
- for trait, gene_id in trait_geneid_dict.iteritems():
+ for trait, gene_id in list(trait_geneid_dict.items()):
mouse_gene_id = convert_to_mouse_gene_id(target_dataset.group.species.lower(), gene_id)
if mouse_gene_id and str(mouse_gene_id).find(";") == -1:
@@ -234,4 +232,4 @@ def init_corr_params(start_vars):
'return_count' : return_count
}
- return corr_params \ No newline at end of file
+ return corr_params
diff --git a/wqflask/wqflask/api/gen_menu.py b/wqflask/wqflask/api/gen_menu.py
index fedf3e0b..1dcafe1f 100644
--- a/wqflask/wqflask/api/gen_menu.py
+++ b/wqflask/wqflask/api/gen_menu.py
@@ -1,5 +1,3 @@
-from __future__ import print_function, division
-
from flask import g
@@ -61,7 +59,7 @@ def get_types(groups):
"""Build types list"""
types = {}
- for species, group_dict in groups.iteritems():
+ for species, group_dict in list(groups.items()):
types[species] = {}
for group_name, _group_full_name, _family_name in group_dict:
if phenotypes_exist(group_name):
@@ -136,9 +134,9 @@ def build_types(species, group):
def get_datasets(types):
"""Build datasets list"""
datasets = {}
- for species, group_dict in types.iteritems():
+ for species, group_dict in list(types.items()):
datasets[species] = {}
- for group, type_list in group_dict.iteritems():
+ for group, type_list in list(group_dict.items()):
datasets[species][group] = {}
for type_name in type_list:
these_datasets = build_datasets(species, group, type_name[0])
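The recurring change in this file and several below replaces the removed dict.iteritems() with dict.items(); wrapping the view in list() keeps a concrete copy, which is safer when the dictionary is modified during iteration. A small sketch of the Python 3 idiom with illustrative values:

    groups = {"mouse": [("BXD", "BXD Family", "BXD")]}
    # items() returns a view in Python 3; list() makes an explicit, stable copy
    for species, group_dict in list(groups.items()):
        print(species, group_dict)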
diff --git a/wqflask/wqflask/api/mapping.py b/wqflask/wqflask/api/mapping.py
index 92c27c9b..d59a69df 100644
--- a/wqflask/wqflask/api/mapping.py
+++ b/wqflask/wqflask/api/mapping.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
import string
from base import data_set
diff --git a/wqflask/wqflask/api/router.py b/wqflask/wqflask/api/router.py
index 6324cabe..8e59ef27 100644
--- a/wqflask/wqflask/api/router.py
+++ b/wqflask/wqflask/api/router.py
@@ -1,16 +1,21 @@
# GN2 API
-from __future__ import absolute_import, division, print_function
+import os
+import io
+import csv
+import json
+import datetime
+import requests
-import os, io, csv, json, datetime, requests, yaml
-import zlib
from zipfile import ZipFile, ZIP_DEFLATED
-import StringIO
import flask
-from flask import g, Response, request, make_response, render_template, send_from_directory, jsonify, redirect, send_file
-import sqlalchemy
+from flask import g
+from flask import request
+from flask import make_response
+from flask import send_file
+
from wqflask import app
from wqflask.api import correlation, mapping, gen_menu
@@ -308,7 +313,7 @@ def fetch_traits(dataset_name, file_format = "json"):
else:
filename = dataset_name + "_trait_ids.csv"
- si = StringIO.StringIO()
+ si = io.StringIO()
csv_writer = csv.writer(si)
csv_writer.writerows([[trait_id] for trait_id in trait_ids])
output = make_response(si.getvalue())
@@ -322,7 +327,7 @@ def fetch_traits(dataset_name, file_format = "json"):
else:
filename = dataset_name + "_trait_names.csv"
- si = StringIO.StringIO()
+ si = io.StringIO()
csv_writer = csv.writer(si)
csv_writer.writerows([[trait_name] for trait_name in trait_names])
output = make_response(si.getvalue())
@@ -413,7 +418,7 @@ def fetch_traits(dataset_name, file_format = "json"):
for result in g.db.execute(final_query).fetchall():
results_list.append(result)
- si = StringIO.StringIO()
+ si = io.StringIO()
csv_writer = csv.writer(si)
csv_writer.writerows(results_list)
output = make_response(si.getvalue())
@@ -517,9 +522,9 @@ def all_sample_data(dataset_name, file_format = "csv"):
line_list.append("x")
results_list.append(line_list)
- results_list = map(list, zip(*results_list))
+ results_list = list(map(list, zip(*results_list)))
- si = StringIO.StringIO()
+ si = io.StringIO()
csv_writer = csv.writer(si)
csv_writer.writerows(results_list)
output = make_response(si.getvalue())
@@ -558,10 +563,10 @@ def trait_sample_data(dataset_name, trait_name, file_format = "json"):
sample_list = []
for sample in sample_data:
sample_dict = {
- "sample_name" : sample[0],
- "sample_name_2" : sample[1],
- "value" : sample[2],
- "data_id" : sample[3],
+ "sample_name": sample[0],
+ "sample_name_2": sample[1],
+ "value": sample[2],
+ "data_id": sample[3],
}
if sample[4]:
sample_dict["se"] = sample[4]
@@ -706,7 +711,7 @@ def get_mapping_results():
if format == "csv":
filename = "mapping_" + datetime.datetime.utcnow().strftime("%b_%d_%Y_%I:%M%p") + ".csv"
- si = StringIO.StringIO()
+ si = io.StringIO()
csv_writer = csv.writer(si)
csv_writer.writerows(results)
output = make_response(si.getvalue())
@@ -732,7 +737,7 @@ def get_genotypes(group_name, file_format="csv", dataset_name=None):
if request.args['limit_to'].isdigit():
limit_num = int(request.args['limit_to'])
- si = StringIO.StringIO()
+ si = io.StringIO()
if file_format == "csv" or file_format == "geno":
filename = group_name + ".geno"
@@ -966,4 +971,4 @@ def get_group_id(group_name):
if group_id:
return group_id[0]
else:
- return None \ No newline at end of file
+ return None
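The router now builds CSV responses with io.StringIO instead of the removed StringIO.StringIO module. A minimal sketch of the buffer-plus-csv.writer pattern used above (the helper name and sample rows are illustrative):

    import csv
    import io

    def rows_to_csv(rows):
        si = io.StringIO()              # in-memory text buffer, Python 3 replacement for StringIO.StringIO()
        csv.writer(si).writerows(rows)
        return si.getvalue()

    print(rows_to_csv([["trait", "value"], ["Trait1", 1.2]]))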
diff --git a/wqflask/wqflask/collect.py b/wqflask/wqflask/collect.py
index 42a09fed..15383603 100644
--- a/wqflask/wqflask/collect.py
+++ b/wqflask/wqflask/collect.py
@@ -1,6 +1,3 @@
-from __future__ import print_function, division, absolute_import
-
-
import os
import hashlib
import datetime
@@ -10,7 +7,7 @@ import uuid
import hashlib
import base64
-import urlparse
+import urllib.parse
import simplejson as json
@@ -38,7 +35,7 @@ from utility.logger import getLogger
logger = getLogger(__name__)
def process_traits(unprocessed_traits):
- if isinstance(unprocessed_traits, basestring):
+ if isinstance(unprocessed_traits, str):
unprocessed_traits = unprocessed_traits.split(",")
traits = set()
for trait in unprocessed_traits:
@@ -193,7 +190,7 @@ def view_collection():
params = request.args
uc_id = params['uc_id']
- uc = (collection for collection in g.user_session.user_collections if collection["id"] == uc_id).next()
+ uc = next((collection for collection in g.user_session.user_collections if collection["id"] == uc_id))
traits = uc["members"]
trait_obs = []
diff --git a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py
index 09d6b9cc..92de6073 100644
--- a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py
+++ b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py
@@ -18,35 +18,16 @@
#
# This module is used by GeneNetwork project (www.genenetwork.org)
-from __future__ import absolute_import, print_function, division
-
-import sys
-# sys.path.append(".") Never do this in a webserver!
-
-import string
-import cPickle
-import os
-import time
-import pp
-import math
-import collections
-import resource
-
-
from pprint import pformat as pf
from base.trait import create_trait
from base import data_set
from utility import webqtlUtil, helper_functions, corr_result_helpers
-from db import webqtlDatabaseFunction
import utility.webqtlUtil #this is for parallel computing only.
from wqflask.correlation import correlation_functions
-from utility.benchmark import Bench
from MySQLdb import escape_string as escape
-from pprint import pformat as pf
-
from flask import Flask, g
diff --git a/wqflask/wqflask/correlation/corr_scatter_plot.py b/wqflask/wqflask/correlation/corr_scatter_plot.py
index 819836b1..929cd2c9 100644
--- a/wqflask/wqflask/correlation/corr_scatter_plot.py
+++ b/wqflask/wqflask/correlation/corr_scatter_plot.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
import math
from flask import g
@@ -36,13 +34,13 @@ class CorrScatterPlot(object):
samples_1, samples_2, num_overlap = corr_result_helpers.normalize_values_with_samples(self.trait_1.data, self.trait_2.data)
self.data = []
- self.indIDs = samples_1.keys()
+ self.indIDs = list(samples_1.keys())
vals_1 = []
- for sample in samples_1.keys():
+ for sample in list(samples_1.keys()):
vals_1.append(samples_1[sample].value)
self.data.append(vals_1)
vals_2 = []
- for sample in samples_2.keys():
+ for sample in list(samples_2.keys()):
vals_2.append(samples_2[sample].value)
self.data.append(vals_2)
@@ -130,4 +128,4 @@ def get_intercept_coords(slope, intercept, x_range, y_range):
intercept_coords.append([x1, y1])
intercept_coords.append([x2, y2])
- return intercept_coords \ No newline at end of file
+ return intercept_coords
diff --git a/wqflask/wqflask/correlation/correlation_functions.py b/wqflask/wqflask/correlation/correlation_functions.py
index 06dec795..b883e361 100644
--- a/wqflask/wqflask/correlation/correlation_functions.py
+++ b/wqflask/wqflask/correlation/correlation_functions.py
@@ -24,8 +24,6 @@
#
# Last updated by NL 2011/03/23
-from __future__ import absolute_import, print_function, division
-
import math
import rpy2.robjects
import string
@@ -50,12 +48,12 @@ from flask import Flask, g
def cal_zero_order_corr_for_tiss (primaryValue=[], targetValue=[], method='pearson'):
- R_primary = rpy2.robjects.FloatVector(range(len(primaryValue)))
+ R_primary = rpy2.robjects.FloatVector(list(range(len(primaryValue))))
N = len(primaryValue)
for i in range(len(primaryValue)):
R_primary[i] = primaryValue[i]
- R_target = rpy2.robjects.FloatVector(range(len(targetValue)))
+ R_target = rpy2.robjects.FloatVector(list(range(len(targetValue))))
for i in range(len(targetValue)):
R_target[i]=targetValue[i]
@@ -114,4 +112,4 @@ def get_trait_symbol_and_tissue_values(symbol_list=None):
tissue_data = MrnaAssayTissueData(gene_symbols=symbol_list)
if len(tissue_data.gene_symbols):
- return tissue_data.get_symbol_values_pairs() \ No newline at end of file
+ return tissue_data.get_symbol_values_pairs()
diff --git a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py
index de7a1c0c..91146e5b 100644
--- a/wqflask/wqflask/correlation/show_corr_results.py
+++ b/wqflask/wqflask/correlation/show_corr_results.py
@@ -18,46 +18,25 @@
#
# This module is used by GeneNetwork project (www.genenetwork.org)
-from __future__ import absolute_import, print_function, division
-
-import sys
-
-import string
-import cPickle
-import os
-import time
-import pp
-import math
import collections
-import resource
import json
-
import scipy
import numpy
import rpy2.robjects as ro # R Objects
-import rpy2.rinterface as ri
from rpy2.robjects.packages import importr
utils = importr("utils")
-from pprint import pformat as pf
-
-from base import webqtlConfig
-from utility.THCell import THCell
-from utility.TDCell import TDCell
-from base.trait import create_trait
from base import data_set
from utility import webqtlUtil, helper_functions, corr_result_helpers, hmac
from db import webqtlDatabaseFunction
-import utility.webqtlUtil #this is for parallel computing only.
+import utility.webqtlUtil #this is for parallel computing only.
from wqflask.correlation import correlation_functions
from utility.benchmark import Bench
import utility.webqtlUtil
-from utility.type_checking import is_float, is_int, is_str, get_float, get_int, get_string
-
-from MySQLdb import escape_string as escape
+from utility.type_checking import is_str, get_float, get_int, get_string
+from utility.db_tools import escape
-from pprint import pformat as pf
from flask import Flask, g
@@ -108,17 +87,17 @@ class CorrelationResults(object):
self.sample_data = {}
self.corr_type = start_vars['corr_type']
self.corr_method = start_vars['corr_sample_method']
- self.min_expr = get_float(start_vars,'min_expr')
- self.p_range_lower = get_float(start_vars,'p_range_lower',-1.0)
- self.p_range_upper = get_float(start_vars,'p_range_upper',1.0)
+ self.min_expr = get_float(start_vars, 'min_expr')
+ self.p_range_lower = get_float(start_vars, 'p_range_lower', -1.0)
+ self.p_range_upper = get_float(start_vars, 'p_range_upper', 1.0)
if ('loc_chr' in start_vars and
'min_loc_mb' in start_vars and
'max_loc_mb' in start_vars):
- self.location_chr = get_string(start_vars,'loc_chr')
- self.min_location_mb = get_int(start_vars,'min_loc_mb')
- self.max_location_mb = get_int(start_vars,'max_loc_mb')
+ self.location_chr = get_string(start_vars, 'loc_chr')
+ self.min_location_mb = get_int(start_vars, 'min_loc_mb')
+ self.max_location_mb = get_int(start_vars, 'max_loc_mb')
else:
self.location_chr = self.min_location_mb = self.max_location_mb = None
@@ -145,10 +124,10 @@ class CorrelationResults(object):
if corr_samples_group == 'samples_other':
primary_samples = [x for x in primary_samples if x not in (
self.dataset.group.parlist + self.dataset.group.f1list)]
- self.process_samples(start_vars, self.this_trait.data.keys(), primary_samples)
+ self.process_samples(start_vars, list(self.this_trait.data.keys()), primary_samples)
self.target_dataset = data_set.create_dataset(start_vars['corr_dataset'])
- self.target_dataset.get_trait_data(self.sample_data.keys())
+ self.target_dataset.get_trait_data(list(self.sample_data.keys()))
self.header_fields = get_header_fields(self.target_dataset.type, self.corr_method)
@@ -168,41 +147,41 @@ class CorrelationResults(object):
tissue_corr_data = self.do_tissue_correlation_for_all_traits()
if tissue_corr_data != None:
- for trait in tissue_corr_data.keys()[:self.return_number]:
+ for trait in list(tissue_corr_data.keys())[:self.return_number]:
self.get_sample_r_and_p_values(trait, self.target_dataset.trait_data[trait])
else:
- for trait, values in self.target_dataset.trait_data.iteritems():
+ for trait, values in list(self.target_dataset.trait_data.items()):
self.get_sample_r_and_p_values(trait, values)
elif self.corr_type == "lit":
self.trait_geneid_dict = self.dataset.retrieve_genes("GeneId")
lit_corr_data = self.do_lit_correlation_for_all_traits()
- for trait in lit_corr_data.keys()[:self.return_number]:
+ for trait in list(lit_corr_data.keys())[:self.return_number]:
self.get_sample_r_and_p_values(trait, self.target_dataset.trait_data[trait])
elif self.corr_type == "sample":
- for trait, values in self.target_dataset.trait_data.iteritems():
+ for trait, values in list(self.target_dataset.trait_data.items()):
self.get_sample_r_and_p_values(trait, values)
- self.correlation_data = collections.OrderedDict(sorted(self.correlation_data.items(),
+ self.correlation_data = collections.OrderedDict(sorted(list(self.correlation_data.items()),
key=lambda t: -abs(t[1][0])))
if self.target_dataset.type == "ProbeSet" or self.target_dataset.type == "Geno":
#ZS: Convert min/max chromosome to an int for the location range option
range_chr_as_int = None
- for order_id, chr_info in self.dataset.species.chromosomes.chromosomes.iteritems():
+ for order_id, chr_info in list(self.dataset.species.chromosomes.chromosomes.items()):
if 'loc_chr' in start_vars:
if chr_info.name == self.location_chr:
range_chr_as_int = order_id
- for _trait_counter, trait in enumerate(self.correlation_data.keys()[:self.return_number]):
+ for _trait_counter, trait in enumerate(list(self.correlation_data.keys())[:self.return_number]):
trait_object = create_trait(dataset=self.target_dataset, name=trait, get_qtl_info=True, get_sample_info=False)
if self.target_dataset.type == "ProbeSet" or self.target_dataset.type == "Geno":
#ZS: Convert trait chromosome to an int for the location range option
chr_as_int = 0
- for order_id, chr_info in self.dataset.species.chromosomes.chromosomes.iteritems():
+ for order_id, chr_info in list(self.dataset.species.chromosomes.chromosomes.items()):
if chr_info.name == trait_object.chr:
chr_as_int = order_id
@@ -297,14 +276,14 @@ class CorrelationResults(object):
#print("trait_gene_symbols: ", pf(trait_gene_symbols.values()))
corr_result_tissue_vals_dict= correlation_functions.get_trait_symbol_and_tissue_values(
- symbol_list=self.trait_symbol_dict.values())
+ symbol_list=list(self.trait_symbol_dict.values()))
#print("corr_result_tissue_vals: ", pf(corr_result_tissue_vals_dict))
#print("trait_gene_symbols: ", pf(trait_gene_symbols))
tissue_corr_data = {}
- for trait, symbol in self.trait_symbol_dict.iteritems():
+ for trait, symbol in list(self.trait_symbol_dict.items()):
if symbol and symbol.lower() in corr_result_tissue_vals_dict:
this_trait_tissue_values = corr_result_tissue_vals_dict[symbol.lower()]
@@ -314,7 +293,7 @@ class CorrelationResults(object):
tissue_corr_data[trait] = [symbol, result[0], result[2]]
- tissue_corr_data = collections.OrderedDict(sorted(tissue_corr_data.items(),
+ tissue_corr_data = collections.OrderedDict(sorted(list(tissue_corr_data.items()),
key=lambda t: -abs(t[1][1])))
return tissue_corr_data
@@ -359,7 +338,7 @@ class CorrelationResults(object):
input_trait_mouse_gene_id = self.convert_to_mouse_gene_id(self.dataset.group.species.lower(), self.this_trait.geneid)
lit_corr_data = {}
- for trait, gene_id in self.trait_geneid_dict.iteritems():
+ for trait, gene_id in list(self.trait_geneid_dict.items()):
mouse_gene_id = self.convert_to_mouse_gene_id(self.dataset.group.species.lower(), gene_id)
if mouse_gene_id and str(mouse_gene_id).find(";") == -1:
@@ -387,7 +366,7 @@ class CorrelationResults(object):
else:
lit_corr_data[trait] = [gene_id, 0]
- lit_corr_data = collections.OrderedDict(sorted(lit_corr_data.items(),
+ lit_corr_data = collections.OrderedDict(sorted(list(lit_corr_data.items()),
key=lambda t: -abs(t[1][1])))
return lit_corr_data
@@ -648,4 +627,4 @@ def get_header_fields(data_type, corr_method):
'N',
'Sample p(r)']
- return header_fields \ No newline at end of file
+ return header_fields
diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py
index 0ac94139..49ba9e5d 100644
--- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py
+++ b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py
@@ -18,21 +18,8 @@
#
# This module is used by GeneNetwork project (www.genenetwork.org)
-from __future__ import absolute_import, print_function, division
-
-import sys
-# sys.path.append(".") Never do this in a webserver!
-
-import string
-import cPickle
-import os
import datetime
-import time
-import pp
import math
-import collections
-import resource
-
import numpy as np
import scipy
@@ -42,6 +29,8 @@ import rpy2.robjects as robjects
from pprint import pformat as pf
from utility.redis_tools import get_redis_conn
+from functools import reduce
+
Redis = get_redis_conn()
THIRTY_DAYS = 60 * 60 * 24 * 30
@@ -56,10 +45,6 @@ import utility.webqtlUtil #this is for parallel computing only.
from wqflask.correlation import correlation_functions
from utility.benchmark import Bench
-from MySQLdb import escape_string as escape
-
-from pprint import pformat as pf
-
from flask import Flask, g, url_for
import utility.logger
@@ -190,7 +175,7 @@ class CorrelationMatrix(object):
if self.do_PCA == True:
self.pca_works = "True"
self.pca_trait_ids = []
- pca = self.calculate_pca(range(len(self.traits)), corr_eigen_value, corr_eigen_vectors)
+ pca = self.calculate_pca(list(range(len(self.traits))), corr_eigen_value, corr_eigen_vectors)
self.loadings_array = self.process_loadings()
else:
self.pca_works = "False"
@@ -199,8 +184,8 @@ class CorrelationMatrix(object):
self.js_data = dict(traits = [trait.name for trait in self.traits],
groups = groups,
- cols = range(len(self.traits)),
- rows = range(len(self.traits)),
+ cols = list(range(len(self.traits))),
+ rows = list(range(len(self.traits))),
samples = self.all_sample_list,
sample_data = self.sample_data,)
# corr_results = [result[1] for result in result_row for result_row in self.corr_results])
@@ -271,14 +256,14 @@ def zScore(trait_data_array):
i = 0
for data in trait_data_array:
N = len(data)
- S = reduce(lambda x,y: x+y, data, 0.)
- SS = reduce(lambda x,y: x+y*y, data, 0.)
+ S = reduce(lambda x, y: x+y, data, 0.)
+ SS = reduce(lambda x, y: x+y*y, data, 0.)
mean = S/N
var = SS - S*S/N
stdev = math.sqrt(var/(N-1))
if stdev == 0:
stdev = 1e-100
- data2 = map(lambda x:(x-mean)/stdev,data)
+ data2 = [(x-mean)/stdev for x in data]
trait_data_array[i] = data2
i += 1
return trait_data_array
@@ -290,7 +275,7 @@ def sortEigenVectors(vector):
combines = []
i = 0
for item in eigenValues:
- combines.append([eigenValues[i],eigenVectors[i]])
+ combines.append([eigenValues[i], eigenVectors[i]])
i += 1
combines.sort(webqtlUtil.cmpEigenValue)
A = []
@@ -298,8 +283,8 @@ def sortEigenVectors(vector):
for item in combines:
A.append(item[0])
B.append(item[1])
- sum = reduce(lambda x,y: x+y, A, 0.0)
- A = map(lambda x:x*100.0/sum, A)
+ sum = reduce(lambda x, y: x+y, A, 0.0)
+ A = [x*100.0/sum for x in A]
return [A, B]
except:
- return [] \ No newline at end of file
+ return []
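reduce is imported from functools above because it is no longer a builtin in Python 3; the z-score arithmetic itself is unchanged. A self-contained sketch of the same sum and sum-of-squares computation, using example data only:

    import math
    from functools import reduce

    data = [1.0, 2.0, 3.0, 4.0]
    S = reduce(lambda x, y: x + y, data, 0.)        # sum
    SS = reduce(lambda x, y: x + y * y, data, 0.)   # sum of squares
    mean = S / len(data)
    stdev = math.sqrt((SS - S * S / len(data)) / (len(data) - 1))
    print([(x - mean) / stdev for x in data])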
diff --git a/wqflask/wqflask/ctl/ctl_analysis.py b/wqflask/wqflask/ctl/ctl_analysis.py
index 35067036..e58a7b87 100644
--- a/wqflask/wqflask/ctl/ctl_analysis.py
+++ b/wqflask/wqflask/ctl/ctl_analysis.py
@@ -125,7 +125,7 @@ class CTL(object):
gt = create_trait(name = ts[0], dataset_name = ts[1])
gt = retrieve_sample_data(gt, dataset, individuals)
for ind in individuals:
- if ind in gt.data.keys():
+ if ind in list(gt.data.keys()):
traits.append(gt.data[ind].value)
else:
traits.append("-999")
@@ -175,7 +175,7 @@ class CTL(object):
sys.stdout.flush()
# Create the interactive graph for cytoscape visualization (Nodes and Edges)
- if not type(significant) == ri.RNULLType:
+ if not isinstance(significant, ri.RNULLType):
for x in range(len(significant[0])):
logger.debug(significant[0][x], significant[1][x], significant[2][x]) # Debug to console
tsS = significant[0][x].split(':') # Source
diff --git a/wqflask/wqflask/db_info.py b/wqflask/wqflask/db_info.py
index f04e38bf..f420b472 100644
--- a/wqflask/wqflask/db_info.py
+++ b/wqflask/wqflask/db_info.py
@@ -1,127 +1,138 @@
-import httplib, urllib2
-import re
-
-from flask import Flask, g
-
-from utility.logger import getLogger
-logger = getLogger(__name__ )
-
-class InfoPage(object):
- def __init__(self, start_vars):
- self.info = None
- self.gn_accession_id = None
- if 'gn_accession_id' in start_vars:
- self.gn_accession_id = start_vars['gn_accession_id']
- self.info_page_name = start_vars['info_page_name']
-
- self.get_info()
- self.get_datasets_list()
-
- def get_info(self, create=False):
- query_base = ("SELECT InfoPageName, GN_AccesionId, Species.MenuName, Species.TaxonomyId, Tissue.Name, InbredSet.Name, " +
- "GeneChip.GeneChipName, GeneChip.GeoPlatform, AvgMethod.Name, Datasets.DatasetName, Datasets.GeoSeries, " +
- "Datasets.PublicationTitle, DatasetStatus.DatasetStatusName, Datasets.Summary, Datasets.AboutCases, " +
- "Datasets.AboutTissue, Datasets.AboutDataProcessing, Datasets.Acknowledgment, Datasets.ExperimentDesign, " +
- "Datasets.Contributors, Datasets.Citation, Datasets.Notes, Investigators.FirstName, Investigators.LastName, " +
- "Investigators.Address, Investigators.City, Investigators.State, Investigators.ZipCode, Investigators.Country, " +
- "Investigators.Phone, Investigators.Email, Investigators.Url, Organizations.OrganizationName, " +
- "InvestigatorId, DatasetId, DatasetStatusId, Datasets.AboutPlatform, InfoFileTitle, Specifics " +
- "FROM InfoFiles " +
- "LEFT JOIN Species USING (SpeciesId) " +
- "LEFT JOIN Tissue USING (TissueId) " +
- "LEFT JOIN InbredSet USING (InbredSetId) " +
- "LEFT JOIN GeneChip USING (GeneChipId) " +
- "LEFT JOIN AvgMethod USING (AvgMethodId) " +
- "LEFT JOIN Datasets USING (DatasetId) " +
- "LEFT JOIN Investigators USING (InvestigatorId) " +
- "LEFT JOIN Organizations USING (OrganizationId) " +
- "LEFT JOIN DatasetStatus USING (DatasetStatusId) WHERE ")
-
- if self.gn_accession_id:
- final_query = query_base + "GN_AccesionId = {}".format(self.gn_accession_id)
- results = g.db.execute(final_query).fetchone()
- if self.info_page_name and not results:
- final_query = query_base + "InfoPageName={}".format(self.info_page_name)
- elif self.info_page_name:
- final_query = query_base + "InfoPageName={}".format(self.info_page_name)
- results = g.db.execute(final_query).fetchone()
- else:
- raise 'No correct parameter found'
-
- if results:
- self.info = process_query_results(results)
-
- if (not results or len(results) < 1) and self.info_page_name and create:
- insert_sql = "INSERT INTO InfoFiles SET InfoFiles.InfoPageName={}".format(self.info_page_name)
- return self.get_info()
-
- if not self.gn_accession_id and self.info:
- self.gn_accession_id = self.info['accession_id']
- if not self.info_page_name and self.info:
- self.info_page_name = self.info['info_page_name']
-
- def get_datasets_list(self):
- self.filelist = []
- try:
- response = urllib2.urlopen("http://datafiles.genenetwork.org/download/GN%s" % self.gn_accession_id)
- data = response.read()
-
- matches = re.findall(r"<tr>.+?</tr>", data, re.DOTALL)
- for i, match in enumerate(matches):
- if i == 0:
- continue
- cells = re.findall(r"<td.+?>.+?</td>", match, re.DOTALL)
- full_filename = re.search(r"<a href=\"(.+?)\"", cells[1], re.DOTALL).group(1).strip()
- filename = full_filename.split("/")[-1]
- filesize = re.search(r">(.+?)<", cells[2]).group(1).strip()
- filedate = "N/A" #ZS: Since we can't get it for now
-
- self.filelist.append([filename, filedate, filesize])
- except Exception, e:
- pass
-
-def process_query_results(results):
- info_ob = {
- 'info_page_name': results[0],
- 'accession_id': results[1],
- 'menu_name': results[2],
- 'taxonomy_id': results[3],
- 'tissue_name': results[4],
- 'group_name': results[5],
- 'gene_chip_name': results[6],
- 'geo_platform': results[7],
- 'avg_method_name': results[8],
- 'dataset_name': results[9],
- 'geo_series': results[10],
- 'publication_title': results[11],
- 'dataset_status_name': results[12],
- 'dataset_summary': results[13],
- 'about_cases': results[14],
- 'about_tissue': results[15],
- 'about_data_processing': results[16],
- 'acknowledgement': results[17],
- 'experiment_design': results[18],
- 'contributors': results[19],
- 'citation': results[20],
- 'notes': results[21],
- 'investigator_firstname': results[22],
- 'investigator_lastname': results[23],
- 'investigator_address': results[24],
- 'investigator_city': results[25],
- 'investigator_state': results[26],
- 'investigator_zipcode': results[27],
- 'investigator_country': results[28],
- 'investigator_phone': results[29],
- 'investigator_email': results[30],
- 'investigator_url': results[31],
- 'organization_name': results[32],
- 'investigator_id': results[33],
- 'dataset_id': results[34],
- 'dataset_status_is': results[35],
- 'about_platform': results[36],
- 'info_file_title': results[37],
- 'specifics': results[38]
- }
-
- return info_ob
- \ No newline at end of file
+import http.client
+import urllib.request
+import urllib.error
+import urllib.parse
+import re
+
+from flask import Flask, g
+
+from utility.logger import getLogger
+logger = getLogger(__name__)
+
+
+class InfoPage(object):
+ def __init__(self, start_vars):
+ self.info = None
+ self.gn_accession_id = None
+ if 'gn_accession_id' in start_vars:
+ self.gn_accession_id = start_vars['gn_accession_id']
+ self.info_page_name = start_vars['info_page_name']
+
+ self.get_info()
+ self.get_datasets_list()
+
+ def get_info(self, create=False):
+ query_base = ("SELECT InfoPageName, GN_AccesionId, Species.MenuName, Species.TaxonomyId, Tissue.Name, InbredSet.Name, " +
+ "GeneChip.GeneChipName, GeneChip.GeoPlatform, AvgMethod.Name, Datasets.DatasetName, Datasets.GeoSeries, " +
+ "Datasets.PublicationTitle, DatasetStatus.DatasetStatusName, Datasets.Summary, Datasets.AboutCases, " +
+ "Datasets.AboutTissue, Datasets.AboutDataProcessing, Datasets.Acknowledgment, Datasets.ExperimentDesign, " +
+ "Datasets.Contributors, Datasets.Citation, Datasets.Notes, Investigators.FirstName, Investigators.LastName, " +
+ "Investigators.Address, Investigators.City, Investigators.State, Investigators.ZipCode, Investigators.Country, " +
+ "Investigators.Phone, Investigators.Email, Investigators.Url, Organizations.OrganizationName, " +
+ "InvestigatorId, DatasetId, DatasetStatusId, Datasets.AboutPlatform, InfoFileTitle, Specifics " +
+ "FROM InfoFiles " +
+ "LEFT JOIN Species USING (SpeciesId) " +
+ "LEFT JOIN Tissue USING (TissueId) " +
+ "LEFT JOIN InbredSet USING (InbredSetId) " +
+ "LEFT JOIN GeneChip USING (GeneChipId) " +
+ "LEFT JOIN AvgMethod USING (AvgMethodId) " +
+ "LEFT JOIN Datasets USING (DatasetId) " +
+ "LEFT JOIN Investigators USING (InvestigatorId) " +
+ "LEFT JOIN Organizations USING (OrganizationId) " +
+ "LEFT JOIN DatasetStatus USING (DatasetStatusId) WHERE ")
+
+ if self.gn_accession_id:
+ final_query = query_base + \
+ "GN_AccesionId = {}".format(self.gn_accession_id)
+ results = g.db.execute(final_query).fetchone()
+ if self.info_page_name and not results:
+ final_query = query_base + \
+ "InfoPageName={}".format(self.info_page_name)
+ elif self.info_page_name:
+ final_query = query_base + \
+ "InfoPageName={}".format(self.info_page_name)
+ results = g.db.execute(final_query).fetchone()
+ else:
+ raise ValueError('No correct parameter found')
+
+ if results:
+ self.info = process_query_results(results)
+
+ if (not results or len(results) < 1) and self.info_page_name and create:
+ insert_sql = "INSERT INTO InfoFiles SET InfoFiles.InfoPageName={}".format(
+ self.info_page_name)
+ return self.get_info()
+
+ if not self.gn_accession_id and self.info:
+ self.gn_accession_id = self.info['accession_id']
+ if not self.info_page_name and self.info:
+ self.info_page_name = self.info['info_page_name']
+
+ def get_datasets_list(self):
+ self.filelist = []
+ try:
+ response = urllib.request.urlopen(
+ "http://datafiles.genenetwork.org/download/GN%s" % self.gn_accession_id)
+ data = response.read().decode()
+
+ matches = re.findall(r"<tr>.+?</tr>", data, re.DOTALL)
+ for i, match in enumerate(matches):
+ if i == 0:
+ continue
+ cells = re.findall(r"<td.+?>.+?</td>", match, re.DOTALL)
+ full_filename = re.search(
+ r"<a href=\"(.+?)\"", cells[1], re.DOTALL).group(1).strip()
+ filename = full_filename.split("/")[-1]
+ filesize = re.search(r">(.+?)<", cells[2]).group(1).strip()
+ filedate = "N/A" # ZS: Since we can't get it for now
+
+ self.filelist.append([filename, filedate, filesize])
+ except Exception as e:
+ pass
+
+def process_query_results(results):
+ info_ob = {
+ 'info_page_name': results[0],
+ 'accession_id': results[1],
+ 'menu_name': results[2],
+ 'taxonomy_id': results[3],
+ 'tissue_name': results[4],
+ 'group_name': results[5],
+ 'gene_chip_name': results[6],
+ 'geo_platform': results[7],
+ 'avg_method_name': results[8],
+ 'dataset_name': results[9],
+ 'geo_series': results[10],
+ 'publication_title': results[11],
+ 'dataset_status_name': results[12],
+ 'dataset_summary': results[13],
+ 'about_cases': results[14],
+ 'about_tissue': results[15],
+ 'about_data_processing': results[16],
+ 'acknowledgement': results[17],
+ 'experiment_design': results[18],
+ 'contributors': results[19],
+ 'citation': results[20],
+ 'notes': results[21],
+ 'investigator_firstname': results[22],
+ 'investigator_lastname': results[23],
+ 'investigator_address': results[24],
+ 'investigator_city': results[25],
+ 'investigator_state': results[26],
+ 'investigator_zipcode': results[27],
+ 'investigator_country': results[28],
+ 'investigator_phone': results[29],
+ 'investigator_email': results[30],
+ 'investigator_url': results[31],
+ 'organization_name': results[32],
+ 'investigator_id': results[33],
+ 'dataset_id': results[34],
+ 'dataset_status_is': results[35],
+ 'about_platform': results[36],
+ 'info_file_title': results[37],
+ 'specifics': results[38]
+ }
+
+ return info_ob
+
+
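The rewritten get_datasets_list uses urllib.request, the Python 3 split of urllib2; note that response.read() now returns bytes, so the payload must be decoded before re is applied with text patterns. A minimal sketch of that fetch-and-parse step (the accession id and function name are made-up examples):

    import re
    import urllib.request

    def list_rows(accession_id="112"):
        url = "http://datafiles.genenetwork.org/download/GN%s" % accession_id
        with urllib.request.urlopen(url) as response:
            data = response.read().decode()         # bytes -> str before regex matching
        return re.findall(r"<tr>.+?</tr>", data, re.DOTALL)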
diff --git a/wqflask/wqflask/do_search.py b/wqflask/wqflask/do_search.py
index 1e15d28f..00636563 100644
--- a/wqflask/wqflask/do_search.py
+++ b/wqflask/wqflask/do_search.py
@@ -1,16 +1,13 @@
-from __future__ import print_function, division
-
import string
import requests
import json
from flask import Flask, g
-from MySQLdb import escape_string as escape
+from utility.db_tools import escape
from pprint import pformat as pf
import sys
-# sys.path.append("..") Never in a running webserver
from db import webqtlDatabaseFunction
from utility.tools import GN2_BASE_URL
@@ -19,6 +16,7 @@ import logging
from utility.logger import getLogger
logger = getLogger(__name__)
+
class DoSearch(object):
"""Parent class containing parameters/functions used for all searches"""
@@ -46,8 +44,8 @@ class DoSearch(object):
def handle_wildcard(self, str):
keyword = str.strip()
- keyword = keyword.replace("*",".*")
- keyword = keyword.replace("?",".")
+ keyword = keyword.replace("*", ".*")
+ keyword = keyword.replace("?", ".")
return keyword
diff --git a/wqflask/wqflask/docs.py b/wqflask/wqflask/docs.py
index 78407e22..8628b81d 100644
--- a/wqflask/wqflask/docs.py
+++ b/wqflask/wqflask/docs.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
import codecs
from flask import g
@@ -42,4 +40,4 @@ def update_text(start_vars):
sql = "UPDATE Docs SET content='{0}' WHERE entry='{1}';".format(content, start_vars['entry_type'])
g.db.execute(sql)
except:
- pass \ No newline at end of file
+ pass
diff --git a/wqflask/wqflask/export_traits.py b/wqflask/wqflask/export_traits.py
index 3272c03d..3a886537 100644
--- a/wqflask/wqflask/export_traits.py
+++ b/wqflask/wqflask/export_traits.py
@@ -1,8 +1,6 @@
-from __future__ import print_function, division
-
import csv
import xlsxwriter
-import StringIO
+import io
import datetime
import itertools
@@ -61,9 +59,9 @@ def export_search_results_csv(targs):
traits_by_group = sort_traits_by_group(trait_list)
file_list = []
- for group in traits_by_group.keys():
+ for group in list(traits_by_group.keys()):
group_traits = traits_by_group[group]
- buff = StringIO.StringIO()
+ buff = io.StringIO()
writer = csv.writer(buff)
csv_rows = []
@@ -122,7 +120,7 @@ def export_search_results_csv(targs):
csv_rows.append(row_contents)
- csv_rows = map(list, itertools.izip_longest(*[row for row in csv_rows]))
+ csv_rows = list(map(list, itertools.zip_longest(*[row for row in csv_rows])))
writer.writerows(csv_rows)
csv_data = buff.getvalue()
buff.close()
@@ -135,9 +133,9 @@ def export_search_results_csv(targs):
def sort_traits_by_group(trait_list=[]):
traits_by_group = {}
for trait in trait_list:
- if trait.dataset.group.name not in traits_by_group.keys():
+ if trait.dataset.group.name not in list(traits_by_group.keys()):
traits_by_group[trait.dataset.group.name] = []
traits_by_group[trait.dataset.group.name].append(trait)
- return traits_by_group \ No newline at end of file
+ return traits_by_group
diff --git a/wqflask/wqflask/external_tools/send_to_bnw.py b/wqflask/wqflask/external_tools/send_to_bnw.py
index 68efd10d..efa17f05 100644
--- a/wqflask/wqflask/external_tools/send_to_bnw.py
+++ b/wqflask/wqflask/external_tools/send_to_bnw.py
@@ -18,8 +18,6 @@
#
# This module is used by GeneNetwork project (www.genenetwork.org)
-from __future__ import absolute_import, print_function, division
-
from base.trait import GeneralTrait
from utility import helper_functions, corr_result_helpers
@@ -69,4 +67,4 @@ class SendToBNW(object):
if has_none:
continue
self.form_value += ",".join(str(cell) for cell in row)
- self.form_value += ";" \ No newline at end of file
+ self.form_value += ";"
diff --git a/wqflask/wqflask/external_tools/send_to_geneweaver.py b/wqflask/wqflask/external_tools/send_to_geneweaver.py
index 7a5dba73..4c958a88 100644
--- a/wqflask/wqflask/external_tools/send_to_geneweaver.py
+++ b/wqflask/wqflask/external_tools/send_to_geneweaver.py
@@ -18,8 +18,6 @@
#
# This module is used by GeneNetwork project (www.genenetwork.org)
-from __future__ import absolute_import, print_function, division
-
import string
from flask import Flask, g
@@ -54,10 +52,10 @@ class SendToGeneWeaver(object):
trait_name_list = get_trait_name_list(self.trait_list)
self.hidden_vars = {
- 'client' : "genenetwork",
- 'species' : species_name,
- 'idtype' : self.chip_name,
- 'list' : string.join(trait_name_list, ","),
+ 'client': "genenetwork",
+ 'species': species_name,
+ 'idtype': self.chip_name,
+ 'list': ",".join(trait_name_list),
}
def get_trait_name_list(trait_list):
@@ -109,4 +107,4 @@ def test_chip(trait_list):
chip_name = '%s_NA' % result[0]
return chip_name
- return chip_name \ No newline at end of file
+ return chip_name
diff --git a/wqflask/wqflask/external_tools/send_to_webgestalt.py b/wqflask/wqflask/external_tools/send_to_webgestalt.py
index 30ca024f..2f068792 100644
--- a/wqflask/wqflask/external_tools/send_to_webgestalt.py
+++ b/wqflask/wqflask/external_tools/send_to_webgestalt.py
@@ -18,8 +18,6 @@
#
# This module is used by GeneNetwork project (www.genenetwork.org)
-from __future__ import absolute_import, print_function, division
-
import string
from flask import Flask, g
@@ -49,7 +47,7 @@ class SendToWebGestalt(object):
id_type = "entrezgene"
self.hidden_vars = {
- 'gene_list' : string.join(gene_id_list, "\n"),
+ 'gene_list' : "\n".join(gene_id_list),
'id_type' : "entrezgene",
'ref_set' : "genome",
'enriched_database_category' : "geneontology",
@@ -123,4 +121,4 @@ def gen_gene_id_list(trait_list):
trait_name_list.append(trait.name)
retrieve_trait_info(trait, trait.dataset)
gene_id_list.append(str(trait.geneid))
- return trait_name_list, gene_id_list \ No newline at end of file
+ return trait_name_list, gene_id_list
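As in the GeneWeaver change above, string.join(seq, sep) becomes the str.join method in Python 3. A one-line sketch with illustrative gene ids:

    gene_id_list = ["12345", "67890"]
    gene_list = "\n".join(gene_id_list)   # Python 2 equivalent: string.join(gene_id_list, "\n")
    print(gene_list)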
diff --git a/wqflask/wqflask/group_manager.py b/wqflask/wqflask/group_manager.py
index 99d5db26..69ee9623 100644
--- a/wqflask/wqflask/group_manager.py
+++ b/wqflask/wqflask/group_manager.py
@@ -1,6 +1,3 @@
-
-from __future__ import print_function, division, absolute_import
-
import random, string
from flask import (Flask, g, render_template, url_for, request, make_response,
@@ -155,4 +152,4 @@ def send_group_invites(group_id, user_email_list = [], user_type="members"):
save_user(user_details, user_details['user_id'])
send_invitation_email(user_email, temp_password)
-#@app.route() \ No newline at end of file
+#@app.route()
diff --git a/wqflask/wqflask/gsearch.py b/wqflask/wqflask/gsearch.py
index c65a1415..6d797a29 100644
--- a/wqflask/wqflask/gsearch.py
+++ b/wqflask/wqflask/gsearch.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
import json
from flask import Flask, g
diff --git a/wqflask/wqflask/heatmap/heatmap.py b/wqflask/wqflask/heatmap/heatmap.py
index 5098a184..cca5a4fc 100644
--- a/wqflask/wqflask/heatmap/heatmap.py
+++ b/wqflask/wqflask/heatmap/heatmap.py
@@ -1,46 +1,17 @@
-from __future__ import absolute_import, print_function, division
-
-import sys
-# sys.path.append(".") Never in a running webserver
-
import string
-import cPickle
import os
-import datetime
-import time
-import pp
-import math
import random
-import collections
-import resource
-
-import scipy
-import numpy as np
-
-from pprint import pformat as pf
-
-from base.trait import GeneralTrait
-from base import data_set
from base import species
from base import webqtlConfig
from utility import helper_functions
-from utility import Plot, Bunch
-from utility import temp_data
-from utility.tools import flat_files, REAPER_COMMAND, TEMPDIR
-
-from MySQLdb import escape_string as escape
-
-import cPickle as pickle
-import simplejson as json
-
-from pprint import pformat as pf
+from utility.tools import flat_files, REAPER_COMMAND, TEMPDIR
from redis import Redis
-Redis = Redis()
-
from flask import Flask, g
-
from utility.logger import getLogger
+
+Redis = Redis()
+
logger = getLogger(__name__ )
class Heatmap(object):
@@ -60,7 +31,7 @@ class Heatmap(object):
chrnames = []
self.species = species.TheSpecies(dataset=self.trait_list[0][1])
- for key in self.species.chromosomes.chromosomes.keys():
+ for key in list(self.species.chromosomes.chromosomes.keys()):
chrnames.append([self.species.chromosomes.chromosomes[key].name, self.species.chromosomes.chromosomes[key].mb_length])
for trait_db in self.trait_list:
@@ -93,7 +64,7 @@ class Heatmap(object):
pos = []
markernames = []
- for trait in self.trait_results.keys():
+ for trait in list(self.trait_results.keys()):
lodnames.append(trait)
self.dataset.group.get_markers()
@@ -205,4 +176,4 @@ def parse_reaper_output(gwa_filename):
marker['additive'] = float(line.split("\t")[6])
marker_obs.append(marker)
- return marker_obs \ No newline at end of file
+ return marker_obs
diff --git a/wqflask/wqflask/interval_analyst/GeneUtil.py b/wqflask/wqflask/interval_analyst/GeneUtil.py
index 2c60dd70..d0dd7aea 100644
--- a/wqflask/wqflask/interval_analyst/GeneUtil.py
+++ b/wqflask/wqflask/interval_analyst/GeneUtil.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
import string
from flask import Flask, g
@@ -24,7 +22,7 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'):
##List current Species and other Species
speciesId = speciesDict[species]
- otherSpecies = map(lambda X: [X, speciesDict[X]], speciesDict.keys())
+ otherSpecies = [[X, speciesDict[X]] for X in list(speciesDict.keys())]
otherSpecies.remove([species, speciesId])
results = g.db.execute("""
@@ -33,7 +31,7 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'):
Chromosome = '%s' AND
((TxStart > %f and TxStart <= %f) OR (TxEnd > %f and TxEnd <= %f))
ORDER BY txStart
- """ % (string.join(fetchFields, ", "),
+ """ % (", ".join(fetchFields),
speciesId, chrName,
startMb, endMb,
startMb, endMb)).fetchall()
@@ -68,7 +66,7 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'):
othSpec, othSpecId = item
newdict2 = {}
- resultsOther = g.db.execute("SELECT %s FROM GeneList WHERE SpeciesId = %d AND geneSymbol= '%s' LIMIT 1" % (string.join(fetchFields, ", "),
+ resultsOther = g.db.execute("SELECT %s FROM GeneList WHERE SpeciesId = %d AND geneSymbol= '%s' LIMIT 1" % (", ".join(fetchFields),
othSpecId,
newdict["GeneSymbol"])).fetchone()
diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py
index 79266df2..5a7a4614 100644
--- a/wqflask/wqflask/marker_regression/display_mapping_results.py
+++ b/wqflask/wqflask/marker_regression/display_mapping_results.py
@@ -35,7 +35,7 @@ import json
from flask import Flask, g
-from htmlgen import HTMLgen2 as HT
+import htmlgen as HT
from base import webqtlConfig
from base.GeneralObject import GeneralObject
@@ -47,7 +47,11 @@ from base.webqtlConfig import GENERATED_IMAGE_DIR
from utility.pillow_utils import draw_rotated_text, draw_open_polygon
import utility.logger
-logger = utility.logger.getLogger(__name__)
+try:  # reduce is no longer a builtin in Python 3
+ from functools import reduce
+except ImportError:
+ pass
+logger = utility.logger.getLogger(__name__)
RED = ImageColor.getrgb("red")
BLUE = ImageColor.getrgb("blue")
@@ -84,35 +88,60 @@ class HtmlGenWrapper:
"""Wrapper Methods for HTML gen"""
@staticmethod
def create_image_tag(**kwargs):
- return HT.Image(**kwargs)
+ image = HT.Image("", "")
+ for key, value in list(kwargs.items()):
+ image.set_attribute(key, value)
+ return image
@staticmethod
def create_form_tag(**kwargs):
- return HT.Form(**kwargs)
+ form = HT.Form("POST", "") # Default method is POST
+
+ for key, value in list(kwargs.items()):
+ if key == "submit":
+ form.append(value)
+ continue
+ form.set_attribute(key.replace("cgi", "action"), str(value))
+ return form
@staticmethod
def create_p_tag(**kwargs):
- return HT.Paragraph(**kwargs)
+ paragraph = HT.Paragraph()
+ for key, value in list(kwargs.items()):
+ paragraph.set_attribute(key, value)
+ return paragraph
@staticmethod
def create_br_tag():
- return HT.BR()
+ return HT.VoidElement("br")
@staticmethod
def create_input_tag(**kwargs):
- return HT.Input(**kwargs)
+ input_ = HT.Input()
+ for key, value in list(kwargs.items()):
+ input_.set_attribute(key.lower().replace("_", ""), value)
+ return input_
@staticmethod
def create_area_tag(**kwargs):
- return HT.Area(**kwargs)
+ area = HT.VoidElement("area")
+ for key, value in list(kwargs.items()):
+ area.set_attribute(key, value)
+ return area
@staticmethod
def create_link_tag(href, content, **kwargs):
- return HT.Href(href, content, **kwargs)
+ link = HT.Link(href, content)
+ for key, value in list(kwargs.items()):
+ link.set_attribute(key, value)
+ return link
@staticmethod
def create_map_tag(**kwargs):
- return HT.Map(**kwargs)
+ map_ = HT.Element("map")
+ for key, value in list(kwargs.items()):
+ map_.set_attribute(key, value)
+ return map_
class DisplayMappingResults(object):
@@ -265,7 +294,7 @@ class DisplayMappingResults(object):
self.manhattan_plot = start_vars['manhattan_plot']
- if 'permCheck' in start_vars.keys():
+ if 'permCheck' in list(start_vars.keys()):
self.permChecked = start_vars['permCheck']
else:
self.permChecked = False
@@ -278,46 +307,46 @@ class DisplayMappingResults(object):
else:
self.nperm = 0
- if 'bootCheck' in start_vars.keys():
+ if 'bootCheck' in list(start_vars.keys()):
self.bootChecked = start_vars['bootCheck']
else:
self.bootChecked = False
- if 'num_bootstrap' in start_vars.keys():
+ if 'num_bootstrap' in list(start_vars.keys()):
self.nboot = int(start_vars['num_bootstrap'])
else:
self.nboot = 0
- if 'bootstrap_results' in start_vars.keys():
+ if 'bootstrap_results' in list(start_vars.keys()):
self.bootResult = start_vars['bootstrap_results']
else:
self.bootResult = []
- if 'do_control' in start_vars.keys():
+ if 'do_control' in list(start_vars.keys()):
self.doControl = start_vars['do_control']
else:
self.doControl = "false"
- if 'control_marker' in start_vars.keys():
+ if 'control_marker' in list(start_vars.keys()):
self.controlLocus = start_vars['control_marker']
else:
self.controlLocus = ""
- if 'covariates' in start_vars.keys():
+ if 'covariates' in list(start_vars.keys()):
self.covariates = start_vars['covariates']
- if 'maf' in start_vars.keys():
+ if 'maf' in list(start_vars.keys()):
self.maf = start_vars['maf']
else:
self.maf = ""
- if 'output_files' in start_vars.keys():
+ if 'output_files' in list(start_vars.keys()):
self.output_files = start_vars['output_files']
- if 'use_loco' in start_vars.keys() and self.mapping_method == "gemma":
+ if 'use_loco' in list(start_vars.keys()) and self.mapping_method == "gemma":
self.use_loco = start_vars['use_loco']
- if 'reaper_version' in start_vars.keys() and self.mapping_method == "reaper":
+ if 'reaper_version' in list(start_vars.keys()) and self.mapping_method == "reaper":
self.reaper_version = start_vars['reaper_version']
if 'output_files' in start_vars:
self.output_files = ",".join(start_vars['output_files'])
self.categorical_vars = ""
self.perm_strata = ""
- if 'perm_strata' in start_vars.keys() and 'categorical_vars' in start_vars.keys():
+ if 'perm_strata' in list(start_vars.keys()) and 'categorical_vars' in list(start_vars.keys()):
self.categorical_vars = start_vars['categorical_vars']
self.perm_strata = start_vars['perm_strata']
@@ -359,7 +388,7 @@ class DisplayMappingResults(object):
self.graphWidth = self.MULT_GRAPH_DEFAULT_WIDTH
## BEGIN HaplotypeAnalyst
- if 'haplotypeAnalystCheck' in start_vars.keys():
+ if 'haplotypeAnalystCheck' in list(start_vars.keys()):
self.haplotypeAnalystChecked = start_vars['haplotypeAnalystCheck']
else:
self.haplotypeAnalystChecked = False
@@ -367,25 +396,25 @@ class DisplayMappingResults(object):
self.graphHeight = self.GRAPH_DEFAULT_HEIGHT
self.dominanceChecked = False
- if 'LRSCheck' in start_vars.keys():
+ if 'LRSCheck' in list(start_vars.keys()):
self.LRS_LOD = start_vars['LRSCheck']
else:
self.LRS_LOD = start_vars['score_type']
self.intervalAnalystChecked = True
self.draw2X = False
- if 'additiveCheck' in start_vars.keys():
+ if 'additiveCheck' in list(start_vars.keys()):
self.additiveChecked = start_vars['additiveCheck']
else:
self.additiveChecked = False
- if 'viewLegend' in start_vars.keys():
+ if 'viewLegend' in list(start_vars.keys()):
self.legendChecked = start_vars['viewLegend']
else:
self.legendChecked = False
- if 'showSNP' in start_vars.keys():
+ if 'showSNP' in list(start_vars.keys()):
self.SNPChecked = start_vars['showSNP']
else:
self.SNPChecked = False
- if 'showGenes' in start_vars.keys():
+ if 'showGenes' in list(start_vars.keys()):
self.geneChecked = start_vars['showGenes']
else:
self.geneChecked = False
@@ -425,9 +454,9 @@ class DisplayMappingResults(object):
Chr_Length.Name in (%s)
Order by
Chr_Length.OrderId
- """ % (self.dataset.group.name, string.join(map(lambda X: "'%s'" % X[0], self.ChrList[1:]), ", ")))
+ """ % (self.dataset.group.name, ", ".join(["'%s'" % X[0] for X in self.ChrList[1:]])))
- self.ChrLengthMbList = map(lambda x: x[0]/1000000.0, self.ChrLengthMbList)
+ self.ChrLengthMbList = [x[0]/1000000.0 for x in self.ChrLengthMbList]
self.ChrLengthMbSum = reduce(lambda x, y:x+y, self.ChrLengthMbList, 0.0)
if self.ChrLengthMbList:
self.MbGraphInterval = self.ChrLengthMbSum/(len(self.ChrLengthMbList)*12) #Empirical Mb interval
@@ -458,7 +487,7 @@ class DisplayMappingResults(object):
else:
continue
samplelist = list(self.genotype.prgy)
- for j,_geno in enumerate (self.genotype[0][1].genotype):
+ for j, _geno in enumerate (self.genotype[0][1].genotype):
for item in smd:
if item.name == samplelist[j]:
self.NR_INDIVIDUALS = self.NR_INDIVIDUALS + 1
@@ -550,11 +579,10 @@ class DisplayMappingResults(object):
src="/image/{}.png".format(self.filename),
border="0", usemap='#WebQTLImageMap'
)
- self.intImg = intImg
#Scales plot differently for high resolution
if self.draw2X:
- intCanvasX2 = Image.new("RGBA", size=(self.graphWidth*2,self.graphHeight*2))
+ intCanvasX2 = Image.new("RGBA", size=(self.graphWidth*2, self.graphHeight*2))
gifmapX2 = self.plotIntMapping(intCanvasX2, startMb = self.startMb, endMb = self.endMb, showLocusForm= showLocusForm, zoom=2)
intCanvasX2.save(
"{}.png".format(
@@ -571,12 +599,12 @@ class DisplayMappingResults(object):
cgi=os.path.join(webqtlConfig.CGIDIR, webqtlConfig.SCRIPTFILE),
enctype='multipart/form-data',
name=showLocusForm,
- submit=HtmlGenWrapper.create_input_tag(type='hidden'))
+ submit=HtmlGenWrapper.create_input_tag(type_='hidden'))
hddn = {'FormID':'showDatabase', 'ProbeSetID':'_','database':fd.RISet+"Geno",'CellID':'_', 'RISet':fd.RISet, 'incparentsf1':'ON'}
for key in hddn.keys():
showLocusForm.append(HtmlGenWrapper.create_input_tag(
- name=key, value=hddn[key], type='hidden'))
+ name=key, value=hddn[key], type_='hidden'))
showLocusForm.append(intImg)
else:
showLocusForm = intImg
@@ -788,17 +816,17 @@ class DisplayMappingResults(object):
bootScale = bootScale[:-1] + [highestPercent]
bootOffset = 50*fontZoom
- bootScaleFont=ImageFont.truetype(font=VERDANA_FILE,size=13*fontZoom)
+ bootScaleFont=ImageFont.truetype(font=VERDANA_FILE, size=13*fontZoom)
im_drawer.rectangle(
xy=((canvas.size[0]-bootOffset, yZero-bootHeightThresh),
- (canvas.size[0]-bootOffset-15*zoom,yZero)),
+ (canvas.size[0]-bootOffset-15*zoom, yZero)),
fill = YELLOW, outline=BLACK)
im_drawer.line(
xy=((canvas.size[0]-bootOffset+4, yZero),
(canvas.size[0]-bootOffset, yZero)),
fill=BLACK)
TEXT_Y_DISPLACEMENT = -8
- im_drawer.text(xy=(canvas.size[0]-bootOffset+10,yZero+TEXT_Y_DISPLACEMENT), text='0%',
+ im_drawer.text(xy=(canvas.size[0]-bootOffset+10, yZero+TEXT_Y_DISPLACEMENT), text='0%',
font=bootScaleFont, fill=BLACK)
for item in bootScale:
@@ -806,10 +834,10 @@ class DisplayMappingResults(object):
continue
bootY = yZero-bootHeightThresh*item/highestPercent
im_drawer.line(
- xy=((canvas.size[0]-bootOffset+4,bootY),
- (canvas.size[0]-bootOffset,bootY)),
+ xy=((canvas.size[0]-bootOffset+4, bootY),
+ (canvas.size[0]-bootOffset, bootY)),
fill=BLACK)
- im_drawer.text(xy=(canvas.size[0]-bootOffset+10,bootY+TEXT_Y_DISPLACEMENT),
+ im_drawer.text(xy=(canvas.size[0]-bootOffset+10, bootY+TEXT_Y_DISPLACEMENT),
text='%2.1f'%item, font=bootScaleFont, fill=BLACK)
if self.legendChecked:
@@ -818,7 +846,7 @@ class DisplayMappingResults(object):
smallLabelFont = ImageFont.truetype(font=TREBUC_FILE, size=12*fontZoom)
leftOffset = xLeftOffset+(nCol-1)*200
im_drawer.rectangle(
- xy=((leftOffset,startPosY-6), (leftOffset+12,startPosY+6)),
+ xy=((leftOffset, startPosY-6), (leftOffset+12, startPosY+6)),
fill=YELLOW, outline=BLACK)
im_drawer.text(xy=(leftOffset+ 20, startPosY+TEXT_Y_DISPLACEMENT),
text='Frequency of the Peak LRS',
@@ -915,7 +943,7 @@ class DisplayMappingResults(object):
TEXT_Y_DISPLACEMENT = -8
im_drawer.text(
text="Sequence Site",
- xy=(leftOffset+15,startPosY+TEXT_Y_DISPLACEMENT), font=smallLabelFont,
+ xy=(leftOffset+15, startPosY+TEXT_Y_DISPLACEMENT), font=smallLabelFont,
fill=self.TOP_RIGHT_INFO_COLOR)
def drawSNPTrackNew(self, canvas, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None):
@@ -965,7 +993,7 @@ class DisplayMappingResults(object):
def drawMultiTraitName(self, fd, canvas, gifmap, showLocusForm, offset= (40, 120, 80, 10), zoom = 1):
nameWidths = []
yPaddingTop = 10
- colorFont=ImageFont.truetype(font=TREBUC_FILE,size=12)
+ colorFont=ImageFont.truetype(font=TREBUC_FILE, size=12)
if len(self.qtlresults) >20 and self.selectedChr > -1:
rightShift = 20
rightShiftStep = 60
@@ -984,21 +1012,21 @@ class DisplayMappingResults(object):
rightShift += rightShiftStep
name = thisTrait.displayName()
- nameWidth, nameHeight = im_drawer.textsize(name,font=colorFont)
+ nameWidth, nameHeight = im_drawer.textsize(name, font=colorFont)
nameWidths.append(nameWidth)
im_drawer.rectangle(
- xy=((rightShift,yPaddingTop+kstep*15),
- (rectWidth+rightShift,yPaddingTop+10+kstep*15)),
+ xy=((rightShift, yPaddingTop+kstep*15),
+ (rectWidth+rightShift, yPaddingTop+10+kstep*15)),
fill=thisLRSColor, outline=BLACK)
im_drawer.text(
- text=name,xy=(rectWidth+2+rightShift,yPaddingTop+10+kstep*15),
- font=colorFont,fill=BLACK)
+ text=name, xy=(rectWidth+2+rightShift, yPaddingTop+10+kstep*15),
+ font=colorFont, fill=BLACK)
if thisTrait.db:
- COORDS = "%d,%d,%d,%d" %(rectWidth+2+rightShift,yPaddingTop+kstep*15,rectWidth+2+rightShift+nameWidth,yPaddingTop+10+kstep*15,)
+ COORDS = "%d,%d,%d,%d" %(rectWidth+2+rightShift, yPaddingTop+kstep*15, rectWidth+2+rightShift+nameWidth, yPaddingTop+10+kstep*15,)
HREF= "javascript:showDatabase3('%s','%s','%s','');" % (showLocusForm, thisTrait.db.name, thisTrait.name)
- Areas = HtmlGenWrapper.create_area_tag(shape='rect',coords=COORDS,href=HREF)
- gifmap.areas.append(Areas) ### TODO
+ Areas = HtmlGenWrapper.create_area_tag(shape='rect', coords=COORDS, href=HREF)
+ gifmap.append(Areas) ### TODO
def drawLegendPanel(self, canvas, offset= (40, 120, 80, 10), zoom = 1):
im_drawer = ImageDraw.Draw(canvas)
@@ -1011,80 +1039,80 @@ class DisplayMappingResults(object):
if zoom == 2:
fontZoom = 1.5
- labelFont=ImageFont.truetype(font=TREBUC_FILE,size=12*fontZoom)
+ labelFont=ImageFont.truetype(font=TREBUC_FILE, size=12*fontZoom)
startPosY = 15
stepPosY = 12*fontZoom
if self.manhattan_plot != True:
im_drawer.line(
- xy=((xLeftOffset,startPosY),(xLeftOffset+32,startPosY)),
+ xy=((xLeftOffset, startPosY), (xLeftOffset+32, startPosY)),
fill=self.LRS_COLOR, width=2)
im_drawer.text(
- text=self.LRS_LOD, xy=(xLeftOffset+40,startPosY+TEXT_Y_DISPLACEMENT),
- font=labelFont,fill=BLACK)
+ text=self.LRS_LOD, xy=(xLeftOffset+40, startPosY+TEXT_Y_DISPLACEMENT),
+ font=labelFont, fill=BLACK)
startPosY += stepPosY
if self.additiveChecked:
startPosX = xLeftOffset
im_drawer.line(
- xy=((startPosX,startPosY),(startPosX+17,startPosY)),
+ xy=((startPosX, startPosY), (startPosX+17, startPosY)),
fill=self.ADDITIVE_COLOR_POSITIVE, width=2)
im_drawer.line(
- xy=((startPosX+18,startPosY),(startPosX+32,startPosY)),
+ xy=((startPosX+18, startPosY), (startPosX+32, startPosY)),
fill=self.ADDITIVE_COLOR_NEGATIVE, width=2)
im_drawer.text(
- text='Additive Effect',xy=(startPosX+40,startPosY+TEXT_Y_DISPLACEMENT),
- font=labelFont,fill=BLACK)
+ text='Additive Effect', xy=(startPosX+40, startPosY+TEXT_Y_DISPLACEMENT),
+ font=labelFont, fill=BLACK)
if self.genotype.type == 'intercross' and self.dominanceChecked:
startPosX = xLeftOffset
startPosY += stepPosY
im_drawer.line(
- xy=((startPosX,startPosY),(startPosX+17,startPosY)),
+ xy=((startPosX, startPosY), (startPosX+17, startPosY)),
fill=self.DOMINANCE_COLOR_POSITIVE, width=4)
im_drawer.line(
- xy=((startPosX+18,startPosY),(startPosX+35,startPosY)),
+ xy=((startPosX+18, startPosY), (startPosX+35, startPosY)),
fill=self.DOMINANCE_COLOR_NEGATIVE, width=4)
im_drawer.text(
- text='Dominance Effect', xy=(startPosX+42,startPosY+5),
- font=labelFont,fill=BLACK)
+ text='Dominance Effect', xy=(startPosX+42, startPosY+5),
+ font=labelFont, fill=BLACK)
if self.haplotypeAnalystChecked:
startPosY += stepPosY
startPosX = xLeftOffset
im_drawer.line(
- xy=((startPosX,startPosY),(startPosX+17,startPosY)),
+ xy=((startPosX, startPosY), (startPosX+17, startPosY)),
fill=self.HAPLOTYPE_POSITIVE, width=4)
im_drawer.line(
- xy=((startPosX+18,startPosY),(startPosX+35,startPosY)),
+ xy=((startPosX+18, startPosY), (startPosX+35, startPosY)),
fill=self.HAPLOTYPE_NEGATIVE, width=4)
im_drawer.line(
- xy=((startPosX+36,startPosY),(startPosX+53,startPosY)),
+ xy=((startPosX+36, startPosY), (startPosX+53, startPosY)),
fill=self.HAPLOTYPE_HETEROZYGOUS, width=4)
im_drawer.line(
- xy=((startPosX+54,startPosY),(startPosX+67,startPosY)),
+ xy=((startPosX+54, startPosY), (startPosX+67, startPosY)),
fill=self.HAPLOTYPE_RECOMBINATION, width=4)
im_drawer.text(
text='Haplotypes (Pat, Mat, Het, Unk)',
- xy=(startPosX+76,startPosY+5),font=labelFont,fill=BLACK)
+ xy=(startPosX+76, startPosY+5), font=labelFont, fill=BLACK)
if self.permChecked and self.nperm > 0:
startPosY += stepPosY
startPosX = xLeftOffset
im_drawer.line(
- xy=((startPosX, startPosY),( startPosX + 32, startPosY)),
+ xy=((startPosX, startPosY), ( startPosX + 32, startPosY)),
fill=self.SIGNIFICANT_COLOR, width=self.SIGNIFICANT_WIDTH)
im_drawer.line(
- xy=((startPosX, startPosY + stepPosY),( startPosX + 32, startPosY + stepPosY)),
+ xy=((startPosX, startPosY + stepPosY), ( startPosX + 32, startPosY + stepPosY)),
fill=self.SUGGESTIVE_COLOR, width=self.SUGGESTIVE_WIDTH)
im_drawer.text(
- text='Significant %s = %2.2f' % (self.LRS_LOD,self.significant),
- xy=(xLeftOffset+42,startPosY+TEXT_Y_DISPLACEMENT),font=labelFont,fill=BLACK)
+ text='Significant %s = %2.2f' % (self.LRS_LOD, self.significant),
+ xy=(xLeftOffset+42, startPosY+TEXT_Y_DISPLACEMENT), font=labelFont, fill=BLACK)
im_drawer.text(
text='Suggestive %s = %2.2f' % (self.LRS_LOD, self.suggestive),
- xy=(xLeftOffset+42,startPosY + TEXT_Y_DISPLACEMENT +stepPosY),font=labelFont,
+ xy=(xLeftOffset+42, startPosY + TEXT_Y_DISPLACEMENT +stepPosY), font=labelFont,
fill=BLACK)
- labelFont = ImageFont.truetype(font=VERDANA_FILE,size=12*fontZoom)
+ labelFont = ImageFont.truetype(font=VERDANA_FILE, size=12*fontZoom)
labelColor = BLACK
if self.dataset.type == "Publish" or self.dataset.type == "Geno":
dataset_label = self.dataset.fullname
@@ -1152,22 +1180,22 @@ class DisplayMappingResults(object):
im_drawer.textsize(string2, font=labelFont)[0])
im_drawer.text(
text=identification,
- xy=(canvas.size[0] - xRightOffset-d,20*fontZoom),font=labelFont,
+ xy=(canvas.size[0] - xRightOffset-d, 20*fontZoom), font=labelFont,
fill=labelColor)
else:
d = 4+ max(
im_drawer.textsize(string1, font=labelFont)[0],
im_drawer.textsize(string2, font=labelFont)[0])
im_drawer.text(
- text=string1,xy=(canvas.size[0] - xRightOffset-d,35*fontZoom),
- font=labelFont,fill=labelColor)
+ text=string1, xy=(canvas.size[0] - xRightOffset-d, 35*fontZoom),
+ font=labelFont, fill=labelColor)
im_drawer.text(
- text=string2,xy=(canvas.size[0] - xRightOffset-d,50*fontZoom),
- font=labelFont,fill=labelColor)
+ text=string2, xy=(canvas.size[0] - xRightOffset-d, 50*fontZoom),
+ font=labelFont, fill=labelColor)
if string3 != '':
im_drawer.text(
- text=string3,xy=(canvas.size[0] - xRightOffset-d,65*fontZoom),
- font=labelFont,fill=labelColor)
+ text=string3, xy=(canvas.size[0] - xRightOffset-d, 65*fontZoom),
+ font=labelFont, fill=labelColor)
def drawGeneBand(self, canvas, gifmap, plotXScale, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None):
@@ -1194,8 +1222,8 @@ class DisplayMappingResults(object):
tenPercentLength = geneLength*0.0001
SNPdensity = theGO["snpCount"]/geneLength
- exonStarts = map(float, theGO['exonStarts'].split(",")[:-1])
- exonEnds = map(float, theGO['exonEnds'].split(",")[:-1])
+ exonStarts = list(map(float, theGO['exonStarts'].split(",")[:-1]))
+ exonEnds = list(map(float, theGO['exonEnds'].split(",")[:-1]))
cdsStart = theGO['cdsStart']
cdsEnd = theGO['cdsEnd']
accession = theGO['NM_ID']
@@ -1388,7 +1416,7 @@ class DisplayMappingResults(object):
labelText = "3'"
im_drawer.text(
text=labelText,
- xy=(utrEndPix+2,geneYLocation+self.EACH_GENE_HEIGHT),
+ xy=(utrEndPix+2, geneYLocation+self.EACH_GENE_HEIGHT),
font=ImageFont.truetype(font=ARIAL_FILE, size=2))
#draw the genes as rectangles
@@ -1400,7 +1428,7 @@ class DisplayMappingResults(object):
COORDS = "%d, %d, %d, %d" %(geneStartPix, geneYLocation, geneEndPix, (geneYLocation + self.EACH_GENE_HEIGHT))
# NL: 06-02-2011 Rob required to display NCBI info in a new window
- gifmap.areas.append(
+ gifmap.append(
HtmlGenWrapper.create_area_tag(
shape='rect',
coords=COORDS,
@@ -1541,7 +1569,7 @@ class DisplayMappingResults(object):
counter = counter + 1
if item.name == samplelist[k]:
ind = counter
- maxind=max(ind,maxind)
+ maxind=max(ind, maxind)
# lines
if (oldgeno[k] == -1 and _geno == -1):
@@ -1574,7 +1602,7 @@ class DisplayMappingResults(object):
COORDS = "%d, %d, %d, %d" %(geneStartPix, geneYLocation+ind*self.EACH_GENE_HEIGHT, geneEndPix+1, (geneYLocation + ind*self.EACH_GENE_HEIGHT))
TITLE = "Strain: %s, marker (%s) \n Position %2.3f Mb." % (samplelist[k], _chr[j].name, float(txStart))
HREF = ''
- gifmap.areas.append(
+ gifmap.append(
HtmlGenWrapper.create_area_tag(
shape='rect',
coords=COORDS,
@@ -1698,7 +1726,7 @@ class DisplayMappingResults(object):
WEBQTL_HREF = "javascript:rangeView('%s', %f, %f)" % (self.selectedChr - 1, max(0, (calBase-webqtlZoomWidth))/1000000.0, (calBase+webqtlZoomWidth)/1000000.0)
WEBQTL_TITLE = "Click to view this section of the genome in WebQTL"
- gifmap.areas.append(
+ gifmap.append(
HtmlGenWrapper.create_area_tag(
shape='rect',
coords=WEBQTL_COORDS,
@@ -1710,7 +1738,7 @@ class DisplayMappingResults(object):
outline=self.CLICKABLE_WEBQTL_REGION_COLOR,
fill=self.CLICKABLE_WEBQTL_REGION_COLOR)
im_drawer.line(
- xy=((xBrowse1, paddingTop),( xBrowse1, (paddingTop + self.BAND_HEIGHT))),
+ xy=((xBrowse1, paddingTop), ( xBrowse1, (paddingTop + self.BAND_HEIGHT))),
fill=self.CLICKABLE_WEBQTL_REGION_OUTLINE_COLOR)
if self.dataset.group.species == "mouse" or self.dataset.group.species == "rat":
@@ -1720,7 +1748,7 @@ class DisplayMappingResults(object):
else:
PHENOGEN_HREF = "https://phenogen.org/gene.jsp?speciesCB=Mm&auto=Y&geneTxt=chr%s:%d-%d&genomeVer=mm10" % (self.selectedChr, max(0, calBase-flankingWidthInBases), calBase+flankingWidthInBases)
PHENOGEN_TITLE = "Click to view this section of the genome in PhenoGen"
- gifmap.areas.append(
+ gifmap.append(
HtmlGenWrapper.create_area_tag(
shape='rect',
coords=PHENOGEN_COORDS,
@@ -1732,7 +1760,7 @@ class DisplayMappingResults(object):
outline=self.CLICKABLE_PHENOGEN_REGION_COLOR,
fill=self.CLICKABLE_PHENOGEN_REGION_COLOR)
im_drawer.line(
- xy=((xBrowse1, phenogenPaddingTop),( xBrowse1, (phenogenPaddingTop+self.BAND_HEIGHT))),
+ xy=((xBrowse1, phenogenPaddingTop), ( xBrowse1, (phenogenPaddingTop+self.BAND_HEIGHT))),
fill=self.CLICKABLE_PHENOGEN_REGION_OUTLINE_COLOR)
UCSC_COORDS = "%d, %d, %d, %d" %(xBrowse1, ucscPaddingTop, xBrowse2, (ucscPaddingTop+self.BAND_HEIGHT))
@@ -1741,7 +1769,7 @@ class DisplayMappingResults(object):
else:
UCSC_HREF = "http://genome.ucsc.edu/cgi-bin/hgTracks?db=%s&position=chr%s:%d-%d" % (self._ucscDb, self.selectedChr, max(0, calBase-flankingWidthInBases), calBase+flankingWidthInBases)
UCSC_TITLE = "Click to view this section of the genome in the UCSC Genome Browser"
- gifmap.areas.append(
+ gifmap.append(
HtmlGenWrapper.create_area_tag(
shape='rect',
coords=UCSC_COORDS,
@@ -1763,7 +1791,7 @@ class DisplayMappingResults(object):
else:
ENSEMBL_HREF = "http://www.ensembl.org/Rattus_norvegicus/contigview?chr=%s&start=%d&end=%d" % (self.selectedChr, max(0, calBase-flankingWidthInBases), calBase+flankingWidthInBases)
ENSEMBL_TITLE = "Click to view this section of the genome in the Ensembl Genome Browser"
- gifmap.areas.append(HtmlGenWrapper.create_area_tag(
+ gifmap.append(HtmlGenWrapper.create_area_tag(
shape='rect',
coords=ENSEMBL_COORDS,
href=ENSEMBL_HREF,
@@ -1864,8 +1892,8 @@ class DisplayMappingResults(object):
continue
Xc = xLeftOffset + plotXScale*(_Mb - startMb)
if counter % NUM_MINOR_TICKS == 0: # Draw a MAJOR mark, not just a minor tick mark
- im_drawer.line(xy=((Xc,yZero),
- (Xc,yZero+xMajorTickHeight)),
+ im_drawer.line(xy=((Xc, yZero),
+ (Xc, yZero+xMajorTickHeight)),
fill=xAxisTickMarkColor,
width=X_MAJOR_TICK_THICKNESS) # Draw the MAJOR tick mark
labelStr = str(formatStr % _Mb) # What Mbase location to put on the label
@@ -1875,8 +1903,8 @@ class DisplayMappingResults(object):
text=labelStr, font=MBLabelFont,
fill=xAxisLabelColor)
else:
- im_drawer.line(xy=((Xc,yZero),
- (Xc,yZero+xMinorTickHeight)),
+ im_drawer.line(xy=((Xc, yZero),
+ (Xc, yZero+xMinorTickHeight)),
fill=xAxisTickMarkColor,
width=X_MINOR_TICK_THICKNESS) # Draw the MINOR tick mark
@@ -1909,7 +1937,7 @@ class DisplayMappingResults(object):
text="Megabases",
xy=(
xLeftOffset+(plotWidth-im_drawer.textsize(
- "Megabases",font=megabaseLabelFont)[0])/2,
+ "Megabases", font=megabaseLabelFont)[0])/2,
strYLoc+MBLabelFont.font.height+10*(zoom%2)),
font=megabaseLabelFont, fill=BLACK)
pass
@@ -1964,7 +1992,7 @@ class DisplayMappingResults(object):
for j, ChrInfo in enumerate(ChrAInfo):
preLpos = -1
for i, item in enumerate(ChrInfo):
- Lname,Lpos = item
+ Lname, Lpos = item
if Lpos != preLpos:
offsetA += stepA
differ = 1
@@ -1978,17 +2006,17 @@ class DisplayMappingResults(object):
Zorder = 0
if differ:
im_drawer.line(
- xy=((startPosX+Lpos,yZero),(xLeftOffset+offsetA,\
+ xy=((startPosX+Lpos, yZero), (xLeftOffset+offsetA,\
yZero+25)),
fill=lineColor)
im_drawer.line(
- xy=((xLeftOffset+offsetA,yZero+25),(xLeftOffset+offsetA,\
+ xy=((xLeftOffset+offsetA, yZero+25), (xLeftOffset+offsetA,\
yZero+40+Zorder*(LRectWidth+3))),
fill=lineColor)
rectColor = ORANGE
else:
im_drawer.line(
- xy=((xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3)-3),(\
+ xy=((xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3)-3), (\
xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3))),
fill=lineColor)
rectColor = DEEPPINK
@@ -1996,7 +2024,7 @@ class DisplayMappingResults(object):
xy=((xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3)),
(xLeftOffset+offsetA-LRectHeight,
yZero+40+Zorder*(LRectWidth+3)+LRectWidth)),
- outline=rectColor,fill=rectColor,width = 0)
+ outline=rectColor, fill=rectColor, width = 0)
COORDS="%d,%d,%d,%d"%(xLeftOffset+offsetA-LRectHeight, yZero+40+Zorder*(LRectWidth+3),\
xLeftOffset+offsetA,yZero+40+Zorder*(LRectWidth+3)+LRectWidth)
HREF = "/show_trait?trait_id=%s&dataset=%s" % (Lname, self.dataset.group.name+"Geno")
@@ -2007,11 +2035,11 @@ class DisplayMappingResults(object):
href=HREF,
target="_blank",
title="Locus : {}".format(Lname))
- gifmap.areas.append(Areas)
+ gifmap.append(Areas)
##piddle bug
if j == 0:
im_drawer.line(
- xy=((startPosX,yZero),(startPosX,yZero+40)),
+ xy=((startPosX, yZero), (startPosX, yZero+40)),
fill=lineColor)
startPosX += (self.ChrLengthDistList[j]+self.GraphInterval)*plotXScale
@@ -2023,7 +2051,7 @@ class DisplayMappingResults(object):
strYLoc + MBLabelFont.font.height+ 10*(zoom%2)),
font=centimorganLabelFont, fill=BLACK)
- im_drawer.line(xy=((xLeftOffset,yZero), (xLeftOffset+plotWidth,yZero)),
+ im_drawer.line(xy=((xLeftOffset, yZero), (xLeftOffset+plotWidth, yZero)),
fill=BLACK, width=X_AXIS_THICKNESS) # Draw the X axis itself
@@ -2167,7 +2195,7 @@ class DisplayMappingResults(object):
LRS_LOD_Max = 0.000001
yTopOffset + 30*(zoom - 1)
yLRS = yZero - (item/LRS_LOD_Max) * LRSHeightThresh
- im_drawer.line(xy=((xLeftOffset,yLRS), (xLeftOffset-4,yLRS)),
+ im_drawer.line(xy=((xLeftOffset, yLRS), (xLeftOffset-4, yLRS)),
fill=self.LRS_COLOR, width=1*zoom)
if all_int:
scaleStr = "%d" % item
@@ -2223,8 +2251,8 @@ class DisplayMappingResults(object):
shape='rect',
coords=sig_coords,
title=sig_title)
- gifmap.areas.append(Areas1)
- gifmap.areas.append(Areas2)
+ gifmap.append(Areas1)
+ gifmap.append(Areas2)
start_pos_x += (chr_length_dist+self.GraphInterval)*plotXScale
return start_pos_x
@@ -2243,7 +2271,7 @@ class DisplayMappingResults(object):
lrsEdgeWidth = 1
else:
if self.additiveChecked:
- additiveMax = max(map(lambda X : abs(X['additive']), self.qtlresults))
+ additiveMax = max([abs(X['additive']) for X in self.qtlresults])
lrsEdgeWidth = 3
if zoom == 2:
@@ -2410,7 +2438,7 @@ class DisplayMappingResults(object):
im_drawer.text(
text="5",
xy=(
- Xc-im_drawer.textsize("5",font=symbolFont)[0]/2+1,
+ Xc-im_drawer.textsize("5", font=symbolFont)[0]/2+1,
Yc-4),
fill=point_color, font=symbolFont)
else:
@@ -2477,8 +2505,8 @@ class DisplayMappingResults(object):
)
else:
im_drawer.line(
- xy=((Xc0,yZero-(Yc0-yZero)),
- (Xc,yZero-(Yc-yZero))),
+ xy=((Xc0, yZero-(Yc0-yZero)),
+ (Xc, yZero-(Yc-yZero))),
fill=minusColor, width=lineWidth
#, clipX=(xLeftOffset, xLeftOffset + plotWidth)
)
@@ -2565,8 +2593,8 @@ class DisplayMappingResults(object):
###draw additive scale
if not self.multipleInterval and self.additiveChecked:
- additiveScaleFont=ImageFont.truetype(font=VERDANA_FILE,size=16*zoom)
- additiveScale = Plot.detScaleOld(0,additiveMax)
+ additiveScaleFont=ImageFont.truetype(font=VERDANA_FILE, size=16*zoom)
+ additiveScale = Plot.detScaleOld(0, additiveMax)
additiveStep = (additiveScale[1]-additiveScale[0])/additiveScale[2]
additiveAxisList = Plot.frange(0, additiveScale[1], additiveStep)
addPlotScale = AdditiveHeightThresh/additiveMax
@@ -2576,18 +2604,18 @@ class DisplayMappingResults(object):
for item in additiveAxisList:
additiveY = yZero - item*addPlotScale
im_drawer.line(
- xy=((xLeftOffset + plotWidth,additiveY),
- (xLeftOffset+4+ plotWidth,additiveY)),
+ xy=((xLeftOffset + plotWidth, additiveY),
+ (xLeftOffset+4+ plotWidth, additiveY)),
fill=self.ADDITIVE_COLOR_POSITIVE, width=1*zoom)
scaleStr = "%2.3f" % item
im_drawer.text(
text=scaleStr,
- xy=(xLeftOffset + plotWidth +6,additiveY+TEXT_Y_DISPLACEMENT),
- font=additiveScaleFont,fill=self.ADDITIVE_COLOR_POSITIVE)
+ xy=(xLeftOffset + plotWidth +6, additiveY+TEXT_Y_DISPLACEMENT),
+ font=additiveScaleFont, fill=self.ADDITIVE_COLOR_POSITIVE)
im_drawer.line(
- xy=((xLeftOffset+plotWidth,additiveY),
- (xLeftOffset+plotWidth,yZero)),
+ xy=((xLeftOffset+plotWidth, additiveY),
+ (xLeftOffset+plotWidth, yZero)),
fill=self.ADDITIVE_COLOR_POSITIVE, width=1*zoom)
im_drawer.line(
@@ -2647,7 +2675,7 @@ class DisplayMappingResults(object):
chrFontZoom = 2
else:
chrFontZoom = 1
- chrLabelFont=ImageFont.truetype(font=VERDANA_FILE,size=24*chrFontZoom)
+ chrLabelFont=ImageFont.truetype(font=VERDANA_FILE, size=24*chrFontZoom)
for i, _chr in enumerate(self.genotype):
if (i % 2 == 0):
@@ -2669,16 +2697,16 @@ class DisplayMappingResults(object):
TEXT_Y_DISPLACEMENT = 0
im_drawer.text(xy=(chrStartPix, yTopOffset + TEXT_Y_DISPLACEMENT),
text=_chr.name, font=chrLabelFont, fill=BLACK)
- COORDS = "%d,%d,%d,%d" %(chrStartPix, yTopOffset, chrEndPix,yTopOffset +20)
+ COORDS = "%d,%d,%d,%d" %(chrStartPix, yTopOffset, chrEndPix, yTopOffset +20)
#add by NL 09-03-2010
- HREF = "javascript:chrView(%d,%s);" % (i,self.ChrLengthMbList)
+ HREF = "javascript:chrView(%d,%s);" % (i, self.ChrLengthMbList)
#HREF = "javascript:changeView(%d,%s);" % (i,self.ChrLengthMbList)
Areas = HtmlGenWrapper.create_area_tag(
shape='rect',
coords=COORDS,
href=HREF)
- gifmap.areas.append(Areas)
+ gifmap.append(Areas)
startPosX += (self.ChrLengthDistList[i]+self.GraphInterval)*plotXScale
return plotXScale
@@ -2764,7 +2792,7 @@ class DisplayMappingResults(object):
this_row = [] #container for the cells of each row
selectCheck = HtmlGenWrapper.create_input_tag(
- type="checkbox",
+ type_="checkbox",
name="selectCheck",
value=theGO["GeneSymbol"],
Class="checkbox trait_checkbox") # checkbox for each row
@@ -2821,7 +2849,7 @@ class DisplayMappingResults(object):
else:
chr_as_int = int(theGO["Chromosome"]) - 1
if refGene:
- literatureCorrelationString = str(self.getLiteratureCorrelation(self.cursor,refGene,theGO['GeneID']) or "N/A")
+ literatureCorrelationString = str(self.getLiteratureCorrelation(self.cursor, refGene, theGO['GeneID']) or "N/A")
this_row = [selectCheck.__str__(),
str(tableIterationsCnt),
@@ -2883,7 +2911,7 @@ class DisplayMappingResults(object):
for gIndex, theGO in enumerate(geneCol):
this_row = [] # container for the cells of each row
selectCheck = str(HtmlGenWrapper.create_input_tag(
- type="checkbox",
+ type_="checkbox",
name="selectCheck",
Class="checkbox trait_checkbox")) # checkbox for each row
@@ -2959,8 +2987,8 @@ class DisplayMappingResults(object):
lCorr = None
try:
query = 'SELECT Value FROM LCorrRamin3 WHERE GeneId1 = %s and GeneId2 = %s'
- for x,y in [(geneId1,geneId2),(geneId2,geneId1)]:
- cursor.execute(query,(x,y))
+ for x, y in [(geneId1, geneId2), (geneId2, geneId1)]:
+ cursor.execute(query, (x, y))
lCorr = cursor.fetchone()
if lCorr:
lCorr = lCorr[0]
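The in list(start_vars.keys()) checks above are the mechanical 2to3 rewrite; in Python 3 a plain membership test on the dict behaves the same and avoids building a throwaway list, and list() is only needed when the keys must be materialized (for example when deleting entries while iterating). A minimal sketch of the equivalent patterns, with invented values for start_vars:

    start_vars = {'permCheck': 'ON', 'num_perm': '2000'}

    # Membership testing works directly on the dict; no list() needed.
    if 'permCheck' in start_vars:
        perm_checked = start_vars['permCheck']
    else:
        perm_checked = False

    # The same default-if-missing logic as a one-liner.
    perm_checked = start_vars.get('permCheck', False)

    # list(d.keys()) is still the right call when entries are removed
    # while iterating, since the live key view would change size.
    for key in list(start_vars.keys()):
        if key.startswith('num_'):
            del start_vars[key]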
diff --git a/wqflask/wqflask/marker_regression/plink_mapping.py b/wqflask/wqflask/marker_regression/plink_mapping.py
index 2f327faf..6c38c34f 100644
--- a/wqflask/wqflask/marker_regression/plink_mapping.py
+++ b/wqflask/wqflask/marker_regression/plink_mapping.py
@@ -54,7 +54,7 @@ def gen_pheno_txt_file_plink(this_trait, dataset, vals, pheno_filename = ''):
for i, sample in enumerate(ped_sample_list):
try:
value = vals[i]
- value = str(value).replace('value=','')
+ value = str(value).replace('value=', '')
value = value.strip()
except:
value = -9999
@@ -78,13 +78,13 @@ def gen_pheno_txt_file_plink(this_trait, dataset, vals, pheno_filename = ''):
# get strain name from ped file in order
def get_samples_from_ped_file(dataset):
- ped_file= open("{}{}.ped".format(flat_files('mapping'), dataset.group.name),"r")
+ ped_file= open("{}{}.ped".format(flat_files('mapping'), dataset.group.name), "r")
line = ped_file.readline()
sample_list=[]
while line:
- lineList = string.split(string.strip(line), '\t')
- lineList = map(string.strip, lineList)
+ lineList = line.strip().split('\t')
+ lineList = [item.strip() for item in lineList]
sample_name = lineList[0]
sample_list.append(sample_name)
@@ -111,7 +111,7 @@ def parse_plink_output(output_filename, species):
line_list = build_line_list(line=line)
# only keep the records whose chromosome name is in db
- if species.chromosomes.chromosomes.has_key(int(line_list[0])) and line_list[-1] and line_list[-1].strip()!='NA':
+ if int(line_list[0]) in species.chromosomes.chromosomes and line_list[-1] and line_list[-1].strip()!='NA':
chr_name = species.chromosomes.chromosomes[int(line_list[0])]
snp = line_list[1]
@@ -121,7 +121,7 @@ def parse_plink_output(output_filename, species):
if p_value < threshold_p_value:
p_value_dict[snp] = float(p_value)
- if plink_results.has_key(chr_name):
+ if chr_name in plink_results:
value_list = plink_results[chr_name]
# pvalue range is [0,1]
@@ -155,8 +155,8 @@ def parse_plink_output(output_filename, species):
# output: lineList list
#######################################################
def build_line_list(line=None):
- line_list = string.split(string.strip(line),' ')# irregular number of whitespaces between columns
- line_list = [item for item in line_list if item <>'']
- line_list = map(string.strip, line_list)
+ line_list = line.strip().split(' ')# irregular number of whitespaces between columns
+ line_list = [item for item in line_list if item !='']
+ line_list = [item.strip() for item in line_list]
- return line_list \ No newline at end of file
+ return line_list
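build_line_list above splits on single spaces and then drops the empty strings left by the irregular whitespace; in Python 3 the no-argument str.split() collapses whitespace runs directly, and per-item stripping uses the str method because string.strip no longer exists. A small sketch of that variant, with an invented PLINK output line:

    line = "  1   rs3668922   mm37-1   105.927   TT  \n"

    # split() with no separator splits on runs of whitespace and
    # discards empty fields, so no filtering or extra stripping is needed.
    line_list = line.split()
    assert line_list == ['1', 'rs3668922', 'mm37-1', '105.927', 'TT']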
diff --git a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py
index 6b4c05ea..78b1f7b0 100644
--- a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py
+++ b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py
@@ -252,4 +252,4 @@ def natural_sort(marker_list):
"""
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', str(marker_list[key]['chr'])) ]
- return sorted(range(len(marker_list)), key = alphanum_key) \ No newline at end of file
+ return sorted(list(range(len(marker_list))), key = alphanum_key) \ No newline at end of file
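natural_sort orders marker indices so that chromosome '10' sorts after '2' rather than before it: the key splits each chromosome name into digit and non-digit chunks and compares the digit chunks as integers. (sorted() accepts the range directly, so the added list() wrapper is optional.) A standalone sketch of the same keying, with made-up chromosome names:

    import re

    def natural_key(text):
        # Split into digit / non-digit chunks; compare digit chunks as ints.
        return [int(chunk) if chunk.isdigit() else chunk.lower()
                for chunk in re.split('([0-9]+)', text)]

    chromosomes = ['10', '2', 'X', '1', '19']
    assert sorted(chromosomes, key=natural_key) == ['1', '2', '10', '19', 'X']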
diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py
index c5590a85..0a5758af 100644
--- a/wqflask/wqflask/marker_regression/rqtl_mapping.py
+++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py
@@ -42,7 +42,7 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec
png = ro.r["png"] # Map the png function
dev_off = ro.r["dev.off"] # Map the device off function
- print(r_library("qtl")) # Load R/qtl
+ print((r_library("qtl"))) # Load R/qtl
logger.info("QTL library loaded");
diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py
index 8a44b3fd..f42d2315 100644
--- a/wqflask/wqflask/marker_regression/run_mapping.py
+++ b/wqflask/wqflask/marker_regression/run_mapping.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
from base.trait import GeneralTrait
from base import data_set #import create_dataset
@@ -18,7 +16,7 @@ import uuid
import rpy2.robjects as ro
import numpy as np
-import cPickle as pickle
+import pickle as pickle
import itertools
import simplejson as json
@@ -347,7 +345,7 @@ class RunMapping(object):
if marker['chr1'] > 0 or marker['chr1'] == "X" or marker['chr1'] == "X/Y":
if marker['chr1'] > highest_chr or marker['chr1'] == "X" or marker['chr1'] == "X/Y":
highest_chr = marker['chr1']
- if 'lod_score' in marker.keys():
+ if 'lod_score' in list(marker.keys()):
self.qtl_results.append(marker)
self.trimmed_markers = results
@@ -547,9 +545,9 @@ def export_mapping_results(dataset, trait, markers, results_path, mapping_scale,
output_file.write("Mb," + score_type)
if 'cM' in markers[0]:
output_file.write("Cm," + score_type)
- if "additive" in markers[0].keys():
+ if "additive" in list(markers[0].keys()):
output_file.write(",Additive")
- if "dominance" in markers[0].keys():
+ if "dominance" in list(markers[0].keys()):
output_file.write(",Dominance")
output_file.write("\n")
for i, marker in enumerate(markers):
@@ -562,17 +560,17 @@ def export_mapping_results(dataset, trait, markers, results_path, mapping_scale,
output_file.write(str(marker['lod_score']))
else:
output_file.write(str(marker['lrs_value']))
- if "additive" in marker.keys():
+ if "additive" in list(marker.keys()):
output_file.write("," + str(marker['additive']))
- if "dominance" in marker.keys():
+ if "dominance" in list(marker.keys()):
output_file.write("," + str(marker['dominance']))
if i < (len(markers) - 1):
output_file.write("\n")
def trim_markers_for_figure(markers):
- if 'p_wald' in markers[0].keys():
+ if 'p_wald' in list(markers[0].keys()):
score_type = 'p_wald'
- elif 'lod_score' in markers[0].keys():
+ elif 'lod_score' in list(markers[0].keys()):
score_type = 'lod_score'
else:
score_type = 'lrs_value'
@@ -630,7 +628,7 @@ def trim_markers_for_figure(markers):
return filtered_markers
def trim_markers_for_table(markers):
- if 'lod_score' in markers[0].keys():
+ if 'lod_score' in list(markers[0].keys()):
sorted_markers = sorted(markers, key=lambda k: k['lod_score'], reverse=True)
else:
sorted_markers = sorted(markers, key=lambda k: k['lrs_value'], reverse=True)
@@ -714,10 +712,10 @@ def get_genofile_samplelist(dataset):
def get_perm_strata(this_trait, sample_list, categorical_vars, used_samples):
perm_strata_strings = []
for sample in used_samples:
- if sample in sample_list.sample_attribute_values.keys():
+ if sample in list(sample_list.sample_attribute_values.keys()):
combined_string = ""
for var in categorical_vars:
- if var in sample_list.sample_attribute_values[sample].keys():
+ if var in list(sample_list.sample_attribute_values[sample].keys()):
combined_string += str(sample_list.sample_attribute_values[sample][var])
else:
combined_string += "NA"
@@ -726,8 +724,8 @@ def get_perm_strata(this_trait, sample_list, categorical_vars, used_samples):
perm_strata_strings.append(combined_string)
- d = dict([(y,x+1) for x,y in enumerate(sorted(set(perm_strata_strings)))])
+ d = dict([(y, x+1) for x, y in enumerate(sorted(set(perm_strata_strings)))])
list_to_numbers = [d[x] for x in perm_strata_strings]
perm_strata = list_to_numbers
- return perm_strata \ No newline at end of file
+ return perm_strata
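get_perm_strata concatenates each sample's categorical attribute values into a string and then numbers every distinct combination from 1, so samples in the same stratum share a number. A toy walk-through with invented attribute strings:

    perm_strata_strings = ['F_old', 'M_old', 'F_old', 'F_young']

    # Distinct combinations, sorted, numbered from 1.
    d = {s: i + 1 for i, s in enumerate(sorted(set(perm_strata_strings)))}
    # d == {'F_old': 1, 'F_young': 2, 'M_old': 3}

    perm_strata = [d[s] for s in perm_strata_strings]
    assert perm_strata == [1, 3, 1, 2]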
diff --git a/wqflask/wqflask/model.py b/wqflask/wqflask/model.py
index 38117a8e..772f74e4 100644
--- a/wqflask/wqflask/model.py
+++ b/wqflask/wqflask/model.py
@@ -1,5 +1,3 @@
-from __future__ import print_function, division, absolute_import
-
import uuid
import datetime
@@ -18,7 +16,7 @@ from wqflask.database import Base, init_db
class User(Base):
__tablename__ = "user"
- id = Column(Unicode(36), primary_key=True, default=lambda: unicode(uuid.uuid4()))
+ id = Column(Unicode(36), primary_key=True, default=lambda: str(uuid.uuid4()))
email_address = Column(Unicode(50), unique=True, nullable=False)
# Todo: Turn on strict mode for Mysql
@@ -120,7 +118,7 @@ class User(Base):
class Login(Base):
__tablename__ = "login"
- id = Column(Unicode(36), primary_key=True, default=lambda: unicode(uuid.uuid4()))
+ id = Column(Unicode(36), primary_key=True, default=lambda: str(uuid.uuid4()))
user = Column(Unicode(36), ForeignKey('user.id'))
timestamp = Column(DateTime(), default=lambda: datetime.datetime.utcnow())
ip_address = Column(Unicode(39))
@@ -138,7 +136,7 @@ class Login(Base):
class UserCollection(Base):
__tablename__ = "user_collection"
- id = Column(Unicode(36), primary_key=True, default=lambda: unicode(uuid.uuid4()))
+ id = Column(Unicode(36), primary_key=True, default=lambda: str(uuid.uuid4()))
user = Column(Unicode(36), ForeignKey('user.id'))
# I'd prefer this to not have a length, but for the index below it needs one
@@ -168,4 +166,4 @@ def display_collapsible(number):
def user_uuid():
"""Unique cookie for a user"""
- user_uuid = request.cookies.get('user_uuid') \ No newline at end of file
+ user_uuid = request.cookies.get('user_uuid')
diff --git a/wqflask/wqflask/network_graph/network_graph.py b/wqflask/wqflask/network_graph/network_graph.py
index f61c40b4..cfefe4ec 100644
--- a/wqflask/wqflask/network_graph/network_graph.py
+++ b/wqflask/wqflask/network_graph/network_graph.py
@@ -18,29 +18,11 @@
#
# This module is used by GeneNetwork project (www.genenetwork.org)
-from __future__ import absolute_import, print_function, division
-
-import sys
-
-import string
-import cPickle
-import os
-import time
-import pp
-import math
-import collections
-import resource
-
import scipy
-
import simplejson as json
-
-from rpy2.robjects.packages import importr
-import rpy2.robjects as robjects
-
from pprint import pformat as pf
-from utility.THCell import THCell
+
from utility.TDCell import TDCell
from base.trait import create_trait
from base import data_set
@@ -49,11 +31,6 @@ from utility.tools import GN2_BRANCH_URL
from db import webqtlDatabaseFunction
import utility.webqtlUtil #this is for parallel computing only.
from wqflask.correlation import correlation_functions
-from utility.benchmark import Bench
-
-from MySQLdb import escape_string as escape
-
-from pprint import pformat as pf
from flask import Flask, g
@@ -202,8 +179,8 @@ class NetworkGraph(object):
self.js_data = dict(traits = [trait.name for trait in self.traits],
groups = groups,
- cols = range(len(self.traits)),
- rows = range(len(self.traits)),
+ cols = list(range(len(self.traits))),
+ rows = list(range(len(self.traits))),
samples = self.all_sample_list,
sample_data = self.sample_data,
elements = self.elements,)
@@ -218,4 +195,4 @@ class NetworkGraph(object):
trait_ob = create_trait(dataset=dataset_ob,
name=trait_name,
cellid=None)
- self.trait_list.append((trait_ob, dataset_ob)) \ No newline at end of file
+ self.trait_list.append((trait_ob, dataset_ob))
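The cols/rows change matters because js_data is serialized for the template: in Python 3, range() is a lazy object that JSON encoders reject, so it has to be materialized first. A minimal illustration with the standard json module:

    import json

    traits = ['trait_a', 'trait_b', 'trait_c']

    try:
        json.dumps({'cols': range(len(traits))})
    except TypeError as exc:
        print(exc)  # range objects are not JSON serializable

    # Materializing the range restores the Python 2 behaviour.
    print(json.dumps({'cols': list(range(len(traits)))}))  # {"cols": [0, 1, 2]}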
diff --git a/wqflask/wqflask/news.py b/wqflask/wqflask/news.py
index 8bc6b889..0675ec4b 100644
--- a/wqflask/wqflask/news.py
+++ b/wqflask/wqflask/news.py
@@ -1,7 +1,3 @@
-from __future__ import absolute_import, print_function, division
-import sys
-reload(sys)
-sys.setdefaultencoding('utf8')
from flask import g
class News(object):
diff --git a/wqflask/wqflask/parser.py b/wqflask/wqflask/parser.py
index 1ca5ecff..76fae54b 100644
--- a/wqflask/wqflask/parser.py
+++ b/wqflask/wqflask/parser.py
@@ -17,8 +17,6 @@ be acceptable.]
"""
-from __future__ import print_function, division
-
import re
from pprint import pformat as pf
@@ -78,22 +76,6 @@ def parse(pstring):
logger.debug("* items are:", pf(items) + "\n")
return(items)
- #def encregexp(self,str):
- # if not str:
- # return []
- # else:
- # wildcardkeyword = str.strip()
- # wildcardkeyword = string.replace(wildcardkeyword,',',' ')
- # wildcardkeyword = string.replace(wildcardkeyword,';',' ')
- # wildcardkeyword = wildcardkeyword.split()
- # NNN = len(wildcardkeyword)
- # for i in range(NNN):
- # keyword = wildcardkeyword[i]
- # keyword = string.replace(keyword,"*",".*")
- # keyword = string.replace(keyword,"?",".")
- # wildcardkeyword[i] = keyword#'[[:<:]]'+ keyword+'[[:>:]]'
- # return wildcardkeyword
-
if __name__ == '__main__':
parse("foo=[3 2 1]")
diff --git a/wqflask/wqflask/pbkdf2.py b/wqflask/wqflask/pbkdf2.py
index f7f61a09..917b9d31 100644
--- a/wqflask/wqflask/pbkdf2.py
+++ b/wqflask/wqflask/pbkdf2.py
@@ -44,7 +44,7 @@ import hmac
import hashlib
from struct import Struct
from operator import xor
-from itertools import izip, starmap
+from itertools import starmap
_pack_int = Struct('>I').pack
@@ -66,13 +66,13 @@ def pbkdf2_bin(data, salt, iterations=1000, keylen=24, hashfunc=None):
def _pseudorandom(x, mac=mac):
h = mac.copy()
h.update(x)
- return map(ord, h.digest())
+ return list(map(ord, h.digest()))
buf = []
- for block in xrange(1, -(-keylen // mac.digest_size) + 1):
+ for block in range(1, -(-keylen // mac.digest_size) + 1):
rv = u = _pseudorandom(salt + _pack_int(block))
- for i in xrange(iterations - 1):
+ for i in range(iterations - 1):
u = _pseudorandom(''.join(map(chr, u)))
- rv = list(starmap(xor, izip(rv, u)))
+ rv = list(starmap(xor, zip(rv, u)))
buf.extend(rv)
return ''.join(map(chr, buf))[:keylen]
@@ -81,7 +81,7 @@ def safe_str_cmp(a, b):
if len(a) != len(b):
return False
rv = 0
- for x, y in izip(a, b):
+ for x, y in zip(a, b):
rv |= ord(x) ^ ord(y)
return rv == 0
@@ -92,14 +92,14 @@ def test():
def check(data, salt, iterations, keylen, expected):
rv = pbkdf2_hex(data, salt, iterations, keylen)
if rv != expected:
- print 'Test failed:'
- print ' Expected: %s' % expected
- print ' Got: %s' % rv
- print ' Parameters:'
- print ' data=%s' % data
- print ' salt=%s' % salt
- print ' iterations=%d' % iterations
- print
+ print('Test failed:')
+ print((' Expected: %s' % expected))
+ print((' Got: %s' % rv))
+ print(' Parameters:')
+ print((' data=%s' % data))
+ print((' salt=%s' % salt))
+ print((' iterations=%d' % iterations))
+ print()
failed.append(1)
# From RFC 6070
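This module predates the standard library's PBKDF2 support; since Python 3.4 the same derivation and the constant-time comparison are available as hashlib.pbkdf2_hmac and hmac.compare_digest, which also sidesteps the bytes-versus-str issues in the ported helpers. A sketch of the stdlib route, assuming the module's SHA-1 default and checking against the RFC 6070 vector referenced just above:

    import hashlib
    import hmac

    def pbkdf2_hex_stdlib(data, salt, iterations=1000, keylen=24):
        # hashlib.pbkdf2_hmac takes and returns bytes.
        derived = hashlib.pbkdf2_hmac('sha1', data.encode(), salt.encode(),
                                      iterations, dklen=keylen)
        return derived.hex()

    # First test vector from RFC 6070.
    assert pbkdf2_hex_stdlib('password', 'salt', 1, 20) == \
        '0c60c80f961f0e71f3a9b524af6012062fe037a6'

    # Constant-time comparison without a hand-rolled loop.
    assert hmac.compare_digest(pbkdf2_hex_stdlib('password', 'salt', 1, 20),
                               '0c60c80f961f0e71f3a9b524af6012062fe037a6')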
diff --git a/wqflask/wqflask/resource_manager.py b/wqflask/wqflask/resource_manager.py
index 39a07310..e883d5da 100644
--- a/wqflask/wqflask/resource_manager.py
+++ b/wqflask/wqflask/resource_manager.py
@@ -1,5 +1,3 @@
-from __future__ import print_function, division, absolute_import
-
import json
from flask import (Flask, g, render_template, url_for, request, make_response,
@@ -125,10 +123,10 @@ def add_group_to_resource():
def get_group_names(group_masks):
group_masks_with_names = {}
- for group_id, group_mask in group_masks.iteritems():
+ for group_id, group_mask in list(group_masks.items()):
this_mask = group_mask
group_name = get_group_info(group_id)['name']
this_mask['name'] = group_name
group_masks_with_names[group_id] = this_mask
- return group_masks_with_names \ No newline at end of file
+ return group_masks_with_names
diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py
index f63a84d1..aa8f9e8f 100644
--- a/wqflask/wqflask/search_results.py
+++ b/wqflask/wqflask/search_results.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
import re
import uuid
from math import *
@@ -53,7 +51,7 @@ views.py).
search = self.search_terms
self.original_search_string = self.search_terms
# check for dodgy search terms
- rx = re.compile(r'.*\W(href|http|sql|select|update)\W.*',re.IGNORECASE)
+ rx = re.compile(r'.*\W(href|http|sql|select|update)\W.*', re.IGNORECASE)
if rx.match(search):
logger.info("Regex failed search")
self.search_term_exists = False
@@ -123,7 +121,7 @@ views.py).
trait_dict['hmac'] = hmac.data_hmac('{}:{}'.format(this_trait.name, this_trait.dataset.name))
if this_trait.dataset.type == "ProbeSet":
trait_dict['symbol'] = this_trait.symbol
- trait_dict['description'] = this_trait.description_display.decode('utf-8', 'replace')
+ trait_dict['description'] = this_trait.description_display
trait_dict['location'] = this_trait.location_repr
trait_dict['mean'] = "N/A"
trait_dict['additive'] = "N/A"
@@ -272,7 +270,7 @@ def get_GO_symbols(a_search):
def insert_newlines(string, every=64):
""" This is because it is seemingly impossible to change the width of the description column, so I'm just manually adding line breaks """
lines = []
- for i in xrange(0, len(string), every):
+ for i in range(0, len(string), every):
lines.append(string[i:i+every])
return '\n'.join(lines)
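insert_newlines chunks a description into fixed-width slices, and the only Python 3 change is xrange becoming range. A quick check of the slicing behaviour, using an invented string and a width of 10 for brevity:

    def insert_newlines(string, every=64):
        lines = []
        for i in range(0, len(string), every):
            lines.append(string[i:i + every])
        return '\n'.join(lines)

    text = 'abcdefghijKLMNOPQRSTuv'
    assert insert_newlines(text, every=10) == 'abcdefghij\nKLMNOPQRST\nuv'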
diff --git a/wqflask/wqflask/send_mail.py b/wqflask/wqflask/send_mail.py
index bf5d0dd8..86e8a558 100644
--- a/wqflask/wqflask/send_mail.py
+++ b/wqflask/wqflask/send_mail.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
import datetime
import time
diff --git a/wqflask/wqflask/show_trait/SampleList.py b/wqflask/wqflask/show_trait/SampleList.py
index ad78ebcc..6fcf7cec 100644
--- a/wqflask/wqflask/show_trait/SampleList.py
+++ b/wqflask/wqflask/show_trait/SampleList.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
from flask import Flask, g
from base import webqtlCaseData
@@ -43,7 +41,7 @@ class SampleList(object):
for counter, sample_name in enumerate(sample_names, 1):
sample_name = sample_name.replace("_2nd_", "")
- if type(self.this_trait) is list: #ZS: self.this_trait will be a list if it is a Temp trait
+ if isinstance(self.this_trait, list): #ZS: self.this_trait will be a list if it is a Temp trait
if counter <= len(self.this_trait) and str(self.this_trait[counter-1]).upper() != 'X':
sample = webqtlCaseData.webqtlCaseData(name=sample_name, value=float(self.this_trait[counter-1]))
else:
@@ -57,7 +55,7 @@ class SampleList(object):
sample = webqtlCaseData.webqtlCaseData(name=sample_name)
sample.extra_info = {}
- if self.dataset.group.name == 'AXBXA' and sample_name in ('AXB18/19/20','AXB13/14','BXA8/17'):
+ if self.dataset.group.name == 'AXBXA' and sample_name in ('AXB18/19/20', 'AXB13/14', 'BXA8/17'):
sample.extra_info['url'] = "/mouseCross.html#AXB/BXA"
sample.extra_info['css_class'] = "fs12"
diff --git a/wqflask/wqflask/show_trait/export_trait_data.py b/wqflask/wqflask/show_trait/export_trait_data.py
index 253c887b..2d76b935 100644
--- a/wqflask/wqflask/show_trait/export_trait_data.py
+++ b/wqflask/wqflask/show_trait/export_trait_data.py
@@ -1,5 +1,3 @@
-from __future__ import print_function, division
-
import simplejson as json
from pprint import pformat as pf
@@ -47,7 +45,7 @@ def get_export_metadata(trait_id, dataset_name):
def dict_to_sorted_list(dictionary):
- sorted_list = [item for item in dictionary.iteritems()]
+ sorted_list = [item for item in list(dictionary.items())]
sorted_list = sorted(sorted_list, cmp=cmp_samples)
sorted_values = [item[1] for item in sorted_list]
return sorted_values
@@ -71,4 +69,4 @@ def cmp_samples(a, b):
else:
return 1
else:
- return -1 \ No newline at end of file
+ return -1
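One context line in dict_to_sorted_list still passes cmp=cmp_samples, and sorted() in Python 3 no longer accepts a cmp argument; the usual migration is to wrap the comparator with functools.cmp_to_key. A sketch of that call, with a placeholder comparator standing in for the real cmp_samples:

    import functools

    def cmp_samples(a, b):
        # Placeholder with the real comparator's two-argument shape:
        # order (name, value) pairs by sample name.
        return (a[0] > b[0]) - (a[0] < b[0])

    samples = [('S3', 1.2), ('S1', 0.4), ('S2', 2.8)]
    sorted_samples = sorted(samples, key=functools.cmp_to_key(cmp_samples))
    assert [name for name, _ in sorted_samples] == ['S1', 'S2', 'S3']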
diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py
index f188fd9d..88cd7dca 100644
--- a/wqflask/wqflask/show_trait/show_trait.py
+++ b/wqflask/wqflask/show_trait/show_trait.py
@@ -1,9 +1,7 @@
-from __future__ import absolute_import, print_function, division
-
import string
import os
import datetime
-import cPickle
+import pickle
import uuid
import requests
import json as json
@@ -231,8 +229,8 @@ class ShowTrait(object):
hddn = OrderedDict()
if self.dataset.group.allsamples:
- hddn['allsamples'] = string.join(self.dataset.group.allsamples, ' ')
- hddn['primary_samples'] = string.join(self.primary_sample_names, ',')
+ hddn['allsamples'] = ' '.join(self.dataset.group.allsamples)
+ hddn['primary_samples'] = ','.join(self.primary_sample_names)
hddn['trait_id'] = self.trait_id
hddn['trait_display_name'] = self.this_trait.display_name
hddn['dataset'] = self.dataset.name
@@ -261,7 +259,7 @@ class ShowTrait(object):
hddn['export_data'] = ""
hddn['export_format'] = "excel"
if len(self.scales_in_geno) < 2:
- hddn['mapping_scale'] = self.scales_in_geno[self.scales_in_geno.keys()[0]][0][0]
+ hddn['mapping_scale'] = self.scales_in_geno[list(self.scales_in_geno.keys())[0]][0][0]
# We'll need access to this_trait and hddn in the Jinja2 Template, so we put it inside self
self.hddn = hddn
@@ -372,7 +370,7 @@ class ShowTrait(object):
this_group = self.dataset.group.name
# We're checking a string here!
- assert isinstance(this_group, basestring), "We need a string type thing here"
+ assert isinstance(this_group, str), "We need a string type thing here"
if this_group[:3] == 'BXD' and this_group != "BXD-Harvested":
this_group = 'BXD'
@@ -405,7 +403,7 @@ class ShowTrait(object):
if not self.temp_trait:
other_sample_names = []
- for sample in self.this_trait.data.keys():
+ for sample in list(self.this_trait.data.keys()):
if (self.this_trait.data[sample].name2 in primary_sample_names) and (self.this_trait.data[sample].name not in primary_sample_names):
primary_sample_names.append(self.this_trait.data[sample].name)
primary_sample_names.remove(self.this_trait.data[sample].name2)
@@ -558,7 +556,7 @@ def get_table_widths(sample_groups, has_num_cases=False):
def has_num_cases(this_trait):
has_n = False
if this_trait.dataset.type != "ProbeSet" and this_trait.dataset.type != "Geno":
- for name, sample in this_trait.data.iteritems():
+ for name, sample in list(this_trait.data.items()):
if sample.num_cases:
has_n = True
break
@@ -611,7 +609,7 @@ def get_categorical_variables(this_trait, sample_list):
if len(sample_list.attributes) > 0:
for attribute in sample_list.attributes:
attribute_vals = []
- for sample_name in this_trait.data.keys():
+ for sample_name in list(this_trait.data.keys()):
if sample_list.attributes[attribute].name in this_trait.data[sample_name].extra_attributes:
attribute_vals.append(this_trait.data[sample_name].extra_attributes[sample_list.attributes[attribute].name])
else:
@@ -625,7 +623,7 @@ def get_categorical_variables(this_trait, sample_list):
def get_genotype_scales(genofiles):
geno_scales = {}
- if type(genofiles) is list:
+ if isinstance(genofiles, list):
for the_file in genofiles:
file_location = the_file['location']
geno_scales[file_location] = get_scales_from_genofile(file_location)
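The mapping_scale change in the show_trait.py hunk above picks the first key of scales_in_geno by materializing every key; next(iter(...)) returns the same key without the intermediate list. A small illustration with an invented scales dict:

    scales_in_geno = {'BXD.geno': [['morgan', 'cM']]}

    # Both expressions pick out the first (and here only) key.
    first_key = list(scales_in_geno.keys())[0]
    assert first_key == next(iter(scales_in_geno))

    mapping_scale = scales_in_geno[first_key][0][0]
    assert mapping_scale == 'morgan'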
diff --git a/wqflask/wqflask/snp_browser/snp_browser.py b/wqflask/wqflask/snp_browser/snp_browser.py
index 1d28d76a..2df71b12 100644
--- a/wqflask/wqflask/snp_browser/snp_browser.py
+++ b/wqflask/wqflask/snp_browser/snp_browser.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
from flask import Flask, g, url_for
import string
@@ -458,8 +456,8 @@ class SnpBrowser(object):
function_list = []
if function_details:
- function_list = string.split(string.strip(function_details), ",")
- function_list = map(string.strip, function_list)
+ function_list = function_details.strip().split(",")
+ function_list = [item.strip() for item in function_list]
function_list[0] = function_list[0].title()
function_details = ", ".join(item for item in function_list)
function_details = function_details.replace("_", " ")
@@ -477,7 +475,7 @@ class SnpBrowser(object):
the_bases = []
for j, item in enumerate(allele_value_list):
- if item and isinstance(item, basestring):
+ if item and isinstance(item, str):
this_base = [str(item), base_color_dict[item]]
else:
this_base = ""
@@ -612,7 +610,7 @@ class SnpBrowser(object):
this_allele_list = []
for item in self.allele_list:
- if item and isinstance(item, basestring) and (item.lower() not in this_allele_list) and (item != "-"):
+ if item and isinstance(item, str) and (item.lower() not in this_allele_list) and (item != "-"):
this_allele_list.append(item.lower())
total_allele_count = len(this_allele_list)
@@ -724,12 +722,12 @@ def get_effect_details_by_category(effect_name = None, effect_value = None):
new_codon_group_list = ['Start Gained']
codon_effect_group_list = ['Start Lost', 'Stop Gained', 'Stop Lost', 'Nonsynonymous', 'Synonymous']
- effect_detail_list = string.split(string.strip(effect_value), '|')
- effect_detail_list = map(string.strip, effect_detail_list)
+ effect_detail_list = effect_value.strip().split('|')
+ effect_detail_list = [item.strip() for item in effect_detail_list]
for index, item in enumerate(effect_detail_list):
- item_list = string.split(string.strip(item), ',')
- item_list = map(string.strip, item_list)
+ item_list = item.strip().split(',')
+ item_list = [item.strip() for item in item_list]
gene_id = item_list[0]
gene_name = item_list[1]
@@ -748,13 +746,13 @@ def get_effect_details_by_category(effect_name = None, effect_value = None):
if effect_name in new_codon_group_list:
new_codon = item_list[6]
tmp_list = [biotype, new_codon]
- function_detail_list.append(string.join(tmp_list, ", "))
+ function_detail_list.append(", ".join(tmp_list))
elif effect_name in codon_effect_group_list:
old_new_AA = item_list[6]
old_new_codon = item_list[7]
codon_num = item_list[8]
tmp_list = [biotype, old_new_AA, old_new_codon, codon_num]
- function_detail_list.append(string.join(tmp_list, ", "))
+ function_detail_list.append(", ".join(tmp_list))
else:
function_detail_list.append(biotype)
@@ -854,7 +852,7 @@ def get_gene_id_name_dict(species_id, gene_name_list):
if len(gene_name_list) == 0:
return ""
gene_name_str_list = ["'" + gene_name + "'" for gene_name in gene_name_list]
- gene_name_str = string.join(gene_name_str_list, ",")
+ gene_name_str = ",".join(gene_name_str_list)
query = """
SELECT
diff --git a/wqflask/wqflask/submit_bnw.py b/wqflask/wqflask/submit_bnw.py
index 59e60dfd..a0e84c8c 100644
--- a/wqflask/wqflask/submit_bnw.py
+++ b/wqflask/wqflask/submit_bnw.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
from base.trait import GeneralTrait
from base import data_set
from utility import helper_functions
@@ -8,4 +6,4 @@ import utility.logger
logger = utility.logger.getLogger(__name__ )
def get_bnw_input(start_vars):
- logger.debug("BNW VARS:", start_vars) \ No newline at end of file
+ logger.debug("BNW VARS:", start_vars)
diff --git a/wqflask/wqflask/templates/admin/manage_resource.html b/wqflask/wqflask/templates/admin/manage_resource.html
index 0b12eaae..33a37594 100644
--- a/wqflask/wqflask/templates/admin/manage_resource.html
+++ b/wqflask/wqflask/templates/admin/manage_resource.html
@@ -65,7 +65,7 @@
</tr>
</thead>
<tbody>
- {% for key, value in group_masks.iteritems() %}
+ {% for key, value in group_masks.items() %}
<tr>
<td>{{ value.name }}</td>
<td>{{ value.data }}</td>
diff --git a/wqflask/wqflask/templates/loading.html b/wqflask/wqflask/templates/loading.html
index 15ab4080..9b335dfe 100644
--- a/wqflask/wqflask/templates/loading.html
+++ b/wqflask/wqflask/templates/loading.html
@@ -1,7 +1,7 @@
<title>Loading {{ start_vars.tool_used }} Results</title>
<link REL="stylesheet" TYPE="text/css" href="/static/packages/bootstrap/css/bootstrap.css" />
<form method="post" action="" name="loading_form" id="loading_form" class="form-horizontal">
- {% for key, value in start_vars.iteritems() %}
+ {% for key, value in start_vars.items() %}
<input type="hidden" name="{{ key }}" value="{{ value }}">
{% endfor %}
<div class="container">
@@ -44,4 +44,4 @@
$("#loading_form").attr("action", "{{ start_vars.form_url }}");
setTimeout(function(){ $("#loading_form").submit()}, 350);
-</script> \ No newline at end of file
+</script>
diff --git a/wqflask/wqflask/update_search_results.py b/wqflask/wqflask/update_search_results.py
index 68bea9d6..672f95b1 100644
--- a/wqflask/wqflask/update_search_results.py
+++ b/wqflask/wqflask/update_search_results.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
import json
from flask import Flask, g
diff --git a/wqflask/wqflask/user_login.py b/wqflask/wqflask/user_login.py
index 077a799b..cb2edbc5 100644
--- a/wqflask/wqflask/user_login.py
+++ b/wqflask/wqflask/user_login.py
@@ -1,5 +1,3 @@
-from __future__ import print_function, division, absolute_import
-
import os
import hashlib
import datetime
@@ -199,7 +197,7 @@ def login():
if user_details:
submitted_password = params['password']
pwfields = user_details['password']
- if type(pwfields) is str:
+ if isinstance(pwfields, str):
pwfields = json.loads(pwfields)
encrypted_pass_fields = encode_password(pwfields, submitted_password)
password_match = pbkdf2.safe_str_cmp(encrypted_pass_fields['password'], pwfields['password'])
@@ -478,4 +476,4 @@ def register():
@app.errorhandler(401)
def unauthorized(error):
- return redirect(url_for('login')) \ No newline at end of file
+ return redirect(url_for('login'))
diff --git a/wqflask/wqflask/user_manager.py b/wqflask/wqflask/user_manager.py
index a871e91a..24191a15 100644
--- a/wqflask/wqflask/user_manager.py
+++ b/wqflask/wqflask/user_manager.py
@@ -1,5 +1,3 @@
-from __future__ import print_function, division, absolute_import
-
import os
import hashlib
import datetime
@@ -9,7 +7,7 @@ import uuid
import hashlib
import hmac
import base64
-import urlparse
+import urllib.parse
import simplejson as json
@@ -252,7 +250,7 @@ class UserSession(object):
def add_collection(self, collection_name, traits):
"""Add collection into ElasticSearch"""
- collection_dict = {'id': unicode(uuid.uuid4()),
+ collection_dict = {'id': str(uuid.uuid4()),
'name': collection_name,
'created_timestamp': datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'),
'changed_timestamp': datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'),
@@ -867,7 +865,7 @@ def forgot_password_submit():
email_address = params['email_address']
next_page = None
if email_address != "":
- logger.debug("Wants to send password E-mail to ",email_address)
+ logger.debug("Wants to send password E-mail to ", email_address)
user_details = get_user_by_unique_column("email_address", email_address)
if user_details:
ForgotPasswordEmail(user_details["email_address"])
diff --git a/wqflask/wqflask/user_session.py b/wqflask/wqflask/user_session.py
index 3aa2c151..c1f38396 100644
--- a/wqflask/wqflask/user_session.py
+++ b/wqflask/wqflask/user_session.py
@@ -1,5 +1,3 @@
-from __future__ import print_function, division, absolute_import
-
import datetime
import time
import uuid
@@ -184,7 +182,7 @@ class UserSession(object):
def add_collection(self, collection_name, traits):
"""Add collection into Redis"""
- collection_dict = {'id': unicode(uuid.uuid4()),
+ collection_dict = {'id': str(uuid.uuid4()),
'name': collection_name,
'created_timestamp': datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'),
'changed_timestamp': datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'),
diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py
index 42a10c7a..7fdc62e5 100644
--- a/wqflask/wqflask/views.py
+++ b/wqflask/wqflask/views.py
@@ -2,8 +2,6 @@
#
# Main routing table for GN2
-from __future__ import absolute_import, division, print_function
-
import traceback # for error page
import os # for error gifs
import random # for random error gif
@@ -14,13 +12,13 @@ import csv
import simplejson as json
import yaml
import xlsxwriter
-import StringIO # Todo: Use cStringIO?
+import io # Todo: Use cStringIO?
from zipfile import ZipFile, ZIP_DEFLATED
import gc
import numpy as np
-import cPickle as pickle
+import pickle as pickle
import uuid
import flask
@@ -54,7 +52,7 @@ from wqflask.docs import Docs
from wqflask.db_info import InfoPage
from utility import temp_data
-from utility.tools import SQL_URI,TEMPDIR,USE_REDIS,USE_GN_SERVER,GN_SERVER_URL,GN_VERSION,JS_TWITTER_POST_FETCHER_PATH,JS_GUIX_PATH, CSS_PATH
+from utility.tools import SQL_URI, TEMPDIR, USE_REDIS, USE_GN_SERVER, GN_SERVER_URL, GN_VERSION, JS_TWITTER_POST_FETCHER_PATH, JS_GUIX_PATH, CSS_PATH
from utility.helper_functions import get_species_groups
from utility.authentication_tools import check_resource_availability
from utility.redis_tools import get_redis_conn
@@ -129,10 +127,10 @@ def handle_bad_request(e):
list = [fn for fn in os.listdir("./wqflask/static/gif/error") if fn.endswith(".gif") ]
animation = random.choice(list)
- resp = make_response(render_template("error.html",message=err_msg,stack=formatted_lines,error_image=animation,version=GN_VERSION))
+ resp = make_response(render_template("error.html", message=err_msg, stack=formatted_lines, error_image=animation, version=GN_VERSION))
# logger.error("Set cookie %s with %s" % (err_msg, animation))
- resp.set_cookie(err_msg[:32],animation)
+ resp.set_cookie(err_msg[:32], animation)
return resp
@app.route("/authentication_needed")
@@ -215,8 +213,6 @@ def search_page():
result = the_search.__dict__
valid_search = result['search_term_exists']
- logger.debugf("result", result)
-
if USE_REDIS and valid_search:
Redis.set(key, pickle.dumps(result, pickle.HIGHEST_PROTOCOL))
Redis.expire(key, 60*60)
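Search results are cached in Redis as a pickled dict, so only the `pickle` rename matters here. A sketch of the cache round trip, assuming `get_redis_conn()` (imported above) returns a redis-py connection and using a hypothetical cache key:

    import pickle
    from utility.redis_tools import get_redis_conn

    Redis = get_redis_conn()
    key = "search_results:some-hashed-query"     # hypothetical key

    # write side: cache the result dict for an hour
    Redis.set(key, pickle.dumps({"search_term_exists": True}, pickle.HIGHEST_PROTOCOL))
    Redis.expire(key, 60 * 60)

    # read side: redis-py returns bytes, which pickle.loads accepts directly
    cached = Redis.get(key)
    if cached is not None:
        result = pickle.loads(cached)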
@@ -264,7 +260,7 @@ def docedit():
@app.route('/generated/<filename>')
def generated_file(filename):
logger.info(request.url)
- return send_from_directory(GENERATED_IMAGE_DIR,filename)
+ return send_from_directory(GENERATED_IMAGE_DIR, filename)
@app.route("/help")
def help():
@@ -380,7 +376,7 @@ def export_trait_excel():
logger.info("sample_data - type: %s -- size: %s" % (type(sample_data), len(sample_data)))
- buff = StringIO.StringIO()
+ buff = io.StringIO()
workbook = xlsxwriter.Workbook(buff, {'in_memory': True})
worksheet = workbook.add_worksheet()
for i, row in enumerate(sample_data):
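One caveat with the `StringIO.StringIO()` to `io.StringIO()` swap in this route: an `.xlsx` file is a zip archive, so xlsxwriter writes bytes, and an in-memory workbook under Python 3 generally needs `io.BytesIO`. A hedged sketch of that pattern (the Flask response handling is omitted):

    import io
    import xlsxwriter

    buff = io.BytesIO()          # binary buffer for the xlsx (zip) container
    workbook = xlsxwriter.Workbook(buff, {'in_memory': True})
    worksheet = workbook.add_worksheet()
    for i, row in enumerate([("marker", "value"), ("rs3657281", 1.23)]):
        worksheet.write_row(i, 0, row)
    workbook.close()
    excel_bytes = buff.getvalue()                # bytes, ready to send as a download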
@@ -404,7 +400,7 @@ def export_trait_csv():
logger.info("sample_data - type: %s -- size: %s" % (type(sample_data), len(sample_data)))
- buff = StringIO.StringIO()
+ buff = io.StringIO()
writer = csv.writer(buff)
for row in sample_data:
writer.writerow(row)
@@ -427,7 +423,7 @@ def export_traits_csv():
now = datetime.datetime.now()
time_str = now.strftime('%H:%M_%d%B%Y')
filename = "export_{}".format(time_str)
- memory_file = StringIO.StringIO()
+ memory_file = io.StringIO()
with ZipFile(memory_file, mode='w', compression=ZIP_DEFLATED) as zf:
for the_file in file_list:
zf.writestr(the_file[0], the_file[1])
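`zipfile.ZipFile` has the same constraint under Python 3: it needs a binary buffer, so `io.BytesIO` is the usual in-memory target. A minimal sketch, assuming `file_list` holds `(filename, csv_text)` tuples as in the hunk above:

    import io
    from zipfile import ZipFile, ZIP_DEFLATED

    file_list = [("trait_1.csv", "sample,value\nBXD1,7.2\n")]   # illustrative data
    memory_file = io.BytesIO()
    with ZipFile(memory_file, mode='w', compression=ZIP_DEFLATED) as zf:
        for the_file in file_list:
            zf.writestr(the_file[0], the_file[1])   # writestr accepts str or bytes payloads
    memory_file.seek(0)          # rewind before handing the buffer to send_file()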
@@ -470,7 +466,7 @@ def export_perm_data():
["#Comment: Results sorted from low to high peak linkage"]
]
- buff = StringIO.StringIO()
+ buff = io.StringIO()
writer = csv.writer(buff)
writer.writerows(the_rows)
for item in perm_info['perm_data']:
@@ -543,7 +539,7 @@ def heatmap_page():
result = template_vars.__dict__
- for item in template_vars.__dict__.keys():
+ for item in list(template_vars.__dict__.keys()):
logger.info(" ---**--- {}: {}".format(type(template_vars.__dict__[item]), item))
pickled_result = pickle.dumps(result, pickle.HIGHEST_PROTOCOL)
@@ -647,7 +643,7 @@ def loading_page():
if 'wanted_inputs' in initial_start_vars:
wanted = initial_start_vars['wanted_inputs'].split(",")
start_vars = {}
- for key, value in initial_start_vars.iteritems():
+ for key, value in list(initial_start_vars.items()):
if key in wanted or key.startswith(('value:')):
start_vars[key] = value
@@ -747,7 +743,7 @@ def mapping_results_page():
'transform'
)
start_vars = {}
- for key, value in initial_start_vars.iteritems():
+ for key, value in list(initial_start_vars.items()):
if key in wanted or key.startswith(('value:')):
start_vars[key] = value
#logger.debug("Mapping called with start_vars:", start_vars)
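`dict.iteritems()` is gone in Python 3; `.items()` returns a view that is fine to iterate directly, and the `list(...)` wrapper is only required when the dict is mutated during the loop, which is not the case here. A small sketch of the filtering loop:

    initial_start_vars = {"trait_id": "12345", "value:BXD1": "7.2", "debug": "1"}
    wanted = ("trait_id",)

    start_vars = {}
    for key, value in initial_start_vars.items():   # no list() needed: we only build a new dict
        if key in wanted or key.startswith("value:"):
            start_vars[key] = value
    # start_vars == {"trait_id": "12345", "value:BXD1": "7.2"}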
@@ -954,8 +950,8 @@ def json_default_handler(obj):
if hasattr(obj, 'isoformat'):
return obj.isoformat()
# Handle integer keys for dictionaries
- elif isinstance(obj, int):
- return str(int)
+    elif isinstance(obj, (int, uuid.UUID)):
+ return str(obj)
# Handle custom objects
if hasattr(obj, '__dict__'):
return obj.__dict__
@@ -963,5 +959,5 @@ def json_default_handler(obj):
# logger.info("Not going to serialize Dataset")
# return None
else:
- raise TypeError, 'Object of type %s with value of %s is not JSON serializable' % (
- type(obj), repr(obj))
+ raise TypeError('Object of type %s with value of %s is not JSON serializable' % (
+ type(obj), repr(obj)))
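The function above is the `default=` hook for `json.dumps`, and the final hunk rewrites the Python 2 `raise TypeError, msg` statement as an ordinary exception call. A sketch of how the handler is used, assuming `json_default_handler` from the hunk is in scope and using a made-up object:

    import json
    import uuid

    class Trait:
        def __init__(self):
            self.name = "1427571_at"
            self.lrs = 4.2

    payload = {"id": uuid.uuid4(), "trait": Trait()}
    # UUIDs are rendered via str(); objects fall back to their __dict__
    print(json.dumps(payload, default=json_default_handler))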
diff --git a/wqflask/wqflask/wgcna/wgcna_analysis.py b/wqflask/wqflask/wgcna/wgcna_analysis.py
index 880a1cb2..d79ad6df 100644
--- a/wqflask/wqflask/wgcna/wgcna_analysis.py
+++ b/wqflask/wqflask/wgcna/wgcna_analysis.py
@@ -60,7 +60,7 @@ class WGCNA(object):
print("Starting WGCNA analysis on dataset")
self.r_enableWGCNAThreads() # Enable multi threading
self.trait_db_list = [trait.strip() for trait in requestform['trait_list'].split(',')]
- print("Retrieved phenotype data from database", requestform['trait_list'])
+        print("Retrieved phenotype data from database", requestform['trait_list'])
helper_functions.get_trait_db_obs(self, self.trait_db_list)
self.input = {} # self.input contains the phenotype values we need to send to R
@@ -101,13 +101,13 @@ class WGCNA(object):
if requestform.get('SoftThresholds') is not None:
powers = [int(threshold.strip()) for threshold in requestform['SoftThresholds'].rstrip().split(",")]
rpow = r_unlist(r_c(powers))
- print "SoftThresholds: {} == {}".format(powers, rpow)
+            print("SoftThresholds: {} == {}".format(powers, rpow))
self.sft = self.r_pickSoftThreshold(rM, powerVector = rpow, verbose = 5)
- print "PowerEstimate: {}".format(self.sft[0])
+ print(("PowerEstimate: {}".format(self.sft[0])))
self.results['PowerEstimate'] = self.sft[0]
if self.sft[0][0] is ri.NA_Integer:
- print "No power is suitable for the analysis, just use 1"
+ print("No power is suitable for the analysis, just use 1")
self.results['Power'] = 1 # No power could be estimated
else:
self.results['Power'] = self.sft[0][0] # Use the estimated power
@@ -122,7 +122,7 @@ class WGCNA(object):
self.results['network'] = network
# How many modules and how many gene per module ?
- print "WGCNA found {} modules".format(r_table(network[1]))
+        print("WGCNA found {} modules".format(r_table(network[1])))
self.results['nmod'] = r_length(r_table(network[1]))[0]
# The iconic WCGNA plot of the modules in the hanging tree
@@ -135,7 +135,7 @@ class WGCNA(object):
sys.stdout.flush()
def render_image(self, results):
- print("pre-loading imgage results:", self.results['imgloc'])
+        print("pre-loading image results:", self.results['imgloc'])
imgfile = open(self.results['imgloc'], 'rb')
imgdata = imgfile.read()
imgB64 = imgdata.encode("base64")
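One unchanged context line above will still fail under Python 3: `imgdata.encode("base64")` relies on the removed base64 codec, and `bytes` objects have no `.encode()`. A hedged sketch of the usual replacement with the `base64` module (the image path is hypothetical):

    import base64

    with open("/tmp/wgcna_dendrogram.png", "rb") as imgfile:   # hypothetical path
        imgdata = imgfile.read()                               # bytes under Python 3
    imgB64 = base64.b64encode(imgdata).decode("ascii")         # str, safe to embed in a template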