about summary refs log tree commit diff
path: root/wqflask/wqflask
diff options
context:
space:
mode:
authorBonfaceKilz2020-10-27 01:18:38 +0300
committerGitHub2020-10-27 01:18:38 +0300
commit37c391bc62e9080effcf83c6ff0056ab8841b7fb (patch)
tree1e794c5616c25e82869314a2f4e91f64c4d40ea9 /wqflask/wqflask
parent85896707ef1f9e214b45298f6b5b1a9dc37bc839 (diff)
parentb369489e6c075eee3f58bb33e493c901b052b0a1 (diff)
downloadgenenetwork2-37c391bc62e9080effcf83c6ff0056ab8841b7fb.tar.gz
Merge pull request #422 from BonfaceKilz/build/python3-migration
Build/python3 migration
Diffstat (limited to 'wqflask/wqflask')
-rw-r--r--wqflask/wqflask/__init__.py2
-rw-r--r--wqflask/wqflask/api/correlation.py22
-rw-r--r--wqflask/wqflask/api/gen_menu.py8
-rw-r--r--wqflask/wqflask/api/mapping.py2
-rw-r--r--wqflask/wqflask/api/router.py41
-rw-r--r--wqflask/wqflask/collect.py12
-rw-r--r--wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py19
-rw-r--r--wqflask/wqflask/correlation/corr_scatter_plot.py10
-rw-r--r--wqflask/wqflask/correlation/correlation_functions.py8
-rw-r--r--wqflask/wqflask/correlation/show_corr_results.py92
-rw-r--r--wqflask/wqflask/correlation_matrix/show_corr_matrix.py69
-rw-r--r--wqflask/wqflask/ctl/ctl_analysis.py12
-rw-r--r--wqflask/wqflask/db_info.py265
-rw-r--r--wqflask/wqflask/do_search.py10
-rw-r--r--wqflask/wqflask/docs.py4
-rw-r--r--wqflask/wqflask/export_traits.py14
-rw-r--r--wqflask/wqflask/external_tools/send_to_bnw.py4
-rw-r--r--wqflask/wqflask/external_tools/send_to_geneweaver.py12
-rw-r--r--wqflask/wqflask/external_tools/send_to_webgestalt.py6
-rw-r--r--wqflask/wqflask/group_manager.py5
-rw-r--r--wqflask/wqflask/gsearch.py2
-rw-r--r--wqflask/wqflask/heatmap/heatmap.py43
-rw-r--r--wqflask/wqflask/interval_analyst/GeneUtil.py8
-rw-r--r--wqflask/wqflask/marker_regression/display_mapping_results.py309
-rw-r--r--wqflask/wqflask/marker_regression/gemma_mapping.py4
-rw-r--r--wqflask/wqflask/marker_regression/plink_mapping.py20
-rw-r--r--wqflask/wqflask/marker_regression/qtlreaper_mapping.py2
-rw-r--r--wqflask/wqflask/marker_regression/rqtl_mapping.py2
-rw-r--r--wqflask/wqflask/marker_regression/run_mapping.py32
-rw-r--r--wqflask/wqflask/model.py10
-rw-r--r--wqflask/wqflask/network_graph/network_graph.py125
-rw-r--r--wqflask/wqflask/news.py4
-rw-r--r--wqflask/wqflask/parser.py18
-rw-r--r--wqflask/wqflask/pbkdf2.py144
-rw-r--r--wqflask/wqflask/resource_manager.py6
-rw-r--r--wqflask/wqflask/search_results.py12
-rw-r--r--wqflask/wqflask/send_mail.py2
-rw-r--r--wqflask/wqflask/show_trait/SampleList.py71
-rw-r--r--wqflask/wqflask/show_trait/export_trait_data.py10
-rw-r--r--wqflask/wqflask/show_trait/show_trait.py25
-rw-r--r--wqflask/wqflask/snp_browser/snp_browser.py24
-rw-r--r--wqflask/wqflask/submit_bnw.py4
-rw-r--r--wqflask/wqflask/templates/admin/manage_resource.html2
-rw-r--r--wqflask/wqflask/templates/correlation_page.html3
-rw-r--r--wqflask/wqflask/templates/loading.html4
-rw-r--r--wqflask/wqflask/update_search_results.py2
-rw-r--r--wqflask/wqflask/user_login.py18
-rw-r--r--wqflask/wqflask/user_manager.py79
-rw-r--r--wqflask/wqflask/user_session.py10
-rw-r--r--wqflask/wqflask/views.py42
-rw-r--r--wqflask/wqflask/wgcna/wgcna_analysis.py30
51 files changed, 711 insertions, 973 deletions
diff --git a/wqflask/wqflask/__init__.py b/wqflask/wqflask/__init__.py
index 9afeb3c4..274c3d82 100644
--- a/wqflask/wqflask/__init__.py
+++ b/wqflask/wqflask/__init__.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
 import sys
 import time
 import jinja2
diff --git a/wqflask/wqflask/api/correlation.py b/wqflask/wqflask/api/correlation.py
index 7f5312c1..f5b50dcd 100644
--- a/wqflask/wqflask/api/correlation.py
+++ b/wqflask/wqflask/api/correlation.py
@@ -1,10 +1,8 @@
-from __future__ import absolute_import, division, print_function
-
 import collections
 
 import scipy
 
-from MySQLdb import escape_string as escape
+from utility.db_tools import escape
 
 from flask import g
 
@@ -36,7 +34,7 @@ def do_correlation(start_vars):
     #corr_results = collections.OrderedDict(sorted(corr_results.items(), key=lambda t: -abs(t[1][0])))
 
     final_results = []
-    for _trait_counter, trait in enumerate(corr_results.keys()[:corr_params['return_count']]):
+    for _trait_counter, trait in enumerate(list(corr_results.keys())[:corr_params['return_count']]):
         if corr_params['type'] == "tissue":
             [sample_r, num_overlap, sample_p, symbol] = corr_results[trait]
             result_dict = {
@@ -76,20 +74,20 @@ def calculate_results(this_trait, this_dataset, target_dataset, corr_params):
     if corr_params['type'] == "tissue":
         trait_symbol_dict = this_dataset.retrieve_genes("Symbol")
         corr_results = do_tissue_correlation_for_all_traits(this_trait, trait_symbol_dict, corr_params)
-        sorted_results = collections.OrderedDict(sorted(corr_results.items(),
+        sorted_results = collections.OrderedDict(sorted(list(corr_results.items()),
                                                         key=lambda t: -abs(t[1][1])))
     elif corr_params['type'] == "literature" or corr_params['type'] == "lit": #ZS: Just so a user can use either "lit" or "literature"
         trait_geneid_dict = this_dataset.retrieve_genes("GeneId")
         corr_results = do_literature_correlation_for_all_traits(this_trait, this_dataset, trait_geneid_dict, corr_params)
-        sorted_results = collections.OrderedDict(sorted(corr_results.items(),
+        sorted_results = collections.OrderedDict(sorted(list(corr_results.items()),
                                                  key=lambda t: -abs(t[1][1])))
     else:
-        for target_trait, target_vals in target_dataset.trait_data.iteritems():
+        for target_trait, target_vals in list(target_dataset.trait_data.items()):
             result = get_sample_r_and_p_values(this_trait, this_dataset, target_vals, target_dataset, corr_params['type'])
             if result is not None:
                 corr_results[target_trait] = result
 
-        sorted_results = collections.OrderedDict(sorted(corr_results.items(), key=lambda t: -abs(t[1][0])))
+        sorted_results = collections.OrderedDict(sorted(list(corr_results.items()), key=lambda t: -abs(t[1][0])))
 
     return sorted_results
 
@@ -100,10 +98,10 @@ def do_tissue_correlation_for_all_traits(this_trait, trait_symbol_dict, corr_par
     if this_trait.symbol.lower() in primary_trait_tissue_vals_dict:
         primary_trait_tissue_values = primary_trait_tissue_vals_dict[this_trait.symbol.lower()]
 
-        corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(symbol_list=trait_symbol_dict.values())
+        corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(symbol_list=list(trait_symbol_dict.values()))
 
         tissue_corr_data = {}
-        for trait, symbol in trait_symbol_dict.iteritems():
+        for trait, symbol in list(trait_symbol_dict.items()):
             if symbol and symbol.lower() in corr_result_tissue_vals_dict:
                 this_trait_tissue_values = corr_result_tissue_vals_dict[symbol.lower()]
 
@@ -119,7 +117,7 @@ def do_literature_correlation_for_all_traits(this_trait, target_dataset, trait_g
     input_trait_mouse_gene_id = convert_to_mouse_gene_id(target_dataset.group.species.lower(), this_trait.geneid)
 
     lit_corr_data = {}
-    for trait, gene_id in trait_geneid_dict.iteritems():
+    for trait, gene_id in list(trait_geneid_dict.items()):
         mouse_gene_id = convert_to_mouse_gene_id(target_dataset.group.species.lower(), gene_id)
 
         if mouse_gene_id and str(mouse_gene_id).find(";") == -1:
@@ -234,4 +232,4 @@ def init_corr_params(start_vars):
         'return_count' : return_count
     }
 
-    return corr_params
\ No newline at end of file
+    return corr_params
diff --git a/wqflask/wqflask/api/gen_menu.py b/wqflask/wqflask/api/gen_menu.py
index fedf3e0b..1dcafe1f 100644
--- a/wqflask/wqflask/api/gen_menu.py
+++ b/wqflask/wqflask/api/gen_menu.py
@@ -1,5 +1,3 @@
-from __future__ import print_function, division
-
 from flask import g
 
 
@@ -61,7 +59,7 @@ def get_types(groups):
     """Build types list"""
     types = {}
 
-    for species, group_dict in groups.iteritems():
+    for species, group_dict in list(groups.items()):
         types[species] = {}
         for group_name, _group_full_name, _family_name in group_dict:
             if phenotypes_exist(group_name):
@@ -136,9 +134,9 @@ def build_types(species, group):
 def get_datasets(types):
     """Build datasets list"""
     datasets = {}
-    for species, group_dict in types.iteritems():
+    for species, group_dict in list(types.items()):
         datasets[species] = {}
-        for group, type_list in group_dict.iteritems():
+        for group, type_list in list(group_dict.items()):
             datasets[species][group] = {}
             for type_name in type_list:
                 these_datasets = build_datasets(species, group, type_name[0])
diff --git a/wqflask/wqflask/api/mapping.py b/wqflask/wqflask/api/mapping.py
index 92c27c9b..d59a69df 100644
--- a/wqflask/wqflask/api/mapping.py
+++ b/wqflask/wqflask/api/mapping.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
 import string
 
 from base import data_set
diff --git a/wqflask/wqflask/api/router.py b/wqflask/wqflask/api/router.py
index 6324cabe..8e59ef27 100644
--- a/wqflask/wqflask/api/router.py
+++ b/wqflask/wqflask/api/router.py
@@ -1,16 +1,21 @@
 # GN2 API
 
-from __future__ import absolute_import, division, print_function
+import os
+import io
+import csv
+import json
+import datetime
+import requests
 
-import os, io, csv, json, datetime, requests, yaml
-import zlib
 from zipfile import ZipFile, ZIP_DEFLATED
 
-import StringIO
 
 import flask
-from flask import g, Response, request, make_response, render_template, send_from_directory, jsonify, redirect, send_file
-import sqlalchemy
+from flask import g
+from flask import request
+from flask import make_response
+from flask import send_file
+
 from wqflask import app
 
 from wqflask.api import correlation, mapping, gen_menu
@@ -308,7 +313,7 @@ def fetch_traits(dataset_name, file_format = "json"):
         else:
             filename = dataset_name + "_trait_ids.csv"
 
-            si = StringIO.StringIO()
+            si = io.StringIO()
             csv_writer = csv.writer(si)
             csv_writer.writerows([[trait_id] for trait_id in trait_ids])
             output = make_response(si.getvalue())
@@ -322,7 +327,7 @@ def fetch_traits(dataset_name, file_format = "json"):
         else:
             filename = dataset_name + "_trait_names.csv"
 
-            si = StringIO.StringIO()
+            si = io.StringIO()
             csv_writer = csv.writer(si)
             csv_writer.writerows([[trait_name] for trait_name in trait_names])
             output = make_response(si.getvalue())
@@ -413,7 +418,7 @@ def fetch_traits(dataset_name, file_format = "json"):
                 for result in g.db.execute(final_query).fetchall():
                     results_list.append(result)
 
-                si = StringIO.StringIO()
+                si = io.StringIO()
                 csv_writer = csv.writer(si)
                 csv_writer.writerows(results_list)
                 output = make_response(si.getvalue())
@@ -517,9 +522,9 @@ def all_sample_data(dataset_name, file_format = "csv"):
                         line_list.append("x")
                 results_list.append(line_list)
 
-            results_list = map(list, zip(*results_list))
+            results_list = list(map(list, zip(*results_list)))
 
-            si = StringIO.StringIO()
+            si = io.StringIO()
             csv_writer = csv.writer(si)
             csv_writer.writerows(results_list)
             output = make_response(si.getvalue())
@@ -558,10 +563,10 @@ def trait_sample_data(dataset_name, trait_name, file_format = "json"):
         sample_list = []
         for sample in sample_data:
             sample_dict = {
-              "sample_name"   : sample[0],
-              "sample_name_2" : sample[1],
-              "value"         : sample[2],
-              "data_id"       : sample[3],
+              "sample_name": sample[0],
+              "sample_name_2": sample[1],
+              "value": sample[2],
+              "data_id": sample[3],
             }
             if sample[4]:
                 sample_dict["se"] = sample[4]
@@ -706,7 +711,7 @@ def get_mapping_results():
         if format == "csv":
             filename = "mapping_" + datetime.datetime.utcnow().strftime("%b_%d_%Y_%I:%M%p") + ".csv"
 
-            si = StringIO.StringIO()
+            si = io.StringIO()
             csv_writer = csv.writer(si)
             csv_writer.writerows(results)
             output = make_response(si.getvalue())
@@ -732,7 +737,7 @@ def get_genotypes(group_name, file_format="csv", dataset_name=None):
         if request.args['limit_to'].isdigit():
             limit_num = int(request.args['limit_to'])
 
-    si = StringIO.StringIO()
+    si = io.StringIO()
     if file_format == "csv" or file_format == "geno":
         filename = group_name + ".geno"
 
@@ -966,4 +971,4 @@ def get_group_id(group_name):
     if group_id:
         return group_id[0]
     else:
-        return None
\ No newline at end of file
+        return None
diff --git a/wqflask/wqflask/collect.py b/wqflask/wqflask/collect.py
index 4c558bfe..e074a3d8 100644
--- a/wqflask/wqflask/collect.py
+++ b/wqflask/wqflask/collect.py
@@ -1,5 +1,4 @@
-from __future__ import print_function, division, absolute_import
-
+import hashlib
 import datetime
 import simplejson as json
 
@@ -27,7 +26,9 @@ Redis = get_redis_conn()
 
 
 def process_traits(unprocessed_traits):
-    if isinstance(unprocessed_traits, basestring):
+    if isinstance(unprocessed_traits, bytes):
+        unprocessed_traits = unprocessed_traits.decode('utf-8').split(",")
+    else:  # It's a string
         unprocessed_traits = unprocessed_traits.split(",")
     traits = set()
     for trait in unprocessed_traits:
@@ -114,7 +115,8 @@ def collections_new():
         g.user_session.add_traits_to_collection(collection_id, traits)
         return redirect(url_for('view_collection', uc_id=collection_id))
     else:
-        CauseAnError
+        # CauseAnError
+        pass
 
 def create_new(collection_name):
     params = request.args
@@ -182,7 +184,7 @@ def view_collection():
     params = request.args
 
     uc_id = params['uc_id']
-    uc = (collection for collection in g.user_session.user_collections if collection["id"] == uc_id).next()
+    uc = next((collection for collection in g.user_session.user_collections if collection["id"] == uc_id))
     traits = uc["members"]
 
     trait_obs = []
diff --git a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py
index 09d6b9cc..92de6073 100644
--- a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py
+++ b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py
@@ -18,35 +18,16 @@
 #
 # This module is used by GeneNetwork project (www.genenetwork.org)
 
-from __future__ import absolute_import, print_function, division
-
-import sys
-# sys.path.append(".")   Never do this in a webserver!
-
-import string
-import cPickle
-import os
-import time
-import pp
-import math
-import collections
-import resource
-
-
 from pprint import pformat as pf
 
 from base.trait import create_trait
 from base import data_set
 from utility import webqtlUtil, helper_functions, corr_result_helpers
-from db import webqtlDatabaseFunction
 import utility.webqtlUtil #this is for parallel computing only.
 from wqflask.correlation import correlation_functions
-from utility.benchmark import Bench
 
 from MySQLdb import escape_string as escape
 
-from pprint import pformat as pf
-
 from flask import Flask, g
 
 
diff --git a/wqflask/wqflask/correlation/corr_scatter_plot.py b/wqflask/wqflask/correlation/corr_scatter_plot.py
index 0f3d455c..6ab8c3d8 100644
--- a/wqflask/wqflask/correlation/corr_scatter_plot.py
+++ b/wqflask/wqflask/correlation/corr_scatter_plot.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
 import math
 
 from flask import g
@@ -43,13 +41,13 @@ class CorrScatterPlot(object):
         samples_1, samples_2, num_overlap = corr_result_helpers.normalize_values_with_samples(self.trait_1.data, self.trait_2.data)
 
         self.data = []
-        self.indIDs = samples_1.keys()
+        self.indIDs = list(samples_1.keys())
         vals_1 = []
-        for sample in samples_1.keys():
+        for sample in list(samples_1.keys()):
             vals_1.append(samples_1[sample].value)
         self.data.append(vals_1)
         vals_2 = []
-        for sample in samples_2.keys():
+        for sample in list(samples_2.keys()):
             vals_2.append(samples_2[sample].value)
         self.data.append(vals_2)
 
@@ -137,4 +135,4 @@ def get_intercept_coords(slope, intercept, x_range, y_range):
     intercept_coords.append([x1, y1])
     intercept_coords.append([x2, y2])
 
-    return intercept_coords
\ No newline at end of file
+    return intercept_coords
diff --git a/wqflask/wqflask/correlation/correlation_functions.py b/wqflask/wqflask/correlation/correlation_functions.py
index 06dec795..b883e361 100644
--- a/wqflask/wqflask/correlation/correlation_functions.py
+++ b/wqflask/wqflask/correlation/correlation_functions.py
@@ -24,8 +24,6 @@
 #
 # Last updated by NL 2011/03/23
 
-from __future__ import absolute_import, print_function, division
-
 import math
 import rpy2.robjects
 import string
@@ -50,12 +48,12 @@ from flask import Flask, g
 
 def cal_zero_order_corr_for_tiss (primaryValue=[], targetValue=[], method='pearson'):
 
-    R_primary = rpy2.robjects.FloatVector(range(len(primaryValue)))
+    R_primary = rpy2.robjects.FloatVector(list(range(len(primaryValue))))
     N = len(primaryValue)
     for i in range(len(primaryValue)):
         R_primary[i] = primaryValue[i]
 
-    R_target = rpy2.robjects.FloatVector(range(len(targetValue)))
+    R_target = rpy2.robjects.FloatVector(list(range(len(targetValue))))
     for i in range(len(targetValue)):
         R_target[i]=targetValue[i]
 
@@ -114,4 +112,4 @@ def get_trait_symbol_and_tissue_values(symbol_list=None):
     tissue_data = MrnaAssayTissueData(gene_symbols=symbol_list)
 
     if len(tissue_data.gene_symbols):
-        return tissue_data.get_symbol_values_pairs()
\ No newline at end of file
+        return tissue_data.get_symbol_values_pairs()
diff --git a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py
index de7a1c0c..e710bacd 100644
--- a/wqflask/wqflask/correlation/show_corr_results.py
+++ b/wqflask/wqflask/correlation/show_corr_results.py
@@ -18,51 +18,36 @@
 #
 # This module is used by GeneNetwork project (www.genenetwork.org)
 
-from __future__ import absolute_import, print_function, division
-
-import sys
-
-import string
-import cPickle
-import os
-import time
-import pp
-import math
 import collections
-import resource
 import json
-
 import scipy
 import numpy
 import rpy2.robjects as ro                    # R Objects
-import rpy2.rinterface as ri
+import utility.logger
+import utility.webqtlUtil
 
-from rpy2.robjects.packages import importr
-utils = importr("utils")
+from base.trait import create_trait
 
-from pprint import pformat as pf
+from rpy2.robjects.packages import importr
 
-from base import webqtlConfig
-from utility.THCell import THCell
-from utility.TDCell import TDCell
-from base.trait import create_trait
 from base import data_set
-from utility import webqtlUtil, helper_functions, corr_result_helpers, hmac
-from db import webqtlDatabaseFunction
-import utility.webqtlUtil #this is for parallel computing only.
+from utility import helper_functions
+from utility import corr_result_helpers
+from utility import hmac
+
 from wqflask.correlation import correlation_functions
 from utility.benchmark import Bench
-import utility.webqtlUtil
-from utility.type_checking import is_float, is_int, is_str, get_float, get_int, get_string
 
-from MySQLdb import escape_string as escape
+from utility.type_checking import is_str
+from utility.type_checking import get_float
+from utility.type_checking import get_int
+from utility.type_checking import get_string
+from utility.db_tools import escape
 
-from pprint import pformat as pf
+from flask import g
 
-from flask import Flask, g
-
-import utility.logger
-logger = utility.logger.getLogger(__name__ )
+utils = importr("utils")
+logger = utility.logger.getLogger(__name__)
 
 METHOD_LIT = "3"
 METHOD_TISSUE_PEARSON = "4"
@@ -72,6 +57,7 @@ TISSUE_METHODS = [METHOD_TISSUE_PEARSON, METHOD_TISSUE_RANK]
 
 TISSUE_MOUSE_DB = 1
 
+
 class CorrelationResults(object):
     def __init__(self, start_vars):
         # get trait list from db (database name)
@@ -108,17 +94,17 @@ class CorrelationResults(object):
             self.sample_data = {}
             self.corr_type = start_vars['corr_type']
             self.corr_method = start_vars['corr_sample_method']
-            self.min_expr = get_float(start_vars,'min_expr')
-            self.p_range_lower = get_float(start_vars,'p_range_lower',-1.0)
-            self.p_range_upper = get_float(start_vars,'p_range_upper',1.0)
+            self.min_expr = get_float(start_vars, 'min_expr')
+            self.p_range_lower = get_float(start_vars, 'p_range_lower', -1.0)
+            self.p_range_upper = get_float(start_vars, 'p_range_upper', 1.0)
 
             if ('loc_chr' in start_vars and
                 'min_loc_mb' in start_vars and
                 'max_loc_mb' in start_vars):
 
-                self.location_chr = get_string(start_vars,'loc_chr')
-                self.min_location_mb = get_int(start_vars,'min_loc_mb')
-                self.max_location_mb = get_int(start_vars,'max_loc_mb')
+                self.location_chr = get_string(start_vars, 'loc_chr')
+                self.min_location_mb = get_int(start_vars, 'min_loc_mb')
+                self.max_location_mb = get_int(start_vars, 'max_loc_mb')
             else:
                 self.location_chr = self.min_location_mb = self.max_location_mb = None
 
@@ -145,10 +131,10 @@ class CorrelationResults(object):
                 if corr_samples_group == 'samples_other':
                     primary_samples = [x for x in primary_samples if x not in (
                                     self.dataset.group.parlist + self.dataset.group.f1list)]
-                self.process_samples(start_vars, self.this_trait.data.keys(), primary_samples)
+                self.process_samples(start_vars, list(self.this_trait.data.keys()), primary_samples)
 
             self.target_dataset = data_set.create_dataset(start_vars['corr_dataset'])
-            self.target_dataset.get_trait_data(self.sample_data.keys())
+            self.target_dataset.get_trait_data(list(self.sample_data.keys()))
 
             self.header_fields = get_header_fields(self.target_dataset.type, self.corr_method)
 
@@ -168,41 +154,41 @@ class CorrelationResults(object):
 
                 tissue_corr_data = self.do_tissue_correlation_for_all_traits()
                 if tissue_corr_data != None:
-                    for trait in tissue_corr_data.keys()[:self.return_number]:
+                    for trait in list(tissue_corr_data.keys())[:self.return_number]:
                         self.get_sample_r_and_p_values(trait, self.target_dataset.trait_data[trait])
                 else:
-                    for trait, values in self.target_dataset.trait_data.iteritems():
+                    for trait, values in list(self.target_dataset.trait_data.items()):
                         self.get_sample_r_and_p_values(trait, values)
 
             elif self.corr_type == "lit":
                 self.trait_geneid_dict = self.dataset.retrieve_genes("GeneId")
                 lit_corr_data = self.do_lit_correlation_for_all_traits()
 
-                for trait in lit_corr_data.keys()[:self.return_number]:
+                for trait in list(lit_corr_data.keys())[:self.return_number]:
                     self.get_sample_r_and_p_values(trait, self.target_dataset.trait_data[trait])
 
             elif self.corr_type == "sample":
-                for trait, values in self.target_dataset.trait_data.iteritems():
+                for trait, values in list(self.target_dataset.trait_data.items()):
                     self.get_sample_r_and_p_values(trait, values)
 
-            self.correlation_data = collections.OrderedDict(sorted(self.correlation_data.items(),
+            self.correlation_data = collections.OrderedDict(sorted(list(self.correlation_data.items()),
                                                                    key=lambda t: -abs(t[1][0])))
 
             if self.target_dataset.type == "ProbeSet" or self.target_dataset.type == "Geno":
                 #ZS: Convert min/max chromosome to an int for the location range option
                 range_chr_as_int = None
-                for order_id, chr_info in self.dataset.species.chromosomes.chromosomes.iteritems():
+                for order_id, chr_info in list(self.dataset.species.chromosomes.chromosomes.items()):
                     if 'loc_chr' in start_vars:
                         if chr_info.name == self.location_chr:
                             range_chr_as_int = order_id
 
-            for _trait_counter, trait in enumerate(self.correlation_data.keys()[:self.return_number]):
+            for _trait_counter, trait in enumerate(list(self.correlation_data.keys())[:self.return_number]):
                 trait_object = create_trait(dataset=self.target_dataset, name=trait, get_qtl_info=True, get_sample_info=False)
 
                 if self.target_dataset.type == "ProbeSet" or self.target_dataset.type == "Geno":
                     #ZS: Convert trait chromosome to an int for the location range option
                     chr_as_int = 0
-                    for order_id, chr_info in self.dataset.species.chromosomes.chromosomes.iteritems():
+                    for order_id, chr_info in list(self.dataset.species.chromosomes.chromosomes.items()):
                         if chr_info.name == trait_object.chr:
                             chr_as_int = order_id
 
@@ -297,14 +283,14 @@ class CorrelationResults(object):
 
             #print("trait_gene_symbols: ", pf(trait_gene_symbols.values()))
             corr_result_tissue_vals_dict= correlation_functions.get_trait_symbol_and_tissue_values(
-                                                    symbol_list=self.trait_symbol_dict.values())
+                                                    symbol_list=list(self.trait_symbol_dict.values()))
 
             #print("corr_result_tissue_vals: ", pf(corr_result_tissue_vals_dict))
 
             #print("trait_gene_symbols: ", pf(trait_gene_symbols))
 
             tissue_corr_data = {}
-            for trait, symbol in self.trait_symbol_dict.iteritems():
+            for trait, symbol in list(self.trait_symbol_dict.items()):
                 if symbol and symbol.lower() in corr_result_tissue_vals_dict:
                     this_trait_tissue_values = corr_result_tissue_vals_dict[symbol.lower()]
 
@@ -314,7 +300,7 @@ class CorrelationResults(object):
 
                     tissue_corr_data[trait] = [symbol, result[0], result[2]]
 
-            tissue_corr_data = collections.OrderedDict(sorted(tissue_corr_data.items(),
+            tissue_corr_data = collections.OrderedDict(sorted(list(tissue_corr_data.items()),
                                                            key=lambda t: -abs(t[1][1])))
 
             return tissue_corr_data
@@ -359,7 +345,7 @@ class CorrelationResults(object):
         input_trait_mouse_gene_id = self.convert_to_mouse_gene_id(self.dataset.group.species.lower(), self.this_trait.geneid)
 
         lit_corr_data = {}
-        for trait, gene_id in self.trait_geneid_dict.iteritems():
+        for trait, gene_id in list(self.trait_geneid_dict.items()):
             mouse_gene_id = self.convert_to_mouse_gene_id(self.dataset.group.species.lower(), gene_id)
 
             if mouse_gene_id and str(mouse_gene_id).find(";") == -1:
@@ -387,7 +373,7 @@ class CorrelationResults(object):
             else:
                 lit_corr_data[trait] = [gene_id, 0]
 
-        lit_corr_data = collections.OrderedDict(sorted(lit_corr_data.items(),
+        lit_corr_data = collections.OrderedDict(sorted(list(lit_corr_data.items()),
                                                            key=lambda t: -abs(t[1][1])))
 
         return lit_corr_data
@@ -648,4 +634,4 @@ def get_header_fields(data_type, corr_method):
                                 'N',
                                 'Sample p(r)']
 
-    return header_fields
\ No newline at end of file
+    return header_fields
diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py
index 3beee84f..a394f548 100644
--- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py
+++ b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py
@@ -18,52 +18,29 @@
 #
 # This module is used by GeneNetwork project (www.genenetwork.org)
 
-from __future__ import absolute_import, print_function, division
-
-import sys
-# sys.path.append(".")   Never do this in a webserver!
-
-import string
-import cPickle
-import os
 import datetime
-import time
-import pp
 import math
-import collections
-import resource
-
 import numpy as np
 import scipy
-
-from rpy2.robjects.packages import importr
 import rpy2.robjects as robjects
+import utility.webqtlUtil  # this is for parallel computing only.
+import utility.logger
 
-from pprint import pformat as pf
-
-from utility.redis_tools import get_redis_conn
-Redis = get_redis_conn()
-THIRTY_DAYS = 60 * 60 * 24 * 30
-
-from utility.THCell import THCell
-from utility.TDCell import TDCell
-from base.trait import GeneralTrait
 from base import data_set
-from utility import webqtlUtil, helper_functions, corr_result_helpers
-
-from db import webqtlDatabaseFunction
-import utility.webqtlUtil #this is for parallel computing only.
-from wqflask.correlation import correlation_functions
-from utility.benchmark import Bench
+from functools import reduce
+from functools import cmp_to_key
+from rpy2.robjects.packages import importr
 
-from MySQLdb import escape_string as escape
+from utility import webqtlUtil
+from utility import helper_functions
+from utility import corr_result_helpers
+from utility.redis_tools import get_redis_conn
 
-from pprint import pformat as pf
+logger = utility.logger.getLogger(__name__)
 
-from flask import Flask, g, url_for
+Redis = get_redis_conn()
+THIRTY_DAYS = 60 * 60 * 24 * 30
 
-import utility.logger
-logger = utility.logger.getLogger(__name__ )
 
 class CorrelationMatrix(object):
 
@@ -190,7 +167,7 @@ class CorrelationMatrix(object):
                 if self.do_PCA == True:
                     self.pca_works = "True"
                     self.pca_trait_ids = []
-                    pca = self.calculate_pca(range(len(self.traits)), corr_eigen_value, corr_eigen_vectors)
+                    pca = self.calculate_pca(list(range(len(self.traits))), corr_eigen_value, corr_eigen_vectors)
                     self.loadings_array = self.process_loadings()
                 else:
                     self.pca_works = "False"
@@ -199,8 +176,8 @@ class CorrelationMatrix(object):
 
             self.js_data = dict(traits = [trait.name for trait in self.traits],
                                 groups = groups,
-                                cols = range(len(self.traits)),
-                                rows = range(len(self.traits)),
+                                cols = list(range(len(self.traits))),
+                                rows = list(range(len(self.traits))),
                                 samples = self.all_sample_list,
                                 sample_data = self.sample_data,)
             #                    corr_results = [result[1] for result in result_row for result_row in self.corr_results])
@@ -271,14 +248,14 @@ def zScore(trait_data_array):
         i = 0
         for data in trait_data_array:
             N = len(data)
-            S = reduce(lambda x,y: x+y, data, 0.)
-            SS = reduce(lambda x,y: x+y*y, data, 0.)
+            S = reduce(lambda x, y: x+y, data, 0.)
+            SS = reduce(lambda x, y: x+y*y, data, 0.)
             mean = S/N
             var = SS - S*S/N
             stdev = math.sqrt(var/(N-1))
             if stdev == 0:
                 stdev = 1e-100
-            data2 = map(lambda x:(x-mean)/stdev,data)
+            data2 = [(x-mean)/stdev for x in data]
             trait_data_array[i] = data2
             i += 1
         return trait_data_array
@@ -290,16 +267,16 @@ def sortEigenVectors(vector):
         combines = []
         i = 0
         for item in eigenValues:
-            combines.append([eigenValues[i],eigenVectors[i]])
+            combines.append([eigenValues[i], eigenVectors[i]])
             i += 1
-        combines.sort(webqtlUtil.cmpEigenValue)
+        combines.sort(key=cmp_to_key(webqtlUtil.cmpEigenValue))
         A = []
         B = []
         for item in combines:
             A.append(item[0])
             B.append(item[1])
-        sum = reduce(lambda x,y: x+y, A, 0.0)
-        A = map(lambda x:x*100.0/sum, A) 
+        sum = reduce(lambda x, y: x+y, A, 0.0)
+        A = [x*100.0/sum for x in A] 
         return [A, B]
     except:
-        return []
\ No newline at end of file
+        return []
diff --git a/wqflask/wqflask/ctl/ctl_analysis.py b/wqflask/wqflask/ctl/ctl_analysis.py
index 35067036..72b4f3a3 100644
--- a/wqflask/wqflask/ctl/ctl_analysis.py
+++ b/wqflask/wqflask/ctl/ctl_analysis.py
@@ -125,7 +125,7 @@ class CTL(object):
             gt = create_trait(name = ts[0], dataset_name = ts[1])
             gt = retrieve_sample_data(gt, dataset, individuals)
             for ind in individuals:
-              if ind in gt.data.keys():
+              if ind in list(gt.data.keys()):
                 traits.append(gt.data[ind].value)
               else:
                 traits.append("-999")
@@ -143,7 +143,7 @@ class CTL(object):
         #r_write_table(rPheno, "~/outputGN/pheno.csv")
 
         # Perform the CTL scan
-        res = self.r_CTLscan(rGeno, rPheno, strategy = strategy, nperm = nperm, parametric = parametric, ncores = 6)
+        res = self.r_CTLscan(rGeno, rPheno, strategy = strategy, nperm = nperm, parametric = parametric, nthreads=6)
 
         # Get significant interactions
         significant = self.r_CTLsignificant(res, significance = significance)
@@ -175,7 +175,7 @@ class CTL(object):
         sys.stdout.flush()
 
         # Create the interactive graph for cytoscape visualization (Nodes and Edges)
-        if not type(significant) == ri.RNULLType:
+        if not isinstance(significant, ri.RNULLType):
           for x in range(len(significant[0])):
             logger.debug(significant[0][x], significant[1][x], significant[2][x])     # Debug to console
             tsS = significant[0][x].split(':')                                        # Source
@@ -186,15 +186,15 @@ class CTL(object):
             self.addNode(gtT)
             self.addEdge(gtS, gtT, significant, x)
 
-            significant[0][x] = gtS.symbol + " (" + gtS.name + ")"                    # Update the trait name for the displayed table
-            significant[2][x] = gtT.symbol + " (" + gtT.name + ")"                    # Update the trait name for the displayed table
+            significant[0][x] = "{} ({})".format(gtS.symbol, gtS.name)                    # Update the trait name for the displayed table
+            significant[2][x] = "{} ({})".format(gtT.symbol, gtT.name)                    # Update the trait name for the displayed table
 
         self.elements = json.dumps(self.nodes_list + self.edges_list)
 
     def loadImage(self, path, name):
         imgfile = open(self.results[path], 'rb')
         imgdata = imgfile.read()
-        imgB64 = imgdata.encode("base64")
+        imgB64 = base64.b64encode(imgdata)
         bytesarray = array.array('B', imgB64)
         self.results[name] = bytesarray
 
diff --git a/wqflask/wqflask/db_info.py b/wqflask/wqflask/db_info.py
index f04e38bf..f420b472 100644
--- a/wqflask/wqflask/db_info.py
+++ b/wqflask/wqflask/db_info.py
@@ -1,127 +1,138 @@
-import httplib, urllib2

-import re

-

-from flask import Flask, g

-

-from utility.logger import getLogger

-logger = getLogger(__name__ )

-

-class InfoPage(object):

-    def __init__(self, start_vars):

-        self.info = None

-        self.gn_accession_id = None

-        if 'gn_accession_id' in start_vars:

-            self.gn_accession_id = start_vars['gn_accession_id']

-        self.info_page_name = start_vars['info_page_name']

-

-        self.get_info()

-        self.get_datasets_list()

-

-    def get_info(self, create=False):

-        query_base = ("SELECT InfoPageName, GN_AccesionId, Species.MenuName, Species.TaxonomyId, Tissue.Name, InbredSet.Name, " +

-                      "GeneChip.GeneChipName, GeneChip.GeoPlatform, AvgMethod.Name, Datasets.DatasetName, Datasets.GeoSeries, " +

-                      "Datasets.PublicationTitle, DatasetStatus.DatasetStatusName, Datasets.Summary, Datasets.AboutCases, " +

-                      "Datasets.AboutTissue, Datasets.AboutDataProcessing, Datasets.Acknowledgment, Datasets.ExperimentDesign, " +

-                      "Datasets.Contributors, Datasets.Citation, Datasets.Notes, Investigators.FirstName, Investigators.LastName, " +

-                      "Investigators.Address, Investigators.City, Investigators.State, Investigators.ZipCode, Investigators.Country, " +

-                      "Investigators.Phone, Investigators.Email, Investigators.Url, Organizations.OrganizationName, " +

-                      "InvestigatorId, DatasetId, DatasetStatusId, Datasets.AboutPlatform, InfoFileTitle, Specifics " +

-                      "FROM InfoFiles " +

-                      "LEFT JOIN Species USING (SpeciesId) " +

-                      "LEFT JOIN Tissue USING (TissueId) " +

-                      "LEFT JOIN InbredSet USING (InbredSetId) " +

-                      "LEFT JOIN GeneChip USING (GeneChipId) " +

-                      "LEFT JOIN AvgMethod USING (AvgMethodId) " +

-                      "LEFT JOIN Datasets USING (DatasetId) " +

-                      "LEFT JOIN Investigators USING (InvestigatorId) " +

-                      "LEFT JOIN Organizations USING (OrganizationId) " +

-                      "LEFT JOIN DatasetStatus USING (DatasetStatusId) WHERE ")

-

-        if self.gn_accession_id:

-            final_query = query_base + "GN_AccesionId = {}".format(self.gn_accession_id)

-            results = g.db.execute(final_query).fetchone()

-            if self.info_page_name and not results:

-				final_query = query_base + "InfoPageName={}".format(self.info_page_name)

-        elif self.info_page_name:

-            final_query = query_base + "InfoPageName={}".format(self.info_page_name)

-            results = g.db.execute(final_query).fetchone()

-        else:

-            raise 'No correct parameter found'

-

-        if results:

-            self.info = process_query_results(results)

-

-        if (not results or len(results) < 1) and self.info_page_name and create:

-            insert_sql = "INSERT INTO InfoFiles SET InfoFiles.InfoPageName={}".format(self.info_page_name)

-            return self.get_info()

-

-        if not self.gn_accession_id and self.info:

-            self.gn_accession_id = self.info['accession_id']

-        if not self.info_page_name and self.info:

-            self.info_page_name = self.info['info_page_name'] 

-

-    def get_datasets_list(self):

-        self.filelist = []

-        try:

-            response = urllib2.urlopen("http://datafiles.genenetwork.org/download/GN%s" % self.gn_accession_id)

-            data = response.read()

-

-            matches = re.findall(r"<tr>.+?</tr>", data, re.DOTALL)

-            for i, match in enumerate(matches):

-                if i == 0:

-                    continue

-                cells = re.findall(r"<td.+?>.+?</td>", match, re.DOTALL)

-                full_filename = re.search(r"<a href=\"(.+?)\"", cells[1], re.DOTALL).group(1).strip()

-                filename = full_filename.split("/")[-1]

-                filesize = re.search(r">(.+?)<", cells[2]).group(1).strip()

-                filedate = "N/A" #ZS: Since we can't get it for now

-

-                self.filelist.append([filename, filedate, filesize])

-        except Exception, e:

-            pass

-

-def process_query_results(results):

-    info_ob = {

-        'info_page_name': results[0],

-        'accession_id': results[1],

-        'menu_name': results[2],

-        'taxonomy_id': results[3],

-        'tissue_name': results[4],

-        'group_name': results[5],

-        'gene_chip_name': results[6],

-        'geo_platform': results[7],

-        'avg_method_name': results[8],

-        'dataset_name': results[9],

-        'geo_series': results[10],

-        'publication_title': results[11],

-        'dataset_status_name': results[12],

-        'dataset_summary': results[13],

-        'about_cases': results[14],

-        'about_tissue': results[15],

-        'about_data_processing': results[16],

-        'acknowledgement': results[17],

-        'experiment_design': results[18],

-        'contributors': results[19],

-        'citation': results[20],

-        'notes': results[21],

-        'investigator_firstname': results[22],

-        'investigator_lastname': results[23],

-        'investigator_address': results[24],

-        'investigator_city': results[25],

-        'investigator_state': results[26],

-        'investigator_zipcode': results[27],

-        'investigator_country': results[28],

-        'investigator_phone': results[29],

-        'investigator_email': results[30],

-        'investigator_url': results[31],

-        'organization_name': results[32],

-        'investigator_id': results[33],

-        'dataset_id': results[34],

-        'dataset_status_is': results[35],

-        'about_platform': results[36],

-        'info_file_title': results[37],

-        'specifics': results[38]

-    }

-

-    return info_ob

-        
\ No newline at end of file
+import http.client
+import urllib.request
+import urllib.error
+import urllib.parse
+import re
+
+from flask import Flask, g
+
+from utility.logger import getLogger
+logger = getLogger(__name__)
+
+
+class InfoPage(object):
+    def __init__(self, start_vars):
+        self.info = None
+        self.gn_accession_id = None
+        if 'gn_accession_id' in start_vars:
+            self.gn_accession_id = start_vars['gn_accession_id']
+        self.info_page_name = start_vars['info_page_name']
+
+        self.get_info()
+        self.get_datasets_list()
+
+    def get_info(self, create=False):
+        query_base = ("SELECT InfoPageName, GN_AccesionId, Species.MenuName, Species.TaxonomyId, Tissue.Name, InbredSet.Name, " +
+                      "GeneChip.GeneChipName, GeneChip.GeoPlatform, AvgMethod.Name, Datasets.DatasetName, Datasets.GeoSeries, " +
+                      "Datasets.PublicationTitle, DatasetStatus.DatasetStatusName, Datasets.Summary, Datasets.AboutCases, " +
+                      "Datasets.AboutTissue, Datasets.AboutDataProcessing, Datasets.Acknowledgment, Datasets.ExperimentDesign, " +
+                      "Datasets.Contributors, Datasets.Citation, Datasets.Notes, Investigators.FirstName, Investigators.LastName, " +
+                      "Investigators.Address, Investigators.City, Investigators.State, Investigators.ZipCode, Investigators.Country, " +
+                      "Investigators.Phone, Investigators.Email, Investigators.Url, Organizations.OrganizationName, " +
+                      "InvestigatorId, DatasetId, DatasetStatusId, Datasets.AboutPlatform, InfoFileTitle, Specifics " +
+                      "FROM InfoFiles " +
+                      "LEFT JOIN Species USING (SpeciesId) " +
+                      "LEFT JOIN Tissue USING (TissueId) " +
+                      "LEFT JOIN InbredSet USING (InbredSetId) " +
+                      "LEFT JOIN GeneChip USING (GeneChipId) " +
+                      "LEFT JOIN AvgMethod USING (AvgMethodId) " +
+                      "LEFT JOIN Datasets USING (DatasetId) " +
+                      "LEFT JOIN Investigators USING (InvestigatorId) " +
+                      "LEFT JOIN Organizations USING (OrganizationId) " +
+                      "LEFT JOIN DatasetStatus USING (DatasetStatusId) WHERE ")
+
+        if self.gn_accession_id:
+            final_query = query_base + \
+                "GN_AccesionId = {}".format(self.gn_accession_id)
+            results = g.db.execute(final_query).fetchone()
+            if self.info_page_name and not results:
+                final_query = query_base + \
+                    "InfoPageName={}".format(self.info_page_name)
+        elif self.info_page_name:
+            final_query = query_base + \
+                "InfoPageName={}".format(self.info_page_name)
+            results = g.db.execute(final_query).fetchone()
+        else:
+            raise ValueError('No correct parameter found')
+
+        if results:
+            self.info = process_query_results(results)
+
+        if (not results or len(results) < 1) and self.info_page_name and create:
+            insert_sql = "INSERT INTO InfoFiles SET InfoFiles.InfoPageName={}".format(
+                self.info_page_name)
+            return self.get_info()
+
+        if not self.gn_accession_id and self.info:
+            self.gn_accession_id = self.info['accession_id']
+        if not self.info_page_name and self.info:
+            self.info_page_name = self.info['info_page_name']
+
+    def get_datasets_list(self):
+        self.filelist = []
+        try:
+            response = urllib.request.urlopen(
+                "http://datafiles.genenetwork.org/download/GN%s" % self.gn_accession_id)
+            data = response.read().decode()
+
+            matches = re.findall(r"<tr>.+?</tr>", data, re.DOTALL)
+            for i, match in enumerate(matches):
+                if i == 0:
+                    continue
+                cells = re.findall(r"<td.+?>.+?</td>", match, re.DOTALL)
+                full_filename = re.search(
+                    r"<a href=\"(.+?)\"", cells[1], re.DOTALL).group(1).strip()
+                filename = full_filename.split("/")[-1]
+                filesize = re.search(r">(.+?)<", cells[2]).group(1).strip()
+                filedate = "N/A"  # ZS: Since we can't get it for now
+
+                self.filelist.append([filename, filedate, filesize])
+        except Exception as e:
+            pass
+
+def process_query_results(results):
+    info_ob = {
+        'info_page_name': results[0],
+        'accession_id': results[1],
+        'menu_name': results[2],
+        'taxonomy_id': results[3],
+        'tissue_name': results[4],
+        'group_name': results[5],
+        'gene_chip_name': results[6],
+        'geo_platform': results[7],
+        'avg_method_name': results[8],
+        'dataset_name': results[9],
+        'geo_series': results[10],
+        'publication_title': results[11],
+        'dataset_status_name': results[12],
+        'dataset_summary': results[13],
+        'about_cases': results[14],
+        'about_tissue': results[15],
+        'about_data_processing': results[16],
+        'acknowledgement': results[17],
+        'experiment_design': results[18],
+        'contributors': results[19],
+        'citation': results[20],
+        'notes': results[21],
+        'investigator_firstname': results[22],
+        'investigator_lastname': results[23],
+        'investigator_address': results[24],
+        'investigator_city': results[25],
+        'investigator_state': results[26],
+        'investigator_zipcode': results[27],
+        'investigator_country': results[28],
+        'investigator_phone': results[29],
+        'investigator_email': results[30],
+        'investigator_url': results[31],
+        'organization_name': results[32],
+        'investigator_id': results[33],
+        'dataset_id': results[34],
+        'dataset_status_is': results[35],
+        'about_platform': results[36],
+        'info_file_title': results[37],
+        'specifics': results[38]
+    }
+
+    return info_ob
+
+
diff --git a/wqflask/wqflask/do_search.py b/wqflask/wqflask/do_search.py
index 1e15d28f..00636563 100644
--- a/wqflask/wqflask/do_search.py
+++ b/wqflask/wqflask/do_search.py
@@ -1,16 +1,13 @@
-from __future__ import print_function, division
-
 import string
 import requests
 import json
 
 from flask import Flask, g
 
-from MySQLdb import escape_string as escape
+from utility.db_tools import escape
 from pprint import pformat as pf
 
 import sys
-# sys.path.append("..") Never in a running webserver
 
 from db import webqtlDatabaseFunction
 from utility.tools import GN2_BASE_URL
@@ -19,6 +16,7 @@ import logging
 from utility.logger import getLogger
 logger = getLogger(__name__)
 
+
 class DoSearch(object):
     """Parent class containing parameters/functions used for all searches"""
 
@@ -46,8 +44,8 @@ class DoSearch(object):
 
     def handle_wildcard(self, str):
         keyword = str.strip()
-        keyword = keyword.replace("*",".*")
-        keyword = keyword.replace("?",".")
+        keyword = keyword.replace("*", ".*")
+        keyword = keyword.replace("?", ".")
 
         return keyword
 
diff --git a/wqflask/wqflask/docs.py b/wqflask/wqflask/docs.py
index 9fad1cf1..d653c269 100644
--- a/wqflask/wqflask/docs.py
+++ b/wqflask/wqflask/docs.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
 import codecs
 
 from flask import g
@@ -42,4 +40,4 @@ def update_text(start_vars):
             sql = "UPDATE Docs SET content='{0}' WHERE entry='{1}';".format(content, start_vars['entry_type'])
             g.db.execute(sql)
     except:
-        pass
\ No newline at end of file
+        pass
diff --git a/wqflask/wqflask/export_traits.py b/wqflask/wqflask/export_traits.py
index 3272c03d..3a886537 100644
--- a/wqflask/wqflask/export_traits.py
+++ b/wqflask/wqflask/export_traits.py
@@ -1,8 +1,6 @@
-from __future__ import print_function, division
-
 import csv
 import xlsxwriter
-import StringIO 
+import io 
 import datetime
 import itertools
 
@@ -61,9 +59,9 @@ def export_search_results_csv(targs):
     traits_by_group = sort_traits_by_group(trait_list)
 
     file_list = []
-    for group in traits_by_group.keys():
+    for group in list(traits_by_group.keys()):
         group_traits = traits_by_group[group]
-        buff = StringIO.StringIO()
+        buff = io.StringIO()
         writer = csv.writer(buff)
         csv_rows = []
 
@@ -122,7 +120,7 @@ def export_search_results_csv(targs):
 
             csv_rows.append(row_contents)
 
-        csv_rows = map(list, itertools.izip_longest(*[row for row in csv_rows]))
+        csv_rows = list(map(list, itertools.zip_longest(*[row for row in csv_rows])))
         writer.writerows(csv_rows)
         csv_data = buff.getvalue()
         buff.close()
@@ -135,9 +133,9 @@ def export_search_results_csv(targs):
 def sort_traits_by_group(trait_list=[]):
     traits_by_group = {}
     for trait in trait_list:
-        if trait.dataset.group.name not in traits_by_group.keys():
+        if trait.dataset.group.name not in list(traits_by_group.keys()):
             traits_by_group[trait.dataset.group.name] = []
 
         traits_by_group[trait.dataset.group.name].append(trait)
 
-    return traits_by_group
\ No newline at end of file
+    return traits_by_group
diff --git a/wqflask/wqflask/external_tools/send_to_bnw.py b/wqflask/wqflask/external_tools/send_to_bnw.py
index 68efd10d..efa17f05 100644
--- a/wqflask/wqflask/external_tools/send_to_bnw.py
+++ b/wqflask/wqflask/external_tools/send_to_bnw.py
@@ -18,8 +18,6 @@
 #
 # This module is used by GeneNetwork project (www.genenetwork.org)
 
-from __future__ import absolute_import, print_function, division
-
 from base.trait import GeneralTrait
 from utility import helper_functions, corr_result_helpers
 
@@ -69,4 +67,4 @@ class SendToBNW(object):
             if has_none:
                 continue
             self.form_value += ",".join(str(cell) for cell in row)
-            self.form_value += ";"
\ No newline at end of file
+            self.form_value += ";"
diff --git a/wqflask/wqflask/external_tools/send_to_geneweaver.py b/wqflask/wqflask/external_tools/send_to_geneweaver.py
index 7a5dba73..4c958a88 100644
--- a/wqflask/wqflask/external_tools/send_to_geneweaver.py
+++ b/wqflask/wqflask/external_tools/send_to_geneweaver.py
@@ -18,8 +18,6 @@
 #
 # This module is used by GeneNetwork project (www.genenetwork.org)
 
-from __future__ import absolute_import, print_function, division
-
 import string
 
 from flask import Flask, g
@@ -54,10 +52,10 @@ class SendToGeneWeaver(object):
             trait_name_list = get_trait_name_list(self.trait_list)
 
             self.hidden_vars = {
-                                 'client'                     : "genenetwork",
-                                 'species'                    : species_name,
-                                 'idtype'                     : self.chip_name,
-                                 'list'                       : string.join(trait_name_list, ","),
+                                 'client': "genenetwork",
+                                 'species': species_name,
+                                 'idtype': self.chip_name,
+                                 'list': ",".join(trait_name_list),
                                }
 
 def get_trait_name_list(trait_list):
@@ -109,4 +107,4 @@ def test_chip(trait_list):
                 chip_name = '%s_NA' % result[0]
                 return chip_name
 
-    return chip_name
\ No newline at end of file
+    return chip_name
diff --git a/wqflask/wqflask/external_tools/send_to_webgestalt.py b/wqflask/wqflask/external_tools/send_to_webgestalt.py
index 30ca024f..2f068792 100644
--- a/wqflask/wqflask/external_tools/send_to_webgestalt.py
+++ b/wqflask/wqflask/external_tools/send_to_webgestalt.py
@@ -18,8 +18,6 @@
 #
 # This module is used by GeneNetwork project (www.genenetwork.org)
 
-from __future__ import absolute_import, print_function, division
-
 import string
 
 from flask import Flask, g
@@ -49,7 +47,7 @@ class SendToWebGestalt(object):
             id_type = "entrezgene"
 
             self.hidden_vars = { 
-                             'gene_list'                  : string.join(gene_id_list, "\n"),
+                             'gene_list'                  : "\n".join(gene_id_list),
                              'id_type'                    : "entrezgene",
                              'ref_set'                    : "genome",
                              'enriched_database_category' : "geneontology",
@@ -123,4 +121,4 @@ def gen_gene_id_list(trait_list):
         trait_name_list.append(trait.name)
         retrieve_trait_info(trait, trait.dataset)
         gene_id_list.append(str(trait.geneid))
-    return trait_name_list, gene_id_list
\ No newline at end of file
+    return trait_name_list, gene_id_list
diff --git a/wqflask/wqflask/group_manager.py b/wqflask/wqflask/group_manager.py
index 99d5db26..69ee9623 100644
--- a/wqflask/wqflask/group_manager.py
+++ b/wqflask/wqflask/group_manager.py
@@ -1,6 +1,3 @@
-
-from __future__ import print_function, division, absolute_import
-
 import random, string
 
 from flask import (Flask, g, render_template, url_for, request, make_response,
@@ -155,4 +152,4 @@ def send_group_invites(group_id, user_email_list = [], user_type="members"):
          save_user(user_details, user_details['user_id'])
          send_invitation_email(user_email, temp_password)
 
-#@app.route()
\ No newline at end of file
+#@app.route()
diff --git a/wqflask/wqflask/gsearch.py b/wqflask/wqflask/gsearch.py
index c65a1415..6d797a29 100644
--- a/wqflask/wqflask/gsearch.py
+++ b/wqflask/wqflask/gsearch.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
 import json
 
 from flask import Flask, g
diff --git a/wqflask/wqflask/heatmap/heatmap.py b/wqflask/wqflask/heatmap/heatmap.py
index 5098a184..cca5a4fc 100644
--- a/wqflask/wqflask/heatmap/heatmap.py
+++ b/wqflask/wqflask/heatmap/heatmap.py
@@ -1,46 +1,17 @@
-from __future__ import absolute_import, print_function, division
-
-import sys
-# sys.path.append(".") Never in a running webserver
-
 import string
-import cPickle
 import os
-import datetime
-import time
-import pp
-import math
 import random
-import collections
-import resource
-
-import scipy
-import numpy as np
-
-from pprint import pformat as pf
-
-from base.trait import GeneralTrait
-from base import data_set
 from base import species
 from base import webqtlConfig
 from utility import helper_functions
-from utility import Plot, Bunch
-from utility import temp_data
-from utility.tools import flat_files, REAPER_COMMAND, TEMPDIR
-
-from MySQLdb import escape_string as escape
-
-import cPickle as pickle
-import simplejson as json
-
-from pprint import pformat as pf
 
+from utility.tools import flat_files, REAPER_COMMAND, TEMPDIR
 from redis import Redis
-Redis = Redis()
-
 from flask import Flask, g
-
 from utility.logger import getLogger
+
+Redis = Redis()
+
 logger = getLogger(__name__ )
 
 class Heatmap(object):
@@ -60,7 +31,7 @@ class Heatmap(object):
 
         chrnames = []
         self.species = species.TheSpecies(dataset=self.trait_list[0][1])
-        for key in self.species.chromosomes.chromosomes.keys():
+        for key in list(self.species.chromosomes.chromosomes.keys()):
             chrnames.append([self.species.chromosomes.chromosomes[key].name, self.species.chromosomes.chromosomes[key].mb_length])
 
         for trait_db in self.trait_list:
@@ -93,7 +64,7 @@ class Heatmap(object):
         pos = []
         markernames = []
 
-        for trait in self.trait_results.keys():
+        for trait in list(self.trait_results.keys()):
             lodnames.append(trait)
 
         self.dataset.group.get_markers()
@@ -205,4 +176,4 @@ def parse_reaper_output(gwa_filename):
                 marker['additive'] = float(line.split("\t")[6])
                 marker_obs.append(marker)
 
-    return marker_obs
\ No newline at end of file
+    return marker_obs
diff --git a/wqflask/wqflask/interval_analyst/GeneUtil.py b/wqflask/wqflask/interval_analyst/GeneUtil.py
index 2c60dd70..d0dd7aea 100644
--- a/wqflask/wqflask/interval_analyst/GeneUtil.py
+++ b/wqflask/wqflask/interval_analyst/GeneUtil.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
 import string
 
 from flask import Flask, g
@@ -24,7 +22,7 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'):
 	
 	##List current Species and other Species
 	speciesId = speciesDict[species]
-	otherSpecies = map(lambda X: [X, speciesDict[X]], speciesDict.keys())
+	otherSpecies = [[X, speciesDict[X]] for X in list(speciesDict.keys())]
 	otherSpecies.remove([species, speciesId])
 
 	results = g.db.execute("""
@@ -33,7 +31,7 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'):
                       Chromosome = '%s' AND
 					  ((TxStart > %f and TxStart <= %f) OR (TxEnd > %f and TxEnd <= %f))
 				ORDER BY txStart
-                """ % (string.join(fetchFields, ", "),
+                """ % (", ".join(fetchFields),
                        speciesId, chrName,
                        startMb, endMb,
                        startMb, endMb)).fetchall()
@@ -68,7 +66,7 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'):
 				othSpec, othSpecId = item
 				newdict2 = {}
 				
-				resultsOther = g.db.execute("SELECT %s FROM GeneList WHERE SpeciesId = %d AND geneSymbol= '%s' LIMIT 1" % (string.join(fetchFields, ", "),
+				resultsOther = g.db.execute("SELECT %s FROM GeneList WHERE SpeciesId = %d AND geneSymbol= '%s' LIMIT 1" % (", ".join(fetchFields),
                                                                                                                            othSpecId,
                                                                                                                            newdict["GeneSymbol"])).fetchone()
 
diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py
index c364c0db..aa3f8518 100644
--- a/wqflask/wqflask/marker_regression/display_mapping_results.py
+++ b/wqflask/wqflask/marker_regression/display_mapping_results.py
@@ -35,7 +35,7 @@ import json
 
 from flask import Flask, g
 
-from htmlgen import HTMLgen2 as HT
+import htmlgen as HT
 
 from base import webqtlConfig
 from base.GeneralObject import GeneralObject
@@ -47,7 +47,11 @@ from base.webqtlConfig import GENERATED_IMAGE_DIR
 from utility.pillow_utils import draw_rotated_text, draw_open_polygon
 
 import utility.logger
-logger = utility.logger.getLogger(__name__)
+try:  # Only import this for Python3
+    from functools import reduce
+except:
+    pass
+logger = utility.logger.getLogger(__name__ )
 
 RED = ImageColor.getrgb("red")
 BLUE = ImageColor.getrgb("blue")
@@ -84,35 +88,60 @@ class HtmlGenWrapper:
     """Wrapper Methods for HTML gen"""
     @staticmethod
     def create_image_tag(**kwargs):
-        return HT.Image(**kwargs)
+        image = HT.Image("", "")
+        for key, value in list(kwargs.items()):
+            image.set_attribute(key, value)
+        return image
 
     @staticmethod
     def create_form_tag(**kwargs):
-        return HT.Form(**kwargs)
+        form = HT.Form("POST", "")  # Default method is POST
+
+        for key, value in list(kwargs.items()):
+            if key == "submit":
+                form.append(value)
+                continue
+            form.set_attribute(key.replace("cgi", "action"), str(value))
+        return form
 
     @staticmethod
     def create_p_tag(**kwargs):
-        return HT.Paragraph(**kwargs)
+        paragraph = HT.Paragraph()
+        for key, value in list(kwargs.items()):
+            paragraph.set_attribute(key, value)
+        return paragraph
 
     @staticmethod
     def create_br_tag():
-        return HT.BR()
+        return HT.VoidElement("br")
 
     @staticmethod
     def create_input_tag(**kwargs):
-        return HT.Input(**kwargs)
+        input_ = HT.Input()
+        for key, value in list(kwargs.items()):
+            input_.set_attribute(key.lower().replace("_", ""), value)
+        return input_
 
     @staticmethod
     def create_area_tag(**kwargs):
-        return HT.Area(**kwargs)
+        area = HT.VoidElement("area")
+        for key, value in list(kwargs.items()):
+            area.set_attribute(key, value)
+        return area
 
     @staticmethod
     def create_link_tag(href, content, **kwargs):
-        return HT.Href(href, content, **kwargs)
+        link = HT.Link(href, content)
+        for key, value in list(kwargs.items()):
+            link.set_attribute(key, value)
+        return link
 
     @staticmethod
     def create_map_tag(**kwargs):
-        return HT.Map(**kwargs)
+        map_ = HT.Element("map")
+        for key, value in list(kwargs.items()):
+            map_.set_attribute(key, value)
+        return map_
 
 
 class DisplayMappingResults(object):
@@ -265,7 +294,7 @@ class DisplayMappingResults(object):
 
         self.manhattan_plot = start_vars['manhattan_plot']
 
-        if 'permCheck' in start_vars.keys():
+        if 'permCheck' in list(start_vars.keys()):
             self.permChecked = start_vars['permCheck']
         else:
             self.permChecked = False
@@ -278,46 +307,46 @@ class DisplayMappingResults(object):
         else:
             self.nperm = 0
 
-        if 'bootCheck' in start_vars.keys():
+        if 'bootCheck' in list(start_vars.keys()):
             self.bootChecked = start_vars['bootCheck']
         else:
             self.bootChecked = False
-        if 'num_bootstrap' in start_vars.keys():
+        if 'num_bootstrap' in list(start_vars.keys()):
             self.nboot = int(start_vars['num_bootstrap'])
         else:
             self.nboot = 0
-        if 'bootstrap_results' in start_vars.keys():
+        if 'bootstrap_results' in list(start_vars.keys()):
             self.bootResult = start_vars['bootstrap_results']
         else:
             self.bootResult = []
 
-        if 'do_control' in start_vars.keys():
+        if 'do_control' in list(start_vars.keys()):
             self.doControl = start_vars['do_control']
         else:
             self.doControl = "false"
-        if 'control_marker' in start_vars.keys():
+        if 'control_marker' in list(start_vars.keys()):
             self.controlLocus = start_vars['control_marker']
         else:
             self.controlLocus = ""
-        if 'covariates' in start_vars.keys():
+        if 'covariates' in list(start_vars.keys()):
             self.covariates = start_vars['covariates']
-        if 'maf' in start_vars.keys():
+        if 'maf' in list(start_vars.keys()):
             self.maf = start_vars['maf']
         else:
             self.maf = ""
-        if 'output_files' in start_vars.keys():
+        if 'output_files' in list(start_vars.keys()):
             self.output_files = start_vars['output_files']
-        if 'use_loco' in start_vars.keys() and self.mapping_method == "gemma":
+        if 'use_loco' in list(start_vars.keys()) and self.mapping_method == "gemma":
             self.use_loco = start_vars['use_loco']
 
-        if 'reaper_version' in start_vars.keys() and self.mapping_method == "reaper":
+        if 'reaper_version' in list(start_vars.keys()) and self.mapping_method == "reaper":
             self.reaper_version = start_vars['reaper_version']
             if 'output_files' in start_vars:
                 self.output_files = ",".join(start_vars['output_files'])
 
         self.categorical_vars = ""
         self.perm_strata = ""
-        if 'perm_strata' in start_vars.keys() and 'categorical_vars' in start_vars.keys():
+        if 'perm_strata' in list(start_vars.keys()) and 'categorical_vars' in list(start_vars.keys()):
             self.categorical_vars = start_vars['categorical_vars']
             self.perm_strata = start_vars['perm_strata']
 
@@ -359,7 +388,7 @@ class DisplayMappingResults(object):
                self.graphWidth  = self.MULT_GRAPH_DEFAULT_WIDTH
 
 ## BEGIN HaplotypeAnalyst
-        if 'haplotypeAnalystCheck' in start_vars.keys():
+        if 'haplotypeAnalystCheck' in list(start_vars.keys()):
             self.haplotypeAnalystChecked = start_vars['haplotypeAnalystCheck']
         else:
             self.haplotypeAnalystChecked = False
@@ -367,25 +396,25 @@ class DisplayMappingResults(object):
 
         self.graphHeight = self.GRAPH_DEFAULT_HEIGHT
         self.dominanceChecked = False
-        if 'LRSCheck' in start_vars.keys():
+        if 'LRSCheck' in list(start_vars.keys()):
             self.LRS_LOD = start_vars['LRSCheck']
         else:
             self.LRS_LOD = start_vars['score_type']
         self.intervalAnalystChecked = True
         self.draw2X = False
-        if 'additiveCheck' in start_vars.keys():
+        if 'additiveCheck' in list(start_vars.keys()):
             self.additiveChecked = start_vars['additiveCheck']
         else:
             self.additiveChecked = False
-        if 'viewLegend' in start_vars.keys():
+        if 'viewLegend' in list(start_vars.keys()):
             self.legendChecked = start_vars['viewLegend']
         else:
             self.legendChecked = False
-        if 'showSNP' in start_vars.keys():
+        if 'showSNP' in list(start_vars.keys()):
             self.SNPChecked = start_vars['showSNP']
         else:
             self.SNPChecked = False
-        if 'showGenes' in start_vars.keys():
+        if 'showGenes' in list(start_vars.keys()):
             self.geneChecked = start_vars['showGenes']
         else:
             self.geneChecked = False
@@ -425,9 +454,9 @@ class DisplayMappingResults(object):
                         Chr_Length.Name in (%s)
                 Order by
                         Chr_Length.OrderId
-                """ % (self.dataset.group.name, string.join(map(lambda X: "'%s'" % X[0], self.ChrList[1:]), ", ")))
+                """ % (self.dataset.group.name, ", ".join(["'%s'" % X[0] for X in self.ChrList[1:]])))
 
-        self.ChrLengthMbList = map(lambda x: x[0]/1000000.0, self.ChrLengthMbList)
+        self.ChrLengthMbList = [x[0]/1000000.0 for x in self.ChrLengthMbList]
         self.ChrLengthMbSum = reduce(lambda x, y:x+y, self.ChrLengthMbList, 0.0)
         if self.ChrLengthMbList:
             self.MbGraphInterval = self.ChrLengthMbSum/(len(self.ChrLengthMbList)*12) #Empirical Mb interval
@@ -458,7 +487,7 @@ class DisplayMappingResults(object):
                 else:
                     continue
             samplelist = list(self.genotype.prgy)
-            for j,_geno in enumerate (self.genotype[0][1].genotype):
+            for j, _geno in enumerate (self.genotype[0][1].genotype):
                 for item in smd:
                     if item.name == samplelist[j]:
                         self.NR_INDIVIDUALS = self.NR_INDIVIDUALS + 1
@@ -550,11 +579,10 @@ class DisplayMappingResults(object):
             src="/image/{}.png".format(self.filename),
             border="0", usemap='#WebQTLImageMap'
         )
-        self.intImg = intImg
 
         #Scales plot differently for high resolution
         if self.draw2X:
-            intCanvasX2 = Image.new("RGBA", size=(self.graphWidth*2,self.graphHeight*2))
+            intCanvasX2 = Image.new("RGBA", size=(self.graphWidth*2, self.graphHeight*2))
             gifmapX2 = self.plotIntMapping(intCanvasX2, startMb = self.startMb, endMb = self.endMb, showLocusForm= showLocusForm, zoom=2)
             intCanvasX2.save(
                 "{}.png".format(
@@ -571,12 +599,12 @@ class DisplayMappingResults(object):
                 cgi=os.path.join(webqtlConfig.CGIDIR, webqtlConfig.SCRIPTFILE),
                 enctype='multipart/form-data',
                 name=showLocusForm,
-                submit=HtmlGenWrapper.create_input_tag(type='hidden'))
+                submit=HtmlGenWrapper.create_input_tag(type_='hidden'))
 
             hddn = {'FormID':'showDatabase', 'ProbeSetID':'_','database':fd.RISet+"Geno",'CellID':'_', 'RISet':fd.RISet, 'incparentsf1':'ON'}
             for key in hddn.keys():
                 showLocusForm.append(HtmlGenWrapper.create_input_tag(
-                    name=key, value=hddn[key], type='hidden'))
+                    name=key, value=hddn[key], type_='hidden'))
             showLocusForm.append(intImg)
         else:
             showLocusForm = intImg
@@ -788,17 +816,17 @@ class DisplayMappingResults(object):
         bootScale = bootScale[:-1] + [highestPercent]
 
         bootOffset = 50*fontZoom
-        bootScaleFont=ImageFont.truetype(font=VERDANA_FILE,size=13*fontZoom)
+        bootScaleFont=ImageFont.truetype(font=VERDANA_FILE, size=13*fontZoom)
         im_drawer.rectangle(
             xy=((canvas.size[0]-bootOffset, yZero-bootHeightThresh),
-                (canvas.size[0]-bootOffset-15*zoom,yZero)),
+                (canvas.size[0]-bootOffset-15*zoom, yZero)),
             fill = YELLOW, outline=BLACK)
         im_drawer.line(
             xy=((canvas.size[0]-bootOffset+4, yZero),
                 (canvas.size[0]-bootOffset, yZero)),
             fill=BLACK)
         TEXT_Y_DISPLACEMENT = -8
-        im_drawer.text(xy=(canvas.size[0]-bootOffset+10,yZero+TEXT_Y_DISPLACEMENT), text='0%',
+        im_drawer.text(xy=(canvas.size[0]-bootOffset+10, yZero+TEXT_Y_DISPLACEMENT), text='0%',
                        font=bootScaleFont, fill=BLACK)
 
         for item in bootScale:
@@ -806,10 +834,10 @@ class DisplayMappingResults(object):
                 continue
             bootY = yZero-bootHeightThresh*item/highestPercent
             im_drawer.line(
-                xy=((canvas.size[0]-bootOffset+4,bootY),
-                 (canvas.size[0]-bootOffset,bootY)),
+                xy=((canvas.size[0]-bootOffset+4, bootY),
+                 (canvas.size[0]-bootOffset, bootY)),
                 fill=BLACK)
-            im_drawer.text(xy=(canvas.size[0]-bootOffset+10,bootY+TEXT_Y_DISPLACEMENT),
+            im_drawer.text(xy=(canvas.size[0]-bootOffset+10, bootY+TEXT_Y_DISPLACEMENT),
                            text='%2.1f'%item, font=bootScaleFont, fill=BLACK)
 
         if self.legendChecked:
@@ -818,7 +846,7 @@ class DisplayMappingResults(object):
             smallLabelFont = ImageFont.truetype(font=TREBUC_FILE, size=12*fontZoom)
             leftOffset = xLeftOffset+(nCol-1)*200
             im_drawer.rectangle(
-                xy=((leftOffset,startPosY-6), (leftOffset+12,startPosY+6)),
+                xy=((leftOffset, startPosY-6), (leftOffset+12, startPosY+6)),
                 fill=YELLOW, outline=BLACK)
             im_drawer.text(xy=(leftOffset+ 20, startPosY+TEXT_Y_DISPLACEMENT),
                            text='Frequency of the Peak LRS',
@@ -915,7 +943,7 @@ class DisplayMappingResults(object):
             TEXT_Y_DISPLACEMENT = -8
             im_drawer.text(
                 text="Sequence Site",
-                xy=(leftOffset+15,startPosY+TEXT_Y_DISPLACEMENT), font=smallLabelFont,
+                xy=(leftOffset+15, startPosY+TEXT_Y_DISPLACEMENT), font=smallLabelFont,
                 fill=self.TOP_RIGHT_INFO_COLOR)
 
     def drawSNPTrackNew(self, canvas, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None):
@@ -965,7 +993,7 @@ class DisplayMappingResults(object):
     def drawMultiTraitName(self, fd, canvas, gifmap, showLocusForm, offset= (40, 120, 80, 10), zoom = 1):
         nameWidths = []
         yPaddingTop = 10
-        colorFont=ImageFont.truetype(font=TREBUC_FILE,size=12)
+        colorFont=ImageFont.truetype(font=TREBUC_FILE, size=12)
         if len(self.qtlresults) >20 and self.selectedChr > -1:
             rightShift = 20
             rightShiftStep = 60
@@ -984,21 +1012,21 @@ class DisplayMappingResults(object):
                 rightShift += rightShiftStep
 
             name = thisTrait.displayName()
-            nameWidth, nameHeight = im_drawer.textsize(name,font=colorFont)
+            nameWidth, nameHeight = im_drawer.textsize(name, font=colorFont)
             nameWidths.append(nameWidth)
 
             im_drawer.rectangle(
-                xy=((rightShift,yPaddingTop+kstep*15),
-                    (rectWidth+rightShift,yPaddingTop+10+kstep*15)),
+                xy=((rightShift, yPaddingTop+kstep*15),
+                    (rectWidth+rightShift, yPaddingTop+10+kstep*15)),
                 fill=thisLRSColor, outline=BLACK)
             im_drawer.text(
-                text=name,xy=(rectWidth+2+rightShift,yPaddingTop+10+kstep*15),
-                font=colorFont,fill=BLACK)
+                text=name, xy=(rectWidth+2+rightShift, yPaddingTop+10+kstep*15),
+                font=colorFont, fill=BLACK)
             if thisTrait.db:
-                COORDS = "%d,%d,%d,%d" %(rectWidth+2+rightShift,yPaddingTop+kstep*15,rectWidth+2+rightShift+nameWidth,yPaddingTop+10+kstep*15,)
+                COORDS = "%d,%d,%d,%d" %(rectWidth+2+rightShift, yPaddingTop+kstep*15, rectWidth+2+rightShift+nameWidth, yPaddingTop+10+kstep*15,)
                 HREF= "javascript:showDatabase3('%s','%s','%s','');" % (showLocusForm, thisTrait.db.name, thisTrait.name)
-                Areas = HtmlGenWrapper.create_area_tag(shape='rect',coords=COORDS,href=HREF)
-                gifmap.areas.append(Areas) ### TODO
+                Areas = HtmlGenWrapper.create_area_tag(shape='rect', coords=COORDS, href=HREF)
+                gifmap.append(Areas) ### TODO
 
     def drawLegendPanel(self, canvas, offset= (40, 120, 80, 10), zoom = 1):
         im_drawer = ImageDraw.Draw(canvas)
@@ -1011,80 +1039,80 @@ class DisplayMappingResults(object):
         if zoom == 2:
             fontZoom = 1.5
 
-        labelFont=ImageFont.truetype(font=TREBUC_FILE,size=12*fontZoom)
+        labelFont=ImageFont.truetype(font=TREBUC_FILE, size=12*fontZoom)
         startPosY = 15
         stepPosY = 12*fontZoom
         if self.manhattan_plot != True:
             im_drawer.line(
-                xy=((xLeftOffset,startPosY),(xLeftOffset+32,startPosY)),
+                xy=((xLeftOffset, startPosY), (xLeftOffset+32, startPosY)),
                 fill=self.LRS_COLOR, width=2)
             im_drawer.text(
-                text=self.LRS_LOD, xy=(xLeftOffset+40,startPosY+TEXT_Y_DISPLACEMENT),
-                font=labelFont,fill=BLACK)
+                text=self.LRS_LOD, xy=(xLeftOffset+40, startPosY+TEXT_Y_DISPLACEMENT),
+                font=labelFont, fill=BLACK)
             startPosY += stepPosY
 
         if self.additiveChecked:
             startPosX = xLeftOffset
             im_drawer.line(
-                xy=((startPosX,startPosY),(startPosX+17,startPosY)),
+                xy=((startPosX, startPosY), (startPosX+17, startPosY)),
                 fill=self.ADDITIVE_COLOR_POSITIVE, width=2)
             im_drawer.line(
-                xy=((startPosX+18,startPosY),(startPosX+32,startPosY)),
+                xy=((startPosX+18, startPosY), (startPosX+32, startPosY)),
                 fill=self.ADDITIVE_COLOR_NEGATIVE, width=2)
             im_drawer.text(
-                text='Additive Effect',xy=(startPosX+40,startPosY+TEXT_Y_DISPLACEMENT),
-                font=labelFont,fill=BLACK)
+                text='Additive Effect', xy=(startPosX+40, startPosY+TEXT_Y_DISPLACEMENT),
+                font=labelFont, fill=BLACK)
 
         if self.genotype.type == 'intercross' and self.dominanceChecked:
             startPosX = xLeftOffset
             startPosY += stepPosY
             im_drawer.line(
-                xy=((startPosX,startPosY),(startPosX+17,startPosY)),
+                xy=((startPosX, startPosY), (startPosX+17, startPosY)),
                 fill=self.DOMINANCE_COLOR_POSITIVE, width=4)
             im_drawer.line(
-                xy=((startPosX+18,startPosY),(startPosX+35,startPosY)),
+                xy=((startPosX+18, startPosY), (startPosX+35, startPosY)),
                 fill=self.DOMINANCE_COLOR_NEGATIVE, width=4)
             im_drawer.text(
-                text='Dominance Effect', xy=(startPosX+42,startPosY+5),
-                font=labelFont,fill=BLACK)
+                text='Dominance Effect', xy=(startPosX+42, startPosY+5),
+                font=labelFont, fill=BLACK)
 
         if self.haplotypeAnalystChecked:
             startPosY += stepPosY
             startPosX = xLeftOffset
             im_drawer.line(
-                xy=((startPosX,startPosY),(startPosX+17,startPosY)),
+                xy=((startPosX, startPosY), (startPosX+17, startPosY)),
                 fill=self.HAPLOTYPE_POSITIVE, width=4)
             im_drawer.line(
-                xy=((startPosX+18,startPosY),(startPosX+35,startPosY)),
+                xy=((startPosX+18, startPosY), (startPosX+35, startPosY)),
                 fill=self.HAPLOTYPE_NEGATIVE, width=4)
             im_drawer.line(
-                xy=((startPosX+36,startPosY),(startPosX+53,startPosY)),
+                xy=((startPosX+36, startPosY), (startPosX+53, startPosY)),
                 fill=self.HAPLOTYPE_HETEROZYGOUS, width=4)
             im_drawer.line(
-                xy=((startPosX+54,startPosY),(startPosX+67,startPosY)),
+                xy=((startPosX+54, startPosY), (startPosX+67, startPosY)),
                 fill=self.HAPLOTYPE_RECOMBINATION, width=4)
             im_drawer.text(
                 text='Haplotypes (Pat, Mat, Het, Unk)',
-                xy=(startPosX+76,startPosY+5),font=labelFont,fill=BLACK)
+                xy=(startPosX+76, startPosY+5), font=labelFont, fill=BLACK)
 
         if self.permChecked and self.nperm > 0:
             startPosY += stepPosY
             startPosX = xLeftOffset
             im_drawer.line(
-                xy=((startPosX, startPosY),( startPosX + 32, startPosY)),
+                xy=((startPosX, startPosY), ( startPosX + 32, startPosY)),
                 fill=self.SIGNIFICANT_COLOR, width=self.SIGNIFICANT_WIDTH)
             im_drawer.line(
-                xy=((startPosX, startPosY + stepPosY),( startPosX + 32, startPosY + stepPosY)),
+                xy=((startPosX, startPosY + stepPosY), ( startPosX + 32, startPosY + stepPosY)),
                 fill=self.SUGGESTIVE_COLOR, width=self.SUGGESTIVE_WIDTH)
             im_drawer.text(
-                text='Significant %s = %2.2f' % (self.LRS_LOD,self.significant),
-                xy=(xLeftOffset+42,startPosY+TEXT_Y_DISPLACEMENT),font=labelFont,fill=BLACK)
+                text='Significant %s = %2.2f' % (self.LRS_LOD, self.significant),
+                xy=(xLeftOffset+42, startPosY+TEXT_Y_DISPLACEMENT), font=labelFont, fill=BLACK)
             im_drawer.text(
                 text='Suggestive %s = %2.2f' % (self.LRS_LOD, self.suggestive),
-                xy=(xLeftOffset+42,startPosY + TEXT_Y_DISPLACEMENT +stepPosY),font=labelFont,
+                xy=(xLeftOffset+42, startPosY + TEXT_Y_DISPLACEMENT +stepPosY), font=labelFont,
                 fill=BLACK)
 
-        labelFont = ImageFont.truetype(font=VERDANA_FILE,size=12*fontZoom)
+        labelFont = ImageFont.truetype(font=VERDANA_FILE, size=12*fontZoom)
         labelColor = BLACK
         if self.dataset.type == "Publish" or self.dataset.type == "Geno":
             dataset_label = self.dataset.fullname
@@ -1152,22 +1180,22 @@ class DisplayMappingResults(object):
                 im_drawer.textsize(string2, font=labelFont)[0])
             im_drawer.text(
                 text=identification,
-                xy=(canvas.size[0] - xRightOffset-d,20*fontZoom),font=labelFont,
+                xy=(canvas.size[0] - xRightOffset-d, 20*fontZoom), font=labelFont,
                 fill=labelColor)
         else:
             d = 4+ max(
                 im_drawer.textsize(string1, font=labelFont)[0],
                 im_drawer.textsize(string2, font=labelFont)[0])
         im_drawer.text(
-            text=string1,xy=(canvas.size[0] - xRightOffset-d,35*fontZoom),
-            font=labelFont,fill=labelColor)
+            text=string1, xy=(canvas.size[0] - xRightOffset-d, 35*fontZoom),
+            font=labelFont, fill=labelColor)
         im_drawer.text(
-            text=string2,xy=(canvas.size[0] - xRightOffset-d,50*fontZoom),
-            font=labelFont,fill=labelColor)
+            text=string2, xy=(canvas.size[0] - xRightOffset-d, 50*fontZoom),
+            font=labelFont, fill=labelColor)
         if string3 != '':
             im_drawer.text(
-                text=string3,xy=(canvas.size[0] - xRightOffset-d,65*fontZoom),
-                font=labelFont,fill=labelColor)
+                text=string3, xy=(canvas.size[0] - xRightOffset-d, 65*fontZoom),
+                font=labelFont, fill=labelColor)
 
 
     def drawGeneBand(self, canvas, gifmap, plotXScale, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None):
@@ -1194,8 +1222,8 @@ class DisplayMappingResults(object):
                 tenPercentLength = geneLength*0.0001
                 SNPdensity = theGO["snpCount"]/geneLength
 
-                exonStarts = map(float, theGO['exonStarts'].split(",")[:-1])
-                exonEnds = map(float, theGO['exonEnds'].split(",")[:-1])
+                exonStarts = list(map(float, theGO['exonStarts'].split(",")[:-1]))
+                exonEnds = list(map(float, theGO['exonEnds'].split(",")[:-1]))
                 cdsStart = theGO['cdsStart']
                 cdsEnd = theGO['cdsEnd']
                 accession = theGO['NM_ID']
@@ -1388,7 +1416,7 @@ class DisplayMappingResults(object):
                             labelText = "3'"
                             im_drawer.text(
                                 text=labelText,
-                                xy=(utrEndPix+2,geneYLocation+self.EACH_GENE_HEIGHT),
+                                xy=(utrEndPix+2, geneYLocation+self.EACH_GENE_HEIGHT),
                                 font=ImageFont.truetype(font=ARIAL_FILE, size=2))
 
             #draw the genes as rectangles
@@ -1400,7 +1428,7 @@ class DisplayMappingResults(object):
 
             COORDS = "%d, %d, %d, %d" %(geneStartPix, geneYLocation, geneEndPix, (geneYLocation + self.EACH_GENE_HEIGHT))
             # NL: 06-02-2011 Rob required to display NCBI info in a new window
-            gifmap.areas.append(
+            gifmap.append(
                 HtmlGenWrapper.create_area_tag(
                     shape='rect',
                     coords=COORDS,
@@ -1541,7 +1569,7 @@ class DisplayMappingResults(object):
                                         counter = counter + 1
                                         if item.name == samplelist[k]:
                                             ind = counter
-                                    maxind=max(ind,maxind)
+                                    maxind=max(ind, maxind)
 
                                     # lines
                                     if (oldgeno[k] == -1 and _geno == -1):
@@ -1574,7 +1602,7 @@ class DisplayMappingResults(object):
                                     COORDS = "%d, %d, %d, %d" %(geneStartPix, geneYLocation+ind*self.EACH_GENE_HEIGHT, geneEndPix+1, (geneYLocation + ind*self.EACH_GENE_HEIGHT))
                                     TITLE = "Strain: %s, marker (%s) \n Position  %2.3f Mb." % (samplelist[k], _chr[j].name, float(txStart))
                                     HREF = ''
-                                    gifmap.areas.append(
+                                    gifmap.append(
                                         HtmlGenWrapper.create_area_tag(
                                             shape='rect',
                                             coords=COORDS,
@@ -1698,7 +1726,7 @@ class DisplayMappingResults(object):
                 WEBQTL_HREF = "javascript:rangeView('%s', %f, %f)" % (self.selectedChr - 1, max(0, (calBase-webqtlZoomWidth))/1000000.0, (calBase+webqtlZoomWidth)/1000000.0)
 
                 WEBQTL_TITLE = "Click to view this section of the genome in WebQTL"
-                gifmap.areas.append(
+                gifmap.append(
                     HtmlGenWrapper.create_area_tag(
                         shape='rect',
                         coords=WEBQTL_COORDS,
@@ -1710,7 +1738,7 @@ class DisplayMappingResults(object):
                     outline=self.CLICKABLE_WEBQTL_REGION_COLOR,
                     fill=self.CLICKABLE_WEBQTL_REGION_COLOR)
                 im_drawer.line(
-                    xy=((xBrowse1, paddingTop),( xBrowse1, (paddingTop + self.BAND_HEIGHT))),
+                    xy=((xBrowse1, paddingTop), ( xBrowse1, (paddingTop + self.BAND_HEIGHT))),
                     fill=self.CLICKABLE_WEBQTL_REGION_OUTLINE_COLOR)
 
                 if self.dataset.group.species == "mouse" or self.dataset.group.species == "rat":
@@ -1720,7 +1748,7 @@ class DisplayMappingResults(object):
                     else:
                         PHENOGEN_HREF = "https://phenogen.org/gene.jsp?speciesCB=Mm&auto=Y&geneTxt=chr%s:%d-%d&genomeVer=mm10" % (self.selectedChr, max(0, calBase-flankingWidthInBases), calBase+flankingWidthInBases)
                     PHENOGEN_TITLE = "Click to view this section of the genome in PhenoGen"
-                    gifmap.areas.append(
+                    gifmap.append(
                         HtmlGenWrapper.create_area_tag(
                             shape='rect',
                             coords=PHENOGEN_COORDS,
@@ -1732,7 +1760,7 @@ class DisplayMappingResults(object):
                         outline=self.CLICKABLE_PHENOGEN_REGION_COLOR,
                         fill=self.CLICKABLE_PHENOGEN_REGION_COLOR)
                     im_drawer.line(
-                        xy=((xBrowse1, phenogenPaddingTop),( xBrowse1, (phenogenPaddingTop+self.BAND_HEIGHT))),
+                        xy=((xBrowse1, phenogenPaddingTop), ( xBrowse1, (phenogenPaddingTop+self.BAND_HEIGHT))),
                         fill=self.CLICKABLE_PHENOGEN_REGION_OUTLINE_COLOR)
 
                 UCSC_COORDS = "%d, %d, %d, %d" %(xBrowse1, ucscPaddingTop, xBrowse2, (ucscPaddingTop+self.BAND_HEIGHT))
@@ -1741,7 +1769,7 @@ class DisplayMappingResults(object):
                 else:
                     UCSC_HREF = "http://genome.ucsc.edu/cgi-bin/hgTracks?db=%s&position=chr%s:%d-%d" % (self._ucscDb, self.selectedChr, max(0, calBase-flankingWidthInBases), calBase+flankingWidthInBases)
                 UCSC_TITLE = "Click to view this section of the genome in the UCSC Genome Browser"
-                gifmap.areas.append(
+                gifmap.append(
                     HtmlGenWrapper.create_area_tag(
                         shape='rect',
                         coords=UCSC_COORDS,
@@ -1763,7 +1791,7 @@ class DisplayMappingResults(object):
                 else:
                     ENSEMBL_HREF = "http://www.ensembl.org/Rattus_norvegicus/contigview?chr=%s&start=%d&end=%d" % (self.selectedChr, max(0, calBase-flankingWidthInBases), calBase+flankingWidthInBases)
                 ENSEMBL_TITLE = "Click to view this section of the genome in the Ensembl Genome Browser"
-                gifmap.areas.append(HtmlGenWrapper.create_area_tag(
+                gifmap.append(HtmlGenWrapper.create_area_tag(
                     shape='rect',
                     coords=ENSEMBL_COORDS,
                     href=ENSEMBL_HREF,
@@ -1864,8 +1892,8 @@ class DisplayMappingResults(object):
                         continue
                     Xc = xLeftOffset + plotXScale*(_Mb - startMb)
                     if counter % NUM_MINOR_TICKS == 0: # Draw a MAJOR mark, not just a minor tick mark
-                        im_drawer.line(xy=((Xc,yZero),
-                                           (Xc,yZero+xMajorTickHeight)),
+                        im_drawer.line(xy=((Xc, yZero),
+                                           (Xc, yZero+xMajorTickHeight)),
                                        fill=xAxisTickMarkColor,
                                        width=X_MAJOR_TICK_THICKNESS) # Draw the MAJOR tick mark
                         labelStr = str(formatStr % _Mb) # What Mbase location to put on the label
@@ -1875,8 +1903,8 @@ class DisplayMappingResults(object):
                                        text=labelStr, font=MBLabelFont,
                                        fill=xAxisLabelColor)
                     else:
-                        im_drawer.line(xy=((Xc,yZero),
-                                          (Xc,yZero+xMinorTickHeight)),
+                        im_drawer.line(xy=((Xc, yZero),
+                                          (Xc, yZero+xMinorTickHeight)),
                                        fill=xAxisTickMarkColor,
                                        width=X_MINOR_TICK_THICKNESS) # Draw the MINOR tick mark
 
@@ -1909,7 +1937,7 @@ class DisplayMappingResults(object):
                 text="Megabases",
                 xy=(
                     xLeftOffset+(plotWidth-im_drawer.textsize(
-                        "Megabases",font=megabaseLabelFont)[0])/2,
+                        "Megabases", font=megabaseLabelFont)[0])/2,
                     strYLoc+MBLabelFont.font.height+10*(zoom%2)),
                 font=megabaseLabelFont, fill=BLACK)
             pass
@@ -1964,7 +1992,7 @@ class DisplayMappingResults(object):
             for j, ChrInfo in enumerate(ChrAInfo):
                 preLpos = -1
                 for i, item in enumerate(ChrInfo):
-                    Lname,Lpos = item
+                    Lname, Lpos = item
                     if Lpos != preLpos:
                         offsetA += stepA
                         differ = 1
@@ -1978,17 +2006,17 @@ class DisplayMappingResults(object):
                         Zorder = 0
                     if differ:
                         im_drawer.line(
-                            xy=((startPosX+Lpos,yZero),(xLeftOffset+offsetA,\
+                            xy=((startPosX+Lpos, yZero), (xLeftOffset+offsetA,\
                         yZero+25)),
                             fill=lineColor)
                         im_drawer.line(
-                            xy=((xLeftOffset+offsetA,yZero+25),(xLeftOffset+offsetA,\
+                            xy=((xLeftOffset+offsetA, yZero+25), (xLeftOffset+offsetA,\
                         yZero+40+Zorder*(LRectWidth+3))),
                             fill=lineColor)
                         rectColor = ORANGE
                     else:
                         im_drawer.line(
-                            xy=((xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3)-3),(\
+                            xy=((xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3)-3), (\
                         xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3))),
                             fill=lineColor)
                         rectColor = DEEPPINK
@@ -1996,7 +2024,7 @@ class DisplayMappingResults(object):
                         xy=((xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3)),
                             (xLeftOffset+offsetA-LRectHeight,
                              yZero+40+Zorder*(LRectWidth+3)+LRectWidth)),
-                        outline=rectColor,fill=rectColor,width = 0)
+                        outline=rectColor, fill=rectColor, width = 0)
                     COORDS="%d,%d,%d,%d"%(xLeftOffset+offsetA-LRectHeight, yZero+40+Zorder*(LRectWidth+3),\
                             xLeftOffset+offsetA,yZero+40+Zorder*(LRectWidth+3)+LRectWidth)
                     HREF = "/show_trait?trait_id=%s&dataset=%s" % (Lname, self.dataset.group.name+"Geno")
@@ -2007,11 +2035,11 @@ class DisplayMappingResults(object):
                         href=HREF,
                         target="_blank",
                         title="Locus : {}".format(Lname))
-                    gifmap.areas.append(Areas)
+                    gifmap.append(Areas)
                 ##piddle bug
                 if j == 0:
                     im_drawer.line(
-                        xy=((startPosX,yZero),(startPosX,yZero+40)),
+                        xy=((startPosX, yZero), (startPosX, yZero+40)),
                         fill=lineColor)
                 startPosX += (self.ChrLengthDistList[j]+self.GraphInterval)*plotXScale
 
@@ -2023,7 +2051,7 @@ class DisplayMappingResults(object):
                     strYLoc + MBLabelFont.font.height+ 10*(zoom%2)),
                 font=centimorganLabelFont, fill=BLACK)
 
-        im_drawer.line(xy=((xLeftOffset,yZero), (xLeftOffset+plotWidth,yZero)),
+        im_drawer.line(xy=((xLeftOffset, yZero), (xLeftOffset+plotWidth, yZero)),
                        fill=BLACK, width=X_AXIS_THICKNESS) # Draw the X axis itself
 
 
@@ -2125,7 +2153,7 @@ class DisplayMappingResults(object):
         #ZS: Convert to int if all axis values are whole numbers
         all_int = True
         for item in LRSAxisList:
-            if item.is_integer():
+            if isinstance(item, int):
                 continue
             else:
                 all_int = False
@@ -2163,7 +2191,7 @@ class DisplayMappingResults(object):
                 LRS_LOD_Max = 0.000001
             yTopOffset + 30*(zoom - 1)
             yLRS = yZero - (item/LRS_LOD_Max) * LRSHeightThresh
-            im_drawer.line(xy=((xLeftOffset,yLRS), (xLeftOffset-4,yLRS)),
+            im_drawer.line(xy=((xLeftOffset, yLRS), (xLeftOffset-4, yLRS)),
                            fill=self.LRS_COLOR, width=1*zoom)
             if all_int:
                 scaleStr = "%d" % item
@@ -2219,8 +2247,8 @@ class DisplayMappingResults(object):
                     shape='rect',
                     coords=sig_coords,
                     title=sig_title)
-                gifmap.areas.append(Areas1)
-                gifmap.areas.append(Areas2)
+                gifmap.append(Areas1)
+                gifmap.append(Areas2)
 
                 start_pos_x +=  (chr_length_dist+self.GraphInterval)*plotXScale
                 return start_pos_x
@@ -2239,7 +2267,7 @@ class DisplayMappingResults(object):
             lrsEdgeWidth = 1
         else:
             if self.additiveChecked:
-                additiveMax = max(map(lambda X : abs(X['additive']), self.qtlresults))
+                additiveMax = max([abs(X['additive']) for X in self.qtlresults])
             lrsEdgeWidth = 3
 
         if zoom == 2:
@@ -2406,7 +2434,7 @@ class DisplayMappingResults(object):
                     im_drawer.text(
                         text="5",
                         xy=(
-                            Xc-im_drawer.textsize("5",font=symbolFont)[0]/2+1,
+                            Xc-im_drawer.textsize("5", font=symbolFont)[0]/2+1,
                             Yc-4),
                         fill=point_color, font=symbolFont)
                 else:
@@ -2473,8 +2501,8 @@ class DisplayMappingResults(object):
                             )
                         else:
                             im_drawer.line(
-                                xy=((Xc0,yZero-(Yc0-yZero)),
-                                    (Xc,yZero-(Yc-yZero))),
+                                xy=((Xc0, yZero-(Yc0-yZero)),
+                                    (Xc, yZero-(Yc-yZero))),
                                 fill=minusColor, width=lineWidth
                                 #, clipX=(xLeftOffset, xLeftOffset + plotWidth)
                             )
@@ -2561,8 +2589,8 @@ class DisplayMappingResults(object):
 
         ###draw additive scale
         if not self.multipleInterval and self.additiveChecked:
-            additiveScaleFont=ImageFont.truetype(font=VERDANA_FILE,size=16*zoom)
-            additiveScale = Plot.detScaleOld(0,additiveMax)
+            additiveScaleFont=ImageFont.truetype(font=VERDANA_FILE, size=16*zoom)
+            additiveScale = Plot.detScaleOld(0, additiveMax)
             additiveStep = (additiveScale[1]-additiveScale[0])/additiveScale[2]
             additiveAxisList = Plot.frange(0, additiveScale[1], additiveStep)
             addPlotScale = AdditiveHeightThresh/additiveMax
@@ -2572,18 +2600,18 @@ class DisplayMappingResults(object):
             for item in additiveAxisList:
                 additiveY = yZero - item*addPlotScale
                 im_drawer.line(
-                    xy=((xLeftOffset + plotWidth,additiveY),
-                        (xLeftOffset+4+ plotWidth,additiveY)),
+                    xy=((xLeftOffset + plotWidth, additiveY),
+                        (xLeftOffset+4+ plotWidth, additiveY)),
                     fill=self.ADDITIVE_COLOR_POSITIVE, width=1*zoom)
                 scaleStr = "%2.3f" % item
                 im_drawer.text(
                     text=scaleStr,
-                    xy=(xLeftOffset + plotWidth +6,additiveY+TEXT_Y_DISPLACEMENT),
-                    font=additiveScaleFont,fill=self.ADDITIVE_COLOR_POSITIVE)
+                    xy=(xLeftOffset + plotWidth +6, additiveY+TEXT_Y_DISPLACEMENT),
+                    font=additiveScaleFont, fill=self.ADDITIVE_COLOR_POSITIVE)
 
             im_drawer.line(
-                xy=((xLeftOffset+plotWidth,additiveY),
-                    (xLeftOffset+plotWidth,yZero)),
+                xy=((xLeftOffset+plotWidth, additiveY),
+                    (xLeftOffset+plotWidth, yZero)),
                 fill=self.ADDITIVE_COLOR_POSITIVE, width=1*zoom)
 
         im_drawer.line(
@@ -2643,7 +2671,7 @@ class DisplayMappingResults(object):
                 chrFontZoom = 2
             else:
                 chrFontZoom = 1
-            chrLabelFont=ImageFont.truetype(font=VERDANA_FILE,size=24*chrFontZoom)
+            chrLabelFont=ImageFont.truetype(font=VERDANA_FILE, size=24*chrFontZoom)
 
             for i, _chr in enumerate(self.genotype):
                 if (i % 2 == 0):
@@ -2665,16 +2693,16 @@ class DisplayMappingResults(object):
                 TEXT_Y_DISPLACEMENT = 0
                 im_drawer.text(xy=(chrStartPix, yTopOffset + TEXT_Y_DISPLACEMENT),
                                text=_chr.name, font=chrLabelFont, fill=BLACK)
-                COORDS = "%d,%d,%d,%d" %(chrStartPix, yTopOffset, chrEndPix,yTopOffset +20)
+                COORDS = "%d,%d,%d,%d" %(chrStartPix, yTopOffset, chrEndPix, yTopOffset +20)
 
                 #add by NL 09-03-2010
-                HREF = "javascript:chrView(%d,%s);" % (i,self.ChrLengthMbList)
+                HREF = "javascript:chrView(%d,%s);" % (i, self.ChrLengthMbList)
                 #HREF = "javascript:changeView(%d,%s);" % (i,self.ChrLengthMbList)
                 Areas = HtmlGenWrapper.create_area_tag(
                     shape='rect',
                     coords=COORDS,
                     href=HREF)
-                gifmap.areas.append(Areas)
+                gifmap.append(Areas)
                 startPosX +=  (self.ChrLengthDistList[i]+self.GraphInterval)*plotXScale
 
         return plotXScale
@@ -2760,7 +2788,7 @@ class DisplayMappingResults(object):
 
                 this_row = [] #container for the cells of each row
                 selectCheck = HtmlGenWrapper.create_input_tag(
-                    type="checkbox",
+                    type_="checkbox",
                     name="selectCheck",
                     value=theGO["GeneSymbol"],
                     Class="checkbox trait_checkbox")  # checkbox for each row
@@ -2774,9 +2802,17 @@ class DisplayMappingResults(object):
                     geneIdString = 'http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?db=gene&cmd=Retrieve&dopt=Graphics&list_uids=%s' % theGO["GeneID"]
 
                     if theGO["snpCount"]:
-                        snpString = HT.Href(url="http://genenetwork.org/webqtl/main.py?FormID=snpBrowser&chr=%s&start=%s&end=%s&geneName=%s&s1=%d&s2=%d" % (theGO["Chromosome"],
-                                theGO["TxStart"], theGO["TxEnd"], theGO["GeneSymbol"], self.diffCol[0], self.diffCol[1]),
-                                text=theGO["snpCount"], target="_blank", Class="normalsize")
+                        snpString = HT.Link(
+                            (f"http://genenetwork.org/webqtl/main.py?FormID=snpBrowser&"
+                             f"chr={theGO['Chromosome']}&"
+                             f"start={theGO['TxStart']}&"
+                             f"end={theGO['TxEnd']}&"
+                             f"geneName={theGO['GeneSymbol']}&"
+                             f"s1={self.diffCol[0]}&s2={self.diffCol[1]}"),
+                            theGO["snpCount"] # The text to display
+                        )
+                        snpString.set_blank_target()
+                        snpString.set_attribute("class", "normalsize")
                     else:
                         snpString = 0
 
@@ -2817,7 +2853,7 @@ class DisplayMappingResults(object):
                     else:
                         chr_as_int = int(theGO["Chromosome"]) - 1
                     if refGene:
-                        literatureCorrelationString = str(self.getLiteratureCorrelation(self.cursor,refGene,theGO['GeneID']) or "N/A")
+                        literatureCorrelationString = str(self.getLiteratureCorrelation(self.cursor, refGene, theGO['GeneID']) or "N/A")
 
                         this_row = [selectCheck.__str__(),
                                     str(tableIterationsCnt),
@@ -2879,13 +2915,10 @@ class DisplayMappingResults(object):
             for gIndex, theGO in enumerate(geneCol):
                 this_row = []  # container for the cells of each row
                 selectCheck = str(HtmlGenWrapper.create_input_tag(
-                    type="checkbox",
+                    type_="checkbox",
                     name="selectCheck",
                     Class="checkbox trait_checkbox"))  # checkbox for each row
 
-                #ZS: May want to get this working again later
-                #webqtlSearch = HT.Href(os.path.join(webqtlConfig.CGIDIR, webqtlConfig.SCRIPTFILE)+"?cmd=sch&gene=%s&alias=1&species=rat" % theGO["GeneSymbol"], ">>", target="_blank").__str__()
-
                 if theGO["GeneID"] != "":
                     geneSymbolNCBI = str(HtmlGenWrapper.create_link_tag(
                         "http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?db=gene&cmd=Retrieve&dopt=Graphics&list_uids={}".format(theGO["GeneID"]),
@@ -2955,8 +2988,8 @@ class DisplayMappingResults(object):
         lCorr = None
         try:
             query = 'SELECT Value FROM LCorrRamin3 WHERE GeneId1 = %s and GeneId2 = %s'
-            for x,y in [(geneId1,geneId2),(geneId2,geneId1)]:
-                cursor.execute(query,(x,y))
+            for x, y in [(geneId1, geneId2), (geneId2, geneId1)]:
+                cursor.execute(query, (x, y))
                 lCorr =  cursor.fetchone()
                 if lCorr:
                     lCorr = lCorr[0]
diff --git a/wqflask/wqflask/marker_regression/gemma_mapping.py b/wqflask/wqflask/marker_regression/gemma_mapping.py
index b858b573..68a8d5ba 100644
--- a/wqflask/wqflask/marker_regression/gemma_mapping.py
+++ b/wqflask/wqflask/marker_regression/gemma_mapping.py
@@ -218,9 +218,9 @@ def parse_loco_output(this_dataset, gwa_output_filename):
                             marker['chr'] = int(line.split("\t")[0][3:])
                         else:
                             marker['chr'] = int(line.split("\t")[0])
-                        if marker['chr'] > previous_chr:
+                        if marker['chr'] > int(previous_chr):
                             previous_chr = marker['chr']
-                        elif marker['chr'] < previous_chr:
+                        elif marker['chr'] < int(previous_chr):
                             break
                     else:
                         marker['chr'] = line.split("\t")[0]
diff --git a/wqflask/wqflask/marker_regression/plink_mapping.py b/wqflask/wqflask/marker_regression/plink_mapping.py
index 2f327faf..6c38c34f 100644
--- a/wqflask/wqflask/marker_regression/plink_mapping.py
+++ b/wqflask/wqflask/marker_regression/plink_mapping.py
@@ -54,7 +54,7 @@ def gen_pheno_txt_file_plink(this_trait, dataset, vals, pheno_filename = ''):
     for i, sample in enumerate(ped_sample_list):
         try:
             value = vals[i]
-            value = str(value).replace('value=','')
+            value = str(value).replace('value=', '')
             value = value.strip()
         except:
             value = -9999
@@ -78,13 +78,13 @@ def gen_pheno_txt_file_plink(this_trait, dataset, vals, pheno_filename = ''):
 
 # get strain name from ped file in order
 def get_samples_from_ped_file(dataset):
-    ped_file= open("{}{}.ped".format(flat_files('mapping'), dataset.group.name),"r")
+    ped_file= open("{}{}.ped".format(flat_files('mapping'), dataset.group.name), "r")
     line = ped_file.readline()
     sample_list=[]
 
     while line:
-        lineList = string.split(string.strip(line), '\t')
-        lineList = map(string.strip, lineList)
+        lineList = line.strip().split('\t')
+        lineList = list(map(str.strip, lineList))
 
         sample_name = lineList[0]
         sample_list.append(sample_name)
@@ -111,7 +111,7 @@ def parse_plink_output(output_filename, species):
         line_list = build_line_list(line=line)
 
         # only keep the records whose chromosome name is in db
-        if species.chromosomes.chromosomes.has_key(int(line_list[0])) and line_list[-1] and line_list[-1].strip()!='NA':
+        if int(line_list[0]) in species.chromosomes.chromosomes and line_list[-1] and line_list[-1].strip()!='NA':
 
             chr_name = species.chromosomes.chromosomes[int(line_list[0])]
             snp = line_list[1]
@@ -121,7 +121,7 @@ def parse_plink_output(output_filename, species):
                 if p_value < threshold_p_value:
                     p_value_dict[snp] = float(p_value)
 
-            if plink_results.has_key(chr_name):
+            if chr_name in plink_results:
                 value_list = plink_results[chr_name]
 
                 # pvalue range is [0,1]
@@ -155,8 +155,8 @@ def parse_plink_output(output_filename, species):
 # output: lineList list
 #######################################################
 def build_line_list(line=None):
-    line_list = string.split(string.strip(line),' ')# irregular number of whitespaces between columns
-    line_list = [item for item in line_list if item <>'']
-    line_list = map(string.strip, line_list)
+    line_list = line.strip().split(' ')# irregular number of whitespaces between columns
+    line_list = [item for item in line_list if item !='']
+    line_list = list(map(str.strip, line_list))
 
-    return line_list
\ No newline at end of file
+    return line_list
diff --git a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py
index 6b4c05ea..78b1f7b0 100644
--- a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py
+++ b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py
@@ -252,4 +252,4 @@ def natural_sort(marker_list):
     """
     convert = lambda text: int(text) if text.isdigit() else text.lower()
     alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', str(marker_list[key]['chr'])) ]
-    return sorted(range(len(marker_list)), key = alphanum_key)
\ No newline at end of file
+    return sorted(list(range(len(marker_list))), key = alphanum_key)
\ No newline at end of file
diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py
index c5590a85..0a5758af 100644
--- a/wqflask/wqflask/marker_regression/rqtl_mapping.py
+++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py
@@ -42,7 +42,7 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec
     png           = ro.r["png"]                     # Map the png function
     dev_off       = ro.r["dev.off"]                 # Map the device off function
 
-    print(r_library("qtl"))                         # Load R/qtl
+    print(r_library("qtl"))                         # Load R/qtl
 
     logger.info("QTL library loaded");
 
diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py
index 39820c8c..fa61272f 100644
--- a/wqflask/wqflask/marker_regression/run_mapping.py
+++ b/wqflask/wqflask/marker_regression/run_mapping.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
 from base.trait import GeneralTrait
 from base import data_set  #import create_dataset
 
@@ -18,7 +16,7 @@ import uuid
 import rpy2.robjects as ro
 import numpy as np
 
-import cPickle as pickle
+import pickle
 import itertools
 
 import simplejson as json
@@ -347,7 +345,7 @@ class RunMapping(object):
                   if marker['chr1'] > 0 or marker['chr1'] == "X" or marker['chr1'] == "X/Y":
                       if marker['chr1'] > highest_chr or marker['chr1'] == "X" or marker['chr1'] == "X/Y":
                           highest_chr = marker['chr1']
-                      if 'lod_score' in marker.keys():
+                      if 'lod_score' in list(marker.keys()):
                           self.qtl_results.append(marker)
 
               self.trimmed_markers = results
@@ -411,8 +409,8 @@ class RunMapping(object):
 
                   self.results_for_browser.append(browser_marker)
                   self.annotations_for_browser.append(annot_marker)
-                  if marker['chr'] > 0 or marker['chr'] == "X" or marker['chr'] == "X/Y":
-                      if marker['chr'] > highest_chr or marker['chr'] == "X" or marker['chr'] == "X/Y":
+                  if str(marker['chr']) in ("X", "X/Y") or int(marker['chr']) > 0:
+                      if str(marker['chr']) in ("X", "X/Y") or (str(highest_chr) not in ("X", "X/Y") and int(marker['chr']) > int(highest_chr)):
                           highest_chr = marker['chr']
                       if ('lod_score' in marker.keys()) or ('lrs_value' in marker.keys()):
                           if 'Mb' in marker.keys():
@@ -547,9 +545,9 @@ def export_mapping_results(dataset, trait, markers, results_path, mapping_scale,
             output_file.write("Mb," + score_type)
         if 'cM' in markers[0]:
             output_file.write("Cm," + score_type)
-        if "additive" in markers[0].keys():
+        if "additive" in list(markers[0].keys()):
             output_file.write(",Additive")
-        if "dominance" in markers[0].keys():
+        if "dominance" in list(markers[0].keys()):
             output_file.write(",Dominance")
         output_file.write("\n")
         for i, marker in enumerate(markers):
@@ -562,17 +560,17 @@ def export_mapping_results(dataset, trait, markers, results_path, mapping_scale,
                 output_file.write(str(marker['lod_score']))
             else:
                 output_file.write(str(marker['lrs_value']))
-            if "additive" in marker.keys():
+            if "additive" in list(marker.keys()):
                 output_file.write("," + str(marker['additive']))
-            if "dominance" in marker.keys():
+            if "dominance" in list(marker.keys()):
                 output_file.write("," + str(marker['dominance']))
             if i < (len(markers) - 1):
                 output_file.write("\n")
 
 def trim_markers_for_figure(markers):
-    if 'p_wald' in markers[0].keys():
+    if 'p_wald' in list(markers[0].keys()):
         score_type = 'p_wald'
-    elif 'lod_score' in markers[0].keys():
+    elif 'lod_score' in list(markers[0].keys()):
         score_type = 'lod_score'
     else:
         score_type = 'lrs_value'
@@ -630,7 +628,7 @@ def trim_markers_for_figure(markers):
     return filtered_markers
 
 def trim_markers_for_table(markers):
-    if 'lod_score' in markers[0].keys():
+    if 'lod_score' in list(markers[0].keys()):
         sorted_markers = sorted(markers, key=lambda k: k['lod_score'], reverse=True)
     else:
         sorted_markers = sorted(markers, key=lambda k: k['lrs_value'], reverse=True)
@@ -714,10 +712,10 @@ def get_genofile_samplelist(dataset):
 def get_perm_strata(this_trait, sample_list, categorical_vars, used_samples):
     perm_strata_strings = []
     for sample in used_samples:
-        if sample in sample_list.sample_attribute_values.keys():
+        if sample in list(sample_list.sample_attribute_values.keys()):
             combined_string = ""
             for var in categorical_vars:
-                if var in sample_list.sample_attribute_values[sample].keys():
+                if var in list(sample_list.sample_attribute_values[sample].keys()):
                     combined_string += str(sample_list.sample_attribute_values[sample][var])
                 else:
                     combined_string += "NA"
@@ -726,8 +724,8 @@ def get_perm_strata(this_trait, sample_list, categorical_vars, used_samples):
 
         perm_strata_strings.append(combined_string)
 
-    d = dict([(y,x+1) for x,y in enumerate(sorted(set(perm_strata_strings)))])
+    d = dict([(y, x+1) for x, y in enumerate(sorted(set(perm_strata_strings)))])
     list_to_numbers = [d[x] for x in perm_strata_strings]
     perm_strata = list_to_numbers
 
-    return perm_strata
\ No newline at end of file
+    return perm_strata
diff --git a/wqflask/wqflask/model.py b/wqflask/wqflask/model.py
index 38117a8e..772f74e4 100644
--- a/wqflask/wqflask/model.py
+++ b/wqflask/wqflask/model.py
@@ -1,5 +1,3 @@
-from __future__ import print_function, division, absolute_import
-
 import uuid
 import datetime
 
@@ -18,7 +16,7 @@ from wqflask.database import Base, init_db
 
 class User(Base):
     __tablename__ = "user"
-    id = Column(Unicode(36), primary_key=True, default=lambda: unicode(uuid.uuid4()))
+    id = Column(Unicode(36), primary_key=True, default=lambda: str(uuid.uuid4()))
     email_address = Column(Unicode(50), unique=True, nullable=False)
 
     # Todo: Turn on strict mode for Mysql
@@ -120,7 +118,7 @@ class User(Base):
 
 class Login(Base):
     __tablename__ = "login"
-    id = Column(Unicode(36), primary_key=True, default=lambda: unicode(uuid.uuid4()))
+    id = Column(Unicode(36), primary_key=True, default=lambda: str(uuid.uuid4()))
     user = Column(Unicode(36), ForeignKey('user.id'))
     timestamp = Column(DateTime(), default=lambda: datetime.datetime.utcnow())
     ip_address = Column(Unicode(39))
@@ -138,7 +136,7 @@ class Login(Base):
 
 class UserCollection(Base):
     __tablename__ = "user_collection"
-    id = Column(Unicode(36), primary_key=True, default=lambda: unicode(uuid.uuid4()))
+    id = Column(Unicode(36), primary_key=True, default=lambda: str(uuid.uuid4()))
     user = Column(Unicode(36), ForeignKey('user.id'))
 
     # I'd prefer this to not have a length, but for the index below it needs one
@@ -168,4 +166,4 @@ def display_collapsible(number):
 
 def user_uuid():
     """Unique cookie for a user"""
-    user_uuid = request.cookies.get('user_uuid')
\ No newline at end of file
+    user_uuid = request.cookies.get('user_uuid')
diff --git a/wqflask/wqflask/network_graph/network_graph.py b/wqflask/wqflask/network_graph/network_graph.py
index f61c40b4..723a749f 100644
--- a/wqflask/wqflask/network_graph/network_graph.py
+++ b/wqflask/wqflask/network_graph/network_graph.py
@@ -1,4 +1,4 @@
-## Copyright (C) University of Tennessee Health Science Center, Memphis, TN.
+# Copyright (C) University of Tennessee Health Science Center, Memphis, TN.
 #
 # This program is free software: you can redistribute it and/or modify it
 # under the terms of the GNU Affero General Public License
@@ -18,50 +18,21 @@
 #
 # This module is used by GeneNetwork project (www.genenetwork.org)
 
-from __future__ import absolute_import, print_function, division
-
-import sys
-
-import string
-import cPickle
-import os
-import time
-import pp
-import math
-import collections
-import resource
-
 import scipy
-
 import simplejson as json
 
-from rpy2.robjects.packages import importr
-import rpy2.robjects as robjects
-
-from pprint import pformat as pf
-
-from utility.THCell import THCell
-from utility.TDCell import TDCell
 from base.trait import create_trait
 from base import data_set
-from utility import webqtlUtil, helper_functions, corr_result_helpers
+from utility import helper_functions
+from utility import corr_result_helpers
 from utility.tools import GN2_BRANCH_URL
-from db import webqtlDatabaseFunction
-import utility.webqtlUtil #this is for parallel computing only.
-from wqflask.correlation import correlation_functions
-from utility.benchmark import Bench
-
-from MySQLdb import escape_string as escape
-
-from pprint import pformat as pf
-
-from flask import Flask, g
 
 
 class NetworkGraph(object):
 
     def __init__(self, start_vars):
-        trait_db_list = [trait.strip() for trait in start_vars['trait_list'].split(',')]
+        trait_db_list = [trait.strip()
+                         for trait in start_vars['trait_list'].split(',')]
 
         helper_functions.get_trait_db_obs(self, trait_db_list)
 
@@ -89,7 +60,8 @@ class NetworkGraph(object):
                     this_trait_vals.append('')
             self.sample_data.append(this_trait_vals)
 
-        self.lowest_overlap = 8 #ZS: Variable set to the lowest overlapping samples in order to notify user, or 8, whichever is lower (since 8 is when we want to display warning)
+        # ZS: Variable set to the lowest overlapping samples in order to notify user, or 8, whichever is lower (since 8 is when we want to display warning)
+        self.lowest_overlap = 8
 
         self.nodes_list = []
         self.edges_list = []
@@ -101,9 +73,9 @@ class NetworkGraph(object):
             this_sample_data = this_trait.data
 
             corr_result_row = []
-            is_spearman = False #ZS: To determine if it's above or below the diagonal
+            is_spearman = False  # ZS: To determine if it's above or below the diagonal
 
-            max_corr = 0 #ZS: Used to determine whether node should be hidden when correlation coefficient slider is used
+            max_corr = 0  # ZS: Used to determine whether node should be hidden when correlation coefficient slider is used
 
             for target in self.trait_list:
                 target_trait = target[0]
@@ -122,20 +94,23 @@ class NetworkGraph(object):
                         this_trait_vals.append(sample_value)
                         target_vals.append(target_sample_value)
 
-                this_trait_vals, target_vals, num_overlap = corr_result_helpers.normalize_values(this_trait_vals, target_vals)
+                this_trait_vals, target_vals, num_overlap = corr_result_helpers.normalize_values(
+                    this_trait_vals, target_vals)
 
                 if num_overlap < self.lowest_overlap:
                     self.lowest_overlap = num_overlap
                 if num_overlap == 0:
                     continue
                 else:
-                    pearson_r, pearson_p = scipy.stats.pearsonr(this_trait_vals, target_vals)
+                    pearson_r, pearson_p = scipy.stats.pearsonr(
+                        this_trait_vals, target_vals)
                     if is_spearman == False:
                         sample_r, sample_p = pearson_r, pearson_p
                         if sample_r == 1:
                             continue
                     else:
-                        sample_r, sample_p = scipy.stats.spearmanr(this_trait_vals, target_vals)
+                        sample_r, sample_p = scipy.stats.spearmanr(
+                            this_trait_vals, target_vals)
 
                     if -1 <= sample_r < -0.7:
                         color = "#0000ff"
@@ -153,44 +128,44 @@ class NetworkGraph(object):
                         color = "#ffa500"
                         width = 2
                     elif 0.7 <= sample_r <= 1:
-                        color = "#ff0000"  
-                        width = 3 
+                        color = "#ff0000"
+                        width = 3
                     else:
                         color = "#000000"
-                        width = 0                      
+                        width = 0
 
                     if abs(sample_r) > max_corr:
                         max_corr = abs(sample_r)
 
-                    edge_data = {'id' : str(this_trait.name) + '_to_' + str(target_trait.name),
-                                 'source' : str(this_trait.name) + ":" + str(this_trait.dataset.name),
-                                 'target' : str(target_trait.name) + ":" + str(target_trait.dataset.name),
-                                 'correlation' : round(sample_r, 3),
-                                 'abs_corr' : abs(round(sample_r, 3)),
-                                 'p_value' : round(sample_p, 3),
-                                 'overlap' : num_overlap,
-                                 'color' : color,
-                                 'width' : width }
+                    edge_data = {'id': str(this_trait.name) + '_to_' + str(target_trait.name),
+                                 'source': str(this_trait.name) + ":" + str(this_trait.dataset.name),
+                                 'target': str(target_trait.name) + ":" + str(target_trait.dataset.name),
+                                 'correlation': round(sample_r, 3),
+                                 'abs_corr': abs(round(sample_r, 3)),
+                                 'p_value': round(sample_p, 3),
+                                 'overlap': num_overlap,
+                                 'color': color,
+                                 'width': width}
 
-                    edge_dict = { 'data' : edge_data }
+                    edge_dict = {'data': edge_data}
 
                     self.edges_list.append(edge_dict)
 
             if trait_db[1].type == "ProbeSet":
-                node_dict = { 'data' : {'id' : str(this_trait.name) + ":" + str(this_trait.dataset.name), 
-                                        'label' : this_trait.symbol,
-                                        'symbol' : this_trait.symbol,
-                                        'geneid' : this_trait.geneid,
-                                        'omim' : this_trait.omim,
-                                        'max_corr' : max_corr } }
+                node_dict = {'data': {'id': str(this_trait.name) + ":" + str(this_trait.dataset.name),
+                                      'label': this_trait.symbol,
+                                      'symbol': this_trait.symbol,
+                                      'geneid': this_trait.geneid,
+                                      'omim': this_trait.omim,
+                                      'max_corr': max_corr}}
             elif trait_db[1].type == "Publish":
-                node_dict = { 'data' : {'id' : str(this_trait.name) + ":" + str(this_trait.dataset.name), 
-                                        'label' : this_trait.name,
-                                        'max_corr' : max_corr } }
+                node_dict = {'data': {'id': str(this_trait.name) + ":" + str(this_trait.dataset.name),
+                                      'label': this_trait.name,
+                                      'max_corr': max_corr}}
             else:
-                node_dict = { 'data' : {'id' : str(this_trait.name) + ":" + str(this_trait.dataset.name), 
-                                        'label' : this_trait.name,
-                                        'max_corr' : max_corr } }
+                node_dict = {'data': {'id': str(this_trait.name) + ":" + str(this_trait.dataset.name),
+                                      'label': this_trait.name,
+                                      'max_corr': max_corr}}
             self.nodes_list.append(node_dict)
 
         self.elements = json.dumps(self.nodes_list + self.edges_list)
@@ -200,13 +175,13 @@ class NetworkGraph(object):
         for sample in self.all_sample_list:
             groups.append(1)
 
-        self.js_data = dict(traits = [trait.name for trait in self.traits],
-                            groups = groups,
-                            cols = range(len(self.traits)),
-                            rows = range(len(self.traits)),
-                            samples = self.all_sample_list,
-                            sample_data = self.sample_data,
-                            elements = self.elements,)
+        self.js_data = dict(traits=[trait.name for trait in self.traits],
+                            groups=groups,
+                            cols=list(range(len(self.traits))),
+                            rows=list(range(len(self.traits))),
+                            samples=self.all_sample_list,
+                            sample_data=self.sample_data,
+                            elements=self.elements,)
 
     def get_trait_db_obs(self, trait_db_list):
         self.trait_list = []
@@ -216,6 +191,6 @@ class NetworkGraph(object):
             trait_name, dataset_name = trait_db.split(":")
             dataset_ob = data_set.create_dataset(dataset_name)
             trait_ob = create_trait(dataset=dataset_ob,
-                                   name=trait_name,
-                                   cellid=None)
-            self.trait_list.append((trait_ob, dataset_ob))
\ No newline at end of file
+                                    name=trait_name,
+                                    cellid=None)
+            self.trait_list.append((trait_ob, dataset_ob))
diff --git a/wqflask/wqflask/news.py b/wqflask/wqflask/news.py
index 8bc6b889..0675ec4b 100644
--- a/wqflask/wqflask/news.py
+++ b/wqflask/wqflask/news.py
@@ -1,7 +1,3 @@
-from __future__ import absolute_import, print_function, division
-import sys
-reload(sys)
-sys.setdefaultencoding('utf8')
 from flask import g
 
 class News(object):
diff --git a/wqflask/wqflask/parser.py b/wqflask/wqflask/parser.py
index 1ca5ecff..76fae54b 100644
--- a/wqflask/wqflask/parser.py
+++ b/wqflask/wqflask/parser.py
@@ -17,8 +17,6 @@ be acceptable.]
 
 """
 
-from __future__ import print_function, division
-
 import re
 
 from pprint import pformat as pf
@@ -78,22 +76,6 @@ def parse(pstring):
     logger.debug("* items are:", pf(items) + "\n")
     return(items)
 
-    #def encregexp(self,str):
-    #    if not str:
-    #        return []
-    #    else:
-    #        wildcardkeyword = str.strip()
-    #        wildcardkeyword = string.replace(wildcardkeyword,',',' ')
-    #        wildcardkeyword = string.replace(wildcardkeyword,';',' ')
-    #        wildcardkeyword = wildcardkeyword.split()
-    #    NNN = len(wildcardkeyword)
-    #    for i in range(NNN):
-    #        keyword = wildcardkeyword[i]
-    #        keyword = string.replace(keyword,"*",".*")
-    #        keyword = string.replace(keyword,"?",".")
-    #        wildcardkeyword[i] = keyword#'[[:<:]]'+ keyword+'[[:>:]]'
-    #    return wildcardkeyword
-
 
 if __name__ == '__main__':
     parse("foo=[3 2 1]")
diff --git a/wqflask/wqflask/pbkdf2.py b/wqflask/wqflask/pbkdf2.py
index f7f61a09..aea5b06c 100644
--- a/wqflask/wqflask/pbkdf2.py
+++ b/wqflask/wqflask/pbkdf2.py
@@ -1,140 +1,20 @@
-# -*- coding: utf-8 -*-
-"""
-    pbkdf2
-    ~~~~~~
-
-    This module implements pbkdf2 for Python.  It also has some basic
-    tests that ensure that it works.  The implementation is straightforward
-    and uses stdlib only stuff and can be easily be copy/pasted into
-    your favourite application.
-
-    Use this as replacement for bcrypt that does not need a c implementation
-    of a modified blowfish crypto algo.
-
-    Example usage:
-
-    >>> pbkdf2_hex('what i want to hash', 'the random salt')
-    'fa7cc8a2b0a932f8e6ea42f9787e9d36e592e0c222ada6a9'
-
-    How to use this:
-
-    1.  Use a constant time string compare function to compare the stored hash
-        with the one you're generating::
-
-            def safe_str_cmp(a, b):
-                if len(a) != len(b):
-                    return False
-                rv = 0
-                for x, y in izip(a, b):
-                    rv |= ord(x) ^ ord(y)
-                return rv == 0
-
-    2.  Use `os.urandom` to generate a proper salt of at least 8 byte.
-        Use a unique salt per hashed password.
-
-    3.  Store ``algorithm$salt:costfactor$hash`` in the database so that
-        you can upgrade later easily to a different algorithm if you need
-        one.  For instance ``PBKDF2-256$thesalt:10000$deadbeef...``.
-
-
-    :copyright: (c) Copyright 2011 by Armin Ronacher.
-    :license: BSD, see LICENSE for more details.
-"""
-import hmac
 import hashlib
-from struct import Struct
-from operator import xor
-from itertools import izip, starmap
 
+from werkzeug.security import safe_str_cmp as ssc
 
-_pack_int = Struct('>I').pack
 
-
-def pbkdf2_hex(data, salt, iterations=1000, keylen=24, hashfunc=None):
-    """Like :func:`pbkdf2_bin` but returns a hex encoded string."""
-    return pbkdf2_bin(data, salt, iterations, keylen, hashfunc).encode('hex')
-
-
-def pbkdf2_bin(data, salt, iterations=1000, keylen=24, hashfunc=None):
-    """Returns a binary digest for the PBKDF2 hash algorithm of `data`
-    with the given `salt`.  It iterates `iterations` time and produces a
-    key of `keylen` bytes.  By default SHA-1 is used as hash function,
-    a different hashlib `hashfunc` can be provided.
+# Replace this because it just wraps around Python3's internal
+# functions. Added this during migration.
+def pbkdf2_hex(data, salt, iterations=1000, keylen=24, hashfunc="sha1"):
+    """Wrapper function of python's hashlib.pbkdf2_hmac.
     """
-    hashfunc = hashfunc or hashlib.sha1
-    mac = hmac.new(data, None, hashfunc)
-    def _pseudorandom(x, mac=mac):
-        h = mac.copy()
-        h.update(x)
-        return map(ord, h.digest())
-    buf = []
-    for block in xrange(1, -(-keylen // mac.digest_size) + 1):
-        rv = u = _pseudorandom(salt + _pack_int(block))
-        for i in xrange(iterations - 1):
-            u = _pseudorandom(''.join(map(chr, u)))
-            rv = list(starmap(xor, izip(rv, u)))
-        buf.extend(rv)
-    return ''.join(map(chr, buf))[:keylen]
+    dk = hashlib.pbkdf2_hmac(hashfunc,
+                             bytes(data, "utf-8"),  # password
+                             bytes(salt, "utf-8"),  # salt
+                             iterations,
+                             keylen)
+    return dk.hex()
 
 
 def safe_str_cmp(a, b):
-    if len(a) != len(b):
-        return False
-    rv = 0
-    for x, y in izip(a, b):
-        rv |= ord(x) ^ ord(y)
-    return rv == 0
-
-
-
-def test():
-    failed = []
-    def check(data, salt, iterations, keylen, expected):
-        rv = pbkdf2_hex(data, salt, iterations, keylen)
-        if rv != expected:
-            print 'Test failed:'
-            print '  Expected:   %s' % expected
-            print '  Got:        %s' % rv
-            print '  Parameters:'
-            print '    data=%s' % data
-            print '    salt=%s' % salt
-            print '    iterations=%d' % iterations
-            print
-            failed.append(1)
-
-    # From RFC 6070
-    check('password', 'salt', 1, 20,
-          '0c60c80f961f0e71f3a9b524af6012062fe037a6')
-    check('password', 'salt', 2, 20,
-          'ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957')
-    check('password', 'salt', 4096, 20,
-          '4b007901b765489abead49d926f721d065a429c1')
-    check('passwordPASSWORDpassword', 'saltSALTsaltSALTsaltSALTsaltSALTsalt',
-          4096, 25, '3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038')
-    check('pass\x00word', 'sa\x00lt', 4096, 16,
-          '56fa6aa75548099dcc37d7f03425e0c3')
-    # This one is from the RFC but it just takes for ages
-    ##check('password', 'salt', 16777216, 20,
-    ##      'eefe3d61cd4da4e4e9945b3d6ba2158c2634e984')
-
-    # From Crypt-PBKDF2
-    check('password', 'ATHENA.MIT.EDUraeburn', 1, 16,
-          'cdedb5281bb2f801565a1122b2563515')
-    check('password', 'ATHENA.MIT.EDUraeburn', 1, 32,
-          'cdedb5281bb2f801565a1122b25635150ad1f7a04bb9f3a333ecc0e2e1f70837')
-    check('password', 'ATHENA.MIT.EDUraeburn', 2, 16,
-          '01dbee7f4a9e243e988b62c73cda935d')
-    check('password', 'ATHENA.MIT.EDUraeburn', 2, 32,
-          '01dbee7f4a9e243e988b62c73cda935da05378b93244ec8f48a99e61ad799d86')
-    check('password', 'ATHENA.MIT.EDUraeburn', 1200, 32,
-          '5c08eb61fdf71e4e4ec3cf6ba1f5512ba7e52ddbc5e5142f708a31e2e62b1e13')
-    check('X' * 64, 'pass phrase equals block size', 1200, 32,
-          '139c30c0966bc32ba55fdbf212530ac9c5ec59f1a452f5cc9ad940fea0598ed1')
-    check('X' * 65, 'pass phrase exceeds block size', 1200, 32,
-          '9ccad6d468770cd51b10e6a68721be611a8b4d282601db3b36be9246915ec82a')
-
-    raise SystemExit(bool(failed))
-
-
-if __name__ == '__main__':
-    test()
+    return ssc(a, b)
diff --git a/wqflask/wqflask/resource_manager.py b/wqflask/wqflask/resource_manager.py
index 14ff2183..85cbb2fd 100644
--- a/wqflask/wqflask/resource_manager.py
+++ b/wqflask/wqflask/resource_manager.py
@@ -1,5 +1,3 @@
-from __future__ import print_function, division, absolute_import
-
 import json
 
 from flask import (Flask, g, render_template, url_for, request, make_response,
@@ -125,10 +123,10 @@ def add_group_to_resource():
 
 def get_group_names(group_masks):
     group_masks_with_names = {}
-    for group_id, group_mask in group_masks.iteritems():
+    for group_id, group_mask in list(group_masks.items()):
         this_mask = group_mask
         group_name = get_group_info(group_id)['name']
         this_mask['name'] = group_name
         group_masks_with_names[group_id] = this_mask
     
-    return group_masks_with_names
\ No newline at end of file
+    return group_masks_with_names
diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py
index f63a84d1..ce836ce2 100644
--- a/wqflask/wqflask/search_results.py
+++ b/wqflask/wqflask/search_results.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
 import re
 import uuid
 from math import *
@@ -53,7 +51,7 @@ views.py).
         search = self.search_terms
         self.original_search_string = self.search_terms
         # check for dodgy search terms
-        rx = re.compile(r'.*\W(href|http|sql|select|update)\W.*',re.IGNORECASE)
+        rx = re.compile(r'.*\W(href|http|sql|select|update)\W.*', re.IGNORECASE)
         if rx.match(search):
             logger.info("Regex failed search")
             self.search_term_exists = False
@@ -123,7 +121,7 @@ views.py).
                 trait_dict['hmac'] = hmac.data_hmac('{}:{}'.format(this_trait.name, this_trait.dataset.name))
                 if this_trait.dataset.type == "ProbeSet":
                     trait_dict['symbol'] = this_trait.symbol
-                    trait_dict['description'] = this_trait.description_display.decode('utf-8', 'replace')
+                    trait_dict['description'] = this_trait.description_display
                     trait_dict['location'] = this_trait.location_repr
                     trait_dict['mean'] = "N/A"
                     trait_dict['additive'] = "N/A"
@@ -151,6 +149,10 @@ views.py).
                     trait_dict['additive'] = "N/A"
                     if this_trait.additive != "":
                         trait_dict['additive'] = '%.3f' % this_trait.additive
+                # Convert any bytes in dict to a normal utf-8 string
+                for key in trait_dict.keys():
+                    if isinstance(trait_dict[key], bytes):
+                        trait_dict[key] = trait_dict[key].decode('utf-8')
                 trait_list.append(trait_dict)
 
         self.trait_list = json.dumps(trait_list)
@@ -272,7 +274,7 @@ def get_GO_symbols(a_search):
 def insert_newlines(string, every=64):
     """ This is because it is seemingly impossible to change the width of the description column, so I'm just manually adding line breaks """
     lines = []
-    for i in xrange(0, len(string), every):
+    for i in range(0, len(string), every):
         lines.append(string[i:i+every])
     return '\n'.join(lines)
 
diff --git a/wqflask/wqflask/send_mail.py b/wqflask/wqflask/send_mail.py
index bf5d0dd8..86e8a558 100644
--- a/wqflask/wqflask/send_mail.py
+++ b/wqflask/wqflask/send_mail.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
 import datetime
 import time
 
diff --git a/wqflask/wqflask/show_trait/SampleList.py b/wqflask/wqflask/show_trait/SampleList.py
index 356db7bc..e7676458 100644
--- a/wqflask/wqflask/show_trait/SampleList.py
+++ b/wqflask/wqflask/show_trait/SampleList.py
@@ -1,35 +1,28 @@
-from __future__ import absolute_import, print_function, division
-
+import re
 import itertools
 
-import numpy as np
-from flask import Flask, g
+from flask import g
+from base import webqtlCaseData
 from pprint import pformat as pf
-from scipy import stats
 
-from base import webqtlCaseData
-from base.trait import GeneralTrait
-from utility import logger
-from utility import webqtlUtil
 from utility import Plot
 from utility import Bunch
 
-logger = logger.getLogger(__name__ )
 
 class SampleList(object):
     def __init__(self,
                  dataset,
                  sample_names,
                  this_trait,
-                 sample_group_type = "primary",
-                 header = "Samples"):
+                 sample_group_type="primary",
+                 header="Samples"):
 
         self.dataset = dataset
         self.this_trait = this_trait
         self.sample_group_type = sample_group_type    # primary or other
         self.header = header
 
-        self.sample_list = [] # The actual list
+        self.sample_list = []  # The actual list
         self.sample_attribute_values = {}
 
         self.get_attributes()
@@ -40,28 +33,37 @@ class SampleList(object):
         for counter, sample_name in enumerate(sample_names, 1):
             sample_name = sample_name.replace("_2nd_", "")
 
-            if type(self.this_trait) is list: # ZS: self.this_trait will be a list if it is a Temp trait
-                if counter <= len(self.this_trait) and str(self.this_trait[counter-1]).upper() != 'X':
-                    sample = webqtlCaseData.webqtlCaseData(name=sample_name, value=float(self.this_trait[counter-1]))
+            # ZS: self.this_trait will be a list if it is a Temp trait
+            if isinstance(self.this_trait, list):
+                if (counter <= len(self.this_trait) and
+                        str(self.this_trait[counter-1]).upper() != 'X'):
+                    sample = webqtlCaseData.webqtlCaseData(
+                        name=sample_name,
+                        value=float(self.this_trait[counter-1]))
                 else:
                     sample = webqtlCaseData.webqtlCaseData(name=sample_name)
             else:
-                # ZS - If there's no value for the sample/strain, create the sample object (so samples with no value are still displayed in the table)
+                # ZS - If there's no value for the sample/strain,
+                # create the sample object (so samples with no value
+                # are still displayed in the table)
                 try:
                     sample = self.this_trait.data[sample_name]
                 except KeyError:
                     sample = webqtlCaseData.webqtlCaseData(name=sample_name)
 
             sample.extra_info = {}
-            if self.dataset.group.name == 'AXBXA' and sample_name in ('AXB18/19/20','AXB13/14','BXA8/17'):
+            if (self.dataset.group.name == 'AXBXA' and
+                    sample_name in ('AXB18/19/20', 'AXB13/14', 'BXA8/17')):
                 sample.extra_info['url'] = "/mouseCross.html#AXB/BXA"
                 sample.extra_info['css_class'] = "fs12"
 
             sample.this_id = str(counter)
 
-            # ZS: For extra attribute columns; currently only used by several datasets
+            # ZS: For extra attribute columns; currently only used by
+            # several datasets
             if self.sample_attribute_values:
-                sample.extra_attributes = self.sample_attribute_values.get(sample_name, {})
+                sample.extra_attributes = self.sample_attribute_values.get(
+                    sample_name, {})
 
             self.sample_list.append(sample)
 
@@ -72,7 +74,8 @@ class SampleList(object):
         return "<SampleList> --> %s" % (pf(self.__dict__))
 
     def do_outliers(self):
-        values = [sample.value for sample in self.sample_list if sample.value != None]
+        values = [sample.value for sample in self.sample_list
+                  if sample.value is not None]
         upper_bound, lower_bound = Plot.find_outliers(values)
 
         for sample in self.sample_list:
@@ -100,9 +103,9 @@ class SampleList(object):
             key, name = attr
             self.attributes[key] = Bunch()
             self.attributes[key].name = name
-            self.attributes[key].distinct_values = [item.Value for item in values]
-            self.attributes[key].distinct_values.sort(key=natural_sort_key)
-
+            self.attributes[key].distinct_values = [
+                item.Value for item in values]
+            natural_sort(self.attributes[key].distinct_values)
             all_numbers = True
             for value in self.attributes[key].distinct_values:
                 try:
@@ -135,7 +138,8 @@ class SampleList(object):
                     attribute_value = item.Value
 
                     # ZS: If it's an int, turn it into one for sorting
-                    # (for example, 101 would be lower than 80 if they're strings instead of ints)
+                    # (for example, 101 would be lower than 80 if
+                    # they're strings instead of ints)
                     try:
                         attribute_value = int(attribute_value)
                     except ValueError:
@@ -144,11 +148,12 @@ class SampleList(object):
                     attribute_values[self.attributes[item.Id].name] = attribute_value
                 self.sample_attribute_values[sample_name] = attribute_values
 
-def natural_sort_key(x):
-    """Get expected results when using as a key for sort - ints or strings are sorted properly"""
-
-    try:
-        x = int(x)
-    except ValueError:
-        pass
-    return x
+def natural_sort(list, key=lambda s: s):
+    """
+    Sort the list into natural alphanumeric order.
+    """
+    def get_alphanum_key_func(key):
+        def convert(text): return int(text) if text.isdigit() else text
+        return lambda s: [convert(c) for c in re.split('([0-9]+)', key(s))]
+    sort_key = get_alphanum_key_func(key)
+    list.sort(key=sort_key)
diff --git a/wqflask/wqflask/show_trait/export_trait_data.py b/wqflask/wqflask/show_trait/export_trait_data.py
index a32e437b..48feb492 100644
--- a/wqflask/wqflask/show_trait/export_trait_data.py
+++ b/wqflask/wqflask/show_trait/export_trait_data.py
@@ -1,9 +1,7 @@
-from __future__ import print_function, division
-
 import simplejson as json
 
 from pprint import pformat as pf
-
+from functools import cmp_to_key
 from base.trait import create_trait
 from base import data_set
 
@@ -47,8 +45,8 @@ def get_export_metadata(trait_id, dataset_name):
 
 
 def dict_to_sorted_list(dictionary):
-    sorted_list = [item for item in dictionary.iteritems()]
-    sorted_list = sorted(sorted_list, cmp=cmp_samples)
+    sorted_list = [item for item in list(dictionary.items())]
+    sorted_list = sorted(sorted_list, key=cmp_to_key(cmp_samples))
     sorted_values = [item[1] for item in sorted_list]
     return sorted_values
 
@@ -71,4 +69,4 @@ def cmp_samples(a, b):
         else:
             return 1
     else:
-        return -1
\ No newline at end of file
+        return -1
diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py
index 30b03e66..edf9638c 100644
--- a/wqflask/wqflask/show_trait/show_trait.py
+++ b/wqflask/wqflask/show_trait/show_trait.py
@@ -1,9 +1,7 @@
-from __future__ import absolute_import, print_function, division
-
 import string
 import os
 import datetime
-import cPickle
+import pickle
 import uuid
 import requests
 import json as json
@@ -230,8 +228,8 @@ class ShowTrait(object):
         hddn = OrderedDict()
 
         if self.dataset.group.allsamples:
-            hddn['allsamples'] = string.join(self.dataset.group.allsamples, ' ')
-        hddn['primary_samples'] = string.join(self.primary_sample_names, ',')
+            hddn['allsamples'] = ''.join(self.dataset.group.allsamples)
+        hddn['primary_samples'] = ''.join(self.primary_sample_names)
         hddn['trait_id'] = self.trait_id
         hddn['trait_display_name'] = self.this_trait.display_name
         hddn['dataset'] = self.dataset.name
@@ -260,7 +258,7 @@ class ShowTrait(object):
         hddn['export_data'] = ""
         hddn['export_format'] = "excel"
         if len(self.scales_in_geno) < 2:
-            hddn['mapping_scale'] = self.scales_in_geno[self.scales_in_geno.keys()[0]][0][0]
+            hddn['mapping_scale'] = self.scales_in_geno[list(self.scales_in_geno.keys())[0]][0][0]
 
         # We'll need access to this_trait and hddn in the Jinja2 Template, so we put it inside self
         self.hddn = hddn
@@ -373,7 +371,7 @@ class ShowTrait(object):
             this_group = self.dataset.group.name
 
         # We're checking a string here!
-        assert isinstance(this_group, basestring), "We need a string type thing here"
+        assert isinstance(this_group, str), "We need a string type thing here"
         if this_group[:3] == 'BXD' and this_group != "BXD-Harvested":
             this_group = 'BXD'
 
@@ -409,9 +407,10 @@ class ShowTrait(object):
         if not self.temp_trait:
             other_sample_names = []
 
-            for sample in self.this_trait.data:
-                if self.this_trait.data[sample].name2 != self.this_trait.data[sample].name:
-                    if (self.this_trait.data[sample].name2 in primary_sample_names) and (self.this_trait.data[sample].name not in primary_sample_names):
+            for sample in list(self.this_trait.data.keys()):
+                if (self.this_trait.data[sample].name2 != self.this_trait.data[sample].name):
+                    if ((self.this_trait.data[sample].name2 in primary_sample_names) and
+                        (self.this_trait.data[sample].name not in primary_sample_names)):
                         primary_sample_names.append(self.this_trait.data[sample].name)
                         primary_sample_names.remove(self.this_trait.data[sample].name2)
 
@@ -568,7 +567,7 @@ def get_table_widths(sample_groups, has_num_cases=False):
 def has_num_cases(this_trait):
     has_n = False
     if this_trait.dataset.type != "ProbeSet" and this_trait.dataset.type != "Geno":
-        for name, sample in this_trait.data.iteritems():
+        for name, sample in list(this_trait.data.items()):
             if sample.num_cases:
                 has_n = True
                 break
@@ -625,7 +624,7 @@ def get_categorical_variables(this_trait, sample_list):
     if len(sample_list.attributes) > 0:
         for attribute in sample_list.attributes:
             attribute_vals = []
-            for sample_name in this_trait.data.keys():
+            for sample_name in list(this_trait.data.keys()):
                 if sample_list.attributes[attribute].name in this_trait.data[sample_name].extra_attributes:
                     attribute_vals.append(this_trait.data[sample_name].extra_attributes[sample_list.attributes[attribute].name])
                 else:
@@ -640,7 +639,7 @@ def get_categorical_variables(this_trait, sample_list):
 
 def get_genotype_scales(genofiles):
     geno_scales = {}
-    if type(genofiles) is list:
+    if isinstance(genofiles, list):
         for the_file in genofiles:
             file_location = the_file['location']
             geno_scales[file_location] = get_scales_from_genofile(file_location)
diff --git a/wqflask/wqflask/snp_browser/snp_browser.py b/wqflask/wqflask/snp_browser/snp_browser.py
index 1d28d76a..2df71b12 100644
--- a/wqflask/wqflask/snp_browser/snp_browser.py
+++ b/wqflask/wqflask/snp_browser/snp_browser.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
 from flask import Flask, g, url_for
 
 import string
@@ -458,8 +456,8 @@ class SnpBrowser(object):
 
                 function_list = []
                 if function_details:
-                    function_list = string.split(string.strip(function_details), ",")
-                    function_list = map(string.strip, function_list)
+                    function_list = function_details.strip().split(",")
+                    function_list = list(map(string.strip, function_list))
                     function_list[0] = function_list[0].title()
                     function_details = ", ".join(item for item in function_list)
                     function_details = function_details.replace("_", " ")
@@ -477,7 +475,7 @@ class SnpBrowser(object):
 
                 the_bases = []
                 for j, item in enumerate(allele_value_list):
-                    if item and isinstance(item, basestring):
+                    if item and isinstance(item, str):
                         this_base = [str(item), base_color_dict[item]]
                     else:
                         this_base = ""
@@ -612,7 +610,7 @@ class SnpBrowser(object):
             this_allele_list = []
 
             for item in self.allele_list:
-                if item and isinstance(item, basestring) and (item.lower() not in this_allele_list) and (item != "-"):
+                if item and isinstance(item, str) and (item.lower() not in this_allele_list) and (item != "-"):
                     this_allele_list.append(item.lower())
 
             total_allele_count = len(this_allele_list)
@@ -724,12 +722,12 @@ def get_effect_details_by_category(effect_name = None, effect_value = None):
     new_codon_group_list = ['Start Gained']
     codon_effect_group_list = ['Start Lost', 'Stop Gained', 'Stop Lost', 'Nonsynonymous', 'Synonymous']
 
-    effect_detail_list = string.split(string.strip(effect_value), '|')
-    effect_detail_list = map(string.strip, effect_detail_list)
+    effect_detail_list = effect_value.strip().split('|')
+    effect_detail_list = list(map(string.strip, effect_detail_list))
 
     for index, item in enumerate(effect_detail_list):
-        item_list = string.split(string.strip(item), ',')
-        item_list = map(string.strip, item_list)
+        item_list = item.strip().split(',')
+        item_list = list(map(string.strip, item_list))
 
         gene_id = item_list[0]
         gene_name = item_list[1]
@@ -748,13 +746,13 @@ def get_effect_details_by_category(effect_name = None, effect_value = None):
             if effect_name in new_codon_group_list:
                 new_codon = item_list[6]
                 tmp_list = [biotype, new_codon]
-                function_detail_list.append(string.join(tmp_list, ", "))
+                function_detail_list.append(", ".join(tmp_list))
             elif effect_name in codon_effect_group_list:
                 old_new_AA = item_list[6]
                 old_new_codon = item_list[7]
                 codon_num = item_list[8]
                 tmp_list = [biotype, old_new_AA, old_new_codon, codon_num]
-                function_detail_list.append(string.join(tmp_list, ", "))
+                function_detail_list.append(", ".join(tmp_list))
             else:
                 function_detail_list.append(biotype)
 
@@ -854,7 +852,7 @@ def get_gene_id_name_dict(species_id, gene_name_list):
     if len(gene_name_list) == 0:
         return ""
     gene_name_str_list = ["'" + gene_name + "'" for gene_name in gene_name_list]
-    gene_name_str = string.join(gene_name_str_list, ",")
+    gene_name_str = ",".join(gene_name_str_list)
 
     query = """
                 SELECT
diff --git a/wqflask/wqflask/submit_bnw.py b/wqflask/wqflask/submit_bnw.py
index 59e60dfd..a0e84c8c 100644
--- a/wqflask/wqflask/submit_bnw.py
+++ b/wqflask/wqflask/submit_bnw.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
 from base.trait import GeneralTrait
 from base import data_set
 from utility import helper_functions
@@ -8,4 +6,4 @@ import utility.logger
 logger = utility.logger.getLogger(__name__ )
 
 def get_bnw_input(start_vars):
-    logger.debug("BNW VARS:", start_vars)
\ No newline at end of file
+    logger.debug("BNW VARS:", start_vars)
diff --git a/wqflask/wqflask/templates/admin/manage_resource.html b/wqflask/wqflask/templates/admin/manage_resource.html
index 0b12eaae..33a37594 100644
--- a/wqflask/wqflask/templates/admin/manage_resource.html
+++ b/wqflask/wqflask/templates/admin/manage_resource.html
@@ -65,7 +65,7 @@
                         </tr>
                     </thead>
                     <tbody>
-                        {% for key, value in group_masks.iteritems() %}
+                        {% for key, value in group_masks.items() %}
                         <tr>
                             <td>{{ value.name }}</td>
                             <td>{{ value.data }}</td>
diff --git a/wqflask/wqflask/templates/correlation_page.html b/wqflask/wqflask/templates/correlation_page.html
index fb218e29..134f15be 100644
--- a/wqflask/wqflask/templates/correlation_page.html
+++ b/wqflask/wqflask/templates/correlation_page.html
@@ -170,7 +170,8 @@
                         {% endif %}
                         <td data-export="{{ trait.description_display }}">{% if trait.description_display|length > 70 %}{{ trait.description_display[:70] }}...{% else %}{{ trait.description_display }}{% endif %}</td>
                         {% if trait.authors %}
-                        <td data-export="{{ trait.authors }}">{% if trait.authors.split(',') > 6 %}{{ trait.authors.split(',')[:6]|join(', ') }}, et al.{% else %}{{ trait.authors }}{% endif %}</td>
+                        {% set authors_list = trait.authors.split(',') %}
+                        <td data-export="{{ trait.authors }}">{% if authors_list|length > 6 %}{{ authors_list[:6]|join(', ') }}, et al.{% else %}{{ trait.authors }}{% endif %}</td>
                         {% else %}
                         <td data-export="N/A">N/A</td>
                         {% endif %}
diff --git a/wqflask/wqflask/templates/loading.html b/wqflask/wqflask/templates/loading.html
index 15ab4080..9b335dfe 100644
--- a/wqflask/wqflask/templates/loading.html
+++ b/wqflask/wqflask/templates/loading.html
@@ -1,7 +1,7 @@
 <title>Loading {{ start_vars.tool_used }} Results</title>
 <link REL="stylesheet" TYPE="text/css" href="/static/packages/bootstrap/css/bootstrap.css" />
 <form method="post" action="" name="loading_form" id="loading_form" class="form-horizontal">
-  {% for key, value in start_vars.iteritems() %}
+  {% for key, value in start_vars.items() %}
   <input type="hidden" name="{{ key }}" value="{{ value }}">
   {% endfor %}
   <div class="container">
@@ -44,4 +44,4 @@
 
 $("#loading_form").attr("action", "{{ start_vars.form_url }}");
 setTimeout(function(){ $("#loading_form").submit()}, 350);
-</script>
\ No newline at end of file
+</script>
diff --git a/wqflask/wqflask/update_search_results.py b/wqflask/wqflask/update_search_results.py
index 68bea9d6..672f95b1 100644
--- a/wqflask/wqflask/update_search_results.py
+++ b/wqflask/wqflask/update_search_results.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, print_function, division
-
 import json
 
 from flask import Flask, g
diff --git a/wqflask/wqflask/user_login.py b/wqflask/wqflask/user_login.py
index 077a799b..f25ebc32 100644
--- a/wqflask/wqflask/user_login.py
+++ b/wqflask/wqflask/user_login.py
@@ -1,5 +1,3 @@
-from __future__ import print_function, division, absolute_import
-
 import os
 import hashlib
 import datetime
@@ -39,15 +37,13 @@ def basic_info():
                 ip_address = request.remote_addr,
                 user_agent = request.headers.get('User-Agent'))
 
-def encode_password(pass_gen_fields, unencrypted_password):
-    hashfunc = getattr(hashlib, pass_gen_fields['hashfunc'])
 
-    salt = base64.b64decode(pass_gen_fields['salt'])
+def encode_password(pass_gen_fields, unencrypted_password):
     encrypted_password = pbkdf2.pbkdf2_hex(str(unencrypted_password), 
-                                 pass_gen_fields['salt'], 
-                                 pass_gen_fields['iterations'], 
-                                 pass_gen_fields['keylength'], 
-                                 hashfunc)
+                                           pass_gen_fields['salt'], 
+                                           pass_gen_fields['iterations'], 
+                                           pass_gen_fields['keylength'], 
+                                           pass_gen_fields['hashfunc'])
 
     pass_gen_fields.pop("unencrypted_password", None)
     pass_gen_fields["password"] = encrypted_password
@@ -199,7 +195,7 @@ def login():
             if user_details:
                 submitted_password = params['password']
                 pwfields = user_details['password']
-                if type(pwfields) is str:
+                if isinstance(pwfields, str):
                     pwfields = json.loads(pwfields)
                 encrypted_pass_fields = encode_password(pwfields, submitted_password)
                 password_match = pbkdf2.safe_str_cmp(encrypted_pass_fields['password'], pwfields['password'])
@@ -478,4 +474,4 @@ def register():
 
 @app.errorhandler(401)
 def unauthorized(error):
-    return redirect(url_for('login'))
\ No newline at end of file
+    return redirect(url_for('login'))
diff --git a/wqflask/wqflask/user_manager.py b/wqflask/wqflask/user_manager.py
index a871e91a..7b25b68e 100644
--- a/wqflask/wqflask/user_manager.py
+++ b/wqflask/wqflask/user_manager.py
@@ -1,53 +1,63 @@
-from __future__ import print_function, division, absolute_import
-
 import os
 import hashlib
 import datetime
 import time
-import logging
 import uuid
-import hashlib
 import hmac
 import base64
-import urlparse
-
+import redis  # used for collections
 import simplejson as json
+import requests
 
-#from redis import StrictRedis
-import redis # used for collections
-Redis = redis.StrictRedis()
+from base.data_set import create_datasets_list
 
-from flask import (Flask, g, render_template, url_for, request, make_response,
-                   redirect, flash, abort)
+from flask import g
+from flask import render_template
+from flask import url_for
+from flask import request
+from flask import make_response
+from flask import redirect
+from flask import flash
 
 from wqflask import app
-from pprint import pformat as pf
-
-from wqflask import pbkdf2 # password hashing
+from wqflask import pbkdf2  # password hashing
 from wqflask.database import db_session
 from wqflask import model
 
-from utility import Bunch, Struct, after
+from smtplib import SMTP
+
+from pprint import pformat as pf
 
-import logging
+from utility import Bunch
+from utility import Struct
 from utility.logger import getLogger
-logger = getLogger(__name__)
 
-from base.data_set import create_datasets_list
+from utility.redis_tools import get_user_id
+from utility.redis_tools import get_user_by_unique_column
+from utility.redis_tools import set_user_attribute
+from utility.redis_tools import save_user
+from utility.redis_tools import save_verification_code
+from utility.redis_tools import check_verification_code
+from utility.redis_tools import get_user_collections
+from utility.redis_tools import save_collections
 
-import requests
+from utility.tools import SMTP_CONNECT
+from utility.tools import SMTP_USERNAME
+from utility.tools import SMTP_PASSWORD
 
-from utility.redis_tools import get_user_id, get_user_by_unique_column, set_user_attribute, save_user, save_verification_code, check_verification_code, get_user_collections, save_collections
 
-from smtplib import SMTP
-from utility.tools import SMTP_CONNECT, SMTP_USERNAME, SMTP_PASSWORD, LOG_SQL_ALCHEMY
+logger = getLogger(__name__)
+
+
+Redis = redis.StrictRedis()
 
 THREE_DAYS = 60 * 60 * 24 * 3
-#THREE_DAYS = 45
+
 
 def timestamp():
     return datetime.datetime.utcnow().isoformat()
 
+
 class AnonUser(object):
     """Anonymous user handling"""
     cookie_name = 'anon_user_v1'
@@ -230,29 +240,10 @@ class UserSession(object):
 
         return len(self.user_collections)
 
-###
-# ZS: This is currently not used, but I'm leaving it here commented out because the old "set superuser" code (at the bottom of this file) used it
-###
-#    @property
-#    def user_ob(self):
-#        """Actual sqlalchemy record"""
-#        # Only look it up once if needed, then store it
-#        # raise "OBSOLETE: use ElasticSearch instead"
-#        try:
-#            if LOG_SQL_ALCHEMY:
-#                logging.getLogger('sqlalchemy.pool').setLevel(logging.DEBUG)
-#
-#            # Already did this before
-#            return self.db_object
-#        except AttributeError:
-#            # Doesn't exist so we'll create it
-#            self.db_object = model.User.query.get(self.user_id)
-#            return self.db_object
-
     def add_collection(self, collection_name, traits):
         """Add collection into ElasticSearch"""
 
-        collection_dict = {'id': unicode(uuid.uuid4()),
+        collection_dict = {'id': str(uuid.uuid4()),
                            'name': collection_name,
                            'created_timestamp': datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'),
                            'changed_timestamp': datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'),
@@ -867,7 +858,7 @@ def forgot_password_submit():
     email_address = params['email_address']
     next_page = None
     if email_address != "":
-        logger.debug("Wants to send password E-mail to ",email_address)
+        logger.debug("Wants to send password E-mail to %s", email_address)
         user_details = get_user_by_unique_column("email_address", email_address)
         if user_details:
             ForgotPasswordEmail(user_details["email_address"])
diff --git a/wqflask/wqflask/user_session.py b/wqflask/wqflask/user_session.py
index 3aa2c151..9f4c5458 100644
--- a/wqflask/wqflask/user_session.py
+++ b/wqflask/wqflask/user_session.py
@@ -1,5 +1,3 @@
-from __future__ import print_function, division, absolute_import
-
 import datetime
 import time
 import uuid
@@ -129,10 +127,10 @@ class UserSession(object):
     @property
     def user_id(self):
         """Shortcut to the user_id"""
-        if 'user_id' not in self.record:
-            self.record['user_id'] = str(uuid.uuid4())
+        if b'user_id' not in self.record:
+            self.record[b'user_id'] = str(uuid.uuid4())
 
-        return self.record['user_id']
+        return self.record[b'user_id']
 
     @property
     def redis_user_id(self):
@@ -184,7 +182,7 @@ class UserSession(object):
     def add_collection(self, collection_name, traits):
         """Add collection into Redis"""
 
-        collection_dict = {'id': unicode(uuid.uuid4()),
+        collection_dict = {'id': str(uuid.uuid4()),
                            'name': collection_name,
                            'created_timestamp': datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'),
                            'changed_timestamp': datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'),
diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py
index 94ec7137..08673f79 100644
--- a/wqflask/wqflask/views.py
+++ b/wqflask/wqflask/views.py
@@ -2,8 +2,6 @@
 #
 # Main routing table for GN2
 
-from __future__ import absolute_import, division, print_function
-
 import traceback # for error page
 import os        # for error gifs
 import random    # for random error gif
@@ -14,13 +12,13 @@ import csv
 import simplejson as json
 import yaml
 import xlsxwriter
-import StringIO  # Todo: Use cStringIO?
+import io  # replaces Python 2's StringIO/cStringIO
 
 from zipfile import ZipFile, ZIP_DEFLATED
 
 import gc
 import numpy as np
-import cPickle as pickle
+import pickle
 import uuid
 
 import flask
@@ -54,7 +52,7 @@ from wqflask.docs import Docs, update_text
 from wqflask.db_info import InfoPage
 
 from utility import temp_data
-from utility.tools import SQL_URI,TEMPDIR,USE_REDIS,USE_GN_SERVER,GN_SERVER_URL,GN_VERSION,JS_TWITTER_POST_FETCHER_PATH,JS_GUIX_PATH, CSS_PATH
+from utility.tools import SQL_URI, TEMPDIR, USE_REDIS, USE_GN_SERVER, GN_SERVER_URL, GN_VERSION, JS_TWITTER_POST_FETCHER_PATH, JS_GUIX_PATH, CSS_PATH
 from utility.helper_functions import get_species_groups
 from utility.authentication_tools import check_resource_availability
 from utility.redis_tools import get_redis_conn
@@ -129,10 +127,10 @@ def handle_bad_request(e):
         list = [fn for fn in os.listdir("./wqflask/static/gif/error") if fn.endswith(".gif") ]
         animation = random.choice(list)
 
-    resp = make_response(render_template("error.html",message=err_msg,stack=formatted_lines,error_image=animation,version=GN_VERSION))
+    resp = make_response(render_template("error.html", message=err_msg, stack=formatted_lines, error_image=animation, version=GN_VERSION))
 
     # logger.error("Set cookie %s with %s" % (err_msg, animation))
-    resp.set_cookie(err_msg[:32],animation)
+    resp.set_cookie(err_msg[:32], animation)
     return resp
 
 @app.route("/authentication_needed")
@@ -166,7 +164,7 @@ def tmp_page(img_path):
     logger.info("initial_start_vars:", initial_start_vars)
     imgfile = open(GENERATED_IMAGE_DIR + img_path, 'rb')
     imgdata = imgfile.read()
-    imgB64 = imgdata.encode("base64")
+    imgB64 = base64.b64encode(imgdata)
     bytesarray = array.array('B', imgB64)
     return render_template("show_image.html",
                             img_base64 = bytesarray )
@@ -215,8 +213,6 @@ def search_page():
     result = the_search.__dict__
     valid_search = result['search_term_exists']
 
-    logger.debugf("result", result)
-
     if USE_REDIS and valid_search:
         Redis.set(key, pickle.dumps(result, pickle.HIGHEST_PROTOCOL))
         Redis.expire(key, 60*60)
@@ -264,7 +260,7 @@ def docedit():
 @app.route('/generated/<filename>')
 def generated_file(filename):
     logger.info(request.url)
-    return send_from_directory(GENERATED_IMAGE_DIR,filename)
+    return send_from_directory(GENERATED_IMAGE_DIR, filename)
 
 @app.route("/help")
 def help():
@@ -380,7 +376,7 @@ def export_trait_excel():
 
     logger.info("sample_data - type: %s -- size: %s" % (type(sample_data), len(sample_data)))
 
-    buff = StringIO.StringIO()
+    buff = io.BytesIO()
     workbook = xlsxwriter.Workbook(buff, {'in_memory': True})
     worksheet = workbook.add_worksheet()
     for i, row in enumerate(sample_data):
@@ -404,7 +400,7 @@ def export_trait_csv():
 
     logger.info("sample_data - type: %s -- size: %s" % (type(sample_data), len(sample_data)))
 
-    buff = StringIO.StringIO()
+    buff = io.StringIO()
     writer = csv.writer(buff)
     for row in sample_data:
         writer.writerow(row)
@@ -427,7 +423,7 @@ def export_traits_csv():
         now = datetime.datetime.now()
         time_str = now.strftime('%H:%M_%d%B%Y')
         filename = "export_{}".format(time_str)
-        memory_file = StringIO.StringIO()
+        memory_file = io.StringIO()
         with ZipFile(memory_file, mode='w', compression=ZIP_DEFLATED) as zf:
             for the_file in file_list:
                 zf.writestr(the_file[0], the_file[1])
@@ -470,7 +466,7 @@ def export_perm_data():
         ["#Comment: Results sorted from low to high peak linkage"]
     ]
 
-    buff = StringIO.StringIO()
+    buff = io.StringIO()
     writer = csv.writer(buff)
     writer.writerows(the_rows)
     for item in perm_info['perm_data']:
@@ -543,7 +539,7 @@ def heatmap_page():
 
             result = template_vars.__dict__
 
-            for item in template_vars.__dict__.keys():
+            for item in list(template_vars.__dict__.keys()):
                 logger.info("  ---**--- {}: {}".format(type(template_vars.__dict__[item]), item))
 
             pickled_result = pickle.dumps(result, pickle.HIGHEST_PROTOCOL)
@@ -647,7 +643,7 @@ def loading_page():
     if 'wanted_inputs' in initial_start_vars:
         wanted = initial_start_vars['wanted_inputs'].split(",")
         start_vars = {}
-        for key, value in initial_start_vars.iteritems():
+        for key, value in initial_start_vars.items():
             if key in wanted or key.startswith(('value:')):
                 start_vars[key] = value
 
@@ -747,7 +743,7 @@ def mapping_results_page():
         'transform'
     )
     start_vars = {}
-    for key, value in initial_start_vars.iteritems():
+    for key, value in initial_start_vars.items():
         if key in wanted or key.startswith(('value:')):
             start_vars[key] = value
     #logger.debug("Mapping called with start_vars:", start_vars)
@@ -794,7 +790,7 @@ def mapping_results_page():
                     logger.info("initial_start_vars:", initial_start_vars)
                     imgfile = open(TEMPDIR + img_path, 'rb')
                     imgdata = imgfile.read()
-                    imgB64 = imgdata.encode("base64")
+                    imgB64 = base64.b64encode(imgdata)
                     bytesarray = array.array('B', imgB64)
                     result['pair_scan_array'] = bytesarray
                     rendered_template = render_template("pair_scan_results.html", **result)
@@ -954,8 +950,8 @@ def json_default_handler(obj):
     if hasattr(obj, 'isoformat'):
         return obj.isoformat()
     # Handle integer keys for dictionaries
-    elif isinstance(obj, int):
-        return str(int)
+    elif isinstance(obj, int) or isinstance(obj, uuid.UUID):
+        return str(obj)
     # Handle custom objects
     if hasattr(obj, '__dict__'):
         return obj.__dict__
@@ -963,5 +959,5 @@ def json_default_handler(obj):
     #     logger.info("Not going to serialize Dataset")
     #    return None
     else:
-        raise TypeError, 'Object of type %s with value of %s is not JSON serializable' % (
-            type(obj), repr(obj))
+        raise TypeError('Object of type %s with value of %s is not JSON serializable' % (
+            type(obj), repr(obj)))
diff --git a/wqflask/wqflask/wgcna/wgcna_analysis.py b/wqflask/wqflask/wgcna/wgcna_analysis.py
index 70077703..6bf75216 100644
--- a/wqflask/wqflask/wgcna/wgcna_analysis.py
+++ b/wqflask/wqflask/wgcna/wgcna_analysis.py
@@ -3,21 +3,19 @@ WGCNA analysis for GN2
 
 Author / Maintainer: Danny Arends <Danny.Arends@gmail.com>
 """
+import base64
 import sys
-from numpy import *
-import scipy as sp                            # SciPy
 import rpy2.robjects as ro                    # R Objects
 import rpy2.rinterface as ri
 
+from array import array as arr
+from numpy import *
 from base.webqtlConfig import GENERATED_IMAGE_DIR
-from utility import webqtlUtil                # Random number for the image
-
-import base64
-import array
+from rpy2.robjects.packages import importr
 
+from utility import webqtlUtil                # Random number for the image
 from utility import helper_functions
 
-from rpy2.robjects.packages import importr
 utils = importr("utils")
 
 # Get pointers to some common R functions
@@ -71,8 +69,8 @@ class WGCNA(object):
         self.r_enableWGCNAThreads()
         self.trait_db_list = [trait.strip()
                               for trait in requestform['trait_list'].split(',')]
-        print("Retrieved phenotype data from database",
-              requestform['trait_list'])
+        print("Retrieved phenotype data from database",
+              requestform['trait_list'])
         helper_functions.get_trait_db_obs(self, self.trait_db_list)
 
         # self.input contains the phenotype values we need to send to R
@@ -127,14 +125,14 @@ class WGCNA(object):
             powers = [int(threshold.strip())
                       for threshold in requestform['SoftThresholds'].rstrip().split(",")]
             rpow = r_unlist(r_c(powers))
-            print("SoftThresholds: {} == {}".format(powers, rpow))
+            print(("SoftThresholds: {} == {}".format(powers, rpow)))
             self.sft = self.r_pickSoftThreshold(
                 rM, powerVector=rpow, verbose=5)
 
-            print("PowerEstimate: {}".format(self.sft[0]))
+            print(("PowerEstimate: {}".format(self.sft[0])))
             self.results['PowerEstimate'] = self.sft[0]
             if self.sft[0][0] is ri.NA_Integer:
-                print "No power is suitable for the analysis, just use 1"
+                print("No power is suitable for the analysis, just use 1")
                 # No power could be estimated
                 self.results['Power'] = 1
             else:
@@ -157,7 +155,7 @@ class WGCNA(object):
         self.results['network'] = network
 
         # How many modules and how many gene per module ?
-        print "WGCNA found {} modules".format(r_table(network[1]))
+        print("WGCNA found {} modules".format(r_table(network[1])))
         self.results['nmod'] = r_length(r_table(network[1]))[0]
 
         # The iconic WCGNA plot of the modules in the hanging tree
@@ -172,11 +170,11 @@ class WGCNA(object):
         sys.stdout.flush()
 
     def render_image(self, results):
-        print("pre-loading imgage results:", self.results['imgloc'])
+        print("pre-loading imgage results:", self.results['imgloc'])
         imgfile = open(self.results['imgloc'], 'rb')
         imgdata = imgfile.read()
-        imgB64 = imgdata.encode("base64")
-        bytesarray = array.array('B', imgB64)
+        imgB64 = base64.b64encode(imgdata)
+        bytesarray = arr('B', imgB64)
         self.results['imgdata'] = bytesarray
 
     def process_results(self, results):