author     Pjotr Prins  2020-07-25 08:24:33 +0100
committer  Pjotr Prins  2020-07-25 08:24:33 +0100
commit     9f8beacddb71aac9905c896b9d81caf45b4735a0 (patch)
tree       1f3ea2b1bdb6835a6c172b739e6872bf59af6181
parent     c249ba2ef7d691227da8864838dfc97db68d4084 (diff)
parent     f66da35a09cbb8da13cfb142cbe3ff208404970b (diff)
download   genenetwork2-9f8beacddb71aac9905c896b9d81caf45b4735a0.tar.gz
Merge branch 'testing' of github.com:genenetwork/genenetwork2 into testing
-rw-r--r--  .gitignore | 2
-rw-r--r--  test/__init__.py (renamed from wqflask/cfg/__init__.py) | 0
-rw-r--r--  test/requests/main_web_functionality.py | 3
-rw-r--r--  test/requests/mapping_tests.py | 12
-rwxr-xr-x  test/requests/test-website.py | 11
-rw-r--r--  test/unittest/__init__.py (renamed from wqflask/mock/__init__.py) | 0
-rw-r--r--  test/unittest/base/__init__.py | 0
-rw-r--r--  test/unittest/base/test_general_object.py | 21
-rw-r--r--  test/unittest/test_registration.py | 27
-rw-r--r--  wqflask/.coveragerc | 28
-rw-r--r--  wqflask/base/GeneralObject.py | 77
-rw-r--r--  wqflask/base/data_set.py | 307
-rw-r--r--  wqflask/base/trait.py | 19
-rw-r--r--  wqflask/cfg/default_settings.py | 1
-rw-r--r--  wqflask/mock/es_double.py | 15
-rw-r--r--  wqflask/tests/__init__.py | 0
-rw-r--r--  wqflask/tests/base/__init__.py | 0
-rw-r--r--  wqflask/tests/base/test_data_set.py | 35
-rw-r--r--  wqflask/tests/base/test_general_object.py | 41
-rw-r--r--  wqflask/tests/utility/__init__.py | 0
-rw-r--r--  wqflask/tests/utility/test_chunks.py | 19
-rw-r--r--  wqflask/tests/utility/test_corestats.py | 55
-rw-r--r--  wqflask/tests/utility/test_corr_result_helpers.py | 32
-rw-r--r--  wqflask/tests/utility/test_formatting.py | 33
-rw-r--r--  wqflask/utility/Plot.py | 2
-rw-r--r--  wqflask/utility/authentication_tools.py | 61
-rw-r--r--  wqflask/utility/chunks.py | 63
-rw-r--r--  wqflask/utility/corr_result_helpers.py | 26
-rw-r--r--  wqflask/utility/formatting.py | 29
-rw-r--r--  wqflask/utility/redis_tools.py | 5
-rw-r--r--  wqflask/utility/tools.py | 4
-rw-r--r--  wqflask/wqflask/__init__.py | 3
-rw-r--r--  wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py | 2
-rw-r--r--  wqflask/wqflask/correlation/show_corr_results.py | 2
-rw-r--r--  wqflask/wqflask/correlation_matrix/show_corr_matrix.py | 2
-rw-r--r--  wqflask/wqflask/export_traits.py | 119
-rw-r--r--  wqflask/wqflask/heatmap/heatmap.py | 2
-rw-r--r--  wqflask/wqflask/network_graph/network_graph.py | 2
-rw-r--r--  wqflask/wqflask/static/new/javascript/dataset_select_menu_orig.js | 9
-rw-r--r--  wqflask/wqflask/static/new/javascript/draw_corr_scatterplot.js | 5
-rw-r--r--  wqflask/wqflask/static/new/javascript/get_traits_from_collection.js | 48
-rw-r--r--  wqflask/wqflask/static/new/javascript/search_results.js | 16
-rw-r--r--  wqflask/wqflask/templates/base.html | 8
-rw-r--r--  wqflask/wqflask/templates/correlation_page.html | 6
-rw-r--r--  wqflask/wqflask/templates/gsearch_gene.html | 5
-rw-r--r--  wqflask/wqflask/templates/gsearch_pheno.html | 5
-rw-r--r--  wqflask/wqflask/templates/mapping_results.html | 1
-rw-r--r--  wqflask/wqflask/templates/search_result_page.html | 18
-rw-r--r--  wqflask/wqflask/templates/show_trait.html | 5
-rw-r--r--  wqflask/wqflask/templates/show_trait_calculate_correlations.html | 2
-rw-r--r--  wqflask/wqflask/templates/show_trait_details.html | 2
-rwxr-xr-x  wqflask/wqflask/templates/show_trait_mapping_tools.html | 10
-rw-r--r--  wqflask/wqflask/templates/snp_browser.html | 4
-rw-r--r--  wqflask/wqflask/views.py | 28
54 files changed, 752 insertions, 480 deletions
diff --git a/.gitignore b/.gitignore
index 701623e7..8183b308 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,3 +14,5 @@ dist/
 EGG-INFO/
 wqflask/output/*
 wqflask/wqflask/static/output/*
+wqflask/.coverage
+wqflask/coverage_html_report
\ No newline at end of file
diff --git a/wqflask/cfg/__init__.py b/test/__init__.py
index e69de29b..e69de29b 100644
--- a/wqflask/cfg/__init__.py
+++ b/test/__init__.py
diff --git a/test/requests/main_web_functionality.py b/test/requests/main_web_functionality.py
index 7b89b833..d070dab9 100644
--- a/test/requests/main_web_functionality.py
+++ b/test/requests/main_web_functionality.py
@@ -20,8 +20,9 @@ def check_search_page(host):
         , search_terms_or=""
         , search_terms_and="MEAN=(15 16) LRS=(23 46)")
     result = requests.get(host+"/search", params=data)
-    found = result.text.find("/show_trait?trait_id=1435395_s_at&dataset=HC_M2_0606_P")
+    found = result.text.find("records are shown below")
     assert(found >= 0)
+    assert(result.status_code == 200)
     print("OK")
     check_traits_page(host, "/show_trait?trait_id=1435395_s_at&dataset=HC_M2_0606_P")
 
diff --git a/test/requests/mapping_tests.py b/test/requests/mapping_tests.py
index 6de81bfe..5748a2a3 100644
--- a/test/requests/mapping_tests.py
+++ b/test/requests/mapping_tests.py
@@ -15,9 +15,9 @@ def check_R_qtl_tool_selection(host, data):
     print("")
     print("R/qtl mapping tool selection")
     headers = {"Content-Type": "application/x-www-form-urlencoded"}
-    page = requests.post(host+"/marker_regression", data=data, headers=headers)
+    page = requests.post(host+"/loading", data=data, headers=headers)
     doc = fromstring(page.text)
-    form = doc.forms[1]
+    form = doc.forms[0]
     assert form.fields["dataset"] == "HC_M2_0606_P"
     assert form.fields["value:BXD1"] == "15.034"
 
@@ -25,9 +25,9 @@ def check_CIM_tool_selection(host, data):
     print("")
     print("CIM mapping tool selection (using reaper)")
     data["method"] = "reaper"
-    page = requests.post(host+"/marker_regression", data=data)
+    page = requests.post(host+"/loading", data=data)
     doc = fromstring(page.text)
-    form = doc.forms[1]
+    form = doc.forms[0]
     assert form.fields["dataset"] == "HC_M2_0606_P"
     assert form.fields["value:BXD1"] == "15.034"
 
@@ -37,6 +37,6 @@ def check_mapping(args_obj, parser):
 
     host = args_obj.host
     data = load_data_from_file()
-    check_pylmm_tool_selection(host, copy.deepcopy(data))
-    check_R_qtl_tool_selection(host, copy.deepcopy(data)) ## Why does this fail?
+    # check_pylmm_tool_selection(host, copy.deepcopy(data)) ## Not defined
+    check_R_qtl_tool_selection(host, copy.deepcopy(data))
     check_CIM_tool_selection(host, copy.deepcopy(data))
diff --git a/test/requests/test-website.py b/test/requests/test-website.py
index b2e09bc4..74057e7f 100755
--- a/test/requests/test-website.py
+++ b/test/requests/test-website.py
@@ -104,16 +104,7 @@ parser.add_argument("-i", "--integration-tests", dest="accumulate"
                     , action="store_const", const=integration_tests, default=print_help
                     , help="Runs integration tests.")
 
-# Navigation tests deactivated since system relies on Javascript
-# parser.add_argument("-n", "--navigation", dest="accumulate"
-#                     , action="store_const", const=check_navigation, default=print_help
-#                     , help="Checks for navigation.")
-
-# parser.add_argument("-s", "--skip-broken", dest="accumulate"
-#                     , action="store_const", const=dummy, default=print_help
-#                     , help="Skip tests that are known to be broken.")
-
 args = parser.parse_args()
-# print("The arguments object: ", args)
+
 
 args.accumulate(args, parser)
diff --git a/wqflask/mock/__init__.py b/test/unittest/__init__.py
index e69de29b..e69de29b 100644
--- a/wqflask/mock/__init__.py
+++ b/test/unittest/__init__.py
diff --git a/test/unittest/base/__init__.py b/test/unittest/base/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/unittest/base/__init__.py
diff --git a/test/unittest/base/test_general_object.py b/test/unittest/base/test_general_object.py
new file mode 100644
index 00000000..699cb079
--- /dev/null
+++ b/test/unittest/base/test_general_object.py
@@ -0,0 +1,21 @@
+import unittest
+
+from base.GeneralObject import GeneralObject
+
+
+class TestGeneralObjectTests(unittest.TestCase):
+    """
+    Test the GeneralObject base class
+    """
+
+    def test_object_contents(self):
+        """Test whether base contents are stored properly"""
+        test_obj = GeneralObject("a", "b", "c")
+        self.assertEqual("abc", ''.join(test_obj.contents))
+
+    def test_object_dict(self):
+        """Test whether the base class is printed properly"""
+        test_obj = GeneralObject("a", name="test", value=1)
+        self.assertEqual(str(test_obj), "value = 1\nname = test\n")
+        self.assertEqual(
+            repr(test_obj), "value = 1\nname = test\ncontents = ['a']\n")
diff --git a/test/unittest/test_registration.py b/test/unittest/test_registration.py
deleted file mode 100644
index 98d0cdff..00000000
--- a/test/unittest/test_registration.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Run test with something like
-#
-# env GN2_PROFILE=~/opt/gn-latest GENENETWORK_FILES=$HOME/gn2_data ./bin/genenetwork2 ./etc/default_settings.py -c ../test/unittest/test_registration.py
-#
-
-import unittest
-import mock.es_double as es
-from wqflask.user_manager import RegisterUser
-
-class TestRegisterUser(unittest.TestCase):
-    def setUp(self):
-        self.es = es.ESDouble()
-
-    def testRegisterUserWithCorrectData(self):
-        data = {
-            "email_address": "user@example.com"
-            , "full_name": "A.N. Other"
-            , "organization": "Some Organisation"
-            , "password": "testing"
-            , "password_confirm": "testing"
-            , "es_connection": self.es
-        }
-        result = RegisterUser(data)
-        self.assertEqual(len(result.errors), 0, "Errors were not expected")
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/wqflask/.coveragerc b/wqflask/.coveragerc
new file mode 100644
index 00000000..939e51b9
--- /dev/null
+++ b/wqflask/.coveragerc
@@ -0,0 +1,28 @@
+[run]
+branch = True
+
+[report]
+omit =
+    */site-packages/*
+    tests/*
+# Regexes for lines to exclude from consideration
+exclude_lines =
+# Have to re-enable the standard pragma
+    pragma: no cover
+
+    # Don't complain about missing debug-only code:
+    def __repr__
+    if self\.debug
+
+    # Don't complain if tests don't hit defensive assertion code:
+    raise AssertionError
+    raise NotImplementedError
+
+    # Don't complain if non-runnable code isn't run:
+    if 0:
+    if __name__ == .__main__.:
+
+ignore_errors = False
+
+[html]
+directory = coverage_html_report
\ No newline at end of file
diff --git a/wqflask/base/GeneralObject.py b/wqflask/base/GeneralObject.py
index 02a1ef06..0fccaab3 100644
--- a/wqflask/base/GeneralObject.py
+++ b/wqflask/base/GeneralObject.py
@@ -25,44 +25,41 @@
 # Last updated by GeneNetwork Core Team 2010/10/20
 
 class GeneralObject:
-	"""
-	Base class to define an Object.
-	a = [Spam(1, 4), Spam(9, 3), Spam(4,6)]
-	a.sort(lambda x, y: cmp(x.eggs, y.eggs))
-	"""
+    """
+    Base class to define an Object.
+    a = [Spam(1, 4), Spam(9, 3), Spam(4,6)]
+    a.sort(lambda x, y: cmp(x.eggs, y.eggs))
+    """
 
-	def __init__(self, *args, **kw):
-		self.contents = list(args)
-		for name, value in kw.items():
-			setattr(self, name, value)
-			
-	def __setitem__(self, key, value):
-		setattr(self, key, value)
-		
-	def __getitem__(self, key):
-		return getattr(self, key)
-		
-	def __getattr__(self, key):
-		if key in self.__dict__.keys():
-			return self.__dict__[key]
-		else:
-			return eval("self.__dict__.%s" % key)
-			
-	def __len__(self):
-		return len(self.__dict__) - 1
-				
-	def __str__(self):
-		s = ''
-		for key in self.__dict__.keys():
-			if key != 'contents':
-				s += '%s = %s\n' % (key,self.__dict__[key])
-		return s
-	
-	def __repr__(self):
-		s = ''
-		for key in self.__dict__.keys():
-			s += '%s = %s\n' % (key,self.__dict__[key])
-		return s
-	
-	def __cmp__(self,other):
-		return len(self.__dict__.keys()).__cmp__(len(other.__dict__.keys()))
\ No newline at end of file
+    def __init__(self, *args, **kw):
+        self.contents = list(args)
+        for name, value in kw.items():
+            setattr(self, name, value)
+
+    def __setitem__(self, key, value):
+        setattr(self, key, value)
+
+    def __getitem__(self, key):
+        return getattr(self, key)
+
+    def __getattr__(self, key):
+        return eval("self.__dict__.%s" % key)
+
+    def __len__(self):
+        return len(self.__dict__) - 1
+
+    def __str__(self):
+        s = ''
+        for key in self.__dict__.keys():
+            if key != 'contents':
+                s += '%s = %s\n' % (key, self.__dict__[key])
+        return s
+
+    def __repr__(self):
+        s = ''
+        for key in self.__dict__.keys():
+            s += '%s = %s\n' % (key, self.__dict__[key])
+        return s
+
+    def __cmp__(self, other):
+        return len(self.__dict__.keys()).__cmp__(len(other.__dict__.keys()))
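For reference, a minimal usage sketch (not part of the commit) of how the reindented GeneralObject behaves, consistent with the new test_general_object.py tests added in this commit:

    from base.GeneralObject import GeneralObject

    obj = GeneralObject("a", name="test", value=1)  # positional args land in obj.contents
    obj.contents         # ['a']
    obj["value"]         # 1, since __getitem__ delegates to getattr
    obj["extra"] = 2     # __setitem__ delegates to setattr
    str(obj)             # attributes except 'contents', one per line, e.g. "value = 1\nname = test\nextra = 2\n"
    len(obj)             # 3, i.e. number of attributes excluding 'contents'
    # __cmp__ compares only the number of attributes, so two instances with the
    # same attribute count compare as equal regardless of their values.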
diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py
index 2272b6ee..5d562871 100644
--- a/wqflask/base/data_set.py
+++ b/wqflask/base/data_set.py
@@ -19,6 +19,23 @@
 # This module is used by GeneNetwork project (www.genenetwork.org)
 
 from __future__ import absolute_import, print_function, division
+from db.call import fetchall, fetchone, fetch1
+from utility.logger import getLogger
+from utility.tools import USE_GN_SERVER, USE_REDIS, flat_files, flat_file_exists, GN2_BASE_URL
+from db.gn_server import menu_main
+from pprint import pformat as pf
+from MySQLdb import escape_string as escape
+from maintenance import get_group_samplelists
+from utility.tools import locate, locate_ignore_error, flat_files
+from utility import gen_geno_ob
+from utility import chunks
+from utility.benchmark import Bench
+from utility import webqtlUtil
+from db import webqtlDatabaseFunction
+from base import species
+from base import webqtlConfig
+import reaper
+from flask import Flask, g
 import os
 import math
 import string
@@ -32,39 +49,17 @@ import cPickle as pickle
 import itertools
 
 from redis import Redis
-Redis = Redis()
-
-from flask import Flask, g
-
-import reaper
-
-from base import webqtlConfig
-from base import species
-from db import webqtlDatabaseFunction
-from utility import webqtlUtil
-from utility.benchmark import Bench
-from utility import chunks
-from utility import gen_geno_ob
-from utility.tools import locate, locate_ignore_error, flat_files
-
-from wqflask.api import gen_menu
-
-from maintenance import get_group_samplelists
 
-from MySQLdb import escape_string as escape
-from pprint import pformat as pf
-from db.gn_server import menu_main
-from db.call import fetchall,fetchone,fetch1
+r = Redis()
 
-from utility.tools import USE_GN_SERVER, USE_REDIS, flat_files, flat_file_exists, GN2_BASE_URL
-from utility.logger import getLogger
-logger = getLogger(__name__ )
+logger = getLogger(__name__)
 
 # Used by create_database to instantiate objects
 # Each subclass will add to this
 DS_NAME_MAP = {}
 
-def create_dataset(dataset_name, dataset_type = None, get_samplelist = True, group_name = None):
+
+def create_dataset(dataset_name, dataset_type=None, get_samplelist=True, group_name=None):
     if dataset_name == "Temp":
         dataset_type = "Temp"
 
@@ -78,9 +73,10 @@ def create_dataset(dataset_name, dataset_type = None, get_samplelist = True, group_name = None):
     else:
         return dataset_class(dataset_name, get_samplelist)
 
-class Dataset_Types(object):
 
-    def __init__(self):
+class DatasetType:
+
+    def __init__(self, redis_instance):
         """Create a dictionary of samples where the value is set to Geno,
 Publish or ProbeSet. E.g.
 
@@ -95,14 +91,15 @@ Publish or ProbeSet. E.g.
          'B139_K_1206_R': 'ProbeSet' ...
 
         """
+        self.redis_instance = redis_instance
         self.datasets = {}
-
-        data = Redis.get("dataset_structure")
+        data = redis_instance.get("dataset_structure")
         if data:
             self.datasets = json.loads(data)
-        else: #ZS: I don't think this should ever run unless Redis is emptied
+        else:  # ZS: I don't think this should ever run unless Redis is emptied
             try:
-                data = json.loads(requests.get(GN2_BASE_URL + "/api/v_pre1/gen_dropdown", timeout = 5).content)
+                data = json.loads(requests.get(
+                    GN2_BASE_URL + "/api/v_pre1/gen_dropdown", timeout=5).content)
                 for species in data['datasets']:
                     for group in data['datasets'][species]:
                         for dataset_type in data['datasets'][species][group]:
@@ -118,10 +115,10 @@ Publish or ProbeSet. E.g.
             except:
                 pass
 
-            Redis.set("dataset_structure", json.dumps(self.datasets))
+            redis_instance.set("dataset_structure", json.dumps(self.datasets))
 
         # Set LOG_LEVEL_DEBUG=5 to see the following:
-        logger.debugf(5, "datasets",self.datasets)
+        logger.debugf(5, "datasets", self.datasets)
 
     def __call__(self, name):
         if name not in self.datasets:
@@ -137,7 +134,7 @@ Publish or ProbeSet. E.g.
             results = g.db.execute(mrna_expr_query).fetchall()
             if len(results):
                 self.datasets[name] = "ProbeSet"
-                Redis.set("dataset_structure", json.dumps(self.datasets))
+                redis_instance.set("dataset_structure", json.dumps(self.datasets))
                 return self.datasets[name]
 
             group_name = name.replace("Publish", "")
@@ -151,10 +148,10 @@ Publish or ProbeSet. E.g.
             results = g.db.execute(pheno_query).fetchall()
             if len(results):
                 self.datasets[name] = "Publish"
-                Redis.set("dataset_structure", json.dumps(self.datasets))
+                redis_instance.set("dataset_structure", json.dumps(self.datasets))
                 return self.datasets[name]
 
-            #ZS: For when there isn't an InfoFiles ID; not sure if this and the preceding query are both necessary
+            # ZS: For when there isn't an InfoFiles ID; not sure if this and the preceding query are both necessary
             other_pheno_query = """SELECT PublishFreeze.Name
                                    FROM PublishFreeze, InbredSet
                                    WHERE InbredSet.Name = '{}' AND
@@ -163,10 +160,10 @@ Publish or ProbeSet. E.g.
             results = g.db.execute(other_pheno_query).fetchall()
             if len(results):
                 self.datasets[name] = "Publish"
-                Redis.set("dataset_structure", json.dumps(self.datasets))
+                redis_instance.set("dataset_structure", json.dumps(self.datasets))
                 return self.datasets[name]
 
-            geno_query =    """
+            geno_query = """
                                 SELECT
                                     GenoFreeze.Id
                                 FROM
@@ -178,21 +175,23 @@ Publish or ProbeSet. E.g.
             results = g.db.execute(geno_query).fetchall()
             if len(results):
                 self.datasets[name] = "Geno"
-                Redis.set("dataset_structure", json.dumps(self.datasets))
+                self.redis_instance.set("dataset_structure", json.dumps(self.datasets))
                 return self.datasets[name]
 
-            #ZS: It shouldn't ever reach this
+            # ZS: It shouldn't ever reach this
             return None
         else:
             return self.datasets[name]
 
+
 # Do the intensive work at startup one time only
-Dataset_Getter = Dataset_Types()
+Dataset_Getter = DatasetType(r)
+
 
 def create_datasets_list():
     if USE_REDIS:
         key = "all_datasets"
-        result = Redis.get(key)
+        result = r.get(key)
 
         if result:
             logger.debug("Redis cache hit")
@@ -208,16 +207,16 @@ def create_datasets_list():
             for dataset_type in type_dict:
                 query = "SELECT Name FROM {}".format(type_dict[dataset_type])
                 for result in fetchall(query):
-                    #The query at the beginning of this function isn't
-                    #necessary here, but still would rather just reuse
-                    #it logger.debug("type: {}\tname:
-                    #{}".format(dataset_type, result.Name))
+                    # The query at the beginning of this function isn't
+                    # necessary here, but still would rather just reuse
+                    # it logger.debug("type: {}\tname:
+                    # {}".format(dataset_type, result.Name))
                     dataset = create_dataset(result.Name, dataset_type)
                     datasets.append(dataset)
 
         if USE_REDIS:
-            Redis.set(key, pickle.dumps(datasets, pickle.HIGHEST_PROTOCOL))
-            Redis.expire(key, 60*60)
+            r.set(key, pickle.dumps(datasets, pickle.HIGHEST_PROTOCOL))
+            r.expire(key, 60*60)
 
     return datasets
 
@@ -238,8 +237,9 @@ def mescape(*items):
 
 class Markers(object):
     """Todo: Build in cacheing so it saves us reading the same file more than once"""
+
     def __init__(self, name):
-        json_data_fh = open(locate(name + ".json",'genotype/json'))
+        json_data_fh = open(locate(name + ".json", 'genotype/json'))
 
         markers = []
         with open("%s/%s_snps.txt" % (flat_files('genotype/bimbam'), name), 'r') as bimbam_fh:
@@ -271,7 +271,7 @@ class Markers(object):
 
         if type(p_values) is list:
             # THIS IS only needed for the case when we are limiting the number of p-values calculated
-            #if len(self.markers) > len(p_values):
+            # if len(self.markers) > len(p_values):
             #    self.markers = self.markers[:len(p_values)]
 
             for marker, p_value in itertools.izip(self.markers, p_values):
@@ -283,7 +283,7 @@ class Markers(object):
                     marker['lrs_value'] = 0
                 else:
                     marker['lod_score'] = -math.log10(marker['p_value'])
-                    #Using -log(p) for the LRS; need to ask Rob how he wants to get LRS from p-values
+                    # Using -log(p) for the LRS; need to ask Rob how he wants to get LRS from p-values
                     marker['lrs_value'] = -math.log10(marker['p_value']) * 4.61
         elif type(p_values) is dict:
             filtered_markers = []
@@ -298,18 +298,20 @@ class Markers(object):
                         marker['lrs_value'] = 0
                     else:
                         marker['lod_score'] = -math.log10(marker['p_value'])
-                        #Using -log(p) for the LRS; need to ask Rob how he wants to get LRS from p-values
-                        marker['lrs_value'] = -math.log10(marker['p_value']) * 4.61
+                        # Using -log(p) for the LRS; need to ask Rob how he wants to get LRS from p-values
+                        marker['lrs_value'] = - \
+                            math.log10(marker['p_value']) * 4.61
                     filtered_markers.append(marker)
-                #else:
+                # else:
                     #logger.debug("marker {} NOT in p_values".format(i))
-                    #self.markers.remove(marker)
+                    # self.markers.remove(marker)
                     #del self.markers[i]
             self.markers = filtered_markers
 
+
 class HumanMarkers(Markers):
 
-    def __init__(self, name, specified_markers = []):
+    def __init__(self, name, specified_markers=[]):
         marker_data_fh = open(flat_files('mapping') + '/' + name + '.bim')
         self.markers = []
         for line in marker_data_fh:
@@ -332,7 +334,6 @@ class HumanMarkers(Markers):
 
         #logger.debug("markers is: ", pf(self.markers))
 
-
     def add_pvalues(self, p_values):
         super(HumanMarkers, self).add_pvalues(p_values)
 
@@ -345,12 +346,15 @@ class DatasetGroup(object):
     has multiple datasets associated with it.
 
     """
+
     def __init__(self, dataset, name=None):
         """This sets self.group and self.group_id"""
         if name == None:
-            self.name, self.id, self.genetic_type = fetchone(dataset.query_for_group)
+            self.name, self.id, self.genetic_type = fetchone(
+                dataset.query_for_group)
         else:
-            self.name, self.id, self.genetic_type = fetchone("SELECT InbredSet.Name, InbredSet.Id, InbredSet.GeneticType FROM InbredSet where Name='%s'" % name)
+            self.name, self.id, self.genetic_type = fetchone(
+                "SELECT InbredSet.Name, InbredSet.Id, InbredSet.GeneticType FROM InbredSet where Name='%s'" % name)
         if self.name == 'BXD300':
             self.name = "BXD"
 
@@ -369,7 +373,8 @@ class DatasetGroup(object):
 
     def get_mapping_methods(self):
 
-        mapping_id = g.db.execute("select MappingMethodId from InbredSet where Name= '%s'" % self.name).fetchone()[0]
+        mapping_id = g.db.execute(
+            "select MappingMethodId from InbredSet where Name= '%s'" % self.name).fetchone()[0]
         if mapping_id == "1":
             mapping_names = ["GEMMA", "QTLReaper", "R/qtl"]
         elif mapping_id == "2":
@@ -426,22 +431,23 @@ class DatasetGroup(object):
         result = None
         key = "samplelist:v3:" + self.name
         if USE_REDIS:
-            result = Redis.get(key)
+            result = r.get(key)
 
         if result is not None:
             self.samplelist = json.loads(result)
         else:
             logger.debug("Cache not hit")
 
-            genotype_fn = locate_ignore_error(self.name+".geno",'genotype')
+            genotype_fn = locate_ignore_error(self.name+".geno", 'genotype')
             if genotype_fn:
-                self.samplelist = get_group_samplelists.get_samplelist("geno", genotype_fn)
+                self.samplelist = get_group_samplelists.get_samplelist(
+                    "geno", genotype_fn)
             else:
                 self.samplelist = None
 
             if USE_REDIS:
-                Redis.set(key, json.dumps(self.samplelist))
-                Redis.expire(key, 60*5)
+                r.set(key, json.dumps(self.samplelist))
+                r.expire(key, 60*5)
 
     def all_samples_ordered(self):
         result = []
@@ -451,15 +457,16 @@ class DatasetGroup(object):
 
     def read_genotype_file(self, use_reaper=False):
         '''Read genotype from .geno file instead of database'''
-        #genotype_1 is Dataset Object without parents and f1
-        #genotype_2 is Dataset Object with parents and f1 (not for intercross)
+        # genotype_1 is Dataset Object without parents and f1
+        # genotype_2 is Dataset Object with parents and f1 (not for intercross)
 
         #genotype_1 = reaper.Dataset()
 
         # reaper barfs on unicode filenames, so here we ensure it's a string
         if self.genofile:
-            if "RData" in self.genofile: #ZS: This is a temporary fix; I need to change the way the JSON files that point to multiple genotype files are structured to point to other file types like RData
-                full_filename = str(locate(self.genofile.split(".")[0] + ".geno", 'genotype'))
+            if "RData" in self.genofile:  # ZS: This is a temporary fix; I need to change the way the JSON files that point to multiple genotype files are structured to point to other file types like RData
+                full_filename = str(
+                    locate(self.genofile.split(".")[0] + ".geno", 'genotype'))
             else:
                 full_filename = str(locate(self.genofile, 'genotype'))
         else:
@@ -472,11 +479,12 @@ class DatasetGroup(object):
             genotype_1 = gen_geno_ob.genotype(full_filename)
 
         if genotype_1.type == "group" and self.parlist:
-            genotype_2 = genotype_1.add(Mat=self.parlist[0], Pat=self.parlist[1])       #, F1=_f1)
+            genotype_2 = genotype_1.add(
+                Mat=self.parlist[0], Pat=self.parlist[1])  # , F1=_f1)
         else:
             genotype_2 = genotype_1
 
-        #determine default genotype object
+        # determine default genotype object
         if self.incparentsf1 and genotype_1.type != "intercross":
             genotype = genotype_2
         else:
@@ -487,7 +495,8 @@ class DatasetGroup(object):
 
         return genotype
 
-def datasets(group_name, this_group = None):
+
+def datasets(group_name, this_group=None):
     key = "group_dataset_menu:v2:" + group_name
     dataset_menu = []
     the_results = fetchall('''
@@ -510,12 +519,13 @@ def datasets(group_name, this_group = None):
             and InbredSet.Name like %s
           ORDER BY Tissue.Name, ProbeSetFreeze.OrderList DESC)
         ''' % (group_name,
-              group_name,
-              "'" + group_name + "'"))
+               group_name,
+               "'" + group_name + "'"))
 
     sorted_results = sorted(the_results, key=lambda kv: kv[0])
 
-    pheno_inserted = False #ZS: This is kind of awkward, but need to ensure Phenotypes show up before Genotypes in dropdown
+    # ZS: This is kind of awkward, but need to ensure Phenotypes show up before Genotypes in dropdown
+    pheno_inserted = False
     geno_inserted = False
     for dataset_item in sorted_results:
         tissue_name = dataset_item[0]
@@ -523,13 +533,16 @@ def datasets(group_name, this_group = None):
         dataset_short = dataset_item[2]
         if tissue_name in ['#PublishFreeze', '#GenoFreeze']:
             if tissue_name == '#PublishFreeze' and (dataset_short == group_name + 'Publish'):
-                dataset_menu.insert(0, dict(tissue=None, datasets=[(dataset, dataset_short)]))
+                dataset_menu.insert(
+                    0, dict(tissue=None, datasets=[(dataset, dataset_short)]))
                 pheno_inserted = True
             elif pheno_inserted and tissue_name == '#GenoFreeze':
-                dataset_menu.insert(1, dict(tissue=None, datasets=[(dataset, dataset_short)]))
+                dataset_menu.insert(
+                    1, dict(tissue=None, datasets=[(dataset, dataset_short)]))
                 geno_inserted = True
             else:
-                dataset_menu.append(dict(tissue=None, datasets=[(dataset, dataset_short)]))
+                dataset_menu.append(
+                    dict(tissue=None, datasets=[(dataset, dataset_short)]))
         else:
             tissue_already_exists = False
             for i, tissue_dict in enumerate(dataset_menu):
@@ -542,11 +555,11 @@ def datasets(group_name, this_group = None):
                 dataset_menu[i]['datasets'].append((dataset, dataset_short))
             else:
                 dataset_menu.append(dict(tissue=tissue_name,
-                                    datasets=[(dataset, dataset_short)]))
+                                         datasets=[(dataset, dataset_short)]))
 
     if USE_REDIS:
-        Redis.set(key, pickle.dumps(dataset_menu, pickle.HIGHEST_PROTOCOL))
-        Redis.expire(key, 60*5)
+        r.set(key, pickle.dumps(dataset_menu, pickle.HIGHEST_PROTOCOL))
+        r.expire(key, 60*5)
 
     if this_group != None:
         this_group._datasets = dataset_menu
@@ -554,6 +567,7 @@ def datasets(group_name, this_group = None):
     else:
         return dataset_menu
 
+
 class DataSet(object):
     """
     DataSet class defines a dataset in webqtl, can be either Microarray,
@@ -561,7 +575,7 @@ class DataSet(object):
 
     """
 
-    def __init__(self, name, get_samplelist = True, group_name = None):
+    def __init__(self, name, get_samplelist=True, group_name=None):
 
         assert name, "Need a name"
         self.name = name
@@ -569,22 +583,23 @@ class DataSet(object):
         self.shortname = None
         self.fullname = None
         self.type = None
-        self.data_scale = None #ZS: For example log2
+        self.data_scale = None  # ZS: For example log2
 
         self.setup()
 
-        if self.type == "Temp": #Need to supply group name as input if temp trait
-            self.group = DatasetGroup(self, name=group_name)   # sets self.group and self.group_id and gets genotype
+        if self.type == "Temp":  # Need to supply group name as input if temp trait
+            # sets self.group and self.group_id and gets genotype
+            self.group = DatasetGroup(self, name=group_name)
         else:
             self.check_confidentiality()
             self.retrieve_other_names()
-            self.group = DatasetGroup(self)   # sets self.group and self.group_id and gets genotype
+            # sets self.group and self.group_id and gets genotype
+            self.group = DatasetGroup(self)
             self.accession_id = self.get_accession_id()
         if get_samplelist == True:
-             self.group.get_samplelist()
+            self.group.get_samplelist()
         self.species = species.TheSpecies(self)
 
-
     def get_desc(self):
         """Gets overridden later, at least for Temp...used by trait's get_given_name"""
         return None
@@ -644,8 +659,9 @@ class DataSet(object):
     WHERE ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id
     AND ProbeFreeze.TissueId = Tissue.Id
     AND (ProbeSetFreeze.Name = '%s' OR ProbeSetFreeze.FullName = '%s' OR ProbeSetFreeze.ShortName = '%s')
-                """ % (query_args),"/dataset/"+self.name+".json",
-            lambda r: (r["id"],r["name"],r["full_name"],r["short_name"],r["data_scale"],r["tissue"])
+                """ % (query_args), "/dataset/"+self.name+".json",
+                    lambda r: (r["id"], r["name"], r["full_name"],
+                               r["short_name"], r["data_scale"], r["tissue"])
                 )
             else:
                 query_args = tuple(escape(x) for x in (
@@ -662,7 +678,8 @@ class DataSet(object):
                     """ % (query_args))
 
         except TypeError:
-            logger.debug("Dataset {} is not yet available in GeneNetwork.".format(self.name))
+            logger.debug(
+                "Dataset {} is not yet available in GeneNetwork.".format(self.name))
             pass
 
     def get_trait_data(self, sample_list=None):
@@ -720,7 +737,7 @@ class DataSet(object):
                         and {}.Id = {}XRef.{}Id
                         order by {}.Id
                         """.format(*mescape(self.type, self.type, self.type, self.name,
-                                    dataset_type, self.type, dataset_type, dataset_type))
+                                            dataset_type, self.type, dataset_type, dataset_type))
             else:
                 query += """
                         WHERE {}XRef.{}FreezeId = {}Freeze.Id
@@ -728,7 +745,7 @@ class DataSet(object):
                         and {}.Id = {}XRef.{}Id
                         order by {}.Id
                         """.format(*mescape(self.type, self.type, self.type, self.type,
-                                   self.name, dataset_type, self.type, self.type, dataset_type))
+                                            self.name, dataset_type, self.type, self.type, dataset_type))
 
             #logger.debug("trait data query: ", query)
 
@@ -748,6 +765,7 @@ class DataSet(object):
                 self.trait_data[trait_name] += (
                     trait_sample_data[chunk_counter][trait_counter][data_start_pos:])
 
+
 class PhenotypeDataSet(DataSet):
     DS_NAME_MAP['Publish'] = 'PhenotypeDataSet'
 
@@ -757,16 +775,16 @@ class PhenotypeDataSet(DataSet):
 
         # Fields in the database table
         self.search_fields = ['Phenotype.Post_publication_description',
-                            'Phenotype.Pre_publication_description',
-                            'Phenotype.Pre_publication_abbreviation',
-                            'Phenotype.Post_publication_abbreviation',
-                            'PublishXRef.mean',
-                            'Phenotype.Lab_code',
-                            'Publication.PubMed_ID',
-                            'Publication.Abstract',
-                            'Publication.Title',
-                            'Publication.Authors',
-                            'PublishXRef.Id']
+                              'Phenotype.Pre_publication_description',
+                              'Phenotype.Pre_publication_abbreviation',
+                              'Phenotype.Post_publication_abbreviation',
+                              'PublishXRef.mean',
+                              'Phenotype.Lab_code',
+                              'Publication.PubMed_ID',
+                              'Publication.Abstract',
+                              'Publication.Title',
+                              'Publication.Authors',
+                              'PublishXRef.Id']
 
         # Figure out what display_fields is
         self.display_fields = ['name', 'group_code',
@@ -788,13 +806,13 @@ class PhenotypeDataSet(DataSet):
 
         # Fields displayed in the search results table header
         self.header_fields = ['Index',
-                            'Record',
-                            'Description',
-                            'Authors',
-                            'Year',
-                            'Max LRS',
-                            'Max LRS Location',
-                            'Additive Effect']
+                              'Record',
+                              'Description',
+                              'Authors',
+                              'Year',
+                              'Max LRS',
+                              'Max LRS Location',
+                              'Additive Effect']
 
         self.type = 'Publish'
 
@@ -812,7 +830,7 @@ class PhenotypeDataSet(DataSet):
         # (Urgently?) Need to write this
         pass
 
-    def get_trait_info(self, trait_list, species = ''):
+    def get_trait_info(self, trait_list, species=''):
         for this_trait in trait_list:
 
             if not this_trait.haveinfo:
@@ -820,9 +838,9 @@ class PhenotypeDataSet(DataSet):
 
             description = this_trait.post_publication_description
 
-            #If the dataset is confidential and the user has access to confidential
-            #phenotype traits, then display the pre-publication description instead
-            #of the post-publication description
+            # If the dataset is confidential and the user has access to confidential
+            # phenotype traits, then display the pre-publication description instead
+            # of the post-publication description
             if this_trait.confidential:
                 this_trait.description_display = ""
                 continue   # for now, because no authorization features
@@ -847,7 +865,7 @@ class PhenotypeDataSet(DataSet):
             if this_trait.pubmed_id:
                 this_trait.pubmed_link = webqtlConfig.PUBMEDLINK_URL % this_trait.pubmed_id
 
-            #LRS and its location
+            # LRS and its location
             this_trait.LRS_score_repr = "N/A"
             this_trait.LRS_location_repr = "N/A"
 
@@ -867,7 +885,8 @@ class PhenotypeDataSet(DataSet):
                         LRS_Mb = result[1]
 
                         this_trait.LRS_score_repr = LRS_score_repr = '%3.1f' % this_trait.lrs
-                        this_trait.LRS_location_repr = LRS_location_repr = 'Chr%s: %.6f' % (LRS_Chr, float(LRS_Mb))
+                        this_trait.LRS_location_repr = LRS_location_repr = 'Chr%s: %.6f' % (
+                            LRS_Chr, float(LRS_Mb))
 
     def retrieve_sample_data(self, trait):
         query = """
@@ -934,12 +953,13 @@ class GenotypeDataSet(DataSet):
                 this_trait.retrieveInfo()
 
             if this_trait.chr and this_trait.mb:
-                this_trait.location_repr = 'Chr%s: %.6f' % (this_trait.chr, float(this_trait.mb) )
+                this_trait.location_repr = 'Chr%s: %.6f' % (
+                    this_trait.chr, float(this_trait.mb))
 
     def retrieve_sample_data(self, trait):
         query = """
                     SELECT
-                            Strain.Name, GenoData.value, GenoSE.error, GenoData.Id, Strain.Name2
+                            Strain.Name, GenoData.value, GenoSE.error, "N/A", Strain.Name2
                     FROM
                             (GenoData, GenoFreeze, Strain, Geno, GenoXRef)
                     left join GenoSE on
@@ -1003,14 +1023,14 @@ class MrnaAssayDataSet(DataSet):
 
         # Fields displayed in the search results table header
         self.header_fields = ['Index',
-                             'Record',
-                             'Symbol',
-                             'Description',
-                             'Location',
-                             'Mean',
-                             'Max LRS',
-                             'Max LRS Location',
-                             'Additive Effect']
+                              'Record',
+                              'Symbol',
+                              'Description',
+                              'Location',
+                              'Mean',
+                              'Max LRS',
+                              'Max LRS Location',
+                              'Additive Effect']
 
         # Todo: Obsolete or rename this field
         self.type = 'ProbeSet'
@@ -1026,7 +1046,6 @@ class MrnaAssayDataSet(DataSet):
                                 ProbeSetFreeze.Name = "%s"
                 ''' % escape(self.name)
 
-
     def check_confidentiality(self):
         return geno_mrna_confidentiality(self)
 
@@ -1044,10 +1063,12 @@ class MrnaAssayDataSet(DataSet):
             if not this_trait.symbol:
                 this_trait.symbol = "N/A"
 
-            #XZ, 12/08/2008: description
-            #XZ, 06/05/2009: Rob asked to add probe target description
-            description_string = unicode(str(this_trait.description).strip(codecs.BOM_UTF8), 'utf-8')
-            target_string = unicode(str(this_trait.probe_target_description).strip(codecs.BOM_UTF8), 'utf-8')
+            # XZ, 12/08/2008: description
+            # XZ, 06/05/2009: Rob asked to add probe target description
+            description_string = unicode(
+                str(this_trait.description).strip(codecs.BOM_UTF8), 'utf-8')
+            target_string = unicode(
+                str(this_trait.probe_target_description).strip(codecs.BOM_UTF8), 'utf-8')
 
             if len(description_string) > 1 and description_string != 'None':
                 description_display = description_string
@@ -1062,11 +1083,12 @@ class MrnaAssayDataSet(DataSet):
             this_trait.description_display = description_display
 
             if this_trait.chr and this_trait.mb:
-                this_trait.location_repr = 'Chr%s: %.6f' % (this_trait.chr, float(this_trait.mb))
+                this_trait.location_repr = 'Chr%s: %.6f' % (
+                    this_trait.chr, float(this_trait.mb))
 
-            #Get mean expression value
+            # Get mean expression value
             query = (
-            """select ProbeSetXRef.mean from ProbeSetXRef, ProbeSet
+                """select ProbeSetXRef.mean from ProbeSetXRef, ProbeSet
                 where ProbeSetXRef.ProbeSetFreezeId = %s and
                 ProbeSet.Id = ProbeSetXRef.ProbeSetId and
                 ProbeSet.Name = '%s'
@@ -1082,11 +1104,11 @@ class MrnaAssayDataSet(DataSet):
             if mean:
                 this_trait.mean = "%2.3f" % mean
 
-            #LRS and its location
+            # LRS and its location
             this_trait.LRS_score_repr = 'N/A'
             this_trait.LRS_location_repr = 'N/A'
 
-            #Max LRS and its Locus location
+            # Max LRS and its Locus location
             if this_trait.lrs and this_trait.locus:
                 query = """
                     select Geno.Chr, Geno.Mb from Geno, Species
@@ -1100,18 +1122,22 @@ class MrnaAssayDataSet(DataSet):
                 if result:
                     lrs_chr, lrs_mb = result
                     this_trait.LRS_score_repr = '%3.1f' % this_trait.lrs
-                    this_trait.LRS_location_repr = 'Chr%s: %.6f' % (lrs_chr, float(lrs_mb))
+                    this_trait.LRS_location_repr = 'Chr%s: %.6f' % (
+                        lrs_chr, float(lrs_mb))
 
         return trait_list
 
     def retrieve_sample_data(self, trait):
         query = """
                     SELECT
-                            Strain.Name, ProbeSetData.value, ProbeSetSE.error, ProbeSetData.Id, Strain.Name2
+                            Strain.Name, ProbeSetData.value, ProbeSetSE.error, NStrain.count, Strain.Name2
                     FROM
                             (ProbeSetData, ProbeSetFreeze, Strain, ProbeSet, ProbeSetXRef)
                     left join ProbeSetSE on
                             (ProbeSetSE.DataId = ProbeSetData.Id AND ProbeSetSE.StrainId = ProbeSetData.StrainId)
+                    left join NStrain on
+                            (NStrain.DataId = ProbeSetData.Id AND
+                            NStrain.StrainId = ProbeSetData.StrainId)
                     WHERE
                             ProbeSet.Name = '%s' AND ProbeSetXRef.ProbeSetId = ProbeSet.Id AND
                             ProbeSetXRef.ProbeSetFreezeId = ProbeSetFreeze.Id AND
@@ -1161,7 +1187,6 @@ class TempDataSet(DataSet):
         self.fullname = 'Temporary Storage'
         self.shortname = 'Temp'
 
-
     @staticmethod
     def handle_pca(desc):
         if 'PCA' in desc:
@@ -1202,7 +1227,7 @@ def geno_mrna_confidentiality(ob):
     #logger.debug("dataset_table [%s]: %s" % (type(dataset_table), dataset_table))
 
     query = '''SELECT Id, Name, FullName, confidentiality,
-                        AuthorisedUsers FROM %s WHERE Name = "%s"''' % (dataset_table,ob.name)
+                        AuthorisedUsers FROM %s WHERE Name = "%s"''' % (dataset_table, ob.name)
     logger.sql(query)
     result = g.db.execute(query)
 
diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py
index 8e11c11d..7666348e 100644
--- a/wqflask/base/trait.py
+++ b/wqflask/base/trait.py
@@ -152,7 +152,7 @@ class GeneralTrait(object):
         '''Return a text formatted alias'''
 
         alias = 'Not available'
-        if self.alias:
+        if getattr(self, "alias", None):
             alias = string.replace(self.alias, ";", " ")
             alias = string.join(string.split(alias), ", ")
 
@@ -395,20 +395,24 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False):
             query = """
                     SELECT
                             PublishXRef.Id, InbredSet.InbredSetCode, Publication.PubMed_ID,
-                            Phenotype.Pre_publication_description, Phenotype.Post_publication_description, Phenotype.Original_description,
-                            Phenotype.Pre_publication_abbreviation, Phenotype.Post_publication_abbreviation, PublishXRef.mean,
+                            CAST(Phenotype.Pre_publication_description AS BINARY),
+                            CAST(Phenotype.Post_publication_description AS BINARY),
+                            CAST(Phenotype.Original_description AS BINARY),
+                            CAST(Phenotype.Pre_publication_abbreviation AS BINARY),
+                            CAST(Phenotype.Post_publication_abbreviation AS BINARY), PublishXRef.mean,
                             Phenotype.Lab_code, Phenotype.Submitter, Phenotype.Owner, Phenotype.Authorized_Users,
-                            Publication.Authors, Publication.Title, Publication.Abstract,
-                            Publication.Journal, Publication.Volume, Publication.Pages,
+                            CAST(Publication.Authors AS BINARY), CAST(Publication.Title AS BINARY), CAST(Publication.Abstract AS BINARY),
+                            CAST(Publication.Journal AS BINARY), Publication.Volume, Publication.Pages,
                             Publication.Month, Publication.Year, PublishXRef.Sequence,
                             Phenotype.Units, PublishXRef.comments
                     FROM
-                            PublishXRef, Publication, Phenotype, PublishFreeze
+                            PublishXRef, Publication, Phenotype, PublishFreeze, InbredSet
                     WHERE
                             PublishXRef.Id = %s AND
                             Phenotype.Id = PublishXRef.PhenotypeId AND
                             Publication.Id = PublishXRef.PublicationId AND
                             PublishXRef.InbredSetId = PublishFreeze.InbredSetId AND
+                            PublishXRef.InbredSetId = InbredSet.Id AND
                             PublishFreeze.Id = %s
                     """ % (trait.name, dataset.id)
 
@@ -462,9 +466,6 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False):
         trait.haveinfo = True
         for i, field in enumerate(dataset.display_fields):
             holder =  trait_info[i]
-            # if isinstance(trait_info[i], basestring):
-            #     holder = unicode(holder.strip(codecs.BOM_UTF8), 'utf-8', "ignore")
-
             setattr(trait, field, holder)
 
         if dataset.type == 'Publish':
diff --git a/wqflask/cfg/default_settings.py b/wqflask/cfg/default_settings.py
deleted file mode 100644
index 5af61d5a..00000000
--- a/wqflask/cfg/default_settings.py
+++ /dev/null
@@ -1 +0,0 @@
-# no longer in use
diff --git a/wqflask/mock/es_double.py b/wqflask/mock/es_double.py
deleted file mode 100644
index 6ef8a1b9..00000000
--- a/wqflask/mock/es_double.py
+++ /dev/null
@@ -1,15 +0,0 @@
-class ESDouble(object):
-    def __init__(self):
-        self.items = {}
-
-    def ping(self):
-        return true
-
-    def create(self, index, doc_type, body, id):
-        self.items["index"] = {doc_type: {"id": id, "_source": data}}
-
-    def search(self, index, doc_type, body):
-        return {
-            "hits": {
-                "hits": self.items[index][doc_type][body]
-            }}
diff --git a/wqflask/tests/__init__.py b/wqflask/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/wqflask/tests/__init__.py
diff --git a/wqflask/tests/base/__init__.py b/wqflask/tests/base/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/wqflask/tests/base/__init__.py
diff --git a/wqflask/tests/base/test_data_set.py b/wqflask/tests/base/test_data_set.py
new file mode 100644
index 00000000..44a54c7e
--- /dev/null
+++ b/wqflask/tests/base/test_data_set.py
@@ -0,0 +1,35 @@
+import unittest
+import mock
+
+from wqflask import app
+
+from base.data_set import DatasetType
+
+    
+class TestDataSetTypes(unittest.TestCase):
+    def setUp(self):
+        self.app_context = app.app_context()
+        self.app_context.push()
+
+    def tearDown(self):
+        self.app_context.pop()
+
+    @mock.patch('base.data_set.g')
+    def test_data_set_type(self, db_mock):
+        with app.app_context():
+            db_mock.get = mock.Mock()
+            r = mock.Mock()
+            r.get.return_value = """
+            {
+                "AD-cases-controls-MyersGeno": "Geno",
+                "AD-cases-controls-MyersPublish": "Publish",
+                "AKXDGeno": "Geno",
+                "AXBXAGeno": "Geno",
+                "AXBXAPublish": "Publish",
+                "Aging-Brain-UCIPublish": "Publish",
+                "All Phenotypes": "Publish",
+                "B139_K_1206_M": "ProbeSet",
+                "B139_K_1206_R": "ProbeSet"
+            }
+            """
+            self.assertEqual(DatasetType(r)("All Phenotypes"), "Publish")
diff --git a/wqflask/tests/base/test_general_object.py b/wqflask/tests/base/test_general_object.py
new file mode 100644
index 00000000..c7701021
--- /dev/null
+++ b/wqflask/tests/base/test_general_object.py
@@ -0,0 +1,41 @@
+import unittest
+
+from base.GeneralObject import GeneralObject
+
+
+class TestGeneralObjectTests(unittest.TestCase):
+    """
+    Test the GeneralObject base class
+    """
+
+    def test_object_contents(self):
+        """Test whether base contents are stored properly"""
+        test_obj = GeneralObject("a", "b", "c")
+        self.assertEqual("abc", ''.join(test_obj.contents))
+        self.assertEqual(len(test_obj), 0)
+
+    def test_object_dict(self):
+        """Test whether the base class is printed properly"""
+        test_obj = GeneralObject("a", name="test", value=1)
+        self.assertEqual(str(test_obj), "value = 1\nname = test\n")
+        self.assertEqual(
+            repr(test_obj), "value = 1\nname = test\ncontents = ['a']\n")
+        self.assertEqual(len(test_obj), 2)
+        self.assertEqual(test_obj["value"], 1)
+        test_obj["test"] = 1
+        self.assertEqual(test_obj["test"], 1)
+
+    def test_get_attribute(self):
+        "Test that getattr works"
+        test_obj = GeneralObject("a", name="test", value=1)
+        self.assertEqual(getattr(test_obj, "value", None), 1)
+        self.assertEqual(getattr(test_obj, "non-existent", None), None)
+
+    def test_object_comparisons(self):
+        "Test that 2 objects of the same length are equal"
+        test_obj1 = GeneralObject("a", name="test", value=1)
+        test_obj2 = GeneralObject("b", name="test2", value=2)
+        test_obj3 = GeneralObject("a", name="test", x=1, y=2)
+        self.assertTrue(test_obj1 == test_obj2 )
+        self.assertFalse(test_obj1 == test_obj3 )
+
diff --git a/wqflask/tests/utility/__init__.py b/wqflask/tests/utility/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/wqflask/tests/utility/__init__.py
diff --git a/wqflask/tests/utility/test_chunks.py b/wqflask/tests/utility/test_chunks.py
new file mode 100644
index 00000000..8d90a1ec
--- /dev/null
+++ b/wqflask/tests/utility/test_chunks.py
@@ -0,0 +1,19 @@
+"""Test chunking"""
+
+import unittest
+
+from utility.chunks import divide_into_chunks
+
+
+class TestChunks(unittest.TestCase):
+    "Test Utility method for chunking"
+    def test_divide_into_chunks(self):
+        "Check that a list is chunked correctly"
+        self.assertEqual(divide_into_chunks([1, 2, 7, 3, 22, 8, 5, 22, 333], 3),
+                         [[1, 2, 7], [3, 22, 8], [5, 22, 333]])
+        self.assertEqual(divide_into_chunks([1, 2, 7, 3, 22, 8, 5, 22, 333], 4),
+                         [[1, 2, 7], [3, 22, 8], [5, 22, 333]])
+        self.assertEqual(divide_into_chunks([1, 2, 7, 3, 22, 8, 5, 22, 333], 5),
+                         [[1, 2], [7, 3], [22, 8], [5, 22], [333]])
+        self.assertEqual(divide_into_chunks([], 5),
+                         [[]])
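divide_into_chunks itself is not shown in this diff; as a rough sketch (under the assumption, implied by the assertions above, that it splits a list into consecutive chunks of ceil(len/n) items, so the returned number of chunks can be smaller than requested), one consistent implementation would be:

    import math

    def divide_into_chunks(the_list, number_chunks):
        """Split the_list into consecutive chunks of ceil(len/number_chunks) items."""
        length = len(the_list)
        if length == 0:
            return [[]]
        chunk_size = int(math.ceil(length / float(number_chunks)))
        return [the_list[i:i + chunk_size]
                for i in range(0, length, chunk_size)]

Note that wqflask/utility/chunks.py is also touched by this commit (63 lines changed), so the shipped implementation may differ in detail.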
diff --git a/wqflask/tests/utility/test_corestats.py b/wqflask/tests/utility/test_corestats.py
new file mode 100644
index 00000000..cf91a248
--- /dev/null
+++ b/wqflask/tests/utility/test_corestats.py
@@ -0,0 +1,55 @@
+"""Test Core Stats"""
+
+import unittest
+
+from utility.corestats import Stats
+
+
+class TestChunks(unittest.TestCase):
+    "Test Utility method for chunking"
+
+    def setUp(self):
+        self.stat_test = Stats((x for x in range(1, 11)))
+
+    def test_stats_sum(self):
+        """ Test sequence sum """
+        self.assertEqual(self.stat_test.sum(), 55)
+        self.stat_test = Stats([])
+        self.assertEqual(self.stat_test.sum(), None)
+
+    def test_stats_count(self):
+        """ Test sequence count """
+        self.assertEqual(self.stat_test.count(), 10)
+        self.stat_test = Stats([])
+        self.assertEqual(self.stat_test.count(), 0)
+
+    def test_stats_min(self):
+        """ Test min value in sequence"""
+        self.assertEqual(self.stat_test.min(), 1)
+        self.stat_test = Stats([])
+        self.assertEqual(self.stat_test.min(), None)
+
+    def test_stats_max(self):
+        """ Test max value in sequence """
+        self.assertEqual(self.stat_test.max(), 10)
+        self.stat_test = Stats([])
+        self.assertEqual(self.stat_test.max(), None)
+
+    def test_stats_avg(self):
+        """ Test avg of sequence """
+        self.assertEqual(self.stat_test.avg(), 5.5)
+        self.stat_test = Stats([])
+        self.assertEqual(self.stat_test.avg(), None)
+
+    def test_stats_stdev(self):
+        """ Test standard deviation of sequence """
+        self.assertEqual(self.stat_test.stdev(), 3.0276503540974917)
+        self.stat_test = Stats([])
+        self.assertEqual(self.stat_test.stdev(), None)
+
+    def test_stats_percentile(self):
+        """ Test percentile of sequence """
+        self.assertEqual(self.stat_test.percentile(20), 3.0)
+        self.assertEqual(self.stat_test.percentile(101), None)
+        self.stat_test = Stats([])
+        self.assertEqual(self.stat_test.percentile(20), None)
diff --git a/wqflask/tests/utility/test_corr_result_helpers.py b/wqflask/tests/utility/test_corr_result_helpers.py
new file mode 100644
index 00000000..e196fbdf
--- /dev/null
+++ b/wqflask/tests/utility/test_corr_result_helpers.py
@@ -0,0 +1,32 @@
+""" Test correlation helper methods """
+
+import unittest
+from utility.corr_result_helpers import normalize_values, common_keys, normalize_values_with_samples
+
+
+class TestCorrelationHelpers(unittest.TestCase):
+    """Test methods for normalising lists"""
+
+    def test_normalize_values(self):
+        """Test that a list is normalised correctly"""
+        self.assertEqual(
+            normalize_values([2.3, None, None, 3.2, 4.1, 5], [
+                             3.4, 7.2, 1.3, None, 6.2, 4.1]),
+            ([2.3, 4.1, 5], [3.4, 6.2, 4.1], 3)
+        )
+
+    def test_common_keys(self):
+        """Test that common keys are returned as a list"""
+        a = dict(BXD1=9.113, BXD2=9.825, BXD14=8.985, BXD15=9.300)
+        b = dict(BXD1=9.723, BXD3=9.825, BXD14=9.124, BXD16=9.300)
+        self.assertEqual(sorted(common_keys(a, b)), ['BXD1', 'BXD14'])
+
+    def test_normalize_values_with_samples(self):
+        """Test that a sample (dict) is normalised correctly"""
+        self.assertEqual(
+            normalize_values_with_samples(
+                dict(BXD1=9.113, BXD2=9.825, BXD14=8.985,
+                     BXD15=9.300, BXD20=9.300),
+                dict(BXD1=9.723, BXD3=9.825, BXD14=9.124, BXD16=9.300)),
+            (({'BXD1': 9.113, 'BXD14': 8.985}, {'BXD1': 9.723, 'BXD14': 9.124}, 2))
+        )
diff --git a/wqflask/tests/utility/test_formatting.py b/wqflask/tests/utility/test_formatting.py
new file mode 100644
index 00000000..9d3033d1
--- /dev/null
+++ b/wqflask/tests/utility/test_formatting.py
@@ -0,0 +1,33 @@
+import unittest
+from utility.formatting import numify, commify
+
+
+class TestFormatting(unittest.TestCase):
+    """Test formatting numbers by numifying or commifying"""
+
+    def test_numify(self):
+        "Test that a number is correctly converted to an English readable string"
+        self.assertEqual(numify(1, 'item', 'items'),
+                         'one item')
+        self.assertEqual(numify(2, 'book'), 'two')
+        self.assertEqual(numify(2, 'book', 'books'), 'two books')
+        self.assertEqual(numify(0, 'book', 'books'), 'zero books')
+        self.assertEqual(numify(0), 'zero')
+        self.assertEqual(numify(5), 'five')
+        self.assertEqual(numify(14, 'book', 'books'), '14 books')
+        self.assertEqual(numify(999, 'book', 'books'), '999 books')
+        self.assertEqual(numify(1000000, 'book', 'books'), '1,000,000 books')
+        self.assertEqual(numify(1956), '1956')
+
+    def test_commify(self):
+        "Test that commas are added correctly"
+        self.assertEqual(commify(1), '1')
+        self.assertEqual(commify(123), '123')
+        self.assertEqual(commify(1234), '1234')
+        self.assertEqual(commify(12345), '12,345')
+        self.assertEqual(commify(1234567890), '1,234,567,890')
+        self.assertEqual(commify(123.0), '123.0')
+        self.assertEqual(commify(1234.5), '1234.5')
+        self.assertEqual(commify(1234.56789), '1234.56789')
+        self.assertEqual(commify(123456.789), '123,456.789')
+        self.assertEqual(commify(None), None)
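
The commify expectations encode a quirk worth noting: thousands separators are only added once the integer part is longer than four digits (1234 stays '1234', 12345 becomes '12,345'), and None passes through unchanged. A sketch consistent with these expectations; the real utility.formatting.commify may be implemented differently:

    def commify(n):
        # Separators only when the integer part has more than four digits.
        if n is None:
            return None
        int_part, _, frac_part = str(n).partition('.')
        if len(int_part) <= 4:
            return str(n)
        return '{:,}'.format(int(int_part)) + ('.' + frac_part if frac_part else '')
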
diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py
index cce8435d..9bc84d22 100644
--- a/wqflask/utility/Plot.py
+++ b/wqflask/utility/Plot.py
@@ -35,8 +35,6 @@ import sys, os
 from numarray import linear_algebra as la
 from numarray import ones, array, dot, swapaxes
 
-import reaper
-
 import webqtlUtil
 import corestats
 from base import webqtlConfig
diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py
index f9028f32..ed7462d1 100644
--- a/wqflask/utility/authentication_tools.py
+++ b/wqflask/utility/authentication_tools.py
@@ -6,7 +6,7 @@ import requests
 from base import data_set, webqtlConfig
 
 from utility import hmac
-from utility.redis_tools import get_redis_conn, get_resource_info, get_resource_id
+from utility.redis_tools import get_redis_conn, get_resource_info, get_resource_id, add_resource
 Redis = get_redis_conn()
 
 from flask import Flask, g, redirect, url_for
@@ -16,13 +16,7 @@ logger = logging.getLogger(__name__ )
 
 def check_resource_availability(dataset, trait_id=None):
 
-    #ZS: Check if super-user - we should probably come up with some way to integrate this into the proxy
-    if g.user_session.user_id in Redis.smembers("super_users"):
-       return webqtlConfig.SUPER_PRIVILEGES
-
-    response = None
-
-    #At least for now assume temporary entered traits are accessible#At least for now assume temporary entered traits are accessible
+    #At least for now assume temporary entered traits are accessible
     if type(dataset) == str:
         return webqtlConfig.DEFAULT_PRIVILEGES
     if dataset.type == "Temp":
@@ -33,9 +27,13 @@ def check_resource_availability(dataset, trait_id=None):
     if resource_id:
         resource_info = get_resource_info(resource_id)
         if not resource_info:
-            return webqtlConfig.DEFAULT_PRIVILEGES
-    else:
-        return response #ZS: Need to substitute in something that creates the resource in Redis later
+            resource_info = add_new_resource(dataset, trait_id)
+
+    #ZS: Check if super-user - we should probably come up with some way to integrate this into the proxy
+    if g.user_session.user_id in Redis.smembers("super_users"):
+       return webqtlConfig.SUPER_PRIVILEGES
+
+    response = None
 
     the_url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id)
     try:
@@ -43,10 +41,43 @@ def check_resource_availability(dataset, trait_id=None):
     except:
         response = resource_info['default_mask']
 
-    if response:
-        return response
-    else: #ZS: No idea how this would happen, but just in case
-        return False
+    return response
+
+def add_new_resource(dataset, trait_id=None):
+    resource_ob = {
+        'owner_id'    : webqtlConfig.DEFAULT_OWNER_ID,
+        'default_mask': webqtlConfig.DEFAULT_PRIVILEGES,
+        'group_masks' : {}
+    }
+
+    if dataset.type == "Publish":
+        resource_ob['name'] = get_group_code(dataset) + "_" + str(trait_id)
+        resource_ob['data'] = {
+            'dataset': dataset.id,
+            'trait'  : trait_id
+        }
+        resource_ob['type'] = 'dataset-publish'
+    elif dataset.type == "Geno":
+        resource_ob['name'] = dataset.name
+        resource_ob['data'] = {
+            'dataset': dataset.id
+        }
+        resource_ob['type'] = 'dataset-geno'
+    else:
+        resource_ob['name'] = dataset.name
+        resource_ob['data'] = {
+            'dataset': dataset.id
+        }
+        resource_ob['type'] = 'dataset-probeset'
+
+    resource_info = add_resource(resource_ob, update=False)
+
+    return resource_info
+
+def get_group_code(dataset):
+    results = g.db.execute("SELECT InbredSetCode from InbredSet where Name='{}'".format(dataset.group.name)).fetchone()
+
+    return results[0]
 
 def check_admin(resource_id=None):
     the_url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id)
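
get_group_code interpolates the group name directly into the SQL string. A hedged alternative, assuming g.db is the usual SQLAlchemy connection/session behind the raw execute() call above, binds it as a parameter instead:

    from sqlalchemy import text

    def get_group_code(dataset):
        # Same lookup as above, with the group name bound as a parameter.
        results = g.db.execute(
            text("SELECT InbredSetCode FROM InbredSet WHERE Name = :name"),
            {"name": dataset.group.name}).fetchone()
        return results[0]
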
diff --git a/wqflask/utility/chunks.py b/wqflask/utility/chunks.py
index b0e33c08..d91b9bf4 100644
--- a/wqflask/utility/chunks.py
+++ b/wqflask/utility/chunks.py
@@ -31,66 +31,3 @@ def divide_into_chunks(the_list, number_chunks):
         chunks.append(the_list[counter:counter+chunksize])
 
     return chunks
-
-def _confirm_chunk(original, result):
-    all_chunked = []
-    for chunk in result:
-        all_chunked.extend(chunk)
-    print("length of all chunked:", len(all_chunked))
-    assert original == all_chunked, "You didn't chunk right"
-
-
-def _chunk_test(divide_func):
-    import random
-    random.seed(7)
-
-    number_exact = 0
-    total_amount_off = 0
-
-    for test in range(1, 1001):
-        print("\n\ntest:", test)
-        number_chunks = random.randint(1, 20)
-        number_elements = random.randint(0, 100)
-        the_list = list(range(1, number_elements))
-        result = divide_func(the_list, number_chunks)
-
-        print("Dividing list of length {} into approximately {} chunks - got {} chunks".format(
-            len(the_list), number_chunks, len(result)))
-        print("result:", result)
-
-        _confirm_chunk(the_list, result)
-
-        amount_off = abs(number_chunks - len(result))
-        if amount_off == 0:
-            number_exact += 1
-        else:
-            total_amount_off += amount_off
-
-
-        print("\n{} exact out of {}    [Total amount off: {}]".format(number_exact,
-                                                                      test,
-                                                                      total_amount_off))
-    assert number_exact == 558
-    assert total_amount_off == 1580
-    return number_exact, total_amount_off
-
-
-def _main():
-    info = dict()
-    #funcs = (("sam", sam_divide_into_chunks), ("zach", zach_divide_into_chunks))
-    funcs = (("only one", divide_into_chunks),)
-    for name, func in funcs:
-        start = time.time()
-        number_exact, total_amount_off = _chunk_test(func)
-        took = time.time() - start
-        info[name] = dict(number_exact=number_exact,
-                          total_amount_off=total_amount_off,
-                          took=took)
-
-    print("info is:", info)
-
-if __name__ == '__main__':
-    _main()
-    print("\nConfirming doctests...")
-    import doctest
-    doctest.testmod()
diff --git a/wqflask/utility/corr_result_helpers.py b/wqflask/utility/corr_result_helpers.py
index b543c589..ea3ababf 100644
--- a/wqflask/utility/corr_result_helpers.py
+++ b/wqflask/utility/corr_result_helpers.py
@@ -14,15 +14,11 @@ def normalize_values(a_values, b_values):
     min_length = min(len(a_values), len(b_values))
     a_new = []
     b_new = []
-    for counter in range(min_length):
-        if (a_values[counter] or a_values[counter] == 0) and (b_values[counter] or b_values[counter] == 0):
-            a_new.append(a_values[counter])
-            b_new.append(b_values[counter])
-
-    num_overlap = len(a_new)
-    assert num_overlap == len(b_new), "Lengths should be the same"
-
-    return a_new, b_new, num_overlap
+    for a, b in zip(a_values, b_values):
+        if a is not None and b is not None:
+            a_new.append(a)
+            b_new.append(b)
+    return a_new, b_new, len(a_new)
 
 
 def common_keys(a_samples, b_samples):
@@ -37,20 +33,10 @@ def common_keys(a_samples, b_samples):
 
 def normalize_values_with_samples(a_samples, b_samples):
     common_samples = common_keys(a_samples, b_samples)
-
     a_new = {}
     b_new = {}
     for sample in common_samples:
         a_new[sample] = a_samples[sample]
         b_new[sample] = b_samples[sample]
 
-    num_overlap = len(a_new)
-    assert num_overlap == len(b_new), "Lengths should be the same"
-
-    return a_new, b_new, num_overlap
-
-
-
-if __name__ == '__main__':
-    import doctest
-    doctest.testmod()
\ No newline at end of file
+    return a_new, b_new, len(a_new)
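
With the zip()-based rewrite, a pair is kept only when both values are present (zero is still a valid value), and zip() already truncates to the shorter list, so the min_length computed above is no longer needed. Per the new unit test:

    normalize_values([2.3, None, None, 3.2, 4.1, 5],
                     [3.4, 7.2, 1.3, None, 6.2, 4.1])
    # -> ([2.3, 4.1, 5], [3.4, 6.2, 4.1], 3)
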
diff --git a/wqflask/utility/formatting.py b/wqflask/utility/formatting.py
index e53dda22..1da3e9b7 100644
--- a/wqflask/utility/formatting.py
+++ b/wqflask/utility/formatting.py
@@ -28,21 +28,20 @@ def numify(number, singular=None, plural=None):
     '12,334 hippopotami'
 
     """
-    num_repr = {1 : "one",
-                2 : "two",
-                3 : "three",
-                4 : "four",
-                5 : "five",
-                6 : "six",
-                7 : "seven",
-                8 : "eight",
-                9 : "nine",
-                10 : "ten",
-                11 : "eleven",
-                12 : "twelve"}
-
-    #Below line commented out cause doesn't work in Python 2.4
-    #assert all((singular, plural)) or not any((singular, plural)), "Need to pass two words or none"
+    num_repr = {0: "zero",
+                1: "one",
+                2: "two",
+                3: "three",
+                4: "four",
+                5: "five",
+                6: "six",
+                7: "seven",
+                8: "eight",
+                9: "nine",
+                10: "ten",
+                11: "eleven",
+                12: "twelve"}
+
     if number == 1:
         word = singular
     else:
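
Adding 0 to num_repr lets numify(0, 'book', 'books') return 'zero books' (covered by the new unit test above); values outside the 0..12 range keep falling through to the comma-formatted digits:

    numify(0, 'book', 'books')        # 'zero books'
    numify(5)                         # 'five'
    numify(14, 'book', 'books')       # '14 books'
    numify(1000000, 'book', 'books')  # '1,000,000 books'
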
diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py
index 6c912a23..1377a564 100644
--- a/wqflask/utility/redis_tools.py
+++ b/wqflask/utility/redis_tools.py
@@ -264,17 +264,14 @@ def get_resources():
     return resource_list
 
 def get_resource_id(dataset, trait_id=None):
+    resource_id = False
     if dataset.type == "Publish":
         if trait_id:
             resource_id = hmac.hmac_creation("{}:{}:{}".format('dataset-publish', dataset.id, trait_id))
-        else:
-            return False
     elif dataset.type == "ProbeSet":
         resource_id = hmac.hmac_creation("{}:{}".format('dataset-probeset', dataset.id))
     elif dataset.type == "Geno":
         resource_id = hmac.hmac_creation("{}:{}".format('dataset-geno', dataset.id))
-    else:
-        return False
 
     return resource_id
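
With the single resource_id = False default the early returns collapse into one exit point; the behaviour is unchanged. For example (the dataset objects and the trait id "10001" are hypothetical stand-ins):

    get_resource_id(publish_dataset)           # False, a Publish dataset needs a trait id
    get_resource_id(publish_dataset, "10001")  # hmac of "dataset-publish:<dataset.id>:10001"
    get_resource_id(temp_dataset)              # False for any unrecognised dataset type
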
 
diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py
index 89d88516..77db5d53 100644
--- a/wqflask/utility/tools.py
+++ b/wqflask/utility/tools.py
@@ -270,8 +270,8 @@ if ORCID_CLIENT_ID != 'UNKNOWN' and ORCID_CLIENT_SECRET:
                       ORCID_CLIENT_ID+"&client_secret="+ORCID_CLIENT_SECRET
     ORCID_TOKEN_URL = get_setting('ORCID_TOKEN_URL')
 
-# ELASTICSEARCH_HOST = get_setting('ELASTICSEARCH_HOST')
-# ELASTICSEARCH_PORT = get_setting('ELASTICSEARCH_PORT')
+ELASTICSEARCH_HOST = get_setting('ELASTICSEARCH_HOST')
+ELASTICSEARCH_PORT = get_setting('ELASTICSEARCH_PORT')
 # import utility.elasticsearch_tools as es
 # es.test_elasticsearch_connection()
 
diff --git a/wqflask/wqflask/__init__.py b/wqflask/wqflask/__init__.py
index 62e98b36..d729aef5 100644
--- a/wqflask/wqflask/__init__.py
+++ b/wqflask/wqflask/__init__.py
@@ -12,7 +12,6 @@ logging.basicConfig(level=logging.INFO)
 
 app = Flask(__name__)
 
-app.config.from_object('cfg.default_settings')   # Get the defaults from cfg.default_settings
 app.config.from_envvar('GN2_SETTINGS')       # See http://flask.pocoo.org/docs/config/#configuring-from-files
 # Note no longer use the badly named WQFLASK_OVERRIDES (nyi)
 
@@ -22,4 +21,4 @@ app.jinja_env.globals.update(
 )
 
 from wqflask.api import router
-import wqflask.views
\ No newline at end of file
+import wqflask.views
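
With cfg.default_settings gone, the defaults it used to provide are no longer applied, so the file named by the GN2_SETTINGS environment variable (loaded by from_envvar above) has to supply every setting. A minimal sketch for scripts that import the app; the path is hypothetical:

    import os
    os.environ.setdefault('GN2_SETTINGS', '/etc/genenetwork2/settings.py')  # hypothetical path
    from wqflask import app  # app.config is now populated solely from that file
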
diff --git a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py
index 5d74dc9d..09d6b9cc 100644
--- a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py
+++ b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py
@@ -35,8 +35,6 @@ import resource
 
 from pprint import pformat as pf
 
-import reaper
-
 from base.trait import create_trait
 from base import data_set
 from utility import webqtlUtil, helper_functions, corr_result_helpers
diff --git a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py
index bc2912f2..de7a1c0c 100644
--- a/wqflask/wqflask/correlation/show_corr_results.py
+++ b/wqflask/wqflask/correlation/show_corr_results.py
@@ -42,8 +42,6 @@ utils = importr("utils")
 
 from pprint import pformat as pf
 
-import reaper
-
 from base import webqtlConfig
 from utility.THCell import THCell
 from utility.TDCell import TDCell
diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py
index 2b9467d1..0ac94139 100644
--- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py
+++ b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py
@@ -41,8 +41,6 @@ import rpy2.robjects as robjects
 
 from pprint import pformat as pf
 
-import reaper
-
 from utility.redis_tools import get_redis_conn
 Redis = get_redis_conn()
 THIRTY_DAYS = 60 * 60 * 24 * 30
diff --git a/wqflask/wqflask/export_traits.py b/wqflask/wqflask/export_traits.py
index 2d96c05a..3272c03d 100644
--- a/wqflask/wqflask/export_traits.py
+++ b/wqflask/wqflask/export_traits.py
@@ -4,24 +4,30 @@ import csv
 import xlsxwriter
 import StringIO 
 import datetime
+import itertools
+
+from zipfile import ZipFile, ZIP_DEFLATED
 
 import simplejson as json
 
+from base.trait import create_trait, retrieve_trait_info
+
 from pprint import pformat as pf
 
+from utility.logger import getLogger
+logger = getLogger(__name__ )
+
 def export_search_results_csv(targs):
 
     table_data = json.loads(targs['export_data'])
-    table_headers = table_data['headers']
     table_rows = table_data['rows']
     
-    buff = StringIO.StringIO()
-    writer = csv.writer(buff)
-    
+    now = datetime.datetime.now()
+    time_str = now.strftime('%H:%M_%d%B%Y')
     if 'file_name' in targs:
-        file_name = targs['file_name']
+        zip_file_name = targs['file_name'] + "_export_" + time_str
     else:
-        file_name = "table_export.csv"
+        zip_file_name = "export_" + time_str
 
     metadata = []
 
@@ -40,19 +46,98 @@ def export_search_results_csv(targs):
         if targs['filter_term'] != "None":
             metadata.append(["Search Filter Terms: " + targs['filter_term']])
     metadata.append(["Exported Row Number: " + str(len(table_rows))])
+    metadata.append(["Funding for The GeneNetwork: NIAAA (U01AA13499, U24AA13513), NIDA, NIMH, and NIAAA (P20-DA21131), NCI MMHCC (U01CA105417), and NCRR (U01NR 105417)"])
+    metadata.append([])
+
+    trait_list = []
+    for trait in table_rows:
+        trait_name, dataset_name, _hash = trait.split(":")
+        trait_ob = create_trait(name=trait_name, dataset_name=dataset_name)
+        trait_ob = retrieve_trait_info(trait_ob, trait_ob.dataset, get_qtl_info=True)
+        trait_list.append(trait_ob)
+
+    table_headers = ['Species', 'Group', 'Dataset', 'Record ID', 'Symbol', 'Description', 'ProbeTarget', 'PubMed_ID', 'Chr', 'Mb', 'Alias', 'Gene_ID', 'Homologene_ID', 'UniGene_ID', 'Strand_Probe', 'Probe_set_specificity', 'Probe_set_BLAT_score', 'Probe_set_BLAT_Mb_start', 'Probe_set_BLAT_Mb_end', 'QTL_Chr', 'QTL_Mb', 'Locus_at_Peak', 'Max_LRS', 'P_value_of_MAX', 'Mean_Expression']
+
+    traits_by_group = sort_traits_by_group(trait_list)
+
+    file_list = []
+    for group in traits_by_group.keys():
+        group_traits = traits_by_group[group]
+        buff = StringIO.StringIO()
+        writer = csv.writer(buff)
+        csv_rows = []
+
+        sample_headers = []
+        for sample in group_traits[0].dataset.group.samplelist:
+            sample_headers.append(sample)
+            sample_headers.append(sample + "_SE")
+
+        full_headers = table_headers + sample_headers
+
+        for metadata_row in metadata:
+            writer.writerow(metadata_row)
+
+        csv_rows.append(full_headers)
+
+        for trait in group_traits:
+            if getattr(trait, "symbol", None):
+                trait_symbol = getattr(trait, "symbol")
+            elif getattr(trait, "abbreviation", None):
+                trait_symbol = getattr(trait, "abbreviation")
+            else:
+                trait_symbol = "N/A"
+            row_contents = [
+                trait.dataset.group.species,
+                trait.dataset.group.name,
+                trait.dataset.name,
+                trait.name,
+                trait_symbol,
+                getattr(trait, "description_display", "N/A"),
+                getattr(trait, "probe_target_description", "N/A"),
+                getattr(trait, "pubmed_id", "N/A"),
+                getattr(trait, "chr", "N/A"),
+                getattr(trait, "mb", "N/A"),
+                trait.alias_fmt,
+                getattr(trait, "geneid", "N/A"),
+                getattr(trait, "homologeneid", "N/A"),
+                getattr(trait, "unigeneid", "N/A"),
+                getattr(trait, "strand_probe", "N/A"),
+                getattr(trait, "probe_set_specificity", "N/A"),
+                getattr(trait, "probe_set_blat_score", "N/A"),
+                getattr(trait, "probe_set_blat_mb_start", "N/A"),
+                getattr(trait, "probe_set_blat_mb_end", "N/A"),
+                getattr(trait, "locus_chr", "N/A"),
+                getattr(trait, "locus_mb", "N/A"),
+                getattr(trait, "locus", "N/A"),
+                getattr(trait, "lrs", "N/A"),
+                getattr(trait, "pvalue", "N/A"),
+                getattr(trait, "mean", "N/A")
+            ]
+
+            for sample in trait.dataset.group.samplelist:
+                if sample in trait.data:
+                    row_contents += [trait.data[sample].value, trait.data[sample].variance]
+                else:
+                    row_contents += ["x", "x"]
+
+            csv_rows.append(row_contents)
+
+        csv_rows = map(list, itertools.izip_longest(*[row for row in csv_rows]))
+        writer.writerows(csv_rows)
+        csv_data = buff.getvalue()
+        buff.close()
 
-    for metadata_row in metadata:
-        writer.writerow(metadata_row)
+        file_name = group + "_traits.csv"
+        file_list.append([file_name, csv_data])
 
-    writer.writerow([])
+    return file_list
 
-    writer.writerow(table_headers)
-    for trait_info in table_rows:
-        writer.writerow(trait_info)
+def sort_traits_by_group(trait_list=[]):
+    traits_by_group = {}
+    for trait in trait_list:
+        if trait.dataset.group.name not in traits_by_group.keys():
+            traits_by_group[trait.dataset.group.name] = []
 
-    writer.writerow([])
-    writer.writerow(["Funding for The GeneNetwork: NIAAA (U01AA13499, U24AA13513), NIDA, NIMH, and NIAAA (P20-DA21131), NCI MMHCC (U01CA105417), and NCRR (U01NR 105417)"])
-    csv_data = buff.getvalue()
-    buff.close()
+        traits_by_group[trait.dataset.group.name].append(trait)
 
-    return csv_data, file_name
\ No newline at end of file
+    return traits_by_group
\ No newline at end of file
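
export_search_results_csv now returns a list of [file_name, csv_text] pairs, one per group, instead of a single (csv_data, file_name) tuple. Each posted row is the "trait_name:dataset_name:hash" checkbox value (see the search_results.js change further down), and the izip_longest() call transposes the sheet so the header names form the first column and every trait becomes a column. A rough sketch of the returned structure; group names are hypothetical:

    file_list = export_search_results_csv(request.form)
    # [['BXD_traits.csv',      '<transposed csv text>'],
    #  ['B6BTBRF2_traits.csv', '<transposed csv text>']]
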
diff --git a/wqflask/wqflask/heatmap/heatmap.py b/wqflask/wqflask/heatmap/heatmap.py
index e82aa0ef..5098a184 100644
--- a/wqflask/wqflask/heatmap/heatmap.py
+++ b/wqflask/wqflask/heatmap/heatmap.py
@@ -19,8 +19,6 @@ import numpy as np
 
 from pprint import pformat as pf
 
-import reaper
-
 from base.trait import GeneralTrait
 from base import data_set
 from base import species
diff --git a/wqflask/wqflask/network_graph/network_graph.py b/wqflask/wqflask/network_graph/network_graph.py
index f41f3017..f61c40b4 100644
--- a/wqflask/wqflask/network_graph/network_graph.py
+++ b/wqflask/wqflask/network_graph/network_graph.py
@@ -40,8 +40,6 @@ import rpy2.robjects as robjects
 
 from pprint import pformat as pf
 
-import reaper
-
 from utility.THCell import THCell
 from utility.TDCell import TDCell
 from base.trait import create_trait
diff --git a/wqflask/wqflask/static/new/javascript/dataset_select_menu_orig.js b/wqflask/wqflask/static/new/javascript/dataset_select_menu_orig.js
index d172907a..48ffd731 100644
--- a/wqflask/wqflask/static/new/javascript/dataset_select_menu_orig.js
+++ b/wqflask/wqflask/static/new/javascript/dataset_select_menu_orig.js
@@ -81,11 +81,17 @@ redo_dropdown = function(dropdown, items) {
         this_opt_group.append($("<option />").val(item[0]).text(item[1]));
       } else if (current_family != "" && item[2] == current_family){
         this_opt_group.append($("<option />").val(item[0]).text(item[1]));
+        if (_i == group_family_list.length - 1){
+          _results.push(dropdown.append(this_opt_group))
+        }
       } else if (current_family != "" && item[2] != current_family && item[2] != "None"){
         current_family = item[2]
         _results.push(dropdown.append(this_opt_group))
         this_opt_group = $("<optgroup label=\"" + current_family + "\">")
         this_opt_group.append($("<option />").val(item[0]).text(item[1]));
+        if (_i == group_family_list.length - 1){
+          _results.push(dropdown.append(this_opt_group))
+        }
       } else if (current_family != "" && this_opt_group != null && item[2] == "None"){
         _results.push(dropdown.append(this_opt_group))
         current_family = ""
@@ -109,6 +115,9 @@ redo_dropdown = function(dropdown, items) {
         current_family = item[2]
         this_opt_group = $("<optgroup label=\"" + item[2] + "\">")
         this_opt_group.append($("<option />").val(item[0]).text(item[1]));
+        if (_i == type_family_list.length - 1){
+          _results.push(dropdown.append(this_opt_group))
+        }
       } else if (current_family != "" && item[2] == current_family){
         this_opt_group.append($("<option />").val(item[0]).text(item[1]));
         if (_i == type_family_list.length - 1){
diff --git a/wqflask/wqflask/static/new/javascript/draw_corr_scatterplot.js b/wqflask/wqflask/static/new/javascript/draw_corr_scatterplot.js
index 956e0467..1bae8773 100644
--- a/wqflask/wqflask/static/new/javascript/draw_corr_scatterplot.js
+++ b/wqflask/wqflask/static/new/javascript/draw_corr_scatterplot.js
@@ -337,14 +337,13 @@ function getdata() {
           continue
         }
 
+        sizev = 10;
+        datav = 0;
         if (size_cofactor_vals.length > 0){
           if (cofactor_samples.indexOf(js_data.indIDs[j])) {
             datav = size_cofactor_vals[j]
             sizev = map1to2(datamin, datamax, sizemin, sizemax, datav);
           }
-        } else {
-            datav = 0;
-            sizev = 10;
         }
 
         x_values.push(js_data.data[0][j])
diff --git a/wqflask/wqflask/static/new/javascript/get_traits_from_collection.js b/wqflask/wqflask/static/new/javascript/get_traits_from_collection.js
index 6f03b98f..4ec62157 100644
--- a/wqflask/wqflask/static/new/javascript/get_traits_from_collection.js
+++ b/wqflask/wqflask/static/new/javascript/get_traits_from_collection.js
@@ -41,29 +41,31 @@ $('#trait_table').dataTable( {
     "orderClasses": true
 } );
 
-$('#collection_table').dataTable( {
-  "createdRow": function ( row, data, index ) {
-      if ($('td', row).eq(2).text().length > 40) {
-          $('td', row).eq(2).text($('td', row).eq(2).text().substring(0, 40));
-          $('td', row).eq(2).text($('td', row).eq(2).text() + '...')
-      }
-      if ($('td', row).eq(4).text().length > 50) {
-          $('td', row).eq(4).text($('td', row).eq(4).text().substring(0, 50));
-          $('td', row).eq(4).text($('td', row).eq(4).text() + '...')
-      }
-  },
-  "columnDefs": [ {
-      "targets": 0,
-      "orderable": false
-  } ],
-  "order": [[1, "asc" ]],
-  "sDom": "ZRtr",
-  "iDisplayLength": -1,
-  "autoWidth": true,
-  "bSortClasses": false,
-  "paging": false,
-  "orderClasses": true
-} );
+if ( ! $.fn.DataTable.isDataTable( '#collection_table' ) ) {
+  $('#collection_table').dataTable( {
+    "createdRow": function ( row, data, index ) {
+        if ($('td', row).eq(2).text().length > 40) {
+            $('td', row).eq(2).text($('td', row).eq(2).text().substring(0, 40));
+            $('td', row).eq(2).text($('td', row).eq(2).text() + '...')
+        }
+        if ($('td', row).eq(4).text().length > 50) {
+            $('td', row).eq(4).text($('td', row).eq(4).text().substring(0, 50));
+            $('td', row).eq(4).text($('td', row).eq(4).text() + '...')
+        }
+    },
+    "columnDefs": [ {
+        "targets": 0,
+        "orderable": false
+    } ],
+    "order": [[1, "asc" ]],
+    "sDom": "ZRtr",
+    "iDisplayLength": -1,
+    "autoWidth": true,
+    "bSortClasses": false,
+    "paging": false,
+    "orderClasses": true
+  } );
+}
 
 collection_click = function() {
   var this_collection_url;
diff --git a/wqflask/wqflask/static/new/javascript/search_results.js b/wqflask/wqflask/static/new/javascript/search_results.js
index 39aae113..b3ed06fc 100644
--- a/wqflask/wqflask/static/new/javascript/search_results.js
+++ b/wqflask/wqflask/static/new/javascript/search_results.js
@@ -161,23 +161,11 @@ $(function() {
     trait_table.find('tbody tr').each(function (i, tr) {
       if (trait_table.find('input[name="searchResult"]:checked').length > 0) {
         if ($(this).find('input[name="searchResult"]').is(':checked')){
-          this_row = [];
-          $(tr).find('td').each(function(j, td){
-            if ($(td).data('export')){
-              this_row.push($(td).data('export'));
-            }
-          });
-          rows.push(this_row);
+          rows.push($(this).find('input[name="searchResult"]:checked').val())
         }
       }
       else {
-        this_row = [];
-        $(tr).find('td').each(function(j, td){
-          if ($(td).data('export')){
-            this_row.push($(td).data('export'));
-          }
-        });
-        rows.push(this_row);
+        rows.push($(this).find('input[name="searchResult"]').val())
       }
     });
     table_dict['rows'] = rows;
diff --git a/wqflask/wqflask/templates/base.html b/wqflask/wqflask/templates/base.html
index 262d9ee5..50562200 100644
--- a/wqflask/wqflask/templates/base.html
+++ b/wqflask/wqflask/templates/base.html
@@ -69,7 +69,7 @@
                                   <li><a href="https://systems-genetics.org/">Systems Genetics PheWAS</a></li>
                                   <li><a href="http://ucscbrowser.genenetwork.org/">Genome Browser</a></li>
                                   <li><a href="http://power.genenetwork.org">BXD Power Calculator</a></li>
-                                  <li><a href="httP://datafiles.genenetwork.org">Interplanetary File System</a></li>
+                                  <li><a href="http://datafiles.genenetwork.org">Interplanetary File System</a></li>
                                 </ul>
                         </li>
                         {% if g.user_session %}
@@ -96,7 +96,11 @@
                         </li>
                         {% if g.user_session.logged_in %}
                         <li class="">
-                            <a id="manage_groups" title="Manage Groups" href="/groups/manage">Manage Groups</a>
+                            <a href="/edit_account_settings" class="dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">User Account Settings <span class="caret"></span></a>
+                            <ul class="dropdown-menu">
+                                <li><a id="manage_user" title="User Options" href="/user/manage">User Options</a></li>
+                                <li><a id="manage_groups" title="Manage Groups" href="/groups/manage">Manage Groups</a></li>
+                            </ul>
                         </li>
                         {% endif %}
                         {% endif %}
diff --git a/wqflask/wqflask/templates/correlation_page.html b/wqflask/wqflask/templates/correlation_page.html
index 3d750bea..df3e43f1 100644
--- a/wqflask/wqflask/templates/correlation_page.html
+++ b/wqflask/wqflask/templates/correlation_page.html
@@ -200,7 +200,7 @@
 {% endblock %}
 
 {% block js %}
-    <!--<script type="text/javascript" src="/static/new/js_external/md5.min.js"></script>-->
+    <script type="text/javascript" src="/static/new/js_external/md5.min.js"></script>
     <script type="text/javascript" src="/static/new/javascript/search_results.js"></script>
 
     <script language="javascript" type="text/javascript" src="/static/new/js_external/jszip.min.js"></script>
@@ -427,10 +427,6 @@
                     { "type": "natural" },
                     { "type": "natural" }
                 ],
-                "createdRow": function ( row, data, index ) {
-                    $('td', row).eq(4).text(decodeURIComponent(escape($('td', row).eq(4).text())));
-                    $('td', row).eq(5).text(decodeURIComponent(escape($('td', row).eq(5).text())));
-                },
                 "order": [[9, "asc" ]],
                 "sDom": "Btir",
                 "iDisplayLength": -1,
diff --git a/wqflask/wqflask/templates/gsearch_gene.html b/wqflask/wqflask/templates/gsearch_gene.html
index 8c261eec..62ef1a7b 100644
--- a/wqflask/wqflask/templates/gsearch_gene.html
+++ b/wqflask/wqflask/templates/gsearch_gene.html
@@ -249,10 +249,11 @@
                     }
                 ],
                 'order': [[1, "asc" ]],
-                'sDom': "tir",
+                'sDom': "pitirp",
                 'autoWidth': true,
+                'iDisplayLength': 500,
                 'deferRender': true,
-                'paging': false,
+                'paging': true,
                 'orderClasses': true,
                 'processing': true,
                 'language': {
diff --git a/wqflask/wqflask/templates/gsearch_pheno.html b/wqflask/wqflask/templates/gsearch_pheno.html
index 04b45659..f5058158 100644
--- a/wqflask/wqflask/templates/gsearch_pheno.html
+++ b/wqflask/wqflask/templates/gsearch_pheno.html
@@ -250,10 +250,11 @@
                     }
                 ],
                 'order': [[1, "asc" ]],
-                'sDom': "tir",
+                'sDom': "pitirp",
                 'autoWidth': false,
                 'deferRender': true,
-                'paging': false,
+                'iDisplayLength': 500,
+                'paging': true,
                 'orderClasses': true,
                 'processing': true,
                 'language': {
diff --git a/wqflask/wqflask/templates/mapping_results.html b/wqflask/wqflask/templates/mapping_results.html
index 132d5249..81803deb 100644
--- a/wqflask/wqflask/templates/mapping_results.html
+++ b/wqflask/wqflask/templates/mapping_results.html
@@ -333,6 +333,7 @@
 {% block js %}  
 
     <script type="text/javascript" src="http://d3js.org/d3.v3.min.js"></script>
+    <script type="text/javascript" src="/static/new/js_external/md5.min.js"></script>
     <script type="text/javascript" src="/static/new/js_external/underscore-min.js"></script>
     <script type="text/javascript" src="/static/new/js_external/underscore.string.min.js"></script>
     <script type="text/javascript" src="/static/new/js_external/d3-tip.min.js"></script>
diff --git a/wqflask/wqflask/templates/search_result_page.html b/wqflask/wqflask/templates/search_result_page.html
index 3dfae3dd..1f76ea82 100644
--- a/wqflask/wqflask/templates/search_result_page.html
+++ b/wqflask/wqflask/templates/search_result_page.html
@@ -377,11 +377,12 @@
                         } else{
                           author_string = data.authors
                         }
-                        try {
-                          return decodeURIComponent(escape(author_string))
-                        } catch(err){
-                          return author_string
-                        }
+                        return author_string
+                        // try {
+                        //   return decodeURIComponent(escape(author_string))
+                        // } catch(err){
+                        //   return author_string
+                        // }
                       }
                     },
                     {
@@ -441,12 +442,13 @@
                         postfixButtons: [ 'colvisRestore' ]
                     }
                 ],
-                'sDom': "Bitir",
+                'sDom': "Bpitirp",
                 {% else %}
-                'sDom': "itir",
+                'sDom': "pitirp",
                 {% endif %}
+                'iDisplayLength': 500,
                 'deferRender': true,
-                'paging': false,
+                'paging': true,
                 'orderClasses': true,
                 'processing': true,
                 'language': {
diff --git a/wqflask/wqflask/templates/show_trait.html b/wqflask/wqflask/templates/show_trait.html
index 7380d198..acee6724 100644
--- a/wqflask/wqflask/templates/show_trait.html
+++ b/wqflask/wqflask/templates/show_trait.html
@@ -21,8 +21,7 @@
         {% endif %}
     </div>
 
-    <form method="post" action="" target="_blank" name="trait_page" id="trait_data_form"
-    class="form-horizontal">
+    <form method="post" action="" target="_blank" name="trait_page" id="trait_data_form" class="form-horizontal">
         <div id="hidden_inputs">
         <input type="hidden" name="trait_hmac" value="{{ data_hmac('{}:{}'.format(this_trait.name, dataset.name)) }}">
         {% for key in hddn %}
@@ -74,7 +73,7 @@
                         <div class="panel-body">
                             {% include 'show_trait_transform_and_filter.html' %}
                         </div>
-                        <div id="transform_alert_placeholder"><div id="transform_alert" style="display: none;"class="alert alert-success outlier-alert"><a class="close" data-dismiss="alert">�</a><span>Because there are some values between 0 and 1, log2 and log10 transforms will add 1 to each value.</span></div></div>
+                        <div id="transform_alert_placeholder"><div id="transform_alert" style="display: none;" class="alert alert-success outlier-alert"><a href="#" class="close" data-dismiss="alert">&times;</a><span>Because there are some values between 0 and 1, log2 and log10 transforms will add 1 to each value.</span></div></div>
                     </div>
                 </div>
                 <div class="panel panel-default">
diff --git a/wqflask/wqflask/templates/show_trait_calculate_correlations.html b/wqflask/wqflask/templates/show_trait_calculate_correlations.html
index a9b371b8..1378b91b 100644
--- a/wqflask/wqflask/templates/show_trait_calculate_correlations.html
+++ b/wqflask/wqflask/templates/show_trait_calculate_correlations.html
@@ -78,7 +78,7 @@
         <div class="form-group">
             <label class="col-xs-2 control-label">Min Expr</label>
             <div class="col-xs-3 controls">
-                <input name="min_expr" value="" type="text" class="form-control" style="width: 50px;">
+                <input name="min_expr" value="" type="text" class="form-control" style="width: 70px;">
             </div>
         </div>
         <div class="form-group">
diff --git a/wqflask/wqflask/templates/show_trait_details.html b/wqflask/wqflask/templates/show_trait_details.html
index 965c0340..58353f05 100644
--- a/wqflask/wqflask/templates/show_trait_details.html
+++ b/wqflask/wqflask/templates/show_trait_details.html
@@ -234,7 +234,7 @@
         <button type="button" class="btn btn-default" title="Write or review comments about this gene">GeneWiki</button>
         </a>
         {% if dataset.group.species == "mouse" or dataset.group.species == "rat" %}
-        <a href="./snp_browser?first_run=true&species={{ dataset.group.species }}&gene_name={{ this_trait.symbol }}&limit_strains=on">
+        <a href="/snp_browser?first_run=true&species={{ dataset.group.species }}&gene_name={{ this_trait.symbol }}&limit_strains=on">
         <button type="button" class="btn btn-default" title="View SNPs and Indels">SNPs</button>
         </a>
         {% endif %}
diff --git a/wqflask/wqflask/templates/show_trait_mapping_tools.html b/wqflask/wqflask/templates/show_trait_mapping_tools.html
index 4d51adff..27040045 100755
--- a/wqflask/wqflask/templates/show_trait_mapping_tools.html
+++ b/wqflask/wqflask/templates/show_trait_mapping_tools.html
@@ -90,7 +90,7 @@
                         <div class="mapping_method_fields form-group">
                           <label class="col-xs-3 control-label"></label>
                           <div style="margin-left:20px;" class="col-xs-6">
-                            <input type="button" id="gemma_compute" class="btn submit_special btn-success" data-url="/marker_regression" title="Compute Marker Regression" value="Compute">
+                            <button id="gemma_compute" class="btn submit_special btn-success" data-url="/marker_regression" title="Compute Marker Regression" value="Compute">Compute</button>
                           </div>
                         </div>
                     </div>
@@ -99,7 +99,7 @@
                 <div class="tab-pane" id="interval_mapping">
                     <div style="margin-top: 20px" class="form-horizontal">
                         <div class="mapping_method_fields form-group">
-                            <label for="reaper_version" style="text-align: right;" class="col-xs-3 control-label">Version<sup><a title="'New' is the new qtlreaper implementation written in RUST by Christian Fischer. 'Original' corresponds to the original version written in C.">?</a></sup></label>
+                            <label for="reaper_version" style="text-align: right;" class="col-xs-3 control-label">Version<sup><a href="https://github.com/chfi/rust-qtlreaper" target="_blank" title="'New' is the new qtlreaper implementation written in RUST by Christian Fischer. 'Original' corresponds to the original version written in C.">?</a></sup></label>
                             <div style="margin-left:20px;" class="col-xs-3 controls">
                                 <select name="reaper_version" class="form-control" style="width: 80px;">
                                     <option value="new">New</option>
@@ -224,7 +224,7 @@
                         <div class="mapping_method_fields form-group">
                             <label class="col-xs-3 control-label"></label>
                             <div style="margin-left:20px;" class="col-xs-6">
-                              <input type="button" id="interval_mapping_compute" class="btn submit_special btn-success" data-url="/marker_regression" title="Compute Interval Mapping" value="Compute">
+                              <button id="interval_mapping_compute" class="btn submit_special btn-success" data-url="/marker_regression" title="Compute Interval Mapping" value="Compute">Compute</button>
                             </div>
                         </div>
                     </div>
@@ -387,7 +387,7 @@
                         <div class="mapping_method_fields form-group">
                             <label class="col-xs-3 control-label"></label>
                             <div style="margin-left:20px;" class="col-xs-6">
-                              <input type="button" id="rqtl_geno_compute" class="btn submit_special btn-success" data-url="/marker_regression" title="Compute Marker Regression" value="Compute">
+                              <button id="rqtl_geno_compute" class="btn submit_special btn-success" data-url="/marker_regression" title="Compute Marker Regression" value="Compute">Compute</button>
                             </div>
                         </div>
                     </div>
@@ -422,4 +422,4 @@
     {% else %}
     Mapping options are disabled for data not matched with genotypes.
     {% endif %}
-</div>
+</div>
\ No newline at end of file
diff --git a/wqflask/wqflask/templates/snp_browser.html b/wqflask/wqflask/templates/snp_browser.html
index 88cb4d31..4537cd06 100644
--- a/wqflask/wqflask/templates/snp_browser.html
+++ b/wqflask/wqflask/templates/snp_browser.html
@@ -406,8 +406,8 @@
       ],
       {% endif %}
       'order': [[1, "asc" ]],
-      'sDom': "rti",
-      'iDisplayLength': -1,
+      'sDom': "rtip",
+      'iDisplayLength': 500,
       'processing': true,
       'language': {
         'loadingRecords': '&nbsp;',
diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py
index 131345d3..57183eed 100644
--- a/wqflask/wqflask/views.py
+++ b/wqflask/wqflask/views.py
@@ -11,24 +11,24 @@ import datetime  # for errors
 import time      # for errors
 import sys
 import csv
+import simplejson as json
+import yaml
 import xlsxwriter
 import StringIO  # Todo: Use cStringIO?
 
+from zipfile import ZipFile, ZIP_DEFLATED
+
 import gc
 import numpy as np
-
 import cPickle as pickle
 import uuid
 
-import simplejson as json
-import yaml
-
 import flask
 import base64
 import array
 import sqlalchemy
 from wqflask import app
-from flask import g, Response, request, make_response, render_template, send_from_directory, jsonify, redirect, url_for
+from flask import g, Response, request, make_response, render_template, send_from_directory, jsonify, redirect, url_for, send_file
 from wqflask import group_manager
 from wqflask import resource_manager
 from wqflask import search_results
@@ -421,11 +421,21 @@ def export_traits_csv():
     logger.info("In export_traits_csv")
     logger.info("request.form:", request.form)
     logger.info(request.url)
-    csv_data, file_name = export_traits.export_search_results_csv(request.form)
+    file_list = export_traits.export_search_results_csv(request.form)
 
-    return Response(csv_data,
-                    mimetype='text/csv',
-                    headers={"Content-Disposition":"attachment;filename=" + file_name + ".csv"})
+    if len(file_list) > 1:
+        memory_file = StringIO.StringIO()
+        with ZipFile(memory_file, mode='w', compression=ZIP_DEFLATED) as zf:
+            for the_file in file_list:
+                zf.writestr(the_file[0], the_file[1])
+
+        memory_file.seek(0)
+
+        return send_file(memory_file, attachment_filename="export_traits.zip", as_attachment=True)
+    else:
+        return Response(file_list[0][1],
+                        mimetype='text/csv',
+                        headers={"Content-Disposition":"attachment;filename=" + file_list[0][0]})
 
 @app.route('/export_perm_data', methods=('POST',))
 def export_perm_data():
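
The multi-file branch of export_traits_csv above builds the zip entirely in memory before handing it to send_file. Distilled into a self-contained sketch (Python 2, matching the StringIO and zipfile imports added in this file):

    import StringIO
    from zipfile import ZipFile, ZIP_DEFLATED

    def zip_trait_files(file_list):
        # file_list is the list of [name, csv_text] pairs returned by
        # export_traits.export_search_results_csv.
        memory_file = StringIO.StringIO()
        with ZipFile(memory_file, mode='w', compression=ZIP_DEFLATED) as zf:
            for name, csv_text in file_list:
                zf.writestr(name, csv_text)
        memory_file.seek(0)
        return memory_file
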