about summary refs log tree commit diff
diff options
context:
space:
mode:
author:    Pjotr Prins  2016-06-18 08:06:59 +0000
committer: Pjotr Prins  2016-06-18 08:06:59 +0000
commit0a11c10237d899cfdddfcfcf4d17140da9421f7d (patch)
tree50c7cd2fd6f4eba2d8403a32f3fb31651d38d2f9
parentee70713e9a296ac9a855c73c3f49f585bae8946a (diff)
downloadgenenetwork2-0a11c10237d899cfdddfcfcf4d17140da9421f7d.tar.gz
Logger: replacing print statements
-rw-r--r--  wqflask/base/data_set.py           74
-rw-r--r--  wqflask/runserver.py                4
-rw-r--r--  wqflask/wqflask/do_search.py       58
-rw-r--r--  wqflask/wqflask/parser.py          11
-rw-r--r--  wqflask/wqflask/search_results.py  21
-rw-r--r--  wqflask/wqflask/user_manager.py    88
-rw-r--r--  wqflask/wqflask/views.py            6
7 files changed, 138 insertions(+), 124 deletions(-)
diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py
index 4953e728..f75447f6 100644
--- a/wqflask/base/data_set.py
+++ b/wqflask/base/data_set.py
@@ -51,6 +51,9 @@ from maintenance import get_group_samplelists
 from MySQLdb import escape_string as escape
 from pprint import pformat as pf
 
+from utility.logger import getLogger
+logger = getLogger(__name__ )
+
 # Used by create_database to instantiate objects
 # Each subclass will add to this
 DS_NAME_MAP = {}
@@ -59,7 +62,7 @@ def create_dataset(dataset_name, dataset_type = None, get_samplelist = True):
     if not dataset_type:
         dataset_type = Dataset_Getter(dataset_name)
 
-        print("dataset_type is:", dataset_type)
+        logger.debug("dataset_type is:", dataset_type)
 
     dataset_ob = DS_NAME_MAP[dataset_type]
     dataset_class = globals()[dataset_ob]
@@ -73,7 +76,7 @@ class Dataset_Types(object):
         with open(file_name, 'r') as fh:
             data = json.load(fh)
 
-        print("*" * 70)
+        logger.debug("*" * 70)
         for species in data['datasets']:
             for group in data['datasets'][species]:
                 for dataset_type in data['datasets'][species][group]:
@@ -98,7 +101,7 @@ def create_datasets_list():
     result = Redis.get(key)
 
     if result:
-        print("Cache hit!!!")
+        logger.debug("Cache hit!!!")
         datasets = pickle.loads(result)
 
     else:
@@ -113,7 +116,7 @@ def create_datasets_list():
                 for result in g.db.execute(query).fetchall():
                     #The query at the beginning of this function isn't necessary here, but still would
                     #rather just reuse it
-                    #print("type: {}\tname: {}".format(dataset_type, result.Name))
+                    #logger.debug("type: {}\tname: {}".format(dataset_type, result.Name))
                     dataset = create_dataset(result.Name, dataset_type)
                     datasets.append(dataset)
 
@@ -133,7 +136,7 @@ def create_in_clause(items):
 def mescape(*items):
     """Multiple escape"""
     escaped = [escape(str(item)) for item in items]
-    #print("escaped is:", escaped)
+    #logger.debug("escaped is:", escaped)
     return escaped
 
 
@@ -152,12 +155,12 @@ class Markers(object):
             marker['Mb'] = float(marker['Mb'])
 
         self.markers = markers
-        #print("self.markers:", self.markers)
+        #logger.debug("self.markers:", self.markers)
 
 
     def add_pvalues(self, p_values):
-        print("length of self.markers:", len(self.markers))
-        print("length of p_values:", len(p_values))
+        logger.debug("length of self.markers:", len(self.markers))
+        logger.debug("length of p_values:", len(p_values))
 
         if type(p_values) is list:
             # THIS IS only needed for the case when we are limiting the number of p-values calculated
@@ -178,10 +181,10 @@ class Markers(object):
         elif type(p_values) is dict:
             filtered_markers = []
             for marker in self.markers:
-                #print("marker[name]", marker['name'])
-                #print("p_values:", p_values)
+                #logger.debug("marker[name]", marker['name'])
+                #logger.debug("p_values:", p_values)
                 if marker['name'] in p_values:
-                    #print("marker {} IS in p_values".format(i))
+                    #logger.debug("marker {} IS in p_values".format(i))
                     marker['p_value'] = p_values[marker['name']]
                     if math.isnan(marker['p_value']) or (marker['p_value'] <= 0):
                         marker['lod_score'] = 0
@@ -192,7 +195,7 @@ class Markers(object):
                         marker['lrs_value'] = -math.log10(marker['p_value']) * 4.61
                     filtered_markers.append(marker)
                 #else:
-                    #print("marker {} NOT in p_values".format(i))
+                    #logger.debug("marker {} NOT in p_values".format(i))
                     #self.markers.remove(marker)
                     #del self.markers[i]
             self.markers = filtered_markers
@@ -204,7 +207,7 @@ class HumanMarkers(Markers):
         self.markers = []
         for line in marker_data_fh:
             splat = line.strip().split()
-            #print("splat:", splat)
+            #logger.debug("splat:", splat)
             if len(specified_markers) > 0:
                 if splat[1] in specified_markers:
                     marker = {}
@@ -220,7 +223,7 @@ class HumanMarkers(Markers):
                 marker['Mb'] = float(splat[3]) / 1000000
             self.markers.append(marker)
 
-        #print("markers is: ", pf(self.markers))
+        #logger.debug("markers is: ", pf(self.markers))
 
 
     def add_pvalues(self, p_values):
@@ -237,7 +240,7 @@ class DatasetGroup(object):
     """
     def __init__(self, dataset):
         """This sets self.group and self.group_id"""
-        #print("DATASET NAME2:", dataset.name)
+        #logger.debug("DATASET NAME2:", dataset.name)
         self.name, self.id = g.db.execute(dataset.query_for_group).fetchone()
         if self.name == 'BXD300':
             self.name = "BXD"
@@ -245,7 +248,7 @@ class DatasetGroup(object):
         self.f1list = None
         self.parlist = None
         self.get_f1_parent_strains()
-        #print("parents/f1s: {}:{}".format(self.parlist, self.f1list))
+        #logger.debug("parents/f1s: {}:{}".format(self.parlist, self.f1list))
 
         self.species = webqtlDatabaseFunction.retrieve_species(self.name)
 
@@ -257,7 +260,7 @@ class DatasetGroup(object):
         self.markers = HumanMarkers(self.name, markers)
 
     def get_markers(self):
-        #print("self.species is:", self.species)
+        #logger.debug("self.species is:", self.species)
         if self.species == "human":
             marker_class = HumanMarkers
         else:
@@ -267,10 +270,10 @@ class DatasetGroup(object):
 
     def datasets(self):
         key = "group_dataset_menu:v2:" + self.name
-        print("key is2:", key)
+        logger.debug("key is2:", key)
         dataset_menu = []
-        print("[tape4] webqtlConfig.PUBLICTHRESH:", webqtlConfig.PUBLICTHRESH)
-        print("[tape4] type webqtlConfig.PUBLICTHRESH:", type(webqtlConfig.PUBLICTHRESH))
+        logger.debug("[tape4] webqtlConfig.PUBLICTHRESH:", webqtlConfig.PUBLICTHRESH)
+        logger.debug("[tape4] type webqtlConfig.PUBLICTHRESH:", type(webqtlConfig.PUBLICTHRESH))
         results = g.db.execute('''
              (SELECT '#PublishFreeze',PublishFreeze.FullName,PublishFreeze.Name
               FROM PublishFreeze,InbredSet
@@ -317,7 +320,7 @@ class DatasetGroup(object):
                         break
 
                 if tissue_already_exists:
-                    #print("dataset_menu:", dataset_menu[i]['datasets'])
+                    #logger.debug("dataset_menu:", dataset_menu[i]['datasets'])
                     dataset_menu[i]['datasets'].append((dataset, dataset_short))
                 else:
                     dataset_menu.append(dict(tissue=tissue_name,
@@ -343,18 +346,18 @@ class DatasetGroup(object):
 
     def get_samplelist(self):
         key = "samplelist:v2:" + self.name
-        #print("key is:", key)
+        #logger.debug("key is:", key)
         #with Bench("Loading cache"):
         result = Redis.get(key)
 
         if result:
-            #print("Sample List Cache hit!!!")
-            #print("Before unjsonifying {}: {}".format(type(result), result))
+            #logger.debug("Sample List Cache hit!!!")
+            #logger.debug("Before unjsonifying {}: {}".format(type(result), result))
             self.samplelist = json.loads(result)
-            #print("  type: ", type(self.samplelist))
-            #print("  self.samplelist: ", self.samplelist)
+            #logger.debug("  type: ", type(self.samplelist))
+            #logger.debug("  self.samplelist: ", self.samplelist)
         else:
-            print("Cache not hit")
+            logger.debug("Cache not hit")
 
             genotype_fn = locate_ignore_error(self.name+".geno",'genotype')
             mapping_fn = locate_ignore_error(self.name+".fam",'mapping')
@@ -364,7 +367,7 @@ class DatasetGroup(object):
                 self.samplelist = get_group_samplelists.get_samplelist("geno", genotype_fn)
             else:
                 self.samplelist = None
-            print("Sample list: ",self.samplelist)
+            logger.debug("Sample list: ",self.samplelist)
             Redis.set(key, json.dumps(self.samplelist))
             Redis.expire(key, 60*5)
 
@@ -482,7 +485,7 @@ class DataSet(object):
                   """ % (query_args)).fetchone()
 
         except TypeError:
-            print("Dataset {} is not yet available in GeneNetwork.".format(self.name))
+            logger.debug("Dataset {} is not yet available in GeneNetwork.".format(self.name))
             pass
 
     def get_trait_data(self, sample_list=None):
@@ -549,10 +552,10 @@ class DataSet(object):
                         """.format(*mescape(self.type, self.type, self.type, self.type,
                                    self.name, dataset_type, self.type, self.type, dataset_type))
 
-            #print("trait data query: ", query)
+            #logger.debug("trait data query: ", query)
 
             results = g.db.execute(query).fetchall()
-            #print("query results:", results)
+            #logger.debug("query results:", results)
             trait_sample_data.append(results)
 
         trait_count = len(trait_sample_data[0])
@@ -571,7 +574,7 @@ class PhenotypeDataSet(DataSet):
 
     def setup(self):
 
-        #print("IS A PHENOTYPEDATASET")
+        #logger.debug("IS A PHENOTYPEDATASET")
 
         # Fields in the database table
         self.search_fields = ['Phenotype.Post_publication_description',
@@ -967,7 +970,7 @@ class MrnaAssayDataSet(DataSet):
             """ % (escape(str(this_trait.dataset.id)),
                    escape(this_trait.name)))
 
-            #print("query is:", pf(query))
+            #logger.debug("query is:", pf(query))
 
             result = g.db.execute(query).fetchone()
 
@@ -1046,7 +1049,7 @@ class MrnaAssayDataSet(DataSet):
                             Strain.Name
                     """ % (escape(trait), escape(self.name))
         results = g.db.execute(query).fetchall()
-        #print("RETRIEVED RESULTS HERE:", results)
+        #logger.debug("RETRIEVED RESULTS HERE:", results)
         return results
 
 
@@ -1130,7 +1133,7 @@ class TempDataSet(DataSet):
 
 def geno_mrna_confidentiality(ob):
     dataset_table = ob.type + "Freeze"
-    #print("dataset_table [%s]: %s" % (type(dataset_table), dataset_table))
+    #logger.debug("dataset_table [%s]: %s" % (type(dataset_table), dataset_table))
 
     query = '''SELECT Id, Name, FullName, confidentiality,
                         AuthorisedUsers FROM %s WHERE Name = %%s''' % (dataset_table)
@@ -1145,4 +1148,3 @@ def geno_mrna_confidentiality(ob):
 
     if confidential:
         return True
-
diff --git a/wqflask/runserver.py b/wqflask/runserver.py
index bfc27827..4556056d 100644
--- a/wqflask/runserver.py
+++ b/wqflask/runserver.py
@@ -20,6 +20,7 @@ from wqflask import app
 #_ch = logging.StreamHandler()
 #_log.addHandler(_ch)
 
+import logging
 import utility.logger
 logger = utility.logger.getLogger(__name__ )
 
@@ -27,6 +28,8 @@ logger.info(app.config)
 
 from utility.tools import WEBSERVER_MODE
 
+werkzeug_logger = logging.getLogger('werkzeug')
+
 if WEBSERVER_MODE == 'DEBUG':
     app.run(host='0.0.0.0',
             port=app.config['SERVER_PORT'],
@@ -35,6 +38,7 @@ if WEBSERVER_MODE == 'DEBUG':
             threaded=False,
             use_reloader=True)
 elif WEBSERVER_MODE == 'DEV':
+    werkzeug_logger.setLevel(logging.WARNING)
     app.run(host='0.0.0.0',
             port=app.config['SERVER_PORT'],
             debug=False,
diff --git a/wqflask/wqflask/do_search.py b/wqflask/wqflask/do_search.py
index 04fd0688..48212877 100644
--- a/wqflask/wqflask/do_search.py
+++ b/wqflask/wqflask/do_search.py
@@ -1,6 +1,3 @@
-#!/usr/bin/python
-
-
 from __future__ import print_function, division
 
 import string
@@ -15,6 +12,9 @@ import sys
 
 from dbFunction import webqtlDatabaseFunction
 
+import logging
+from utility.logger import getLogger
+logger = getLogger(__name__ , level = logging.INFO)
 
 class DoSearch(object):
     """Parent class containing parameters/functions used for all searches"""
@@ -30,15 +30,15 @@ class DoSearch(object):
         self.dataset = dataset
 
         if self.dataset:
-            print("self.dataset is boo: ", type(self.dataset), pf(self.dataset))
-            print("self.dataset.group is: ", pf(self.dataset.group))
+            logger.debug("self.dataset is boo: ", type(self.dataset), pf(self.dataset))
+            logger.debug("self.dataset.group is: ", pf(self.dataset.group))
             #Get group information for dataset and the species id
             self.species_id = webqtlDatabaseFunction.retrieve_species_id(self.dataset.group.name)
 
     def execute(self, query):
         """Executes query and returns results"""
         query = self.normalize_spaces(query)
-        print("in do_search query is:", pf(query))
+        logger.debug("in do_search query is:", pf(query))
         results = g.db.execute(query, no_parameters=True).fetchall()
         return results
 
@@ -56,7 +56,7 @@ class DoSearch(object):
     def mescape(self, *items):
         """Multiple escape"""
         escaped = [escape(str(item)) for item in items]
-        print("escaped is:", escaped)
+        logger.debug("escaped is:", escaped)
         return tuple(escaped)
 
     def normalize_spaces(self, stringy):
@@ -66,13 +66,13 @@ class DoSearch(object):
 
     @classmethod
     def get_search(cls, search_type):
-        print("search_types are:", pf(cls.search_types))
+        logger.debug("search_types are:", pf(cls.search_types))
 
         search_type_string = search_type['dataset_type']
         if 'key' in search_type:
             search_type_string += '_' + search_type['key']
 
-        print("search_type_string is:", search_type_string)
+        logger.debug("search_type_string is:", search_type_string)
 
         if search_type_string in cls.search_types:
             return cls.search_types[search_type_string]
@@ -100,7 +100,7 @@ class QuickMrnaAssaySearch(DoSearch):
     def run(self):
         """Generates and runs a search for assays across all mRNA expression datasets"""
 
-        print("Running ProbeSetSearch")
+        logger.debug("Running ProbeSetSearch")
         query = self.base_query + """WHERE (MATCH (ProbeSet.Name,
                     ProbeSet.description,
                     ProbeSet.symbol,
@@ -108,7 +108,7 @@ class QuickMrnaAssaySearch(DoSearch):
                     AGAINST ('%s' IN BOOLEAN MODE))
                             """ % (escape(self.search_term[0]))
 
-        print("final query is:", pf(query))
+        logger.debug("final query is:", pf(query))
 
         return self.execute(query)
 
@@ -176,14 +176,14 @@ class MrnaAssaySearch(DoSearch):
                                     where_clause,
                                     escape(str(self.dataset.id))))
 
-        #print("query is:", pf(query))
+        #logger.debug("query is:", pf(query))
 
         return query
 
     def run_combined(self, from_clause = '', where_clause = ''):
         """Generates and runs a combined search of an mRNA expression dataset"""
 
-        print("Running ProbeSetSearch")
+        logger.debug("Running ProbeSetSearch")
         #query = self.base_query + from_clause + " WHERE " + where_clause
 
         from_clause = self.normalize_spaces(from_clause)
@@ -198,18 +198,18 @@ class MrnaAssaySearch(DoSearch):
                                     where_clause,
                                     escape(str(self.dataset.id))))
 
-        print("final query is:", pf(query))
+        logger.debug("final query is:", pf(query))
 
         return self.execute(query)
 
     def run(self):
         """Generates and runs a simple search of an mRNA expression dataset"""
 
-        print("Running ProbeSetSearch")
+        logger.debug("Running ProbeSetSearch")
         where_clause = self.get_where_clause()
         query = self.base_query + "WHERE " + where_clause + "ORDER BY ProbeSet.symbol ASC"
 
-        #print("final query is:", pf(query))
+        #logger.debug("final query is:", pf(query))
 
         return self.execute(query)
 
@@ -290,14 +290,14 @@ class PhenotypeSearch(DoSearch):
                             escape(str(self.dataset.group.id)),
                             escape(str(self.dataset.id))))
 
-        print("query is:", pf(query))
+        logger.debug("query is:", pf(query))
 
         return query
 
     def run_combined(self, from_clause, where_clause):
         """Generates and runs a combined search of an phenotype dataset"""
 
-        print("Running PhenotypeSearch")
+        logger.debug("Running PhenotypeSearch")
 
         from_clause = self.normalize_spaces(from_clause)
 
@@ -313,7 +313,7 @@ class PhenotypeSearch(DoSearch):
                         escape(str(self.dataset.group.id)),
                         escape(str(self.dataset.id))))
 
-        print("final query is:", pf(query))
+        logger.debug("final query is:", pf(query))
 
 
         return self.execute(query)
@@ -364,7 +364,7 @@ class QuickPhenotypeSearch(PhenotypeSearch):
                     PublishXRef.InbredSetId = InbredSet.Id and
                     InbredSet.SpeciesId = Species.Id""" % where_clause)
 
-        print("query is:", pf(query))
+        logger.debug("query is:", pf(query))
 
         return query
 
@@ -408,7 +408,7 @@ class GenotypeSearch(DoSearch):
             where_clause.append('''%s REGEXP "%s"''' % ("%s.%s" % self.mescape(self.dataset.type,
                                                                                field),
                                                                                self.search_term))
-        print("hello ;where_clause is:", pf(where_clause))
+        logger.debug("hello ;where_clause is:", pf(where_clause))
         where_clause = "(%s) " % ' OR '.join(where_clause)
 
         return where_clause
@@ -432,7 +432,7 @@ class GenotypeSearch(DoSearch):
                         and GenoFreeze.Id = %s"""% (where_clause,
                                                 escape(str(self.dataset.id))))
 
-        print("query is:", pf(query))
+        logger.debug("query is:", pf(query))
 
         return query
 
@@ -586,7 +586,7 @@ class LrsSearch(DoSearch):
                                                            self.species_id)
         else:
             # Deal with >, <, >=, and <=
-            print("self.search_term is:", self.search_term)
+            logger.debug("self.search_term is:", self.search_term)
             where_clause = """ %sXRef.LRS %s %s """ % self.mescape(self.dataset.type,
                                                                         self.search_operator,
                                                                         self.search_term[0])
@@ -787,7 +787,7 @@ class MeanSearch(MrnaAssaySearch):
 
     def get_final_query(self):
         self.where_clause = self.get_where_clause()
-        print("where_clause is:", pf(self.where_clause))
+        logger.debug("where_clause is:", pf(self.where_clause))
 
         self.query = self.compile_final_query(where_clause = self.where_clause)
 
@@ -795,7 +795,7 @@ class MeanSearch(MrnaAssaySearch):
 
     def run(self):
         self.where_clause = self.get_where_clause()
-        print("where_clause is:", pf(self.where_clause))
+        logger.debug("where_clause is:", pf(self.where_clause))
 
         self.query = self.compile_final_query(where_clause = self.where_clause)
 
@@ -825,7 +825,7 @@ class RangeSearch(MrnaAssaySearch):
                                      WHERE ProbeSetData.Id = ProbeSetXRef.dataId) > %s
                                     """ % (escape(self.search_term[0]))
 
-        print("where_clause is:", pf(where_clause))
+        logger.debug("where_clause is:", pf(where_clause))
 
         return where_clause
 
@@ -927,7 +927,7 @@ class PvalueSearch(MrnaAssaySearch):
                                         self.search_operator,
                                         self.search_term[0])
 
-        print("where_clause is:", pf(self.where_clause))
+        logger.debug("where_clause is:", pf(self.where_clause))
 
         self.query = self.compile_final_query(where_clause = self.where_clause)
 
@@ -992,7 +992,7 @@ if __name__ == "__main__":
     #            ProbeSet.Id = ProbeSetXRef.ProbeSetId and
     #            ProbeSetXRef.ProbeSetFreezeId = 112""")
 
-    #print(pf(cursor.fetchall()))
+    #logger.debug(pf(cursor.fetchall()))
     #results = ProbeSetSearch("shh", None, dataset, cursor, db_conn).run()
     results = PvalueSearch(['0.005'], '<', dataset, cursor, db_conn).run()
     #results = RifSearch("diabetes", dataset, cursor, db_conn).run()
@@ -1004,4 +1004,4 @@ if __name__ == "__main__":
     #results = GenotypeSearch("rs13475699", dataset, cursor, db_conn).run()
     #results = GoSearch("0045202", dataset, cursor, db_conn).run()
 
-    print("results are:", pf(results))
+    logger.debug("results are:", pf(results))
diff --git a/wqflask/wqflask/parser.py b/wqflask/wqflask/parser.py
index 35070b8c..ea7ab1b9 100644
--- a/wqflask/wqflask/parser.py
+++ b/wqflask/wqflask/parser.py
@@ -23,6 +23,9 @@ import re
 
 from pprint import pformat as pf
 
+from utility.logger import getLogger
+logger = getLogger(__name__ )
+
 def parse(pstring):
     """
 
@@ -40,13 +43,13 @@ def parse(pstring):
     separators = [re.escape(x) for x in ("<=", ">=", ":", "=", "<", ">")]
     separators = '(%s)' % ("|".join(separators))
 
-    print("separators:", separators)
+    logger.debug("separators:", separators)
 
 
 
     for item in pstring:
         splat = re.split(separators, item)
-        print("splat is:", splat)
+        logger.debug("splat is:", splat)
 
         # splat is an array of 1 if no match, otherwise more than 1
         if len(splat) > 1:
@@ -72,7 +75,7 @@ def parse(pstring):
                         search_term=[item])
 
         items.append(term)
-    print("* items are:", pf(items) + "\n")
+    logger.debug("* items are:", pf(items) + "\n")
     return(items)
 
     #def encregexp(self,str):
@@ -108,4 +111,4 @@ if __name__ == '__main__':
     parse("LRS=(9 99 Chr4 122 155) cisLRS=(9 999 10)")
     parse("sal1 LRS=(9 99 Chr4 122 155) sal2 cisLRS=(9 999 10)")
     parse("sal1 sal3 LRS=(9 99 Chr4 122 155) wiki=bar sal2 go:foobar cisLRS=(9 999 10)")
-    parse("sal1 LRS=(9 99 Chr4 122 155) wiki=bar sal2 go:foobar cisLRS=(9, 999, 10)")
\ No newline at end of file
+    parse("sal1 LRS=(9 99 Chr4 122 155) wiki=bar sal2 go:foobar cisLRS=(9, 999, 10)")
diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py
index f04881a6..30b65e80 100644
--- a/wqflask/wqflask/search_results.py
+++ b/wqflask/wqflask/search_results.py
@@ -36,6 +36,9 @@ from dbFunction import webqtlDatabaseFunction
 
 from utility import formatting
 
+from utility.logger import getLogger
+logger = getLogger(__name__ )
+
 #class QuickSearchResult(object):
     #def __init__(self, key, result_fields):
     #    self.key = key
@@ -64,9 +67,9 @@ class SearchResultPage(object):
         #else:
 
         self.uc_id = uuid.uuid4()
-        print("uc_id:", self.uc_id)
+        logger.debug("uc_id:", self.uc_id)
 
-        print("kw is:", kw)
+        logger.debug("kw is:", kw)
         if kw['search_terms_or']:
             self.and_or = "or"
             self.search_terms = kw['search_terms_or']
@@ -82,7 +85,7 @@ class SearchResultPage(object):
         else:
             dataset_type = "ProbeSet"
         self.dataset = create_dataset(kw['dataset'], dataset_type)
-        print("KEYWORD:", self.search_terms)
+        logger.debug("KEYWORD:", self.search_terms)
         self.search()
         if self.search_term_exists:
             self.gen_search_result()
@@ -100,14 +103,14 @@ class SearchResultPage(object):
 
         # result_set represents the results for each search term; a search of
         # "shh grin2b" would have two sets of results, one for each term
-        print("self.results is:", pf(self.results))
+        logger.debug("self.results is:", pf(self.results))
         for result in self.results:
             if not result:
                 continue
 
             #### Excel file needs to be generated ####
 
-            #print("foo locals are:", locals())
+            #logger.debug("foo locals are:", locals())
             trait_id = result[0]
             this_trait = GeneralTrait(dataset=self.dataset, name=trait_id, get_qtl_info=True, get_sample_info=False)
             self.trait_list.append(this_trait)
@@ -124,7 +127,7 @@ class SearchResultPage(object):
 
     def search(self):
         self.search_terms = parser.parse(self.search_terms)
-        print("After parsing:", self.search_terms)
+        logger.debug("After parsing:", self.search_terms)
 
         if len(self.search_terms) > 1:
             combined_from_clause = ""
@@ -171,19 +174,19 @@ class SearchResultPage(object):
                 self.header_fields = the_search.header_fields
 
     def get_search_ob(self, a_search):
-        print("[kodak] item is:", pf(a_search))
+        logger.debug("[kodak] item is:", pf(a_search))
         search_term = a_search['search_term']
         search_operator = a_search['separator']
         search_type = {}
         search_type['dataset_type'] = self.dataset.type
         if a_search['key']:
             search_type['key'] = a_search['key'].upper()
-        print("search_type is:", pf(search_type))
+        logger.debug("search_type is:", pf(search_type))
 
         search_ob = do_search.DoSearch.get_search(search_type)
         if search_ob:
             search_class = getattr(do_search, search_ob)
-            print("search_class is: ", pf(search_class))
+            logger.debug("search_class is: ", pf(search_class))
             the_search = search_class(search_term,
                                     search_operator,
                                     self.dataset,
diff --git a/wqflask/wqflask/user_manager.py b/wqflask/wqflask/user_manager.py
index 1e454292..557708e9 100644
--- a/wqflask/wqflask/user_manager.py
+++ b/wqflask/wqflask/user_manager.py
@@ -47,7 +47,9 @@ from wqflask import model
 
 from utility import Bunch, Struct, after
 
-
+import logging
+from utility.logger import getLogger
+logger = getLogger(__name__)
 
 from base.data_set import create_datasets_list
 
@@ -64,10 +66,10 @@ class AnonUser(object):
     def __init__(self):
         self.cookie = request.cookies.get(self.cookie_name)
         if self.cookie:
-            print("already is cookie")
+            logger.debug("already is cookie")
             self.anon_id = verify_cookie(self.cookie)
         else:
-            print("creating new cookie")
+            logger.debug("creating new cookie")
             self.anon_id, self.cookie = create_signed_cookie()
 
         @after.after_this_request
@@ -87,7 +89,7 @@ def create_signed_cookie():
     the_uuid = str(uuid.uuid4())
     signature = actual_hmac_creation(the_uuid)
     uuid_signed = the_uuid + ":" + signature
-    print("uuid_signed:", uuid_signed)
+    logger.debug("uuid_signed:", uuid_signed)
     return the_uuid, uuid_signed
 
 class UserSession(object):
@@ -102,7 +104,7 @@ class UserSession(object):
             session_id = verify_cookie(cookie)
 
             self.redis_key = self.cookie_name + ":" + session_id
-            print("self.redis_key is:", self.redis_key)
+            logger.debug("self.redis_key is:", self.redis_key)
             self.session_id = session_id
             self.record = Redis.hgetall(self.redis_key)
 
@@ -123,10 +125,10 @@ class UserSession(object):
 
             if Redis.ttl(self.redis_key) < THREE_DAYS:
                 # (Almost) everytime the user does something we extend the session_id in Redis...
-                print("Extending ttl...")
+                logger.debug("Extending ttl...")
                 Redis.expire(self.redis_key, THREE_DAYS)
 
-            print("record is:", self.record)
+            logger.debug("record is:", self.record)
             self.logged_in = True
 
     @property
@@ -156,7 +158,7 @@ class UserSession(object):
     def delete_session(self):
         # And more importantly delete the redis record
         Redis.delete(self.cookie_name)
-        print("At end of delete_session")
+        logger.debug("At end of delete_session")
 
 @app.before_request
 def before_request():
@@ -165,26 +167,26 @@ def before_request():
 class UsersManager(object):
     def __init__(self):
         self.users = model.User.query.all()
-        print("Users are:", self.users)
+        logger.debug("Users are:", self.users)
 
 
 class UserManager(object):
     def __init__(self, kw):
         self.user_id = kw['user_id']
-        print("In UserManager locals are:", pf(locals()))
+        logger.debug("In UserManager locals are:", pf(locals()))
         #self.user = model.User.get(user_id)
-        #print("user is:", user)
+        #logger.debug("user is:", user)
         self.user = model.User.query.get(self.user_id)
-        print("user is:", self.user)
+        logger.debug("user is:", self.user)
         datasets = create_datasets_list()
         for dataset in datasets:
             if not dataset.check_confidentiality():
                 continue
-            print("\n  Name:", dataset.name)
-            print("  Type:", dataset.type)
-            print("  ID:", dataset.id)
-            print("  Confidential:", dataset.check_confidentiality())
-        #print("   ---> self.datasets:", self.datasets)
+            logger.debug("\n  Name:", dataset.name)
+            logger.debug("  Type:", dataset.type)
+            logger.debug("  ID:", dataset.id)
+            logger.debug("  Confidential:", dataset.check_confidentiality())
+        #logger.debug("   ---> self.datasets:", self.datasets)
 
 
 class RegisterUser(object):
@@ -215,7 +217,7 @@ class RegisterUser(object):
         if self.errors:
             return
 
-        print("No errors!")
+        logger.debug("No errors!")
 
         set_password(password, self.user)
 
@@ -233,10 +235,10 @@ class RegisterUser(object):
                                "Click the button above to sign in using an existing account.")
             return
 
-        print("Adding verification email to queue")
+        logger.debug("Adding verification email to queue")
         #self.send_email_verification()
         VerificationEmail(self.new_user)
-        print("Added verification email to queue")
+        logger.debug("Added verification email to queue")
 
         self.thank_you_mode = True
 
@@ -259,8 +261,8 @@ def set_password(password, user):
     # One more check on password length
     assert len(password) >= 6, "Password shouldn't be so short here"
 
-    print("pwfields:", vars(pwfields))
-    print("locals:", locals())
+    logger.debug("pwfields:", vars(pwfields))
+    logger.debug("locals:", locals())
 
     enc_password = Password(password,
                             pwfields.salt,
@@ -324,14 +326,14 @@ class ForgotPasswordEmail(VerificationEmail):
 class Password(object):
     def __init__(self, unencrypted_password, salt, iterations, keylength, hashfunc):
         hashfunc = getattr(hashlib, hashfunc)
-        print("hashfunc is:", hashfunc)
+        logger.debug("hashfunc is: %s", hashfunc)
         # On our computer it takes around 1.4 seconds in 2013
         start_time = time.time()
         salt = base64.b64decode(salt)
         self.password = pbkdf2.pbkdf2_hex(str(unencrypted_password),
                                           salt, iterations, keylength, hashfunc)
         self.encrypt_time = round(time.time() - start_time, 3)
-        print("Creating password took:", self.encrypt_time)
+        logger.debug("Creating password took: %s", self.encrypt_time)
 
 
 def basic_info():
@@ -355,7 +357,7 @@ def verify_email():
 
 @app.route("/n/password_reset")
 def password_reset():
-    print("in password_reset request.url is:", request.url)
+    logger.debug("in password_reset request.url is: %s", request.url)
 
     # We do this mainly just to assert that it's in proper form for displaying next page
     # Really not necessary but doesn't hurt
@@ -365,7 +367,7 @@ def password_reset():
 
 @app.route("/n/password_reset_step2", methods=('POST',))
 def password_reset_step2():
-    print("in password_reset request.url is:", request.url)
+    logger.debug("in password_reset request.url is: %s", request.url)
 
     errors = []
 
@@ -373,13 +375,13 @@ def password_reset_step2():
     verification_code, separator, hmac = user_encode.partition(':')
 
     hmac_verified = actual_hmac_creation(verification_code)
-    print("locals are:", locals())
+    logger.debug("locals are: %s", locals())
 
 
     assert hmac == hmac_verified, "Someone has been naughty"
 
     user = DecodeUser.actual_get_user(ForgotPasswordEmail.key_prefix, verification_code)
-    print("user is:", user)
+    logger.debug("user is: %s", user)
 
     password = request.form['password']
 
@@ -408,9 +410,9 @@ class DecodeUser(object):
     @staticmethod
     def actual_get_user(code_prefix, verification_code):
         data = Redis.get(code_prefix + ":" + verification_code)
-        print("in get_coded_user, data is:", data)
+        logger.debug("in get_coded_user, data is: %s", data)
          data = json.loads(data)
-        print("data is:", data)
+        logger.debug("data is: %s", data)
         return model.User.query.get(data['id'])
 
 @app.route("/n/login", methods=('GET', 'POST'))
@@ -428,14 +430,14 @@ class LoginUser(object):
     def standard_login(self):
         """Login through the normal form"""
         params = request.form if request.form else request.args
-        print("in login params are:", params)
+        logger.debug("in login params are: %s", params)
         if not params:
             return render_template("new_security/login_user.html")
         else:
             try:
                 user = model.User.query.filter_by(email_address=params['email_address']).one()
             except sqlalchemy.orm.exc.NoResultFound:
-                print("No account exists for that email address")
+                logger.debug("No account exists for that email address")
                 valid = False
                 user = None
             else:
@@ -446,9 +448,9 @@ class LoginUser(object):
                                               pwfields.iterations,
                                               pwfields.keylength,
                                               pwfields.hashfunc)
-                print("\n\nComparing:\n{}\n{}\n".format(encrypted.password, pwfields.password))
+                logger.debug("\n\nComparing:\n{}\n{}\n".format(encrypted.password, pwfields.password))
                 valid = pbkdf2.safe_str_cmp(encrypted.password, pwfields.password)
-                print("valid is:", valid)
+                logger.debug("valid is: %s", valid)
 
         if valid and not user.confirmed:
             VerificationEmail(user)
@@ -458,7 +460,7 @@ class LoginUser(object):
 
         if valid:
             if params.get('remember'):
-                print("I will remember you")
+                logger.debug("I will remember you")
                 self.remember_me = True
 
             return self.actual_login(user)
@@ -492,14 +494,14 @@ class LoginUser(object):
         #session_id = "session_id:{}".format(login_rec.session_id)
         session_id_signature = actual_hmac_creation(login_rec.session_id)
         session_id_signed = login_rec.session_id + ":" + session_id_signature
-        print("session_id_signed:", session_id_signed)
+        logger.debug("session_id_signed: %s", session_id_signed)
 
         session = dict(login_time = time.time(),
                        user_id = user.id,
                        user_email_address = user.email_address)
 
         key = UserSession.cookie_name + ":" + login_rec.session_id
-        print("Key when signing:", key)
+        logger.debug("Key when signing: %s", key)
         Redis.hmset(key, session)
         if self.remember_me:
             expire_time = self.remember_time
@@ -518,7 +520,7 @@ class LoginUser(object):
 
 @app.route("/n/logout")
 def logout():
-    print("Logging out...")
+    logger.debug("Logging out...")
     UserSession().delete_session()
     flash("You are now logged out. We hope you come back soon!")
     response = make_response(redirect(url_for('index_page')))
@@ -610,7 +612,7 @@ def register():
     params = request.form if request.form else request.args
 
     if params:
-        print("Attempting to register the user...")
+        logger.debug("Attempting to register the user...")
         result = RegisterUser(params)
         errors = result.errors
 
@@ -656,16 +658,16 @@ def data_hmac(stringy):
 
 def verify_url_hmac(url):
     """Pass in a url that was created with url_hmac and this assures it hasn't been tampered with"""
-    print("url passed in to verify is:", url)
+    logger.debug("url passed in to verify is: %s", url)
     # Verify parts are correct at the end - we expect to see &hm= or ?hm= followed by an hmac
     assert url[-23:-20] == "hm=", "Unexpected url (stage 1)"
     assert url[-24] in ["?", "&"], "Unexpected url (stage 2)"
     hmac = url[-20:]
     url = url[:-24]  # Url without any of the hmac stuff
 
-    #print("before urlsplit, url is:", url)
+    #logger.debug("before urlsplit, url is:", url)
     #url = divide_up_url(url)[1]
-    #print("after urlsplit, url is:", url)
+    #logger.debug("after urlsplit, url is:", url)
 
     hm = actual_hmac_creation(url)
 
@@ -706,4 +708,4 @@ class GroupsManager(object):
 class RolesManager(object):
     def __init__(self):
         self.roles = model.Role.query.all()
-        print("Roles are:", self.roles)
+        logger.debug("Roles are: %s", self.roles)
diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py
index c07e03ff..b6ce762f 100644
--- a/wqflask/wqflask/views.py
+++ b/wqflask/wqflask/views.py
@@ -132,7 +132,7 @@ def search_page():
             return render_template("data_sharing.html", **template_vars.__dict__)
     else:
         key = "search_results:v1:" + json.dumps(request.args, sort_keys=True)
-        logger.info("key is:", pf(key))
+        logger.debug("key is: %s", pf(key))
         if USE_REDIS:
             with Bench("Trying Redis cache"):
                 result = Redis.get(key)
@@ -142,7 +142,7 @@ def search_page():
 
         if result:
             logger.info("Cache hit on search results!!!")
-            logger.info("USE_REDIS=",USE_REDIS)
+            logger.debug("USE_REDIS=%s", USE_REDIS)
             with Bench("Loading results"):
                 result = pickle.loads(result)
         else:
@@ -151,7 +151,7 @@ def search_page():
             the_search = search_results.SearchResultPage(request.args)
             result = the_search.__dict__
 
-            logger.info("result: ", pf(result))
+            logger.debug("result: %s", pf(result))
             if USE_REDIS:
                 Redis.set(key, pickle.dumps(result, pickle.HIGHEST_PROTOCOL))
                 Redis.expire(key, 60*60)