-rwxr-xr-x  bin/genenetwork2                                      8
-rw-r--r--  wqflask/base/webqtlConfig.py                          1
-rw-r--r--  wqflask/utility/tools.py                             13
-rw-r--r--  wqflask/wqflask/correlation/show_corr_results.py    100
-rw-r--r--  wqflask/wqflask/marker_regression/gemma_mapping.py   16
5 files changed, 93 insertions, 45 deletions
diff --git a/bin/genenetwork2 b/bin/genenetwork2
index 145ce395..5438c1c0 100755
--- a/bin/genenetwork2
+++ b/bin/genenetwork2
@@ -45,6 +45,7 @@
 
 SCRIPT=$(realpath "$0")
 echo SCRIPT=$SCRIPT
+echo GN2_PROFILE=$GN2_PROFILE
 GN2_BASE_DIR=$(dirname $(dirname "$SCRIPT"))
 GN2_ID=$(cat /etc/hostname):$(basename $GN2_BASE_DIR)
 
@@ -125,7 +126,9 @@ else
     export PLINK_COMMAND="$GN2_PROFILE/bin/plink2"
     export PYLMM_COMMAND="$GN2_PROFILE/bin/pylmm_redis"
     export GEMMA_COMMAND="$GN2_PROFILE/bin/gemma"
-    export GEMMA_WRAPPER_COMMAND="$GN2_PROFILE/bin/gemma-wrapper"
+    if [ -z $GEMMA_WRAPPER_COMMAND ]; then
+        export GEMMA_WRAPPER_COMMAND="$GN2_PROFILE/bin/gemma-wrapper"
+    fi
     while IFS=":" read -ra PPATH; do
 	for PPart in "${PPATH[@]}"; do
 	    if [ ! -d $PPart ] ; then echo "PYTHONPATH not valid "$PYTHONPATH ; exit 1 ; fi
@@ -198,7 +201,8 @@ if [ "$1" = '-gunicorn-prod' ] ; then
     cd $GN2_BASE_DIR/wqflask
     echo PYTHONPATH=$PYTHONPATH
     if [ -z $SERVER_PORT ]; then echo "ERROR: Provide a SERVER_PORT" ; exit 1 ; fi
-    cmd="--bind 0.0.0.0:$SERVER_PORT --workers=32 --max-requests 1000 --timeout 1200 wsgi"
+    PID=$TMPDIR/gunicorn.$USER.pid
+    cmd="--bind 0.0.0.0:$SERVER_PORT --pid $PID -k eventlet --workers 20 --keep-alive 1200 --max-requests 1000 --timeout 1200 wsgi"
     echo RUNNING gunicorn $cmd
     gunicorn $cmd
     exit $?
diff --git a/wqflask/base/webqtlConfig.py b/wqflask/base/webqtlConfig.py
index 1ef2bc26..1e66e957 100644
--- a/wqflask/base/webqtlConfig.py
+++ b/wqflask/base/webqtlConfig.py
@@ -82,6 +82,7 @@ assert_writable_dir(GENERATED_TEXT_DIR)
 # Flat file directories
 GENODIR              = flat_files('genotype')+'/'
 assert_dir(GENODIR)
+assert_dir(GENODIR+'bimbam') # for gemma
 
 # JSON genotypes are OBSOLETE
 JSON_GENODIR         = flat_files('genotype/json')+'/'
diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py
index ec673cf5..005f9b0f 100644
--- a/wqflask/utility/tools.py
+++ b/wqflask/utility/tools.py
@@ -16,7 +16,7 @@ OVERRIDES = {}
 def app_set(command_id, value):
     """Set application wide value"""
     app.config.setdefault(command_id,value)
-    value
+    return value
 
 def get_setting(command_id,guess=None):
     """Resolve a setting from the environment or the global settings in
@@ -51,7 +51,7 @@ def get_setting(command_id,guess=None):
             return None
 
     # ---- Check whether environment exists
-    logger.debug("Looking for "+command_id+"\n")
+    # print("Looking for "+command_id+"\n")
     command = value(os.environ.get(command_id))
     if command is None or command == "":
         command = OVERRIDES.get(command_id)
@@ -63,7 +63,7 @@ def get_setting(command_id,guess=None):
                 if command is None or command == "":
                     # print command
                     raise Exception(command_id+' setting unknown or faulty (update default_settings.py?).')
-    logger.debug("Set "+command_id+"="+str(command))
+    # print("Set "+command_id+"="+str(command))
     return command
 
 def get_setting_bool(id):
@@ -280,6 +280,7 @@ SMTP_PASSWORD = get_setting_safe('SMTP_PASSWORD')
 
 PYLMM_COMMAND      = app_set("PYLMM_COMMAND",pylmm_command())
 GEMMA_COMMAND      = app_set("GEMMA_COMMAND",gemma_command())
+assert(GEMMA_COMMAND is not None)
 PLINK_COMMAND      = app_set("PLINK_COMMAND",plink_command())
 GEMMA_WRAPPER_COMMAND = gemma_wrapper_command()
 TEMPDIR            = tempdir() # defaults to UNIX TMPDIR
@@ -293,7 +294,7 @@ from six import string_types
 
 if os.environ.get('WQFLASK_OVERRIDES'):
     jsonfn = get_setting('WQFLASK_OVERRIDES')
-    logger.error("WQFLASK_OVERRIDES: %s" % jsonfn)
+    logger.info("WQFLASK_OVERRIDES: %s" % jsonfn)
     with open(jsonfn) as data_file:
         overrides = json.load(data_file)
         for k in overrides:
@@ -305,8 +306,4 @@ if os.environ.get('WQFLASK_OVERRIDES'):
             logger.debug(OVERRIDES)
 
 # assert_file(PHEWAS_FILES+"/auwerx/PheWAS_pval_EMMA_norm.RData")
-# assert_dir(get_setting("JS_BIODALLIANCE"))
-# assert_file(get_setting("JS_BIODALLIANCE")+"/build/dalliance-all.js")
-# assert_file(get_setting("JS_BIODALLIANCE")+"/build/worker-all.js")
-# assert_dir(get_setting("JS_TWITTER_POST_FETCHER"))
 assert_file(JS_TWITTER_POST_FETCHER_PATH+"/js/twitterFetcher_min.js")
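
For context, a minimal sketch of the settings precedence that get_setting() above appears to implement: an environment variable wins, then a WQFLASK_OVERRIDES entry, then the guess coming from default_settings.py. The names below (load_overrides, resolve_setting) are illustrative only and are not part of tools.py.

```python
import json
import os

def load_overrides(path):
    """Load a JSON file of {SETTING_NAME: value} pairs, as WQFLASK_OVERRIDES is."""
    with open(path) as fh:
        return json.load(fh)

def resolve_setting(name, overrides, guess=None):
    """Return the first non-empty value among env var, overrides dict and guess."""
    value = os.environ.get(name)
    if value:
        return value
    value = overrides.get(name)
    if value:
        return value
    if guess:
        return guess
    raise Exception(name + " setting unknown or faulty (update default_settings.py?).")
```
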
diff --git a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py
index 24432ad0..3d1c0d17 100644
--- a/wqflask/wqflask/correlation/show_corr_results.py
+++ b/wqflask/wqflask/correlation/show_corr_results.py
@@ -75,6 +75,46 @@ def print_mem(stage=""):
     mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
     #print("{}: {}".format(stage, mem/1024))
 
+def is_float(value):
+    try:
+        float(value)
+        return True
+    except:
+        return False
+
+def is_int(value):
+    try:
+        int(value)
+        return True
+    except:
+        return False
+
+def is_str(value):
+    if value is None:
+        return False
+    try:
+        str(value)
+        return True
+    except:
+        return False
+
+def get_float(vars,name,default=None):
+    if name in vars:
+        if is_float(vars[name]):
+            return float(vars[name])
+    return default
+
+def get_int(vars,name,default=None):
+    if name in vars:
+        if is_int(vars[name]):
+            return int(vars[name])
+    return default
+
+def get_string(vars,name,default=None):
+    if name in vars:
+        if vars[name] is not None:
+            return str(vars[name])
+    return default
 
 class AuthException(Exception):
     pass
@@ -96,7 +136,19 @@ class CorrelationResults(object):
         # get trait list from db (database name)
         # calculate correlation with Base vector and targets
 
-        print("TESTING...")
+        # Check parameters
+        assert('corr_type' in start_vars)
+        assert(is_str(start_vars['corr_type']))
+        assert('dataset' in start_vars)
+        # assert('group' in start_vars) permitted to be empty?
+        assert('corr_sample_method' in start_vars)
+        assert('corr_samples_group' in start_vars)
+        assert('corr_dataset' in start_vars)
+        assert('min_expr' in start_vars)
+        assert('corr_return_results' in start_vars)
+        if 'loc_chr' in start_vars:
+            assert('min_loc_mb' in start_vars)
+            assert('max_loc_mb' in start_vars)
 
         with Bench("Doing correlations"):
             if start_vars['dataset'] == "Temp":
@@ -115,27 +167,17 @@ class CorrelationResults(object):
             self.sample_data = {}
             self.corr_type = start_vars['corr_type']
             self.corr_method = start_vars['corr_sample_method']
-            if 'min_expr' in start_vars:
-                if start_vars['min_expr'] != "":
-                    self.min_expr = float(start_vars['min_expr'])
-                else:
-                    self.min_expr = None
-            self.p_range_lower = float(start_vars['p_range_lower'])
-            self.p_range_upper = float(start_vars['p_range_upper'])
+            self.min_expr = get_float(start_vars,'min_expr')
+            self.p_range_lower = get_float(start_vars,'p_range_lower',-1.0)
+            self.p_range_upper = get_float(start_vars,'p_range_upper',1.0)
 
             if ('loc_chr' in start_vars and
                 'min_loc_mb' in start_vars and
                 'max_loc_mb' in start_vars):
 
-                self.location_chr = start_vars['loc_chr']
-                if start_vars['min_loc_mb'].isdigit():
-                    self.min_location_mb = start_vars['min_loc_mb']
-                else:
-                    self.min_location_mb = None
-                if start_vars['max_loc_mb'].isdigit():
-                    self.max_location_mb = start_vars['max_loc_mb']
-                else:
-                    self.max_location_mb = None
+                self.location_chr = get_string(start_vars,'loc_chr')
+                self.min_location_mb = get_int(start_vars,'min_loc_mb')
+                self.max_location_mb = get_int(start_vars,'max_loc_mb')
 
             self.get_formatted_corr_type()
             self.return_number = int(start_vars['corr_return_results'])
@@ -183,7 +225,7 @@ class CorrelationResults(object):
                 else:
                     for trait, values in self.target_dataset.trait_data.iteritems():
                         self.get_sample_r_and_p_values(trait, values)
-                        
+
             elif self.corr_type == "lit":
                 self.trait_geneid_dict = self.dataset.retrieve_genes("GeneId")
                 lit_corr_data = self.do_lit_correlation_for_all_traits()
@@ -564,7 +606,7 @@ class CorrelationResults(object):
                 self.this_trait_vals.append(sample_value)
                 target_vals.append(target_sample_value)
 
-        self.this_trait_vals, target_vals, num_overlap = corr_result_helpers.normalize_values(self.this_trait_vals, target_vals)	
+        self.this_trait_vals, target_vals, num_overlap = corr_result_helpers.normalize_values(self.this_trait_vals, target_vals)
 
         #ZS: 2015 could add biweight correlation, see http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3465711/
         if self.corr_method == 'pearson':
@@ -574,8 +616,8 @@ class CorrelationResults(object):
 
         if num_overlap > 5:
             self.correlation_data[trait] = [sample_r, sample_p, num_overlap]
-		
-		
+
+
         """
         correlations = []
 
@@ -673,8 +715,8 @@ class CorrelationResults(object):
                         method=self.method)
 
         return trait_list
-        """		
-		
+        """
+
 
     def do_tissue_corr_for_all_traits_2(self):
         """Comments Possibly Out of Date!!!!!
@@ -1089,7 +1131,7 @@ class CorrelationResults(object):
             totalTraits = len(traits) #XZ, 09/18/2008: total trait number
 
         return traits
-			
+
     def calculate_corr_for_all_tissues(self, tissue_dataset_id=None):
 
         symbol_corr_dict = {}
@@ -1129,7 +1171,7 @@ class CorrelationResults(object):
                     values_2.append(target_value)
             correlation = calCorrelation(values_1, values_2)
             self.correlation_data[trait] = correlation
-			
+
     def getFileName(self, target_db_name):  ### dcrowell  August 2008
         """Returns the name of the reference database file with which correlations are calculated.
         Takes argument cursor which is a cursor object of any instance of a subclass of templatePage
@@ -1144,7 +1186,7 @@ class CorrelationResults(object):
         return FileName
 
     def do_parallel_correlation(self, db_filename, num_overlap):
-	
+
         #XZ, 01/14/2009: This method is for parallel computing only.
         #XZ: It is supposed to be called when "Genetic Correlation, Pearson's r" (method 1)
         #XZ: or "Genetic Correlation, Spearman's rho" (method 2) is selected
@@ -1313,7 +1355,7 @@ class CorrelationResults(object):
                         z_value = z_value*math.sqrt(nOverlap-3)
                         sample_p = 2.0*(1.0 - reaper.normp(abs(z_value)))
 
-                correlation_data[traitdataName] = [sample_r, sample_p, nOverlap]	
+                correlation_data[traitdataName] = [sample_r, sample_p, nOverlap]
 
                 # traitinfo = [traitdataName, sample_r, nOverlap]
                 # allcorrelations.append(traitinfo)
@@ -1321,7 +1363,7 @@ class CorrelationResults(object):
             return correlation_data
             # return allcorrelations
 
-	
+
         datasetFile = open(webqtlConfig.GENERATED_TEXT_DIR+db_filename,'r')
 
         print("Invoking parallel computing")
@@ -1378,5 +1420,3 @@ class CorrelationResults(object):
         # for one_result in results:
             # for one_traitinfo in one_result:
                 # allcorrelations.append( one_traitinfo )
-
-
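
A small usage sketch for the get_float/get_int/get_string helpers introduced above, showing how empty or missing form fields fall back to a default instead of raising. The sample start_vars values are made up, and the import path assumes the wqflask source directory is on PYTHONPATH (as bin/genenetwork2 arranges) and that the module imports cleanly outside the running app.

```python
# Hypothetical usage; the import path and sample values are assumptions.
from wqflask.correlation.show_corr_results import get_float, get_int, get_string

start_vars = {
    "min_expr": "",            # empty form field
    "p_range_lower": "-0.5",
    "loc_chr": "1",
    "min_loc_mb": "10",
}

print(get_float(start_vars, "min_expr"))             # None: "" is not a float
print(get_float(start_vars, "p_range_lower", -1.0))  # -0.5
print(get_float(start_vars, "p_range_upper", 1.0))   # 1.0: key missing, default used
print(get_int(start_vars, "min_loc_mb"))             # 10
print(get_string(start_vars, "loc_chr"))             # "1"
```
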
diff --git a/wqflask/wqflask/marker_regression/gemma_mapping.py b/wqflask/wqflask/marker_regression/gemma_mapping.py
index a24e43d4..68920130 100644
--- a/wqflask/wqflask/marker_regression/gemma_mapping.py
+++ b/wqflask/wqflask/marker_regression/gemma_mapping.py
@@ -3,7 +3,7 @@ import os, math, string, random, json
 from base import webqtlConfig
 from base.trait import GeneralTrait
 from base.data_set import create_dataset
-from utility.tools import flat_files, GEMMA_COMMAND, GEMMA_WRAPPER_COMMAND, TEMPDIR
+from utility.tools import flat_files, GEMMA_COMMAND, GEMMA_WRAPPER_COMMAND, TEMPDIR, assert_bin, assert_file
 
 import utility.logger
 logger = utility.logger.getLogger(__name__ )
@@ -11,6 +11,7 @@ logger = utility.logger.getLogger(__name__ )
 def run_gemma(this_dataset, samples, vals, covariates, method, use_loco):
     """Generates p-values for each marker using GEMMA"""
 
+    assert_bin(GEMMA_COMMAND)
     if this_dataset.group.genofile != None:
         genofile_name = this_dataset.group.genofile[:-5]
     else:
@@ -27,7 +28,7 @@ def run_gemma(this_dataset, samples, vals, covariates, method, use_loco):
         if i < (len(this_chromosomes) - 1):
             chr_list_string += this_chromosomes[i+1].name + ","
         else:
-            chr_list_string += this_chromosomes[i+1].name  
+            chr_list_string += this_chromosomes[i+1].name
 
     if covariates != "":
         gen_covariates_file(this_dataset, covariates)
@@ -209,8 +210,13 @@ def parse_gemma_output(genofile_name):
 def parse_loco_output(this_dataset, gwa_output_filename):
 
     output_filelist = []
-    with open("{}/gn2/".format(TEMPDIR) + gwa_output_filename + ".json") as data_file:
-       data = json.load(data_file)
+    jsonfn = "{}/gn2/".format(TEMPDIR) + gwa_output_filename + ".json"
+    assert_file(jsonfn)
+    try:
+        with open(jsonfn) as data_file:
+            data = json.load(data_file)
+    except ValueError:
+        logger.error("Cannot parse " + jsonfn)
 
     files = data['files']
     for file in files:
@@ -247,4 +253,4 @@ def parse_loco_output(this_dataset, gwa_output_filename):
                     included_markers.append(line.split("\t")[1])
                     p_values.append(float(line.split("\t")[10]))
 
-    return marker_obs
\ No newline at end of file
+    return marker_obs
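
For reference, a minimal standalone sketch of how a LOCO run's output is located and parsed, following the logic of parse_loco_output above: gemma-wrapper writes a JSON manifest under $TMPDIR/gn2/ listing the per-chromosome association files, whose tab-separated columns carry the marker name and p-value. The helper names, the header skipping and the manifest layout details are assumptions for illustration, not a documented format.

```python
import json
import os

TEMPDIR = os.environ.get("TMPDIR", "/tmp")

def read_loco_manifest(gwa_output_filename):
    """Load the JSON manifest that gemma-wrapper writes for a LOCO run."""
    jsonfn = os.path.join(TEMPDIR, "gn2", gwa_output_filename + ".json")
    with open(jsonfn) as data_file:
        return json.load(data_file)  # data["files"] lists per-chromosome outputs

def parse_assoc_file(path):
    """Collect marker names and p-values from one GEMMA association file.

    Column positions (1 and 10, zero-based) follow parse_loco_output above;
    a header line is assumed and skipped.
    """
    markers, p_values = [], []
    with open(path) as fh:
        next(fh)  # skip header
        for line in fh:
            fields = line.split("\t")
            markers.append(fields[1])
            p_values.append(float(fields[10]))
    return markers, p_values
```
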