From a3365dae23f204e489939d3defc55edc1b4872d8 Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 1 Oct 2018 16:09:47 +0000 Subject: - Can now remove cofactors from correlation scatterplot and select them by just clicking their row in collection - Cofactor color picker now works in Safari/Macs - Displays N for relevant samples in trait page sample table - Don't show bar chart when N>256 - Mapping loading page contents better centered - Anonymous collections timeout correctly listed as 30 days now - Minor allele frequency can actually be changed for GEMMA now (previously didn't work) - Fixed transcript position marker location for mapping results - Notifies user if their e-mail isn't associated with an account when they attempt to request forgotten password - Users can now map with submitted traits - Histogram width changes depending upon number of bins (need to improve this still) - Improved Q-q plot (previously called "probability plot") --- wqflask/utility/helper_functions.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/helper_functions.py b/wqflask/utility/helper_functions.py index cf16879f..1c8dad10 100644 --- a/wqflask/utility/helper_functions.py +++ b/wqflask/utility/helper_functions.py @@ -14,7 +14,13 @@ logger = logging.getLogger(__name__ ) def get_species_dataset_trait(self, start_vars): #assert type(read_genotype) == type(bool()), "Expecting boolean value for read_genotype" - self.dataset = data_set.create_dataset(start_vars['dataset']) + if "temp_trait" in start_vars.keys(): + if start_vars['temp_trait'] == "True": + self.dataset = data_set.create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = start_vars['group']) + else: + self.dataset = data_set.create_dataset(start_vars['dataset']) + else: + self.dataset = data_set.create_dataset(start_vars['dataset']) logger.debug("After creating dataset") self.species = TheSpecies(dataset=self.dataset) logger.debug("After creating species") -- cgit v1.2.3 From 61c13a09dba95958f183dc55f3d59c8856b5f753 Mon Sep 17 00:00:00 2001 From: Pjotr Prins Date: Wed, 13 Feb 2019 12:41:45 +0000 Subject: Removed pylmm references and related functions --- bin/genenetwork2 | 1 - etc/default_settings.py | 1 - test/requests/mapping_tests.py | 10 -- wqflask/base/data_set.py | 2 +- wqflask/utility/tools.py | 4 - wqflask/wqflask/heatmap/heatmap.py | 5 +- wqflask/wqflask/marker_regression/run_mapping.py | 204 +---------------------- 7 files changed, 4 insertions(+), 223 deletions(-) (limited to 'wqflask/utility') diff --git a/bin/genenetwork2 b/bin/genenetwork2 index 21f0db13..7c875274 100755 --- a/bin/genenetwork2 +++ b/bin/genenetwork2 @@ -131,7 +131,6 @@ else export LC_ALL=C # FIXME export GUIX_GENENETWORK_FILES="$GN2_PROFILE/share/genenetwork2" export PLINK_COMMAND="$GN2_PROFILE/bin/plink2" - export PYLMM_COMMAND="$GN2_PROFILE/bin/pylmm_redis" export GEMMA_COMMAND="$GN2_PROFILE/bin/gemma" if [ -z $GEMMA_WRAPPER_COMMAND ]; then export GEMMA_WRAPPER_COMMAND="$GN2_PROFILE/bin/gemma-wrapper" diff --git a/etc/default_settings.py b/etc/default_settings.py index 3e54ad1f..a1fe81e5 100644 --- a/etc/default_settings.py +++ b/etc/default_settings.py @@ -89,7 +89,6 @@ GENENETWORK_FILES = HOME+"/genotype_files" # base dir for all static data fil JS_GN_PATH = os.environ['HOME']+"/genenetwork/javascript" # ---- GN2 Executables (overwrite for testing only) -# PYLMM_COMMAND = str.strip(os.popen("which pylmm_redis").read()) # PLINK_COMMAND = str.strip(os.popen("which plink2").read()) # 
GEMMA_COMMAND = str.strip(os.popen("which gemma").read()) # GEMMA_WRAPPER_COMMAND = str.strip(os.popen("which gemma-wrapper").read()) diff --git a/test/requests/mapping_tests.py b/test/requests/mapping_tests.py index 8eb19de7..6de81bfe 100644 --- a/test/requests/mapping_tests.py +++ b/test/requests/mapping_tests.py @@ -11,16 +11,6 @@ def load_data_from_file(): file_data = json.loads(file_handle.read().encode("utf-8")) return file_data -def check_pylmm_tool_selection(host, data): - print("") - print("pylmm mapping tool selection") - data["method"] = "pylmm" - page = requests.post(host+"/marker_regression", data=data) - doc = fromstring(page.text) - form = doc.forms[1] - assert form.fields["dataset"] == "HC_M2_0606_P" - assert form.fields["value:BXD1"] == "15.034" # Check value in the file - def check_R_qtl_tool_selection(host, data): print("") print("R/qtl mapping tool selection") diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py index 79f72390..ca6621e9 100644 --- a/wqflask/base/data_set.py +++ b/wqflask/base/data_set.py @@ -317,7 +317,7 @@ class DatasetGroup(object): mapping_id = g.db.execute("select MappingMethodId from InbredSet where Name= '%s'" % self.name).fetchone()[0] if mapping_id == "1": - mapping_names = ["QTLReaper", "PYLMM", "R/qtl"] + mapping_names = ["QTLReaper", "R/qtl"] elif mapping_id == "2": mapping_names = ["GEMMA"] elif mapping_id == "4": diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py index ea216a35..86ef2e1e 100644 --- a/wqflask/utility/tools.py +++ b/wqflask/utility/tools.py @@ -107,9 +107,6 @@ def js_path(module=None): return try_guix raise "No JS path found for "+module+" (if not in Guix check JS_GN_PATH)" -def pylmm_command(guess=None): - return assert_bin(get_setting("PYLMM_COMMAND",guess)) - def gemma_command(guess=None): return assert_bin(get_setting("GEMMA_COMMAND",guess)) @@ -276,7 +273,6 @@ SMTP_CONNECT = get_setting('SMTP_CONNECT') SMTP_USERNAME = get_setting('SMTP_USERNAME') SMTP_PASSWORD = get_setting('SMTP_PASSWORD') -PYLMM_COMMAND = app_set("PYLMM_COMMAND",pylmm_command()) GEMMA_COMMAND = app_set("GEMMA_COMMAND",gemma_command()) assert(GEMMA_COMMAND is not None) PLINK_COMMAND = app_set("PLINK_COMMAND",plink_command()) diff --git a/wqflask/wqflask/heatmap/heatmap.py b/wqflask/wqflask/heatmap/heatmap.py index ff589693..1bdf252b 100644 --- a/wqflask/wqflask/heatmap/heatmap.py +++ b/wqflask/wqflask/heatmap/heatmap.py @@ -24,12 +24,9 @@ import reaper from base.trait import GeneralTrait from base import data_set from base import species -# from wqflask.my_pylmm.pyLMM import lmm -# from wqflask.my_pylmm.pyLMM import input from utility import helper_functions from utility import Plot, Bunch from utility import temp_data -from utility.tools import PYLMM_COMMAND from MySQLdb import escape_string as escape @@ -144,4 +141,4 @@ class Heatmap(object): if qtl.additive > 0: self.trait_results[this_trait.name].append(-float(qtl.lrs)) else: - self.trait_results[this_trait.name].append(float(qtl.lrs)) \ No newline at end of file + self.trait_results[this_trait.name].append(float(qtl.lrs)) diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py index 3057e340..73d985b8 100644 --- a/wqflask/wqflask/marker_regression/run_mapping.py +++ b/wqflask/wqflask/marker_regression/run_mapping.py @@ -38,7 +38,7 @@ from utility import temp_data from utility.benchmark import Bench from wqflask.marker_regression import gemma_mapping, rqtl_mapping, qtlreaper_mapping, plink_mapping -from utility.tools 
import locate, locate_ignore_error, PYLMM_COMMAND, GEMMA_COMMAND, PLINK_COMMAND, TEMPDIR +from utility.tools import locate, locate_ignore_error, GEMMA_COMMAND, PLINK_COMMAND, TEMPDIR from utility.external import shell from base.webqtlConfig import TMPDIR, GENERATED_TEXT_DIR @@ -239,11 +239,6 @@ class RunMapping(object): self.manhattan_plot = True results = plink_mapping.run_plink(self.this_trait, self.dataset, self.species, self.vals, self.maf) #results = self.run_plink() - elif self.mapping_method == "pylmm": - logger.debug("RUNNING PYLMM") - if self.num_perm > 0: - self.run_permutations(str(temp_uuid)) - results = self.gen_data(str(temp_uuid)) else: logger.debug("RUNNING NOTHING") @@ -354,201 +349,6 @@ class RunMapping(object): count, p_values = self.parse_rqtl_output(plink_output_filename) - def run_permutations(self, temp_uuid): - """Runs permutations and gets significant and suggestive LOD scores""" - - top_lod_scores = [] - - #logger.debug("self.num_perm:", self.num_perm) - - for permutation in range(self.num_perm): - - pheno_vector = np.array([val == "x" and np.nan or float(val) for val in self.vals]) - np.random.shuffle(pheno_vector) - - key = "pylmm:input:" + temp_uuid - - if self.dataset.group.species == "human": - p_values, t_stats = self.gen_human_results(pheno_vector, key, temp_uuid) - else: - genotype_data = [marker['genotypes'] for marker in self.dataset.group.markers.markers] - - no_val_samples = self.identify_empty_samples() - trimmed_genotype_data = self.trim_genotypes(genotype_data, no_val_samples) - - genotype_matrix = np.array(trimmed_genotype_data).T - - params = dict(pheno_vector = pheno_vector.tolist(), - genotype_matrix = genotype_matrix.tolist(), - restricted_max_likelihood = True, - refit = False, - temp_uuid = temp_uuid, - - # meta data - timestamp = datetime.datetime.now().isoformat(), - ) - - json_params = json.dumps(params) - Redis.set(key, json_params) - Redis.expire(key, 60*60) - - command = PYLMM_COMMAND+' --key {} --species {}'.format(key,"other") - shell(command) - - json_results = Redis.blpop("pylmm:results:" + temp_uuid, 45*60) - results = json.loads(json_results[1]) - p_values = [float(result) for result in results['p_values']] - - lowest_p_value = 1 - for p_value in p_values: - if p_value < lowest_p_value: - lowest_p_value = p_value - - #logger.debug("lowest_p_value:", lowest_p_value) - top_lod_scores.append(-math.log10(lowest_p_value)) - - #logger.debug("top_lod_scores:", top_lod_scores) - - self.suggestive = np.percentile(top_lod_scores, 67) - self.significant = np.percentile(top_lod_scores, 95) - - def gen_data(self, temp_uuid): - """Generates p-values for each marker""" - - logger.debug("self.vals is:", self.vals) - pheno_vector = np.array([(val == "x" or val == "") and np.nan or float(val) for val in self.vals]) - - #lmm_uuid = str(uuid.uuid4()) - - key = "pylmm:input:" + temp_uuid - logger.debug("key is:", pf(key)) - #with Bench("Loading cache"): - # result = Redis.get(key) - - if self.dataset.group.species == "human": - p_values, t_stats = self.gen_human_results(pheno_vector, key, temp_uuid) - - else: - logger.debug("NOW CWD IS:", os.getcwd()) - genotype_data = [marker['genotypes'] for marker in self.dataset.group.markers.markers] - - no_val_samples = self.identify_empty_samples() - trimmed_genotype_data = self.trim_genotypes(genotype_data, no_val_samples) - - genotype_matrix = np.array(genotype_data).T - - #logger.debug("pheno_vector: ", pf(pheno_vector)) - #logger.debug("genotype_matrix: ", pf(genotype_matrix)) - 
#logger.debug("genotype_matrix.shape: ", pf(genotype_matrix.shape)) - - #params = {"pheno_vector": pheno_vector, - # "genotype_matrix": genotype_matrix, - # "restricted_max_likelihood": True, - # "refit": False, - # "temp_data": tempdata} - - # logger.debug("genotype_matrix:", str(genotype_matrix.tolist())) - # logger.debug("pheno_vector:", str(pheno_vector.tolist())) - - params = dict(pheno_vector = pheno_vector.tolist(), - genotype_matrix = genotype_matrix.tolist(), - restricted_max_likelihood = True, - refit = False, - temp_uuid = temp_uuid, - - # meta data - timestamp = datetime.datetime.now().isoformat(), - ) - - json_params = json.dumps(params) - #logger.debug("json_params:", json_params) - Redis.set(key, json_params) - Redis.expire(key, 60*60) - logger.debug("before printing command") - - command = PYLMM_COMMAND + ' --key {} --species {}'.format(key, "other") - logger.debug("command is:", command) - logger.debug("after printing command") - - shell(command) - - #t_stats, p_values = lmm.run(key) - #lmm.run(key) - - json_results = Redis.blpop("pylmm:results:" + temp_uuid, 45*60) - results = json.loads(json_results[1]) - p_values = [float(result) for result in results['p_values']] - t_stats = results['t_stats'] - - #t_stats, p_values = lmm.run( - # pheno_vector, - # genotype_matrix, - # restricted_max_likelihood=True, - # refit=False, - # temp_data=tempdata - #) - #logger.debug("p_values:", p_values) - - self.dataset.group.markers.add_pvalues(p_values) - - return self.dataset.group.markers.markers - - def gen_human_results(self, pheno_vector, key, temp_uuid): - file_base = locate(self.dataset.group.name,"mapping") - - plink_input = input.plink(file_base, type='b') - input_file_name = os.path.join(webqtlConfig.SNP_PATH, self.dataset.group.name + ".snps.gz") - - pheno_vector = pheno_vector.reshape((len(pheno_vector), 1)) - covariate_matrix = np.ones((pheno_vector.shape[0],1)) - kinship_matrix = np.fromfile(open(file_base + '.kin','r'),sep=" ") - kinship_matrix.resize((len(plink_input.indivs),len(plink_input.indivs))) - - logger.debug("Before creating params") - - params = dict(pheno_vector = pheno_vector.tolist(), - covariate_matrix = covariate_matrix.tolist(), - input_file_name = input_file_name, - kinship_matrix = kinship_matrix.tolist(), - refit = False, - temp_uuid = temp_uuid, - - # meta data - timestamp = datetime.datetime.now().isoformat(), - ) - - logger.debug("After creating params") - - json_params = json.dumps(params) - Redis.set(key, json_params) - Redis.expire(key, 60*60) - - logger.debug("Before creating the command") - - command = PYLMM_COMMAND+' --key {} --species {}'.format(key, "human") - - logger.debug("command is:", command) - - os.system(command) - - json_results = Redis.blpop("pylmm:results:" + temp_uuid, 45*60) - results = json.loads(json_results[1]) - t_stats = results['t_stats'] - p_values = results['p_values'] - - - #p_values, t_stats = lmm.run_human(key) - - #p_values, t_stats = lmm.run_human( - # pheno_vector, - # covariate_matrix, - # input_file_name, - # kinship_matrix, - # loading_progress=tempdata - # ) - - return p_values, t_stats - def identify_empty_samples(self): no_val_samples = [] for sample_count, val in enumerate(self.vals): @@ -657,4 +457,4 @@ def trim_markers_for_table(markers): trimmed_sorted_markers = sorted_markers[:2000] return trimmed_sorted_markers else: - return sorted_markers \ No newline at end of file + return sorted_markers -- cgit v1.2.3 From 496759ad08efb02b4268ea7f3bbb7905974237e9 Mon Sep 17 00:00:00 2001 From: Pjotr Prins Date: Mon, 
18 Feb 2019 09:39:41 +0000 Subject: Updated installation instructions and SERVER_PORT for single flask server --- doc/README.org | 71 ++++++++++++++++++++++++++++++++++-------------- wqflask/runserver.py | 8 ++++-- wqflask/utility/tools.py | 1 + 3 files changed, 57 insertions(+), 23 deletions(-) (limited to 'wqflask/utility') diff --git a/doc/README.org b/doc/README.org index 5dc9e994..620c946c 100644 --- a/doc/README.org +++ b/doc/README.org @@ -12,13 +12,15 @@ - [[#load-the-small-database-in-mysql][Load the small database in MySQL]] - [[#gn2-dependency-graph][GN2 Dependency Graph]] - [[#working-with-the-gn2-source-code][Working with the GN2 source code]] + - [[#running-elasticsearch][Running ElasticSearch]] + - [[#systemd][SystemD]] + - [[#read-more][Read more]] - [[#trouble-shooting][Trouble shooting]] - [[#importerror-no-module-named-jinja2][ImportError: No module named jinja2]] - - [[#error-can-not-find-directory-homegn2_data][ERROR: can not find directory $HOME/gn2_data]] + - [[#error-can-not-find-directory-homegn2_data-or-can-not-find-directory-homegenotype_filesgenotype][ERROR: 'can not find directory $HOME/gn2_data' or 'can not find directory $HOME/genotype_files/genotype']] - [[#cant-run-a-module][Can't run a module]] - [[#rpy2-error-show-now-found][Rpy2 error 'show' now found]] - [[#mysql-cant-connect-server-through-socket-error][Mysql can't connect server through socket ERROR]] - - [[#read-more][Read more]] - [[#irc-session][IRC session]] * Introduction @@ -100,11 +102,13 @@ mysql (which comes as part of the GNU Guix genenetwork2 install). As root configure and run -: adduser mysql && addgroup mysql -: mysqld --datadir=/var/mysql --initialize-insecure -: mkdir -p /var/run/mysqld -: chown mysql.mysql ~/mysql /var/run/mysqld -: mysqld -u mysql --datadir=/var/mysql --explicit_defaults_for_timestamp -P 12048" +#+BEGIN_SRC bash +adduser mysql && addgroup mysql +mysqld --datadir=/var/mysql --initialize-insecure +mkdir -p /var/run/mysqld +chown mysql.mysql ~/mysql /var/run/mysqld +mysqld -u mysql --datadir=/var/mysql --explicit_defaults_for_timestamp -P 12048" +#+END_SRC If you want to run as root you may have to set @@ -192,6 +196,41 @@ http://biogems.info/contrib/genenetwork/gn2.svg See [[development.org]]. +* Running ElasticSearch + +In order to start up elasticsearch: +Penguin - change user to "elasticsearch" and use the following command: "env JAVA_HOME=/opt/jdk-9.0.4 /opt/elasticsearch-6.2.1/bin/elasticsearch" + + +** SystemD + +New server - as root run "systemctl restart elasticsearch" + +#+BEGIN_SRC +tux01:/etc/systemd/system# cat elasticsearch.service +[Unit] +Description=Run Elasticsearch + +[Service] +ExecStart=/opt/elasticsearch-6.2.1/bin/elasticsearch +Environment=JAVA_HOME=/opt/jdk-9.0.4 +Environment="ES_JAVA_OPTS=-Xms1g -Xmx8g" +Environment="PATH=/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games:/opt/jdk-9.0.4/bin" +LimitNOFILE=65536 +StandardOutput=syslog +StandardError=syslog +User=elasticsearch + +[Install] +WantedBy=multi-user.target +#+END_SRC + +* Read more + +If you want to understand the architecture of GN2 read +[[Architecture.org]]. The rest of this document is mostly on deployment +of GN2. + * Trouble shooting ** ImportError: No module named jinja2 @@ -210,13 +249,17 @@ On one system: : export GEM_PATH="$HOME/.guix-profile/lib/ruby/gems/2.2.0" and perhaps a few more. 
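The runserver.py hunk later in this patch replaces the bare `port` variable with `SERVER_PORT` read through `get_setting_int`, which tools.py now exports. As a rough illustration of how such a setting is resolved — assuming the usual environment-over-defaults precedence in wqflask/utility/tools.py, and with a placeholder default value — a minimal sketch:

#+BEGIN_SRC python
# Sketch only: environment variable first, then etc/default_settings.py,
# then the caller's fallback. The real resolution logic lives in
# wqflask/utility/tools.py; the default value below is a placeholder.
import os

DEFAULT_SETTINGS = {"SERVER_PORT": 5003}

def get_setting(name, guess=None):
    return os.environ.get(name, DEFAULT_SETTINGS.get(name, guess))

def get_setting_int(name):
    return int(get_setting(name))

SERVER_PORT = get_setting_int("SERVER_PORT")  # consumed by wqflask/runserver.py
#+END_SRC

Under that assumption the single Flask server can be moved to another port at launch time, e.g. `env SERVER_PORT=5012 ./bin/genenetwork2`, without editing the settings file.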
-** ERROR: can not find directory $HOME/gn2_data +** ERROR: 'can not find directory $HOME/gn2_data' or 'can not find directory $HOME/genotype_files/genotype' The default settings file looks in your $HOME/gn2_data. Since these files come with a Guix installation you should take a hint from the values in the installed version of default_settings.py (see above in this document). +You can use the GENENETWORK_FILES switch to set the datadir, for example + +: env GN2_PROFILE=~/opt/gn-latest GENENETWORK_FILES=/gnu/data/gn2_data ./bin/genenetwork2 + ** Can't run a module In rare cases, development modules are not brought in with Guix @@ -257,18 +300,6 @@ if that works run genenetwork after setting SQL_URI to something like : export SQL_URI=mysql://gn2:mysql_password@127.0.0.1/db_webqtl_s -* Running ElasticSearch - -In order to start up elasticsearch: -Penguin - change user to "elasticsearch" and use the following command: "env JAVA_HOME=/opt/jdk-9.0.4 /opt/elasticsearch-6.2.1/bin/elasticsearch" - -New server - as root run "systemctl restart elasticsearch" - -* Read more - -If you want to understand the architecture of GN2 read -[[Architecture.org]]. The rest of this document is mostly on deployment -of GN2. * IRC session diff --git a/wqflask/runserver.py b/wqflask/runserver.py index 5f41d04d..7c06356b 100644 --- a/wqflask/runserver.py +++ b/wqflask/runserver.py @@ -27,9 +27,11 @@ app_config() werkzeug_logger = logging.getLogger('werkzeug') +from utility.tools import WEBSERVER_MODE, SERVER_PORT + if WEBSERVER_MODE == 'DEBUG': app.run(host='0.0.0.0', - port=port, + port=SERVER_PORT, debug=True, use_debugger=False, threaded=False, @@ -38,7 +40,7 @@ if WEBSERVER_MODE == 'DEBUG': elif WEBSERVER_MODE == 'DEV': werkzeug_logger.setLevel(logging.WARNING) app.run(host='0.0.0.0', - port=port, + port=SERVER_PORT, debug=False, use_debugger=False, threaded=False, @@ -46,7 +48,7 @@ elif WEBSERVER_MODE == 'DEV': use_reloader=True) else: # staging/production modes app.run(host='0.0.0.0', - port=port, + port=SERVER_PORT, debug=False, use_debugger=False, threaded=True, diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py index 86ef2e1e..8b2260f5 100644 --- a/wqflask/utility/tools.py +++ b/wqflask/utility/tools.py @@ -232,6 +232,7 @@ GN_VERSION = get_setting('GN_VERSION') HOME = get_setting('HOME') WEBSERVER_MODE = get_setting('WEBSERVER_MODE') GN_SERVER_URL = get_setting('GN_SERVER_URL') +SERVER_PORT = get_setting_int('SERVER_PORT') SQL_URI = get_setting('SQL_URI') LOG_LEVEL = get_setting('LOG_LEVEL') LOG_LEVEL_DEBUG = get_setting_int('LOG_LEVEL_DEBUG') -- cgit v1.2.3 From 4b4007f00f26afadc42d451c78e1253235ad65f3 Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 22 Feb 2019 17:07:58 -0600 Subject: Fixed some appearance issues with several tables Fixed issue where cofactor selection screens were broken Changed case attributes to be applied across groups, though need to discuss this with Rob since it shouldn't always be done this way Added new inversion transformation --- wqflask/base/trait.py | 4 ++ wqflask/utility/Plot.py | 3 - wqflask/wqflask/search_results.py | 11 ++- wqflask/wqflask/show_trait/SampleList.py | 32 ++++----- wqflask/wqflask/show_trait/show_trait.py | 22 ++++++ .../new/javascript/dataset_menu_structure.json | 79 ++++++++++++---------- .../javascript/get_covariates_from_collection.js | 76 ++++++++++++++++++--- .../wqflask/static/new/javascript/show_trait.js | 65 ++++++------------ wqflask/wqflask/templates/search_result_page.html | 22 +++--- wqflask/wqflask/templates/show_trait.html | 30 
+++++++- .../templates/show_trait_mapping_tools.html | 2 +- .../templates/show_trait_transform_and_filter.html | 1 + 12 files changed, 221 insertions(+), 126 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index e57d4176..9f76306f 100644 --- a/wqflask/base/trait.py +++ b/wqflask/base/trait.py @@ -206,6 +206,7 @@ def jsonable(trait): return dict(name=trait.name, symbol=trait.symbol, dataset=dataset.name, + dataset_name = dataset.shortname, description=trait.description_display, mean=trait.mean, location=trait.location_repr, @@ -217,6 +218,7 @@ def jsonable(trait): if trait.pubmed_id: return dict(name=trait.name, dataset=dataset.name, + dataset_name = dataset.shortname, description=trait.description_display, authors=trait.authors, pubmed_text=trait.pubmed_text, @@ -228,6 +230,7 @@ def jsonable(trait): else: return dict(name=trait.name, dataset=dataset.name, + dataset_name = dataset.shortname, description=trait.description_display, authors=trait.authors, pubmed_text=trait.pubmed_text, @@ -238,6 +241,7 @@ def jsonable(trait): elif dataset.type == "Geno": return dict(name=trait.name, dataset=dataset.name, + dataset_name = dataset.shortname, location=trait.location_repr ) else: diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py index 529cd117..cce8435d 100644 --- a/wqflask/utility/Plot.py +++ b/wqflask/utility/Plot.py @@ -98,8 +98,6 @@ def find_outliers(vals): """ - logger.debug("xerxes vals is:", pf(vals)) - if vals: #logger.debug("vals is:", pf(vals)) stats = corestats.Stats(vals) @@ -114,7 +112,6 @@ def find_outliers(vals): upper_bound = None lower_bound = None - logger.debug(pf(locals())) return upper_bound, lower_bound # parameter: data is either object returned by reaper permutation function (called by MarkerRegressionPage.py) diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py index c67063b0..893fd172 100644 --- a/wqflask/wqflask/search_results.py +++ b/wqflask/wqflask/search_results.py @@ -112,7 +112,7 @@ views.py). trait_dict['hmac'] = user_manager.data_hmac('{}:{}'.format(this_trait.name, this_trait.dataset.name)) if this_trait.dataset.type == "ProbeSet": trait_dict['symbol'] = this_trait.symbol - trait_dict['description'] = this_trait.description_display + trait_dict['description'] = insert_newlines(this_trait.description_display) trait_dict['location'] = this_trait.location_repr trait_dict['mean'] = "N/A" trait_dict['additive'] = "N/A" @@ -125,7 +125,7 @@ views.py). elif this_trait.dataset.type == "Geno": trait_dict['location'] = this_trait.location_repr elif this_trait.dataset.type == "Publish": - trait_dict['description'] = this_trait.description_display + trait_dict['description'] = insert_newlines(this_trait.description_display) trait_dict['authors'] = this_trait.authors trait_dict['pubmed_id'] = "N/A" if this_trait.pubmed_id: @@ -222,3 +222,10 @@ views.py). 
return the_search else: return None + +def insert_newlines(string, every=64): + """ This is because it is seemingly impossible to change the width of the description column, so I'm just manually adding line breaks """ + lines = [] + for i in xrange(0, len(string), every): + lines.append(string[i:i+every]) + return '\n'.join(lines) diff --git a/wqflask/wqflask/show_trait/SampleList.py b/wqflask/wqflask/show_trait/SampleList.py index 50d7b6c0..50026bba 100644 --- a/wqflask/wqflask/show_trait/SampleList.py +++ b/wqflask/wqflask/show_trait/SampleList.py @@ -39,7 +39,7 @@ class SampleList(object): #self.sample_qnorm = get_transform_vals(self.dataset, this_trait) - if self.this_trait and self.dataset and self.dataset.type == 'ProbeSet': + if self.this_trait and self.dataset: self.get_extra_attribute_values() for counter, sample_name in enumerate(sample_names, 1): @@ -72,10 +72,9 @@ class SampleList(object): self.sample_list.append(sample) - logger.debug("self.attributes is", pf(self.attributes)) + #logger.debug("attribute vals are", pf(self.sample_attribute_values)) self.do_outliers() - logger.debug("*the_samples are [%i]: %s" % (len(self.sample_list), pf(self.sample_list))) def __repr__(self): return " --> %s" % (pf(self.__dict__)) @@ -98,11 +97,11 @@ class SampleList(object): # Get attribute names and distinct values for each attribute results = g.db.execute(''' - SELECT DISTINCT CaseAttribute.Id, CaseAttribute.Name, CaseAttributeXRef.Value - FROM CaseAttribute, CaseAttributeXRef - WHERE CaseAttributeXRef.CaseAttributeId = CaseAttribute.Id - AND CaseAttributeXRef.ProbeSetFreezeId = %s - ORDER BY CaseAttribute.Name''', (str(self.dataset.id),)) + SELECT DISTINCT CaseAttribute.Id, CaseAttribute.Name, CaseAttributeXRefNew.Value + FROM CaseAttribute, CaseAttributeXRefNew + WHERE CaseAttributeXRefNew.CaseAttributeId = CaseAttribute.Id + AND CaseAttributeXRefNew.InbredSetId = %s + ORDER BY CaseAttribute.Name''', (str(self.dataset.group.id),)) self.attributes = {} for attr, values in itertools.groupby(results.fetchall(), lambda row: (row.Id, row.Name)): @@ -115,16 +114,17 @@ class SampleList(object): def get_extra_attribute_values(self): if self.attributes: - results = g.db.execute(''' - SELECT Strain.Name AS SampleName, CaseAttributeId AS Id, CaseAttributeXRef.Value - FROM Strain, StrainXRef, InbredSet, CaseAttributeXRef + query = ''' + SELECT Strain.Name AS SampleName, CaseAttributeId AS Id, CaseAttributeXRefNew.Value + FROM Strain, StrainXRef, InbredSet, CaseAttributeXRefNew WHERE StrainXRef.StrainId = Strain.Id AND InbredSet.Id = StrainXRef.InbredSetId - AND CaseAttributeXRef.StrainId = Strain.Id - AND InbredSet.Name = %s - AND CaseAttributeXRef.ProbeSetFreezeId = %s - ORDER BY SampleName''', - (self.dataset.group.name, self.this_trait.dataset.id)) + AND CaseAttributeXRefNew.StrainId = Strain.Id + AND InbredSet.Id = CaseAttributeXRefNew.InbredSetId + AND CaseAttributeXRefNew.InbredSetId = %s + ORDER BY SampleName''' % self.dataset.group.id + + results = g.db.execute(query) for sample_name, items in itertools.groupby(results.fetchall(), lambda row: row.SampleName): attribute_values = {} diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index 0d1ed345..9b4470fe 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -350,6 +350,28 @@ def quantile_normalize_vals(sample_groups): return qnorm_by_group +def get_z_scores(sample_groups): + zscore_by_group = [] + for sample_type in sample_groups: + trait_vals = 
[] + for sample in sample_type.sample_list: + try: + trait_vals.append(float(sample.value)) + except: + continue + + qnorm_vals = normf(trait_vals) + qnorm_vals_with_x = [] + counter = 0 + for sample in sample_type.sample_list: + if sample.display_value == "x": + qnorm_vals_with_x.append("x") + else: + qnorm_vals_with_x.append(qnorm_vals[counter]) + counter += 1 + + qnorm_by_group.append(qnorm_vals_with_x) + def get_nearest_marker(this_trait, this_db): this_chr = this_trait.locus_chr logger.debug("this_chr:", this_chr) diff --git a/wqflask/wqflask/static/new/javascript/dataset_menu_structure.json b/wqflask/wqflask/static/new/javascript/dataset_menu_structure.json index 3f2673c1..7d00d509 100644 --- a/wqflask/wqflask/static/new/javascript/dataset_menu_structure.json +++ b/wqflask/wqflask/static/new/javascript/dataset_menu_structure.json @@ -1560,7 +1560,7 @@ "AIL-LGSM-F34-A": { "Phenotypes": [ [ - "None", + "655", "AIL-LGSM-F34-APublish", "AIL-LGSM-F34-A Phenotypes" ] @@ -1569,7 +1569,7 @@ "AIL-LGSM-F34-F39-43-GBS": { "Phenotypes": [ [ - "None", + "654", "AIL-LGSM-F34-F39-43-GBSPublish", "AIL-LGSM-F34-F39-43-GBS Phenotypes" ] @@ -1578,7 +1578,7 @@ "AIL-LGSM-F34-GBS": { "Phenotypes": [ [ - "None", + "656", "AIL-LGSM-F34-GBSPublish", "AIL-LGSM-F34-GBS Phenotypes" ] @@ -1587,7 +1587,7 @@ "AIL-LGSM-F39-43-GBS": { "Phenotypes": [ [ - "None", + "657", "AIL-LGSM-F39-43-GBSPublish", "AIL-LGSM-F39-43-GBS Phenotypes" ] @@ -1712,11 +1712,6 @@ }, "B6D2F2": { "Brain mRNA": [ - [ - "77", - "BRF2_M_0805_R", - "OHSU/VA B6D2F2 Brain mRNA M430 (Aug05) RMA" - ], [ "76", "BRF2_M_0805_M", @@ -1727,6 +1722,11 @@ "BRF2_M_0805_P", "OHSU/VA B6D2F2 Brain mRNA M430 (Aug05) PDNN" ], + [ + "77", + "BRF2_M_0805_R", + "OHSU/VA B6D2F2 Brain mRNA M430 (Aug05) RMA" + ], [ "33", "BRF2_M_0304_P", @@ -2081,16 +2081,16 @@ ] ], "Brain mRNA": [ - [ - "164", - "UTHSC_BXD_WB_RNASeq1112", - "UTHSC Mouse BXD Whole Brain RNA Sequence (Nov12) RPKM Untrimmed" - ], [ "590", "UTHSC_BXD_WB_RNASeqtrim1_1112", "UTHSC Mouse BXD Whole Brain RNA Sequence (Nov12) RPKM Trimmed 1.0" ], + [ + "164", + "UTHSC_BXD_WB_RNASeq1112", + "UTHSC Mouse BXD Whole Brain RNA Sequence (Nov12) RPKM Untrimmed" + ], [ "394", "UTHSC_BXD_WB_RNASeqEx1112", @@ -2232,11 +2232,6 @@ "Eye_M2_0908_R_ND", "Eye M430v2 WT Gpnmb (Sep08) RMA" ], - [ - "279", - "Eye_M2_0908_R_WT", - "Eye M430v2 WT Tyrp1 (Sep08) RMA" - ], [ "278", "Eye_M2_0908_R_MT", @@ -2247,6 +2242,11 @@ "Eye_M2_0908_WTWT", "Eye M430v2 WT WT (Sep08) RMA" ], + [ + "279", + "Eye_M2_0908_R_WT", + "Eye M430v2 WT Tyrp1 (Sep08) RMA" + ], [ "400", "DBA2J-ONH-1212", @@ -2530,16 +2530,16 @@ ] ], "Kidney mRNA": [ - [ - "239", - "MA_M2F_0706_R", - "Mouse kidney M430v2 Female (Aug06) RMA" - ], [ "240", "MA_M2M_0706_R", "Mouse kidney M430v2 Male (Aug06) RMA" ], + [ + "239", + "MA_M2F_0706_R", + "Mouse kidney M430v2 Female (Aug06) RMA" + ], [ "118", "MA_M2_0806_R", @@ -2691,6 +2691,11 @@ "EPFLMouseLiverHFDRMA0413", "EPFL/LISP BXD HFD Liver Affy Mouse Gene 1.0 ST (Apr13) RMA" ], + [ + "849", + "EPFLMouseLiverCDEx0413", + "EPFL/LISP BXD CD Liver Affy Mouse Gene 1.0 ST (Apr13) RMA Exon Level" + ], [ "848", "EPFLMouseLiverHFCEx0413", @@ -2701,11 +2706,6 @@ "EPFLMouseLiverCDRMA0413", "EPFL/LISP BXD CD Liver Affy Mouse Gene 1.0 ST (Apr13) RMA" ], - [ - "849", - "EPFLMouseLiverCDEx0413", - "EPFL/LISP BXD CD Liver Affy Mouse Gene 1.0 ST (Apr13) RMA Exon Level" - ], [ "433", "EPFLMouseLiverBothExRMA0413", @@ -3121,6 +3121,13 @@ ] }, "BXD-Harvested": { + "Hippocampus mRNA": [ + [ + "873", + "JAX-BXD-Hip-Pro-0219", + "JAX BXD 
Hippocampal Proteome (Feb19)" + ] + ], "Liver mRNA": [ [ "843", @@ -3538,11 +3545,6 @@ ] ], "Hippocampus mRNA": [ - [ - "213", - "Illum_LXS_Hipp_NOS_1008", - "Hippocampus Illumina NOS (Oct08) RankInv beta" - ], [ "219", "Illum_LXS_Hipp_NON_1008", @@ -3563,6 +3565,11 @@ "Illum_LXS_Hipp_RSS_1008", "Hippocampus Illumina RSS (Oct08) RankInv beta" ], + [ + "213", + "Illum_LXS_Hipp_NOS_1008", + "Hippocampus Illumina NOS (Oct08) RankInv beta" + ], [ "143", "Illum_LXS_Hipp_loess0807", @@ -5332,6 +5339,10 @@ "Phenotypes", "Phenotypes" ], + [ + "Hippocampus mRNA", + "Hippocampus mRNA" + ], [ "Liver mRNA", "Liver mRNA" diff --git a/wqflask/wqflask/static/new/javascript/get_covariates_from_collection.js b/wqflask/wqflask/static/new/javascript/get_covariates_from_collection.js index f2e694d8..fc6e5a78 100644 --- a/wqflask/wqflask/static/new/javascript/get_covariates_from_collection.js +++ b/wqflask/wqflask/static/new/javascript/get_covariates_from_collection.js @@ -2,12 +2,65 @@ var add_trait_data, assemble_into_json, back_to_collections, collection_click, collection_list, color_by_trait, create_trait_data_csv, get_this_trait_vals, get_trait_data, process_traits, selected_traits, submit_click, this_trait_data, trait_click, __indexOf = [].indexOf || function(item) { for (var i = 0, l = this.length; i < l; i++) { if (i in this && this[i] === item) return i; } return -1; }; -collection_list = null; - this_trait_data = null; selected_traits = {}; +$('#collections_list').attr("style", "width: 100%;"); +$('#trait_table').dataTable( { + "drawCallback": function( settings ) { + $('#trait_table tr').click(function(event) { + if (event.target.type !== 'checkbox') { + $(':checkbox', this).trigger('click'); + } + }); + }, + "columns": [ + { "type": "natural", "width": "3%" }, + { "type": "natural", "width": "8%" }, + { "type": "natural", "width": "20%" }, + { "type": "natural", "width": "25%" }, + { "type": "natural", "width": "25%" }, + { "type": "natural", "width": "15%" } + ], + "columnDefs": [ { + "targets": 0, + "orderable": false + } ], + "order": [[1, "asc" ]], + "sDom": "RZtr", + "iDisplayLength": -1, + "autoWidth": true, + "bDeferRender": true, + "bSortClasses": false, + "paging": false, + "orderClasses": true +} ); + +$('#collection_table').dataTable( { + "createdRow": function ( row, data, index ) { + if ($('td', row).eq(2).text().length > 40) { + $('td', row).eq(2).text($('td', row).eq(2).text().substring(0, 40)); + $('td', row).eq(2).text($('td', row).eq(2).text() + '...') + } + if ($('td', row).eq(4).text().length > 50) { + $('td', row).eq(4).text($('td', row).eq(4).text().substring(0, 50)); + $('td', row).eq(4).text($('td', row).eq(4).text() + '...') + } + }, + "columnDefs": [ { + "targets": 0, + "orderable": false + } ], + "order": [[1, "asc" ]], + "sDom": "ZRtr", + "iDisplayLength": -1, + "autoWidth": true, + "bSortClasses": false, + "paging": false, + "orderClasses": true +} ); + collection_click = function() { var this_collection_url; console.log("Clicking on:", $(this)); @@ -28,16 +81,20 @@ submit_click = function() { $('#collections_holder').find('input[type=checkbox]:checked').each(function() { var this_dataset, this_trait; this_trait = $(this).parents('tr').find('.trait').text(); + this_description = $(this).parents('tr').find('.description').text(); console.log("this_trait is:", this_trait); - this_dataset = $(this).parents('tr').find('.dataset').text(); + this_dataset = $(this).parents('tr').find('.dataset').data("dataset"); console.log("this_dataset is:", this_dataset); covariates_string 
+= this_trait + ":" + this_dataset + "," - covariates_display_string += this_trait + "\n" + this_covariate_display_string = this_trait + ": " + this_description + if (this_covariate_display_string.length > 50) { + this_covariate_display_string = this_covariate_display_string.substring(0, 45) + "..." + } + covariates_display_string += this_covariate_display_string + "\n" }); // Trim the last comma covariates_string = covariates_string.substring(0, covariates_string.length - 1) //covariates_display_string = covariates_display_string.substring(0, covariates_display_string.length - 2) - console.log("COVARIATES:", covariates_string) $("input[name=covariates]").val(covariates_string) $(".selected_covariates").val(covariates_display_string) @@ -125,17 +182,16 @@ process_traits = function(trait_data, textStatus, jqXHR) { the_html = ""; the_html += " "; - the_html += ""; - the_html += ""; + the_html += "
RecordData SetDescriptionMean
"; + the_html += ""; the_html += ""; for (_i = 0, _len = trait_data.length; _i < _len; _i++) { trait = trait_data[_i]; the_html += ""; the_html += ""; the_html += ""; - the_html += ""; - the_html += ""; - the_html += ""; + the_html += ""; + the_html += ""; } the_html += ""; the_html += "
RecordData SetDescription
" + trait.name + "" + trait.dataset + "" + trait.description + "" + (trait.mean || ' ') + "
" + trait.dataset_name + "" + trait.description + "
"; diff --git a/wqflask/wqflask/static/new/javascript/show_trait.js b/wqflask/wqflask/static/new/javascript/show_trait.js index 210945f0..bcb67527 100644 --- a/wqflask/wqflask/static/new/javascript/show_trait.js +++ b/wqflask/wqflask/static/new/javascript/show_trait.js @@ -2,8 +2,6 @@ var Stat_Table_Rows, is_number, __hasProp = {}.hasOwnProperty, __slice = [].slice; -console.log("start_b"); - is_number = function(o) { return !isNaN((o - 0) && o !== null); }; @@ -65,7 +63,6 @@ var add, block_by_attribute_value, block_by_index, block_outliers, change_stats_ add = function() { var trait; trait = $("input[name=trait_hmac]").val(); - console.log("trait is:", trait); return $.colorbox({ href: "/collections/add?traits=" + trait }); @@ -100,9 +97,7 @@ open_trait_selection = function() { inline: true, href: "#collections_holder", onComplete: function(){ - console.log("before get script") $.getScript("/static/new/javascript/get_traits_from_collection.js"); - console.log("after get script") } }); return $('a.collection_name').attr('onClick', 'return false'); @@ -115,10 +110,10 @@ open_covariate_selection = function() { $.colorbox({ inline: true, href: "#collections_holder", + width: "1000px", + height: "700px", onComplete: function(){ - console.log("before get cov script") $.getScript("/static/new/javascript/get_covariates_from_collection.js"); - console.log("after get cov script") } }); return $('a.collection_name').attr('onClick', 'return false'); @@ -142,21 +137,16 @@ stats_mdp_change = function() { change_stats_value = function(sample_sets, category, value_type, decimal_places, effects) { var current_value, id, in_box, the_value, title_value; id = "#" + process_id(category, value_type); - console.log("the_id:", id); in_box = $(id).html; current_value = parseFloat($(in_box)).toFixed(decimal_places); the_value = sample_sets[category][value_type](); - console.log("After running sample_sets, the_value is:", the_value); if (decimal_places > 0) { title_value = the_value.toFixed(decimal_places * 2); the_value = the_value.toFixed(decimal_places); } else { title_value = null; } - console.log("*-* the_value:", the_value); - console.log("*-* current_value:", current_value); if (the_value !== current_value) { - console.log("object:", $(id).html(the_value)); if (effects) { $(id).html(the_value).effect("highlight"); } else { @@ -179,7 +169,6 @@ update_stat_values = function(sample_sets) { _results1 = []; for (_j = 0, _len1 = Stat_Table_Rows.length; _j < _len1; _j++) { row = Stat_Table_Rows[_j]; - console.log("Calling change_stats_value"); _results1.push(change_stats_value(sample_sets, category, row.vn, row.digits, show_effects)); } return _results1; @@ -356,7 +345,6 @@ process_id = function() { processed = ""; for (_i = 0, _len = values.length; _i < _len; _i++) { value = values[_i]; - console.log("value:", value); value = value.replace(" ", "_"); if (processed.length) { processed += "-"; @@ -378,7 +366,6 @@ edit_data_change = function() { samples_other: {}, samples_all: {} }; - console.log("at beginning:", sample_sets); tables = ['samples_primary', 'samples_other']; for (_i = 0, _len = tables.length; _i < _len; _i++) { table = tables[_i]; @@ -412,39 +399,30 @@ edit_data_change = function() { } } } - console.log("towards end:", sample_sets); update_stat_values(sample_sets); if ($('#histogram').hasClass('js-plotly-plot')){ - console.log("redrawing histogram"); redraw_histogram(); } if ($('#bar_chart').hasClass('js-plotly-plot')){ - console.log("redrawing bar chart"); redraw_bar_chart(); } if 
($('#box_plot').hasClass('js-plotly-plot')){ - console.log("redrawing box plot"); redraw_box_plot(); } if ($('#violin_plot').hasClass('js-plotly-plot')){ - console.log("redrawing violin plot"); redraw_violin_plot(); } if ($('#prob_plot_div').hasClass('js-plotly-plot')){ - console.log("redrawing probability plot"); return redraw_prob_plot(); } }; show_hide_outliers = function() { var label; - console.log("FOOBAR in beginning of show_hide_outliers"); label = $('#show_hide_outliers').val(); - console.log("lable is:", label); if (label === "Hide Outliers") { return $('#show_hide_outliers').val("Show Outliers"); } else if (label === "Show Outliers") { - console.log("Found Show Outliers"); $('#show_hide_outliers').val("Hide Outliers"); return console.log("Should be now Hide Outliers"); } @@ -452,7 +430,6 @@ show_hide_outliers = function() { on_corr_method_change = function() { var corr_method; corr_method = $('select[name=corr_type]').val(); - console.log("corr_method is:", corr_method); $('.correlation_desc').hide(); $('#' + corr_method + "_r_desc").show().effect("highlight"); if (corr_method === "lit") { @@ -487,7 +464,6 @@ create_value_dropdown = function(value) { }; populate_sample_attributes_values_dropdown = function() { var attribute_info, key, sample_attributes, selected_attribute, value, _i, _len, _ref, _ref1, _results; - console.log("in beginning of psavd"); $('#attribute_values').empty(); sample_attributes = {}; _ref = js_data.attribute_names; @@ -496,9 +472,7 @@ populate_sample_attributes_values_dropdown = function() { attribute_info = _ref[key]; sample_attributes[attribute_info.name] = attribute_info.distinct_values; } - console.log("[visa] attributes is:", sample_attributes); selected_attribute = $('#exclude_menu').val().replace("_", " "); - console.log("selected_attribute is:", selected_attribute); _ref1 = sample_attributes[selected_attribute]; _results = []; for (_i = 0, _len = _ref1.length; _i < _len; _i++) { @@ -507,7 +481,7 @@ populate_sample_attributes_values_dropdown = function() { } return _results; }; -if (js_data.attribute_names.length > 0) { +if (Object.keys(js_data.attribute_names).length > 0) { populate_sample_attributes_values_dropdown(); } $('#exclude_menu').change(populate_sample_attributes_values_dropdown); @@ -547,21 +521,15 @@ block_by_index = function() { } } else { index = parseInt(index_set); - console.log("index:", index); index_list.push(index); } } - console.log("index_list:", index_list); _results = []; for (_k = 0, _len1 = index_list.length; _k < _len1; _k++) { index = index_list[_k]; if ($('#block_group').val() === "primary") { - console.log("block_group:", $('#block_group').val()); - console.log("row:", $('#Primary_' + index.toString())); _results.push($('#Primary_' + index.toString()).find('.trait_value_input').val("x")); } else if ($('#block_group').val() === "other") { - console.log("block_group:", $('#block_group').val()); - console.log("row:", $('#Other_' + index.toString())); _results.push($('#Other_' + index.toString()).find('.trait_value_input').val("x")); } else { _results.push(void 0); @@ -592,9 +560,7 @@ reset_samples_table = function() { $('input[name="transform"]').val(""); return $('.trait_value_input').each((function(_this) { return function(_index, element) { - console.log("value is:", $(element).val()); $(element).val($(element).data('value')); - console.log("data-value is:", $(element).data('value')); return $(element).parents('.value_se').show(); }; })(this)); @@ -629,6 +595,21 @@ sqrt_normalize_data = function() { })(this)); }; 
+invert_data = function() { + return $('.edit_sample_value').each((function(_this) { + return function(_index, element) { + current_value = parseFloat($(element).val()); + if(isNaN(current_value)) { + return current_value + } else { + $(element).val(-(current_value)); + return -(current_value) + } + }; + })(this)); +}; + + qnorm_data = function() { return $('.edit_sample_value').each((function(_this) { return function(_index, element) { @@ -656,6 +637,9 @@ normalize_data = function() { $('input[name="transform"]').val("sqrt") } } + else if ($('#norm_method option:selected').val() == 'invert'){ + invert_data() + } else if ($('#norm_method option:selected').val() == 'qnorm'){ if ($('input[name="transform"]').val() != "qnorm") { qnorm_data() @@ -696,7 +680,6 @@ get_sample_table_data = function(table_name) { attribute_info = _ref[key]; row_data[attribute_info.name] = $.trim($(element).find('.column_name-' + attribute_info.name.replace(" ", "_")).text()); } - console.log("row_data is:", row_data); return samples.push(row_data); }; })(this)); @@ -707,18 +690,14 @@ export_sample_table_data = function() { sample_data = {}; sample_data.primary_samples = get_sample_table_data('samples_primary'); sample_data.other_samples = get_sample_table_data('samples_other'); - console.log("sample_data is:", sample_data); json_sample_data = JSON.stringify(sample_data); - console.log("json_sample_data is:", json_sample_data); $('input[name=export_data]').val(json_sample_data); - console.log("export_data is", $('input[name=export_data]').val()); format = $('input[name=export_format]').val(); if (format === "excel") { $('#trait_data_form').attr('action', '/export_trait_excel'); } else { $('#trait_data_form').attr('action', '/export_trait_csv'); } - console.log("action is:", $('#trait_data_form').attr('action')); return $('#trait_data_form').submit(); }; @@ -728,9 +707,7 @@ $('.export_format').change(function() { }); $('.export').click(export_sample_table_data); -console.log("before registering block_outliers"); $('#block_outliers').click(block_outliers); -console.log("after registering block_outliers"); _.mixin(_.str.exports()); get_sample_vals = function(sample_list) { diff --git a/wqflask/wqflask/templates/search_result_page.html b/wqflask/wqflask/templates/search_result_page.html index 36a25665..31a5b94e 100644 --- a/wqflask/wqflask/templates/search_result_page.html +++ b/wqflask/wqflask/templates/search_result_page.html @@ -117,7 +117,7 @@
-
+
@@ -224,10 +224,6 @@ } $('td', row).eq(4).attr('title', $('td', row).eq(4).text()); $('td', row).eq(4).attr('data-export', $('td', row).eq(4).text()); - if ($('td', row).eq(4).text().length > 55) { - $('td', row).eq(4).text($('td', row).eq(4).text().substring(0, 55)); - $('td', row).eq(4).text($('td', row).eq(4).text() + '...') - } $('td', row).slice(6,10).attr("align", "right"); $('td', row).eq(5).attr('data-export', $('td', row).eq(5).text()); $('td', row).eq(6).attr('data-export', $('td', row).eq(6).text()); @@ -237,16 +233,8 @@ {% elif dataset.type == 'Publish' %} $('td', row).eq(3).attr('title', $('td', row).eq(3).text()); $('td', row).eq(3).attr('data-export', $('td', row).eq(3).text()); - if ($('td', row).eq(3).text().length > 20) { - $('td', row).eq(3).text($('td', row).eq(3).text().substring(0, 20)); - $('td', row).eq(3).text($('td', row).eq(3).text() + '...') - } $('td', row).eq(4).attr('title', $('td', row).eq(4).text()); $('td', row).eq(4).attr('data-export', $('td', row).eq(4).text()); - if ($('td', row).eq(4).text().length > 55) { - $('td', row).eq(4).text($('td', row).eq(4).text().substring(0, 55)); - $('td', row).eq(4).text($('td', row).eq(4).text() + '...') - } $('td', row).slice(6,9).attr("align", "right"); $('td', row).eq(5).attr('data-export', $('td', row).eq(5).text()); $('td', row).eq(6).attr('data-export', $('td', row).eq(6).text()); @@ -288,6 +276,7 @@ { 'title': "Description", 'type': "natural", + 'width': "500px", 'data': "description" }, { @@ -321,11 +310,13 @@ { 'title': "Description", 'type': "natural", + 'width': "500px", 'data': "description" }, { 'title': "Authors", 'type': "natural", + 'width': "300px", 'data': "authors" }, { @@ -351,17 +342,20 @@ { 'title': "Max LRS Location", 'type': "natural", + 'width': "160px", 'data': "lrs_location" }, { - 'title': "Additive Effect?", + 'title': "Additive
Effect?", 'type': "natural", + 'width': "100px", 'data': "additive", 'orderSequence': [ "desc", "asc"] }{% elif dataset.type == 'Geno' %}, { 'title': "Location", 'type': "natural", + 'width': "160px", 'data': "location" }{% endif %} ], diff --git a/wqflask/wqflask/templates/show_trait.html b/wqflask/wqflask/templates/show_trait.html index def40f5d..9d2cb7ed 100644 --- a/wqflask/wqflask/templates/show_trait.html +++ b/wqflask/wqflask/templates/show_trait.html @@ -10,7 +10,6 @@ - {% endblock %} @@ -142,7 +141,6 @@ - @@ -152,6 +150,8 @@ + + + - - diff --git a/wqflask/wqflask/templates/show_trait_mapping_tools.html b/wqflask/wqflask/templates/show_trait_mapping_tools.html index a806a8b3..ad7412b2 100644 --- a/wqflask/wqflask/templates/show_trait_mapping_tools.html +++ b/wqflask/wqflask/templates/show_trait_mapping_tools.html @@ -4,25 +4,18 @@
-
+ {% for mapping_method in dataset.group.mapping_names %} + {% if mapping_method == "GEMMA" %} +
@@ -102,7 +97,7 @@
- {% if dataset.group.mapping_id == "1" %} + {% elif mapping_method == "QTLReaper" %}
@@ -217,7 +212,8 @@
-
+ {% elif mapping_method == "R/qtl" %} +
@@ -341,19 +337,24 @@
{% endif %} + {% endfor %}
+ {% for mapping_method in dataset.group.mapping_names %} + {% if mapping_method == "GEMMA" %}
GEMMA
Maps traits with correction for kinship among samples using a linear mixed model method, and also allows users to fit multiple covariates such as sex, age, treatment, and genetic markers (PMID: 2453419, and GitHub code). GEMMA incorporates the Leave One Chromosome Out (LOCO) method to ensure that the correction for kinship does not remove useful genetic variance near each marker. Markers can be filtered to include only those with minor allele frequencies (MAF) above a threshold. The default MAF is 0.05.
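The GEMMA description above corresponds, in outline, to two command-line calls: one that builds the kinship matrix and one that runs the linear mixed model with a minor-allele-frequency filter. The sketch below is generic GEMMA usage with placeholder file names, not the command GN2 actually assembles (that happens in wqflask/marker_regression/gemma_mapping.py, presumably through the GEMMA_COMMAND / GEMMA_WRAPPER_COMMAND settings seen earlier in this log, with gemma-wrapper handling LOCO).

#+BEGIN_SRC python
# Generic two-step GEMMA run; all file names are placeholders.
import subprocess

GEMMA_COMMAND = "gemma"                       # normally resolved via utility.tools
GENO, PHENO, ANNO = "geno.txt", "pheno.txt", "snps.txt"

# 1. Centered kinship matrix -> ./output/kinship.cXX.txt
subprocess.call("%s -g %s -p %s -gk 1 -o kinship" % (GEMMA_COMMAND, GENO, PHENO),
                shell=True)

# 2. LMM association (Wald test), keeping markers with MAF >= 0.05,
#    the default threshold mentioned above.
subprocess.call("%s -g %s -p %s -a %s -k output/kinship.cXX.txt -lmm 1 -maf 0.05 -o assoc"
                % (GEMMA_COMMAND, GENO, PHENO, ANNO), shell=True)
#+END_SRC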
- {% if dataset.group.mapping_id == "1" %} + {% elif mapping_method == "R/qtl" %}
R/qtl
Major upgrade of R/qtl that supports most experimental populations including those with complex admixture and two or more parental lines as well as large omic data sets (PMID: 30591514). Both R/qtl and R/qtl2 are available as stand-alone R packages (R suite).
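GN2 drives R/qtl through rpy2 rather than shelling out to R; the rqtl_mapping.py hunks at the end of this log show the production version. A stripped-down sketch of the same pattern, using the standard R/qtl API with a placeholder cross file (GN2 itself builds the cross from .geno files via its GENOtoCSVR helper):

#+BEGIN_SRC python
# Minimal rpy2-driven R/qtl scan; "BXD.csvr" is a placeholder cross file.
import rpy2.robjects as ro

ro.r["library"]("qtl")
cross = ro.r["read.cross"]("csvr", file="BXD.csvr")
cross = ro.r["calc.genoprob"](cross, step=1)
result = ro.r["scanone"](cross, model="normal", method="hk")
# Permutations for genome-wide thresholds; the underscored keyword style
# mirrors the pheno_col / n_perm calls in rqtl_mapping.py.
perms = ro.r["scanone"](cross, n_perm=100, model="normal", method="hk")
#+END_SRC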
+ {% elif mapping_method == "QTLReaper" %}
Haley-Knott Regression
Fast linear mapping method (PMID 16718932) works well with F2 intercrosses and backcrosses, but is not recommended for complex or admixed populations (e.g., GWAS or heterogeneous stock studies) or for advanced intercrosses, recombinant inbred families, or diallel crosses. Interactive plots in GeneNetwork have relied on the fast HK mapping for two decades and we still use this method for mapping omics data sets and computing genome-wide permutation thresholds (QTL Reaper code). A small numeric sketch of this regression follows below.
{% endif %} + {% endfor %}
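The Haley-Knott description above reduces to an ordinary least-squares regression of the trait on the expected genotype dosage at each position, with the LOD score computed from the ratio of residual sums of squares. A small, self-contained numeric sketch of that single-locus fit (not GN2 code; covariates and X-chromosome handling are ignored):

#+BEGIN_SRC python
# LOD = (n/2) * log10(RSS0 / RSS1): null model (mean only) vs. mean + dosage.
import numpy as np

def hk_lod(pheno, geno_prob):
    """pheno: trait values; geno_prob: expected genotype dosage at one locus."""
    y = np.asarray(pheno, dtype=float)
    X = np.column_stack([np.ones(len(y)), np.asarray(geno_prob, dtype=float)])
    beta, _, _, _ = np.linalg.lstsq(X, y, rcond=None)
    rss1 = np.sum((y - X.dot(beta)) ** 2)
    rss0 = np.sum((y - y.mean()) ** 2)
    return (len(y) / 2.0) * np.log10(rss0 / rss1)
#+END_SRC

At a fully informative marker the dosage column is just the observed genotype codes; between markers the conditional expectations come from something like R/qtl's calc.genoprob, which is what keeps the method fast enough for interactive plots and large permutation runs.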
- +
+
+ + Select covariate(s) from a collection +
+
+ {% if g.user_session.num_collections < 1 %} + No collections available. Please add traits to a collection to use them as covariates. + {% else %} +
+ + +
+ + {% endif %} +
+
-- cgit v1.2.3 From 39d37ecde31f682013c7635e4f97853edc256b01 Mon Sep 17 00:00:00 2001 From: zsloan Date: Thu, 16 Apr 2020 13:48:08 -0500 Subject: Many changes, including: - Added permutation strata option for R/qtl - Made a variety of aesthetic changes to collections-related pages - Made a variety of aesthetic changes to the multi-trait tool options on the search result and correlation pages - Made some functional changes to collections that prevent duplicate traits and ensure new traits are at the top - The "Default Collection" is now always at the bottom of the collection list and renamed to "Your Default Collection" --- wqflask/utility/gen_geno_ob.py | 5 +- wqflask/wqflask/collect.py | 4 +- .../marker_regression/display_mapping_results.py | 12 ++- wqflask/wqflask/marker_regression/rqtl_mapping.py | 90 ++++++++++++++-------- wqflask/wqflask/marker_regression/run_mapping.py | 84 +++++++++++++++++--- wqflask/wqflask/show_trait/SampleList.py | 4 +- wqflask/wqflask/show_trait/show_trait.py | 23 +++++- .../new/javascript/show_trait_mapping_tools.js | 3 +- wqflask/wqflask/templates/collections/add.html | 28 +++---- wqflask/wqflask/templates/collections/list.html | 2 +- wqflask/wqflask/templates/collections/view.html | 15 +++- wqflask/wqflask/templates/correlation_page.html | 58 +++++++------- wqflask/wqflask/templates/loading.html | 2 +- wqflask/wqflask/templates/search_result_page.html | 49 ++++++------ wqflask/wqflask/templates/show_trait.html | 2 +- .../templates/show_trait_mapping_tools.html | 17 +++- wqflask/wqflask/user_session.py | 1 + wqflask/wqflask/views.py | 16 ++-- 18 files changed, 278 insertions(+), 137 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/gen_geno_ob.py b/wqflask/utility/gen_geno_ob.py index db40f6ea..23b0b650 100644 --- a/wqflask/utility/gen_geno_ob.py +++ b/wqflask/utility/gen_geno_ob.py @@ -156,7 +156,10 @@ class Locus(object): self.cM = float(marker_row[geno_ob.cm_column]) except: self.cM = float(marker_row[geno_ob.mb_column]) if geno_ob.mb_exists else 0 - self.Mb = float(marker_row[geno_ob.mb_column]) if geno_ob.mb_exists else None + try: + self.Mb = float(marker_row[geno_ob.mb_column]) if geno_ob.mb_exists else None + except: + self.Mb = self.cM geno_table = { geno_ob.mat: -1, diff --git a/wqflask/wqflask/collect.py b/wqflask/wqflask/collect.py index 74eb869f..fa6e03b4 100644 --- a/wqflask/wqflask/collect.py +++ b/wqflask/wqflask/collect.py @@ -75,7 +75,7 @@ def collections_add(): collections = g.user_session.user_collections if len(collections) < 1: - collection_name = "Default Collection" + collection_name = "Your Default Collection" uc_id = g.user_session.add_collection(collection_name, set()) collections = g.user_session.user_collections @@ -113,7 +113,7 @@ def collections_new(): collection_name = collection["name"] default_collection_exists = True if not default_collection_exists: - return create_new("Default Collection") + return create_new("Your Default Collection") else: collection_id = params['existing_collection'].split(":")[0] collection_name = params['existing_collection'].split(":")[1] diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py index b8f84721..a7e11738 100644 --- a/wqflask/wqflask/marker_regression/display_mapping_results.py +++ b/wqflask/wqflask/marker_regression/display_mapping_results.py @@ -267,8 +267,8 @@ class DisplayMappingResults(object): else: self.genotype = self.dataset.group.read_genotype_file() - if self.mapping_method 
== "rqtl_geno" and self.genotype.filler == True: - self.genotype = self.genotype.read_rdata_output(self.qtlresults) + #if self.mapping_method == "rqtl_geno" and self.genotype.filler == True: + # self.genotype = self.genotype.read_rdata_output(self.qtlresults) #Darwing Options try: @@ -935,10 +935,14 @@ class DisplayMappingResults(object): string3 += 'no cofactors' elif self.mapping_method == "rqtl_plink" or self.mapping_method == "rqtl_geno": string3 = 'Using R/qtl mapping method with ' - if self.controlLocus and self.doControl != "false": + if self.covariates != "": + string3 += 'the cofactors below:' + cofactor_names = ", ".join([covar.split(":")[0] for covar in self.covariates.split(",")]) + string4 = cofactor_names + elif self.controlLocus and self.doControl != "false": string3 += '%s as control' % self.controlLocus else: - string3 += 'no control for other QTLs' + string3 += 'no cofactors' else: string3 = 'Using Haldane mapping function with ' if self.controlLocus and self.doControl != "false": diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py index d76f3812..aae8e602 100644 --- a/wqflask/wqflask/marker_regression/rqtl_mapping.py +++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py @@ -11,7 +11,7 @@ from utility.tools import locate, TEMPDIR import utility.logger logger = utility.logger.getLogger(__name__ ) -def run_rqtl_geno(vals, dataset, method, model, permCheck, num_perm, do_control, control_marker, manhattan_plot, pair_scan, samples, cofactors): +def run_rqtl_geno(vals, samples, dataset, method, model, permCheck, num_perm, perm_strata_list, do_control, control_marker, manhattan_plot, pair_scan, cofactors): ## Get pointers to some common R functions r_library = ro.r["library"] # Map the library function r_c = ro.r["c"] # Map the c function @@ -27,26 +27,26 @@ def run_rqtl_geno(vals, dataset, method, model, permCheck, num_perm, do_control, calc_genoprob = ro.r["calc.genoprob"] # Map the calc.genoprob function crossname = dataset.group.name - try: - generate_cross_from_rdata(dataset) - read_cross_from_rdata = ro.r["generate_cross_from_rdata"] # Map the local read_cross_from_rdata function - genofilelocation = locate(crossname + ".RData", "genotype/rdata") - cross_object = read_cross_from_rdata(genofilelocation) # Map the local GENOtoCSVR function - except: - generate_cross_from_geno(dataset) - GENOtoCSVR = ro.r["GENOtoCSVR"] # Map the local GENOtoCSVR function - crossfilelocation = TMPDIR + crossname + ".cross" - genofilelocation = locate(crossname + ".geno", "genotype") - - GENOtoCSVR = ro.r["GENOtoCSVR"] # Map the local GENOtoCSVR function - cross_object = GENOtoCSVR(genofilelocation, crossfilelocation) # TODO: Add the SEX if that is available + #try: + # generate_cross_from_rdata(dataset) + # read_cross_from_rdata = ro.r["generate_cross_from_rdata"] # Map the local read_cross_from_rdata function + # genofilelocation = locate(crossname + ".RData", "genotype/rdata") + # cross_object = read_cross_from_rdata(genofilelocation) # Map the local GENOtoCSVR function + #except: + generate_cross_from_geno(dataset) + GENOtoCSVR = ro.r["GENOtoCSVR"] # Map the local GENOtoCSVR function + crossfilelocation = TMPDIR + crossname + ".cross" + genofilelocation = locate(dataset.group.genofile, "genotype") + cross_object = GENOtoCSVR(genofilelocation, crossfilelocation) # TODO: Add the SEX if that is available if manhattan_plot: cross_object = calc_genoprob(cross_object) else: cross_object = calc_genoprob(cross_object, step=1, 
stepwidth="max") - cross_object = add_phenotype(cross_object, sanitize_rqtl_phenotype(vals), "the_pheno") # Add the phenotype + pheno_string = sanitize_rqtl_phenotype(vals) + + cross_object = add_phenotype(cross_object, pheno_string, "the_pheno") # Add the phenotype # Scan for QTLs marker_covars = create_marker_covariates(control_marker, cross_object) # Create the additive covariate markers @@ -78,15 +78,22 @@ def run_rqtl_geno(vals, dataset, method, model, permCheck, num_perm, do_control, logger.info("No covariates"); result_data_frame = scanone(cross_object, pheno = "the_pheno", model=model, method=method) if num_perm > 0 and permCheck == "ON": # Do permutation (if requested by user) - if do_control == "true" or cofactors != "": - perm_data_frame = scanone(cross_object, pheno_col = "the_pheno", addcovar = covars, n_perm = num_perm, model=model, method=method) + if len(perm_strata_list) > 0: #ZS: The strata list would only be populated if "Stratified" was checked on before mapping + cross_object, strata_ob = add_perm_strata(cross_object, perm_strata_list) + if do_control == "true" or cofactors != "": + perm_data_frame = scanone(cross_object, pheno_col = "the_pheno", addcovar = covars, n_perm = int(num_perm), perm_strata = strata_ob, model=model, method=method) + else: + perm_data_frame = scanone(cross_object, pheno_col = "the_pheno", n_perm = num_perm, perm_strata = strata_ob, model=model, method=method) else: - perm_data_frame = scanone(cross_object, pheno_col = "the_pheno", n_perm = num_perm, model=model, method=method) + if do_control == "true" or cofactors != "": + perm_data_frame = scanone(cross_object, pheno_col = "the_pheno", addcovar = covars, n_perm = int(num_perm), model=model, method=method) + else: + perm_data_frame = scanone(cross_object, pheno_col = "the_pheno", n_perm = num_perm, model=model, method=method) perm_output, suggestive, significant = process_rqtl_perm_results(num_perm, perm_data_frame) # Functions that sets the thresholds for the webinterface - return perm_output, suggestive, significant, process_rqtl_results(result_data_frame) + return perm_output, suggestive, significant, process_rqtl_results(result_data_frame, dataset.group.species) else: - return process_rqtl_results(result_data_frame) + return process_rqtl_results(result_data_frame, dataset.group.species) def generate_cross_from_rdata(dataset): rdata_location = locate(dataset.group.name + ".RData", "genotype/rdata") @@ -112,8 +119,12 @@ def generate_cross_from_geno(dataset): # TODO: Need to figure out why som header = readLines(genotypes, 40) # Assume a geno header is not longer than 40 lines toskip = which(unlist(lapply(header, function(x){ length(grep("Chr\t", x)) })) == 1)-1 # Major hack to skip the geno headers - genocodes <- c(getGenoCode(header, 'mat'), getGenoCode(header, 'het'), getGenoCode(header, 'pat')) # Get the genotype codes type <- getGenoCode(header, 'type') + if(type == '4-way'){ + genocodes <- c('1','2','3','4') + } else { + genocodes <- c(getGenoCode(header, 'mat'), getGenoCode(header, 'het'), getGenoCode(header, 'pat')) # Get the genotype codes + } genodata <- read.csv(genotypes, sep='\t', skip=toskip, header=TRUE, na.strings=getGenoCode(header,'unk'), colClasses='character', comment.char = '#') cat('Genodata:', toskip, " ", dim(genodata), genocodes, '\n') if(is.null(phenotype)) phenotype <- runif((ncol(genodata)-4)) # If there isn't a phenotype, generate a random one @@ -127,7 +138,21 @@ def generate_cross_from_geno(dataset): # TODO: Need to figure out why som if(type == 'riset') cross 
<- convert2riself(cross) # If its a RIL, convert to a RIL in R/qtl return(cross) } - """ % (dataset.group.name + ".geno")) + """ % (dataset.group.genofile)) + +def add_perm_strata(cross, perm_strata): + col_string = 'c("the_strata")' + perm_strata_string = "c(" + for item in perm_strata: + perm_strata_string += str(item) + "," + + perm_strata_string = perm_strata_string[:-1] + ")" + + cross = add_phenotype(cross, perm_strata_string, "the_strata") + + strata_ob = pull_var("perm_strata", cross, col_string) + + return cross, strata_ob def sanitize_rqtl_phenotype(vals): pheno_as_string = "c(" @@ -143,6 +168,7 @@ def sanitize_rqtl_phenotype(vals): else: pheno_as_string += str(val) pheno_as_string += ")" + return pheno_as_string def add_phenotype(cross, pheno_as_string, col_name): @@ -150,11 +176,11 @@ def add_phenotype(cross, pheno_as_string, col_name): ro.r('the_cross$pheno <- cbind(pull.pheno(the_cross), ' + col_name + ' = '+ pheno_as_string +')') return ro.r["the_cross"] -def pull_covar(cross, covar_name_string): +def pull_var(var_name, cross, var_string): ro.globalenv["the_cross"] = cross - ro.r('trait_covars <- pull.pheno(the_cross, ' + covar_name_string + ')') + ro.r(var_name +' <- pull.pheno(the_cross, ' + var_string + ')') - return ro.r["trait_covars"] + return ro.r[var_name] def add_cofactors(cross, this_dataset, covariates, samples): ro.numpy2ri.activate() @@ -190,19 +216,18 @@ def add_cofactors(cross, this_dataset, covariates, samples): covar_as_string += ")" col_name = "covar_" + str(i) + cross = add_phenotype(cross, covar_as_string, col_name) if i < (len(covariate_list) - 1): covar_name_string += '"' + col_name + '", ' else: covar_name_string += '"' + col_name + '"' - cross = add_phenotype(cross, covar_as_string, col_name) - covar_name_string += ")" - covars = pull_covar(cross, covar_name_string) + covars_ob = pull_var("trait_covars", cross, covar_name_string) - return cross, covars + return cross, covars_ob def create_marker_covariates(control_marker, cross): ro.globalenv["the_cross"] = cross @@ -245,14 +270,17 @@ def process_rqtl_perm_results(num_perm, results): return perm_output, suggestive, significant -def process_rqtl_results(result): # TODO: how to make this a one liner and not copy the stuff in a loop +def process_rqtl_results(result, species_name): # TODO: how to make this a one liner and not copy the stuff in a loop qtl_results = [] output = [tuple([result[j][i] for j in range(result.ncol)]) for i in range(result.nrow)] for i, line in enumerate(result.iter_row()): marker = {} marker['name'] = result.rownames[i] - marker['chr'] = output[i][0] + if species_name == "mouse" and output[i][0] == 20: #ZS: This is awkward, but I'm not sure how to change the 20s to Xs in the RData file + marker['chr'] = "X" + else: + marker['chr'] = output[i][0] marker['cM'] = output[i][1] marker['Mb'] = output[i][1] marker['lod_score'] = output[i][2] diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py index 3006c4ff..e191902c 100644 --- a/wqflask/wqflask/marker_regression/run_mapping.py +++ b/wqflask/wqflask/marker_regression/run_mapping.py @@ -38,6 +38,7 @@ from utility import Plot, Bunch from utility import temp_data from utility.benchmark import Bench from wqflask.marker_regression import gemma_mapping, rqtl_mapping, qtlreaper_mapping, plink_mapping +from wqflask.show_trait.SampleList import SampleList from utility.tools import locate, locate_ignore_error, GEMMA_COMMAND, PLINK_COMMAND, TEMPDIR from utility.external import shell @@ 
-74,15 +75,41 @@ class RunMapping(object): self.vals = [] if 'samples' in start_vars: self.samples = start_vars['samples'].split(",") - for sample in self.samples: - if (len(genofile_samplelist) == 0) or (sample in genofile_samplelist): + if (len(genofile_samplelist) != 0): + for sample in genofile_samplelist: + if sample in self.samples: + value = start_vars.get('value:' + sample) + if value: + self.vals.append(value) + else: + self.vals.append("x") + else: + for sample in self.samples: value = start_vars.get('value:' + sample) if value: self.vals.append(value) else: self.samples = [] - for sample in self.dataset.group.samplelist: # sample is actually the name of an individual - if (len(genofile_samplelist) == 0) or (sample in genofile_samplelist): + if (len(genofile_samplelist) != 0): + for sample in genofile_samplelist: + if sample in self.dataset.group.samplelist: + in_trait_data = False + for item in self.this_trait.data: + if self.this_trait.data[item].name == sample: + value = start_vars['value:' + self.this_trait.data[item].name] + self.samples.append(self.this_trait.data[item].name) + self.vals.append(value) + in_trait_data = True + break + if not in_trait_data: + value = start_vars.get('value:' + sample) + if value: + self.samples.append(sample) + self.vals.append(value) + else: + self.vals.append("x") + else: + for sample in self.dataset.group.samplelist: # sample is actually the name of an individual in_trait_data = False for item in self.this_trait.data: if self.this_trait.data[item].name == sample: @@ -204,8 +231,17 @@ class RunMapping(object): elif self.mapping_method == "rqtl_plink": results = self.run_rqtl_plink() elif self.mapping_method == "rqtl_geno": + perm_strata = [] + if "perm_strata" in start_vars and "categorical_vars" in start_vars: + self.categorical_vars = start_vars["categorical_vars"].split(",") + if len(self.categorical_vars) and start_vars["perm_strata"] == "True": + primary_samples = SampleList(dataset = self.dataset, + sample_names = self.samples, + this_trait = self.this_trait) + + perm_strata = get_perm_strata(self.this_trait, primary_samples, self.categorical_vars, self.samples) self.score_type = "LOD" - self.mapping_scale = "morgan" + #self.mapping_scale = "morgan" self.control_marker = start_vars['control_marker'] self.do_control = start_vars['do_control'] if 'mapmethod_rqtl_geno' in start_vars: @@ -216,9 +252,9 @@ class RunMapping(object): #if start_vars['pair_scan'] == "true": # self.pair_scan = True if self.permCheck and self.num_perm > 0: - self.perm_output, self.suggestive, self.significant, results = rqtl_mapping.run_rqtl_geno(self.vals, self.dataset, self.method, self.model, self.permCheck, self.num_perm, self.do_control, self.control_marker, self.manhattan_plot, self.pair_scan, self.samples, self.covariates) + self.perm_output, self.suggestive, self.significant, results = rqtl_mapping.run_rqtl_geno(self.vals, self.samples, self.dataset, self.method, self.model, self.permCheck, self.num_perm, perm_strata, self.do_control, self.control_marker, self.manhattan_plot, self.pair_scan, self.covariates) else: - results = rqtl_mapping.run_rqtl_geno(self.vals, self.dataset, self.method, self.model, self.permCheck, self.num_perm, self.do_control, self.control_marker, self.manhattan_plot, self.pair_scan, self.samples, self.covariates) + results = rqtl_mapping.run_rqtl_geno(self.vals, self.samples, self.dataset, self.method, self.model, self.permCheck, self.num_perm, perm_strata, self.do_control, self.control_marker, self.manhattan_plot, self.pair_scan, 
self.covariates) elif self.mapping_method == "reaper": if "startMb" in start_vars: #ZS: Check if first time page loaded, so it can default to ON if "additiveCheck" in start_vars: @@ -429,8 +465,8 @@ class RunMapping(object): chr_lengths = chr_lengths, num_perm = self.num_perm, perm_results = self.perm_output, - browser_files = browser_files, significant = significant_for_browser, + browser_files = browser_files, selected_chr = this_chr ) else: @@ -617,9 +653,14 @@ def get_chr_lengths(mapping_scale, dataset, qtl_results): this_chr = 1 highest_pos = 0 for i, result in enumerate(qtl_results): - if int(result['chr']) > this_chr or i == (len(qtl_results) - 1): + chr_as_num = 0 + try: + chr_as_num = int(result['chr']) + except: + chr_as_num = 20 + if chr_as_num > this_chr or i == (len(qtl_results) - 1): chr_lengths.append({ "chr": str(this_chr), "size": str(highest_pos)}) - this_chr = int(result['chr']) + this_chr = chr_as_num highest_pos = 0 else: if float(result['Mb']) > highest_pos: @@ -635,4 +676,25 @@ def get_genofile_samplelist(dataset): if genofile['location'] == dataset.group.genofile and 'sample_list' in genofile: genofile_samplelist = genofile['sample_list'] - return genofile_samplelist \ No newline at end of file + return genofile_samplelist + +def get_perm_strata(this_trait, sample_list, categorical_vars, used_samples): + perm_strata_strings = [] + for sample in used_samples: + if sample in sample_list.sample_attribute_values.keys(): + combined_string = "" + for var in categorical_vars: + if var in sample_list.sample_attribute_values[sample].keys(): + combined_string += str(sample_list.sample_attribute_values[sample][var]) + else: + combined_string += "NA" + else: + combined_string = "NA" + + perm_strata_strings.append(combined_string) + + d = dict([(y,x+1) for x,y in enumerate(sorted(set(perm_strata_strings)))]) + list_to_numbers = [d[x] for x in perm_strata_strings] + perm_strata = list_to_numbers + + return perm_strata \ No newline at end of file diff --git a/wqflask/wqflask/show_trait/SampleList.py b/wqflask/wqflask/show_trait/SampleList.py index 451be50b..7e126a36 100644 --- a/wqflask/wqflask/show_trait/SampleList.py +++ b/wqflask/wqflask/show_trait/SampleList.py @@ -24,8 +24,8 @@ class SampleList(object): dataset, sample_names, this_trait, - sample_group_type, - header): + sample_group_type = "primary", + header = "Samples"): self.dataset = dataset self.this_trait = this_trait diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index 10ce38a7..64deb942 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -159,6 +159,8 @@ class ShowTrait(object): self.sample_group_types['samples_primary'] = self.dataset.group.name sample_lists = [group.sample_list for group in self.sample_groups] + categorical_var_list = get_categorical_variables(self.this_trait, self.sample_groups[0]) #ZS: Only using first samplelist, since I think mapping only uses those samples + #ZS: Get list of chromosomes to select for mapping self.chr_list = [["All", -1]] for i, this_chr in enumerate(self.dataset.species.chromosomes.chromosomes): @@ -226,6 +228,7 @@ class ShowTrait(object): hddn['mapping_display_all'] = True hddn['suggestive'] = 0 hddn['num_perm'] = 0 + hddn['categorical_vars'] = "" hddn['manhattan_plot'] = "" hddn['control_marker'] = "" if not self.temp_trait: @@ -250,6 +253,7 @@ class ShowTrait(object): sample_group_types = self.sample_group_types, sample_lists = sample_lists, attribute_names = 
self.sample_groups[0].attributes, + categorical_vars = ",".join(categorical_var_list), num_values = self.num_values, qnorm_values = self.qnorm_vals, zscore_values = self.z_scores, @@ -570,10 +574,25 @@ def get_ncbi_summary(this_trait): #ZS: Need to switch this try/except to something that checks the output later try: response = requests.get("http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=gene&id=%s&retmode=json" % this_trait.geneid) - logger.debug("NCBI:", json.loads(response.content)['result'][this_trait.geneid]) summary = json.loads(response.content)['result'][this_trait.geneid]['summary'] return summary except: return None else: - return None \ No newline at end of file + return None + +def get_categorical_variables(this_trait, sample_list): + categorical_var_list = [] + + if len(sample_list.attributes) > 0: + for attribute in sample_list.attributes: + attribute_vals = [] + for sample_name in this_trait.data.keys(): + attribute_vals.append(this_trait.data[sample_name].extra_attributes[sample_list.attributes[attribute].name]) + + num_distinct = len(set(attribute_vals)) + + if num_distinct < 10: + categorical_var_list.append(sample_list.attributes[attribute].name) + + return categorical_var_list \ No newline at end of file diff --git a/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js b/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js index b26110d8..478ed87e 100644 --- a/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js +++ b/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js @@ -153,7 +153,7 @@ //ZS: This is a list of inputs to be passed to the loading page, since not all inputs on the trait page are relevant to mapping var mapping_input_list = ['temp_uuid', 'trait_id', 'dataset', 'tool_used', 'form_url', 'method', 'transform', 'trimmed_markers', 'selected_chr', 'chromosomes', 'mapping_scale', - 'score_type', 'suggestive', 'significant', 'num_perm', 'permCheck', 'perm_output', 'num_bootstrap', 'bootCheck', 'bootstrap_results', + 'score_type', 'suggestive', 'significant', 'num_perm', 'permCheck', 'perm_output', 'perm_strata', 'categorical_vars', 'num_bootstrap', 'bootCheck', 'bootstrap_results', 'LRSCheck', 'covariates', 'maf', 'use_loco', 'manhattan_plot', 'control_marker', 'control_marker_db', 'do_control', 'genofile', 'pair_scan', 'startMb', 'endMb', 'graphWidth', 'lrsMax', 'additiveCheck', 'showSNP', 'showGenes', 'viewLegend', 'haplotypeAnalystCheck', 'mapmethod_rqtl_geno', 'mapmodel_rqtl_geno', 'temp_trait', 'group', 'species', 'reaper_version', 'primary_samples'] @@ -167,6 +167,7 @@ $('input[name=selected_chr]').val($('#chr_rqtl_geno').val()); $('input[name=genofile]').val($('#genofile_rqtl_geno').val()); $('input[name=num_perm]').val($('input[name=num_perm_rqtl_geno]').val()); + $('input[name=categorical_vars]').val(js_data.categorical_vars) $('input[name=manhattan_plot]').val($('input[name=manhattan_plot_rqtl]:checked').val()); $('input[name=control_marker]').val($('input[name=control_rqtl_geno]').val()); $('input[name=do_control]').val($('input[name=do_control_rqtl]:checked').val()); diff --git a/wqflask/wqflask/templates/collections/add.html b/wqflask/wqflask/templates/collections/add.html index 058e269c..825dfb84 100644 --- a/wqflask/wqflask/templates/collections/add.html +++ b/wqflask/wqflask/templates/collections/add.html @@ -11,22 +11,9 @@ {% else %} {% endif %} -
- 1. Create a new collection -
- - - - {% if uc is not defined %} - This collection will be saved to your computer for a year (or until you clear your cache). - {% endif %} -
-
{% if collections|length > 0 %} -
- 2. Add to an existing collection + 1. Add to an existing collection
+ + {% if uc is not defined %} + This collection will be saved to your computer for a year (or until you clear your cache). + {% endif %} +
+
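Editor's note: the run_mapping.py hunk in this commit introduces get_perm_strata(), which turns each sample's categorical attributes (for example sex) into a small positive integer stratum that R/qtl can use for stratified permutations. A minimal stand-alone sketch of that encoding, using made-up sample and attribute names rather than anything from the patch:

```python
# Sketch of the strata encoding used by get_perm_strata() in run_mapping.py.
# Sample names and attribute values below are hypothetical test data.

def encode_strata(attribute_values, categorical_vars, used_samples):
    """Build one label per sample from its categorical attributes,
    then map each distinct label to a small positive integer."""
    labels = []
    for sample in used_samples:
        attrs = attribute_values.get(sample)
        if attrs:
            labels.append("".join(str(attrs.get(var, "NA")) for var in categorical_vars))
        else:
            labels.append("NA")  # samples without attribute data fall into an "NA" stratum
    # same sorted-set/enumerate idiom as in the patch
    codes = {label: i + 1 for i, label in enumerate(sorted(set(labels)))}
    return [codes[label] for label in labels]

values = {"BXD1": {"Sex": "F"}, "BXD2": {"Sex": "M"}, "BXD5": {}}
print(encode_strata(values, ["Sex"], ["BXD1", "BXD2", "BXD5"]))  # [1, 2, 3]
```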
diff --git a/wqflask/wqflask/templates/collections/list.html b/wqflask/wqflask/templates/collections/list.html index a2f1a1f7..3829b950 100644 --- a/wqflask/wqflask/templates/collections/list.html +++ b/wqflask/wqflask/templates/collections/list.html @@ -28,7 +28,7 @@
{% if collections|length > 0 %} -

Loading...
+
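Editor's note: besides renaming it to "Your Default Collection", this commit pins the default collection to the end of the collection list; the filter-and-concatenate idiom appears in the user_session.py hunk further down in this series. A rough sketch of that ordering rule:

```python
# Sketch of the ordering rule described in the commit message; the collection
# dicts here are invented, the real ones come from Redis via user_session.py.

DEFAULT_NAME = "Your Default Collection"

def order_collections(collections):
    """Keep every other collection in its original order, default goes last."""
    others = [c for c in collections if c["name"] != DEFAULT_NAME]
    default = [c for c in collections if c["name"] == DEFAULT_NAME]
    return others + default

cols = [{"name": DEFAULT_NAME}, {"name": "Hippocampus hits"}, {"name": "Liver eQTL"}]
print([c["name"] for c in order_collections(cols)])
# ['Hippocampus hits', 'Liver eQTL', 'Your Default Collection']
```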
diff --git a/wqflask/wqflask/templates/collections/view.html b/wqflask/wqflask/templates/collections/view.html index 3b7f9671..59936a8e 100644 --- a/wqflask/wqflask/templates/collections/view.html +++ b/wqflask/wqflask/templates/collections/view.html @@ -88,8 +88,9 @@ -
-
+
+ Show/Hide Columns: +
@@ -127,6 +128,8 @@ {% if this_trait.symbol %} + {% elif this_trait.abbreviation %} + {% else %} {% endif %} @@ -139,9 +142,17 @@ {% endif %} + {% if this_trait.LRS_score_repr|float > 0 %} + {% else %} + + {% endif %} + {% if this_trait.additive|float > 0 %} + {% else %} + + {% endif %} {% endfor %} diff --git a/wqflask/wqflask/templates/correlation_page.html b/wqflask/wqflask/templates/correlation_page.html index 3e8baab6..03b03aa7 100644 --- a/wqflask/wqflask/templates/correlation_page.html +++ b/wqflask/wqflask/templates/correlation_page.html @@ -29,46 +29,42 @@ {{ this_trait.name }}:{{ this_trait.dataset }}, {% endfor %}" > + - + + - + + - + + - + + - - - - - - - - -
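Editor's note: the collections view.html hunk above only prints the LRS score and additive effect when they parse as positive floats, and shows "N/A" otherwise (formatted to three decimals, as in the template). A hypothetical Python helper equivalent to that template logic, not code from the patch:

```python
# Rough Python equivalent of the "{% if value|float > 0 %}" / "N/A" fallback
# added to collections/view.html; the helper name is illustrative.

def fmt_stat(value):
    try:
        as_float = float(value)
    except (TypeError, ValueError):
        return "N/A"
    return "%0.3f" % as_float if as_float > 0 else "N/A"

print(fmt_stat("13.4974911471087"))  # '13.497'
print(fmt_stat(None))                # 'N/A'
print(fmt_stat(0))                   # 'N/A'
```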
diff --git a/wqflask/wqflask/templates/loading.html b/wqflask/wqflask/templates/loading.html index bc614e01..49bcbff7 100644 --- a/wqflask/wqflask/templates/loading.html +++ b/wqflask/wqflask/templates/loading.html @@ -9,7 +9,7 @@
{% if start_vars.tool_used == "Mapping" %} -

Computing the Map

+

Computing the Maps


n = {{ start_vars.num_vals }}
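Editor's note: earlier in this commit, show_trait.py gains get_categorical_variables(), which treats any sample attribute with fewer than ten distinct values as categorical so it can be offered for stratified permutations. A rough stand-alone version with invented sample data:

```python
# Sketch of the "fewer than 10 distinct values" rule from get_categorical_variables().
# As in the patch, only the count of distinct values is checked, so a numeric
# attribute with few values would also qualify.

def categorical_attributes(samples, threshold=10):
    """samples: {sample_name: {attribute_name: value}}"""
    names = {attr for attrs in samples.values() for attr in attrs}
    categorical = []
    for attr in sorted(names):
        distinct = {attrs.get(attr) for attrs in samples.values()}
        if len(distinct) < threshold:
            categorical.append(attr)
    return categorical

samples = {
    "BXD1": {"Sex": "F", "Weight": 21.3},
    "BXD2": {"Sex": "M", "Weight": 24.8},
    "BXD5": {"Sex": "M", "Weight": 19.9},
}
print(categorical_attributes(samples))  # ['Sex', 'Weight'] with this toy data
```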
diff --git a/wqflask/wqflask/templates/search_result_page.html b/wqflask/wqflask/templates/search_result_page.html index 162bde08..33221e0f 100644 --- a/wqflask/wqflask/templates/search_result_page.html +++ b/wqflask/wqflask/templates/search_result_page.html @@ -8,7 +8,7 @@ {% endblock %} {% block content %} -
+
@@ -68,45 +68,41 @@ - + - + - + - - - - - -
@@ -252,11 +248,13 @@ $('td', row).eq(3).attr('data-export', $('td', row).eq(3).text()); $('td', row).eq(4).attr('title', $('td', row).eq(4).text()); $('td', row).eq(4).attr('data-export', $('td', row).eq(4).text()); - $('td', row).slice(6,9).attr("align", "right"); + $('td', row).eq(4).attr('align', 'right'); + $('td', row).slice(6,10).attr("align", "right"); $('td', row).eq(5).attr('data-export', $('td', row).eq(5).text()); $('td', row).eq(6).attr('data-export', $('td', row).eq(6).text()); $('td', row).eq(7).attr('data-export', $('td', row).eq(7).text()); $('td', row).eq(8).attr('data-export', $('td', row).eq(8).text()); + $('td', row).eq(9).attr('data-export', $('td', row).eq(8).text()); {% elif dataset.type == 'Geno' %} $('td', row).eq(3).attr('data-export', $('td', row).eq(3).text()); {% endif %} @@ -339,7 +337,7 @@ { 'title': "Description", 'type': "natural", - 'width': "25%", + 'width': "800px", 'data': null, 'render': function(data, type, row, meta) { try { @@ -359,7 +357,7 @@ { 'title': "Authors", 'type': "natural", - 'width': "25%", + 'width': "500px", 'data': null, 'render': function(data, type, row, meta) { author_list = data.authors.split(",") @@ -398,7 +396,7 @@ { 'title': "Max LRS Location", 'type': "natural", - 'width': "160px", + 'width': "200px", 'data': "lrs_location" }, { @@ -430,7 +428,6 @@ } ], 'sDom': "Bitir", - 'autoWidth': false, 'deferRender': true, 'paging': false, 'orderClasses': true, diff --git a/wqflask/wqflask/templates/show_trait.html b/wqflask/wqflask/templates/show_trait.html index 81661f86..27c3e398 100644 --- a/wqflask/wqflask/templates/show_trait.html +++ b/wqflask/wqflask/templates/show_trait.html @@ -364,7 +364,7 @@ } } ); - primary_table.on( 'order.dt search.dt', function () { + primary_table.on( 'order.dt search.dt draw.dt', function () { primary_table.column(1, {search:'applied', order:'applied'}).nodes().each( function (cell, i) { cell.innerHTML = i+1; } ); diff --git a/wqflask/wqflask/templates/show_trait_mapping_tools.html b/wqflask/wqflask/templates/show_trait_mapping_tools.html index 01d90d21..777d4a2d 100644 --- a/wqflask/wqflask/templates/show_trait_mapping_tools.html +++ b/wqflask/wqflask/templates/show_trait_mapping_tools.html @@ -243,6 +243,21 @@
+ {% if sample_groups[0].attributes|length > 0 %} +
+ +
+ + +
+
+ {% endif %}
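Editor's note: the rqtl_mapping.py helpers touched by this commit (sanitize_rqtl_phenotype, add_perm_strata) hand vectors to R by building literal c(...) strings that are later evaluated through rpy2. A compact sketch of that idea, with missing values ("x" or empty) mapped to NA as in the patch:

```python
# Sketch of the c(...) string construction used when passing phenotype or
# strata vectors to R/qtl through rpy2; the function name is illustrative.

def to_r_vector(values):
    items = []
    for val in values:
        if str(val) == "x" or str(val) == "":
            items.append("NA")      # unmeasured samples become NA in R
        else:
            items.append(str(val))
    return "c(" + ",".join(items) + ")"

print(to_r_vector([12.1, "x", 9.8]))  # c(12.1,NA,9.8)
print(to_r_vector([1, 2, 2, 1]))      # c(1,2,2,1)  (e.g. a strata vector)
```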
@@ -365,7 +380,7 @@
Maps traits with correction for kinship among samples using a linear mixed model method, and also allows users to fit multiple covariates such as sex, age, treatment, and genetic markers (PMID: 2453419, and GitHub code). GEMMA incorporates the Leave One Chromosome Out (LOCO) method to ensure that the correction for kinship does not remove useful genetic variance near each marker. Markers can be filtered to include only those with minor allele frequencies (MAF) above a threshold. The default MAF is 0.05.
{% elif mapping_method == "R/qtl" %}
R/qtl
-
The original R/qtl mapping code that supports most classic experimental crosses provided that they do not have complex kinship or admixture (PMID: 30591514). Both R/qtl, implemented here, and R/qtl2 are available as stand-alone R packages (R suite).
+
The original R/qtl mapping package that supports classic experimental crosses including 4-parent F2 intercrosses (e.g., NIA ITP UM-HET3). R/qtl is ideal for populations that do not have complex kinship or admixture (PMID: 12724300). Both R/qtl as implemented here, and R/qtl2 (PMID: 30591514) are available as R suites.
{% elif mapping_method == "QTLReaper" %}
Haley-Knott Regression
Fast linear mapping method (PMID 16718932) works well with F2 intercrosses and backcrosses, but that is not recommended for complex or admixed populations (e.g., GWAS or heterogeneous stock studies) or for advanced intercrosses, recombinant inbred families, or diallel crosses. Interactive plots in GeneNetwork have relied on the fast HK mapping for two decades and we still use this method for mapping omics data sets and computing genome-wide permutation threshold (QTL Reaper code).
diff --git a/wqflask/wqflask/user_session.py b/wqflask/wqflask/user_session.py index 71572c03..d75a03df 100644 --- a/wqflask/wqflask/user_session.py +++ b/wqflask/wqflask/user_session.py @@ -144,6 +144,7 @@ class UserSession(object): #ZS: Get user's collections if they exist collections = get_user_collections(self.redis_user_id) + collections = [item for item in collections if item['name'] != "Your Default Collection"] + [item for item in collections if item['name'] == "Your Default Collection"] #ZS: Ensure Default Collection is last in list return collections @property diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 0fbe370d..923c89bd 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -15,6 +15,7 @@ import xlsxwriter import StringIO # Todo: Use cStringIO? import gc +import numpy as np import cPickle as pickle import uuid @@ -392,9 +393,9 @@ def export_perm_data(): buff = StringIO.StringIO() writer = csv.writer(buff) - writer.writerow(["Suggestive LRS (p=0.63) = " + str(perm_data[int(num_perm*0.37-1)])]) - writer.writerow(["Significant LRS (p=0.05) = " + str(perm_data[int(num_perm*0.95-1)])]) - writer.writerow(["Highly Significant LRS (p=0.01) = " + str(perm_data[int(num_perm*0.99-1)])]) + writer.writerow(["Suggestive LRS (p=0.63) = " + str(np.percentile(np.array(perm_data), 67))]) + writer.writerow(["Significant LRS (p=0.05) = " + str(np.percentile(np.array(perm_data), 95))]) + writer.writerow(["Highly Significant LRS (p=0.01) = " + str(np.percentile(np.array(perm_data), 99))]) writer.writerow("") writer.writerow([str(num_perm) + " Permutations"]) writer.writerow("") @@ -595,7 +596,7 @@ def loading_page(): @app.route("/run_mapping", methods=('POST',)) def mapping_results_page(): initial_start_vars = request.form - logger.debug("Mapping called with initial_start_vars:", initial_start_vars.items()) + #logger.debug("Mapping called with initial_start_vars:", initial_start_vars.items()) logger.info(request.url) temp_uuid = initial_start_vars['temp_uuid'] wanted = ( @@ -620,6 +621,9 @@ def mapping_results_page(): 'significant', 'num_perm', 'permCheck', + 'perm_strata', + 'strat_var', + 'categorical_vars', 'perm_output', 'num_bootstrap', 'bootCheck', @@ -654,11 +658,11 @@ def mapping_results_page(): for key, value in initial_start_vars.iteritems(): if key in wanted or key.startswith(('value:')): start_vars[key] = value - logger.debug("Mapping called with start_vars:", start_vars) + #logger.debug("Mapping called with start_vars:", start_vars) version = "v3" key = "mapping_results:{}:".format(version) + json.dumps(start_vars, sort_keys=True) - logger.info("key is:", pf(key)) + #logger.info("key is:", pf(key)) with Bench("Loading cache"): result = None # Just for testing #result = Redis.get(key) -- cgit v1.2.3 From 6178a48d29cd83fd3beb70854721070826e230e3 Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 22 Apr 2020 16:03:07 -0500 Subject: Fixed a variety of issues related to users registering and logging in GN2 should now work when Redis is empty --- wqflask/utility/startup_config.py | 4 +- wqflask/utility/tools.py | 8 +-- wqflask/wqflask/collect.py | 2 +- wqflask/wqflask/show_trait/SampleList.py | 84 ++++++++++++++++---------------- wqflask/wqflask/show_trait/show_trait.py | 4 +- wqflask/wqflask/user_login.py | 44 ++++++++--------- wqflask/wqflask/user_session.py | 13 +++-- 7 files changed, 80 insertions(+), 79 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/startup_config.py b/wqflask/utility/startup_config.py index 
5a62cc50..817284dd 100644 --- a/wqflask/utility/startup_config.py +++ b/wqflask/utility/startup_config.py @@ -33,7 +33,7 @@ def app_config(): if page.status_code != 200: raise Exception("API server not found!") - import utility.elasticsearch_tools as es - es.test_elasticsearch_connection() + # import utility.elasticsearch_tools as es + # es.test_elasticsearch_connection() print("GN2 is running. Visit %s[http://localhost:%s/%s](%s)" % (BLUE,str(port),ENDC,get_setting("WEBSERVER_URL"))) diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py index 75bddb24..0fbedccb 100644 --- a/wqflask/utility/tools.py +++ b/wqflask/utility/tools.py @@ -267,10 +267,10 @@ if ORCID_CLIENT_ID != 'UNKNOWN' and ORCID_CLIENT_SECRET: ORCID_CLIENT_ID+"&client_secret="+ORCID_CLIENT_SECRET ORCID_TOKEN_URL = get_setting('ORCID_TOKEN_URL') -ELASTICSEARCH_HOST = get_setting('ELASTICSEARCH_HOST') -ELASTICSEARCH_PORT = get_setting('ELASTICSEARCH_PORT') -import utility.elasticsearch_tools as es -es.test_elasticsearch_connection() +# ELASTICSEARCH_HOST = get_setting('ELASTICSEARCH_HOST') +# ELASTICSEARCH_PORT = get_setting('ELASTICSEARCH_PORT') +# import utility.elasticsearch_tools as es +# es.test_elasticsearch_connection() SMTP_CONNECT = get_setting('SMTP_CONNECT') SMTP_USERNAME = get_setting('SMTP_USERNAME') diff --git a/wqflask/wqflask/collect.py b/wqflask/wqflask/collect.py index fa6e03b4..1d74b699 100644 --- a/wqflask/wqflask/collect.py +++ b/wqflask/wqflask/collect.py @@ -108,7 +108,7 @@ def collections_new(): if 'existing_collection' not in params: collections = g.user_session.user_collections for collection in collections: - if collection["name"] == "Default Collection": + if collection["name"] == "Your Default Collection": collection_id = collection["id"] collection_name = collection["name"] default_collection_exists = True diff --git a/wqflask/wqflask/show_trait/SampleList.py b/wqflask/wqflask/show_trait/SampleList.py index 7e126a36..ad78ebcc 100644 --- a/wqflask/wqflask/show_trait/SampleList.py +++ b/wqflask/wqflask/show_trait/SampleList.py @@ -14,8 +14,6 @@ import simplejson as json import itertools -from utility.elasticsearch_tools import get_elasticsearch_connection - import utility.logger logger = utility.logger.getLogger(__name__ ) @@ -158,47 +156,47 @@ class SampleList(object): return any(sample.variance for sample in self.sample_list) -def get_transform_vals(dataset, trait): - es = get_elasticsearch_connection(for_user=False) - - logger.info("DATASET NAME:", dataset.name) - - query = '{"bool": {"must": [{"match": {"name": "%s"}}, {"match": {"dataset": "%s"}}]}}' % (trait.name, dataset.name) - - es_body = { - "query": { - "bool": { - "must": [ - { - "match": { - "name": "%s" % (trait.name) - } - }, - { - "match": { - "dataset": "%s" % (dataset.name) - } - } - ] - } - } - } - - response = es.search( index = "traits", doc_type = "trait", body = es_body ) - logger.info("THE RESPONSE:", response) - results = response['hits']['hits'] - - if len(results) > 0: - samples = results[0]['_source']['samples'] - - sample_dict = {} - for sample in samples: - sample_dict[sample['name']] = sample['qnorm'] - - #logger.info("SAMPLE DICT:", sample_dict) - return sample_dict - else: - return None +# def get_transform_vals(dataset, trait): +# es = get_elasticsearch_connection(for_user=False) + +# logger.info("DATASET NAME:", dataset.name) + +# query = '{"bool": {"must": [{"match": {"name": "%s"}}, {"match": {"dataset": "%s"}}]}}' % (trait.name, dataset.name) + +# es_body = { +# "query": { +# "bool": { +# "must": [ +# 
{ +# "match": { +# "name": "%s" % (trait.name) +# } +# }, +# { +# "match": { +# "dataset": "%s" % (dataset.name) +# } +# } +# ] +# } +# } +# } + +# response = es.search( index = "traits", doc_type = "trait", body = es_body ) +# logger.info("THE RESPONSE:", response) +# results = response['hits']['hits'] + +# if len(results) > 0: +# samples = results[0]['_source']['samples'] + +# sample_dict = {} +# for sample in samples: +# sample_dict[sample['name']] = sample['qnorm'] + +# #logger.info("SAMPLE DICT:", sample_dict) +# return sample_dict +# else: +# return None def natural_sort_key(x): """Get expected results when using as a key for sort - ints or strings are sorted properly""" diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index d35ba749..8883e627 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -364,8 +364,8 @@ class ShowTrait(object): if self.dataset: dataset_menu_selected = self.dataset.name - return_results_menu = (100, 200, 500, 1000, 2000, 5000, 10000, 15000, 20000) - return_results_menu_selected = 500 + return_results_menu = (100, 200, 500, 1000, 2000, 5000, 10000, 15000, 20000) + return_results_menu_selected = 500 self.corr_tools = dict(dataset_menu = dataset_menu, dataset_menu_selected = dataset_menu_selected, diff --git a/wqflask/wqflask/user_login.py b/wqflask/wqflask/user_login.py index da3cc504..40d9925c 100644 --- a/wqflask/wqflask/user_login.py +++ b/wqflask/wqflask/user_login.py @@ -6,7 +6,6 @@ import datetime import time import logging import uuid -import hashlib import hmac import base64 import requests @@ -42,17 +41,23 @@ def basic_info(): ip_address = request.remote_addr, user_agent = request.headers.get('User-Agent')) -def encode_password(pass_gen_fields): +def encode_password(pass_gen_fields, unencrypted_password): + logger.debug("THE TYPE:", type(pass_gen_fields)) + logger.debug("pass_gen_fields:", pass_gen_fields) + logger.debug("hashfunc:", pass_gen_fields['hashfunc']) hashfunc = getattr(hashlib, pass_gen_fields['hashfunc']) salt = base64.b64decode(pass_gen_fields['salt']) - password = pbkdf2.pbkdf2_hex(str(pass_gen_fields['unencrypted_password']), + encrypted_password = pbkdf2.pbkdf2_hex(str(unencrypted_password), pass_gen_fields['salt'], pass_gen_fields['iterations'], pass_gen_fields['keylength'], hashfunc) - return password + pass_gen_fields.pop("unencrypted_password", None) + pass_gen_fields["password"] = encrypted_password + + return pass_gen_fields def set_password(password): pass_gen_fields = { @@ -67,19 +72,10 @@ def set_password(password): assert len(password) >= 6, "Password shouldn't be shorter than 6 characters" - encoded_password = encode_password(pass_gen_fields) + encoded_password = encode_password(pass_gen_fields, pass_gen_fields['unencrypted_password']) return encoded_password -def encrypt_password(unencrypted_password, pwfields): - hashfunc = getattr(hashlib, pwfields['hashfunc']) - salt = base64.b64decode(pwfields['salt']) - iterations = pwfields['iterations'] - keylength = pwfields['keylength'] - encrypted_password = pbkdf2.pbkdf2_hex(str(unencrypted_password), - salt, iterations, keylength, hashfunc) - return encrypted_password - def get_signed_session_id(user): session_id = str(uuid.uuid4()) @@ -186,9 +182,12 @@ def login(): password_match = False if user_details: submitted_password = params['password'] - pwfields = json.loads(user_details['password']) - encrypted_pass = encrypt_password(submitted_password, pwfields) - password_match = 
pbkdf2.safe_str_cmp(encrypted_pass, pwfields['password']) + pwfields = user_details['password'] + if type(pwfields) is str: + pwfields = json.loads(pwfields) + encrypted_pass_fields = encode_password(pwfields, submitted_password) + password_match = pbkdf2.safe_str_cmp(encrypted_pass_fields['password'], pwfields['password']) + else: # Invalid e-mail flash("Invalid e-mail address. Please try again.", "alert-danger") response = make_response(redirect(url_for('login'))) @@ -226,7 +225,7 @@ def github_oauth2(): "client_secret": GITHUB_CLIENT_SECRET, "code": code } - logger.debug("LOGIN DATA:", data) + result = requests.post("https://github.com/login/oauth/access_token", json=data) result_dict = {arr[0]:arr[1] for arr in [tok.split("=") for tok in [token.encode("utf-8") for token in result.text.split("&")]]} @@ -437,19 +436,18 @@ def register_user(params): if params.get('password_confirm') != password: errors.append("Passwords don't match.") - if errors: - return errors - user_details['password'] = set_password(password) user_details['user_id'] = str(uuid.uuid4()) user_details['confirmed'] = 1 - user_details['registration_info'] = json.dumps(basic_info(), sort_keys=True) + user_details['registration_info'] = basic_info() save_user(user_details, user_details['user_id']) + return errors + @app.route("/n/register", methods=('GET', 'POST')) def register(): - errors = None + errors = [] params = request.form if request.form else request.args params = params.to_dict(flat=True) diff --git a/wqflask/wqflask/user_session.py b/wqflask/wqflask/user_session.py index d75a03df..fd1779fb 100644 --- a/wqflask/wqflask/user_session.py +++ b/wqflask/wqflask/user_session.py @@ -9,6 +9,7 @@ import simplejson as json import redis # used for collections Redis = redis.StrictRedis() + from flask import (Flask, g, render_template, url_for, request, make_response, redirect, flash, abort) @@ -69,19 +70,23 @@ class UserSession(object): if not self.record or self.record == []: if user_cookie: self.logged_in = False + self.record = dict(login_time = time.time(), + user_type = "anon", + user_id = str(uuid.uuid4())) + Redis.hmset(self.redis_key, self.record) + Redis.expire(self.redis_key, THIRTY_DAYS) + response = make_response(redirect(url_for('login'))) + response.set_cookie(self.user_cookie_name, '', expires=0) ########### Grrr...this won't work because of the way flask handles cookies # Delete the cookie - response = make_response(redirect(url_for('login'))) - #response.set_cookie(self.cookie_name, '', expires=0) flash("Due to inactivity your session has expired. 
If you'd like please login again.") - #return response + return response #return else: self.record = dict(login_time = time.time(), user_type = "anon", user_id = str(uuid.uuid4())) - Redis.hmset(self.redis_key, self.record) Redis.expire(self.redis_key, THIRTY_DAYS) else: -- cgit v1.2.3 From be09d207ee4e2705e358102f8bdbcd1da7f70ca2 Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 24 Apr 2020 16:06:36 -0500 Subject: Replaced hard-coded instances of GN2 urls with ones pulled from settings file --- doc/API_readme.md | 42 +++++++++++----------- wqflask/base/data_set.py | 4 +-- wqflask/base/trait.py | 7 ++-- wqflask/utility/tools.py | 2 ++ wqflask/wqflask/ctl/ctl_analysis.py | 4 ++- wqflask/wqflask/do_search.py | 3 +- wqflask/wqflask/network_graph/network_graph.py | 2 ++ wqflask/wqflask/search_results.py | 3 +- wqflask/wqflask/static/new/javascript/ctl_graph.js | 8 +---- .../new/javascript/dataset_select_menu_orig.js | 1 - .../wqflask/static/new/javascript/network_graph.js | 4 +-- wqflask/wqflask/templates/ctl_results.html | 1 + wqflask/wqflask/templates/index_page.html | 4 +-- wqflask/wqflask/templates/index_page_orig.html | 4 +-- wqflask/wqflask/templates/network_graph.html | 1 + 15 files changed, 47 insertions(+), 43 deletions(-) (limited to 'wqflask/utility') diff --git a/doc/API_readme.md b/doc/API_readme.md index 652376a0..be6668dc 100644 --- a/doc/API_readme.md +++ b/doc/API_readme.md @@ -6,17 +6,17 @@ To get a list of species with data available in GN (and their associated names and ids): ``` -curl http://gn2.genenetwork.org/api/v_pre1/species +curl http://genenetwork.org/api/v_pre1/species [ { "FullName": "Mus musculus", "Id": 1, "Name": "mouse", "TaxonomyId": 10090 }, ... { "FullName": "Populus trichocarpa", "Id": 10, "Name": "poplar", "TaxonomyId": 3689 } ] ``` Or to get a single species info: ``` -curl http://gn2.genenetwork.org/api/v_pre1/species/mouse +curl http://genenetwork.org/api/v_pre1/species/mouse ``` OR ``` -curl http://gn2.genenetwork.org/api/v_pre1/species/mouse.json +curl http://genenetwork.org/api/v_pre1/species/mouse.json ``` *For all queries where the last field is a user-specified name/ID, there will be the option to append a file format type. Currently there is only JSON (and it will default to JSON if none is provided), but other formats will be added later* @@ -26,33 +26,33 @@ curl http://gn2.genenetwork.org/api/v_pre1/species/mouse.json This query can optionally filter by species: ``` -curl http://gn2.genenetwork.org/api/v_pre1/groups (for all species) +curl http://genenetwork.org/api/v_pre1/groups (for all species) ``` OR ``` -curl http://gn2.genenetwork.org/api/v_pre1/groups/mouse (for just mouse groups/RISets) +curl http://genenetwork.org/api/v_pre1/groups/mouse (for just mouse groups/RISets) [ { "DisplayName": "BXD", "FullName": "BXD RI Family", "GeneticType": "riset", "Id": 1, "MappingMethodId": "1", "Name": "BXD", "SpeciesId": 1, "public": 2 }, ... 
{ "DisplayName": "AIL LGSM F34 and F39-43 (GBS)", "FullName": "AIL LGSM F34 and F39-43 (GBS)", "GeneticType": "intercross", "Id": 72, "MappingMethodId": "2", "Name": "AIL-LGSM-F34-F39-43-GBS", "SpeciesId": 1, "public": 2 } ] ``` ## Fetch Genotypes for Group/RISet ## ``` -curl http://gn2.genenetwork.org/api/v_pre1/genotypes/bimbam/BXD -curl http://gn2.genenetwork.org/api/v_pre1/genotypes/BXD.bimbam +curl http://genenetwork.org/api/v_pre1/genotypes/bimbam/BXD +curl http://genenetwork.org/api/v_pre1/genotypes/BXD.bimbam ``` Returns a group's genotypes in one of several formats - bimbam, rqtl2, or geno (a format used by qtlreaper which is just a CSV file consisting of marker positions and genotypes) Rqtl2 genotype queries can also include the dataset name and will return a zip of the genotypes, phenotypes, and gene map (marker names/positions). For example: ``` -curl http://gn2.genenetwork.org/api/v_pre1/genotypes/rqtl2/BXD/HC_M2_0606_P.zip +curl http://genenetwork.org/api/v_pre1/genotypes/rqtl2/BXD/HC_M2_0606_P.zip ``` ## Fetch Datasets ## ``` -curl http://gn2.genenetwork.org/api/v_pre1/datasets/bxd +curl http://genenetwork.org/api/v_pre1/datasets/bxd ``` OR ``` -curl http://gn2.genenetwork.org/api/v_pre1/datasets/mouse/bxd +curl http://genenetwork.org/api/v_pre1/datasets/mouse/bxd [ { "AvgID": 1, "CreateTime": "Fri, 01 Aug 2003 00:00:00 GMT", "DataScale": "log2", "FullName": "UTHSC/ETHZ/EPFL BXD Liver Polar Metabolites Extraction A, CD Cohorts (Mar 2017) log2", "Id": 1, "Long_Abbreviation": "BXDMicroArray_ProbeSet_August03", "ProbeFreezeId": 3, "ShortName": "Brain U74Av2 08/03 MAS5", "Short_Abbreviation": "Br_U_0803_M", "confidentiality": 0, "public": 0 }, ... { "AvgID": 3, "CreateTime": "Tue, 14 Aug 2018 00:00:00 GMT", "DataScale": "log2", "FullName": "EPFL/LISP BXD CD Liver Affy Mouse Gene 1.0 ST (Aug18) RMA", "Id": 859, "Long_Abbreviation": "EPFLMouseLiverCDRMAApr18", "ProbeFreezeId": 181, "ShortName": "EPFL/LISP BXD CD Liver Affy Mouse Gene 1.0 ST (Aug18) RMA", "Short_Abbreviation": "EPFLMouseLiverCDRMA0818", "confidentiality": 0, "public": 1 } ] ``` (I added the option to specify species just in case we end up with the same group name across multiple species at some point, though it's currently unnecessary) @@ -61,11 +61,11 @@ curl http://gn2.genenetwork.org/api/v_pre1/datasets/mouse/bxd ### For mRNA Assay/"ProbeSet" ### ``` -curl http://gn2.genenetwork.org/api/v_pre1/dataset/HC_M2_0606_P +curl http://genenetwork.org/api/v_pre1/dataset/HC_M2_0606_P ``` OR ``` -curl http://gn2.genenetwork.org/api/v_pre1/dataset/bxd/HC_M2_0606_P``` +curl http://genenetwork.org/api/v_pre1/dataset/bxd/HC_M2_0606_P``` { "confidential": 0, "data_scale": "log2", "dataset_type": "mRNA expression", "full_name": "Hippocampus Consortium M430v2 (Jun06) PDNN", "id": 112, "name": "HC_M2_0606_P", "public": 2, "short_name": "Hippocampus M430v2 BXD 06/06 PDNN", "tissue": "Hippocampus mRNA", "tissue_id": 9 } ``` (This also has the option to specify group/riset) @@ -73,26 +73,26 @@ curl http://gn2.genenetwork.org/api/v_pre1/dataset/bxd/HC_M2_0606_P``` ### For "Phenotypes" (basically non-mRNA Expression; stuff like weight, sex, etc) ### For these traits, the query fetches publication info and takes the group and phenotype 'ID' as input. 
For example: ``` -curl http://gn2.genenetwork.org/api/v_pre1/dataset/bxd/10001 +curl http://genenetwork.org/api/v_pre1/dataset/bxd/10001 { "dataset_type": "phenotype", "description": "Central nervous system, morphology: Cerebellum weight, whole, bilateral in adults of both sexes [mg]", "id": 10001, "name": "CBLWT2", "pubmed_id": 11438585, "title": "Genetic control of the mouse cerebellum: identification of quantitative trait loci modulating size and architecture", "year": "2001" } ``` ## Fetch Sample Data for Dataset ## ``` -curl http://gn2.genenetwork.org/api/v_pre1/sample_data/HSNIH-PalmerPublish.csv +curl http://genenetwork.org/api/v_pre1/sample_data/HSNIH-PalmerPublish.csv ``` Returns a CSV file with sample/strain names as the columns and trait IDs as rows ## Fetch Sample Data for Single Trait ## ``` -curl http://gn2.genenetwork.org/api/v_pre1/sample_data/HC_M2_0606_P/1436869_at +curl http://genenetwork.org/api/v_pre1/sample_data/HC_M2_0606_P/1436869_at [ { "data_id": 23415463, "sample_name": "129S1/SvImJ", "sample_name_2": "129S1/SvImJ", "se": 0.123, "value": 8.201 }, { "data_id": 23415463, "sample_name": "A/J", "sample_name_2": "A/J", "se": 0.046, "value": 8.413 }, { "data_id": 23415463, "sample_name": "AKR/J", "sample_name_2": "AKR/J", "se": 0.134, "value": 8.856 }, ... ] ``` ## Fetch Trait List for Dataset ## ``` -curl http://gn2.genenetwork.org/api/v_pre1/traits/HXBBXHPublish.json +curl http://genenetwork.org/api/v_pre1/traits/HXBBXHPublish.json [ { "Additive": 0.0499967532467532, "Id": 10001, "LRS": 16.2831307029479, "Locus": "rs106114574", "PhenotypeId": 1449, "PublicationId": 319, "Sequence": 1 }, ... ] ``` @@ -101,7 +101,7 @@ Both JSON and CSV formats can be specified, with JSON as default. There is also ## Fetch Trait Info (Name, Description, Location, etc) ## ### For mRNA Expression/"ProbeSet" ### ``` -curl http://gn2.genenetwork.org/api/v_pre1/trait/HC_M2_0606_P/1436869_at +curl http://genenetwork.org/api/v_pre1/trait/HC_M2_0606_P/1436869_at { "additive": -0.214087568058076, "alias": "HHG1; HLP3; HPE3; SMMCI; Dsh; Hhg1", "chr": "5", "description": "sonic hedgehog (hedgehog)", "id": 99602, "locus": "rs8253327", "lrs": 12.7711275309832, "mb": 28.457155, "mean": 9.27909090909091, "name": "1436869_at", "p_value": 0.306, "se": null, "symbol": "Shh" } ``` @@ -110,7 +110,7 @@ For phenotypes this just gets the max LRS, its location, and additive effect (a Since each group/riset only has one phenotype "dataset", this query takes either the group/riset name or the group/riset name + "Publish" (for example "BXDPublish", which is the dataset name in the DB) as input ``` -curl http://gn2.genenetwork.org/api/v_pre1/trait/BXD/10001 +curl http://genenetwork.org/api/v_pre1/trait/BXD/10001 { "additive": 2.39444435069444, "id": 4, "locus": "rs48756159", "lrs": 13.4974911471087 } ``` @@ -130,7 +130,7 @@ Each method's query takes the following parameters respectively (more will be ad Example query: ``` -curl http://gn2.genenetwork.org/api/v_pre1/mapping?trait_id=10015&db=BXDPublish&method=gemma&use_loco=true +curl http://genenetwork.org/api/v_pre1/mapping?trait_id=10015&db=BXDPublish&method=gemma&use_loco=true ``` ### R/qtl ### @@ -146,7 +146,7 @@ curl http://gn2.genenetwork.org/api/v_pre1/mapping?trait_id=10015&db=BXDPublish& Example query: ``` -curl http://gn2.genenetwork.org/api/v_pre1/mapping?trait_id=1418701_at&db=HC_M2_0606_P&method=rqtl&num_perm=100 +curl http://genenetwork.org/api/v_pre1/mapping?trait_id=1418701_at&db=HC_M2_0606_P&method=rqtl&num_perm=100 ``` Some combinations of 
methods/models may not make sense. The R/qtl manual should be referred to for any questions on its use (specifically the scanone function in this case) @@ -164,6 +164,6 @@ This query currently takes the following parameters (though more will be added): Example query: ``` -curl http://gn2.genenetwork.org/api/v_pre1/correlation?trait_id=1427571_at&db=HC_M2_0606_P&target_db=BXDPublish&type=sample&return_count=100 +curl http://genenetwork.org/api/v_pre1/correlation?trait_id=1427571_at&db=HC_M2_0606_P&target_db=BXDPublish&type=sample&return_count=100 [ { "#_strains": 6, "p_value": 0.004804664723032055, "sample_r": -0.942857142857143, "trait": 20511 }, { "#_strains": 6, "p_value": 0.004804664723032055, "sample_r": -0.942857142857143, "trait": 20724 }, { "#_strains": 12, "p_value": 1.8288943424888848e-05, "sample_r": -0.9233615170820528, "trait": 13536 }, { "#_strains": 7, "p_value": 0.006807187408935392, "sample_r": 0.8928571428571429, "trait": 10157 }, { "#_strains": 7, "p_value": 0.006807187408935392, "sample_r": -0.8928571428571429, "trait": 20392 }, ... ] ``` diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py index fae62875..8652e6b7 100644 --- a/wqflask/base/data_set.py +++ b/wqflask/base/data_set.py @@ -56,7 +56,7 @@ from pprint import pformat as pf from db.gn_server import menu_main from db.call import fetchall,fetchone,fetch1 -from utility.tools import USE_GN_SERVER, USE_REDIS, flat_files, flat_file_exists +from utility.tools import USE_GN_SERVER, USE_REDIS, flat_files, flat_file_exists, GN2_BASE_URL from utility.logger import getLogger logger = getLogger(__name__ ) @@ -94,7 +94,7 @@ Publish or ProbeSet. E.g. """ self.datasets = {} if rebuild: #ZS: May make this the only option - data = json.loads(requests.get("http://gn2.genenetwork.org/api/v_pre1/gen_dropdown").content) + data = json.loads(requests.get(GN2_BASE_URL + "/api/v_pre1/gen_dropdown").content) #data = gen_menu.gen_dropdown_json() else: file_name = "wqflask/static/new/javascript/dataset_menu_structure.json" diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index 5525472e..e454c593 100644 --- a/wqflask/base/trait.py +++ b/wqflask/base/trait.py @@ -14,6 +14,7 @@ from base.data_set import create_dataset from db import webqtlDatabaseFunction from utility import webqtlUtil from utility import hmac +from utility.tools import GN2_BASE_URL from wqflask import app @@ -135,9 +136,9 @@ class GeneralTrait(object): alias = 'Not available' if self.symbol: - human_response = requests.get("http://gn2.genenetwork.org/gn3/gene/aliases/" + self.symbol.upper()) - mouse_response = requests.get("http://gn2.genenetwork.org/gn3/gene/aliases/" + self.symbol.capitalize()) - other_response = requests.get("http://gn2.genenetwork.org/gn3/gene/aliases/" + self.symbol.lower()) + human_response = requests.get(GN2_BASE_URL + "gn3/gene/aliases/" + self.symbol.upper()) + mouse_response = requests.get(GN2_BASE_URL + "gn3/gene/aliases/" + self.symbol.capitalize()) + other_response = requests.get(GN2_BASE_URL + "gn3/gene/aliases/" + self.symbol.lower()) if human_response and mouse_response and other_response: alias_list = json.loads(human_response.content) + json.loads(mouse_response.content) + json.loads(other_response.content) diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py index 0fbedccb..9354ece6 100644 --- a/wqflask/utility/tools.py +++ b/wqflask/utility/tools.py @@ -234,6 +234,8 @@ def show_settings(): GN_VERSION = get_setting('GN_VERSION') HOME = get_setting('HOME') WEBSERVER_MODE = get_setting('WEBSERVER_MODE') 
+GN2_BASE_URL = get_setting('GN2_BASE_URL') +GN2_BRANCH_URL = get_setting('GN2_BRANCH_URL') GN_SERVER_URL = get_setting('GN_SERVER_URL') SERVER_PORT = get_setting_int('SERVER_PORT') SQL_URI = get_setting('SQL_URI') diff --git a/wqflask/wqflask/ctl/ctl_analysis.py b/wqflask/wqflask/ctl/ctl_analysis.py index 6fda02fd..4415b86a 100644 --- a/wqflask/wqflask/ctl/ctl_analysis.py +++ b/wqflask/wqflask/ctl/ctl_analysis.py @@ -20,7 +20,7 @@ from base import data_set from base import trait as TRAIT from utility import helper_functions -from utility.tools import locate +from utility.tools import locate, GN2_BRANCH_URL from rpy2.robjects.packages import importr @@ -56,6 +56,8 @@ class CTL(object): self.edges_list = [] logger.info("Obtained pointers to CTL functions") + self.gn2_url = GN2_BRANCH_URL + def addNode(self, gt): node_dict = { 'data' : {'id' : str(gt.name) + ":" + str(gt.dataset.name), 'sid' : str(gt.name), diff --git a/wqflask/wqflask/do_search.py b/wqflask/wqflask/do_search.py index 05caa100..b0ca5ced 100644 --- a/wqflask/wqflask/do_search.py +++ b/wqflask/wqflask/do_search.py @@ -13,6 +13,7 @@ import sys # sys.path.append("..") Never in a running webserver from db import webqtlDatabaseFunction +from utility.tools import GN2_BASE_URL import logging from utility.logger import getLogger @@ -919,7 +920,7 @@ def get_aliases(symbol, species): return [] filtered_aliases = [] - response = requests.get("http://gn2.genenetwork.org/gn3/gene/aliases/" + symbol_string) + response = requests.get(GN2_BASE_URL + "/gn3/gene/aliases/" + symbol_string) if response: alias_list = json.loads(response.content) diff --git a/wqflask/wqflask/network_graph/network_graph.py b/wqflask/wqflask/network_graph/network_graph.py index a332db46..152e4168 100644 --- a/wqflask/wqflask/network_graph/network_graph.py +++ b/wqflask/wqflask/network_graph/network_graph.py @@ -47,6 +47,7 @@ from utility.TDCell import TDCell from base.trait import GeneralTrait from base import data_set from utility import webqtlUtil, helper_functions, corr_result_helpers +from utility.tools import GN2_BRANCH_URL from db import webqtlDatabaseFunction import utility.webqtlUtil #this is for parallel computing only. 
from wqflask.correlation import correlation_functions @@ -195,6 +196,7 @@ class NetworkGraph(object): self.nodes_list.append(node_dict) self.elements = json.dumps(self.nodes_list + self.edges_list) + self.gn2_url = GN2_BRANCH_URL groups = [] for sample in self.all_sample_list: diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py index 698389ab..8f702d58 100644 --- a/wqflask/wqflask/search_results.py +++ b/wqflask/wqflask/search_results.py @@ -28,6 +28,7 @@ from flask import render_template, Flask, g from utility import formatting from utility import hmac +from utility.tools import GN2_BASE_URL from utility.type_checking import is_float, is_int, is_str, get_float, get_int, get_string from utility.logger import getLogger @@ -295,7 +296,7 @@ def get_aliases(symbol_list, species): symbols_string = ",".join(updated_symbols) filtered_aliases = [] - response = requests.get("http://gn2.genenetwork.org/gn3/gene/aliases2/" + symbols_string) + response = requests.get(GN2_BASE_URL + "/gn3/gene/aliases2/" + symbols_string) if response: alias_lists = json.loads(response.content) seen = set() diff --git a/wqflask/wqflask/static/new/javascript/ctl_graph.js b/wqflask/wqflask/static/new/javascript/ctl_graph.js index 94bd7e9d..bd950592 100644 --- a/wqflask/wqflask/static/new/javascript/ctl_graph.js +++ b/wqflask/wqflask/static/new/javascript/ctl_graph.js @@ -82,18 +82,12 @@ window.onload=function() { function create_qtips(cy){ cy.nodes().qtip({ content: function(){ - gn_link = ''+''+this.data().id +''+'
' + gn_link = ''+''+this.data().id +''+'
' ncbi_link = 'NCBI'+'
' omim_link = '
OMIM'+'
' qtip_content = gn_link + ncbi_link + omim_link return qtip_content - //return ''+'
'+this.data().id +''+'' }, - // content: { - // title: ''+''+this.target() +''+'', - // text: this.target, - // button: true - // }, position: { my: 'top center', at: 'bottom center' diff --git a/wqflask/wqflask/static/new/javascript/dataset_select_menu_orig.js b/wqflask/wqflask/static/new/javascript/dataset_select_menu_orig.js index fad600d2..794804f4 100644 --- a/wqflask/wqflask/static/new/javascript/dataset_select_menu_orig.js +++ b/wqflask/wqflask/static/new/javascript/dataset_select_menu_orig.js @@ -74,7 +74,6 @@ redo_dropdown = function(dropdown, items) { this_opt_group = null for (_i = 0, _len = group_family_list.length; _i < _len; _i++) { item = group_family_list[_i]; - console.log("THE ITEM:", item) if (item[2] != "None" && current_family == ""){ current_family = item[2] this_opt_group = $("") diff --git a/wqflask/wqflask/static/new/javascript/network_graph.js b/wqflask/wqflask/static/new/javascript/network_graph.js index 4d507a18..02c3b817 100644 --- a/wqflask/wqflask/static/new/javascript/network_graph.js +++ b/wqflask/wqflask/static/new/javascript/network_graph.js @@ -85,7 +85,7 @@ window.onload=function() { cy.nodes().qtip({ content: function(){ qtip_content = '' - gn_link = ''+''+this.data().id +''+'
' + gn_link = ''+''+this.data().id +''+'
' qtip_content += gn_link if (typeof(this.data().geneid) !== 'undefined'){ ncbi_link = 'NCBI'+'
' @@ -115,7 +115,7 @@ window.onload=function() { correlation_line = 'Sample r: ' + this.data().correlation + '
' p_value_line = 'Sample p(r): ' + this.data().p_value + '
' overlap_line = 'Overlap: ' + this.data().overlap + '
' - scatter_plot = '
View Scatterplot' + scatter_plot = 'View Scatterplot' return correlation_line + p_value_line + overlap_line + scatter_plot }, position: { diff --git a/wqflask/wqflask/templates/ctl_results.html b/wqflask/wqflask/templates/ctl_results.html index d85075a9..0108d93a 100644 --- a/wqflask/wqflask/templates/ctl_results.html +++ b/wqflask/wqflask/templates/ctl_results.html @@ -61,6 +61,7 @@ diff --git a/wqflask/wqflask/templates/index_page.html b/wqflask/wqflask/templates/index_page.html index 0116245d..f8720d39 100644 --- a/wqflask/wqflask/templates/index_page.html +++ b/wqflask/wqflask/templates/index_page.html @@ -219,9 +219,9 @@

GN1 Mirror and development sites

diff --git a/wqflask/wqflask/templates/index_page_orig.html b/wqflask/wqflask/templates/index_page_orig.html index 963531cb..06b71f53 100755 --- a/wqflask/wqflask/templates/index_page_orig.html +++ b/wqflask/wqflask/templates/index_page_orig.html @@ -254,8 +254,8 @@

GeneNetwork v2:

GeneNetwork v1:

    diff --git a/wqflask/wqflask/templates/network_graph.html b/wqflask/wqflask/templates/network_graph.html index 4492dd3f..25af7bb1 100644 --- a/wqflask/wqflask/templates/network_graph.html +++ b/wqflask/wqflask/templates/network_graph.html @@ -137,6 +137,7 @@ -- cgit v1.2.3
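Editor's note: the final commit above replaces hard-coded gn2.genenetwork.org URLs with GN2_BASE_URL / GN2_BRANCH_URL values read from the settings module. A minimal sketch of that pattern, assuming an environment-variable fallback in the spirit of utility/tools.py; the default URL and helper names here are illustrative:

```python
# Sketch of pulling the site base URL from settings instead of hard-coding it.

import os

def get_setting(name, default=None):
    # prefer an environment override, otherwise fall back to the default
    return os.environ.get(name, default)

GN2_BASE_URL = get_setting("GN2_BASE_URL", "http://genenetwork.org")

def api_url(path):
    """Build an API URL relative to the configured base."""
    return GN2_BASE_URL.rstrip("/") + "/api/v_pre1/" + path.lstrip("/")

print(api_url("species/mouse"))  # http://genenetwork.org/api/v_pre1/species/mouse
```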