From 6e78df4be9abb7e1ce959e9b83b9d38bd77fcade Mon Sep 17 00:00:00 2001 From: zsloan Date: Tue, 2 Mar 2021 22:49:52 +0000 Subject: Adoping code from the following link to make resizeable columns for search result table - https://datatables.net/forums/discussion/63231/resizing-columns-using-jquery-ui --- wqflask/wqflask/templates/search_result_page.html | 519 +++++++++++++--------- 1 file changed, 303 insertions(+), 216 deletions(-) diff --git a/wqflask/wqflask/templates/search_result_page.html b/wqflask/wqflask/templates/search_result_page.html index e7a7bc51..a33d1b1a 100644 --- a/wqflask/wqflask/templates/search_result_page.html +++ b/wqflask/wqflask/templates/search_result_page.html @@ -7,6 +7,7 @@ + {% endblock %} {% block content %} @@ -171,225 +172,311 @@ return params; }; - //ZS: Need to make sort by symbol, also need to make sure blank symbol fields at the bottom and symbols starting with numbers below letters - trait_table = $('#trait_table').DataTable( { - 'drawCallback': function( settings ) { - $('#trait_table tr').off().on("click", function(event) { - if (event.target.type !== 'checkbox' && event.target.tagName.toLowerCase() !== 'a') { - var obj =$(this).find('input'); - obj.prop('checked', !obj.is(':checked')); - } - if ($(this).hasClass("selected") && event.target.tagName.toLowerCase() !== 'a'){ - $(this).removeClass("selected") - } else if (event.target.tagName.toLowerCase() !== 'a') { - $(this).addClass("selected") - } - change_buttons() - }); - }, - 'createdRow': function ( row, data, index ) { - $('td', row).eq(0).attr("style", "text-align: center; padding: 0px 10px 2px 10px;"); - $('td', row).eq(1).attr("align", "right"); - $('td', row).eq(1).attr('data-export', index+1); - $('td', row).eq(2).attr('data-export', $('td', row).eq(2).text()); - {% if dataset.type == 'ProbeSet' %} - $('td', row).eq(3).attr('title', $('td', row).eq(3).text()); - $('td', row).eq(3).attr('data-export', $('td', row).eq(3).text()); - if ($('td', row).eq(3).text().length > 20) { - $('td', row).eq(3).text($('td', row).eq(3).text().substring(0, 20)); - $('td', row).eq(3).text($('td', row).eq(3).text() + '...') - } - $('td', row).eq(4).attr('title', $('td', row).eq(4).text()); - $('td', row).eq(4).attr('data-export', $('td', row).eq(4).text()); - $('td', row).slice(5,10).attr("align", "right"); - $('td', row).eq(5).attr('data-export', $('td', row).eq(5).text()); - $('td', row).eq(6).attr('data-export', $('td', row).eq(6).text()); - $('td', row).eq(7).attr('data-export', $('td', row).eq(7).text()); - $('td', row).eq(8).attr('data-export', $('td', row).eq(8).text()); - $('td', row).eq(9).attr('data-export', $('td', row).eq(9).text()); - {% elif dataset.type == 'Publish' %} - $('td', row).eq(3).attr('title', $('td', row).eq(3).text()); - $('td', row).eq(3).attr('data-export', $('td', row).eq(3).text()); - $('td', row).eq(4).attr('title', $('td', row).eq(4).text()); - $('td', row).eq(4).attr('data-export', $('td', row).eq(4).text()); - $('td', row).eq(4).attr('align', 'right'); - $('td', row).slice(6,10).attr("align", "right"); - $('td', row).eq(5).attr('data-export', $('td', row).eq(5).text()); - $('td', row).eq(6).attr('data-export', $('td', row).eq(6).text()); - $('td', row).eq(7).attr('data-export', $('td', row).eq(7).text()); - $('td', row).eq(8).attr('data-export', $('td', row).eq(8).text()); - $('td', row).eq(9).attr('data-export', $('td', row).eq(8).text()); - {% elif dataset.type == 'Geno' %} - $('td', row).eq(3).attr('data-export', $('td', row).eq(3).text()); - {% endif %} + var tableId = 
"trait_table"; + + columnDefs = [ + { + 'data': null, + 'width': "25px", + 'orderDataType': "dom-checkbox", + 'orderable': false, + 'render': function(data, type, row, meta) { + return '' + } + }, + { + 'title': "Index", + 'type': "natural", + 'width': "30px", + 'data': "index" + }, + { + 'title': "Record", + 'type': "natural-minus-na", + 'data': null, + 'width': "60px", + 'render': function(data, type, row, meta) { + return '' + data.display_name + '' + } + }{% if dataset.type == 'ProbeSet' %}, + { + 'title': "Symbol", + 'type': "natural", + 'width': "120px", + 'data': "symbol" + }, + { + 'title': "Description", + 'type': "natural", + 'data': null, + 'render': function(data, type, row, meta) { + try { + return decodeURIComponent(escape(data.description)) + } catch(err){ + return escape(data.description) + } + } + }, + { + 'title': "
Location
", + 'type': "natural-minus-na", + 'width': "125px", + 'data': "location" + }, + { + 'title': "
Mean
", + 'type': "natural-minus-na", + 'width': "30px", + 'data': "mean", + 'orderSequence': [ "desc", "asc"] + }, + { + 'title': "
Peak  
LOD  
", + 'type': "natural-minus-na", + 'data': "lod_score", + 'width': "60px", + 'orderSequence': [ "desc", "asc"] + }, + { + 'title': "
Peak Location
", + 'type': "natural-minus-na", + 'width': "125px", + 'data': "lrs_location" + }, + { + 'title': "
Effect  
Size  
", + 'type': "natural-minus-na", + 'data': "additive", + 'width': "60px", + 'orderSequence': [ "desc", "asc"] + }{% elif dataset.type == 'Publish' %}, + { + 'title': "Description", + 'type': "natural", + 'width': "500px", + 'data': null, + 'render': function(data, type, row, meta) { + try { + return decodeURIComponent(escape(data.description)) + } catch(err){ + return data.description + } + } + }, + { + 'title': "
Mean
", + 'type': "natural-minus-na", + 'width': "30px", + 'data': "mean", + 'orderSequence': [ "desc", "asc"] + }, + { + 'title': "Authors", + 'type': "natural", + 'width': "300px", + 'data': null, + 'render': function(data, type, row, meta) { + author_list = data.authors.split(",") + if (author_list.length >= 6) { + author_string = author_list.slice(0, 6).join(",") + ", et al." + } else{ + author_string = data.authors + } + return author_string + } + }, + { + 'title': "
Year
", + 'type': "natural-minus-na", + 'data': null, + 'width': "25px", + 'render': function(data, type, row, meta) { + if (data.pubmed_id != "N/A"){ + return '' + data.pubmed_text + '' + } else { + return data.pubmed_text + } }, - 'data': trait_list, - 'columns': [ - { - 'data': null, - 'width': "25px", - 'orderDataType': "dom-checkbox", - 'orderable': false, - 'render': function(data, type, row, meta) { - return '' - } - }, - { - 'title': "Index", - 'type': "natural", - 'width': "30px", - 'data': "index" - }, - { - 'title': "Record", - 'type': "natural-minus-na", - 'data': null, - 'width': "60px", - 'render': function(data, type, row, meta) { - return '' + data.display_name + '' - } - }{% if dataset.type == 'ProbeSet' %}, - { - 'title': "Symbol", - 'type': "natural", - 'width': "120px", - 'data': "symbol" - }, - { - 'title': "Description", - 'type': "natural", - 'data': null, - 'render': function(data, type, row, meta) { - try { - return decodeURIComponent(escape(data.description)) - } catch(err){ - return escape(data.description) - } - } - }, - { - 'title': "
Location
", - 'type': "natural-minus-na", - 'width': "125px", - 'data': "location" - }, - { - 'title': "
Mean
", - 'type': "natural-minus-na", - 'width': "30px", - 'data': "mean", - 'orderSequence': [ "desc", "asc"] - }, - { - 'title': "
Peak  
LOD  
", - 'type': "natural-minus-na", - 'data': "lod_score", - 'width': "60px", - 'orderSequence': [ "desc", "asc"] - }, - { - 'title': "
Peak Location
", - 'type': "natural-minus-na", - 'width': "125px", - 'data': "lrs_location" - }, - { - 'title': "
Effect  
Size  
", - 'type': "natural-minus-na", - 'data': "additive", - 'width': "60px", - 'orderSequence': [ "desc", "asc"] - }{% elif dataset.type == 'Publish' %}, - { - 'title': "Description", - 'type': "natural", - 'width': "500px", - 'data': null, - 'render': function(data, type, row, meta) { - try { - return decodeURIComponent(escape(data.description)) - } catch(err){ - return data.description - } + 'orderSequence': [ "desc", "asc"] + }, + { + 'title': "
Peak  
LOD  
", + 'type': "natural-minus-na", + 'data': "lod_score", + 'width': "60px", + 'orderSequence': [ "desc", "asc"] + }, + { + 'title': "
Peak Location
", + 'type': "natural-minus-na", + 'width': "120px", + 'data': "lrs_location" + }, + { + 'title': "
Effect  
Size  
", + 'type': "natural-minus-na", + 'width': "60px", + 'data': "additive", + 'orderSequence': [ "desc", "asc"] + }{% elif dataset.type == 'Geno' %}, + { + 'title': "
Location
", + 'type': "natural-minus-na", + 'width': "120px", + 'data': "location" + }{% endif %} + ]; + + loadDataTable(); + + function loadDataTable(){ + //ZS: Need to make sort by symbol, also need to make sure blank symbol fields at the bottom and symbols starting with numbers below letters + trait_table = $('#' + tableId).DataTable( { + 'drawCallback': function( settings ) { + $('#' + tableId + ' tr').off().on("click", function(event) { + if (event.target.type !== 'checkbox' && event.target.tagName.toLowerCase() !== 'a') { + var obj =$(this).find('input'); + obj.prop('checked', !obj.is(':checked')); + } + if ($(this).hasClass("selected") && event.target.tagName.toLowerCase() !== 'a'){ + $(this).removeClass("selected") + } else if (event.target.tagName.toLowerCase() !== 'a') { + $(this).addClass("selected") + } + change_buttons() + }); + }, + 'createdRow': function ( row, data, index ) { + $('td', row).eq(0).attr("style", "text-align: center; padding: 0px 10px 2px 10px;"); + $('td', row).eq(1).attr("align", "right"); + $('td', row).eq(1).attr('data-export', index+1); + $('td', row).eq(2).attr('data-export', $('td', row).eq(2).text()); + {% if dataset.type == 'ProbeSet' %} + $('td', row).eq(3).attr('title', $('td', row).eq(3).text()); + $('td', row).eq(3).attr('data-export', $('td', row).eq(3).text()); + if ($('td', row).eq(3).text().length > 20) { + $('td', row).eq(3).text($('td', row).eq(3).text().substring(0, 20)); + $('td', row).eq(3).text($('td', row).eq(3).text() + '...') } - }, - { - 'title': "
Mean
", - 'type': "natural-minus-na", - 'width': "30px", - 'data': "mean", - 'orderSequence': [ "desc", "asc"] - }, - { - 'title': "Authors", - 'type': "natural", - 'width': "300px", - 'data': null, - 'render': function(data, type, row, meta) { - author_list = data.authors.split(",") - if (author_list.length >= 6) { - author_string = author_list.slice(0, 6).join(",") + ", et al." - } else{ - author_string = data.authors - } - return author_string + $('td', row).eq(4).attr('title', $('td', row).eq(4).text()); + $('td', row).eq(4).attr('data-export', $('td', row).eq(4).text()); + $('td', row).slice(5,10).attr("align", "right"); + $('td', row).eq(5).attr('data-export', $('td', row).eq(5).text()); + $('td', row).eq(6).attr('data-export', $('td', row).eq(6).text()); + $('td', row).eq(7).attr('data-export', $('td', row).eq(7).text()); + $('td', row).eq(8).attr('data-export', $('td', row).eq(8).text()); + $('td', row).eq(9).attr('data-export', $('td', row).eq(9).text()); + {% elif dataset.type == 'Publish' %} + $('td', row).eq(3).attr('title', $('td', row).eq(3).text()); + $('td', row).eq(3).attr('data-export', $('td', row).eq(3).text()); + $('td', row).eq(4).attr('title', $('td', row).eq(4).text()); + $('td', row).eq(4).attr('data-export', $('td', row).eq(4).text()); + $('td', row).eq(4).attr('align', 'right'); + $('td', row).slice(6,10).attr("align", "right"); + $('td', row).eq(5).attr('data-export', $('td', row).eq(5).text()); + $('td', row).eq(6).attr('data-export', $('td', row).eq(6).text()); + $('td', row).eq(7).attr('data-export', $('td', row).eq(7).text()); + $('td', row).eq(8).attr('data-export', $('td', row).eq(8).text()); + $('td', row).eq(9).attr('data-export', $('td', row).eq(8).text()); + {% elif dataset.type == 'Geno' %} + $('td', row).eq(3).attr('data-export', $('td', row).eq(3).text()); + {% endif %} + }, + "data": trait_list, + "columns": columnDefs, + "order": [[1, "asc" ]], + "sDom": "iti", + "destroy": true, + "autoWidth": false, + "deferRender": true, + "bSortClasses": false, + {% if trait_list|length > 20 %} + "scrollY": "100vh", + "scroller": true, + "scrollCollapse": true, + {% else %} + "iDisplayLength": -1, + {% endif %} + "initComplete": function (settings) { + //Add JQueryUI resizable functionality to each th in the ScrollHead table + $('#' + tableId + '_wrapper .dataTables_scrollHead thead th').resizable({ + handles: "e", + alsoResize: '#' + tableId + '_wrapper .dataTables_scrollHead table', //Not essential but makes the resizing smoother + stop: function () { + saveColumnSettings(); + loadDataTable(); } - }, - { - 'title': "
Year
", - 'type': "natural-minus-na", - 'data': null, - 'width': "25px", - 'render': function(data, type, row, meta) { - if (data.pubmed_id != "N/A"){ - return '' + data.pubmed_text + '' - } else { - return data.pubmed_text - } - }, - 'orderSequence': [ "desc", "asc"] - }, - { - 'title': "
Peak  
LOD  
", - 'type': "natural-minus-na", - 'data': "lod_score", - 'width': "60px", - 'orderSequence': [ "desc", "asc"] - }, - { - 'title': "
Peak Location
", - 'type': "natural-minus-na", - 'width': "120px", - 'data': "lrs_location" - }, - { - 'title': "
Effect  
Size  
", - 'type': "natural-minus-na", - 'width': "60px", - 'data': "additive", - 'orderSequence': [ "desc", "asc"] - }{% elif dataset.type == 'Geno' %}, - { - 'title': "
Location
", - 'type': "natural-minus-na", - 'width': "120px", - 'data': "location" - }{% endif %} - ], - "order": [[1, "asc" ]], - 'sDom': "iti", - "autoWidth": true, - "bSortClasses": false, - {% if trait_list|length > 20 %} - "scrollY": "100vh", - "scroller": true, - "scrollCollapse": true - {% else %} - "iDisplayLength": -1 - {% endif %} - } ); + }); + }, + } ); + } + + function setUserColumnsDefWidths() { + + var userColumnDef; + + // Get the settings for this table from localStorage + var userColumnDefs = JSON.parse(localStorage.getItem(tableId)) || []; + + if (userColumnDefs.length === 0 ) return; + + columnDefs.forEach( function(columnDef) { + + // Check if there is a width specified for this column + userColumnDef = userColumnDefs.find( function(column) { + return column.targets === columnDef.targets; + }); + + // If there is, set the width of this columnDef in px + if ( userColumnDef ) { + + columnDef.width = userColumnDef.width + 'px'; + + } + + }); + + } + + + function saveColumnSettings() { + + var userColumnDefs = JSON.parse(localStorage.getItem(tableId)) || []; + + var width, header, existingSetting; + + trait_table.columns().every( function ( targets ) { + + // Check if there is a setting for this column in localStorage + existingSetting = userColumnDefs.findIndex( function(column) { return column.targets === targets;}); + + // Get the width of this column + header = this.header(); + width = $(header).width(); + + if ( existingSetting !== -1 ) { + + // Update the width + userColumnDefs[existingSetting].width = width; + + } else { + + // Add the width for this column + userColumnDefs.push({ + targets: targets, + width: width, + }); + + } + + }); + + // Save (or update) the settings in localStorage + localStorage.setItem(tableId, JSON.stringify(userColumnDefs)); + + } - trait_table.draw(); //ZS: This makes the table adjust its height properly on initial load + //trait_table.draw(); //ZS: This makes the table adjust its height properly on initial load $('.toggle-vis').on( 'click', function (e) { e.preventDefault(); @@ -409,7 +496,7 @@ $('#redraw').click(function() { - var table = $('#trait_table').DataTable(); + var table = $('#' + tableId).DataTable(); table.colReorder.reset() }); -- cgit v1.2.3 From dffcbc4d0c7370492ff976dae5f21bd06a621bc3 Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 3 Mar 2021 19:13:19 +0000 Subject: Checked if pre_publication_description is NULL, because there was an error resulting from a trait missing both pre_publication_description and pubmed ID --- wqflask/base/trait.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index 41e2603c..df96d46e 100644 --- a/wqflask/base/trait.py +++ b/wqflask/base/trait.py @@ -516,10 +516,11 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): # If the dataset is confidential and the user has access to confidential # phenotype traits, then display the pre-publication description instead # of the post-publication description - trait.description_display = "" + trait.description_display = "N/A" if not trait.pubmed_id: trait.abbreviation = trait.pre_publication_abbreviation - trait.description_display = trait.pre_publication_description + if trait.pre_publication_description: + trait.description_display = trait.pre_publication_description else: trait.abbreviation = trait.post_publication_abbreviation if description: -- cgit v1.2.3 From 6b0f4e959362643df91221cf2f0a9efe2eb4dd70 Mon Sep 17 00:00:00 2001 From: zsloan Date: Thu, 4 Mar 2021 22:02:51 +0000 
Subject: Fixed issue that caused JS to break when trait description contains a newline character --- wqflask/wqflask/templates/mapping_results.html | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/wqflask/wqflask/templates/mapping_results.html b/wqflask/wqflask/templates/mapping_results.html index c673ce51..c08b21ef 100644 --- a/wqflask/wqflask/templates/mapping_results.html +++ b/wqflask/wqflask/templates/mapping_results.html @@ -372,6 +372,7 @@ + + + + + + + + + + + + +{% endblock %} -- cgit v1.2.3 From cf42f769ec4db2efaebca64c63454935cc28b2a3 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Thu, 25 Mar 2021 03:55:16 +0300 Subject: modify gn3 integration code --- wqflask/wqflask/correlation/correlation_gn3_api.py | 89 +++++++++------------- .../wqflask/templates/demo_correlation_page.html | 22 +----- 2 files changed, 38 insertions(+), 73 deletions(-) diff --git a/wqflask/wqflask/correlation/correlation_gn3_api.py b/wqflask/wqflask/correlation/correlation_gn3_api.py index 7e269e41..7e865bf3 100644 --- a/wqflask/wqflask/correlation/correlation_gn3_api.py +++ b/wqflask/wqflask/correlation/correlation_gn3_api.py @@ -10,59 +10,6 @@ from wqflask.base.trait import retrieve_sample_data GN3_CORRELATION_API = "http://127.0.0.1:8080/api/correlation" -def compute_sample(target_dataset, trait_data, target_samplelist, method="pearson"): - """integration for integrating sample_r api correlation""" - data = { - "target_dataset": target_dataset, - "target_samplelist": target_samplelist, - "trait_data": { - "trait_sample_data": trait_data, - "trait_id": "HC_Q" - } - } - requests_url = f"http://127.0.0.1:8080/api/correlation/sample_x/{method}" - - results = requests.post(requests_url, json=data) - - data = results.json() - - return data - - -def get_tissue_correlation_input(this_trait, trait_symbol_dict): - """Gets tissue expression values for the primary trait and target tissues values""" - primary_trait_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values( - symbol_list=[this_trait.symbol]) - - if this_trait.symbol.lower() in primary_trait_tissue_vals_dict: - primary_trait_tissue_values = primary_trait_tissue_vals_dict[this_trait.symbol.lower( - )] - - corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values( - symbol_list=list(trait_symbol_dict.values())) - - target_tissue_data = [] - for trait, symbol in list(trait_symbol_dict.items()): - if symbol and symbol.lower() in corr_result_tissue_vals_dict: - this_trait_tissue_values = corr_result_tissue_vals_dict[symbol.lower( - )] - - this_trait_data = {"trait_id": trait, - "tissue_values": this_trait_tissue_values} - - target_tissue_data.append(this_trait_data) - - primary_tissue_data = { - "this_id": "TT", - "tissue_values": primary_trait_tissue_values - - } - - return (primary_tissue_data, target_tissue_data) - - return None - - def process_samples(start_vars, sample_names, excluded_samples=None): """process samples method""" sample_data = {} @@ -81,7 +28,7 @@ def process_samples(start_vars, sample_names, excluded_samples=None): def create_target_this_trait(start_vars): - """this function prefetch required data for correlation""" + """this function creates the required trait and target dataset for correlation""" this_dataset = data_set.create_dataset(dataset_name=start_vars['dataset']) target_dataset = data_set.create_dataset( @@ -148,3 +95,37 @@ def compute_correlation(start_vars, method="pearson"): data = corr_results.json() return data + + +def 
get_tissue_correlation_input(this_trait, trait_symbol_dict): + """Gets tissue expression values for the primary trait and target tissues values""" + primary_trait_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values( + symbol_list=[this_trait.symbol]) + + if this_trait.symbol.lower() in primary_trait_tissue_vals_dict: + primary_trait_tissue_values = primary_trait_tissue_vals_dict[this_trait.symbol.lower( + )] + + corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values( + symbol_list=list(trait_symbol_dict.values())) + + target_tissue_data = [] + for trait, symbol in list(trait_symbol_dict.items()): + if symbol and symbol.lower() in corr_result_tissue_vals_dict: + this_trait_tissue_values = corr_result_tissue_vals_dict[symbol.lower( + )] + + this_trait_data = {"trait_id": trait, + "tissue_values": this_trait_tissue_values} + + target_tissue_data.append(this_trait_data) + + primary_tissue_data = { + "this_id": "TT", + "tissue_values": primary_trait_tissue_values + + } + + return (primary_tissue_data, target_tissue_data) + + return None diff --git a/wqflask/wqflask/templates/demo_correlation_page.html b/wqflask/wqflask/templates/demo_correlation_page.html index 4d310051..ddcdf38d 100644 --- a/wqflask/wqflask/templates/demo_correlation_page.html +++ b/wqflask/wqflask/templates/demo_correlation_page.html @@ -10,27 +10,11 @@ {% endblock %} {% block content %}
- {{correlation_results}} + - -
-{% endblock %} -{% block js %} - - - - - - - - - - - - - + {% endblock %} -- cgit v1.2.3 From 7de35627a6dc3fa48a039c932be005ffe6c175c4 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Thu, 25 Mar 2021 04:02:49 +0300 Subject: fix import error --- wqflask/wqflask/correlation/correlation_gn3_api.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/wqflask/wqflask/correlation/correlation_gn3_api.py b/wqflask/wqflask/correlation/correlation_gn3_api.py index 7e865bf3..479bb0d8 100644 --- a/wqflask/wqflask/correlation/correlation_gn3_api.py +++ b/wqflask/wqflask/correlation/correlation_gn3_api.py @@ -1,11 +1,11 @@ """module that calls the gn3 api's to do the correlation """ import json import requests -from wqflask.wqflask.correlation import correlation_functions +from wqflask.correlation import correlation_functions -from wqflask.base import data_set -from wqflask.base.trait import create_trait -from wqflask.base.trait import retrieve_sample_data +from base import data_set +from base.trait import create_trait +from base.trait import retrieve_sample_data GN3_CORRELATION_API = "http://127.0.0.1:8080/api/correlation" -- cgit v1.2.3 From da72efa86846179d8d2aa64cd7b06a894469dc85 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Thu, 25 Mar 2021 10:14:31 +0300 Subject: minor fix --- wqflask/wqflask/correlation/correlation_gn3_api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/correlation/correlation_gn3_api.py b/wqflask/wqflask/correlation/correlation_gn3_api.py index 479bb0d8..f1137c0e 100644 --- a/wqflask/wqflask/correlation/correlation_gn3_api.py +++ b/wqflask/wqflask/correlation/correlation_gn3_api.py @@ -88,7 +88,7 @@ def compute_correlation(start_vars, method="pearson"): else: pass # lit correlation/literature - # can fetch values in gn3 not set up in gn3 + # to fetch values from the database corr_results = requests.post(requests_url, json=corr_input_data) -- cgit v1.2.3 From 0daaa41adafdfbfed10c0dca27ef5eef008441da Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 26 Mar 2021 19:41:25 +0000 Subject: Added try/except to deal with the possibility of user_id being stored in Redis as both string and bytes --- wqflask/wqflask/user_session.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/wqflask/wqflask/user_session.py b/wqflask/wqflask/user_session.py index d6f3b3fc..c5a577df 100644 --- a/wqflask/wqflask/user_session.py +++ b/wqflask/wqflask/user_session.py @@ -129,7 +129,10 @@ class UserSession(object): if b'user_id' not in self.record: self.record[b'user_id'] = str(uuid.uuid4()) - return self.record[b'user_id'] + try: + return self.record[b'user_id'].decode("utf-8") + except: + return self.record[b'user_id'] @property def redis_user_id(self): -- cgit v1.2.3 From c079bdd969b4e5f18815fad7c9939edab3522866 Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 26 Mar 2021 19:43:47 +0000 Subject: Split rat RRID by _ in order to pull out just the ID number, since the number is needed for the links --- wqflask/wqflask/show_trait/SampleList.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/show_trait/SampleList.py b/wqflask/wqflask/show_trait/SampleList.py index 112298a1..857e4456 100644 --- a/wqflask/wqflask/show_trait/SampleList.py +++ b/wqflask/wqflask/show_trait/SampleList.py @@ -78,7 +78,7 @@ class SampleList(object): sample.extra_attributes['rrid'].append(webqtlConfig.RRID_MOUSE_URL % the_rrid) elif self.dataset.group.species == "rat": if 
len(str(sample.extra_attributes['rrid'])): - the_rrid = sample.extra_attributes['rrid'] + the_rrid = sample.extra_attributes['rrid'].split("_")[1] sample.extra_attributes['rrid'] = [sample.extra_attributes['rrid']] sample.extra_attributes['rrid'].append(webqtlConfig.RRID_RAT_URL % the_rrid) -- cgit v1.2.3 From 08ddec9dcbaa1730d0b65b643aa5c99d1077d4d5 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Sun, 28 Mar 2021 21:13:52 +0300 Subject: refactor correlation integration code --- wqflask/wqflask/correlation/correlation_gn3_api.py | 42 ++++++++++++---------- .../wqflask/templates/demo_correlation_page.html | 6 ++-- 2 files changed, 27 insertions(+), 21 deletions(-) diff --git a/wqflask/wqflask/correlation/correlation_gn3_api.py b/wqflask/wqflask/correlation/correlation_gn3_api.py index f1137c0e..1cd1b332 100644 --- a/wqflask/wqflask/correlation/correlation_gn3_api.py +++ b/wqflask/wqflask/correlation/correlation_gn3_api.py @@ -63,6 +63,7 @@ def compute_correlation(start_vars, method="pearson"): if corr_type == "sample": corr_input_data = { + "target": target_dataset, "target_dataset": target_dataset.trait_data, "target_samplelist": target_dataset.samplelist, "trait_data": { @@ -80,16 +81,17 @@ def compute_correlation(start_vars, method="pearson"): corr_input_data = { "primary_tissue": primary_tissue_data, - "target_tissues": target_tissue_data + "target_tissues_dict": target_tissue_data } requests_url = f"{GN3_CORRELATION_API}/tissue_corr/{method}" - else: - pass - # lit correlation/literature - # to fetch values from the database + elif corr_type == "lit": + (this_trait_geneid, geneid_dict, species) = do_lit_correlation( + this_trait, this_dataset, target_dataset) + requests_url = f"{GN3_CORRELATION_API}/lit_corr/{species}/{this_trait_geneid}" + corr_input_data = geneid_dict corr_results = requests.post(requests_url, json=corr_input_data) data = corr_results.json() @@ -97,6 +99,18 @@ def compute_correlation(start_vars, method="pearson"): return data +def do_lit_correlation(this_trait, this_dataset, target_dataset): + geneid_dict = this_dataset.retrieve_genes("GeneId") + species = this_dataset.group.species.lower() + + this_trait_geneid = this_trait.geneid + this_trait_gene_data = { + this_trait.name: this_trait_geneid + } + + return (this_trait_geneid, geneid_dict, species) + + def get_tissue_correlation_input(this_trait, trait_symbol_dict): """Gets tissue expression values for the primary trait and target tissues values""" primary_trait_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values( @@ -108,23 +122,15 @@ def get_tissue_correlation_input(this_trait, trait_symbol_dict): corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values( symbol_list=list(trait_symbol_dict.values())) - - target_tissue_data = [] - for trait, symbol in list(trait_symbol_dict.items()): - if symbol and symbol.lower() in corr_result_tissue_vals_dict: - this_trait_tissue_values = corr_result_tissue_vals_dict[symbol.lower( - )] - - this_trait_data = {"trait_id": trait, - "tissue_values": this_trait_tissue_values} - - target_tissue_data.append(this_trait_data) - primary_tissue_data = { - "this_id": "TT", + "this_id": this_trait.name, "tissue_values": primary_trait_tissue_values } + target_tissue_data = { + "trait_symbol_dict": trait_symbol_dict, + "symbol_tissue_vals_dict": corr_result_tissue_vals_dict + } return (primary_tissue_data, target_tissue_data) diff --git a/wqflask/wqflask/templates/demo_correlation_page.html 
b/wqflask/wqflask/templates/demo_correlation_page.html index ddcdf38d..a8651067 100644 --- a/wqflask/wqflask/templates/demo_correlation_page.html +++ b/wqflask/wqflask/templates/demo_correlation_page.html @@ -10,11 +10,11 @@ {% endblock %} {% block content %}
- - + {{correlation_results}} +
{% endblock %} -- cgit v1.2.3 From 19f91de0a78b1805b00b8345523c62c7fee31223 Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 29 Mar 2021 19:59:59 +0000 Subject: Check if a trait in a collection is a properly structured string to avoid an error caused by an empty string be stored in a collection's trait list --- wqflask/wqflask/collect.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/wqflask/wqflask/collect.py b/wqflask/wqflask/collect.py index e074a3d8..0291f2b8 100644 --- a/wqflask/wqflask/collect.py +++ b/wqflask/wqflask/collect.py @@ -191,6 +191,8 @@ def view_collection(): json_version = [] for atrait in traits: + if ':' not in atrait: + continue name, dataset_name = atrait.split(':') if dataset_name == "Temp": group = name.split("_")[2] -- cgit v1.2.3 From 0a6b9ec767a12bedeb892f400e6b04bbd6160673 Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 29 Mar 2021 22:00:44 +0000 Subject: Changed minimum num_overlap to 2, since there apparently need to be that many shared samples torun scipy.stats.pearsonr/spearmanr --- wqflask/wqflask/network_graph/network_graph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/network_graph/network_graph.py b/wqflask/wqflask/network_graph/network_graph.py index 723a749f..1d5316a2 100644 --- a/wqflask/wqflask/network_graph/network_graph.py +++ b/wqflask/wqflask/network_graph/network_graph.py @@ -99,7 +99,7 @@ class NetworkGraph(object): if num_overlap < self.lowest_overlap: self.lowest_overlap = num_overlap - if num_overlap == 0: + if num_overlap < 2: continue else: pearson_r, pearson_p = scipy.stats.pearsonr( -- cgit v1.2.3 From 6a5576bb1271060c703871aedf16360847c68f8b Mon Sep 17 00:00:00 2001 From: zsloan Date: Tue, 30 Mar 2021 17:14:46 +0000 Subject: Shifted the 'try' in a try/except up some to account for a possible error with calculating PCA that I don't know the cause of yet --- wqflask/wqflask/correlation_matrix/show_corr_matrix.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py index a77877d2..a67ea9f4 100644 --- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py +++ b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py @@ -150,14 +150,14 @@ class CorrelationMatrix(object): this_trait_vals.append(sample_value) self.trait_data_array.append(this_trait_vals) - corr_result_eigen = np.linalg.eig(np.array(self.pca_corr_results)) - corr_eigen_value, corr_eigen_vectors = sortEigenVectors(corr_result_eigen) + try: + corr_result_eigen = np.linalg.eig(np.array(self.pca_corr_results)) + corr_eigen_value, corr_eigen_vectors = sortEigenVectors(corr_result_eigen) - groups = [] - for sample in self.all_sample_list: - groups.append(1) + groups = [] + for sample in self.all_sample_list: + groups.append(1) - try: if self.do_PCA == True: self.pca_works = "True" self.pca_trait_ids = [] -- cgit v1.2.3 From ef51e08753defdfc7f3e67f8788cd1362d2cf631 Mon Sep 17 00:00:00 2001 From: zsloan Date: Tue, 30 Mar 2021 18:29:56 +0000 Subject: Shifted some code out of the try/except that shouldn't have been inside it --- wqflask/wqflask/correlation_matrix/show_corr_matrix.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py index a67ea9f4..f77761d8 100644 --- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py +++ 
b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py @@ -150,14 +150,14 @@ class CorrelationMatrix(object): this_trait_vals.append(sample_value) self.trait_data_array.append(this_trait_vals) + groups = [] + for sample in self.all_sample_list: + groups.append(1) + try: corr_result_eigen = np.linalg.eig(np.array(self.pca_corr_results)) corr_eigen_value, corr_eigen_vectors = sortEigenVectors(corr_result_eigen) - groups = [] - for sample in self.all_sample_list: - groups.append(1) - if self.do_PCA == True: self.pca_works = "True" self.pca_trait_ids = [] -- cgit v1.2.3 From fcb93bef5ab230b948f83e0e77a1ef54b017aca1 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Thu, 8 Apr 2021 23:59:13 +0300 Subject: minor fix --- wqflask/wqflask/correlation/correlation_gn3_api.py | 1 - 1 file changed, 1 deletion(-) diff --git a/wqflask/wqflask/correlation/correlation_gn3_api.py b/wqflask/wqflask/correlation/correlation_gn3_api.py index 1cd1b332..c8d5347c 100644 --- a/wqflask/wqflask/correlation/correlation_gn3_api.py +++ b/wqflask/wqflask/correlation/correlation_gn3_api.py @@ -63,7 +63,6 @@ def compute_correlation(start_vars, method="pearson"): if corr_type == "sample": corr_input_data = { - "target": target_dataset, "target_dataset": target_dataset.trait_data, "target_samplelist": target_dataset.samplelist, "trait_data": { -- cgit v1.2.3 From e7b589f05e1c13612ea2f7245d66cc3f054fa14b Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 9 Apr 2021 17:40:53 +0000 Subject: Added varaiable 'categorical_attr_exists' tracking whether there are any case attributes with fewer than 10 distinct values, since it currently throws a JS error if case attributes exist but none have fewer than 10 distinct values (specifically when we have RRID as a case attribute) --- wqflask/wqflask/show_trait/show_trait.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index 878c41c0..d3267190 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -203,6 +203,13 @@ class ShowTrait(object): if sample.value < 0: self.negative_vals_exist = "true" + #ZS: Check whether any attributes have few enough distinct values to show the "Block samples by group" option + self.categorical_attr_exists = False + for attribute in self.sample_groups[0].attributes: + if len(self.sample_groups[0].attributes[attribute].distinct_values) <= 10: + self.categorical_attr_exists = True + break + sample_column_width = max_samplename_width * 8 self.stats_table_width, self.trait_table_width = get_table_widths(self.sample_groups, sample_column_width, self.has_num_cases) @@ -277,6 +284,7 @@ class ShowTrait(object): se_exists = self.sample_groups[0].se_exists, has_num_cases = self.has_num_cases, attributes = self.sample_groups[0].attributes, + categorical_attr_exists = self.categorical_attr_exists, categorical_vars = ",".join(categorical_var_list), num_values = self.num_values, qnorm_values = self.qnorm_vals, -- cgit v1.2.3 From 9d7da4653c8b0241af712043bb375e3f2bc52a3f Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 9 Apr 2021 17:43:30 +0000 Subject: Store categorical_attr_exists as a string instead of boolean since apparently the boolean doesn't get passed to the template properly --- wqflask/wqflask/show_trait/show_trait.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index d3267190..6892f02b 100644 --- 
a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -204,10 +204,10 @@ class ShowTrait(object): self.negative_vals_exist = "true" #ZS: Check whether any attributes have few enough distinct values to show the "Block samples by group" option - self.categorical_attr_exists = False + self.categorical_attr_exists = "false" for attribute in self.sample_groups[0].attributes: if len(self.sample_groups[0].attributes[attribute].distinct_values) <= 10: - self.categorical_attr_exists = True + self.categorical_attr_exists = "true" break sample_column_width = max_samplename_width * 8 -- cgit v1.2.3 From 9fa88673447ab13dcd1b899c0e6c2c5915dd0114 Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 9 Apr 2021 17:44:03 +0000 Subject: Replaced the conditional for whether to show 'Block samples by group' to instead check categorical_attr_exists --- wqflask/wqflask/templates/show_trait_transform_and_filter.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/templates/show_trait_transform_and_filter.html b/wqflask/wqflask/templates/show_trait_transform_and_filter.html index b70ca590..e3f5ef81 100644 --- a/wqflask/wqflask/templates/show_trait_transform_and_filter.html +++ b/wqflask/wqflask/templates/show_trait_transform_and_filter.html @@ -20,7 +20,7 @@ - {% if sample_groups[0].attributes %} + {% if categorical_attr_exists == "true" %}
+

This collection has {{ '{}'.format(numify(trait_obs|count, "record", "records")) }}

@@ -231,6 +232,21 @@ $('#collection_name').css('display', 'inline'); } }); + + make_default = function() { + alert("The current collection is now your default collection.") + let uc_id = $('#uc_id').val(); + $.cookie('default_collection', uc_id, { + expires: 365, + path: '/' + }); + + let default_collection_id = $.cookie('default_collection'); + }; + + $("#make_default").on("click", function(){ + make_default(); + }); }); -- cgit v1.2.3 From 939a27cb0ede3102ce929e445690e1ba86d5870a Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 14 Apr 2021 20:26:34 +0000 Subject: Added JS that automatically selects the default collection if it's set --- wqflask/wqflask/templates/collections/add.html | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/wqflask/wqflask/templates/collections/add.html b/wqflask/wqflask/templates/collections/add.html index b4e5385b..0398c6e4 100644 --- a/wqflask/wqflask/templates/collections/add.html +++ b/wqflask/wqflask/templates/collections/add.html @@ -49,8 +49,20 @@
-- cgit v1.2.3 From 328b176628ed9db6c1c60590cb10f4cca212738a Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Thu, 15 Apr 2021 06:10:28 +0300 Subject: change api port --- wqflask/wqflask/correlation/correlation_gn3_api.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/wqflask/wqflask/correlation/correlation_gn3_api.py b/wqflask/wqflask/correlation/correlation_gn3_api.py index c8d5347c..8ee4a9b7 100644 --- a/wqflask/wqflask/correlation/correlation_gn3_api.py +++ b/wqflask/wqflask/correlation/correlation_gn3_api.py @@ -7,7 +7,7 @@ from base import data_set from base.trait import create_trait from base.trait import retrieve_sample_data -GN3_CORRELATION_API = "http://127.0.0.1:8080/api/correlation" +GN3_CORRELATION_API = "http://127.0.0.1:8202/api/correlation" def process_samples(start_vars, sample_names, excluded_samples=None): @@ -30,6 +30,12 @@ def process_samples(start_vars, sample_names, excluded_samples=None): def create_target_this_trait(start_vars): """this function creates the required trait and target dataset for correlation""" + + print("creating the dataset and trait") + import time + + initial_time = time.time() + this_dataset = data_set.create_dataset(dataset_name=start_vars['dataset']) target_dataset = data_set.create_dataset( dataset_name=start_vars['corr_dataset']) @@ -44,6 +50,11 @@ def create_target_this_trait(start_vars): target_dataset.get_trait_data(list(sample_data.keys())) + + time_taken = time.time() - initial_time + + print(f"the time taken to create dataset abnd trait is",time_taken) + return (this_dataset, this_trait, target_dataset, sample_data) @@ -91,6 +102,8 @@ def compute_correlation(start_vars, method="pearson"): requests_url = f"{GN3_CORRELATION_API}/lit_corr/{species}/{this_trait_geneid}" corr_input_data = geneid_dict + + print("Sending this request") corr_results = requests.post(requests_url, json=corr_input_data) data = corr_results.json() -- cgit v1.2.3 From 5a9a7a645510d1385def017adf2f956d61fa2329 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Fri, 16 Apr 2021 02:09:28 +0300 Subject: add demo template --- wqflask/wqflask/correlation/correlation_gn3_api.py | 3 + .../wqflask/templates/demo_correlation_page.html | 78 ++++++++++++++++++++-- wqflask/wqflask/views.py | 2 +- 3 files changed, 77 insertions(+), 6 deletions(-) diff --git a/wqflask/wqflask/correlation/correlation_gn3_api.py b/wqflask/wqflask/correlation/correlation_gn3_api.py index 8ee4a9b7..b4480076 100644 --- a/wqflask/wqflask/correlation/correlation_gn3_api.py +++ b/wqflask/wqflask/correlation/correlation_gn3_api.py @@ -47,11 +47,14 @@ def create_target_this_trait(start_vars): # target_dataset.get_trait_data(list(self.sample_data.keys())) this_trait = retrieve_sample_data(this_trait, this_dataset) + print(f"Starting to creat the target dataset ") + dataset_start_time = time.time() target_dataset.get_trait_data(list(sample_data.keys())) time_taken = time.time() - initial_time + print(f"the time taken to create dataset is",time.time()-dataset_start_time) print(f"the time taken to create dataset abnd trait is",time_taken) diff --git a/wqflask/wqflask/templates/demo_correlation_page.html b/wqflask/wqflask/templates/demo_correlation_page.html index a8651067..1900a0bd 100644 --- a/wqflask/wqflask/templates/demo_correlation_page.html +++ b/wqflask/wqflask/templates/demo_correlation_page.html @@ -10,11 +10,79 @@ {% endblock %} {% block content %}
- {{correlation_results}} - +
CORRELATION RESULTS
+
+

Trait_Name

+

Rho value

+

Num overlap

+

P value

+
+
+ {% for corr_result in correlation_results %} + {% for key,value in corr_result.items()%} +
+

trait_name_here

+ {%for o_key,o_value in value.items()%} +

{{o_value}}

+ {%endfor%} + {% endfor %} +
+ {% endfor %} +
+ + {% endblock %} + diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 6ca9b23f..072db466 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -882,7 +882,7 @@ def corr_compute_page(): logger.info("In corr_compute, request.form is:", pf(request.form)) logger.info(request.url) correlation_results = compute_correlation(request.form) - return render_template("demo_correlation_page.html",correlation_results=correlation_results) + return render_template("demo_correlation_page.html",correlation_results=correlation_results[1:20]) @app.route("/corr_matrix", methods=('POST',)) def corr_matrix_page(): -- cgit v1.2.3 From 33e03898ee733f18b29e54e202c217ba14921f48 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Sat, 17 Apr 2021 04:14:33 +0300 Subject: use gn3 lib --- bin/genenetwork2 | 3 +- wqflask/wqflask/correlation/correlation_gn3_api.py | 57 +++++++++++++++++----- .../wqflask/templates/demo_correlation_page.html | 2 +- 3 files changed, 47 insertions(+), 15 deletions(-) diff --git a/bin/genenetwork2 b/bin/genenetwork2 index 5f4e0f9a..917d6549 100755 --- a/bin/genenetwork2 +++ b/bin/genenetwork2 @@ -154,7 +154,8 @@ if [ ! -d $R_LIBS_SITE ] ; then fi # We may change this one: -export PYTHONPATH=$PYTHON_GN_PATH:$GN2_BASE_DIR/wqflask:$PYTHONPATH +# export PYTHONPATH=$PYTHON_GN_PATH:$GN2_BASE_DIR/wqflask:$PYTHONPATH +PYTHONPATH=$PYTHON_GN_PATH:$GN2_BASE_DIR/wqflask:$HOME/genenetwork3:$PYTHONPATH # Our UNIX TMPDIR defaults to /tmp - change this on a shared server if [ -z $TMPDIR ]; then diff --git a/wqflask/wqflask/correlation/correlation_gn3_api.py b/wqflask/wqflask/correlation/correlation_gn3_api.py index b4480076..c1d6132b 100644 --- a/wqflask/wqflask/correlation/correlation_gn3_api.py +++ b/wqflask/wqflask/correlation/correlation_gn3_api.py @@ -1,11 +1,17 @@ """module that calls the gn3 api's to do the correlation """ import json import requests +import time from wqflask.correlation import correlation_functions from base import data_set from base.trait import create_trait from base.trait import retrieve_sample_data +# gn3 lib +from gn3.computations.correlations import compute_all_sample_correlation +from gn3.computations.correlations import benchmark_compute_all_sample +from gn3.computations.correlations import map_shared_keys_to_values +from gn3.computations.correlations import compute_all_tissue_correlation GN3_CORRELATION_API = "http://127.0.0.1:8202/api/correlation" @@ -30,7 +36,6 @@ def process_samples(start_vars, sample_names, excluded_samples=None): def create_target_this_trait(start_vars): """this function creates the required trait and target dataset for correlation""" - print("creating the dataset and trait") import time @@ -52,11 +57,10 @@ def create_target_this_trait(start_vars): target_dataset.get_trait_data(list(sample_data.keys())) - time_taken = time.time() - initial_time - print(f"the time taken to create dataset is",time.time()-dataset_start_time) + print(f"the time taken to create dataset is", time.time()-dataset_start_time) - print(f"the time taken to create dataset abnd trait is",time_taken) + print(f"the time taken to create dataset abnd trait is", time_taken) return (this_dataset, this_trait, target_dataset, sample_data) @@ -76,16 +80,34 @@ def compute_correlation(start_vars, method="pearson"): corr_input_data = {} if corr_type == "sample": - corr_input_data = { - "target_dataset": target_dataset.trait_data, - "target_samplelist": target_dataset.samplelist, - "trait_data": { - "trait_sample_data": sample_data, - "trait_id": 
start_vars["trait_id"] - } + # corr_input_data = { + # "target_dataset": target_dataset.trait_data, + # "target_samplelist": target_dataset.samplelist, + # "trait_data": { + # "trait_sample_data": sample_data, + # "trait_id": start_vars["trait_id"] + # } + # } + + + + this_trait_data = { + "trait_sample_data": sample_data, + "trait_id": start_vars["trait_id"] } - requests_url = f"{GN3_CORRELATION_API}/sample_x/{method}" + initial_time = time.time() + print("Calling sample correlation") + results = map_shared_keys_to_values( + target_dataset.samplelist, target_dataset.trait_data) + correlation_results = compute_all_sample_correlation(corr_method=method, + this_trait=this_trait_data, + target_dataset=results) + + print("Time taken is>>>>",time.time()-initial_time) + + # requests_url = f"{GN3_CORRELATION_API}/sample_x/{method}" + return correlation_results elif corr_type == "tissue": trait_symbol_dict = this_dataset.retrieve_genes("Symbol") @@ -96,8 +118,17 @@ def compute_correlation(start_vars, method="pearson"): "primary_tissue": primary_tissue_data, "target_tissues_dict": target_tissue_data } + print("Calling tissue correlation") + initial_time = time.time() + correlation_results = compute_all_tissue_correlation(primary_tissue_dict=corr_input_data["primary_tissue"], + target_tissues_data=corr_input_data["target_tissues_dict"], + corr_method=method) + + time_taken = time.time() + print("Time taken is ??????",time_taken-initial_time) - requests_url = f"{GN3_CORRELATION_API}/tissue_corr/{method}" + # requests_url = f"{GN3_CORRELATION_API}/tissue_corr/{method}" + return correlation_results elif corr_type == "lit": (this_trait_geneid, geneid_dict, species) = do_lit_correlation( diff --git a/wqflask/wqflask/templates/demo_correlation_page.html b/wqflask/wqflask/templates/demo_correlation_page.html index 1900a0bd..d2979f9d 100644 --- a/wqflask/wqflask/templates/demo_correlation_page.html +++ b/wqflask/wqflask/templates/demo_correlation_page.html @@ -21,7 +21,7 @@ {% for corr_result in correlation_results %} {% for key,value in corr_result.items()%}
-

trait_name_here

+

{{key}}

{%for o_key,o_value in value.items()%}

{{o_value}}

{%endfor%} -- cgit v1.2.3 From ba2fa2025bdc381346afc8ec3203f229ed3551d6 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Sat, 17 Apr 2021 13:43:44 +0300 Subject: refactoring fetching of data --- wqflask/wqflask/correlation/correlation_gn3_api.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/wqflask/wqflask/correlation/correlation_gn3_api.py b/wqflask/wqflask/correlation/correlation_gn3_api.py index c1d6132b..75bd5561 100644 --- a/wqflask/wqflask/correlation/correlation_gn3_api.py +++ b/wqflask/wqflask/correlation/correlation_gn3_api.py @@ -48,20 +48,17 @@ def create_target_this_trait(start_vars): this_trait = create_trait(dataset=this_dataset, name=start_vars['trait_id']) - sample_data = process_samples(start_vars, this_dataset.group.samplelist) + # target_dataset.get_trait_data(list(self.sample_data.keys())) - this_trait = retrieve_sample_data(this_trait, this_dataset) + # this_trait = retrieve_sample_data(this_trait, this_dataset) print(f"Starting to creat the target dataset ") dataset_start_time = time.time() + sample_data = () - target_dataset.get_trait_data(list(sample_data.keys())) + time_taken = time.time() - initial_time - print(f"the time taken to create dataset is", time.time()-dataset_start_time) - - print(f"the time taken to create dataset abnd trait is", time_taken) - return (this_dataset, this_trait, target_dataset, sample_data) @@ -89,6 +86,10 @@ def compute_correlation(start_vars, method="pearson"): # } # } + sample_data = process_samples(start_vars, this_dataset.group.samplelist) + target_dataset.get_trait_data(list(sample_data.keys())) + this_trait = retrieve_sample_data(this_trait, this_dataset) + this_trait_data = { @@ -111,8 +112,10 @@ def compute_correlation(start_vars, method="pearson"): elif corr_type == "tissue": trait_symbol_dict = this_dataset.retrieve_genes("Symbol") + time_to_retrieve = time.time() primary_tissue_data, target_tissue_data = get_tissue_correlation_input( this_trait, trait_symbol_dict) + print("Time taken to retrieve this is",time.time()-time_to_retrieve) corr_input_data = { "primary_tissue": primary_tissue_data, -- cgit v1.2.3 From 50c0ee93a59eecd40a6fbd19139671c94003c21b Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Mon, 19 Apr 2021 00:24:36 +0300 Subject: fix for correlation_demo template --- wqflask/wqflask/correlation/correlation_gn3_api.py | 53 ++++++++++------------ .../wqflask/templates/demo_correlation_page.html | 23 +++++++++- 2 files changed, 44 insertions(+), 32 deletions(-) diff --git a/wqflask/wqflask/correlation/correlation_gn3_api.py b/wqflask/wqflask/correlation/correlation_gn3_api.py index 75bd5561..ba606b92 100644 --- a/wqflask/wqflask/correlation/correlation_gn3_api.py +++ b/wqflask/wqflask/correlation/correlation_gn3_api.py @@ -9,9 +9,10 @@ from base.trait import create_trait from base.trait import retrieve_sample_data # gn3 lib from gn3.computations.correlations import compute_all_sample_correlation -from gn3.computations.correlations import benchmark_compute_all_sample from gn3.computations.correlations import map_shared_keys_to_values from gn3.computations.correlations import compute_all_tissue_correlation +from gn3.computations.correlations import compute_all_lit_correlation +from gn3.db_utils import database_connector GN3_CORRELATION_API = "http://127.0.0.1:8202/api/correlation" @@ -36,10 +37,6 @@ def process_samples(start_vars, sample_names, excluded_samples=None): def create_target_this_trait(start_vars): """this function creates the required trait and target dataset for 
correlation""" - print("creating the dataset and trait") - import time - - initial_time = time.time() this_dataset = data_set.create_dataset(dataset_name=start_vars['dataset']) target_dataset = data_set.create_dataset( @@ -48,17 +45,10 @@ def create_target_this_trait(start_vars): this_trait = create_trait(dataset=this_dataset, name=start_vars['trait_id']) - # target_dataset.get_trait_data(list(self.sample_data.keys())) # this_trait = retrieve_sample_data(this_trait, this_dataset) - print(f"Starting to creat the target dataset ") - dataset_start_time = time.time() sample_data = () - - - - time_taken = time.time() - initial_time return (this_dataset, this_trait, target_dataset, sample_data) @@ -77,6 +67,7 @@ def compute_correlation(start_vars, method="pearson"): corr_input_data = {} if corr_type == "sample": + initial_time = time.time() # corr_input_data = { # "target_dataset": target_dataset.trait_data, # "target_samplelist": target_dataset.samplelist, @@ -85,50 +76,44 @@ def compute_correlation(start_vars, method="pearson"): # "trait_id": start_vars["trait_id"] # } # } - - sample_data = process_samples(start_vars, this_dataset.group.samplelist) + sample_data = process_samples( + start_vars, this_dataset.group.samplelist) target_dataset.get_trait_data(list(sample_data.keys())) this_trait = retrieve_sample_data(this_trait, this_dataset) - + print("Creating dataset and trait took",time.time()-initial_time) this_trait_data = { "trait_sample_data": sample_data, "trait_id": start_vars["trait_id"] } - initial_time = time.time() - print("Calling sample correlation") results = map_shared_keys_to_values( target_dataset.samplelist, target_dataset.trait_data) correlation_results = compute_all_sample_correlation(corr_method=method, this_trait=this_trait_data, target_dataset=results) - print("Time taken is>>>>",time.time()-initial_time) + print("doing sample correlation took",time.time()-initial_time) # requests_url = f"{GN3_CORRELATION_API}/sample_x/{method}" return correlation_results elif corr_type == "tissue": trait_symbol_dict = this_dataset.retrieve_genes("Symbol") - time_to_retrieve = time.time() primary_tissue_data, target_tissue_data = get_tissue_correlation_input( this_trait, trait_symbol_dict) - print("Time taken to retrieve this is",time.time()-time_to_retrieve) corr_input_data = { "primary_tissue": primary_tissue_data, "target_tissues_dict": target_tissue_data } - print("Calling tissue correlation") initial_time = time.time() correlation_results = compute_all_tissue_correlation(primary_tissue_dict=corr_input_data["primary_tissue"], target_tissues_data=corr_input_data["target_tissues_dict"], corr_method=method) + print("time taken for compute tissue is",time.time()-initial_time) - time_taken = time.time() - print("Time taken is ??????",time_taken-initial_time) # requests_url = f"{GN3_CORRELATION_API}/tissue_corr/{method}" return correlation_results @@ -137,15 +122,23 @@ def compute_correlation(start_vars, method="pearson"): (this_trait_geneid, geneid_dict, species) = do_lit_correlation( this_trait, this_dataset, target_dataset) - requests_url = f"{GN3_CORRELATION_API}/lit_corr/{species}/{this_trait_geneid}" - corr_input_data = geneid_dict + conn, _cursor_object = database_connector() + initial_time = time.time() + with conn: - print("Sending this request") - corr_results = requests.post(requests_url, json=corr_input_data) + lit_corr_results = compute_all_lit_correlation( + conn=conn, trait_lists=list(geneid_dict.items()), + species=species, gene_id=this_trait_geneid) - data = 
corr_results.json() + return lit_corr_results + print("the time taken is",time.time()-initial_time) + # requests_url = f"{GN3_CORRELATION_API}/lit_corr/{species}/{this_trait_geneid}" + # corr_input_data = geneid_dict + # corr_results = requests.post(requests_url, json=corr_input_data) - return data + # data = corr_results.json() + + # return data def do_lit_correlation(this_trait, this_dataset, target_dataset): @@ -164,11 +157,11 @@ def get_tissue_correlation_input(this_trait, trait_symbol_dict): """Gets tissue expression values for the primary trait and target tissues values""" primary_trait_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values( symbol_list=[this_trait.symbol]) - if this_trait.symbol.lower() in primary_trait_tissue_vals_dict: primary_trait_tissue_values = primary_trait_tissue_vals_dict[this_trait.symbol.lower( )] + time_to_to_fetch_all = time.time() corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values( symbol_list=list(trait_symbol_dict.values())) primary_tissue_data = { diff --git a/wqflask/wqflask/templates/demo_correlation_page.html b/wqflask/wqflask/templates/demo_correlation_page.html index d2979f9d..67e3c57c 100644 --- a/wqflask/wqflask/templates/demo_correlation_page.html +++ b/wqflask/wqflask/templates/demo_correlation_page.html @@ -22,9 +22,28 @@ {% for key,value in corr_result.items()%}

{{key}}

- {%for o_key,o_value in value.items()%} + + {% if "corr_coeffient" in value %} +

{{value["corr_coeffient"]}}

+ {%elif "tissue_corr" in value %} +

{{value["tissue_corr"]}}

+ {%elif "lit_corr" in value %} + {{value["lit_corr"]}} + {% endif %} + {%if "tissue_number" in value %} +
{{value["tissue_number"]}}
+ {%elif "num_overlap" in value %} +

{{value["num_overlap"]}}

+ {% endif %} +

{{value["p_value"]}}

+ + + + + {% endfor %}
{% endfor %} -- cgit v1.2.3 From e6c21d96f45dc74de43db451383236f28e723847 Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 19 Apr 2021 16:46:39 +0000 Subject: Removed unused code --- wqflask/base/data_set.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py index 178234fe..44325d5b 100644 --- a/wqflask/base/data_set.py +++ b/wqflask/base/data_set.py @@ -573,11 +573,6 @@ class DataSet(object): """Gets overridden later, at least for Temp...used by trait's get_given_name""" return None - # Delete this eventually - @property - def riset(): - Weve_Renamed_This_As_Group - def get_accession_id(self): if self.type == "Publish": results = g.db.execute("""select InfoFiles.GN_AccesionId from InfoFiles, PublishFreeze, InbredSet where -- cgit v1.2.3 From 34b6fbf0044fd950ecb95590b4b772abd2cbcb8e Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 19 Apr 2021 16:57:21 +0000 Subject: Fixed NIAAA link because apparently it was changed --- wqflask/wqflask/templates/base.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/templates/base.html b/wqflask/wqflask/templates/base.html index ccb2ac5a..1fbaad51 100644 --- a/wqflask/wqflask/templates/base.html +++ b/wqflask/wqflask/templates/base.html @@ -191,7 +191,7 @@ Translational Systems Genetics of Mitochondria, Metabolism, and Aging (R01AG043930, 2013-2018)
  • - NIAAA + NIAAA Integrative Neuroscience Initiative on Alcoholism (U01 AA016662, U01 AA013499, U24 AA013513, U01 AA014425, 2006-2017)
  • -- cgit v1.2.3 From c8f9367bc12340bb8b8a7ce1a5f42789e311555a Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 19 Apr 2021 16:57:52 +0000 Subject: Removed commented out js file --- wqflask/wqflask/templates/base.html | 1 - 1 file changed, 1 deletion(-) diff --git a/wqflask/wqflask/templates/base.html b/wqflask/wqflask/templates/base.html index 1fbaad51..e6f22deb 100644 --- a/wqflask/wqflask/templates/base.html +++ b/wqflask/wqflask/templates/base.html @@ -257,7 +257,6 @@ - -- cgit v1.2.3 From 789dd88a0b9a6a8cbe0413c47212c89c1681765c Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 19 Apr 2021 17:08:30 +0000 Subject: Removed unused function from network_graph.py --- wqflask/wqflask/network_graph/network_graph.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/wqflask/wqflask/network_graph/network_graph.py b/wqflask/wqflask/network_graph/network_graph.py index 1d5316a2..132e1884 100644 --- a/wqflask/wqflask/network_graph/network_graph.py +++ b/wqflask/wqflask/network_graph/network_graph.py @@ -182,15 +182,3 @@ class NetworkGraph(object): samples=self.all_sample_list, sample_data=self.sample_data, elements=self.elements,) - - def get_trait_db_obs(self, trait_db_list): - self.trait_list = [] - for i, trait_db in enumerate(trait_db_list): - if i == (len(trait_db_list) - 1): - break - trait_name, dataset_name = trait_db.split(":") - dataset_ob = data_set.create_dataset(dataset_name) - trait_ob = create_trait(dataset=dataset_ob, - name=trait_name, - cellid=None) - self.trait_list.append((trait_ob, dataset_ob)) -- cgit v1.2.3 From 68ee0a995fceaf6aefdd3c8f780e46a83b51a0e8 Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 19 Apr 2021 17:11:41 +0000 Subject: Specify only getting the first two items after splitting the trait/dataset input string --- wqflask/utility/helper_functions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/utility/helper_functions.py b/wqflask/utility/helper_functions.py index 7eb7f013..46eeb35d 100644 --- a/wqflask/utility/helper_functions.py +++ b/wqflask/utility/helper_functions.py @@ -40,7 +40,7 @@ def get_trait_db_obs(self, trait_db_list): data, _separator, hmac_string = trait.rpartition(':') data = data.strip() assert hmac_string==hmac.hmac_creation(data), "Data tampering?" 
- trait_name, dataset_name = data.split(":") + trait_name, dataset_name = data.split(":")[:2] if dataset_name == "Temp": dataset_ob = data_set.create_dataset(dataset_name=dataset_name, dataset_type="Temp", group_name=trait_name.split("_")[2]) else: -- cgit v1.2.3 From 34e4933de5a1cd444abe618fcfd93b424bf3442e Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Tue, 20 Apr 2021 01:38:26 +0300 Subject: refactor code for iterating mrna tissue data --- wqflask/base/mrna_assay_tissue_data.py | 39 +++++++++++++++++++--- .../wqflask/correlation/correlation_functions.py | 6 ++-- wqflask/wqflask/correlation/correlation_gn3_api.py | 24 +++++++------ 3 files changed, 51 insertions(+), 18 deletions(-) diff --git a/wqflask/base/mrna_assay_tissue_data.py b/wqflask/base/mrna_assay_tissue_data.py index f1929518..0220d73b 100644 --- a/wqflask/base/mrna_assay_tissue_data.py +++ b/wqflask/base/mrna_assay_tissue_data.py @@ -6,6 +6,7 @@ from utility import db_tools from utility import Bunch from utility.db_tools import escape +from gn3.db_utils import database_connector from utility.logger import getLogger @@ -44,16 +45,42 @@ class MrnaAssayTissueData(object): and t.Mean = x.maxmean; '''.format(in_clause) - results = g.db.execute(query).fetchall() - lower_symbols = [] + # lower_symbols = [] + lower_symbols = {} for gene_symbol in gene_symbols: + # lower_symbols[gene_symbol.lower()] = True if gene_symbol != None: - lower_symbols.append(gene_symbol.lower()) - + lower_symbols[gene_symbol.lower()] = True + + import time + # initial_time = time.time() + # conn,cursor = database_connector() + # cursor.execute(query) + # for result in cursor.fetchall(): + # symbol = result[0] + # self.data[symbol].gene_id = result[1] + # self.data[symbol].data_id = result[2] + # self.data[symbol].chr = result[3] + # self.data[symbol].mb = result[4] + # self.data[symbol].description = result[5] + # self.data[symbol].probe_target_description = result[6] + + + # print("my loop takes>>>>",time.time()-initial_time) + # conn.close() + # r + + # takes 5 seconds + initial_time = time.time() + results = list(g.db.execute(query).fetchall()) for result in results: symbol = result[0] - if symbol.lower() in lower_symbols: + # if symbol is not None + # exists = lower_symbols.get(symbol.lower()) + # if symbol.lower() in lower_symbols: + if symbol is not None and lower_symbols.get(symbol.lower()): + symbol = symbol.lower() self.data[symbol].gene_id = result.GeneId @@ -62,6 +89,7 @@ class MrnaAssayTissueData(object): self.data[symbol].mb = result.Mb self.data[symbol].description = result.description self.data[symbol].probe_target_description = result.Probe_Target_Description + print("time taken in the loop is",time.time()-initial_time) ########################################################################### #Input: cursor, symbolList (list), dataIdDict(Dict) @@ -82,6 +110,7 @@ class MrnaAssayTissueData(object): WHERE TissueProbeSetData.Id IN {} and TissueProbeSetXRef.DataId = TissueProbeSetData.Id""".format(db_tools.create_in_clause(id_list)) + results = g.db.execute(query).fetchall() for result in results: if result.Symbol.lower() not in symbol_values_dict: diff --git a/wqflask/wqflask/correlation/correlation_functions.py b/wqflask/wqflask/correlation/correlation_functions.py index fd7691d4..af1d6060 100644 --- a/wqflask/wqflask/correlation/correlation_functions.py +++ b/wqflask/wqflask/correlation/correlation_functions.py @@ -82,6 +82,6 @@ def cal_zero_order_corr_for_tiss (primaryValue=[], targetValue=[], method='pears def 
get_trait_symbol_and_tissue_values(symbol_list=None): tissue_data = MrnaAssayTissueData(gene_symbols=symbol_list) - - if len(tissue_data.gene_symbols): - return tissue_data.get_symbol_values_pairs() + if len(tissue_data.gene_symbols) >0: + results = tissue_data.get_symbol_values_pairs() + return results diff --git a/wqflask/wqflask/correlation/correlation_gn3_api.py b/wqflask/wqflask/correlation/correlation_gn3_api.py index ba606b92..e7394647 100644 --- a/wqflask/wqflask/correlation/correlation_gn3_api.py +++ b/wqflask/wqflask/correlation/correlation_gn3_api.py @@ -12,6 +12,7 @@ from gn3.computations.correlations import compute_all_sample_correlation from gn3.computations.correlations import map_shared_keys_to_values from gn3.computations.correlations import compute_all_tissue_correlation from gn3.computations.correlations import compute_all_lit_correlation +from gn3.computations.correlations import experimental_compute_all_tissue_correlation from gn3.db_utils import database_connector GN3_CORRELATION_API = "http://127.0.0.1:8202/api/correlation" @@ -37,7 +38,6 @@ def process_samples(start_vars, sample_names, excluded_samples=None): def create_target_this_trait(start_vars): """this function creates the required trait and target dataset for correlation""" - this_dataset = data_set.create_dataset(dataset_name=start_vars['dataset']) target_dataset = data_set.create_dataset( dataset_name=start_vars['corr_dataset']) @@ -81,7 +81,7 @@ def compute_correlation(start_vars, method="pearson"): target_dataset.get_trait_data(list(sample_data.keys())) this_trait = retrieve_sample_data(this_trait, this_dataset) - print("Creating dataset and trait took",time.time()-initial_time) + print("Creating dataset and trait took", time.time()-initial_time) this_trait_data = { "trait_sample_data": sample_data, @@ -94,7 +94,7 @@ def compute_correlation(start_vars, method="pearson"): this_trait=this_trait_data, target_dataset=results) - print("doing sample correlation took",time.time()-initial_time) + print("doing sample correlation took", time.time()-initial_time) # requests_url = f"{GN3_CORRELATION_API}/sample_x/{method}" return correlation_results @@ -109,11 +109,16 @@ def compute_correlation(start_vars, method="pearson"): "target_tissues_dict": target_tissue_data } initial_time = time.time() - correlation_results = compute_all_tissue_correlation(primary_tissue_dict=corr_input_data["primary_tissue"], - target_tissues_data=corr_input_data["target_tissues_dict"], - corr_method=method) - print("time taken for compute tissue is",time.time()-initial_time) - + correlation_results = experimental_compute_all_tissue_correlation(primary_tissue_dict=corr_input_data["primary_tissue"], + target_tissues_data=corr_input_data[ + "target_tissues_dict"], + corr_method=method) + print("correlation y took", time.time()-initial_time) + # initial_time = time.time() + # correlation_results = compute_all_tissue_correlation(primary_tissue_dict=corr_input_data["primary_tissue"], + # target_tissues_data=corr_input_data["target_tissues_dict"], + # corr_method=method) + # print("time taken for compute tissue is", time.time()-initial_time) # requests_url = f"{GN3_CORRELATION_API}/tissue_corr/{method}" return correlation_results @@ -131,7 +136,7 @@ def compute_correlation(start_vars, method="pearson"): species=species, gene_id=this_trait_geneid) return lit_corr_results - print("the time taken is",time.time()-initial_time) + print("the time taken is", time.time()-initial_time) # requests_url = 
f"{GN3_CORRELATION_API}/lit_corr/{species}/{this_trait_geneid}" # corr_input_data = geneid_dict # corr_results = requests.post(requests_url, json=corr_input_data) @@ -161,7 +166,6 @@ def get_tissue_correlation_input(this_trait, trait_symbol_dict): primary_trait_tissue_values = primary_trait_tissue_vals_dict[this_trait.symbol.lower( )] - time_to_to_fetch_all = time.time() corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values( symbol_list=list(trait_symbol_dict.values())) primary_tissue_data = { -- cgit v1.2.3 From 315d7f13b254aa62e277805d7d9816b0c7042479 Mon Sep 17 00:00:00 2001 From: zsloan Date: Thu, 22 Apr 2021 18:23:50 +0000 Subject: Added JS that sets the 'add to collection' dropdown to default to the collection the use set as their default collection --- wqflask/wqflask/static/new/javascript/search_results.js | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/wqflask/wqflask/static/new/javascript/search_results.js b/wqflask/wqflask/static/new/javascript/search_results.js index ecb1220d..48b9b7be 100644 --- a/wqflask/wqflask/static/new/javascript/search_results.js +++ b/wqflask/wqflask/static/new/javascript/search_results.js @@ -326,4 +326,15 @@ $(function() { } ); } + apply_default = function() { + let default_collection_id = $.cookie('default_collection'); + if (default_collection_id) { + let the_option = $('[name=existing_collection] option').filter(function() { + return ($(this).text().split(":")[0] == default_collection_id); + }) + the_option.prop('selected', true); + } + } + apply_default(); + }); \ No newline at end of file -- cgit v1.2.3 From 1a7bb988ee360b3ef48e22e25b419c375dccb9fa Mon Sep 17 00:00:00 2001 From: zsloan Date: Thu, 22 Apr 2021 18:26:01 +0000 Subject: Account for situations where the trait symbol is null for ProbeSet traits; previously it could throw an error --- wqflask/wqflask/search_results.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py index f23c0582..cb01a2af 100644 --- a/wqflask/wqflask/search_results.py +++ b/wqflask/wqflask/search_results.py @@ -119,7 +119,7 @@ class SearchResultPage(object): trait_dict['dataset'] = this_trait.dataset.name trait_dict['hmac'] = hmac.data_hmac('{}:{}'.format(this_trait.name, this_trait.dataset.name)) if this_trait.dataset.type == "ProbeSet": - trait_dict['symbol'] = this_trait.symbol + trait_dict['symbol'] = this_trait.symbol if this_trait.symbol else "N/A" trait_dict['description'] = "N/A" if this_trait.description_display: trait_dict['description'] = this_trait.description_display -- cgit v1.2.3 From b0ccb12682fed83bf72d22ff42f1f442a8e6176e Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Mon, 19 Apr 2021 14:43:16 +0300 Subject: Remove stale comments --- wqflask/base/data_set.py | 11 ---- wqflask/utility/helper_functions.py | 4 -- wqflask/wqflask/show_trait/show_trait.py | 72 +++++++++++++++----------- wqflask/wqflask/templates/index_page_orig.html | 10 ---- wqflask/wqflask/templates/submit_trait.html | 12 ----- wqflask/wqflask/views.py | 23 ++------ 6 files changed, 46 insertions(+), 86 deletions(-) diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py index 178234fe..cc5a428c 100644 --- a/wqflask/base/data_set.py +++ b/wqflask/base/data_set.py @@ -262,8 +262,6 @@ class Markers(object): elif isinstance(p_values, dict): filtered_markers = [] for marker in self.markers: - #logger.debug("marker[name]", marker['name']) - #logger.debug("p_values:", p_values) if marker['name'] in 
p_values: #logger.debug("marker {} IS in p_values".format(i)) marker['p_value'] = p_values[marker['name']] @@ -276,10 +274,6 @@ class Markers(object): marker['lrs_value'] = - \ math.log10(marker['p_value']) * 4.61 filtered_markers.append(marker) - # else: - #logger.debug("marker {} NOT in p_values".format(i)) - # self.markers.remove(marker) - #del self.markers[i] self.markers = filtered_markers @@ -306,7 +300,6 @@ class HumanMarkers(Markers): marker['Mb'] = float(splat[3]) / 1000000 self.markers.append(marker) - #logger.debug("markers is: ", pf(self.markers)) def add_pvalues(self, p_values): super(HumanMarkers, self).add_pvalues(p_values) @@ -520,7 +513,6 @@ def datasets(group_name, this_group=None): break if tissue_already_exists: - #logger.debug("dataset_menu:", dataset_menu[i]['datasets']) dataset_menu[i]['datasets'].append((dataset, dataset_short)) else: dataset_menu.append(dict(tissue=tissue_name, @@ -735,9 +727,6 @@ class PhenotypeDataSet(DataSet): DS_NAME_MAP['Publish'] = 'PhenotypeDataSet' def setup(self): - - #logger.debug("IS A PHENOTYPEDATASET") - # Fields in the database table self.search_fields = ['Phenotype.Post_publication_description', 'Phenotype.Pre_publication_description', diff --git a/wqflask/utility/helper_functions.py b/wqflask/utility/helper_functions.py index 7eb7f013..15d5b3ab 100644 --- a/wqflask/utility/helper_functions.py +++ b/wqflask/utility/helper_functions.py @@ -10,7 +10,6 @@ import logging logger = logging.getLogger(__name__ ) def get_species_dataset_trait(self, start_vars): - #assert type(read_genotype) == type(bool()), "Expecting boolean value for read_genotype" if "temp_trait" in list(start_vars.keys()): if start_vars['temp_trait'] == "True": self.dataset = data_set.create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = start_vars['group']) @@ -27,9 +26,6 @@ def get_species_dataset_trait(self, start_vars): get_qtl_info=True) logger.debug("After creating trait") - #if read_genotype: - #self.dataset.group.read_genotype_file() - #self.genotype = self.dataset.group.genotype def get_trait_db_obs(self, trait_db_list): if isinstance(trait_db_list, str): diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index 6892f02b..ed55d473 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -138,17 +138,12 @@ class ShowTrait(object): self.ncbi_summary = get_ncbi_summary(self.this_trait) - #Get nearest marker for composite mapping + # Get nearest marker for composite mapping if not self.temp_trait: if check_if_attr_exists(self.this_trait, 'locus_chr') and self.dataset.type != "Geno" and self.dataset.type != "Publish": self.nearest_marker = get_nearest_marker(self.this_trait, self.dataset) - #self.nearest_marker1 = get_nearest_marker(self.this_trait, self.dataset)[0] - #self.nearest_marker2 = get_nearest_marker(self.this_trait, self.dataset)[1] else: self.nearest_marker = "" - #self.nearest_marker1 = "" - #self.nearest_marker2 = "" - self.make_sample_lists() @@ -168,16 +163,19 @@ class ShowTrait(object): categorical_var_list = [] if not self.temp_trait: - categorical_var_list = get_categorical_variables(self.this_trait, self.sample_groups[0]) #ZS: Only using first samplelist, since I think mapping only uses those samples + # ZS: Only using first samplelist, since I think mapping only uses those samples + categorical_var_list = get_categorical_variables(self.this_trait, self.sample_groups[0]) - #ZS: Get list of chromosomes to select for mapping + # ZS: Get 
list of chromosomes to select for mapping self.chr_list = [["All", -1]] for i, this_chr in enumerate(self.dataset.species.chromosomes.chromosomes): self.chr_list.append([self.dataset.species.chromosomes.chromosomes[this_chr].name, i]) self.genofiles = self.dataset.group.get_genofiles() - if "QTLReaper" or "R/qtl" in dataset.group.mapping_names: #ZS: No need to grab scales from .geno file unless it's using a mapping method that reads .geno files + # ZS: No need to grab scales from .geno file unless it's using + # a mapping method that reads .geno files + if "QTLReaper" or "R/qtl" in dataset.group.mapping_names: if self.genofiles: self.scales_in_geno = get_genotype_scales(self.genofiles) else: @@ -187,10 +185,15 @@ class ShowTrait(object): self.has_num_cases = has_num_cases(self.this_trait) - #ZS: Needed to know whether to display bar chart + get max sample name length in order to set table column width + # ZS: Needed to know whether to display bar chart + get max + # sample name length in order to set table column width self.num_values = 0 - self.binary = "true" #ZS: So it knows whether to display the Binary R/qtl mapping method, which doesn't work unless all values are 0 or 1 - self.negative_vals_exist = "false" #ZS: Since we don't want to show log2 transform option for situations where it doesn't make sense + # ZS: So it knows whether to display the Binary R/qtl mapping + # method, which doesn't work unless all values are 0 or 1 + self.binary = "true" + # ZS: Since we don't want to show log2 transform option for + # situations where it doesn't make sense + self.negative_vals_exist = "false" max_samplename_width = 1 for group in self.sample_groups: for sample in group.sample_list: @@ -203,7 +206,8 @@ class ShowTrait(object): if sample.value < 0: self.negative_vals_exist = "true" - #ZS: Check whether any attributes have few enough distinct values to show the "Block samples by group" option + # ZS: Check whether any attributes have few enough distinct + # values to show the "Block samples by group" option self.categorical_attr_exists = "false" for attribute in self.sample_groups[0].attributes: if len(self.sample_groups[0].attributes[attribute].distinct_values) <= 10: @@ -258,7 +262,6 @@ class ShowTrait(object): if not self.temp_trait: if hasattr(self.this_trait, 'locus_chr') and self.this_trait.locus_chr != "" and self.dataset.type != "Geno" and self.dataset.type != "Publish": hddn['control_marker'] = self.nearest_marker - #hddn['control_marker'] = self.nearest_marker1+","+self.nearest_marker2 hddn['do_control'] = False hddn['maf'] = 0.05 hddn['mapping_scale'] = "physic" @@ -268,7 +271,8 @@ class ShowTrait(object): if len(self.scales_in_geno) < 2: hddn['mapping_scale'] = self.scales_in_geno[list(self.scales_in_geno.keys())[0]][0][0] - # We'll need access to this_trait and hddn in the Jinja2 Template, so we put it inside self + # We'll need access to this_trait and hddn in the Jinja2 + # Template, so we put it inside self self.hddn = hddn js_data = dict(trait_id = self.trait_id, @@ -294,7 +298,8 @@ class ShowTrait(object): self.js_data = js_data def get_external_links(self): - #ZS: There's some weirdness here because some fields don't exist while others are empty strings + # ZS: There's some weirdness here because some fields don't + # exist while others are empty strings self.pubmed_link = webqtlConfig.PUBMEDLINK_URL % self.this_trait.pubmed_id if check_if_attr_exists(self.this_trait, 'pubmed_id') else None self.ncbi_gene_link = webqtlConfig.NCBI_LOCUSID % self.this_trait.geneid if 
check_if_attr_exists(self.this_trait, 'geneid') else None self.omim_link = webqtlConfig.OMIM_ID % self.this_trait.omim if check_if_attr_exists(self.this_trait, 'omim') else None @@ -320,7 +325,6 @@ class ShowTrait(object): self.panther_link = webqtlConfig.PANTHER_URL % self.this_trait.symbol self.ebi_gwas_link = webqtlConfig.EBIGWAS_URL % self.this_trait.symbol self.protein_atlas_link = webqtlConfig.PROTEIN_ATLAS_URL % self.this_trait.symbol - #self.open_targets_link = webqtlConfig.OPEN_TARGETS_URL % self.this_trait.symbol if self.dataset.group.species == "mouse" or self.dataset.group.species == "human": self.rgd_link = webqtlConfig.RGD_URL % (self.this_trait.symbol, self.dataset.group.species.capitalize()) @@ -429,7 +433,9 @@ class ShowTrait(object): all_samples_ordered.append(sample) other_sample_names.append(sample) - #ZS: CFW is here because the .geno file doesn't properly contain its full list of samples. This should probably be fixed. + # ZS: CFW is here because the .geno file doesn't properly + # contain its full list of samples. This should probably + # be fixed. if self.dataset.group.species == "human" or (set(primary_sample_names) == set(parent_f1_samples)) or self.dataset.group.name == "CFW": primary_sample_names += other_sample_names other_sample_names = [] @@ -445,7 +451,8 @@ class ShowTrait(object): sample_group_type='primary', header=primary_header) - #if other_sample_names and self.dataset.group.species != "human" and self.dataset.group.name != "CFW": + # if other_sample_names and self.dataset.group.species != + # "human" and self.dataset.group.name != "CFW": if len(other_sample_names) > 0: other_sample_names.sort() #Sort other samples if parent_f1_samples: @@ -539,7 +546,8 @@ def get_z_scores(sample_groups): def get_nearest_marker(this_trait, this_db): this_chr = this_trait.locus_chr this_mb = this_trait.locus_mb - #One option is to take flanking markers, another is to take the two (or one) closest + # One option is to take flanking markers, another is to take the + # two (or one) closest query = """SELECT Geno.Name FROM Geno, GenoXRef, GenoFreeze WHERE Geno.Chr = '{}' AND @@ -552,7 +560,6 @@ def get_nearest_marker(this_trait, this_db): if result == []: return "" - #return "", "" else: return result[0][0] @@ -617,7 +624,8 @@ def check_if_attr_exists(the_trait, id_type): def get_ncbi_summary(this_trait): if check_if_attr_exists(this_trait, 'geneid'): - #ZS: Need to switch this try/except to something that checks the output later + # ZS: Need to switch this try/except to something that checks + # the output later try: response = requests.get("http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=gene&id=%s&retmode=json" % this_trait.geneid) summary = json.loads(response.content)['result'][this_trait.geneid]['summary'] @@ -661,8 +669,8 @@ def get_genotype_scales(genofiles): def get_scales_from_genofile(file_location): geno_path = locate_ignore_error(file_location, 'genotype') - - if not geno_path: #ZS: This is just to allow the code to run when + # ZS: This is just to allow the code to run when + if not geno_path: return [["physic", "Mb"]] cm_and_mb_cols_exist = True cm_column = None @@ -670,7 +678,9 @@ def get_scales_from_genofile(file_location): with open(geno_path, "r") as geno_fh: for i, line in enumerate(geno_fh): if line[0] == "#" or line[0] == "@": - if "@scale" in line: #ZS: If the scale is made explicit in the metadata, use that + # ZS: If the scale is made explicit in the metadata, + # use that + if "@scale" in line: scale = line.split(":")[1].strip() if 
scale == "morgan": return [["morgan", "cM"]] @@ -690,12 +700,16 @@ def get_scales_from_genofile(file_location): mb_column = 3 break - #ZS: This attempts to check whether the cM and Mb columns are 'real', since some .geno files have one column be a copy of the other column, or have one column that is all 0s + # ZS: This attempts to check whether the cM and Mb columns are + # 'real', since some .geno files have one column be a copy of + # the other column, or have one column that is all 0s cm_all_zero = True mb_all_zero = True cm_mb_all_equal = True for i, line in enumerate(geno_fh): - if first_marker_line <= i < first_marker_line + 10: #ZS: I'm assuming there won't be more than 10 markers where the position is listed as 0 + # ZS: I'm assuming there won't be more than 10 markers + # where the position is listed as 0 + if first_marker_line <= i < first_marker_line + 10: if cm_column: cm_val = line.split("\t")[cm_column].strip() if cm_val != "0": @@ -711,8 +725,8 @@ def get_scales_from_genofile(file_location): if i > first_marker_line + 10: break - - #ZS: This assumes that both won't be all zero, since if that's the case mapping shouldn't be an option to begin with + # ZS: This assumes that both won't be all zero, since if that's + # the case mapping shouldn't be an option to begin with if mb_all_zero: return [["morgan", "cM"]] elif cm_mb_all_equal: diff --git a/wqflask/wqflask/templates/index_page_orig.html b/wqflask/wqflask/templates/index_page_orig.html index 7f82b35c..87cf1b45 100755 --- a/wqflask/wqflask/templates/index_page_orig.html +++ b/wqflask/wqflask/templates/index_page_orig.html @@ -7,16 +7,6 @@ {% endblock %} {% block content %} - - -
    {{ flash_me() }} diff --git a/wqflask/wqflask/templates/submit_trait.html b/wqflask/wqflask/templates/submit_trait.html index 68b06f55..334a608d 100644 --- a/wqflask/wqflask/templates/submit_trait.html +++ b/wqflask/wqflask/templates/submit_trait.html @@ -61,18 +61,6 @@
    -

    Paste or Type Multiple Values: You can enter data by pasting a series of numbers representing trait values into this area. diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 2c0ba586..c4b510d4 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -156,12 +156,6 @@ def index_page(): import_collections = params['import_collections'] if import_collections == "true": g.user_session.import_traits_to_user(params['anon_id']) - #if USE_GN_SERVER: - # # The menu is generated using GN_SERVER - # return render_template("index_page.html", gn_server_url = GN_SERVER_URL, version=GN_VERSION) - #else: - - # Old style static menu (OBSOLETE) return render_template("index_page_orig.html", version=GN_VERSION) @@ -343,14 +337,10 @@ def intro(): @app.route("/tutorials") def tutorials(): - #doc = Docs("links", request.args) - #return render_template("docs.html", **doc.__dict__) return render_template("tutorials.html") @app.route("/credits") def credits(): - #doc = Docs("links", request.args) - #return render_template("docs.html", **doc.__dict__) return render_template("credits.html") @app.route("/update_text", methods=('POST',)) @@ -368,12 +358,9 @@ def submit_trait_form(): @app.route("/create_temp_trait", methods=('POST',)) def create_temp_trait(): logger.info(request.url) - - #template_vars = submit_trait.SubmitTrait(request.form) - doc = Docs("links") return render_template("links.html", **doc.__dict__) - #return render_template("show_trait.html", **template_vars.__dict__) + @app.route('/export_trait_excel', methods=('POST',)) def export_trait_excel(): @@ -487,21 +474,17 @@ def export_perm_data(): mimetype='text/csv', headers={"Content-Disposition":"attachment;filename=" + file_name + ".csv"}) + @app.route("/show_temp_trait", methods=('POST',)) def show_temp_trait_page(): logger.info(request.url) template_vars = show_trait.ShowTrait(request.form) - #logger.info("js_data before dump:", template_vars.js_data) template_vars.js_data = json.dumps(template_vars.js_data, default=json_default_handler, indent=" ") - # Sorting the keys messes up the ordered dictionary, so don't do that - #sort_keys=True) - - #logger.info("js_data after dump:", template_vars.js_data) - #logger.info("show_trait template_vars:", pf(template_vars.__dict__)) return render_template("show_trait.html", **template_vars.__dict__) + @app.route("/show_trait") def show_trait_page(): logger.info(request.url) -- cgit v1.2.3 From 4534daa6fb07c23b90e024560ca64091fc330eed Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Mon, 19 Apr 2021 17:46:38 +0300 Subject: Move looped sql query into one statement in "get_species_groups" It's in-efficient to have a sql query executed in a loop. As data grows, the query becomes slower. It's better to let sql handle such queries. 
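The idea behind this commit, sketched as a small self-contained example: issue one JOIN query and group the rows in Python, instead of running one InbredSet query per species. An in-memory SQLite database stands in for GeneNetwork's actual MySQL connection (g.db), and the sample rows are made up, so this is illustrative only; the real change follows in the patch below.

# Illustrative sketch only (not part of the patch): one JOIN instead of a
# query-per-species loop, with the grouping done in Python.
import sqlite3

def get_species_groups(conn):
    """Group InbredSet names under their species using a single JOIN query."""
    menu = {}
    rows = conn.execute(
        "SELECT s.MenuName, i.InbredSetName "
        "FROM InbredSet i JOIN Species s ON s.SpeciesId = i.SpeciesId "
        "ORDER BY i.SpeciesId, i.Name").fetchall()
    for species, group_name in rows:
        # setdefault keeps one list per species; no extra query needed
        menu.setdefault(species, []).append(group_name)
    return [{"species": species, "groups": groups}
            for species, groups in menu.items()]

if __name__ == "__main__":
    conn = sqlite3.connect(":memory:")
    conn.executescript("""
        CREATE TABLE Species (SpeciesId INTEGER, MenuName TEXT);
        CREATE TABLE InbredSet (SpeciesId INTEGER, InbredSetName TEXT, Name TEXT);
        -- made-up sample rows for the sketch
        INSERT INTO Species VALUES (1, 'Mouse'), (2, 'Rat');
        INSERT INTO InbredSet VALUES (1, 'BXD', 'BXD'), (1, 'CFW', 'CFW'),
                                     (2, 'HSNIH', 'HSNIH');
    """)
    print(get_species_groups(conn))
    # [{'species': 'Mouse', 'groups': ['BXD', 'CFW']},
    #  {'species': 'Rat', 'groups': ['HSNIH']}]
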
--- wqflask/utility/helper_functions.py | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/wqflask/utility/helper_functions.py b/wqflask/utility/helper_functions.py index 15d5b3ab..4ba92ed5 100644 --- a/wqflask/utility/helper_functions.py +++ b/wqflask/utility/helper_functions.py @@ -47,19 +47,18 @@ def get_trait_db_obs(self, trait_db_list): if trait_ob: self.trait_list.append((trait_ob, dataset_ob)) -def get_species_groups(): - - species_query = "SELECT SpeciesId, MenuName FROM Species" - species_ids_and_names = g.db.execute(species_query).fetchall() - - species_and_groups = [] - for species_id, species_name in species_ids_and_names: - this_species_groups = {} - this_species_groups['species'] = species_name - groups_query = "SELECT InbredSetName FROM InbredSet WHERE SpeciesId = %s" % (species_id) - groups = [group[0] for group in g.db.execute(groups_query).fetchall()] - this_species_groups['groups'] = groups - species_and_groups.append(this_species_groups) - - return species_and_groups +def get_species_groups(): + """Group each species into a group""" + _menu = {} + for species, group_name in g.db.execute( + "SELECT s.MenuName, i.InbredSetName FROM InbredSet i " + "INNER JOIN Species s ON s.SpeciesId = i.SpeciesId " + "ORDER BY i.SpeciesId ASC, i.Name ASC").fetchall(): + if _menu.get(species): + _menu = _menu[species].append(group_name) + else: + _menu[species] = [group_name] + return [{"species": key, + "groups": value} for key, value in + list(_menu.items())] -- cgit v1.2.3 From d2e2046a3ce1af0ca97ea1b6d9ccb3a4c9aecf7c Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Fri, 23 Apr 2021 17:21:12 +0300 Subject: Add full link to genetic data collected as part of WebQTL project --- wqflask/wqflask/templates/submit_trait.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/templates/submit_trait.html b/wqflask/wqflask/templates/submit_trait.html index 334a608d..2cc18240 100644 --- a/wqflask/wqflask/templates/submit_trait.html +++ b/wqflask/wqflask/templates/submit_trait.html @@ -14,7 +14,7 @@

    Introduction


    The trait values that you enter are statistically compared with verified genotypes collected at a set of microsatellite markers in each RI set. The markers are drawn from a set of over 750, but for each set redundant markers have been removed, preferentially retaining those that are most informative.

    -

    These error-checked RI mapping data match theoretical expectations for RI strain sets. The cumulative adjusted length of the RI maps are approximately 1400 cM, a value that matches those of both MIT maps and Chromosome Committee Report maps. See our full description of the genetic data collected as part of the WebQTL project.

    +

    These error-checked RI mapping data match theoretical expectations for RI strain sets. The cumulative adjusted length of the RI maps are approximately 1400 cM, a value that matches those of both MIT maps and Chromosome Committee Report maps. See our full description of the genetic data collected as part of the WebQTL project.


    -- cgit v1.2.3 From 2114ad9e84ad7778e048b52cf865b5f031ceab88 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Fri, 23 Apr 2021 17:27:31 +0300 Subject: Rename index_page_orig to index_page --- wqflask/wqflask/templates/index_page.html | 222 +++++++++------- wqflask/wqflask/templates/index_page_orig.html | 339 ------------------------- wqflask/wqflask/views.py | 2 +- 3 files changed, 136 insertions(+), 427 deletions(-) mode change 100644 => 100755 wqflask/wqflask/templates/index_page.html delete mode 100755 wqflask/wqflask/templates/index_page_orig.html diff --git a/wqflask/wqflask/templates/index_page.html b/wqflask/wqflask/templates/index_page.html old mode 100644 new mode 100755 index 31846f87..87cf1b45 --- a/wqflask/wqflask/templates/index_page.html +++ b/wqflask/wqflask/templates/index_page.html @@ -1,78 +1,73 @@ {% extends "base.html" %} {% block title %}GeneNetwork{% endblock %} +{% block css %} + +{% endblock %} {% block content %} - - - -
    +
    {{ flash_me() }} -
    +
    -
    +
    -
    - +
  • System Maintenance Code
  • + +
    + + + + -->
    -

    Websites affiliated with GeneNetwork

    - -

    GN1 Mirror and development sites

    - +

    GeneNetwork v2:

    + +

    GeneNetwork v1:

    -
    + + @@ -268,11 +303,7 @@ {%endblock%} {% block js %} - - - + + + + + + {% endblock %} diff --git a/wqflask/wqflask/templates/index_page_orig.html b/wqflask/wqflask/templates/index_page_orig.html deleted file mode 100755 index 87cf1b45..00000000 --- a/wqflask/wqflask/templates/index_page_orig.html +++ /dev/null @@ -1,339 +0,0 @@ -{% extends "base.html" %} -{% block title %}GeneNetwork{% endblock %} -{% block css %} - -{% endblock %} -{% block content %} - -
    - - {{ flash_me() }} - -
    - -
    - -
    - - -

    You can also use advanced commands. Copy these simple examples into the Get Any field for single term searches and Combined for searches with multiple terms:

    - -
      -
    • POSITION=(chr1 25 30) finds genes, markers, or transcripts on - chromosome 1 between 25 and 30 Mb.
    • - -
    • MEAN=(15 16) in the Combined field finds - highly expressed genes (15 to 16 log2 units)
    • - -
    • RANGE=(1.5 2.5) in the Any field finds traits with values with a specified fold-range (minimum = 1). - Useful for finding "housekeeping genes" (1.0 1.2) or highly variable molecular assays (10 100).
    • - -
    • LRS=(15 1000) or LOD=(2 8) finds all traits with peak LRS or LOD scores between lower and upper limits.
    • - -
    • LRS=(9 999 Chr4 122 155) finds all traits on Chr 4 from 122 and 155 Mb with LRS scores between 9 and 999.
    • - -
    • cisLRS=(15 1000 5) or cisLOD=(2 8 5) finds all cis eQTLs with peak LRS or LOD scores between lower and upper limits, - with an inclusion zone of 5 Mb around the parent gene.
    • - -
    • transLRS=(15 1000 5) or transLOD=(2 8 5) finds all trans eQTLs with peak LRS or LOD scores between lower and upper limits, - with an exclusion zone of 5 Mb around the parent gene. You can also add a fourth term specifying which chromosome you want the transLRS to be on - (for example transLRS=(15 1000 5 7) would find all trans eQTLs with peak LRS on chromosome 7 that is also a trans eQTL with exclusionary zone of 5Mb).
    • - -
    • POSITION=(Chr4 122 130) cisLRS=(9 999 10) - finds all traits on Chr 4 from 122 and 155 Mb with cisLRS scores - between 9 and 999 and an inclusion zone of 10 Mb.
    • - -
    • RIF=mitochondrial searches RNA databases for - GeneRIF links.
    • - -
    • WIKI=nicotine searches - GeneWiki for genes that you or other users have annotated - with the word nicotine.
    • - -
    • GO:0045202 searches for synapse-associated genes listed in the - - Gene Ontology.
    • - -
    • RIF=diabetes LRS=(9 999 Chr2 100 105) transLRS=(9 999 10) - finds diabetes-associated transcripts with peak - trans eQTLs on Chr 2 between 100 and 105 Mb with LRS - scores between 9 and 999.
    • -
    -
    -
    - -
    -
    - -
    -
    - -
    -
    - -
    - -
    - -

    GeneNetwork v2:

    - -

    GeneNetwork v1:

    -
      -
    • Main website at UTHSC
    • -
    • Time Machine: Full GN versions from 2009 to 2016 (mm9)
    • - Cloud (EC2) -
    - -
    - - -
    -
    -
    - -{%endblock%} - -{% block js %} - - - - - - - - - -{% endblock %} diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index c4b510d4..156b4772 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -156,7 +156,7 @@ def index_page(): import_collections = params['import_collections'] if import_collections == "true": g.user_session.import_traits_to_user(params['anon_id']) - return render_template("index_page_orig.html", version=GN_VERSION) + return render_template("index_page.html", version=GN_VERSION) @app.route("/tmp/") -- cgit v1.2.3 From 8bad987466203baed948ba761d248e24f04ca49c Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Fri, 23 Apr 2021 17:32:35 +0300 Subject: Rewrite For Loop in a more Pythonic way --- wqflask/wqflask/api/gen_menu.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/wqflask/wqflask/api/gen_menu.py b/wqflask/wqflask/api/gen_menu.py index 18afc5ad..eaddecd7 100644 --- a/wqflask/wqflask/api/gen_menu.py +++ b/wqflask/wqflask/api/gen_menu.py @@ -23,12 +23,7 @@ def get_species(): """Build species list""" results = g.db.execute( "SELECT Name, MenuName FROM Species ORDER BY OrderId").fetchall() - - species = [] - for result in results: - species.append([str(result[0]), str(result[1])]) - - return species + return [[name, menu_name] for name, menu_name in results] def get_groups(species): -- cgit v1.2.3 From d66a8d2ccbf10b53b31ef094835c5ba8ec672a68 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Fri, 23 Apr 2021 17:35:54 +0300 Subject: Apply PEP-8 --- wqflask/utility/helper_functions.py | 28 +++++++++++++++++----------- wqflask/wqflask/views.py | 11 +++++++++-- 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/wqflask/utility/helper_functions.py b/wqflask/utility/helper_functions.py index 4ba92ed5..e0ae3414 100644 --- a/wqflask/utility/helper_functions.py +++ b/wqflask/utility/helper_functions.py @@ -4,19 +4,23 @@ from base.species import TheSpecies from utility import hmac -from flask import Flask, g +from flask import g import logging -logger = logging.getLogger(__name__ ) +logger = logging.getLogger(__name__) + def get_species_dataset_trait(self, start_vars): if "temp_trait" in list(start_vars.keys()): - if start_vars['temp_trait'] == "True": - self.dataset = data_set.create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = start_vars['group']) - else: - self.dataset = data_set.create_dataset(start_vars['dataset']) + if start_vars['temp_trait'] == "True": + self.dataset = data_set.create_dataset( + dataset_name="Temp", + dataset_type="Temp", + group_name=start_vars['group']) + else: + self.dataset = data_set.create_dataset(start_vars['dataset']) else: - self.dataset = data_set.create_dataset(start_vars['dataset']) + self.dataset = data_set.create_dataset(start_vars['dataset']) logger.debug("After creating dataset") self.species = TheSpecies(dataset=self.dataset) logger.debug("After creating species") @@ -35,15 +39,17 @@ def get_trait_db_obs(self, trait_db_list): for trait in trait_db_list: data, _separator, hmac_string = trait.rpartition(':') data = data.strip() - assert hmac_string==hmac.hmac_creation(data), "Data tampering?" + assert hmac_string == hmac.hmac_creation(data), "Data tampering?" 
trait_name, dataset_name = data.split(":") if dataset_name == "Temp": - dataset_ob = data_set.create_dataset(dataset_name=dataset_name, dataset_type="Temp", group_name=trait_name.split("_")[2]) + dataset_ob = data_set.create_dataset( + dataset_name=dataset_name, dataset_type="Temp", + group_name=trait_name.split("_")[2]) else: dataset_ob = data_set.create_dataset(dataset_name) trait_ob = create_trait(dataset=dataset_ob, - name=trait_name, - cellid=None) + name=trait_name, + cellid=None) if trait_ob: self.trait_list.append((trait_ob, dataset_ob)) diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 156b4772..36033d80 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -334,26 +334,33 @@ def intro(): return render_template("docs.html", **doc.__dict__) - @app.route("/tutorials") def tutorials(): return render_template("tutorials.html") + @app.route("/credits") def credits(): return render_template("credits.html") + @app.route("/update_text", methods=('POST',)) def update_page(): update_text(request.form) doc = Docs(request.form['entry_type'], request.form) return render_template("docs.html", **doc.__dict__) + @app.route("/submit_trait") def submit_trait_form(): logger.info(request.url) species_and_groups = get_species_groups() - return render_template("submit_trait.html", **{'species_and_groups' : species_and_groups, 'gn_server_url' : GN_SERVER_URL, 'version' : GN_VERSION}) + return render_template( + "submit_trait.html", + **{'species_and_groups': species_and_groups, + 'gn_server_url': GN_SERVER_URL, + 'version': GN_VERSION}) + @app.route("/create_temp_trait", methods=('POST',)) def create_temp_trait(): -- cgit v1.2.3 From f97c298c1653c538be1753c87336f9f75c3a754c Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Fri, 23 Apr 2021 20:19:37 +0300 Subject: Remove stale comments --- wqflask/wqflask/templates/index_page.html | 77 ------------------------------- wqflask/wqflask/views.py | 7 +-- 2 files changed, 1 insertion(+), 83 deletions(-) diff --git a/wqflask/wqflask/templates/index_page.html b/wqflask/wqflask/templates/index_page.html index 87cf1b45..942776e7 100755 --- a/wqflask/wqflask/templates/index_page.html +++ b/wqflask/wqflask/templates/index_page.html @@ -65,12 +65,6 @@ - - - - - -
    @@ -116,12 +110,10 @@
    -
    - @@ -207,40 +199,10 @@ -
    - - diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 36033d80..baaece2f 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -496,17 +496,12 @@ def show_temp_trait_page(): def show_trait_page(): logger.info(request.url) template_vars = show_trait.ShowTrait(request.args) - #logger.info("js_data before dump:", template_vars.js_data) template_vars.js_data = json.dumps(template_vars.js_data, default=json_default_handler, indent=" ") - # Sorting the keys messes up the ordered dictionary, so don't do that - #sort_keys=True) - - #logger.info("js_data after dump:", template_vars.js_data) - #logger.info("show_trait template_vars:", pf(template_vars.__dict__)) return render_template("show_trait.html", **template_vars.__dict__) + @app.route("/heatmap", methods=('POST',)) def heatmap_page(): logger.info("In heatmap, request.form is:", pf(request.form)) -- cgit v1.2.3 From 1b0566d7c9779b979d20c350f66d5628fb55eba6 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Fri, 23 Apr 2021 23:22:46 +0300 Subject: debugging for fetching probe data --- wqflask/base/data_set.py | 51 ++++++++++++++++++++-- wqflask/wqflask/correlation/correlation_gn3_api.py | 2 +- wqflask/wqflask/views.py | 3 ++ 3 files changed, 51 insertions(+), 5 deletions(-) diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py index 178234fe..468c4da0 100644 --- a/wqflask/base/data_set.py +++ b/wqflask/base/data_set.py @@ -115,7 +115,8 @@ Publish or ProbeSet. E.g. except: pass - self.redis_instance.set("dataset_structure", json.dumps(self.datasets)) + self.redis_instance.set( + "dataset_structure", json.dumps(self.datasets)) def set_dataset_key(self, t, name): """If name is not in the object's dataset dictionary, set it, and update @@ -154,10 +155,12 @@ Publish or ProbeSet. E.g. if t in ['pheno', 'other_pheno']: group_name = name.replace("Publish", "") - results = g.db.execute(sql_query_mapping[t].format(group_name)).fetchone() + results = g.db.execute( + sql_query_mapping[t].format(group_name)).fetchone() if results: self.datasets[name] = dataset_name_mapping[t] - self.redis_instance.set("dataset_structure", json.dumps(self.datasets)) + self.redis_instance.set( + "dataset_structure", json.dumps(self.datasets)) return True return None @@ -169,7 +172,8 @@ Publish or ProbeSet. E.g. 
# This has side-effects, with the end result being a truth-y value if(self.set_dataset_key(t, name)): break - return self.datasets.get(name, None) # Return None if name has not been set + # Return None if name has not been set + return self.datasets.get(name, None) # Do the intensive work at startup one time only @@ -651,6 +655,43 @@ class DataSet(object): "Dataset {} is not yet available in GeneNetwork.".format(self.name)) pass + def fetch_probe_trait_data(self, sample_list=None): + if sample_list: + self.samplelist = sample_list + else: + self.samplelist = self.group.samplelist + + if self.group.parlist != None and self.group.f1list != None: + if (self.group.parlist + self.group.f1list) in self.samplelist: + self.samplelist += self.group.parlist + self.group.f1list + + query = """ + SELECT Strain.Name, Strain.Id FROM Strain, Species + WHERE Strain.Name IN {} + and Strain.SpeciesId=Species.Id + and Species.name = '{}' + """.format(create_in_clause(self.samplelist), *mescape(self.group.species)) + logger.sql(query) + results = dict(g.db.execute(query).fetchall()) + sample_ids = [results[item] for item in self.samplelist] + + query = """SELECT * from ProbeSetData WHERE Id in ( SELECT ProbeSetXRef.DataId FROM (ProbeSet, ProbeSetXRef, ProbeSetFreeze) WHERE ProbeSetXRef.ProbeSetFreezeId = ProbeSetFreeze.Id and ProbeSetFreeze.Name = 'HC_M2_0606_P' and ProbeSet.Id = ProbeSetXRef.ProbeSetId order by ProbeSet.Id ) and StrainId in ({})""".format( + ",".join(str(sample_id) for sample_id in sample_ids)) + + results = g.db.execute(query).fetchall() + + # with conn: + # cursor = conn.cursor() + # cursor.execute(query) + # results = cursor.fetchall() + trait_data = {} + for trait_id, StrainId, value in results: + if trait_id in trait_data: + trait_data[trait_id].append(value) + else: + trait_data[trait_id] = [value] + self.trait_data = trait_data + def get_trait_data(self, sample_list=None): if sample_list: self.samplelist = sample_list @@ -670,6 +711,7 @@ class DataSet(object): logger.sql(query) results = dict(g.db.execute(query).fetchall()) sample_ids = [results[item] for item in self.samplelist] + print("the number of sample ids are", len(sample_ids)) # MySQL limits the number of tables that can be used in a join to 61, # so we break the sample ids into smaller chunks @@ -720,6 +762,7 @@ class DataSet(object): trait_sample_data.append(results) trait_count = len(trait_sample_data[0]) + print("the trait count is >>>", trait_count) self.trait_data = collections.defaultdict(list) # put all of the separate data together into a dictionary where the keys are diff --git a/wqflask/wqflask/correlation/correlation_gn3_api.py b/wqflask/wqflask/correlation/correlation_gn3_api.py index e7394647..51bf5fb5 100644 --- a/wqflask/wqflask/correlation/correlation_gn3_api.py +++ b/wqflask/wqflask/correlation/correlation_gn3_api.py @@ -78,7 +78,7 @@ def compute_correlation(start_vars, method="pearson"): # } sample_data = process_samples( start_vars, this_dataset.group.samplelist) - target_dataset.get_trait_data(list(sample_data.keys())) + target_dataset.fetch_probe_trait_data(list(sample_data.keys())) this_trait = retrieve_sample_data(this_trait, this_dataset) print("Creating dataset and trait took", time.time()-initial_time) diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 072db466..2c239425 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -881,7 +881,10 @@ def network_graph_page(): def corr_compute_page(): logger.info("In corr_compute, request.form is:", pf(request.form)) 
logger.info(request.url) + import time + initial_time = time.time() correlation_results = compute_correlation(request.form) + print(">>>>Time taken by this endpoint",time.time()-initial_time) return render_template("demo_correlation_page.html",correlation_results=correlation_results[1:20]) @app.route("/corr_matrix", methods=('POST',)) -- cgit v1.2.3 From 7556f8a5dfc4c98bc0f0c8241592acec22b65102 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Mon, 26 Apr 2021 15:42:07 +0300 Subject: test for probe-type sample and tissue --- wqflask/wqflask/correlation/correlation_gn3_api.py | 71 +++++++++++++++++++++- 1 file changed, 70 insertions(+), 1 deletion(-) diff --git a/wqflask/wqflask/correlation/correlation_gn3_api.py b/wqflask/wqflask/correlation/correlation_gn3_api.py index 51bf5fb5..c945f699 100644 --- a/wqflask/wqflask/correlation/correlation_gn3_api.py +++ b/wqflask/wqflask/correlation/correlation_gn3_api.py @@ -52,8 +52,64 @@ def create_target_this_trait(start_vars): return (this_dataset, this_trait, target_dataset, sample_data) +def sample_for_trait_lists(corr_results, target_dataset, this_trait, this_dataset, start_vars): + sample_data = process_samples( + start_vars, this_dataset.group.samplelist) + target_dataset.get_trait_data(list(sample_data.keys())) + + this_trait = retrieve_sample_data(this_trait, this_dataset) + + this_trait_data = { + "trait_sample_data": sample_data, + "trait_id": start_vars["trait_id"] + } + # trait_lists = dict([(list(corr_result)[0],True) for corr_result in corr_results]) + # target_dataset.trait_data =list(filter(lambda dict_obj: dict_obj.keys()[ + # 0] in corr_results_traits, target_dataset_data)) + results = map_shared_keys_to_values( + target_dataset.samplelist, target_dataset.trait_data) + correlation_results = compute_all_sample_correlation(corr_method="pearson", + this_trait=this_trait_data, + target_dataset=results) + + + return correlation_results + + +def tissue_for_trait_lists(corr_results, this_dataset, target_dataset, this_trait): + # # print(corr_results[0])-- + # [{"awsdsd_at": {'corr_coeffient': 0.49714692782257336, 'p_value': 1.872077762359228e-05, 'num_overlap': 67}}] + + print("creating trait_lists") + # corr_results = corr_results[0::] + trait_lists = dict([(list(corr_result)[0], True) + for corr_result in corr_results]) + print("finished creating trait_list") + + traits_symbol_dict = this_dataset.retrieve_genes("Symbol") + print("Retrieved symbol dict") + print("creating dict here>>>>>>>>>") + import time + init_time = time.time() + traits_symbol_dict = dict({trait_name: symbol for ( + trait_name, symbol) in traits_symbol_dict.items() if trait_lists.get(trait_name)}) + print("time taken to create this max dict is>>>>", time.time()-init_time) + print("finished creatinf the dict") + print("Fetching tissue datas") + primary_tissue_data, target_tissue_data = get_tissue_correlation_input( + this_trait, traits_symbol_dict) + print("finihsed>>>>>>>>>>>>>>>>>>") + print("Calling experimental_compute_all_tissue_correlation") + corr_results = experimental_compute_all_tissue_correlation( + primary_tissue_dict=primary_tissue_data, target_tissues_data=target_tissue_data, corr_method="pearson") + # print('finished calling this tissue reuslts',corr_results) + + return corr_results + + def compute_correlation(start_vars, method="pearson"): """compute correlation for to call gn3 api""" + import time corr_type = start_vars['corr_type'] @@ -67,6 +123,7 @@ def compute_correlation(start_vars, method="pearson"): corr_input_data = {} if corr_type == 
"sample": + import time initial_time = time.time() # corr_input_data = { # "target_dataset": target_dataset.trait_data, @@ -78,7 +135,7 @@ def compute_correlation(start_vars, method="pearson"): # } sample_data = process_samples( start_vars, this_dataset.group.samplelist) - target_dataset.fetch_probe_trait_data(list(sample_data.keys())) + target_dataset.get_trait_data(list(sample_data.keys())) this_trait = retrieve_sample_data(this_trait, this_dataset) print("Creating dataset and trait took", time.time()-initial_time) @@ -94,8 +151,15 @@ def compute_correlation(start_vars, method="pearson"): this_trait=this_trait_data, target_dataset=results) + print("computedd>>>>>>>>>>>>>") + print("doing sample correlation took", time.time()-initial_time) + other_results_time = time.time() + other_results = tissue_for_trait_lists( + correlation_results, this_dataset, target_dataset, this_trait) + print(">>>time taken for this is", time.time()-other_results_time) + # requests_url = f"{GN3_CORRELATION_API}/sample_x/{method}" return correlation_results @@ -121,6 +185,9 @@ def compute_correlation(start_vars, method="pearson"): # print("time taken for compute tissue is", time.time()-initial_time) # requests_url = f"{GN3_CORRELATION_API}/tissue_corr/{method}" + + sample_results = sample_for_trait_lists( + correlation_results, target_dataset, this_trait, this_dataset, start_vars) return correlation_results elif corr_type == "lit": @@ -148,6 +215,8 @@ def compute_correlation(start_vars, method="pearson"): def do_lit_correlation(this_trait, this_dataset, target_dataset): geneid_dict = this_dataset.retrieve_genes("GeneId") + # + print("CALLING THE LIT CORRELATION HERE") species = this_dataset.group.species.lower() this_trait_geneid = this_trait.geneid -- cgit v1.2.3 From 067d27460965aaf1ceaa863a315a0c7dbc47ae02 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Mon, 26 Apr 2021 17:05:06 +0300 Subject: fix:remove debug statements and commented code --- wqflask/base/mrna_assay_tissue_data.py | 25 --------- wqflask/wqflask/correlation/correlation_gn3_api.py | 60 +++------------------- 2 files changed, 8 insertions(+), 77 deletions(-) diff --git a/wqflask/base/mrna_assay_tissue_data.py b/wqflask/base/mrna_assay_tissue_data.py index 0220d73b..5a64afb2 100644 --- a/wqflask/base/mrna_assay_tissue_data.py +++ b/wqflask/base/mrna_assay_tissue_data.py @@ -52,33 +52,9 @@ class MrnaAssayTissueData(object): # lower_symbols[gene_symbol.lower()] = True if gene_symbol != None: lower_symbols[gene_symbol.lower()] = True - - import time - # initial_time = time.time() - # conn,cursor = database_connector() - # cursor.execute(query) - # for result in cursor.fetchall(): - # symbol = result[0] - # self.data[symbol].gene_id = result[1] - # self.data[symbol].data_id = result[2] - # self.data[symbol].chr = result[3] - # self.data[symbol].mb = result[4] - # self.data[symbol].description = result[5] - # self.data[symbol].probe_target_description = result[6] - - - # print("my loop takes>>>>",time.time()-initial_time) - # conn.close() - # r - - # takes 5 seconds - initial_time = time.time() results = list(g.db.execute(query).fetchall()) for result in results: symbol = result[0] - # if symbol is not None - # exists = lower_symbols.get(symbol.lower()) - # if symbol.lower() in lower_symbols: if symbol is not None and lower_symbols.get(symbol.lower()): symbol = symbol.lower() @@ -89,7 +65,6 @@ class MrnaAssayTissueData(object): self.data[symbol].mb = result.Mb self.data[symbol].description = result.description 
self.data[symbol].probe_target_description = result.Probe_Target_Description - print("time taken in the loop is",time.time()-initial_time) ########################################################################### #Input: cursor, symbolList (list), dataIdDict(Dict) diff --git a/wqflask/wqflask/correlation/correlation_gn3_api.py b/wqflask/wqflask/correlation/correlation_gn3_api.py index c945f699..3c21a850 100644 --- a/wqflask/wqflask/correlation/correlation_gn3_api.py +++ b/wqflask/wqflask/correlation/correlation_gn3_api.py @@ -63,9 +63,6 @@ def sample_for_trait_lists(corr_results, target_dataset, this_trait, this_datase "trait_sample_data": sample_data, "trait_id": start_vars["trait_id"] } - # trait_lists = dict([(list(corr_result)[0],True) for corr_result in corr_results]) - # target_dataset.trait_data =list(filter(lambda dict_obj: dict_obj.keys()[ - # 0] in corr_results_traits, target_dataset_data)) results = map_shared_keys_to_values( target_dataset.samplelist, target_dataset.trait_data) correlation_results = compute_all_sample_correlation(corr_method="pearson", @@ -77,33 +74,15 @@ def sample_for_trait_lists(corr_results, target_dataset, this_trait, this_datase def tissue_for_trait_lists(corr_results, this_dataset, target_dataset, this_trait): - # # print(corr_results[0])-- - # [{"awsdsd_at": {'corr_coeffient': 0.49714692782257336, 'p_value': 1.872077762359228e-05, 'num_overlap': 67}}] - - print("creating trait_lists") - # corr_results = corr_results[0::] trait_lists = dict([(list(corr_result)[0], True) for corr_result in corr_results]) - print("finished creating trait_list") - traits_symbol_dict = this_dataset.retrieve_genes("Symbol") - print("Retrieved symbol dict") - print("creating dict here>>>>>>>>>") - import time - init_time = time.time() traits_symbol_dict = dict({trait_name: symbol for ( trait_name, symbol) in traits_symbol_dict.items() if trait_lists.get(trait_name)}) - print("time taken to create this max dict is>>>>", time.time()-init_time) - print("finished creatinf the dict") - print("Fetching tissue datas") primary_tissue_data, target_tissue_data = get_tissue_correlation_input( this_trait, traits_symbol_dict) - print("finihsed>>>>>>>>>>>>>>>>>>") - print("Calling experimental_compute_all_tissue_correlation") corr_results = experimental_compute_all_tissue_correlation( primary_tissue_dict=primary_tissue_data, target_tissues_data=target_tissue_data, corr_method="pearson") - # print('finished calling this tissue reuslts',corr_results) - return corr_results @@ -123,22 +102,14 @@ def compute_correlation(start_vars, method="pearson"): corr_input_data = {} if corr_type == "sample": - import time - initial_time = time.time() - # corr_input_data = { - # "target_dataset": target_dataset.trait_data, - # "target_samplelist": target_dataset.samplelist, - # "trait_data": { - # "trait_sample_data": sample_data, - # "trait_id": start_vars["trait_id"] - # } - # } + sample_data = process_samples( start_vars, this_dataset.group.samplelist) + initial_time = time.time() target_dataset.get_trait_data(list(sample_data.keys())) this_trait = retrieve_sample_data(this_trait, this_dataset) + print("Creating target dataset and trait took", time.time()-initial_time) - print("Creating dataset and trait took", time.time()-initial_time) this_trait_data = { "trait_sample_data": sample_data, @@ -151,15 +122,9 @@ def compute_correlation(start_vars, method="pearson"): this_trait=this_trait_data, target_dataset=results) - print("computedd>>>>>>>>>>>>>") - print("doing sample correlation took", 
time.time()-initial_time) - - other_results_time = time.time() - other_results = tissue_for_trait_lists( - correlation_results, this_dataset, target_dataset, this_trait) - print(">>>time taken for this is", time.time()-other_results_time) - + # other_results = tissue_for_trait_lists( + # correlation_results, this_dataset, target_dataset, this_trait) # requests_url = f"{GN3_CORRELATION_API}/sample_x/{method}" return correlation_results @@ -177,17 +142,9 @@ def compute_correlation(start_vars, method="pearson"): target_tissues_data=corr_input_data[ "target_tissues_dict"], corr_method=method) - print("correlation y took", time.time()-initial_time) - # initial_time = time.time() - # correlation_results = compute_all_tissue_correlation(primary_tissue_dict=corr_input_data["primary_tissue"], - # target_tissues_data=corr_input_data["target_tissues_dict"], - # corr_method=method) - # print("time taken for compute tissue is", time.time()-initial_time) - - # requests_url = f"{GN3_CORRELATION_API}/tissue_corr/{method}" - - sample_results = sample_for_trait_lists( - correlation_results, target_dataset, this_trait, this_dataset, start_vars) + print("computing tissue took >>>>", time.time()-initial_time) + # sample_results = sample_for_trait_lists( + # correlation_results, target_dataset, this_trait, this_dataset, start_vars) return correlation_results elif corr_type == "lit": @@ -203,7 +160,6 @@ def compute_correlation(start_vars, method="pearson"): species=species, gene_id=this_trait_geneid) return lit_corr_results - print("the time taken is", time.time()-initial_time) # requests_url = f"{GN3_CORRELATION_API}/lit_corr/{species}/{this_trait_geneid}" # corr_input_data = geneid_dict # corr_results = requests.post(requests_url, json=corr_input_data) -- cgit v1.2.3 From 243f884b0f7e5e42aaab7d7c8076479818be8578 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Tue, 27 Apr 2021 14:35:37 +0300 Subject: templates: submit_trait.html: Add field for inputting trait name --- wqflask/wqflask/templates/submit_trait.html | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/wqflask/wqflask/templates/submit_trait.html b/wqflask/wqflask/templates/submit_trait.html index 2cc18240..3572b0a9 100644 --- a/wqflask/wqflask/templates/submit_trait.html +++ b/wqflask/wqflask/templates/submit_trait.html @@ -53,7 +53,7 @@ -
    +

    2. Enter Trait Data:

    File uploading isn't enabled yet, but is coming soon.


    @@ -77,6 +77,24 @@
    +
    +

    3. Enable Use of Trait Variance:

    +
    + +
    +
    +
    +

    + Name Your Trait: (optional) +

    + +
    +
    +
    + + +
    +
    -- cgit v1.2.3 From 75bcab50abcf29c8a5f98a9f17b37457ac433d3b Mon Sep 17 00:00:00 2001 From: zsloan Date: Tue, 27 Apr 2021 21:37:12 +0000 Subject: Stopped using the scanone function pointer when doing R/qtl mapping, since the results are not converted into a Python object in a way that preserves marker names (which is important because pseudomarkers can be added) Instead the marker names are extracted from the scanone results using R immediately after they're generated, and then passed to process_rqtl_results --- wqflask/wqflask/marker_regression/rqtl_mapping.py | 39 ++++++++++++----------- 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py index 4117a0e5..f593ae91 100644 --- a/wqflask/wqflask/marker_regression/rqtl_mapping.py +++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py @@ -78,6 +78,7 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec cross_object = calc_genoprob(cross_object) else: cross_object = calc_genoprob(cross_object, step=5, stepwidth="max") + logger.info("after calc_genoprob"); pheno_string = sanitize_rqtl_phenotype(vals) logger.info("phenostring done"); @@ -110,11 +111,13 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec return process_pair_scan_results(result_data_frame) else: if do_control == "true" or cofactors != "": - logger.info("Using covariate"); result_data_frame = scanone(cross_object, pheno = "the_pheno", addcovar = covars, model=model, method=method) - ro.r('save.image(file = "/home/zas1024/gn2-zach/itp_cofactor_test.RData")') + ro.r(f"qtl_results = scanone(the_cross, pheno='the_pheno', addcovar=all_covars, model='{model}', method='{method}')") + result_data_frame = ro.r("qtl_results") else: - logger.info("No covariates"); result_data_frame = scanone(cross_object, pheno = "the_pheno", model=model, method=method) + ro.r(f"qtl_results = scanone(the_cross, pheno='the_pheno', model='{model}', method='{method}')") + result_data_frame = ro.r("qtl_results") + marker_names = np.asarray(ro.r("row.names(qtl_results)")) if num_perm > 0 and permCheck == "ON": # Do permutation (if requested by user) if len(perm_strata_list) > 0: #ZS: The strata list would only be populated if "Stratified" was checked on before mapping cross_object, strata_ob = add_perm_strata(cross_object, perm_strata_list) @@ -129,9 +132,10 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec perm_data_frame = scanone(cross_object, pheno_col = "the_pheno", n_perm = num_perm, model=model, method=method) perm_output, suggestive, significant = process_rqtl_perm_results(num_perm, perm_data_frame) # Functions that sets the thresholds for the webinterface - return perm_output, suggestive, significant, process_rqtl_results(result_data_frame, dataset.group.species) + + return perm_output, suggestive, significant, process_rqtl_results(marker_names, result_data_frame, dataset.group.species) else: - return process_rqtl_results(result_data_frame, dataset.group.species) + return process_rqtl_results(marker_names, result_data_frame, dataset.group.species) def generate_cross_from_rdata(dataset): rdata_location = locate(dataset.group.name + ".RData", "genotype/rdata") @@ -144,7 +148,6 @@ def generate_cross_from_rdata(dataset): """ % (rdata_location)) def generate_cross_from_geno(dataset, scale_units): # TODO: Need to figure out why some genofiles have the wrong format and don't convert properly - ro.r(""" 
trim <- function( x ) { gsub("(^[[:space:]]+|[[:space:]]+$)", "", x) } getGenoCode <- function(header, name = 'unk'){ @@ -376,9 +379,8 @@ def process_pair_scan_results(result): def process_rqtl_perm_results(num_perm, results): perm_vals = [] - for line in str(results).split("\n")[1:(num_perm+1)]: - #print("R/qtl permutation line:", line.split()) - perm_vals.append(float(line.split()[1])) + for item in results: + perm_vals.append(item) perm_output = perm_vals suggestive = np.percentile(np.array(perm_vals), 67) @@ -386,20 +388,21 @@ def process_rqtl_perm_results(num_perm, results): return perm_output, suggestive, significant -def process_rqtl_results(result, species_name): # TODO: how to make this a one liner and not copy the stuff in a loop +def process_rqtl_results(marker_names, results, species_name): # TODO: how to make this a one liner and not copy the stuff in a loop qtl_results = [] - output = [tuple([result[j][i] for j in range(result.ncol)]) for i in range(result.nrow)] - for i, line in enumerate(result.iter_row()): + for i, line in enumerate(results): marker = {} - marker['name'] = result.rownames[i] - if species_name == "mouse" and output[i][0] == 20: #ZS: This is awkward, but I'm not sure how to change the 20s to Xs in the RData file + marker['name'] = marker_names[i] + if species_name == "mouse" and line[0] == 20: marker['chr'] = "X" else: - marker['chr'] = output[i][0] - marker['cM'] = output[i][1] - marker['Mb'] = output[i][1] - marker['lod_score'] = output[i][2] + try: + marker['chr'] = int(line[0]) + except: + marker['chr'] = line[0] + marker['cM'] = marker['Mb'] = line[1] + marker['lod_score'] = line[2] qtl_results.append(marker) return qtl_results \ No newline at end of file -- cgit v1.2.3 From 1845086220e427ed38d1bb2b216dc53de3760d64 Mon Sep 17 00:00:00 2001 From: zsloan Date: Tue, 27 Apr 2021 22:24:11 +0000 Subject: Randomized cross object filename, since I think it throws an error if the same file is being written to simultaneously (or being written to while it's being read) --- wqflask/wqflask/marker_regression/rqtl_mapping.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py index f593ae91..c70fbbec 100644 --- a/wqflask/wqflask/marker_regression/rqtl_mapping.py +++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py @@ -10,6 +10,7 @@ from base.trait import create_trait from base.data_set import create_dataset from utility import webqtlUtil from utility.tools import locate, TEMPDIR +from wqflask.marker_regression.gemma_mapping import generate_random_n_string from flask import g import utility.logger @@ -148,13 +149,17 @@ def generate_cross_from_rdata(dataset): """ % (rdata_location)) def generate_cross_from_geno(dataset, scale_units): # TODO: Need to figure out why some genofiles have the wrong format and don't convert properly + + cross_filename = (f"{str(dataset.group.name)}_" + f"{generate_random_n_string(6)}") + ro.r(""" trim <- function( x ) { gsub("(^[[:space:]]+|[[:space:]]+$)", "", x) } getGenoCode <- function(header, name = 'unk'){ mat = which(unlist(lapply(header,function(x){ length(grep(paste('@',name,sep=''), x)) })) == 1) return(trim(strsplit(header[mat],':')[[1]][2])) } - GENOtoCSVR <- function(genotypes = '%s', out = 'cross.csvr', phenotype = NULL, sex = NULL, verbose = FALSE){ + GENOtoCSVR <- function(genotypes = '%s', out = '%s.csvr', phenotype = NULL, sex = NULL, verbose = FALSE){ header = readLines(genotypes, 40) # Assume a 
geno header is not longer than 40 lines toskip = which(unlist(lapply(header, function(x){ length(grep("Chr\t", x)) })) == 1)-1 # Major hack to skip the geno headers type <- getGenoCode(header, 'type') @@ -188,7 +193,7 @@ def generate_cross_from_geno(dataset, scale_units): # TODO: Need to figur } return(cross) } - """ % (dataset.group.genofile, scale_units)) + """ % (dataset.group.genofile, cross_filename, scale_units)) def add_perm_strata(cross, perm_strata): col_string = 'c("the_strata")' -- cgit v1.2.3 From 63ca33f75fcb67ba8c3851c60088b62becb5f735 Mon Sep 17 00:00:00 2001 From: zsloan Date: Tue, 27 Apr 2021 22:25:44 +0000 Subject: Convert to array and transpose R/qtl scanone results when not using cofactors. For some reason the rows/columns are inverted when converted to a python object when doing scanone with cofactors vs without cofactors --- wqflask/wqflask/marker_regression/rqtl_mapping.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py index c70fbbec..cd43577e 100644 --- a/wqflask/wqflask/marker_regression/rqtl_mapping.py +++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py @@ -116,7 +116,7 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec result_data_frame = ro.r("qtl_results") else: ro.r(f"qtl_results = scanone(the_cross, pheno='the_pheno', model='{model}', method='{method}')") - result_data_frame = ro.r("qtl_results") + result_data_frame = np.asarray(ro.r("qtl_results")).T marker_names = np.asarray(ro.r("row.names(qtl_results)")) if num_perm > 0 and permCheck == "ON": # Do permutation (if requested by user) -- cgit v1.2.3 From ac9be3f74e005e95a057f2c49baa7822d05f1ece Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Wed, 28 Apr 2021 08:46:53 +0300 Subject: minor fixes for correlation --- bin/genenetwork2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/genenetwork2 b/bin/genenetwork2 index 917d6549..f73f235c 100755 --- a/bin/genenetwork2 +++ b/bin/genenetwork2 @@ -155,7 +155,7 @@ fi # We may change this one: # export PYTHONPATH=$PYTHON_GN_PATH:$GN2_BASE_DIR/wqflask:$PYTHONPATH -PYTHONPATH=$PYTHON_GN_PATH:$GN2_BASE_DIR/wqflask:$HOME/genenetwork3:$PYTHONPATH +PYTHONPATH=$PYTHON_GN_PATH:$GN2_BASE_DIR/wqflask:$HOME/project/genenetwork3:$PYTHONPATH # Our UNIX TMPDIR defaults to /tmp - change this on a shared server if [ -z $TMPDIR ]; then -- cgit v1.2.3 From f799477b49ef8c2bd8d8408b0c1b1ee6094cb8ca Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 28 Apr 2021 20:53:13 +0300 Subject: wqflask: views.py: Clean up imports section Remove unused imports and break up long imports into shorter lines. --- wqflask/wqflask/views.py | 44 ++++++++++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index baaece2f..91fb8df2 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -1,20 +1,17 @@ """Main routing table for GN2""" -import traceback # for error page +import traceback # for error page import os # for error gifs import random # for random error gif import datetime # for errors -import time # for errors import sys import csv import simplejson as json -import yaml import xlsxwriter import io # Todo: Use cStringIO? 
from zipfile import ZipFile, ZIP_DEFLATED -import gc import numpy as np import pickle as pickle import uuid @@ -24,23 +21,27 @@ import base64 import array import sqlalchemy from wqflask import app -from flask import g, Response, request, make_response, render_template, send_from_directory, jsonify, redirect, url_for, send_file +from flask import g +from flask import Response +from flask import request +from flask import make_response +from flask import render_template +from flask import send_from_directory +from flask import redirect +from flask import url_for +from flask import send_file -from wqflask import group_manager -from wqflask import resource_manager from wqflask import search_results -from wqflask import export_traits -from wqflask import gsearch -from wqflask import update_search_results -from wqflask import docs from wqflask import news from wqflask import server_side from wqflask.submit_bnw import get_bnw_input -from base.data_set import create_dataset, DataSet # Used by YAML in marker_regression +from base.data_set import create_dataset # Used by YAML in marker_regression from wqflask.show_trait import show_trait from wqflask.show_trait import export_trait_data from wqflask.heatmap import heatmap -from wqflask.external_tools import send_to_bnw, send_to_webgestalt, send_to_geneweaver +from wqflask.external_tools import send_to_bnw +from wqflask.external_tools import send_to_webgestalt +from wqflask.external_tools import send_to_geneweaver from wqflask.comparison_bar_chart import comparison_bar_chart from wqflask.marker_regression import run_mapping from wqflask.marker_regression import display_mapping_results @@ -59,24 +60,31 @@ from wqflask.docs import Docs, update_text from wqflask.db_info import InfoPage from utility import temp_data -from utility.tools import SQL_URI, TEMPDIR, USE_REDIS, USE_GN_SERVER, GN_SERVER_URL, GN_VERSION, JS_TWITTER_POST_FETCHER_PATH, JS_GUIX_PATH, CSS_PATH +from utility.tools import SQL_URI +from utility.tools import TEMPDIR +from utility.tools import USE_REDIS +from utility.tools import GN_SERVER_URL +from utility.tools import GN_VERSION +from utility.tools import JS_TWITTER_POST_FETCHER_PATH +from utility.tools import JS_GUIX_PATH from utility.helper_functions import get_species_groups from utility.authentication_tools import check_resource_availability from utility.redis_tools import get_redis_conn -Redis = get_redis_conn() + from base.webqtlConfig import GENERATED_IMAGE_DIR, DEFAULT_PRIVILEGES from utility.benchmark import Bench from pprint import pformat as pf -from wqflask import collect from wqflask.database import db_session -import werkzeug import utility.logger -logger = utility.logger.getLogger(__name__ ) + +Redis = get_redis_conn() + +logger = utility.logger.getLogger(__name__) @app.before_request -- cgit v1.2.3 From 5ae817e71a7232a747c988cd9f6abfa25a35971c Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 28 Apr 2021 21:17:01 +0300 Subject: wqflask: views: Remove news import This name clashes with "def news" which is defined later. 
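A minimal, standalone sketch of the name clash this commit message describes — it is not part of the patch itself. The `wqflask.news` module is replaced here by a hypothetical stand-in built with `types.ModuleType`; the point is only that a later `def news()` rebinds the module-level name, so the earlier import becomes unreachable dead code.

    from types import ModuleType

    news = ModuleType("news")   # stand-in for what `from wqflask import news` binds

    def news():                 # the view function defined further down rebinds the name
        return "rendered news page"

    # The module object can no longer be reached through `news`; only the
    # function remains, so the import above it is pure noise (and a lint warning).
    assert callable(news) and not isinstance(news, ModuleType)
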
--- wqflask/wqflask/views.py | 1 - 1 file changed, 1 deletion(-) diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 91fb8df2..3aafd7d3 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -32,7 +32,6 @@ from flask import url_for from flask import send_file from wqflask import search_results -from wqflask import news from wqflask import server_side from wqflask.submit_bnw import get_bnw_input from base.data_set import create_dataset # Used by YAML in marker_regression -- cgit v1.2.3 From ed434e8fba9f807d94892d06d6191f5de6670bd9 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 28 Apr 2021 21:20:32 +0300 Subject: Remove local variables that are unused --- wqflask/wqflask/views.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 3aafd7d3..b1c2ed68 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -97,7 +97,6 @@ def connect_db(): @app.before_request def check_access_permissions(): logger.debug("@app.before_request check_access_permissions") - available = True if 'dataset' in request.args: permissions = DEFAULT_PRIVILEGES if request.args['dataset'] != "Temp": @@ -656,7 +655,6 @@ def loading_page(): dataset = create_dataset(start_vars['dataset'], group_name = start_vars['group']) else: dataset = create_dataset(start_vars['dataset']) - genofile_samplelist = [] samples = start_vars['primary_samples'].split(",") if 'genofile' in start_vars: if start_vars['genofile'] != "": @@ -846,7 +844,6 @@ def export_pdf(): svg_xml = request.form.get("data", "Invalid data") logger.info("svg_xml:", svg_xml) filename = request.form.get("filename", "interval_map_pdf") - filepath = GENERATED_IMAGE_DIR+filename pdf_file = cairosvg.svg2pdf(bytestring=svg_xml) response = Response(pdf_file, mimetype="application/pdf") response.headers["Content-Disposition"] = "attachment; filename=%s"%filename @@ -942,7 +939,6 @@ def security_tutorial_page(): @app.route("/submit_bnw", methods=('POST',)) def submit_bnw(): logger.info(request.url) - template_vars = get_bnw_input(request.form) return render_template("empty_collection.html", **{'tool':'Correlation Matrix'}) # Take this out or secure it before putting into production -- cgit v1.2.3 From 41791eb9eb809c11b5bd953235ae98c4b4d82156 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 28 Apr 2021 21:20:54 +0300 Subject: Remove stale comments --- wqflask/wqflask/views.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index b1c2ed68..31ba8df0 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -972,9 +972,6 @@ def json_default_handler(obj): # Handle custom objects if hasattr(obj, '__dict__'): return obj.__dict__ - #elif type(obj) == "Dataset": - # logger.info("Not going to serialize Dataset") - # return None else: raise TypeError('Object of type %s with value of %s is not JSON serializable' % ( type(obj), repr(obj))) -- cgit v1.2.3 From ca1bc2a22f93591ac9c120bca1ba554ae891c9ab Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 28 Apr 2021 21:21:03 +0300 Subject: Use right block level comments --- wqflask/wqflask/views.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 31ba8df0..a33c64f1 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -962,7 +962,7 @@ def browser_inputs(): ########################################################################## def json_default_handler(obj): - 
'''Based on http://stackoverflow.com/a/2680060/1175849''' + """Based on http://stackoverflow.com/a/2680060/1175849""" # Handle datestamps if hasattr(obj, 'isoformat'): return obj.isoformat() -- cgit v1.2.3 From f23a198951e3aa3040b6ae8c28aba0fd34514ebd Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 28 Apr 2021 21:24:04 +0300 Subject: wqflask: views: Apply pep-8 --- wqflask/wqflask/views.py | 222 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 154 insertions(+), 68 deletions(-) diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index a33c64f1..319f1270 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -91,9 +91,11 @@ def connect_db(): logger.info("@app.before_request connect_db") db = getattr(g, '_database', None) if db is None: - g.db = g._database = sqlalchemy.create_engine(SQL_URI, encoding="latin1") + g.db = g._database = sqlalchemy.create_engine( + SQL_URI, encoding="latin1") logger.debug(g.db) + @app.before_request def check_access_permissions(): logger.debug("@app.before_request check_access_permissions") @@ -105,7 +107,8 @@ def check_access_permissions(): if dataset.type == "Temp": permissions = DEFAULT_PRIVILEGES elif 'trait_id' in request.args: - permissions = check_resource_availability(dataset, request.args['trait_id']) + permissions = check_resource_availability( + dataset, request.args['trait_id']) elif dataset.type != "Publish": permissions = check_resource_availability(dataset) @@ -116,6 +119,7 @@ def check_access_permissions(): if permissions['data'] == 'no-access': return redirect(url_for("no_access_page")) + @app.teardown_appcontext def shutdown_session(exception=None): db = getattr(g, '_database', None) @@ -124,6 +128,7 @@ def shutdown_session(exception=None): db_session.remove() g.db = None + @app.errorhandler(Exception) def handle_bad_request(e): err_msg = str(e) @@ -134,25 +139,30 @@ def handle_bad_request(e): logger.error(traceback.format_exc()) now = datetime.datetime.utcnow() time_str = now.strftime('%l:%M%p UTC %b %d, %Y') - formatted_lines = [request.url + " ("+time_str+")"]+traceback.format_exc().splitlines() + formatted_lines = [request.url + + " ("+time_str+")"]+traceback.format_exc().splitlines() # Handle random animations # Use a cookie to have one animation on refresh animation = request.cookies.get(err_msg[:32]) if not animation: - list = [fn for fn in os.listdir("./wqflask/static/gif/error") if fn.endswith(".gif") ] + list = [fn for fn in os.listdir( + "./wqflask/static/gif/error") if fn.endswith(".gif")] animation = random.choice(list) - resp = make_response(render_template("error.html", message=err_msg, stack=formatted_lines, error_image=animation, version=GN_VERSION)) + resp = make_response(render_template("error.html", message=err_msg, + stack=formatted_lines, error_image=animation, version=GN_VERSION)) # logger.error("Set cookie %s with %s" % (err_msg, animation)) resp.set_cookie(err_msg[:32], animation) return resp + @app.route("/authentication_needed") def no_access_page(): return render_template("new_security/not_authenticated.html") + @app.route("/") def index_page(): logger.info("Sending index_page") @@ -177,7 +187,7 @@ def tmp_page(img_path): imgB64 = base64.b64encode(imgdata) bytesarray = array.array('B', imgB64) return render_template("show_image.html", - img_base64 = bytesarray ) + img_base64=bytesarray) @app.route("/js/") @@ -189,6 +199,7 @@ def js(filename): name = name.replace('js_alt/', '') return send_from_directory(js_path, name) + @app.route("/css/") def css(filename): js_path 
= JS_GUIX_PATH @@ -198,10 +209,12 @@ def css(filename): name = name.replace('js_alt/', '') return send_from_directory(js_path, name) + @app.route("/twitter/") def twitter(filename): return send_from_directory(JS_TWITTER_POST_FETCHER_PATH, filename) + @app.route("/search", methods=('GET',)) def search_page(): logger.info("in search_page") @@ -209,7 +222,8 @@ def search_page(): result = None if USE_REDIS: with Bench("Trying Redis cache"): - key = "search_results:v1:" + json.dumps(request.args, sort_keys=True) + key = "search_results:v1:" + \ + json.dumps(request.args, sort_keys=True) logger.debug("key is:", pf(key)) result = Redis.get(key) if result: @@ -232,6 +246,7 @@ def search_page(): else: return render_template("search_error.html") + @app.route("/search_table", methods=('GET',)) def search_page_table(): logger.info("in search_page table") @@ -242,7 +257,7 @@ def search_page_table(): logger.info(type(the_search.trait_list)) logger.info(the_search.trait_list) - + current_page = server_side.ServerSideTable( len(the_search.trait_list), the_search.trait_list, @@ -252,6 +267,7 @@ def search_page_table(): return flask.jsonify(current_page) + @app.route("/gsearch", methods=('GET',)) def gsearchact(): logger.info(request.url) @@ -262,6 +278,7 @@ def gsearchact(): elif type == "phenotype": return render_template("gsearch_pheno.html", **result) + @app.route("/gsearch_updating", methods=('POST',)) def gsearch_updating(): logger.info("REQUEST ARGS:", request.values) @@ -292,41 +309,59 @@ def generated_file(filename): logger.info(request.url) return send_from_directory(GENERATED_IMAGE_DIR, filename) + @app.route("/help") def help(): logger.info(request.url) doc = Docs("help", request.args) return render_template("docs.html", **doc.__dict__) + @app.route("/wgcna_setup", methods=('POST',)) def wcgna_setup(): - logger.info("In wgcna, request.form is:", request.form) # We are going to get additional user input for the analysis + # We are going to get additional user input for the analysis + logger.info("In wgcna, request.form is:", request.form) logger.info(request.url) - return render_template("wgcna_setup.html", **request.form) # Display them using the template + # Display them using the template + return render_template("wgcna_setup.html", **request.form) + @app.route("/wgcna_results", methods=('POST',)) def wcgna_results(): logger.info("In wgcna, request.form is:", request.form) logger.info(request.url) - wgcna = wgcna_analysis.WGCNA() # Start R, load the package and pointers and create the analysis - wgcnaA = wgcna.run_analysis(request.form) # Start the analysis, a wgcnaA object should be a separate long running thread - result = wgcna.process_results(wgcnaA) # After the analysis is finished store the result - return render_template("wgcna_results.html", **result) # Display them using the template + # Start R, load the package and pointers and create the analysis + wgcna = wgcna_analysis.WGCNA() + # Start the analysis, a wgcnaA object should be a separate long running thread + wgcnaA = wgcna.run_analysis(request.form) + # After the analysis is finished store the result + result = wgcna.process_results(wgcnaA) + # Display them using the template + return render_template("wgcna_results.html", **result) + @app.route("/ctl_setup", methods=('POST',)) def ctl_setup(): - logger.info("In ctl, request.form is:", request.form) # We are going to get additional user input for the analysis + # We are going to get additional user input for the analysis + logger.info("In ctl, request.form is:", request.form) 
logger.info(request.url) - return render_template("ctl_setup.html", **request.form) # Display them using the template + # Display them using the template + return render_template("ctl_setup.html", **request.form) + @app.route("/ctl_results", methods=('POST',)) def ctl_results(): logger.info("In ctl, request.form is:", request.form) logger.info(request.url) - ctl = ctl_analysis.CTL() # Start R, load the package and pointers and create the analysis - ctlA = ctl.run_analysis(request.form) # Start the analysis, a ctlA object should be a separate long running thread - result = ctl.process_results(ctlA) # After the analysis is finished store the result - return render_template("ctl_results.html", **result) # Display them using the template + # Start R, load the package and pointers and create the analysis + ctl = ctl_analysis.CTL() + # Start the analysis, a ctlA object should be a separate long running thread + ctlA = ctl.run_analysis(request.form) + # After the analysis is finished store the result + result = ctl.process_results(ctlA) + # Display them using the template + return render_template("ctl_results.html", **result) + @app.route("/news") def news(): @@ -381,9 +416,11 @@ def export_trait_excel(): logger.info("In export_trait_excel") logger.info("request.form:", request.form) logger.info(request.url) - trait_name, sample_data = export_trait_data.export_sample_table(request.form) + trait_name, sample_data = export_trait_data.export_sample_table( + request.form) - logger.info("sample_data - type: %s -- size: %s" % (type(sample_data), len(sample_data))) + logger.info("sample_data - type: %s -- size: %s" % + (type(sample_data), len(sample_data))) buff = io.BytesIO() workbook = xlsxwriter.Workbook(buff, {'in_memory': True}) @@ -397,7 +434,8 @@ def export_trait_excel(): return Response(excel_data, mimetype='application/vnd.ms-excel', - headers={"Content-Disposition":"attachment;filename="+ trait_name + ".xlsx"}) + headers={"Content-Disposition": "attachment;filename=" + trait_name + ".xlsx"}) + @app.route('/export_trait_csv', methods=('POST',)) def export_trait_csv(): @@ -405,9 +443,11 @@ def export_trait_csv(): logger.info("In export_trait_csv") logger.info("request.form:", request.form) logger.info(request.url) - trait_name, sample_data = export_trait_data.export_sample_table(request.form) + trait_name, sample_data = export_trait_data.export_sample_table( + request.form) - logger.info("sample_data - type: %s -- size: %s" % (type(sample_data), len(sample_data))) + logger.info("sample_data - type: %s -- size: %s" % + (type(sample_data), len(sample_data))) buff = io.StringIO() writer = csv.writer(buff) @@ -418,7 +458,8 @@ def export_trait_csv(): return Response(csv_data, mimetype='text/csv', - headers={"Content-Disposition":"attachment;filename="+ trait_name + ".csv"}) + headers={"Content-Disposition": "attachment;filename=" + trait_name + ".csv"}) + @app.route('/export_traits_csv', methods=('POST',)) def export_traits_csv(): @@ -443,7 +484,8 @@ def export_traits_csv(): else: return Response(file_list[0][1], mimetype='text/csv', - headers={"Content-Disposition":"attachment;filename=" + file_list[0][0]}) + headers={"Content-Disposition": "attachment;filename=" + file_list[0][0]}) + @app.route('/export_perm_data', methods=('POST',)) def export_perm_data(): @@ -454,7 +496,8 @@ def export_perm_data(): now = datetime.datetime.now() time_str = now.strftime('%H:%M_%d%B%Y') - file_name = "Permutation_" + perm_info['num_perm'] + "_" + perm_info['trait_name'] + "_" + time_str + file_name = "Permutation_" 
+ \ + perm_info['num_perm'] + "_" + perm_info['trait_name'] + "_" + time_str the_rows = [ ["#Permutation Test"], @@ -468,10 +511,14 @@ def export_perm_data(): ["#N_genotypes: " + str(perm_info['n_genotypes'])], ["#Genotype_file: " + perm_info['genofile']], ["#Units_linkage: " + perm_info['units_linkage']], - ["#Permutation_stratified_by: " + ", ".join([ str(cofactor) for cofactor in perm_info['strat_cofactors']])], - ["#RESULTS_1: Suggestive LRS(p=0.63) = " + str(np.percentile(np.array(perm_info['perm_data']), 67))], - ["#RESULTS_2: Significant LRS(p=0.05) = " + str(np.percentile(np.array(perm_info['perm_data']), 95))], - ["#RESULTS_3: Highly Significant LRS(p=0.01) = " + str(np.percentile(np.array(perm_info['perm_data']), 99))], + ["#Permutation_stratified_by: " + + ", ".join([str(cofactor) for cofactor in perm_info['strat_cofactors']])], + ["#RESULTS_1: Suggestive LRS(p=0.63) = " + + str(np.percentile(np.array(perm_info['perm_data']), 67))], + ["#RESULTS_2: Significant LRS(p=0.05) = " + str( + np.percentile(np.array(perm_info['perm_data']), 95))], + ["#RESULTS_3: Highly Significant LRS(p=0.01) = " + str( + np.percentile(np.array(perm_info['perm_data']), 99))], ["#Comment: Results sorted from low to high peak linkage"] ] @@ -485,7 +532,7 @@ def export_perm_data(): return Response(csv_data, mimetype='text/csv', - headers={"Content-Disposition":"attachment;filename=" + file_name + ".csv"}) + headers={"Content-Disposition": "attachment;filename=" + file_name + ".csv"}) @app.route("/show_temp_trait", methods=('POST',)) @@ -519,7 +566,8 @@ def heatmap_page(): traits = [trait.strip() for trait in start_vars['trait_list'].split(',')] if traits[0] != "": version = "v5" - key = "heatmap:{}:".format(version) + json.dumps(start_vars, sort_keys=True) + key = "heatmap:{}:".format( + version) + json.dumps(start_vars, sort_keys=True) logger.info("key is:", pf(key)) with Bench("Loading cache"): result = Redis.get(key) @@ -540,7 +588,8 @@ def heatmap_page(): result = template_vars.__dict__ for item in list(template_vars.__dict__.keys()): - logger.info(" ---**--- {}: {}".format(type(template_vars.__dict__[item]), item)) + logger.info( + " ---**--- {}: {}".format(type(template_vars.__dict__[item]), item)) pickled_result = pickle.dumps(result, pickle.HIGHEST_PROTOCOL) logger.info("pickled result length:", len(pickled_result)) @@ -551,10 +600,12 @@ def heatmap_page(): rendered_template = render_template("heatmap.html", **result) else: - rendered_template = render_template("empty_collection.html", **{'tool':'Heatmap'}) + rendered_template = render_template( + "empty_collection.html", **{'tool': 'Heatmap'}) return rendered_template + @app.route("/bnw_page", methods=('POST',)) def bnw_page(): logger.info("In run BNW, request.form is:", pf(request.form)) @@ -569,10 +620,12 @@ def bnw_page(): result = template_vars.__dict__ rendered_template = render_template("bnw_page.html", **result) else: - rendered_template = render_template("empty_collection.html", **{'tool':'BNW'}) + rendered_template = render_template( + "empty_collection.html", **{'tool': 'BNW'}) return rendered_template + @app.route("/webgestalt_page", methods=('POST',)) def webgestalt_page(): logger.info("In run WebGestalt, request.form is:", pf(request.form)) @@ -587,10 +640,12 @@ def webgestalt_page(): result = template_vars.__dict__ rendered_template = render_template("webgestalt_page.html", **result) else: - rendered_template = render_template("empty_collection.html", **{'tool':'WebGestalt'}) + rendered_template = render_template( + 
"empty_collection.html", **{'tool': 'WebGestalt'}) return rendered_template + @app.route("/geneweaver_page", methods=('POST',)) def geneweaver_page(): logger.info("In run WebGestalt, request.form is:", pf(request.form)) @@ -605,10 +660,12 @@ def geneweaver_page(): result = template_vars.__dict__ rendered_template = render_template("geneweaver_page.html", **result) else: - rendered_template = render_template("empty_collection.html", **{'tool':'GeneWeaver'}) + rendered_template = render_template( + "empty_collection.html", **{'tool': 'GeneWeaver'}) return rendered_template + @app.route("/comparison_bar_chart", methods=('POST',)) def comp_bar_chart_page(): logger.info("In comp bar chart, request.form is:", pf(request.form)) @@ -620,26 +677,30 @@ def comp_bar_chart_page(): if traits[0] != "": template_vars = comparison_bar_chart.ComparisonBarChart(request.form) template_vars.js_data = json.dumps(template_vars.js_data, - default=json_default_handler, - indent=" ") + default=json_default_handler, + indent=" ") result = template_vars.__dict__ - rendered_template = render_template("comparison_bar_chart.html", **result) + rendered_template = render_template( + "comparison_bar_chart.html", **result) else: - rendered_template = render_template("empty_collection.html", **{'tool':'Comparison Bar Chart'}) + rendered_template = render_template( + "empty_collection.html", **{'tool': 'Comparison Bar Chart'}) return rendered_template + @app.route("/mapping_results_container") def mapping_results_container_page(): return render_template("mapping_results_container.html") + @app.route("/loading", methods=('POST',)) def loading_page(): logger.info(request.url) initial_start_vars = request.form start_vars_container = {} - n_samples = 0 #ZS: So it can be displayed on loading page + n_samples = 0 # ZS: So it can be displayed on loading page if 'wanted_inputs' in initial_start_vars: wanted = initial_start_vars['wanted_inputs'].split(",") start_vars = {} @@ -652,7 +713,8 @@ def loading_page(): else: sample_vals_dict = json.loads(start_vars['sample_vals']) if 'group' in start_vars: - dataset = create_dataset(start_vars['dataset'], group_name = start_vars['group']) + dataset = create_dataset( + start_vars['dataset'], group_name=start_vars['group']) else: dataset = create_dataset(start_vars['dataset']) samples = start_vars['primary_samples'].split(",") @@ -660,7 +722,8 @@ def loading_page(): if start_vars['genofile'] != "": genofile_string = start_vars['genofile'] dataset.group.genofile = genofile_string.split(":")[0] - genofile_samples = run_mapping.get_genofile_samplelist(dataset) + genofile_samples = run_mapping.get_genofile_samplelist( + dataset) if len(genofile_samples) > 1: samples = genofile_samples @@ -680,6 +743,7 @@ def loading_page(): return rendered_template + @app.route("/run_mapping", methods=('POST',)) def mapping_results_page(): initial_start_vars = request.form @@ -750,9 +814,10 @@ def mapping_results_page(): start_vars[key] = value version = "v3" - key = "mapping_results:{}:".format(version) + json.dumps(start_vars, sort_keys=True) + key = "mapping_results:{}:".format( + version) + json.dumps(start_vars, sort_keys=True) with Bench("Loading cache"): - result = None # Just for testing + result = None # Just for testing #result = Redis.get(key) #logger.info("************************ Starting result *****************") @@ -772,12 +837,12 @@ def mapping_results_page(): rendered_template = render_template("mapping_error.html") return rendered_template except: - rendered_template = 
render_template("mapping_error.html") - return rendered_template + rendered_template = render_template("mapping_error.html") + return rendered_template template_vars.js_data = json.dumps(template_vars.js_data, - default=json_default_handler, - indent=" ") + default=json_default_handler, + indent=" ") result = template_vars.__dict__ @@ -792,18 +857,22 @@ def mapping_results_page(): imgB64 = base64.b64encode(imgdata) bytesarray = array.array('B', imgB64) result['pair_scan_array'] = bytesarray - rendered_template = render_template("pair_scan_results.html", **result) + rendered_template = render_template( + "pair_scan_results.html", **result) else: - gn1_template_vars = display_mapping_results.DisplayMappingResults(result).__dict__ + gn1_template_vars = display_mapping_results.DisplayMappingResults( + result).__dict__ with Bench("Rendering template"): #if (gn1_template_vars['mapping_method'] == "gemma") or (gn1_template_vars['mapping_method'] == "plink"): #gn1_template_vars.pop('qtlresults', None) - rendered_template = render_template("mapping_results.html", **gn1_template_vars) + rendered_template = render_template( + "mapping_results.html", **gn1_template_vars) return rendered_template -@app.route("/export_mapping_results", methods = ('POST',)) + +@app.route("/export_mapping_results", methods=('POST',)) def export_mapping_results(): logger.info("request.form:", request.form) logger.info(request.url) @@ -811,32 +880,35 @@ def export_mapping_results(): results_csv = open(file_path, "r").read() response = Response(results_csv, mimetype='text/csv', - headers={"Content-Disposition":"attachment;filename=mapping_results.csv"}) + headers={"Content-Disposition": "attachment;filename=mapping_results.csv"}) return response -@app.route("/export_corr_matrix", methods = ('POST',)) + +@app.route("/export_corr_matrix", methods=('POST',)) def export_corr_matrix(): file_path = request.form.get("export_filepath") file_name = request.form.get("export_filename") results_csv = open(file_path, "r").read() response = Response(results_csv, mimetype='text/csv', - headers={"Content-Disposition":"attachment;filename=" + file_name + ".csv"}) + headers={"Content-Disposition": "attachment;filename=" + file_name + ".csv"}) return response -@app.route("/export", methods = ('POST',)) + +@app.route("/export", methods=('POST',)) def export(): logger.info("request.form:", request.form) logger.info(request.url) svg_xml = request.form.get("data", "Invalid data") filename = request.form.get("filename", "manhattan_plot_snp") response = Response(svg_xml, mimetype="image/svg+xml") - response.headers["Content-Disposition"] = "attachment; filename=%s"%filename + response.headers["Content-Disposition"] = "attachment; filename=%s" % filename return response -@app.route("/export_pdf", methods = ('POST',)) + +@app.route("/export_pdf", methods=('POST',)) def export_pdf(): import cairosvg logger.info("request.form:", request.form) @@ -846,9 +918,10 @@ def export_pdf(): filename = request.form.get("filename", "interval_map_pdf") pdf_file = cairosvg.svg2pdf(bytestring=svg_xml) response = Response(pdf_file, mimetype="application/pdf") - response.headers["Content-Disposition"] = "attachment; filename=%s"%filename + response.headers["Content-Disposition"] = "attachment; filename=%s" % filename return response + @app.route("/network_graph", methods=('POST',)) def network_graph_page(): logger.info("In network_graph, request.form is:", pf(request.form)) @@ -863,7 +936,8 @@ def network_graph_page(): return render_template("network_graph.html", 
**template_vars.__dict__) else: - return render_template("empty_collection.html", **{'tool':'Network Graph'}) + return render_template("empty_collection.html", **{'tool': 'Network Graph'}) + @app.route("/corr_compute", methods=('POST',)) def corr_compute_page(): @@ -872,6 +946,7 @@ def corr_compute_page(): template_vars = show_corr_results.CorrelationResults(request.form) return render_template("correlation_page.html", **template_vars.__dict__) + @app.route("/corr_matrix", methods=('POST',)) def corr_matrix_page(): logger.info("In corr_matrix, request.form is:", pf(request.form)) @@ -887,7 +962,8 @@ def corr_matrix_page(): return render_template("correlation_matrix.html", **template_vars.__dict__) else: - return render_template("empty_collection.html", **{'tool':'Correlation Matrix'}) + return render_template("empty_collection.html", **{'tool': 'Correlation Matrix'}) + @app.route("/corr_scatter_plot") def corr_scatter_plot_page(): @@ -898,6 +974,7 @@ def corr_scatter_plot_page(): indent=" ") return render_template("corr_scatterplot.html", **template_vars.__dict__) + @app.route("/snp_browser", methods=('GET',)) def snp_browser_page(): logger.info(request.url) @@ -905,12 +982,14 @@ def snp_browser_page(): return render_template("snp_browser.html", **template_vars.__dict__) + @app.route("/db_info", methods=('GET',)) def db_info_page(): template_vars = InfoPage(request.args) return render_template("info_page.html", **template_vars.__dict__) + @app.route("/snp_browser_table", methods=('GET',)) def snp_browser_table(): logger.info(request.url) @@ -924,30 +1003,36 @@ def snp_browser_table(): return flask.jsonify(current_page) + @app.route("/tutorial/WebQTLTour", methods=('GET',)) def tutorial_page(): - #ZS: Currently just links to GN1 + # ZS: Currently just links to GN1 logger.info(request.url) return redirect("http://gn1.genenetwork.org/tutorial/WebQTLTour/") + @app.route("/tutorial/security", methods=('GET',)) def security_tutorial_page(): - #ZS: Currently just links to GN1 + # ZS: Currently just links to GN1 logger.info(request.url) return render_template("admin/security_help.html") + @app.route("/submit_bnw", methods=('POST',)) def submit_bnw(): logger.info(request.url) - return render_template("empty_collection.html", **{'tool':'Correlation Matrix'}) + return render_template("empty_collection.html", **{'tool': 'Correlation Matrix'}) # Take this out or secure it before putting into production + + @app.route("/get_temp_data") def get_temp_data(): logger.info(request.url) temp_uuid = request.args['key'] return flask.jsonify(temp_data.TempData(temp_uuid).get_all()) + @app.route("/browser_input", methods=('GET',)) def browser_inputs(): """ Returns JSON from tmp directory for the purescript genome browser""" @@ -961,6 +1046,7 @@ def browser_inputs(): ########################################################################## + def json_default_handler(obj): """Based on http://stackoverflow.com/a/2680060/1175849""" # Handle datestamps -- cgit v1.2.3 From a68795a639a731e2bdae0a977bccb01f291fc9f7 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 28 Apr 2021 21:25:01 +0300 Subject: wqflask: views: Delete stale comments --- wqflask/wqflask/views.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 319f1270..28ab630f 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -285,11 +285,7 @@ def gsearch_updating(): logger.info(request.url) result = UpdateGSearch(request.args).__dict__ return 
result['results'] - # type = request.args['type'] - # if type == "gene": - # return render_template("gsearch_gene_updating.html", **result) - # elif type == "phenotype": - # return render_template("gsearch_pheno.html", **result) + @app.route("/docedit") def docedit(): @@ -864,8 +860,6 @@ def mapping_results_page(): result).__dict__ with Bench("Rendering template"): - #if (gn1_template_vars['mapping_method'] == "gemma") or (gn1_template_vars['mapping_method'] == "plink"): - #gn1_template_vars.pop('qtlresults', None) rendered_template = render_template( "mapping_results.html", **gn1_template_vars) -- cgit v1.2.3 From 6e8afe5a6871a680778bfaed6b79c934b33d0307 Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 28 Apr 2021 19:40:58 +0000 Subject: Account for temp traits when adding covariates with GEMMA mapping --- wqflask/wqflask/marker_regression/gemma_mapping.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/wqflask/wqflask/marker_regression/gemma_mapping.py b/wqflask/wqflask/marker_regression/gemma_mapping.py index 83ebcdf9..06c9300a 100644 --- a/wqflask/wqflask/marker_regression/gemma_mapping.py +++ b/wqflask/wqflask/marker_regression/gemma_mapping.py @@ -146,7 +146,12 @@ def gen_covariates_file(this_dataset, covariates, samples): for covariate in covariate_list: this_covariate_data = [] trait_name = covariate.split(":")[0] - dataset_ob = create_dataset(covariate.split(":")[1]) + dataset_name = covariate.split(":")[1] + if dataset_name == "Temp": + temp_group = trait_name.split("_")[2] + dataset_ob = create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = temp_group) + else: + dataset_ob = create_dataset(covariate.split(":")[1]) trait_ob = create_trait(dataset=dataset_ob, name=trait_name, cellid=None) -- cgit v1.2.3 From 65a2c8c1f455c5fad48fbac1a3e75310d49ed844 Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 28 Apr 2021 19:45:37 +0000 Subject: Added something to jsonable in trait.py to account for temp traits (this is necessary for them to show up correctly when selecting traits from collections in pop-up windows, like when selecting cofactors for mapping) --- wqflask/base/trait.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index df96d46e..b4b4452a 100644 --- a/wqflask/base/trait.py +++ b/wqflask/base/trait.py @@ -337,6 +337,10 @@ def jsonable(trait): dataset_name=dataset.shortname, location=trait.location_repr ) + elif dataset.name == "Temp": + return dict(name=trait.name, + dataset="Temp", + dataset_name="Temp") else: return dict() -- cgit v1.2.3 From 90a6a950fba0bb463cff9669f04a87cab8efb36c Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Thu, 29 Apr 2021 11:49:42 +0300 Subject: wqflask: views: Replace dict de-structuring with kw arguments * wqflask/wqflask/views.py (submit_trait_form): Use kw arguments when passing variables to the template. This is more readable. 
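A small illustration of the readability point made in this commit message, not the patched code itself: `render_template` below is a local stand-in with Flask-like keyword handling, and the argument values are placeholders. Unpacking an inline dict with `**{...}` and passing plain keyword arguments produce exactly the same call; the keyword form is simply easier to read at the call site.

    def render_template(template_name, **context):
        # Stand-in: report what a real template would receive.
        return template_name, dict(sorted(context.items()))

    # Old style: de-structure an inline dict into keyword arguments.
    old = render_template("submit_trait.html",
                          **{"species_and_groups": ["mouse"], "version": "placeholder"})

    # New style: pass the keyword arguments directly.
    new = render_template("submit_trait.html",
                          species_and_groups=["mouse"], version="placeholder")

    assert old == new   # identical behaviour, clearer call site
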
--- wqflask/wqflask/views.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 28ab630f..38c56b71 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -394,9 +394,9 @@ def submit_trait_form(): species_and_groups = get_species_groups() return render_template( "submit_trait.html", - **{'species_and_groups': species_and_groups, - 'gn_server_url': GN_SERVER_URL, - 'version': GN_VERSION}) + species_and_groups=species_and_groups, + gn_server_url=GN_SERVER_URL, + version=GN_VERSION) @app.route("/create_temp_trait", methods=('POST',)) -- cgit v1.2.3 From e048c6321b37a1c05adbfb6754513d7920b10dd8 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Thu, 29 Apr 2021 12:45:28 +0300 Subject: templates: edit_trait.html: New file Copy of submit_trait.html. This is a copy; that'll form the basis of the new edit page. --- wqflask/wqflask/templates/edit_trait.html | 111 ++++++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) create mode 100644 wqflask/wqflask/templates/edit_trait.html diff --git a/wqflask/wqflask/templates/edit_trait.html b/wqflask/wqflask/templates/edit_trait.html new file mode 100644 index 00000000..3572b0a9 --- /dev/null +++ b/wqflask/wqflask/templates/edit_trait.html @@ -0,0 +1,111 @@ +{% extends "base.html" %} +{% block title %}Trait Submission{% endblock %} +{% block content %} + +
    +
    + + {{ flash_me() }} + +
    +
    +
    +
    +

    Introduction

    +
    +

    The trait values that you enter are statistically compared with verified genotypes collected at a set of microsatellite markers in each RI set. The markers are drawn from a set of over 750, but for each set redundant markers have been removed, preferentially retaining those that are most informative.

    +

    These error-checked RI mapping data match theoretical expectations for RI strain sets. The cumulative adjusted length of the RI maps are approximately 1400 cM, a value that matches those of both MIT maps and Chromosome Committee Report maps. See our full description of the genetic data collected as part of the WebQTL project.

    +
    +
    +
    +
    +
    +

    About Your Data

    +
    +

    You can open a separate window giving the number of strains for each data set and sample data.

    +

    None of your submitted data is copied or stored by this system except during the actual processing of your submission. By the time the reply page displays in your browser, your submission has been cleared from this system.

    +
    +
    +
    +
    +
    +
    +

    Trait Submission Form

    +
    +
    +

    1. Choose Species and Group:

    +
    +
    + +
    +
    +
    + +
    + +
    +
    +
    + +
    + +
    +
    +
    +
    +
    +

    2. Enter Trait Data:

    +

    File uploading isn't enabled yet, but is coming soon.

    +
    +
    + +
    +
    +
    +

    + Paste or Type Multiple Values: You can enter data by pasting a series of numbers representing trait values into this area. + The values can be on one line separated by spaces or tabs, or they can be on separate lines. Include one value for each individual + or line. Use an "x" for missing values. If you have chosen a set of inbred strains, then your data will be displayed in a form in + which you can confirm and/or edit. If you enter a file name in the previous section, + any data that you paste here will be ignored. Check sample data for the correct format. +

    + +
    +
    +
    + + +
    +
    +
    +

    3. Enable Use of Trait Variance:

    +
    + +
    +
    +
    +

    + Name Your Trait: (optional) +

    + +
    +
    +
    + + +
    +
    +
    +
    +
    +
    +
    + +{%endblock%} + +{% block js %} + + +{% endblock %} -- cgit v1.2.3 From 442fc67ba1d66dd7931e20d161b40451b4e4b8c8 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Thu, 29 Apr 2021 13:17:34 +0300 Subject: wqflask: views: Add new function for selecting a dataset * wqflask/wqflask/views.py(edit_trait_page): New function. --- wqflask/wqflask/views.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 38c56b71..81ccaf7d 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -399,6 +399,16 @@ def submit_trait_form(): version=GN_VERSION) +@app.route("/edit_trait_form") +def edit_trait_page(): + species_and_groups = get_species_groups() + return render_template( + "edit_trait.html", + species_and_groups=species_and_groups, + gn_server_url=GN_SERVER_URL, + version=GN_VERSION) + + @app.route("/create_temp_trait", methods=('POST',)) def create_temp_trait(): logger.info(request.url) -- cgit v1.2.3 From 1e2e5b23ec30937f200778524f446515a5f83fb3 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Thu, 29 Apr 2021 13:18:50 +0300 Subject: templates: edit_trait.html: Add comment headers The comment headers mark different sections. --- wqflask/wqflask/templates/edit_trait.html | 2 ++ 1 file changed, 2 insertions(+) diff --git a/wqflask/wqflask/templates/edit_trait.html b/wqflask/wqflask/templates/edit_trait.html index 3572b0a9..82d7120d 100644 --- a/wqflask/wqflask/templates/edit_trait.html +++ b/wqflask/wqflask/templates/edit_trait.html @@ -39,12 +39,14 @@
    +
    +
    -- cgit v1.2.3 From ba8cd37b5cd7e038e31c879fbbc38939fa093acf Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Thu, 29 Apr 2021 13:19:29 +0300 Subject: templates: edit_trait.html: Add extra fields for the form Add form elements for selecting group, type, and dataset. This mimics the home page. --- wqflask/wqflask/templates/edit_trait.html | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/wqflask/wqflask/templates/edit_trait.html b/wqflask/wqflask/templates/edit_trait.html index 82d7120d..4c9b3ba1 100644 --- a/wqflask/wqflask/templates/edit_trait.html +++ b/wqflask/wqflask/templates/edit_trait.html @@ -53,6 +53,20 @@
    + +
    + +
    + +
    +
    + +
    + +
    + +
    +
    -- cgit v1.2.3 From 43ed9d7e5c22de2a457457e75b8153f6c2e1488c Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Thu, 29 Apr 2021 14:57:03 +0300 Subject: templates: edit_trait.html: Trim form to only contain selection menu --- wqflask/wqflask/templates/edit_trait.html | 45 ++----------------------------- 1 file changed, 2 insertions(+), 43 deletions(-) diff --git a/wqflask/wqflask/templates/edit_trait.html b/wqflask/wqflask/templates/edit_trait.html index 4c9b3ba1..14f44698 100644 --- a/wqflask/wqflask/templates/edit_trait.html +++ b/wqflask/wqflask/templates/edit_trait.html @@ -30,14 +30,11 @@
    -

    Trait Submission Form

    +

    Edit Trait Form


    -

    1. Choose Species and Group:

    +

    Choose Dataset to Edit:


    -
    - -
    @@ -68,44 +65,6 @@
    -
    -
    -

    2. Enter Trait Data:

    -

    File uploading isn't enabled yet, but is coming soon.

    -
    -
    - -
    -
    -
    -

    - Paste or Type Multiple Values: You can enter data by pasting a series of numbers representing trait values into this area. - The values can be on one line separated by spaces or tabs, or they can be on separate lines. Include one value for each individual - or line. Use an "x" for missing values. If you have chosen a set of inbred strains, then your data will be displayed in a form in - which you can confirm and/or edit. If you enter a file name in the previous section, - any data that you paste here will be ignored. Check sample data for the correct format. -

    - -
    -
    -
    - - -
    -
    -
    -

    3. Enable Use of Trait Variance:

    -
    - -
    -
    -
    -

    - Name Your Trait: (optional) -

    - -
    -
    -- cgit v1.2.3 From 99f8349b8cda4a3b17380c913207fc8395481653 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Thu, 29 Apr 2021 15:25:18 +0300 Subject: wqflask: Add new js file for processing menu entries * wqflask/wqflask/static/new/javascript/dataset_select_menu_edit_trait.js: New file. Copy of dataset_select_menu_orig.js that will later modified... * wqflask/wqflask/templates/edit_trait.html: Use :point_up:. --- .../javascript/dataset_select_menu_edit_trait.js | 253 +++++++++++++++++++++ wqflask/wqflask/templates/edit_trait.html | 4 +- 2 files changed, 255 insertions(+), 2 deletions(-) create mode 100644 wqflask/wqflask/static/new/javascript/dataset_select_menu_edit_trait.js diff --git a/wqflask/wqflask/static/new/javascript/dataset_select_menu_edit_trait.js b/wqflask/wqflask/static/new/javascript/dataset_select_menu_edit_trait.js new file mode 100644 index 00000000..1d4a94d9 --- /dev/null +++ b/wqflask/wqflask/static/new/javascript/dataset_select_menu_edit_trait.js @@ -0,0 +1,253 @@ +var apply_default, check_search_term, dataset_info, group_info, make_default, open_window, populate_dataset, populate_group, populate_species, populate_type, process_json, redo_dropdown; +process_json = function(data) { + window.jdata = data; + populate_species(); + if ($('#type').length > 0) { //This is to determine if it's the index page or the submit_trait page (which only has species and group selection and no make default option) + return apply_default(); + } +}; + +$.ajax('/api/v_pre1/gen_dropdown', { + dataType: 'json', + success: process_json +}); + +populate_species = function() { + var species_list; + species_list = this.jdata.species; + redo_dropdown($('#species'), species_list); + return populate_group(); +}; +window.populate_species = populate_species; +populate_group = function() { + var group_list, species; + console.log("in populate group"); + species = $('#species').val(); + group_list = this.jdata.groups[species]; + for (_i = 0, _len = group_list.length; _i < (_len - 1); _i++) { + if (group_list[_i][0] == "BXD300"){ + group_list.splice(_i, 1) + } + } + redo_dropdown($('#group'), group_list); + if ($('#type').length > 0) { //This is to determine if it's the index page or the submit_trait page (which only has species and group selection and no make default option) + return populate_type(); + } +}; +window.populate_group = populate_group; +populate_type = function() { + var group, species, type_list; + console.log("in populate type"); + species = $('#species').val(); + group = $('#group').val(); + type_list = this.jdata.types[species][group]; + redo_dropdown($('#type'), type_list); + return populate_dataset(); +}; +window.populate_type = populate_type; +populate_dataset = function() { + var dataset_list, group, species, type; + console.log("in populate dataset"); + species = $('#species').val(); + group = $('#group').val(); + type = $('#type').val(); + console.log("sgt:", species, group, type); + dataset_list = this.jdata.datasets[species][group][type]; + console.log("pop_dataset:", dataset_list); + return redo_dropdown($('#dataset'), dataset_list); +}; +window.populate_dataset = populate_dataset; +redo_dropdown = function(dropdown, items) { + var item, _i, _len, _results; + console.log("in redo:", dropdown, items); + dropdown.empty(); + _results = []; + + if (dropdown.attr('id') == "group"){ + group_family_list = []; + for (_i = 0, _len = items.length; _i < _len; _i++) { + item = items[_i]; + group_family = item[2].toString().split(":")[1] + group_family_list.push([item[0], item[1], 
group_family]) + } + + current_family = "" + this_opt_group = null + for (_i = 0, _len = group_family_list.length; _i < _len; _i++) { + item = group_family_list[_i]; + if (item[2] != "None" && current_family == ""){ + current_family = item[2] + this_opt_group = $("") + this_opt_group.append($("") + this_opt_group.append($("") + this_opt_group.append($("") + this_opt_group.append($("
    @@ -247,6 +247,7 @@ {% if geno_db_exists == "True" %}{% endif %} +
    @@ -612,7 +613,7 @@ return $('#marker_regression_form').submit(); } - $('#export_mapping_results').click(export_mapping_results); + $('.export_mapping_results').click(export_mapping_results); $('#browser_tab').click(function() { $('#gn1_map_options').css("display", "none") -- cgit v1.2.3 From 1e11d0ea0fd68a7b16dfb208217a442ad95909e9 Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 5 May 2021 18:18:20 +0000 Subject: Added a number of new metadata items to the mapping loading page, such as covariates and permutation/bootstrap numbers --- wqflask/wqflask/templates/loading.html | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/wqflask/wqflask/templates/loading.html b/wqflask/wqflask/templates/loading.html index 926f258d..69610f34 100644 --- a/wqflask/wqflask/templates/loading.html +++ b/wqflask/wqflask/templates/loading.html @@ -15,12 +15,31 @@ n = {{ start_vars.n_samples }}
    Method = {% if start_vars.method == "gemma" %}GEMMA{% else %}{{ start_vars.method }}{% endif %} + {% if start_vars.num_perm | int > 0 %}
    + # Permutations = {{ start_vars.num_perm }} + {% endif %} + {% if start_vars.num_bootstrap | int > 0 %} +
    + # Bootstrap = {{ start_vars.num_bootstrap }} + {% endif %} {% if start_vars.transform != "" %} - transform = {{ start_vars.transform }}
    + transform = {{ start_vars.transform }} {% endif %} + {% if start_vars.maf != "" and start_vars.method != "reaper" %} +
    MAF >= {{ start_vars.maf }} + {% endif %} + {% if start_vars.covariates != "" and start_vars.method != "reaper" %} +
    + {% set covariate_list = start_vars.covariates.split(",") %} + Trait Covariates: {% for covariate in covariate_list %}{% set this_covariate = covariate.split(":")[0] %}{{ this_covariate }}{% if not loop.last %}, {% endif %}{% endfor %} + {% endif %} + {% if start_vars.control_marker != "" and start_vars.do_control == "true" and start_vars.method != "gemma" %} +
    + Marker Covariate: {{ start_vars.control_marker }} + {% endif %} {% else %}
    Loading {{ start_vars.tool_used }} Results...
    {% endif %} -- cgit v1.2.3 From 6919762a2e0bce1afea0e979cb60a5c4b1dceacf Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 5 May 2021 18:19:41 +0000 Subject: Removed an unused item from mapping_input_list --- wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js | 2 +- wqflask/wqflask/templates/mapping_results.html | 2 +- wqflask/wqflask/views.py | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js b/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js index 53bcd1f6..09e9d024 100644 --- a/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js +++ b/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js @@ -143,7 +143,7 @@ $('input[name=display_all]').change((function(_this) { //ZS: This is a list of inputs to be passed to the loading page, since not all inputs on the trait page are relevant to mapping var mapping_input_list = ['temp_uuid', 'trait_id', 'dataset', 'tool_used', 'form_url', 'method', 'transform', 'trimmed_markers', 'selected_chr', 'chromosomes', 'mapping_scale', 'sample_vals', 'score_type', 'suggestive', 'significant', 'num_perm', 'permCheck', 'perm_output', 'perm_strata', 'categorical_vars', 'num_bootstrap', 'bootCheck', 'bootstrap_results', - 'LRSCheck', 'covariates', 'maf', 'use_loco', 'manhattan_plot', 'control_marker', 'control_marker_db', 'do_control', 'genofile', + 'LRSCheck', 'covariates', 'maf', 'use_loco', 'manhattan_plot', 'control_marker', 'do_control', 'genofile', 'pair_scan', 'startMb', 'endMb', 'graphWidth', 'lrsMax', 'additiveCheck', 'showSNP', 'showGenes', 'viewLegend', 'haplotypeAnalystCheck', 'mapmethod_rqtl_geno', 'mapmodel_rqtl_geno', 'temp_trait', 'group', 'species', 'reaper_version', 'primary_samples'] diff --git a/wqflask/wqflask/templates/mapping_results.html b/wqflask/wqflask/templates/mapping_results.html index edc30164..f054506c 100644 --- a/wqflask/wqflask/templates/mapping_results.html +++ b/wqflask/wqflask/templates/mapping_results.html @@ -481,7 +481,7 @@ var mapping_input_list = ['temp_uuid', 'trait_id', 'dataset', 'tool_used', 'form_url', 'method', 'transform', 'trimmed_markers', 'selected_chr', 'chromosomes', 'mapping_scale', 'sample_vals', 'score_type', 'suggestive', 'significant', 'num_perm', 'permCheck', 'perm_output', 'perm_strata', 'categorical_vars', 'num_bootstrap', 'bootCheck', 'bootstrap_results', - 'LRSCheck', 'covariates', 'maf', 'use_loco', 'manhattan_plot', 'color_scheme', 'manhattan_single_color', 'control_marker', 'control_marker_db', 'do_control', 'genofile', + 'LRSCheck', 'covariates', 'maf', 'use_loco', 'manhattan_plot', 'color_scheme', 'manhattan_single_color', 'control_marker', 'do_control', 'genofile', 'pair_scan', 'startMb', 'endMb', 'graphWidth', 'lrsMax', 'additiveCheck', 'showSNP', 'showGenes', 'viewLegend', 'haplotypeAnalystCheck', 'mapmethod_rqtl_geno', 'mapmodel_rqtl_geno', 'temp_trait', 'group', 'species', 'reaper_version', 'primary_samples', 'n_samples'] diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index f9b8f310..276d3019 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -794,7 +794,6 @@ def mapping_results_page(): 'color_scheme', 'manhattan_single_color', 'control_marker', - 'control_marker_db', 'do_control', 'genofile', 'genofile_string', -- cgit v1.2.3 From 1dd80ec2767c1de0028ab720c7ef4caae834a99c Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 5 May 2021 18:23:09 +0000 Subject: Added genotype file to mapping loading page --- 
wqflask/wqflask/templates/loading.html | 3 +++ 1 file changed, 3 insertions(+) diff --git a/wqflask/wqflask/templates/loading.html b/wqflask/wqflask/templates/loading.html index 69610f34..7344b918 100644 --- a/wqflask/wqflask/templates/loading.html +++ b/wqflask/wqflask/templates/loading.html @@ -15,6 +15,9 @@ n = {{ start_vars.n_samples }}
    Method = {% if start_vars.method == "gemma" %}GEMMA{% else %}{{ start_vars.method }}{% endif %} +
    + {% set genofile_desc = start_vars.genofile.split(":")[1] %} + Genotype File = {{ genofile_desc }} {% if start_vars.num_perm | int > 0 %}
    # Permutations = {{ start_vars.num_perm }} -- cgit v1.2.3 From a25ca626b7a6e1ca75d1fa0a830705e1df4d1e6c Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 5 May 2021 18:32:18 +0000 Subject: Added whether GEMMA is using LOCO to mapping loading page --- wqflask/wqflask/templates/loading.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/templates/loading.html b/wqflask/wqflask/templates/loading.html index 7344b918..124f0608 100644 --- a/wqflask/wqflask/templates/loading.html +++ b/wqflask/wqflask/templates/loading.html @@ -14,7 +14,7 @@
    n = {{ start_vars.n_samples }}
    - Method = {% if start_vars.method == "gemma" %}GEMMA{% else %}{{ start_vars.method }}{% endif %} + Method = {% if start_vars.method == "gemma" %}GEMMA {% if start_vars.use_loco == "True" %}using LOCO {% endif %}{% else %}{{ start_vars.method }}{% endif %}
    {% set genofile_desc = start_vars.genofile.split(":")[1] %} Genotype File = {{ genofile_desc }} -- cgit v1.2.3 From 6202ef9c4016d08cecf75fd8217c9b53d18ed201 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 5 May 2021 15:58:41 +0300 Subject: wqflask: database: Remove logging --- wqflask/wqflask/database.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/wqflask/wqflask/database.py b/wqflask/wqflask/database.py index e743c4b3..42fa1594 100644 --- a/wqflask/wqflask/database.py +++ b/wqflask/wqflask/database.py @@ -5,9 +5,6 @@ from sqlalchemy.orm import scoped_session, sessionmaker from sqlalchemy.ext.declarative import declarative_base from utility.tools import SQL_URI -import utility.logger -logger = utility.logger.getLogger(__name__) - engine = create_engine(SQL_URI, encoding="latin1") @@ -23,10 +20,8 @@ def init_db(): # they will be registered properly on the metadata. Otherwise # you will have to import them first before calling init_db() #import yourapplication.models - logger.info("Initializing database connection") import wqflask.model Base.metadata.create_all(bind=engine) - logger.info("Done creating all model metadata") init_db() -- cgit v1.2.3 From 773c758efb1a48f97f85ac1e43e91355f67bde72 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 5 May 2021 16:25:41 +0300 Subject: wqflask: user_manager: Delete usermanager.py and it's references This module is not used anywhere. --- test/requests/test_forgot_password.py | 4 +- test/requests/test_login_local.py | 3 - wqflask/wqflask/user_manager.py | 1095 --------------------------------- 3 files changed, 1 insertion(+), 1101 deletions(-) delete mode 100644 wqflask/wqflask/user_manager.py diff --git a/test/requests/test_forgot_password.py b/test/requests/test_forgot_password.py index 2bf34c5c..346524bc 100644 --- a/test/requests/test_forgot_password.py +++ b/test/requests/test_forgot_password.py @@ -1,5 +1,4 @@ import requests -from wqflask import user_manager from utility.elasticsearch_tools import get_user_by_unique_column from parameterized import parameterized from parametrized_test import ParametrizedTest @@ -27,8 +26,7 @@ class TestForgotPassword(ParametrizedTest): "password": "test_password", "password_confirm": "test_password" } - user_manager.basic_info = lambda : { "basic_info": "basic" } - user_manager.RegisterUser(data) + def testWithoutEmail(self): data = {"email_address": ""} diff --git a/test/requests/test_login_local.py b/test/requests/test_login_local.py index 808649ca..6691d135 100644 --- a/test/requests/test_login_local.py +++ b/test/requests/test_login_local.py @@ -1,5 +1,4 @@ import requests -from wqflask import user_manager from parameterized import parameterized from parametrized_test import ParametrizedTest @@ -19,8 +18,6 @@ class TestLoginLocal(ParametrizedTest): "password": "test_password", "password_confirm": "test_password" } - user_manager.basic_info = lambda : { "basic_info": "basic" } - user_manager.RegisterUser(data) @parameterized.expand([ diff --git a/wqflask/wqflask/user_manager.py b/wqflask/wqflask/user_manager.py deleted file mode 100644 index cf84ea73..00000000 --- a/wqflask/wqflask/user_manager.py +++ /dev/null @@ -1,1095 +0,0 @@ -import os -import hashlib -import datetime -import time -import uuid -import hmac -import base64 -import redis # used for collections -import simplejson as json -import requests - -from base.data_set import create_datasets_list - -from flask import g -from flask import render_template -from flask import url_for -from flask import request -from flask import 
make_response -from flask import redirect -from flask import flash - -from wqflask import app -from wqflask import pbkdf2 # password hashing -from wqflask.database import db_session -from wqflask import model - -from smtplib import SMTP - -from pprint import pformat as pf - -from utility import Bunch -from utility import Struct -from utility.logger import getLogger - -from utility.redis_tools import get_user_id -from utility.redis_tools import get_user_by_unique_column -from utility.redis_tools import set_user_attribute -from utility.redis_tools import save_user -from utility.redis_tools import save_verification_code -from utility.redis_tools import check_verification_code -from utility.redis_tools import get_user_collections -from utility.redis_tools import save_collections - -from utility.tools import SMTP_CONNECT -from utility.tools import SMTP_USERNAME -from utility.tools import SMTP_PASSWORD - - -logger = getLogger(__name__) - - -Redis = redis.StrictRedis() - -THREE_DAYS = 60 * 60 * 24 * 3 - - -def timestamp(): - return datetime.datetime.utcnow().isoformat() - - -class AnonUser: - """Anonymous user handling""" - cookie_name = 'anon_user_v1' - - def __init__(self): - self.cookie = request.cookies.get(self.cookie_name) - if self.cookie: - logger.debug("ANON COOKIE ALREADY EXISTS") - self.anon_id = verify_cookie(self.cookie) - else: - logger.debug("CREATING NEW ANON COOKIE") - self.anon_id, self.cookie = create_signed_cookie() - - self.key = "anon_collection:v1:{}".format(self.anon_id) - - def add_collection(self, new_collection): - collection_dict = dict(name=new_collection.name, - created_timestamp=datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'), - changed_timestamp=datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'), - num_members=new_collection.num_members, - members=new_collection.get_members()) - - Redis.set(self.key, json.dumps(collection_dict)) - Redis.expire(self.key, 60 * 60 * 24 * 365) - - def delete_collection(self, collection_name): - existing_collections = self.get_collections() - updated_collections = [] - for i, collection in enumerate(existing_collections): - if collection['name'] == collection_name: - continue - else: - this_collection = {} - this_collection['id'] = collection['id'] - this_collection['name'] = collection['name'] - this_collection['created_timestamp'] = collection['created_timestamp'].strftime( - '%b %d %Y %I:%M%p') - this_collection['changed_timestamp'] = collection['changed_timestamp'].strftime( - '%b %d %Y %I:%M%p') - this_collection['num_members'] = collection['num_members'] - this_collection['members'] = collection['members'] - updated_collections.append(this_collection) - - Redis.set(self.key, json.dumps(updated_collections)) - - def get_collections(self): - json_collections = Redis.get(self.key) - if json_collections == None or json_collections == "None": - return [] - else: - collections = json.loads(json_collections) - for collection in collections: - collection['created_timestamp'] = datetime.datetime.strptime( - collection['created_timestamp'], '%b %d %Y %I:%M%p') - collection['changed_timestamp'] = datetime.datetime.strptime( - collection['changed_timestamp'], '%b %d %Y %I:%M%p') - - collections = sorted( - collections, key=lambda i: i['changed_timestamp'], reverse=True) - return collections - - def import_traits_to_user(self): - result = Redis.get(self.key) - collections_list = json.loads(result if result else "[]") - for collection in collections_list: - collection_exists = g.user_session.get_collection_by_name( - 
collection['name']) - if collection_exists: - continue - else: - g.user_session.add_collection( - collection['name'], collection['members']) - - def display_num_collections(self): - """ - Returns the number of collections or a blank string if there are zero. - - Because this is so unimportant...we wrap the whole thing in a try/expect...last thing we - want is a webpage not to be displayed because of an error here - - Importand TODO: use redis to cache this, don't want to be constantly computing it - """ - try: - num = len(self.get_collections()) - if num > 0: - return num - else: - return "" - except Exception as why: - print("Couldn't display_num_collections:", why) - return "" - - -def verify_cookie(cookie): - the_uuid, separator, the_signature = cookie.partition(':') - assert len(the_uuid) == 36, "Is session_id a uuid?" - assert separator == ":", "Expected a : here" - assert the_signature == actual_hmac_creation( - the_uuid), "Uh-oh, someone tampering with the cookie?" - return the_uuid - - -def create_signed_cookie(): - the_uuid = str(uuid.uuid4()) - signature = actual_hmac_creation(the_uuid) - uuid_signed = the_uuid + ":" + signature - logger.debug("uuid_signed:", uuid_signed) - return the_uuid, uuid_signed - - -class UserSession: - """Logged in user handling""" - - cookie_name = 'session_id_v1' - - def __init__(self): - cookie = request.cookies.get(self.cookie_name) - if not cookie: - logger.debug("NO USER COOKIE") - self.logged_in = False - return - else: - session_id = verify_cookie(cookie) - - self.redis_key = self.cookie_name + ":" + session_id - logger.debug("self.redis_key is:", self.redis_key) - self.session_id = session_id - self.record = Redis.hgetall(self.redis_key) - - if not self.record: - # This will occur, for example, when the browser has been left open over a long - # weekend and the site hasn't been visited by the user - self.logged_in = False - - # Grrr...this won't work because of the way flask handles cookies - # Delete the cookie - #response = make_response(redirect(url_for('login'))) - #response.set_cookie(self.cookie_name, '', expires=0) - # flash( - # "Due to inactivity your session has expired. If you'd like please login again.") - # return response - return - - if Redis.ttl(self.redis_key) < THREE_DAYS: - # (Almost) everytime the user does something we extend the session_id in Redis... 
- logger.debug("Extending ttl...") - Redis.expire(self.redis_key, THREE_DAYS) - - logger.debug("record is:", self.record) - self.logged_in = True - - @property - def user_id(self): - """Shortcut to the user_id""" - if 'user_id' in self.record: - return self.record['user_id'] - else: - return '' - - @property - def redis_user_id(self): - """User id from ElasticSearch (need to check if this is the same as the id stored in self.records)""" - - user_email = self.record['user_email_address'] - - # ZS: Get user's collections if they exist - user_id = None - user_id = get_user_id("email_address", user_email) - return user_id - - @property - def user_name(self): - """Shortcut to the user_name""" - if 'user_name' in self.record: - return self.record['user_name'] - else: - return '' - - @property - def user_collections(self): - """List of user's collections""" - - # ZS: Get user's collections if they exist - collections = get_user_collections(self.redis_user_id) - return collections - - @property - def num_collections(self): - """Number of user's collections""" - - return len(self.user_collections) - - def add_collection(self, collection_name, traits): - """Add collection into ElasticSearch""" - - collection_dict = {'id': str(uuid.uuid4()), - 'name': collection_name, - 'created_timestamp': datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'), - 'changed_timestamp': datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'), - 'num_members': len(traits), - 'members': list(traits)} - - current_collections = self.user_collections - current_collections.append(collection_dict) - self.update_collections(current_collections) - - return collection_dict['id'] - - def delete_collection(self, collection_id): - """Remove collection with given ID""" - - updated_collections = [] - for collection in self.user_collections: - if collection['id'] == collection_id: - continue - else: - updated_collections.append(collection) - - self.update_collections(updated_collections) - - return collection['name'] - - def add_traits_to_collection(self, collection_id, traits_to_add): - """Add specified traits to a collection""" - - this_collection = self.get_collection_by_id(collection_id) - - updated_collection = this_collection - updated_traits = this_collection['members'] + traits_to_add - - updated_collection['members'] = updated_traits - updated_collection['num_members'] = len(updated_traits) - updated_collection['changed_timestamp'] = datetime.datetime.utcnow().strftime( - '%b %d %Y %I:%M%p') - - updated_collections = [] - for collection in self.user_collections: - if collection['id'] == collection_id: - updated_collections.append(updated_collection) - else: - updated_collections.append(collection) - - self.update_collections(updated_collections) - - def remove_traits_from_collection(self, collection_id, traits_to_remove): - """Remove specified traits from a collection""" - - this_collection = self.get_collection_by_id(collection_id) - - updated_collection = this_collection - updated_traits = [] - for trait in this_collection['members']: - if trait in traits_to_remove: - continue - else: - updated_traits.append(trait) - - updated_collection['members'] = updated_traits - updated_collection['num_members'] = len(updated_traits) - updated_collection['changed_timestamp'] = datetime.datetime.utcnow().strftime( - '%b %d %Y %I:%M%p') - - updated_collections = [] - for collection in self.user_collections: - if collection['id'] == collection_id: - updated_collections.append(updated_collection) - else: - 
updated_collections.append(collection) - - self.update_collections(updated_collections) - - return updated_traits - - def get_collection_by_id(self, collection_id): - for collection in self.user_collections: - if collection['id'] == collection_id: - return collection - - def get_collection_by_name(self, collection_name): - for collection in self.user_collections: - if collection['name'] == collection_name: - return collection - - return None - - def update_collections(self, updated_collections): - collection_body = json.dumps(updated_collections) - - save_collections(self.redis_user_id, collection_body) - - def delete_session(self): - # And more importantly delete the redis record - Redis.delete(self.cookie_name) - logger.debug("At end of delete_session") - - -@app.before_request -def get_cookie(): - logger.info("@app.before_request get cookie") - g.user_session = UserSession() - g.cookie_session = AnonUser() - -# @app.after_request - - -def set_cookie(response): - if not request.cookies.get(g.cookie_session.cookie_name): - response.set_cookie(g.cookie_session.cookie_name, - g.cookie_session.cookie) - return response - - -class UsersManager: - def __init__(self): - self.users = model.User.query.all() - logger.debug("Users are:", self.users) - - -class UserManager: - def __init__(self, kw): - self.user_id = kw['user_id'] - logger.debug("In UserManager locals are:", pf(locals())) - #self.user = model.User.get(user_id) - #logger.debug("user is:", user) - self.user = model.User.query.get(self.user_id) - logger.debug("user is:", self.user) - datasets = create_datasets_list() - for dataset in datasets: - if not dataset.check_confidentiality(): - continue - logger.debug("\n Name:", dataset.name) - logger.debug(" Type:", dataset.type) - logger.debug(" ID:", dataset.id) - logger.debug(" Confidential:", dataset.check_confidentiality()) - #logger.debug(" ---> self.datasets:", self.datasets) - - -class RegisterUser: - def __init__(self, kw): - self.thank_you_mode = False - self.errors = [] - self.user = Bunch() - - self.user.email_address = kw.get( - 'email_address', '').encode("utf-8").strip() - if not (5 <= len(self.user.email_address) <= 50): - self.errors.append( - 'Email Address needs to be between 5 and 50 characters.') - else: - email_exists = get_user_by_unique_column( - "email_address", self.user.email_address) - #email_exists = get_user_by_unique_column(es, "email_address", self.user.email_address) - if email_exists: - self.errors.append('User already exists with that email') - - self.user.full_name = kw.get('full_name', '').encode("utf-8").strip() - if not (5 <= len(self.user.full_name) <= 50): - self.errors.append( - 'Full Name needs to be between 5 and 50 characters.') - - self.user.organization = kw.get( - 'organization', '').encode("utf-8").strip() - if self.user.organization and not (5 <= len(self.user.organization) <= 50): - self.errors.append( - 'Organization needs to be empty or between 5 and 50 characters.') - - password = str(kw.get('password', '')) - if not (6 <= len(password)): - self.errors.append('Password needs to be at least 6 characters.') - - if kw.get('password_confirm') != password: - self.errors.append("Passwords don't match.") - - if self.errors: - return - - logger.debug("No errors!") - - set_password(password, self.user) - self.user.user_id = str(uuid.uuid4()) - self.user.confirmed = 1 - - self.user.registration_info = json.dumps(basic_info(), sort_keys=True) - save_user(self.user.__dict__, self.user.user_id) - - -def set_password(password, user): - pwfields = 
Bunch() - - pwfields.algorithm = "pbkdf2" - pwfields.hashfunc = "sha256" - #hashfunc = getattr(hashlib, pwfields.hashfunc) - - # Encoding it to base64 makes storing it in json much easier - pwfields.salt = base64.b64encode(os.urandom(32)) - - # https://forums.lastpass.com/viewtopic.php?t=84104 - pwfields.iterations = 100000 - pwfields.keylength = 32 - - pwfields.created_ts = timestamp() - # One more check on password length - assert len(password) >= 6, "Password shouldn't be so short here" - - logger.debug("pwfields:", vars(pwfields)) - logger.debug("locals:", locals()) - - enc_password = Password(password, - pwfields.salt, - pwfields.iterations, - pwfields.keylength, - pwfields.hashfunc) - - pwfields.password = enc_password.password - pwfields.encrypt_time = enc_password.encrypt_time - - user.password = json.dumps(pwfields.__dict__, - sort_keys=True, - ) - - -class VerificationEmail: - template_name = "email/verification.txt" - key_prefix = "verification_code" - subject = "GeneNetwork email verification" - - def __init__(self, user): - verification_code = str(uuid.uuid4()) - key = self.key_prefix + ":" + verification_code - - data = json.dumps(dict(id=user.user_id, - timestamp=timestamp()) - ) - - Redis.set(key, data) - #two_days = 60 * 60 * 24 * 2 - Redis.expire(key, THREE_DAYS) - to = user.email_address - subject = self.subject - body = render_template(self.template_name, - verification_code=verification_code) - send_email(to, subject, body) - - -class ForgotPasswordEmail(VerificationEmail): - template_name = "email/forgot_password.txt" - key_prefix = "forgot_password_code" - subject = "GeneNetwork password reset" - fromaddr = "no-reply@genenetwork.org" - - def __init__(self, toaddr): - from email.MIMEMultipart import MIMEMultipart - from email.MIMEText import MIMEText - verification_code = str(uuid.uuid4()) - key = self.key_prefix + ":" + verification_code - - data = { - "verification_code": verification_code, - "email_address": toaddr, - "timestamp": timestamp() - } - - save_verification_code(toaddr, verification_code) - - subject = self.subject - body = render_template( - self.template_name, - verification_code=verification_code) - - msg = MIMEMultipart() - msg["To"] = toaddr - msg["Subject"] = self.subject - msg["From"] = self.fromaddr - msg.attach(MIMEText(body, "plain")) - - send_email(toaddr, msg.as_string()) - - -class Password: - def __init__(self, unencrypted_password, salt, iterations, keylength, hashfunc): - hashfunc = getattr(hashlib, hashfunc) - logger.debug("hashfunc is:", hashfunc) - # On our computer it takes around 1.4 seconds in 2013 - start_time = time.time() - salt = base64.b64decode(salt) - self.password = pbkdf2.pbkdf2_hex(str(unencrypted_password), - salt, iterations, keylength, hashfunc) - self.encrypt_time = round(time.time() - start_time, 3) - logger.debug("Creating password took:", self.encrypt_time) - - -def basic_info(): - return dict(timestamp=timestamp(), - ip_address=request.remote_addr, - user_agent=request.headers.get('User-Agent')) - -# @app.route("/manage/verify_email") - - -def verify_email(): - user = DecodeUser(VerificationEmail.key_prefix).user - user.confirmed = json.dumps(basic_info(), sort_keys=True) - db_session.commit() - - # As long as they have access to the email account - # We might as well log them in - - session_id_signed = LoginUser().successful_login(user) - response = make_response(render_template("new_security/thank_you.html")) - response.set_cookie(UserSession.cookie_name, session_id_signed) - return response - -# 
@app.route("/n/password_reset", methods=['GET']) - - -def password_reset(): - """Entry point after user clicks link in E-mail""" - logger.debug("in password_reset request.url is:", request.url) - # We do this mainly just to assert that it's in proper form for displaying next page - # Really not necessary but doesn't hurt - # user_encode = DecodeUser(ForgotPasswordEmail.key_prefix).reencode_standalone() - verification_code = request.args.get('code') - hmac = request.args.get('hm') - - if verification_code: - user_email = check_verification_code(verification_code) - if user_email: - user_details = get_user_by_unique_column( - 'email_address', user_email) - if user_details: - return render_template( - "new_security/password_reset.html", user_encode=user_details["user_id"]) - else: - flash("Invalid code: User no longer exists!", "error") - else: - flash( - "Invalid code: Password reset code does not exist or might have expired!", "error") - else: - return redirect(url_for("login")) - -# @app.route("/n/password_reset_step2", methods=('POST',)) - - -def password_reset_step2(): - """Handle confirmation E-mail for password reset""" - logger.debug("in password_reset request.url is:", request.url) - - errors = [] - user_id = request.form['user_encode'] - - logger.debug("locals are:", locals()) - - user = Bunch() - password = request.form['password'] - set_password(password, user) - - set_user_attribute(user_id, "password", user.__dict__.get("password")) - - flash("Password changed successfully. You can now sign in.", "alert-info") - response = make_response(redirect(url_for('login'))) - - return response - - -class DecodeUser: - - def __init__(self, code_prefix): - verify_url_hmac(request.url) - - #params = urlparse.parse_qs(url) - - self.verification_code = request.args['code'] - self.user = self.actual_get_user(code_prefix, self.verification_code) - - def reencode_standalone(self): - hmac = actual_hmac_creation(self.verification_code) - return self.verification_code + ":" + hmac - - @staticmethod - def actual_get_user(code_prefix, verification_code): - data = Redis.get(code_prefix + ":" + verification_code) - logger.debug("in get_coded_user, data is:", data) - data = json.loads(data) - logger.debug("data is:", data) - return model.User.query.get(data['id']) - -# @app.route("/n/login", methods=('GET', 'POST')) - - -def login(): - lu = LoginUser() - login_type = request.args.get("type") - if login_type: - uid = request.args.get("uid") - return lu.oauth2_login(login_type, uid) - else: - return lu.standard_login() - -# @app.route("/n/login/github_oauth2", methods=('GET', 'POST')) - - -def github_oauth2(): - from utility.tools import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET - code = request.args.get("code") - data = { - "client_id": GITHUB_CLIENT_ID, - "client_secret": GITHUB_CLIENT_SECRET, - "code": code - } - result = requests.post( - "https://github.com/login/oauth/access_token", json=data) - result_dict = {arr[0]: arr[1] for arr in [tok.split( - "=") for tok in [token.encode("utf-8") for token in result.text.split("&")]]} - - github_user = get_github_user_details(result_dict["access_token"]) - - user_details = get_user_by_unique_column("github_id", github_user["id"]) - if user_details == None: - user_details = { - "user_id": str(uuid.uuid4()), "name": github_user["name"].encode("utf-8"), "github_id": github_user["id"], "user_url": github_user["html_url"].encode("utf-8"), "login_type": "github", "organization": "", "active": 1, "confirmed": 1 - } - save_user(user_details, user_details["user_id"]) - - 
url = "/n/login?type=github&uid=" + user_details["user_id"] - return redirect(url) - -# @app.route("/n/login/orcid_oauth2", methods=('GET', 'POST')) - - -def orcid_oauth2(): - from uuid import uuid4 - from utility.tools import ORCID_CLIENT_ID, ORCID_CLIENT_SECRET, ORCID_TOKEN_URL, ORCID_AUTH_URL - code = request.args.get("code") - error = request.args.get("error") - url = "/n/login" - if code: - data = { - "client_id": ORCID_CLIENT_ID, "client_secret": ORCID_CLIENT_SECRET, "grant_type": "authorization_code", "code": code - } - result = requests.post(ORCID_TOKEN_URL, data=data) - result_dict = json.loads(result.text.encode("utf-8")) - - user_details = get_user_by_unique_column("orcid", result_dict["orcid"]) - if user_details == None: - user_details = { - "user_id": str(uuid4()), "name": result_dict["name"], "orcid": result_dict["orcid"], "user_url": "%s/%s" % ( - "/".join(ORCID_AUTH_URL.split("/")[:-2]), - result_dict["orcid"]), "login_type": "orcid", "organization": "", "active": 1, "confirmed": 1 - } - save_user(user_details, user_details["user_id"]) - - url = "/n/login?type=orcid&uid=" + user_details["user_id"] - else: - flash("There was an error getting code from ORCID") - return redirect(url) - - -def get_github_user_details(access_token): - from utility.tools import GITHUB_API_URL - result = requests.get(GITHUB_API_URL, params={ - "access_token": access_token}) - return result.json() - - -class LoginUser: - remember_time = 60 * 60 * 24 * 30 # One month in seconds - - def __init__(self): - self.remember_me = False - self.logged_in = False - - def oauth2_login(self, login_type, user_id): - """Login via an OAuth2 provider""" - - user_details = get_user_by_unique_column("user_id", user_id) - if user_details: - user = model.User() - user.id = user_details["user_id"] if user_details["user_id"] == None else "N/A" - user.full_name = user_details["name"] - user.login_type = user_details["login_type"] - return self.actual_login(user) - else: - flash("Error logging in via OAuth2") - return make_response(redirect(url_for('login'))) - - def standard_login(self): - """Login through the normal form""" - params = request.form if request.form else request.args - logger.debug("in login params are:", params) - - if not params: - from utility.tools import GITHUB_AUTH_URL, GITHUB_CLIENT_ID, ORCID_AUTH_URL, ORCID_CLIENT_ID - external_login = {} - if GITHUB_AUTH_URL and GITHUB_CLIENT_ID != 'UNKNOWN': - external_login["github"] = GITHUB_AUTH_URL - if ORCID_AUTH_URL and ORCID_CLIENT_ID != 'UNKNOWN': - external_login["orcid"] = ORCID_AUTH_URL - - return render_template( - "new_security/login_user.html", external_login=external_login, redis_is_available=is_redis_available()) - else: - user_details = get_user_by_unique_column( - "email_address", params["email_address"]) - #user_details = get_user_by_unique_column(es, "email_address", params["email_address"]) - user = None - valid = None - if user_details: - user = model.User() - for key in user_details: - user.__dict__[key] = user_details[key] - valid = False - - submitted_password = params['password'] - pwfields = Struct(json.loads(user.password)) - encrypted = Password( - submitted_password, - pwfields.salt, - pwfields.iterations, - pwfields.keylength, - pwfields.hashfunc) - logger.debug("\n\nComparing:\n{}\n{}\n".format( - encrypted.password, pwfields.password)) - valid = pbkdf2.safe_str_cmp( - encrypted.password, pwfields.password) - logger.debug("valid is:", valid) - - if valid and not user.confirmed: - VerificationEmail(user) - return 
render_template("new_security/verification_still_needed.html", - subject=VerificationEmail.subject) - if valid: - if params.get('remember'): - logger.debug("I will remember you") - self.remember_me = True - - if 'import_collections' in params: - import_col = "true" - else: - import_col = "false" - - # g.cookie_session.import_traits_to_user() - - self.logged_in = True - - return self.actual_login(user, import_collections=import_col) - - else: - if user: - self.unsuccessful_login(user) - flash("Invalid email-address or password. Please try again.", - "alert-danger") - response = make_response(redirect(url_for('login'))) - - return response - - def actual_login(self, user, assumed_by=None, import_collections=None): - """The meat of the logging in process""" - session_id_signed = self.successful_login(user, assumed_by) - flash("Thank you for logging in {}.".format( - user.full_name), "alert-success") - response = make_response( - redirect(url_for('index_page', import_collections=import_collections))) - if self.remember_me: - max_age = self.remember_time - else: - max_age = None - - response.set_cookie(UserSession.cookie_name, - session_id_signed, max_age=max_age) - return response - - def successful_login(self, user, assumed_by=None): - login_rec = model.Login(user) - login_rec.successful = True - login_rec.session_id = str(uuid.uuid4()) - login_rec.assumed_by = assumed_by - #session_id = "session_id:{}".format(login_rec.session_id) - session_id_signature = actual_hmac_creation(login_rec.session_id) - session_id_signed = login_rec.session_id + ":" + session_id_signature - logger.debug("session_id_signed:", session_id_signed) - - if not user.id: - user.id = '' - - session = dict(login_time=time.time(), - user_id=user.id, - user_name=user.full_name, - user_email_address=user.email_address) - - key = UserSession.cookie_name + ":" + login_rec.session_id - logger.debug("Key when signing:", key) - Redis.hmset(key, session) - if self.remember_me: - expire_time = self.remember_time - else: - expire_time = THREE_DAYS - Redis.expire(key, expire_time) - - return session_id_signed - - def unsuccessful_login(self, user): - login_rec = model.Login(user) - login_rec.successful = False - db_session.add(login_rec) - db_session.commit() - -# @app.route("/n/logout") - - -def logout(): - logger.debug("Logging out...") - UserSession().delete_session() - flash("You are now logged out. 
We hope you come back soon!") - response = make_response(redirect(url_for('index_page'))) - # Delete the cookie - response.set_cookie(UserSession.cookie_name, '', expires=0) - return response - - -# @app.route("/n/forgot_password", methods=['GET']) -def forgot_password(): - """Entry point for forgotten password""" - print("ARGS: ", request.args) - errors = {"no-email": request.args.get("no-email")} - print("ERRORS: ", errors) - return render_template("new_security/forgot_password.html", errors=errors) - -# @app.route("/n/forgot_password_submit", methods=('POST',)) - - -def forgot_password_submit(): - """When a forgotten password form is submitted we get here""" - params = request.form - email_address = params['email_address'] - next_page = None - if email_address != "": - logger.debug("Wants to send password E-mail to ", email_address) - user_details = get_user_by_unique_column( - "email_address", email_address) - if user_details: - ForgotPasswordEmail(user_details["email_address"]) - return render_template("new_security/forgot_password_step2.html", - subject=ForgotPasswordEmail.subject) - else: - flash("The e-mail entered is not associated with an account.", - "alert-danger") - return redirect(url_for("forgot_password")) - - else: - flash("You MUST provide an email", "alert-danger") - return redirect(url_for("forgot_password")) - - -@app.errorhandler(401) -def unauthorized(error): - return redirect(url_for('login')) - - -def is_redis_available(): - try: - Redis.ping() - except: - return False - return True - -### -# ZS: The following 6 functions require the old MySQL User accounts; I'm leaving them commented out just in case we decide to reimplement them using ElasticSearch -### -# def super_only(): -# try: -# superuser = g.user_session.user_ob.superuser -# except AttributeError: -# superuser = False -# if not superuser: -# flash("You must be a superuser to access that page.", "alert-error") -# abort(401) - -# @app.route("/manage/users") -# def manage_users(): -# super_only() -# template_vars = UsersManager() -# return render_template("admin/user_manager.html", **template_vars.__dict__) - -# @app.route("/manage/user") -# def manage_user(): -# super_only() -# template_vars = UserManager(request.args) -# return render_template("admin/ind_user_manager.html", **template_vars.__dict__) - -# @app.route("/manage/groups") -# def manage_groups(): -# super_only() -# template_vars = GroupsManager(request.args) -# return render_template("admin/group_manager.html", **template_vars.__dict__) - -# @app.route("/manage/make_superuser") -# def make_superuser(): -# super_only() -# params = request.args -# user_id = params['user_id'] -# user = model.User.query.get(user_id) -# superuser_info = basic_info() -# superuser_info['crowned_by'] = g.user_session.user_id -# user.superuser = json.dumps(superuser_info, sort_keys=True) -# db_session.commit() -# flash("We've made {} a superuser!".format(user.name_and_org)) -# return redirect(url_for("manage_users")) - -# @app.route("/manage/assume_identity") -# def assume_identity(): -# super_only() -# params = request.args -# user_id = params['user_id'] -# user = model.User.query.get(user_id) -# assumed_by = g.user_session.user_id -# return LoginUser().actual_login(user, assumed_by=assumed_by) - - -# @app.route("/n/register", methods=('GET', 'POST')) -def register(): - params = None - errors = None - - params = request.form if request.form else request.args - params = params.to_dict(flat=True) - - if params: - logger.debug("Attempting to register the user...") - result = 
RegisterUser(params) - errors = result.errors - - if len(errors) == 0: - flash( - "Registration successful. You may login with your new account", "alert-info") - return redirect(url_for("login")) - - return render_template("new_security/register_user.html", values=params, errors=errors) - - -################################# Sign and unsign ##################################### - -def url_for_hmac(endpoint, **values): - """Like url_for but adds an hmac at the end to insure the url hasn't been tampered with""" - - url = url_for(endpoint, **values) - - hm = actual_hmac_creation(url) - if '?' in url: - combiner = "&" - else: - combiner = "?" - return url + combiner + "hm=" + hm - - -def data_hmac(stringy): - """Takes arbitray data string and appends :hmac so we know data hasn't been tampered with""" - return stringy + ":" + actual_hmac_creation(stringy) - - -def verify_url_hmac(url): - """Pass in a url that was created with url_hmac and this assures it hasn't been tampered with""" - logger.debug("url passed in to verify is:", url) - # Verify parts are correct at the end - we expect to see &hm= or ?hm= followed by an hmac - assert url[-23:-20] == "hm=", "Unexpected url (stage 1)" - assert url[-24] in ["?", "&"], "Unexpected url (stage 2)" - hmac = url[-20:] - url = url[:-24] # Url without any of the hmac stuff - - #logger.debug("before urlsplit, url is:", url) - #url = divide_up_url(url)[1] - #logger.debug("after urlsplit, url is:", url) - - hm = actual_hmac_creation(url) - - assert hm == hmac, "Unexpected url (stage 3)" - - -def actual_hmac_creation(stringy): - """Helper function to create the actual hmac""" - - secret = app.config['SECRET_HMAC_CODE'] - - hmaced = hmac.new(secret, stringy, hashlib.sha1) - hm = hmaced.hexdigest() - # "Conventional wisdom is that you don't lose much in terms of security if you throw away up to half of the output." - # http://www.w3.org/QA/2009/07/hmac_truncation_in_xml_signatu.html - hm = hm[:20] - return hm - - -app.jinja_env.globals.update(url_for_hmac=url_for_hmac, - data_hmac=data_hmac) - -####################################################################################### - -# def send_email(to, subject, body): -# msg = json.dumps(dict(From="no-reply@genenetwork.org", -# To=to, -# Subject=subject, -# Body=body)) -# Redis.rpush("mail_queue", msg) - - -def send_email(toaddr, msg, fromaddr="no-reply@genenetwork.org"): - """Send an E-mail through SMTP_CONNECT host. 
If SMTP_USERNAME is not - 'UNKNOWN' TLS is used - - """ - if SMTP_USERNAME == 'UNKNOWN': - logger.debug("SMTP: connecting with host " + SMTP_CONNECT) - server = SMTP(SMTP_CONNECT) - server.sendmail(fromaddr, toaddr, msg) - else: - logger.debug("SMTP: connecting TLS with host " + SMTP_CONNECT) - server = SMTP(SMTP_CONNECT) - server.starttls() - logger.debug("SMTP: login with user " + SMTP_USERNAME) - server.login(SMTP_USERNAME, SMTP_PASSWORD) - logger.debug("SMTP: " + fromaddr) - logger.debug("SMTP: " + toaddr) - logger.debug("SMTP: " + msg) - server.sendmail(fromaddr, toaddr, msg) - server.quit() - logger.info("Successfully sent email to " + toaddr) - - -class GroupsManager: - def __init__(self, kw): - self.datasets = create_datasets_list() - - -class RolesManager: - def __init__(self): - self.roles = model.Role.query.all() - logger.debug("Roles are:", self.roles) -- cgit v1.2.3 From 5c4da47e6f4c7c61666b80dc5133aa584db38e5d Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 5 May 2021 16:26:09 +0300 Subject: Delete model.py and it's references * wqflask/wqflask/database.py (init_db): Remove references to "wqflask.model" * wqflask/wqflask/model.py: Delete it. --- wqflask/wqflask/database.py | 5 -- wqflask/wqflask/model.py | 182 -------------------------------------------- 2 files changed, 187 deletions(-) delete mode 100644 wqflask/wqflask/model.py diff --git a/wqflask/wqflask/database.py b/wqflask/wqflask/database.py index 42fa1594..38c37d2c 100644 --- a/wqflask/wqflask/database.py +++ b/wqflask/wqflask/database.py @@ -16,11 +16,6 @@ Base.query = db_session.query_property() def init_db(): - # import all modules here that might define models so that - # they will be registered properly on the metadata. Otherwise - # you will have to import them first before calling init_db() - #import yourapplication.models - import wqflask.model Base.metadata.create_all(bind=engine) diff --git a/wqflask/wqflask/model.py b/wqflask/wqflask/model.py deleted file mode 100644 index a222b87c..00000000 --- a/wqflask/wqflask/model.py +++ /dev/null @@ -1,182 +0,0 @@ -import uuid -import datetime - -import simplejson as json - -from flask import request - -from wqflask import app - -import sqlalchemy -from sqlalchemy import (Column, ForeignKey, Unicode, Boolean, DateTime, - Text, Index) -from sqlalchemy.orm import relationship - -from wqflask.database import Base, init_db - - -class User(Base): - __tablename__ = "user" - id = Column(Unicode(36), primary_key=True, - default=lambda: str(uuid.uuid4())) - email_address = Column(Unicode(50), unique=True, nullable=False) - - # Todo: Turn on strict mode for Mysql - password = Column(Text, nullable=False) - - full_name = Column(Unicode(50)) - organization = Column(Unicode(50)) - - active = Column(Boolean(), nullable=False, default=True) - - # json detailing when they were registered, etc. - registration_info = Column(Text) - - confirmed = Column(Text) # json detailing when they confirmed, etc. - - # json detailing when they became a superuser, otherwise empty - superuser = Column(Text) - # if not superuser - - logins = relationship("Login", - order_by="desc(Login.timestamp)", - lazy='dynamic', # Necessary for filter in login_count - foreign_keys="Login.user", - ) - - user_collections = relationship("UserCollection", - order_by="asc(UserCollection.name)", - lazy='dynamic', - ) - - def display_num_collections(self): - """ - Returns the number of collections or a blank string if there are zero. 
- - - Because this is so unimportant...we wrap the whole thing in a try/expect...last thing we - want is a webpage not to be displayed because of an error here - - Importand TODO: use redis to cache this, don't want to be constantly computing it - - """ - try: - num = len(list(self.user_collections)) - return display_collapsible(num) - except Exception as why: - print("Couldn't display_num_collections:", why) - return "" - - def get_collection_by_name(self, collection_name): - try: - collect = self.user_collections.filter_by( - name=collection_name).first() - except sqlalchemy.orm.exc.NoResultFound: - collect = None - return collect - - @property - def name_and_org(self): - """Nice shortcut for printing out who the user is""" - if self.organization: - return "{} from {}".format(self.full_name, self.organization) - else: - return self.full_name - - @property - def login_count(self): - return self.logins.filter_by(successful=True).count() - - @property - def confirmed_at(self): - if self.confirmed: - confirmed_info = json.loads(self.confirmed) - return confirmed_info['timestamp'] - else: - return None - - @property - def superuser_info(self): - if self.superuser: - return json.loads(self.superuser) - else: - return None - - @property - def crowner(self): - """If made superuser, returns object of person who did the crowning""" - if self.superuser: - superuser_info = json.loads(self.superuser) - crowner = User.query.get(superuser_info['crowned_by']) - return crowner - else: - return None - - @property - def most_recent_login(self): - try: - return self.logins[0] - except IndexError: - return None - - -class Login(Base): - __tablename__ = "login" - id = Column(Unicode(36), primary_key=True, - default=lambda: str(uuid.uuid4())) - user = Column(Unicode(36), ForeignKey('user.id')) - timestamp = Column(DateTime(), default=lambda: datetime.datetime.utcnow()) - ip_address = Column(Unicode(39)) - # False if wrong password was entered - successful = Column(Boolean(), nullable=False) - # Set only if successfully logged in, otherwise should be blank - session_id = Column(Text) - - # Set to user who assumes identity if this was a login for debugging purposes by a superuser - assumed_by = Column(Unicode(36), ForeignKey('user.id')) - - def __init__(self, user): - self.user = user.id - self.ip_address = request.remote_addr - -################################################################################################## - - -class UserCollection(Base): - __tablename__ = "user_collection" - id = Column(Unicode(36), primary_key=True, - default=lambda: str(uuid.uuid4())) - user = Column(Unicode(36), ForeignKey('user.id')) - - # I'd prefer this to not have a length, but for the index below it needs one - name = Column(Unicode(50)) - created_timestamp = Column( - DateTime(), default=lambda: datetime.datetime.utcnow()) - changed_timestamp = Column( - DateTime(), default=lambda: datetime.datetime.utcnow()) - members = Column(Text) # We're going to store them as a json list - - # This index ensures a user doesn't have more than one collection with the same name - __table_args__ = (Index('usercollection_index', "user", "name"), ) - - @property - def num_members(self): - try: - return len(json.loads(self.members)) - except: - return 0 - - def members_as_set(self): - return set(json.loads(self.members)) - - -def display_collapsible(number): - if number: - return number - else: - return "" - - -def user_uuid(): - """Unique cookie for a user""" - user_uuid = request.cookies.get('user_uuid') -- cgit v1.2.3 From 
d60d30ca54106ff052d6953917f29465ae6e764b Mon Sep 17 00:00:00 2001 From: zsloan Date: Thu, 6 May 2021 19:45:37 +0000 Subject: Changed the way metadata is displayed on the mapping loading page --- wqflask/wqflask/templates/loading.html | 98 ++++++++++++++++++---------------- 1 file changed, 51 insertions(+), 47 deletions(-) diff --git a/wqflask/wqflask/templates/loading.html b/wqflask/wqflask/templates/loading.html index 124f0608..95cecf76 100644 --- a/wqflask/wqflask/templates/loading.html +++ b/wqflask/wqflask/templates/loading.html @@ -7,56 +7,60 @@ {% endfor %}
    -
    +
    - {% if start_vars.tool_used == "Mapping" %} -
    Computing the Maps
    -
    - n = {{ start_vars.n_samples }} -
    - Method = {% if start_vars.method == "gemma" %}GEMMA {% if start_vars.use_loco == "True" %}using LOCO {% endif %}{% else %}{{ start_vars.method }}{% endif %} -
    - {% set genofile_desc = start_vars.genofile.split(":")[1] %} - Genotype File = {{ genofile_desc }} - {% if start_vars.num_perm | int > 0 %} -
    - # Permutations = {{ start_vars.num_perm }} - {% endif %} - {% if start_vars.num_bootstrap | int > 0 %} -
    - # Bootstrap = {{ start_vars.num_bootstrap }} - {% endif %} - {% if start_vars.transform != "" %} -
    - transform = {{ start_vars.transform }} - {% endif %} - {% if start_vars.maf != "" and start_vars.method != "reaper" %} -
    - MAF >= {{ start_vars.maf }} - {% endif %} - {% if start_vars.covariates != "" and start_vars.method != "reaper" %} -
    - {% set covariate_list = start_vars.covariates.split(",") %} - Trait Covariates: {% for covariate in covariate_list %}{% set this_covariate = covariate.split(":")[0] %}{{ this_covariate }}{% if not loop.last %}, {% endif %}{% endfor %} - {% endif %} - {% if start_vars.control_marker != "" and start_vars.do_control == "true" and start_vars.method != "gemma" %} -
    - Marker Covariate: {{ start_vars.control_marker }} - {% endif %} - {% else %} -
    Loading {{ start_vars.tool_used }} Results...
    - {% endif %} -
    - -
    - -
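For reference, the covariates value threaded through these loading-page templates is a comma-separated string of trait:dataset pairs, and the Jinja shown above reduces it to a list of trait names. A minimal Python sketch of that same parsing, for clarity only — the helper name and the example identifiers are invented here, and the templates themselves do this inline in Jinja:

# Hypothetical helper mirroring the Jinja logic above: split the covariates
# string on "," and keep only the trait name before each ":".
def format_covariate_names(covariates: str) -> str:
    if not covariates:
        return ""
    return ", ".join(pair.split(":")[0] for pair in covariates.split(","))

# Example with made-up trait/dataset identifiers:
assert format_covariate_names("10001:BXDPublish,10002:BXDPublish") == "10001, 10002"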
    -- cgit v1.2.3 From 6dc0c82ed93978be67357e0e84b6e5b3531054e6 Mon Sep 17 00:00:00 2001 From: zsloan Date: Thu, 6 May 2021 19:45:57 +0000 Subject: Include group/species in form parameters on trait page so they can be displayed on mapping loading page --- wqflask/wqflask/show_trait/show_trait.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index fcebbc4d..b6fcbcb8 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -263,6 +263,9 @@ class ShowTrait: hddn['temp_trait'] = True hddn['group'] = self.temp_group hddn['species'] = self.temp_species + else: + hddn['group'] = self.dataset.group.name + hddn['species'] = self.dataset.group.species hddn['use_outliers'] = False hddn['method'] = "gemma" hddn['selected_chr'] = -1 -- cgit v1.2.3 From 70735035fe7b59bae13fe955e4f32595055b3940 Mon Sep 17 00:00:00 2001 From: zsloan Date: Thu, 6 May 2021 19:54:48 +0000 Subject: Include group and species in form parameters for mapping results page, so they can be included in the loading page if the map is reloaded --- wqflask/wqflask/templates/mapping_results.html | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/wqflask/wqflask/templates/mapping_results.html b/wqflask/wqflask/templates/mapping_results.html index f054506c..73d7501b 100644 --- a/wqflask/wqflask/templates/mapping_results.html +++ b/wqflask/wqflask/templates/mapping_results.html @@ -17,8 +17,9 @@ {% if temp_trait is defined %} - {% endif %} + + -- cgit v1.2.3 From 3c430082b767a29c3e35cb03e68c1b22373ad353 Mon Sep 17 00:00:00 2001 From: zsloan Date: Thu, 6 May 2021 20:02:35 +0000 Subject: Fixed vertical position of loading text so that it should be positioned in the center vertically --- wqflask/wqflask/templates/loading.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/templates/loading.html b/wqflask/wqflask/templates/loading.html index 95cecf76..d384a087 100644 --- a/wqflask/wqflask/templates/loading.html +++ b/wqflask/wqflask/templates/loading.html @@ -8,7 +8,7 @@
    -
    +
    {% if start_vars.tool_used == "Mapping" %}
    Computing the Maps
    -- cgit v1.2.3 From 2444d60a93ef7c9900ed9a52877bff0ee08fbfb6 Mon Sep 17 00:00:00 2001 From: zsloan Date: Thu, 6 May 2021 22:54:05 +0000 Subject: Because each permutations value is returned as an array, need to just take the first item to convert to numbers --- wqflask/wqflask/marker_regression/rqtl_mapping.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py index cd43577e..b9cb99cc 100644 --- a/wqflask/wqflask/marker_regression/rqtl_mapping.py +++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py @@ -385,7 +385,7 @@ def process_pair_scan_results(result): def process_rqtl_perm_results(num_perm, results): perm_vals = [] for item in results: - perm_vals.append(item) + perm_vals.append(item[0]) perm_output = perm_vals suggestive = np.percentile(np.array(perm_vals), 67) -- cgit v1.2.3 From cec8c8870f12a52ac20ffcc2f4a8a05003a13a9f Mon Sep 17 00:00:00 2001 From: zsloan Date: Thu, 6 May 2021 23:14:22 +0000 Subject: Used a list comprehension for perm_vals as mentioned in Bonface's comment --- wqflask/wqflask/marker_regression/rqtl_mapping.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py index 072e4530..06eceada 100644 --- a/wqflask/wqflask/marker_regression/rqtl_mapping.py +++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py @@ -431,9 +431,7 @@ def process_pair_scan_results(result): def process_rqtl_perm_results(num_perm, results): - perm_vals = [] - for item in results: - perm_vals.append(item[0]) + perm_vals = item[0] for item in results perm_output = perm_vals suggestive = np.percentile(np.array(perm_vals), 67) -- cgit v1.2.3 From a2627777d51b969869d8647624ce008fc9454c7b Mon Sep 17 00:00:00 2001 From: zsloan Date: Thu, 6 May 2021 23:18:08 +0000 Subject: Forgot to add brackets around list comprehension, so added them --- wqflask/wqflask/marker_regression/rqtl_mapping.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py index 06eceada..1c8477bf 100644 --- a/wqflask/wqflask/marker_regression/rqtl_mapping.py +++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py @@ -431,7 +431,7 @@ def process_pair_scan_results(result): def process_rqtl_perm_results(num_perm, results): - perm_vals = item[0] for item in results + perm_vals = [item[0] for item in results] perm_output = perm_vals suggestive = np.percentile(np.array(perm_vals), 67) -- cgit v1.2.3 From 3e34d0a5f964bb7931cccdd58b7a360187d35a4d Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Thu, 6 May 2021 09:32:11 +0300 Subject: add blogs blueprint --- wqflask/wqflask/__init__.py | 2 ++ wqflask/wqflask/markdown_routes.py | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/wqflask/wqflask/__init__.py b/wqflask/wqflask/__init__.py index a2bf3085..bf5e094b 100644 --- a/wqflask/wqflask/__init__.py +++ b/wqflask/wqflask/__init__.py @@ -12,6 +12,7 @@ from wqflask.markdown_routes import links_blueprint from wqflask.markdown_routes import policies_blueprint from wqflask.markdown_routes import environments_blueprint from wqflask.markdown_routes import facilities_blueprint +from wqflask.markdown_routes import blogs_blueprint app = Flask(__name__) @@ -29,6 +30,7 @@ app.register_blueprint(links_blueprint, url_prefix="/links") 
app.register_blueprint(policies_blueprint, url_prefix="/policies") app.register_blueprint(environments_blueprint, url_prefix="/environments") app.register_blueprint(facilities_blueprint, url_prefix="/facilities") +app.register_blueprint(blogs_blueprint, url_prefix="/blogs") @app.before_request diff --git a/wqflask/wqflask/markdown_routes.py b/wqflask/wqflask/markdown_routes.py index c27ff143..b9a6f8ce 100644 --- a/wqflask/wqflask/markdown_routes.py +++ b/wqflask/wqflask/markdown_routes.py @@ -20,6 +20,8 @@ links_blueprint = Blueprint("links_blueprint", __name__) policies_blueprint = Blueprint("policies_blueprint", __name__) facilities_blueprint = Blueprint("facilities_blueprint", __name__) +blogs_blueprint = Blueprint("blogs_blueprint", __name__) + def render_markdown(file_name, is_remote_file=True): """Try to fetch the file name from Github and if that fails, try to @@ -124,3 +126,36 @@ def policies(): @facilities_blueprint.route("/") def facilities(): return render_template("facilities.html", rendered_markdown=render_markdown("general/help/facilities.md")), 200 + + +@blogs_blueprint.route("/") +def display_blog(blog_title): + # should use the blog title path + + return render_template("blogs.html", rendered_markdown=render_markdown("blog/2021/proteome/Wang_WIlliams_Rat_Brain_Proteome_For_Blog.md")) + + +@blogs_blueprint.route("/") +def blogs_list(): + + # should fetch this from github + + blogs = {"2021": [ + { + "title": "proteome", + "subtitle": "Wang_WIlliams_Rat_Brain_Proteome_For_Blog" + }, + { + "title":"xxx", + "subtitle":"blog 2" + } + ], + "2020": [ + { + "title": "other", + "subtitle": "other" + } + ] + } + + return render_template("blogs_list.html", blogs=blogs) -- cgit v1.2.3 From 2e2e4601f4cebc767854e66af2d2ca8c3e8123c9 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Thu, 6 May 2021 09:32:35 +0300 Subject: add template for blogs --- wqflask/wqflask/templates/base.html | 1 + wqflask/wqflask/templates/blogs.html | 24 ++++++++++++ wqflask/wqflask/templates/blogs_list.html | 61 +++++++++++++++++++++++++++++++ 3 files changed, 86 insertions(+) create mode 100644 wqflask/wqflask/templates/blogs.html create mode 100644 wqflask/wqflask/templates/blogs_list.html diff --git a/wqflask/wqflask/templates/base.html b/wqflask/wqflask/templates/base.html index e6f22deb..ddb1d272 100644 --- a/wqflask/wqflask/templates/base.html +++ b/wqflask/wqflask/templates/base.html @@ -69,6 +69,7 @@
    + {% else %} +
    + +
    + +
    +
    {% endif %}
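    The blog commits above follow Flask's standard blueprint pattern: create a Blueprint, attach routes to it, then register it on the app under a URL prefix. A minimal, self-contained sketch of that pattern follows; the GitHub-backed render_markdown() helper is stubbed out, inline templates stand in for the blogs.html / blogs_list.html files added here, and the path parameter on the blog route is an assumption for illustration only.

        from flask import Blueprint, Flask, render_template_string

        blogs_blueprint = Blueprint("blogs_blueprint", __name__)

        def render_markdown(file_name: str) -> str:
            # Stub for the real helper, which fetches the markdown file from
            # GitHub and falls back to a local copy if that fails.
            return "<p>(rendered markdown from %s)</p>" % file_name

        @blogs_blueprint.route("/<path:blog_path>")
        def display_blog(blog_path):
            # Hypothetical path parameter; the committed route renders one
            # hard-coded 2021 proteome post.
            return render_template_string("{{ body|safe }}",
                                           body=render_markdown(blog_path))

        @blogs_blueprint.route("/")
        def blogs_list():
            blogs = {"2021": [{"title": "proteome", "subtitle": "Rat brain proteome post"}]}
            return render_template_string("{{ blogs }}", blogs=blogs)

        app = Flask(__name__)
        app.register_blueprint(blogs_blueprint, url_prefix="/blogs")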
    -- cgit v1.2.3 From 582893c0cc87f87e5dfc7fe0209b2e4a3f85d4a4 Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 25 Aug 2021 18:02:31 +0000 Subject: Fixed issue in get_diff_of_vals that caused the diff to be calculated wrong (due to one set of values being rounded to 3 digits and the other not --- wqflask/wqflask/show_trait/show_trait.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index 80f5d117..3f93bae0 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -830,11 +830,11 @@ def get_diff_of_vals(new_vals: Dict, trait_id: str) -> Dict: diff_dict = {} for sample in shared_samples: try: - new_val = float(new_vals[sample]) + new_val = round(float(new_vals[sample]), 3) except: new_val = "x" try: - old_val = float(old_vals[sample]) + old_val = round(float(old_vals[sample]), 3) except: old_val = "x" -- cgit v1.2.3 From cc7c392198f73ac8e8419be1367ce566d85a873c Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 25 Aug 2021 19:05:07 +0000 Subject: Fix issue where correlation results weren't included parents/f1s --- wqflask/wqflask/correlation/correlation_gn3_api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/correlation/correlation_gn3_api.py b/wqflask/wqflask/correlation/correlation_gn3_api.py index a78cb0b9..d0d4bcba 100644 --- a/wqflask/wqflask/correlation/correlation_gn3_api.py +++ b/wqflask/wqflask/correlation/correlation_gn3_api.py @@ -148,7 +148,7 @@ def lit_for_trait_list(corr_results, this_dataset, this_trait): def fetch_sample_data(start_vars, this_trait, this_dataset, target_dataset): sample_data = process_samples( - start_vars, this_dataset.group.samplelist) + start_vars, this_dataset.group.all_samples_ordered()) if target_dataset.type == "ProbeSet": target_dataset.get_probeset_data(list(sample_data.keys())) -- cgit v1.2.3 From 5ebe36d5ff685e6b663b14c130606aa60b0123c2 Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 3 Sep 2021 14:58:05 +0000 Subject: Fix issue where values written to phenotype file for R/qtl sometimes had trailing decimal values by grounding to 3 places past the decimal --- wqflask/wqflask/marker_regression/rqtl_mapping.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py index 09afb8d1..cd578870 100644 --- a/wqflask/wqflask/marker_regression/rqtl_mapping.py +++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py @@ -89,7 +89,7 @@ def write_phenotype_file(trait_name: str, for i, sample in enumerate(samples): this_row = [sample] if vals[i] != "x": - this_row.append(vals[i]) + this_row.append(str(round(float(vals[i]), 3))) else: this_row.append("NA") for cofactor in cofactor_data: @@ -126,7 +126,7 @@ def cofactors_to_dict(cofactors: str, dataset_ob, samples) -> Dict: sample_data = trait_ob.data for index, sample in enumerate(samples): if sample in sample_data: - sample_value = sample_data[sample].value + sample_value = str(round(float(sample_data[sample].value), 3)) cofactor_dict[cofactor_name].append(sample_value) else: cofactor_dict[cofactor_name].append("NA") -- cgit v1.2.3 From 8af212bc7eac5b39e8e6838df446a4003633a55e Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 3 Sep 2021 14:59:14 +0000 Subject: Fix issue that caused javascript to not work on the R/qtl mapping result page when permutations weren't used (because it wrongly expected the permutation 
histogram to always exist) --- wqflask/wqflask/templates/mapping_results.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/templates/mapping_results.html b/wqflask/wqflask/templates/mapping_results.html index 81eb1ba1..d446745d 100644 --- a/wqflask/wqflask/templates/mapping_results.html +++ b/wqflask/wqflask/templates/mapping_results.html @@ -529,7 +529,7 @@ }); {% endif %} - {% if mapping_method != "gemma" and mapping_method != "plink" %} + {% if mapping_method != "gemma" and mapping_method != "plink" and nperm > 0 and permChecked == "ON" %} $('#download_perm').click(function(){ perm_info_dict = { perm_data: js_data.perm_results, -- cgit v1.2.3 From f4297bc9e4c49f58e0759d02d3e7aea392ba5615 Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 3 Sep 2021 15:01:34 +0000 Subject: Allow categorical_var_list to be passed as a template variable --- wqflask/wqflask/show_trait/show_trait.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index 3f93bae0..c4d1ae1c 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -178,11 +178,11 @@ class ShowTrait: self.sample_group_types['samples_primary'] = self.dataset.group.name sample_lists = [group.sample_list for group in self.sample_groups] - categorical_var_list = [] + self.categorical_var_list = [] self.numerical_var_list = [] if not self.temp_trait: # ZS: Only using first samplelist, since I think mapping only uses those samples - categorical_var_list = get_categorical_variables( + self.categorical_var_list = get_categorical_variables( self.this_trait, self.sample_groups[0]) self.numerical_var_list = get_numerical_variables( self.this_trait, self.sample_groups[0]) @@ -287,8 +287,8 @@ class ShowTrait: hddn['study_samplelists'] = json.dumps(study_samplelist_json) hddn['num_perm'] = 0 hddn['categorical_vars'] = "" - if categorical_var_list: - hddn['categorical_vars'] = ",".join(categorical_var_list) + if self.categorical_var_list: + hddn['categorical_vars'] = ",".join(self.categorical_var_list) hddn['manhattan_plot'] = "" hddn['control_marker'] = "" if not self.temp_trait: @@ -323,7 +323,7 @@ class ShowTrait: has_num_cases=self.has_num_cases, attributes=self.sample_groups[0].attributes, categorical_attr_exists=self.categorical_attr_exists, - categorical_vars=",".join(categorical_var_list), + categorical_vars=",".join(self.categorical_var_list), num_values=self.num_values, qnorm_values=self.qnorm_vals, zscore_values=self.z_scores, -- cgit v1.2.3 From f81a5629ab6ea7e39893af00e59c3ac6d79d7892 Mon Sep 17 00:00:00 2001 From: zsloan Date: Sun, 5 Sep 2021 17:03:51 +0000 Subject: Fixed issue that caused sample data to not be fetched correctly; there's something wrong with the 'get_probeset_data' function (not sure why this function exists) --- wqflask/wqflask/correlation/correlation_gn3_api.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/wqflask/wqflask/correlation/correlation_gn3_api.py b/wqflask/wqflask/correlation/correlation_gn3_api.py index d0d4bcba..a18bceaf 100644 --- a/wqflask/wqflask/correlation/correlation_gn3_api.py +++ b/wqflask/wqflask/correlation/correlation_gn3_api.py @@ -150,10 +150,7 @@ def fetch_sample_data(start_vars, this_trait, this_dataset, target_dataset): sample_data = process_samples( start_vars, this_dataset.group.all_samples_ordered()) - if target_dataset.type == "ProbeSet": - 
target_dataset.get_probeset_data(list(sample_data.keys())) - else: - target_dataset.get_trait_data(list(sample_data.keys())) + target_dataset.get_trait_data(list(sample_data.keys())) this_trait = retrieve_sample_data(this_trait, this_dataset) this_trait_data = { "trait_sample_data": sample_data, -- cgit v1.2.3 From 25a5fe8027a00a64513855630a4365480cf567d7 Mon Sep 17 00:00:00 2001 From: zsloan Date: Tue, 7 Sep 2021 18:21:34 +0000 Subject: Add timer to loading page to track how long the process has been running --- wqflask/wqflask/templates/loading.html | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/wqflask/wqflask/templates/loading.html b/wqflask/wqflask/templates/loading.html index 1edde31e..ccf810b0 100644 --- a/wqflask/wqflask/templates/loading.html +++ b/wqflask/wqflask/templates/loading.html @@ -12,6 +12,8 @@ {% if start_vars.tool_used == "Mapping" %}

    Computing the Maps


    + Time Elapsed: +
    Trait Metadata
    species = {{ start_vars.species[0] | upper }}{{ start_vars.species[1:] }} @@ -101,9 +103,6 @@ -- cgit v1.2.3 From d243e3a69b26d60709fe10ab0b70a0e1d53ba50d Mon Sep 17 00:00:00 2001 From: zsloan Date: Tue, 7 Sep 2021 18:22:44 +0000 Subject: Add trait hash and datetime to mapping figure --- .../marker_regression/display_mapping_results.py | 68 ++++++++++++++-------- wqflask/wqflask/marker_regression/run_mapping.py | 1 + .../new/javascript/show_trait_mapping_tools.js | 10 ++-- wqflask/wqflask/templates/mapping_results.html | 1 + wqflask/wqflask/views.py | 1 + 5 files changed, 51 insertions(+), 30 deletions(-) diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py index 3986c441..5f5fe6a3 100644 --- a/wqflask/wqflask/marker_regression/display_mapping_results.py +++ b/wqflask/wqflask/marker_regression/display_mapping_results.py @@ -24,6 +24,7 @@ # # Last updated by Zach 12/14/2010 +import datetime import string from math import * from PIL import Image @@ -271,6 +272,7 @@ class DisplayMappingResults: # Needing for form submission when doing single chr # mapping or remapping after changing options self.sample_vals = start_vars['sample_vals'] + self.vals_hash= start_vars['vals_hash'] self.sample_vals_dict = json.loads(self.sample_vals) self.transform = start_vars['transform'] @@ -651,7 +653,7 @@ class DisplayMappingResults: btminfo.append( 'Mapping using genotype data as a trait will result in infinity LRS at one locus. In order to display the result properly, all LRSs higher than 100 are capped at 100.') - def plotIntMapping(self, canvas, offset=(80, 120, 90, 100), zoom=1, startMb=None, endMb=None, showLocusForm=""): + def plotIntMapping(self, canvas, offset=(80, 120, 110, 100), zoom=1, startMb=None, endMb=None, showLocusForm=""): im_drawer = ImageDraw.Draw(canvas) # calculating margins xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset @@ -661,7 +663,7 @@ class DisplayMappingResults: if self.legendChecked: yTopOffset += 10 if self.covariates != "" and self.controlLocus and self.doControl != "false": - yTopOffset += 20 + yTopOffset += 25 if len(self.transform) > 0: yTopOffset += 5 else: @@ -1195,43 +1197,47 @@ class DisplayMappingResults: dataset_label = "%s - %s" % (self.dataset.group.name, self.dataset.fullname) - string1 = 'Dataset: %s' % (dataset_label) + + current_datetime = datetime.datetime.now() + string1 = 'UTC Timestamp: %s' % (current_datetime.strftime("%b %d %Y %H:%M:%S")) + string2 = 'Dataset: %s' % (dataset_label) + string3 = 'Trait Hash: %s' % (self.vals_hash) if self.genofile_string == "": - string2 = 'Genotype File: %s.geno' % self.dataset.group.name + string4 = 'Genotype File: %s.geno' % self.dataset.group.name else: - string2 = 'Genotype File: %s' % self.genofile_string + string4 = 'Genotype File: %s' % self.genofile_string - string4 = '' + string6 = '' if self.mapping_method == "gemma" or self.mapping_method == "gemma_bimbam": if self.use_loco == "True": - string3 = 'Using GEMMA mapping method with LOCO and ' + string5 = 'Using GEMMA mapping method with LOCO and ' else: - string3 = 'Using GEMMA mapping method with ' + string5 = 'Using GEMMA mapping method with ' if self.covariates != "": - string3 += 'the cofactors below:' + string5 += 'the cofactors below:' cofactor_names = ", ".join( [covar.split(":")[0] for covar in self.covariates.split(",")]) - string4 = cofactor_names + string6 = cofactor_names else: - string3 += 'no cofactors' + string5 += 'no cofactors' elif 
self.mapping_method == "rqtl_plink" or self.mapping_method == "rqtl_geno": - string3 = 'Using R/qtl mapping method with ' + string5 = 'Using R/qtl mapping method with ' if self.covariates != "": - string3 += 'the cofactors below:' + string5 += 'the cofactors below:' cofactor_names = ", ".join( [covar.split(":")[0] for covar in self.covariates.split(",")]) - string4 = cofactor_names + string6 = cofactor_names elif self.controlLocus and self.doControl != "false": - string3 += '%s as control' % self.controlLocus + string5 += '%s as control' % self.controlLocus else: - string3 += 'no cofactors' + string5 += 'no cofactors' else: - string3 = 'Using Haldane mapping function with ' + string5 = 'Using Haldane mapping function with ' if self.controlLocus and self.doControl != "false": - string3 += '%s as control' % self.controlLocus + string5 += '%s as control' % self.controlLocus else: - string3 += 'no control for other QTLs' + string5 += 'no control for other QTLs' y_constant = 10 if self.this_trait.name: @@ -1260,7 +1266,9 @@ class DisplayMappingResults: d = 4 + max( im_drawer.textsize(identification, font=labelFont)[0], im_drawer.textsize(string1, font=labelFont)[0], - im_drawer.textsize(string2, font=labelFont)[0]) + im_drawer.textsize(string2, font=labelFont)[0], + im_drawer.textsize(string3, font=labelFont)[0], + im_drawer.textsize(string4, font=labelFont)[0]) im_drawer.text( text=identification, xy=(xLeftOffset, y_constant * fontZoom), font=labelFont, @@ -1269,7 +1277,9 @@ class DisplayMappingResults: else: d = 4 + max( im_drawer.textsize(string1, font=labelFont)[0], - im_drawer.textsize(string2, font=labelFont)[0]) + im_drawer.textsize(string2, font=labelFont)[0], + im_drawer.textsize(string3, font=labelFont)[0], + im_drawer.textsize(string4, font=labelFont)[0]) if len(self.transform) > 0: transform_text = "Transform - " @@ -1296,14 +1306,22 @@ class DisplayMappingResults: text=string2, xy=(xLeftOffset, y_constant * fontZoom), font=labelFont, fill=labelColor) y_constant += 15 - if string3 != '': + im_drawer.text( + text=string3, xy=(xLeftOffset, y_constant * fontZoom), + font=labelFont, fill=labelColor) + y_constant += 15 + im_drawer.text( + text=string4, xy=(xLeftOffset, y_constant * fontZoom), + font=labelFont, fill=labelColor) + y_constant += 15 + if string4 != '': im_drawer.text( - text=string3, xy=(xLeftOffset, y_constant * fontZoom), + text=string5, xy=(xLeftOffset, y_constant * fontZoom), font=labelFont, fill=labelColor) y_constant += 15 - if string4 != '': + if string5 != '': im_drawer.text( - text=string4, xy=(xLeftOffset, y_constant * fontZoom), + text=string6, xy=(xLeftOffset, y_constant * fontZoom), font=labelFont, fill=labelColor) def drawGeneBand(self, canvas, gifmap, plotXScale, offset=(40, 120, 80, 10), zoom=1, startMb=None, endMb=None): diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py index ebad7d36..2f90b475 100644 --- a/wqflask/wqflask/marker_regression/run_mapping.py +++ b/wqflask/wqflask/marker_regression/run_mapping.py @@ -75,6 +75,7 @@ class RunMapping: self.vals = [] self.samples = [] self.sample_vals = start_vars['sample_vals'] + self.vals_hash = start_vars['vals_hash'] sample_val_dict = json.loads(self.sample_vals) samples = sample_val_dict.keys() if (len(genofile_samplelist) != 0): diff --git a/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js b/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js index 09e9d024..b75d658e 100644 --- 
a/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js +++ b/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js @@ -141,11 +141,11 @@ $('input[name=display_all]').change((function(_this) { })(this)); //ZS: This is a list of inputs to be passed to the loading page, since not all inputs on the trait page are relevant to mapping -var mapping_input_list = ['temp_uuid', 'trait_id', 'dataset', 'tool_used', 'form_url', 'method', 'transform', 'trimmed_markers', 'selected_chr', 'chromosomes', 'mapping_scale', 'sample_vals', - 'score_type', 'suggestive', 'significant', 'num_perm', 'permCheck', 'perm_output', 'perm_strata', 'categorical_vars', 'num_bootstrap', 'bootCheck', 'bootstrap_results', - 'LRSCheck', 'covariates', 'maf', 'use_loco', 'manhattan_plot', 'control_marker', 'do_control', 'genofile', - 'pair_scan', 'startMb', 'endMb', 'graphWidth', 'lrsMax', 'additiveCheck', 'showSNP', 'showGenes', 'viewLegend', 'haplotypeAnalystCheck', - 'mapmethod_rqtl_geno', 'mapmodel_rqtl_geno', 'temp_trait', 'group', 'species', 'reaper_version', 'primary_samples'] +var mapping_input_list = ['temp_uuid', 'trait_id', 'dataset', 'tool_used', 'form_url', 'method', 'transform', 'trimmed_markers', 'selected_chr', 'chromosomes', 'mapping_scale', + 'sample_vals', 'vals_hash', 'score_type', 'suggestive', 'significant', 'num_perm', 'permCheck', 'perm_output', 'perm_strata', 'categorical_vars', + 'num_bootstrap', 'bootCheck', 'bootstrap_results', 'LRSCheck', 'covariates', 'maf', 'use_loco', 'manhattan_plot', 'control_marker', + 'do_control', 'genofile', 'pair_scan', 'startMb', 'endMb', 'graphWidth', 'lrsMax', 'additiveCheck', 'showSNP', 'showGenes', 'viewLegend', + 'haplotypeAnalystCheck', 'mapmethod_rqtl_geno', 'mapmodel_rqtl_geno', 'temp_trait', 'group', 'species', 'reaper_version', 'primary_samples'] $(".rqtl-geno-tab, #rqtl_geno_compute").on("click", (function(_this) { return function() { diff --git a/wqflask/wqflask/templates/mapping_results.html b/wqflask/wqflask/templates/mapping_results.html index d446745d..162ae810 100644 --- a/wqflask/wqflask/templates/mapping_results.html +++ b/wqflask/wqflask/templates/mapping_results.html @@ -34,6 +34,7 @@ + diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 11a9380c..707b18e1 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -1051,6 +1051,7 @@ def mapping_results_page(): 'samples', 'vals', 'sample_vals', + 'vals_hash', 'first_run', 'output_files', 'geno_db_exists', -- cgit v1.2.3 From 1f2e32a91727abab77ecdf501fcc5040b17dfece Mon Sep 17 00:00:00 2001 From: zsloan Date: Thu, 9 Sep 2021 17:07:06 +0000 Subject: Replaced trait name with trait display name in display_mapping_results so the group codes will be includes in phenotype IDs --- wqflask/wqflask/marker_regression/display_mapping_results.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py index 5f5fe6a3..e9ba7dff 100644 --- a/wqflask/wqflask/marker_regression/display_mapping_results.py +++ b/wqflask/wqflask/marker_regression/display_mapping_results.py @@ -1249,18 +1249,18 @@ class DisplayMappingResults: if self.this_trait.symbol: identification += "Trait: %s - %s" % ( - self.this_trait.name, self.this_trait.symbol) + self.this_trait.display_name, self.this_trait.symbol) elif self.dataset.type == "Publish": if self.this_trait.post_publication_abbreviation: identification += "Trait: %s - %s" % ( - 
self.this_trait.name, self.this_trait.post_publication_abbreviation) + self.this_trait.display_name, self.this_trait.post_publication_abbreviation) elif self.this_trait.pre_publication_abbreviation: identification += "Trait: %s - %s" % ( - self.this_trait.name, self.this_trait.pre_publication_abbreviation) + self.this_trait.display_name, self.this_trait.pre_publication_abbreviation) else: - identification += "Trait: %s" % (self.this_trait.name) + identification += "Trait: %s" % (self.this_trait.display_name) else: - identification += "Trait: %s" % (self.this_trait.name) + identification += "Trait: %s" % (self.this_trait.display_name) identification += " with %s samples" % (self.n_samples) d = 4 + max( -- cgit v1.2.3 From 5020158b3ab7cf14a5809af65d2c616d32714a22 Mon Sep 17 00:00:00 2001 From: zsloan Date: Thu, 9 Sep 2021 17:07:47 +0000 Subject: Change trait name to display name in the metadata at the top of the page (so group codes will be includes in phenotype IDs --- wqflask/wqflask/templates/mapping_results.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/templates/mapping_results.html b/wqflask/wqflask/templates/mapping_results.html index 162ae810..7f865056 100644 --- a/wqflask/wqflask/templates/mapping_results.html +++ b/wqflask/wqflask/templates/mapping_results.html @@ -68,7 +68,7 @@

    Map Viewer: Whole Genome


    Population: {{ dataset.group.species|capitalize }} {{ dataset.group.name }}
    Database: {{ dataset.fullname }}
    - {% if dataset.type == "ProbeSet" %}Trait ID:{% else %}Record ID:{% endif %} {{ this_trait.name }}
    + {% if dataset.type == "ProbeSet" %}Trait ID:{% else %}Record ID:{% endif %} {{ this_trait.display_name }}
    {% if dataset.type == "ProbeSet" %} Gene Symbol: {{ this_trait.symbol }}
    Location: Chr {{ this_trait.chr }} @ {{ this_trait.mb }} Mb
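    The figure-metadata commits above all come down to drawing a few extra text lines near the top of the PIL canvas that holds the mapping plot. A stripped-down sketch of that pattern follows; the offsets, line spacing, font and colour are placeholders rather than the values used in display_mapping_results.py.

        import datetime
        from PIL import Image, ImageDraw

        def label_figure(canvas, dataset_label, vals_hash, x_offset=80, y=10):
            drawer = ImageDraw.Draw(canvas)
            # The committed code labels this "UTC Timestamp" but calls
            # datetime.now(), which is local time unless the server clock is UTC.
            timestamp = datetime.datetime.now().strftime("%b %d %Y %H:%M:%S")
            for line in ("UTC Timestamp: " + timestamp,
                         "Dataset: " + dataset_label,
                         "Trait Hash: " + str(vals_hash)):
                drawer.text((x_offset, y), line, fill="black")
                y += 15  # same 15-pixel line spacing as the original

        canvas = Image.new("RGB", (1000, 600), "white")
        label_figure(canvas, "BXD - (example dataset)", "example-hash")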
    -- cgit v1.2.3 From f51569c80a4e00ad7dcd99ed9bf3ed6d9aaf4051 Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 10 Sep 2021 18:54:44 +0000 Subject: Removed encoding, since it's apparently not needed since the Python 3 switchover (and was causing there to be no matches between user IDs and groups) --- wqflask/utility/redis_tools.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py index ff125bd2..99295c67 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -135,10 +135,8 @@ def get_user_groups(user_id): for key in groups_list: try: group_ob = json.loads(groups_list[key]) - group_admins = set([this_admin.encode( - 'utf-8') if this_admin else None for this_admin in group_ob['admins']]) - group_members = set([this_member.encode( - 'utf-8') if this_member else None for this_member in group_ob['members']]) + group_admins = set([this_admin if this_admin else None for this_admin in group_ob['admins']]) + group_members = set([this_member if this_member else None for this_member in group_ob['members']]) if user_id in group_admins: admin_group_ids.append(group_ob['id']) elif user_id in group_members: -- cgit v1.2.3 From d76e1ec30195074cf8dfd0f8396285d31faa8984 Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 10 Sep 2021 19:02:10 +0000 Subject: Fix issue with the way the template was checking if genofile_string was set; it was receiving an empty string, so the previous logic made it think there was a genofile string when there wasn't one --- wqflask/wqflask/templates/mapping_results.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/templates/mapping_results.html b/wqflask/wqflask/templates/mapping_results.html index 7f865056..7a222d0c 100644 --- a/wqflask/wqflask/templates/mapping_results.html +++ b/wqflask/wqflask/templates/mapping_results.html @@ -73,7 +73,7 @@ Gene Symbol: {{ this_trait.symbol }}
    Location: Chr {{ this_trait.chr }} @ {{ this_trait.mb }} Mb
    {% endif %} - {% if genofile_string is defined %} + {% if genofile_string != "" %} Genotypes: {{ genofile_string.split(":")[1] }} {% endif %}
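    The redis_tools change just above is worth spelling out: under Python 3 a str user id can never be a member of a set of bytes, so the old .encode('utf-8') calls made every admin/member comparison in get_user_groups() come back empty. A short demonstration with a made-up id:

        user_id = "8ad942fe-490d-453e-bd37-56f252e41603"  # made-up example id

        admins_old = {user_id.encode("utf-8")}  # what the code built before this fix
        admins_new = {user_id}                  # after dropping the encode() calls

        print(user_id in admins_old)  # False: str and bytes never compare equal
        print(user_id in admins_new)  # True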
    -- cgit v1.2.3 From f414cd4109229546bfb8c56fea95b6729121f0b3 Mon Sep 17 00:00:00 2001 From: jgart Date: Thu, 9 Sep 2021 22:28:02 -0500 Subject: Add Jupyter Notebook Launcher to Tools dropdown menu --- wqflask/wqflask/templates/base.html | 1 + 1 file changed, 1 insertion(+) diff --git a/wqflask/wqflask/templates/base.html b/wqflask/wqflask/templates/base.html index 049ebe6d..14e6bc88 100644 --- a/wqflask/wqflask/templates/base.html +++ b/wqflask/wqflask/templates/base.html @@ -87,6 +87,7 @@
  • Systems Genetics PheWAS
  • Genome Browser
  • BXD Power Calculator
  • +
  • Jupyter Notebook Launcher
  • Interplanetary File System
  • -- cgit v1.2.3 From 38a8544553506651a263b0134ab5372c6a62c629 Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 13 Sep 2021 18:58:10 +0000 Subject: Temporarily point the View in GN1 button to gn1-lily.genenetwork.org since some features aren't working on gn1.genenetwork.org yet --- wqflask/wqflask/templates/show_trait_details.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/templates/show_trait_details.html b/wqflask/wqflask/templates/show_trait_details.html index 53e16aa0..36d3c15a 100644 --- a/wqflask/wqflask/templates/show_trait_details.html +++ b/wqflask/wqflask/templates/show_trait_details.html @@ -233,7 +233,7 @@ {% endif %} {% endif %} - + {% if admin_status == "owner" or admin_status == "edit-admins" or admin_status == "edit-access" %} {% if this_trait.dataset.type == 'Publish' %} -- cgit v1.2.3 From 6a94f7a3a4893f087af53c2d39685d9dcb4b733d Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 13 Sep 2021 19:41:19 +0000 Subject: Change Genotype File text to only show the meaningful genotype deescription --- wqflask/wqflask/marker_regression/display_mapping_results.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py index e9ba7dff..a9ebeb64 100644 --- a/wqflask/wqflask/marker_regression/display_mapping_results.py +++ b/wqflask/wqflask/marker_regression/display_mapping_results.py @@ -1206,7 +1206,7 @@ class DisplayMappingResults: if self.genofile_string == "": string4 = 'Genotype File: %s.geno' % self.dataset.group.name else: - string4 = 'Genotype File: %s' % self.genofile_string + string4 = 'Genotype File: %s' % self.genofile_string.split(":")[1] string6 = '' if self.mapping_method == "gemma" or self.mapping_method == "gemma_bimbam": -- cgit v1.2.3 From 75132a5382d32d9332f743aca0e406ae6137d7e5 Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 13 Sep 2021 19:49:05 +0000 Subject: Include some extra metadata in the mapping page details (in addition to the figure itself) --- wqflask/wqflask/marker_regression/display_mapping_results.py | 4 ++-- wqflask/wqflask/templates/mapping_results.html | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py index a9ebeb64..77d6e2db 100644 --- a/wqflask/wqflask/marker_regression/display_mapping_results.py +++ b/wqflask/wqflask/marker_regression/display_mapping_results.py @@ -1198,8 +1198,8 @@ class DisplayMappingResults: self.dataset.fullname) - current_datetime = datetime.datetime.now() - string1 = 'UTC Timestamp: %s' % (current_datetime.strftime("%b %d %Y %H:%M:%S")) + self.current_datetime = datetime.datetime.now().strftime("%b %d %Y %H:%M:%S") + string1 = 'UTC Timestamp: %s' % (self.current_datetime) string2 = 'Dataset: %s' % (dataset_label) string3 = 'Trait Hash: %s' % (self.vals_hash) diff --git a/wqflask/wqflask/templates/mapping_results.html b/wqflask/wqflask/templates/mapping_results.html index 7a222d0c..f2d11e89 100644 --- a/wqflask/wqflask/templates/mapping_results.html +++ b/wqflask/wqflask/templates/mapping_results.html @@ -69,14 +69,15 @@ Population: {{ dataset.group.species|capitalize }} {{ dataset.group.name }}
    Database: {{ dataset.fullname }}
    {% if dataset.type == "ProbeSet" %}Trait ID:{% else %}Record ID:{% endif %} {{ this_trait.display_name }}
    + Trait Hash: {{ vals_hash }}
    {% if dataset.type == "ProbeSet" %} Gene Symbol: {{ this_trait.symbol }}
    Location: Chr {{ this_trait.chr }} @ {{ this_trait.mb }} Mb
    {% endif %} {% if genofile_string != "" %} - Genotypes: {{ genofile_string.split(":")[1] }} + Genotypes: {{ genofile_string.split(":")[1] }}
    {% endif %} -
    + Current Date/Time: {{ current_datetime }}

    Download Full Results
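    The Trait Hash surfaced in this metadata block (and stamped onto the figure earlier in this series) is evidently a fingerprint of the sample values submitted for the mapping run, so a figure or export can be matched back to the exact data that produced it. None of these diffs show how vals_hash is computed, so the sketch below illustrates the idea only and is not GN2's implementation.

        import hashlib
        import json

        def hash_of_sample_vals(sample_vals: dict) -> str:
            # One plausible fingerprint: a digest of the serialized sample values.
            serialized = json.dumps(sample_vals, sort_keys=True)
            return hashlib.md5(serialized.encode()).hexdigest()

        print(hash_of_sample_vals({"BXD1": 7.2, "BXD2": "x"}))  # placeholder samples/values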
    -- cgit v1.2.3 From 2ef7cad06cc1dcde61c91d2eafef002d25cb194e Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 13 Sep 2021 19:58:43 +0000 Subject: Add trait name and trait hash to exported mapping results --- wqflask/wqflask/marker_regression/run_mapping.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py index 2f90b475..b101b948 100644 --- a/wqflask/wqflask/marker_regression/run_mapping.py +++ b/wqflask/wqflask/marker_regression/run_mapping.py @@ -422,8 +422,9 @@ class RunMapping: total_markers = len(self.qtl_results) with Bench("Exporting Results"): - export_mapping_results(self.dataset, self.this_trait, self.qtl_results, self.mapping_results_path, - self.mapping_scale, self.score_type, self.transform, self.covariates, self.n_samples) + export_mapping_results(self.dataset, self.this_trait, self.qtl_results, + self.mapping_results_path, self.mapping_scale, self.score_type, + self.transform, self.covariates, self.n_samples, self.vals_hash) with Bench("Trimming Markers for Figure"): if len(self.qtl_results) > 30000: @@ -541,13 +542,15 @@ class RunMapping: return trimmed_genotype_data -def export_mapping_results(dataset, trait, markers, results_path, mapping_scale, score_type, transform, covariates, n_samples): +def export_mapping_results(dataset, trait, markers, results_path, mapping_scale, score_type, transform, covariates, n_samples, vals_hash): with open(results_path, "w+") as output_file: output_file.write( "Time/Date: " + datetime.datetime.now().strftime("%x / %X") + "\n") output_file.write( "Population: " + dataset.group.species.title() + " " + dataset.group.name + "\n") output_file.write("Data Set: " + dataset.fullname + "\n") + output_file.write("Trait: " + trait.display_name + "\n") + output_file.write("Trait Hash: " + vals_hash + "\n") output_file.write("N Samples: " + str(n_samples) + "\n") if len(transform) > 0: transform_text = "Transform - " -- cgit v1.2.3 From 91d04e5b573107aead1da8482dd9184d9869b82a Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 13 Sep 2021 20:03:45 +0000 Subject: Change mapping export filename to use the trait hash --- wqflask/wqflask/marker_regression/run_mapping.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py index b101b948..361c413b 100644 --- a/wqflask/wqflask/marker_regression/run_mapping.py +++ b/wqflask/wqflask/marker_regression/run_mapping.py @@ -104,9 +104,7 @@ class RunMapping: if "results_path" in start_vars: self.mapping_results_path = start_vars['results_path'] else: - mapping_results_filename = self.dataset.group.name + "_" + \ - ''.join(random.choice(string.ascii_uppercase + string.digits) - for _ in range(6)) + mapping_results_filename = "_".join([self.dataset.group.name, self.vals_hash]) self.mapping_results_path = "{}{}.csv".format( webqtlConfig.GENERATED_IMAGE_DIR, mapping_results_filename) -- cgit v1.2.3 From 24e0de14969edb220a03e22d1c9b7e6ed33f6b82 Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 13 Sep 2021 20:04:30 +0000 Subject: Change mapping export filename to use the path passed from run_mapping, instead of the generic mapping_results.csv --- wqflask/wqflask/views.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 707b18e1..54314a20 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ 
-1170,7 +1170,7 @@ def export_mapping_results(): results_csv = open(file_path, "r").read() response = Response(results_csv, mimetype='text/csv', - headers={"Content-Disposition": "attachment;filename=mapping_results.csv"}) + headers={"Content-Disposition": "attachment;filename=" + os.path.basename(file_path)}) return response -- cgit v1.2.3 From c0fbbb42a79baa823b517ee8f537cec1edc057c8 Mon Sep 17 00:00:00 2001 From: zsloan Date: Tue, 14 Sep 2021 18:09:36 +0000 Subject: Replace / with _ in the file hashes in rqtl_mapping.py, since they get translated to directories --- wqflask/wqflask/marker_regression/rqtl_mapping.py | 1 + 1 file changed, 1 insertion(+) diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py index cd578870..a148b49c 100644 --- a/wqflask/wqflask/marker_regression/rqtl_mapping.py +++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py @@ -61,6 +61,7 @@ def get_hash_of_textio(the_file: TextIO) -> str: the_file.seek(0) hash_of_file = hashlib.md5(the_file.read().encode()).hexdigest() + hash_of_file = hash_of_file.replace("/", "_") # Replace / with _ to prevent issue with filenames being translated to directories return hash_of_file -- cgit v1.2.3 From 8d54d1871e1319f7db46132d3eed61a817d5dc4c Mon Sep 17 00:00:00 2001 From: zsloan Date: Tue, 14 Sep 2021 18:14:33 +0000 Subject: Replace / with _ in the mapping results filename, due to / causing issues with being translated to directories --- wqflask/wqflask/marker_regression/run_mapping.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py index 361c413b..1df53fef 100644 --- a/wqflask/wqflask/marker_regression/run_mapping.py +++ b/wqflask/wqflask/marker_regression/run_mapping.py @@ -104,7 +104,7 @@ class RunMapping: if "results_path" in start_vars: self.mapping_results_path = start_vars['results_path'] else: - mapping_results_filename = "_".join([self.dataset.group.name, self.vals_hash]) + mapping_results_filename = "_".join([self.dataset.group.name, self.vals_hash]).replace("/", "_") self.mapping_results_path = "{}{}.csv".format( webqtlConfig.GENERATED_IMAGE_DIR, mapping_results_filename) -- cgit v1.2.3 From b6c0885007d516b257d1027b97df66c11b8672dd Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Thu, 16 Sep 2021 13:26:53 +0300 Subject: wqflask: views: Redirect to the correct URL after phenotype update --- wqflask/wqflask/views.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 54314a20..85aa6b17 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -633,7 +633,7 @@ def update_phenotype(): json_data=json.dumps(diff_data))) flash(f"Diff-data: \n{diff_data}\nhas been uploaded", "success") return redirect(f"/trait/{data_.get('dataset-name')}" - f"/edit/phenotype-id/{data_.get('phenotype-id')}") + f"/edit/inbredset-id/{data_.get('inbred-set-id')}") @app.route("/probeset/update", methods=["POST"]) -- cgit v1.2.3 From 03a91557c119fc2c4cdfc36015260034ead0ba98 Mon Sep 17 00:00:00 2001 From: Frederick Muriuki Muriithi Date: Mon, 20 Sep 2021 06:54:04 +0300 Subject: Provide UI elements for clustered heatmap generation Issue: https://github.com/genenetwork/gn-gemtext-threads/blob/main/topics/gn1-migration-to-gn2/clustering.gmi * wqflask/wqflask/collect.py: provide hard-coded URL for testing * wqflask/wqflask/templates/collections/view.html: provide button Provide a button to 
trigger the heatmap generation. As a test, we also provide a hard-coded URL for the API endpoint to get the heatmap data. --- wqflask/wqflask/collect.py | 3 ++- wqflask/wqflask/templates/collections/view.html | 6 ++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/wqflask/wqflask/collect.py b/wqflask/wqflask/collect.py index 01274ba9..41ff47ae 100644 --- a/wqflask/wqflask/collect.py +++ b/wqflask/wqflask/collect.py @@ -219,7 +219,8 @@ def view_collection(): json_version.append(jsonable(trait_ob)) collection_info = dict(trait_obs=trait_obs, - uc=uc) + uc=uc, + heatmap_data_url="http://localhost:8080/api/heatmaps/clustered") if "json" in params: return json.dumps(json_version) diff --git a/wqflask/wqflask/templates/collections/view.html b/wqflask/wqflask/templates/collections/view.html index 9ec98ab1..09f9dd21 100644 --- a/wqflask/wqflask/templates/collections/view.html +++ b/wqflask/wqflask/templates/collections/view.html @@ -50,6 +50,12 @@ +
    -- cgit v1.2.3 From 99bfeeef777bdaa804e4f91cae486a34fdf1e1c2 Mon Sep 17 00:00:00 2001 From: Frederick Muriuki Muriithi Date: Mon, 20 Sep 2021 08:55:01 +0300 Subject: Implement proof-of-concept code to submit data Issue: https://github.com/genenetwork/gn-gemtext-threads/blob/main/topics/gn1-migration-to-gn2/clustering.gmi * Implement some javascript to activate the "Clustered Heatmap" button. This commit provides a proof-of-concept implementation to help with identifying the requirements for sending and receiving of the heatmaps data. --- wqflask/wqflask/templates/collections/view.html | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/wqflask/wqflask/templates/collections/view.html b/wqflask/wqflask/templates/collections/view.html index 09f9dd21..783458fc 100644 --- a/wqflask/wqflask/templates/collections/view.html +++ b/wqflask/wqflask/templates/collections/view.html @@ -253,6 +253,27 @@ $("#make_default").on("click", function(){ make_default(); }); + + $("#clustered-heatmaps").on("click", function() { + heatmap_url = $(this).attr("data-url") + console.log("heatmap url:", heatmap_url) + traits = $(".trait_checkbox:checked").map(function() { + return this.value + }).get(); + console.log("SELECTED TRAITS", traits); + $.ajax({ + type: "POST", + url: heatmap_url, + contentType: "application/json", + data: JSON.stringify({ + "traits_names": traits + }), + dataType: "JSON", + success: function(res) { + console.log("results:", res) + } + }); + }); }); -- cgit v1.2.3 From 614f641624582754e29b84d632e311ed5f186c1e Mon Sep 17 00:00:00 2001 From: Frederick Muriuki Muriithi Date: Mon, 20 Sep 2021 09:06:28 +0300 Subject: Move "Clustered Heatmap" button to separate form Issue: https://github.com/genenetwork/gn-gemtext-threads/blob/main/topics/gn1-migration-to-gn2/clustering.gmi * Move the button out of the "export_form" into a new "heatmaps_form" to avoid some weird JS interaction that showed up. --- wqflask/wqflask/templates/collections/view.html | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/wqflask/wqflask/templates/collections/view.html b/wqflask/wqflask/templates/collections/view.html index 783458fc..06fd80f4 100644 --- a/wqflask/wqflask/templates/collections/view.html +++ b/wqflask/wqflask/templates/collections/view.html @@ -50,13 +50,16 @@ - + +
    + +
    Show/Hide Columns: -- cgit v1.2.3 From cfe0de277021c41eedeb65ec7f1560ac6d67ad0a Mon Sep 17 00:00:00 2001 From: Frederick Muriuki Muriithi Date: Mon, 20 Sep 2021 09:15:10 +0300 Subject: Fix id used. --- wqflask/wqflask/templates/collections/view.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/templates/collections/view.html b/wqflask/wqflask/templates/collections/view.html index 06fd80f4..0578460d 100644 --- a/wqflask/wqflask/templates/collections/view.html +++ b/wqflask/wqflask/templates/collections/view.html @@ -257,7 +257,7 @@ make_default(); }); - $("#clustered-heatmaps").on("click", function() { + $("#clustered-heatmap").on("click", function() { heatmap_url = $(this).attr("data-url") console.log("heatmap url:", heatmap_url) traits = $(".trait_checkbox:checked").map(function() { -- cgit v1.2.3 From f1876d4d8da5c973375fc398fedaa12825a0b780 Mon Sep 17 00:00:00 2001 From: Frederick Muriuki Muriithi Date: Mon, 20 Sep 2021 09:20:44 +0300 Subject: Prevent the default submit action Issue: https://github.com/genenetwork/gn-gemtext-threads/blob/main/topics/gn1-migration-to-gn2/clustering.gmi * Prevent the default submit action. --- wqflask/wqflask/templates/collections/view.html | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/wqflask/wqflask/templates/collections/view.html b/wqflask/wqflask/templates/collections/view.html index 0578460d..51b96e10 100644 --- a/wqflask/wqflask/templates/collections/view.html +++ b/wqflask/wqflask/templates/collections/view.html @@ -257,6 +257,10 @@ make_default(); }); + $("#heatmaps_form").submit(function(e) { + e.preventDefault(); + }); + $("#clustered-heatmap").on("click", function() { heatmap_url = $(this).attr("data-url") console.log("heatmap url:", heatmap_url) -- cgit v1.2.3 From 719b256996facb1f4bee2a0288bce1f18168f99b Mon Sep 17 00:00:00 2001 From: zsloan Date: Thu, 16 Sep 2021 17:59:49 +0000 Subject: Added back the option to edit privileges on trait pages, since the metadata edit button had replaced it --- wqflask/wqflask/templates/show_trait_details.html | 3 +++ 1 file changed, 3 insertions(+) diff --git a/wqflask/wqflask/templates/show_trait_details.html b/wqflask/wqflask/templates/show_trait_details.html index 36d3c15a..6a541c8c 100644 --- a/wqflask/wqflask/templates/show_trait_details.html +++ b/wqflask/wqflask/templates/show_trait_details.html @@ -242,6 +242,9 @@ {% if this_trait.dataset.type == 'ProbeSet' %} {% endif %} + {% if admin_status == "owner" or admin_status == "edit-admins" or admin_status == "edit-access" %} + + {% endif %} {% endif %}
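    The click handler above simply posts the selected trait names as JSON to the heatmap endpoint and logs whatever comes back. For testing the API outside the browser, an equivalent request from Python would look roughly like the following; the URL is the hard-coded test endpoint wired into collect.py earlier in this series, and the trait names are placeholders for the checkbox values.

        import requests

        heatmap_url = "http://localhost:8080/api/heatmaps/clustered"  # test endpoint from collect.py
        payload = {"traits_names": ["trait-1", "trait-2"]}  # placeholder checkbox values

        # json= serializes the payload and sets Content-Type: application/json,
        # matching the contentType used in the jQuery call above.
        response = requests.post(heatmap_url, json=payload)
        print(response.json())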
    -- cgit v1.2.3 From a9509a01191883e61cd5013453d18e89db022df2 Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 20 Sep 2021 17:34:26 +0000 Subject: Removed the reaper_version selection option, since original qtlreaper is no longer supported --- wqflask/wqflask/templates/show_trait_mapping_tools.html | 9 --------- 1 file changed, 9 deletions(-) diff --git a/wqflask/wqflask/templates/show_trait_mapping_tools.html b/wqflask/wqflask/templates/show_trait_mapping_tools.html index 5365140d..c6b6c0e1 100755 --- a/wqflask/wqflask/templates/show_trait_mapping_tools.html +++ b/wqflask/wqflask/templates/show_trait_mapping_tools.html @@ -95,15 +95,6 @@ {% elif mapping_method == "QTLReaper" %}
    -    [nine removed lines containing the reaper_version selection option markup]
    -- cgit v1.2.3 From 2ad5b87f1c9bc5ce5bd3f54f7d83f18123ce3c3e Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 20 Sep 2021 17:37:18 +0000 Subject: Returned the GN1 url to gn1.genenetwork.org since it's apparently working well enough again now that some issues have been fixed --- wqflask/wqflask/templates/show_trait_details.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/templates/show_trait_details.html b/wqflask/wqflask/templates/show_trait_details.html index 6a541c8c..2a21dd24 100644 --- a/wqflask/wqflask/templates/show_trait_details.html +++ b/wqflask/wqflask/templates/show_trait_details.html @@ -233,7 +233,7 @@ {% endif %} {% endif %} - + {% if admin_status == "owner" or admin_status == "edit-admins" or admin_status == "edit-access" %} {% if this_trait.dataset.type == 'Publish' %} -- cgit v1.2.3 From 98f9027b8ce8f33dda7f0b1b5495b22b4a450349 Mon Sep 17 00:00:00 2001 From: Frederick Muriuki Muriithi Date: Wed, 22 Sep 2021 06:30:42 +0300 Subject: Test heatmap creation from serialized figure Issue: https://github.com/genenetwork/gn-gemtext-threads/blob/main/topics/gn1-migration-to-gn2/clustering.gmi * Attempt using the figure, serialized as JSON, to display the clustered heatmap. --- wqflask/wqflask/templates/collections/view.html | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/wqflask/wqflask/templates/collections/view.html b/wqflask/wqflask/templates/collections/view.html index 51b96e10..bca629a9 100644 --- a/wqflask/wqflask/templates/collections/view.html +++ b/wqflask/wqflask/templates/collections/view.html @@ -128,6 +128,9 @@ +
    +

    @@ -148,6 +151,8 @@ + + +{% endblock %} + -- cgit v1.2.3 From 167ec0df8d8d487832e6a0acaee3eac8963d9804 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Fri, 1 Oct 2021 05:34:20 +0300 Subject: add xterm web terminal --- wqflask/wqflask/templates/test_wgcna_results.html | 225 +++++++++++----------- 1 file changed, 116 insertions(+), 109 deletions(-) diff --git a/wqflask/wqflask/templates/test_wgcna_results.html b/wqflask/wqflask/templates/test_wgcna_results.html index 37ea2aa0..f95766ad 100644 --- a/wqflask/wqflask/templates/test_wgcna_results.html +++ b/wqflask/wqflask/templates/test_wgcna_results.html @@ -1,146 +1,153 @@ {% extends "base.html" %} {% block title %}WCGNA results{% endblock %} +{% block content %} + -{% block content %} - -
    +.control_net_colors { + display: flex; + flex-wrap: wrap; + justify-content: space-between; + align-items: center; + text-align: center; +} -
    -
    - {% for key, value in results["data"]["output"]["soft_threshold"].items()%} -
    -

    {{key}}

    - {% for val in value %} -

    {{val|round(3)}}

    - - {% endfor %} - -
    - {% endfor %} +.control_mod_eigens { + display: grid; + grid-template-columns: repeat(2, 200px); +} -
    +#terminal { -
    -

    Net colors

    -
    - {% for key,value in results["data"]["output"]["net_colors"].items() %} -
    -

    {{key}}

    -

    {{value}}

    -
    - - {% endfor %} + max-width: 768px; -
    - + margin: 10px; -
    - - -
    -

    Module eigen genes

    - -
    - {% for strain in results["data"]["input"]["sample_names"]%} - {{strain}} - {% endfor %} - {% for mod,values in results["data"]["output"]["ModEigens"].items() %} - {{mod}} {{values}} - - {% endfor %} - -
    - -
    - - - dsffsdf - +} + +
    +
    +
    +
    +
    +
    +
    + {% for key, value in results["data"]["output"]["soft_threshold"].items()%} +
    +

    {{key}}

    + {% for val in value %} +

    {{val|round(3)}}

    + {% endfor %} +
    + {% endfor %} +
    +
    +

    Net colors

    +
    + {% for key,value in results["data"]["output"]["net_colors"].items() %} +
    +

    {{key}}

    +

    {{value}}

    +
    + {% endfor %} +
    +
    +
    +

    Module eigen genes

    +
    + {% for strain in results["data"]["input"]["sample_names"]%} + {{strain}} + {% endfor %} + {% for mod,values in results["data"]["output"]["ModEigens"].items() %} + {{mod}} {{values}} + {% endfor %} +
    + + +
    +
    +
    + + -{% endblock %} + writeToTerminal({ cursorBlink: true, lineHeight: 1.2 }, "terminal")(terminal_output) + +{% endblock %} \ No newline at end of file -- cgit v1.2.3 From 266d4c4a425ca0a215c8d789e2978d213d5ff37d Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Tue, 21 Sep 2021 21:25:13 +0300 Subject: Rename "admin_login_required" to "edit_access_required" --- wqflask/wqflask/decorators.py | 2 +- wqflask/wqflask/views.py | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/wqflask/wqflask/decorators.py b/wqflask/wqflask/decorators.py index f0978fd3..f6e3eb8a 100644 --- a/wqflask/wqflask/decorators.py +++ b/wqflask/wqflask/decorators.py @@ -3,7 +3,7 @@ from flask import g from functools import wraps -def admin_login_required(f): +def edit_access_required(f): """Use this for endpoints where admins are required""" @wraps(f) def wrap(*args, **kwargs): diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index b297da08..5067ca0e 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -85,7 +85,7 @@ from wqflask.export_traits import export_search_results_csv from wqflask.gsearch import GSearch from wqflask.update_search_results import GSearch as UpdateGSearch from wqflask.docs import Docs, update_text -from wqflask.decorators import admin_login_required +from wqflask.decorators import edit_access_required from wqflask.db_info import InfoPage from utility import temp_data @@ -420,7 +420,7 @@ def submit_trait_form(): @app.route("/trait//edit/inbredset-id/") -@admin_login_required +@edit_access_required def edit_phenotype(name, inbredset_id): conn = MySQLdb.Connect(db=current_app.config.get("DB_NAME"), user=current_app.config.get("DB_USER"), @@ -477,7 +477,7 @@ def edit_phenotype(name, inbredset_id): @app.route("/trait/edit/probeset-name/") -@admin_login_required +@edit_access_required def edit_probeset(dataset_name): conn = MySQLdb.Connect(db=current_app.config.get("DB_NAME"), user=current_app.config.get("DB_USER"), @@ -520,7 +520,7 @@ def edit_probeset(dataset_name): @app.route("/trait/update", methods=["POST"]) -@admin_login_required +@edit_access_required def update_phenotype(): conn = MySQLdb.Connect(db=current_app.config.get("DB_NAME"), user=current_app.config.get("DB_USER"), @@ -646,7 +646,7 @@ def update_phenotype(): @app.route("/probeset/update", methods=["POST"]) -@admin_login_required +@edit_access_required def update_probeset(): conn = MySQLdb.Connect(db=current_app.config.get("DB_NAME"), user=current_app.config.get("DB_USER"), @@ -1381,7 +1381,7 @@ def get_sample_data_as_csv(trait_name: int, phenotype_id: int): @app.route("/admin/data-sample/diffs/") -@admin_login_required +@edit_access_required def display_diffs_admin(): TMPDIR = current_app.config.get("TMPDIR") DIFF_DIR = f"{TMPDIR}/sample-data/diffs" -- cgit v1.2.3 From 7f317126d7d422b073cb4e4a8698757fe1e763f3 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Tue, 21 Sep 2021 21:36:32 +0300 Subject: Replace hard-coded e-mails with gn-proxy queries * wqflask/wqflask/decorators.py (edit_access_required.wrap): Query the proxy to see the access rights of a given user. 
--- wqflask/wqflask/decorators.py | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/wqflask/wqflask/decorators.py b/wqflask/wqflask/decorators.py index f6e3eb8a..54aa6795 100644 --- a/wqflask/wqflask/decorators.py +++ b/wqflask/wqflask/decorators.py @@ -1,14 +1,36 @@ """This module contains gn2 decorators""" from flask import g +from typing import Dict from functools import wraps +from utility.hmac import hmac_creation + +import json +import requests def edit_access_required(f): """Use this for endpoints where admins are required""" @wraps(f) def wrap(*args, **kwargs): - if g.user_session.record.get(b"user_email_address") not in [ - b"labwilliams@gmail.com"]: + resource_id: str = "" + if kwargs.get("inbredset_id"): # data type: dataset-publish + resource_id = hmac_creation("dataset-publish:" + f"{kwargs.get('inbredset_id')}:" + f"{kwargs.get('name')}") + if kwargs.get("dataset_name"): # data type: dataset-probe + resource_id = hmac_creation("dataset-probeset:" + f"{kwargs.get('dataset_name')}") + response: Dict = {} + try: + _user_id = g.user_session.record.get(b"user_id", + "").decode("utf-8") + response = json.loads( + requests.get("http://localhost:8080/" + "available?resource=" + f"{resource_id}&user={_user_id}").content) + except: + response = {} + if "edit" not in response.get("data", []): return "You need to be admin", 401 return f(*args, **kwargs) return wrap -- cgit v1.2.3 From ac14326be6695f185f843d29bf3ff016f5eb3016 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 22 Sep 2021 14:30:09 +0300 Subject: new_security: login_user.html: Delete commented out block --- .../wqflask/templates/new_security/login_user.html | 26 ---------------------- 1 file changed, 26 deletions(-) diff --git a/wqflask/wqflask/templates/new_security/login_user.html b/wqflask/wqflask/templates/new_security/login_user.html index 095036f0..88eab6bc 100644 --- a/wqflask/wqflask/templates/new_security/login_user.html +++ b/wqflask/wqflask/templates/new_security/login_user.html @@ -114,31 +114,5 @@ label.error,div.error{ {% endblock %} {% block js %} - - - {% include "new_security/_scripts.html" %} {% endblock %} -- cgit v1.2.3 From 84a0cce8a341b8b45b3b0037379818c32d5614b2 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 22 Sep 2021 14:32:03 +0300 Subject: Remove "_scripts.html" and all it's references --- wqflask/wqflask/templates/new_security/_scripts.html | 1 - wqflask/wqflask/templates/new_security/forgot_password.html | 1 - wqflask/wqflask/templates/new_security/forgot_password_step2.html | 1 - wqflask/wqflask/templates/new_security/password_reset.html | 1 - wqflask/wqflask/templates/new_security/register_user.html | 1 - wqflask/wqflask/templates/new_security/registered.html | 1 - wqflask/wqflask/templates/new_security/thank_you.html | 1 - wqflask/wqflask/templates/new_security/verification_still_needed.html | 1 - 8 files changed, 8 deletions(-) delete mode 100644 wqflask/wqflask/templates/new_security/_scripts.html diff --git a/wqflask/wqflask/templates/new_security/_scripts.html b/wqflask/wqflask/templates/new_security/_scripts.html deleted file mode 100644 index 5fefe305..00000000 --- a/wqflask/wqflask/templates/new_security/_scripts.html +++ /dev/null @@ -1 +0,0 @@ - diff --git a/wqflask/wqflask/templates/new_security/forgot_password.html b/wqflask/wqflask/templates/new_security/forgot_password.html index e5c42a45..60a221da 100644 --- a/wqflask/wqflask/templates/new_security/forgot_password.html +++ 
b/wqflask/wqflask/templates/new_security/forgot_password.html @@ -48,6 +48,5 @@ {% endblock %} {% block js %} - {% include "new_security/_scripts.html" %} {% endblock %} diff --git a/wqflask/wqflask/templates/new_security/forgot_password_step2.html b/wqflask/wqflask/templates/new_security/forgot_password_step2.html index b4bf41c7..1835fd4c 100644 --- a/wqflask/wqflask/templates/new_security/forgot_password_step2.html +++ b/wqflask/wqflask/templates/new_security/forgot_password_step2.html @@ -20,7 +20,6 @@ {% endblock %} {% block js %} - {% include "new_security/_scripts.html" %} {% endblock %} diff --git a/wqflask/wqflask/templates/new_security/password_reset.html b/wqflask/wqflask/templates/new_security/password_reset.html index 684c12b1..e21f075c 100644 --- a/wqflask/wqflask/templates/new_security/password_reset.html +++ b/wqflask/wqflask/templates/new_security/password_reset.html @@ -73,7 +73,6 @@ {% block js %} - {% include "new_security/_scripts.html" %} {% endblock %} diff --git a/wqflask/wqflask/templates/new_security/register_user.html b/wqflask/wqflask/templates/new_security/register_user.html index 3ae4488b..c2895517 100644 --- a/wqflask/wqflask/templates/new_security/register_user.html +++ b/wqflask/wqflask/templates/new_security/register_user.html @@ -100,7 +100,6 @@ {% block js %} - {% include "new_security/_scripts.html" %} {% endblock %} diff --git a/wqflask/wqflask/templates/new_security/registered.html b/wqflask/wqflask/templates/new_security/registered.html index f2f58ec1..29889a97 100644 --- a/wqflask/wqflask/templates/new_security/registered.html +++ b/wqflask/wqflask/templates/new_security/registered.html @@ -19,7 +19,6 @@ {% block js %} - {% include "new_security/_scripts.html" %} {% endblock %} diff --git a/wqflask/wqflask/templates/new_security/thank_you.html b/wqflask/wqflask/templates/new_security/thank_you.html index 0ff7ee8d..d4f5e574 100644 --- a/wqflask/wqflask/templates/new_security/thank_you.html +++ b/wqflask/wqflask/templates/new_security/thank_you.html @@ -18,7 +18,6 @@ {% endblock %} {% block js %} - {% include "new_security/_scripts.html" %} {% endblock %} diff --git a/wqflask/wqflask/templates/new_security/verification_still_needed.html b/wqflask/wqflask/templates/new_security/verification_still_needed.html index dc0f9e68..1f91fd8d 100644 --- a/wqflask/wqflask/templates/new_security/verification_still_needed.html +++ b/wqflask/wqflask/templates/new_security/verification_still_needed.html @@ -21,7 +21,6 @@ {% endblock %} {% block js %} - {% include "new_security/_scripts.html" %} {% endblock %} -- cgit v1.2.3 From 6e0ea75ca427721aed0a5f394b501b2cde9bf769 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Thu, 23 Sep 2021 13:17:55 +0300 Subject: Add script for creating/ updating groups during authorisation --- scripts/authentication/group.py | 130 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 130 insertions(+) create mode 100644 scripts/authentication/group.py diff --git a/scripts/authentication/group.py b/scripts/authentication/group.py new file mode 100644 index 00000000..b89bc3ec --- /dev/null +++ b/scripts/authentication/group.py @@ -0,0 +1,130 @@ +"""A script for adding users to a specific group. + +Example: + +Assuming there are no groups and 'test@bonfacemunyoki.com' does not +exist in Redis: + +.. 
code-block:: bash + python group.py -g "editors" -m "test@bonfacemunyoki.com" + +results in:: + + Successfully created the group: 'editors' + Data: '{"admins": [], "members": []}' + +If 'me@bonfacemunyoki.com' exists in 'users' in Redis and we run: + +.. code-block:: bash + python group.py -g "editors" -m "me@bonfacemunyoki.com" + +now results in:: + + No new group was created. + Updated Data: {'admins': [], 'members': ['me@bonfacemunyoki.com']} + +""" + +import argparse +import redis +import json + +from typing import Dict, List, Optional, Set +from glom import glom # type: ignore + + +def create_group_data(users: Dict, target_group: str, + members: Optional[str] = None, + admins: Optional[str] = None) -> Dict: + """Return a dictionary that contains the following keys: "key", + "field", and "value" that can be used in a redis hash as follows: + HSET key field value + + Parameters: + + - `users`: a list of users for example: + + {'8ad942fe-490d-453e-bd37-56f252e41603': + '{"email_address": "me@test.com", + "full_name": "John Doe", + "organization": "Genenetwork", + "password": {"algorithm": "pbkdf2", + "hashfunc": "sha256", + "salt": "gJrd1HnPSSCmzB5veMPaVk2ozzDlS1Z7Ggcyl1+pciA=", + "iterations": 100000, "keylength": 32, + "created_timestamp": "2021-09-22T11:32:44.971912", + "password": "edcdaa60e84526c6"}, + "user_id": "8ad942fe", "confirmed": 1, + "registration_info": { + "timestamp": "2021-09-22T11:32:45.028833", + "ip_address": "127.0.0.1", + "user_agent": "Mozilla/5.0"}}'} + + - `target_group`: the group name that will be stored inside the + "groups" hash in Redis. + + - `members`: a comma-separated list of values that contain members + of the `target_group` e.g. "me@test1.com, me@test2.com, + me@test3.com" + + - `admins`: a comma-separated list of values that contain + administrators of the `target_group` e.g. "me@test1.com, + me@test2.com, me@test3.com" + + """ + _members = "".join(members.split()).split(",") if members else [] + _admins: List = "".join(admins.split()).split(",") if admins else [] + + user_emails: Set = {glom(json.loads(user_details), "email_address") + for _, user_details in users.items()} + + return {"key": "groups", + "field": target_group, + "value": json.dumps({ + "admins": [admin for admin in _admins + if admin in user_emails], + "members": [member for member in _members + if member in user_emails] + })} + + +if __name__ == "__main__": + # Initialising the parser CLI arguments + parser = argparse.ArgumentParser() + parser.add_argument("-g", "--group-name", + help="This is the name of the GROUP mask") + parser.add_argument("-m", "--members", + help="Members of the GROUP mask") + parser.add_argument("-a", "--admins", + help="Admins of the GROUP mask") + args = parser.parse_args() + + if not args.group_name: + exit("\nExiting. Please specify a group name to use!\n") + + members = args.members if args.members else None + admins = args.admins if args.admins else None + REDIS_CONN = redis.Redis() + USERS = {key.decode(): val.decode() + for key, val in REDIS_CONN.hgetall("users").items()} + + if not any([members, admins]): + exit("\nExiting. 
Please provide a value for " + "MEMBERS(-m) or ADMINS(-a)!\n") + + data = create_group_data( + users=USERS, + target_group=args.group_name, + members=members, + admins=admins) + created_p = REDIS_CONN.hset(data.get("key", ""), + data.get("field", ""), + data.get("value", "")) + + groups = json.loads(REDIS_CONN.hget("groups", + args.group_name)) # type: ignore + if created_p: + exit(f"\nSuccessfully created the group: '{args.group_name}'\n" + f"Data: {groups}\n") + exit("\nNo new group was created.\n" + f"Updated Data: {groups}\n") -- cgit v1.2.3 From c585945c5516092b362efecc16325ad9ecc54291 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Thu, 23 Sep 2021 15:39:32 +0300 Subject: Add script that adds "editors" group to all resources in Redis --- scripts/authentication/resource.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 scripts/authentication/resource.py diff --git a/scripts/authentication/resource.py b/scripts/authentication/resource.py new file mode 100644 index 00000000..75ef9e93 --- /dev/null +++ b/scripts/authentication/resource.py @@ -0,0 +1,25 @@ +"""A script that adds the group: 'editors' to every +resource. 'editors' should have the right to edit both metadata and +data. + +To use this script, simply run: + +.. code-block:: python + python resource.py + +""" +import json +import redis + + +if __name__ == "__main__": + REDIS_CONN = redis.Redis() + resources = REDIS_CONN.hgetall("resources_clone") + for resource_id, resource in resources.items(): + deserialized_resource = json.loads(resource) + deserialized_resource["group_masks"] = { + "editors": {"metadata": "edit", + "data": "edit"}} + REDIS_CONN.hset("resources_clone", + resource_id, + json.dumps(deserialized_resource)) -- cgit v1.2.3 From edb6fe1b9dd98d84deb0925a2d83726e739d8677 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Mon, 27 Sep 2021 16:28:12 +0300 Subject: wqflask: resource_manager: Remove logger --- wqflask/wqflask/resource_manager.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/wqflask/wqflask/resource_manager.py b/wqflask/wqflask/resource_manager.py index b28c1b04..c54dd0b3 100644 --- a/wqflask/wqflask/resource_manager.py +++ b/wqflask/wqflask/resource_manager.py @@ -8,8 +8,6 @@ from wqflask import app from utility.authentication_tools import check_owner_or_admin from utility.redis_tools import get_resource_info, get_group_info, get_groups_like_unique_column, get_user_id, get_user_by_unique_column, get_users_like_unique_column, add_access_mask, add_resource, change_resource_owner -from utility.logger import getLogger -logger = getLogger(__name__) @app.route("/resources/manage", methods=('GET', 'POST')) -- cgit v1.2.3 From df487791c91a5aa1a9a3b4e1a6c9ce17a58eafe6 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Mon, 4 Oct 2021 12:21:16 +0300 Subject: Modify resource editing script to enable data backups & restoration --- scripts/authentication/resource.py | 101 ++++++++++++++++++++++++++++++++----- 1 file changed, 88 insertions(+), 13 deletions(-) diff --git a/scripts/authentication/resource.py b/scripts/authentication/resource.py index 75ef9e93..8fcf09d7 100644 --- a/scripts/authentication/resource.py +++ b/scripts/authentication/resource.py @@ -1,25 +1,100 @@ -"""A script that adds the group: 'editors' to every -resource. 'editors' should have the right to edit both metadata and -data. +"""A script that: -To use this script, simply run: +- Optionally restores data from a json file. 
+ +- By default, without any args provided, adds the group: 'editors' to +every resource. 'editors' should have the right to edit both metadata +and data. + +- Optionally creates a back-up every time you edit a resource. + + +To restore a back-up: + +.. code-block:: python + python resource.py --restore + +To add editors to every resource without creating a back-up: .. code-block:: python python resource.py +To add editors to every resource while creating a back-up before any +destructive edits: + +.. code-block:: python + python resource.py --enable-backup + """ +import argparse import json import redis +import os + +from datetime import datetime + + +def recover_hash(name: str, file_path: str, set_function) -> bool: + """Recover back-ups using the `set_function` + + Parameters: + + - `name`: Redis hash where `file_path` will be restored + + - `file_path`: File path where redis hash is sourced from + + - `set_function`: Function used to do the Redis backup for + example: HSET + + """ + try: + with open(file_path, "r") as f: + resources = json.load(f) + for resource_id, resource in resources.items(): + set_function(name=name, + key=resource_id, + value=resource) + return True + except Exception as e: + print(e) + return False if __name__ == "__main__": - REDIS_CONN = redis.Redis() - resources = REDIS_CONN.hgetall("resources_clone") - for resource_id, resource in resources.items(): - deserialized_resource = json.loads(resource) - deserialized_resource["group_masks"] = { - "editors": {"metadata": "edit", - "data": "edit"}} - REDIS_CONN.hset("resources_clone", + # Initialising the parser CLI arguments + parser = argparse.ArgumentParser() + parser.add_argument("--restore", + help="Restore from a given backup") + parser.add_argument("--enable-backup", action="store_true", + help="Create a back up before edits") + args = parser.parse_args() + + if args.restore: + if recover_hash(name="resources", + file_path=args.back_up, + set_function=redis.Redis(decode_responses=True).hset): + exit(f"\n Done restoring {args.back_up}!\n") + else: + exit(f"\n There was an error restoring {args.back_up}!\n") + + REDIS_CONN = redis.Redis(decode_responses=True) + RESOURCES = REDIS_CONN.hgetall("resources") + BACKUP_DIR = os.path.join(os.getenv("HOME"), "redis") + if args.enable_backup: + FILENAME = ("resources-" + f"{datetime.now().strftime('%Y-%m-%d-%I:%M:%S-%p')}" + ".json") + if not os.path.exists(BACKUP_DIR): + os.mkdir(BACKUP_DIR) + with open(os.path.join(BACKUP_DIR, FILENAME), "w") as f: + json.dump(RESOURCES, f, indent=4) + print(f"\nDone backing upto {FILENAME}") + + for resource_id, resource in RESOURCES.items(): + _resource = json.loads(resource) # str -> dict conversion + _resource["group_masks"] = {"editors": {"metadata": "edit", + "data": "edit"}} + REDIS_CONN.hset("resources", resource_id, - json.dumps(deserialized_resource)) + json.dumps(_resource)) + exit("Done updating `resources`\n") -- cgit v1.2.3 From c5215d1ed224480a274476933beded9d2ba7f7dc Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Mon, 4 Oct 2021 13:40:06 +0300 Subject: Decode redis response by default --- scripts/authentication/group.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/scripts/authentication/group.py b/scripts/authentication/group.py index b89bc3ec..265e8664 100644 --- a/scripts/authentication/group.py +++ b/scripts/authentication/group.py @@ -104,9 +104,8 @@ if __name__ == "__main__": members = args.members if args.members else None admins = args.admins if args.admins else None - REDIS_CONN = 
redis.Redis() - USERS = {key.decode(): val.decode() - for key, val in REDIS_CONN.hgetall("users").items()} + REDIS_CONN = redis.Redis(decode_responses=True) + USERS = REDIS_CONN.hgetall("users") if not any([members, admins]): exit("\nExiting. Please provide a value for " -- cgit v1.2.3 From 8fae92c83d49042da68638319385df02061df44b Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Mon, 4 Oct 2021 13:45:59 +0300 Subject: scripts: group.py: Remove "glom" dependency --- scripts/authentication/group.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/scripts/authentication/group.py b/scripts/authentication/group.py index 265e8664..02f782b3 100644 --- a/scripts/authentication/group.py +++ b/scripts/authentication/group.py @@ -30,7 +30,6 @@ import redis import json from typing import Dict, List, Optional, Set -from glom import glom # type: ignore def create_group_data(users: Dict, target_group: str, @@ -74,10 +73,12 @@ def create_group_data(users: Dict, target_group: str, """ _members = "".join(members.split()).split(",") if members else [] _admins: List = "".join(admins.split()).split(",") if admins else [] - - user_emails: Set = {glom(json.loads(user_details), "email_address") - for _, user_details in users.items()} - + user_emails: Set = set() + for _, user_details in users.items(): + _details = json.loads(user_details) + if _details.get("email_address"): + user_emails.add(_details.get("email_address")) + print(user_emails) return {"key": "groups", "field": target_group, "value": json.dumps({ -- cgit v1.2.3 From 7c1dd1211f96ca1021debc27a80d1700e70b9c6b Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Mon, 4 Oct 2021 13:49:12 +0300 Subject: scripts: group.py: Modify exit message when displaying updated data --- scripts/authentication/group.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/authentication/group.py b/scripts/authentication/group.py index 02f782b3..cc0b037e 100644 --- a/scripts/authentication/group.py +++ b/scripts/authentication/group.py @@ -125,6 +125,6 @@ if __name__ == "__main__": args.group_name)) # type: ignore if created_p: exit(f"\nSuccessfully created the group: '{args.group_name}'\n" - f"Data: {groups}\n") + f"`HGETALL groups {args.group_name}`: {groups}\n") exit("\nNo new group was created.\n" - f"Updated Data: {groups}\n") + f"`HGETALL groups {args.group_name}`: {groups}\n") -- cgit v1.2.3 From 609865fc42b7436d8c34cdcefd159c3352c5d91c Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 4 Oct 2021 20:58:14 +0000 Subject: Add group link for user member groups --- wqflask/wqflask/templates/admin/group_manager.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/templates/admin/group_manager.html b/wqflask/wqflask/templates/admin/group_manager.html index c0b99e75..692a7abc 100644 --- a/wqflask/wqflask/templates/admin/group_manager.html +++ b/wqflask/wqflask/templates/admin/group_manager.html @@ -81,7 +81,7 @@ {{ loop.index }} - {{ group.name }} + {{ group.name }} {{ group.admins|length + group.members|length }} {{ group.created_timestamp }} {{ group.changed_timestamp }} -- cgit v1.2.3 From 5b116de4aaf796be138ee0ad06c2242b3f3c33c7 Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 4 Oct 2021 21:02:37 +0000 Subject: Changed get_user_groups to pull both the ID and details in the for loop from group_list by using group_list.items() --- wqflask/utility/redis_tools.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/wqflask/utility/redis_tools.py 
b/wqflask/utility/redis_tools.py index 99295c67..de9dde46 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -127,20 +127,20 @@ def check_verification_code(code): def get_user_groups(user_id): - # ZS: Get the groups where a user is an admin or a member and + # Get the groups where a user is an admin or a member and # return lists corresponding to those two sets of groups - admin_group_ids = [] # ZS: Group IDs where user is an admin - user_group_ids = [] # ZS: Group IDs where user is a regular user + admin_group_ids = [] # Group IDs where user is an admin + user_group_ids = [] # Group IDs where user is a regular user groups_list = Redis.hgetall("groups") - for key in groups_list: + for group_id, group_details in groups_list.items(): try: - group_ob = json.loads(groups_list[key]) - group_admins = set([this_admin if this_admin else None for this_admin in group_ob['admins']]) - group_members = set([this_member if this_member else None for this_member in group_ob['members']]) + _details = json.loads(group_details) + group_admins = set([this_admin if this_admin else None for this_admin in _details['admins']]) + group_members = set([this_member if this_member else None for this_member in _details['members']]) if user_id in group_admins: - admin_group_ids.append(group_ob['id']) + admin_group_ids.append(group_id) elif user_id in group_members: - user_group_ids.append(group_ob['id']) + user_group_ids.append(group_id) else: continue except: -- cgit v1.2.3 From 35a970adba5ee1d60769a81b446122a60eac9494 Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 4 Oct 2021 21:03:52 +0000 Subject: Changed the group.py script to replace user e-mails with IDs and to include id, name, changed_timestamp, and created_timestamp in group details --- scripts/authentication/group.py | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/scripts/authentication/group.py b/scripts/authentication/group.py index cc0b037e..a5f75aad 100644 --- a/scripts/authentication/group.py +++ b/scripts/authentication/group.py @@ -26,11 +26,13 @@ now results in:: """ import argparse +import datetime import redis import json from typing import Dict, List, Optional, Set +REDIS_CONN = redis.Redis(decode_responses=True) def create_group_data(users: Dict, target_group: str, members: Optional[str] = None, @@ -71,21 +73,26 @@ def create_group_data(users: Dict, target_group: str, me@test2.com, me@test3.com" """ - _members = "".join(members.split()).split(",") if members else [] + + _members: List = "".join(members.split()).split(",") if members else [] _admins: List = "".join(admins.split()).split(",") if admins else [] - user_emails: Set = set() - for _, user_details in users.items(): + + user_ids: Dict = dict() + for user_id, user_details in users.items(): _details = json.loads(user_details) if _details.get("email_address"): - user_emails.add(_details.get("email_address")) - print(user_emails) + user_ids[_details.get("email_address")] = user_id + print(user_ids) return {"key": "groups", "field": target_group, "value": json.dumps({ - "admins": [admin for admin in _admins - if admin in user_emails], - "members": [member for member in _members - if member in user_emails] + "id": target_group, + "name": target_group, + "admins": [user_ids[admin] for admin in _admins + if admin in user_ids], + "members": [user_ids[member] for member in _members + if member in user_ids], + "changed_timestamp": datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p') })} @@ -105,7 +112,6 @@ if 
__name__ == "__main__": members = args.members if args.members else None admins = args.admins if args.admins else None - REDIS_CONN = redis.Redis(decode_responses=True) USERS = REDIS_CONN.hgetall("users") if not any([members, admins]): @@ -117,6 +123,12 @@ if __name__ == "__main__": target_group=args.group_name, members=members, admins=admins) + + if not REDIS_CONN.hget("groups", data.get("field", "")): + updated_data = json.loads(data["value"]) + updated_data["created_timestamp"] = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p') + data["value"] = json.dumps(updated_data) + created_p = REDIS_CONN.hset(data.get("key", ""), data.get("field", ""), data.get("value", "")) -- cgit v1.2.3 From 4c6a7e46dd7afe311c0bed38c4a69ddadf3cb416 Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 4 Oct 2021 21:09:15 +0000 Subject: Moved REDIS_CONN back into if __name__ == '__main__' since it doesn't need to be globally accessed anymore (I think I intiially moved it because I was calling it in create_group_data, but that ended up being unnecessary --- scripts/authentication/group.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/authentication/group.py b/scripts/authentication/group.py index a5f75aad..76c7fb4f 100644 --- a/scripts/authentication/group.py +++ b/scripts/authentication/group.py @@ -32,8 +32,6 @@ import json from typing import Dict, List, Optional, Set -REDIS_CONN = redis.Redis(decode_responses=True) - def create_group_data(users: Dict, target_group: str, members: Optional[str] = None, admins: Optional[str] = None) -> Dict: @@ -112,6 +110,8 @@ if __name__ == "__main__": members = args.members if args.members else None admins = args.admins if args.admins else None + + REDIS_CONN = redis.Redis(decode_responses=True) USERS = REDIS_CONN.hgetall("users") if not any([members, admins]): -- cgit v1.2.3 From 18b50f56d614021d5727d546f3d6e360575e4468 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Tue, 5 Oct 2021 19:07:24 +0300 Subject: work on wgcna_setup form --- wqflask/wqflask/templates/wgcna_setup.html | 54 ++++++++++++++++-------------- 1 file changed, 29 insertions(+), 25 deletions(-) diff --git a/wqflask/wqflask/templates/wgcna_setup.html b/wqflask/wqflask/templates/wgcna_setup.html index c5461497..44932389 100644 --- a/wqflask/wqflask/templates/wgcna_setup.html +++ b/wqflask/wqflask/templates/wgcna_setup.html @@ -2,8 +2,10 @@ {% block title %}WCGNA analysis{% endblock %} {% block content %} -

    WGCNA analysis parameters

    +
    +
    +

    WGCNA analysis parameters

    {% if request.form['trait_list'].split(",")|length < 4 %} {% else %} -
    - -
    - -
    - -
    + + +
    + +
    +
    -
    - -
    - -
    +
    +
    + +
    +
    -
    - -
    - -
    +
    + +
    + +
    +
    -
    - -
    - -
    +
    +
    + +
    +
    +
    -
    +
    - + {% endif %}
    +
    {% endblock %} -- cgit v1.2.3 From 31071d78f2ffb0ae4c7e0b99e74c7729a1a36e9c Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Tue, 5 Oct 2021 19:49:50 +0300 Subject: move xterm code to setup --- wqflask/wqflask/templates/wgcna_setup.html | 140 +++++++++++++++++++---------- 1 file changed, 94 insertions(+), 46 deletions(-) diff --git a/wqflask/wqflask/templates/wgcna_setup.html b/wqflask/wqflask/templates/wgcna_setup.html index 44932389..4b13e54e 100644 --- a/wqflask/wqflask/templates/wgcna_setup.html +++ b/wqflask/wqflask/templates/wgcna_setup.html @@ -1,53 +1,101 @@ {% extends "base.html" %} {% block title %}WCGNA analysis{% endblock %} - -{% block content %} - +{% block content %} + + + + +
    -
    -

    WGCNA analysis parameters

    - {% if request.form['trait_list'].split(",")|length < 4 %} - - {% else %} -
    - -
    - -
    - -
    -
    -
    - -
    - -
    -
    - -
    - -
    - -
    -
    -
    - -
    - +
    +

    WGCNA analysis parameters

    + {% if request.form['trait_list'].split(",")|length < 4 %} -
    -
    -
    - -
    + {% else %} + + +
    + +
    + +
    +
    +
    + +
    + +
    +
    +
    + +
    + +
    +
    +
    + +
    + +
    +
    +
    +
    + +
    +
    + + {% endif %} +
    +
    +
    +
    +
    - - {% endif %}
    -{% endblock %} + +{% endblock %} \ No newline at end of file -- cgit v1.2.3 From 6f739fccbb7cff9f05b53ff4775ecee0761c293b Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Tue, 5 Oct 2021 19:50:37 +0300 Subject: js formatting --- wqflask/wqflask/templates/wgcna_setup.html | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/wqflask/wqflask/templates/wgcna_setup.html b/wqflask/wqflask/templates/wgcna_setup.html index 4b13e54e..34690f29 100644 --- a/wqflask/wqflask/templates/wgcna_setup.html +++ b/wqflask/wqflask/templates/wgcna_setup.html @@ -84,11 +84,11 @@ document.addEventListener('DOMContentLoaded', function() { // open socket connection - const socket = io(`${GN_SERVER_URL}`) + const socket = io(`${GN_SERVER_URL}`) // add namespace - socket.on("output",({data})=>{ + socket.on("output", ({ data }) => { - term.writeln(data) + term.writeln(data) }) } else { -- cgit v1.2.3 From 37c37996424e826187ac3eca9ed4cd11b9715736 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Tue, 5 Oct 2021 19:52:51 +0300 Subject: code format for wgcna results file --- wqflask/wqflask/templates/test_wgcna_results.html | 61 +++++++++-------------- 1 file changed, 23 insertions(+), 38 deletions(-) diff --git a/wqflask/wqflask/templates/test_wgcna_results.html b/wqflask/wqflask/templates/test_wgcna_results.html index f95766ad..9484595f 100644 --- a/wqflask/wqflask/templates/test_wgcna_results.html +++ b/wqflask/wqflask/templates/test_wgcna_results.html @@ -2,13 +2,17 @@ {% block title %}WCGNA results{% endblock %} {% block content %} + + +
    @@ -122,32 +118,21 @@ let terminal_output = results.output let { output } = results.data let sft = output.soft_threshold - - + -{% endblock %} \ No newline at end of file +{% endblock %} +** \ No newline at end of file -- cgit v1.2.3 From 6b4b7a5774f42ff6be1fb5fde6ba1fef632cd1a4 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Wed, 6 Oct 2021 11:48:50 +0300 Subject: add datatable for eigengenes --- wqflask/wqflask/templates/test_wgcna_results.html | 80 ++++++++++++++++------- 1 file changed, 57 insertions(+), 23 deletions(-) diff --git a/wqflask/wqflask/templates/test_wgcna_results.html b/wqflask/wqflask/templates/test_wgcna_results.html index 9484595f..ac82647d 100644 --- a/wqflask/wqflask/templates/test_wgcna_results.html +++ b/wqflask/wqflask/templates/test_wgcna_results.html @@ -2,8 +2,17 @@ {% block title %}WCGNA results{% endblock %} {% block content %} + - + + + + + + + + +
    @@ -104,11 +113,12 @@
    -
    - - CLuster dendogram - + {% if image["image_generated"] %} +
    +
    + + {% endif %}
    @@ -161,12 +171,5 @@ $(document).ready(function(){ } ); }) - - - - - - - {% endblock %} \ No newline at end of file -- cgit v1.2.3 From 949789a00d8e6e901cc18b939737cd42e14c0236 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 6 Oct 2021 16:12:22 +0300 Subject: scripts: group: Use a unique key to identify a group --- scripts/authentication/group.py | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/scripts/authentication/group.py b/scripts/authentication/group.py index 76c7fb4f..eea13efe 100644 --- a/scripts/authentication/group.py +++ b/scripts/authentication/group.py @@ -29,6 +29,7 @@ import argparse import datetime import redis import json +import uuid from typing import Dict, List, Optional, Set @@ -71,26 +72,31 @@ def create_group_data(users: Dict, target_group: str, me@test2.com, me@test3.com" """ + # Emails + _members: Set = set("".join(members.split()).split(",") + if members else []) + _admins: Set = set("".join(admins.split()).split(",") + if admins else []) - _members: List = "".join(members.split()).split(",") if members else [] - _admins: List = "".join(admins.split()).split(",") if admins else [] + # Unique IDs + member_ids: Set = set() + admin_ids: Set = set() - user_ids: Dict = dict() for user_id, user_details in users.items(): _details = json.loads(user_details) - if _details.get("email_address"): - user_ids[_details.get("email_address")] = user_id - print(user_ids) + if _details.get("email_address") in _members: + member_ids.add(user_id) + if _details.get("email_address") in _admins: + admin_ids.add(user_id) + + timestamp: str = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p') return {"key": "groups", - "field": target_group, + "field": str(uuid.uuid4()), "value": json.dumps({ - "id": target_group, "name": target_group, - "admins": [user_ids[admin] for admin in _admins - if admin in user_ids], - "members": [user_ids[member] for member in _members - if member in user_ids], - "changed_timestamp": datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p') + "admins": list(admin_ids), + "members": list(member_ids), + "changed_timestamp": timestamp, })} -- cgit v1.2.3 From 931c7eb07cc995118ba808df760fd74de036853f Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 6 Oct 2021 16:15:31 +0300 Subject: scripts: group: Remove unused import --- scripts/authentication/group.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/authentication/group.py b/scripts/authentication/group.py index eea13efe..d3f9a1e4 100644 --- a/scripts/authentication/group.py +++ b/scripts/authentication/group.py @@ -31,7 +31,8 @@ import redis import json import uuid -from typing import Dict, List, Optional, Set +from typing import Dict, Optional, Set + def create_group_data(users: Dict, target_group: str, members: Optional[str] = None, -- cgit v1.2.3 From 870edaf2cf8ce8588ee7c58d08fc1f307f7198ec Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 6 Oct 2021 16:19:04 +0300 Subject: scripts: group: Remove empty `""` value for data.get data.get("field") will default to None if there is no value; and None is falsy. 
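For context, the behaviour this commit message relies on is plain Python dict semantics rather than anything Redis-specific. A minimal, hypothetical sketch (the dict and key names below are invented for illustration and are not taken from the patch):

.. code-block:: python

    # dict.get() returns None when the key is absent, so an explicit ""
    # default adds nothing when the result only feeds a truthiness check.
    data = {"key": "groups", "field": "some-uuid"}

    data.get("missing")      # None -> falsy
    data.get("missing", "")  # ""   -> also falsy

    # A guard of this shape therefore behaves the same either way:
    if not data.get("missing"):
        print("no value found; fall through to the creation branch")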
--- scripts/authentication/group.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/authentication/group.py b/scripts/authentication/group.py index d3f9a1e4..1919d9db 100644 --- a/scripts/authentication/group.py +++ b/scripts/authentication/group.py @@ -131,10 +131,10 @@ if __name__ == "__main__": members=members, admins=admins) - if not REDIS_CONN.hget("groups", data.get("field", "")): updated_data = json.loads(data["value"]) updated_data["created_timestamp"] = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p') data["value"] = json.dumps(updated_data) + if not REDIS_CONN.hget("groups", data.get("field")): created_p = REDIS_CONN.hset(data.get("key", ""), data.get("field", ""), -- cgit v1.2.3 From c1b23a1b01071c252ddae6dbea14500e4c248d84 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 6 Oct 2021 21:01:54 +0300 Subject: workflows: main.yml: Disable link checking --- .github/workflows/main.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index f279a7e5..8e2c7966 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -55,11 +55,11 @@ jobs: GENENETWORK_FILES=/genotype_files/ bin/genenetwork2 \ etc/default_settings.py -c -m unittest discover -v - - name: Test for Broken Links - run: | - env GN2_PROFILE=/gn2-profile \ - TMPDIR=/tmp\ - WEBSERVER_MODE=DEBUG LOG_LEVEL=DEBUG \ - GENENETWORK_FILES=/genotype_files/ bin/genenetwork2 \ - etc/default_settings.py -c \ - $PWD/test/requests/links_scraper/genelinks.py + # - name: Test for Broken Links + # run: | + # env GN2_PROFILE=/gn2-profile \ + # TMPDIR=/tmp\ + # WEBSERVER_MODE=DEBUG LOG_LEVEL=DEBUG \ + # GENENETWORK_FILES=/genotype_files/ bin/genenetwork2 \ + # etc/default_settings.py -c \ + # $PWD/test/requests/links_scraper/genelinks.py -- cgit v1.2.3 From 40dddc1a78a7808b480d26594ced689cdcc08c24 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 6 Oct 2021 21:23:08 +0300 Subject: scripts: group: Fix indentation --- scripts/authentication/group.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/authentication/group.py b/scripts/authentication/group.py index 1919d9db..7e73be15 100644 --- a/scripts/authentication/group.py +++ b/scripts/authentication/group.py @@ -131,10 +131,10 @@ if __name__ == "__main__": members=members, admins=admins) - updated_data = json.loads(data["value"]) - updated_data["created_timestamp"] = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p') - data["value"] = json.dumps(updated_data) if not REDIS_CONN.hget("groups", data.get("field")): + updated_data = json.loads(data["value"]) + updated_data["created_timestamp"] = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p') + data["value"] = json.dumps(updated_data) created_p = REDIS_CONN.hset(data.get("key", ""), data.get("field", ""), -- cgit v1.2.3 From 67222a6cb11995eb5a4af58f63cc9385ccfb9226 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 6 Oct 2021 21:24:16 +0300 Subject: scripts: group: Break up long line --- scripts/authentication/group.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/authentication/group.py b/scripts/authentication/group.py index 7e73be15..ed17f260 100644 --- a/scripts/authentication/group.py +++ b/scripts/authentication/group.py @@ -133,7 +133,8 @@ if __name__ == "__main__": if not REDIS_CONN.hget("groups", data.get("field")): updated_data = json.loads(data["value"]) - updated_data["created_timestamp"] = 
datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p') + timestamp = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p') + updated_data["created_timestamp"] = timestamp data["value"] = json.dumps(updated_data) created_p = REDIS_CONN.hset(data.get("key", ""), -- cgit v1.2.3 From dc378d26c003a8f0503ad69235d1685d66e4d611 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 6 Oct 2021 21:26:03 +0300 Subject: scripts: group: Update docstrings for "create_group_data" --- scripts/authentication/group.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/authentication/group.py b/scripts/authentication/group.py index ed17f260..08a4a2bc 100644 --- a/scripts/authentication/group.py +++ b/scripts/authentication/group.py @@ -41,6 +41,9 @@ def create_group_data(users: Dict, target_group: str, "field", and "value" that can be used in a redis hash as follows: HSET key field value + The "field" return value is a unique-id that is used to + distinguish the groups. + Parameters: - `users`: a list of users for example: -- cgit v1.2.3 From d5f6670836cbed804a00e02ec0258d0c87564006 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 6 Oct 2021 21:40:35 +0300 Subject: scripts: group: Replace args.group_name with data["field"] --- scripts/authentication/group.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/authentication/group.py b/scripts/authentication/group.py index 08a4a2bc..c8c2caad 100644 --- a/scripts/authentication/group.py +++ b/scripts/authentication/group.py @@ -145,7 +145,7 @@ if __name__ == "__main__": data.get("value", "")) groups = json.loads(REDIS_CONN.hget("groups", - args.group_name)) # type: ignore + data.get("field"))) # type: ignore if created_p: exit(f"\nSuccessfully created the group: '{args.group_name}'\n" f"`HGETALL groups {args.group_name}`: {groups}\n") -- cgit v1.2.3 From 70f8ed53f85cfb42ca81ed6c3b4c9cf1060940e5 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 6 Oct 2021 21:44:51 +0300 Subject: scripts: resource: Add option for specifying a groups uuid --- scripts/authentication/resource.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/scripts/authentication/resource.py b/scripts/authentication/resource.py index 8fcf09d7..4996f34c 100644 --- a/scripts/authentication/resource.py +++ b/scripts/authentication/resource.py @@ -63,12 +63,16 @@ def recover_hash(name: str, file_path: str, set_function) -> bool: if __name__ == "__main__": # Initialising the parser CLI arguments parser = argparse.ArgumentParser() + parser.add_argument("--group-id", + help="Add the group id to all resources") parser.add_argument("--restore", help="Restore from a given backup") parser.add_argument("--enable-backup", action="store_true", help="Create a back up before edits") args = parser.parse_args() + if not args.group_id: + exit("Please specify the group-id!\n") if args.restore: if recover_hash(name="resources", file_path=args.back_up, @@ -92,8 +96,8 @@ if __name__ == "__main__": for resource_id, resource in RESOURCES.items(): _resource = json.loads(resource) # str -> dict conversion - _resource["group_masks"] = {"editors": {"metadata": "edit", - "data": "edit"}} + _resource["group_masks"] = {args.group_id: {"metadata": "edit", + "data": "edit"}} REDIS_CONN.hset("resources", resource_id, json.dumps(_resource)) -- cgit v1.2.3 From 7805a48172ada364d3783db043dbcf637445a7fe Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 8 Oct 2021 22:07:43 +0000 Subject: Adding convert_dol_genotypes.py to scripts; everything is hard-coded in it since I 
was only writing it to generate a specific file and it probably won't be re-used --- scripts/convert_dol_genotypes.py | 68 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 scripts/convert_dol_genotypes.py diff --git a/scripts/convert_dol_genotypes.py b/scripts/convert_dol_genotypes.py new file mode 100644 index 00000000..353f1b53 --- /dev/null +++ b/scripts/convert_dol_genotypes.py @@ -0,0 +1,68 @@ +# This is just to convert the Rqtl2 format genotype files for DOL into a .geno file +# Everything is hard-coded since I doubt this will be re-used and I just wanted to generate the file quickly + +import os + +geno_dir = "/home/zas1024/gn2-zach/DO_genotypes/" +markers_file = "/home/zas1024/gn2-zach/DO_genotypes/SNP_Map.txt" +gn_geno_path = "/home/zas1024/gn2-zach/DO_genotypes/DOL.geno" + +marker_data = {} +with open(markers_file, "r") as markers_fh: + for i, line in enumerate(markers_fh): + if i == 0: + continue + else: + line_items = line.split("\t") + this_marker = {} + this_marker['chr'] = line_items[2] if line_items[2] != "0" else "M" + this_marker['pos'] = f'{float(line_items[3])/1000000:.6f}' + marker_data[line_items[1]] = this_marker + +sample_names = [] +for filename in os.listdir(geno_dir): + if "gm4qtl2_geno" in filename: + with open(geno_dir + "/" + filename, "r") as rqtl_geno_fh: + for i, line in enumerate(rqtl_geno_fh): + line_items = line.split(",") + if i < 3: + continue + elif not len(sample_names) and i == 3: + sample_names = [item.replace("TLB", "TB") for item in line_items[1:]] + elif i > 3: + marker_data[line_items[0]]['genotypes'] = ["X" if item.strip() == "-" else item.strip() for item in line_items[1:]] + +def sort_func(e): + try: + return int(e['chr']) + except: + if e['chr'] == "X": + return 20 + elif e['chr'] == "Y": + return 21 + elif e['chr'] == "M": + return 22 + +marker_list = [] +for key, value in marker_data.items(): + if 'genotypes' in value: + this_marker = { + 'chr': value['chr'], + 'locus': key, + 'pos': value['pos'], + 'genotypes': value['genotypes'] + } + marker_list.append(this_marker) + +marker_list.sort(key=sort_func) + +with open(gn_geno_path, "w") as gn_geno_fh: + gn_geno_fh.write("\t".join((["Chr", "Locus", "cM", "Mb"] + sample_names))) + for marker in marker_list: + row_contents = [ + marker['chr'], + marker['locus'], + marker['pos'], + marker['pos'] + ] + marker['genotypes'] + gn_geno_fh.write("\t".join(row_contents) + "\n") -- cgit v1.2.3 From b37a9c6c495d142852d0cee54d83f5c9e815e37b Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 8 Oct 2021 22:19:07 +0000 Subject: Fixed the sort to account for both chr and pos in a kind of hack-y way + added some comments + changed EOL to LF because the file suddenly started including EOL characters --- scripts/convert_dol_genotypes.py | 142 ++++++++++++++++++++------------------- 1 file changed, 74 insertions(+), 68 deletions(-) diff --git a/scripts/convert_dol_genotypes.py b/scripts/convert_dol_genotypes.py index 353f1b53..81b3bd6d 100644 --- a/scripts/convert_dol_genotypes.py +++ b/scripts/convert_dol_genotypes.py @@ -1,68 +1,74 @@ -# This is just to convert the Rqtl2 format genotype files for DOL into a .geno file -# Everything is hard-coded since I doubt this will be re-used and I just wanted to generate the file quickly - -import os - -geno_dir = "/home/zas1024/gn2-zach/DO_genotypes/" -markers_file = "/home/zas1024/gn2-zach/DO_genotypes/SNP_Map.txt" -gn_geno_path = "/home/zas1024/gn2-zach/DO_genotypes/DOL.geno" - -marker_data = {} -with open(markers_file, "r") as 
markers_fh: - for i, line in enumerate(markers_fh): - if i == 0: - continue - else: - line_items = line.split("\t") - this_marker = {} - this_marker['chr'] = line_items[2] if line_items[2] != "0" else "M" - this_marker['pos'] = f'{float(line_items[3])/1000000:.6f}' - marker_data[line_items[1]] = this_marker - -sample_names = [] -for filename in os.listdir(geno_dir): - if "gm4qtl2_geno" in filename: - with open(geno_dir + "/" + filename, "r") as rqtl_geno_fh: - for i, line in enumerate(rqtl_geno_fh): - line_items = line.split(",") - if i < 3: - continue - elif not len(sample_names) and i == 3: - sample_names = [item.replace("TLB", "TB") for item in line_items[1:]] - elif i > 3: - marker_data[line_items[0]]['genotypes'] = ["X" if item.strip() == "-" else item.strip() for item in line_items[1:]] - -def sort_func(e): - try: - return int(e['chr']) - except: - if e['chr'] == "X": - return 20 - elif e['chr'] == "Y": - return 21 - elif e['chr'] == "M": - return 22 - -marker_list = [] -for key, value in marker_data.items(): - if 'genotypes' in value: - this_marker = { - 'chr': value['chr'], - 'locus': key, - 'pos': value['pos'], - 'genotypes': value['genotypes'] - } - marker_list.append(this_marker) - -marker_list.sort(key=sort_func) - -with open(gn_geno_path, "w") as gn_geno_fh: - gn_geno_fh.write("\t".join((["Chr", "Locus", "cM", "Mb"] + sample_names))) - for marker in marker_list: - row_contents = [ - marker['chr'], - marker['locus'], - marker['pos'], - marker['pos'] - ] + marker['genotypes'] - gn_geno_fh.write("\t".join(row_contents) + "\n") +# This is just to convert the Rqtl2 format genotype files for DOL into a .geno file +# Everything is hard-coded since I doubt this will be re-used and I just wanted to generate the file quickly + +import os + +geno_dir = "/home/zas1024/gn2-zach/DO_genotypes/" +markers_file = "/home/zas1024/gn2-zach/DO_genotypes/SNP_Map.txt" +gn_geno_path = "/home/zas1024/gn2-zach/DO_genotypes/DOL.geno" + +# Iterate through the SNP_Map.txt file to get marker positions +marker_data = {} +with open(markers_file, "r") as markers_fh: + for i, line in enumerate(markers_fh): + if i == 0: + continue + else: + line_items = line.split("\t") + this_marker = {} + this_marker['chr'] = line_items[2] if line_items[2] != "0" else "M" + this_marker['pos'] = f'{float(line_items[3])/1000000:.6f}' + marker_data[line_items[1]] = this_marker + +# Iterate through R/qtl2 format genotype files and pull out the samplelist and genotypes for each marker +sample_names = [] +for filename in os.listdir(geno_dir): + if "gm4qtl2_geno" in filename: + with open(geno_dir + "/" + filename, "r") as rqtl_geno_fh: + for i, line in enumerate(rqtl_geno_fh): + line_items = line.split(",") + if i < 3: + continue + elif not len(sample_names) and i == 3: + sample_names = [item.replace("TLB", "TB") for item in line_items[1:]] + elif i > 3: + marker_data[line_items[0]]['genotypes'] = ["X" if item.strip() == "-" else item.strip() for item in line_items[1:]] + +# Generate list of marker obs to iterate through when writing to .geno file +marker_list = [] +for key, value in marker_data.items(): + if 'genotypes' in value: + this_marker = { + 'chr': value['chr'], + 'locus': key, + 'pos': value['pos'], + 'genotypes': value['genotypes'] + } + marker_list.append(this_marker) + +def sort_func(e): + """For ensuring that X/Y chromosomes/mitochondria are sorted to the end correctly""" + try: + return float((e['chr']))*1000 + float(e['pos']) + except: + if e['chr'] == "X": + return 20000 + float(e['pos']) + elif e['chr'] == "Y": + 
return 21000 + float(e['pos']) + elif e['chr'] == "M": + return 22000 + float(e['pos']) + +# Sort markers by chromosome +marker_list.sort(key=sort_func) + +# Write lines to .geno file +with open(gn_geno_path, "w") as gn_geno_fh: + gn_geno_fh.write("\t".join((["Chr", "Locus", "cM", "Mb"] + sample_names))) + for marker in marker_list: + row_contents = [ + marker['chr'], + marker['locus'], + marker['pos'], + marker['pos'] + ] + marker['genotypes'] + gn_geno_fh.write("\t".join(row_contents) + "\n") -- cgit v1.2.3 From e473a210491620477898ba69f33f69f14fdf5893 Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 8 Oct 2021 22:49:20 +0000 Subject: Fix issue where outliers weren't being highlighted for rows drawn by Scroller (so all rows beyond a certain point) --- wqflask/wqflask/static/new/javascript/initialize_show_trait_tables.js | 2 ++ wqflask/wqflask/templates/show_trait.html | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/wqflask/wqflask/static/new/javascript/initialize_show_trait_tables.js b/wqflask/wqflask/static/new/javascript/initialize_show_trait_tables.js index 4de1b0ac..0a060cdc 100644 --- a/wqflask/wqflask/static/new/javascript/initialize_show_trait_tables.js +++ b/wqflask/wqflask/static/new/javascript/initialize_show_trait_tables.js @@ -130,6 +130,7 @@ var primary_table = $('#samples_primary').DataTable( { $(row).addClass("value_se"); if (data.outlier) { $(row).addClass("outlier"); + $(row).attr("style", "background-color: orange;"); } $('td', row).eq(1).addClass("column_name-Index") $('td', row).eq(2).addClass("column_name-Sample") @@ -189,6 +190,7 @@ if (js_data.sample_lists.length > 1){ $(row).addClass("value_se"); if (data.outlier) { $(row).addClass("outlier"); + $(row).attr("style", "background-color: orange;"); } $('td', row).eq(1).addClass("column_name-Index") $('td', row).eq(2).addClass("column_name-Sample") diff --git a/wqflask/wqflask/templates/show_trait.html b/wqflask/wqflask/templates/show_trait.html index 3dbf5f57..f3fa1332 100644 --- a/wqflask/wqflask/templates/show_trait.html +++ b/wqflask/wqflask/templates/show_trait.html @@ -254,8 +254,6 @@ } ); {% endif %} - $('#samples_primary, #samples_other').find("tr.outlier").css('background-color', 'orange') - $('.edit_sample_checkbox:checkbox').change(function() { if ($(this).is(":checked")) { if (!$(this).closest('tr').hasClass('selected')) { -- cgit v1.2.3 From 9b28d111ad156f3862286e88bc220e02d5e1312b Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 8 Oct 2021 23:31:19 +0000 Subject: Fixed some issues with scale and score_type in mapping export + include mapping method in export filename --- wqflask/wqflask/marker_regression/run_mapping.py | 25 ++++++++++++------------ 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py index 290c4a14..80094057 100644 --- a/wqflask/wqflask/marker_regression/run_mapping.py +++ b/wqflask/wqflask/marker_regression/run_mapping.py @@ -104,7 +104,7 @@ class RunMapping: if "results_path" in start_vars: self.mapping_results_path = start_vars['results_path'] else: - mapping_results_filename = "_".join([self.dataset.group.name, self.vals_hash]).replace("/", "_") + mapping_results_filename = "_".join([self.dataset.group.name, self.mapping_method, self.vals_hash]).replace("/", "_") self.mapping_results_path = "{}{}.csv".format( webqtlConfig.GENERATED_IMAGE_DIR, mapping_results_filename) @@ -405,8 +405,8 @@ class RunMapping: total_markers = len(self.qtl_results) with 
Bench("Exporting Results"): - export_mapping_results(self.dataset, self.this_trait, self.qtl_results, - self.mapping_results_path, self.mapping_scale, self.score_type, + export_mapping_results(self.dataset, self.this_trait, self.qtl_results, self.mapping_results_path, + self.mapping_method, self.mapping_scale, self.score_type, self.transform, self.covariates, self.n_samples, self.vals_hash) with Bench("Trimming Markers for Figure"): @@ -525,7 +525,11 @@ class RunMapping: return trimmed_genotype_data -def export_mapping_results(dataset, trait, markers, results_path, mapping_scale, score_type, transform, covariates, n_samples, vals_hash): +def export_mapping_results(dataset, trait, markers, results_path, mapping_method, mapping_scale, score_type, transform, covariates, n_samples, vals_hash): + if mapping_scale == "physic": + scale_string = "Mb" + else: + scale_string = "cM" with open(results_path, "w+") as output_file: output_file.write( "Time/Date: " + datetime.datetime.now().strftime("%x / %X") + "\n") @@ -535,6 +539,7 @@ def export_mapping_results(dataset, trait, markers, results_path, mapping_scale, output_file.write("Trait: " + trait.display_name + "\n") output_file.write("Trait Hash: " + vals_hash + "\n") output_file.write("N Samples: " + str(n_samples) + "\n") + output_file.write("Mapping Tool: " + str(mapping_method) + "\n") if len(transform) > 0: transform_text = "Transform - " if transform == "qnorm": @@ -564,10 +569,7 @@ def export_mapping_results(dataset, trait, markers, results_path, mapping_scale, output_file.write("Name,Chr,") if score_type.lower() == "-logP": score_type = "-logP" - if 'Mb' in markers[0]: - output_file.write("Mb," + score_type) - if 'cM' in markers[0]: - output_file.write("Cm," + score_type) + output_file.write(scale_string + "," + score_type) if "additive" in list(markers[0].keys()): output_file.write(",Additive") if "dominance" in list(markers[0].keys()): @@ -575,11 +577,8 @@ def export_mapping_results(dataset, trait, markers, results_path, mapping_scale, output_file.write("\n") for i, marker in enumerate(markers): output_file.write(marker['name'] + "," + str(marker['chr']) + ",") - if 'Mb' in marker: - output_file.write(str(marker['Mb']) + ",") - if 'cM' in marker: - output_file.write(str(marker['cM']) + ",") - if "lod_score" in marker.keys(): + output_file.write(str(marker[scale_string]) + ",") + if score_type == "-logP": output_file.write(str(marker['lod_score'])) else: output_file.write(str(marker['lrs_value'])) -- cgit v1.2.3 From 6578f6fa44dfa54bee29a16347bf3265ec6d76ad Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Sat, 9 Oct 2021 21:55:08 +0300 Subject: add function to process gn3 wgcna output --- wqflask/wqflask/wgcna/gn3_wgcna.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 wqflask/wqflask/wgcna/gn3_wgcna.py diff --git a/wqflask/wqflask/wgcna/gn3_wgcna.py b/wqflask/wqflask/wgcna/gn3_wgcna.py new file mode 100644 index 00000000..2657a099 --- /dev/null +++ b/wqflask/wqflask/wgcna/gn3_wgcna.py @@ -0,0 +1,27 @@ +"""module contains code to consume gn3-wgcna api +and process data to be rendered by datatables +""" + + + +def process_wgcna_data(response): + """function for processing modeigene genes + for create row data for datataba""" + mod_eigens = response["output"]["ModEigens"] + + sample_names = response["input"]["sample_names"] + + mod_dataset = [[sample] for sample in sample_names] + + for _, mod_values in mod_eigens.items(): + for (index, _sample) in enumerate(sample_names): + 
mod_dataset[index].append(round(mod_values[index], 3)) + + return { + "col_names": ["sample_names", *mod_eigens.keys()], + "mod_dataset": mod_dataset + } + + +def process_image(): + pass \ No newline at end of file -- cgit v1.2.3 From ef80c72194dd8a0b8868ece15589e0a3cf04516f Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Sat, 9 Oct 2021 21:55:39 +0300 Subject: unittest for processing wgcna output --- wqflask/tests/unit/wqflask/wgcna/__init__.py | 0 wqflask/tests/unit/wqflask/wgcna/test_wgcna.py | 50 ++++++++++++++++++++++++++ 2 files changed, 50 insertions(+) create mode 100644 wqflask/tests/unit/wqflask/wgcna/__init__.py create mode 100644 wqflask/tests/unit/wqflask/wgcna/test_wgcna.py diff --git a/wqflask/tests/unit/wqflask/wgcna/__init__.py b/wqflask/tests/unit/wqflask/wgcna/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/wqflask/tests/unit/wqflask/wgcna/test_wgcna.py b/wqflask/tests/unit/wqflask/wgcna/test_wgcna.py new file mode 100644 index 00000000..8e947e2f --- /dev/null +++ b/wqflask/tests/unit/wqflask/wgcna/test_wgcna.py @@ -0,0 +1,50 @@ + +"""module contains for processing gn3 wgcna data""" +from unittest import TestCase + +from wqflask.wgcna.gn3_wgcna import process_wgcna_data + + +class DataProcessingTests(TestCase): + """class contains data processing tests""" + + def test_data_processing(self): + """test for parsing data for datatable""" + output = { + "input": { + "sample_names": ["BXD1", "BXD2", "BXD3", "BXD4", "BXD5", "BXD6"], + + }, + "output": { + "ModEigens": { + "MEturquoise": [ + 0.0646677768085351, + 0.137200224277058, + 0.63451113720732, + -0.544002665501479, + -0.489487590361863, + 0.197111117570427 + ], + "MEgrey": [ + 0.213, + 0.214, + 0.3141, + -0.545, + -0.423, + 0.156, + ] + }}} + + row_data = [['BXD1', 0.065, 0.213], + ['BXD2', 0.137, 0.214], + ['BXD3', 0.635, 0.314], + ['BXD4', -0.544, -0.545], + ['BXD5', -0.489, -0.423], + ['BXD6', 0.197, 0.156]] + + expected_results = { + "col_names": ["sample_names", "MEturquoise", "MEgrey"], + "mod_dataset": row_data + } + + self.assertEqual(process_wgcna_data(output), expected_results) -- cgit v1.2.3 From 599ac567990a3881dc3821ad226a18ce538d1a17 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Sat, 9 Oct 2021 21:58:36 +0300 Subject: add function to process image data --- wqflask/wqflask/wgcna/gn3_wgcna.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/wqflask/wqflask/wgcna/gn3_wgcna.py b/wqflask/wqflask/wgcna/gn3_wgcna.py index 2657a099..225bef22 100644 --- a/wqflask/wqflask/wgcna/gn3_wgcna.py +++ b/wqflask/wqflask/wgcna/gn3_wgcna.py @@ -3,7 +3,6 @@ and process data to be rendered by datatables """ - def process_wgcna_data(response): """function for processing modeigene genes for create row data for datataba""" @@ -23,5 +22,12 @@ def process_wgcna_data(response): } -def process_image(): - pass \ No newline at end of file +def process_image(response): + """function to process image check if byte string is empty""" + image_data = response["output"]["image_data"] + return ({ + "image_generated": True, + "image_data": image_data + } if image_data else { + "image_generated": False + }) -- cgit v1.2.3 From 1b77d2417ba71ad7e2274429dd20c9272cb0f582 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Sat, 9 Oct 2021 22:00:26 +0300 Subject: function to fetch trait data --- wqflask/wqflask/wgcna/gn3_wgcna.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/wqflask/wqflask/wgcna/gn3_wgcna.py b/wqflask/wqflask/wgcna/gn3_wgcna.py index 225bef22..f7ed4cef 
100644 --- a/wqflask/wqflask/wgcna/gn3_wgcna.py +++ b/wqflask/wqflask/wgcna/gn3_wgcna.py @@ -3,6 +3,16 @@ and process data to be rendered by datatables """ +def fetch_trait_data(requestform): + """fetch trait data""" + db_obj = SimpleNamespace() + get_trait_db_obs(db_obj, + [trait.strip() + for trait in requestform['trait_list'].split(',')]) + + return process_dataset(db_obj.trait_list) + + def process_wgcna_data(response): """function for processing modeigene genes for create row data for datataba""" -- cgit v1.2.3 From c9ae69a30a972f47232f8457e9e1b8cd514f9832 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Sat, 9 Oct 2021 22:04:17 +0300 Subject: add function to process trait sample data --- wqflask/wqflask/wgcna/gn3_wgcna.py | 44 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/wqflask/wqflask/wgcna/gn3_wgcna.py b/wqflask/wqflask/wgcna/gn3_wgcna.py index f7ed4cef..9ab6b3e0 100644 --- a/wqflask/wqflask/wgcna/gn3_wgcna.py +++ b/wqflask/wqflask/wgcna/gn3_wgcna.py @@ -1,6 +1,7 @@ """module contains code to consume gn3-wgcna api and process data to be rendered by datatables """ +from utility.helper_functions import get_trait_db_obs def fetch_trait_data(requestform): @@ -13,6 +14,49 @@ def fetch_trait_data(requestform): return process_dataset(db_obj.trait_list) +def process_dataset(trait_list): + """process datasets and strains""" + + input_data = {} + traits = [] + strains = [] + + # xtodo unique traits and strains + + for trait in trait_list: + traits.append(trait[0].name) + + input_data[trait[0].name] = {} + for strain in trait[0].data: + strains.append(strain) + input_data[trait[0].name][strain] = trait[0].data[strain].value + + return { + "wgcna_input": input_data + } + + def process_dataset(trait_list): + """process datasets and strains""" + + input_data = {} + traits = [] + strains = [] + + # xtodo unique traits and strains + + for trait in trait_list: + traits.append(trait[0].name) + + input_data[trait[0].name] = {} + for strain in trait[0].data: + strains.append(strain) + input_data[trait[0].name][strain] = trait[0].data[strain].value + + return { + "wgcna_input": input_data + } + + def process_wgcna_data(response): """function for processing modeigene genes for create row data for datataba""" -- cgit v1.2.3 From 585cd45c56ae4bc444336815cbde791b0c0d2e7b Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Sat, 9 Oct 2021 22:11:50 +0300 Subject: add function to call wgcna api --- wqflask/wqflask/wgcna/gn3_wgcna.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/wqflask/wqflask/wgcna/gn3_wgcna.py b/wqflask/wqflask/wgcna/gn3_wgcna.py index 9ab6b3e0..520f3d04 100644 --- a/wqflask/wqflask/wgcna/gn3_wgcna.py +++ b/wqflask/wqflask/wgcna/gn3_wgcna.py @@ -1,6 +1,7 @@ """module contains code to consume gn3-wgcna api and process data to be rendered by datatables """ +from typing import SimpleNamespace from utility.helper_functions import get_trait_db_obs @@ -85,3 +86,33 @@ def process_image(response): } if image_data else { "image_generated": False }) + + +def run_wgcna(form_data): + """function to run wgcna""" + + wgcna_api = f"{GN3_URL}/api/wgcna/run_wgcna" + + # parse form data + + trait_dataset = fetch_trait_data(form_data) + + response = requests.post(wgcna_api, { + "socket_id": form_data.get("socket_id"), # streaming disabled + "sample_names": list(set(strains)), + "trait_names": form_traits, + "trait_sample_data": form_strains, + "TOMtype": form_data["TOMtype"], + "minModuleSize": int(form_data["MinModuleSize"]), 
+ "corType": form_data["corType"] + + } + ).json() + + if response.status_code == 200: + return { + {"results": response, + "data": process_wgcna_data(response), + "image": process_image(response) + } + } -- cgit v1.2.3 From 9a17787cab82fe1c89dc68521eca9e6c8bb1dbb6 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Sun, 10 Oct 2021 01:44:45 +0300 Subject: pep8 formatting fix for parsing response data --- wqflask/wqflask/wgcna/gn3_wgcna.py | 66 ++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 38 deletions(-) diff --git a/wqflask/wqflask/wgcna/gn3_wgcna.py b/wqflask/wqflask/wgcna/gn3_wgcna.py index 520f3d04..96510223 100644 --- a/wqflask/wqflask/wgcna/gn3_wgcna.py +++ b/wqflask/wqflask/wgcna/gn3_wgcna.py @@ -1,10 +1,18 @@ """module contains code to consume gn3-wgcna api and process data to be rendered by datatables """ -from typing import SimpleNamespace + +import requests +from types import SimpleNamespace from utility.helper_functions import get_trait_db_obs +def validate_form(requestform): + return { + "" + } + + def fetch_trait_data(requestform): """fetch trait data""" db_obj = SimpleNamespace() @@ -31,30 +39,14 @@ def process_dataset(trait_list): for strain in trait[0].data: strains.append(strain) input_data[trait[0].name][strain] = trait[0].data[strain].value + # "sample_names": list(set(strains)), + # "trait_names": form_traits, + # "trait_sample_data": form_strains, return { - "wgcna_input": input_data - } - - def process_dataset(trait_list): - """process datasets and strains""" - - input_data = {} - traits = [] - strains = [] - - # xtodo unique traits and strains - - for trait in trait_list: - traits.append(trait[0].name) - - input_data[trait[0].name] = {} - for strain in trait[0].data: - strains.append(strain) - input_data[trait[0].name][strain] = trait[0].data[strain].value - - return { - "wgcna_input": input_data + "input": input_data, + "trait_names": traits, + "sample_names": strains } @@ -91,28 +83,26 @@ def process_image(response): def run_wgcna(form_data): """function to run wgcna""" + GN3_URL = "http://127.0.0.1:8081" + wgcna_api = f"{GN3_URL}/api/wgcna/run_wgcna" # parse form data trait_dataset = fetch_trait_data(form_data) + form_data["minModuleSize"] = int(form_data["MinModuleSize"]) - response = requests.post(wgcna_api, { - "socket_id": form_data.get("socket_id"), # streaming disabled - "sample_names": list(set(strains)), - "trait_names": form_traits, - "trait_sample_data": form_strains, - "TOMtype": form_data["TOMtype"], - "minModuleSize": int(form_data["MinModuleSize"]), - "corType": form_data["corType"] + response = requests.post(wgcna_api, json={ + "sample_names": list(set(trait_dataset["sample_names"])), + "trait_names": trait_dataset["trait_names"], + "trait_sample_data": list(trait_dataset["input"].values()), + **form_data } ).json() - if response.status_code == 200: - return { - {"results": response, - "data": process_wgcna_data(response), - "image": process_image(response) - } - } + return { + "results": response, + "data": process_wgcna_data(response["data"]), + "image": process_image(response["data"]) + } -- cgit v1.2.3 From b8a2b58cbf5bb96d86d59da7e72a9cb5f874fc41 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Sun, 10 Oct 2021 01:47:50 +0300 Subject: call run_wgcna in views && render test template --- wqflask/wqflask/views.py | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 44560427..a462b31a 100644 --- a/wqflask/wqflask/views.py 
+++ b/wqflask/wqflask/views.py @@ -39,8 +39,8 @@ from gn3.db.phenotypes import Probeset from gn3.db.phenotypes import Publication from gn3.db.phenotypes import PublishXRef from gn3.db.phenotypes import probeset_mapping -from gn3.db.traits import get_trait_csv_sample_data -from gn3.db.traits import update_sample_data +# from gn3.db.traits import get_trait_csv_sample_data +# from gn3.db.traits import update_sample_data from flask import current_app @@ -77,6 +77,7 @@ from wqflask.correlation_matrix import show_corr_matrix from wqflask.correlation import corr_scatter_plot # from wqflask.wgcna import wgcna_analysis # from wqflask.ctl import ctl_analysis +from wqflask.wgcna.gn3_wgcna import run_wgcna from wqflask.snp_browser import snp_browser from wqflask.search_results import SearchResultPage from wqflask.export_traits import export_search_results_csv @@ -365,18 +366,11 @@ def wcgna_setup(): return render_template("wgcna_setup.html", **request.form) -# @app.route("/wgcna_results", methods=('POST',)) -# def wcgna_results(): -# logger.info("In wgcna, request.form is:", request.form) -# logger.info(request.url) -# # Start R, load the package and pointers and create the analysis -# wgcna = wgcna_analysis.WGCNA() -# # Start the analysis, a wgcnaA object should be a separate long running thread -# wgcnaA = wgcna.run_analysis(request.form) -# # After the analysis is finished store the result -# result = wgcna.process_results(wgcnaA) -# # Display them using the template -# return render_template("wgcna_results.html", **result) +@app.route("/wgcna_results", methods=('POST',)) +def wcgna_results(): + """call the gn3 api to get wgcna response data""" + results = run_wgcna(dict(request.form)) + return render_template("test_wgcna_results.html", **results) @app.route("/ctl_setup", methods=('POST',)) -- cgit v1.2.3 From f07d5014a3d6756199bd206b4251f6d3b48bf165 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Sun, 10 Oct 2021 01:49:14 +0300 Subject: pep8 formatting && remove unused functions --- wqflask/wqflask/wgcna/gn3_wgcna.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/wqflask/wqflask/wgcna/gn3_wgcna.py b/wqflask/wqflask/wgcna/gn3_wgcna.py index 96510223..c4cc2e7f 100644 --- a/wqflask/wqflask/wgcna/gn3_wgcna.py +++ b/wqflask/wqflask/wgcna/gn3_wgcna.py @@ -7,12 +7,6 @@ from types import SimpleNamespace from utility.helper_functions import get_trait_db_obs -def validate_form(requestform): - return { - "" - } - - def fetch_trait_data(requestform): """fetch trait data""" db_obj = SimpleNamespace() -- cgit v1.2.3 From 63a161e2a6d3863720aa6814f1060bed22c22a39 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Sun, 10 Oct 2021 01:54:51 +0300 Subject: enable wgcna in gn2 toolbar --- wqflask/wqflask/templates/tool_buttons.html | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wqflask/wqflask/templates/tool_buttons.html b/wqflask/wqflask/templates/tool_buttons.html index 3f9d8211..3ee5be19 100644 --- a/wqflask/wqflask/templates/tool_buttons.html +++ b/wqflask/wqflask/templates/tool_buttons.html @@ -18,13 +18,13 @@ BNW - +
    [The body of this hunk, the two-line markup change that enables the WGCNA entry in the toolbar (2 insertions, 2 deletions per the diffstat above), was lost when HTML tags were stripped from this document. The header of the patch that follows, which appears to edit the WGCNA results template (test_wgcna_results.html), was stripped in the same way; only the added Jinja fragment below and the JavaScript hunk after it survive.]
    + {% if image["image_generated"] %}
    @@ -140,13 +129,8 @@ let results = {{results|safe}} let phenoModules = results["data"]["output"]["net_colors"] -let phenotypes = Array.from(Object.keys(phenoModules)); +let phenotypes = Object.keys(phenoModules) let phenoMods = Object.values(phenoModules) -console.log(phenotypes) - - -console.log(typeof phenotypes); - let {col_names,mod_dataset} = {{data|safe}} $('#eigens').DataTable( { @@ -159,7 +143,6 @@ let {col_names,mod_dataset} = {{data|safe}} } ); $('#phenos').DataTable( { data:phenotypes.map((phenoName,idx)=>{ - console.log(phenoName) return [phenoName,phenoMods[idx]] }), columns: [{ -- cgit v1.2.3 From 1642c6bf0b8dee3bd3d4c32636a2c11ab97025a0 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Sun, 10 Oct 2021 12:02:01 +0300 Subject: replace form input with select options --- wqflask/wqflask/templates/wgcna_setup.html | 35 ++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/wqflask/wqflask/templates/wgcna_setup.html b/wqflask/wqflask/templates/wgcna_setup.html index 9d4bbfc7..86d9fa10 100644 --- a/wqflask/wqflask/templates/wgcna_setup.html +++ b/wqflask/wqflask/templates/wgcna_setup.html @@ -35,29 +35,42 @@
    [The body of this hunk, the markup that replaces free-text WGCNA parameter inputs (such as TOMtype and corType) with select drop-downs (24 insertions, 11 deletions per the diffstat above), was lost when HTML tags were stripped from this document; only the closing fragment below survives.]
    {% endif %}
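For reference, the WGCNA parameters collected by this setup form (TOMtype, corType, MinModuleSize) are the fields that gn3_wgcna.run_wgcna() forwards to the GN3 endpoint /api/wgcna/run_wgcna introduced earlier in this series. The sketch below shows roughly what that request looks like; the GN3 URL is the value hard-coded in the patch, while the strain names, trait names and parameter values are made-up placeholders, and the drop-down choices are assumptions rather than something visible in this stripped hunk.

    # Sketch of the payload run_wgcna() builds and posts to GN3.
    # Field names come from the patches above; all values are hypothetical.
    import requests

    GN3_URL = "http://127.0.0.1:8081"  # hard-coded in gn3_wgcna.py in this series
    wgcna_api = f"{GN3_URL}/api/wgcna/run_wgcna"

    payload = {
        "sample_names": ["BXD1", "BXD2", "BXD5"],      # hypothetical strains
        "trait_names": ["1427571_at", "1455696_at"],   # hypothetical traits
        "trait_sample_data": [                         # one sample->value map per trait
            {"BXD1": 9.8, "BXD2": 10.1, "BXD5": 9.5},
            {"BXD1": 7.2, "BXD2": 7.4, "BXD5": 7.1},
        ],
        "TOMtype": "unsigned",    # assumed drop-down choice
        "corType": "pearson",     # assumed drop-down choice
        "minModuleSize": 30,
    }

    response = requests.post(wgcna_api, json=payload).json()
    # run_wgcna() hands the parsed reply to process_wgcna_data() and
    # process_image() before rendering test_wgcna_results.html.
    print(sorted(response.keys()))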
    -- cgit v1.2.3 From a212ad123f902b6a9c74bcac1d98bc274cebbdda Mon Sep 17 00:00:00 2001 From: zsloan Date: Tue, 12 Oct 2021 17:36:02 +0000 Subject: Fixed export_mapping_results test in test_run_mapping.py --- .../wqflask/marker_regression/test_run_mapping.py | 33 +++++++++++----------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_run_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_run_mapping.py index 3747aeb8..868b0b0b 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_run_mapping.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_run_mapping.py @@ -43,7 +43,7 @@ class TestRunMapping(unittest.TestCase): }) } self.dataset = AttributeSetter( - {"fullname": "dataser_1", "group": self.group, "type": "ProbeSet"}) + {"fullname": "dataset_1", "group": self.group, "type": "ProbeSet"}) self.chromosomes = AttributeSetter({"chromosomes": chromosomes}) self.trait = AttributeSetter( @@ -180,37 +180,36 @@ class TestRunMapping(unittest.TestCase): with mock.patch("wqflask.marker_regression.run_mapping.datetime.datetime", new=datetime_mock): export_mapping_results(dataset=self.dataset, trait=self.trait, markers=markers, - results_path="~/results", mapping_scale="physic", score_type="-log(p)", - transform="qnorm", covariates="Dataset1:Trait1,Dataset2:Trait2", n_samples="100", - vals_hash="") + results_path="~/results", mapping_method="gemma", mapping_scale="physic", + score_type="-logP", transform="qnorm", + covariates="Dataset1:Trait1,Dataset2:Trait2", + n_samples="100", vals_hash="") write_calls = [ mock.call('Time/Date: 09/01/19 / 10:12:12\n'), mock.call('Population: Human GP1_\n'), mock.call( - 'Data Set: dataser_1\n'), + 'Data Set: dataset_1\n'), mock.call('Trait: Test Name\n'), mock.call('Trait Hash: \n'), - mock.call('N Samples: 100\n'), mock.call( - 'Transform - Quantile Normalized\n'), + mock.call('N Samples: 100\n'), + mock.call('Mapping Tool: gemma\n'), + mock.call('Transform - Quantile Normalized\n'), mock.call('Gene Symbol: IGFI\n'), mock.call( 'Location: X1 @ 123313 Mb\n'), mock.call('Cofactors (dataset - trait):\n'), mock.call('Trait1 - Dataset1\n'), mock.call('Trait2 - Dataset2\n'), mock.call('\n'), mock.call('Name,Chr,'), - mock.call('Mb,-log(p)'), mock.call('Cm,-log(p)'), + mock.call('Mb,-logP'), mock.call(',Additive'), mock.call(',Dominance'), mock.call('\n'), mock.call('MK1,C1,'), - mock.call('12000,'), mock.call('1,'), - mock.call('3'), mock.call(',VA'), - mock.call(',TT'), mock.call('\n'), - mock.call('MK2,C2,'), mock.call('10000,'), - mock.call('15,'), mock.call('7'), + mock.call('12000,'), mock.call('3'), + mock.call(',VA'), mock.call(',TT'), + mock.call('\n'), mock.call('MK2,C2,'), + mock.call('10000,'), mock.call('7'), mock.call('\n'), mock.call('MK1,C3,'), - mock.call('1,'), mock.call('45,'), - mock.call('7'), mock.call(',VE'), - mock.call(',Tt') - + mock.call('1,'), mock.call('7'), + mock.call(',VE'), mock.call(',Tt') ] mock_open.assert_called_once_with("~/results", "w+") filehandler = mock_open() -- cgit v1.2.3 From db9225caf0a78b13af1892d47c69463e00262d03 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Wed, 13 Oct 2021 08:22:51 +0300 Subject: disable heatmap image --- wqflask/wqflask/templates/test_wgcna_results.html | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wqflask/wqflask/templates/test_wgcna_results.html b/wqflask/wqflask/templates/test_wgcna_results.html index 0d253564..1dddd393 100644 --- 
a/wqflask/wqflask/templates/test_wgcna_results.html +++ b/wqflask/wqflask/templates/test_wgcna_results.html @@ -94,9 +94,9 @@
    {% endif %}
    [The rest of this hunk, the two-line change that stops the generated heatmap image from being rendered in the results template, was lost when HTML tags were stripped from this document.]
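The results template guards the heatmap behind an image["image_generated"] flag (visible in the Jinja fragment retained above), and that flag is produced by process_image() in gn3_wgcna.py. None of the patches in this series show that helper's body, so the following is only a hypothetical sketch of the contract the template relies on, assuming the GN3 reply carries the plot as a base64-encoded PNG under a key such as "image_data"; the real implementation may differ.

    # Hypothetical sketch only; the actual process_image() in gn3_wgcna.py is not
    # shown in this series and may differ.
    def process_image(response):
        """Package the WGCNA heatmap (if present) for the results template."""
        encoded = response.get("image_data")  # assumed key for the base64-encoded PNG
        if encoded:
            return {
                "image_generated": True,
                # a value the template could place directly in an img src attribute
                "image_src": "data:image/png;base64," + encoded,
            }
        return {"image_generated": False}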
    -- cgit v1.2.3 From 2b5b6a119e22d0bbe29c08681c34a50f9769f38e Mon Sep 17 00:00:00 2001 From: Frederick Muriuki Muriithi Date: Wed, 13 Oct 2021 09:06:57 +0300 Subject: Update the action button text Issue: https://github.com/genenetwork/gn-gemtext-threads/blob/main/topics/gn1-migration-to-gn2/non-clustered-heatmaps-and-flipping.gmi * Update the action button text to more closely correspond to the action that the button triggers. --- wqflask/wqflask/templates/collections/view.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/templates/collections/view.html b/wqflask/wqflask/templates/collections/view.html index d347a060..0ded66a6 100644 --- a/wqflask/wqflask/templates/collections/view.html +++ b/wqflask/wqflask/templates/collections/view.html @@ -54,7 +54,7 @@ class="btn btn-primary" data-url="{{heatmap_data_url}}" title="Generate heatmap from this collection"> - Clustered Heatmap + Generate Heatmap -- cgit v1.2.3 From ba98ef026544d4437e65a7bd248ff9591296b48e Mon Sep 17 00:00:00 2001 From: Frederick Muriuki Muriithi Date: Wed, 13 Oct 2021 09:37:19 +0300 Subject: Add some documentation for generating heatmaps Issue: https://github.com/genenetwork/gn-gemtext-threads/blob/main/topics/gn1-migration-to-gn2/non-clustered-heatmaps-and-flipping.gmi * Add some documentation on generating the heatmaps, that would be useful for the end user. --- doc/heatmap-generation.org | 34 ++++++++++++++++++++++++++++++++ doc/images/gn2_header_collections.png | Bin 0 -> 7890 bytes doc/images/heatmap_form.png | Bin 0 -> 9363 bytes doc/images/heatmap_with_hover_tools.png | Bin 0 -> 42578 bytes 4 files changed, 34 insertions(+) create mode 100644 doc/heatmap-generation.org create mode 100644 doc/images/gn2_header_collections.png create mode 100644 doc/images/heatmap_form.png create mode 100644 doc/images/heatmap_with_hover_tools.png diff --git a/doc/heatmap-generation.org b/doc/heatmap-generation.org new file mode 100644 index 00000000..a697c70b --- /dev/null +++ b/doc/heatmap-generation.org @@ -0,0 +1,34 @@ +#+STARTUP: inlineimages +#+TITLE: Heatmap Generation +#+AUTHOR: Muriithi Frederick Muriuki + +* Generating Heatmaps + +Like a lot of other features, the heatmap generation requires an existing collection. If none exists, see [[][Creating a new collection]] for how to create a new collection. + +Once you have a collection, you can navigate to the collections page by clicking on the "Collections" link in the header + + +[[./images/gn2_header_collections.png]] + +From that page, pick the collection that you want to work with by clicking on its name on the collections table. + +That takes you to that collection's page, where you can select the data that you want to use to generate the heatmap. + +** Selecting Orientation + +Once you have selected the data, select the orientation of the heatmap you want generated. You do this by selecting either *"vertical"* or *"horizontal"* in the heatmaps form: + +[[./images/heatmap_form.png]] + +Once you have selected the orientation, click on the "Generate Heatmap" button as in the image above. + +The heatmap generation might take a while, but once it is done, an image shows up above the data table. + +** Downloading the PNG copy of the Heatmap + +Once the heatmap image is shown, hovering over it, displays some tools to interact with the image. + +To download, hover over the heatmap image, and click on the "Download plot as png" icon as shown. 
+ +[[./images/heatmap_with_hover_tools.png]] diff --git a/doc/images/gn2_header_collections.png b/doc/images/gn2_header_collections.png new file mode 100644 index 00000000..ac23f9c1 Binary files /dev/null and b/doc/images/gn2_header_collections.png differ diff --git a/doc/images/heatmap_form.png b/doc/images/heatmap_form.png new file mode 100644 index 00000000..163fbb60 Binary files /dev/null and b/doc/images/heatmap_form.png differ diff --git a/doc/images/heatmap_with_hover_tools.png b/doc/images/heatmap_with_hover_tools.png new file mode 100644 index 00000000..4ab79f99 Binary files /dev/null and b/doc/images/heatmap_with_hover_tools.png differ -- cgit v1.2.3 From 25cafac773edf3a053819b53ef860321a678941a Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 13 Oct 2021 17:46:18 +0000 Subject: Fix issue where score_type being set wrong caused an error when exporting mapping results (a while back it was changed from LOD to -logP) --- wqflask/wqflask/marker_regression/run_mapping.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py index 80094057..769b9240 100644 --- a/wqflask/wqflask/marker_regression/run_mapping.py +++ b/wqflask/wqflask/marker_regression/run_mapping.py @@ -230,7 +230,7 @@ class RunMapping: self.perm_strata = get_perm_strata( self.this_trait, primary_samples, self.categorical_vars, self.samples) - self.score_type = "LOD" + self.score_type = "-logP" self.control_marker = start_vars['control_marker'] self.do_control = start_vars['do_control'] if 'mapmethod_rqtl_geno' in start_vars: -- cgit v1.2.3
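Taken together with the test changes earlier in this series (which now expect a "Mapping Tool:" line and a "Mb,-logP" column header), this one-line fix makes the exported header match the score type shown in the mapping results UI. As a rough illustration of the expectation encoded in the updated test, not the actual implementation in export_mapping_results(), the header row is assembled along these lines:

    # Illustration of the header the updated test asserts; the real logic lives in
    # wqflask/wqflask/marker_regression/run_mapping.py (export_mapping_results).
    def results_header(mapping_scale, score_type,
                       has_additive=True, has_dominance=True):
        position = "Mb" if mapping_scale == "physic" else "Cm"
        columns = ["Name", "Chr", position, score_type]
        if has_additive:
            columns.append("Additive")
        if has_dominance:
            columns.append("Dominance")
        return ",".join(columns)

    # Matches the mock.call('Name,Chr,'), mock.call('Mb,-logP'), mock.call(',Additive'),
    # mock.call(',Dominance') sequence asserted in the test above.
    assert results_header("physic", "-logP") == "Name,Chr,Mb,-logP,Additive,Dominance"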