author    | zsloan | 2020-07-23 18:14:41 -0500
committer | zsloan | 2020-07-23 18:14:41 -0500
commit    | 880aa1cab6de4983414a2bbddfc557a10e02e1d3 (patch)
tree      | 96783c5fcfb0becb4d9008597b87e636d89b1ac2 /wqflask
parent    | 81c35e99a7ff808bf2fcf1f5e19631adf07d42e8 (diff)
download  | genenetwork2-880aa1cab6de4983414a2bbddfc557a10e02e1d3.tar.gz
Changed search page/collection export to include trait sample data and to export a zip file when the exported traits span multiple groups
Diffstat (limited to 'wqflask')
-rw-r--r-- | wqflask/wqflask/export_traits.py                         | 119
-rw-r--r-- | wqflask/wqflask/static/new/javascript/search_results.js  |  16
-rw-r--r-- | wqflask/wqflask/views.py                                 |  28
3 files changed, 123 insertions, 40 deletions
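
For orientation before the diff itself: after this change the search-results page posts only the values of the selected `searchResult` checkboxes, and `export_search_results_csv` splits each value back into its trait and dataset identifiers. A minimal sketch of that contract; the trait names, dataset name, and hash segments below are made-up placeholders (the third field is discarded by the Python side):

```python
# Illustrative only: these are placeholder values, not real GeneNetwork records.
export_data = {
    "rows": [
        "1427571_at:HC_M2_0606_P:abc123",   # trait_name:dataset_name:<hash, ignored>
        "1452452_at:HC_M2_0606_P:def456",
    ]
}

# export_traits.py recovers the identifiers it needs to rebuild each trait object:
for row in export_data["rows"]:
    trait_name, dataset_name, _hash = row.split(":")
    print(trait_name, dataset_name)
```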
diff --git a/wqflask/wqflask/export_traits.py b/wqflask/wqflask/export_traits.py
index 2d96c05a..3272c03d 100644
--- a/wqflask/wqflask/export_traits.py
+++ b/wqflask/wqflask/export_traits.py
@@ -4,24 +4,30 @@ import csv
 import xlsxwriter
 import StringIO
 import datetime
+import itertools
+
+from zipfile import ZipFile, ZIP_DEFLATED
 
 import simplejson as json
 
+from base.trait import create_trait, retrieve_trait_info
+
 from pprint import pformat as pf
 
+from utility.logger import getLogger
+logger = getLogger(__name__ )
+
 def export_search_results_csv(targs):
 
     table_data = json.loads(targs['export_data'])
-    table_headers = table_data['headers']
     table_rows = table_data['rows']
 
-    buff = StringIO.StringIO()
-    writer = csv.writer(buff)
-
+    now = datetime.datetime.now()
+    time_str = now.strftime('%H:%M_%d%B%Y')
     if 'file_name' in targs:
-        file_name = targs['file_name']
+        zip_file_name = targs['file_name'] + "_export_" + time_str
     else:
-        file_name = "table_export.csv"
+        zip_file_name = "export_" + time_str
 
     metadata = []
 
@@ -40,19 +46,98 @@ def export_search_results_csv(targs):
     if targs['filter_term'] != "None":
         metadata.append(["Search Filter Terms: " + targs['filter_term']])
     metadata.append(["Exported Row Number: " + str(len(table_rows))])
+    metadata.append(["Funding for The GeneNetwork: NIAAA (U01AA13499, U24AA13513), NIDA, NIMH, and NIAAA (P20-DA21131), NCI MMHCC (U01CA105417), and NCRR (U01NR 105417)"])
+    metadata.append([])
+
+    trait_list = []
+    for trait in table_rows:
+        trait_name, dataset_name, _hash = trait.split(":")
+        trait_ob = create_trait(name=trait_name, dataset_name=dataset_name)
+        trait_ob = retrieve_trait_info(trait_ob, trait_ob.dataset, get_qtl_info=True)
+        trait_list.append(trait_ob)
+
+    table_headers = ['Species', 'Group', 'Dataset', 'Record ID', 'Symbol', 'Description', 'ProbeTarget', 'PubMed_ID', 'Chr', 'Mb', 'Alias', 'Gene_ID', 'Homologene_ID', 'UniGene_ID', 'Strand_Probe', 'Probe_set_specificity', 'Probe_set_BLAT_score', 'Probe_set_BLAT_Mb_start', 'Probe_set_BLAT_Mb_end', 'QTL_Chr', 'QTL_Mb', 'Locus_at_Peak', 'Max_LRS', 'P_value_of_MAX', 'Mean_Expression']
+
+    traits_by_group = sort_traits_by_group(trait_list)
+
+    file_list = []
+    for group in traits_by_group.keys():
+        group_traits = traits_by_group[group]
+        buff = StringIO.StringIO()
+        writer = csv.writer(buff)
+        csv_rows = []
+
+        sample_headers = []
+        for sample in group_traits[0].dataset.group.samplelist:
+            sample_headers.append(sample)
+            sample_headers.append(sample + "_SE")
+
+        full_headers = table_headers + sample_headers
+
+        for metadata_row in metadata:
+            writer.writerow(metadata_row)
+
+        csv_rows.append(full_headers)
+
+        for trait in group_traits:
+            if getattr(trait, "symbol", None):
+                trait_symbol = getattr(trait, "symbol")
+            elif getattr(trait, "abbreviation", None):
+                trait_symbol = getattr(trait, "abbreviation")
+            else:
+                trait_symbol = "N/A"
+            row_contents = [
+                trait.dataset.group.species,
+                trait.dataset.group.name,
+                trait.dataset.name,
+                trait.name,
+                trait_symbol,
+                getattr(trait, "description_display", "N/A"),
+                getattr(trait, "probe_target_description", "N/A"),
+                getattr(trait, "pubmed_id", "N/A"),
+                getattr(trait, "chr", "N/A"),
+                getattr(trait, "mb", "N/A"),
+                trait.alias_fmt,
+                getattr(trait, "geneid", "N/A"),
+                getattr(trait, "homologeneid", "N/A"),
+                getattr(trait, "unigeneid", "N/A"),
+                getattr(trait, "strand_probe", "N/A"),
+                getattr(trait, "probe_set_specificity", "N/A"),
+                getattr(trait, "probe_set_blat_score", "N/A"),
+                getattr(trait, "probe_set_blat_mb_start", "N/A"),
+                getattr(trait, "probe_set_blat_mb_end", "N/A"),
+                getattr(trait, "locus_chr", "N/A"),
+                getattr(trait, "locus_mb", "N/A"),
+                getattr(trait, "locus", "N/A"),
+                getattr(trait, "lrs", "N/A"),
+                getattr(trait, "pvalue", "N/A"),
+                getattr(trait, "mean", "N/A")
+            ]
+
+            for sample in trait.dataset.group.samplelist:
+                if sample in trait.data:
+                    row_contents += [trait.data[sample].value, trait.data[sample].variance]
+                else:
+                    row_contents += ["x", "x"]
+
+            csv_rows.append(row_contents)
+
+        csv_rows = map(list, itertools.izip_longest(*[row for row in csv_rows]))
+        writer.writerows(csv_rows)
+        csv_data = buff.getvalue()
+        buff.close()
 
-    for metadata_row in metadata:
-        writer.writerow(metadata_row)
+        file_name = group + "_traits.csv"
+        file_list.append([file_name, csv_data])
 
-    writer.writerow([])
+    return file_list
 
-    writer.writerow(table_headers)
-    for trait_info in table_rows:
-        writer.writerow(trait_info)
+def sort_traits_by_group(trait_list=[]):
+    traits_by_group = {}
+    for trait in trait_list:
+        if trait.dataset.group.name not in traits_by_group.keys():
+            traits_by_group[trait.dataset.group.name] = []
 
-    writer.writerow([])
-    writer.writerow(["Funding for The GeneNetwork: NIAAA (U01AA13499, U24AA13513), NIDA, NIMH, and NIAAA (P20-DA21131), NCI MMHCC (U01CA105417), and NCRR (U01NR 105417)"])
-    csv_data = buff.getvalue()
-    buff.close()
+        traits_by_group[trait.dataset.group.name].append(trait)
 
-    return csv_data, file_name
\ No newline at end of file
+    return traits_by_group
\ No newline at end of file
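
The least obvious step in the new export code is the `itertools.izip_longest` transpose: `csv_rows` is assembled with one list per trait (the header list first), then transposed so each group's CSV ends up with one column per trait, padded with `None` where the lists differ in length. A small sketch of the same idea, written with the Python 3 spelling `zip_longest` (the commit itself runs on Python 2, hence `izip_longest` and `StringIO`); the field values are placeholders:

```python
import csv
import io
from itertools import zip_longest  # Python 2 spelling used in the diff: itertools.izip_longest

# One list per exported trait, with the header list first (placeholder values).
csv_rows = [
    ["Species", "Group", "Dataset", "Record ID"],
    ["mouse", "BXD", "HC_M2_0606_P", "1427571_at"],
    ["mouse", "BXD", "HC_M2_0606_P", "1452452_at"],
]

# Transpose rows -> columns; zip_longest pads ragged rows with None.
columns = [list(col) for col in zip_longest(*csv_rows)]

buff = io.StringIO()
csv.writer(buff).writerows(columns)
print(buff.getvalue())
# Species,mouse,mouse
# Group,BXD,BXD
# Dataset,HC_M2_0606_P,HC_M2_0606_P
# Record ID,1427571_at,1452452_at
```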
diff --git a/wqflask/wqflask/static/new/javascript/search_results.js b/wqflask/wqflask/static/new/javascript/search_results.js
index 39aae113..b3ed06fc 100644
--- a/wqflask/wqflask/static/new/javascript/search_results.js
+++ b/wqflask/wqflask/static/new/javascript/search_results.js
@@ -161,23 +161,11 @@ $(function() {
         trait_table.find('tbody tr').each(function (i, tr) {
             if (trait_table.find('input[name="searchResult"]:checked').length > 0) {
                 if ($(this).find('input[name="searchResult"]').is(':checked')){
-                    this_row = [];
-                    $(tr).find('td').each(function(j, td){
-                        if ($(td).data('export')){
-                            this_row.push($(td).data('export'));
-                        }
-                    });
-                    rows.push(this_row);
+                    rows.push($(this).find('input[name="searchResult"]:checked').val())
                 }
             } else {
-                this_row = [];
-                $(tr).find('td').each(function(j, td){
-                    if ($(td).data('export')){
-                        this_row.push($(td).data('export'));
-                    }
-                });
-                rows.push(this_row);
+                rows.push($(this).find('input[name="searchResult"]').val())
             }
         });
         table_dict['rows'] = rows;
diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py
index 131345d3..57183eed 100644
--- a/wqflask/wqflask/views.py
+++ b/wqflask/wqflask/views.py
@@ -11,24 +11,24 @@ import datetime # for errors
 import time # for errors
 import sys
 import csv
+import simplejson as json
+import yaml
 import xlsxwriter
 import StringIO # Todo: Use cStringIO?
 
+from zipfile import ZipFile, ZIP_DEFLATED
+
 import gc
 import numpy as np
-
 import cPickle as pickle
 import uuid
 
-import simplejson as json
-import yaml
-
 import flask
 import base64
 import array
 import sqlalchemy
 from wqflask import app
-from flask import g, Response, request, make_response, render_template, send_from_directory, jsonify, redirect, url_for
+from flask import g, Response, request, make_response, render_template, send_from_directory, jsonify, redirect, url_for, send_file
 from wqflask import group_manager
 from wqflask import resource_manager
 from wqflask import search_results
@@ -421,11 +421,21 @@ def export_traits_csv():
     logger.info("In export_traits_csv")
     logger.info("request.form:", request.form)
     logger.info(request.url)
-    csv_data, file_name = export_traits.export_search_results_csv(request.form)
+    file_list = export_traits.export_search_results_csv(request.form)
 
-    return Response(csv_data,
-                    mimetype='text/csv',
-                    headers={"Content-Disposition":"attachment;filename=" + file_name + ".csv"})
+    if len(file_list) > 1:
+        memory_file = StringIO.StringIO()
+        with ZipFile(memory_file, mode='w', compression=ZIP_DEFLATED) as zf:
+            for the_file in file_list:
+                zf.writestr(the_file[0], the_file[1])
+
+        memory_file.seek(0)
+
+        return send_file(memory_file, attachment_filename=filename + ".zip", as_attachment=True)
+    else:
+        return Response(file_list[0][1],
+                        mimetype='text/csv',
+                        headers={"Content-Disposition":"attachment;filename=" + file_list[0][0]})
 
 @app.route('/export_perm_data', methods=('POST',))
 def export_perm_data():
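
When `export_search_results_csv` returns more than one `[file_name, csv_data]` pair, the new route bundles them into a zip built entirely in memory and returns it with Flask's `send_file` (the `attachment_filename` argument in the hunk above uses a `filename` variable that is not defined within the shown hunk, so the sketch below stops at the buffer). A self-contained sketch of that bundling pattern, using Python 3's `io.BytesIO` in place of the commit's Python 2 `StringIO.StringIO`; the group names and CSV contents are placeholders:

```python
import io
from zipfile import ZipFile, ZIP_DEFLATED

# Stand-ins for the [file_name, csv_data] pairs returned per group.
file_list = [
    ["BXD_traits.csv", "Species,mouse\n"],
    ["HSNIH-Palmer_traits.csv", "Species,rat\n"],
]

memory_file = io.BytesIO()
with ZipFile(memory_file, mode='w', compression=ZIP_DEFLATED) as zf:
    for name, data in file_list:
        zf.writestr(name, data)   # each group's CSV becomes one archive member
memory_file.seek(0)               # rewind so the buffer can be streamed to the client

# Sanity check: the archive now contains one member per group.
with ZipFile(memory_file) as zf:
    print(zf.namelist())          # ['BXD_traits.csv', 'HSNIH-Palmer_traits.csv']
```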