-rw-r--r--   wqflask/base/data_set.py                                   7
-rw-r--r--   wqflask/base/trait.py                                      3
-rw-r--r--   wqflask/utility/authentication_tools.py                   61
-rw-r--r--   wqflask/utility/redis_tools.py                             5
-rw-r--r--   wqflask/wqflask/export_traits.py                         119
-rw-r--r--   wqflask/wqflask/static/new/javascript/search_results.js   16
-rw-r--r--   wqflask/wqflask/templates/base.html                        6
-rw-r--r--   wqflask/wqflask/templates/mapping_results.html             1
-rw-r--r--   wqflask/wqflask/templates/show_trait.html                  3
-rw-r--r--   wqflask/wqflask/views.py                                  28
10 files changed, 184 insertions, 65 deletions
diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py
index 21ace006..5d562871 100644
--- a/wqflask/base/data_set.py
+++ b/wqflask/base/data_set.py
@@ -959,7 +959,7 @@ class GenotypeDataSet(DataSet):
def retrieve_sample_data(self, trait):
query = """
SELECT
- Strain.Name, GenoData.value, GenoSE.error, GenoData.Id, Strain.Name2
+ Strain.Name, GenoData.value, GenoSE.error, "N/A", Strain.Name2
FROM
(GenoData, GenoFreeze, Strain, Geno, GenoXRef)
left join GenoSE on
@@ -1130,11 +1130,14 @@ class MrnaAssayDataSet(DataSet):
def retrieve_sample_data(self, trait):
query = """
SELECT
- Strain.Name, ProbeSetData.value, ProbeSetSE.error, ProbeSetData.Id, Strain.Name2
+ Strain.Name, ProbeSetData.value, ProbeSetSE.error, NStrain.count, Strain.Name2
FROM
(ProbeSetData, ProbeSetFreeze, Strain, ProbeSet, ProbeSetXRef)
left join ProbeSetSE on
(ProbeSetSE.DataId = ProbeSetData.Id AND ProbeSetSE.StrainId = ProbeSetData.StrainId)
+ left join NStrain on
+ (NStrain.DataId = ProbeSetData.Id AND
+ NStrain.StrainId = ProbeSetData.StrainId)
WHERE
ProbeSet.Name = '%s' AND ProbeSetXRef.ProbeSetId = ProbeSet.Id AND
ProbeSetXRef.ProbeSetFreezeId = ProbeSetFreeze.Id AND
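The reworked queries above give the genotype and mRNA-assay cases the same five-column row shape (sample name, value, standard error, N-of-cases, secondary name); the Geno query fills the fourth column with the literal "N/A" because genotypes carry no NStrain counts. A minimal sketch of unpacking those rows follows; `db` is a placeholder for the SQLAlchemy-style connection the codebase exposes as g.db, and `collect_sample_data` is not part of this commit.

    def collect_sample_data(db, query):
        samples = {}
        for name, value, se, num_cases, name2 in db.execute(query).fetchall():
            samples[name] = {
                "value": value,
                "se": se,                # None when no ProbeSetSE/GenoSE row matched the left join
                "num_cases": num_cases,  # NStrain.count, or the literal "N/A" from the Geno query
                "name2": name2,
            }
        return samples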
diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py
index f9da7b87..7666348e 100644
--- a/wqflask/base/trait.py
+++ b/wqflask/base/trait.py
@@ -152,7 +152,7 @@ class GeneralTrait(object):
'''Return a text formatted alias'''
alias = 'Not available'
- if self.alias:
+ if getattr(self, "alias", None):
alias = string.replace(self.alias, ";", " ")
alias = string.join(string.split(alias), ", ")
@@ -412,6 +412,7 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False):
Phenotype.Id = PublishXRef.PhenotypeId AND
Publication.Id = PublishXRef.PublicationId AND
PublishXRef.InbredSetId = PublishFreeze.InbredSetId AND
+ PublishXRef.InbredSetId = InbredSet.Id AND
PublishFreeze.Id = %s
""" % (trait.name, dataset.id)
diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py
index f9028f32..ed7462d1 100644
--- a/wqflask/utility/authentication_tools.py
+++ b/wqflask/utility/authentication_tools.py
@@ -6,7 +6,7 @@ import requests
from base import data_set, webqtlConfig
from utility import hmac
-from utility.redis_tools import get_redis_conn, get_resource_info, get_resource_id
+from utility.redis_tools import get_redis_conn, get_resource_info, get_resource_id, add_resource
Redis = get_redis_conn()
from flask import Flask, g, redirect, url_for
@@ -16,13 +16,7 @@ logger = logging.getLogger(__name__ )
def check_resource_availability(dataset, trait_id=None):
- #ZS: Check if super-user - we should probably come up with some way to integrate this into the proxy
- if g.user_session.user_id in Redis.smembers("super_users"):
- return webqtlConfig.SUPER_PRIVILEGES
-
- response = None
-
- #At least for now assume temporary entered traits are accessible#At least for now assume temporary entered traits are accessible
+ #At least for now assume temporary entered traits are accessible
if type(dataset) == str:
return webqtlConfig.DEFAULT_PRIVILEGES
if dataset.type == "Temp":
@@ -33,9 +27,13 @@ def check_resource_availability(dataset, trait_id=None):
if resource_id:
resource_info = get_resource_info(resource_id)
if not resource_info:
- return webqtlConfig.DEFAULT_PRIVILEGES
- else:
- return response #ZS: Need to substitute in something that creates the resource in Redis later
+ resource_info = add_new_resource(dataset, trait_id)
+
+ #ZS: Check if super-user - we should probably come up with some way to integrate this into the proxy
+ if g.user_session.user_id in Redis.smembers("super_users"):
+ return webqtlConfig.SUPER_PRIVILEGES
+
+ response = None
the_url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id)
try:
@@ -43,10 +41,43 @@ def check_resource_availability(dataset, trait_id=None):
except:
response = resource_info['default_mask']
- if response:
- return response
- else: #ZS: No idea how this would happen, but just in case
- return False
+ return response
+
+def add_new_resource(dataset, trait_id=None):
+ resource_ob = {
+ 'owner_id' : webqtlConfig.DEFAULT_OWNER_ID,
+ 'default_mask': webqtlConfig.DEFAULT_PRIVILEGES,
+ 'group_masks' : {}
+ }
+
+ if dataset.type == "Publish":
+ resource_ob['name'] = get_group_code(dataset) + "_" + str(trait_id)
+ resource_ob['data'] = {
+ 'dataset': dataset.id,
+ 'trait' : trait_id
+ }
+ resource_ob['type'] = 'dataset-publish'
+ elif dataset.type == "Geno":
+ resource_ob['name'] = dataset.name
+ resource_ob['data'] = {
+ 'dataset': dataset.id
+ }
+ resource_ob['type'] = 'dataset-geno'
+ else:
+ resource_ob['name'] = dataset.name
+ resource_ob['data'] = {
+ 'dataset': dataset.id
+ }
+ resource_ob['type'] = 'dataset-probeset'
+
+ resource_info = add_resource(resource_ob, update=False)
+
+ return resource_info
+
+def get_group_code(dataset):
+ results = g.db.execute("SELECT InbredSetCode from InbredSet where Name='{}'".format(dataset.group.name)).fetchone()
+
+ return results[0]
def check_admin(resource_id=None):
the_url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id)
diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py
index 6c912a23..1377a564 100644
--- a/wqflask/utility/redis_tools.py
+++ b/wqflask/utility/redis_tools.py
@@ -264,17 +264,14 @@ def get_resources():
return resource_list
def get_resource_id(dataset, trait_id=None):
+ resource_id = False
if dataset.type == "Publish":
if trait_id:
resource_id = hmac.hmac_creation("{}:{}:{}".format('dataset-publish', dataset.id, trait_id))
- else:
- return False
elif dataset.type == "ProbeSet":
resource_id = hmac.hmac_creation("{}:{}".format('dataset-probeset', dataset.id))
elif dataset.type == "Geno":
resource_id = hmac.hmac_creation("{}:{}".format('dataset-geno', dataset.id))
- else:
- return False
return resource_id
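get_resource_id() now always returns a value (False when no id can be built), with the id itself still derived by hmac_creation() in utility/hmac.py. The snippet below only illustrates the general shape of such an HMAC-derived id using the standard library; it is not the project's implementation, whose key handling and digest may differ.

    import hashlib
    import hmac as std_hmac

    def illustrative_resource_id(message, secret='not-the-real-secret'):
        return std_hmac.new(secret, message, hashlib.sha1).hexdigest()

    print illustrative_resource_id('{}:{}:{}'.format('dataset-publish', 1, '10001'))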
diff --git a/wqflask/wqflask/export_traits.py b/wqflask/wqflask/export_traits.py
index 2d96c05a..3272c03d 100644
--- a/wqflask/wqflask/export_traits.py
+++ b/wqflask/wqflask/export_traits.py
@@ -4,24 +4,30 @@ import csv
import xlsxwriter
import StringIO
import datetime
+import itertools
+
+from zipfile import ZipFile, ZIP_DEFLATED
import simplejson as json
+from base.trait import create_trait, retrieve_trait_info
+
from pprint import pformat as pf
+from utility.logger import getLogger
+logger = getLogger(__name__ )
+
def export_search_results_csv(targs):
table_data = json.loads(targs['export_data'])
- table_headers = table_data['headers']
table_rows = table_data['rows']
- buff = StringIO.StringIO()
- writer = csv.writer(buff)
-
+ now = datetime.datetime.now()
+ time_str = now.strftime('%H:%M_%d%B%Y')
if 'file_name' in targs:
- file_name = targs['file_name']
+ zip_file_name = targs['file_name'] + "_export_" + time_str
else:
- file_name = "table_export.csv"
+ zip_file_name = "export_" + time_str
metadata = []
@@ -40,19 +46,98 @@ def export_search_results_csv(targs):
if targs['filter_term'] != "None":
metadata.append(["Search Filter Terms: " + targs['filter_term']])
metadata.append(["Exported Row Number: " + str(len(table_rows))])
+ metadata.append(["Funding for The GeneNetwork: NIAAA (U01AA13499, U24AA13513), NIDA, NIMH, and NIAAA (P20-DA21131), NCI MMHCC (U01CA105417), and NCRR (U01NR 105417)"])
+ metadata.append([])
+
+ trait_list = []
+ for trait in table_rows:
+ trait_name, dataset_name, _hash = trait.split(":")
+ trait_ob = create_trait(name=trait_name, dataset_name=dataset_name)
+ trait_ob = retrieve_trait_info(trait_ob, trait_ob.dataset, get_qtl_info=True)
+ trait_list.append(trait_ob)
+
+ table_headers = ['Species', 'Group', 'Dataset', 'Record ID', 'Symbol', 'Description', 'ProbeTarget', 'PubMed_ID', 'Chr', 'Mb', 'Alias', 'Gene_ID', 'Homologene_ID', 'UniGene_ID', 'Strand_Probe', 'Probe_set_specificity', 'Probe_set_BLAT_score', 'Probe_set_BLAT_Mb_start', 'Probe_set_BLAT_Mb_end', 'QTL_Chr', 'QTL_Mb', 'Locus_at_Peak', 'Max_LRS', 'P_value_of_MAX', 'Mean_Expression']
+
+ traits_by_group = sort_traits_by_group(trait_list)
+
+ file_list = []
+ for group in traits_by_group.keys():
+ group_traits = traits_by_group[group]
+ buff = StringIO.StringIO()
+ writer = csv.writer(buff)
+ csv_rows = []
+
+ sample_headers = []
+ for sample in group_traits[0].dataset.group.samplelist:
+ sample_headers.append(sample)
+ sample_headers.append(sample + "_SE")
+
+ full_headers = table_headers + sample_headers
+
+ for metadata_row in metadata:
+ writer.writerow(metadata_row)
+
+ csv_rows.append(full_headers)
+
+ for trait in group_traits:
+ if getattr(trait, "symbol", None):
+ trait_symbol = getattr(trait, "symbol")
+ elif getattr(trait, "abbreviation", None):
+ trait_symbol = getattr(trait, "abbreviation")
+ else:
+ trait_symbol = "N/A"
+ row_contents = [
+ trait.dataset.group.species,
+ trait.dataset.group.name,
+ trait.dataset.name,
+ trait.name,
+ trait_symbol,
+ getattr(trait, "description_display", "N/A"),
+ getattr(trait, "probe_target_description", "N/A"),
+ getattr(trait, "pubmed_id", "N/A"),
+ getattr(trait, "chr", "N/A"),
+ getattr(trait, "mb", "N/A"),
+ trait.alias_fmt,
+ getattr(trait, "geneid", "N/A"),
+ getattr(trait, "homologeneid", "N/A"),
+ getattr(trait, "unigeneid", "N/A"),
+ getattr(trait, "strand_probe", "N/A"),
+ getattr(trait, "probe_set_specificity", "N/A"),
+ getattr(trait, "probe_set_blat_score", "N/A"),
+ getattr(trait, "probe_set_blat_mb_start", "N/A"),
+ getattr(trait, "probe_set_blat_mb_end", "N/A"),
+ getattr(trait, "locus_chr", "N/A"),
+ getattr(trait, "locus_mb", "N/A"),
+ getattr(trait, "locus", "N/A"),
+ getattr(trait, "lrs", "N/A"),
+ getattr(trait, "pvalue", "N/A"),
+ getattr(trait, "mean", "N/A")
+ ]
+
+ for sample in trait.dataset.group.samplelist:
+ if sample in trait.data:
+ row_contents += [trait.data[sample].value, trait.data[sample].variance]
+ else:
+ row_contents += ["x", "x"]
+
+ csv_rows.append(row_contents)
+
+ csv_rows = map(list, itertools.izip_longest(*[row for row in csv_rows]))
+ writer.writerows(csv_rows)
+ csv_data = buff.getvalue()
+ buff.close()
- for metadata_row in metadata:
- writer.writerow(metadata_row)
+ file_name = group + "_traits.csv"
+ file_list.append([file_name, csv_data])
- writer.writerow([])
+ return file_list
- writer.writerow(table_headers)
- for trait_info in table_rows:
- writer.writerow(trait_info)
+def sort_traits_by_group(trait_list=[]):
+ traits_by_group = {}
+ for trait in trait_list:
+ if trait.dataset.group.name not in traits_by_group.keys():
+ traits_by_group[trait.dataset.group.name] = []
- writer.writerow([])
- writer.writerow(["Funding for The GeneNetwork: NIAAA (U01AA13499, U24AA13513), NIDA, NIMH, and NIAAA (P20-DA21131), NCI MMHCC (U01CA105417), and NCRR (U01NR 105417)"])
- csv_data = buff.getvalue()
- buff.close()
+ traits_by_group[trait.dataset.group.name].append(trait)
- return csv_data, file_name
\ No newline at end of file
+ return traits_by_group
\ No newline at end of file
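The izip_longest() call in the new export code transposes the accumulated rows so each trait becomes a column of the CSV, with None padding any ragged rows; a small standalone example (Python 2, matching the module):

    import itertools

    csv_rows = [
        ['Species', 'Group', 'Dataset'],
        ['mouse', 'BXD', 'HC_M2_0606_P'],
        ['mouse', 'BXD'],                 # a shorter row
    ]
    print map(list, itertools.izip_longest(*csv_rows))
    # [['Species', 'mouse', 'mouse'], ['Group', 'BXD', 'BXD'], ['Dataset', 'HC_M2_0606_P', None]]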
diff --git a/wqflask/wqflask/static/new/javascript/search_results.js b/wqflask/wqflask/static/new/javascript/search_results.js
index 39aae113..b3ed06fc 100644
--- a/wqflask/wqflask/static/new/javascript/search_results.js
+++ b/wqflask/wqflask/static/new/javascript/search_results.js
@@ -161,23 +161,11 @@ $(function() {
trait_table.find('tbody tr').each(function (i, tr) {
if (trait_table.find('input[name="searchResult"]:checked').length > 0) {
if ($(this).find('input[name="searchResult"]').is(':checked')){
- this_row = [];
- $(tr).find('td').each(function(j, td){
- if ($(td).data('export')){
- this_row.push($(td).data('export'));
- }
- });
- rows.push(this_row);
+ rows.push($(this).find('input[name="searchResult"]:checked').val())
}
}
else {
- this_row = [];
- $(tr).find('td').each(function(j, td){
- if ($(td).data('export')){
- this_row.push($(td).data('export'));
- }
- });
- rows.push(this_row);
+ rows.push($(this).find('input[name="searchResult"]').val())
}
});
table_dict['rows'] = rows;
diff --git a/wqflask/wqflask/templates/base.html b/wqflask/wqflask/templates/base.html
index 1879e075..50562200 100644
--- a/wqflask/wqflask/templates/base.html
+++ b/wqflask/wqflask/templates/base.html
@@ -96,7 +96,11 @@
</li>
{% if g.user_session.logged_in %}
<li class="">
- <a id="manage_groups" title="Manage Groups" href="/groups/manage">Manage Groups</a>
+ <a href="/edit_account_settings" class="dropdow-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">User Account Settings<span class="caret"></a>
+ <ul class="dropdown-menu">
+ <li><a id="manage_user" title="User Options" href="/user/manage">User Options</a></li>
+ <li><a id="manage_groups" title="Manage Groups" href="/groups/manage">Manage Groups</a></li>
+ </ul>
</li>
{% endif %}
{% endif %}
diff --git a/wqflask/wqflask/templates/mapping_results.html b/wqflask/wqflask/templates/mapping_results.html
index 132d5249..81803deb 100644
--- a/wqflask/wqflask/templates/mapping_results.html
+++ b/wqflask/wqflask/templates/mapping_results.html
@@ -333,6 +333,7 @@
{% block js %}
<script type="text/javascript" src="http://d3js.org/d3.v3.min.js"></script>
+ <script type="text/javascript" src="/static/new/js_external/md5.min.js"></script>
<script type="text/javascript" src="/static/new/js_external/underscore-min.js"></script>
<script type="text/javascript" src="/static/new/js_external/underscore.string.min.js"></script>
<script type="text/javascript" src="/static/new/js_external/d3-tip.min.js"></script>
diff --git a/wqflask/wqflask/templates/show_trait.html b/wqflask/wqflask/templates/show_trait.html
index 94885f26..acee6724 100644
--- a/wqflask/wqflask/templates/show_trait.html
+++ b/wqflask/wqflask/templates/show_trait.html
@@ -21,8 +21,7 @@
{% endif %}
</div>
- <form method="post" action="" target="_blank" name="trait_page" id="trait_data_form"
- class="form-horizontal">
+ <form method="post" action="" target="_blank" name="trait_page" id="trait_data_form" class="form-horizontal">
<div id="hidden_inputs">
<input type="hidden" name="trait_hmac" value="{{ data_hmac('{}:{}'.format(this_trait.name, dataset.name)) }}">
{% for key in hddn %}
diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py
index 131345d3..57183eed 100644
--- a/wqflask/wqflask/views.py
+++ b/wqflask/wqflask/views.py
@@ -11,24 +11,24 @@ import datetime # for errors
import time # for errors
import sys
import csv
+import simplejson as json
+import yaml
import xlsxwriter
import StringIO # Todo: Use cStringIO?
+from zipfile import ZipFile, ZIP_DEFLATED
+
import gc
import numpy as np
-
import cPickle as pickle
import uuid
-import simplejson as json
-import yaml
-
import flask
import base64
import array
import sqlalchemy
from wqflask import app
-from flask import g, Response, request, make_response, render_template, send_from_directory, jsonify, redirect, url_for
+from flask import g, Response, request, make_response, render_template, send_from_directory, jsonify, redirect, url_for, send_file
from wqflask import group_manager
from wqflask import resource_manager
from wqflask import search_results
@@ -421,11 +421,21 @@ def export_traits_csv():
logger.info("In export_traits_csv")
logger.info("request.form:", request.form)
logger.info(request.url)
- csv_data, file_name = export_traits.export_search_results_csv(request.form)
+ file_list = export_traits.export_search_results_csv(request.form)
- return Response(csv_data,
- mimetype='text/csv',
- headers={"Content-Disposition":"attachment;filename=" + file_name + ".csv"})
+ if len(file_list) > 1:
+ memory_file = StringIO.StringIO()
+ with ZipFile(memory_file, mode='w', compression=ZIP_DEFLATED) as zf:
+ for the_file in file_list:
+ zf.writestr(the_file[0], the_file[1])
+
+ memory_file.seek(0)
+
+ return send_file(memory_file, attachment_filename="export_" + datetime.datetime.now().strftime('%H:%M_%d%B%Y') + ".zip", as_attachment=True)
+ else:
+ return Response(file_list[0][1],
+ mimetype='text/csv',
+ headers={"Content-Disposition":"attachment;filename=" + file_list[0][0]})
@app.route('/export_perm_data', methods=('POST',))
def export_perm_data():
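The multi-file branch above packs the per-group CSVs into an in-memory zip and streams it back without touching disk; a condensed sketch of that pattern (assuming Python 2 and a Flask version whose send_file still accepts attachment_filename):

    import StringIO
    from zipfile import ZipFile, ZIP_DEFLATED
    from flask import send_file

    def zip_and_send(file_list, zip_name):
        # file_list holds (file_name, csv_text) pairs, as returned by
        # export_search_results_csv()
        memory_file = StringIO.StringIO()
        with ZipFile(memory_file, mode='w', compression=ZIP_DEFLATED) as zf:
            for name, contents in file_list:
                zf.writestr(name, contents)
        memory_file.seek(0)
        return send_file(memory_file,
                         attachment_filename=zip_name + '.zip',
                         as_attachment=True)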