From d2c7f76294cd7a1158402cdc67e0382bead17619 Mon Sep 17 00:00:00 2001 From: zsloan Date: Tue, 28 Apr 2020 11:48:45 -0500 Subject: Added some metadata to the trait export --- wqflask/wqflask/show_trait/export_trait_data.py | 30 ++++++++++++++++++++++++- wqflask/wqflask/user_manager.py | 24 ++++++++++---------- 2 files changed, 41 insertions(+), 13 deletions(-) diff --git a/wqflask/wqflask/show_trait/export_trait_data.py b/wqflask/wqflask/show_trait/export_trait_data.py index 7ca4a4c0..107f87c6 100644 --- a/wqflask/wqflask/show_trait/export_trait_data.py +++ b/wqflask/wqflask/show_trait/export_trait_data.py @@ -4,11 +4,17 @@ import simplejson as json from pprint import pformat as pf +from base.trait import GeneralTrait +from base import data_set + def export_sample_table(targs): sample_data = json.loads(targs['export_data']) trait_name = targs['trait_display_name'] - final_sample_data = [] + + meta_data = get_export_metadata(targs['trait_id'], targs['dataset']) + + final_sample_data = meta_data for sample_group in ['primary_samples', 'other_samples']: for row in sample_data[sample_group]: @@ -18,6 +24,28 @@ def export_sample_table(targs): return trait_name, final_sample_data +def get_export_metadata(trait_id, dataset_name): + dataset = data_set.create_dataset(dataset_name) + this_trait = GeneralTrait(dataset=dataset, + name=trait_id, + cellid=None, + get_qtl_info=False) + + metadata = [] + if dataset.type == "Publish": + metadata.append(["Phenotype ID: " + trait_id]) + metadata.append(["Phenotype URL: " + "http://genenetwork.org/show_trait?trait_id=" + trait_id + "&dataset=" + dataset_name]) + metadata.append(["Group: " + dataset.group.name]) + metadata.append(["Phenotype: " + this_trait.description_display.replace(",", "\",\"")]) + metadata.append(["Authors: " + this_trait.authors]) + metadata.append(["Title: " + this_trait.title]) + metadata.append(["Journal: " + this_trait.journal]) + metadata.append(["Dataset Link: http://gn1.genenetwork.org/webqtl/main.py?FormID=sharinginfo&InfoPageName=" + dataset.name]) + metadata.append([]) + + return metadata + + def dict_to_sorted_list(dictionary): sorted_list = [item for item in dictionary.iteritems()] sorted_list = sorted(sorted_list, cmp=cmp_samples) diff --git a/wqflask/wqflask/user_manager.py b/wqflask/wqflask/user_manager.py index 1b27d7cb..4dc3e18c 100644 --- a/wqflask/wqflask/user_manager.py +++ b/wqflask/wqflask/user_manager.py @@ -350,12 +350,12 @@ class UserSession(object): Redis.delete(self.cookie_name) logger.debug("At end of delete_session") -@app.before_request +#@app.before_request def before_request(): g.user_session = UserSession() g.cookie_session = AnonUser() -@app.after_request +#@app.after_request def set_cookie(response): if not request.cookies.get(g.cookie_session.cookie_name): response.set_cookie(g.cookie_session.cookie_name, g.cookie_session.cookie) @@ -537,7 +537,7 @@ def basic_info(): ip_address = request.remote_addr, user_agent = request.headers.get('User-Agent')) -@app.route("/manage/verify_email") +#@app.route("/manage/verify_email") def verify_email(): user = DecodeUser(VerificationEmail.key_prefix).user user.confirmed = json.dumps(basic_info(), sort_keys=True) @@ -551,7 +551,7 @@ def verify_email(): response.set_cookie(UserSession.cookie_name, session_id_signed) return response -@app.route("/n/password_reset", methods=['GET']) +#@app.route("/n/password_reset", methods=['GET']) def password_reset(): """Entry point after user clicks link in E-mail""" logger.debug("in password_reset request.url is:", request.url) @@ 
-575,7 +575,7 @@ def password_reset(): else: return redirect(url_for("login")) -@app.route("/n/password_reset_step2", methods=('POST',)) +#@app.route("/n/password_reset_step2", methods=('POST',)) def password_reset_step2(): """Handle confirmation E-mail for password reset""" logger.debug("in password_reset request.url is:", request.url) @@ -619,7 +619,7 @@ class DecodeUser(object): logger.debug("data is:", data) return model.User.query.get(data['id']) -@app.route("/n/login", methods=('GET', 'POST')) +#@app.route("/n/login", methods=('GET', 'POST')) def login(): lu = LoginUser() login_type = request.args.get("type") @@ -629,7 +629,7 @@ def login(): else: return lu.standard_login() -@app.route("/n/login/github_oauth2", methods=('GET', 'POST')) +#@app.route("/n/login/github_oauth2", methods=('GET', 'POST')) def github_oauth2(): from utility.tools import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET code = request.args.get("code") @@ -660,7 +660,7 @@ def github_oauth2(): url = "/n/login?type=github&uid="+user_details["user_id"] return redirect(url) -@app.route("/n/login/orcid_oauth2", methods=('GET', 'POST')) +#@app.route("/n/login/orcid_oauth2", methods=('GET', 'POST')) def orcid_oauth2(): from uuid import uuid4 from utility.tools import ORCID_CLIENT_ID, ORCID_CLIENT_SECRET, ORCID_TOKEN_URL, ORCID_AUTH_URL @@ -840,7 +840,7 @@ class LoginUser(object): db_session.add(login_rec) db_session.commit() -@app.route("/n/logout") +#@app.route("/n/logout") def logout(): logger.debug("Logging out...") UserSession().delete_session() @@ -851,7 +851,7 @@ def logout(): return response -@app.route("/n/forgot_password", methods=['GET']) +#@app.route("/n/forgot_password", methods=['GET']) def forgot_password(): """Entry point for forgotten password""" print("ARGS: ", request.args) @@ -859,7 +859,7 @@ def forgot_password(): print("ERRORS: ", errors) return render_template("new_security/forgot_password.html", errors=errors) -@app.route("/n/forgot_password_submit", methods=('POST',)) +#@app.route("/n/forgot_password_submit", methods=('POST',)) def forgot_password_submit(): """When a forgotten password form is submitted we get here""" params = request.form @@ -944,7 +944,7 @@ def is_redis_available(): # return LoginUser().actual_login(user, assumed_by=assumed_by) -@app.route("/n/register", methods=('GET', 'POST')) +#@app.route("/n/register", methods=('GET', 'POST')) def register(): params = None errors = None -- cgit v1.2.3 From 10b8dc7af35f0d221daf121d0e3c0a52d9223368 Mon Sep 17 00:00:00 2001 From: Danny Arends Date: Wed, 29 Apr 2020 04:38:42 -0500 Subject: Fixing loading of the ITP data as a 4way cross, worked on http://gn2-test3.genenetwork.org/ --- wqflask/wqflask/marker_regression/rqtl_mapping.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py index e1aa290b..9909b6d4 100644 --- a/wqflask/wqflask/marker_regression/rqtl_mapping.py +++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py @@ -128,7 +128,7 @@ def generate_cross_from_geno(dataset): # TODO: Need to figure out why som if(type == '4-way'){ genocodes <- c('1','2','3','4') } else { - genocodes <- c(getGenoCode(header, 'mat'), getGenoCode(header, 'het'), getGenoCode(header, 'pat')) # Get the genotype codes + genocodes <- c(getGenoCode(header, 'mat'), getGenoCode(header, 'het'), getGenoCode(header, 'pat')) # Get the genotype codes } genodata <- read.csv(genotypes, sep='\t', skip=toskip, header=TRUE, na.strings=getGenoCode(header,'unk'), 
colClasses='character', comment.char = '#') cat('Genodata:', toskip, " ", dim(genodata), genocodes, '\n') @@ -139,7 +139,11 @@ def generate_cross_from_geno(dataset): # TODO: Need to figure out why som cbind(genodata[,c('Locus','Chr', 'cM')], genodata[, 5:ncol(genodata)])) # Genotypes write.table(outCSVR, file = out, row.names=FALSE, col.names=FALSE,quote=FALSE, sep=',') # Save it to a file require(qtl) - cross = read.cross(file=out, 'csvr', genotypes=genocodes) # Load the created cross file using R/qtl read.cross + if(type == '4-way'){ + cross = read.cross(file=out, 'csvr', genotypes=genocodes, crosstype="4way", convertXdata=FALSE) # Load the created cross file using R/qtl read.cross + }else{ + cross = read.cross(file=out, 'csvr', genotypes=genocodes) # Load the created cross file using R/qtl read.cross + } if(type == 'riset') cross <- convert2riself(cross) # If its a RIL, convert to a RIL in R/qtl return(cross) } -- cgit v1.2.3 From 98d54f1861f2bbc82cffa049fef408b43351688a Mon Sep 17 00:00:00 2001 From: Danny Arends Date: Wed, 29 Apr 2020 04:50:31 -0500 Subject: Adding some debug, so we have some info in the output --- wqflask/wqflask/marker_regression/rqtl_mapping.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py index 9909b6d4..c1a56787 100644 --- a/wqflask/wqflask/marker_regression/rqtl_mapping.py +++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py @@ -140,11 +140,16 @@ def generate_cross_from_geno(dataset): # TODO: Need to figure out why som write.table(outCSVR, file = out, row.names=FALSE, col.names=FALSE,quote=FALSE, sep=',') # Save it to a file require(qtl) if(type == '4-way'){ + cat('Loading in as 4-WAY\n') cross = read.cross(file=out, 'csvr', genotypes=genocodes, crosstype="4way", convertXdata=FALSE) # Load the created cross file using R/qtl read.cross }else{ + cat('Loading in as normal\n') cross = read.cross(file=out, 'csvr', genotypes=genocodes) # Load the created cross file using R/qtl read.cross } - if(type == 'riset') cross <- convert2riself(cross) # If its a RIL, convert to a RIL in R/qtl + if(type == 'riset'){ + cat('Converting to RISELF\n') + cross <- convert2riself(cross) # If its a RIL, convert to a RIL in R/qtl + } return(cross) } """ % (dataset.group.genofile)) -- cgit v1.2.3 From 5b57353c5325a2677fef07630d2576a01641787a Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 29 Apr 2020 11:19:21 -0500 Subject: Fixed issue with global search when description field is empty --- wqflask/wqflask/gsearch.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/wqflask/wqflask/gsearch.py b/wqflask/wqflask/gsearch.py index 3d9b508a..04e3d578 100644 --- a/wqflask/wqflask/gsearch.py +++ b/wqflask/wqflask/gsearch.py @@ -75,7 +75,10 @@ class GSearch(object): this_trait['group'] = line[1] this_trait['tissue'] = line[2] this_trait['symbol'] = line[6] - this_trait['description'] = line[7].decode('utf-8', 'replace') + if line[7]: + this_trait['description'] = line[7].decode('utf-8', 'replace') + else: + this_trait['description'] = "N/A" this_trait['location_repr'] = 'N/A' if (line[8] != "NULL" and line[8] != "") and (line[9] != 0): this_trait['location_repr'] = 'Chr%s: %.6f' % (line[8], float(line[9])) -- cgit v1.2.3 From 77e3974eebfc48e49af07e07c08cd312edd34b99 Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 29 Apr 2020 16:34:58 -0500 Subject: Changed a lot about how mapping scale is read and gave the option to change mapping scale when both 
scales are available --- .../marker_regression/display_mapping_results.py | 25 +++--- wqflask/wqflask/marker_regression/rqtl_mapping.py | 48 +++++------- wqflask/wqflask/marker_regression/run_mapping.py | 18 +++-- wqflask/wqflask/show_trait/show_trait.py | 89 +++++++++++++++++++++- .../new/javascript/show_trait_mapping_tools.js | 19 +++++ wqflask/wqflask/templates/mapping_results.html | 4 + .../templates/show_trait_mapping_tools.html | 22 +++++- 7 files changed, 177 insertions(+), 48 deletions(-) diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py index 2a53b60e..f70bc555 100644 --- a/wqflask/wqflask/marker_regression/display_mapping_results.py +++ b/wqflask/wqflask/marker_regression/display_mapping_results.py @@ -265,14 +265,12 @@ class DisplayMappingResults(object): else: self.colorCollection = [self.LRS_COLOR] + self.dataset.group.genofile = self.genofile_string.split(":")[0] if self.mapping_method == "reaper" and self.manhattan_plot != True: self.genotype = self.dataset.group.read_genotype_file(use_reaper=True) else: self.genotype = self.dataset.group.read_genotype_file() - #if self.mapping_method == "rqtl_geno" and self.genotype.filler == True: - # self.genotype = self.genotype.read_rdata_output(self.qtlresults) - #Darwing Options try: if self.selectedChr > -1: @@ -1761,9 +1759,9 @@ class DisplayMappingResults(object): break if all_int: - max_lrs_width = canvas.stringWidth("%d" % LRS_LOD_Max, font=LRSScaleFont) + 30 + max_lrs_width = canvas.stringWidth("%d" % LRS_LOD_Max, font=LRSScaleFont) + 40 else: - max_lrs_width = canvas.stringWidth("%2.1f" % LRS_LOD_Max, font=LRSScaleFont) + 20 + max_lrs_width = canvas.stringWidth("%2.1f" % LRS_LOD_Max, font=LRSScaleFont) + 30 #draw the "LRS" or "LOD" string to the left of the axis canvas.drawString(self.LRS_LOD, xLeftOffset - max_lrs_width - 15*(zoom-1), \ @@ -1899,13 +1897,16 @@ class DisplayMappingResults(object): this_chr = str(self.ChrList[self.selectedChr][1]+1) if self.selectedChr == -1 or str(qtlresult['chr']) == this_chr: - if self.plotScale != "physic" and self.genotype.filler == True: - if self.selectedChr != -1: - start_cm = self.genotype[self.selectedChr - 1][0].cM - Xc = startPosX + (qtlresult['Mb'] - start_cm)*plotXScale - else: - start_cm = self.genotype[previous_chr_as_int][0].cM - Xc = startPosX + ((qtlresult['Mb']-start_cm-startMb)*plotXScale)*(((qtlresult['Mb']-start_cm-startMb)*plotXScale)/((qtlresult['Mb']-start_cm-startMb+self.GraphInterval)*plotXScale)) + if self.plotScale != "physic" and self.mapping_method == "reaper" and not self.manhattan_plot: + Xc = startPosX + (qtlresult['cM']-startMb)*plotXScale + if hasattr(self.genotype, "filler"): + if self.genotype.filler: + if self.selectedChr != -1: + start_cm = self.genotype[self.selectedChr - 1][0].cM + Xc = startPosX + (qtlresult['Mb'] - start_cm)*plotXScale + else: + start_cm = self.genotype[previous_chr_as_int][0].cM + Xc = startPosX + ((qtlresult['Mb']-start_cm-startMb)*plotXScale)*(((qtlresult['Mb']-start_cm-startMb)*plotXScale)/((qtlresult['Mb']-start_cm-startMb+self.GraphInterval)*plotXScale)) else: Xc = startPosX + (qtlresult['Mb']-startMb)*plotXScale diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py index c1a56787..8c294460 100644 --- a/wqflask/wqflask/marker_regression/rqtl_mapping.py +++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py @@ -11,7 +11,7 @@ from utility.tools import locate, TEMPDIR import 
utility.logger logger = utility.logger.getLogger(__name__ ) -def run_rqtl_geno(vals, samples, dataset, method, model, permCheck, num_perm, perm_strata_list, do_control, control_marker, manhattan_plot, pair_scan, cofactors): +def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permCheck, num_perm, perm_strata_list, do_control, control_marker, manhattan_plot, pair_scan, cofactors): ## Get pointers to some common R functions r_library = ro.r["library"] # Map the library function r_c = ro.r["c"] # Map the c function @@ -33,7 +33,13 @@ def run_rqtl_geno(vals, samples, dataset, method, model, permCheck, num_perm, pe # genofilelocation = locate(crossname + ".RData", "genotype/rdata") # cross_object = read_cross_from_rdata(genofilelocation) # Map the local GENOtoCSVR function #except: - generate_cross_from_geno(dataset) + + if mapping_scale == "morgan": + scale_units = "cM" + else: + scale_units = "Mb" + + generate_cross_from_geno(dataset, scale_units) GENOtoCSVR = ro.r["GENOtoCSVR"] # Map the local GENOtoCSVR function crossfilelocation = TMPDIR + crossname + ".cross" if dataset.group.genofile: @@ -47,6 +53,8 @@ def run_rqtl_geno(vals, samples, dataset, method, model, permCheck, num_perm, pe else: cross_object = calc_genoprob(cross_object, step=1, stepwidth="max") + logger.debug("VAL LEN:", len(vals)) + pheno_string = sanitize_rqtl_phenotype(vals) cross_object = add_phenotype(cross_object, pheno_string, "the_pheno") # Add the phenotype @@ -94,11 +102,9 @@ def run_rqtl_geno(vals, samples, dataset, method, model, permCheck, num_perm, pe perm_data_frame = scanone(cross_object, pheno_col = "the_pheno", n_perm = num_perm, model=model, method=method) perm_output, suggestive, significant = process_rqtl_perm_results(num_perm, perm_data_frame) # Functions that sets the thresholds for the webinterface - the_scale = check_mapping_scale(genofilelocation) - return perm_output, suggestive, significant, process_rqtl_results(result_data_frame, dataset.group.species), the_scale + return perm_output, suggestive, significant, process_rqtl_results(result_data_frame, dataset.group.species) else: - the_scale = check_mapping_scale(genofilelocation) - return process_rqtl_results(result_data_frame, dataset.group.species), the_scale + return process_rqtl_results(result_data_frame, dataset.group.species) def generate_cross_from_rdata(dataset): rdata_location = locate(dataset.group.name + ".RData", "genotype/rdata") @@ -110,7 +116,7 @@ def generate_cross_from_rdata(dataset): } """ % (rdata_location)) -def generate_cross_from_geno(dataset): # TODO: Need to figure out why some genofiles have the wrong format and don't convert properly +def generate_cross_from_geno(dataset, scale_units): # TODO: Need to figure out why some genofiles have the wrong format and don't convert properly ro.r(""" trim <- function( x ) { gsub("(^[[:space:]]+|[[:space:]]+$)", "", x) } @@ -127,21 +133,23 @@ def generate_cross_from_geno(dataset): # TODO: Need to figure out why som type <- getGenoCode(header, 'type') if(type == '4-way'){ genocodes <- c('1','2','3','4') + genodata <- read.csv(genotypes, sep='\t', skip=toskip, header=TRUE, na.strings=getGenoCode(header,'unk'), colClasses='character', comment.char = '#', crosstype="4way") } else { genocodes <- c(getGenoCode(header, 'mat'), getGenoCode(header, 'het'), getGenoCode(header, 'pat')) # Get the genotype codes + genodata <- read.csv(genotypes, sep='\t', skip=toskip, header=TRUE, na.strings=getGenoCode(header,'unk'), colClasses='character', comment.char = '#') } - genodata <- 
read.csv(genotypes, sep='\t', skip=toskip, header=TRUE, na.strings=getGenoCode(header,'unk'), colClasses='character', comment.char = '#') cat('Genodata:', toskip, " ", dim(genodata), genocodes, '\n') if(is.null(phenotype)) phenotype <- runif((ncol(genodata)-4)) # If there isn't a phenotype, generate a random one if(is.null(sex)) sex <- rep('m', (ncol(genodata)-4)) # If there isn't a sex phenotype, treat all as males outCSVR <- rbind(c('Pheno', '', '', phenotype), # Phenotype c('sex', '', '', sex), # Sex phenotype for the mice - cbind(genodata[,c('Locus','Chr', 'cM')], genodata[, 5:ncol(genodata)])) # Genotypes + cbind(genodata[,c('Locus','Chr', '%s')], genodata[, 5:ncol(genodata)])) # Genotypes write.table(outCSVR, file = out, row.names=FALSE, col.names=FALSE,quote=FALSE, sep=',') # Save it to a file require(qtl) if(type == '4-way'){ cat('Loading in as 4-WAY\n') - cross = read.cross(file=out, 'csvr', genotypes=genocodes, crosstype="4way", convertXdata=FALSE) # Load the created cross file using R/qtl read.cross + cross = read.cross(file=out, 'csvr', genotypes=genocodes) + #cross = read.cross(file=out, 'csvr', genotypes=genocodes, crosstype="4way", convertXdata=FALSE) # Load the created cross file using R/qtl read.cross }else{ cat('Loading in as normal\n') cross = read.cross(file=out, 'csvr', genotypes=genocodes) # Load the created cross file using R/qtl read.cross @@ -152,7 +160,7 @@ def generate_cross_from_geno(dataset): # TODO: Need to figure out why som } return(cross) } - """ % (dataset.group.genofile)) + """ % (dataset.group.genofile, scale_units)) def add_perm_strata(cross, perm_strata): col_string = 'c("the_strata")' @@ -300,20 +308,4 @@ def process_rqtl_results(result, species_name): # TODO: how to make this marker['lod_score'] = output[i][2] qtl_results.append(marker) - return qtl_results - -def check_mapping_scale(genofile_location): - scale = "physic" - with open(genofile_location, "r") as geno_fh: - for line in geno_fh: - if line[0] == "@" or line[0] == "#": - - if "@scale" in line: - scale = line.split(":")[1].strip() - break - else: - continue - else: - break - - return scale \ No newline at end of file + return qtl_results \ No newline at end of file diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py index 589be702..7449d8ce 100644 --- a/wqflask/wqflask/marker_regression/run_mapping.py +++ b/wqflask/wqflask/marker_regression/run_mapping.py @@ -156,6 +156,8 @@ class RunMapping(object): self.transform = "" self.score_type = "LRS" #ZS: LRS or LOD self.mapping_scale = "physic" + if "mapping_scale" in start_vars: + self.mapping_scale = start_vars['mapping_scale'] self.num_perm = 0 self.perm_output = [] self.bootstrap_results = [] @@ -255,9 +257,9 @@ class RunMapping(object): #if start_vars['pair_scan'] == "true": # self.pair_scan = True if self.permCheck and self.num_perm > 0: - self.perm_output, self.suggestive, self.significant, results, self.mapping_scale = rqtl_mapping.run_rqtl_geno(self.vals, self.samples, self.dataset, self.method, self.model, self.permCheck, self.num_perm, perm_strata, self.do_control, self.control_marker, self.manhattan_plot, self.pair_scan, self.covariates) + self.perm_output, self.suggestive, self.significant, results= rqtl_mapping.run_rqtl_geno(self.vals, self.samples, self.dataset, self.mapping_scale, self.method, self.model, self.permCheck, self.num_perm, perm_strata, self.do_control, self.control_marker, self.manhattan_plot, self.pair_scan, self.covariates) else: - results, 
self.mapping_scale = rqtl_mapping.run_rqtl_geno(self.vals, self.samples, self.dataset, self.method, self.model, self.permCheck, self.num_perm, perm_strata, self.do_control, self.control_marker, self.manhattan_plot, self.pair_scan, self.covariates) + results = rqtl_mapping.run_rqtl_geno(self.vals, self.samples, self.dataset, self.mapping_scale, self.method, self.model, self.permCheck, self.num_perm, perm_strata, self.do_control, self.control_marker, self.manhattan_plot, self.pair_scan, self.covariates) elif self.mapping_method == "reaper": if "startMb" in start_vars: #ZS: Check if first time page loaded, so it can default to ON if "additiveCheck" in start_vars: @@ -429,7 +431,7 @@ class RunMapping(object): with Bench("Trimming Markers for Table"): self.trimmed_markers = trim_markers_for_table(results) - chr_lengths = get_chr_lengths(self.mapping_scale, self.dataset, self.qtl_results) + chr_lengths = get_chr_lengths(self.mapping_scale, self.mapping_method, self.dataset, self.qtl_results) #ZS: For zooming into genome browser, need to pass chromosome name instead of number if self.dataset.group.species == "mouse": @@ -643,7 +645,7 @@ def geno_db_exists(this_dataset): except: return "False" -def get_chr_lengths(mapping_scale, dataset, qtl_results): +def get_chr_lengths(mapping_scale, mapping_method, dataset, qtl_results): chr_lengths = [] if mapping_scale == "physic": for i, the_chr in enumerate(dataset.species.chromosomes.chromosomes): @@ -666,8 +668,12 @@ def get_chr_lengths(mapping_scale, dataset, qtl_results): this_chr = chr_as_num highest_pos = 0 else: - if float(result['Mb']) > highest_pos: - highest_pos = float(result['Mb']) + if mapping_method == "reaper": + if float(result['cM']) > highest_pos: + highest_pos = float(result['cM']) + else: + if float(result['Mb']) > highest_pos: + highest_pos = float(result['Mb']) return chr_lengths diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index 8883e627..40e344b8 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -22,6 +22,7 @@ from base import webqtlConfig from base import webqtlCaseData from wqflask.show_trait.SampleList import SampleList from utility import webqtlUtil, Plot, Bunch, helper_functions +from utility.tools import locate_ignore_error from base.trait import GeneralTrait from base import data_set from db import webqtlDatabaseFunction @@ -170,6 +171,17 @@ class ShowTrait(object): self.genofiles = self.dataset.group.get_genofiles() + if "QTLReaper" or "R/qtl" in dataset.group.mapping_names: #ZS: No need to grab scales from .geno file unless it's using a mapping method that reads .geno files + if self.genofiles: + self.scales_in_geno = get_genotype_scales(self.genofiles) + else: + self.scales_in_geno = get_genotype_scales(self.dataset.group + ".geno") + + if len(self.scales_in_geno) < 2: + hddn['mapping_scale'] = self.scales_in_geno[self.scales_in_geno.keys()[0]][0] + else: + self.scales_in_geno = {} + self.has_num_cases = has_num_cases(self.this_trait) self.stats_table_width, self.trait_table_width = get_table_widths(self.sample_groups, self.has_num_cases) @@ -239,6 +251,7 @@ class ShowTrait(object): #hddn['control_marker'] = self.nearest_marker1+","+self.nearest_marker2 hddn['do_control'] = False hddn['maf'] = 0.05 + hddn['mapping_scale'] = "physic" hddn['compare_traits'] = [] hddn['export_data'] = "" hddn['export_format'] = "excel" @@ -251,6 +264,7 @@ class ShowTrait(object): short_description = short_description, unit_type = 
trait_units, dataset_type = self.dataset.type, + scales_in_geno = self.scales_in_geno, data_scale = self.dataset.data_scale, sample_group_types = self.sample_group_types, sample_lists = sample_lists, @@ -597,4 +611,77 @@ def get_categorical_variables(this_trait, sample_list): if num_distinct < 10: categorical_var_list.append(sample_list.attributes[attribute].name) - return categorical_var_list \ No newline at end of file + return categorical_var_list + +def get_genotype_scales(genofiles): + geno_scales = {} + if type(genofiles) is list: + for the_file in genofiles: + file_location = the_file['location'] + geno_scales[file_location] = get_scales_from_genofile(file_location) + else: + geno_scales[genofiles] = get_scales_from_genofile(genofiles) + + return geno_scales + +def get_scales_from_genofile(file_location): + geno_path = locate_ignore_error(file_location, 'genotype') + + if not geno_path: #ZS: This is just to allow the code to run when + return [["physic", "Mb"]] + cm_and_mb_cols_exist = True + cm_column = None + mb_column = None + with open(geno_path, "r") as geno_fh: + for i, line in enumerate(geno_fh): + if line[0] == "#" or line[0] == "@": + if "@scale" in line: #ZS: If the scale is made explicit in the metadata, use that + scale = line.split(":")[1].strip() + if scale == "morgan": + return [["morgan", "cM"]] + else: + return [["physic", "Mb"]] + else: + continue + if line[:3] == "Chr": + first_marker_line = i + 1 + if line.split("\t")[2].strip() == "cM": + cm_column = 2 + elif line.split("\t")[3].strip() == "cM": + cm_column = 3 + if line.split("\t")[2].strip() == "Mb": + mb_column = 2 + elif line.split("\t")[3].strip() == "Mb": + mb_column = 3 + break + + #ZS: This attempts to check whether the cM and Mb columns are 'real', since some .geno files have one column be a copy of the other column, or have one column that is all 0s + cm_all_zero = True + mb_all_zero = True + cm_mb_all_equal = True + for i, line in enumerate(geno_fh): + if first_marker_line <= i < first_marker_line + 10: #ZS: I'm assuming there won't be more than 10 markers where the position is listed as 0 + if cm_column: + cm_val = line.split("\t")[cm_column].strip() + if cm_val != "0": + cm_all_zero = False + if mb_column: + mb_val = line.split("\t")[mb_column].strip() + if mb_val != "0": + mb_all_zero = False + if cm_column and mb_column: + if cm_val != mb_val: + cm_mb_all_equal = False + else: + if i > first_marker_line + 10: + break + + #ZS: This assumes that both won't be all zero, since if that's the case mapping shouldn't be an option to begin with + if mb_all_zero: + return [["morgan", "cM"]] + elif cm_mb_all_equal: + return [["physic", "Mb"]] + elif cm_and_mb_cols_exist: + return [["physic", "Mb"], ["morgan", "cM"]] + else: + return [["physic", "Mb"]] diff --git a/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js b/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js index 478ed87e..7176a0da 100644 --- a/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js +++ b/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js @@ -165,6 +165,7 @@ url = "/loading"; $('input[name=method]').val("rqtl_geno"); $('input[name=selected_chr]').val($('#chr_rqtl_geno').val()); + $('input[name=mapping_scale]').val($('#scale_rqtl_geno').val()); $('input[name=genofile]').val($('#genofile_rqtl_geno').val()); $('input[name=num_perm]').val($('input[name=num_perm_rqtl_geno]').val()); $('input[name=categorical_vars]').val(js_data.categorical_vars) @@ -210,6 +211,7 @@ url = "/loading"; 
$('input[name=method]').val("reaper"); $('input[name=selected_chr]').val($('#chr_reaper').val()); + $('input[name=mapping_scale]').val($('#scale_reaper').val()); $('input[name=genofile]').val($('#genofile_reaper').val()); $('input[name=num_perm]').val($('input[name=num_perm_reaper]').val()); $('input[name=control_marker]').val($('input[name=control_reaper]').val()); @@ -289,4 +291,21 @@ return toggle_enable_disable("#suggestive_lrs"); }); + $('#genofile_rqtl_geno').change(function() { + geno_location = $(this).children("option:selected").val().split(":")[0] + $('#scale_rqtl_geno').empty() + the_scales = js_data.scales_in_geno[geno_location] + for (var i = 0; i < the_scales.length; i++){ + $('#scale_rqtl_geno').append($("").attr("value", the_scales[i][0]).text(the_scales[i][1])); + } + }); + $('#genofile_reaper').change(function() { + geno_location = $(this).children("option:selected").val().split(":")[0] + $('#scale_reaper').empty() + the_scales = js_data.scales_in_geno[geno_location] + for (var i = 0; i < the_scales.length; i++){ + $('#scale_reaper').append($("").attr("value", the_scales[i][0]).text(the_scales[i][1])); + } + }); + }).call(this); diff --git a/wqflask/wqflask/templates/mapping_results.html b/wqflask/wqflask/templates/mapping_results.html index 7e05be18..94ac0350 100644 --- a/wqflask/wqflask/templates/mapping_results.html +++ b/wqflask/wqflask/templates/mapping_results.html @@ -274,7 +274,11 @@ {% endif %}
 Outliers highlighted in
- yellow
+ orange
 can be hidden using the Hide Outliers button.
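The core of this change is the new get_scales_from_genofile() logic in show_trait.py above, which decides whether a .geno file can be mapped in Mb, cM, or both, so the templates can offer a scale selector. A condensed, illustrative sketch of that decision (it omits the all-zero and duplicated-column heuristics in the patch; the .geno header layout with an optional "@scale" metadata line is assumed from the code above):

    def detect_genofile_scales(geno_path):
        """Decide which mapping scales a .geno file supports, mirroring
        get_scales_from_genofile: an explicit "@scale" metadata line wins,
        otherwise the cM/Mb columns of the "Chr ..." header row are inspected."""
        with open(geno_path) as geno_fh:
            for line in geno_fh:
                if line.startswith(("#", "@")):
                    if "@scale" in line:
                        scale = line.split(":")[1].strip()
                        return [["morgan", "cM"]] if scale == "morgan" else [["physic", "Mb"]]
                    continue
                if line.startswith("Chr"):
                    header_cols = [col.strip() for col in line.split("\t")]
                    has_cm = "cM" in header_cols
                    has_mb = "Mb" in header_cols
                    if has_cm and has_mb:
                        return [["physic", "Mb"], ["morgan", "cM"]]
                    if has_cm:
                        return [["morgan", "cM"]]
                    break
        return [["physic", "Mb"]]

run_mapping.py then hands the selected scale to rqtl_mapping.run_rqtl_geno(), which maps "morgan" to the cM column and anything else to Mb when writing the cross file, as shown in the rqtl_mapping.py hunks above.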
-- cgit v1.2.3 From dc002daace320135b4d32ac08de2ee568e68ba51 Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 20 May 2020 14:17:17 -0500 Subject: Fixed issue when some case attribute values don't exist + added phenogen link to trait page --- wqflask/wqflask/show_trait/show_trait.py | 9 ++++++--- wqflask/wqflask/templates/show_trait_details.html | 6 ++++++ 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index 072d7f8c..29b2f77e 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -294,7 +294,7 @@ class ShowTrait(object): if check_if_attr_exists(self.this_trait, 'uniprotid'): self.uniprot_link = webqtlConfig.UNIPROT_URL % self.this_trait.uniprotid - self.genotation_link = self.rgd_link = self.gtex_link = self.genebridge_link = self.ucsc_blat_link = self.biogps_link = self.protein_atlas_link = None + self.genotation_link = self.rgd_link = self.phenogen_link = self.gtex_link = self.genebridge_link = self.ucsc_blat_link = self.biogps_link = self.protein_atlas_link = None self.string_link = self.panther_link = self.aba_link = self.ebi_gwas_link = self.wiki_pi_link = self.genemania_link = self.ensembl_link = None if self.this_trait.symbol: self.genotation_link = webqtlConfig.GENOTATION_URL % self.this_trait.symbol @@ -332,6 +332,7 @@ class ShowTrait(object): if self.dataset.group.species == "rat": self.rgd_link = webqtlConfig.RGD_URL % (self.this_trait.symbol, self.dataset.group.species.capitalize()) + self.phenogen_link = webqtlConfig.PHENOGEN_URL % (self.this_trait.symbol) self.genemania_link = webqtlConfig.GENEMANIA_URL % ("rattus-norvegicus", self.this_trait.symbol) query = """SELECT kgID, chromosome, txStart, txEnd @@ -603,8 +604,10 @@ def get_categorical_variables(this_trait, sample_list): for attribute in sample_list.attributes: attribute_vals = [] for sample_name in this_trait.data.keys(): - attribute_vals.append(this_trait.data[sample_name].extra_attributes[sample_list.attributes[attribute].name]) - + if sample_list.attributes[attribute].name in this_trait.data[sample_name].extra_attributes: + attribute_vals.append(this_trait.data[sample_name].extra_attributes[sample_list.attributes[attribute].name]) + else: + attribute_vals.append("N/A") num_distinct = len(set(attribute_vals)) if num_distinct < 10: diff --git a/wqflask/wqflask/templates/show_trait_details.html b/wqflask/wqflask/templates/show_trait_details.html index 62268a54..878b6ced 100644 --- a/wqflask/wqflask/templates/show_trait_details.html +++ b/wqflask/wqflask/templates/show_trait_details.html @@ -141,6 +141,12 @@ GTEx Portal + {% if phenogen_link %} + + PhenoGen + + + {% endif %} {% if genebridge_link %} GeneBridge -- cgit v1.2.3 From 81ba67bb1f8f18983ae983e70d985eebe16acde1 Mon Sep 17 00:00:00 2001 From: zsloan Date: Thu, 21 May 2020 12:45:31 -0500 Subject: Fixed search page table widths by setting max-width, at least for the time being --- wqflask/wqflask/templates/search_result_page.html | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/wqflask/wqflask/templates/search_result_page.html b/wqflask/wqflask/templates/search_result_page.html index 767e3487..89ca6681 100644 --- a/wqflask/wqflask/templates/search_result_page.html +++ b/wqflask/wqflask/templates/search_result_page.html @@ -8,7 +8,7 @@ {% endblock %} {% block content %} -Loading... |
@@ -265,7 +265,7 @@
'columns': [
{
'data': null,
- 'width': "30px",
+ 'width': "25px",
'orderDataType': "dom-checkbox",
'orderSequence': [ "desc", "asc"],
'render': function(data, type, row, meta) {
@@ -322,18 +322,20 @@
'title': "Max LRS?",
'type': "natural",
'data': "lrs_score",
+ 'width': "80px",
'orderSequence': [ "desc", "asc"]
},
{
'title': "Max LRS Location",
'type': "natural",
- 'width': "120px",
+ 'width': "150px",
'data': "lrs_location"
},
{
'title': "Additive Effect?",
'type': "natural",
'data': "additive",
+ 'width': "120px",
'orderSequence': [ "desc", "asc"]
}{% elif dataset.type == 'Publish' %},
{
@@ -380,6 +382,7 @@
'type': "natural",
'orderDataType': "dom-inner-text",
'data': null,
+ 'width': "80px",
'render': function(data, type, row, meta) {
if (data.pubmed_id != "N/A"){
return '' + data.pubmed_text + ''
@@ -393,25 +396,26 @@
'title': "Max LRS?",
'type': "natural",
'data': "lrs_score",
+ 'width': "80px",
'orderSequence': [ "desc", "asc"]
},
{
'title': "Max LRS Location",
'type': "natural",
- 'width': "200px",
+ 'width': "150px",
'data': "lrs_location"
},
{
- 'title': "Additive
Loading... |
@@ -341,7 +346,6 @@
{
'title': "Description",
'type': "natural",
- 'width': "800px",
'data': null,
'render': function(data, type, row, meta) {
try {
@@ -420,6 +424,7 @@
}{% endif %}
],
"order": [[1, "asc" ]],
+ {% if dataset.type != 'Geno' %}
buttons: [
{
extend: 'columnsToggle',
@@ -434,6 +439,9 @@
}
],
'sDom': "Bitir",
+ {% else %}
+ 'sDom': "itir",
+ {% endif %}
'deferRender': true,
'paging': false,
'orderClasses': true,
--
cgit v1.2.3
From 2c3301d25c505c217518e7133e0f4cf53797c5b9 Mon Sep 17 00:00:00 2001
From: zsloan
Date: Thu, 21 May 2020 16:34:09 -0500
Subject: More adjusting to table widths
---
wqflask/wqflask/templates/search_result_page.html | 3 +++
1 file changed, 3 insertions(+)
diff --git a/wqflask/wqflask/templates/search_result_page.html b/wqflask/wqflask/templates/search_result_page.html
index b238a28a..3dfae3dd 100644
--- a/wqflask/wqflask/templates/search_result_page.html
+++ b/wqflask/wqflask/templates/search_result_page.html
@@ -296,11 +296,13 @@
{
'title': "Symbol",
'type': "natural",
+ 'width': "120px",
'data': "symbol"
},
{
'title': "Description",
'type': "natural",
+ 'width': "500px",
'data': null,
'render': function(data, type, row, meta) {
try {
@@ -346,6 +348,7 @@
{
'title': "Description",
'type': "natural",
+ 'width': "500px",
'data': null,
'render': function(data, type, row, meta) {
try {
--
cgit v1.2.3
From 55b654a9f40bb247272c45671c58c772874f43f6 Mon Sep 17 00:00:00 2001
From: zsloan
Date: Sat, 23 May 2020 13:12:22 -0500
Subject: Changed row highlight color to the more pale yellow it was before
---
wqflask/wqflask/static/new/css/show_trait.css | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/wqflask/wqflask/static/new/css/show_trait.css b/wqflask/wqflask/static/new/css/show_trait.css
index a0e84474..d3e6672a 100644
--- a/wqflask/wqflask/static/new/css/show_trait.css
+++ b/wqflask/wqflask/static/new/css/show_trait.css
@@ -3,7 +3,7 @@ tr .outlier {
}
table.dataTable tbody tr.selected {
- background-color: #ffff00
+ background-color: #ffee99;
}
#bar_chart_container {
--
cgit v1.2.3
From ff94904574c51eeb7aecb327d6f2679fa4a60fb4 Mon Sep 17 00:00:00 2001
From: zsloan
Date: Thu, 28 May 2020 20:24:01 -0500
Subject: Added lines calling proxy for publish datasets + added some resource
redis queries and a missing import for the hmac functions
---
wqflask/base/trait.py | 12 ++++++----
wqflask/utility/hmac.py | 2 ++
wqflask/utility/redis_tools.py | 54 ++++++++++++++++++++++++++++++++++++++----
3 files changed, 59 insertions(+), 9 deletions(-)
diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py
index e454c593..1b7cb23c 100644
--- a/wqflask/base/trait.py
+++ b/wqflask/base/trait.py
@@ -5,9 +5,6 @@ import resource
import codecs
import requests
-import redis
-Redis = redis.StrictRedis()
-
from base import webqtlConfig
from base.webqtlCaseData import webqtlCaseData
from base.data_set import create_dataset
@@ -15,6 +12,8 @@ from db import webqtlDatabaseFunction
from utility import webqtlUtil
from utility import hmac
from utility.tools import GN2_BASE_URL
+from utility.redis_tools import get_redis_conn
+Redis = get_redis_conn()
from wqflask import app
@@ -349,8 +348,13 @@ def jsonable_table_row(trait, dataset_name, index):
def retrieve_trait_info(trait, dataset, get_qtl_info=False):
assert dataset, "Dataset doesn't exist"
-
+
if dataset.type == 'Publish':
+ resource_id = hmac.data_hmac("{}:{}".format(dataset.id, trait.name))
+
+ the_url = "http://localhost:8080/run_action/?resource={}&user={}&branch=data&action=view".format(resource_id, g.user_session.user_id)
+ trait_data = json.loads(requests.get("http://localhost:8080/run_action/?resource={}&user={}&branch=data&action=view".format(resource_id, g.user_session.user_id)))
+
query = """
SELECT
PublishXRef.Id, InbredSet.InbredSetCode, Publication.PubMed_ID,
diff --git a/wqflask/utility/hmac.py b/wqflask/utility/hmac.py
index d8a0eace..b08be97e 100644
--- a/wqflask/utility/hmac.py
+++ b/wqflask/utility/hmac.py
@@ -3,6 +3,8 @@ from __future__ import print_function, division, absolute_import
import hmac
import hashlib
+from flask import url_for
+
from wqflask import app
def hmac_creation(stringy):
diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py
index ca42f7b7..15841032 100644
--- a/wqflask/utility/redis_tools.py
+++ b/wqflask/utility/redis_tools.py
@@ -1,17 +1,25 @@
from __future__ import print_function, division, absolute_import
+import uuid
import simplejson as json
import redis # used for collections
-Redis = redis.StrictRedis()
import logging
from flask import (render_template, flash)
+from utility import hmac
+
from utility.logger import getLogger
logger = getLogger(__name__)
+def get_redis_conn():
+ Redis = redis.StrictRedis(port=6380)
+ return Redis
+
+Redis = get_redis_conn()
+
def is_redis_available():
try:
Redis.ping()
@@ -70,14 +78,15 @@ def check_verification_code(code):
email_address = None
user_details = None
email_address = Redis.hget("verification_codes", code)
- return email_address
if email_address:
user_details = get_user_by_unique_column('email_address', email_address)
- return user_details
+ if user_details:
+ return user_details
+ else:
+ return None
else:
return None
- flash("Invalid code: Password reset code does not exist or might have expired!", "error")
def get_user_groups(user_id):
#ZS: Get the groups where a user is an admin or a member and return lists corresponding to those two sets of groups
@@ -167,4 +176,39 @@ def change_group_name(user_id, group_id, new_name):
group_info["name"] = new_name
return group_info
else:
- return None
\ No newline at end of file
+ return None
+
+def get_resources():
+ resource_list = Redis.hgetall("resources")
+ return resource_list
+
+def get_resource_id(dataset_type, dataset_id, trait_id = None, all_resources = None):
+ if not all_resources:
+ all_resources = get_resources()
+
+ resource_list = [[key, json.loads(value)] for key, value in all_resources.items()]
+
+ if not trait_id:
+ matched_resources = [resource[0] for resource in resource_list if resource[1]['data']['dataset'] == dataset_id]
+ else:
+ matched_resources = [resource[0] for resource in resource_list if resource[1]['data']['dataset'] == dataset_id and resource[1]['data']['trait'] == trait_id]
+
+ if len(matched_resources):
+ return matched_resources[0]
+ else:
+ return False
+
+def get_resource_info(resource_id):
+ resource_info = Redis.hget("resources", resource_id)
+ return json.loads(resource_info)
+
+def add_resource(resource_info):
+
+ if 'trait' in resource_info['data']:
+ resource_id = hmac.data_hmac('{}:{}'.format(str(resource_info['data']['dataset']), str(resource_info['data']['trait'])))
+ else:
+ resource_id = hmac.data_hmac('{}'.format(str(resource_info['data']['dataset'])))
+
+ Redis.hset("resources", resource_id, json.dumps(resource_info))
+
+ return resource_info
--
cgit v1.2.3
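A minimal usage sketch of the resource helpers added to redis_tools.py above, assuming a Redis instance on port 6380 and a Flask application context that supplies the HMAC secret; the dataset and trait ids and the default_mask contents are placeholders, not values from this patch:

    from utility.redis_tools import add_resource, get_resource_id, get_resource_info

    # Register a per-trait resource for a Publish dataset (placeholder ids).
    add_resource({
        "data": {"dataset": "10", "trait": "15760"},   # hypothetical dataset/trait ids
        "default_mask": {"data": ["view"]},            # assumed shape of the permission mask
    })

    # Look the resource back up by dataset/trait and read the stored record.
    resource_id = get_resource_id("dataset-publish", "10", trait_id="15760")
    if resource_id:
        print(get_resource_info(resource_id))

The follow-up commit below replaces the linear scan inside get_resource_id() with a deterministic HMAC-derived id, so lookups no longer have to read every stored resource.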
From 1a663f987bf3a640d21c2c89402318d5433efd9e Mon Sep 17 00:00:00 2001
From: zsloan
Date: Thu, 4 Jun 2020 14:23:30 -0500
Subject: Really should have split this into many more commits:
- Now use proxy to pull trait data and hide traits/results that the user
doesn't have view permission for
 - Created a factory method for creating a trait object so it can return None
when user doesn't have view permissions (this is why such a large number
of files are changed)
- Added metadata to permutation export
- Added current group management code
- Added fixed password verification e-mail code
---
wqflask/base/trait.py | 177 ++++----
wqflask/utility/helper_functions.py | 11 +-
wqflask/utility/redis_tools.py | 58 +--
wqflask/wqflask/api/correlation.py | 472 ++++++++++-----------
wqflask/wqflask/api/gen_menu.py | 11 +-
wqflask/wqflask/api/mapping.py | 4 +-
wqflask/wqflask/collect.py | 15 +-
.../comparison_bar_chart/comparison_bar_chart.py | 4 +-
wqflask/wqflask/correlation/corr_scatter_plot.py | 8 +-
wqflask/wqflask/correlation/show_corr_results.py | 8 +-
.../wqflask/correlation_matrix/show_corr_matrix.py | 22 +-
wqflask/wqflask/ctl/ctl_analysis.py | 10 +-
wqflask/wqflask/do_search.py | 11 +-
wqflask/wqflask/gsearch.py | 13 +-
.../marker_regression/display_mapping_results.py | 6 +
wqflask/wqflask/marker_regression/gemma_mapping.py | 4 +-
wqflask/wqflask/marker_regression/rqtl_mapping.py | 64 ++-
wqflask/wqflask/marker_regression/run_mapping.py | 3 +-
wqflask/wqflask/network_graph/network_graph.py | 4 +-
wqflask/wqflask/search_results.py | 106 ++---
wqflask/wqflask/show_trait/export_trait_data.py | 4 +-
wqflask/wqflask/show_trait/show_trait.py | 21 +-
wqflask/wqflask/templates/admin/group_manager.html | 45 +-
wqflask/wqflask/templates/correlation_page.html | 1 +
wqflask/wqflask/templates/email/verification.txt | 7 -
wqflask/wqflask/templates/gsearch_pheno.html | 2 +-
wqflask/wqflask/templates/mapping_results.html | 31 +-
wqflask/wqflask/user_login.py | 43 +-
wqflask/wqflask/user_session.py | 18 +-
wqflask/wqflask/views.py | 71 +++-
30 files changed, 637 insertions(+), 617 deletions(-)
delete mode 100644 wqflask/wqflask/templates/email/verification.txt
diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py
index 1b7cb23c..b133bf21 100644
--- a/wqflask/base/trait.py
+++ b/wqflask/base/trait.py
@@ -11,6 +11,7 @@ from base.data_set import create_dataset
from db import webqtlDatabaseFunction
from utility import webqtlUtil
from utility import hmac
+from utility.authentication_tools import check_resource_availability
from utility.tools import GN2_BASE_URL
from utility.redis_tools import get_redis_conn
Redis = get_redis_conn()
@@ -21,11 +22,33 @@ import simplejson as json
from MySQLdb import escape_string as escape
from pprint import pformat as pf
-from flask import Flask, g, request, url_for
+from flask import Flask, g, request, url_for, redirect
from utility.logger import getLogger
logger = getLogger(__name__ )
+def create_trait(**kw):
+ assert bool(kw.get('dataset')) != bool(kw.get('dataset_name')), "Needs dataset ob. or name"
+
+ permitted = True
+ if kw.get('name'):
+ if kw.get('dataset_name'):
+ if kw.get('dataset_name') != "Temp":
+ dataset = create_dataset(kw.get('dataset_name'))
+ else:
+ dataset = kw.get('dataset')
+
+ if kw.get('dataset_name') != "Temp":
+ if dataset.type == 'Publish':
+ permitted = check_resource_availability(dataset, kw.get('name'))
+ else:
+ permitted = check_resource_availability(dataset)
+
+ if permitted:
+ return GeneralTrait(**kw)
+ else:
+ return None
+
class GeneralTrait(object):
"""
Trait class defines a trait in webqtl, can be either Microarray,
@@ -50,6 +73,7 @@ class GeneralTrait(object):
self.haveinfo = kw.get('haveinfo', False)
self.sequence = kw.get('sequence') # Blat sequence, available for ProbeSet
self.data = kw.get('data', {})
+ self.view = True
# Sets defaults
self.locus = None
@@ -77,6 +101,7 @@ class GeneralTrait(object):
# So we could add a simple if statement to short-circuit this if necessary
if self.dataset.type != "Temp":
self = retrieve_trait_info(self, self.dataset, get_qtl_info=get_qtl_info)
+
if get_sample_info != False:
self = retrieve_sample_data(self, self.dataset)
@@ -212,26 +237,28 @@ def get_sample_data():
trait = params['trait']
dataset = params['dataset']
- trait_ob = GeneralTrait(name=trait, dataset_name=dataset)
-
- trait_dict = {}
- trait_dict['name'] = trait
- trait_dict['db'] = dataset
- trait_dict['type'] = trait_ob.dataset.type
- trait_dict['group'] = trait_ob.dataset.group.name
- trait_dict['tissue'] = trait_ob.dataset.tissue
- trait_dict['species'] = trait_ob.dataset.group.species
- trait_dict['url'] = url_for('show_trait_page', trait_id = trait, dataset = dataset)
- trait_dict['description'] = trait_ob.description_display
- if trait_ob.dataset.type == "ProbeSet":
- trait_dict['symbol'] = trait_ob.symbol
- trait_dict['location'] = trait_ob.location_repr
- elif trait_ob.dataset.type == "Publish":
- if trait_ob.pubmed_id:
- trait_dict['pubmed_link'] = trait_ob.pubmed_link
- trait_dict['pubmed_text'] = trait_ob.pubmed_text
-
- return json.dumps([trait_dict, {key: value.value for key, value in trait_ob.data.iteritems() }])
+ trait_ob = create_trait(name=trait, dataset_name=dataset)
+ if trait_ob:
+ trait_dict = {}
+ trait_dict['name'] = trait
+ trait_dict['db'] = dataset
+ trait_dict['type'] = trait_ob.dataset.type
+ trait_dict['group'] = trait_ob.dataset.group.name
+ trait_dict['tissue'] = trait_ob.dataset.tissue
+ trait_dict['species'] = trait_ob.dataset.group.species
+ trait_dict['url'] = url_for('show_trait_page', trait_id = trait, dataset = dataset)
+ trait_dict['description'] = trait_ob.description_display
+ if trait_ob.dataset.type == "ProbeSet":
+ trait_dict['symbol'] = trait_ob.symbol
+ trait_dict['location'] = trait_ob.location_repr
+ elif trait_ob.dataset.type == "Publish":
+ if trait_ob.pubmed_id:
+ trait_dict['pubmed_link'] = trait_ob.pubmed_link
+ trait_dict['pubmed_text'] = trait_ob.pubmed_text
+
+ return json.dumps([trait_dict, {key: value.value for key, value in trait_ob.data.iteritems() }])
+ else:
+ return None
def jsonable(trait):
"""Return a dict suitable for using as json
@@ -350,91 +377,36 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False):
assert dataset, "Dataset doesn't exist"
if dataset.type == 'Publish':
- resource_id = hmac.data_hmac("{}:{}".format(dataset.id, trait.name))
-
- the_url = "http://localhost:8080/run_action/?resource={}&user={}&branch=data&action=view".format(resource_id, g.user_session.user_id)
- trait_data = json.loads(requests.get("http://localhost:8080/run_action/?resource={}&user={}&branch=data&action=view".format(resource_id, g.user_session.user_id)))
-
- query = """
- SELECT
- PublishXRef.Id, InbredSet.InbredSetCode, Publication.PubMed_ID,
- Phenotype.Pre_publication_description, Phenotype.Post_publication_description, Phenotype.Original_description,
- Phenotype.Pre_publication_abbreviation, Phenotype.Post_publication_abbreviation, PublishXRef.mean,
- Phenotype.Lab_code, Phenotype.Submitter, Phenotype.Owner, Phenotype.Authorized_Users,
- Publication.Authors, Publication.Title, Publication.Abstract,
- Publication.Journal, Publication.Volume, Publication.Pages,
- Publication.Month, Publication.Year, PublishXRef.Sequence,
- Phenotype.Units, PublishXRef.comments
- FROM
- PublishXRef, Publication, Phenotype, PublishFreeze, InbredSet
- WHERE
- PublishXRef.Id = %s AND
- Phenotype.Id = PublishXRef.PhenotypeId AND
- Publication.Id = PublishXRef.PublicationId AND
- PublishXRef.InbredSetId = PublishFreeze.InbredSetId AND
- PublishXRef.InbredSetId = InbredSet.Id AND
- PublishFreeze.Id = %s
- """ % (trait.name, dataset.id)
-
- logger.sql(query)
- trait_info = g.db.execute(query).fetchone()
-
-
- #XZ, 05/08/2009: Xiaodong add this block to use ProbeSet.Id to find the probeset instead of just using ProbeSet.Name
- #XZ, 05/08/2009: to avoid the problem of same probeset name from different platforms.
+ resource_id = hmac.hmac_creation("{}:{}:{}".format('dataset-publish', dataset.id, trait.name))
+ the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view".format(resource_id, g.user_session.user_id)
elif dataset.type == 'ProbeSet':
- display_fields_string = ', ProbeSet.'.join(dataset.display_fields)
- display_fields_string = 'ProbeSet.' + display_fields_string
- query = """
- SELECT %s
- FROM ProbeSet, ProbeSetFreeze, ProbeSetXRef
- WHERE
- ProbeSetXRef.ProbeSetFreezeId = ProbeSetFreeze.Id AND
- ProbeSetXRef.ProbeSetId = ProbeSet.Id AND
- ProbeSetFreeze.Name = '%s' AND
- ProbeSet.Name = '%s'
- """ % (escape(display_fields_string),
- escape(dataset.name),
- escape(str(trait.name)))
- logger.sql(query)
- trait_info = g.db.execute(query).fetchone()
- #XZ, 05/08/2009: We also should use Geno.Id to find marker instead of just using Geno.Name
- # to avoid the problem of same marker name from different species.
- elif dataset.type == 'Geno':
- display_fields_string = string.join(dataset.display_fields,',Geno.')
- display_fields_string = 'Geno.' + display_fields_string
- query = """
- SELECT %s
- FROM Geno, GenoFreeze, GenoXRef
- WHERE
- GenoXRef.GenoFreezeId = GenoFreeze.Id AND
- GenoXRef.GenoId = Geno.Id AND
- GenoFreeze.Name = '%s' AND
- Geno.Name = '%s'
- """ % (escape(display_fields_string),
- escape(dataset.name),
- escape(trait.name))
- logger.sql(query)
- trait_info = g.db.execute(query).fetchone()
- else: #Temp type
- query = """SELECT %s FROM %s WHERE Name = %s"""
- logger.sql(query)
- trait_info = g.db.execute(query,
- (string.join(dataset.display_fields,','),
- dataset.type, trait.name)).fetchone()
+ resource_id = hmac.hmac_creation("{}:{}".format('dataset-probeset', dataset.id))
+ the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view&trait={}".format(resource_id, g.user_session.user_id, trait.name)
+ else:
+ resource_id = hmac.hmac_creation("{}:{}".format('dataset-geno', dataset.id))
+ the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view&trait={}".format(resource_id, g.user_session.user_id, trait.name)
+
+ try:
+ response = requests.get(the_url).content
+ if response.strip() == "no-access":
+ trait.view = False
+ return trait
+ except:
+ resource_info = get_resource_info(resource_id)
+ default_permissions = resource_info['default_mask']['data']
+ if 'view' not in default_permissions:
+ trait.view = False
+ return trait
+
+ trait_info = json.loads(response)
if trait_info:
trait.haveinfo = True
- #XZ: assign SQL query result to trait attributes.
for i, field in enumerate(dataset.display_fields):
holder = trait_info[i]
- # if isinstance(trait_info[i], basestring):
- # logger.debug("HOLDER:", holder)
- # logger.debug("HOLDER2:", holder.decode(encoding='latin1'))
- # holder = unicode(trait_info[i], "utf-8", "ignore")
- if isinstance(trait_info[i], basestring):
- holder = holder.encode('latin1')
+ #if isinstance(trait_info[i], basestring):
+ # holder = holder.encode('latin1')
setattr(trait, field, holder)
if dataset.type == 'Publish':
@@ -453,13 +425,6 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False):
if trait.confidential:
trait.abbreviation = trait.pre_publication_abbreviation
trait.description_display = trait.pre_publication_description
-
- #if not webqtlUtil.hasAccessToConfidentialPhenotypeTrait(
- # privilege=self.dataset.privilege,
- # userName=self.dataset.userName,
- # authorized_users=self.authorized_users):
- #
- # description = self.pre_publication_description
else:
trait.abbreviation = trait.post_publication_abbreviation
if description:
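Downstream code is expected to obtain traits through the new create_trait() factory and to honour the view flag set above; a hedged sketch of that calling pattern, runnable only inside a Flask request context (the dataset and trait names below are placeholders):

    from base.trait import create_trait

    # create_trait() returns None when check_resource_availability() denies the
    # resource outright; retrieve_trait_info() may additionally set view = False
    # after asking the proxy on localhost:8080.
    trait = create_trait(dataset_name="BXDPublish", name="10001")  # placeholder ids
    if trait is None or not trait.view:
        pass  # omit this trait from search results, collections, exports, ...
    else:
        print(trait.description_display)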
diff --git a/wqflask/utility/helper_functions.py b/wqflask/utility/helper_functions.py
index e7c04fef..9ce809b6 100644
--- a/wqflask/utility/helper_functions.py
+++ b/wqflask/utility/helper_functions.py
@@ -1,7 +1,7 @@
from __future__ import absolute_import, print_function, division
-from base.trait import GeneralTrait
from base import data_set
+from base.trait import create_trait
from base.species import TheSpecies
from utility import hmac
@@ -11,7 +11,6 @@ from flask import Flask, g
import logging
logger = logging.getLogger(__name__ )
-
def get_species_dataset_trait(self, start_vars):
#assert type(read_genotype) == type(bool()), "Expecting boolean value for read_genotype"
if "temp_trait" in start_vars.keys():
@@ -24,7 +23,7 @@ def get_species_dataset_trait(self, start_vars):
logger.debug("After creating dataset")
self.species = TheSpecies(dataset=self.dataset)
logger.debug("After creating species")
- self.this_trait = GeneralTrait(dataset=self.dataset,
+ self.this_trait = create_trait(dataset=self.dataset,
name=start_vars['trait_id'],
cellid=None,
get_qtl_info=True)
@@ -34,7 +33,6 @@ def get_species_dataset_trait(self, start_vars):
#self.dataset.group.read_genotype_file()
#self.genotype = self.dataset.group.genotype
-
def get_trait_db_obs(self, trait_db_list):
if isinstance(trait_db_list, basestring):
trait_db_list = trait_db_list.split(",")
@@ -49,10 +47,11 @@ def get_trait_db_obs(self, trait_db_list):
dataset_ob = data_set.create_dataset(dataset_name=dataset_name, dataset_type="Temp", group_name=trait_name.split("_")[2])
else:
dataset_ob = data_set.create_dataset(dataset_name)
- trait_ob = GeneralTrait(dataset=dataset_ob,
+ trait_ob = create_trait(dataset=dataset_ob,
name=trait_name,
cellid=None)
- self.trait_list.append((trait_ob, dataset_ob))
+ if trait_ob:
+ self.trait_list.append((trait_ob, dataset_ob))
def get_species_groups():
diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py
index 15841032..0ad96879 100644
--- a/wqflask/utility/redis_tools.py
+++ b/wqflask/utility/redis_tools.py
@@ -2,6 +2,7 @@ from __future__ import print_function, division, absolute_import
import uuid
import simplejson as json
+import datetime
import redis # used for collections
@@ -96,15 +97,22 @@ def get_user_groups(user_id):
for key in groups_list:
group_ob = json.loads(groups_list[key])
group_admins = set(group_ob['admins'])
- group_users = set(group_ob['users'])
+ group_members = set(group_ob['members'])
if user_id in group_admins:
admin_group_ids.append(group_ob['id'])
- elif user_id in group_users:
+ elif user_id in group_members:
user_group_ids.append(group_ob['id'])
else:
continue
- return admin_group_ids, user_group_ids
+ admin_groups = []
+ user_groups = []
+ for the_id in admin_group_ids:
+ admin_groups.append(get_group_info(the_id))
+ for the_id in user_group_ids:
+ user_groups.append(get_group_info(the_id))
+
+ return admin_groups, user_groups
def get_group_info(group_id):
group_json = Redis.hget("groups", group_id)
@@ -114,18 +122,18 @@ def get_group_info(group_id):
return group_info
-def create_group(admin_member_ids, user_member_ids = [], group_name = ""):
+def create_group(admin_user_ids, member_user_ids = [], group_name = "Default Group Name"):
group_id = str(uuid.uuid4())
new_group = {
"id" : group_id,
- "admins": admin_member_ids,
- "users" : user_member_ids,
+ "admins": admin_user_ids,
+ "members" : member_user_ids,
"name" : group_name,
"created_timestamp": datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'),
"changed_timestamp": datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p')
}
- Redis.hset("groups", group_id, new_group)
+ Redis.hset("groups", group_id, json.dumps(new_group))
return new_group
@@ -144,7 +152,7 @@ def add_users_to_group(user_id, group_id, user_emails = [], admins = False): #ZS
if admins:
group_users = set(group_info["admins"])
else:
- group_users = set(group_info["users"])
+ group_users = set(group_info["members"])
for email in user_emails:
user_id = get_user_id("email_address", email)
@@ -153,7 +161,7 @@ def add_users_to_group(user_id, group_id, user_emails = [], admins = False): #ZS
if admins:
group_info["admins"] = list(group_users)
else:
- group_info["users"] = list(group_users)
+ group_info["members"] = list(group_users)
group_info["changed_timestamp"] = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p')
Redis.hset("groups", group_id, json.dumps(group_info))
@@ -161,7 +169,7 @@ def add_users_to_group(user_id, group_id, user_emails = [], admins = False): #ZS
else:
return None
-def remove_users_from_group(user_id, users_to_remove_ids, group_id, user_type = "users"): #ZS: User type is because I assume admins can remove other admins
+def remove_users_from_group(user_id, users_to_remove_ids, group_id, user_type = "members"): #ZS: User type is because I assume admins can remove other admins
group_info = get_group_info(group_id)
if user_id in group_info["admins"]:
group_users = set(group_info[user_type])
@@ -174,6 +182,7 @@ def change_group_name(user_id, group_id, new_name):
group_info = get_group_info(group_id)
if user_id in group_info["admins"]:
group_info["name"] = new_name
+ Redis.hset("groups", group_id, json.dumps(group_info))
return group_info
else:
return None
@@ -182,22 +191,21 @@ def get_resources():
resource_list = Redis.hgetall("resources")
return resource_list
-def get_resource_id(dataset_type, dataset_id, trait_id = None, all_resources = None):
- if not all_resources:
- all_resources = get_resources()
-
- resource_list = [[key, json.loads(value)] for key, value in all_resources.items()]
-
- if not trait_id:
- matched_resources = [resource[0] for resource in resource_list if resource[1]['data']['dataset'] == dataset_id]
- else:
- matched_resources = [resource[0] for resource in resource_list if resource[1]['data']['dataset'] == dataset_id and resource[1]['data']['trait'] == trait_id]
-
- if len(matched_resources):
- return matched_resources[0]
+def get_resource_id(dataset, trait_id=None):
+ if dataset.type == "Publish":
+ if trait_id:
+ resource_id = hmac.hmac_creation("{}:{}:{}".format('dataset-publish', dataset.id, trait_id))
+ else:
+ return False
+ elif dataset.type == "ProbeSet":
+ resource_id = hmac.hmac_creation("{}:{}".format('dataset-probeset', dataset.id))
+ elif dataset.type == "Geno":
+ resource_id = hmac.hmac_creation("{}:{}".format('dataset-geno', dataset.id))
else:
return False
+ return resource_id
+
def get_resource_info(resource_id):
resource_info = Redis.hget("resources", resource_id)
return json.loads(resource_info)
@@ -205,9 +213,9 @@ def get_resource_info(resource_id):
def add_resource(resource_info):
if 'trait' in resource_info['data']:
- resource_id = hmac.data_hmac('{}:{}'.format(str(resource_info['data']['dataset']), str(resource_info['data']['trait'])))
+ resource_id = hmac.hmac_creation('{}:{}:{}'.format(str(resource_info['type']), str(resource_info['data']['dataset']), str(resource_info['data']['trait'])))
else:
- resource_id = hmac.data_hmac('{}'.format(str(resource_info['data']['dataset'])))
+ resource_id = hmac.hmac_creation('{}:{}'.format(str(resource_info['type']), str(resource_info['data']['dataset'])))
Redis.hset("resources", resource_id, json.dumps(resource_info))
diff --git a/wqflask/wqflask/api/correlation.py b/wqflask/wqflask/api/correlation.py
index 66eb94ac..7f5312c1 100644
--- a/wqflask/wqflask/api/correlation.py
+++ b/wqflask/wqflask/api/correlation.py
@@ -1,237 +1,237 @@
-from __future__ import absolute_import, division, print_function
-
-import collections
-
-import scipy
-
-from MySQLdb import escape_string as escape
-
-from flask import g
-
-from base import data_set
-from base.trait import GeneralTrait, retrieve_sample_data
-
-from wqflask.correlation.show_corr_results import generate_corr_json
-from wqflask.correlation import correlation_functions
-
-from utility import webqtlUtil, helper_functions, corr_result_helpers
-from utility.benchmark import Bench
-
-import utility.logger
-logger = utility.logger.getLogger(__name__ )
-
-def do_correlation(start_vars):
- assert('db' in start_vars)
- assert('target_db' in start_vars)
- assert('trait_id' in start_vars)
-
- this_dataset = data_set.create_dataset(dataset_name = start_vars['db'])
- target_dataset = data_set.create_dataset(dataset_name = start_vars['target_db'])
- this_trait = GeneralTrait(dataset = this_dataset, name = start_vars['trait_id'])
- this_trait = retrieve_sample_data(this_trait, this_dataset)
-
- corr_params = init_corr_params(start_vars)
-
- corr_results = calculate_results(this_trait, this_dataset, target_dataset, corr_params)
- #corr_results = collections.OrderedDict(sorted(corr_results.items(), key=lambda t: -abs(t[1][0])))
-
- final_results = []
- for _trait_counter, trait in enumerate(corr_results.keys()[:corr_params['return_count']]):
- if corr_params['type'] == "tissue":
- [sample_r, num_overlap, sample_p, symbol] = corr_results[trait]
- result_dict = {
- "trait" : trait,
- "sample_r" : sample_r,
- "#_strains" : num_overlap,
- "p_value" : sample_p,
- "symbol" : symbol
- }
- elif corr_params['type'] == "literature" or corr_params['type'] == "lit":
- [gene_id, sample_r] = corr_results[trait]
- result_dict = {
- "trait" : trait,
- "sample_r" : sample_r,
- "gene_id" : gene_id
- }
- else:
- [sample_r, sample_p, num_overlap] = corr_results[trait]
- result_dict = {
- "trait" : trait,
- "sample_r" : sample_r,
- "#_strains" : num_overlap,
- "p_value" : sample_p
- }
-
- final_results.append(result_dict)
-
- # json_corr_results = generate_corr_json(final_corr_results, this_trait, this_dataset, target_dataset, for_api = True)
-
- return final_results
-
-def calculate_results(this_trait, this_dataset, target_dataset, corr_params):
- corr_results = {}
-
- target_dataset.get_trait_data()
-
- if corr_params['type'] == "tissue":
- trait_symbol_dict = this_dataset.retrieve_genes("Symbol")
- corr_results = do_tissue_correlation_for_all_traits(this_trait, trait_symbol_dict, corr_params)
- sorted_results = collections.OrderedDict(sorted(corr_results.items(),
- key=lambda t: -abs(t[1][1])))
- elif corr_params['type'] == "literature" or corr_params['type'] == "lit": #ZS: Just so a user can use either "lit" or "literature"
- trait_geneid_dict = this_dataset.retrieve_genes("GeneId")
- corr_results = do_literature_correlation_for_all_traits(this_trait, this_dataset, trait_geneid_dict, corr_params)
- sorted_results = collections.OrderedDict(sorted(corr_results.items(),
- key=lambda t: -abs(t[1][1])))
- else:
- for target_trait, target_vals in target_dataset.trait_data.iteritems():
- result = get_sample_r_and_p_values(this_trait, this_dataset, target_vals, target_dataset, corr_params['type'])
- if result is not None:
- corr_results[target_trait] = result
-
- sorted_results = collections.OrderedDict(sorted(corr_results.items(), key=lambda t: -abs(t[1][0])))
-
- return sorted_results
-
-def do_tissue_correlation_for_all_traits(this_trait, trait_symbol_dict, corr_params, tissue_dataset_id=1):
- #Gets tissue expression values for the primary trait
- primary_trait_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(symbol_list = [this_trait.symbol])
-
- if this_trait.symbol.lower() in primary_trait_tissue_vals_dict:
- primary_trait_tissue_values = primary_trait_tissue_vals_dict[this_trait.symbol.lower()]
-
- corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(symbol_list=trait_symbol_dict.values())
-
- tissue_corr_data = {}
- for trait, symbol in trait_symbol_dict.iteritems():
- if symbol and symbol.lower() in corr_result_tissue_vals_dict:
- this_trait_tissue_values = corr_result_tissue_vals_dict[symbol.lower()]
-
- result = correlation_functions.cal_zero_order_corr_for_tiss(primary_trait_tissue_values,
- this_trait_tissue_values,
- corr_params['method'])
-
- tissue_corr_data[trait] = [result[0], result[1], result[2], symbol]
-
- return tissue_corr_data
-
-def do_literature_correlation_for_all_traits(this_trait, target_dataset, trait_geneid_dict, corr_params):
- input_trait_mouse_gene_id = convert_to_mouse_gene_id(target_dataset.group.species.lower(), this_trait.geneid)
-
- lit_corr_data = {}
- for trait, gene_id in trait_geneid_dict.iteritems():
- mouse_gene_id = convert_to_mouse_gene_id(target_dataset.group.species.lower(), gene_id)
-
- if mouse_gene_id and str(mouse_gene_id).find(";") == -1:
- result = g.db.execute(
- """SELECT value
- FROM LCorrRamin3
- WHERE GeneId1='%s' and
- GeneId2='%s'
- """ % (escape(mouse_gene_id), escape(input_trait_mouse_gene_id))
- ).fetchone()
- if not result:
- result = g.db.execute("""SELECT value
- FROM LCorrRamin3
- WHERE GeneId2='%s' and
- GeneId1='%s'
- """ % (escape(mouse_gene_id), escape(input_trait_mouse_gene_id))
- ).fetchone()
- if result:
- lit_corr = result.value
- lit_corr_data[trait] = [gene_id, lit_corr]
- else:
- lit_corr_data[trait] = [gene_id, 0]
- else:
- lit_corr_data[trait] = [gene_id, 0]
-
- return lit_corr_data
-
-def get_sample_r_and_p_values(this_trait, this_dataset, target_vals, target_dataset, type):
- """
- Calculates the sample r (or rho) and p-value
-
- Given a primary trait and a target trait's sample values,
- calculates either the pearson r or spearman rho and the p-value
- using the corresponding scipy functions.
- """
-
- this_trait_vals = []
- shared_target_vals = []
- for i, sample in enumerate(target_dataset.group.samplelist):
- if sample in this_trait.data:
- this_sample_value = this_trait.data[sample].value
- target_sample_value = target_vals[i]
- this_trait_vals.append(this_sample_value)
- shared_target_vals.append(target_sample_value)
-
- this_trait_vals, shared_target_vals, num_overlap = corr_result_helpers.normalize_values(this_trait_vals, shared_target_vals)
-
- if type == 'pearson':
- sample_r, sample_p = scipy.stats.pearsonr(this_trait_vals, shared_target_vals)
- else:
- sample_r, sample_p = scipy.stats.spearmanr(this_trait_vals, shared_target_vals)
-
- if num_overlap > 5:
- if scipy.isnan(sample_r):
- return None
- else:
- return [sample_r, sample_p, num_overlap]
-
-def convert_to_mouse_gene_id(species=None, gene_id=None):
- """If the species is rat or human, translate the gene_id to the mouse geneid
-
- If there is no input gene_id or there's no corresponding mouse gene_id, return None
-
- """
- if not gene_id:
- return None
-
- mouse_gene_id = None
-
- if species == 'mouse':
- mouse_gene_id = gene_id
-
- elif species == 'rat':
-
- query = """SELECT mouse
- FROM GeneIDXRef
- WHERE rat='%s'""" % escape(gene_id)
-
- result = g.db.execute(query).fetchone()
- if result != None:
- mouse_gene_id = result.mouse
-
- elif species == 'human':
-
- query = """SELECT mouse
- FROM GeneIDXRef
- WHERE human='%s'""" % escape(gene_id)
-
- result = g.db.execute(query).fetchone()
- if result != None:
- mouse_gene_id = result.mouse
-
- return mouse_gene_id
-
-def init_corr_params(start_vars):
- method = "pearson"
- if 'method' in start_vars:
- method = start_vars['method']
-
- type = "sample"
- if 'type' in start_vars:
- type = start_vars['type']
-
- return_count = 500
- if 'return_count' in start_vars:
- assert(start_vars['return_count'].isdigit())
- return_count = int(start_vars['return_count'])
-
- corr_params = {
- 'method' : method,
- 'type' : type,
- 'return_count' : return_count
- }
-
+from __future__ import absolute_import, division, print_function
+
+import collections
+
+import scipy
+
+from MySQLdb import escape_string as escape
+
+from flask import g
+
+from base import data_set
+from base.trait import create_trait, retrieve_sample_data
+
+from wqflask.correlation.show_corr_results import generate_corr_json
+from wqflask.correlation import correlation_functions
+
+from utility import webqtlUtil, helper_functions, corr_result_helpers
+from utility.benchmark import Bench
+
+import utility.logger
+logger = utility.logger.getLogger(__name__ )
+
+def do_correlation(start_vars):
+ assert('db' in start_vars)
+ assert('target_db' in start_vars)
+ assert('trait_id' in start_vars)
+
+ this_dataset = data_set.create_dataset(dataset_name = start_vars['db'])
+ target_dataset = data_set.create_dataset(dataset_name = start_vars['target_db'])
+ this_trait = create_trait(dataset = this_dataset, name = start_vars['trait_id'])
+ this_trait = retrieve_sample_data(this_trait, this_dataset)
+
+ corr_params = init_corr_params(start_vars)
+
+ corr_results = calculate_results(this_trait, this_dataset, target_dataset, corr_params)
+ #corr_results = collections.OrderedDict(sorted(corr_results.items(), key=lambda t: -abs(t[1][0])))
+
+ final_results = []
+ for _trait_counter, trait in enumerate(corr_results.keys()[:corr_params['return_count']]):
+ if corr_params['type'] == "tissue":
+ [sample_r, num_overlap, sample_p, symbol] = corr_results[trait]
+ result_dict = {
+ "trait" : trait,
+ "sample_r" : sample_r,
+ "#_strains" : num_overlap,
+ "p_value" : sample_p,
+ "symbol" : symbol
+ }
+ elif corr_params['type'] == "literature" or corr_params['type'] == "lit":
+ [gene_id, sample_r] = corr_results[trait]
+ result_dict = {
+ "trait" : trait,
+ "sample_r" : sample_r,
+ "gene_id" : gene_id
+ }
+ else:
+ [sample_r, sample_p, num_overlap] = corr_results[trait]
+ result_dict = {
+ "trait" : trait,
+ "sample_r" : sample_r,
+ "#_strains" : num_overlap,
+ "p_value" : sample_p
+ }
+
+ final_results.append(result_dict)
+
+ # json_corr_results = generate_corr_json(final_corr_results, this_trait, this_dataset, target_dataset, for_api = True)
+
+ return final_results
+
+def calculate_results(this_trait, this_dataset, target_dataset, corr_params):
+ corr_results = {}
+
+ target_dataset.get_trait_data()
+
+ if corr_params['type'] == "tissue":
+ trait_symbol_dict = this_dataset.retrieve_genes("Symbol")
+ corr_results = do_tissue_correlation_for_all_traits(this_trait, trait_symbol_dict, corr_params)
+ sorted_results = collections.OrderedDict(sorted(corr_results.items(),
+ key=lambda t: -abs(t[1][1])))
+ elif corr_params['type'] == "literature" or corr_params['type'] == "lit": #ZS: Just so a user can use either "lit" or "literature"
+ trait_geneid_dict = this_dataset.retrieve_genes("GeneId")
+ corr_results = do_literature_correlation_for_all_traits(this_trait, this_dataset, trait_geneid_dict, corr_params)
+ sorted_results = collections.OrderedDict(sorted(corr_results.items(),
+ key=lambda t: -abs(t[1][1])))
+ else:
+ for target_trait, target_vals in target_dataset.trait_data.iteritems():
+ result = get_sample_r_and_p_values(this_trait, this_dataset, target_vals, target_dataset, corr_params['type'])
+ if result is not None:
+ corr_results[target_trait] = result
+
+ sorted_results = collections.OrderedDict(sorted(corr_results.items(), key=lambda t: -abs(t[1][0])))
+
+ return sorted_results
+
+def do_tissue_correlation_for_all_traits(this_trait, trait_symbol_dict, corr_params, tissue_dataset_id=1):
+ #Gets tissue expression values for the primary trait
+ primary_trait_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(symbol_list = [this_trait.symbol])
+
+ if this_trait.symbol.lower() in primary_trait_tissue_vals_dict:
+ primary_trait_tissue_values = primary_trait_tissue_vals_dict[this_trait.symbol.lower()]
+
+ corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(symbol_list=trait_symbol_dict.values())
+
+ tissue_corr_data = {}
+ for trait, symbol in trait_symbol_dict.iteritems():
+ if symbol and symbol.lower() in corr_result_tissue_vals_dict:
+ this_trait_tissue_values = corr_result_tissue_vals_dict[symbol.lower()]
+
+ result = correlation_functions.cal_zero_order_corr_for_tiss(primary_trait_tissue_values,
+ this_trait_tissue_values,
+ corr_params['method'])
+
+ tissue_corr_data[trait] = [result[0], result[1], result[2], symbol]
+
+ return tissue_corr_data
+
+def do_literature_correlation_for_all_traits(this_trait, target_dataset, trait_geneid_dict, corr_params):
+ input_trait_mouse_gene_id = convert_to_mouse_gene_id(target_dataset.group.species.lower(), this_trait.geneid)
+
+ lit_corr_data = {}
+ for trait, gene_id in trait_geneid_dict.iteritems():
+ mouse_gene_id = convert_to_mouse_gene_id(target_dataset.group.species.lower(), gene_id)
+
+ if mouse_gene_id and str(mouse_gene_id).find(";") == -1:
+ result = g.db.execute(
+ """SELECT value
+ FROM LCorrRamin3
+ WHERE GeneId1='%s' and
+ GeneId2='%s'
+ """ % (escape(mouse_gene_id), escape(input_trait_mouse_gene_id))
+ ).fetchone()
+ if not result:
+ result = g.db.execute("""SELECT value
+ FROM LCorrRamin3
+ WHERE GeneId2='%s' and
+ GeneId1='%s'
+ """ % (escape(mouse_gene_id), escape(input_trait_mouse_gene_id))
+ ).fetchone()
+ if result:
+ lit_corr = result.value
+ lit_corr_data[trait] = [gene_id, lit_corr]
+ else:
+ lit_corr_data[trait] = [gene_id, 0]
+ else:
+ lit_corr_data[trait] = [gene_id, 0]
+
+ return lit_corr_data
+
+def get_sample_r_and_p_values(this_trait, this_dataset, target_vals, target_dataset, type):
+ """
+ Calculates the sample r (or rho) and p-value
+
+ Given a primary trait and a target trait's sample values,
+ calculates either the pearson r or spearman rho and the p-value
+ using the corresponding scipy functions.
+ """
+
+ this_trait_vals = []
+ shared_target_vals = []
+ for i, sample in enumerate(target_dataset.group.samplelist):
+ if sample in this_trait.data:
+ this_sample_value = this_trait.data[sample].value
+ target_sample_value = target_vals[i]
+ this_trait_vals.append(this_sample_value)
+ shared_target_vals.append(target_sample_value)
+
+ this_trait_vals, shared_target_vals, num_overlap = corr_result_helpers.normalize_values(this_trait_vals, shared_target_vals)
+
+ if type == 'pearson':
+ sample_r, sample_p = scipy.stats.pearsonr(this_trait_vals, shared_target_vals)
+ else:
+ sample_r, sample_p = scipy.stats.spearmanr(this_trait_vals, shared_target_vals)
+
+ if num_overlap > 5:
+ if scipy.isnan(sample_r):
+ return None
+ else:
+ return [sample_r, sample_p, num_overlap]
+
+def convert_to_mouse_gene_id(species=None, gene_id=None):
+ """If the species is rat or human, translate the gene_id to the mouse geneid
+
+ If there is no input gene_id or there's no corresponding mouse gene_id, return None
+
+ """
+ if not gene_id:
+ return None
+
+ mouse_gene_id = None
+
+ if species == 'mouse':
+ mouse_gene_id = gene_id
+
+ elif species == 'rat':
+
+ query = """SELECT mouse
+ FROM GeneIDXRef
+ WHERE rat='%s'""" % escape(gene_id)
+
+ result = g.db.execute(query).fetchone()
+ if result != None:
+ mouse_gene_id = result.mouse
+
+ elif species == 'human':
+
+ query = """SELECT mouse
+ FROM GeneIDXRef
+ WHERE human='%s'""" % escape(gene_id)
+
+ result = g.db.execute(query).fetchone()
+ if result != None:
+ mouse_gene_id = result.mouse
+
+ return mouse_gene_id
+
+def init_corr_params(start_vars):
+ method = "pearson"
+ if 'method' in start_vars:
+ method = start_vars['method']
+
+ type = "sample"
+ if 'type' in start_vars:
+ type = start_vars['type']
+
+ return_count = 500
+ if 'return_count' in start_vars:
+ assert(start_vars['return_count'].isdigit())
+ return_count = int(start_vars['return_count'])
+
+ corr_params = {
+ 'method' : method,
+ 'type' : type,
+ 'return_count' : return_count
+ }
+
return corr_params
\ No newline at end of file
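For reference, a hypothetical call into the API module above; the dataset and trait names are placeholders, and the call is assumed to run inside the Flask application context because the correlation code reaches the database through flask.g:

    from wqflask.api.correlation import do_correlation

    start_vars = {
        "db": "HC_M2_0606_P",         # primary dataset (placeholder name)
        "target_db": "HC_M2_0606_P",  # dataset to correlate against (placeholder)
        "trait_id": "1427571_at",     # primary trait (placeholder)
        "method": "spearman",         # optional; defaults to "pearson"
        "type": "sample",             # "sample", "tissue", or "lit"/"literature"
        "return_count": "100",        # optional string; parsed with int(), default 500
    }
    results = do_correlation(start_vars)  # list of dicts sorted by |sample_r|
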
diff --git a/wqflask/wqflask/api/gen_menu.py b/wqflask/wqflask/api/gen_menu.py
index c7bcb65d..bdcc3bf7 100644
--- a/wqflask/wqflask/api/gen_menu.py
+++ b/wqflask/wqflask/api/gen_menu.py
@@ -126,9 +126,7 @@ def build_types(species, group):
InbredSet.Name = '{1}' AND
ProbeFreeze.TissueId = Tissue.Id AND
ProbeFreeze.InbredSetId = InbredSet.Id AND
- ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id AND
- ProbeSetFreeze.public > 0 AND
- ProbeSetFreeze.confidentiality < 1
+ ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id
ORDER BY Tissue.Name""".format(species, group)
results = []
@@ -194,9 +192,7 @@ def build_datasets(species, group, type_name):
FROM InfoFiles, GenoFreeze, InbredSet
WHERE InbredSet.Name = '{}' AND
GenoFreeze.InbredSetId = InbredSet.Id AND
- InfoFiles.InfoPageName = GenoFreeze.ShortName AND
- GenoFreeze.public > 0 AND
- GenoFreeze.confidentiality < 1
+ InfoFiles.InfoPageName = GenoFreeze.ShortName
ORDER BY GenoFreeze.CreateTime DESC""".format(group)).fetchone()
if results != None:
@@ -214,8 +210,7 @@ def build_datasets(species, group, type_name):
Species.Id = InbredSet.SpeciesId AND
InbredSet.Name = '{1}' AND
ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id and Tissue.Name = '{2}' AND
- ProbeFreeze.TissueId = Tissue.Id and ProbeFreeze.InbredSetId = InbredSet.Id AND
- ProbeSetFreeze.confidentiality < 1 and ProbeSetFreeze.public > 0
+ ProbeFreeze.TissueId = Tissue.Id and ProbeFreeze.InbredSetId = InbredSet.Id
ORDER BY ProbeSetFreeze.CreateTime DESC""".format(species, group, type_name)).fetchall()
datasets = []
diff --git a/wqflask/wqflask/api/mapping.py b/wqflask/wqflask/api/mapping.py
index d830cefc..92c27c9b 100644
--- a/wqflask/wqflask/api/mapping.py
+++ b/wqflask/wqflask/api/mapping.py
@@ -4,7 +4,7 @@ import string
from base import data_set
from base import webqtlConfig
-from base.trait import GeneralTrait, retrieve_sample_data
+from base.trait import create_trait, retrieve_sample_data
from utility import helper_functions
from wqflask.marker_regression import gemma_mapping, rqtl_mapping, qtlreaper_mapping, plink_mapping
@@ -18,7 +18,7 @@ def do_mapping_for_api(start_vars):
dataset = data_set.create_dataset(dataset_name = start_vars['db'])
dataset.group.get_markers()
- this_trait = GeneralTrait(dataset = dataset, name = start_vars['trait_id'])
+ this_trait = create_trait(dataset = dataset, name = start_vars['trait_id'])
this_trait = retrieve_sample_data(this_trait, dataset)
samples = []
diff --git a/wqflask/wqflask/collect.py b/wqflask/wqflask/collect.py
index b22e0004..4fb8e69b 100644
--- a/wqflask/wqflask/collect.py
+++ b/wqflask/wqflask/collect.py
@@ -14,9 +14,6 @@ import urlparse
import simplejson as json
-import redis
-Redis = redis.StrictRedis()
-
from flask import (Flask, g, render_template, url_for, request, make_response,
redirect, flash, jsonify)
@@ -30,8 +27,10 @@ from wqflask import model
from utility import Bunch, Struct, hmac
from utility.formatting import numify
+from utility.redis_tools import get_redis_conn
+Redis = get_redis_conn()
-from base import trait
+from base.trait import create_trait, retrieve_trait_info, jsonable
from base.data_set import create_dataset
import logging
@@ -208,14 +207,14 @@ def view_collection():
if dataset_name == "Temp":
group = name.split("_")[2]
dataset = create_dataset(dataset_name, dataset_type = "Temp", group_name = group)
- trait_ob = trait.GeneralTrait(name=name, dataset=dataset)
+ trait_ob = create_trait(name=name, dataset=dataset)
else:
dataset = create_dataset(dataset_name)
- trait_ob = trait.GeneralTrait(name=name, dataset=dataset)
- trait_ob = trait.retrieve_trait_info(trait_ob, dataset, get_qtl_info=True)
+ trait_ob = create_trait(name=name, dataset=dataset)
+ trait_ob = retrieve_trait_info(trait_ob, dataset, get_qtl_info=True)
trait_obs.append(trait_ob)
- json_version.append(trait.jsonable(trait_ob))
+ json_version.append(jsonable(trait_ob))
collection_info = dict(trait_obs=trait_obs,
uc = uc)
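The collection view now calls the module-level helpers imported above instead of going through the trait module object. A compact sketch, assuming the same helper names, of how one collection member would be built and serialized:

    from base.data_set import create_dataset
    from base.trait import create_trait, retrieve_trait_info, jsonable

    def jsonable_collection_member(name, dataset_name):
        dataset = create_dataset(dataset_name)
        trait_ob = create_trait(name=name, dataset=dataset)
        trait_ob = retrieve_trait_info(trait_ob, dataset, get_qtl_info=True)
        return jsonable(trait_ob)  # dict suitable for the collection JSON payload
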
diff --git a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py
index 21eb1493..5d74dc9d 100644
--- a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py
+++ b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py
@@ -37,7 +37,7 @@ from pprint import pformat as pf
import reaper
-from base.trait import GeneralTrait
+from base.trait import create_trait
from base import data_set
from utility import webqtlUtil, helper_functions, corr_result_helpers
from db import webqtlDatabaseFunction
@@ -108,7 +108,7 @@ class ComparisonBarChart(object):
trait_name, dataset_name = trait_db.split(":")
#print("dataset_name:", dataset_name)
dataset_ob = data_set.create_dataset(dataset_name)
- trait_ob = GeneralTrait(dataset=dataset_ob,
+ trait_ob = create_trait(dataset=dataset_ob,
name=trait_name,
cellid=None)
self.trait_list.append((trait_ob, dataset_ob))
diff --git a/wqflask/wqflask/correlation/corr_scatter_plot.py b/wqflask/wqflask/correlation/corr_scatter_plot.py
index dfb81c54..04ec427d 100644
--- a/wqflask/wqflask/correlation/corr_scatter_plot.py
+++ b/wqflask/wqflask/correlation/corr_scatter_plot.py
@@ -4,7 +4,7 @@ import math
from flask import g
-from base.trait import GeneralTrait
+from base.trait import create_trait
from base import data_set
from utility import corr_result_helpers
from scipy import stats
@@ -20,9 +20,9 @@ class CorrScatterPlot(object):
self.data_set_1 = data_set.create_dataset(params['dataset_1'])
self.data_set_2 = data_set.create_dataset(params['dataset_2'])
#self.data_set_3 = data_set.create_dataset(params['dataset_3'])
- self.trait_1 = GeneralTrait(name=params['trait_1'], dataset=self.data_set_1)
- self.trait_2 = GeneralTrait(name=params['trait_2'], dataset=self.data_set_2)
- #self.trait_3 = GeneralTrait(name=params['trait_3'], dataset=self.data_set_3)
+ self.trait_1 = create_trait(name=params['trait_1'], dataset=self.data_set_1)
+ self.trait_2 = create_trait(name=params['trait_2'], dataset=self.data_set_2)
+ #self.trait_3 = create_trait(name=params['trait_3'], dataset=self.data_set_3)
samples_1, samples_2, num_overlap = corr_result_helpers.normalize_values_with_samples(self.trait_1.data, self.trait_2.data)
diff --git a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py
index b099b83d..7eab7184 100644
--- a/wqflask/wqflask/correlation/show_corr_results.py
+++ b/wqflask/wqflask/correlation/show_corr_results.py
@@ -47,7 +47,7 @@ import reaper
from base import webqtlConfig
from utility.THCell import THCell
from utility.TDCell import TDCell
-from base.trait import GeneralTrait
+from base.trait import create_trait
from base import data_set
from utility import webqtlUtil, helper_functions, corr_result_helpers, hmac
from db import webqtlDatabaseFunction
@@ -97,7 +97,7 @@ class CorrelationResults(object):
if start_vars['dataset'] == "Temp":
self.dataset = data_set.create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = start_vars['group'])
self.trait_id = start_vars['trait_id']
- self.this_trait = GeneralTrait(dataset=self.dataset,
+ self.this_trait = create_trait(dataset=self.dataset,
name=self.trait_id,
cellid=None)
else:
@@ -199,7 +199,9 @@ class CorrelationResults(object):
range_chr_as_int = order_id
for _trait_counter, trait in enumerate(self.correlation_data.keys()[:self.return_number]):
- trait_object = GeneralTrait(dataset=self.target_dataset, name=trait, get_qtl_info=True, get_sample_info=False)
+ trait_object = create_trait(dataset=self.target_dataset, name=trait, get_qtl_info=True, get_sample_info=False)
+ if not trait_object:
+ continue
if self.target_dataset.type == "ProbeSet" or self.target_dataset.type == "Geno":
#ZS: Convert trait chromosome to an int for the location range option
diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py
index b5c45d05..2b9467d1 100644
--- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py
+++ b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py
@@ -43,14 +43,16 @@ from pprint import pformat as pf
import reaper
-import redis
-Redis = redis.StrictRedis()
+from utility.redis_tools import get_redis_conn
+Redis = get_redis_conn()
+THIRTY_DAYS = 60 * 60 * 24 * 30
from utility.THCell import THCell
from utility.TDCell import TDCell
from base.trait import GeneralTrait
from base import data_set
from utility import webqtlUtil, helper_functions, corr_result_helpers
+
from db import webqtlDatabaseFunction
import utility.webqtlUtil #this is for parallel computing only.
from wqflask.correlation import correlation_functions
@@ -204,20 +206,6 @@ class CorrelationMatrix(object):
samples = self.all_sample_list,
sample_data = self.sample_data,)
# corr_results = [result[1] for result in result_row for result_row in self.corr_results])
-
- def get_trait_db_obs(self, trait_db_list):
-
- self.trait_list = []
- for i, trait_db in enumerate(trait_db_list):
- if i == (len(trait_db_list) - 1):
- break
- trait_name, dataset_name = trait_db.split(":")
- #print("dataset_name:", dataset_name)
- dataset_ob = data_set.create_dataset(dataset_name)
- trait_ob = GeneralTrait(dataset=dataset_ob,
- name=trait_name,
- cellid=None)
- self.trait_list.append((trait_ob, dataset_ob))
def calculate_pca(self, cols, corr_eigen_value, corr_eigen_vectors):
base = importr('base')
@@ -257,7 +245,7 @@ class CorrelationMatrix(object):
this_vals_string += "x "
this_vals_string = this_vals_string[:-1]
- Redis.set(trait_id, this_vals_string)
+ Redis.set(trait_id, this_vals_string, ex=THIRTY_DAYS)
self.pca_trait_ids.append(trait_id)
return pca
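The PCA trait values written to Redis in calculate_pca() now carry a 30-day expiry so temporary traits do not accumulate indefinitely. A small sketch of that write, mirroring the space-separated value string (missing values as "x") and the THIRTY_DAYS constant from this diff; the connection object is assumed:

    THIRTY_DAYS = 60 * 60 * 24 * 30

    def cache_pca_trait(redis_conn, trait_id, values):
        value_string = " ".join(str(v) if v is not None else "x" for v in values)
        redis_conn.set(trait_id, value_string, ex=THIRTY_DAYS)  # auto-expires after 30 days
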
diff --git a/wqflask/wqflask/ctl/ctl_analysis.py b/wqflask/wqflask/ctl/ctl_analysis.py
index 4415b86a..35067036 100644
--- a/wqflask/wqflask/ctl/ctl_analysis.py
+++ b/wqflask/wqflask/ctl/ctl_analysis.py
@@ -17,7 +17,7 @@ import csv
import itertools
from base import data_set
-from base import trait as TRAIT
+from base.trait import create_trait, retrieve_sample_data
from utility import helper_functions
from utility.tools import locate, GN2_BRANCH_URL
@@ -122,8 +122,8 @@ class CTL(object):
logger.debug("retrieving data for", trait)
if trait != "":
ts = trait.split(':')
- gt = TRAIT.GeneralTrait(name = ts[0], dataset_name = ts[1])
- gt = TRAIT.retrieve_sample_data(gt, dataset, individuals)
+ gt = create_trait(name = ts[0], dataset_name = ts[1])
+ gt = retrieve_sample_data(gt, dataset, individuals)
for ind in individuals:
if ind in gt.data.keys():
traits.append(gt.data[ind].value)
@@ -180,8 +180,8 @@ class CTL(object):
logger.debug(significant[0][x], significant[1][x], significant[2][x]) # Debug to console
tsS = significant[0][x].split(':') # Source
tsT = significant[2][x].split(':') # Target
- gtS = TRAIT.GeneralTrait(name = tsS[0], dataset_name = tsS[1]) # Retrieve Source info from the DB
- gtT = TRAIT.GeneralTrait(name = tsT[0], dataset_name = tsT[1]) # Retrieve Target info from the DB
+ gtS = create_trait(name = tsS[0], dataset_name = tsS[1]) # Retrieve Source info from the DB
+ gtT = create_trait(name = tsT[0], dataset_name = tsT[1]) # Retrieve Target info from the DB
self.addNode(gtS)
self.addNode(gtT)
self.addEdge(gtS, gtT, significant, x)
diff --git a/wqflask/wqflask/do_search.py b/wqflask/wqflask/do_search.py
index b0ca5ced..1e15d28f 100644
--- a/wqflask/wqflask/do_search.py
+++ b/wqflask/wqflask/do_search.py
@@ -34,10 +34,7 @@ class DoSearch(object):
self.search_type = search_type
if self.dataset:
- logger.debug("self.dataset is boo: ", type(self.dataset), pf(self.dataset))
- logger.debug("self.dataset.group is: ", pf(self.dataset.group))
#Get group information for dataset and the species id
-
self.species_id = webqtlDatabaseFunction.retrieve_species_id(self.dataset.group.name)
def execute(self, query):
@@ -54,10 +51,6 @@ class DoSearch(object):
return keyword
- #def escape(self, stringy):
- # """Shorter name than self.db_conn.escape_string"""
- # return escape(str(stringy))
-
def mescape(self, *items):
"""Multiple escape"""
escaped = [escape(str(item)) for item in items]
@@ -71,8 +64,6 @@ class DoSearch(object):
@classmethod
def get_search(cls, search_type):
- logger.debug("search_types are:", pf(cls.search_types))
-
search_type_string = search_type['dataset_type']
if 'key' in search_type and search_type['key'] != None:
search_type_string += '_' + search_type['key']
@@ -648,7 +639,7 @@ class CisTransLrsSearch(DoSearch):
escape(self.dataset.type),
chromosome)
else:
- location_clause = "(ABS(%s.Mb-Geno.Mb) %s %s and %s.Chr = Geno.Chr) or (%s.Chr != Geno.Chr)" % (escape(self.dataset.type), the_operator, escape(str(self.mb_buffer)), escape(self.dataset.type))
+ location_clause = "(ABS(%s.Mb-Geno.Mb) %s %s and %s.Chr = Geno.Chr) or (%s.Chr != Geno.Chr)" % (escape(self.dataset.type), the_operator, escape(str(self.mb_buffer)), escape(self.dataset.type), escape(self.dataset.type))
where_clause = sub_clause + """
%sXRef.Locus = Geno.name and
Geno.SpeciesId = %s and
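The change above supplies the fifth argument that the location clause was missing: the format string contains five %s placeholders, but only four values were previously interpolated, which raises "not enough arguments for format string". A minimal illustration with placeholder values ("ProbeSet", "<=", "5"):

    # Five %s placeholders, so five values are required (values below are illustrative).
    template = "(ABS(%s.Mb-Geno.Mb) %s %s and %s.Chr = Geno.Chr) or (%s.Chr != Geno.Chr)"
    clause = template % ("ProbeSet", "<=", "5", "ProbeSet", "ProbeSet")
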
diff --git a/wqflask/wqflask/gsearch.py b/wqflask/wqflask/gsearch.py
index 04e3d578..c65a1415 100644
--- a/wqflask/wqflask/gsearch.py
+++ b/wqflask/wqflask/gsearch.py
@@ -4,7 +4,7 @@ import json
from flask import Flask, g
from base.data_set import create_dataset
-from base.trait import GeneralTrait
+from base.trait import create_trait
from db import webqtlDatabaseFunction
from base import webqtlConfig
@@ -96,7 +96,9 @@ class GSearch(object):
#dataset = create_dataset(line[3], "ProbeSet", get_samplelist=False)
#trait_id = line[4]
#with Bench("Building trait object"):
- trait_ob = GeneralTrait(dataset_name=this_trait['dataset'], name=this_trait['name'], get_qtl_info=True, get_sample_info=False)
+ trait_ob = create_trait(dataset_name=this_trait['dataset'], name=this_trait['name'], get_qtl_info=True, get_sample_info=False)
+ if not trait_ob:
+ continue
max_lrs_text = "N/A"
if trait_ob.locus_chr != "" and trait_ob.locus_mb != "":
max_lrs_text = "Chr" + str(trait_ob.locus_chr) + ": " + str(trait_ob.locus_mb)
@@ -210,13 +212,12 @@ class GSearch(object):
if line[11] != "" and line[11] != None:
this_trait['additive'] = '%.3f' % line[11]
- #dataset = create_dataset(line[2], "Publish")
- #trait_id = line[3]
- #this_trait = GeneralTrait(dataset=dataset, name=trait_id, get_qtl_info=True, get_sample_info=False)
this_trait['max_lrs_text'] = "N/A"
+ trait_ob = create_trait(dataset_name=this_trait['dataset'], name=this_trait['name'], get_qtl_info=True, get_sample_info=False)
+ if not trait_ob:
+ continue
if this_trait['dataset'] == this_trait['group'] + "Publish":
try:
- trait_ob = GeneralTrait(dataset_name=this_trait['dataset'], name=this_trait['name'], get_qtl_info=True, get_sample_info=False)
if trait_ob.locus_chr != "" and trait_ob.locus_mb != "":
this_trait['max_lrs_text'] = "Chr" + str(trait_ob.locus_chr) + ": " + str(trait_ob.locus_mb)
except:
diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py
index a648667b..74fa4329 100644
--- a/wqflask/wqflask/marker_regression/display_mapping_results.py
+++ b/wqflask/wqflask/marker_regression/display_mapping_results.py
@@ -246,6 +246,12 @@ class DisplayMappingResults(object):
if 'output_files' in start_vars:
self.output_files = ",".join(start_vars['output_files'])
+ self.categorical_vars = ""
+ self.perm_strata = ""
+ if 'perm_strata' in start_vars.keys() and 'categorical_vars' in start_vars.keys():
+ self.categorical_vars = start_vars['categorical_vars']
+ self.perm_strata = start_vars['perm_strata']
+
self.selectedChr = int(start_vars['selected_chr'])
self.strainlist = start_vars['samples']
diff --git a/wqflask/wqflask/marker_regression/gemma_mapping.py b/wqflask/wqflask/marker_regression/gemma_mapping.py
index e2b15c26..88d27517 100644
--- a/wqflask/wqflask/marker_regression/gemma_mapping.py
+++ b/wqflask/wqflask/marker_regression/gemma_mapping.py
@@ -1,7 +1,7 @@
import os, math, string, random, json
from base import webqtlConfig
-from base.trait import GeneralTrait
+from base.trait import create_trait
from base.data_set import create_dataset
from utility.tools import flat_files, GEMMA_COMMAND, GEMMA_WRAPPER_COMMAND, TEMPDIR, WEBSERVER_MODE
@@ -129,7 +129,7 @@ def gen_covariates_file(this_dataset, covariates, samples):
this_covariate_data = []
trait_name = covariate.split(":")[0]
dataset_ob = create_dataset(covariate.split(":")[1])
- trait_ob = GeneralTrait(dataset=dataset_ob,
+ trait_ob = create_trait(dataset=dataset_ob,
name=trait_name,
cellid=None)
diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py
index e4a4d127..c5590a85 100644
--- a/wqflask/wqflask/marker_regression/rqtl_mapping.py
+++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py
@@ -6,7 +6,7 @@ import json
from flask import g
from base.webqtlConfig import TMPDIR
-from base.trait import GeneralTrait
+from base.trait import create_trait
from base.data_set import create_dataset
from utility import webqtlUtil
from utility.tools import locate, TEMPDIR
@@ -86,7 +86,6 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec
cross_object = add_phenotype(cross_object, pheno_string, "the_pheno") # Add the phenotype
cross_object = add_names(cross_object, names_string, "the_names")       # Add the names
logger.info("Added pheno and names");
- # Scan for QTLs
marker_covars = create_marker_covariates(control_marker, cross_object) # Create the additive covariate markers
logger.info("Marker covars done");
if cofactors != "":
@@ -115,6 +114,7 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec
else:
if do_control == "true" or cofactors != "":
logger.info("Using covariate"); result_data_frame = scanone(cross_object, pheno = "the_pheno", addcovar = covars, model=model, method=method)
+ ro.r('save.image(file = "/home/zas1024/gn2-zach/itp_cofactor_test.RData")')
else:
logger.info("No covariates"); result_data_frame = scanone(cross_object, pheno = "the_pheno", model=model, method=method)
@@ -295,7 +295,7 @@ def add_cofactors(cross, this_dataset, covariates, samples):
covar_as_string = "c("
trait_name = covariate.split(":")[0]
dataset_ob = create_dataset(covariate.split(":")[1])
- trait_ob = GeneralTrait(dataset=dataset_ob,
+ trait_ob = create_trait(dataset=dataset_ob,
name=trait_name,
cellid=None)
@@ -321,27 +321,27 @@ def add_cofactors(cross, this_dataset, covariates, samples):
datatype = get_trait_data_type(covariate)
logger.info("Covariate: " + covariate + " is of type: " + datatype);
if(datatype == "categorical"): # Cat variable
- logger.info("call of add_categorical_covar");
- cross, col_names = add_categorical_covar(cross, covar_as_string, i) # Expand and add it to the cross
- logger.info("add_categorical_covar returned");
- for z, col_name in enumerate(col_names): # Go through the additional covar names
+ logger.info("call of add_categorical_covar");
+ cross, col_names = add_categorical_covar(cross, covar_as_string, i) # Expand and add it to the cross
+ logger.info("add_categorical_covar returned");
+ for z, col_name in enumerate(col_names): # Go through the additional covar names
+ if i < (len(covariate_list) - 1):
+ covar_name_string += '"' + col_name + '", '
+ else:
+ if(z < (len(col_names) -1)):
+ covar_name_string += '"' + col_name + '", '
+ else:
+ covar_name_string += '"' + col_name + '"'
+
+ logger.info("covar_name_string:" + covar_name_string)
+ else:
+ col_name = "covar_" + str(i)
+ cross = add_phenotype(cross, covar_as_string, col_name)
if i < (len(covariate_list) - 1):
- covar_name_string += '"' + col_name + '", '
- else:
- if(z < (len(col_names) -1)):
covar_name_string += '"' + col_name + '", '
- else:
+ else:
covar_name_string += '"' + col_name + '"'
- logger.info("covar_name_string:" + covar_name_string);
- else:
- col_name = "covar_" + str(i)
- cross = add_phenotype(cross, covar_as_string, col_name)
- if i < (len(covariate_list) - 1):
- covar_name_string += '"' + col_name + '", '
- else:
- covar_name_string += '"' + col_name + '"'
-
covar_name_string += ")"
logger.info("covar_name_string:" + covar_name_string);
covars_ob = pull_var("trait_covars", cross, covar_name_string)
@@ -350,9 +350,13 @@ def add_cofactors(cross, this_dataset, covariates, samples):
def create_marker_covariates(control_marker, cross):
ro.globalenv["the_cross"] = cross
ro.r('genotypes <- pull.geno(the_cross)') # Get the genotype matrix
- userinputS = control_marker.replace(" ", "").split(",") # TODO: sanitize user input, Never Ever trust a user
- covariate_names = ', '.join('"{0}"'.format(w) for w in userinputS)
- ro.r('covnames <- c(' + covariate_names + ')')
+ userinput_sanitized = control_marker.replace(" ", "").split(",") # TODO: sanitize user input, Never Ever trust a user
+ logger.debug(userinput_sanitized)
+ if len(userinput_sanitized) > 0:
+ covariate_names = ', '.join('"{0}"'.format(w) for w in userinput_sanitized)
+ ro.r('covnames <- c(' + covariate_names + ')')
+ else:
+ ro.r('covnames <- c()')
ro.r('covInGeno <- which(covnames %in% colnames(genotypes))')
ro.r('covnames <- covnames[covInGeno]')
ro.r("cat('covnames (purged): ', covnames,'\n')")
@@ -404,16 +408,4 @@ def process_rqtl_results(result, species_name): # TODO: how to make this
marker['lod_score'] = output[i][2]
qtl_results.append(marker)
- return qtl_results
-
-def get_trait_data_type(trait_db_string):
- # Get a trait's type (numeric, categorical, etc) from the DB
- the_query = "SELECT value FROM TraitMetadata WHERE type='trait_data_type'"
- results_json = g.db.execute(the_query).fetchone()
-
- results_ob = json.loads(results_json[0])
-
- if trait_db_string in results_ob:
- return results_ob[trait_db_string]
- else:
- return "numeric"
+ return qtl_results
\ No newline at end of file
diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py
index 5f7710ab..0711b852 100644
--- a/wqflask/wqflask/marker_regression/run_mapping.py
+++ b/wqflask/wqflask/marker_regression/run_mapping.py
@@ -161,7 +161,7 @@ class RunMapping(object):
self.num_perm = 0
self.perm_output = []
self.bootstrap_results = []
- self.covariates = start_vars['covariates'] if "covariates" in start_vars else None
+ self.covariates = start_vars['covariates'] if "covariates" in start_vars else ""
#ZS: This is passed to GN1 code for single chr mapping
self.selected_chr = -1
@@ -467,6 +467,7 @@ class RunMapping(object):
#mapping_scale = self.mapping_scale,
#chromosomes = chromosome_mb_lengths,
#qtl_results = self.qtl_results,
+ categorical_vars = self.categorical_vars,
chr_lengths = chr_lengths,
num_perm = self.num_perm,
perm_results = self.perm_output,
diff --git a/wqflask/wqflask/network_graph/network_graph.py b/wqflask/wqflask/network_graph/network_graph.py
index 152e4168..f41f3017 100644
--- a/wqflask/wqflask/network_graph/network_graph.py
+++ b/wqflask/wqflask/network_graph/network_graph.py
@@ -44,7 +44,7 @@ import reaper
from utility.THCell import THCell
from utility.TDCell import TDCell
-from base.trait import GeneralTrait
+from base.trait import create_trait
from base import data_set
from utility import webqtlUtil, helper_functions, corr_result_helpers
from utility.tools import GN2_BRANCH_URL
@@ -217,7 +217,7 @@ class NetworkGraph(object):
break
trait_name, dataset_name = trait_db.split(":")
dataset_ob = data_set.create_dataset(dataset_name)
- trait_ob = GeneralTrait(dataset=dataset_ob,
+ trait_ob = create_trait(dataset=dataset_ob,
name=trait_name,
cellid=None)
self.trait_list.append((trait_ob, dataset_ob))
\ No newline at end of file
diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py
index 8f702d58..de4b01eb 100644
--- a/wqflask/wqflask/search_results.py
+++ b/wqflask/wqflask/search_results.py
@@ -1,15 +1,9 @@
-# from __future__ import absolute_import, print_function, division
+from __future__ import absolute_import, print_function, division
-
-import os
-import cPickle
import re
import uuid
from math import *
import time
-import math
-import datetime
-import collections
import re
import requests
@@ -18,18 +12,16 @@ from pprint import pformat as pf
import json
from base.data_set import create_dataset
-from base import trait
+from base.trait import create_trait
from wqflask import parser
from wqflask import do_search
-from utility import webqtlUtil,tools
from db import webqtlDatabaseFunction
-from flask import render_template, Flask, g
+from flask import Flask, g
-from utility import formatting
-from utility import hmac
+from utility import hmac, helper_functions
from utility.tools import GN2_BASE_URL
-from utility.type_checking import is_float, is_int, is_str, get_float, get_int, get_string
+from utility.type_checking import is_str
from utility.logger import getLogger
logger = getLogger(__name__ )
@@ -86,7 +78,7 @@ views.py).
try:
self.search()
except:
- self.search_term_exists = False
+ self.search_term_exists = False
if self.search_term_exists:
self.gen_search_result()
@@ -113,50 +105,49 @@ views.py).
trait_dict = {}
trait_id = result[0]
- trait_dict['index'] = index + 1
- this_trait = trait.GeneralTrait(dataset=self.dataset, name=trait_id, get_qtl_info=True, get_sample_info=False)
- trait_dict['name'] = this_trait.name
- if this_trait.dataset.type == "Publish":
- trait_dict['display_name'] = this_trait.display_name
- else:
- trait_dict['display_name'] = this_trait.name
- trait_dict['dataset'] = this_trait.dataset.name
- trait_dict['hmac'] = hmac.data_hmac('{}:{}'.format(this_trait.name, this_trait.dataset.name))
- if this_trait.dataset.type == "ProbeSet":
- trait_dict['symbol'] = this_trait.symbol
- trait_dict['description'] = this_trait.description_display.decode('utf-8', 'replace')
- trait_dict['location'] = this_trait.location_repr
- trait_dict['mean'] = "N/A"
- trait_dict['additive'] = "N/A"
- if this_trait.mean != "" and this_trait.mean != None:
- trait_dict['mean'] = '%.3f' % this_trait.mean
- trait_dict['lrs_score'] = this_trait.LRS_score_repr
- trait_dict['lrs_location'] = this_trait.LRS_location_repr
- if this_trait.additive != "":
- trait_dict['additive'] = '%.3f' % this_trait.additive
- elif this_trait.dataset.type == "Geno":
- trait_dict['location'] = this_trait.location_repr
- elif this_trait.dataset.type == "Publish":
- trait_dict['description'] = this_trait.description_display
- trait_dict['authors'] = this_trait.authors
- trait_dict['pubmed_id'] = "N/A"
- if this_trait.pubmed_id:
- trait_dict['pubmed_id'] = this_trait.pubmed_id
- trait_dict['pubmed_link'] = this_trait.pubmed_link
- trait_dict['pubmed_text'] = this_trait.pubmed_text
- trait_dict['mean'] = "N/A"
- if this_trait.mean != "" and this_trait.mean != None:
- trait_dict['mean'] = '%.3f' % this_trait.mean
- trait_dict['lrs_score'] = this_trait.LRS_score_repr
- trait_dict['lrs_location'] = this_trait.LRS_location_repr
- trait_dict['additive'] = "N/A"
- if this_trait.additive != "":
- trait_dict['additive'] = '%.3f' % this_trait.additive
- trait_list.append(trait_dict)
- #json_trait_list.append(trait.jsonable_table_row(this_trait, self.dataset.name, index + 1))
+ this_trait = create_trait(dataset=self.dataset, name=trait_id, get_qtl_info=True, get_sample_info=False)
+ if this_trait:
+ trait_dict['index'] = index + 1
+ trait_dict['name'] = this_trait.name
+ if this_trait.dataset.type == "Publish":
+ trait_dict['display_name'] = this_trait.display_name
+ else:
+ trait_dict['display_name'] = this_trait.name
+ trait_dict['dataset'] = this_trait.dataset.name
+ trait_dict['hmac'] = hmac.data_hmac('{}:{}'.format(this_trait.name, this_trait.dataset.name))
+ if this_trait.dataset.type == "ProbeSet":
+ trait_dict['symbol'] = this_trait.symbol
+ trait_dict['description'] = this_trait.description_display.decode('utf-8', 'replace')
+ trait_dict['location'] = this_trait.location_repr
+ trait_dict['mean'] = "N/A"
+ trait_dict['additive'] = "N/A"
+ if this_trait.mean != "" and this_trait.mean != None:
+ trait_dict['mean'] = '%.3f' % this_trait.mean
+ trait_dict['lrs_score'] = this_trait.LRS_score_repr
+ trait_dict['lrs_location'] = this_trait.LRS_location_repr
+ if this_trait.additive != "":
+ trait_dict['additive'] = '%.3f' % this_trait.additive
+ elif this_trait.dataset.type == "Geno":
+ trait_dict['location'] = this_trait.location_repr
+ elif this_trait.dataset.type == "Publish":
+ trait_dict['description'] = this_trait.description_display
+ trait_dict['authors'] = this_trait.authors
+ trait_dict['pubmed_id'] = "N/A"
+ if this_trait.pubmed_id:
+ trait_dict['pubmed_id'] = this_trait.pubmed_id
+ trait_dict['pubmed_link'] = this_trait.pubmed_link
+ trait_dict['pubmed_text'] = this_trait.pubmed_text
+ trait_dict['mean'] = "N/A"
+ if this_trait.mean != "" and this_trait.mean != None:
+ trait_dict['mean'] = '%.3f' % this_trait.mean
+ trait_dict['lrs_score'] = this_trait.LRS_score_repr
+ trait_dict['lrs_location'] = this_trait.LRS_location_repr
+ trait_dict['additive'] = "N/A"
+ if this_trait.additive != "":
+ trait_dict['additive'] = '%.3f' % this_trait.additive
+ trait_list.append(trait_dict)
self.trait_list = json.dumps(trait_list)
- #self.json_trait_list = json.dumps(json_trait_list)
def search(self):
"""
@@ -234,7 +225,6 @@ views.py).
self.header_fields = the_search.header_fields
def get_search_ob(self, a_search):
- logger.debug("[kodak] item is:", pf(a_search))
search_term = a_search['search_term']
search_operator = a_search['separator']
search_type = {}
@@ -243,12 +233,10 @@ views.py).
search_type['key'] = a_search['key'].upper()
else:
search_type['key'] = None
- logger.debug("search_type is:", pf(search_type))
search_ob = do_search.DoSearch.get_search(search_type)
if search_ob:
search_class = getattr(do_search, search_ob)
- logger.debug("search_class is: ", pf(search_class))
the_search = search_class(search_term,
search_operator,
self.dataset,
diff --git a/wqflask/wqflask/show_trait/export_trait_data.py b/wqflask/wqflask/show_trait/export_trait_data.py
index 107f87c6..253c887b 100644
--- a/wqflask/wqflask/show_trait/export_trait_data.py
+++ b/wqflask/wqflask/show_trait/export_trait_data.py
@@ -4,7 +4,7 @@ import simplejson as json
from pprint import pformat as pf
-from base.trait import GeneralTrait
+from base.trait import create_trait
from base import data_set
def export_sample_table(targs):
@@ -26,7 +26,7 @@ def export_sample_table(targs):
def get_export_metadata(trait_id, dataset_name):
dataset = data_set.create_dataset(dataset_name)
- this_trait = GeneralTrait(dataset=dataset,
+ this_trait = create_trait(dataset=dataset,
name=trait_id,
cellid=None,
get_qtl_info=False)
diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py
index 29b2f77e..c77e247f 100644
--- a/wqflask/wqflask/show_trait/show_trait.py
+++ b/wqflask/wqflask/show_trait/show_trait.py
@@ -10,9 +10,6 @@ import json as json
from collections import OrderedDict
-import redis
-Redis = redis.StrictRedis()
-
import numpy as np
import scipy.stats as ss
@@ -21,11 +18,15 @@ from flask import Flask, g
from base import webqtlConfig
from base import webqtlCaseData
from wqflask.show_trait.SampleList import SampleList
-from utility import webqtlUtil, Plot, Bunch, helper_functions
-from utility.tools import locate_ignore_error
-from base.trait import GeneralTrait
+from base.trait import create_trait
from base import data_set
from db import webqtlDatabaseFunction
+from utility import webqtlUtil, Plot, Bunch, helper_functions
+from utility.authentication_tools import check_owner
+from utility.tools import locate_ignore_error
+from utility.redis_tools import get_redis_conn, get_resource_id
+Redis = get_redis_conn()
+ONE_YEAR = 60 * 60 * 24 * 365
from pprint import pformat as pf
@@ -55,9 +56,9 @@ class ShowTrait(object):
self.temp_group = kw['group']
self.dataset = data_set.create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = self.temp_group)
# Put values in Redis so they can be looked up later if added to a collection
- Redis.set(self.trait_id, kw['trait_paste'])
+ Redis.set(self.trait_id, kw['trait_paste'], ex=ONE_YEAR)
self.trait_vals = kw['trait_paste'].split()
- self.this_trait = GeneralTrait(dataset=self.dataset,
+ self.this_trait = create_trait(dataset=self.dataset,
name=self.trait_id,
cellid=None)
else:
@@ -66,11 +67,13 @@ class ShowTrait(object):
self.temp_species = self.trait_id.split("_")[1]
self.temp_group = self.trait_id.split("_")[2]
self.dataset = data_set.create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = self.temp_group)
- self.this_trait = GeneralTrait(dataset=self.dataset,
+ self.this_trait = create_trait(dataset=self.dataset,
name=self.trait_id,
cellid=None)
self.trait_vals = Redis.get(self.trait_id).split()
+ self.resource_id = check_owner(self.dataset, self.trait_id)
+
#ZS: Get verify/rna-seq link URLs
try:
blatsequence = self.this_trait.sequence
diff --git a/wqflask/wqflask/templates/admin/group_manager.html b/wqflask/wqflask/templates/admin/group_manager.html
index ac5c1350..b7df1aad 100644
--- a/wqflask/wqflask/templates/admin/group_manager.html
+++ b/wqflask/wqflask/templates/admin/group_manager.html
@@ -2,17 +2,25 @@
{% block title %}Group Manager{% endblock %}
{% block content %}
- {{ header("List of groups", "" )}}
-
@@ -26,7 +34,7 @@
+            <td>{{ loop.index }}</td>
             <td>{{ group.name }}</td>
             <td>{{ group.admins|length + group.users|length }}</td>
@@ -36,12 +44,16 @@
         {% endfor %}
@@ -65,12 +77,12 @@
         {% endfor %}
             <td>Loading...</td>
diff --git a/wqflask/wqflask/templates/mapping_results.html b/wqflask/wqflask/templates/mapping_results.html
index b4429b46..c5d49168 100644
--- a/wqflask/wqflask/templates/mapping_results.html
+++ b/wqflask/wqflask/templates/mapping_results.html
@@ -41,7 +41,8 @@
-
+
+
@@ -464,13 +465,27 @@
{% if mapping_method != "gemma" and mapping_method != "plink" %}
$('#download_perm').click(function(){
- var num_perm, perm_data;
- num_perm = js_data.num_perm
- perm_data = js_data.perm_results
- json_perm_data = JSON.stringify(perm_data);
- $('input[name=perm_results]').val(json_perm_data);
- $('#marker_regression_form').attr('action', '/export_perm_data');
- return $('#marker_regression_form').submit();
+ perm_info_dict = {
+ perm_data: js_data.perm_results,
+ num_perm: "{{ nperm }}",
+ trait_name: "{{ this_trait.display_name }}",
+ trait_description: "{{ this_trait.description_display }}",
+ cofactors: "{{ covariates }}",
+ n_samples: {{ n_samples }},
+ n_genotypes: {{ qtl_results|length }},
+ {% if genofile_string is defined %}
+ genofile: "{{ genofile_string }}",
+ {% else %}
+ genofile: "",
+ {% endif %}
+ units_linkage: "{{ LRS_LOD }}",
+ strat_cofactors: js_data.categorical_vars
+ }
+ json_perm_data = JSON.stringify(perm_info_dict);
+
+ $('input[name=perm_info]').val(json_perm_data);
+ $('#marker_regression_form').attr('action', '/export_perm_data');
+ return $('#marker_regression_form').submit();
});
modebar_options = {
diff --git a/wqflask/wqflask/user_login.py b/wqflask/wqflask/user_login.py
index edd272c2..cfee0079 100644
--- a/wqflask/wqflask/user_login.py
+++ b/wqflask/wqflask/user_login.py
@@ -12,9 +12,6 @@ import requests
import simplejson as json
-import redis # used for collections
-Redis = redis.StrictRedis()
-
from flask import (Flask, g, render_template, url_for, request, make_response,
redirect, flash, abort)
@@ -23,7 +20,8 @@ from wqflask import pbkdf2
from wqflask.user_session import UserSession
from utility import hmac
-from utility.redis_tools import is_redis_available, get_user_id, get_user_by_unique_column, set_user_attribute, save_user, save_verification_code, check_verification_code, get_user_collections, save_collections
+from utility.redis_tools import is_redis_available, get_redis_conn, get_user_id, get_user_by_unique_column, set_user_attribute, save_user, save_verification_code, check_verification_code, get_user_collections, save_collections
+Redis = get_redis_conn()
from utility.logger import getLogger
logger = getLogger(__name__)
@@ -127,7 +125,7 @@ def send_email(toaddr, msg, fromaddr="no-reply@genenetwork.org"):
server.quit()
logger.info("Successfully sent email to "+toaddr)
-def send_verification_email(user_details, template_name = "email/verification.txt", key_prefix = "verification_code", subject = "GeneNetwork email verification"):
+def send_verification_email(user_details, template_name = "email/user_verification.txt", key_prefix = "verification_code", subject = "GeneNetwork e-mail verification"):
verification_code = str(uuid.uuid4())
key = key_prefix + ":" + verification_code
@@ -141,6 +139,21 @@ def send_verification_email(user_details, template_name = "email/verification.tx
send_email(recipient, subject, body)
return {"recipient": recipient, "subject": subject, "body": body}
+@app.route("/manage/verify_email")
+def verify_email():
+ if 'code' in request.args:
+ user_details = check_verification_code(request.args['code'])
+ if user_details:
+ # As long as they have access to the email account
+ # We might as well log them in
+ session_id_signed = get_signed_session_id(user_details)
+ flash("Thank you for logging in {}.".format(user_details['full_name']), "alert-success")
+ response = make_response(redirect(url_for('index_page', import_collections = import_col, anon_id = anon_id)))
+ response.set_cookie(UserSession.user_cookie_name, session_id_signed, max_age=None)
+ return response
+ else:
+ flash("Invalid code: Password reset code does not exist or might have expired!", "error")
+
@app.route("/n/login", methods=('GET', 'POST'))
def login():
params = request.form if request.form else request.args
@@ -204,7 +217,7 @@ def login():
response.set_cookie(UserSession.user_cookie_name, session_id_signed, max_age=None)
return response
else:
- email_ob = send_verification_email(user_details)
+ email_ob = send_verification_email(user_details, template_name = "email/user_verification.txt")
return render_template("newsecurity/verification_still_needed.html", subject=email_ob['subject'])
else: # Incorrect password
#ZS: It previously seemed to store that there was an incorrect log-in attempt here, but it did so in the MySQL DB so this might need to be reproduced with Redis
@@ -374,16 +387,13 @@ def password_reset():
hmac = request.args.get('hm')
if verification_code:
- user_email = check_verification_code(verification_code)
- if user_email:
- user_details = get_user_by_unique_column('email_address', user_email)
- if user_details:
- return render_template(
- "new_security/password_reset.html", user_encode=user_details["email_address"])
- else:
- flash("Invalid code: User no longer exists!", "error")
+ user_details = check_verification_code(verification_code)
+ if user_details:
+ return render_template(
+ "new_security/password_reset.html", user_encode=user_details["email_address"])
else:
flash("Invalid code: Password reset code does not exist or might have expired!", "error")
+ return redirect(url_for("login"))
else:
return redirect(url_for("login"))
@@ -394,6 +404,7 @@ def password_reset_step2():
errors = []
user_email = request.form['user_encode']
+ user_id = get_user_id("email_address", user_email)
password = request.form['password']
encoded_password = set_password(password)
@@ -401,9 +412,7 @@ def password_reset_step2():
set_user_attribute(user_id, "password", encoded_password)
flash("Password changed successfully. You can now sign in.", "alert-info")
- response = make_response(redirect(url_for('login')))
-
- return response
+ return redirect(url_for('login'))
def register_user(params):
thank_you_mode = False
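As a reading aid for the verification changes above: the code is a random UUID that the e-mailed link carries back as ?code=..., and check_verification_code resolves it to the stored user details. A minimal sketch of that round trip, assuming the redis_tools helpers essentially store the user details under the prefixed key (their real signatures are not shown in this patch):

    import json
    import uuid

    def issue_verification_code(redis_conn, user_details, key_prefix="verification_code"):
        # roughly what send_verification_email plus save_verification_code do together
        code = str(uuid.uuid4())
        redis_conn.set(key_prefix + ":" + code, json.dumps(user_details))
        return code      # embedded in the e-mailed link as ?code=<code>

    def lookup_verification_code(redis_conn, code, key_prefix="verification_code"):
        # roughly what check_verification_code does for /manage/verify_email
        raw = redis_conn.get(key_prefix + ":" + code)
        return json.loads(raw) if raw else None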
diff --git a/wqflask/wqflask/user_session.py b/wqflask/wqflask/user_session.py
index 50419146..ec6d4ae3 100644
--- a/wqflask/wqflask/user_session.py
+++ b/wqflask/wqflask/user_session.py
@@ -6,10 +6,6 @@ import uuid
import simplejson as json
-import redis # used for collections
-Redis = redis.StrictRedis()
-
-
from flask import (Flask, g, render_template, url_for, request, make_response,
redirect, flash, abort)
@@ -17,7 +13,8 @@ from wqflask import app
from utility import hmac
#from utility.elasticsearch_tools import get_elasticsearch_connection
-from utility.redis_tools import get_user_id, get_user_by_unique_column, get_user_collections, save_collections
+from utility.redis_tools import get_redis_conn, get_user_id, get_user_collections, save_collections
+Redis = get_redis_conn()
from utility.logger import getLogger
logger = getLogger(__name__)
@@ -29,6 +26,11 @@ THIRTY_DAYS = 60 * 60 * 24 * 30
def get_user_session():
logger.info("@app.before_request get_session")
g.user_session = UserSession()
+ #ZS: I think this should solve the issue of deleting the cookie and redirecting to the home page when a user's session has expired
+ if not g.user_session:
+ response = make_response(redirect(url_for('login')))
+ response.set_cookie('session_id_v2', '', expires=0)
+ return response
@app.after_request
def set_user_session(response):
@@ -37,7 +39,6 @@ def set_user_session(response):
response.set_cookie(g.user_session.cookie_name, g.user_session.cookie)
return response
-
def verify_cookie(cookie):
the_uuid, separator, the_signature = cookie.partition(':')
assert len(the_uuid) == 36, "Is session_id a uuid?"
@@ -88,14 +89,11 @@ class UserSession(object):
user_id = str(uuid.uuid4()))
Redis.hmset(self.redis_key, self.record)
Redis.expire(self.redis_key, THIRTY_DAYS)
- response = make_response(redirect(url_for('login')))
- response.set_cookie(self.user_cookie_name, '', expires=0)
########### Grrr...this won't work because of the way flask handles cookies
# Delete the cookie
flash("Due to inactivity your session has expired. If you'd like please login again.")
- return response
- #return
+ return None
else:
self.record = dict(login_time = time.time(),
user_type = "anon",
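For context on verify_cookie above: the session cookie is "<uuid>:<signature>", so verification is just re-deriving the signature. A sketch assuming utility.hmac.hmac_creation is the signing helper behind get_signed_session_id (the same helper appears elsewhere in this series); treat it as an illustration rather than the exact implementation:

    from utility import hmac

    def sign_session_id(session_id):
        # hypothetical mirror of get_signed_session_id
        return session_id + ":" + hmac.hmac_creation(session_id)

    def verify_session_cookie(cookie):
        the_uuid, _, the_signature = cookie.partition(":")
        if len(the_uuid) == 36 and the_signature == hmac.hmac_creation(the_uuid):
            return the_uuid
        return None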
diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py
index 938570f3..24a4dcee 100644
--- a/wqflask/wqflask/views.py
+++ b/wqflask/wqflask/views.py
@@ -23,16 +23,13 @@ import uuid
import simplejson as json
import yaml
-#Switching from Redis to StrictRedis; might cause some issues
-import redis
-Redis = redis.StrictRedis()
-
import flask
import base64
import array
import sqlalchemy
from wqflask import app
-from flask import g, Response, request, make_response, render_template, send_from_directory, jsonify, redirect
+from flask import g, Response, request, make_response, render_template, send_from_directory, jsonify, redirect, url_for
+from wqflask import group_manager
from wqflask import search_results
from wqflask import export_traits
from wqflask import gsearch
@@ -55,11 +52,13 @@ from wqflask.correlation import corr_scatter_plot
from wqflask.wgcna import wgcna_analysis
from wqflask.ctl import ctl_analysis
from wqflask.snp_browser import snp_browser
-#from wqflask.trait_submission import submit_trait
from utility import temp_data
from utility.tools import SQL_URI,TEMPDIR,USE_REDIS,USE_GN_SERVER,GN_SERVER_URL,GN_VERSION,JS_TWITTER_POST_FETCHER_PATH,JS_GUIX_PATH, CSS_PATH
from utility.helper_functions import get_species_groups
+from utility.authentication_tools import check_resource_availability
+from utility.redis_tools import get_redis_conn
+Redis = get_redis_conn()
from base.webqtlConfig import GENERATED_IMAGE_DIR
from utility.benchmark import Bench
@@ -87,6 +86,24 @@ def connect_db():
g.db = g._database = sqlalchemy.create_engine(SQL_URI, encoding="latin1")
logger.debug(g.db)
+@app.before_request
+def check_access_permissions():
+ logger.debug("@app.before_request check_access_permissions")
+ if "temp_trait" in request.args:
+ if request.args['temp_trait'] == "True":
+ pass
+ else:
+ if 'dataset' in request.args:
+ dataset = create_dataset(request.args['dataset'])
+ logger.debug("USER:", Redis.hget("users"))
+ if 'trait_id' in request.args:
+ available = check_resource_availability(dataset, request.args['trait_id'])
+ else:
+ available = check_resource_availability(dataset)
+
+ if not available:
+ return redirect(url_for("no_access_page"))
+
@app.teardown_appcontext
def shutdown_session(exception=None):
db = getattr(g, '_database', None)
@@ -120,6 +137,10 @@ def handle_bad_request(e):
resp.set_cookie(err_msg[:32],animation)
return resp
+@app.route("/authentication_needed")
+def no_access_page():
+ return render_template("new_security/not_authenticated.html")
+
@app.route("/")
def index_page():
logger.info("Sending index_page")
@@ -401,25 +422,43 @@ def export_traits_csv():
def export_perm_data():
"""CSV file consisting of the permutation data for the mapping results"""
logger.info(request.url)
- num_perm = float(request.form['num_perm'])
- perm_data = json.loads(request.form['perm_results'])
+ perm_info = json.loads(request.form['perm_info'])
+
+ now = datetime.datetime.now()
+ time_str = now.strftime('%H:%M_%d%B%Y')
+
+ file_name = "Permutation_" + perm_info['num_perm'] + "_" + perm_info['trait_name'] + "_" + time_str
+
+ the_rows = [
+ ["#Permutation Test"],
+ ["#File_name: " + file_name],
+ ["#Metadata: From GeneNetwork.org"],
+ ["#Trait_ID: " + perm_info['trait_name']],
+ ["#Trait_description: " + perm_info['trait_description']],
+ ["#N_permutations: " + str(perm_info['num_perm'])],
+ ["#Cofactors: " + perm_info['cofactors']],
+ ["#N_cases: " + str(perm_info['n_samples'])],
+ ["#N_genotypes: " + str(perm_info['n_genotypes'])],
+ ["#Genotype_file: " + perm_info['genofile']],
+ ["#Units_linkage: " + perm_info['units_linkage']],
+ ["#Permutation_stratified_by: " + ", ".join([ str(cofactor) for cofactor in perm_info['strat_cofactors']])],
+ ["#RESULTS_1: Suggestive LRS(p=0.63) = " + str(np.percentile(np.array(perm_info['perm_data']), 67))],
+ ["#RESULTS_2: Significant LRS(p=0.05) = " + str(np.percentile(np.array(perm_info['perm_data']), 95))],
+ ["#RESULTS_3: Highly Significant LRS(p=0.01) = " + str(np.percentile(np.array(perm_info['perm_data']), 99))],
+ ["#Comment: Results sorted from low to high peak linkage"]
+ ]
buff = StringIO.StringIO()
writer = csv.writer(buff)
- writer.writerow(["Suggestive LRS (p=0.63) = " + str(np.percentile(np.array(perm_data), 67))])
- writer.writerow(["Significant LRS (p=0.05) = " + str(np.percentile(np.array(perm_data), 95))])
- writer.writerow(["Highly Significant LRS (p=0.01) = " + str(np.percentile(np.array(perm_data), 99))])
- writer.writerow("")
- writer.writerow([str(num_perm) + " Permutations"])
- writer.writerow("")
- for item in perm_data:
+ writer.writerows(the_rows)
+ for item in perm_info['perm_data']:
writer.writerow([item])
csv_data = buff.getvalue()
buff.close()
return Response(csv_data,
mimetype='text/csv',
- headers={"Content-Disposition":"attachment;filename=perm_data.csv"})
+ headers={"Content-Disposition":"attachment;filename=" + file_name + ".csv"})
@app.route("/show_temp_trait", methods=('POST',))
def show_temp_trait_page():
--
cgit v1.2.3
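Of the views.py changes above, the #RESULTS_ rows reduce the permutation distribution to the usual suggestive, significant, and highly significant cutoffs. The calculation in isolation, with toy data standing in for perm_info['perm_data']:

    import numpy as np

    perm_data = np.random.uniform(5.0, 25.0, 2000)        # stand-in permutation values
    suggestive = np.percentile(perm_data, 67)             # "Suggestive LRS (p=0.63)"
    significant = np.percentile(perm_data, 95)            # "Significant LRS (p=0.05)"
    highly_significant = np.percentile(perm_data, 99)     # "Highly Significant LRS (p=0.01)"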
From 218576a04f90cc0bc9e53685323e1caa8cffe986 Mon Sep 17 00:00:00 2001
From: zsloan
Date: Thu, 4 Jun 2020 15:50:56 -0500
Subject: Added back in trait info queries for situations where the proxy isn't
running
---
wqflask/base/trait.py | 94 +++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 80 insertions(+), 14 deletions(-)
diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py
index b133bf21..405c4ebf 100644
--- a/wqflask/base/trait.py
+++ b/wqflask/base/trait.py
@@ -1,9 +1,11 @@
from __future__ import absolute_import, division, print_function
+import os
import string
import resource
import codecs
import requests
+import random
from base import webqtlConfig
from base.webqtlCaseData import webqtlCaseData
@@ -12,8 +14,8 @@ from db import webqtlDatabaseFunction
from utility import webqtlUtil
from utility import hmac
from utility.authentication_tools import check_resource_availability
-from utility.tools import GN2_BASE_URL
-from utility.redis_tools import get_redis_conn
+from utility.tools import GN2_BASE_URL, GN_VERSION
+from utility.redis_tools import get_redis_conn, get_resource_id, get_resource_info
Redis = get_redis_conn()
from wqflask import app
@@ -22,7 +24,7 @@ import simplejson as json
from MySQLdb import escape_string as escape
from pprint import pformat as pf
-from flask import Flask, g, request, url_for, redirect
+from flask import Flask, g, request, url_for, redirect, make_response, render_template
from utility.logger import getLogger
logger = getLogger(__name__ )
@@ -45,7 +47,10 @@ def create_trait(**kw):
permitted = check_resource_availability(dataset)
if permitted:
- return GeneralTrait(**kw)
+ the_trait = GeneralTrait(**kw)
+ if the_trait.dataset.type != "Temp":
+ the_trait = retrieve_trait_info(the_trait, the_trait.dataset, get_qtl_info=kw.get('get_qtl_info'))
+ return the_trait
else:
return None
@@ -99,9 +104,6 @@ class GeneralTrait(object):
# Todo: These two lines are necessary most of the time, but perhaps not all of the time
# So we could add a simple if statement to short-circuit this if necessary
- if self.dataset.type != "Temp":
- self = retrieve_trait_info(self, self.dataset, get_qtl_info=get_qtl_info)
-
if get_sample_info != False:
self = retrieve_sample_data(self, self.dataset)
@@ -373,17 +375,15 @@ def jsonable_table_row(trait, dataset_name, index):
else:
return dict()
+
def retrieve_trait_info(trait, dataset, get_qtl_info=False):
assert dataset, "Dataset doesn't exist"
+ resource_id = get_resource_id(dataset, trait.name)
if dataset.type == 'Publish':
- resource_id = hmac.hmac_creation("{}:{}:{}".format('dataset-publish', dataset.id, trait.name))
the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view".format(resource_id, g.user_session.user_id)
- elif dataset.type == 'ProbeSet':
- resource_id = hmac.hmac_creation("{}:{}".format('dataset-probeset', dataset.id))
- the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view&trait={}".format(resource_id, g.user_session.user_id, trait.name)
else:
- resource_id = hmac.hmac_creation("{}:{}".format('dataset-geno', dataset.id))
+
the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view&trait={}".format(resource_id, g.user_session.user_id, trait.name)
try:
@@ -394,11 +394,77 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False):
except:
resource_info = get_resource_info(resource_id)
default_permissions = resource_info['default_mask']['data']
- if 'view' not in default_persmissions:
+ if 'view' not in default_permissions:
trait.view = False
return trait
- trait_info = json.loads(response)
+ if dataset.type == 'Publish':
+ query = """
+ SELECT
+ PublishXRef.Id, Publication.PubMed_ID,
+ Phenotype.Pre_publication_description, Phenotype.Post_publication_description, Phenotype.Original_description,
+ Phenotype.Pre_publication_abbreviation, Phenotype.Post_publication_abbreviation,
+ Phenotype.Lab_code, Phenotype.Submitter, Phenotype.Owner, Phenotype.Authorized_Users,
+ Publication.Authors, Publication.Title, Publication.Abstract,
+ Publication.Journal, Publication.Volume, Publication.Pages,
+ Publication.Month, Publication.Year, PublishXRef.Sequence,
+ Phenotype.Units, PublishXRef.comments
+ FROM
+ PublishXRef, Publication, Phenotype, PublishFreeze
+ WHERE
+ PublishXRef.Id = %s AND
+ Phenotype.Id = PublishXRef.PhenotypeId AND
+ Publication.Id = PublishXRef.PublicationId AND
+ PublishXRef.InbredSetId = PublishFreeze.InbredSetId AND
+ PublishFreeze.Id = %s
+ """ % (trait.name, dataset.id)
+
+ logger.sql(query)
+ trait_info = g.db.execute(query).fetchone()
+
+
+ #XZ, 05/08/2009: Xiaodong add this block to use ProbeSet.Id to find the probeset instead of just using ProbeSet.Name
+ #XZ, 05/08/2009: to avoid the problem of same probeset name from different platforms.
+ elif dataset.type == 'ProbeSet':
+ display_fields_string = ', ProbeSet.'.join(dataset.display_fields)
+ display_fields_string = 'ProbeSet.' + display_fields_string
+ query = """
+ SELECT %s
+ FROM ProbeSet, ProbeSetFreeze, ProbeSetXRef
+ WHERE
+ ProbeSetXRef.ProbeSetFreezeId = ProbeSetFreeze.Id AND
+ ProbeSetXRef.ProbeSetId = ProbeSet.Id AND
+ ProbeSetFreeze.Name = '%s' AND
+ ProbeSet.Name = '%s'
+ """ % (escape(display_fields_string),
+ escape(dataset.name),
+ escape(str(trait.name)))
+ logger.sql(query)
+ trait_info = g.db.execute(query).fetchone()
+ #XZ, 05/08/2009: We also should use Geno.Id to find marker instead of just using Geno.Name
+ # to avoid the problem of same marker name from different species.
+ elif dataset.type == 'Geno':
+ display_fields_string = string.join(dataset.display_fields,',Geno.')
+ display_fields_string = 'Geno.' + display_fields_string
+ query = """
+ SELECT %s
+ FROM Geno, GenoFreeze, GenoXRef
+ WHERE
+ GenoXRef.GenoFreezeId = GenoFreeze.Id AND
+ GenoXRef.GenoId = Geno.Id AND
+ GenoFreeze.Name = '%s' AND
+ Geno.Name = '%s'
+ """ % (escape(display_fields_string),
+ escape(dataset.name),
+ escape(trait.name))
+ logger.sql(query)
+ trait_info = g.db.execute(query).fetchone()
+ else: #Temp type
+ query = """SELECT %s FROM %s WHERE Name = %s"""
+ logger.sql(query)
+ trait_info = g.db.execute(query,
+ (string.join(dataset.display_fields,','),
+ dataset.type, trait.name)).fetchone()
if trait_info:
trait.haveinfo = True
--
cgit v1.2.3
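The trait.py change above restores a "proxy first, SQL second" pattern: ask the local proxy for trait info and fall back to direct MySQL queries when the proxy is not running. Schematically, with names from the diff and the error handling simplified (a sketch, not the committed code):

    import json
    import requests

    def trait_info_from_proxy(the_url):
        """Return the proxy's trait info, 'no-access', or None if the proxy is unreachable."""
        try:
            response = requests.get(the_url).content
            if response.strip() == "no-access":
                return "no-access"
            return json.loads(response)
        except Exception:
            return None   # caller falls back to the Publish/ProbeSet/Geno queries in the diff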
From a302a2b0ac0e7c0f26a0d063c3f2b057f61d47f1 Mon Sep 17 00:00:00 2001
From: zsloan
Date: Fri, 5 Jun 2020 16:52:56 -0500
Subject: Committing other current group/resource management code, plus the new
files
---
wqflask/base/trait.py | 2 +
wqflask/maintenance/set_resource_defaults.py | 155 +++++++++++++++++++++
wqflask/utility/authentication_tools.py | 46 ++++++
wqflask/utility/redis_tools.py | 37 +++--
wqflask/wqflask/group_manager.py | 77 ++++++++++
wqflask/wqflask/resource_manager.py | 72 ++++++++++
.../wqflask/static/new/javascript/group_manager.js | 38 +++++
wqflask/wqflask/templates/admin/create_group.html | 89 ++++++++++++
wqflask/wqflask/templates/admin/group_manager.html | 68 ++++-----
.../wqflask/templates/admin/manage_resource.html | 92 ++++++++++++
.../wqflask/templates/admin/search_for_groups.html | 64 +++++++++
.../templates/admin/select_group_to_add.html | 54 +++++++
.../templates/new_security/not_authenticated.html | 11 ++
wqflask/wqflask/templates/show_trait_details.html | 5 +
wqflask/wqflask/views.py | 3 +-
15 files changed, 764 insertions(+), 49 deletions(-)
create mode 100644 wqflask/maintenance/set_resource_defaults.py
create mode 100644 wqflask/utility/authentication_tools.py
create mode 100644 wqflask/wqflask/group_manager.py
create mode 100644 wqflask/wqflask/resource_manager.py
create mode 100644 wqflask/wqflask/static/new/javascript/group_manager.js
create mode 100644 wqflask/wqflask/templates/admin/create_group.html
create mode 100644 wqflask/wqflask/templates/admin/manage_resource.html
create mode 100644 wqflask/wqflask/templates/admin/search_for_groups.html
create mode 100644 wqflask/wqflask/templates/admin/select_group_to_add.html
create mode 100644 wqflask/wqflask/templates/new_security/not_authenticated.html
diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py
index 405c4ebf..2a945588 100644
--- a/wqflask/base/trait.py
+++ b/wqflask/base/trait.py
@@ -391,6 +391,8 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False):
if response.strip() == "no-access":
trait.view = False
return trait
+ else:
+ trait_info = json.loads(response)
except:
resource_info = get_resource_info(resource_id)
default_permissions = resource_info['default_mask']['data']
diff --git a/wqflask/maintenance/set_resource_defaults.py b/wqflask/maintenance/set_resource_defaults.py
new file mode 100644
index 00000000..ba102d9c
--- /dev/null
+++ b/wqflask/maintenance/set_resource_defaults.py
@@ -0,0 +1,155 @@
+"""
+
+Script that sets default resource access masks for use with the DB proxy
+
+Defaults will be:
+Owner - omni_gn
+Mask - Public/non-confidential: { data: "view",
+ metadata: "view",
+ admin: "not-admin" }
+ Private/confidential: { data: "no-access",
+ metadata: "no-access",
+ admin: "not-admin" }
+
+To run:
+./bin/genenetwork2 ~/my_settings.py -c ./wqflask/maintenance/set_resource_defaults.py
+
+"""
+
+from __future__ import print_function, division
+
+import sys
+import json
+
+# NEW: Note we prepend the current path - otherwise a guix instance of GN2 may be used instead
+sys.path.insert(0,'./')
+
+# NEW: import app to avoid a circular dependency on utility.tools
+from wqflask import app
+
+from utility.tools import SQL_URI
+from utility.redis_tools import get_redis_conn, get_user_id, add_resource, get_resources
+Redis = get_redis_conn()
+
+import MySQLdb
+
+import urlparse
+
+from utility.logger import getLogger
+logger = getLogger(__name__)
+
+def parse_db_uri():
+ """Converts a database URI to the db name, host name, user name, and password"""
+
+ parsed_uri = urlparse.urlparse(SQL_URI)
+
+ db_conn_info = dict(
+ db = parsed_uri.path[1:],
+ host = parsed_uri.hostname,
+ user = parsed_uri.username,
+ passwd = parsed_uri.password)
+
+ print(db_conn_info)
+ return db_conn_info
+
+def insert_probeset_resources(default_owner_id):
+ current_resources = Redis.hgetall("resources")
+ Cursor.execute(""" SELECT
+ ProbeSetFreeze.Id, ProbeSetFreeze.Name, ProbeSetFreeze.confidentiality, ProbeSetFreeze.public
+ FROM
+ ProbeSetFreeze""")
+
+ resource_results = Cursor.fetchall()
+ for i, resource in enumerate(resource_results):
+ if i % 20 == 0:
+ print(i)
+ resource_ob = {}
+ resource_ob['name'] = resource[1]
+ resource_ob['owner_id'] = default_owner_id
+ resource_ob['data'] = { "dataset" : str(resource[0])}
+ resource_ob['type'] = "dataset-probeset"
+ if resource[2] < 1 and resource[3] > 0:
+ resource_ob['default_mask'] = { "data": ["no-access", "view"] }
+ else:
+ resource_ob['default_mask'] = { "data": ["no-access"] }
+ resource_ob['group_masks'] = {}
+
+ add_resource(resource_ob)
+
+def insert_publish_resources(default_owner_id):
+ current_resources = Redis.hgetall("resources")
+ Cursor.execute(""" SELECT
+ PublishXRef.Id, PublishFreeze.Id, InbredSet.InbredSetCode
+ FROM
+ PublishXRef, PublishFreeze, InbredSet, Publication
+ WHERE
+ PublishFreeze.InbredSetId = PublishXRef.InbredSetId AND
+ InbredSet.Id = PublishXRef.InbredSetId AND
+ Publication.Id = PublishXRef.PublicationId""")
+
+ resource_results = Cursor.fetchall()
+ for resource in resource_results:
+ if resource[2]:
+ resource_ob = {}
+ if resource[2]:
+ resource_ob['name'] = resource[2] + "_" + str(resource[0])
+ else:
+ resource_ob['name'] = str(resource[0])
+ resource_ob['owner_id'] = default_owner_id
+ resource_ob['data'] = { "dataset" : str(resource[1]) ,
+ "trait" : str(resource[0])}
+ resource_ob['type'] = "dataset-publish"
+ resource_ob['default_mask'] = { "data": "view" }
+
+ resource_ob['group_masks'] = {}
+
+ add_resource(resource_ob)
+ else:
+ continue
+
+def insert_geno_resources(default_owner_id):
+ current_resources = Redis.hgetall("resources")
+ Cursor.execute(""" SELECT
+ GenoFreeze.Id, GenoFreeze.ShortName, GenoFreeze.confidentiality
+ FROM
+ GenoFreeze""")
+
+ resource_results = Cursor.fetchall()
+ for i, resource in enumerate(resource_results):
+ if i % 20 == 0:
+ print(i)
+ resource_ob = {}
+ resource_ob['name'] = resource[1]
+ resource_ob['owner_id'] = default_owner_id
+ resource_ob['data'] = { "dataset" : str(resource[0]) }
+ resource_ob['type'] = "dataset-geno"
+ if resource[2] < 1:
+ resource_ob['default_mask'] = { "data": "view" }
+ else:
+ resource_ob['default_mask'] = { "data": "no-access" }
+ resource_ob['group_masks'] = {}
+
+ add_resource(resource_ob)
+
+def insert_resources(default_owner_id):
+ current_resources = get_resources()
+ print("START")
+ insert_publish_resources(default_owner_id)
+ print("AFTER PUBLISH")
+ insert_geno_resources(default_owner_id)
+ print("AFTER GENO")
+ insert_probeset_resources(default_owner_id)
+ print("AFTER PROBESET")
+
+def main():
+ """Deletes the current resource list in Redis and recreates resource entries with default access masks"""
+
+ Redis.delete("resources")
+
+ owner_id = get_user_id("email_address", "zachary.a.sloan@gmail.com")
+ insert_resources(owner_id)
+
+if __name__ == '__main__':
+ Conn = MySQLdb.Connect(**parse_db_uri())
+ Cursor = Conn.cursor()
+ main()
\ No newline at end of file
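A quick illustration of what parse_db_uri() in the new script yields; the URI below is invented, only the parsing logic is taken from the script:

    import urlparse   # Python 2, as in the script above

    parsed_uri = urlparse.urlparse("mysql://gn2user:secret@localhost/db_webqtl")  # made-up URI
    db_conn_info = dict(db=parsed_uri.path[1:],
                        host=parsed_uri.hostname,
                        user=parsed_uri.username,
                        passwd=parsed_uri.password)
    # -> {'db': 'db_webqtl', 'host': 'localhost', 'user': 'gn2user', 'passwd': 'secret'}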
diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py
new file mode 100644
index 00000000..537881a5
--- /dev/null
+++ b/wqflask/utility/authentication_tools.py
@@ -0,0 +1,46 @@
+from __future__ import absolute_import, print_function, division
+
+import json
+import requests
+
+from base import data_set
+
+from utility import hmac
+from utility.redis_tools import get_redis_conn, get_resource_info, get_resource_id
+
+from flask import Flask, g, redirect, url_for
+
+import logging
+logger = logging.getLogger(__name__ )
+
+def check_resource_availability(dataset, trait_id=None):
+ resource_id = get_resource_id(dataset, trait_id)
+
+ if resource_id:
+ the_url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id)
+ try:
+ response = json.loads(requests.get(the_url).content)['data']
+ except:
+ resource_info = get_resource_info(resource_id)
+ response = resource_info['default_mask']['data']
+
+ if 'view' in response:
+ return True
+ else:
+ return redirect(url_for("no_access_page"))
+
+ return True
+
+def check_owner(dataset=None, trait_id=None, resource_id=None):
+ if resource_id:
+ resource_info = get_resource_info(resource_id)
+ if g.user_session.user_id == resource_info['owner_id']:
+ return resource_id
+ else:
+ resource_id = get_resource_id(dataset, trait_id)
+ if resource_id:
+ resource_info = get_resource_info(resource_id)
+ if g.user_session.user_id == resource_info['owner_id']:
+ return resource_id
+
+ return False
\ No newline at end of file
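How these helpers are used from the web layer, matching the views.py hook earlier in this series; the dataset and trait names below are placeholders, not real identifiers:

    from base import data_set
    from utility.authentication_tools import check_resource_availability, check_owner

    dataset = data_set.create_dataset("HC_M2_0606_P")                 # placeholder dataset
    available = check_resource_availability(dataset, "1443823_s_at")  # placeholder trait id
    if not available:
        pass   # views.py redirects to no_access_page in this case

    # Management pages additionally require ownership:
    resource_id = check_owner(dataset=dataset, trait_id="1443823_s_at")
    if resource_id:
        pass   # the current user owns the resource and may manage it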
diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py
index 0ad96879..bc30a0af 100644
--- a/wqflask/utility/redis_tools.py
+++ b/wqflask/utility/redis_tools.py
@@ -95,14 +95,17 @@ def get_user_groups(user_id):
user_group_ids = [] #ZS: Group IDs where user is a regular user
groups_list = Redis.hgetall("groups")
for key in groups_list:
- group_ob = json.loads(groups_list[key])
- group_admins = set(group_ob['admins'])
- group_members = set(group_ob['members'])
- if user_id in group_admins:
- admin_group_ids.append(group_ob['id'])
- elif user_id in group_members:
- user_group_ids.append(group_ob['id'])
- else:
+ try:
+ group_ob = json.loads(groups_list[key])
+ group_admins = set(group_ob['admins'])
+ group_members = set(group_ob['members'])
+ if user_id in group_admins:
+ admin_group_ids.append(group_ob['id'])
+ elif user_id in group_members:
+ user_group_ids.append(group_ob['id'])
+ else:
+ continue
+ except:
continue
admin_groups = []
@@ -122,6 +125,24 @@ def get_group_info(group_id):
return group_info
+def get_group_by_unique_column(column_name, column_value):
+ """ Get group by column; not sure if there's a faster way to do this """
+
+ matched_groups = []
+
+ all_group_list = Redis.hgetall("groups")
+ for key in all_group_list:
+ group_info = json.loads(all_group_list[key])
+ if column_name == "admins" or column_name == "members": #ZS: Since these fields are lists, search in the list
+ if column_value in group_info[column_name]:
+ matched_groups.append(group_info)
+ else:
+ if group_info[column_name] == column_value:
+ matched_groups.append(group_info)
+
+ return matched_groups
+
+
def create_group(admin_user_ids, member_user_ids = [], group_name = "Default Group Name"):
group_id = str(uuid.uuid4())
new_group = {
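Example use of the new get_group_by_unique_column lookup, mirroring how resource_manager.py (added below) calls it; the id and the group name are placeholders:

    some_user_id = "00000000-0000-0000-0000-000000000042"            # placeholder user id

    # Groups a given user belongs to, whether as admin or as member:
    groups_for_user = (get_group_by_unique_column("admins", some_user_id) +
                       get_group_by_unique_column("members", some_user_id))

    # Groups matching a name typed into a search form:
    named_groups = get_group_by_unique_column("name", "Example group")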
diff --git a/wqflask/wqflask/group_manager.py b/wqflask/wqflask/group_manager.py
new file mode 100644
index 00000000..f41ae56d
--- /dev/null
+++ b/wqflask/wqflask/group_manager.py
@@ -0,0 +1,77 @@
+
+from __future__ import print_function, division, absolute_import
+
+from flask import (Flask, g, render_template, url_for, request, make_response,
+ redirect, flash)
+
+from wqflask import app
+from wqflask.user_login import send_verification_email
+
+from utility.redis_tools import get_user_groups, get_group_info, create_group, delete_group, add_users_to_group, remove_users_from_group, \
+ change_group_name, save_verification_code, check_verification_code, get_user_by_unique_column
+
+from utility.logger import getLogger
+logger = getLogger(__name__)
+
+@app.route("/groups/manage", methods=('GET', 'POST'))
+def manage_groups():
+ params = request.form if request.form else request.args
+ if "add_new_group" in params:
+ return redirect(url_for('add_group'))
+ else:
+ admin_groups, user_groups = get_user_groups(g.user_session.user_id)
+ return render_template("admin/group_manager.html", admin_groups=admin_groups, user_groups=user_groups)
+
+@app.route("/groups/remove", methods=('POST',))
+def remove_groups():
+ group_ids_to_remove = request.form['selected_group_ids']
+ for group_id in group_ids_to_remove.split(":"):
+ delete_group(g.user_session.user_id, group_id)
+
+ return redirect(url_for('manage_groups'))
+
+@app.route("/groups/create", methods=('GET', 'POST'))
+def add_group():
+ params = request.form if request.form else request.args
+ if "group_name" in params:
+ member_user_ids = set()
+ admin_user_ids = set()
+ admin_user_ids.add(g.user_session.user_id) #ZS: Always add the user creating the group as an admin
+ if "admin_emails" in params:
+ admin_emails = params['admin_emails_to_add'].split(",")
+ for email in admin_emails:
+ user_details = get_user_by_unique_column("email_address", email)
+ if user_details:
+ admin_user_ids.add(user_details['user_id'])
+ #send_group_invites(params['group_id'], user_email_list = admin_emails, user_type="admins")
+ if "user_emails" in params:
+ member_emails = params['member_emails_to_add'].split(",")
+ for email in member_emails:
+ user_details = get_user_by_unique_column("email_address", email)
+ if user_details:
+ member_user_ids.add(user_details['user_id'])
+ #send_group_invites(params['group_id'], user_email_list = user_emails, user_type="members")
+
+ create_group(list(admin_user_ids), list(member_user_ids), params['group_name'])
+ return redirect(url_for('manage_groups'))
+ else:
+ return render_template("admin/create_group.html")
+
+#ZS: Will integrate this later, for now just letting users be added directly
+def send_group_invites(group_id, user_email_list = [], user_type="members"):
+ for user_email in user_email_list:
+ user_details = get_user_by_unique_column("email_address", user_email)
+ if user_details:
+ group_info = get_group_info(group_id)
+ #ZS: Probably not necessary since the group should normally always exist if group_id is being passed here,
+ # but it's technically possible to hit it if Redis is cleared out before submitting the new users or something
+ if group_info:
+ #ZS: Don't add user if they're already an admin or if they're being added as a regular user and are already a regular user,
+ # but do add them if they're a regular user and are added as an admin
+ if (user_details['user_id'] in group_info['admins']) or \
+ ((user_type == "members") and (user_details['user_id'] in group_info['members'])):
+ continue
+ else:
+ send_verification_email(user_details, template_name = "email/group_verification.txt", key_prefix = "verification_code", subject = "You've been invited to join a GeneNetwork user group")
+
+#@app.route()
\ No newline at end of file
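Because create_group's signature is visible in the redis_tools.py hunk above, creating a group outside the web form reduces to resolving e-mail addresses to user ids and calling it directly. A hedged sketch; the addresses and group name are placeholders:

    from utility.redis_tools import create_group, get_user_by_unique_column

    admin_ids = []
    for email in ["alice@example.org", "bob@example.org"]:    # placeholder addresses
        user_details = get_user_by_unique_column("email_address", email)
        if user_details:
            admin_ids.append(user_details["user_id"])

    create_group(admin_ids, member_user_ids=[], group_name="Example group")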
diff --git a/wqflask/wqflask/resource_manager.py b/wqflask/wqflask/resource_manager.py
new file mode 100644
index 00000000..7d88b8ed
--- /dev/null
+++ b/wqflask/wqflask/resource_manager.py
@@ -0,0 +1,72 @@
+from __future__ import print_function, division, absolute_import
+
+from flask import (Flask, g, render_template, url_for, request, make_response,
+ redirect, flash)
+
+from wqflask import app
+
+from utility.authentication_tools import check_owner
+from utility.redis_tools import get_resource_info, get_group_info, get_group_by_unique_column, get_user_id
+
+from utility.logger import getLogger
+logger = getLogger(__name__)
+
+@app.route("/resources/manage", methods=('GET', 'POST'))
+def view_resource():
+ params = request.form if request.form else request.args
+ if 'resource_id' in request.args:
+ resource_id = request.args['resource_id']
+ if check_owner(resource_id=resource_id):
+ resource_info = get_resource_info(resource_id)
+ group_masks = resource_info['group_masks']
+ group_masks_with_names = get_group_names(group_masks)
+ default_mask = resource_info['default_mask']['data']
+ return render_template("admin/manage_resource.html", resource_id = resource_id, resource_info=resource_info, default_mask=default_mask, group_masks=group_masks_with_names)
+ else:
+ return redirect(url_for("no_access_page"))
+
+@app.route("/resources/add_group", methods=('POST',))
+def add_group_to_resource():
+ resource_id = request.form['resource_id']
+ if check_owner(resource_id=resource_id):
+ if all(key in request.form for key in ('group_id', 'group_name', 'user_name', 'user_email')):
+ group_list = []
+ if request.form['group_id'] != "":
+ the_group = get_group_info(request.form['group_id'])
+ if the_group:
+ group_list.append(the_group)
+ if request.form['group_name'] != "":
+ matched_groups = get_group_by_unique_column("name", request.form['group_name'])
+ for group in matched_groups:
+ group_list.append(group)
+ if request.form['user_name'] != "":
+ user_id = get_user_id("user_name", request.form['user_name'])
+ if user_id:
+ matched_groups = get_group_by_unique_column("admins", user_id)
+ matched_groups += get_group_by_unique_column("members", user_id)
+ for group in matched_groups:
+ group_list.append(group)
+ if request.form['user_email'] != "":
+ user_id = get_user_id("email_address", request.form['user_email'])
+ if user_id:
+ matched_groups = get_group_by_unique_column("admins", user_id)
+ matched_groups += get_group_by_unique_column("members", user_id)
+ for group in matched_groups:
+ group_list.append(group)
+ return render_template("admin/select_group_to_add.html", group_list=group_list, resource_id = resource_id)
+ elif 'selected_group' in request.form:
+ group_id = request.form['selected_group']
+ return render_template("admin/set_group_privileges.html", resource_id = resource_id, group_id = group_id)
+ else:
+ return render_template("admin/search_for_groups.html", resource_id = resource_id)
+ else:
+ return redirect(url_for("no_access_page"))
+
+def get_group_names(group_masks):
+ group_masks_with_names = {}
+ for group_id, group_mask in group_masks.iteritems():
+ this_mask = group_mask
+ group_name = get_group_info(group_id)['name']
+ this_mask['name'] = group_name
+
+ return group_masks_with_names
\ No newline at end of file
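For reference, the group_masks structure that view_resource and get_group_names walk is a dict keyed by group id; the shape below is an assumption pieced together from the fields read in this series (the id and mask values are invented):

    group_masks = {
        "00000000-0000-0000-0000-000000000001": {   # placeholder group id
            "data": "view",
            "metadata": "view",
            "admin": "not-admin",
        },
    }

    # get_group_names() is then expected to annotate each mask with the group's
    # display name, e.g. {"<group id>": {"data": "view", ..., "name": "Example group"}}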
diff --git a/wqflask/wqflask/static/new/javascript/group_manager.js b/wqflask/wqflask/static/new/javascript/group_manager.js
new file mode 100644
index 00000000..5e82d104
--- /dev/null
+++ b/wqflask/wqflask/static/new/javascript/group_manager.js
@@ -0,0 +1,38 @@
+$('#add_to_admins').click(function() {
+ add_emails('admin')
+})
+
+$('#add_to_members').click(function() {
+ add_emails('member')
+})
+
+$('#clear_admins').click(function(){
+ clear_emails('admin')
+})
+
+$('#clear_members').click(function(){
+ clear_emails('member')
+})
+
+
+function add_emails(user_type){
+ var email_address = $('input[name=user_email]').val();
+ var email_list_string = $('input[name=' + user_type + '_emails_to_add]').val()
+ console.log(email_list_string)
+ if (email_list_string == ""){
+ var email_set = new Set();
+ } else {
+ var email_set = new Set(email_list_string.split(","))
+ }
+ email_set.add(email_address)
+
+ $('input[name=' + user_type + '_emails_to_add]').val(Array.from(email_set).join(','))
+
+ var emails_display_string = Array.from(email_set).join('\n')
+ $('.added_' + user_type + 's').val(emails_display_string)
+}
+
+function clear_emails(user_type){
+ $('input[name=' + user_type + '_emails_to_add]').val("")
+ $('.added_' + user_type + 's').val("")
+}
\ No newline at end of file
diff --git a/wqflask/wqflask/templates/admin/create_group.html b/wqflask/wqflask/templates/admin/create_group.html
new file mode 100644
index 00000000..55c3fa0b
--- /dev/null
+++ b/wqflask/wqflask/templates/admin/create_group.html
@@ -0,0 +1,89 @@
+{% extends "base.html" %}
+{% block title %}Group Manager{% endblock %}
+{% block content %}
+
+
@@ -29,17 +37,19 @@ | # Members | Created | Last Changed | +Group ID | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
- | {{ loop.index }} | +{{ loop.index }} | {{ group.name }} | -{{ group.admins|length + group.users|length }} | +{{ group.admins|length + group.users|length }} | {{ group.created_timestamp }} | {{ group.changed_timestamp }} | +{{ group.id }} |
@@ -88,48 +98,26 @@ {% endblock %} {% block js %} - - - + + + + +{% endblock %} diff --git a/wqflask/wqflask/templates/admin/search_for_groups.html b/wqflask/wqflask/templates/admin/search_for_groups.html new file mode 100644 index 00000000..89eb11dd --- /dev/null +++ b/wqflask/wqflask/templates/admin/search_for_groups.html @@ -0,0 +1,64 @@ +{% extends "base.html" %} +{% block title %}Resource Manager{% endblock %} +{% block content %} + + |
---|
+ | Name | +Created | +Last Changed | +
---|---|---|---|
+ | {% if 'name' in group %}{{ group.name }}{% else %}N/A{% endif %} | +{% if 'created_timestamp' in group %}{{ group.created_timestamp }}{% else %}N/A{% endif %} | +{% if 'changed_timestamp' in group %}{{ group.changed_timestamp }}{% else %}N/A{% endif %} | +
Please contact the data's owner or GN administrators if you believe you should have access to this data.
+Name | +Data | +Metadata | +Admin | +
---|---|---|---|
{{ value.name }} | +{{ value.data }} | +{{ value.metadata }} | +{{ value.admin }} | +
- | Name | -Created | -Last Changed | -
---|---|---|---|
- | {% if 'name' in group %}{{ group.name }}{% else %}N/A{% endif %} | -{% if 'created_timestamp' in group %}{{ group.created_timestamp }}{% else %}N/A{% endif %} | -{% if 'changed_timestamp' in group %}{{ group.changed_timestamp }}{% else %}N/A{% endif %} | -
+ | No-Access | +View | +Edit | +|||
---|---|---|---|---|---|---|
Data: | + {% if 'data' in default_privileges %} ++ | + | + {% else %} + | + | + | + {% endif %} + |
Metadata: | + {% if 'metadata' in default_privileges %} ++ | + | + {% else %} + | + | + | + {% endif %} + |
+ | Not Admin | +Edit Access | +Edit Admins | +|||
---|---|---|---|---|---|---|
Admin: | + {% if 'admin' in default_privileges %} ++ | + | + {% else %} + | + | + | + {% endif %} + |
+ | Index | +Name | +Email Address | +Organization | +
---|---|---|---|---|
+ | {{ loop.index }} | +{% if 'full_name' in admin %}{{ admin.full_name }}{% else %}N/A{% endif %} | +{% if 'email_address' in admin %}{{ admin.email_address }}{% else %}N/A{% endif %} | +{% if 'organization' in admin %}{{ admin.organization }}{% else %}N/A{% endif %} | +
+ | Index | +Name | +Email Address | +Organization | +
---|---|---|---|---|
+ | {{ loop.index }} | +{% if 'full_name' in member %}{{ member.full_name }}{% else %}N/A{% endif %} | +{% if 'email_address' in member %}{{ member.email_address }}{% else %}N/A{% endif %} | +{% if 'organization' in member %}{{ member.organization }}{% else %}N/A{% endif %} | +
Index | +Name | +Data | +Metadata | +Admin | +
---|---|---|---|---|
{{ loop.index }} | +{% if 'name' in resource %}{{ resource.name }}{% else %}N/A{% endif %} | +{% if 'data' in resource %}{{ resource.data }}{% else %}N/A{% endif %} | +{% if 'metadata' in resource %}{{ resource.metadata }}{% else %}N/A{% endif %} | +{% if 'admin' in resource %}{{ resource.admin }}{% else %}N/A{% endif %} | +
+ | No-Access | +View | +Edit | +
---|---|---|---|
Data: | ++ | + | + |
Metadata: | ++ | + | + |
+ | Not Admin | +Edit Access | +Edit Admins | +
---|---|---|---|
Admin: | ++ | + | + |
@@ -75,7 +77,7 @@ | |||||||||||||||
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
{{ loop.index }} | @@ -107,8 +109,8 @@ 'sDom': 'tr' }); {% endif %} - {% if user_groups|length != 0 %} - $('#user_groups').dataTable({ + {% if member_groups|length != 0 %} + $('#member_groups').dataTable({ 'sDom': 'tr' }); {% endif %} -- cgit v1.2.3 From 75802ed1f9e5d955987bf5f5eb78a9cb120116ec Mon Sep 17 00:00:00 2001 From: zsloan Date: Sat, 20 Jun 2020 17:33:22 -0500 Subject: Added some admin functionality and fixed issue with temp traits --- wqflask/base/trait.py | 17 ++++-- wqflask/base/webqtlConfig.py | 4 ++ wqflask/maintenance/set_resource_defaults.py | 20 +++++-- wqflask/utility/authentication_tools.py | 79 +++++++++++++++------------- wqflask/wqflask/resource_manager.py | 6 +-- wqflask/wqflask/views.py | 20 ++++--- 6 files changed, 90 insertions(+), 56 deletions(-) diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index 7700ecd5..c2b8b910 100644 --- a/wqflask/base/trait.py +++ b/wqflask/base/trait.py @@ -42,11 +42,11 @@ def create_trait(**kw): if kw.get('dataset_name') != "Temp": if dataset.type == 'Publish': - permitted = check_resource_availability(dataset, kw.get('name')) + permissions = check_resource_availability(dataset, kw.get('name')) else: - permitted = check_resource_availability(dataset) + permissions = check_resource_availability(dataset) - if permitted != "no-access": + if "view" in permissions['data']: the_trait = GeneralTrait(**kw) if the_trait.dataset.type != "Temp": @@ -382,9 +382,16 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): resource_id = get_resource_id(dataset, trait.name) if dataset.type == 'Publish': - the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view".format(resource_id, g.user_session.user_id) + the_url = "http://localhost:8081/run-action?resource={}&user={}&branch=data&action=view".format(resource_id, g.user_session.user_id) else: - the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view&trait={}".format(resource_id, g.user_session.user_id, trait.name) + the_url = "http://localhost:8081/run-action?resource={}&user={}&branch=data&action=view&trait={}".format(resource_id, g.user_session.user_id, trait.name) + + response = requests.get(the_url).content + if response.strip() == "no-access": + trait.view = False + return trait + else: + trait_info = json.loads(response) try: response = requests.get(the_url).content diff --git a/wqflask/base/webqtlConfig.py b/wqflask/base/webqtlConfig.py index 55407123..3d86bc22 100644 --- a/wqflask/base/webqtlConfig.py +++ b/wqflask/base/webqtlConfig.py @@ -17,6 +17,10 @@ DEBUG = 1 #USER privilege USERDICT = {'guest':1,'user':2, 'admin':3, 'root':4} +#Set privileges +SUPER_PRIVILEGES = {'data': ['no-access', 'view', 'edit'], 'metadata': ['no-access', 'view', 'edit'], 'admin': ['not-admin', 'edit-access', 'edit-admins']} +DEFAULT_PRIVILEGES = {'data': ['no-access', 'view'], 'metadata': ['no-access', 'view'], 'admin': ['not-admin']} + #minimum number of informative strains KMININFORMATIVE = 5 diff --git a/wqflask/maintenance/set_resource_defaults.py b/wqflask/maintenance/set_resource_defaults.py index 0c221bbf..ddb3b17b 100644 --- a/wqflask/maintenance/set_resource_defaults.py +++ b/wqflask/maintenance/set_resource_defaults.py @@ -68,9 +68,13 @@ def insert_probeset_resources(default_owner_id): resource_ob['data'] = { "dataset" : str(resource[0])} resource_ob['type'] = "dataset-probeset" if resource[2] < 1 and resource[3] > 0: - resource_ob['default_mask'] = { "data": "view" } + resource_ob['default_mask'] = { "data": "view", + 
"metadata": "view", + "admin": "not-admin"} else: - resource_ob['default_mask'] = { "data": "no-access" } + resource_ob['default_mask'] = { "data": "no-access", + "metadata": "no-access", + "admin": "not-admin"} resource_ob['group_masks'] = {} add_resource(resource_ob) @@ -98,7 +102,9 @@ def insert_publish_resources(default_owner_id): resource_ob['data'] = { "dataset" : str(resource[1]) , "trait" : str(resource[0])} resource_ob['type'] = "dataset-publish" - resource_ob['default_mask'] = { "data": "view" } + resource_ob['default_mask'] = { "data": "view", + "metadata": "view", + "admin": "not-admin"} resource_ob['group_masks'] = {} @@ -124,9 +130,13 @@ def insert_geno_resources(default_owner_id): resource_ob['data'] = { "dataset" : str(resource[0]) } resource_ob['type'] = "dataset-geno" if resource[2] < 1: - resource_ob['default_mask'] = { "data": "view" } + resource_ob['default_mask'] = { "data": "view", + "metadata": "view", + "admin": "not-admin"} else: - resource_ob['default_mask'] = { "data": "no-access" } + resource_ob['default_mask'] = { "data": "no-access", + "metadata": "no-access", + "admin": "not-admin"} resource_ob['group_masks'] = {} add_resource(resource_ob) diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index dfa0e2d9..6c88949b 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -3,7 +3,7 @@ from __future__ import absolute_import, print_function, division import json import requests -from base import data_set +from base import data_set, webqtlConfig from utility import hmac from utility.redis_tools import get_redis_conn, get_resource_info, get_resource_id @@ -18,45 +18,47 @@ def check_resource_availability(dataset, trait_id=None): #ZS: Check if super-user - we should probably come up with some way to integrate this into the proxy if g.user_session.user_id in Redis.smembers("super_users"): - return "edit" + return webqtlConfig.SUPER_PRIVILEGES - resource_id = get_resource_id(dataset, trait_id) response = None - if resource_id: - resource_info = get_resource_info(resource_id) - - the_url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id) - try: - response = json.loads(requests.get(the_url).content)['data'] - except: - response = resource_info['default_mask']['data'] - if 'edit' in response: - return "edit" - elif 'view' in response: - return "view" - else: - return "no-access" + #At least for now assume temporary entered traits are accessible#At least for now assume temporary entered traits are accessible + if type(dataset) == str: + return webqtlConfig.DEFAULT_PRIVILEGES + if dataset.type == "Temp": + return webqtlConfig.DEFAULT_PRIVILEGES - return False + resource_id = get_resource_id(dataset, trait_id) -def check_admin(resource_id=None): + if resource_id: + resource_info = get_resource_info(resource_id) + else: + return response #ZS: Need to substitute in something that creates the resource in Redis later - return "not-admin" + the_url = "http://localhost:8081/available?resource={}&user={}".format(resource_id, g.user_session.user_id) + try: + response = json.loads(requests.get(the_url).content) + except: + response = resource_info['default_mask'] - # ZS: commented out until proxy can return this - # the_url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id) - # try: - # response = json.loads(requests.get(the_url).content) - # except: - # response = 
resource_info['default_mask']['admin'] + if response: + return response + else: #ZS: No idea how this would happen, but just in case + return False - # if 'edit-admins' in response: - # return "edit-admins" - # elif 'edit-access' in response: - # return "edit-access" - # else: - # return "not-admin" +def check_admin(resource_id=None): + the_url = "http://localhost:8081/available?resource={}&user={}".format(resource_id, g.user_session.user_id) + try: + response = json.loads(requests.get(the_url).content)['admin'] + except: + response = resource_info['default_mask']['admin'] + + if 'edit-admins' in response: + return "edit-admins" + elif 'edit-access' in response: + return "edit-access" + else: + return "not-admin" def check_owner(dataset=None, trait_id=None, resource_id=None): if resource_id: @@ -74,15 +76,18 @@ def check_owner(dataset=None, trait_id=None, resource_id=None): def check_owner_or_admin(dataset=None, trait_id=None, resource_id=None): if not resource_id: - resource_id = get_resource_id(dataset, trait_id) + if dataset.type == "Temp": + return "not-admin" + else: + resource_id = get_resource_id(dataset, trait_id) if g.user_session.user_id in Redis.smembers("super_users"): - return [resource_id, "owner"] + return "owner" resource_info = get_resource_info(resource_id) if g.user_session.user_id == resource_info['owner_id']: - return [resource_id, "owner"] + return "owner" else: - return [resource_id, check_admin(resource_id)] + return check_admin(resource_id) - return [resource_id, "not-admin"] \ No newline at end of file + return "not-admin" \ No newline at end of file diff --git a/wqflask/wqflask/resource_manager.py b/wqflask/wqflask/resource_manager.py index 0f9f5c9d..39a07310 100644 --- a/wqflask/wqflask/resource_manager.py +++ b/wqflask/wqflask/resource_manager.py @@ -18,7 +18,7 @@ def manage_resource(): params = request.form if request.form else request.args if 'resource_id' in request.args: resource_id = request.args['resource_id'] - admin_status = check_owner_or_admin(resource_id=resource_id)[1] + admin_status = check_owner_or_admin(resource_id=resource_id) resource_info = get_resource_info(resource_id) group_masks = resource_info['group_masks'] @@ -67,7 +67,7 @@ def search_for_groups(): def change_owner(): resource_id = request.form['resource_id'] if 'new_owner' in request.form: - admin_status = check_owner_or_admin(resource_id=resource_id)[1] + admin_status = check_owner_or_admin(resource_id=resource_id) if admin_status == "owner": new_owner_id = request.form['new_owner'] change_resource_owner(resource_id, new_owner_id) @@ -100,7 +100,7 @@ def change_default_privileges(): @app.route("/resources/add_group", methods=('POST',)) def add_group_to_resource(): resource_id = request.form['resource_id'] - admin_status = check_owner_or_admin(resource_id=resource_id)[1] + admin_status = check_owner_or_admin(resource_id=resource_id) if admin_status == "owner" or admin_status == "edit-admins" or admin_status == "edit-access": if 'selected_group' in request.form: group_id = request.form['selected_group'] diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index dc431aa9..bc01839b 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -96,13 +96,21 @@ def check_access_permissions(): pass else: if 'dataset' in request.args: - dataset = create_dataset(request.args['dataset']) - if 'trait_id' in request.args: - available = check_resource_availability(dataset, request.args['trait_id']) + if request.args['dataset'] == "Temp": + permissions = 
check_resource_availability("Temp") else: - available = check_resource_availability(dataset) - - if available == "no-access": + dataset = create_dataset(request.args['dataset']) + + if dataset.type == "Temp": + permissions = False + if 'trait_id' in request.args: + permissions = check_resource_availability(dataset, request.args['trait_id']) + elif dataset.type != "Publish": + permissions = check_resource_availability(dataset) + else: + return None + + if 'view' not in permissions['data']: return redirect(url_for("no_access_page")) @app.teardown_appcontext -- cgit v1.2.3 From 51417c06061246bc92be89db198b3e74e7126035 Mon Sep 17 00:00:00 2001 From: zsloan Date: Sat, 20 Jun 2020 17:47:38 -0500 Subject: Fixed ports for proxy (though I need to add the port to global variables) and also simplified the check_owner_or_admin function a little --- wqflask/base/trait.py | 4 ++-- wqflask/utility/authentication_tools.py | 4 ++-- wqflask/wqflask/show_trait/show_trait.py | 3 ++- wqflask/wqflask/templates/show_trait_details.html | 4 ++-- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index c2b8b910..0e26ca2c 100644 --- a/wqflask/base/trait.py +++ b/wqflask/base/trait.py @@ -382,9 +382,9 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): resource_id = get_resource_id(dataset, trait.name) if dataset.type == 'Publish': - the_url = "http://localhost:8081/run-action?resource={}&user={}&branch=data&action=view".format(resource_id, g.user_session.user_id) + the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view".format(resource_id, g.user_session.user_id) else: - the_url = "http://localhost:8081/run-action?resource={}&user={}&branch=data&action=view&trait={}".format(resource_id, g.user_session.user_id, trait.name) + the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view&trait={}".format(resource_id, g.user_session.user_id, trait.name) response = requests.get(the_url).content if response.strip() == "no-access": diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index 6c88949b..06b2854a 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -35,7 +35,7 @@ def check_resource_availability(dataset, trait_id=None): else: return response #ZS: Need to substitute in something that creates the resource in Redis later - the_url = "http://localhost:8081/available?resource={}&user={}".format(resource_id, g.user_session.user_id) + the_url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id) try: response = json.loads(requests.get(the_url).content) except: @@ -47,7 +47,7 @@ def check_resource_availability(dataset, trait_id=None): return False def check_admin(resource_id=None): - the_url = "http://localhost:8081/available?resource={}&user={}".format(resource_id, g.user_session.user_id) + the_url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id) try: response = json.loads(requests.get(the_url).content)['admin'] except: diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index ed4ff0ad..4698807a 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -72,7 +72,8 @@ class ShowTrait(object): cellid=None) self.trait_vals = Redis.get(self.trait_id).split() - self.admin_status = check_owner_or_admin(self.dataset, 
self.trait_id) + self.resource_id = get_resource_id(self.dataset, self.trait_id) + self.admin_status = check_owner_or_admin(resource_id=self.resource_id) #ZS: Get verify/rna-seq link URLs try: diff --git a/wqflask/wqflask/templates/show_trait_details.html b/wqflask/wqflask/templates/show_trait_details.html index 5e0bae79..965c0340 100644 --- a/wqflask/wqflask/templates/show_trait_details.html +++ b/wqflask/wqflask/templates/show_trait_details.html @@ -248,8 +248,8 @@ - {% if admin_status[1] == "owner" or admin_status[1] == "edit-admins" or admin_status[1] == "edit-access" %} - + {% if admin_status == "owner" or admin_status == "edit-admins" or admin_status == "edit-access" %} + {% endif %} -- cgit v1.2.3 From a13e2c5856acd05610a5966d3f8ecc17038e4735 Mon Sep 17 00:00:00 2001 From: zsloan Date: Sat, 20 Jun 2020 17:53:51 -0500 Subject: Missed one issue introduced by last commit --- wqflask/wqflask/show_trait/show_trait.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index 4698807a..5fc69cab 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -49,18 +49,23 @@ class ShowTrait(object): self.temp_trait = False self.trait_id = kw['trait_id'] helper_functions.get_species_dataset_trait(self, kw) + self.resource_id = get_resource_id(self.dataset, self.trait_id) + self.admin_status = check_owner_or_admin(resource_id=self.resource_id) elif 'group' in kw: self.temp_trait = True self.trait_id = "Temp_"+kw['species']+ "_" + kw['group'] + "_" + datetime.datetime.now().strftime("%m%d%H%M%S") self.temp_species = kw['species'] self.temp_group = kw['group'] self.dataset = data_set.create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = self.temp_group) + # Put values in Redis so they can be looked up later if added to a collection Redis.set(self.trait_id, kw['trait_paste'], ex=ONE_YEAR) self.trait_vals = kw['trait_paste'].split() self.this_trait = create_trait(dataset=self.dataset, name=self.trait_id, cellid=None) + + self.admin_status = check_owner_or_admin(dataset=self.dataset, trait_id=self.trait_id) else: self.temp_trait = True self.trait_id = kw['trait_id'] @@ -70,10 +75,9 @@ class ShowTrait(object): self.this_trait = create_trait(dataset=self.dataset, name=self.trait_id, cellid=None) - self.trait_vals = Redis.get(self.trait_id).split() - self.resource_id = get_resource_id(self.dataset, self.trait_id) - self.admin_status = check_owner_or_admin(resource_id=self.resource_id) + self.trait_vals = Redis.get(self.trait_id).split() + self.admin_status = check_owner_or_admin(dataset=self.dataset, trait_id=self.trait_id) #ZS: Get verify/rna-seq link URLs try: -- cgit v1.2.3 From 01c3acf485de10fb1696fc24471751d4d3ab8e05 Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 22 Jun 2020 13:30:01 -0500 Subject: Fixed issue with temp traits not working for trait page functions like correlation or mapping --- wqflask/base/data_set.py | 3 +++ wqflask/wqflask/static/new/javascript/show_trait.js | 3 ++- wqflask/wqflask/views.py | 5 ++++- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py index 92dc8615..2272b6ee 100644 --- a/wqflask/base/data_set.py +++ b/wqflask/base/data_set.py @@ -65,6 +65,9 @@ logger = getLogger(__name__ ) DS_NAME_MAP = {} def create_dataset(dataset_name, dataset_type = None, get_samplelist = True, group_name = None): + if dataset_name == "Temp": + dataset_type = 
"Temp" + if not dataset_type: dataset_type = Dataset_Getter(dataset_name) diff --git a/wqflask/wqflask/static/new/javascript/show_trait.js b/wqflask/wqflask/static/new/javascript/show_trait.js index c0b2e6db..738cd536 100644 --- a/wqflask/wqflask/static/new/javascript/show_trait.js +++ b/wqflask/wqflask/static/new/javascript/show_trait.js @@ -515,7 +515,8 @@ $('select[name=corr_type]').change(on_corr_method_change); submit_special = function(url) { $("#trait_data_form").attr("action", url); - return $("#trait_data_form").submit(); + $("#trait_data_form").submit(); + return false }; var corr_input_list = ['corr_type', 'primary_samples', 'trait_id', 'dataset', 'group', 'tool_used', 'form_url', 'corr_sample_method', 'corr_samples_group', 'corr_dataset', 'min_expr', diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index bc01839b..80164a22 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -641,7 +641,10 @@ def loading_page(): if 'num_vals' in start_vars: num_vals = int(start_vars['num_vals']) else: - dataset = create_dataset(start_vars['dataset']) + if 'group' in start_vars: + dataset = create_dataset(start_vars['dataset'], group_name = start_vars['group']) + else: + dataset = create_dataset(start_vars['dataset']) genofile_samplelist = [] samples = start_vars['primary_samples'].split(",") if 'genofile' in start_vars: -- cgit v1.2.3 From 10a281056b19ee150b471afdf016251d5f9ead32 Mon Sep 17 00:00:00 2001 From: zsloan Date: Tue, 23 Jun 2020 17:25:54 -0500 Subject: Changed back previous 'fix' that was intended to fix issue where two tabs were opened when doing mapping/correlations; apparently this is a chrome specific problem so it will need to be deal twiht differently --- wqflask/wqflask/static/new/javascript/get_covariates_from_collection.js | 2 +- wqflask/wqflask/static/new/javascript/show_trait.js | 1 - wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js | 1 - 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/wqflask/wqflask/static/new/javascript/get_covariates_from_collection.js b/wqflask/wqflask/static/new/javascript/get_covariates_from_collection.js index 8cd6dac3..934cc14d 100644 --- a/wqflask/wqflask/static/new/javascript/get_covariates_from_collection.js +++ b/wqflask/wqflask/static/new/javascript/get_covariates_from_collection.js @@ -110,7 +110,7 @@ submit_click = function() { trait_click = function() { var dataset, this_trait_url, trait; trait = $(this).parent().find('.trait').text(); - dataset = $(this).parent().find('.dataset').text(); + dataset = $(this).parent().find('.dataset').data("dataset"); $("input[name=covariates]").val(trait + ":" + dataset) $(".selected_covariates").text(trait) return $.colorbox.close(); diff --git a/wqflask/wqflask/static/new/javascript/show_trait.js b/wqflask/wqflask/static/new/javascript/show_trait.js index 738cd536..c0784073 100644 --- a/wqflask/wqflask/static/new/javascript/show_trait.js +++ b/wqflask/wqflask/static/new/javascript/show_trait.js @@ -516,7 +516,6 @@ $('select[name=corr_type]').change(on_corr_method_change); submit_special = function(url) { $("#trait_data_form").attr("action", url); $("#trait_data_form").submit(); - return false }; var corr_input_list = ['corr_type', 'primary_samples', 'trait_id', 'dataset', 'group', 'tool_used', 'form_url', 'corr_sample_method', 'corr_samples_group', 'corr_dataset', 'min_expr', diff --git a/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js b/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js index 
From 10a281056b19ee150b471afdf016251d5f9ead32 Mon Sep 17 00:00:00 2001
From: zsloan
Date: Tue, 23 Jun 2020 17:25:54 -0500
Subject: Changed back previous 'fix' that was intended to fix the issue where two tabs were opened when doing mapping/correlations; apparently this is a Chrome-specific problem, so it will need to be dealt with differently

---
 wqflask/wqflask/static/new/javascript/get_covariates_from_collection.js | 2 +-
 wqflask/wqflask/static/new/javascript/show_trait.js                     | 1 -
 wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js       | 1 -
 3 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/wqflask/wqflask/static/new/javascript/get_covariates_from_collection.js b/wqflask/wqflask/static/new/javascript/get_covariates_from_collection.js
index 8cd6dac3..934cc14d 100644
--- a/wqflask/wqflask/static/new/javascript/get_covariates_from_collection.js
+++ b/wqflask/wqflask/static/new/javascript/get_covariates_from_collection.js
@@ -110,7 +110,7 @@ submit_click = function() {
 trait_click = function() {
     var dataset, this_trait_url, trait;
     trait = $(this).parent().find('.trait').text();
-    dataset = $(this).parent().find('.dataset').text();
+    dataset = $(this).parent().find('.dataset').data("dataset");
     $("input[name=covariates]").val(trait + ":" + dataset)
     $(".selected_covariates").text(trait)
     return $.colorbox.close();
diff --git a/wqflask/wqflask/static/new/javascript/show_trait.js b/wqflask/wqflask/static/new/javascript/show_trait.js
index 738cd536..c0784073 100644
--- a/wqflask/wqflask/static/new/javascript/show_trait.js
+++ b/wqflask/wqflask/static/new/javascript/show_trait.js
@@ -516,7 +516,6 @@ $('select[name=corr_type]').change(on_corr_method_change);
 submit_special = function(url) {
     $("#trait_data_form").attr("action", url);
     $("#trait_data_form").submit();
-    return false
 };
 
 var corr_input_list = ['corr_type', 'primary_samples', 'trait_id', 'dataset', 'group', 'tool_used', 'form_url', 'corr_sample_method', 'corr_samples_group', 'corr_dataset', 'min_expr',
diff --git a/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js b/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js
index db17af40..4dce0705 100644
--- a/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js
+++ b/wqflask/wqflask/static/new/javascript/show_trait_mapping_tools.js
@@ -5,7 +5,6 @@
 submit_special = function(url) {
     $("#trait_data_form").attr("action", url);
     $("#trait_data_form").submit();
-    return false;
 };
 
 update_time_remaining = function(percent_complete) {
--
cgit v1.2.3

From 9b1aecdbb51c7cb843ca79ab430d8dc2b9d3767e Mon Sep 17 00:00:00 2001
From: zsloan
Date: Wed, 24 Jun 2020 15:35:47 -0500
Subject: Fixed issue where scatterplot had different N than correlation results + fixed some aesthetic issues with correlation results table

---
 wqflask/wqflask/correlation/corr_scatter_plot.py | 23 ++++++++++++++++-------
 wqflask/wqflask/templates/correlation_page.html  | 24 ++++++++----------------
 2 files changed, 24 insertions(+), 23 deletions(-)

diff --git a/wqflask/wqflask/correlation/corr_scatter_plot.py b/wqflask/wqflask/correlation/corr_scatter_plot.py
index 04ec427d..819836b1 100644
--- a/wqflask/wqflask/correlation/corr_scatter_plot.py
+++ b/wqflask/wqflask/correlation/corr_scatter_plot.py
@@ -4,7 +4,7 @@ import math
 
 from flask import g
 
-from base.trait import create_trait
+from base.trait import create_trait, retrieve_sample_data
 from base import data_set
 from utility import corr_result_helpers
 from scipy import stats
@@ -17,12 +17,21 @@ class CorrScatterPlot(object):
     """Page that displays a correlation scatterplot with a line fitted to it"""
 
     def __init__(self, params):
-        self.data_set_1 = data_set.create_dataset(params['dataset_1'])
-        self.data_set_2 = data_set.create_dataset(params['dataset_2'])
-        #self.data_set_3 = data_set.create_dataset(params['dataset_3'])
-        self.trait_1 = create_trait(name=params['trait_1'], dataset=self.data_set_1)
-        self.trait_2 = create_trait(name=params['trait_2'], dataset=self.data_set_2)
-        #self.trait_3 = create_trait(name=params['trait_3'], dataset=self.data_set_3)
+        self.dataset_1 = data_set.create_dataset(params['dataset_1'])
+        self.dataset_2 = data_set.create_dataset(params['dataset_2'])
+        #self.dataset_3 = data_set.create_dataset(params['dataset_3'])
+        self.trait_1 = create_trait(name=params['trait_1'], dataset=self.dataset_1)
+        self.trait_2 = create_trait(name=params['trait_2'], dataset=self.dataset_2)
+        #self.trait_3 = create_trait(name=params['trait_3'], dataset=self.dataset_3)
+
+        primary_samples = self.dataset_1.group.samplelist
+        if self.dataset_1.group.parlist != None:
+            primary_samples += self.dataset_1.group.parlist
+        if self.dataset_1.group.f1list != None:
+            primary_samples += self.dataset_1.group.f1list
+
+        self.trait_1 = retrieve_sample_data(self.trait_1, self.dataset_1, primary_samples)
+        self.trait_2 = retrieve_sample_data(self.trait_2, self.dataset_2, primary_samples)
 
         samples_1, samples_2, num_overlap = corr_result_helpers.normalize_values_with_samples(self.trait_1.data, self.trait_2.data)
 
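The corr_scatter_plot.py change above is what brings the scatterplot's N back in line with the correlation table: both traits are first restricted to the group's primary samples (sample list plus any parents and F1s), and only then intersected. A self-contained sketch of that restricted overlap calculation; the helper names mimic, but are not, the repo's actual corr_result_helpers API:

def restrict_to_samples(trait_data, sample_list):
    # Keep only the samples the correlation page itself uses
    # (the group's sample list plus any parent/F1 strains).
    return {sample: value for sample, value in trait_data.items() if sample in sample_list}


def normalize_values_with_samples(data_1, data_2):
    # Pair up values only for samples that have a measurement in *both* traits;
    # the size of that intersection is the N reported on the page.
    shared = sorted(set(data_1) & set(data_2))
    return [data_1[s] for s in shared], [data_2[s] for s in shared], len(shared)


primary_samples = ["BXD1", "BXD2", "BXD5", "C57BL/6J", "DBA/2J"]
trait_1 = {"BXD1": 8.1, "BXD2": 7.9, "BXD5": 8.4, "BXD40": 7.7, "C57BL/6J": 8.0}
trait_2 = {"BXD1": 12.2, "BXD5": 11.8, "BXD40": 12.5, "DBA/2J": 13.0}

vals_1, vals_2, n = normalize_values_with_samples(
    restrict_to_samples(trait_1, primary_samples),
    restrict_to_samples(trait_2, primary_samples))
print(n)  # 2 (BXD1 and BXD5); BXD40 overlaps but is excluded as a non-primary sample
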
diff --git a/wqflask/wqflask/templates/correlation_page.html b/wqflask/wqflask/templates/correlation_page.html
index 71705390..f429948d 100644
--- a/wqflask/wqflask/templates/correlation_page.html
+++ b/wqflask/wqflask/templates/correlation_page.html
@@ -4,6 +4,7 @@
+
 {% endblock %}
 
 {% block content %}
@@ -143,7 +144,7 @@
         {{ trait.description_display }}
         {{ trait.location_repr }}
         {{ '%0.3f' % trait.mean|float }}
-        {{ '%0.3f'|format(trait.sample_r) }}
+        {{ '%0.3f'|format(trait.sample_r) }}
         {{ trait.num_overlap }}
         {{ '%0.3e'|format(trait.sample_p) }}
         {% if trait.lit_corr == "" or trait.lit_corr == 0.000 %}
@@ -167,8 +168,8 @@
         {% else %}N/A
         {% endif %}
-        {% if trait.description_display|length > 50 %}{{ trait.description_display[:50] }}...{% else %}{{ trait.description_display }}{% endif %}
-        {{ trait.authors }}
+        {% if trait.description_display|length > 70 %}{{ trait.description_display[:70] }}...{% else %}{{ trait.description_display }}{% endif %}
+        {% if trait.authors.split(',') > 6 %}{{ trait.authors.split(',')[:6]|join(', ') }}, et al.{% else %}{{ trait.authors }}{% endif %}
         {{ trait.pubmed_text }}
@@ -211,7 +212,6 @@
-