From ff94904574c51eeb7aecb327d6f2679fa4a60fb4 Mon Sep 17 00:00:00 2001 From: zsloan Date: Thu, 28 May 2020 20:24:01 -0500 Subject: Added lines calling proxy for publish datasets + added some resource redis queries and a missing import for the hmac functions --- wqflask/utility/hmac.py | 2 ++ wqflask/utility/redis_tools.py | 54 ++++++++++++++++++++++++++++++++++++++---- 2 files changed, 51 insertions(+), 5 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/hmac.py b/wqflask/utility/hmac.py index d8a0eace..b08be97e 100644 --- a/wqflask/utility/hmac.py +++ b/wqflask/utility/hmac.py @@ -3,6 +3,8 @@ from __future__ import print_function, division, absolute_import import hmac import hashlib +from flask import url_for + from wqflask import app def hmac_creation(stringy): diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py index ca42f7b7..15841032 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -1,17 +1,25 @@ from __future__ import print_function, division, absolute_import +import uuid import simplejson as json import redis # used for collections -Redis = redis.StrictRedis() import logging from flask import (render_template, flash) +from utility import hmac + from utility.logger import getLogger logger = getLogger(__name__) +def get_redis_conn(): + Redis = redis.StrictRedis(port=6380) + return Redis + +Redis = get_redis_conn() + def is_redis_available(): try: Redis.ping() @@ -70,14 +78,15 @@ def check_verification_code(code): email_address = None user_details = None email_address = Redis.hget("verification_codes", code) - return email_address if email_address: user_details = get_user_by_unique_column('email_address', email_address) - return user_details + if user_details: + return user_details + else: + return None else: return None - flash("Invalid code: Password reset code does not exist or might have expired!", "error") def get_user_groups(user_id): #ZS: Get the groups where a user is an admin or a member and return lists corresponding to those two sets of groups @@ -167,4 +176,39 @@ def change_group_name(user_id, group_id, new_name): group_info["name"] = new_name return group_info else: - return None \ No newline at end of file + return None + +def get_resources(): + resource_list = Redis.hgetall("resources") + return resource_list + +def get_resource_id(dataset_type, dataset_id, trait_id = None, all_resources = None): + if not all_resources: + all_resources = get_resources() + + resource_list = [[key, json.loads(value)] for key, value in all_resources.items()] + + if not trait_id: + matched_resources = [resource[0] for resource in resource_list if resource[1]['data']['dataset'] == dataset_id] + else: + matched_resources = [resource[0] for resource in resource_list if resource[1]['data']['dataset'] == dataset_id and resource[1]['data']['trait'] == trait_id] + + if len(matched_resources): + return matched_resources[0] + else: + return False + +def get_resource_info(resource_id): + resource_info = Redis.hget("resources", resource_id) + return json.loads(resource_info) + +def add_resource(resource_info): + + if 'trait' in resource_info['data']: + resource_id = hmac.data_hmac('{}:{}'.format(str(resource_info['data']['dataset']), str(resource_info['data']['trait']))) + else: + resource_id = hmac.data_hmac('{}'.format(str(resource_info['data']['dataset']))) + + Redis.hset("resources", resource_id, json.dumps(resource_info)) + + return resource_info -- cgit v1.2.3 From 1a663f987bf3a640d21c2c89402318d5433efd9e Mon Sep 
17 00:00:00 2001 From: zsloan Date: Thu, 4 Jun 2020 14:23:30 -0500 Subject: Really should have split this into many more commits: - Now use proxy to pull trait data and hide traits/results that the user doesn't have view permission for - Created a factory method for creating trait ob so it can return None when user doesn't have view permissions (this is why such a large number of files are changed) - Added metadata to permutation export - Added current group management code - Added fixed password verification e-mail code --- wqflask/base/trait.py | 177 ++++---- wqflask/utility/helper_functions.py | 11 +- wqflask/utility/redis_tools.py | 58 +-- wqflask/wqflask/api/correlation.py | 472 ++++++++++----------- wqflask/wqflask/api/gen_menu.py | 11 +- wqflask/wqflask/api/mapping.py | 4 +- wqflask/wqflask/collect.py | 15 +- .../comparison_bar_chart/comparison_bar_chart.py | 4 +- wqflask/wqflask/correlation/corr_scatter_plot.py | 8 +- wqflask/wqflask/correlation/show_corr_results.py | 8 +- .../wqflask/correlation_matrix/show_corr_matrix.py | 22 +- wqflask/wqflask/ctl/ctl_analysis.py | 10 +- wqflask/wqflask/do_search.py | 11 +- wqflask/wqflask/gsearch.py | 13 +- .../marker_regression/display_mapping_results.py | 6 + wqflask/wqflask/marker_regression/gemma_mapping.py | 4 +- wqflask/wqflask/marker_regression/rqtl_mapping.py | 64 ++- wqflask/wqflask/marker_regression/run_mapping.py | 3 +- wqflask/wqflask/network_graph/network_graph.py | 4 +- wqflask/wqflask/search_results.py | 106 ++--- wqflask/wqflask/show_trait/export_trait_data.py | 4 +- wqflask/wqflask/show_trait/show_trait.py | 21 +- wqflask/wqflask/templates/admin/group_manager.html | 45 +- wqflask/wqflask/templates/correlation_page.html | 1 + wqflask/wqflask/templates/email/verification.txt | 7 - wqflask/wqflask/templates/gsearch_pheno.html | 2 +- wqflask/wqflask/templates/mapping_results.html | 31 +- wqflask/wqflask/user_login.py | 43 +- wqflask/wqflask/user_session.py | 18 +- wqflask/wqflask/views.py | 71 +++- 30 files changed, 637 insertions(+), 617 deletions(-) delete mode 100644 wqflask/wqflask/templates/email/verification.txt (limited to 'wqflask/utility') diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index 1b7cb23c..b133bf21 100644 --- a/wqflask/base/trait.py +++ b/wqflask/base/trait.py @@ -11,6 +11,7 @@ from base.data_set import create_dataset from db import webqtlDatabaseFunction from utility import webqtlUtil from utility import hmac +from utility.authentication_tools import check_resource_availability from utility.tools import GN2_BASE_URL from utility.redis_tools import get_redis_conn Redis = get_redis_conn() @@ -21,11 +22,33 @@ import simplejson as json from MySQLdb import escape_string as escape from pprint import pformat as pf -from flask import Flask, g, request, url_for +from flask import Flask, g, request, url_for, redirect from utility.logger import getLogger logger = getLogger(__name__ ) +def create_trait(**kw): + assert bool(kw.get('dataset')) != bool(kw.get('dataset_name')), "Needs dataset ob. 
or name"; + + permitted = True + if kw.get('name'): + if kw.get('dataset_name'): + if kw.get('dataset_name') != "Temp": + dataset = create_dataset(kw.get('dataset_name')) + else: + dataset = kw.get('dataset') + + if kw.get('dataset_name') != "Temp": + if dataset.type == 'Publish': + permitted = check_resource_availability(dataset, kw.get('name')) + else: + permitted = check_resource_availability(dataset) + + if permitted: + return GeneralTrait(**kw) + else: + return None + class GeneralTrait(object): """ Trait class defines a trait in webqtl, can be either Microarray, @@ -50,6 +73,7 @@ class GeneralTrait(object): self.haveinfo = kw.get('haveinfo', False) self.sequence = kw.get('sequence') # Blat sequence, available for ProbeSet self.data = kw.get('data', {}) + self.view = True # Sets defaults self.locus = None @@ -77,6 +101,7 @@ class GeneralTrait(object): # So we could add a simple if statement to short-circuit this if necessary if self.dataset.type != "Temp": self = retrieve_trait_info(self, self.dataset, get_qtl_info=get_qtl_info) + if get_sample_info != False: self = retrieve_sample_data(self, self.dataset) @@ -212,26 +237,28 @@ def get_sample_data(): trait = params['trait'] dataset = params['dataset'] - trait_ob = GeneralTrait(name=trait, dataset_name=dataset) - - trait_dict = {} - trait_dict['name'] = trait - trait_dict['db'] = dataset - trait_dict['type'] = trait_ob.dataset.type - trait_dict['group'] = trait_ob.dataset.group.name - trait_dict['tissue'] = trait_ob.dataset.tissue - trait_dict['species'] = trait_ob.dataset.group.species - trait_dict['url'] = url_for('show_trait_page', trait_id = trait, dataset = dataset) - trait_dict['description'] = trait_ob.description_display - if trait_ob.dataset.type == "ProbeSet": - trait_dict['symbol'] = trait_ob.symbol - trait_dict['location'] = trait_ob.location_repr - elif trait_ob.dataset.type == "Publish": - if trait_ob.pubmed_id: - trait_dict['pubmed_link'] = trait_ob.pubmed_link - trait_dict['pubmed_text'] = trait_ob.pubmed_text - - return json.dumps([trait_dict, {key: value.value for key, value in trait_ob.data.iteritems() }]) + trait_ob = create_trait(name=trait, dataset_name=dataset) + if trait_ob: + trait_dict = {} + trait_dict['name'] = trait + trait_dict['db'] = dataset + trait_dict['type'] = trait_ob.dataset.type + trait_dict['group'] = trait_ob.dataset.group.name + trait_dict['tissue'] = trait_ob.dataset.tissue + trait_dict['species'] = trait_ob.dataset.group.species + trait_dict['url'] = url_for('show_trait_page', trait_id = trait, dataset = dataset) + trait_dict['description'] = trait_ob.description_display + if trait_ob.dataset.type == "ProbeSet": + trait_dict['symbol'] = trait_ob.symbol + trait_dict['location'] = trait_ob.location_repr + elif trait_ob.dataset.type == "Publish": + if trait_ob.pubmed_id: + trait_dict['pubmed_link'] = trait_ob.pubmed_link + trait_dict['pubmed_text'] = trait_ob.pubmed_text + + return json.dumps([trait_dict, {key: value.value for key, value in trait_ob.data.iteritems() }]) + else: + return None def jsonable(trait): """Return a dict suitable for using as json @@ -350,91 +377,36 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): assert dataset, "Dataset doesn't exist" if dataset.type == 'Publish': - resource_id = hmac.data_hmac("{}:{}".format(dataset.id, trait.name)) - - the_url = "http://localhost:8080/run_action/?resource={}&user={}&branch=data&action=view".format(resource_id, g.user_session.user_id) - trait_data = 
json.loads(requests.get("http://localhost:8080/run_action/?resource={}&user={}&branch=data&action=view".format(resource_id, g.user_session.user_id))) - - query = """ - SELECT - PublishXRef.Id, InbredSet.InbredSetCode, Publication.PubMed_ID, - Phenotype.Pre_publication_description, Phenotype.Post_publication_description, Phenotype.Original_description, - Phenotype.Pre_publication_abbreviation, Phenotype.Post_publication_abbreviation, PublishXRef.mean, - Phenotype.Lab_code, Phenotype.Submitter, Phenotype.Owner, Phenotype.Authorized_Users, - Publication.Authors, Publication.Title, Publication.Abstract, - Publication.Journal, Publication.Volume, Publication.Pages, - Publication.Month, Publication.Year, PublishXRef.Sequence, - Phenotype.Units, PublishXRef.comments - FROM - PublishXRef, Publication, Phenotype, PublishFreeze, InbredSet - WHERE - PublishXRef.Id = %s AND - Phenotype.Id = PublishXRef.PhenotypeId AND - Publication.Id = PublishXRef.PublicationId AND - PublishXRef.InbredSetId = PublishFreeze.InbredSetId AND - PublishXRef.InbredSetId = InbredSet.Id AND - PublishFreeze.Id = %s - """ % (trait.name, dataset.id) - - logger.sql(query) - trait_info = g.db.execute(query).fetchone() - - - #XZ, 05/08/2009: Xiaodong add this block to use ProbeSet.Id to find the probeset instead of just using ProbeSet.Name - #XZ, 05/08/2009: to avoid the problem of same probeset name from different platforms. + resource_id = hmac.hmac_creation("{}:{}:{}".format('dataset-publish', dataset.id, trait.name)) + the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view".format(resource_id, g.user_session.user_id) elif dataset.type == 'ProbeSet': - display_fields_string = ', ProbeSet.'.join(dataset.display_fields) - display_fields_string = 'ProbeSet.' + display_fields_string - query = """ - SELECT %s - FROM ProbeSet, ProbeSetFreeze, ProbeSetXRef - WHERE - ProbeSetXRef.ProbeSetFreezeId = ProbeSetFreeze.Id AND - ProbeSetXRef.ProbeSetId = ProbeSet.Id AND - ProbeSetFreeze.Name = '%s' AND - ProbeSet.Name = '%s' - """ % (escape(display_fields_string), - escape(dataset.name), - escape(str(trait.name))) - logger.sql(query) - trait_info = g.db.execute(query).fetchone() - #XZ, 05/08/2009: We also should use Geno.Id to find marker instead of just using Geno.Name - # to avoid the problem of same marker name from different species. - elif dataset.type == 'Geno': - display_fields_string = string.join(dataset.display_fields,',Geno.') - display_fields_string = 'Geno.' 
+ display_fields_string - query = """ - SELECT %s - FROM Geno, GenoFreeze, GenoXRef - WHERE - GenoXRef.GenoFreezeId = GenoFreeze.Id AND - GenoXRef.GenoId = Geno.Id AND - GenoFreeze.Name = '%s' AND - Geno.Name = '%s' - """ % (escape(display_fields_string), - escape(dataset.name), - escape(trait.name)) - logger.sql(query) - trait_info = g.db.execute(query).fetchone() - else: #Temp type - query = """SELECT %s FROM %s WHERE Name = %s""" - logger.sql(query) - trait_info = g.db.execute(query, - (string.join(dataset.display_fields,','), - dataset.type, trait.name)).fetchone() + resource_id = hmac.hmac_creation("{}:{}".format('dataset-probeset', dataset.id)) + the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view&trait={}".format(resource_id, g.user_session.user_id, trait.name) + else: + resource_id = hmac.hmac_creation("{}:{}".format('dataset-geno', dataset.id)) + the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view&trait={}".format(resource_id, g.user_session.user_id, trait.name) + + try: + response = requests.get(the_url).content + if response.strip() == "no-access": + trait.view = False + return trait + except: + resource_info = get_resource_info(resource_id) + default_permissions = resource_info['default_mask']['data'] + if 'view' not in default_persmissions: + trait.view = False + return trait + + trait_info = json.loads(response) if trait_info: trait.haveinfo = True - #XZ: assign SQL query result to trait attributes. for i, field in enumerate(dataset.display_fields): holder = trait_info[i] - # if isinstance(trait_info[i], basestring): - # logger.debug("HOLDER:", holder) - # logger.debug("HOLDER2:", holder.decode(encoding='latin1')) - # holder = unicode(trait_info[i], "utf-8", "ignore") - if isinstance(trait_info[i], basestring): - holder = holder.encode('latin1') + #if isinstance(trait_info[i], basestring): + # holder = holder.encode('latin1') setattr(trait, field, holder) if dataset.type == 'Publish': @@ -453,13 +425,6 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): if trait.confidential: trait.abbreviation = trait.pre_publication_abbreviation trait.description_display = trait.pre_publication_description - - #if not webqtlUtil.hasAccessToConfidentialPhenotypeTrait( - # privilege=self.dataset.privilege, - # userName=self.dataset.userName, - # authorized_users=self.authorized_users): - # - # description = self.pre_publication_description else: trait.abbreviation = trait.post_publication_abbreviation if description: diff --git a/wqflask/utility/helper_functions.py b/wqflask/utility/helper_functions.py index e7c04fef..9ce809b6 100644 --- a/wqflask/utility/helper_functions.py +++ b/wqflask/utility/helper_functions.py @@ -1,7 +1,7 @@ from __future__ import absolute_import, print_function, division -from base.trait import GeneralTrait from base import data_set +from base.trait import create_trait from base.species import TheSpecies from utility import hmac @@ -11,7 +11,6 @@ from flask import Flask, g import logging logger = logging.getLogger(__name__ ) - def get_species_dataset_trait(self, start_vars): #assert type(read_genotype) == type(bool()), "Expecting boolean value for read_genotype" if "temp_trait" in start_vars.keys(): @@ -24,7 +23,7 @@ def get_species_dataset_trait(self, start_vars): logger.debug("After creating dataset") self.species = TheSpecies(dataset=self.dataset) logger.debug("After creating species") - self.this_trait = GeneralTrait(dataset=self.dataset, + self.this_trait = 
create_trait(dataset=self.dataset, name=start_vars['trait_id'], cellid=None, get_qtl_info=True) @@ -34,7 +33,6 @@ def get_species_dataset_trait(self, start_vars): #self.dataset.group.read_genotype_file() #self.genotype = self.dataset.group.genotype - def get_trait_db_obs(self, trait_db_list): if isinstance(trait_db_list, basestring): trait_db_list = trait_db_list.split(",") @@ -49,10 +47,11 @@ def get_trait_db_obs(self, trait_db_list): dataset_ob = data_set.create_dataset(dataset_name=dataset_name, dataset_type="Temp", group_name=trait_name.split("_")[2]) else: dataset_ob = data_set.create_dataset(dataset_name) - trait_ob = GeneralTrait(dataset=dataset_ob, + trait_ob = create_trait(dataset=dataset_ob, name=trait_name, cellid=None) - self.trait_list.append((trait_ob, dataset_ob)) + if trait_ob: + self.trait_list.append((trait_ob, dataset_ob)) def get_species_groups(): diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py index 15841032..0ad96879 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -2,6 +2,7 @@ from __future__ import print_function, division, absolute_import import uuid import simplejson as json +import datetime import redis # used for collections @@ -96,15 +97,22 @@ def get_user_groups(user_id): for key in groups_list: group_ob = json.loads(groups_list[key]) group_admins = set(group_ob['admins']) - group_users = set(group_ob['users']) + group_members = set(group_ob['members']) if user_id in group_admins: admin_group_ids.append(group_ob['id']) - elif user_id in group_users: + elif user_id in group_members: user_group_ids.append(group_ob['id']) else: continue - return admin_group_ids, user_group_ids + admin_groups = [] + user_groups = [] + for the_id in admin_group_ids: + admin_groups.append(get_group_info(the_id)) + for the_id in user_group_ids: + user_groups.append(get_group_info(the_id)) + + return admin_groups, user_groups def get_group_info(group_id): group_json = Redis.hget("groups", group_id) @@ -114,18 +122,18 @@ def get_group_info(group_id): return group_info -def create_group(admin_member_ids, user_member_ids = [], group_name = ""): +def create_group(admin_user_ids, member_user_ids = [], group_name = "Default Group Name"): group_id = str(uuid.uuid4()) new_group = { "id" : group_id, - "admins": admin_member_ids, - "users" : user_member_ids, + "admins": admin_user_ids, + "members" : member_user_ids, "name" : group_name, "created_timestamp": datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'), "changed_timestamp": datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p') } - Redis.hset("groups", group_id, new_group) + Redis.hset("groups", group_id, json.dumps(new_group)) return new_group @@ -144,7 +152,7 @@ def add_users_to_group(user_id, group_id, user_emails = [], admins = False): #ZS if admins: group_users = set(group_info["admins"]) else: - group_users = set(group_info["users"]) + group_users = set(group_info["members"]) for email in user_emails: user_id = get_user_id("email_address", email) @@ -153,7 +161,7 @@ def add_users_to_group(user_id, group_id, user_emails = [], admins = False): #ZS if admins: group_info["admins"] = list(group_users) else: - group_info["users"] = list(group_users) + group_info["members"] = list(group_users) group_info["changed_timestamp"] = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p') Redis.hset("groups", group_id, json.dumps(group_info)) @@ -161,7 +169,7 @@ def add_users_to_group(user_id, group_id, user_emails = [], admins = False): #ZS else: return None -def 
remove_users_from_group(user_id, users_to_remove_ids, group_id, user_type = "users"): #ZS: User type is because I assume admins can remove other admins +def remove_users_from_group(user_id, users_to_remove_ids, group_id, user_type = "members"): #ZS: User type is because I assume admins can remove other admins group_info = get_group_info(group_id) if user_id in group_info["admins"]: group_users = set(group_info[user_type]) @@ -174,6 +182,7 @@ def change_group_name(user_id, group_id, new_name): group_info = get_group_info(group_id) if user_id in group_info["admins"]: group_info["name"] = new_name + Redis.hset("groups", group_id, json.dumps(group_info)) return group_info else: return None @@ -182,22 +191,21 @@ def get_resources(): resource_list = Redis.hgetall("resources") return resource_list -def get_resource_id(dataset_type, dataset_id, trait_id = None, all_resources = None): - if not all_resources: - all_resources = get_resources() - - resource_list = [[key, json.loads(value)] for key, value in all_resources.items()] - - if not trait_id: - matched_resources = [resource[0] for resource in resource_list if resource[1]['data']['dataset'] == dataset_id] - else: - matched_resources = [resource[0] for resource in resource_list if resource[1]['data']['dataset'] == dataset_id and resource[1]['data']['trait'] == trait_id] - - if len(matched_resources): - return matched_resources[0] +def get_resource_id(dataset, trait_id=None): + if dataset.type == "Publish": + if trait_id: + resource_id = hmac.hmac_creation("{}:{}:{}".format('dataset-publish', dataset.id, trait_id)) + else: + return False + elif dataset.type == "ProbeSet": + resource_id = hmac.hmac_creation("{}:{}".format('dataset-probeset', dataset.id)) + elif dataset.type == "Geno": + resource_id = hmac.hmac_creation("{}:{}".format('dataset-geno', dataset.id)) else: return False + return resource_id + def get_resource_info(resource_id): resource_info = Redis.hget("resources", resource_id) return json.loads(resource_info) @@ -205,9 +213,9 @@ def get_resource_info(resource_id): def add_resource(resource_info): if 'trait' in resource_info['data']: - resource_id = hmac.data_hmac('{}:{}'.format(str(resource_info['data']['dataset']), str(resource_info['data']['trait']))) + resource_id = hmac.hmac_creation('{}:{}:{}'.format(str(resource_info['type']), str(resource_info['data']['dataset']), str(resource_info['data']['trait']))) else: - resource_id = hmac.data_hmac('{}'.format(str(resource_info['data']['dataset']))) + resource_id = hmac.hmac_creation('{}:{}'.format(str(resource_info['type']), str(resource_info['data']['dataset']))) Redis.hset("resources", resource_id, json.dumps(resource_info)) diff --git a/wqflask/wqflask/api/correlation.py b/wqflask/wqflask/api/correlation.py index 66eb94ac..7f5312c1 100644 --- a/wqflask/wqflask/api/correlation.py +++ b/wqflask/wqflask/api/correlation.py @@ -1,237 +1,237 @@ -from __future__ import absolute_import, division, print_function - -import collections - -import scipy - -from MySQLdb import escape_string as escape - -from flask import g - -from base import data_set -from base.trait import GeneralTrait, retrieve_sample_data - -from wqflask.correlation.show_corr_results import generate_corr_json -from wqflask.correlation import correlation_functions - -from utility import webqtlUtil, helper_functions, corr_result_helpers -from utility.benchmark import Bench - -import utility.logger -logger = utility.logger.getLogger(__name__ ) - -def do_correlation(start_vars): - assert('db' in start_vars) - assert('target_db' in 
start_vars) - assert('trait_id' in start_vars) - - this_dataset = data_set.create_dataset(dataset_name = start_vars['db']) - target_dataset = data_set.create_dataset(dataset_name = start_vars['target_db']) - this_trait = GeneralTrait(dataset = this_dataset, name = start_vars['trait_id']) - this_trait = retrieve_sample_data(this_trait, this_dataset) - - corr_params = init_corr_params(start_vars) - - corr_results = calculate_results(this_trait, this_dataset, target_dataset, corr_params) - #corr_results = collections.OrderedDict(sorted(corr_results.items(), key=lambda t: -abs(t[1][0]))) - - final_results = [] - for _trait_counter, trait in enumerate(corr_results.keys()[:corr_params['return_count']]): - if corr_params['type'] == "tissue": - [sample_r, num_overlap, sample_p, symbol] = corr_results[trait] - result_dict = { - "trait" : trait, - "sample_r" : sample_r, - "#_strains" : num_overlap, - "p_value" : sample_p, - "symbol" : symbol - } - elif corr_params['type'] == "literature" or corr_params['type'] == "lit": - [gene_id, sample_r] = corr_results[trait] - result_dict = { - "trait" : trait, - "sample_r" : sample_r, - "gene_id" : gene_id - } - else: - [sample_r, sample_p, num_overlap] = corr_results[trait] - result_dict = { - "trait" : trait, - "sample_r" : sample_r, - "#_strains" : num_overlap, - "p_value" : sample_p - } - - final_results.append(result_dict) - - # json_corr_results = generate_corr_json(final_corr_results, this_trait, this_dataset, target_dataset, for_api = True) - - return final_results - -def calculate_results(this_trait, this_dataset, target_dataset, corr_params): - corr_results = {} - - target_dataset.get_trait_data() - - if corr_params['type'] == "tissue": - trait_symbol_dict = this_dataset.retrieve_genes("Symbol") - corr_results = do_tissue_correlation_for_all_traits(this_trait, trait_symbol_dict, corr_params) - sorted_results = collections.OrderedDict(sorted(corr_results.items(), - key=lambda t: -abs(t[1][1]))) - elif corr_params['type'] == "literature" or corr_params['type'] == "lit": #ZS: Just so a user can use either "lit" or "literature" - trait_geneid_dict = this_dataset.retrieve_genes("GeneId") - corr_results = do_literature_correlation_for_all_traits(this_trait, this_dataset, trait_geneid_dict, corr_params) - sorted_results = collections.OrderedDict(sorted(corr_results.items(), - key=lambda t: -abs(t[1][1]))) - else: - for target_trait, target_vals in target_dataset.trait_data.iteritems(): - result = get_sample_r_and_p_values(this_trait, this_dataset, target_vals, target_dataset, corr_params['type']) - if result is not None: - corr_results[target_trait] = result - - sorted_results = collections.OrderedDict(sorted(corr_results.items(), key=lambda t: -abs(t[1][0]))) - - return sorted_results - -def do_tissue_correlation_for_all_traits(this_trait, trait_symbol_dict, corr_params, tissue_dataset_id=1): - #Gets tissue expression values for the primary trait - primary_trait_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(symbol_list = [this_trait.symbol]) - - if this_trait.symbol.lower() in primary_trait_tissue_vals_dict: - primary_trait_tissue_values = primary_trait_tissue_vals_dict[this_trait.symbol.lower()] - - corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(symbol_list=trait_symbol_dict.values()) - - tissue_corr_data = {} - for trait, symbol in trait_symbol_dict.iteritems(): - if symbol and symbol.lower() in corr_result_tissue_vals_dict: - this_trait_tissue_values = 
corr_result_tissue_vals_dict[symbol.lower()] - - result = correlation_functions.cal_zero_order_corr_for_tiss(primary_trait_tissue_values, - this_trait_tissue_values, - corr_params['method']) - - tissue_corr_data[trait] = [result[0], result[1], result[2], symbol] - - return tissue_corr_data - -def do_literature_correlation_for_all_traits(this_trait, target_dataset, trait_geneid_dict, corr_params): - input_trait_mouse_gene_id = convert_to_mouse_gene_id(target_dataset.group.species.lower(), this_trait.geneid) - - lit_corr_data = {} - for trait, gene_id in trait_geneid_dict.iteritems(): - mouse_gene_id = convert_to_mouse_gene_id(target_dataset.group.species.lower(), gene_id) - - if mouse_gene_id and str(mouse_gene_id).find(";") == -1: - result = g.db.execute( - """SELECT value - FROM LCorrRamin3 - WHERE GeneId1='%s' and - GeneId2='%s' - """ % (escape(mouse_gene_id), escape(input_trait_mouse_gene_id)) - ).fetchone() - if not result: - result = g.db.execute("""SELECT value - FROM LCorrRamin3 - WHERE GeneId2='%s' and - GeneId1='%s' - """ % (escape(mouse_gene_id), escape(input_trait_mouse_gene_id)) - ).fetchone() - if result: - lit_corr = result.value - lit_corr_data[trait] = [gene_id, lit_corr] - else: - lit_corr_data[trait] = [gene_id, 0] - else: - lit_corr_data[trait] = [gene_id, 0] - - return lit_corr_data - -def get_sample_r_and_p_values(this_trait, this_dataset, target_vals, target_dataset, type): - """ - Calculates the sample r (or rho) and p-value - - Given a primary trait and a target trait's sample values, - calculates either the pearson r or spearman rho and the p-value - using the corresponding scipy functions. - """ - - this_trait_vals = [] - shared_target_vals = [] - for i, sample in enumerate(target_dataset.group.samplelist): - if sample in this_trait.data: - this_sample_value = this_trait.data[sample].value - target_sample_value = target_vals[i] - this_trait_vals.append(this_sample_value) - shared_target_vals.append(target_sample_value) - - this_trait_vals, shared_target_vals, num_overlap = corr_result_helpers.normalize_values(this_trait_vals, shared_target_vals) - - if type == 'pearson': - sample_r, sample_p = scipy.stats.pearsonr(this_trait_vals, shared_target_vals) - else: - sample_r, sample_p = scipy.stats.spearmanr(this_trait_vals, shared_target_vals) - - if num_overlap > 5: - if scipy.isnan(sample_r): - return None - else: - return [sample_r, sample_p, num_overlap] - -def convert_to_mouse_gene_id(species=None, gene_id=None): - """If the species is rat or human, translate the gene_id to the mouse geneid - - If there is no input gene_id or there's no corresponding mouse gene_id, return None - - """ - if not gene_id: - return None - - mouse_gene_id = None - - if species == 'mouse': - mouse_gene_id = gene_id - - elif species == 'rat': - - query = """SELECT mouse - FROM GeneIDXRef - WHERE rat='%s'""" % escape(gene_id) - - result = g.db.execute(query).fetchone() - if result != None: - mouse_gene_id = result.mouse - - elif species == 'human': - - query = """SELECT mouse - FROM GeneIDXRef - WHERE human='%s'""" % escape(gene_id) - - result = g.db.execute(query).fetchone() - if result != None: - mouse_gene_id = result.mouse - - return mouse_gene_id - -def init_corr_params(start_vars): - method = "pearson" - if 'method' in start_vars: - method = start_vars['method'] - - type = "sample" - if 'type' in start_vars: - type = start_vars['type'] - - return_count = 500 - if 'return_count' in start_vars: - assert(start_vars['return_count'].isdigit()) - return_count = 
int(start_vars['return_count']) - - corr_params = { - 'method' : method, - 'type' : type, - 'return_count' : return_count - } - +from __future__ import absolute_import, division, print_function + +import collections + +import scipy + +from MySQLdb import escape_string as escape + +from flask import g + +from base import data_set +from base.trait import create_trait, retrieve_sample_data + +from wqflask.correlation.show_corr_results import generate_corr_json +from wqflask.correlation import correlation_functions + +from utility import webqtlUtil, helper_functions, corr_result_helpers +from utility.benchmark import Bench + +import utility.logger +logger = utility.logger.getLogger(__name__ ) + +def do_correlation(start_vars): + assert('db' in start_vars) + assert('target_db' in start_vars) + assert('trait_id' in start_vars) + + this_dataset = data_set.create_dataset(dataset_name = start_vars['db']) + target_dataset = data_set.create_dataset(dataset_name = start_vars['target_db']) + this_trait = create_trait(dataset = this_dataset, name = start_vars['trait_id']) + this_trait = retrieve_sample_data(this_trait, this_dataset) + + corr_params = init_corr_params(start_vars) + + corr_results = calculate_results(this_trait, this_dataset, target_dataset, corr_params) + #corr_results = collections.OrderedDict(sorted(corr_results.items(), key=lambda t: -abs(t[1][0]))) + + final_results = [] + for _trait_counter, trait in enumerate(corr_results.keys()[:corr_params['return_count']]): + if corr_params['type'] == "tissue": + [sample_r, num_overlap, sample_p, symbol] = corr_results[trait] + result_dict = { + "trait" : trait, + "sample_r" : sample_r, + "#_strains" : num_overlap, + "p_value" : sample_p, + "symbol" : symbol + } + elif corr_params['type'] == "literature" or corr_params['type'] == "lit": + [gene_id, sample_r] = corr_results[trait] + result_dict = { + "trait" : trait, + "sample_r" : sample_r, + "gene_id" : gene_id + } + else: + [sample_r, sample_p, num_overlap] = corr_results[trait] + result_dict = { + "trait" : trait, + "sample_r" : sample_r, + "#_strains" : num_overlap, + "p_value" : sample_p + } + + final_results.append(result_dict) + + # json_corr_results = generate_corr_json(final_corr_results, this_trait, this_dataset, target_dataset, for_api = True) + + return final_results + +def calculate_results(this_trait, this_dataset, target_dataset, corr_params): + corr_results = {} + + target_dataset.get_trait_data() + + if corr_params['type'] == "tissue": + trait_symbol_dict = this_dataset.retrieve_genes("Symbol") + corr_results = do_tissue_correlation_for_all_traits(this_trait, trait_symbol_dict, corr_params) + sorted_results = collections.OrderedDict(sorted(corr_results.items(), + key=lambda t: -abs(t[1][1]))) + elif corr_params['type'] == "literature" or corr_params['type'] == "lit": #ZS: Just so a user can use either "lit" or "literature" + trait_geneid_dict = this_dataset.retrieve_genes("GeneId") + corr_results = do_literature_correlation_for_all_traits(this_trait, this_dataset, trait_geneid_dict, corr_params) + sorted_results = collections.OrderedDict(sorted(corr_results.items(), + key=lambda t: -abs(t[1][1]))) + else: + for target_trait, target_vals in target_dataset.trait_data.iteritems(): + result = get_sample_r_and_p_values(this_trait, this_dataset, target_vals, target_dataset, corr_params['type']) + if result is not None: + corr_results[target_trait] = result + + sorted_results = collections.OrderedDict(sorted(corr_results.items(), key=lambda t: -abs(t[1][0]))) + + return sorted_results 
+ +def do_tissue_correlation_for_all_traits(this_trait, trait_symbol_dict, corr_params, tissue_dataset_id=1): + #Gets tissue expression values for the primary trait + primary_trait_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(symbol_list = [this_trait.symbol]) + + if this_trait.symbol.lower() in primary_trait_tissue_vals_dict: + primary_trait_tissue_values = primary_trait_tissue_vals_dict[this_trait.symbol.lower()] + + corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(symbol_list=trait_symbol_dict.values()) + + tissue_corr_data = {} + for trait, symbol in trait_symbol_dict.iteritems(): + if symbol and symbol.lower() in corr_result_tissue_vals_dict: + this_trait_tissue_values = corr_result_tissue_vals_dict[symbol.lower()] + + result = correlation_functions.cal_zero_order_corr_for_tiss(primary_trait_tissue_values, + this_trait_tissue_values, + corr_params['method']) + + tissue_corr_data[trait] = [result[0], result[1], result[2], symbol] + + return tissue_corr_data + +def do_literature_correlation_for_all_traits(this_trait, target_dataset, trait_geneid_dict, corr_params): + input_trait_mouse_gene_id = convert_to_mouse_gene_id(target_dataset.group.species.lower(), this_trait.geneid) + + lit_corr_data = {} + for trait, gene_id in trait_geneid_dict.iteritems(): + mouse_gene_id = convert_to_mouse_gene_id(target_dataset.group.species.lower(), gene_id) + + if mouse_gene_id and str(mouse_gene_id).find(";") == -1: + result = g.db.execute( + """SELECT value + FROM LCorrRamin3 + WHERE GeneId1='%s' and + GeneId2='%s' + """ % (escape(mouse_gene_id), escape(input_trait_mouse_gene_id)) + ).fetchone() + if not result: + result = g.db.execute("""SELECT value + FROM LCorrRamin3 + WHERE GeneId2='%s' and + GeneId1='%s' + """ % (escape(mouse_gene_id), escape(input_trait_mouse_gene_id)) + ).fetchone() + if result: + lit_corr = result.value + lit_corr_data[trait] = [gene_id, lit_corr] + else: + lit_corr_data[trait] = [gene_id, 0] + else: + lit_corr_data[trait] = [gene_id, 0] + + return lit_corr_data + +def get_sample_r_and_p_values(this_trait, this_dataset, target_vals, target_dataset, type): + """ + Calculates the sample r (or rho) and p-value + + Given a primary trait and a target trait's sample values, + calculates either the pearson r or spearman rho and the p-value + using the corresponding scipy functions. 
+ """ + + this_trait_vals = [] + shared_target_vals = [] + for i, sample in enumerate(target_dataset.group.samplelist): + if sample in this_trait.data: + this_sample_value = this_trait.data[sample].value + target_sample_value = target_vals[i] + this_trait_vals.append(this_sample_value) + shared_target_vals.append(target_sample_value) + + this_trait_vals, shared_target_vals, num_overlap = corr_result_helpers.normalize_values(this_trait_vals, shared_target_vals) + + if type == 'pearson': + sample_r, sample_p = scipy.stats.pearsonr(this_trait_vals, shared_target_vals) + else: + sample_r, sample_p = scipy.stats.spearmanr(this_trait_vals, shared_target_vals) + + if num_overlap > 5: + if scipy.isnan(sample_r): + return None + else: + return [sample_r, sample_p, num_overlap] + +def convert_to_mouse_gene_id(species=None, gene_id=None): + """If the species is rat or human, translate the gene_id to the mouse geneid + + If there is no input gene_id or there's no corresponding mouse gene_id, return None + + """ + if not gene_id: + return None + + mouse_gene_id = None + + if species == 'mouse': + mouse_gene_id = gene_id + + elif species == 'rat': + + query = """SELECT mouse + FROM GeneIDXRef + WHERE rat='%s'""" % escape(gene_id) + + result = g.db.execute(query).fetchone() + if result != None: + mouse_gene_id = result.mouse + + elif species == 'human': + + query = """SELECT mouse + FROM GeneIDXRef + WHERE human='%s'""" % escape(gene_id) + + result = g.db.execute(query).fetchone() + if result != None: + mouse_gene_id = result.mouse + + return mouse_gene_id + +def init_corr_params(start_vars): + method = "pearson" + if 'method' in start_vars: + method = start_vars['method'] + + type = "sample" + if 'type' in start_vars: + type = start_vars['type'] + + return_count = 500 + if 'return_count' in start_vars: + assert(start_vars['return_count'].isdigit()) + return_count = int(start_vars['return_count']) + + corr_params = { + 'method' : method, + 'type' : type, + 'return_count' : return_count + } + return corr_params \ No newline at end of file diff --git a/wqflask/wqflask/api/gen_menu.py b/wqflask/wqflask/api/gen_menu.py index c7bcb65d..bdcc3bf7 100644 --- a/wqflask/wqflask/api/gen_menu.py +++ b/wqflask/wqflask/api/gen_menu.py @@ -126,9 +126,7 @@ def build_types(species, group): InbredSet.Name = '{1}' AND ProbeFreeze.TissueId = Tissue.Id AND ProbeFreeze.InbredSetId = InbredSet.Id AND - ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id AND - ProbeSetFreeze.public > 0 AND - ProbeSetFreeze.confidentiality < 1 + ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id ORDER BY Tissue.Name""".format(species, group) results = [] @@ -194,9 +192,7 @@ def build_datasets(species, group, type_name): FROM InfoFiles, GenoFreeze, InbredSet WHERE InbredSet.Name = '{}' AND GenoFreeze.InbredSetId = InbredSet.Id AND - InfoFiles.InfoPageName = GenoFreeze.ShortName AND - GenoFreeze.public > 0 AND - GenoFreeze.confidentiality < 1 + InfoFiles.InfoPageName = GenoFreeze.ShortName ORDER BY GenoFreeze.CreateTime DESC""".format(group)).fetchone() if results != None: @@ -214,8 +210,7 @@ def build_datasets(species, group, type_name): Species.Id = InbredSet.SpeciesId AND InbredSet.Name = '{1}' AND ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id and Tissue.Name = '{2}' AND - ProbeFreeze.TissueId = Tissue.Id and ProbeFreeze.InbredSetId = InbredSet.Id AND - ProbeSetFreeze.confidentiality < 1 and ProbeSetFreeze.public > 0 + ProbeFreeze.TissueId = Tissue.Id and ProbeFreeze.InbredSetId = InbredSet.Id ORDER BY ProbeSetFreeze.CreateTime DESC""".format(species, 
group, type_name)).fetchall() datasets = [] diff --git a/wqflask/wqflask/api/mapping.py b/wqflask/wqflask/api/mapping.py index d830cefc..92c27c9b 100644 --- a/wqflask/wqflask/api/mapping.py +++ b/wqflask/wqflask/api/mapping.py @@ -4,7 +4,7 @@ import string from base import data_set from base import webqtlConfig -from base.trait import GeneralTrait, retrieve_sample_data +from base.trait import create_trait, retrieve_sample_data from utility import helper_functions from wqflask.marker_regression import gemma_mapping, rqtl_mapping, qtlreaper_mapping, plink_mapping @@ -18,7 +18,7 @@ def do_mapping_for_api(start_vars): dataset = data_set.create_dataset(dataset_name = start_vars['db']) dataset.group.get_markers() - this_trait = GeneralTrait(dataset = dataset, name = start_vars['trait_id']) + this_trait = create_trait(dataset = dataset, name = start_vars['trait_id']) this_trait = retrieve_sample_data(this_trait, dataset) samples = [] diff --git a/wqflask/wqflask/collect.py b/wqflask/wqflask/collect.py index b22e0004..4fb8e69b 100644 --- a/wqflask/wqflask/collect.py +++ b/wqflask/wqflask/collect.py @@ -14,9 +14,6 @@ import urlparse import simplejson as json -import redis -Redis = redis.StrictRedis() - from flask import (Flask, g, render_template, url_for, request, make_response, redirect, flash, jsonify) @@ -30,8 +27,10 @@ from wqflask import model from utility import Bunch, Struct, hmac from utility.formatting import numify +from utility.redis_tools import get_redis_conn +Redis = get_redis_conn() -from base import trait +from base.trait import create_trait, retrieve_trait_info, jsonable from base.data_set import create_dataset import logging @@ -208,14 +207,14 @@ def view_collection(): if dataset_name == "Temp": group = name.split("_")[2] dataset = create_dataset(dataset_name, dataset_type = "Temp", group_name = group) - trait_ob = trait.GeneralTrait(name=name, dataset=dataset) + trait_ob = create_trait(name=name, dataset=dataset) else: dataset = create_dataset(dataset_name) - trait_ob = trait.GeneralTrait(name=name, dataset=dataset) - trait_ob = trait.retrieve_trait_info(trait_ob, dataset, get_qtl_info=True) + trait_ob = create_trait(name=name, dataset=dataset) + trait_ob = retrieve_trait_info(trait_ob, dataset, get_qtl_info=True) trait_obs.append(trait_ob) - json_version.append(trait.jsonable(trait_ob)) + json_version.append(jsonable(trait_ob)) collection_info = dict(trait_obs=trait_obs, uc = uc) diff --git a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py index 21eb1493..5d74dc9d 100644 --- a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py +++ b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py @@ -37,7 +37,7 @@ from pprint import pformat as pf import reaper -from base.trait import GeneralTrait +from base.trait import create_trait from base import data_set from utility import webqtlUtil, helper_functions, corr_result_helpers from db import webqtlDatabaseFunction @@ -108,7 +108,7 @@ class ComparisonBarChart(object): trait_name, dataset_name = trait_db.split(":") #print("dataset_name:", dataset_name) dataset_ob = data_set.create_dataset(dataset_name) - trait_ob = GeneralTrait(dataset=dataset_ob, + trait_ob = create_trait(dataset=dataset_ob, name=trait_name, cellid=None) self.trait_list.append((trait_ob, dataset_ob)) diff --git a/wqflask/wqflask/correlation/corr_scatter_plot.py b/wqflask/wqflask/correlation/corr_scatter_plot.py index dfb81c54..04ec427d 100644 --- 
a/wqflask/wqflask/correlation/corr_scatter_plot.py +++ b/wqflask/wqflask/correlation/corr_scatter_plot.py @@ -4,7 +4,7 @@ import math from flask import g -from base.trait import GeneralTrait +from base.trait import create_trait from base import data_set from utility import corr_result_helpers from scipy import stats @@ -20,9 +20,9 @@ class CorrScatterPlot(object): self.data_set_1 = data_set.create_dataset(params['dataset_1']) self.data_set_2 = data_set.create_dataset(params['dataset_2']) #self.data_set_3 = data_set.create_dataset(params['dataset_3']) - self.trait_1 = GeneralTrait(name=params['trait_1'], dataset=self.data_set_1) - self.trait_2 = GeneralTrait(name=params['trait_2'], dataset=self.data_set_2) - #self.trait_3 = GeneralTrait(name=params['trait_3'], dataset=self.data_set_3) + self.trait_1 = create_trait(name=params['trait_1'], dataset=self.data_set_1) + self.trait_2 = create_trait(name=params['trait_2'], dataset=self.data_set_2) + #self.trait_3 = create_trait(name=params['trait_3'], dataset=self.data_set_3) samples_1, samples_2, num_overlap = corr_result_helpers.normalize_values_with_samples(self.trait_1.data, self.trait_2.data) diff --git a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py index b099b83d..7eab7184 100644 --- a/wqflask/wqflask/correlation/show_corr_results.py +++ b/wqflask/wqflask/correlation/show_corr_results.py @@ -47,7 +47,7 @@ import reaper from base import webqtlConfig from utility.THCell import THCell from utility.TDCell import TDCell -from base.trait import GeneralTrait +from base.trait import create_trait from base import data_set from utility import webqtlUtil, helper_functions, corr_result_helpers, hmac from db import webqtlDatabaseFunction @@ -97,7 +97,7 @@ class CorrelationResults(object): if start_vars['dataset'] == "Temp": self.dataset = data_set.create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = start_vars['group']) self.trait_id = start_vars['trait_id'] - self.this_trait = GeneralTrait(dataset=self.dataset, + self.this_trait = create_trait(dataset=self.dataset, name=self.trait_id, cellid=None) else: @@ -199,7 +199,9 @@ class CorrelationResults(object): range_chr_as_int = order_id for _trait_counter, trait in enumerate(self.correlation_data.keys()[:self.return_number]): - trait_object = GeneralTrait(dataset=self.target_dataset, name=trait, get_qtl_info=True, get_sample_info=False) + trait_object = create_trait(dataset=self.target_dataset, name=trait, get_qtl_info=True, get_sample_info=False) + if not trait_object: + continue if self.target_dataset.type == "ProbeSet" or self.target_dataset.type == "Geno": #ZS: Convert trait chromosome to an int for the location range option diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py index b5c45d05..2b9467d1 100644 --- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py +++ b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py @@ -43,14 +43,16 @@ from pprint import pformat as pf import reaper -import redis -Redis = redis.StrictRedis() +from utility.redis_tools import get_redis_conn +Redis = get_redis_conn() +THIRTY_DAYS = 60 * 60 * 24 * 30 from utility.THCell import THCell from utility.TDCell import TDCell from base.trait import GeneralTrait from base import data_set from utility import webqtlUtil, helper_functions, corr_result_helpers + from db import webqtlDatabaseFunction import utility.webqtlUtil #this is for parallel computing only. 
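The Redis changes in this file swap the bare StrictRedis() constructor for the shared get_redis_conn() helper added in the earlier commit and give cached PCA traits a finite lifetime. A minimal caller-side sketch of that caching pattern, assuming only the helpers introduced in this patch (the function name cache_pca_trait and its arguments are illustrative, not part of the change itself):

from utility.redis_tools import get_redis_conn

Redis = get_redis_conn()            # StrictRedis on port 6380, per redis_tools.py
THIRTY_DAYS = 60 * 60 * 24 * 30     # expiry applied to temporary PCA trait values

def cache_pca_trait(trait_id, sample_values):
    # calculate_pca() stores sample values as a space-separated string, with "x"
    # marking missing samples; passing ex= lets Redis discard the key after
    # thirty days instead of keeping every generated PCA trait forever.
    vals_string = " ".join("x" if val is None else str(val) for val in sample_values)
    Redis.set(trait_id, vals_string, ex=THIRTY_DAYS)
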
from wqflask.correlation import correlation_functions @@ -204,20 +206,6 @@ class CorrelationMatrix(object): samples = self.all_sample_list, sample_data = self.sample_data,) # corr_results = [result[1] for result in result_row for result_row in self.corr_results]) - - def get_trait_db_obs(self, trait_db_list): - - self.trait_list = [] - for i, trait_db in enumerate(trait_db_list): - if i == (len(trait_db_list) - 1): - break - trait_name, dataset_name = trait_db.split(":") - #print("dataset_name:", dataset_name) - dataset_ob = data_set.create_dataset(dataset_name) - trait_ob = GeneralTrait(dataset=dataset_ob, - name=trait_name, - cellid=None) - self.trait_list.append((trait_ob, dataset_ob)) def calculate_pca(self, cols, corr_eigen_value, corr_eigen_vectors): base = importr('base') @@ -257,7 +245,7 @@ class CorrelationMatrix(object): this_vals_string += "x " this_vals_string = this_vals_string[:-1] - Redis.set(trait_id, this_vals_string) + Redis.set(trait_id, this_vals_string, ex=THIRTY_DAYS) self.pca_trait_ids.append(trait_id) return pca diff --git a/wqflask/wqflask/ctl/ctl_analysis.py b/wqflask/wqflask/ctl/ctl_analysis.py index 4415b86a..35067036 100644 --- a/wqflask/wqflask/ctl/ctl_analysis.py +++ b/wqflask/wqflask/ctl/ctl_analysis.py @@ -17,7 +17,7 @@ import csv import itertools from base import data_set -from base import trait as TRAIT +from base.trait import create_trait, retrieve_sample_data from utility import helper_functions from utility.tools import locate, GN2_BRANCH_URL @@ -122,8 +122,8 @@ class CTL(object): logger.debug("retrieving data for", trait) if trait != "": ts = trait.split(':') - gt = TRAIT.GeneralTrait(name = ts[0], dataset_name = ts[1]) - gt = TRAIT.retrieve_sample_data(gt, dataset, individuals) + gt = create_trait(name = ts[0], dataset_name = ts[1]) + gt = retrieve_sample_data(gt, dataset, individuals) for ind in individuals: if ind in gt.data.keys(): traits.append(gt.data[ind].value) @@ -180,8 +180,8 @@ class CTL(object): logger.debug(significant[0][x], significant[1][x], significant[2][x]) # Debug to console tsS = significant[0][x].split(':') # Source tsT = significant[2][x].split(':') # Target - gtS = TRAIT.GeneralTrait(name = tsS[0], dataset_name = tsS[1]) # Retrieve Source info from the DB - gtT = TRAIT.GeneralTrait(name = tsT[0], dataset_name = tsT[1]) # Retrieve Target info from the DB + gtS = create_trait(name = tsS[0], dataset_name = tsS[1]) # Retrieve Source info from the DB + gtT = create_trait(name = tsT[0], dataset_name = tsT[1]) # Retrieve Target info from the DB self.addNode(gtS) self.addNode(gtT) self.addEdge(gtS, gtT, significant, x) diff --git a/wqflask/wqflask/do_search.py b/wqflask/wqflask/do_search.py index b0ca5ced..1e15d28f 100644 --- a/wqflask/wqflask/do_search.py +++ b/wqflask/wqflask/do_search.py @@ -34,10 +34,7 @@ class DoSearch(object): self.search_type = search_type if self.dataset: - logger.debug("self.dataset is boo: ", type(self.dataset), pf(self.dataset)) - logger.debug("self.dataset.group is: ", pf(self.dataset.group)) #Get group information for dataset and the species id - self.species_id = webqtlDatabaseFunction.retrieve_species_id(self.dataset.group.name) def execute(self, query): @@ -54,10 +51,6 @@ class DoSearch(object): return keyword - #def escape(self, stringy): - # """Shorter name than self.db_conn.escape_string""" - # return escape(str(stringy)) - def mescape(self, *items): """Multiple escape""" escaped = [escape(str(item)) for item in items] @@ -71,8 +64,6 @@ class DoSearch(object): @classmethod def get_search(cls, 
search_type): - logger.debug("search_types are:", pf(cls.search_types)) - search_type_string = search_type['dataset_type'] if 'key' in search_type and search_type['key'] != None: search_type_string += '_' + search_type['key'] @@ -648,7 +639,7 @@ class CisTransLrsSearch(DoSearch): escape(self.dataset.type), chromosome) else: - location_clause = "(ABS(%s.Mb-Geno.Mb) %s %s and %s.Chr = Geno.Chr) or (%s.Chr != Geno.Chr)" % (escape(self.dataset.type), the_operator, escape(str(self.mb_buffer)), escape(self.dataset.type)) + location_clause = "(ABS(%s.Mb-Geno.Mb) %s %s and %s.Chr = Geno.Chr) or (%s.Chr != Geno.Chr)" % (escape(self.dataset.type), the_operator, escape(str(self.mb_buffer)), escape(self.dataset.type), escape(self.dataset.type)) where_clause = sub_clause + """ %sXRef.Locus = Geno.name and Geno.SpeciesId = %s and diff --git a/wqflask/wqflask/gsearch.py b/wqflask/wqflask/gsearch.py index 04e3d578..c65a1415 100644 --- a/wqflask/wqflask/gsearch.py +++ b/wqflask/wqflask/gsearch.py @@ -4,7 +4,7 @@ import json from flask import Flask, g from base.data_set import create_dataset -from base.trait import GeneralTrait +from base.trait import create_trait from db import webqtlDatabaseFunction from base import webqtlConfig @@ -96,7 +96,9 @@ class GSearch(object): #dataset = create_dataset(line[3], "ProbeSet", get_samplelist=False) #trait_id = line[4] #with Bench("Building trait object"): - trait_ob = GeneralTrait(dataset_name=this_trait['dataset'], name=this_trait['name'], get_qtl_info=True, get_sample_info=False) + trait_ob = create_trait(dataset_name=this_trait['dataset'], name=this_trait['name'], get_qtl_info=True, get_sample_info=False) + if not trait_ob: + continue max_lrs_text = "N/A" if trait_ob.locus_chr != "" and trait_ob.locus_mb != "": max_lrs_text = "Chr" + str(trait_ob.locus_chr) + ": " + str(trait_ob.locus_mb) @@ -210,13 +212,12 @@ class GSearch(object): if line[11] != "" and line[11] != None: this_trait['additive'] = '%.3f' % line[11] - #dataset = create_dataset(line[2], "Publish") - #trait_id = line[3] - #this_trait = GeneralTrait(dataset=dataset, name=trait_id, get_qtl_info=True, get_sample_info=False) this_trait['max_lrs_text'] = "N/A" + trait_ob = create_trait(dataset_name=this_trait['dataset'], name=this_trait['name'], get_qtl_info=True, get_sample_info=False) + if not trait_ob: + continue if this_trait['dataset'] == this_trait['group'] + "Publish": try: - trait_ob = GeneralTrait(dataset_name=this_trait['dataset'], name=this_trait['name'], get_qtl_info=True, get_sample_info=False) if trait_ob.locus_chr != "" and trait_ob.locus_mb != "": this_trait['max_lrs_text'] = "Chr" + str(trait_ob.locus_chr) + ": " + str(trait_ob.locus_mb) except: diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py index a648667b..74fa4329 100644 --- a/wqflask/wqflask/marker_regression/display_mapping_results.py +++ b/wqflask/wqflask/marker_regression/display_mapping_results.py @@ -246,6 +246,12 @@ class DisplayMappingResults(object): if 'output_files' in start_vars: self.output_files = ",".join(start_vars['output_files']) + self.categorical_vars = "" + self.perm_strata = "" + if 'perm_strata' in start_vars.keys() and 'categorical_vars' in start_vars.keys(): + self.categorical_vars = start_vars['categorical_vars'] + self.perm_strata = start_vars['perm_strata'] + self.selectedChr = int(start_vars['selected_chr']) self.strainlist = start_vars['samples'] diff --git a/wqflask/wqflask/marker_regression/gemma_mapping.py 
b/wqflask/wqflask/marker_regression/gemma_mapping.py index e2b15c26..88d27517 100644 --- a/wqflask/wqflask/marker_regression/gemma_mapping.py +++ b/wqflask/wqflask/marker_regression/gemma_mapping.py @@ -1,7 +1,7 @@ import os, math, string, random, json from base import webqtlConfig -from base.trait import GeneralTrait +from base.trait import create_trait from base.data_set import create_dataset from utility.tools import flat_files, GEMMA_COMMAND, GEMMA_WRAPPER_COMMAND, TEMPDIR, WEBSERVER_MODE @@ -129,7 +129,7 @@ def gen_covariates_file(this_dataset, covariates, samples): this_covariate_data = [] trait_name = covariate.split(":")[0] dataset_ob = create_dataset(covariate.split(":")[1]) - trait_ob = GeneralTrait(dataset=dataset_ob, + trait_ob = create_trait(dataset=dataset_ob, name=trait_name, cellid=None) diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py index e4a4d127..c5590a85 100644 --- a/wqflask/wqflask/marker_regression/rqtl_mapping.py +++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py @@ -6,7 +6,7 @@ import json from flask import g from base.webqtlConfig import TMPDIR -from base.trait import GeneralTrait +from base.trait import create_trait from base.data_set import create_dataset from utility import webqtlUtil from utility.tools import locate, TEMPDIR @@ -86,7 +86,6 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec cross_object = add_phenotype(cross_object, pheno_string, "the_pheno") # Add the phenotype cross_object = add_names(cross_object, names_string, "the_names") # Add the phenotype logger.info("Added pheno and names"); - # Scan for QTLs marker_covars = create_marker_covariates(control_marker, cross_object) # Create the additive covariate markers logger.info("Marker covars done"); if cofactors != "": @@ -115,6 +114,7 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec else: if do_control == "true" or cofactors != "": logger.info("Using covariate"); result_data_frame = scanone(cross_object, pheno = "the_pheno", addcovar = covars, model=model, method=method) + ro.r('save.image(file = "/home/zas1024/gn2-zach/itp_cofactor_test.RData")') else: logger.info("No covariates"); result_data_frame = scanone(cross_object, pheno = "the_pheno", model=model, method=method) @@ -295,7 +295,7 @@ def add_cofactors(cross, this_dataset, covariates, samples): covar_as_string = "c(" trait_name = covariate.split(":")[0] dataset_ob = create_dataset(covariate.split(":")[1]) - trait_ob = GeneralTrait(dataset=dataset_ob, + trait_ob = create_trait(dataset=dataset_ob, name=trait_name, cellid=None) @@ -321,27 +321,27 @@ def add_cofactors(cross, this_dataset, covariates, samples): datatype = get_trait_data_type(covariate) logger.info("Covariate: " + covariate + " is of type: " + datatype); if(datatype == "categorical"): # Cat variable - logger.info("call of add_categorical_covar"); - cross, col_names = add_categorical_covar(cross, covar_as_string, i) # Expand and add it to the cross - logger.info("add_categorical_covar returned"); - for z, col_name in enumerate(col_names): # Go through the additional covar names + logger.info("call of add_categorical_covar"); + cross, col_names = add_categorical_covar(cross, covar_as_string, i) # Expand and add it to the cross + logger.info("add_categorical_covar returned"); + for z, col_name in enumerate(col_names): # Go through the additional covar names + if i < (len(covariate_list) - 1): + covar_name_string += '"' + col_name + '", ' + else: 
+ if(z < (len(col_names) -1)): + covar_name_string += '"' + col_name + '", ' + else: + covar_name_string += '"' + col_name + '"' + + logger.info("covar_name_string:" + covar_name_string) + else: + col_name = "covar_" + str(i) + cross = add_phenotype(cross, covar_as_string, col_name) if i < (len(covariate_list) - 1): - covar_name_string += '"' + col_name + '", ' - else: - if(z < (len(col_names) -1)): covar_name_string += '"' + col_name + '", ' - else: + else: covar_name_string += '"' + col_name + '"' - logger.info("covar_name_string:" + covar_name_string); - else: - col_name = "covar_" + str(i) - cross = add_phenotype(cross, covar_as_string, col_name) - if i < (len(covariate_list) - 1): - covar_name_string += '"' + col_name + '", ' - else: - covar_name_string += '"' + col_name + '"' - covar_name_string += ")" logger.info("covar_name_string:" + covar_name_string); covars_ob = pull_var("trait_covars", cross, covar_name_string) @@ -350,9 +350,13 @@ def add_cofactors(cross, this_dataset, covariates, samples): def create_marker_covariates(control_marker, cross): ro.globalenv["the_cross"] = cross ro.r('genotypes <- pull.geno(the_cross)') # Get the genotype matrix - userinputS = control_marker.replace(" ", "").split(",") # TODO: sanitize user input, Never Ever trust a user - covariate_names = ', '.join('"{0}"'.format(w) for w in userinputS) - ro.r('covnames <- c(' + covariate_names + ')') + userinput_sanitized = control_marker.replace(" ", "").split(",") # TODO: sanitize user input, Never Ever trust a user + logger.debug(userinput_sanitized) + if len(userinput_sanitized) > 0: + covariate_names = ', '.join('"{0}"'.format(w) for w in userinput_sanitized) + ro.r('covnames <- c(' + covariate_names + ')') + else: + ro.r('covnames <- c()') ro.r('covInGeno <- which(covnames %in% colnames(genotypes))') ro.r('covnames <- covnames[covInGeno]') ro.r("cat('covnames (purged): ', covnames,'\n')") @@ -404,16 +408,4 @@ def process_rqtl_results(result, species_name): # TODO: how to make this marker['lod_score'] = output[i][2] qtl_results.append(marker) - return qtl_results - -def get_trait_data_type(trait_db_string): - # Get a trait's type (numeric, categorical, etc) from the DB - the_query = "SELECT value FROM TraitMetadata WHERE type='trait_data_type'" - results_json = g.db.execute(the_query).fetchone() - - results_ob = json.loads(results_json[0]) - - if trait_db_string in results_ob: - return results_ob[trait_db_string] - else: - return "numeric" + return qtl_results \ No newline at end of file diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py index 5f7710ab..0711b852 100644 --- a/wqflask/wqflask/marker_regression/run_mapping.py +++ b/wqflask/wqflask/marker_regression/run_mapping.py @@ -161,7 +161,7 @@ class RunMapping(object): self.num_perm = 0 self.perm_output = [] self.bootstrap_results = [] - self.covariates = start_vars['covariates'] if "covariates" in start_vars else None + self.covariates = start_vars['covariates'] if "covariates" in start_vars else "" #ZS: This is passed to GN1 code for single chr mapping self.selected_chr = -1 @@ -467,6 +467,7 @@ class RunMapping(object): #mapping_scale = self.mapping_scale, #chromosomes = chromosome_mb_lengths, #qtl_results = self.qtl_results, + categorical_vars = self.categorical_vars, chr_lengths = chr_lengths, num_perm = self.num_perm, perm_results = self.perm_output, diff --git a/wqflask/wqflask/network_graph/network_graph.py b/wqflask/wqflask/network_graph/network_graph.py index 152e4168..f41f3017 
100644 --- a/wqflask/wqflask/network_graph/network_graph.py +++ b/wqflask/wqflask/network_graph/network_graph.py @@ -44,7 +44,7 @@ import reaper from utility.THCell import THCell from utility.TDCell import TDCell -from base.trait import GeneralTrait +from base.trait import create_trait from base import data_set from utility import webqtlUtil, helper_functions, corr_result_helpers from utility.tools import GN2_BRANCH_URL @@ -217,7 +217,7 @@ class NetworkGraph(object): break trait_name, dataset_name = trait_db.split(":") dataset_ob = data_set.create_dataset(dataset_name) - trait_ob = GeneralTrait(dataset=dataset_ob, + trait_ob = create_trait(dataset=dataset_ob, name=trait_name, cellid=None) self.trait_list.append((trait_ob, dataset_ob)) \ No newline at end of file diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py index 8f702d58..de4b01eb 100644 --- a/wqflask/wqflask/search_results.py +++ b/wqflask/wqflask/search_results.py @@ -1,15 +1,9 @@ -# from __future__ import absolute_import, print_function, division +from __future__ import absolute_import, print_function, division - -import os -import cPickle import re import uuid from math import * import time -import math -import datetime -import collections import re import requests @@ -18,18 +12,16 @@ from pprint import pformat as pf import json from base.data_set import create_dataset -from base import trait +from base.trait import create_trait from wqflask import parser from wqflask import do_search -from utility import webqtlUtil,tools from db import webqtlDatabaseFunction -from flask import render_template, Flask, g +from flask import Flask, g -from utility import formatting -from utility import hmac +from utility import hmac, helper_functions from utility.tools import GN2_BASE_URL -from utility.type_checking import is_float, is_int, is_str, get_float, get_int, get_string +from utility.type_checking import is_str from utility.logger import getLogger logger = getLogger(__name__ ) @@ -86,7 +78,7 @@ views.py). try: self.search() except: - self.search_term_exists = False + self.search_term_exists = False if self.search_term_exists: self.gen_search_result() @@ -113,50 +105,49 @@ views.py). 
trait_dict = {} trait_id = result[0] - trait_dict['index'] = index + 1 - this_trait = trait.GeneralTrait(dataset=self.dataset, name=trait_id, get_qtl_info=True, get_sample_info=False) - trait_dict['name'] = this_trait.name - if this_trait.dataset.type == "Publish": - trait_dict['display_name'] = this_trait.display_name - else: - trait_dict['display_name'] = this_trait.name - trait_dict['dataset'] = this_trait.dataset.name - trait_dict['hmac'] = hmac.data_hmac('{}:{}'.format(this_trait.name, this_trait.dataset.name)) - if this_trait.dataset.type == "ProbeSet": - trait_dict['symbol'] = this_trait.symbol - trait_dict['description'] = this_trait.description_display.decode('utf-8', 'replace') - trait_dict['location'] = this_trait.location_repr - trait_dict['mean'] = "N/A" - trait_dict['additive'] = "N/A" - if this_trait.mean != "" and this_trait.mean != None: - trait_dict['mean'] = '%.3f' % this_trait.mean - trait_dict['lrs_score'] = this_trait.LRS_score_repr - trait_dict['lrs_location'] = this_trait.LRS_location_repr - if this_trait.additive != "": - trait_dict['additive'] = '%.3f' % this_trait.additive - elif this_trait.dataset.type == "Geno": - trait_dict['location'] = this_trait.location_repr - elif this_trait.dataset.type == "Publish": - trait_dict['description'] = this_trait.description_display - trait_dict['authors'] = this_trait.authors - trait_dict['pubmed_id'] = "N/A" - if this_trait.pubmed_id: - trait_dict['pubmed_id'] = this_trait.pubmed_id - trait_dict['pubmed_link'] = this_trait.pubmed_link - trait_dict['pubmed_text'] = this_trait.pubmed_text - trait_dict['mean'] = "N/A" - if this_trait.mean != "" and this_trait.mean != None: - trait_dict['mean'] = '%.3f' % this_trait.mean - trait_dict['lrs_score'] = this_trait.LRS_score_repr - trait_dict['lrs_location'] = this_trait.LRS_location_repr - trait_dict['additive'] = "N/A" - if this_trait.additive != "": - trait_dict['additive'] = '%.3f' % this_trait.additive - trait_list.append(trait_dict) - #json_trait_list.append(trait.jsonable_table_row(this_trait, self.dataset.name, index + 1)) + this_trait = create_trait(dataset=self.dataset, name=trait_id, get_qtl_info=True, get_sample_info=False) + if this_trait: + trait_dict['index'] = index + 1 + trait_dict['name'] = this_trait.name + if this_trait.dataset.type == "Publish": + trait_dict['display_name'] = this_trait.display_name + else: + trait_dict['display_name'] = this_trait.name + trait_dict['dataset'] = this_trait.dataset.name + trait_dict['hmac'] = hmac.data_hmac('{}:{}'.format(this_trait.name, this_trait.dataset.name)) + if this_trait.dataset.type == "ProbeSet": + trait_dict['symbol'] = this_trait.symbol + trait_dict['description'] = this_trait.description_display.decode('utf-8', 'replace') + trait_dict['location'] = this_trait.location_repr + trait_dict['mean'] = "N/A" + trait_dict['additive'] = "N/A" + if this_trait.mean != "" and this_trait.mean != None: + trait_dict['mean'] = '%.3f' % this_trait.mean + trait_dict['lrs_score'] = this_trait.LRS_score_repr + trait_dict['lrs_location'] = this_trait.LRS_location_repr + if this_trait.additive != "": + trait_dict['additive'] = '%.3f' % this_trait.additive + elif this_trait.dataset.type == "Geno": + trait_dict['location'] = this_trait.location_repr + elif this_trait.dataset.type == "Publish": + trait_dict['description'] = this_trait.description_display + trait_dict['authors'] = this_trait.authors + trait_dict['pubmed_id'] = "N/A" + if this_trait.pubmed_id: + trait_dict['pubmed_id'] = this_trait.pubmed_id + trait_dict['pubmed_link'] = 
this_trait.pubmed_link + trait_dict['pubmed_text'] = this_trait.pubmed_text + trait_dict['mean'] = "N/A" + if this_trait.mean != "" and this_trait.mean != None: + trait_dict['mean'] = '%.3f' % this_trait.mean + trait_dict['lrs_score'] = this_trait.LRS_score_repr + trait_dict['lrs_location'] = this_trait.LRS_location_repr + trait_dict['additive'] = "N/A" + if this_trait.additive != "": + trait_dict['additive'] = '%.3f' % this_trait.additive + trait_list.append(trait_dict) self.trait_list = json.dumps(trait_list) - #self.json_trait_list = json.dumps(json_trait_list) def search(self): """ @@ -234,7 +225,6 @@ views.py). self.header_fields = the_search.header_fields def get_search_ob(self, a_search): - logger.debug("[kodak] item is:", pf(a_search)) search_term = a_search['search_term'] search_operator = a_search['separator'] search_type = {} @@ -243,12 +233,10 @@ views.py). search_type['key'] = a_search['key'].upper() else: search_type['key'] = None - logger.debug("search_type is:", pf(search_type)) search_ob = do_search.DoSearch.get_search(search_type) if search_ob: search_class = getattr(do_search, search_ob) - logger.debug("search_class is: ", pf(search_class)) the_search = search_class(search_term, search_operator, self.dataset, diff --git a/wqflask/wqflask/show_trait/export_trait_data.py b/wqflask/wqflask/show_trait/export_trait_data.py index 107f87c6..253c887b 100644 --- a/wqflask/wqflask/show_trait/export_trait_data.py +++ b/wqflask/wqflask/show_trait/export_trait_data.py @@ -4,7 +4,7 @@ import simplejson as json from pprint import pformat as pf -from base.trait import GeneralTrait +from base.trait import create_trait from base import data_set def export_sample_table(targs): @@ -26,7 +26,7 @@ def export_sample_table(targs): def get_export_metadata(trait_id, dataset_name): dataset = data_set.create_dataset(dataset_name) - this_trait = GeneralTrait(dataset=dataset, + this_trait = create_trait(dataset=dataset, name=trait_id, cellid=None, get_qtl_info=False) diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index 29b2f77e..c77e247f 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -10,9 +10,6 @@ import json as json from collections import OrderedDict -import redis -Redis = redis.StrictRedis() - import numpy as np import scipy.stats as ss @@ -21,11 +18,15 @@ from flask import Flask, g from base import webqtlConfig from base import webqtlCaseData from wqflask.show_trait.SampleList import SampleList -from utility import webqtlUtil, Plot, Bunch, helper_functions -from utility.tools import locate_ignore_error -from base.trait import GeneralTrait +from base.trait import create_trait from base import data_set from db import webqtlDatabaseFunction +from utility import webqtlUtil, Plot, Bunch, helper_functions +from utility.authentication_tools import check_owner +from utility.tools import locate_ignore_error +from utility.redis_tools import get_redis_conn, get_resource_id +Redis = get_redis_conn() +ONE_YEAR = 60 * 60 * 24 * 365 from pprint import pformat as pf @@ -55,9 +56,9 @@ class ShowTrait(object): self.temp_group = kw['group'] self.dataset = data_set.create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = self.temp_group) # Put values in Redis so they can be looked up later if added to a collection - Redis.set(self.trait_id, kw['trait_paste']) + Redis.set(self.trait_id, kw['trait_paste'], ex=ONE_YEAR) self.trait_vals = kw['trait_paste'].split() - self.this_trait = 
GeneralTrait(dataset=self.dataset, + self.this_trait = create_trait(dataset=self.dataset, name=self.trait_id, cellid=None) else: @@ -66,11 +67,13 @@ class ShowTrait(object): self.temp_species = self.trait_id.split("_")[1] self.temp_group = self.trait_id.split("_")[2] self.dataset = data_set.create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = self.temp_group) - self.this_trait = GeneralTrait(dataset=self.dataset, + self.this_trait = create_trait(dataset=self.dataset, name=self.trait_id, cellid=None) self.trait_vals = Redis.get(self.trait_id).split() + self.resource_id = check_owner(self.dataset, self.trait_id) + #ZS: Get verify/rna-seq link URLs try: blatsequence = self.this_trait.sequence diff --git a/wqflask/wqflask/templates/admin/group_manager.html b/wqflask/wqflask/templates/admin/group_manager.html index ac5c1350..b7df1aad 100644 --- a/wqflask/wqflask/templates/admin/group_manager.html +++ b/wqflask/wqflask/templates/admin/group_manager.html @@ -2,17 +2,25 @@ {% block title %}Group Manager{% endblock %} {% block content %} - {{ header("List of groups", "" )}} -
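The search_results.py and show_trait.py hunks above replace direct GeneralTrait construction with the create_trait factory, which can return None when the logged-in user has no view permission for a trait. A minimal sketch of the calling pattern those hunks rely on; the helper name, dataset and trait_ids below are placeholders, not identifiers from this patch:

    # Hypothetical helper illustrating the guard expected around create_trait:
    # a None return means the current user may not view that trait, so it is skipped.
    from base.trait import create_trait

    def visible_traits(dataset, trait_ids):
        traits = []
        for trait_id in trait_ids:
            trait_ob = create_trait(dataset=dataset, name=trait_id, get_qtl_info=True)
            if trait_ob:
                traits.append(trait_ob)
        return traits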
-
+ +
+ {% if admin_groups|length == 0 and user_groups|length == 0 %} +

You are not currently a member or administrator of any groups.

+
+ + {% else %}

Admin Groups


- + {% if admin_groups|length == 0 %} +

You are not currently an administrator of any groups.

+ {% else %} +
@@ -26,7 +34,7 @@ {% for group in admin_groups %} - + @@ -36,12 +44,16 @@ {% endfor %}
{{ loop.index }} {{ group.name }} {{ group.admins|length + group.users|length }}
+ {% endif %}

User Groups


- + {% if user_groups|length == 0 %} +

You are not currently a member of any groups.

+ {% else %} +
@@ -65,12 +77,12 @@ {% endfor %}
+ {% endif %} + {% endif %}
- - {% endblock %} @@ -79,7 +91,6 @@ - {% endblock %} diff --git a/wqflask/wqflask/templates/correlation_page.html b/wqflask/wqflask/templates/correlation_page.html index 1c84239c..71705390 100644 --- a/wqflask/wqflask/templates/correlation_page.html +++ b/wqflask/wqflask/templates/correlation_page.html @@ -1,4 +1,5 @@ {% extends "base.html" %} +{% block title %}Correlation Results{% endblock %} {% block css %} diff --git a/wqflask/wqflask/templates/email/verification.txt b/wqflask/wqflask/templates/email/verification.txt deleted file mode 100644 index 76149a3a..00000000 --- a/wqflask/wqflask/templates/email/verification.txt +++ /dev/null @@ -1,7 +0,0 @@ -Thank you for signing up for GeneNetwork. - -We need to verify your email address. - -To do that please click the following link, or cut and paste it into your browser window: - -{{ url_for_hmac("verify_email", code = verification_code, _external=True )}} diff --git a/wqflask/wqflask/templates/gsearch_pheno.html b/wqflask/wqflask/templates/gsearch_pheno.html index 05b2f988..04b45659 100644 --- a/wqflask/wqflask/templates/gsearch_pheno.html +++ b/wqflask/wqflask/templates/gsearch_pheno.html @@ -31,7 +31,7 @@

-
+
diff --git a/wqflask/wqflask/templates/mapping_results.html b/wqflask/wqflask/templates/mapping_results.html index b4429b46..c5d49168 100644 --- a/wqflask/wqflask/templates/mapping_results.html +++ b/wqflask/wqflask/templates/mapping_results.html @@ -41,7 +41,8 @@ - + + @@ -464,13 +465,27 @@ {% if mapping_method != "gemma" and mapping_method != "plink" %} $('#download_perm').click(function(){ - var num_perm, perm_data; - num_perm = js_data.num_perm - perm_data = js_data.perm_results - json_perm_data = JSON.stringify(perm_data); - $('input[name=perm_results]').val(json_perm_data); - $('#marker_regression_form').attr('action', '/export_perm_data'); - return $('#marker_regression_form').submit(); + perm_info_dict = { + perm_data: js_data.perm_results, + num_perm: "{{ nperm }}", + trait_name: "{{ this_trait.display_name }}", + trait_description: "{{ this_trait.description_display }}", + cofactors: "{{ covariates }}", + n_samples: {{ n_samples }}, + n_genotypes: {{ qtl_results|length }}, + {% if genofile_string is defined %} + genofile: "{{ genofile_string }}", + {% else %} + genofile: "", + {% endif %} + units_linkage: "{{ LRS_LOD }}", + strat_cofactors: js_data.categorical_vars + } + json_perm_data = JSON.stringify(perm_info_dict); + + $('input[name=perm_info]').val(json_perm_data); + $('#marker_regression_form').attr('action', '/export_perm_data'); + return $('#marker_regression_form').submit(); }); modebar_options = { diff --git a/wqflask/wqflask/user_login.py b/wqflask/wqflask/user_login.py index edd272c2..cfee0079 100644 --- a/wqflask/wqflask/user_login.py +++ b/wqflask/wqflask/user_login.py @@ -12,9 +12,6 @@ import requests import simplejson as json -import redis # used for collections -Redis = redis.StrictRedis() - from flask import (Flask, g, render_template, url_for, request, make_response, redirect, flash, abort) @@ -23,7 +20,8 @@ from wqflask import pbkdf2 from wqflask.user_session import UserSession from utility import hmac -from utility.redis_tools import is_redis_available, get_user_id, get_user_by_unique_column, set_user_attribute, save_user, save_verification_code, check_verification_code, get_user_collections, save_collections +from utility.redis_tools import is_redis_available, get_redis_conn, get_user_id, get_user_by_unique_column, set_user_attribute, save_user, save_verification_code, check_verification_code, get_user_collections, save_collections +Redis = get_redis_conn() from utility.logger import getLogger logger = getLogger(__name__) @@ -127,7 +125,7 @@ def send_email(toaddr, msg, fromaddr="no-reply@genenetwork.org"): server.quit() logger.info("Successfully sent email to "+toaddr) -def send_verification_email(user_details, template_name = "email/verification.txt", key_prefix = "verification_code", subject = "GeneNetwork email verification"): +def send_verification_email(user_details, template_name = "email/user_verification.txt", key_prefix = "verification_code", subject = "GeneNetwork e-mail verification"): verification_code = str(uuid.uuid4()) key = key_prefix + ":" + verification_code @@ -141,6 +139,21 @@ def send_verification_email(user_details, template_name = "email/verification.tx send_email(recipient, subject, body) return {"recipient": recipient, "subject": subject, "body": body} +@app.route("/manage/verify_email") +def verify_email(): + if 'code' in request.args: + user_details = check_verification_code(request.args['code']) + if user_details: + # As long as they have access to the email account + # We might as well log them in + session_id_signed = 
get_signed_session_id(user_details) + flash("Thank you for logging in {}.".format(user_details['full_name']), "alert-success") + response = make_response(redirect(url_for('index_page', import_collections = import_col, anon_id = anon_id))) + response.set_cookie(UserSession.user_cookie_name, session_id_signed, max_age=None) + return response + else: + flash("Invalid code: Password reset code does not exist or might have expired!", "error") + @app.route("/n/login", methods=('GET', 'POST')) def login(): params = request.form if request.form else request.args @@ -204,7 +217,7 @@ def login(): response.set_cookie(UserSession.user_cookie_name, session_id_signed, max_age=None) return response else: - email_ob = send_verification_email(user_details) + email_ob = send_verification_email(user_details, template_name = "email/user_verification.txt") return render_template("newsecurity/verification_still_needed.html", subject=email_ob['subject']) else: # Incorrect password #ZS: It previously seemed to store that there was an incorrect log-in attempt here, but it did so in the MySQL DB so this might need to be reproduced with Redis @@ -374,16 +387,13 @@ def password_reset(): hmac = request.args.get('hm') if verification_code: - user_email = check_verification_code(verification_code) - if user_email: - user_details = get_user_by_unique_column('email_address', user_email) - if user_details: - return render_template( - "new_security/password_reset.html", user_encode=user_details["email_address"]) - else: - flash("Invalid code: User no longer exists!", "error") + user_details = check_verification_code(verification_code) + if user_details: + return render_template( + "new_security/password_reset.html", user_encode=user_details["email_address"]) else: flash("Invalid code: Password reset code does not exist or might have expired!", "error") + return redirect(url_for("login")) else: return redirect(url_for("login")) @@ -394,6 +404,7 @@ def password_reset_step2(): errors = [] user_email = request.form['user_encode'] + user_id = get_user_id("email_address", user_email) password = request.form['password'] encoded_password = set_password(password) @@ -401,9 +412,7 @@ def password_reset_step2(): set_user_attribute(user_id, "password", encoded_password) flash("Password changed successfully. 
You can now sign in.", "alert-info") - response = make_response(redirect(url_for('login'))) - - return response + return redirect(url_for('login')) def register_user(params): thank_you_mode = False diff --git a/wqflask/wqflask/user_session.py b/wqflask/wqflask/user_session.py index 50419146..ec6d4ae3 100644 --- a/wqflask/wqflask/user_session.py +++ b/wqflask/wqflask/user_session.py @@ -6,10 +6,6 @@ import uuid import simplejson as json -import redis # used for collections -Redis = redis.StrictRedis() - - from flask import (Flask, g, render_template, url_for, request, make_response, redirect, flash, abort) @@ -17,7 +13,8 @@ from wqflask import app from utility import hmac #from utility.elasticsearch_tools import get_elasticsearch_connection -from utility.redis_tools import get_user_id, get_user_by_unique_column, get_user_collections, save_collections +from utility.redis_tools import get_redis_conn, get_user_id, get_user_collections, save_collections +Redis = get_redis_conn() from utility.logger import getLogger logger = getLogger(__name__) @@ -29,6 +26,11 @@ THIRTY_DAYS = 60 * 60 * 24 * 30 def get_user_session(): logger.info("@app.before_request get_session") g.user_session = UserSession() + #ZS: I think this should solve the issue of deleting the cookie and redirecting to the home page when a user's session has expired + if not g.user_session: + response = make_response(redirect(url_for('login'))) + response.set_cookie('session_id_v2', '', expires=0) + return response @app.after_request def set_user_session(response): @@ -37,7 +39,6 @@ def set_user_session(response): response.set_cookie(g.user_session.cookie_name, g.user_session.cookie) return response - def verify_cookie(cookie): the_uuid, separator, the_signature = cookie.partition(':') assert len(the_uuid) == 36, "Is session_id a uuid?" @@ -88,14 +89,11 @@ class UserSession(object): user_id = str(uuid.uuid4())) Redis.hmset(self.redis_key, self.record) Redis.expire(self.redis_key, THIRTY_DAYS) - response = make_response(redirect(url_for('login'))) - response.set_cookie(self.user_cookie_name, '', expires=0) ########### Grrr...this won't work because of the way flask handles cookies # Delete the cookie flash("Due to inactivity your session has expired. 
If you'd like please login again.") - return response - #return + return None else: self.record = dict(login_time = time.time(), user_type = "anon", diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 938570f3..24a4dcee 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -23,16 +23,13 @@ import uuid import simplejson as json import yaml -#Switching from Redis to StrictRedis; might cause some issues -import redis -Redis = redis.StrictRedis() - import flask import base64 import array import sqlalchemy from wqflask import app -from flask import g, Response, request, make_response, render_template, send_from_directory, jsonify, redirect +from flask import g, Response, request, make_response, render_template, send_from_directory, jsonify, redirect, url_for +from wqflask import group_manager from wqflask import search_results from wqflask import export_traits from wqflask import gsearch @@ -55,11 +52,13 @@ from wqflask.correlation import corr_scatter_plot from wqflask.wgcna import wgcna_analysis from wqflask.ctl import ctl_analysis from wqflask.snp_browser import snp_browser -#from wqflask.trait_submission import submit_trait from utility import temp_data from utility.tools import SQL_URI,TEMPDIR,USE_REDIS,USE_GN_SERVER,GN_SERVER_URL,GN_VERSION,JS_TWITTER_POST_FETCHER_PATH,JS_GUIX_PATH, CSS_PATH from utility.helper_functions import get_species_groups +from utility.authentication_tools import check_resource_availability +from utility.redis_tools import get_redis_conn +Redis = get_redis_conn() from base.webqtlConfig import GENERATED_IMAGE_DIR from utility.benchmark import Bench @@ -87,6 +86,24 @@ def connect_db(): g.db = g._database = sqlalchemy.create_engine(SQL_URI, encoding="latin1") logger.debug(g.db) +@app.before_request +def check_access_permissions(): + logger.debug("@app.before_request check_access_permissions") + if "temp_trait" in request.args: + if request.args['temp_trait'] == "True": + pass + else: + if 'dataset' in request.args: + dataset = create_dataset(request.args['dataset']) + logger.debug("USER:", Redis.hget("users")) + if 'trait_id' in request.args: + available = check_resource_availability(dataset, request.args['trait_id']) + else: + available = check_resource_availability(dataset) + + if not available: + return redirect(url_for("no_access_page")) + @app.teardown_appcontext def shutdown_session(exception=None): db = getattr(g, '_database', None) @@ -120,6 +137,10 @@ def handle_bad_request(e): resp.set_cookie(err_msg[:32],animation) return resp +@app.route("/authentication_needed") +def no_access_page(): + return render_template("new_security/not_authenticated.html") + @app.route("/") def index_page(): logger.info("Sending index_page") @@ -401,25 +422,43 @@ def export_traits_csv(): def export_perm_data(): """CSV file consisting of the permutation data for the mapping results""" logger.info(request.url) - num_perm = float(request.form['num_perm']) - perm_data = json.loads(request.form['perm_results']) + perm_info = json.loads(request.form['perm_info']) + + now = datetime.datetime.now() + time_str = now.strftime('%H:%M_%d%B%Y') + + file_name = "Permutation_" + perm_info['num_perm'] + "_" + perm_info['trait_name'] + "_" + time_str + + the_rows = [ + ["#Permutation Test"], + ["#File_name: " + file_name], + ["#Metadata: From GeneNetwork.org"], + ["#Trait_ID: " + perm_info['trait_name']], + ["#Trait_description: " + perm_info['trait_description']], + ["#N_permutations: " + str(perm_info['num_perm'])], + ["#Cofactors: " + 
perm_info['cofactors']], + ["#N_cases: " + str(perm_info['n_samples'])], + ["#N_genotypes: " + str(perm_info['n_genotypes'])], + ["#Genotype_file: " + perm_info['genofile']], + ["#Units_linkage: " + perm_info['units_linkage']], + ["#Permutation_stratified_by: " + ", ".join([ str(cofactor) for cofactor in perm_info['strat_cofactors']])], + ["#RESULTS_1: Suggestive LRS(p=0.63) = " + str(np.percentile(np.array(perm_info['perm_data']), 67))], + ["#RESULTS_2: Significant LRS(p=0.05) = " + str(np.percentile(np.array(perm_info['perm_data']), 95))], + ["#RESULTS_3: Highly Significant LRS(p=0.01) = " + str(np.percentile(np.array(perm_info['perm_data']), 99))], + ["#Comment: Results sorted from low to high peak linkage"] + ] buff = StringIO.StringIO() writer = csv.writer(buff) - writer.writerow(["Suggestive LRS (p=0.63) = " + str(np.percentile(np.array(perm_data), 67))]) - writer.writerow(["Significant LRS (p=0.05) = " + str(np.percentile(np.array(perm_data), 95))]) - writer.writerow(["Highly Significant LRS (p=0.01) = " + str(np.percentile(np.array(perm_data), 99))]) - writer.writerow("") - writer.writerow([str(num_perm) + " Permutations"]) - writer.writerow("") - for item in perm_data: + writer.writerows(the_rows) + for item in perm_info['perm_data']: writer.writerow([item]) csv_data = buff.getvalue() buff.close() return Response(csv_data, mimetype='text/csv', - headers={"Content-Disposition":"attachment;filename=perm_data.csv"}) + headers={"Content-Disposition":"attachment;filename=" + file_name + ".csv"}) @app.route("/show_temp_trait", methods=('POST',)) def show_temp_trait_page(): -- cgit v1.2.3 From a302a2b0ac0e7c0f26a0d063c3f2b057f61d47f1 Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 5 Jun 2020 16:52:56 -0500 Subject: Commiting other current group/resource management code, plus the new files --- wqflask/base/trait.py | 2 + wqflask/maintenance/set_resource_defaults.py | 155 +++++++++++++++++++++ wqflask/utility/authentication_tools.py | 46 ++++++ wqflask/utility/redis_tools.py | 37 +++-- wqflask/wqflask/group_manager.py | 77 ++++++++++ wqflask/wqflask/resource_manager.py | 72 ++++++++++ .../wqflask/static/new/javascript/group_manager.js | 38 +++++ wqflask/wqflask/templates/admin/create_group.html | 89 ++++++++++++ wqflask/wqflask/templates/admin/group_manager.html | 68 ++++----- .../wqflask/templates/admin/manage_resource.html | 92 ++++++++++++ .../wqflask/templates/admin/search_for_groups.html | 64 +++++++++ .../templates/admin/select_group_to_add.html | 54 +++++++ .../templates/new_security/not_authenticated.html | 11 ++ wqflask/wqflask/templates/show_trait_details.html | 5 + wqflask/wqflask/views.py | 3 +- 15 files changed, 764 insertions(+), 49 deletions(-) create mode 100644 wqflask/maintenance/set_resource_defaults.py create mode 100644 wqflask/utility/authentication_tools.py create mode 100644 wqflask/wqflask/group_manager.py create mode 100644 wqflask/wqflask/resource_manager.py create mode 100644 wqflask/wqflask/static/new/javascript/group_manager.js create mode 100644 wqflask/wqflask/templates/admin/create_group.html create mode 100644 wqflask/wqflask/templates/admin/manage_resource.html create mode 100644 wqflask/wqflask/templates/admin/search_for_groups.html create mode 100644 wqflask/wqflask/templates/admin/select_group_to_add.html create mode 100644 wqflask/wqflask/templates/new_security/not_authenticated.html (limited to 'wqflask/utility') diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index 405c4ebf..2a945588 100644 --- a/wqflask/base/trait.py +++ 
b/wqflask/base/trait.py @@ -391,6 +391,8 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): if response.strip() == "no-access": trait.view = False return trait + else: + trait_info = json.loads(response) except: resource_info = get_resource_info(resource_id) default_permissions = resource_info['default_mask']['data'] diff --git a/wqflask/maintenance/set_resource_defaults.py b/wqflask/maintenance/set_resource_defaults.py new file mode 100644 index 00000000..ba102d9c --- /dev/null +++ b/wqflask/maintenance/set_resource_defaults.py @@ -0,0 +1,155 @@ +""" + +Script that sets default resource access masks for use with the DB proxy + +Defaults will be: +Owner - omni_gn +Mask - Public/non-confidential: { data: "view", + metadata: "view", + admin: "not-admin" } + Private/confidentia: { data: "no-access", + metadata: "no-access", + admin: "not-admin" } + +To run: +./bin/genenetwork2 ~/my_settings.py -c ./wqflask/maintenance/gen_select_dataset.py + +""" + +from __future__ import print_function, division + +import sys +import json + +# NEW: Note we prepend the current path - otherwise a guix instance of GN2 may be used instead +sys.path.insert(0,'./') + +# NEW: import app to avoid a circular dependency on utility.tools +from wqflask import app + +from utility.tools import SQL_URI +from utility.redis_tools import get_redis_conn, get_user_id, add_resource, get_resources +Redis = get_redis_conn() + +import MySQLdb + +import urlparse + +from utility.logger import getLogger +logger = getLogger(__name__) + +def parse_db_uri(): + """Converts a database URI to the db name, host name, user name, and password""" + + parsed_uri = urlparse.urlparse(SQL_URI) + + db_conn_info = dict( + db = parsed_uri.path[1:], + host = parsed_uri.hostname, + user = parsed_uri.username, + passwd = parsed_uri.password) + + print(db_conn_info) + return db_conn_info + +def insert_probeset_resources(default_owner_id): + current_resources = Redis.hgetall("resources") + Cursor.execute(""" SELECT + ProbeSetFreeze.Id, ProbeSetFreeze.Name, ProbeSetFreeze.confidentiality, ProbeSetFreeze.public + FROM + ProbeSetFreeze""") + + resource_results = Cursor.fetchall() + for i, resource in enumerate(resource_results): + if i % 20 == 0: + print(i) + resource_ob = {} + resource_ob['name'] = resource[1] + resource_ob['owner_id'] = default_owner_id + resource_ob['data'] = { "dataset" : str(resource[0])} + resource_ob['type'] = "dataset-probeset" + if resource[2] < 1 and resource[3] > 0: + resource_ob['default_mask'] = { "data": ["no-access", "view"] } + else: + resource_ob['default_mask'] = { "data": ["no-access"] } + resource_ob['group_masks'] = {} + + add_resource(resource_ob) + +def insert_publish_resources(default_owner_id): + current_resources = Redis.hgetall("resources") + Cursor.execute(""" SELECT + PublishXRef.Id, PublishFreeze.Id, InbredSet.InbredSetCode + FROM + PublishXRef, PublishFreeze, InbredSet, Publication + WHERE + PublishFreeze.InbredSetId = PublishXRef.InbredSetId AND + InbredSet.Id = PublishXRef.InbredSetId AND + Publication.Id = PublishXRef.PublicationId""") + + resource_results = Cursor.fetchall() + for resource in resource_results: + if resource[2]: + resource_ob = {} + if resource[2]: + resource_ob['name'] = resource[2] + "_" + str(resource[0]) + else: + resource_ob['name'] = str(resource[0]) + resource_ob['owner_id'] = default_owner_id + resource_ob['data'] = { "dataset" : str(resource[1]) , + "trait" : str(resource[0])} + resource_ob['type'] = "dataset-publish" + resource_ob['default_mask'] = { "data": "view" } + 
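For reference, this is roughly the entry that insert_publish_resources builds and hands to add_resource for each phenotype record; the ids and owner value below are invented placeholders, not data from this patch:

    # Illustrative publish-dataset resource entry, mirroring the fields assembled above.
    example_resource = {
        "name": "BXD_10001",                    # InbredSet code + "_" + PublishXRef.Id
        "owner_id": "default-owner-uuid",       # placeholder for the default owner's user id
        "data": {"dataset": "602", "trait": "10001"},
        "type": "dataset-publish",
        "default_mask": {"data": "view"},
        "group_masks": {},
    }
    # add_resource() writes entries of this shape into the Redis "resources" hash,
    # which is what get_resources()/get_resource_info() read back.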
+ resource_ob['group_masks'] = {} + + add_resource(resource_ob) + else: + continue + +def insert_geno_resources(default_owner_id): + current_resources = Redis.hgetall("resources") + Cursor.execute(""" SELECT + GenoFreeze.Id, GenoFreeze.ShortName, GenoFreeze.confidentiality + FROM + GenoFreeze""") + + resource_results = Cursor.fetchall() + for i, resource in enumerate(resource_results): + if i % 20 == 0: + print(i) + resource_ob = {} + resource_ob['name'] = resource[1] + resource_ob['owner_id'] = default_owner_id + resource_ob['data'] = { "dataset" : str(resource[0]) } + resource_ob['type'] = "dataset-geno" + if resource[2] < 1: + resource_ob['default_mask'] = { "data": "view" } + else: + resource_ob['default_mask'] = { "data": "no-access" } + resource_ob['group_masks'] = {} + + add_resource(resource_ob) + +def insert_resources(default_owner_id): + current_resources = get_resources() + print("START") + insert_publish_resources(default_owner_id) + print("AFTER PUBLISH") + insert_geno_resources(default_owner_id) + print("AFTER GENO") + insert_probeset_resources(default_owner_id) + print("AFTER PROBESET") + +def main(): + """Generates and outputs (as json file) the data for the main dropdown menus on the home page""" + + Redis.delete("resources") + + owner_id = get_user_id("email_address", "zachary.a.sloan@gmail.com") + insert_resources(owner_id) + +if __name__ == '__main__': + Conn = MySQLdb.Connect(**parse_db_uri()) + Cursor = Conn.cursor() + main() \ No newline at end of file diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py new file mode 100644 index 00000000..537881a5 --- /dev/null +++ b/wqflask/utility/authentication_tools.py @@ -0,0 +1,46 @@ +from __future__ import absolute_import, print_function, division + +import json +import requests + +from base import data_set + +from utility import hmac +from utility.redis_tools import get_redis_conn, get_resource_info, get_resource_id + +from flask import Flask, g, redirect, url_for + +import logging +logger = logging.getLogger(__name__ ) + +def check_resource_availability(dataset, trait_id=None): + resource_id = get_resource_id(dataset, trait_id) + + if resource_id: + the_url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id) + try: + response = json.loads(requests.get(the_url).content)['data'] + except: + resource_info = get_resource_info(resource_id) + response = resource_info['default_mask']['data'] + + if 'view' in response: + return True + else: + return redirect(url_for("no_access_page")) + + return True + +def check_owner(dataset=None, trait_id=None, resource_id=None): + if resource_id: + resource_info = get_resource_info(resource_id) + if g.user_session.user_id == resource_info['owner_id']: + return resource_id + else: + resource_id = get_resource_id(dataset, trait_id) + if resource_id: + resource_info = get_resource_info(resource_id) + if g.user_session.user_id == resource_info['owner_id']: + return resource_id + + return False \ No newline at end of file diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py index 0ad96879..bc30a0af 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -95,14 +95,17 @@ def get_user_groups(user_id): user_group_ids = [] #ZS: Group IDs where user is a regular user groups_list = Redis.hgetall("groups") for key in groups_list: - group_ob = json.loads(groups_list[key]) - group_admins = set(group_ob['admins']) - group_members = set(group_ob['members']) - if user_id 
in group_admins: - admin_group_ids.append(group_ob['id']) - elif user_id in group_members: - user_group_ids.append(group_ob['id']) - else: + try: + group_ob = json.loads(groups_list[key]) + group_admins = set(group_ob['admins']) + group_members = set(group_ob['members']) + if user_id in group_admins: + admin_group_ids.append(group_ob['id']) + elif user_id in group_members: + user_group_ids.append(group_ob['id']) + else: + continue + except: continue admin_groups = [] @@ -122,6 +125,24 @@ def get_group_info(group_id): return group_info +def get_group_by_unique_column(column_name, column_value): + """ Get group by column; not sure if there's a faster way to do this """ + + matched_groups = [] + + all_group_list = Redis.hgetall("groups") + for key in all_group_list: + group_info = json.loads(all_group_list[key]) + if column_name == "admins" or column_name == "members": #ZS: Since these fields are lists, search in the list + if column_value in group_info[column_name]: + matched_groups.append(group_info) + else: + if group_info[column_name] == column_value: + matched_groups.append(group_info) + + return matched_groups + + def create_group(admin_user_ids, member_user_ids = [], group_name = "Default Group Name"): group_id = str(uuid.uuid4()) new_group = { diff --git a/wqflask/wqflask/group_manager.py b/wqflask/wqflask/group_manager.py new file mode 100644 index 00000000..f41ae56d --- /dev/null +++ b/wqflask/wqflask/group_manager.py @@ -0,0 +1,77 @@ + +from __future__ import print_function, division, absolute_import + +from flask import (Flask, g, render_template, url_for, request, make_response, + redirect, flash) + +from wqflask import app +from wqflask.user_login import send_verification_email + +from utility.redis_tools import get_user_groups, get_group_info, create_group, delete_group, add_users_to_group, remove_users_from_group, \ + change_group_name, save_verification_code, check_verification_code, get_user_by_unique_column + +from utility.logger import getLogger +logger = getLogger(__name__) + +@app.route("/groups/manage", methods=('GET', 'POST')) +def manage_groups(): + params = request.form if request.form else request.args + if "add_new_group" in params: + return redirect(url_for('add_group')) + else: + admin_groups, user_groups = get_user_groups(g.user_session.user_id) + return render_template("admin/group_manager.html", admin_groups=admin_groups, user_groups=user_groups) + +@app.route("/groups/remove", methods=('POST',)) +def remove_groups(): + group_ids_to_remove = request.form['selected_group_ids'] + for group_id in group_ids_to_remove.split(":"): + delete_group(g.user_session.user_id, group_id) + + return redirect(url_for('manage_groups')) + +@app.route("/groups/create", methods=('GET', 'POST')) +def add_group(): + params = request.form if request.form else request.args + if "group_name" in params: + member_user_ids = set() + admin_user_ids = set() + admin_user_ids.add(g.user_session.user_id) #ZS: Always add the user creating the group as an admin + if "admin_emails" in params: + admin_emails = params['admin_emails_to_add'].split(",") + for email in admin_emails: + user_details = get_user_by_unique_column("email_address", email) + if user_details: + admin_user_ids.add(user_details['user_id']) + #send_group_invites(params['group_id'], user_email_list = admin_emails, user_type="admins") + if "user_emails" in params: + member_emails = params['member_emails_to_add'].split(",") + for email in member_emails: + user_details = get_user_by_unique_column("email_address", email) + if 
user_details: + member_user_ids.add(user_details['user_id']) + #send_group_invites(params['group_id'], user_email_list = user_emails, user_type="members") + + create_group(list(admin_user_ids), list(member_user_ids), params['group_name']) + return redirect(url_for('manage_groups')) + else: + return render_template("admin/create_group.html") + +#ZS: Will integrate this later, for now just letting users be added directly +def send_group_invites(group_id, user_email_list = [], user_type="members"): + for user_email in user_email_list: + user_details = get_user_by_unique_column("email_address", user_email) + if user_details: + group_info = get_group_info(group_id) + #ZS: Probably not necessary since the group should normally always exist if group_id is being passed here, + # but it's technically possible to hit it if Redis is cleared out before submitting the new users or something + if group_info: + #ZS: Don't add user if they're already an admin or if they're being added a regular user and are already a regular user, + # but do add them if they're a regular user and are added as an admin + if (user_details['user_id'] in group_info['admins']) or \ + ((user_type == "members") and (user_details['user_id'] in group_info['members'])): + continue + else: + send_verification_email(user_details, template_name = "email/group_verification.txt", key_prefix = "verification_code", subject = "You've been invited to join a GeneNetwork user group") + +#@app.route() \ No newline at end of file diff --git a/wqflask/wqflask/resource_manager.py b/wqflask/wqflask/resource_manager.py new file mode 100644 index 00000000..7d88b8ed --- /dev/null +++ b/wqflask/wqflask/resource_manager.py @@ -0,0 +1,72 @@ +from __future__ import print_function, division, absolute_import + +from flask import (Flask, g, render_template, url_for, request, make_response, + redirect, flash) + +from wqflask import app + +from utility.authentication_tools import check_owner +from utility.redis_tools import get_resource_info, get_group_info, get_group_by_unique_column, get_user_id + +from utility.logger import getLogger +logger = getLogger(__name__) + +@app.route("/resources/manage", methods=('GET', 'POST')) +def view_resource(): + params = request.form if request.form else request.args + if 'resource_id' in request.args: + resource_id = request.args['resource_id'] + if check_owner(resource_id=resource_id): + resource_info = get_resource_info(resource_id) + group_masks = resource_info['group_masks'] + group_masks_with_names = get_group_names(group_masks) + default_mask = resource_info['default_mask']['data'] + return render_template("admin/manage_resource.html", resource_id = resource_id, resource_info=resource_info, default_mask=default_mask, group_masks=group_masks_with_names) + else: + return redirect(url_for("no_access_page")) + +@app.route("/resources/add_group", methods=('POST',)) +def add_group_to_resource(): + resource_id = request.form['resource_id'] + if check_owner(resource_id=resource_id): + if all(key in request.form for key in ('group_id', 'group_name', 'user_name', 'user_email')): + group_list = [] + if request.form['group_id'] != "": + the_group = get_group_info(request.form['group_id']) + if the_group: + group_list.append(the_group) + if request.form['group_name'] != "": + matched_groups = get_group_by_unique_column("name", request.form['group_name']) + for group in matched_groups: + group_list.append(group) + if request.form['user_name'] != "": + user_id = get_user_id("user_name", request.form['user_name']) + if user_id: + 
matched_groups = get_group_by_unique_column("admins", user_id) + matched_groups += get_group_by_unique_column("members", user_id) + for group in matched_groups: + group_list.append(group) + if request.form['user_email'] != "": + user_id = get_user_id("email_address", request.form['user_email']) + if user_id: + matched_groups = get_group_by_unique_column("admins", user_id) + matched_groups += get_group_by_unique_column("members", user_id) + for group in matched_groups: + group_list.append(group) + return render_template("admin/select_group_to_add.html", group_list=group_list, resource_id = resource_id) + elif 'selected_group' in request.form: + group_id = request.form['selected_group'] + return render_template("admin/set_group_privileges.html", resource_id = resource_id, group_id = group_id) + else: + return render_template("admin/search_for_groups.html", resource_id = resource_id) + else: + return redirect(url_for("no_access_page")) + +def get_group_names(group_masks): + group_masks_with_names = {} + for group_id, group_mask in group_masks.iteritems(): + this_mask = group_mask + group_name = get_group_info(group_id)['name'] + this_mask['name'] = group_name + + return group_masks_with_names \ No newline at end of file diff --git a/wqflask/wqflask/static/new/javascript/group_manager.js b/wqflask/wqflask/static/new/javascript/group_manager.js new file mode 100644 index 00000000..5e82d104 --- /dev/null +++ b/wqflask/wqflask/static/new/javascript/group_manager.js @@ -0,0 +1,38 @@ +$('#add_to_admins').click(function() { + add_emails('admin') +}) + +$('#add_to_members').click(function() { + add_emails('member') +}) + +$('#clear_admins').click(function(){ + clear_emails('admin') +}) + +$('#clear_members').click(function(){ + clear_emails('member') +}) + + +function add_emails(user_type){ + var email_address = $('input[name=user_email]').val(); + var email_list_string = $('input[name=' + user_type + '_emails_to_add]').val() + console.log(email_list_string) + if (email_list_string == ""){ + var email_set = new Set(); + } else { + var email_set = new Set(email_list_string.split(",")) + } + email_set.add(email_address) + + $('input[name=' + user_type + '_emails_to_add]').val(Array.from(email_set).join(',')) + + var emails_display_string = Array.from(email_set).join('\n') + $('.added_' + user_type + 's').val(emails_display_string) +} + +function clear_emails(user_type){ + $('input[name=' + user_type + '_emails_to_add]').val("") + $('.added_' + user_type + 's').val("") +} \ No newline at end of file diff --git a/wqflask/wqflask/templates/admin/create_group.html b/wqflask/wqflask/templates/admin/create_group.html new file mode 100644 index 00000000..55c3fa0b --- /dev/null +++ b/wqflask/wqflask/templates/admin/create_group.html @@ -0,0 +1,89 @@ +{% extends "base.html" %} +{% block title %}Group Manager{% endblock %} +{% block content %} + +
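add_group() above accepts the comma-separated e-mail lists that group_manager.js collects into hidden form fields and resolves each address to a Redis user id. A compact sketch of that lookup; the helper name and email_csv argument are hypothetical:

    # Hypothetical helper showing how a comma-separated e-mail list is resolved
    # to user ids, as add_group() does for both admins and members.
    from utility.redis_tools import get_user_by_unique_column

    def emails_to_user_ids(email_csv):
        user_ids = set()
        for email in email_csv.split(","):
            details = get_user_by_unique_column("email_address", email.strip())
            if details:
                user_ids.add(details["user_id"])
        return user_ids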
+ +
+ + +
+
+
+ +
+
+ +
+
+
+
+ +
+
+ +
+
+
+
+ +
+
+ +
+
+ +
+
+
+
+ +
+
+ +
+
+ +
+
+
+
+ +
+
+ +
+
+ +
+
+
+
+ +
+
+ +
+
+
+
+
+ +
+ + + + + +{% endblock %} + +{% block js %} + + + + + +{% endblock %} diff --git a/wqflask/wqflask/templates/admin/group_manager.html b/wqflask/wqflask/templates/admin/group_manager.html index b7df1aad..23d8205a 100644 --- a/wqflask/wqflask/templates/admin/group_manager.html +++ b/wqflask/wqflask/templates/admin/group_manager.html @@ -1,15 +1,23 @@ {% extends "base.html" %} {% block title %}Group Manager{% endblock %} +{% block css %} + + + +{% endblock %} {% block content %}
-
+
{% if admin_groups|length == 0 and user_groups|length == 0 %}

You are not currently a member or administrator of any groups.


@@ -20,7 +28,7 @@ {% if admin_groups|length == 0 %}

You are not currently an administrator of any groups.

{% else %} -

Loading...
+
@@ -29,17 +37,19 @@ + {% for group in admin_groups %} - + - + + {% endfor %} @@ -47,13 +57,13 @@ {% endif %}
-
+

User Groups


{% if user_groups|length == 0 %}

You are not currently a member of any groups.

{% else %} -
# Members Created Last ChangedGroup ID
{{ loop.index }}{{ loop.index }} {{ group.name }}{{ group.admins|length + group.users|length }}{{ group.admins|length + group.users|length }} {{ group.created_timestamp }} {{ group.changed_timestamp }}{{ group.id }}
+
@@ -88,48 +98,26 @@ {% endblock %} {% block js %} - - - + + + + +{% endblock %} diff --git a/wqflask/wqflask/templates/admin/search_for_groups.html b/wqflask/wqflask/templates/admin/search_for_groups.html new file mode 100644 index 00000000..89eb11dd --- /dev/null +++ b/wqflask/wqflask/templates/admin/search_for_groups.html @@ -0,0 +1,64 @@ +{% extends "base.html" %} +{% block title %}Resource Manager{% endblock %} +{% block content %} + +
+ + + +
+
+
+
+

Search by:

+
+
+ +
+ +
+
+
+ +
+ +
+
+
+ +
+ +
+
+
+ +
+ +
+
+
+ +
+ +
+
+
+
+
+ +
+ + + +{% endblock %} + +{% block js %} + + + + + +{% endblock %} diff --git a/wqflask/wqflask/templates/admin/select_group_to_add.html b/wqflask/wqflask/templates/admin/select_group_to_add.html new file mode 100644 index 00000000..df70fb2f --- /dev/null +++ b/wqflask/wqflask/templates/admin/select_group_to_add.html @@ -0,0 +1,54 @@ +{% extends "base.html" %} +{% block title %}Matched Groups{% endblock %} +{% block css %} + + + +{% endblock %} +{% block content %} + +
+

The following groups were found:

+
+
+ +
+ {% if group_list|length > 0 %} + +
+ + + + + + + + + + {% for group in group_list %} + + + + + + + {% endfor %} + +
NameCreatedLast Changed
{% if 'name' in group %}{{ group.name }}{% else %}N/A{% endif %}{% if 'created_timestamp' in group %}{{ group.created_timestamp }}{% else %}N/A{% endif %}{% if 'changed_timestamp' in group %}{{ group.changed_timestamp }}{% else %}N/A{% endif %}
+ {% else %} +

No matching groups were found.

+ {% endif %} +
+ +
+ + + +{% endblock %} + +{% block js %} + + +{% endblock %} diff --git a/wqflask/wqflask/templates/new_security/not_authenticated.html b/wqflask/wqflask/templates/new_security/not_authenticated.html new file mode 100644 index 00000000..7d0d3060 --- /dev/null +++ b/wqflask/wqflask/templates/new_security/not_authenticated.html @@ -0,0 +1,11 @@ +{% extends "base.html" %} +{% block title %}Authentication Needed{% endblock %} +{% block content %} +
+ +

Please contact the data's owner or GN administrators if you believe you should have access to this data.

+
+ +{% endblock %} \ No newline at end of file diff --git a/wqflask/wqflask/templates/show_trait_details.html b/wqflask/wqflask/templates/show_trait_details.html index 878b6ced..5c315878 100644 --- a/wqflask/wqflask/templates/show_trait_details.html +++ b/wqflask/wqflask/templates/show_trait_details.html @@ -248,6 +248,11 @@ + {% if resource_id %} + + + + {% endif %} diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 24a4dcee..ee827ba3 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -30,6 +30,7 @@ import sqlalchemy from wqflask import app from flask import g, Response, request, make_response, render_template, send_from_directory, jsonify, redirect, url_for from wqflask import group_manager +from wqflask import resource_manager from wqflask import search_results from wqflask import export_traits from wqflask import gsearch @@ -89,13 +90,13 @@ def connect_db(): @app.before_request def check_access_permissions(): logger.debug("@app.before_request check_access_permissions") + available = True if "temp_trait" in request.args: if request.args['temp_trait'] == "True": pass else: if 'dataset' in request.args: dataset = create_dataset(request.args['dataset']) - logger.debug("USER:", Redis.hget("users")) if 'trait_id' in request.args: available = check_resource_availability(dataset, request.args['trait_id']) else: -- cgit v1.2.3 From ea47eb228b1224ea83e3f50a056bf715b3bf5ec6 Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 17 Jun 2020 14:49:40 -0500 Subject: Adding all the authentication stuff --- wqflask/base/data_set.py | 36 +-- wqflask/base/trait.py | 5 +- wqflask/maintenance/set_resource_defaults.py | 307 ++++++++++----------- wqflask/utility/authentication_tools.py | 132 ++++++--- wqflask/utility/redis_tools.py | 67 ++++- wqflask/wqflask/docs.py | 4 +- wqflask/wqflask/group_manager.py | 220 ++++++++++----- wqflask/wqflask/resource_manager.py | 204 +++++++++----- .../wqflask/static/new/javascript/group_manager.js | 74 ++--- .../static/new/javascript/search_results.js | 1 - .../templates/admin/change_resource_owner.html | 116 ++++++++ wqflask/wqflask/templates/admin/create_group.html | 178 ++++++------ wqflask/wqflask/templates/admin/group_manager.html | 18 +- .../wqflask/templates/admin/manage_resource.html | 200 ++++++++------ .../wqflask/templates/admin/search_for_groups.html | 198 ++++++++----- .../templates/admin/select_group_to_add.html | 54 ---- .../templates/admin/set_group_privileges.html | 102 +++++++ wqflask/wqflask/templates/admin/view_group.html | 238 ++++++++++++++++ wqflask/wqflask/templates/base.html | 5 + .../wqflask/templates/set_group_privileges.html | 77 ++++++ wqflask/wqflask/templates/show_trait_details.html | 4 +- wqflask/wqflask/views.py | 2 +- 22 files changed, 1516 insertions(+), 726 deletions(-) create mode 100644 wqflask/wqflask/templates/admin/change_resource_owner.html delete mode 100644 wqflask/wqflask/templates/admin/select_group_to_add.html create mode 100644 wqflask/wqflask/templates/admin/set_group_privileges.html create mode 100644 wqflask/wqflask/templates/admin/view_group.html create mode 100644 wqflask/wqflask/templates/set_group_privileges.html (limited to 'wqflask/utility') diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py index 1457ba8d..92dc8615 100644 --- a/wqflask/base/data_set.py +++ b/wqflask/base/data_set.py @@ -486,25 +486,18 @@ class DatasetGroup(object): def datasets(group_name, this_group = None): key = "group_dataset_menu:v2:" + group_name - logger.debug("key is2:", key) dataset_menu = [] 
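The @app.before_request hook above delegates the actual decision to check_resource_availability from utility/authentication_tools.py, which asks the proxy first and falls back to the resource's stored default mask if the proxy cannot be reached. A simplified sketch of that logic, using the hard-coded localhost proxy address from this patch; may_view is an invented name for illustration:

    # Simplified sketch of the permission lookup behind check_access_permissions.
    import json
    import requests
    from utility.redis_tools import get_resource_info

    def may_view(resource_id, user_id):
        url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, user_id)
        try:
            mask = json.loads(requests.get(url).content)['data']
        except Exception:
            # Proxy unavailable: use the default mask stored with the resource
            mask = get_resource_info(resource_id)['default_mask']['data']
        return 'view' in mask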
- logger.debug("[tape4] webqtlConfig.PUBLICTHRESH:", webqtlConfig.PUBLICTHRESH) - logger.debug("[tape4] type webqtlConfig.PUBLICTHRESH:", type(webqtlConfig.PUBLICTHRESH)) the_results = fetchall(''' (SELECT '#PublishFreeze',PublishFreeze.FullName,PublishFreeze.Name FROM PublishFreeze,InbredSet WHERE PublishFreeze.InbredSetId = InbredSet.Id and InbredSet.Name = '%s' - and PublishFreeze.public > %s - and PublishFreeze.confidentiality < 1 ORDER BY PublishFreeze.Id ASC) UNION (SELECT '#GenoFreeze',GenoFreeze.FullName,GenoFreeze.Name FROM GenoFreeze, InbredSet WHERE GenoFreeze.InbredSetId = InbredSet.Id - and InbredSet.Name = '%s' - and GenoFreeze.public > %s - and GenoFreeze.confidentiality < 1) + and InbredSet.Name = '%s') UNION (SELECT Tissue.Name, ProbeSetFreeze.FullName,ProbeSetFreeze.Name FROM ProbeSetFreeze, ProbeFreeze, InbredSet, Tissue @@ -512,12 +505,10 @@ def datasets(group_name, this_group = None): and ProbeFreeze.TissueId = Tissue.Id and ProbeFreeze.InbredSetId = InbredSet.Id and InbredSet.Name like %s - and ProbeSetFreeze.public > %s - and ProbeSetFreeze.confidentiality < 1 ORDER BY Tissue.Name, ProbeSetFreeze.OrderList DESC) - ''' % (group_name, webqtlConfig.PUBLICTHRESH, - group_name, webqtlConfig.PUBLICTHRESH, - "'" + group_name + "'", webqtlConfig.PUBLICTHRESH)) + ''' % (group_name, + group_name, + "'" + group_name + "'")) sorted_results = sorted(the_results, key=lambda kv: kv[0]) @@ -637,29 +628,25 @@ class DataSet(object): """ - try: if self.type == "ProbeSet": query_args = tuple(escape(x) for x in ( - str(webqtlConfig.PUBLICTHRESH), self.name, self.name, self.name)) self.id, self.name, self.fullname, self.shortname, self.data_scale, self.tissue = fetch1(""" -SELECT ProbeSetFreeze.Id, ProbeSetFreeze.Name, ProbeSetFreeze.FullName, ProbeSetFreeze.ShortName, ProbeSetFreeze.DataScale, Tissue.Name -FROM ProbeSetFreeze, ProbeFreeze, Tissue -WHERE ProbeSetFreeze.public > %s -AND ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id -AND ProbeFreeze.TissueId = Tissue.Id -AND (ProbeSetFreeze.Name = '%s' OR ProbeSetFreeze.FullName = '%s' OR ProbeSetFreeze.ShortName = '%s') + SELECT ProbeSetFreeze.Id, ProbeSetFreeze.Name, ProbeSetFreeze.FullName, ProbeSetFreeze.ShortName, ProbeSetFreeze.DataScale, Tissue.Name + FROM ProbeSetFreeze, ProbeFreeze, Tissue + WHERE ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id + AND ProbeFreeze.TissueId = Tissue.Id + AND (ProbeSetFreeze.Name = '%s' OR ProbeSetFreeze.FullName = '%s' OR ProbeSetFreeze.ShortName = '%s') """ % (query_args),"/dataset/"+self.name+".json", lambda r: (r["id"],r["name"],r["full_name"],r["short_name"],r["data_scale"],r["tissue"]) ) else: query_args = tuple(escape(x) for x in ( (self.type + "Freeze"), - str(webqtlConfig.PUBLICTHRESH), self.name, self.name, self.name)) @@ -668,9 +655,8 @@ AND (ProbeSetFreeze.Name = '%s' OR ProbeSetFreeze.FullName = '%s' OR ProbeSetFre self.id, self.name, self.fullname, self.shortname = fetchone(""" SELECT Id, Name, FullName, ShortName FROM %s - WHERE public > %s AND - (Name = '%s' OR FullName = '%s' OR ShortName = '%s') - """ % (query_args)) + WHERE (Name = '%s' OR FullName = '%s' OR ShortName = '%s') + """ % (query_args)) except TypeError: logger.debug("Dataset {} is not yet available in GeneNetwork.".format(self.name)) diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index 2a945588..7700ecd5 100644 --- a/wqflask/base/trait.py +++ b/wqflask/base/trait.py @@ -46,9 +46,10 @@ def create_trait(**kw): else: permitted = check_resource_availability(dataset) - if permitted: + if permitted != "no-access": 
the_trait = GeneralTrait(**kw) if the_trait.dataset.type != "Temp": + the_trait = retrieve_trait_info(the_trait, the_trait.dataset, get_qtl_info=kw.get('get_qtl_info')) return the_trait else: @@ -383,7 +384,6 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): if dataset.type == 'Publish': the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view".format(resource_id, g.user_session.user_id) else: - the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view&trait={}".format(resource_id, g.user_session.user_id, trait.name) try: @@ -424,7 +424,6 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): logger.sql(query) trait_info = g.db.execute(query).fetchone() - #XZ, 05/08/2009: Xiaodong add this block to use ProbeSet.Id to find the probeset instead of just using ProbeSet.Name #XZ, 05/08/2009: to avoid the problem of same probeset name from different platforms. elif dataset.type == 'ProbeSet': diff --git a/wqflask/maintenance/set_resource_defaults.py b/wqflask/maintenance/set_resource_defaults.py index ba102d9c..344e6a23 100644 --- a/wqflask/maintenance/set_resource_defaults.py +++ b/wqflask/maintenance/set_resource_defaults.py @@ -1,155 +1,154 @@ -""" - -Script that sets default resource access masks for use with the DB proxy - -Defaults will be: -Owner - omni_gn -Mask - Public/non-confidential: { data: "view", - metadata: "view", - admin: "not-admin" } - Private/confidentia: { data: "no-access", - metadata: "no-access", - admin: "not-admin" } - -To run: -./bin/genenetwork2 ~/my_settings.py -c ./wqflask/maintenance/gen_select_dataset.py - -""" - -from __future__ import print_function, division - -import sys -import json - -# NEW: Note we prepend the current path - otherwise a guix instance of GN2 may be used instead -sys.path.insert(0,'./') - -# NEW: import app to avoid a circular dependency on utility.tools -from wqflask import app - -from utility.tools import SQL_URI -from utility.redis_tools import get_redis_conn, get_user_id, add_resource, get_resources -Redis = get_redis_conn() - -import MySQLdb - -import urlparse - -from utility.logger import getLogger -logger = getLogger(__name__) - -def parse_db_uri(): - """Converts a database URI to the db name, host name, user name, and password""" - - parsed_uri = urlparse.urlparse(SQL_URI) - - db_conn_info = dict( - db = parsed_uri.path[1:], - host = parsed_uri.hostname, - user = parsed_uri.username, - passwd = parsed_uri.password) - - print(db_conn_info) - return db_conn_info - -def insert_probeset_resources(default_owner_id): - current_resources = Redis.hgetall("resources") - Cursor.execute(""" SELECT - ProbeSetFreeze.Id, ProbeSetFreeze.Name, ProbeSetFreeze.confidentiality, ProbeSetFreeze.public - FROM - ProbeSetFreeze""") - - resource_results = Cursor.fetchall() - for i, resource in enumerate(resource_results): - if i % 20 == 0: - print(i) - resource_ob = {} - resource_ob['name'] = resource[1] - resource_ob['owner_id'] = default_owner_id - resource_ob['data'] = { "dataset" : str(resource[0])} - resource_ob['type'] = "dataset-probeset" - if resource[2] < 1 and resource[3] > 0: - resource_ob['default_mask'] = { "data": ["no-access", "view"] } - else: - resource_ob['default_mask'] = { "data": ["no-access"] } - resource_ob['group_masks'] = {} - - add_resource(resource_ob) - -def insert_publish_resources(default_owner_id): - current_resources = Redis.hgetall("resources") - Cursor.execute(""" SELECT - PublishXRef.Id, PublishFreeze.Id, InbredSet.InbredSetCode - FROM - 
PublishXRef, PublishFreeze, InbredSet, Publication - WHERE - PublishFreeze.InbredSetId = PublishXRef.InbredSetId AND - InbredSet.Id = PublishXRef.InbredSetId AND - Publication.Id = PublishXRef.PublicationId""") - - resource_results = Cursor.fetchall() - for resource in resource_results: - if resource[2]: - resource_ob = {} - if resource[2]: - resource_ob['name'] = resource[2] + "_" + str(resource[0]) - else: - resource_ob['name'] = str(resource[0]) - resource_ob['owner_id'] = default_owner_id - resource_ob['data'] = { "dataset" : str(resource[1]) , - "trait" : str(resource[0])} - resource_ob['type'] = "dataset-publish" - resource_ob['default_mask'] = { "data": "view" } - - resource_ob['group_masks'] = {} - - add_resource(resource_ob) - else: - continue - -def insert_geno_resources(default_owner_id): - current_resources = Redis.hgetall("resources") - Cursor.execute(""" SELECT - GenoFreeze.Id, GenoFreeze.ShortName, GenoFreeze.confidentiality - FROM - GenoFreeze""") - - resource_results = Cursor.fetchall() - for i, resource in enumerate(resource_results): - if i % 20 == 0: - print(i) - resource_ob = {} - resource_ob['name'] = resource[1] - resource_ob['owner_id'] = default_owner_id - resource_ob['data'] = { "dataset" : str(resource[0]) } - resource_ob['type'] = "dataset-geno" - if resource[2] < 1: - resource_ob['default_mask'] = { "data": "view" } - else: - resource_ob['default_mask'] = { "data": "no-access" } - resource_ob['group_masks'] = {} - - add_resource(resource_ob) - -def insert_resources(default_owner_id): - current_resources = get_resources() - print("START") - insert_publish_resources(default_owner_id) - print("AFTER PUBLISH") - insert_geno_resources(default_owner_id) - print("AFTER GENO") - insert_probeset_resources(default_owner_id) - print("AFTER PROBESET") - -def main(): - """Generates and outputs (as json file) the data for the main dropdown menus on the home page""" - - Redis.delete("resources") - - owner_id = get_user_id("email_address", "zachary.a.sloan@gmail.com") - insert_resources(owner_id) - -if __name__ == '__main__': - Conn = MySQLdb.Connect(**parse_db_uri()) - Cursor = Conn.cursor() +""" + +Script that sets default resource access masks for use with the DB proxy + +Defaults will be: +Owner - omni_gn +Mask - Public/non-confidential: { data: "view", + metadata: "view", + admin: "not-admin" } + Private/confidentia: { data: "no-access", + metadata: "no-access", + admin: "not-admin" } + +To run: +./bin/genenetwork2 ~/my_settings.py -c ./wqflask/maintenance/gen_select_dataset.py + +""" + +from __future__ import print_function, division + +import sys +import json + +# NEW: Note we prepend the current path - otherwise a guix instance of GN2 may be used instead +sys.path.insert(0,'./') + +# NEW: import app to avoid a circular dependency on utility.tools +from wqflask import app + +from utility.tools import SQL_URI +from utility.redis_tools import get_redis_conn, get_user_id, add_resource, get_resources +Redis = get_redis_conn() + +import MySQLdb + +import urlparse + +from utility.logger import getLogger +logger = getLogger(__name__) + +def parse_db_uri(): + """Converts a database URI to the db name, host name, user name, and password""" + + parsed_uri = urlparse.urlparse(SQL_URI) + + db_conn_info = dict( + db = parsed_uri.path[1:], + host = parsed_uri.hostname, + user = parsed_uri.username, + passwd = parsed_uri.password) + + print(db_conn_info) + return db_conn_info + +def insert_probeset_resources(default_owner_id): + current_resources = Redis.hgetall("resources") + 
Cursor.execute(""" SELECT + ProbeSetFreeze.Id, ProbeSetFreeze.Name, ProbeSetFreeze.confidentiality, ProbeSetFreeze.public + FROM + ProbeSetFreeze""") + + resource_results = Cursor.fetchall() + for i, resource in enumerate(resource_results): + resource_ob = {} + resource_ob['name'] = resource[1] + resource_ob['owner_id'] = default_owner_id + resource_ob['data'] = { "dataset" : str(resource[0])} + resource_ob['type'] = "dataset-probeset" + if resource[2] < 1 and resource[3] > 0: + resource_ob['default_mask'] = { "data": "view" } + else: + resource_ob['default_mask'] = { "data": "no-access" } + resource_ob['group_masks'] = {} + + add_resource(resource_ob) + +def insert_publish_resources(default_owner_id): + current_resources = Redis.hgetall("resources") + Cursor.execute(""" SELECT + PublishXRef.Id, PublishFreeze.Id, InbredSet.InbredSetCode + FROM + PublishXRef, PublishFreeze, InbredSet, Publication + WHERE + PublishFreeze.InbredSetId = PublishXRef.InbredSetId AND + InbredSet.Id = PublishXRef.InbredSetId AND + Publication.Id = PublishXRef.PublicationId""") + + resource_results = Cursor.fetchall() + for resource in resource_results: + if resource[2]: + resource_ob = {} + if resource[2]: + resource_ob['name'] = resource[2] + "_" + str(resource[0]) + else: + resource_ob['name'] = str(resource[0]) + resource_ob['owner_id'] = default_owner_id + resource_ob['data'] = { "dataset" : str(resource[1]) , + "trait" : str(resource[0])} + resource_ob['type'] = "dataset-publish" + resource_ob['default_mask'] = { "data": "view" } + + resource_ob['group_masks'] = {} + + add_resource(resource_ob) + else: + continue + +def insert_geno_resources(default_owner_id): + current_resources = Redis.hgetall("resources") + Cursor.execute(""" SELECT + GenoFreeze.Id, GenoFreeze.ShortName, GenoFreeze.confidentiality + FROM + GenoFreeze""") + + resource_results = Cursor.fetchall() + for i, resource in enumerate(resource_results): + resource_ob = {} + resource_ob['name'] = resource[1] + if resource[1] == "HET3-ITPGeno": + resource_ob['owner_id'] = "73a3f093-ca13-4ae0-a179-9a446f709f6e" + else: + resource_ob['owner_id'] = default_owner_id + resource_ob['data'] = { "dataset" : str(resource[0]) } + resource_ob['type'] = "dataset-geno" + if resource[2] < 1: + resource_ob['default_mask'] = { "data": "view" } + else: + resource_ob['default_mask'] = { "data": "no-access" } + resource_ob['group_masks'] = {} + + add_resource(resource_ob) + +def insert_resources(default_owner_id): + current_resources = get_resources() + print("START") + insert_publish_resources(default_owner_id) + print("AFTER PUBLISH") + insert_geno_resources(default_owner_id) + print("AFTER GENO") + insert_probeset_resources(default_owner_id) + print("AFTER PROBESET") + +def main(): + """Generates and outputs (as json file) the data for the main dropdown menus on the home page""" + + Redis.delete("resources") + + owner_id = get_user_id("email_address", "zachary.a.sloan@gmail.com") + insert_resources(owner_id) + +if __name__ == '__main__': + Conn = MySQLdb.Connect(**parse_db_uri()) + Cursor = Conn.cursor() main() \ No newline at end of file diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index 537881a5..07ceacc0 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -1,46 +1,86 @@ -from __future__ import absolute_import, print_function, division - -import json -import requests - -from base import data_set - -from utility import hmac -from utility.redis_tools import 
get_redis_conn, get_resource_info, get_resource_id - -from flask import Flask, g, redirect, url_for - -import logging -logger = logging.getLogger(__name__ ) - -def check_resource_availability(dataset, trait_id=None): - resource_id = get_resource_id(dataset, trait_id) - - if resource_id: - the_url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id) - try: - response = json.loads(requests.get(the_url).content)['data'] - except: - resource_info = get_resource_info(resource_id) - response = resource_info['default_mask']['data'] - - if 'view' in response: - return True - else: - return redirect(url_for("no_access_page")) - - return True - -def check_owner(dataset=None, trait_id=None, resource_id=None): - if resource_id: - resource_info = get_resource_info(resource_id) - if g.user_session.user_id == resource_info['owner_id']: - return resource_id - else: - resource_id = get_resource_id(dataset, trait_id) - if resource_id: - resource_info = get_resource_info(resource_id) - if g.user_session.user_id == resource_info['owner_id']: - return resource_id - - return False \ No newline at end of file +from __future__ import absolute_import, print_function, division + +import json +import requests + +from base import data_set + +from utility import hmac +from utility.redis_tools import get_redis_conn, get_resource_info, get_resource_id + +from flask import Flask, g, redirect, url_for + +import logging +logger = logging.getLogger(__name__ ) + +def check_resource_availability(dataset, trait_id=None): + resource_id = get_resource_id(dataset, trait_id) + + response = None + if resource_id: + resource_info = get_resource_info(resource_id) + + the_url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id) + try: + response = json.loads(requests.get(the_url).content)['data'] + except: + response = resource_info['default_mask']['data'] + + if 'edit' in response: + return "edit" + elif 'view' in response: + return "view" + else: + return "no-access" + + return False + +def check_admin(resource_id=None): + + return "not-admin" + + # ZS: commented out until proxy can return this + # the_url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id) + # try: + # response = json.loads(requests.get(the_url).content) + # except: + # response = resource_info['default_mask']['admin'] + + # if 'edit-admins' in response: + # return "edit-admins" + # elif 'edit-access' in response: + # return "edit-access" + # else: + # return "not-admin" + +def check_owner(dataset=None, trait_id=None, resource_id=None): + if resource_id: + resource_info = get_resource_info(resource_id) + if g.user_session.user_id == resource_info['owner_id']: + return resource_id + else: + resource_id = get_resource_id(dataset, trait_id) + if resource_id: + resource_info = get_resource_info(resource_id) + if g.user_session.user_id == resource_info['owner_id']: + return resource_id + + return False + +def check_owner_or_admin(dataset=None, trait_id=None, resource_id=None): + if resource_id: + resource_info = get_resource_info(resource_id) + if g.user_session.user_id == resource_info['owner_id']: + return [resource_id, "owner"] + else: + return [resource_id, check_admin(resource_id)] + else: + resource_id = get_resource_id(dataset, trait_id) + if resource_id: + resource_info = get_resource_info(resource_id) + if g.user_session.user_id == resource_info['owner_id']: + return [resource_id, "owner"] + else: + return [resource_id, 
check_admin(resource_id)] + + return [resource_id, "not-admin"] \ No newline at end of file diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py index bc30a0af..c6d221ff 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -16,7 +16,7 @@ from utility.logger import getLogger logger = getLogger(__name__) def get_redis_conn(): - Redis = redis.StrictRedis(port=6380) + Redis = redis.StrictRedis(port=6379) return Redis Redis = get_redis_conn() @@ -51,6 +51,27 @@ def get_user_by_unique_column(column_name, column_value): return item_details +def get_users_like_unique_column(column_name, column_value): + """ + Like previous function, but this only checks if the input is a subset of a field and can return multiple results + """ + matched_users = [] + + if column_value != "": + user_list = Redis.hgetall("users") + if column_name != "user_id": + for key in user_list: + user_ob = json.loads(user_list[key]) + if column_name in user_ob: + if column_value in user_ob[column_name]: + matched_users.append(user_ob) + else: + matched_users.append(json.loads(user_list[column_value])) + + return matched_users + +# def search_users_by_unique_column(column_name, column_value): + def set_user_attribute(user_id, column_name, column_value): user_info = json.loads(Redis.hget("users", user_id)) user_info[column_name] = column_value @@ -142,6 +163,28 @@ def get_group_by_unique_column(column_name, column_value): return matched_groups +def get_groups_like_unique_column(column_name, column_value): + """ + Like previous function, but this only checks if the input is a subset of a field and can return multiple results + """ + matched_groups = [] + + if column_value != "": + group_list = Redis.hgetall("groups") + if column_name != "group_id": + for key in group_list: + group_info = json.loads(group_list[key]) + if column_name == "admins" or column_name == "members": #ZS: Since these fields are lists, search in the list + if column_value in group_info[column_name]: + matched_groups.append(group_info) + else: + if column_name in group_info: + if column_value in group_info[column_name]: + matched_groups.append(group_info) + else: + matched_groups.append(json.loads(group_list[column_value])) + + return matched_groups def create_group(admin_user_ids, member_user_ids = [], group_name = "Default Group Name"): group_id = str(uuid.uuid4()) @@ -192,9 +235,13 @@ def add_users_to_group(user_id, group_id, user_emails = [], admins = False): #ZS def remove_users_from_group(user_id, users_to_remove_ids, group_id, user_type = "members"): #ZS: User type is because I assume admins can remove other admins group_info = get_group_info(group_id) + if user_id in group_info["admins"]: + users_to_remove_set = set(users_to_remove_ids) + if user_type == "admins" and user_id in users_to_remove_set: #ZS: Make sure an admin can't remove themselves from a group, since I imagine we don't want groups to be able to become admin-less + users_to_remove_set.remove(user_id) group_users = set(group_info[user_type]) - group_users -= set(users_to_remove_ids) + group_users -= users_to_remove_set group_info[user_type] = list(group_users) group_info["changed_timestamp"] = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p') Redis.hset("groups", group_id, json.dumps(group_info)) @@ -232,7 +279,6 @@ def get_resource_info(resource_id): return json.loads(resource_info) def add_resource(resource_info): - if 'trait' in resource_info['data']: resource_id = hmac.hmac_creation('{}:{}:{}'.format(str(resource_info['type']), 
str(resource_info['data']['dataset']), str(resource_info['data']['trait']))) else: @@ -241,3 +287,18 @@ def add_resource(resource_info): Redis.hset("resources", resource_id, json.dumps(resource_info)) return resource_info + +def add_access_mask(resource_id, group_id, access_mask): + the_resource = get_resource_info(resource_id) + the_resource['group_masks'][group_id] = access_mask + + Redis.hset("resources", resource_id, json.dumps(the_resource)) + + return the_resource + +def change_resource_owner(resource_id, new_owner_id): + the_resource= get_resource_info(resource_id) + the_resource['owner_id'] = new_owner_id + + Redis.delete("resource") + Redis.hset("resources", resource_id, json.dumps(the_resource)) \ No newline at end of file diff --git a/wqflask/wqflask/docs.py b/wqflask/wqflask/docs.py index 0187f32e..78407e22 100644 --- a/wqflask/wqflask/docs.py +++ b/wqflask/wqflask/docs.py @@ -1,5 +1,7 @@ from __future__ import absolute_import, print_function, division +import codecs + from flask import g from utility.logger import getLogger @@ -20,7 +22,7 @@ class Docs(object): self.content = "" else: self.title = result[0] - self.content = result[1] + self.content = result[1].encode("latin1") self.editable = "false" # ZS: Removing option to edit to see if text still gets vandalized diff --git a/wqflask/wqflask/group_manager.py b/wqflask/wqflask/group_manager.py index f41ae56d..9afc016b 100644 --- a/wqflask/wqflask/group_manager.py +++ b/wqflask/wqflask/group_manager.py @@ -1,77 +1,145 @@ - -from __future__ import print_function, division, absolute_import - -from flask import (Flask, g, render_template, url_for, request, make_response, - redirect, flash) - -from wqflask import app -from wqflask.user_login import send_verification_email - -from utility.redis_tools import get_user_groups, get_group_info, create_group, delete_group, add_users_to_group, remove_users_from_group, \ - change_group_name, save_verification_code, check_verification_code, get_user_by_unique_column - -from utility.logger import getLogger -logger = getLogger(__name__) - -@app.route("/groups/manage", methods=('GET', 'POST')) -def manage_groups(): - params = request.form if request.form else request.args - if "add_new_group" in params: - return redirect(url_for('add_group')) - else: - admin_groups, user_groups = get_user_groups(g.user_session.user_id) - return render_template("admin/group_manager.html", admin_groups=admin_groups, user_groups=user_groups) - -@app.route("/groups/remove", methods=('POST',)) -def remove_groups(): - group_ids_to_remove = request.form['selected_group_ids'] - for group_id in group_ids_to_remove.split(":"): - delete_group(g.user_session.user_id, group_id) - - return redirect(url_for('manage_groups')) - -@app.route("/groups/create", methods=('GET', 'POST')) -def add_group(): - params = request.form if request.form else request.args - if "group_name" in params: - member_user_ids = set() - admin_user_ids = set() - admin_user_ids.add(g.user_session.user_id) #ZS: Always add the user creating the group as an admin - if "admin_emails" in params: - admin_emails = params['admin_emails_to_add'].split(",") - for email in admin_emails: - user_details = get_user_by_unique_column("email_address", email) - if user_details: - admin_user_ids.add(user_details['user_id']) - #send_group_invites(params['group_id'], user_email_list = admin_emails, user_type="admins") - if "user_emails" in params: - member_emails = params['member_emails_to_add'].split(",") - for email in member_emails: - user_details = 
get_user_by_unique_column("email_address", email) - if user_details: - member_user_ids.add(user_details['user_id']) - #send_group_invites(params['group_id'], user_email_list = user_emails, user_type="members") - - create_group(list(admin_user_ids), list(member_user_ids), params['group_name']) - return redirect(url_for('manage_groups')) - else: - return render_template("admin/create_group.html") - -#ZS: Will integrate this later, for now just letting users be added directly -def send_group_invites(group_id, user_email_list = [], user_type="members"): - for user_email in user_email_list: - user_details = get_user_by_unique_column("email_address", user_email) - if user_details: - group_info = get_group_info(group_id) - #ZS: Probably not necessary since the group should normally always exist if group_id is being passed here, - # but it's technically possible to hit it if Redis is cleared out before submitting the new users or something - if group_info: - #ZS: Don't add user if they're already an admin or if they're being added a regular user and are already a regular user, - # but do add them if they're a regular user and are added as an admin - if (user_details['user_id'] in group_info['admins']) or \ - ((user_type == "members") and (user_details['user_id'] in group_info['members'])): - continue - else: - send_verification_email(user_details, template_name = "email/group_verification.txt", key_prefix = "verification_code", subject = "You've been invited to join a GeneNetwork user group") - + +from __future__ import print_function, division, absolute_import + +from flask import (Flask, g, render_template, url_for, request, make_response, + redirect, flash) + +from wqflask import app +from wqflask.user_login import send_verification_email + +from utility.redis_tools import get_user_groups, get_group_info, create_group, delete_group, add_users_to_group, remove_users_from_group, \ + change_group_name, save_verification_code, check_verification_code, get_user_by_unique_column, get_resources, get_resource_info + +from utility.logger import getLogger +logger = getLogger(__name__) + +@app.route("/groups/manage", methods=('GET', 'POST')) +def manage_groups(): + params = request.form if request.form else request.args + if "add_new_group" in params: + return redirect(url_for('add_group')) + else: + admin_groups, user_groups = get_user_groups(g.user_session.user_id) + return render_template("admin/group_manager.html", admin_groups=admin_groups, user_groups=user_groups) + +@app.route("/groups/view", methods=('GET', 'POST')) +def view_group(): + params = request.form if request.form else request.args + group_id = params['id'] + group_info = get_group_info(group_id) + admins_info = [] + user_is_admin = False + if g.user_session.user_id in group_info['admins']: + user_is_admin = True + for user_id in group_info['admins']: + if user_id: + user_info = get_user_by_unique_column("user_id", user_id) + admins_info.append(user_info) + members_info = [] + for user_id in group_info['members']: + if user_id: + user_info = get_user_by_unique_column("user_id", user_id) + members_info.append(user_info) + + #ZS: This whole part might not scale well with many resources + resources_info = [] + all_resources = get_resources() + for resource_id in all_resources: + resource_info = get_resource_info(resource_id) + group_masks = resource_info['group_masks'] + if group_id in group_masks: + this_resource = {} + privileges = group_masks[group_id] + this_resource['id'] = resource_id + this_resource['name'] = resource_info['name'] + 
this_resource['data'] = privileges['data'] + this_resource['metadata'] = privileges['metadata'] + this_resource['admin'] = privileges['admin'] + resources_info.append(this_resource) + + return render_template("admin/view_group.html", group_info=group_info, admins=admins_info, members=members_info, user_is_admin=user_is_admin, resources=resources_info) + +@app.route("/groups/remove", methods=('POST',)) +def remove_groups(): + group_ids_to_remove = request.form['selected_group_ids'] + for group_id in group_ids_to_remove.split(":"): + delete_group(g.user_session.user_id, group_id) + + return redirect(url_for('manage_groups')) + +@app.route("/groups/remove_users", methods=('POST',)) +def remove_users(): + group_id = request.form['group_id'] + admin_ids_to_remove = request.form['selected_admin_ids'] + member_ids_to_remove = request.form['selected_member_ids'] + + remove_users_from_group(g.user_session.user_id, admin_ids_to_remove.split(":"), group_id, user_type="admins") + remove_users_from_group(g.user_session.user_id, member_ids_to_remove.split(":"), group_id, user_type="members") + + return redirect(url_for('view_group', id=group_id)) + +@app.route("/groups/add_", methods=('POST',)) +def add_users(user_type='members'): + group_id = request.form['group_id'] + if user_type == "admins": + user_emails = request.form['admin_emails_to_add'].split(",") + add_users_to_group(g.user_session.user_id, group_id, user_emails, admins = True) + elif user_type == "members": + user_emails = request.form['member_emails_to_add'].split(",") + add_users_to_group(g.user_session.user_id, group_id, user_emails, admins = False) + + return redirect(url_for('view_group', id=group_id)) + +@app.route("/groups/change_name", methods=('POST',)) +def change_name(): + group_id = request.form['group_id'] + new_name = request.form['new_name'] + group_info = change_group_name(g.user_session.user_id, group_id, new_name) + + return new_name + +@app.route("/groups/create", methods=('GET', 'POST')) +def add_or_edit_group(): + params = request.form if request.form else request.args + if "group_name" in params: + member_user_ids = set() + admin_user_ids = set() + admin_user_ids.add(g.user_session.user_id) #ZS: Always add the user creating the group as an admin + if "admin_emails_to_add" in params: + admin_emails = params['admin_emails_to_add'].split(",") + for email in admin_emails: + user_details = get_user_by_unique_column("email_address", email) + if user_details: + admin_user_ids.add(user_details['user_id']) + #send_group_invites(params['group_id'], user_email_list = admin_emails, user_type="admins") + if "member_emails_to_add" in params: + member_emails = params['member_emails_to_add'].split(",") + for email in member_emails: + user_details = get_user_by_unique_column("email_address", email) + if user_details: + member_user_ids.add(user_details['user_id']) + #send_group_invites(params['group_id'], user_email_list = user_emails, user_type="members") + + create_group(list(admin_user_ids), list(member_user_ids), params['group_name']) + return redirect(url_for('manage_groups')) + else: + return render_template("admin/create_group.html") + +#ZS: Will integrate this later, for now just letting users be added directly +def send_group_invites(group_id, user_email_list = [], user_type="members"): + for user_email in user_email_list: + user_details = get_user_by_unique_column("email_address", user_email) + if user_details: + group_info = get_group_info(group_id) + #ZS: Probably not necessary since the group should normally always exist if 
group_id is being passed here, + # but it's technically possible to hit it if Redis is cleared out before submitting the new users or something + if group_info: + #ZS: Don't add user if they're already an admin or if they're being added a regular user and are already a regular user, + # but do add them if they're a regular user and are added as an admin + if (user_details['user_id'] in group_info['admins']) or \ + ((user_type == "members") and (user_details['user_id'] in group_info['members'])): + continue + else: + send_verification_email(user_details, template_name = "email/group_verification.txt", key_prefix = "verification_code", subject = "You've been invited to join a GeneNetwork user group") + #@app.route() \ No newline at end of file diff --git a/wqflask/wqflask/resource_manager.py b/wqflask/wqflask/resource_manager.py index 7d88b8ed..0f9f5c9d 100644 --- a/wqflask/wqflask/resource_manager.py +++ b/wqflask/wqflask/resource_manager.py @@ -1,72 +1,134 @@ -from __future__ import print_function, division, absolute_import - -from flask import (Flask, g, render_template, url_for, request, make_response, - redirect, flash) - -from wqflask import app - -from utility.authentication_tools import check_owner -from utility.redis_tools import get_resource_info, get_group_info, get_group_by_unique_column, get_user_id - -from utility.logger import getLogger -logger = getLogger(__name__) - -@app.route("/resources/manage", methods=('GET', 'POST')) -def view_resource(): - params = request.form if request.form else request.args - if 'resource_id' in request.args: - resource_id = request.args['resource_id'] - if check_owner(resource_id=resource_id): - resource_info = get_resource_info(resource_id) - group_masks = resource_info['group_masks'] - group_masks_with_names = get_group_names(group_masks) - default_mask = resource_info['default_mask']['data'] - return render_template("admin/manage_resource.html", resource_id = resource_id, resource_info=resource_info, default_mask=default_mask, group_masks=group_masks_with_names) - else: - return redirect(url_for("no_access_page")) - -@app.route("/resources/add_group", methods=('POST',)) -def add_group_to_resource(): - resource_id = request.form['resource_id'] - if check_owner(resource_id=resource_id): - if all(key in request.form for key in ('group_id', 'group_name', 'user_name', 'user_email')): - group_list = [] - if request.form['group_id'] != "": - the_group = get_group_info(request.form['group_id']) - if the_group: - group_list.append(the_group) - if request.form['group_name'] != "": - matched_groups = get_group_by_unique_column("name", request.form['group_name']) - for group in matched_groups: - group_list.append(group) - if request.form['user_name'] != "": - user_id = get_user_id("user_name", request.form['user_name']) - if user_id: - matched_groups = get_group_by_unique_column("admins", user_id) - matched_groups += get_group_by_unique_column("members", user_id) - for group in matched_groups: - group_list.append(group) - if request.form['user_email'] != "": - user_id = get_user_id("email_address", request.form['user_email']) - if user_id: - matched_groups = get_group_by_unique_column("admins", user_id) - matched_groups += get_group_by_unique_column("members", user_id) - for group in matched_groups: - group_list.append(group) - return render_template("admin/select_group_to_add.html", group_list=group_list, resource_id = resource_id) - elif 'selected_group' in request.form: - group_id = request.form['selected_group'] - return 
render_template("admin/set_group_privileges.html", resource_id = resource_id, group_id = group_id) - else: - return render_template("admin/search_for_groups.html", resource_id = resource_id) - else: - return redirect(url_for("no_access_page")) - -def get_group_names(group_masks): - group_masks_with_names = {} - for group_id, group_mask in group_masks.iteritems(): - this_mask = group_mask - group_name = get_group_info(group_id)['name'] - this_mask['name'] = group_name - +from __future__ import print_function, division, absolute_import + +import json + +from flask import (Flask, g, render_template, url_for, request, make_response, + redirect, flash) + +from wqflask import app + +from utility.authentication_tools import check_owner_or_admin +from utility.redis_tools import get_resource_info, get_group_info, get_groups_like_unique_column, get_user_id, get_user_by_unique_column, get_users_like_unique_column, add_access_mask, add_resource, change_resource_owner + +from utility.logger import getLogger +logger = getLogger(__name__) + +@app.route("/resources/manage", methods=('GET', 'POST')) +def manage_resource(): + params = request.form if request.form else request.args + if 'resource_id' in request.args: + resource_id = request.args['resource_id'] + admin_status = check_owner_or_admin(resource_id=resource_id)[1] + + resource_info = get_resource_info(resource_id) + group_masks = resource_info['group_masks'] + group_masks_with_names = get_group_names(group_masks) + default_mask = resource_info['default_mask']['data'] + owner_id = resource_info['owner_id'] + owner_info = get_user_by_unique_column("user_id", owner_id) + + if 'name' in owner_info: + owner_display_name = owner_info['full_name'] + elif 'user_name' in owner_info: + owner_display_name = owner_info['user_name'] + elif 'email_address' in owner_info: + owner_display_name = owner_info['email_address'] + else: + owner_display_name = None + + return render_template("admin/manage_resource.html", owner_name = owner_display_name, resource_id = resource_id, resource_info=resource_info, default_mask=default_mask, group_masks=group_masks_with_names, admin_status=admin_status) + +@app.route("/search_for_users", methods=('POST',)) +def search_for_user(): + params = request.form + user_list = [] + user_list += get_users_like_unique_column("full_name", params['user_name']) + user_list += get_users_like_unique_column("email_address", params['user_email']) + + return json.dumps(user_list) + +@app.route("/search_for_groups", methods=('POST',)) +def search_for_groups(): + params = request.form + group_list = [] + group_list += get_groups_like_unique_column("id", params['group_id']) + group_list += get_groups_like_unique_column("name", params['group_name']) + + user_list = [] + user_list += get_users_like_unique_column("full_name", params['user_name']) + user_list += get_users_like_unique_column("email_address", params['user_email']) + for user in user_list: + group_list += get_groups_like_unique_column("admins", user['user_id']) + group_list += get_groups_like_unique_column("members", user['user_id']) + + return json.dumps(group_list) + +@app.route("/resources/change_owner", methods=('POST',)) +def change_owner(): + resource_id = request.form['resource_id'] + if 'new_owner' in request.form: + admin_status = check_owner_or_admin(resource_id=resource_id)[1] + if admin_status == "owner": + new_owner_id = request.form['new_owner'] + change_resource_owner(resource_id, new_owner_id) + flash("The resource's owner has beeen changed.", "alert-info") + return 
redirect(url_for("manage_resource", resource_id=resource_id)) + else: + flash("You lack the permissions to make this change.", "error") + return redirect(url_for("manage_resource", resource_id=resource_id)) + else: + return render_template("admin/change_resource_owner.html", resource_id = resource_id) + +@app.route("/resources/change_default_privileges", methods=('POST',)) +def change_default_privileges(): + resource_id = request.form['resource_id'] + admin_status = check_owner_or_admin(resource_id=resource_id) + if admin_status == "owner" or admin_status == "edit-admins": + resource_info = get_resource_info(resource_id) + default_mask = resource_info['default_mask'] + if request.form['open_to_public'] == "True": + default_mask['data'] = 'view' + else: + default_mask['data'] = 'no-access' + resource_info['default_mask'] = default_mask + add_resource(resource_info) + flash("Your changes have been saved.", "alert-info") + return redirect(url_for("manage_resource", resource_id=resource_id)) + else: + return redirect(url_for("no_access_page")) + +@app.route("/resources/add_group", methods=('POST',)) +def add_group_to_resource(): + resource_id = request.form['resource_id'] + admin_status = check_owner_or_admin(resource_id=resource_id)[1] + if admin_status == "owner" or admin_status == "edit-admins" or admin_status == "edit-access": + if 'selected_group' in request.form: + group_id = request.form['selected_group'] + resource_info = get_resource_info(resource_id) + default_privileges = resource_info['default_mask'] + return render_template("admin/set_group_privileges.html", resource_id = resource_id, group_id = group_id, default_privileges = default_privileges) + elif all(key in request.form for key in ('data_privilege', 'metadata_privilege', 'admin_privilege')): + group_id = request.form['group_id'] + group_name = get_group_info(group_id)['name'] + access_mask = { + 'data': request.form['data_privilege'], + 'metadata': request.form['metadata_privilege'], + 'admin': request.form['admin_privilege'] + } + add_access_mask(resource_id, group_id, access_mask) + flash("Privileges have been added for group {}.".format(group_name), "alert-info") + return redirect(url_for("manage_resource", resource_id=resource_id)) + else: + return render_template("admin/search_for_groups.html", resource_id = resource_id) + else: + return redirect(url_for("no_access_page")) + +def get_group_names(group_masks): + group_masks_with_names = {} + for group_id, group_mask in group_masks.iteritems(): + this_mask = group_mask + group_name = get_group_info(group_id)['name'] + this_mask['name'] = group_name + group_masks_with_names[group_id] = this_mask + return group_masks_with_names \ No newline at end of file diff --git a/wqflask/wqflask/static/new/javascript/group_manager.js b/wqflask/wqflask/static/new/javascript/group_manager.js index 5e82d104..4c172cbf 100644 --- a/wqflask/wqflask/static/new/javascript/group_manager.js +++ b/wqflask/wqflask/static/new/javascript/group_manager.js @@ -1,38 +1,38 @@ -$('#add_to_admins').click(function() { - add_emails('admin') -}) - -$('#add_to_members').click(function() { - add_emails('member') -}) - -$('#clear_admins').click(function(){ - clear_emails('admin') -}) - -$('#clear_members').click(function(){ - clear_emails('member') -}) - - -function add_emails(user_type){ - var email_address = $('input[name=user_email]').val(); - var email_list_string = $('input[name=' + user_type + '_emails_to_add]').val() - console.log(email_list_string) - if (email_list_string == ""){ - var email_set = new 
Set(); - } else { - var email_set = new Set(email_list_string.split(",")) - } - email_set.add(email_address) - - $('input[name=' + user_type + '_emails_to_add]').val(Array.from(email_set).join(',')) - - var emails_display_string = Array.from(email_set).join('\n') - $('.added_' + user_type + 's').val(emails_display_string) -} - -function clear_emails(user_type){ - $('input[name=' + user_type + '_emails_to_add]').val("") - $('.added_' + user_type + 's').val("") +$('#add_to_admins').click(function() { + add_emails('admin') +}) + +$('#add_to_members').click(function() { + add_emails('member') +}) + +$('#clear_admins').click(function(){ + clear_emails('admin') +}) + +$('#clear_members').click(function(){ + clear_emails('member') +}) + + +function add_emails(user_type){ + var email_address = $('input[name=user_email]').val(); + var email_list_string = $('input[name=' + user_type + '_emails_to_add]').val().trim() + console.log(email_list_string) + if (email_list_string == ""){ + var email_set = new Set(); + } else { + var email_set = new Set(email_list_string.split(",")) + } + email_set.add(email_address) + + $('input[name=' + user_type + '_emails_to_add]').val(Array.from(email_set).join(',')) + + var emails_display_string = Array.from(email_set).join('\n') + $('.added_' + user_type + 's').val(emails_display_string) +} + +function clear_emails(user_type){ + $('input[name=' + user_type + '_emails_to_add]').val("") + $('.added_' + user_type + 's').val("") } \ No newline at end of file diff --git a/wqflask/wqflask/static/new/javascript/search_results.js b/wqflask/wqflask/static/new/javascript/search_results.js index 8fa698b4..115dac13 100644 --- a/wqflask/wqflask/static/new/javascript/search_results.js +++ b/wqflask/wqflask/static/new/javascript/search_results.js @@ -296,7 +296,6 @@ $(function() { $("#deselect_all").click(deselect_all); $("#invert").click(invert); $("#add").click(add); - $("#remove").click(remove); $("#submit_bnw").click(submit_bnw); $("#export_traits").click(export_traits); $('.trait_checkbox, .btn').click(change_buttons); diff --git a/wqflask/wqflask/templates/admin/change_resource_owner.html b/wqflask/wqflask/templates/admin/change_resource_owner.html new file mode 100644 index 00000000..ae9409b0 --- /dev/null +++ b/wqflask/wqflask/templates/admin/change_resource_owner.html @@ -0,0 +1,116 @@ +{% extends "base.html" %} +{% block title %}Resource Manager{% endblock %} +{% block css %} + + +{% endblock %} +{% block content %} + +
+ [change_resource_owner form: markup not preserved in this extract; surviving label: "Search for user by either name or e-mail:"]
+
+ + + +{% endblock %} + +{% block js %} + + + +{% endblock %} diff --git a/wqflask/wqflask/templates/admin/create_group.html b/wqflask/wqflask/templates/admin/create_group.html index 55c3fa0b..5a6929fb 100644 --- a/wqflask/wqflask/templates/admin/create_group.html +++ b/wqflask/wqflask/templates/admin/create_group.html @@ -1,89 +1,89 @@ -{% extends "base.html" %} -{% block title %}Group Manager{% endblock %} -{% block content %} - -
-
- - - - - -{% endblock %} - -{% block js %} - - - - - -{% endblock %} +{% extends "base.html" %} +{% block title %}Group Manager{% endblock %} +{% block content %} + +
+
+ + + + + +{% endblock %} + +{% block js %} + + + + + +{% endblock %} diff --git a/wqflask/wqflask/templates/admin/group_manager.html b/wqflask/wqflask/templates/admin/group_manager.html index 23d8205a..70d55684 100644 --- a/wqflask/wqflask/templates/admin/group_manager.html +++ b/wqflask/wqflask/templates/admin/group_manager.html @@ -23,7 +23,7 @@
{% else %} -

Admin Groups

+

Admin Groups


{% if admin_groups|length == 0 %}

You currently aren't the administrator of any groups.

@@ -45,8 +45,8 @@ {{ loop.index }} - {{ group.name }} - {{ group.admins|length + group.users|length }} + {{ group.name }} + {{ group.admins|length + group.members|length }} {{ group.created_timestamp }} {{ group.changed_timestamp }} {{ group.id }} @@ -58,7 +58,7 @@
-

User Groups

+

User Groups


{% if user_groups|length == 0 %}

You currently aren't a member of any groups.

@@ -80,7 +80,7 @@ {{ loop.index }} {{ group.name }} - {{ group.admins|length + group.users|length }} + {{ group.admins|length + group.members|length }} {{ group.created_timestamp }} {{ group.changed_timestamp }} @@ -103,10 +103,14 @@ - - - - -{% endblock %} +{% extends "base.html" %} +{% block title %}Resource Manager{% endblock %} +{% block css %} + + +{% endblock %} +{% block content %} + +
+ {{ flash_me() }} + +
+ +
+
+
+
+ +
+ {{ resource_info.name }} +
+
+ {% if admin_status == "owner" %} +
+ +
+ + +
+
+
+ +
+ +
+
+ {% endif %} +
+
+
+ {% if admin_status == "owner" or admin_status == "edit-admins" or admin_status == "edit-access" %} +
+
+ +
+ {% if group_masks|length > 0 %} +

Current Group Permissions

+
+ + + + + + + + + + + {% for key, value in group_masks.iteritems() %} + + + + + + + {% endfor %} + +
Name | Data | Metadata | Admin
{{ value.name }} | {{ value.data }} | {{ value.metadata }} | {{ value.admin }}
+ {% else %} +

No groups are currently added to this resource.

+ {% endif %} +
+ {% endif %} +
+
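For orientation: the "Current Group Permissions" table above is rendered from the resource record that redis_tools.py keeps in the Redis "resources" hash. A rough sketch of that record's shape, inferred from add_resource() and add_access_mask() earlier in this patch — every name, ID, and value below is invented for illustration:

    import json

    # Hypothetical entry stored via Redis.hset("resources", resource_id, json.dumps(...)).
    # The group_masks block is what the table above displays, one row per group.
    example_resource = {
        "name": "BXD_10001",
        "owner_id": "example-owner-uuid",
        "type": "dataset-publish",
        "data": {"dataset": "1", "trait": "10001"},
        "default_mask": {"data": "view"},
        "group_masks": {
            "example-group-uuid": {"data": "view", "metadata": "view", "admin": "not-admin"}
        }
    }
    print(json.dumps(example_resource, indent=2))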
+ + + + + +{% endblock %} + +{% block js %} + + + +{% endblock %} diff --git a/wqflask/wqflask/templates/admin/search_for_groups.html b/wqflask/wqflask/templates/admin/search_for_groups.html index 89eb11dd..f304a172 100644 --- a/wqflask/wqflask/templates/admin/search_for_groups.html +++ b/wqflask/wqflask/templates/admin/search_for_groups.html @@ -1,64 +1,134 @@ -{% extends "base.html" %} -{% block title %}Resource Manager{% endblock %} -{% block content %} - -
- [old search form: markup not preserved in this extract; surviving heading: "Search by:"]
-
- - - -{% endblock %} - -{% block js %} - - - - - -{% endblock %} +{% extends "base.html" %} +{% block title %}Resource Manager{% endblock %} +{% block css %} + + +{% endblock %} +{% block content %} + +
+ [new search form: markup not preserved in this extract; surviving heading: "Search by:"]
+
+
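In the new resource manager this search form is backed by the /search_for_groups endpoint added in resource_manager.py earlier in this patch; the template's JavaScript posts the four search fields and renders the JSON it gets back. A minimal stand-alone sketch of that round trip — the host/port and search values are assumptions, not part of the patch:

    import requests

    # search_for_groups() reads all four keys from request.form, so send them even when empty;
    # the response body is a JSON list of matching group records.
    payload = {"group_id": "", "group_name": "BXD", "user_name": "", "user_email": ""}
    response = requests.post("http://localhost:5003/search_for_groups", data=payload)
    for group in response.json():
        print(group.get("id"), group.get("name"))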
+ + + +{% endblock %} + +{% block js %} + + + + + +{% endblock %} diff --git a/wqflask/wqflask/templates/admin/select_group_to_add.html b/wqflask/wqflask/templates/admin/select_group_to_add.html deleted file mode 100644 index df70fb2f..00000000 --- a/wqflask/wqflask/templates/admin/select_group_to_add.html +++ /dev/null @@ -1,54 +0,0 @@ -{% extends "base.html" %} -{% block title %}Matched Groups{% endblock %} -{% block css %} - - - -{% endblock %} -{% block content %} - -
-

The following groups were found:

-
-
- -
- {% if group_list|length > 0 %} - - - - - - - - - - - - {% for group in group_list %} - - - - - - - {% endfor %} - -
Name | Created | Last Changed
{% if 'name' in group %}{{ group.name }}{% else %}N/A{% endif %} | {% if 'created_timestamp' in group %}{{ group.created_timestamp }}{% else %}N/A{% endif %} | {% if 'changed_timestamp' in group %}{{ group.changed_timestamp }}{% else %}N/A{% endif %}
- {% else %} -

No matching groups were found.

- {% endif %} -
-
-
- - - -{% endblock %} - -{% block js %} - - -{% endblock %} diff --git a/wqflask/wqflask/templates/admin/set_group_privileges.html b/wqflask/wqflask/templates/admin/set_group_privileges.html new file mode 100644 index 00000000..bc52788f --- /dev/null +++ b/wqflask/wqflask/templates/admin/set_group_privileges.html @@ -0,0 +1,102 @@ +{% extends "base.html" %} +{% block title %}Set Group Privileges{% endblock %} +{% block css %} + + + +{% endblock %} +{% block content %} + +
+

Group Privileges

+
+
+ + +
+ +
+

Data and Metadata Privileges

+ + + + + + + + + + + + + {% if 'data' in default_privileges %} + + + + {% else %} + + + + {% endif %} + + + + {% if 'metadata' in default_privileges %} + + + + {% else %} + + + + {% endif %} + + +
No-Access | View | Edit
Data:
Metadata:
+
+

Admin Privileges

+ + + + + + + + + + + + + {% if 'admin' in default_privileges %} + + + + {% else %} + + + + {% endif %} + + +
Not Admin | Edit Access | Edit Admins
Admin:
+
+
+
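Submitting this form sends one value per radio group back to /resources/add_group (add_group_to_resource() earlier in this patch), which stores them as a single access mask for the selected group. A small sketch of that mask; the allowed strings mirror the SUPER_PRIVILEGES constant added to webqtlConfig.py later in this patch series, and the chosen values are only examples:

    # data/metadata: "no-access" | "view" | "edit"
    # admin:         "not-admin" | "edit-access" | "edit-admins"
    access_mask = {
        "data": "view",        # from the data_privilege radio buttons
        "metadata": "view",    # from the metadata_privilege radio buttons
        "admin": "not-admin",  # from the admin_privilege radio buttons
    }
    # add_access_mask(resource_id, group_id, access_mask) then saves this under
    # resource_info["group_masks"][group_id] in the Redis "resources" hash.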
+ + + +{% endblock %} + +{% block js %} + + +{% endblock %} diff --git a/wqflask/wqflask/templates/admin/view_group.html b/wqflask/wqflask/templates/admin/view_group.html new file mode 100644 index 00000000..b797cd70 --- /dev/null +++ b/wqflask/wqflask/templates/admin/view_group.html @@ -0,0 +1,238 @@ +{% extends "base.html" %} +{% block title %}View and Edit Group{% endblock %} +{% block css %} + + + +{% endblock %} +{% block content %} + +
+ +
+ + + +
+
+
+

Admins

+
+ + + + + + + + + + + + {% for admin in admins %} + + + + + + + + {% endfor %} + +
Index | Name | Email Address | Organization
{{ loop.index }} | {% if 'full_name' in admin %}{{ admin.full_name }}{% else %}N/A{% endif %} | {% if 'email_address' in admin %}{{ admin.email_address }}{% else %}N/A{% endif %} | {% if 'organization' in admin %}{{ admin.organization }}{% else %}N/A{% endif %}
+ {% if user_is_admin == true %} +
+ E-mail of user to add to admins (multiple e-mails can be added, separated by commas): +
+
+ +
+ {% endif %} +
+
+
+ {% if members|length > 0 %} +

Members

+
+ + + + + + + + + + + + {% for member in members %} + + + + + + + + {% endfor %} + +
Index | Name | Email Address | Organization
{{ loop.index }} | {% if 'full_name' in member %}{{ member.full_name }}{% else %}N/A{% endif %} | {% if 'email_address' in member %}{{ member.email_address }}{% else %}N/A{% endif %} | {% if 'organization' in member %}{{ member.organization }}{% else %}N/A{% endif %}
+ {% if user_is_admin == true %} +
+ E-mail of user to add to members (multiple e-mails can be added, separated by commas): +
+
+ +
+ {% endif %} + {% else %} + There are currently no members in this group. + {% endif %} +
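The Admins and Members tables above are driven by the group record kept in the Redis "groups" hash. A rough, illustrative sketch of that record, based on create_group() and the fields this template reads — all IDs and timestamps below are invented:

    import json

    # Hypothetical entry stored via Redis.hset("groups", group_id, json.dumps(...)).
    example_group = {
        "id": "example-group-uuid",
        "name": "Example Lab",
        "admins": ["example-admin-user-uuid"],
        "members": ["example-member-user-uuid-1", "example-member-user-uuid-2"],
        "created_timestamp": "Jun 17 2020 02:30PM",
        "changed_timestamp": "Jun 20 2020 05:15PM"
    }
    print(json.dumps(example_group, indent=2))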
+
+
+

Resources

+
+ {% if resources|length > 0 %} + + + + + + + + + + + + {% for resource in resources %} + + + + + + + + {% endfor %} + +
Index | Name | Data | Metadata | Admin
{{ loop.index }} | {% if 'name' in resource %}{{ resource.name }}{% else %}N/A{% endif %} | {% if 'data' in resource %}{{ resource.data }}{% else %}N/A{% endif %} | {% if 'metadata' in resource %}{{ resource.metadata }}{% else %}N/A{% endif %} | {% if 'admin' in resource %}{{ resource.admin }}{% else %}N/A{% endif %}
+ {% else %} + There are currently no resources associated with this group. + {% endif %} +
+
+
+
+ + + +{% endblock %} + +{% block js %} + + + +{% endblock %} diff --git a/wqflask/wqflask/templates/base.html b/wqflask/wqflask/templates/base.html index 07c1b48e..262d9ee5 100644 --- a/wqflask/wqflask/templates/base.html +++ b/wqflask/wqflask/templates/base.html @@ -94,6 +94,11 @@ Sign in {% endif %} + {% if g.user_session.logged_in %} +
  • + Manage Groups +
  • + {% endif %} {% endif %} +
    +

    Group Privileges

    +
    +
    + +
    + +
    +

    Data and Metadata Privileges

    + + + + + + + + + + + + + + + + + + + + + + + +
    No-Access | View | Edit
    Data:
    Metadata:
    +
    +

    Admin Privileges

    + + + + + + + + + + + + + + + + + +
    Not Admin | Edit Access | Edit Admins
    Admin:
    +
    +
    +
    + + + +{% endblock %} + +{% block js %} + + +{% endblock %} diff --git a/wqflask/wqflask/templates/show_trait_details.html b/wqflask/wqflask/templates/show_trait_details.html index 5c315878..5e0bae79 100644 --- a/wqflask/wqflask/templates/show_trait_details.html +++ b/wqflask/wqflask/templates/show_trait_details.html @@ -248,8 +248,8 @@ - {% if resource_id %} - + {% if admin_status[1] == "owner" or admin_status[1] == "edit-admins" or admin_status[1] == "edit-access" %} + {% endif %} diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index ee827ba3..dc431aa9 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -102,7 +102,7 @@ def check_access_permissions(): else: available = check_resource_availability(dataset) - if not available: + if available == "no-access": return redirect(url_for("no_access_page")) @app.teardown_appcontext -- cgit v1.2.3 From fcb3cb1105cf2a1d97c1a08fa636b118ed231ffa Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 17 Jun 2020 16:28:15 -0500 Subject: A user's id is now set as a parameter if it doesn't already exist --- wqflask/maintenance/set_resource_defaults.py | 8 +++--- wqflask/utility/authentication_tools.py | 30 ++++++++++++---------- wqflask/utility/redis_tools.py | 4 +++ wqflask/wqflask/group_manager.py | 4 +-- wqflask/wqflask/templates/admin/group_manager.html | 16 +++++++----- 5 files changed, 36 insertions(+), 26 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/maintenance/set_resource_defaults.py b/wqflask/maintenance/set_resource_defaults.py index 344e6a23..0c221bbf 100644 --- a/wqflask/maintenance/set_resource_defaults.py +++ b/wqflask/maintenance/set_resource_defaults.py @@ -27,8 +27,9 @@ sys.path.insert(0,'./') # NEW: import app to avoid a circular dependency on utility.tools from wqflask import app +from utility import hmac from utility.tools import SQL_URI -from utility.redis_tools import get_redis_conn, get_user_id, add_resource, get_resources +from utility.redis_tools import get_redis_conn, get_user_id, add_resource, get_resources, get_resource_info Redis = get_redis_conn() import MySQLdb @@ -117,7 +118,7 @@ def insert_geno_resources(default_owner_id): resource_ob = {} resource_ob['name'] = resource[1] if resource[1] == "HET3-ITPGeno": - resource_ob['owner_id'] = "73a3f093-ca13-4ae0-a179-9a446f709f6e" + resource_ob['owner_id'] = "c5ce8c56-78a6-474f-bcaf-7129d97f56ae" else: resource_ob['owner_id'] = default_owner_id resource_ob['data'] = { "dataset" : str(resource[0]) } @@ -145,7 +146,8 @@ def main(): Redis.delete("resources") - owner_id = get_user_id("email_address", "zachary.a.sloan@gmail.com") + owner_id = "c5ce8c56-78a6-474f-bcaf-7129d97f56ae" + insert_resources(owner_id) if __name__ == '__main__': diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index 07ceacc0..dfa0e2d9 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -7,6 +7,7 @@ from base import data_set from utility import hmac from utility.redis_tools import get_redis_conn, get_resource_info, get_resource_id +Redis = get_redis_conn() from flask import Flask, g, redirect, url_for @@ -14,8 +15,12 @@ import logging logger = logging.getLogger(__name__ ) def check_resource_availability(dataset, trait_id=None): - resource_id = get_resource_id(dataset, trait_id) + #ZS: Check if super-user - we should probably come up with some way to integrate this into the proxy + if g.user_session.user_id in Redis.smembers("super_users"): + return "edit" + + 
resource_id = get_resource_id(dataset, trait_id) response = None if resource_id: resource_info = get_resource_info(resource_id) @@ -68,19 +73,16 @@ def check_owner(dataset=None, trait_id=None, resource_id=None): return False def check_owner_or_admin(dataset=None, trait_id=None, resource_id=None): - if resource_id: - resource_info = get_resource_info(resource_id) - if g.user_session.user_id == resource_info['owner_id']: - return [resource_id, "owner"] - else: - return [resource_id, check_admin(resource_id)] - else: + if not resource_id: resource_id = get_resource_id(dataset, trait_id) - if resource_id: - resource_info = get_resource_info(resource_id) - if g.user_session.user_id == resource_info['owner_id']: - return [resource_id, "owner"] - else: - return [resource_id, check_admin(resource_id)] + + if g.user_session.user_id in Redis.smembers("super_users"): + return [resource_id, "owner"] + + resource_info = get_resource_info(resource_id) + if g.user_session.user_id == resource_info['owner_id']: + return [resource_id, "owner"] + else: + return [resource_id, check_admin(resource_id)] return [resource_id, "not-admin"] \ No newline at end of file diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py index c6d221ff..9d09a66b 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -30,6 +30,7 @@ def is_redis_available(): def get_user_id(column_name, column_value): user_list = Redis.hgetall("users") + key_list = [] for key in user_list: user_ob = json.loads(user_list[key]) if column_name in user_ob and user_ob[column_name] == column_value: @@ -62,6 +63,9 @@ def get_users_like_unique_column(column_name, column_value): if column_name != "user_id": for key in user_list: user_ob = json.loads(user_list[key]) + if "user_id" not in user_ob: + set_user_attribute(key, "user_id", key) + user_ob["user_id"] = key if column_name in user_ob: if column_value in user_ob[column_name]: matched_users.append(user_ob) diff --git a/wqflask/wqflask/group_manager.py b/wqflask/wqflask/group_manager.py index 9afc016b..24848ed8 100644 --- a/wqflask/wqflask/group_manager.py +++ b/wqflask/wqflask/group_manager.py @@ -19,8 +19,8 @@ def manage_groups(): if "add_new_group" in params: return redirect(url_for('add_group')) else: - admin_groups, user_groups = get_user_groups(g.user_session.user_id) - return render_template("admin/group_manager.html", admin_groups=admin_groups, user_groups=user_groups) + admin_groups, member_groups = get_user_groups(g.user_session.user_id) + return render_template("admin/group_manager.html", admin_groups=admin_groups, member_groups=member_groups) @app.route("/groups/view", methods=('GET', 'POST')) def view_group(): diff --git a/wqflask/wqflask/templates/admin/group_manager.html b/wqflask/wqflask/templates/admin/group_manager.html index 70d55684..c8ed6851 100644 --- a/wqflask/wqflask/templates/admin/group_manager.html +++ b/wqflask/wqflask/templates/admin/group_manager.html @@ -10,18 +10,20 @@
    - {% if admin_groups|length == 0 and user_groups|length == 0 %} + {% if admin_groups|length == 0 and member_groups|length == 0 %}

    You currently aren't a member or admin of any groups.


    - + {% else %}

    Admin Groups
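The admin_groups and member_groups lists that this template renders come from get_user_groups() (see the group_manager.py change above). As a rough sketch only, assuming groups live in a Redis hash named "groups" whose values are JSON objects with hypothetical "admins" and "members" lists of user ids, the split might look like this:

    import json
    from utility.redis_tools import get_redis_conn
    Redis = get_redis_conn()

    def get_user_groups(user_id):
        # Hypothetical layout: the "groups" hash maps group_id -> JSON blob
        # containing "admins" and "members" lists of user ids.
        admin_groups = []
        member_groups = []
        for group_id, group_json in Redis.hgetall("groups").items():
            group_ob = json.loads(group_json)
            if user_id in group_ob.get("admins", []):
                admin_groups.append(group_ob)
            elif user_id in group_ob.get("members", []):
                member_groups.append(group_ob)
        return admin_groups, member_groups

manage_groups() then just unpacks that pair and hands both lists to this template, as the group_manager.py hunk above shows.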


    @@ -60,10 +62,10 @@

    User Groups


    - {% if user_groups|length == 0 %} + {% if member_groups|length == 0 %}

    You currently aren't a member of any groups.

    {% else %} - +
    @@ -75,7 +77,7 @@ - {% for group in user_groups %} + {% for group in member_groups %} @@ -107,8 +109,8 @@ 'sDom': 'tr' }); {% endif %} - {% if user_groups|length != 0 %} - $('#user_groups').dataTable({ + {% if member_groups|length != 0 %} + $('#member_groups').dataTable({ 'sDom': 'tr' }); {% endif %} -- cgit v1.2.3 From 75802ed1f9e5d955987bf5f5eb78a9cb120116ec Mon Sep 17 00:00:00 2001 From: zsloan Date: Sat, 20 Jun 2020 17:33:22 -0500 Subject: Added some admin functionality and fixed issue with temp traits --- wqflask/base/trait.py | 17 ++++-- wqflask/base/webqtlConfig.py | 4 ++ wqflask/maintenance/set_resource_defaults.py | 20 +++++-- wqflask/utility/authentication_tools.py | 79 +++++++++++++++------------- wqflask/wqflask/resource_manager.py | 6 +-- wqflask/wqflask/views.py | 20 ++++--- 6 files changed, 90 insertions(+), 56 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index 7700ecd5..c2b8b910 100644 --- a/wqflask/base/trait.py +++ b/wqflask/base/trait.py @@ -42,11 +42,11 @@ def create_trait(**kw): if kw.get('dataset_name') != "Temp": if dataset.type == 'Publish': - permitted = check_resource_availability(dataset, kw.get('name')) + permissions = check_resource_availability(dataset, kw.get('name')) else: - permitted = check_resource_availability(dataset) + permissions = check_resource_availability(dataset) - if permitted != "no-access": + if "view" in permissions['data']: the_trait = GeneralTrait(**kw) if the_trait.dataset.type != "Temp": @@ -382,9 +382,16 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): resource_id = get_resource_id(dataset, trait.name) if dataset.type == 'Publish': - the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view".format(resource_id, g.user_session.user_id) + the_url = "http://localhost:8081/run-action?resource={}&user={}&branch=data&action=view".format(resource_id, g.user_session.user_id) else: - the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view&trait={}".format(resource_id, g.user_session.user_id, trait.name) + the_url = "http://localhost:8081/run-action?resource={}&user={}&branch=data&action=view&trait={}".format(resource_id, g.user_session.user_id, trait.name) + + response = requests.get(the_url).content + if response.strip() == "no-access": + trait.view = False + return trait + else: + trait_info = json.loads(response) try: response = requests.get(the_url).content diff --git a/wqflask/base/webqtlConfig.py b/wqflask/base/webqtlConfig.py index 55407123..3d86bc22 100644 --- a/wqflask/base/webqtlConfig.py +++ b/wqflask/base/webqtlConfig.py @@ -17,6 +17,10 @@ DEBUG = 1 #USER privilege USERDICT = {'guest':1,'user':2, 'admin':3, 'root':4} +#Set privileges +SUPER_PRIVILEGES = {'data': ['no-access', 'view', 'edit'], 'metadata': ['no-access', 'view', 'edit'], 'admin': ['not-admin', 'edit-access', 'edit-admins']} +DEFAULT_PRIVILEGES = {'data': ['no-access', 'view'], 'metadata': ['no-access', 'view'], 'admin': ['not-admin']} + #minimum number of informative strains KMININFORMATIVE = 5 diff --git a/wqflask/maintenance/set_resource_defaults.py b/wqflask/maintenance/set_resource_defaults.py index 0c221bbf..ddb3b17b 100644 --- a/wqflask/maintenance/set_resource_defaults.py +++ b/wqflask/maintenance/set_resource_defaults.py @@ -68,9 +68,13 @@ def insert_probeset_resources(default_owner_id): resource_ob['data'] = { "dataset" : str(resource[0])} resource_ob['type'] = "dataset-probeset" if resource[2] < 1 and resource[3] > 0: - 
resource_ob['default_mask'] = { "data": "view" } + resource_ob['default_mask'] = { "data": "view", + "metadata": "view", + "admin": "not-admin"} else: - resource_ob['default_mask'] = { "data": "no-access" } + resource_ob['default_mask'] = { "data": "no-access", + "metadata": "no-access", + "admin": "not-admin"} resource_ob['group_masks'] = {} add_resource(resource_ob) @@ -98,7 +102,9 @@ def insert_publish_resources(default_owner_id): resource_ob['data'] = { "dataset" : str(resource[1]) , "trait" : str(resource[0])} resource_ob['type'] = "dataset-publish" - resource_ob['default_mask'] = { "data": "view" } + resource_ob['default_mask'] = { "data": "view", + "metadata": "view", + "admin": "not-admin"} resource_ob['group_masks'] = {} @@ -124,9 +130,13 @@ def insert_geno_resources(default_owner_id): resource_ob['data'] = { "dataset" : str(resource[0]) } resource_ob['type'] = "dataset-geno" if resource[2] < 1: - resource_ob['default_mask'] = { "data": "view" } + resource_ob['default_mask'] = { "data": "view", + "metadata": "view", + "admin": "not-admin"} else: - resource_ob['default_mask'] = { "data": "no-access" } + resource_ob['default_mask'] = { "data": "no-access", + "metadata": "no-access", + "admin": "not-admin"} resource_ob['group_masks'] = {} add_resource(resource_ob) diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index dfa0e2d9..6c88949b 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -3,7 +3,7 @@ from __future__ import absolute_import, print_function, division import json import requests -from base import data_set +from base import data_set, webqtlConfig from utility import hmac from utility.redis_tools import get_redis_conn, get_resource_info, get_resource_id @@ -18,45 +18,47 @@ def check_resource_availability(dataset, trait_id=None): #ZS: Check if super-user - we should probably come up with some way to integrate this into the proxy if g.user_session.user_id in Redis.smembers("super_users"): - return "edit" + return webqtlConfig.SUPER_PRIVILEGES - resource_id = get_resource_id(dataset, trait_id) response = None - if resource_id: - resource_info = get_resource_info(resource_id) - - the_url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id) - try: - response = json.loads(requests.get(the_url).content)['data'] - except: - response = resource_info['default_mask']['data'] - if 'edit' in response: - return "edit" - elif 'view' in response: - return "view" - else: - return "no-access" + #At least for now assume temporary entered traits are accessible#At least for now assume temporary entered traits are accessible + if type(dataset) == str: + return webqtlConfig.DEFAULT_PRIVILEGES + if dataset.type == "Temp": + return webqtlConfig.DEFAULT_PRIVILEGES - return False + resource_id = get_resource_id(dataset, trait_id) -def check_admin(resource_id=None): + if resource_id: + resource_info = get_resource_info(resource_id) + else: + return response #ZS: Need to substitute in something that creates the resource in Redis later - return "not-admin" + the_url = "http://localhost:8081/available?resource={}&user={}".format(resource_id, g.user_session.user_id) + try: + response = json.loads(requests.get(the_url).content) + except: + response = resource_info['default_mask'] - # ZS: commented out until proxy can return this - # the_url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id) - # try: - # response = 
json.loads(requests.get(the_url).content) - # except: - # response = resource_info['default_mask']['admin'] + if response: + return response + else: #ZS: No idea how this would happen, but just in case + return False - # if 'edit-admins' in response: - # return "edit-admins" - # elif 'edit-access' in response: - # return "edit-access" - # else: - # return "not-admin" +def check_admin(resource_id=None): + the_url = "http://localhost:8081/available?resource={}&user={}".format(resource_id, g.user_session.user_id) + try: + response = json.loads(requests.get(the_url).content)['admin'] + except: + response = resource_info['default_mask']['admin'] + + if 'edit-admins' in response: + return "edit-admins" + elif 'edit-access' in response: + return "edit-access" + else: + return "not-admin" def check_owner(dataset=None, trait_id=None, resource_id=None): if resource_id: @@ -74,15 +76,18 @@ def check_owner(dataset=None, trait_id=None, resource_id=None): def check_owner_or_admin(dataset=None, trait_id=None, resource_id=None): if not resource_id: - resource_id = get_resource_id(dataset, trait_id) + if dataset.type == "Temp": + return "not-admin" + else: + resource_id = get_resource_id(dataset, trait_id) if g.user_session.user_id in Redis.smembers("super_users"): - return [resource_id, "owner"] + return "owner" resource_info = get_resource_info(resource_id) if g.user_session.user_id == resource_info['owner_id']: - return [resource_id, "owner"] + return "owner" else: - return [resource_id, check_admin(resource_id)] + return check_admin(resource_id) - return [resource_id, "not-admin"] \ No newline at end of file + return "not-admin" \ No newline at end of file diff --git a/wqflask/wqflask/resource_manager.py b/wqflask/wqflask/resource_manager.py index 0f9f5c9d..39a07310 100644 --- a/wqflask/wqflask/resource_manager.py +++ b/wqflask/wqflask/resource_manager.py @@ -18,7 +18,7 @@ def manage_resource(): params = request.form if request.form else request.args if 'resource_id' in request.args: resource_id = request.args['resource_id'] - admin_status = check_owner_or_admin(resource_id=resource_id)[1] + admin_status = check_owner_or_admin(resource_id=resource_id) resource_info = get_resource_info(resource_id) group_masks = resource_info['group_masks'] @@ -67,7 +67,7 @@ def search_for_groups(): def change_owner(): resource_id = request.form['resource_id'] if 'new_owner' in request.form: - admin_status = check_owner_or_admin(resource_id=resource_id)[1] + admin_status = check_owner_or_admin(resource_id=resource_id) if admin_status == "owner": new_owner_id = request.form['new_owner'] change_resource_owner(resource_id, new_owner_id) @@ -100,7 +100,7 @@ def change_default_privileges(): @app.route("/resources/add_group", methods=('POST',)) def add_group_to_resource(): resource_id = request.form['resource_id'] - admin_status = check_owner_or_admin(resource_id=resource_id)[1] + admin_status = check_owner_or_admin(resource_id=resource_id) if admin_status == "owner" or admin_status == "edit-admins" or admin_status == "edit-access": if 'selected_group' in request.form: group_id = request.form['selected_group'] diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index dc431aa9..bc01839b 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -96,13 +96,21 @@ def check_access_permissions(): pass else: if 'dataset' in request.args: - dataset = create_dataset(request.args['dataset']) - if 'trait_id' in request.args: - available = check_resource_availability(dataset, request.args['trait_id']) + if 
request.args['dataset'] == "Temp": + permissions = check_resource_availability("Temp") else: - available = check_resource_availability(dataset) - - if available == "no-access": + dataset = create_dataset(request.args['dataset']) + + if dataset.type == "Temp": + permissions = False + if 'trait_id' in request.args: + permissions = check_resource_availability(dataset, request.args['trait_id']) + elif dataset.type != "Publish": + permissions = check_resource_availability(dataset) + else: + return None + + if 'view' not in permissions['data']: return redirect(url_for("no_access_page")) @app.teardown_appcontext -- cgit v1.2.3 From 51417c06061246bc92be89db198b3e74e7126035 Mon Sep 17 00:00:00 2001 From: zsloan Date: Sat, 20 Jun 2020 17:47:38 -0500 Subject: Fixed ports for proxy (though I need to add the port to global variables) and also simplified the check_owner_or_admin function a little --- wqflask/base/trait.py | 4 ++-- wqflask/utility/authentication_tools.py | 4 ++-- wqflask/wqflask/show_trait/show_trait.py | 3 ++- wqflask/wqflask/templates/show_trait_details.html | 4 ++-- 4 files changed, 8 insertions(+), 7 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index c2b8b910..0e26ca2c 100644 --- a/wqflask/base/trait.py +++ b/wqflask/base/trait.py @@ -382,9 +382,9 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): resource_id = get_resource_id(dataset, trait.name) if dataset.type == 'Publish': - the_url = "http://localhost:8081/run-action?resource={}&user={}&branch=data&action=view".format(resource_id, g.user_session.user_id) + the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view".format(resource_id, g.user_session.user_id) else: - the_url = "http://localhost:8081/run-action?resource={}&user={}&branch=data&action=view&trait={}".format(resource_id, g.user_session.user_id, trait.name) + the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view&trait={}".format(resource_id, g.user_session.user_id, trait.name) response = requests.get(the_url).content if response.strip() == "no-access": diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index 6c88949b..06b2854a 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -35,7 +35,7 @@ def check_resource_availability(dataset, trait_id=None): else: return response #ZS: Need to substitute in something that creates the resource in Redis later - the_url = "http://localhost:8081/available?resource={}&user={}".format(resource_id, g.user_session.user_id) + the_url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id) try: response = json.loads(requests.get(the_url).content) except: @@ -47,7 +47,7 @@ def check_resource_availability(dataset, trait_id=None): return False def check_admin(resource_id=None): - the_url = "http://localhost:8081/available?resource={}&user={}".format(resource_id, g.user_session.user_id) + the_url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id) try: response = json.loads(requests.get(the_url).content)['admin'] except: diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index ed4ff0ad..4698807a 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -72,7 +72,8 @@ class ShowTrait(object): cellid=None) self.trait_vals = 
Redis.get(self.trait_id).split() - self.admin_status = check_owner_or_admin(self.dataset, self.trait_id) + self.resource_id = get_resource_id(self.dataset, self.trait_id) + self.admin_status = check_owner_or_admin(resource_id=self.resource_id) #ZS: Get verify/rna-seq link URLs try: diff --git a/wqflask/wqflask/templates/show_trait_details.html b/wqflask/wqflask/templates/show_trait_details.html index 5e0bae79..965c0340 100644 --- a/wqflask/wqflask/templates/show_trait_details.html +++ b/wqflask/wqflask/templates/show_trait_details.html @@ -248,8 +248,8 @@ - {% if admin_status[1] == "owner" or admin_status[1] == "edit-admins" or admin_status[1] == "edit-access" %} - + {% if admin_status == "owner" or admin_status == "edit-admins" or admin_status == "edit-access" %} + {% endif %} -- cgit v1.2.3 From 645c07b98aae9508e8a0aeedd8eaca815d5daf54 Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 24 Jun 2020 17:13:45 -0500 Subject: Temporary partial fix to issue of resources without info; need to add something that automatically inserts resources with default privileges --- wqflask/base/trait.py | 12 ++++-------- wqflask/utility/authentication_tools.py | 11 +++++++---- wqflask/utility/redis_tools.py | 5 ++++- wqflask/wqflask/correlation/show_corr_results.py | 7 +++---- wqflask/wqflask/templates/correlation_page.html | 4 ++++ 5 files changed, 22 insertions(+), 17 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index 0e26ca2c..7fc016f3 100644 --- a/wqflask/base/trait.py +++ b/wqflask/base/trait.py @@ -386,13 +386,6 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): else: the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view&trait={}".format(resource_id, g.user_session.user_id, trait.name) - response = requests.get(the_url).content - if response.strip() == "no-access": - trait.view = False - return trait - else: - trait_info = json.loads(response) - try: response = requests.get(the_url).content if response.strip() == "no-access": @@ -402,7 +395,10 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): trait_info = json.loads(response) except: resource_info = get_resource_info(resource_id) - default_permissions = resource_info['default_mask']['data'] + if resource_info: + default_permissions = resource_info['default_mask']['data'] + else: + default_permissions = webqtlConfig.DEFAULT_PRIVILEGES if 'view' not in default_permissions: trait.view = False return trait diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index 06b2854a..f9028f32 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -32,6 +32,8 @@ def check_resource_availability(dataset, trait_id=None): if resource_id: resource_info = get_resource_info(resource_id) + if not resource_info: + return webqtlConfig.DEFAULT_PRIVILEGES else: return response #ZS: Need to substitute in something that creates the resource in Redis later @@ -85,9 +87,10 @@ def check_owner_or_admin(dataset=None, trait_id=None, resource_id=None): return "owner" resource_info = get_resource_info(resource_id) - if g.user_session.user_id == resource_info['owner_id']: - return "owner" - else: - return check_admin(resource_id) + if resource_info: + if g.user_session.user_id == resource_info['owner_id']: + return "owner" + else: + return check_admin(resource_id) return "not-admin" \ No newline at end of file diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py 
index 9d09a66b..8a5f95ee 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -280,7 +280,10 @@ def get_resource_id(dataset, trait_id=None): def get_resource_info(resource_id): resource_info = Redis.hget("resources", resource_id) - return json.loads(resource_info) + if resource_info: + return json.loads(resource_info) + else: + return None def add_resource(resource_info): if 'trait' in resource_info['data']: diff --git a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py index 7eab7184..bc2912f2 100644 --- a/wqflask/wqflask/correlation/show_corr_results.py +++ b/wqflask/wqflask/correlation/show_corr_results.py @@ -200,8 +200,6 @@ class CorrelationResults(object): for _trait_counter, trait in enumerate(self.correlation_data.keys()[:self.return_number]): trait_object = create_trait(dataset=self.target_dataset, name=trait, get_qtl_info=True, get_sample_info=False) - if not trait_object: - continue if self.target_dataset.type == "ProbeSet" or self.target_dataset.type == "Geno": #ZS: Convert trait chromosome to an int for the location range option @@ -237,9 +235,8 @@ class CorrelationResults(object): trait_object.tissue_pvalue = tissue_corr_data[trait][2] elif self.corr_type == "lit": trait_object.lit_corr = lit_corr_data[trait][1] - self.correlation_results.append(trait_object) - self.target_dataset.get_trait_info(self.correlation_results, self.target_dataset.group.species) + self.correlation_results.append(trait_object) if self.corr_type != "lit" and self.dataset.type == "ProbeSet" and self.target_dataset.type == "ProbeSet": self.do_lit_correlation_for_trait_list() @@ -498,6 +495,8 @@ def do_bicor(this_trait_vals, target_trait_vals): def generate_corr_json(corr_results, this_trait, dataset, target_dataset, for_api = False): results_list = [] for i, trait in enumerate(corr_results): + if trait.view == False: + continue results_dict = {} if not for_api: results_dict['checkbox'] = "" diff --git a/wqflask/wqflask/templates/correlation_page.html b/wqflask/wqflask/templates/correlation_page.html index f429948d..3d750bea 100644 --- a/wqflask/wqflask/templates/correlation_page.html +++ b/wqflask/wqflask/templates/correlation_page.html @@ -169,7 +169,11 @@ {% endif %} + {% if trait.authors %} + {% else %} + + {% endif %}
    {{ loop.index }}N/A{% if trait.description_display|length > 70 %}{{ trait.description_display[:70] }}...{% else %}{{ trait.description_display }}{% endif %}{% if trait.authors.split(',') > 6 %}{{ trait.authors.split(',')[:6]|join(', ') }}, et al.{% else %}{{ trait.authors }}{% endif %}N/A {{ trait.pubmed_text }} -- cgit v1.2.3 From 0a0d733af4b6c61c381a1c4ec9fa162182a057e9 Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 6 Jul 2020 14:47:04 -0500 Subject: Fixed some of the logic with how traits are authenticated to avoid situation where a trait is authenticated twice --- wqflask/base/trait.py | 30 ++++++++-------------------- wqflask/maintenance/set_resource_defaults.py | 6 +++--- wqflask/utility/redis_tools.py | 5 +++-- 3 files changed, 14 insertions(+), 27 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index 7fc016f3..4c3e790c 100644 --- a/wqflask/base/trait.py +++ b/wqflask/base/trait.py @@ -49,7 +49,6 @@ def create_trait(**kw): if "view" in permissions['data']: the_trait = GeneralTrait(**kw) if the_trait.dataset.type != "Temp": - the_trait = retrieve_trait_info(the_trait, the_trait.dataset, get_qtl_info=kw.get('get_qtl_info')) return the_trait else: @@ -144,6 +143,7 @@ class GeneralTrait(object): formatted = self.post_publication_description else: formatted = "Not available" + return formatted @property @@ -388,27 +388,14 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): try: response = requests.get(the_url).content - if response.strip() == "no-access": - trait.view = False - return trait - else: - trait_info = json.loads(response) - except: - resource_info = get_resource_info(resource_id) - if resource_info: - default_permissions = resource_info['default_mask']['data'] - else: - default_permissions = webqtlConfig.DEFAULT_PRIVILEGES - if 'view' not in default_permissions: - trait.view = False - return trait - + trait_info = json.loads(response) + except: #ZS: I'm assuming the trait is viewable if the try fails for some reason; it should never reach this point unless the user has privileges, since that's dealt with in create_trait if dataset.type == 'Publish': query = """ SELECT - PublishXRef.Id, Publication.PubMed_ID, + PublishXRef.Id, InbredSet.InbredSetCode, Publication.PubMed_ID, Phenotype.Pre_publication_description, Phenotype.Post_publication_description, Phenotype.Original_description, - Phenotype.Pre_publication_abbreviation, Phenotype.Post_publication_abbreviation, + Phenotype.Pre_publication_abbreviation, Phenotype.Post_publication_abbreviation, PublishXRef.mean, Phenotype.Lab_code, Phenotype.Submitter, Phenotype.Owner, Phenotype.Authorized_Users, Publication.Authors, Publication.Title, Publication.Abstract, Publication.Journal, Publication.Volume, Publication.Pages, @@ -472,11 +459,10 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): if trait_info: trait.haveinfo = True - for i, field in enumerate(dataset.display_fields): - holder = trait_info[i] - #if isinstance(trait_info[i], basestring): - # holder = holder.encode('latin1') + holder = trait_info[i] + # if isinstance(trait_info[i], basestring): + # holder = unicode(holder.strip(codecs.BOM_UTF8), 'utf-8', "ignore") setattr(trait, field, holder) if dataset.type == 'Publish': diff --git a/wqflask/maintenance/set_resource_defaults.py b/wqflask/maintenance/set_resource_defaults.py index ddb3b17b..54fd8e7e 100644 --- a/wqflask/maintenance/set_resource_defaults.py +++ b/wqflask/maintenance/set_resource_defaults.py @@ -77,7 +77,7 @@ def 
insert_probeset_resources(default_owner_id): "admin": "not-admin"} resource_ob['group_masks'] = {} - add_resource(resource_ob) + add_resource(resource_ob, update=False) def insert_publish_resources(default_owner_id): current_resources = Redis.hgetall("resources") @@ -108,7 +108,7 @@ def insert_publish_resources(default_owner_id): resource_ob['group_masks'] = {} - add_resource(resource_ob) + add_resource(resource_ob, update=False) else: continue @@ -139,7 +139,7 @@ def insert_geno_resources(default_owner_id): "admin": "not-admin"} resource_ob['group_masks'] = {} - add_resource(resource_ob) + add_resource(resource_ob, update=False) def insert_resources(default_owner_id): current_resources = get_resources() diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py index 8a5f95ee..6c912a23 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -285,13 +285,14 @@ def get_resource_info(resource_id): else: return None -def add_resource(resource_info): +def add_resource(resource_info, update=True): if 'trait' in resource_info['data']: resource_id = hmac.hmac_creation('{}:{}:{}'.format(str(resource_info['type']), str(resource_info['data']['dataset']), str(resource_info['data']['trait']))) else: resource_id = hmac.hmac_creation('{}:{}'.format(str(resource_info['type']), str(resource_info['data']['dataset']))) - Redis.hset("resources", resource_id, json.dumps(resource_info)) + if not Redis.hexists("resources", resource_id): + Redis.hset("resources", resource_id, json.dumps(resource_info)) return resource_info -- cgit v1.2.3 From ddc7dfa6afb2ae02d6a23e345d408ec1ec73b1aa Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 8 Jul 2020 13:00:30 -0500 Subject: Removed remaining unused references to the old qtlreaper --- wqflask/base/trait.py | 2 +- wqflask/utility/Plot.py | 2 -- wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py | 2 -- wqflask/wqflask/correlation/show_corr_results.py | 2 -- wqflask/wqflask/correlation_matrix/show_corr_matrix.py | 2 -- wqflask/wqflask/heatmap/heatmap.py | 2 -- wqflask/wqflask/network_graph/network_graph.py | 2 -- 7 files changed, 1 insertion(+), 13 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index 8e11c11d..ecb9dcd9 100644 --- a/wqflask/base/trait.py +++ b/wqflask/base/trait.py @@ -403,7 +403,7 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): Publication.Month, Publication.Year, PublishXRef.Sequence, Phenotype.Units, PublishXRef.comments FROM - PublishXRef, Publication, Phenotype, PublishFreeze + PublishXRef, Publication, Phenotype, PublishFreeze, InbredSet WHERE PublishXRef.Id = %s AND Phenotype.Id = PublishXRef.PhenotypeId AND diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py index cce8435d..9bc84d22 100644 --- a/wqflask/utility/Plot.py +++ b/wqflask/utility/Plot.py @@ -35,8 +35,6 @@ import sys, os from numarray import linear_algebra as la from numarray import ones, array, dot, swapaxes -import reaper - import webqtlUtil import corestats from base import webqtlConfig diff --git a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py index 5d74dc9d..09d6b9cc 100644 --- a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py +++ b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py @@ -35,8 +35,6 @@ import resource from pprint import pformat as pf -import reaper - from base.trait import create_trait from base import data_set from utility import 
webqtlUtil, helper_functions, corr_result_helpers diff --git a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py index bc2912f2..de7a1c0c 100644 --- a/wqflask/wqflask/correlation/show_corr_results.py +++ b/wqflask/wqflask/correlation/show_corr_results.py @@ -42,8 +42,6 @@ utils = importr("utils") from pprint import pformat as pf -import reaper - from base import webqtlConfig from utility.THCell import THCell from utility.TDCell import TDCell diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py index 2b9467d1..0ac94139 100644 --- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py +++ b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py @@ -41,8 +41,6 @@ import rpy2.robjects as robjects from pprint import pformat as pf -import reaper - from utility.redis_tools import get_redis_conn Redis = get_redis_conn() THIRTY_DAYS = 60 * 60 * 24 * 30 diff --git a/wqflask/wqflask/heatmap/heatmap.py b/wqflask/wqflask/heatmap/heatmap.py index e82aa0ef..5098a184 100644 --- a/wqflask/wqflask/heatmap/heatmap.py +++ b/wqflask/wqflask/heatmap/heatmap.py @@ -19,8 +19,6 @@ import numpy as np from pprint import pformat as pf -import reaper - from base.trait import GeneralTrait from base import data_set from base import species diff --git a/wqflask/wqflask/network_graph/network_graph.py b/wqflask/wqflask/network_graph/network_graph.py index f41f3017..f61c40b4 100644 --- a/wqflask/wqflask/network_graph/network_graph.py +++ b/wqflask/wqflask/network_graph/network_graph.py @@ -40,8 +40,6 @@ import rpy2.robjects as robjects from pprint import pformat as pf -import reaper - from utility.THCell import THCell from utility.TDCell import TDCell from base.trait import create_trait -- cgit v1.2.3 From c9e5ea428684590ab11fa2cadbd2a60b72f98168 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Thu, 16 Jul 2020 16:20:27 +0300 Subject: Expose elastic-search variables for tests to pass --- wqflask/utility/tools.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py index 89d88516..77db5d53 100644 --- a/wqflask/utility/tools.py +++ b/wqflask/utility/tools.py @@ -270,8 +270,8 @@ if ORCID_CLIENT_ID != 'UNKNOWN' and ORCID_CLIENT_SECRET: ORCID_CLIENT_ID+"&client_secret="+ORCID_CLIENT_SECRET ORCID_TOKEN_URL = get_setting('ORCID_TOKEN_URL') -# ELASTICSEARCH_HOST = get_setting('ELASTICSEARCH_HOST') -# ELASTICSEARCH_PORT = get_setting('ELASTICSEARCH_PORT') +ELASTICSEARCH_HOST = get_setting('ELASTICSEARCH_HOST') +ELASTICSEARCH_PORT = get_setting('ELASTICSEARCH_PORT') # import utility.elasticsearch_tools as es # es.test_elasticsearch_connection() -- cgit v1.2.3 From 81c35e99a7ff808bf2fcf1f5e19631adf07d42e8 Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 22 Jul 2020 17:46:37 -0500 Subject: It should add new resources to Redis automatically now --- wqflask/utility/authentication_tools.py | 61 +++++++++++++++++++++++++-------- wqflask/utility/redis_tools.py | 5 +-- 2 files changed, 47 insertions(+), 19 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index f9028f32..ed7462d1 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -6,7 +6,7 @@ import requests from base import data_set, webqtlConfig from utility import hmac -from utility.redis_tools import get_redis_conn, 
get_resource_info, get_resource_id +from utility.redis_tools import get_redis_conn, get_resource_info, get_resource_id, add_resource Redis = get_redis_conn() from flask import Flask, g, redirect, url_for @@ -16,13 +16,7 @@ logger = logging.getLogger(__name__ ) def check_resource_availability(dataset, trait_id=None): - #ZS: Check if super-user - we should probably come up with some way to integrate this into the proxy - if g.user_session.user_id in Redis.smembers("super_users"): - return webqtlConfig.SUPER_PRIVILEGES - - response = None - - #At least for now assume temporary entered traits are accessible#At least for now assume temporary entered traits are accessible + #At least for now assume temporary entered traits are accessible if type(dataset) == str: return webqtlConfig.DEFAULT_PRIVILEGES if dataset.type == "Temp": @@ -33,9 +27,13 @@ def check_resource_availability(dataset, trait_id=None): if resource_id: resource_info = get_resource_info(resource_id) if not resource_info: - return webqtlConfig.DEFAULT_PRIVILEGES - else: - return response #ZS: Need to substitute in something that creates the resource in Redis later + resource_info = add_new_resource(dataset, trait_id) + + #ZS: Check if super-user - we should probably come up with some way to integrate this into the proxy + if g.user_session.user_id in Redis.smembers("super_users"): + return webqtlConfig.SUPER_PRIVILEGES + + response = None the_url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id) try: @@ -43,10 +41,43 @@ def check_resource_availability(dataset, trait_id=None): except: response = resource_info['default_mask'] - if response: - return response - else: #ZS: No idea how this would happen, but just in case - return False + return response + +def add_new_resource(dataset, trait_id=None): + resource_ob = { + 'owner_id' : webqtlConfig.DEFAULT_OWNER_ID, + 'default_mask': webqtlConfig.DEFAULT_PRIVILEGES, + 'group_masks' : {} + } + + if dataset.type == "Publish": + resource_ob['name'] = get_group_code(dataset) + "_" + str(trait_id) + resource_ob['data'] = { + 'dataset': dataset.id, + 'trait' : trait_id + } + resource_ob['type'] = 'dataset-publish' + elif dataset.type == "Geno": + resource_ob['name'] = dataset.name + resource_ob['data'] = { + 'dataset': dataset.id + } + resource_ob['type'] = 'dataset-geno' + else: + resource_ob['name'] = dataset.name + resource_ob['data'] = { + 'dataset': dataset.id + } + resource_ob['type'] = 'dataset-probeset' + + resource_info = add_resource(resource_ob, update=False) + + return resource_info + +def get_group_code(dataset): + results = g.db.execute("SELECT InbredSetCode from InbredSet where Name='{}'".format(dataset.group.name)).fetchone() + + return results[0] def check_admin(resource_id=None): the_url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id) diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py index 6c912a23..1377a564 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -264,17 +264,14 @@ def get_resources(): return resource_list def get_resource_id(dataset, trait_id=None): + resource_id = False if dataset.type == "Publish": if trait_id: resource_id = hmac.hmac_creation("{}:{}:{}".format('dataset-publish', dataset.id, trait_id)) - else: - return False elif dataset.type == "ProbeSet": resource_id = hmac.hmac_creation("{}:{}".format('dataset-probeset', dataset.id)) elif dataset.type == "Geno": resource_id = 
hmac.hmac_creation("{}:{}".format('dataset-geno', dataset.id)) - else: - return False return resource_id -- cgit v1.2.3 From f4e61929d0eb490dbf7e8ddc03a20d42d44b6f5c Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Tue, 21 Jul 2020 12:36:06 +0300 Subject: Add work-around for failed imports in unittest * wqflask/utility/tools.py: Unittests will use `from wqflask.wqflask import app` and the gn2 script will use `from wqflask import app` --- wqflask/utility/tools.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py index 77db5d53..37f9d8fe 100644 --- a/wqflask/utility/tools.py +++ b/wqflask/utility/tools.py @@ -5,7 +5,10 @@ import os import sys import json -from wqflask import app +try: + from wqflask import app +except ImportError: + from wqflask.wqflask import app # Use the standard logger here to avoid a circular dependency import logging -- cgit v1.2.3 From 2cbda12fe9fa9fca4d27796b2a8eb719e659dc7f Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Tue, 21 Jul 2020 21:53:30 +0300 Subject: Revert "Add work-around for failed imports in unittest" This reverts commit d5e87fa6fe7546b46790f512d984a5501223082f. --- wqflask/utility/tools.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py index 37f9d8fe..77db5d53 100644 --- a/wqflask/utility/tools.py +++ b/wqflask/utility/tools.py @@ -5,10 +5,7 @@ import os import sys import json -try: - from wqflask import app -except ImportError: - from wqflask.wqflask import app +from wqflask import app # Use the standard logger here to avoid a circular dependency import logging -- cgit v1.2.3 From d26ca838b6e823303c4b903c286fcc452caed0ad Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Tue, 21 Jul 2020 23:37:21 +0300 Subject: Remove unused doc-tests * wqflask/utility/chunks.py: Remove test code from module --- wqflask/utility/chunks.py | 63 ----------------------------------------------- 1 file changed, 63 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/chunks.py b/wqflask/utility/chunks.py index b0e33c08..d91b9bf4 100644 --- a/wqflask/utility/chunks.py +++ b/wqflask/utility/chunks.py @@ -31,66 +31,3 @@ def divide_into_chunks(the_list, number_chunks): chunks.append(the_list[counter:counter+chunksize]) return chunks - -def _confirm_chunk(original, result): - all_chunked = [] - for chunk in result: - all_chunked.extend(chunk) - print("length of all chunked:", len(all_chunked)) - assert original == all_chunked, "You didn't chunk right" - - -def _chunk_test(divide_func): - import random - random.seed(7) - - number_exact = 0 - total_amount_off = 0 - - for test in range(1, 1001): - print("\n\ntest:", test) - number_chunks = random.randint(1, 20) - number_elements = random.randint(0, 100) - the_list = list(range(1, number_elements)) - result = divide_func(the_list, number_chunks) - - print("Dividing list of length {} into approximately {} chunks - got {} chunks".format( - len(the_list), number_chunks, len(result))) - print("result:", result) - - _confirm_chunk(the_list, result) - - amount_off = abs(number_chunks - len(result)) - if amount_off == 0: - number_exact += 1 - else: - total_amount_off += amount_off - - - print("\n{} exact out of {} [Total amount off: {}]".format(number_exact, - test, - total_amount_off)) - assert number_exact == 558 - assert total_amount_off == 1580 - return number_exact, total_amount_off - - -def _main(): - info = 
dict() - #funcs = (("sam", sam_divide_into_chunks), ("zach", zach_divide_into_chunks)) - funcs = (("only one", divide_into_chunks),) - for name, func in funcs: - start = time.time() - number_exact, total_amount_off = _chunk_test(func) - took = time.time() - start - info[name] = dict(number_exact=number_exact, - total_amount_off=total_amount_off, - took=took) - - print("info is:", info) - -if __name__ == '__main__': - _main() - print("\nConfirming doctests...") - import doctest - doctest.testmod() -- cgit v1.2.3 From b39ef1e464208cf8806dc73cfe9684183ce7c9a2 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 22 Jul 2020 01:37:52 +0300 Subject: Simplify normalize_values * wqflask/utility/corr_result_helpers.py(normalize_values): Replace loop with zip form --- wqflask/utility/corr_result_helpers.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/corr_result_helpers.py b/wqflask/utility/corr_result_helpers.py index b543c589..69c6fe1b 100644 --- a/wqflask/utility/corr_result_helpers.py +++ b/wqflask/utility/corr_result_helpers.py @@ -14,15 +14,11 @@ def normalize_values(a_values, b_values): min_length = min(len(a_values), len(b_values)) a_new = [] b_new = [] - for counter in range(min_length): - if (a_values[counter] or a_values[counter] == 0) and (b_values[counter] or b_values[counter] == 0): - a_new.append(a_values[counter]) - b_new.append(b_values[counter]) - - num_overlap = len(a_new) - assert num_overlap == len(b_new), "Lengths should be the same" - - return a_new, b_new, num_overlap + for a, b in zip(a_values, b_values): + if not (a == None or b == None): + a_new.append(a) + b_new.append(b) + return a_new, b_new, len(a_new) def common_keys(a_samples, b_samples): -- cgit v1.2.3 From f99df1fa0f2163a93f7e194beeb65f0e1d542594 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 22 Jul 2020 01:40:03 +0300 Subject: Remove unused doc-tests * wqflask/utility/corr_result_helpers.py: Delete doc-test --- wqflask/utility/corr_result_helpers.py | 6 ------ 1 file changed, 6 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/corr_result_helpers.py b/wqflask/utility/corr_result_helpers.py index 69c6fe1b..09017e4a 100644 --- a/wqflask/utility/corr_result_helpers.py +++ b/wqflask/utility/corr_result_helpers.py @@ -44,9 +44,3 @@ def normalize_values_with_samples(a_samples, b_samples): assert num_overlap == len(b_new), "Lengths should be the same" return a_new, b_new, num_overlap - - - -if __name__ == '__main__': - import doctest - doctest.testmod() \ No newline at end of file -- cgit v1.2.3 From 2b7d50f9ac6d0f4f6a032e60053b5923c292a0a1 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 22 Jul 2020 01:41:14 +0300 Subject: Remove unused assert * wqflask/utility/corr_result_helpers.py(normalize_values): At no one point will that assert be hit --- wqflask/utility/corr_result_helpers.py | 4 ---- 1 file changed, 4 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/corr_result_helpers.py b/wqflask/utility/corr_result_helpers.py index 09017e4a..a43edbd4 100644 --- a/wqflask/utility/corr_result_helpers.py +++ b/wqflask/utility/corr_result_helpers.py @@ -33,14 +33,10 @@ def common_keys(a_samples, b_samples): def normalize_values_with_samples(a_samples, b_samples): common_samples = common_keys(a_samples, b_samples) - a_new = {} b_new = {} for sample in common_samples: a_new[sample] = a_samples[sample] b_new[sample] = b_samples[sample] - num_overlap = len(a_new) - assert num_overlap == 
len(b_new), "Lengths should be the same" - return a_new, b_new, num_overlap -- cgit v1.2.3 From 82ee315583281b93e1eff9640ce04e44bc70ac58 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 22 Jul 2020 01:41:45 +0300 Subject: Remove redundant variable * wqflask/utility/corr_result_helpers.py(normalize_values_with_values): Remove `num_overlap` --- wqflask/utility/corr_result_helpers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/corr_result_helpers.py b/wqflask/utility/corr_result_helpers.py index a43edbd4..ea3ababf 100644 --- a/wqflask/utility/corr_result_helpers.py +++ b/wqflask/utility/corr_result_helpers.py @@ -39,4 +39,4 @@ def normalize_values_with_samples(a_samples, b_samples): a_new[sample] = a_samples[sample] b_new[sample] = b_samples[sample] - return a_new, b_new, num_overlap + return a_new, b_new, len(a_new) -- cgit v1.2.3 From aed1b7f12f653694c22ab389a403120e143a0669 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 22 Jul 2020 17:03:37 +0300 Subject: Add zero to num_repr dictionary * wqflask/utility/formatting.py(numify): Update `num_repr` to have a zero --- wqflask/utility/formatting.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/formatting.py b/wqflask/utility/formatting.py index e53dda22..1c0269e9 100644 --- a/wqflask/utility/formatting.py +++ b/wqflask/utility/formatting.py @@ -28,7 +28,8 @@ def numify(number, singular=None, plural=None): '12,334 hippopotami' """ - num_repr = {1 : "one", + num_repr = {0 : "zero", + 1 : "one", 2 : "two", 3 : "three", 4 : "four", -- cgit v1.2.3 From 073e8c9813505fc00372b2ec21f3e4a87b5be9aa Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 22 Jul 2020 17:05:11 +0300 Subject: Apply autopep-8 * wqflask/utility/formatting.py: apply it --- wqflask/utility/formatting.py | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/formatting.py b/wqflask/utility/formatting.py index 1c0269e9..1da3e9b7 100644 --- a/wqflask/utility/formatting.py +++ b/wqflask/utility/formatting.py @@ -28,22 +28,20 @@ def numify(number, singular=None, plural=None): '12,334 hippopotami' """ - num_repr = {0 : "zero", - 1 : "one", - 2 : "two", - 3 : "three", - 4 : "four", - 5 : "five", - 6 : "six", - 7 : "seven", - 8 : "eight", - 9 : "nine", - 10 : "ten", - 11 : "eleven", - 12 : "twelve"} - - #Below line commented out cause doesn't work in Python 2.4 - #assert all((singular, plural)) or not any((singular, plural)), "Need to pass two words or none" + num_repr = {0: "zero", + 1: "one", + 2: "two", + 3: "three", + 4: "four", + 5: "five", + 6: "six", + 7: "seven", + 8: "eight", + 9: "nine", + 10: "ten", + 11: "eleven", + 12: "twelve"} + if number == 1: word = singular else: -- cgit v1.2.3 From b8e17aee9000943e0fd379b5ef006d76314733e4 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Tue, 21 Jul 2020 12:36:06 +0300 Subject: Add work-around for failed imports in unittest * wqflask/utility/tools.py: Unittests will use `from wqflask.wqflask import app` and the gn2 script will use `from wqflask import app` --- wqflask/utility/tools.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py index 77db5d53..37f9d8fe 100644 --- a/wqflask/utility/tools.py +++ b/wqflask/utility/tools.py @@ -5,7 +5,10 @@ import os import sys import json -from wqflask import app 
+try: + from wqflask import app +except ImportError: + from wqflask.wqflask import app # Use the standard logger here to avoid a circular dependency import logging -- cgit v1.2.3 From 98035fccfd7960e6992e1e47afc32b56d54ff074 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Tue, 21 Jul 2020 21:53:30 +0300 Subject: Revert "Add work-around for failed imports in unittest" This reverts commit d5e87fa6fe7546b46790f512d984a5501223082f. --- wqflask/utility/tools.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py index 37f9d8fe..77db5d53 100644 --- a/wqflask/utility/tools.py +++ b/wqflask/utility/tools.py @@ -5,10 +5,7 @@ import os import sys import json -try: - from wqflask import app -except ImportError: - from wqflask.wqflask import app +from wqflask import app # Use the standard logger here to avoid a circular dependency import logging -- cgit v1.2.3 From 91e52482071ce90acf3607597de2ae79e298fd67 Mon Sep 17 00:00:00 2001 From: Pjotr Prins Date: Sat, 25 Jul 2020 16:25:14 +0100 Subject: The variable is never defined, so let's default to "none" --- wqflask/utility/authentication_tools.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index ed7462d1..9a2a5ccd 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -45,7 +45,7 @@ def check_resource_availability(dataset, trait_id=None): def add_new_resource(dataset, trait_id=None): resource_ob = { - 'owner_id' : webqtlConfig.DEFAULT_OWNER_ID, + 'owner_id' : "none", # webqtlConfig.DEFAULT_OWNER_ID, 'default_mask': webqtlConfig.DEFAULT_PRIVILEGES, 'group_masks' : {} } @@ -84,6 +84,7 @@ def check_admin(resource_id=None): try: response = json.loads(requests.get(the_url).content)['admin'] except: + logger.debug(resource_info) response = resource_info['default_mask']['admin'] if 'edit-admins' in response: @@ -124,4 +125,4 @@ def check_owner_or_admin(dataset=None, trait_id=None, resource_id=None): else: return check_admin(resource_id) - return "not-admin" \ No newline at end of file + return "not-admin" -- cgit v1.2.3 From 317d7a78930ecf4be8ec80bd688c14baa8852f04 Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 29 Jul 2020 18:50:08 -0500 Subject: Fixed error that occured for new resources in check_admin * wqflask/utility/authentication_tools.py: the code forgot to get resource_info when a resource wasn't in Redis yet, causing an error when it was referenced, so I added a line getting the resource info (and also a couple comments for other lines related to authentication) --- wqflask/utility/authentication_tools.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index 9a2a5ccd..ece7022c 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -24,9 +24,9 @@ def check_resource_availability(dataset, trait_id=None): resource_id = get_resource_id(dataset, trait_id) - if resource_id: + if resource_id: #ZS: This should never be false, but it's technically possible if a non-Temp dataset somehow had a type other than Publish/ProbeSet/Geno resource_info = get_resource_info(resource_id) - if not resource_info: + if not resource_info: #ZS: If resource isn't already in redis, add it with default privileges 
resource_info = add_new_resource(dataset, trait_id) #ZS: Check if super-user - we should probably come up with some way to integrate this into the proxy @@ -84,7 +84,7 @@ def check_admin(resource_id=None): try: response = json.loads(requests.get(the_url).content)['admin'] except: - logger.debug(resource_info) + resource_info = get_resource_info(resource_id) response = resource_info['default_mask']['admin'] if 'edit-admins' in response: -- cgit v1.2.3 From 49dfdc0c986901683f3e00e65f922e2318e0f330 Mon Sep 17 00:00:00 2001 From: Muriithi Frederick Muriuki Date: Fri, 15 Feb 2019 08:41:28 +0300 Subject: Migrate code from Piddle to Pillow * wqflask/wqflask/marker_regression/display_mapping_results.py: Add font files (DisplayMappingResults): Update the code from the Piddle way of things to the Pillow way of things, for example, replace: - pid.drawRect(...) with im_drawer.rectangle(...) - pid.drawString(...) with im_drawer.text(...) - pid.drawPolygon(...) with im_drawer.polygon(...) etc. * wqflask/utility/Plot.py: Migrate code from the older, unsupported Piddle to the newer Pillow library. --- wqflask/utility/Plot.py | 87 ++- .../marker_regression/display_mapping_results.py | 757 ++++++++++++++++----- 2 files changed, 637 insertions(+), 207 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py index 9bc84d22..c557bcc6 100644 --- a/wqflask/utility/Plot.py +++ b/wqflask/utility/Plot.py @@ -26,7 +26,7 @@ from __future__ import print_function -import piddle as pid +from PIL import (Image, ImageColor, ImageDraw, ImageFont) from pprint import pformat as pf from math import * @@ -42,6 +42,17 @@ from base import webqtlConfig import utility.logger logger = utility.logger.getLogger(__name__ ) +# ---- Define common colours ---- # +BLUE = ImageColor.getrgb("blue") +BLACK = ImageColor.getrgb("black") +# ---- END: Define common colours ---- # + +# ---- FONT FILES ---- # +VERDANA_FILE = "fonts/ttf/verdana.ttf" +COUR_FILE = "fonts/ttf/courbd.ttf" +TAHOMA_FILE = "fonts/ttf/tahoma.ttf" +# ---- END: FONT FILES ---- # + def cformat(d, rank=0): 'custom string format' strD = "%2.6f" % d @@ -114,7 +125,8 @@ def find_outliers(vals): # parameter: data is either object returned by reaper permutation function (called by MarkerRegressionPage.py) # or the first object returned by direct (pair-scan) permu function (called by DirectPlotPage.py) -def plotBar(canvas, data, barColor=pid.blue, axesColor=pid.black, labelColor=pid.black, XLabel=None, YLabel=None, title=None, offset= (60, 20, 40, 40), zoom = 1): +def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLabel=None, YLabel=None, title=None, offset= (60, 20, 40, 40), zoom = 1): + im_drawer = ImageDraw.Draw(canvas) xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset plotWidth = canvas.size[0] - xLeftOffset - xRightOffset @@ -161,43 +173,67 @@ def plotBar(canvas, data, barColor=pid.blue, axesColor=pid.black, labelColor=pid if count: xc = (dataXY[i]-xLow)*xScale+xLeftOffset yc =-(count-yLow)*yScale+yTopOffset+plotHeight - canvas.drawRect(xc+2,yc,xc+barWidth-2,yTopOffset+plotHeight,edgeColor=barColor,fillColor=barColor) + im_drawer.rectangle( + xy=((xc+2,yc),(xc+barWidth-2,yTopOffset+plotHeight)), + outline=barColor, fill=barColor) #draw drawing region - canvas.drawRect(xLeftOffset, yTopOffset, xLeftOffset+plotWidth, yTopOffset+plotHeight) + im_drawer.rectangle( + xy=((xLeftOffset, yTopOffset), (xLeftOffset+plotWidth, yTopOffset+plotHeight)) + ) #draw scale - 
scaleFont=pid.Font(ttf="cour",size=11,bold=1) + scaleFont=ImageFont.truetype(font=COUR_FILE,size=11) x=xLow for i in range(int(stepX)+1): xc=xLeftOffset+(x-xLow)*xScale - canvas.drawLine(xc,yTopOffset+plotHeight,xc,yTopOffset+plotHeight+5, color=axesColor) + im_drawer.line( + xy=((xc,yTopOffset+plotHeight),(xc,yTopOffset+plotHeight+5)), + fill=axesColor) strX = cformat(d=x, rank=0) - canvas.drawString(strX,xc-canvas.stringWidth(strX,font=scaleFont)/2,yTopOffset+plotHeight+14,font=scaleFont) + im_drawer.text( + text=strX, + xy=(xc-im_drawer.textsize(strX,font=scaleFont)[0]/2, + yTopOffset+plotHeight+14),font=scaleFont) x+= (xTop - xLow)/stepX y=yLow for i in range(int(stepY)+1): yc=yTopOffset+plotHeight-(y-yLow)*yScale - canvas.drawLine(xLeftOffset,yc,xLeftOffset-5,yc, color=axesColor) + im_drawer.line(xy=((xLeftOffset,yc),(xLeftOffset-5,yc)), fill=axesColor) strY = "%d" %y - canvas.drawString(strY,xLeftOffset-canvas.stringWidth(strY,font=scaleFont)-6,yc+5,font=scaleFont) + im_drawer.text( + text=strY, + xy=(xLeftOffset-im_drawer.textsize(strY,font=scaleFont)[0]-6,yc+5), + font=scaleFont) y+= (yTop - yLow)/stepY #draw label - labelFont=pid.Font(ttf="tahoma",size=17,bold=0) + labelFont=ImageFont.truetype(font=TAHOMA_FILE,size=17) if XLabel: - canvas.drawString(XLabel,xLeftOffset+(plotWidth-canvas.stringWidth(XLabel,font=labelFont))/2.0, - yTopOffset+plotHeight+yBottomOffset-10,font=labelFont,color=labelColor) + im_drawer.text( + text=XLabel, + xy=(xLeftOffset+( + plotWidth-im_drawer.textsize(XLabel,font=labelFont)[0])/2.0, + yTopOffset+plotHeight+yBottomOffset-10), + font=labelFont,fill=labelColor) if YLabel: - canvas.drawString(YLabel, 19, yTopOffset+plotHeight-(plotHeight-canvas.stringWidth(YLabel,font=labelFont))/2.0, - font=labelFont,color=labelColor,angle=90) - - labelFont=pid.Font(ttf="verdana",size=16,bold=0) + im_drawer.text( + text=YLabel, + xy=(19, + yTopOffset+plotHeight-(plotHeight-im_drawer.textsize( + YLabel,font=labelFont)[0])/2.0), + font=labelFont,fill=labelColor,angle=90) + + labelFont=ImageFont.truetype(font=VERDANA_FILE,size=16) if title: - canvas.drawString(title,xLeftOffset+(plotWidth-canvas.stringWidth(title,font=labelFont))/2.0, - 20,font=labelFont,color=labelColor) + im_drawer.text( + text=title, + xy=(xLeftOffset+(plotWidth-im_drawer.textsize( + title,font=labelFont)[0])/2.0, + 20), + font=labelFont,fill=labelColor) # This function determines the scale of the plot def detScaleOld(min,max): @@ -265,16 +301,21 @@ def greenfunc(x): def colorSpectrum(n=100): multiple = 10 if n == 1: - return [pid.Color(1,0,0)] + return [ImageColor.getrgb("rgb(100%,0%,0%)")] elif n == 2: - return [pid.Color(1,0,0),pid.Color(0,0,1)] + return [ImageColor.getrgb("100%,0%,0%)"), + ImageColor.getrgb("rgb(0%,0%,100%)")] elif n == 3: - return [pid.Color(1,0,0),pid.Color(0,1,0),pid.Color(0,0,1)] + return [ImageColor.getrgb("rgb(100%,0%,0%)"), + ImageColor.getrgb("rgb(0%,100%,0%)"), + ImageColor.getrgb("rgb(0%,0%,100%)")] N = n*multiple out = [None]*N; for i in range(N): x = float(i)/N - out[i] = pid.Color(redfunc(x), greenfunc(x), bluefunc(x)); + out[i] = ImageColor.getrgb("rgb({}%,{}%,{}%".format( + *[int(i*100) for i in ( + redfunc(x), greenfunc(x), bluefunc(x))])) out2 = [out[0]] step = N/float(n-1) j = 0 @@ -290,4 +331,4 @@ def _test(): if __name__=="__main__": - _test() \ No newline at end of file + _test() diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py index 25846099..39067fc5 100644 --- 
a/wqflask/wqflask/marker_regression/display_mapping_results.py +++ b/wqflask/wqflask/marker_regression/display_mapping_results.py @@ -27,7 +27,7 @@ import datetime import string from math import * -from PIL import (ImageColor) +from PIL import (Image,ImageDraw,ImageFont,ImageColor) import sys,os import cPickle import httplib @@ -70,6 +70,13 @@ DARKVIOLET = ImageColor.getrgb("darkviolet") MEDIUMPURPLE = ImageColor.getrgb("mediumpurple") # ---- END: Define common colours ---- # +# ---- FONT FILES ---- # +VERDANA_FILE = "fonts/ttf/verdana.ttf" +TREBUC_FILE = "fonts/ttf/trebucbd.ttf" +FNT_BS_FILE = "fonts/ttf/fnt_bs.ttf" +ARIAL_FILE = "fonts/ttf/arial.ttf" +# ---- END: FONT FILES ---- # + ######################################### # Inteval Mapping Plot Page ######################################### @@ -487,21 +494,28 @@ class DisplayMappingResults(object): # Plots goes here ################################################################ showLocusForm = "" - intCanvas = pid.PILCanvas(size=(self.graphWidth, self.graphHeight)) + intCanvas = Image.new("RGBA", size=(self.graphWidth, self.graphHeight)) with Bench("Drawing Plot"): gifmap = self.plotIntMapping(intCanvas, startMb = self.startMb, endMb = self.endMb, showLocusForm= showLocusForm) self.gifmap = gifmap.__str__() self.filename= webqtlUtil.genRandStr("Itvl_") - intCanvas.save(os.path.join(webqtlConfig.GENERATED_IMAGE_DIR, self.filename), format='png') + intCanvas.save( + "{}.png".format( + os.path.join(webqtlConfig.GENERATED_IMAGE_DIR, self.filename)), + format='png') intImg=HT.Image('/image/'+self.filename+'.png', border=0, usemap='#WebQTLImageMap') #Scales plot differently for high resolution if self.draw2X: - intCanvasX2 = pid.PILCanvas(size=(self.graphWidth*2,self.graphHeight*2)) + intCanvasX2 = Image.new("RGBA", size=(self.graphWidth*2,self.graphHeight*2)) gifmapX2 = self.plotIntMapping(intCanvasX2, startMb = self.startMb, endMb = self.endMb, showLocusForm= showLocusForm, zoom=2) - intCanvasX2.save(os.path.join(webqtlConfig.GENERATED_IMAGE_DIR, self.filename+"X2"), format='png') + intCanvasX2.save( + "{}.png".format( + os.path.join(webqtlConfig.GENERATED_IMAGE_DIR, + self.filename+"X2")), + format='png') ################################################################ # Outputs goes here @@ -529,6 +543,7 @@ class DisplayMappingResults(object): btminfo.append(HT.BR(), 'Mapping using genotype data as a trait will result in infinity LRS at one locus. 
In order to display the result properly, all LRSs higher than 100 are capped at 100.') def plotIntMapping(self, canvas, offset= (80, 120, 20, 100), zoom = 1, startMb = None, endMb = None, showLocusForm = ""): + im_drawer = ImageDraw.Draw(canvas) #calculating margins xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset if self.multipleInterval: @@ -622,6 +637,7 @@ class DisplayMappingResults(object): return gifmap def drawBootStrapResult(self, canvas, nboot, drawAreaHeight, plotXScale, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None): + im_drawer = ImageDraw.Draw(canvas) xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset plotWidth = canvas.size[0] - xLeftOffset - xRightOffset plotHeight = canvas.size[1] - yTopOffset - yBottomOffset @@ -708,8 +724,10 @@ class DisplayMappingResults(object): if item[1] > xLeftOffset+plotWidth: item[1] = xLeftOffset+plotWidth if item[0] != item[1]: - canvas.drawRect(item[0], yZero, item[1], yZero - item[2]*bootHeightThresh/maxBootCount, - fillColor=self.BOOTSTRAP_BOX_COLOR) + im_drawer.rectangle( + xy=((item[0], yZero), + (item[1], yZero - item[2]*bootHeightThresh/maxBootCount)), + fill=self.BOOTSTRAP_BOX_COLOR) ###draw boot scale highestPercent = (maxBootCount*100.0)/nboot @@ -718,26 +736,43 @@ class DisplayMappingResults(object): bootScale = bootScale[:-1] + [highestPercent] bootOffset = 50*fontZoom - bootScaleFont=pid.Font(ttf="verdana",size=13*fontZoom,bold=0) - canvas.drawRect(canvas.size[0]-bootOffset,yZero-bootHeightThresh,canvas.size[0]-bootOffset-15*zoom,yZero,fillColor = YELLOW) - canvas.drawLine(canvas.size[0]-bootOffset+4, yZero, canvas.size[0]-bootOffset, yZero, color=BLACK) - canvas.drawString('0%' ,canvas.size[0]-bootOffset+10,yZero+5,font=bootScaleFont,color=BLACK) + bootScaleFont=ImageFont.truetype(font=VERDANA_FILE,size=13*fontZoom) + im_drawer.rectangle( + xy=((canvas.size[0]-bootOffset, yZero-bootHeightThresh), + (canvas.size[0]-bootOffset-15*zoom,yZero)), + fill = YELLOW) + im_drawer.line( + xy=((canvas.size[0]-bootOffset+4, yZero), + (canvas.size[0]-bootOffset, yZero)), + fill=BLACK) + im_drawer.text(xy=(canvas.size[0]-bootOffset+10,yZero+5), text='0%', + font=bootScaleFont, fill=BLACK) + for item in bootScale: if item == 0: continue bootY = yZero-bootHeightThresh*item/highestPercent - canvas.drawLine(canvas.size[0]-bootOffset+4,bootY,canvas.size[0]-bootOffset,bootY,color=BLACK) - canvas.drawString('%2.1f'%item ,canvas.size[0]-bootOffset+10,bootY+5,font=bootScaleFont,color=BLACK) + im_drawer.line( + xy=((canvas.size[0]-bootOffset+4,bootY), + (canvas.size[0]-bootOffset,bootY)), + fill=BLACK) + im_drawer.text(xy=(canvas.size[0]-bootOffset+10,bootY+5), + text='%2.1f'%item, font=bootScaleFont, color=BLACK) if self.legendChecked: startPosY = 30 nCol = 2 - smallLabelFont = pid.Font(ttf="trebuc", size=12*fontZoom, bold=1) + smallLabelFont = ImageFont.truetype(font=TREBUC_FILE, size=12*fontZoom) leftOffset = xLeftOffset+(nCol-1)*200 - canvas.drawRect(leftOffset,startPosY-6, leftOffset+12,startPosY+6, fillColor=YELLOW) - canvas.drawString('Frequency of the Peak LRS',leftOffset+ 20, startPosY+5,font=smallLabelFont,color=BLACK) + im_drawer.rectangle( + xy=((leftOffset,startPosY-6), (leftOffset+12,startPosY+6)), + fill=YELLOW) + im_drawer.text(xy=(leftOffset+ 20, startPosY+5), + text='Frequency of the Peak LRS', + font=smallLabelFont, color=BLACK) def drawProbeSetPosition(self, canvas, plotXScale, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None): + im_drawer = ImageDraw.Draw(canvas) if 
len(self.traitList) != 1: return @@ -805,12 +840,15 @@ class DisplayMappingResults(object): break if locPixel >= 0 and self.plotScale == 'physic': traitPixel = ((locPixel, yZero), (locPixel-7, yZero+14), (locPixel+7, yZero+14)) - canvas.drawPolygon(traitPixel, edgeColor=BLACK, fillColor=self.TRANSCRIPT_LOCATION_COLOR, closed=1) + im_drawer.polygon( + xy=traitPixel, outline=BLACK, + fill=self.TRANSCRIPT_LOCATION_COLOR#, closed=1 + ) if self.legendChecked: startPosY = 15 nCol = 2 - smallLabelFont = pid.Font(ttf="trebuc", size=12*fontZoom, bold=1) + smallLabelFont = ImageFont.truetype(font=TREBUC_FILE, size=12*fontZoom) if self.manhattan_plot: leftOffset = xLeftOffset else: @@ -819,6 +857,7 @@ class DisplayMappingResults(object): canvas.drawString("Sequence Site", (leftOffset+15), (startPosY+5), smallLabelFont, self.TOP_RIGHT_INFO_COLOR) def drawSNPTrackNew(self, canvas, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None): + im_drawer = ImageDraw.Draw(canvas) if self.plotScale != 'physic' or self.selectedChr == -1 or not self.diffCol: return @@ -856,12 +895,15 @@ class DisplayMappingResults(object): if maxCount>0: for i in range(xLeftOffset, xLeftOffset + plotWidth): snpDensity = float(SNPCounts[i-xLeftOffset]*SNP_HEIGHT_MODIFIER/maxCount) - canvas.drawLine(i, drawSNPLocationY+(snpDensity)*zoom, i, drawSNPLocationY-(snpDensity)*zoom, color=self.SNP_COLOR, width=1) + im_drawer.line( + xy=((i, drawSNPLocationY+(snpDensity)*zoom), + (i, drawSNPLocationY-(snpDensity)*zoom)), + fill=self.SNP_COLOR, width=1) def drawMultiTraitName(self, fd, canvas, gifmap, showLocusForm, offset= (40, 120, 80, 10), zoom = 1): nameWidths = [] yPaddingTop = 10 - colorFont=pid.Font(ttf="trebuc",size=12,bold=1) + colorFont=ImageFont.truetype(font=TREBUC_FILE,size=12) if len(self.qtlresults) >20 and self.selectedChr > -1: rightShift = 20 rightShiftStep = 60 @@ -880,11 +922,16 @@ class DisplayMappingResults(object): rightShift += rightShiftStep name = thisTrait.displayName() - nameWidth = canvas.stringWidth(name,font=colorFont) + nameWidth, nameHeight = im_drawer.textsize(name,font=colorFont) nameWidths.append(nameWidth) - canvas.drawRect(rightShift,yPaddingTop+kstep*15, rectWidth+rightShift,yPaddingTop+10+kstep*15, fillColor=thisLRSColor) - canvas.drawString(name,rectWidth+2+rightShift,yPaddingTop+10+kstep*15,font=colorFont,color=BLACK) + im_drawer.rectangle( + xy=((rightShift,yPaddingTop+kstep*15), + (rectWidth+rightShift,yPaddingTop+10+kstep*15)), + fill=thisLRSColor) + im_drawer.text( + text=name,xy=(rectWidth+2+rightShift,yPaddingTop+10+kstep*15), + font=colorFont,fill=BLACK) if thisTrait.db: COORDS = "%d,%d,%d,%d" %(rectWidth+2+rightShift,yPaddingTop+kstep*15,rectWidth+2+rightShift+nameWidth,yPaddingTop+10+kstep*15,) HREF= "javascript:showDatabase3('%s','%s','%s','');" % (showLocusForm, thisTrait.db.name, thisTrait.name) @@ -892,6 +939,7 @@ class DisplayMappingResults(object): gifmap.areas.append(Areas) def drawLegendPanel(self, canvas, offset= (40, 120, 80, 10), zoom = 1): + im_drawer = ImageDraw.Draw(canvas) xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset plotWidth = canvas.size[0] - xLeftOffset - xRightOffset plotHeight = canvas.size[1] - yTopOffset - yBottomOffset @@ -900,45 +948,80 @@ class DisplayMappingResults(object): if zoom == 2: fontZoom = 1.5 - labelFont=pid.Font(ttf="trebuc",size=12*fontZoom, bold=1) + labelFont=ImageFont.truetype(font=TREBUC_FILE,size=12*fontZoom) startPosY = 15 stepPosY = 12*fontZoom if self.manhattan_plot != True: - 
canvas.drawLine(xLeftOffset,startPosY,xLeftOffset+32,startPosY,color=self.LRS_COLOR, width=2) - canvas.drawString(self.LRS_LOD, xLeftOffset+40,startPosY+5,font=labelFont,color=BLACK) + im_drawer.line( + xy=((xLeftOffset,startPosY),(xLeftOffset+32,startPosY)), + fill=self.LRS_COLOR, width=2) + im_drawer.text( + text=self.LRS_LOD, xy=(xLeftOffset+40,startPosY+5), + font=labelFont,fill=BLACK) startPosY += stepPosY if self.additiveChecked: startPosX = xLeftOffset - canvas.drawLine(startPosX,startPosY,startPosX+17,startPosY,color=self.ADDITIVE_COLOR_POSITIVE, width=2) - canvas.drawLine(startPosX+18,startPosY,startPosX+32,startPosY,color=self.ADDITIVE_COLOR_NEGATIVE, width=2) - canvas.drawString('Additive Effect',startPosX+40,startPosY+5,font=labelFont,color=BLACK) + im_drawer.line( + xy=((startPosX,startPosY),(startPosX+17,startPosY)), + fill=self.ADDITIVE_COLOR_POSITIVE, width=2) + im_drawer.line( + xy=((startPosX+18,startPosY),(startPosX+32,startPosY)), + fill=self.ADDITIVE_COLOR_NEGATIVE, width=2) + im_drawer.text( + text='Additive Effect',xy=(startPosX+40,startPosY+5), + font=labelFont,fill=BLACK) if self.genotype.type == 'intercross' and self.dominanceChecked: startPosX = xLeftOffset startPosY += stepPosY - canvas.drawLine(startPosX,startPosY,startPosX+17,startPosY,color=self.DOMINANCE_COLOR_POSITIVE, width=4) - canvas.drawLine(startPosX+18,startPosY,startPosX+35,startPosY,color=self.DOMINANCE_COLOR_NEGATIVE, width=4) - canvas.drawString('Dominance Effect',startPosX+42,startPosY+5,font=labelFont,color=BLACK) + im_drawer.line( + xy=((startPosX,startPosY),(startPosX+17,startPosY)), + fill=self.DOMINANCE_COLOR_POSITIVE, width=4) + im_drawer.line( + xy=((startPosX+18,startPosY),(startPosX+35,startPosY)), + fill=self.DOMINANCE_COLOR_NEGATIVE, width=4) + im_drawer.text( + text='Dominance Effect', xy=(startPosX+42,startPosY+5), + font=labelFont,fill=BLACK) if self.haplotypeAnalystChecked: startPosY += stepPosY startPosX = xLeftOffset - canvas.drawLine(startPosX,startPosY,startPosX+17,startPosY,color=self.HAPLOTYPE_POSITIVE, width=4) - canvas.drawLine(startPosX+18,startPosY,startPosX+35,startPosY,color=self.HAPLOTYPE_NEGATIVE, width=4) - canvas.drawLine(startPosX+36,startPosY,startPosX+53,startPosY,color=self.HAPLOTYPE_HETEROZYGOUS, width=4) - canvas.drawLine(startPosX+54,startPosY,startPosX+67,startPosY,color=self.HAPLOTYPE_RECOMBINATION, width=4) - canvas.drawString('Haplotypes (Pat, Mat, Het, Unk)',startPosX+76,startPosY+5,font=labelFont,color=BLACK) + im_drawer.line( + xy=((startPosX,startPosY),(startPosX+17,startPosY)), + fill=self.HAPLOTYPE_POSITIVE, width=4) + im_drawer.line( + xy=((startPosX+18,startPosY),(startPosX+35,startPosY)), + fill=self.HAPLOTYPE_NEGATIVE, width=4) + im_drawer.line( + xy=((startPosX+36,startPosY),(startPosX+53,startPosY)), + fill=self.HAPLOTYPE_HETEROZYGOUS, width=4) + im_drawer.line( + xy=((startPosX+54,startPosY),(startPosX+67,startPosY)), + fill=self.HAPLOTYPE_RECOMBINATION, width=4) + im_drawer.text( + text='Haplotypes (Pat, Mat, Het, Unk)', + xy=(startPosX+76,startPosY+5),font=labelFont,fill=BLACK) if self.permChecked and self.nperm > 0: startPosY += stepPosY startPosX = xLeftOffset - canvas.drawLine(startPosX, startPosY, startPosX + 32, startPosY, color=self.SIGNIFICANT_COLOR, width=self.SIGNIFICANT_WIDTH) - canvas.drawLine(startPosX, startPosY + stepPosY, startPosX + 32, startPosY + stepPosY, color=self.SUGGESTIVE_COLOR, width=self.SUGGESTIVE_WIDTH) - canvas.drawString('Significant %s = %2.2f' % (self.LRS_LOD, self.significant),xLeftOffset+42,startPosY 
+5,font=labelFont,color=BLACK) - canvas.drawString('Suggestive %s = %2.2f' % (self.LRS_LOD, self.suggestive),xLeftOffset+42,startPosY + 5 +stepPosY,font=labelFont,color=BLACK) - - labelFont = pid.Font(ttf="verdana",size=12*fontZoom) + im_drawer.line( + xy=((startPosX, startPosY),( startPosX + 32, startPosY)), + fill=self.SIGNIFICANT_COLOR, width=self.SIGNIFICANT_WIDTH) + im_drawer.line( + xy=((startPosX, startPosY + stepPosY),( startPosX + 32, startPosY + stepPosY)), + fill=self.SUGGESTIVE_COLOR, width=self.SUGGESTIVE_WIDTH) + im_drawer.text( + text='Significant %s = %2.2f' % (self.LRS_LOD,self.significant), + xy=(xLeftOffset+42,startPosY +5),font=labelFont,fill=BLACK) + im_drawer.text( + text='Suggestive %s = %2.2f' % (self.LRS_LOD, self.suggestive), + xy=(xLeftOffset+42,startPosY + 5 +stepPosY),font=labelFont, + fill=BLACK) + + labelFont = ImageFont.truetype(font=VERDANA_FILE,size=12*fontZoom) labelColor = BLACK if self.dataset.type == "Publish" or self.dataset.type == "Geno": dataset_label = self.dataset.fullname @@ -1000,20 +1083,32 @@ class DisplayMappingResults(object): identification += "Trait: %s" % (self.this_trait.name) identification += " with %s samples" % (self.n_samples) - d = 4+ max(canvas.stringWidth(identification, font=labelFont), canvas.stringWidth(string1, font=labelFont), canvas.stringWidth(string2, font=labelFont)) - canvas.drawString(identification,canvas.size[0] - xRightOffset-d,20*fontZoom,font=labelFont,color=labelColor) - else: - d = 4+ max(canvas.stringWidth(string1, font=labelFont), canvas.stringWidth(string2, font=labelFont)) - canvas.drawString(string1,canvas.size[0] - xRightOffset-d,35*fontZoom,font=labelFont,color=labelColor) - canvas.drawString(string2,canvas.size[0] - xRightOffset-d,50*fontZoom,font=labelFont,color=labelColor) - canvas.drawString(string3,canvas.size[0] - xRightOffset-d,65*fontZoom,font=labelFont,color=labelColor) - if string4 != '': - canvas.drawString(string4,canvas.size[0] - xRightOffset-d,80*fontZoom,font=labelFont,color=labelColor) - canvas.drawString("Created at: " + str(datetime.datetime.now()).split('.')[0], canvas.size[0] - xRightOffset-d,95*fontZoom,font=labelFont,color=labelColor) + d = 4+ max( + im_drawer.textsize(identification, font=labelFont)[0], + im_drawer.textsize(string1, font=labelFont)[0], + im_drawer.textsize(string2, font=labelFont)[0]) + im_drawer.text( + text=identification, + xy=(canvas.size[0] - xRightOffset-d,20*fontZoom),font=labelFont, + fill=labelColor) else: - canvas.drawString("Created at: " + str(datetime.datetime.now()).split('.')[0], canvas.size[0] - xRightOffset-d,80*fontZoom,font=labelFont,color=labelColor) + d = 4+ max( + im_drawer.textsize(string1, font=labelFont)[0], + im_drawer.textsize(string2, font=labelFont)[0]) + im_drawer.text( + text=string1,xy=(canvas.size[0] - xRightOffset-d,35*fontZoom), + font=labelFont,fill=labelColor) + im_drawer.text( + text=string2,xy=(canvas.size[0] - xRightOffset-d,50*fontZoom), + font=labelFont,fill=labelColor) + if string3 != '': + im_drawer.text( + text=string3,xy=(canvas.size[0] - xRightOffset-d,65*fontZoom), + font=labelFont,fill=labelColor) + def drawGeneBand(self, canvas, gifmap, plotXScale, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None): + im_drawer = ImageDraw.Draw(canvas) if self.plotScale != 'physic' or self.selectedChr == -1 or not self.geneCol: return @@ -1126,11 +1221,15 @@ class DisplayMappingResults(object): #draw the detail view if self.endMb - self.startMb <= self.DRAW_DETAIL_MB and geneEndPix - geneStartPix > 
self.EACH_GENE_ARROW_SPACING * 3: - utrColor = pid.Color(0.66, 0.66, 0.66) - arrowColor = pid.Color(0.7, 0.7, 0.7) + utrColor = ImageColor.getrgb("rgb(66%, 66%, 66%)") + arrowColor = ImageColor.getrgb("rgb(70%, 70%, 70%)") #draw the line that runs the entire length of the gene - canvas.drawLine(geneStartPix, geneYLocation + self.EACH_GENE_HEIGHT/2*zoom, geneEndPix, geneYLocation + self.EACH_GENE_HEIGHT/2*zoom, color=outlineColor, width=1) + im_drawer.line( + xy=( + (geneStartPix, geneYLocation + self.EACH_GENE_HEIGHT/2*zoom), + ( geneEndPix, geneYLocation + self.EACH_GENE_HEIGHT/2*zoom)), + fill=outlineColor, width=1) #draw the arrows if geneEndPix - geneStartPix < 1: @@ -1141,11 +1240,30 @@ class DisplayMappingResults(object): if (xCoord % self.EACH_GENE_ARROW_SPACING == 0 and xCoord + self.EACH_GENE_ARROW_SPACING < geneEndPix-geneStartPix) or xCoord == 0: if strand == "+": - canvas.drawLine(geneStartPix + xCoord, geneYLocation, geneStartPix + xCoord + self.EACH_GENE_ARROW_WIDTH, geneYLocation +(self.EACH_GENE_HEIGHT / 2)*zoom, color=arrowColor, width=1) - canvas.drawLine(geneStartPix + xCoord, geneYLocation + self.EACH_GENE_HEIGHT*zoom, geneStartPix + xCoord+self.EACH_GENE_ARROW_WIDTH, geneYLocation + (self.EACH_GENE_HEIGHT / 2) * zoom, color=arrowColor, width=1) + im_drawer.line( + xy=((geneStartPix + xCoord, geneYLocation), + (geneStartPix + xCoord + self.EACH_GENE_ARROW_WIDTH, + geneYLocation +(self.EACH_GENE_HEIGHT / 2)*zoom)), + fill=arrowColor, width=1) + im_drawer.line( + xy=((geneStartPix + xCoord, + geneYLocation + self.EACH_GENE_HEIGHT*zoom), + (geneStartPix + xCoord+self.EACH_GENE_ARROW_WIDTH, + geneYLocation + (self.EACH_GENE_HEIGHT / 2) * zoom)), + fill=arrowColor, width=1) else: - canvas.drawLine(geneStartPix + xCoord + self.EACH_GENE_ARROW_WIDTH, geneYLocation, geneStartPix + xCoord, geneYLocation +(self.EACH_GENE_HEIGHT / 2)*zoom, color=arrowColor, width=1) - canvas.drawLine(geneStartPix + xCoord + self.EACH_GENE_ARROW_WIDTH, geneYLocation + self.EACH_GENE_HEIGHT*zoom, geneStartPix + xCoord, geneYLocation + (self.EACH_GENE_HEIGHT / 2)*zoom, color=arrowColor, width=1) + im_drawer.line( + xy=((geneStartPix + xCoord + self.EACH_GENE_ARROW_WIDTH, + geneYLocation), + ( geneStartPix + xCoord, + geneYLocation +(self.EACH_GENE_HEIGHT / 2)*zoom)), + fill=arrowColor, width=1) + im_drawer.line( + xy=((geneStartPix + xCoord + self.EACH_GENE_ARROW_WIDTH, + geneYLocation + self.EACH_GENE_HEIGHT*zoom), + ( geneStartPix + xCoord, + geneYLocation + (self.EACH_GENE_HEIGHT / 2)*zoom)), + fill=arrowColor, width=1) #draw the blocks for the exon regions for i in range(0, len(exonStarts)): @@ -1159,7 +1277,10 @@ class DisplayMappingResults(object): exonEndPix = xLeftOffset + plotWidth if (exonStartPix > xLeftOffset + plotWidth): exonStartPix = xLeftOffset + plotWidth - canvas.drawRect(exonStartPix, geneYLocation, exonEndPix, (geneYLocation + self.EACH_GENE_HEIGHT*zoom), edgeColor = outlineColor, fillColor = fillColor) + im_drawer.rectangle( + xy=((exonStartPix, geneYLocation), + (exonEndPix, (geneYLocation + self.EACH_GENE_HEIGHT*zoom))), + outline = outlineColor, fill = fillColor) #draw gray blocks for 3' and 5' UTR blocks if cdsStart and cdsEnd: @@ -1173,14 +1294,16 @@ class DisplayMappingResults(object): utrEndPix = xLeftOffset + plotWidth if (utrStartPix > xLeftOffset + plotWidth): utrStartPix = xLeftOffset + plotWidth - #canvas.drawRect(utrStartPix, geneYLocation, utrEndPix, (geneYLocation+self.EACH_GENE_HEIGHT*zoom), edgeColor=utrColor, fillColor =utrColor) if self.endMb - self.startMb <= 
self.DRAW_UTR_LABELS_MB: if strand == "-": labelText = "3'" else: labelText = "5'" - canvas.drawString(labelText, utrStartPix-9, geneYLocation+self.EACH_GENE_HEIGHT, pid.Font(face="helvetica", size=2)) + im_drawer.text( + text=labelText, + xy=(utrStartPix-9, geneYLocation+self.EACH_GENE_HEIGHT), + font=ImageFont.truetype(font=ARIAL_FILE, size=2)) #the second UTR region @@ -1194,18 +1317,23 @@ class DisplayMappingResults(object): utrEndPix = xLeftOffset + plotWidth if (utrStartPix > xLeftOffset + plotWidth): utrStartPix = xLeftOffset + plotWidth - #canvas.drawRect(utrStartPix, geneYLocation, utrEndPix, (geneYLocation+self.EACH_GENE_HEIGHT*zoom), edgeColor=utrColor, fillColor =utrColor) if self.endMb - self.startMb <= self.DRAW_UTR_LABELS_MB: if strand == "-": labelText = "5'" else: labelText = "3'" - canvas.drawString(labelText, utrEndPix+2, geneYLocation+self.EACH_GENE_HEIGHT, pid.Font(face="helvetica", size=2)) + im_drawer.text( + text=labelText, + xy=(utrEndPix+2,geneYLocation+self.EACH_GENE_HEIGHT), + font=ImageFont.truetype(font=ARIAL_FILE, size=2)) #draw the genes as rectangles else: - canvas.drawRect(geneStartPix, geneYLocation, geneEndPix, (geneYLocation + self.EACH_GENE_HEIGHT*zoom), edgeColor = outlineColor, fillColor = fillColor) + im_drawer.rectangle( + xy=((geneStartPix, geneYLocation), + (geneEndPix, (geneYLocation + self.EACH_GENE_HEIGHT*zoom))), + outline= outlineColor, fill = fillColor) COORDS = "%d, %d, %d, %d" %(geneStartPix, geneYLocation, geneEndPix, (geneYLocation + self.EACH_GENE_HEIGHT)) # NL: 06-02-2011 Rob required to display NCBI info in a new window @@ -1354,12 +1482,22 @@ class DisplayMappingResults(object): else: mylineColor = self.HAPLOTYPE_RECOMBINATION # XZ: Unknown - canvas.drawLine(drawStart, geneYLocation+7+2*ind*self.EACH_GENE_HEIGHT*zoom, drawEnd, geneYLocation+7+2*ind*self.EACH_GENE_HEIGHT*zoom, color = mylineColor, width=zoom*(self.EACH_GENE_HEIGHT+2)) + im_drawer.line( + xy=((drawStart, + geneYLocation+7+2*ind*self.EACH_GENE_HEIGHT*zoom), + (drawEnd, + geneYLocation+7+2*ind*self.EACH_GENE_HEIGHT*zoom)), + fill= mylineColor, width=zoom*(self.EACH_GENE_HEIGHT+2)) fillColor=BLACK outlineColor=BLACK if lastGene == 0: - canvas.drawRect(geneStartPix, geneYLocation+2*ind*self.EACH_GENE_HEIGHT*zoom, geneEndPix, geneYLocation+2*ind*self.EACH_GENE_HEIGHT+ 2*self.EACH_GENE_HEIGHT*zoom, edgeColor = outlineColor, fillColor = fillColor) + im_drawer.rectangle( + xy=((geneStartPix, + geneYLocation+2*ind*self.EACH_GENE_HEIGHT*zoom), + (geneEndPix, + geneYLocation+2*ind*self.EACH_GENE_HEIGHT+ 2*self.EACH_GENE_HEIGHT*zoom)), + outline=outlineColor, fill=fillColor) COORDS = "%d, %d, %d, %d" %(geneStartPix, geneYLocation+ind*self.EACH_GENE_HEIGHT, geneEndPix+1, (geneYLocation + ind*self.EACH_GENE_HEIGHT)) @@ -1372,11 +1510,21 @@ class DisplayMappingResults(object): if (plotRight < (xLeftOffset + plotWidth - 3)) and (lastGene == 0): drawEnd = xLeftOffset + plotWidth - 6 mylineColor = self.HAPLOTYPE_RECOMBINATION - canvas.drawLine(plotRight, geneYLocation+7+2*ind*self.EACH_GENE_HEIGHT*zoom, drawEnd, geneYLocation+7+2*ind*self.EACH_GENE_HEIGHT*zoom, color = mylineColor, width=zoom*(self.EACH_GENE_HEIGHT+2)) + im_drawer.line( + xy=((plotRight, + geneYLocation+7+2*ind*self.EACH_GENE_HEIGHT*zoom), + (drawEnd, + geneYLocation+7+2*ind*self.EACH_GENE_HEIGHT*zoom)), + fill= mylineColor, width=zoom*(self.EACH_GENE_HEIGHT+2)) if lastGene == 0: - canvas.drawString("%s" % (_chr[j].name), geneStartPix , geneYLocation+17+2*maxind*self.EACH_GENE_HEIGHT*zoom, font=pid.Font(ttf="verdana", 
size=12, bold=0), color=BLACK, angle=-90) + im_drawer.text( + text="%s" % (_chr[j].name), + xy=(geneStartPix, + geneYLocation+17+2*maxind*self.EACH_GENE_HEIGHT*zoom), + font=ImageFont.truetype(font=VERDANA_FILE, size=12), + fill=BLACK, angle=-90) oldgeneEndPix = geneEndPix; oldgeno = _chr[j].genotype @@ -1405,12 +1553,23 @@ class DisplayMappingResults(object): expr = item.value # Place where font is hardcoded - canvas.drawString("%s" % (samplelist[j]), (xLeftOffset + plotWidth + 10) , geneYLocation+8+2*ind*self.EACH_GENE_HEIGHT*zoom, font=pid.Font(ttf="verdana", size=12, bold=0), color=BLACK) - canvas.drawString("%2.2f" % (expr), (xLeftOffset + plotWidth + 60) , geneYLocation+8+2*ind*self.EACH_GENE_HEIGHT*zoom, font=pid.Font(ttf="verdana", size=12, bold=0), color=BLACK) + im_drawer.text( + text="%s" % (samplelist[j]), + xy=((xLeftOffset + plotWidth + 10), + geneYLocation+8+2*ind*self.EACH_GENE_HEIGHT*zoom), + font=ImageFont.truetype(font=VERDANA_FILE, size=12), + fill=BLACK) + im_drawer.text( + text="%2.2f" % (expr), + xy=((xLeftOffset + plotWidth + 60), + geneYLocation+8+2*ind*self.EACH_GENE_HEIGHT*zoom), + font=ImageFont.truetype(font=VERDANA_FILE, size=12), + fill=BLACK) ## END HaplotypeAnalyst def drawClickBand(self, canvas, gifmap, plotXScale, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None): + im_drawer = ImageDraw.Draw(canvas) if self.plotScale != 'physic' or self.selectedChr == -1: return @@ -1426,7 +1585,7 @@ class DisplayMappingResults(object): # but it makes the HTML huge, and takes forever to render the page in the first place) # Draw the bands that you can click on to go to UCSC / Ensembl MAX_CLICKABLE_REGION_DIVISIONS = 100 - clickableRegionLabelFont=pid.Font(ttf="verdana", size=9, bold=0) + clickableRegionLabelFont=ImageFont.truetype(font=VERDANA_FILE, size=9) pixelStep = max(5, int(float(plotWidth)/MAX_CLICKABLE_REGION_DIVISIONS)) # pixelStep: every N pixels, we make a new clickable area for the user to go to that area of the genome. 
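To make the pixelStep comment in the drawClickBand hunk above concrete, here is a small worked sketch; the 700 and 300 pixel plot widths are made-up illustrative values, not numbers taken from the code:

MAX_CLICKABLE_REGION_DIVISIONS = 100

def pixel_step(plot_width):
    # Same expression as in drawClickBand: one clickable strip every N pixels,
    # but never narrower than 5 pixels.
    return max(5, int(float(plot_width) / MAX_CLICKABLE_REGION_DIVISIONS))

print(pixel_step(700))   # 7 -> roughly 100 clickable strips across the plot
print(pixel_step(300))   # 5 -> the 5-pixel floor applies, leaving about 60 strips

The max(5, ...) floor keeps narrow plots from generating hundreds of tiny image-map areas.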
@@ -1463,8 +1622,14 @@ class DisplayMappingResults(object): WEBQTL_TITLE = "Click to view this section of the genome in WebQTL" gifmap.areas.append(HT.Area(shape='rect',coords=WEBQTL_COORDS,href=WEBQTL_HREF, title=WEBQTL_TITLE)) - canvas.drawRect(xBrowse1, paddingTop, xBrowse2, (paddingTop + self.BAND_HEIGHT), edgeColor=self.CLICKABLE_WEBQTL_REGION_COLOR, fillColor=self.CLICKABLE_WEBQTL_REGION_COLOR) - canvas.drawLine(xBrowse1, paddingTop, xBrowse1, (paddingTop + self.BAND_HEIGHT), color=self.CLICKABLE_WEBQTL_REGION_OUTLINE_COLOR) + im_drawer.rectangle( + xy=((xBrowse1, paddingTop), + (xBrowse2, (paddingTop + self.BAND_HEIGHT))), + outline=self.CLICKABLE_WEBQTL_REGION_COLOR, + fill=self.CLICKABLE_WEBQTL_REGION_COLOR) + im_drawer.line( + xy=((xBrowse1, paddingTop),( xBrowse1, (paddingTop + self.BAND_HEIGHT))), + fill=self.CLICKABLE_WEBQTL_REGION_OUTLINE_COLOR) if self.dataset.group.species == "mouse" or self.dataset.group.species == "rat": PHENOGEN_COORDS = "%d, %d, %d, %d" % (xBrowse1, phenogenPaddingTop, xBrowse2, (phenogenPaddingTop+self.BAND_HEIGHT)) @@ -1474,8 +1639,14 @@ class DisplayMappingResults(object): PHENOGEN_HREF = "https://phenogen.org/gene.jsp?speciesCB=Mm&auto=Y&geneTxt=chr%s:%d-%d&genomeVer=mm10" % (self.selectedChr, max(0, calBase-flankingWidthInBases), calBase+flankingWidthInBases) PHENOGEN_TITLE = "Click to view this section of the genome in PhenoGen" gifmap.areas.append(HT.Area(shape='rect',coords=PHENOGEN_COORDS,href=PHENOGEN_HREF, title=PHENOGEN_TITLE)) - canvas.drawRect(xBrowse1, phenogenPaddingTop, xBrowse2, (phenogenPaddingTop+self.BAND_HEIGHT), edgeColor=self.CLICKABLE_PHENOGEN_REGION_COLOR, fillColor=self.CLICKABLE_PHENOGEN_REGION_COLOR) - canvas.drawLine(xBrowse1, phenogenPaddingTop, xBrowse1, (phenogenPaddingTop+self.BAND_HEIGHT), color=self.CLICKABLE_PHENOGEN_REGION_OUTLINE_COLOR) + im_drawer.rectangle( + xy=((xBrowse1, phenogenPaddingTop), + (xBrowse2, (phenogenPaddingTop+self.BAND_HEIGHT))), + outline=self.CLICKABLE_PHENOGEN_REGION_COLOR, + fill=self.CLICKABLE_PHENOGEN_REGION_COLOR) + im_drawer.line( + xy=((xBrowse1, phenogenPaddingTop),( xBrowse1, (phenogenPaddingTop+self.BAND_HEIGHT))), + fill=self.CLICKABLE_PHENOGEN_REGION_OUTLINE_COLOR) UCSC_COORDS = "%d, %d, %d, %d" %(xBrowse1, ucscPaddingTop, xBrowse2, (ucscPaddingTop+self.BAND_HEIGHT)) if self.dataset.group.species == "mouse": @@ -1484,8 +1655,15 @@ class DisplayMappingResults(object): UCSC_HREF = "http://genome.ucsc.edu/cgi-bin/hgTracks?db=%s&position=chr%s:%d-%d" % (self._ucscDb, self.selectedChr, max(0, calBase-flankingWidthInBases), calBase+flankingWidthInBases) UCSC_TITLE = "Click to view this section of the genome in the UCSC Genome Browser" gifmap.areas.append(HT.Area(shape='rect',coords=UCSC_COORDS,href=UCSC_HREF, title=UCSC_TITLE)) - canvas.drawRect(xBrowse1, ucscPaddingTop, xBrowse2, (ucscPaddingTop+self.BAND_HEIGHT), edgeColor=self.CLICKABLE_UCSC_REGION_COLOR, fillColor=self.CLICKABLE_UCSC_REGION_COLOR) - canvas.drawLine(xBrowse1, ucscPaddingTop, xBrowse1, (ucscPaddingTop+self.BAND_HEIGHT), color=self.CLICKABLE_UCSC_REGION_OUTLINE_COLOR) + im_drawer.rectangle( + xy=((xBrowse1, ucscPaddingTop), + (xBrowse2, (ucscPaddingTop+self.BAND_HEIGHT))), + outline=self.CLICKABLE_UCSC_REGION_COLOR, + fill=self.CLICKABLE_UCSC_REGION_COLOR) + im_drawer.line( + xy=((xBrowse1, ucscPaddingTop), + (xBrowse1, (ucscPaddingTop+self.BAND_HEIGHT))), + fill=self.CLICKABLE_UCSC_REGION_OUTLINE_COLOR) ENSEMBL_COORDS = "%d, %d, %d, %d" %(xBrowse1, ensemblPaddingTop, xBrowse2, (ensemblPaddingTop+self.BAND_HEIGHT)) if 
self.dataset.group.species == "mouse": @@ -1494,32 +1672,59 @@ class DisplayMappingResults(object): ENSEMBL_HREF = "http://www.ensembl.org/Rattus_norvegicus/contigview?chr=%s&start=%d&end=%d" % (self.selectedChr, max(0, calBase-flankingWidthInBases), calBase+flankingWidthInBases) ENSEMBL_TITLE = "Click to view this section of the genome in the Ensembl Genome Browser" gifmap.areas.append(HT.Area(shape='rect',coords=ENSEMBL_COORDS,href=ENSEMBL_HREF, title=ENSEMBL_TITLE)) - canvas.drawRect(xBrowse1, ensemblPaddingTop, xBrowse2, (ensemblPaddingTop+self.BAND_HEIGHT), edgeColor=self.CLICKABLE_ENSEMBL_REGION_COLOR, fillColor=self.CLICKABLE_ENSEMBL_REGION_COLOR) - canvas.drawLine(xBrowse1, ensemblPaddingTop, xBrowse1, (ensemblPaddingTop+self.BAND_HEIGHT), color=self.CLICKABLE_ENSEMBL_REGION_OUTLINE_COLOR) + im_drawer.rectangle( + xy=((xBrowse1, ensemblPaddingTop), + (xBrowse2, (ensemblPaddingTop+self.BAND_HEIGHT))), + outline=self.CLICKABLE_ENSEMBL_REGION_COLOR, + fill=self.CLICKABLE_ENSEMBL_REGION_COLOR) + im_drawer.line( + xy=((xBrowse1, ensemblPaddingTop), + (xBrowse1, (ensemblPaddingTop+self.BAND_HEIGHT))), + fill=self.CLICKABLE_ENSEMBL_REGION_OUTLINE_COLOR) # end for - canvas.drawString("Click to view the corresponding section of the genome in an 8x expanded WebQTL map", (xLeftOffset + 10), paddingTop + self.BAND_HEIGHT/2, font=clickableRegionLabelFont, color=self.CLICKABLE_WEBQTL_TEXT_COLOR) + im_drawer.text( + text="Click to view the corresponding section of the genome in an 8x expanded WebQTL map", + xy=((xLeftOffset + 10), paddingTop + self.BAND_HEIGHT/2), + font=clickableRegionLabelFont, + fill=self.CLICKABLE_WEBQTL_TEXT_COLOR) if self.dataset.group.species == "mouse" or self.dataset.group.species == "rat": - canvas.drawString("Click to view the corresponding section of the genome in PhenoGen", (xLeftOffset + 10), phenogenPaddingTop + self.BAND_HEIGHT/2, font=clickableRegionLabelFont, color=self.CLICKABLE_PHENOGEN_TEXT_COLOR) - canvas.drawString("Click to view the corresponding section of the genome in the UCSC Genome Browser", (xLeftOffset + 10), ucscPaddingTop + self.BAND_HEIGHT/2, font=clickableRegionLabelFont, color=self.CLICKABLE_UCSC_TEXT_COLOR) - canvas.drawString("Click to view the corresponding section of the genome in the Ensembl Genome Browser", (xLeftOffset+10), ensemblPaddingTop + self.BAND_HEIGHT/2, font=clickableRegionLabelFont, color=self.CLICKABLE_ENSEMBL_TEXT_COLOR) + im_drawer.text( + text="Click to view the corresponding section of the genome in PhenoGen", + xy=((xLeftOffset + 10), phenogenPaddingTop + self.BAND_HEIGHT/2), + font=clickableRegionLabelFont, fill=self.CLICKABLE_PHENOGEN_TEXT_COLOR) + im_drawer.text( + text="Click to view the corresponding section of the genome in the UCSC Genome Browser", + xy=((xLeftOffset + 10), ucscPaddingTop + self.BAND_HEIGHT/2), + font=clickableRegionLabelFont, fill=self.CLICKABLE_UCSC_TEXT_COLOR) + im_drawer.text( + text="Click to view the corresponding section of the genome in the Ensembl Genome Browser", + xy=((xLeftOffset+10), ensemblPaddingTop + self.BAND_HEIGHT/2), + font=clickableRegionLabelFont, fill=self.CLICKABLE_ENSEMBL_TEXT_COLOR) #draw the gray text - chrFont = pid.Font(ttf="verdana", size=26*zoom, bold=1) - traitFont = pid.Font(ttf="verdana", size=14, bold=0) - chrX = xLeftOffset + plotWidth - 2 - canvas.stringWidth("Chr %s" % self.ChrList[self.selectedChr][0], font=chrFont) - canvas.drawString("Chr %s" % self.ChrList[self.selectedChr][0], chrX, ensemblPaddingTop-5, font=chrFont, color=GRAY) + chrFont = 
ImageFont.truetype(font=VERDANA_FILE, size=26*zoom) + traitFont = ImageFont.truetype(font=VERDANA_FILE, size=14) + chrX = xLeftOffset + plotWidth - 2 - im_drawer.textsize( + "Chr %s" % self.ChrList[self.selectedChr][0], font=chrFont)[0] + im_drawer.text( + text="Chr %s" % self.ChrList[self.selectedChr][0], + xy=(chrX, ensemblPaddingTop-5), font=chrFont, fill=GRAY) # end of drawBrowserClickableRegions else: #draw the gray text - chrFont = pid.Font(ttf="verdana", size=26*zoom, bold=1) - traitFont = pid.Font(ttf="verdana", size=14, bold=0) - chrX = xLeftOffset + (plotWidth - canvas.stringWidth("Chr %s" % currentChromosome, font=chrFont))/2 - canvas.drawString("Chr %s" % currentChromosome, chrX, 32, font=chrFont, color=GRAY) + chrFont = ImageFont.truetype(font=VERDANA_FILE, size=26*zoom) + traitFont = ImageFont.truetype(font=VERDANA_FILE, size=14) + chrX = xLeftOffset + (plotWidth - im_drawer.textsize( + "Chr %s" % currentChromosome, font=chrFont)[0])/2 + im_drawer.text( + text="Chr %s" % currentChromosome, xy=(chrX, 32), font=chrFont, + fill=GRAY) # end of drawBrowserClickableRegions pass def drawXAxis(self, canvas, drawAreaHeight, gifmap, plotXScale, showLocusForm, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None): + im_drawer = ImageDraw.Draw(canvas) xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset plotWidth = canvas.size[0] - xLeftOffset - xRightOffset plotHeight = canvas.size[1] - yTopOffset - yBottomOffset @@ -1535,7 +1740,7 @@ class DisplayMappingResults(object): X_AXIS_THICKNESS = 1*zoom # ======= Alex: Draw the X-axis labels (megabase location) - MBLabelFont = pid.Font(ttf="verdana", size=15*zoom, bold=0) + MBLabelFont = ImageFont.truetype(font=VERDANA_FILE, size=15*zoom) xMajorTickHeight = 10 * zoom # How high the tick extends below the axis xMinorTickHeight = 5*zoom xAxisTickMarkColor = BLACK @@ -1544,7 +1749,7 @@ class DisplayMappingResults(object): spacingFromLabelToAxis = 5 if self.plotScale == 'physic': - strYLoc = yZero + spacingFromLabelToAxis + canvas.fontHeight(MBLabelFont) + strYLoc = yZero + spacingFromLabelToAxis + MBLabelFont.font.height ###Physical single chromosome view if self.selectedChr > -1: XScale = Plot.detScale(startMb, endMb) @@ -1565,13 +1770,21 @@ class DisplayMappingResults(object): continue Xc = xLeftOffset + plotXScale*(_Mb - startMb) if counter % NUM_MINOR_TICKS == 0: # Draw a MAJOR mark, not just a minor tick mark - canvas.drawLine(Xc, yZero, Xc, yZero+xMajorTickHeight, color=xAxisTickMarkColor, width=X_MAJOR_TICK_THICKNESS) # Draw the MAJOR tick mark + im_drawer.line(xy=((Xc,yZero), + (Xc,yZero+xMajorTickHeight)), + fill=xAxisTickMarkColor, + width=X_MAJOR_TICK_THICKNESS) # Draw the MAJOR tick mark labelStr = str(formatStr % _Mb) # What Mbase location to put on the label - strWidth = canvas.stringWidth(labelStr, font=MBLabelFont) + strWidth, strHeight = im_drawer.textsize(labelStr, font=MBLabelFont) drawStringXc = (Xc - (strWidth / 2.0)) - canvas.drawString(labelStr, drawStringXc, strYLoc, font=MBLabelFont, color=xAxisLabelColor, angle=0) + im_drawer.text(xy=(drawStringXc, strYLoc), + text=labelStr, font=MBLabelFont, + fill=xAxisLabelColor) else: - canvas.drawLine(Xc, yZero, Xc, yZero+xMinorTickHeight, color=xAxisTickMarkColor, width=X_MINOR_TICK_THICKNESS) # Draw the MINOR tick mark + im_drawer.line(xy=((Xc,yZero), + (Xc,yZero+xMinorTickHeight)), + fill=xAxisTickMarkColor, + width=X_MINOR_TICK_THICKNESS) # Draw the MINOR tick mark ###Physical genome wide view else: @@ -1586,17 +1799,27 @@ class 
DisplayMappingResults(object): else: distScale = 5 for j, tickdists in enumerate(range(distScale, int(ceil(distLen)), distScale)): - canvas.drawLine(startPosX + tickdists*plotXScale, yZero, startPosX + tickdists*plotXScale, yZero + 7, color=BLACK, width=1*zoom) + im_drawer.line( + xy=((startPosX+tickdists*plotXScale, yZero), + (startPosX+tickdists*plotXScale, yZero + 7)), + fill=BLACK, width=1*zoom) if j % 2 == 0: - canvas.drawString(str(tickdists), startPosX+tickdists*plotXScale, yZero + 10*zoom, color=BLACK, font=MBLabelFont, angle=270) + im_drawer.text( + xy=(startPosX+tickdists*plotXScale, yZero+10*zoom), + text=str(tickdists), fill=BLACK, font=MBLabelFont, angle=270) startPosX += (self.ChrLengthDistList[i]+self.GraphInterval)*plotXScale - megabaseLabelFont = pid.Font(ttf="verdana", size=18*zoom*1.5, bold=0) - canvas.drawString("Megabases", xLeftOffset + (plotWidth - canvas.stringWidth("Megabases", font=megabaseLabelFont))/2, - strYLoc + canvas.fontHeight(MBLabelFont)+ 10*(zoom%2) + 10, font=megabaseLabelFont, color=BLACK) + megabaseLabelFont = ImageFont.truetype(font=VERDANA_FILE, size=int(18*zoom*1.5)) + im_drawer.text( + text="Megabases", + xy=( + xLeftOffset+(plotWidth-im_drawer.textsize( + "Megabases",font=megabaseLabelFont)[0])/2, + strYLoc+MBLabelFont.font.height+10*(zoom%2)+ 10), + font=megabaseLabelFont, fill=BLACK) pass else: - strYLoc = yZero + spacingFromLabelToAxis + canvas.fontHeight(MBLabelFont) + 8 + strYLoc = yZero + spacingFromLabelToAxis + MBLabelFont.font.height + 8 ChrAInfo = [] preLpos = -1 distinctCount = 0.0 @@ -1659,18 +1882,26 @@ class DisplayMappingResults(object): else: Zorder = 0 if differ: - canvas.drawLine(startPosX+Lpos,yZero,xLeftOffset+offsetA,\ - yZero+25, color=lineColor) - canvas.drawLine(xLeftOffset+offsetA,yZero+25,xLeftOffset+offsetA,\ - yZero+40+Zorder*(LRectWidth+3),color=lineColor) + im_drawer.line( + xy=((startPosX+Lpos,yZero),(xLeftOffset+offsetA,\ + yZero+25)), + fill=lineColor) + im_drawer.line( + xy=((xLeftOffset+offsetA,yZero+25),(xLeftOffset+offsetA,\ + yZero+40+Zorder*(LRectWidth+3))), + fill=lineColor) rectColor = ORANGE else: - canvas.drawLine(xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3)-3,\ - xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3),color=lineColor) + im_drawer.line( + xy=((xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3)-3),(\ + xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3))), + fill=lineColor) rectColor = DEEPPINK - canvas.drawRect(xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3),\ - xLeftOffset+offsetA-LRectHeight,yZero+40+Zorder*(LRectWidth+3)+LRectWidth,\ - edgeColor=rectColor,fillColor=rectColor,edgeWidth = 0) + im_drawer.rectangle( + xy=((xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3)), + (xLeftOffset+offsetA-LRectHeight, + yZero+40+Zorder*(LRectWidth+3)+LRectWidth)), + outline=rectColor,fill=rectColor,width = 0) COORDS="%d,%d,%d,%d"%(xLeftOffset+offsetA-LRectHeight, yZero+40+Zorder*(LRectWidth+3),\ xLeftOffset+offsetA,yZero+40+Zorder*(LRectWidth+3)+LRectWidth) HREF="/show_trait?trait_id=%s&dataset=%s" % (Lname, self.dataset.group.name+"Geno") @@ -1679,17 +1910,25 @@ class DisplayMappingResults(object): gifmap.areas.append(Areas) ##piddle bug if j == 0: - canvas.drawLine(startPosX,yZero,startPosX,yZero+40, color=lineColor) + im_drawer.line( + xy=((startPosX,yZero),(startPosX,yZero+40)), + fill=lineColor) startPosX += (self.ChrLengthDistList[j]+self.GraphInterval)*plotXScale - centimorganLabelFont = pid.Font(ttf="verdana", size=18*zoom*1.5, bold=0) - canvas.drawString("Centimorgans", 
xLeftOffset + (plotWidth - canvas.stringWidth("Megabases", font=centimorganLabelFont))/2, - strYLoc + canvas.fontHeight(MBLabelFont)+ 10*(zoom%2) + 10, font=centimorganLabelFont, color=BLACK) + centimorganLabelFont = ImageFont.truetype(font=VERDANA_FILE, size=18*zoom*1.5) + im_drawer.text( + text="Centimorgans", + xy=(xLeftOffset+(plotWidth-im_drawer.textsize( + "Megabases", font=centimorganLabelFont)[0])/2, + strYLoc + MBLabelFont.font.height+ 10*(zoom%2) + 10), + font=centimorganLabelFont, fill=BLACK) - canvas.drawLine(xLeftOffset, yZero, xLeftOffset+plotWidth, yZero, color=BLACK, width=X_AXIS_THICKNESS) # Draw the X axis itself + im_drawer.line(xy=((xLeftOffset,yZero), (xLeftOffset+plotWidth,yZero)), + fill=BLACK, width=X_AXIS_THICKNESS) # Draw the X axis itself def drawQTL(self, canvas, drawAreaHeight, gifmap, plotXScale, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None): + im_drawer = ImageDraw.Draw(canvas) xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset plotWidth = canvas.size[0] - xLeftOffset - xRightOffset plotHeight = canvas.size[1] - yTopOffset - yBottomOffset @@ -1787,29 +2026,40 @@ class DisplayMappingResults(object): else: all_int = False break - - if all_int: - max_lrs_width = canvas.stringWidth("%d" % LRS_LOD_Max, font=LRSScaleFont) + 40 - else: - max_lrs_width = canvas.stringWidth("%2.1f" % LRS_LOD_Max, font=LRSScaleFont) + 30 + # TODO(PIL): Convert from PIL + # if all_int: + # max_lrs_width = canvas.stringWidth("%d" % LRS_LOD_Max, font=LRSScaleFont) + 40 + # else: + # max_lrs_width = canvas.stringWidth("%2.1f" % LRS_LOD_Max, font=LRSScaleFont) + 30 #draw the "LRS" or "LOD" string to the left of the axis - canvas.drawString(self.LRS_LOD, xLeftOffset - max_lrs_width - 15*(zoom-1), \ - yZero - 150 - 300*(zoom - 1), font=LRSLODFont, color=BLACK, angle=90) + LRSScaleFont=ImageFont.truetype(font=VERDANA_FILE, size=16*zoom) + LRSLODFont=ImageFont.truetype(font=VERDANA_FILE, size=int(18*zoom*1.5)) + yZero = yTopOffset + plotHeight + + im_drawer.text( + text=self.LRS_LOD, + xy=(xLeftOffset - im_drawer.textsize("999.99", font=LRSScaleFont)[0] - 15*(zoom-1), + yZero - 150 - 300*(zoom - 1)), + font=LRSLODFont, fill=BLACK, angle=90) for item in LRSAxisList: if LRS_LOD_Max == 0.0: LRS_LOD_Max = 0.000001 yTopOffset + 30*(zoom - 1) yLRS = yZero - (item/LRS_LOD_Max) * LRSHeightThresh - #yLRS = yZero - (item/LRSAxisList[-1]) * LRSHeightThresh - canvas.drawLine(xLeftOffset, yLRS, xLeftOffset - 4, yLRS, color=self.LRS_COLOR, width=1*zoom) + im_drawer.line(xy=((xLeftOffset,yLRS), (xLeftOffset-4,yLRS)), + fill=self.LRS_COLOR, width=1*zoom) if all_int: scaleStr = "%d" % item else: scaleStr = "%2.1f" % item #Draw the LRS/LOD Y axis label - canvas.drawString(scaleStr, xLeftOffset-4-canvas.stringWidth(scaleStr, font=LRSScaleFont)-5, yLRS+3, font=LRSScaleFont, color=self.LRS_COLOR) + im_drawer.text( + text=scaleStr, + xy=(xLeftOffset-4-im_drawer.textsize(scaleStr, font=LRSScaleFont)[0]-5, + yLRS+3), + font=LRSScaleFont, fill=self.LRS_COLOR) if self.permChecked and self.nperm > 0 and not self.multipleInterval: significantY = yZero - self.significant*LRSHeightThresh/LRS_LOD_Max @@ -1824,10 +2074,19 @@ class DisplayMappingResults(object): #ZS: I don't know if what I did here with this inner function is clever or overly complicated, but it's the only way I could think of to avoid duplicating the code inside this function def add_suggestive_significant_lines_and_legend(start_pos_x, chr_length_dist): rightEdge = int(start_pos_x + chr_length_dist*plotXScale - 
self.SUGGESTIVE_WIDTH/1.5) - canvas.drawLine(start_pos_x+self.SUGGESTIVE_WIDTH/1.5, suggestiveY, rightEdge, suggestiveY, color=self.SUGGESTIVE_COLOR, - width=self.SUGGESTIVE_WIDTH*zoom, clipX=(xLeftOffset, xLeftOffset + plotWidth-2)) - canvas.drawLine(start_pos_x+self.SUGGESTIVE_WIDTH/1.5, significantY, rightEdge, significantY, color=self.SIGNIFICANT_COLOR, - width=self.SIGNIFICANT_WIDTH*zoom, clipX=(xLeftOffset, xLeftOffset + plotWidth-2)) + im_drawer.line( + xy=((start_pos_x+self.SUGGESTIVE_WIDTH/1.5, suggestiveY), + (rightEdge, suggestiveY)), + fill=self.SUGGESTIVE_COLOR, width=self.SUGGESTIVE_WIDTH*zoom + #,clipX=(xLeftOffset, xLeftOffset + plotWidth-2) + ) + im_drawer.line( + xy=((start_pos_x+self.SUGGESTIVE_WIDTH/1.5, significantY), + (rightEdge, significantY)), + fill=self.SIGNIFICANT_COLOR, + width=self.SIGNIFICANT_WIDTH*zoom + #, clipX=(xLeftOffset, xLeftOffset + plotWidth-2) + ) sugg_coords = "%d, %d, %d, %d" % (start_pos_x, suggestiveY-2, rightEdge + 2*zoom, suggestiveY+2) sig_coords = "%d, %d, %d, %d" % (start_pos_x, significantY-2, rightEdge + 2*zoom, significantY+2) if self.LRS_LOD == 'LRS': @@ -1868,7 +2127,7 @@ class DisplayMappingResults(object): AdditiveCoordXY = [] DominanceCoordXY = [] - symbolFont = pid.Font(ttf="fnt_bs", size=5,bold=0) #ZS: For Manhattan Plot + symbolFont = ImageFont.truetype(font=FNT_BS_FILE, size=5) #ZS: For Manhattan Plot previous_chr = 1 previous_chr_as_int = 0 @@ -1880,7 +2139,12 @@ class DisplayMappingResults(object): thisLRSColor = self.colorCollection[0] if qtlresult['chr'] != previous_chr and self.selectedChr == -1: if self.manhattan_plot != True: - canvas.drawPolygon(LRSCoordXY,edgeColor=thisLRSColor,closed=0, edgeWidth=lrsEdgeWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) + im_drawer.polygon( + xy=LRSCoordXY, + outline=thisLRSColor + # , closed=0, edgeWidth=lrsEdgeWidth, + # clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) if not self.multipleInterval and self.additiveChecked: plusColor = self.ADDITIVE_COLOR_POSITIVE @@ -1895,22 +2159,58 @@ class DisplayMappingResults(object): else: Xcm = (yZero-Yc0)/((Yc-Yc0)/(Xc-Xc0)) +Xc0 if Yc0 < yZero: - canvas.drawLine(Xc0, Yc0, Xcm, yZero, color=plusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) - canvas.drawLine(Xcm, yZero, Xc, yZero-(Yc-yZero), color=minusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) + im_drawer.line( + xy=((Xc0, Yc0), (Xcm, yZero)), + fill=plusColor, width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) + im_drawer.line( + xy=((Xcm, yZero), (Xc, yZero-(Yc-yZero))), + fill=minusColor, width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) else: - canvas.drawLine(Xc0, yZero - (Yc0-yZero), Xcm, yZero, color=minusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) - canvas.drawLine(Xcm, yZero, Xc, Yc, color=plusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) + im_drawer.line( + xy=((Xc0, yZero-(Yc0-yZero)), + (Xcm, yZero)), + fill=minusColor, width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) + im_drawer.line( + xy=((Xcm, yZero), (Xc, Yc)), + fill=plusColor, width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) elif (Yc0-yZero)*(Yc-yZero) > 0: if Yc < yZero: - canvas.drawLine(Xc0, Yc0, Xc, Yc, color=plusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) + im_drawer.line( + xy=((Xc0, Yc0), (Xc, Yc)), + fill=plusColor, + width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) else: - 
canvas.drawLine(Xc0, yZero - (Yc0-yZero), Xc, yZero - (Yc-yZero), color=minusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) + im_drawer.line( + xy=((Xc0, yZero - (Yc0-yZero)), + (Xc, yZero - (Yc-yZero))), + fill=minusColor, width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) else: minYc = min(Yc-yZero, Yc0-yZero) if minYc < 0: - canvas.drawLine(Xc0, Yc0, Xc, Yc, color=plusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) + im_drawer.line( + xy=((Xc0, Yc0), (Xc, Yc)), + fill=plusColor, width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) else: - canvas.drawLine(Xc0, yZero - (Yc0-yZero), Xc, yZero - (Yc-yZero), color=minusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) + im_drawer.line( + xy=((Xc0, yZero - (Yc0-yZero)), + (Xc, yZero - (Yc-yZero))), + fill=minusColor, width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) LRSCoordXY = [] AdditiveCoordXY = [] @@ -1979,15 +2279,12 @@ class DisplayMappingResults(object): point_color = RED else: point_color = BLUE - - final_x_pos = Xc-canvas.stringWidth("5",font=symbolFont)/2+1 - if final_x_pos > (xLeftOffset + plotWidth): - continue - #break ZS: This is temporary until issue with sorting for GEMMA is fixed - elif final_x_pos < xLeftOffset: - continue - else: - canvas.drawString("5", final_x_pos,Yc+2,color=point_color, font=symbolFont) + im_drawer.text( + text="5", + xy=( + Xc-im_drawer.textsize("5",font=symbolFont)[0]/2+1, + Yc+2), + fill=point_color, font=symbolFont) else: LRSCoordXY.append((Xc, Yc)) @@ -2000,7 +2297,11 @@ class DisplayMappingResults(object): m += 1 if self.manhattan_plot != True: - canvas.drawPolygon(LRSCoordXY,edgeColor=thisLRSColor,closed=0, edgeWidth=lrsEdgeWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) + im_drawer.polygon( + xy=LRSCoordXY, + outline=thisLRSColor + #, closed=0, edgeWidth=lrsEdgeWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) if not self.multipleInterval and self.additiveChecked: plusColor = self.ADDITIVE_COLOR_POSITIVE @@ -2015,22 +2316,57 @@ class DisplayMappingResults(object): else: Xcm = (yZero-Yc0)/((Yc-Yc0)/(Xc-Xc0)) +Xc0 if Yc0 < yZero: - canvas.drawLine(Xc0, Yc0, Xcm, yZero, color=plusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) - canvas.drawLine(Xcm, yZero, Xc, yZero-(Yc-yZero), color=minusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) + im_drawer.line( + xy=((Xc0, Yc0), (Xcm, yZero)), + fill=plusColor, width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) + im_drawer.line( + xy=((Xcm, yZero), (Xc, yZero-(Yc-yZero))), + fill=minusColor, width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) else: - canvas.drawLine(Xc0, yZero - (Yc0-yZero), Xcm, yZero, color=minusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) - canvas.drawLine(Xcm, yZero, Xc, Yc, color=plusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) + im_drawer.line( + xy=((Xc0, yZero - (Yc0-yZero)), + (Xcm, yZero)), + fill=minusColor, width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) + im_drawer.line( + xy=((Xcm, yZero), (Xc, Yc)), + fill=plusColor, width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) elif (Yc0-yZero)*(Yc-yZero) > 0: if Yc < yZero: - canvas.drawLine(Xc0, Yc0, Xc, Yc, color=plusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) + im_drawer.line( + xy=((Xc0, Yc0), (Xc, Yc)), fill=plusColor, + width=lineWidth + #, 
clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) else: - canvas.drawLine(Xc0, yZero - (Yc0-yZero), Xc, yZero - (Yc-yZero), color=minusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) + im_drawer.line( + xy=((Xc0,yZero-(Yc0-yZero)), + (Xc,yZero-(Yc-yZero))), + fill=minusColor, width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) else: minYc = min(Yc-yZero, Yc0-yZero) if minYc < 0: - canvas.drawLine(Xc0, Yc0, Xc, Yc, color=plusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) + im_drawer.line( + xy=((Xc0, Yc0), (Xc, Yc)), + fill=plusColor, width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) else: - canvas.drawLine(Xc0, yZero - (Yc0-yZero), Xc, yZero - (Yc-yZero), color=minusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) + im_drawer.line( + xy=((Xc0, yZero - (Yc0-yZero)), + (Xc, yZero - (Yc-yZero))), + fill=minusColor, width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) if not self.multipleInterval and INTERCROSS and self.dominanceChecked: plusColor = self.DOMINANCE_COLOR_POSITIVE @@ -2045,27 +2381,61 @@ class DisplayMappingResults(object): else: Xcm = (yZero-Yc0)/((Yc-Yc0)/(Xc-Xc0)) +Xc0 if Yc0 < yZero: - canvas.drawLine(Xc0, Yc0, Xcm, yZero, color=plusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) - canvas.drawLine(Xcm, yZero, Xc, yZero-(Yc-yZero), color=minusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) + im_drawer.line( + xy=((Xc0, Yc0), (Xcm, yZero)), + fill=plusColor, width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) + im_drawer.line( + xy=((Xcm, yZero), (Xc, yZero-(Yc-yZero))), + fill=minusColor, width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) else: - canvas.drawLine(Xc0, yZero - (Yc0-yZero), Xcm, yZero, color=minusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) - canvas.drawLine(Xcm, yZero, Xc, Yc, color=plusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) + im_drawer.line( + xy=((Xc0, yZero - (Yc0-yZero)), (Xcm, yZero)), + fill=minusColor, width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) + im_drawer.line( + xy=((Xcm, yZero), (Xc, Yc)), + fill=plusColor, width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) elif (Yc0-yZero)*(Yc-yZero) > 0: if Yc < yZero: - canvas.drawLine(Xc0, Yc0, Xc, Yc, color=plusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) + im_drawer.line( + xy=((Xc0, Yc0), (Xc, Yc)), + fill=plusColor, width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) else: - canvas.drawLine(Xc0, yZero - (Yc0-yZero), Xc, yZero - (Yc-yZero), color=minusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) + im_drawer.line( + xy=((Xc0, yZero - (Yc0-yZero)), + (Xc, yZero - (Yc-yZero))), + fill=minusColor, width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) else: minYc = min(Yc-yZero, Yc0-yZero) if minYc < 0: - canvas.drawLine(Xc0, Yc0, Xc, Yc, color=plusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) + im_drawer.line( + xy=((Xc0, Yc0), (Xc, Yc)), + fill=plusColor, width=lineWidth + #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) else: - canvas.drawLine(Xc0, yZero - (Yc0-yZero), Xc, yZero - (Yc-yZero), color=minusColor, width=lineWidth, clipX=(xLeftOffset, xLeftOffset + plotWidth)) + im_drawer.line( + xy=((Xc0, yZero - (Yc0-yZero)), + (Xc, yZero - (Yc-yZero))), fill=minusColor, + width=lineWidth + #, 
clipX=(xLeftOffset, xLeftOffset + plotWidth) + ) ###draw additive scale if not self.multipleInterval and self.additiveChecked: - additiveScaleFont=pid.Font(ttf="verdana",size=16*zoom,bold=0) + additiveScaleFont=ImageFont.truetype(font=VERDANA_FILE,size=16*zoom) additiveScale = Plot.detScaleOld(0,additiveMax) additiveStep = (additiveScale[1]-additiveScale[0])/additiveScale[2] additiveAxisList = Plot.frange(0, additiveScale[1], additiveStep) @@ -2074,13 +2444,24 @@ class DisplayMappingResults(object): additiveAxisList.append(additiveScale[1]) for item in additiveAxisList: additiveY = yZero - item*addPlotScale - canvas.drawLine(xLeftOffset + plotWidth,additiveY,xLeftOffset+4+ plotWidth,additiveY,color=self.ADDITIVE_COLOR_POSITIVE, width=1*zoom) + im_drawer.line( + xy=((xLeftOffset + plotWidth,additiveY), + (xLeftOffset+4+ plotWidth,additiveY)), + fill=self.ADDITIVE_COLOR_POSITIVE, width=1*zoom) scaleStr = "%2.3f" % item - canvas.drawString(scaleStr,xLeftOffset + plotWidth +6,additiveY+5,font=additiveScaleFont,color=self.ADDITIVE_COLOR_POSITIVE) + im_drawer.text( + text=scaleStr, + xy=(xLeftOffset + plotWidth +6,additiveY+5), + font=additiveScaleFont,fill=self.ADDITIVE_COLOR_POSITIVE) - canvas.drawLine(xLeftOffset+plotWidth,additiveY,xLeftOffset+plotWidth,yZero,color=self.ADDITIVE_COLOR_POSITIVE, width=1*zoom) + im_drawer.line( + xy=((xLeftOffset+plotWidth,additiveY), + (xLeftOffset+plotWidth,yZero)), + fill=self.ADDITIVE_COLOR_POSITIVE, width=1*zoom) - canvas.drawLine(xLeftOffset, yZero, xLeftOffset, yTopOffset + 30*(zoom - 1), color=self.LRS_COLOR, width=1*zoom) #the blue line running up the y axis + im_drawer.line( + xy=((xLeftOffset, yZero), (xLeftOffset, yTopOffset + 30*(zoom - 1))), + fill=self.LRS_COLOR, width=1*zoom) #the blue line running up the y axis def drawGraphBackground(self, canvas, gifmap, offset= (80, 120, 80, 50), zoom = 1, startMb = None, endMb = None): @@ -2088,6 +2469,7 @@ class DisplayMappingResults(object): ##multiple Chromosome view ##single Chromosome Physical ##single Chromosome Genetic + im_drawer = ImageDraw.Draw(canvas) xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset plotWidth = canvas.size[0] - xLeftOffset - xRightOffset plotHeight = canvas.size[1] - yTopOffset - yBottomOffset @@ -2114,8 +2496,10 @@ class DisplayMappingResults(object): else: theBackColor = self.GRAPH_BACK_LIGHT_COLOR i += 1 - canvas.drawRect(startPix, yTopOffset, min(startPix+spacingAmt, xLeftOffset+plotWidth), \ - yBottom, edgeColor=theBackColor, fillColor=theBackColor) + im_drawer.rectangle( + [(startPix, yTopOffset), + (min(startPix+spacingAmt, xLeftOffset+plotWidth), yBottom)], + outline=theBackColor, fill=theBackColor) drawRegionDistance = self.ChrLengthDistList[self.ChrList[self.selectedChr][1]] self.ChrLengthDistList = [drawRegionDistance] @@ -2132,7 +2516,7 @@ class DisplayMappingResults(object): chrFontZoom = 2 else: chrFontZoom = 1 - chrLabelFont=pid.Font(ttf="verdana",size=24*chrFontZoom,bold=0) + chrLabelFont=ImageFont.truetype(font=VERDANA_FILE,size=24*chrFontZoom) for i, _chr in enumerate(self.genotype): if (i % 2 == 0): @@ -2141,14 +2525,18 @@ class DisplayMappingResults(object): theBackColor = self.GRAPH_BACK_LIGHT_COLOR #draw the shaded boxes and the sig/sug thick lines - canvas.drawRect(startPosX, yTopOffset, startPosX + self.ChrLengthDistList[i]*plotXScale, \ - yBottom, edgeColor=GAINSBORO,fillColor=theBackColor) + im_drawer.rectangle( + ((startPosX, yTopOffset), + (startPosX + self.ChrLengthDistList[i]*plotXScale, yBottom)), + outline=GAINSBORO, + 
fill=theBackColor) - chrNameWidth = canvas.stringWidth(_chr.name, font=chrLabelFont) + chrNameWidth, chrNameHeight = im_drawer.textsize(_chr.name, font=chrLabelFont) chrStartPix = startPosX + (self.ChrLengthDistList[i]*plotXScale -chrNameWidth)/2 chrEndPix = startPosX + (self.ChrLengthDistList[i]*plotXScale +chrNameWidth)/2 - canvas.drawString(_chr.name, chrStartPix, yTopOffset + 20 ,font = chrLabelFont,color=BLACK) + im_drawer.text(xy=(chrStartPix, yTopOffset + 20), + text=_chr.name, font=chrLabelFont, fill=BLACK) COORDS = "%d,%d,%d,%d" %(chrStartPix, yTopOffset, chrEndPix,yTopOffset +20) #add by NL 09-03-2010 @@ -2164,7 +2552,7 @@ class DisplayMappingResults(object): ######################################### # Permutation Graph ######################################### - myCanvas = pid.PILCanvas(size=(500,300)) + myCanvas = Image.new("RGBA", size=(500, 300)) if 'lod_score' in self.qtlresults[0] and self.LRS_LOD == "LRS": perm_output = [value*4.61 for value in self.perm_output] elif 'lod_score' not in self.qtlresults[0] and self.LRS_LOD == "LOD": @@ -2174,7 +2562,8 @@ class DisplayMappingResults(object): filename= webqtlUtil.genRandStr("Reg_") Plot.plotBar(myCanvas, perm_output, XLabel=self.LRS_LOD, YLabel='Frequency', title=' Histogram of Permutation Test') - myCanvas.save(GENERATED_IMAGE_DIR+filename, format='gif') + myCanvas.save("{}.gif".format(GENERATED_IMAGE_DIR+filename), + format='gif') return filename -- cgit v1.2.3 From 95dbb4a18a205b475ca8eb72210d7d2193062295 Mon Sep 17 00:00:00 2001 From: Muriithi Frederick Muriuki Date: Sat, 23 Feb 2019 07:32:29 +0300 Subject: Fix font paths * wqflask/utility/Plot.py: Update font paths. * wqflask/fonts/ttf: Update font files. --- wqflask/fonts/ttf/arial.ttf | Bin 151232 -> 0 bytes wqflask/fonts/ttf/courbd.ttf | Bin 181388 -> 0 bytes wqflask/fonts/ttf/fnt_bs.ttf | Bin 20988 -> 0 bytes wqflask/fonts/ttf/tahoma.ttf | Bin 249012 -> 0 bytes wqflask/fonts/ttf/trebucbd.ttf | Bin 123828 -> 0 bytes wqflask/fonts/ttf/verdana.ttf | Bin 139640 -> 0 bytes wqflask/utility/Plot.py | 6 +++--- wqflask/wqflask/static/fonts/courbd.ttf | Bin 0 -> 181388 bytes wqflask/wqflask/static/fonts/tahoma.ttf | Bin 0 -> 249012 bytes 9 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 wqflask/fonts/ttf/arial.ttf delete mode 100644 wqflask/fonts/ttf/courbd.ttf delete mode 100644 wqflask/fonts/ttf/fnt_bs.ttf delete mode 100644 wqflask/fonts/ttf/tahoma.ttf delete mode 100644 wqflask/fonts/ttf/trebucbd.ttf delete mode 100644 wqflask/fonts/ttf/verdana.ttf create mode 100644 wqflask/wqflask/static/fonts/courbd.ttf create mode 100644 wqflask/wqflask/static/fonts/tahoma.ttf (limited to 'wqflask/utility') diff --git a/wqflask/fonts/ttf/arial.ttf b/wqflask/fonts/ttf/arial.ttf deleted file mode 100644 index bf0d4a95..00000000 Binary files a/wqflask/fonts/ttf/arial.ttf and /dev/null differ diff --git a/wqflask/fonts/ttf/courbd.ttf b/wqflask/fonts/ttf/courbd.ttf deleted file mode 100644 index c8081467..00000000 Binary files a/wqflask/fonts/ttf/courbd.ttf and /dev/null differ diff --git a/wqflask/fonts/ttf/fnt_bs.ttf b/wqflask/fonts/ttf/fnt_bs.ttf deleted file mode 100644 index 712c38cf..00000000 Binary files a/wqflask/fonts/ttf/fnt_bs.ttf and /dev/null differ diff --git a/wqflask/fonts/ttf/tahoma.ttf b/wqflask/fonts/ttf/tahoma.ttf deleted file mode 100644 index bb314be3..00000000 Binary files a/wqflask/fonts/ttf/tahoma.ttf and /dev/null differ diff --git a/wqflask/fonts/ttf/trebucbd.ttf b/wqflask/fonts/ttf/trebucbd.ttf deleted file mode 100644 index 1ab1ae0a..00000000 
Binary files a/wqflask/fonts/ttf/trebucbd.ttf and /dev/null differ diff --git a/wqflask/fonts/ttf/verdana.ttf b/wqflask/fonts/ttf/verdana.ttf deleted file mode 100644 index 754a9b7b..00000000 Binary files a/wqflask/fonts/ttf/verdana.ttf and /dev/null differ diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py index c557bcc6..e7115036 100644 --- a/wqflask/utility/Plot.py +++ b/wqflask/utility/Plot.py @@ -48,9 +48,9 @@ BLACK = ImageColor.getrgb("black") # ---- END: Define common colours ---- # # ---- FONT FILES ---- # -VERDANA_FILE = "fonts/ttf/verdana.ttf" -COUR_FILE = "fonts/ttf/courbd.ttf" -TAHOMA_FILE = "fonts/ttf/tahoma.ttf" +VERDANA_FILE = "./wqflask/static/fonts/verdana.ttf" +COUR_FILE = "./wqflask/static/fonts/courbd.ttf" +TAHOMA_FILE = "./wqflask/static/fonts/tahoma.ttf" # ---- END: FONT FILES ---- # def cformat(d, rank=0): diff --git a/wqflask/wqflask/static/fonts/courbd.ttf b/wqflask/wqflask/static/fonts/courbd.ttf new file mode 100644 index 00000000..c8081467 Binary files /dev/null and b/wqflask/wqflask/static/fonts/courbd.ttf differ diff --git a/wqflask/wqflask/static/fonts/tahoma.ttf b/wqflask/wqflask/static/fonts/tahoma.ttf new file mode 100644 index 00000000..bb314be3 Binary files /dev/null and b/wqflask/wqflask/static/fonts/tahoma.ttf differ -- cgit v1.2.3 From dcd053e0249b14c938d94eb749a8b4095c80be29 Mon Sep 17 00:00:00 2001 From: Muriithi Frederick Muriuki Date: Fri, 8 Mar 2019 10:39:53 +0300 Subject: Create new utility module for drawing * wqflask/utility/pillow_utils.py: Create a module to hold some utility functions for drawing with Pillow. Initialise the module with a function to draw rotated text. --- wqflask/utility/pillow_utils.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 wqflask/utility/pillow_utils.py (limited to 'wqflask/utility') diff --git a/wqflask/utility/pillow_utils.py b/wqflask/utility/pillow_utils.py new file mode 100644 index 00000000..4a666e4b --- /dev/null +++ b/wqflask/utility/pillow_utils.py @@ -0,0 +1,16 @@ +from PIL import Image, ImageColor, ImageDraw, ImageFont + +import utility.logger +logger = utility.logger.getLogger(__name__ ) + +BLACK = ImageColor.getrgb("black") +# def draw_rotated_text(canvas: Image, text: str, font: ImageFont, xy: tuple, fill: ImageColor=BLACK, angle: int=-90): +def draw_rotated_text(canvas, text, font, xy, fill=BLACK, angle=-90): + # type: (Image, str, ImageFont, tuple, ImageColor, int) + """Utility function draw rotated text""" + tmp_img = Image.new("RGBA", font.getsize(text), color=(0,0,0,0)) + draw_text = ImageDraw.Draw(tmp_img) + draw_text.text(text=text, xy=(0,0), font=font, fill=fill) + tmp_img2 = tmp_img.rotate(angle, expand=1) + tmp_img2.save("/tmp/{}.png".format(text), format="png") + canvas.paste(im=tmp_img2, box=tuple([int(i) for i in xy])) -- cgit v1.2.3 From 6e1102e36737a3f48a74cf431819c269c54f2601 Mon Sep 17 00:00:00 2001 From: Muriithi Frederick Muriuki Date: Fri, 8 Mar 2019 10:41:40 +0300 Subject: Use draw_rotated_text() Use the new draw_rotated_text() function to draw the text rotated as was formerly done. * wqflask/utility/Plot.py (plotBar): Use draw_rotated_text(). * wqflask/wqflask/marker_regression/display_mapping_results.py: (DisplayMappingResults) Use draw_rotated_text(). 
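For reference, a minimal standalone sketch of how the new helper is called (illustrative only, not part of the patch: the label text, coordinates and output filename are assumptions, wqflask/ is assumed to be on PYTHONPATH, the font path is the one Plot.py now uses, and a Pillow version that still provides font.getsize(), which the helper uses internally, is assumed):

    from PIL import Image, ImageFont

    from utility.pillow_utils import draw_rotated_text

    # A blank canvas comparable to the permutation histogram above.
    canvas = Image.new("RGBA", size=(500, 300), color=(255, 255, 255, 255))
    label_font = ImageFont.truetype(font="./wqflask/static/fonts/verdana.ttf", size=16)

    # Render a vertical Y-axis label; angle=90 mirrors the angle=90 argument
    # in the im_drawer.text() calls this commit replaces.
    draw_rotated_text(canvas, text="Frequency", font=label_font,
                      xy=(19, 250), fill=(0, 0, 0), angle=90)
    canvas.save("/tmp/rotated_label_demo.png", format="png")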
--- wqflask/utility/Plot.py | 14 ++++++------- .../marker_regression/display_mapping_results.py | 24 +++++++++++++--------- 2 files changed, 21 insertions(+), 17 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py index e7115036..d4373412 100644 --- a/wqflask/utility/Plot.py +++ b/wqflask/utility/Plot.py @@ -38,7 +38,7 @@ from numarray import ones, array, dot, swapaxes import webqtlUtil import corestats from base import webqtlConfig - +from utility.pillow_utils import draw_rotated_text import utility.logger logger = utility.logger.getLogger(__name__ ) @@ -219,12 +219,12 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab font=labelFont,fill=labelColor) if YLabel: - im_drawer.text( - text=YLabel, - xy=(19, - yTopOffset+plotHeight-(plotHeight-im_drawer.textsize( - YLabel,font=labelFont)[0])/2.0), - font=labelFont,fill=labelColor,angle=90) + draw_rotated_text(canvas, text=YLabel, + xy=(19, + yTopOffset+plotHeight-( + plotHeight-im_drawer.textsize( + YLabel,font=labelFont)[0])/2.0), + font=labelFont, fill=labelColor, angle=90) labelFont=ImageFont.truetype(font=VERDANA_FILE,size=16) if title: diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py index 39067fc5..b03902a5 100644 --- a/wqflask/wqflask/marker_regression/display_mapping_results.py +++ b/wqflask/wqflask/marker_regression/display_mapping_results.py @@ -45,6 +45,7 @@ from utility import Plot from utility.benchmark import Bench from wqflask.interval_analyst import GeneUtil from base.webqtlConfig import TMPDIR, GENERATED_TEXT_DIR, GENERATED_IMAGE_DIR +from utility.pillow_utils import draw_rotated_text import utility.logger logger = utility.logger.getLogger(__name__ ) @@ -1519,11 +1520,12 @@ class DisplayMappingResults(object): if lastGene == 0: - im_drawer.text( - text="%s" % (_chr[j].name), + draw_rotated_text( + canvas, text="%s" % (_chr[j].name), + font=ImageFont.truetype(font=VERDANA_FILE, + size=12), xy=(geneStartPix, geneYLocation+17+2*maxind*self.EACH_GENE_HEIGHT*zoom), - font=ImageFont.truetype(font=VERDANA_FILE, size=12), fill=BLACK, angle=-90) oldgeneEndPix = geneEndPix; @@ -1804,9 +1806,10 @@ class DisplayMappingResults(object): (startPosX+tickdists*plotXScale, yZero + 7)), fill=BLACK, width=1*zoom) if j % 2 == 0: - im_drawer.text( - xy=(startPosX+tickdists*plotXScale, yZero+10*zoom), - text=str(tickdists), fill=BLACK, font=MBLabelFont, angle=270) + draw_rotated_text( + canvas, text=str(tickdists), font=MBLabelFont, + xy=(startPosX+tickdists*plotXScale, + yZero+10*zoom), fill=BLACK, angle=270) startPosX += (self.ChrLengthDistList[i]+self.GraphInterval)*plotXScale megabaseLabelFont = ImageFont.truetype(font=VERDANA_FILE, size=int(18*zoom*1.5)) @@ -2037,11 +2040,12 @@ class DisplayMappingResults(object): LRSLODFont=ImageFont.truetype(font=VERDANA_FILE, size=int(18*zoom*1.5)) yZero = yTopOffset + plotHeight - im_drawer.text( - text=self.LRS_LOD, - xy=(xLeftOffset - im_drawer.textsize("999.99", font=LRSScaleFont)[0] - 15*(zoom-1), + draw_rotated_text( + canvas, text=self.LRS_LOD, font=LRSLODFont, + xy=(xLeftOffset - im_drawer.textsize( + "999.99", font=LRSScaleFont)[0] - 15*(zoom-1), yZero - 150 - 300*(zoom - 1)), - font=LRSLODFont, fill=BLACK, angle=90) + fill=BLACK, angle=90) for item in LRSAxisList: if LRS_LOD_Max == 0.0: -- cgit v1.2.3 From e3c0745fdc3bf33cb10256643f3b1469ee6614c4 Mon Sep 17 00:00:00 2001 From: Muriithi Frederick Muriuki Date: Sat, 9 Mar 2019 
08:33:19 +0300 Subject: Add draw_open_polygon() utility * wqflask/utility/pillow_utils.py: New method. --- wqflask/utility/pillow_utils.py | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'wqflask/utility') diff --git a/wqflask/utility/pillow_utils.py b/wqflask/utility/pillow_utils.py index 4a666e4b..dfbf3e19 100644 --- a/wqflask/utility/pillow_utils.py +++ b/wqflask/utility/pillow_utils.py @@ -4,6 +4,8 @@ import utility.logger logger = utility.logger.getLogger(__name__ ) BLACK = ImageColor.getrgb("black") +WHITE = ImageColor.getrgb("white") + # def draw_rotated_text(canvas: Image, text: str, font: ImageFont, xy: tuple, fill: ImageColor=BLACK, angle: int=-90): def draw_rotated_text(canvas, text, font, xy, fill=BLACK, angle=-90): # type: (Image, str, ImageFont, tuple, ImageColor, int) @@ -14,3 +16,10 @@ def draw_rotated_text(canvas, text, font, xy, fill=BLACK, angle=-90): tmp_img2 = tmp_img.rotate(angle, expand=1) tmp_img2.save("/tmp/{}.png".format(text), format="png") canvas.paste(im=tmp_img2, box=tuple([int(i) for i in xy])) + +# def draw_open_polygon(canvas: Image, xy: tuple, fill: ImageColor=WHITE, outline: ImageColor=BLACK): +def draw_open_polygon(canvas, xy, fill=None, outline=BLACK, width=0): + # type: (Image, tuple, ImageColor, ImageColor) + draw_ctx = ImageDraw.Draw(canvas) + draw_ctx.polygon(xy, fill=fill) + draw_ctx.line(xy, fill=outline, width=width) -- cgit v1.2.3 From e49ef0954e111ace2044cced0a83b4d9dc00bc72 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Mon, 17 Aug 2020 20:45:08 +0300 Subject: Fix malformed docstring * wqflask/utility/Plot.py: Update docstring. --- wqflask/utility/Plot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py index d4373412..82bf6070 100644 --- a/wqflask/utility/Plot.py +++ b/wqflask/utility/Plot.py @@ -97,7 +97,7 @@ def find_outliers(vals): >>> find_outliers([3.504, 5.234, 6.123, 7.234, 3.542, 5.341, 7.852, 4.555, 12.537]) (11.252500000000001, 0.5364999999999993) - >>> >>> find_outliers([9,12,15,17,31,50,7,5,6,8]) + >>> find_outliers([9,12,15,17,31,50,7,5,6,8]) (32.0, -8.0) If there are no vals, returns None for the upper and lower bounds, -- cgit v1.2.3 From af330a2aa7b36fd0cf8505eb20fa06d2ed58b86b Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Mon, 17 Aug 2020 17:41:58 +0300 Subject: Wrap print statements in parentheses --- scripts/maintenance/QTL_Reaper_v6.py | 6 ++-- .../Update_Case_Attributes_MySQL_tab.py | 2 +- scripts/maintenance/delete_genotypes.py | 14 ++++---- scripts/maintenance/delete_phenotypes.py | 14 ++++---- scripts/maintenance/load_genotypes.py | 16 ++++----- scripts/maintenance/load_phenotypes.py | 30 ++++++++-------- scripts/maintenance/readProbeSetMean_v7.py | 40 +++++++++++----------- scripts/maintenance/readProbeSetSE_v7.py | 22 ++++++------ wqflask/run_gunicorn.py | 2 +- wqflask/utility/startup_config.py | 4 +-- wqflask/utility/svg.py | 10 +++--- wqflask/utility/tools.py | 8 ++--- wqflask/wqflask/marker_regression/rqtl_mapping.py | 2 +- wqflask/wqflask/pbkdf2.py | 16 ++++----- wqflask/wqflask/views.py | 4 +-- wqflask/wqflask/wgcna/wgcna_analysis.py | 12 +++---- 16 files changed, 101 insertions(+), 101 deletions(-) (limited to 'wqflask/utility') diff --git a/scripts/maintenance/QTL_Reaper_v6.py b/scripts/maintenance/QTL_Reaper_v6.py index e50dbd40..7fb56eca 100755 --- a/scripts/maintenance/QTL_Reaper_v6.py +++ b/scripts/maintenance/QTL_Reaper_v6.py @@ -53,7 +53,7 @@ for ProbeSetFreezeId in ProbeSetFreezeIds: #if 
InbredSetId==12: # InbredSetId=2 - print ProbeSetFreezeId, InbredSets[InbredSetId] + print((ProbeSetFreezeId, InbredSets[InbredSetId])) genotype_1.read(InbredSets[InbredSetId]) locuses = [] @@ -102,7 +102,7 @@ for ProbeSetFreezeId in ProbeSetFreezeIds: kj += 1 if kj%1000==0: - print ProbeSetFreezeId, InbredSets[InbredSetId],kj + print((ProbeSetFreezeId, InbredSets[InbredSetId],kj)) - print ProbeSetFreezeIds + print(ProbeSetFreezeIds) diff --git a/scripts/maintenance/Update_Case_Attributes_MySQL_tab.py b/scripts/maintenance/Update_Case_Attributes_MySQL_tab.py index 0f8602c9..bf796df4 100644 --- a/scripts/maintenance/Update_Case_Attributes_MySQL_tab.py +++ b/scripts/maintenance/Update_Case_Attributes_MySQL_tab.py @@ -24,4 +24,4 @@ for row in csv_data: #close the connection to the database. mydb.commit() cursor.close() -print "Done" \ No newline at end of file +print("Done") \ No newline at end of file diff --git a/scripts/maintenance/delete_genotypes.py b/scripts/maintenance/delete_genotypes.py index fa693f0f..060640e1 100755 --- a/scripts/maintenance/delete_genotypes.py +++ b/scripts/maintenance/delete_genotypes.py @@ -8,13 +8,13 @@ import genotypes def main(argv): # config config = utilities.get_config(argv[1]) - print "config:" + print("config:") for item in config.items('config'): - print "\t%s" % (str(item)) + print(("\t%s" % (str(item)))) # var - print "variable:" + print("variable:") inbredsetid = config.get('config', 'inbredsetid') - print "\tinbredsetid: %s" % inbredsetid + print(("\tinbredsetid: %s" % inbredsetid)) # datafile datafile = open(config.get('config', 'datafile'), 'r') datafile = csv.reader(datafile, delimiter='\t', quotechar='"') @@ -25,9 +25,9 @@ def main(argv): continue genoname = row[0] delrowcount += genotypes.delete(genoname, inbredsetid) - print "deleted %d genotypes" % (delrowcount) + print(("deleted %d genotypes" % (delrowcount))) if __name__ == "__main__": - print "command line arguments:\n\t%s" % sys.argv + print(("command line arguments:\n\t%s" % sys.argv)) main(sys.argv) - print "exit successfully" + print("exit successfully") diff --git a/scripts/maintenance/delete_phenotypes.py b/scripts/maintenance/delete_phenotypes.py index 326c466e..60dbec61 100755 --- a/scripts/maintenance/delete_phenotypes.py +++ b/scripts/maintenance/delete_phenotypes.py @@ -8,13 +8,13 @@ import phenotypes def main(argv): # config config = utilities.get_config(argv[1]) - print "config:" + print("config:") for item in config.items('config'): - print "\t%s" % (str(item)) + print(("\t%s" % (str(item)))) # var - print "variable:" + print("variable:") inbredsetid = config.get('config', 'inbredsetid') - print "\tinbredsetid: %s" % inbredsetid + print(("\tinbredsetid: %s" % inbredsetid)) # datafile datafile = open(config.get('config', 'datafile'), 'r') datafile = csv.reader(datafile, delimiter='\t', quotechar='"') @@ -27,9 +27,9 @@ def main(argv): except: continue delrowcount += phenotypes.delete(publishxrefid=publishxrefid, inbredsetid=inbredsetid) - print "deleted %d phenotypes" % (delrowcount) + print(("deleted %d phenotypes" % (delrowcount))) if __name__ == "__main__": - print "command line arguments:\n\t%s" % sys.argv + print(("command line arguments:\n\t%s" % sys.argv)) main(sys.argv) - print "exit successfully" + print("exit successfully") diff --git a/scripts/maintenance/load_genotypes.py b/scripts/maintenance/load_genotypes.py index 338483f4..c235a31f 100755 --- a/scripts/maintenance/load_genotypes.py +++ b/scripts/maintenance/load_genotypes.py @@ -8,7 +8,7 @@ def main(argv): 
config = utilities.get_config(argv[1]) print("config file:") for item in config.items('config'): - print("\t%s" % str(item)) + print(("\t%s" % str(item))) parse_genofile(config, fetch_parameters(config)) def fetch_parameters(config): @@ -20,7 +20,7 @@ def fetch_parameters(config): config_dic['genofile'] = config.get('config', 'genofile') print("config dictionary:") for k, v in config_dic.items(): - print("\t%s: %s" % (k, v)) + print(("\t%s: %s" % (k, v))) return config_dic def parse_genofile(config, config_dic): @@ -43,9 +43,9 @@ def parse_genofile(config, config_dic): # print("geno file meta dictionary:") for k, v in meta_dic.items(): - print("\t%s: %s" % (k, v)) + print(("\t%s: %s" % (k, v))) # - print("geno file head:\n\t%s" % line) + print(("geno file head:\n\t%s" % line)) strainnames = line.split()[4:] config_dic['strains'] = datastructure.get_strains_bynames(inbredsetid=config_dic['inbredsetid'], strainnames=strainnames, updatestrainxref="yes") continue @@ -81,7 +81,7 @@ def check_or_insert_geno(config_dic, marker_dic): result = cursor.fetchone() if result: genoid = result[0] - print("get geno record: %d" % genoid) + print(("get geno record: %d" % genoid)) else: sql = """ INSERT INTO Geno @@ -95,7 +95,7 @@ def check_or_insert_geno(config_dic, marker_dic): cursor.execute(sql, (config_dic['speciesid'], marker_dic['locus'], marker_dic['locus'], marker_dic['chromosome'], marker_dic['mb'])) rowcount = cursor.rowcount genoid = con.insert_id() - print("INSERT INTO Geno: %d record: %d" % (rowcount, genoid)) + print(("INSERT INTO Geno: %d record: %d" % (rowcount, genoid))) return genoid def check_genoxref(config_dic, marker_dic): @@ -146,9 +146,9 @@ def insert_genoxref(config_dic, marker_dic): """ cursor.execute(sql, (config_dic['genofreezeid'], marker_dic['genoid'], config_dic['dataid'], marker_dic['cm'], 'N')) rowcount = cursor.rowcount - print("INSERT INTO GenoXRef: %d record" % (rowcount)) + print(("INSERT INTO GenoXRef: %d record" % (rowcount))) if __name__ == "__main__": - print("command line arguments:\n\t%s" % sys.argv) + print(("command line arguments:\n\t%s" % sys.argv)) main(sys.argv) print("exit successfully") diff --git a/scripts/maintenance/load_phenotypes.py b/scripts/maintenance/load_phenotypes.py index c3c6570b..61d527d4 100755 --- a/scripts/maintenance/load_phenotypes.py +++ b/scripts/maintenance/load_phenotypes.py @@ -7,31 +7,31 @@ import datastructure def main(argv): # config config = utilities.get_config(argv[1]) - print "config:" + print("config:") for item in config.items('config'): - print "\t%s" % (str(item)) + print(("\t%s" % (str(item)))) # var inbredsetid = config.get('config', 'inbredsetid') - print "inbredsetid: %s" % inbredsetid + print(("inbredsetid: %s" % inbredsetid)) species = datastructure.get_species(inbredsetid) speciesid = species[0] - print "speciesid: %s" % speciesid + print(("speciesid: %s" % speciesid)) dataid = datastructure.get_nextdataid_phenotype() - print "next data id: %s" % dataid + print(("next data id: %s" % dataid)) cursor, con = utilities.get_cursor() # datafile datafile = open(config.get('config', 'datafile'), 'r') phenotypedata = csv.reader(datafile, delimiter='\t', quotechar='"') phenotypedata_head = phenotypedata.next() - print "phenotypedata head:\n\t%s" % phenotypedata_head + print(("phenotypedata head:\n\t%s" % phenotypedata_head)) strainnames = phenotypedata_head[1:] strains = datastructure.get_strains_bynames(inbredsetid=inbredsetid, strainnames=strainnames, updatestrainxref="yes") # metafile metafile = open(config.get('config', 
'metafile'), 'r') phenotypemeta = csv.reader(metafile, delimiter='\t', quotechar='"') phenotypemeta_head = phenotypemeta.next() - print "phenotypemeta head:\n\t%s" % phenotypemeta_head - print + print(("phenotypemeta head:\n\t%s" % phenotypemeta_head)) + print() # load for metarow in phenotypemeta: # @@ -67,7 +67,7 @@ def main(argv): )) rowcount = cursor.rowcount phenotypeid = con.insert_id() - print "INSERT INTO Phenotype: %d record: %d" % (rowcount, phenotypeid) + print(("INSERT INTO Phenotype: %d record: %d" % (rowcount, phenotypeid))) # Publication publicationid = None # reset pubmed_id = utilities.to_db_string(metarow[0], None) @@ -81,7 +81,7 @@ def main(argv): re = cursor.fetchone() if re: publicationid = re[0] - print "get Publication record: %d" % publicationid + print(("get Publication record: %d" % publicationid)) if not publicationid: sql = """ INSERT INTO Publication @@ -109,7 +109,7 @@ def main(argv): )) rowcount = cursor.rowcount publicationid = con.insert_id() - print "INSERT INTO Publication: %d record: %d" % (rowcount, publicationid) + print(("INSERT INTO Publication: %d record: %d" % (rowcount, publicationid))) # data for index, strain in enumerate(strains): # @@ -158,14 +158,14 @@ def main(argv): cursor.execute(sql, (inbredsetid, phenotypeid, publicationid, dataid, "")) rowcount = cursor.rowcount publishxrefid = con.insert_id() - print "INSERT INTO PublishXRef: %d record: %d" % (rowcount, publishxrefid) + print(("INSERT INTO PublishXRef: %d record: %d" % (rowcount, publishxrefid))) # for loop next dataid += 1 - print + print() # release con.close() if __name__ == "__main__": - print "command line arguments:\n\t%s" % sys.argv + print(("command line arguments:\n\t%s" % sys.argv)) main(sys.argv) - print "exit successfully" + print("exit successfully") diff --git a/scripts/maintenance/readProbeSetMean_v7.py b/scripts/maintenance/readProbeSetMean_v7.py index e9c8f25c..e7a4c826 100755 --- a/scripts/maintenance/readProbeSetMean_v7.py +++ b/scripts/maintenance/readProbeSetMean_v7.py @@ -42,9 +42,9 @@ try: con = MySQLdb.Connect(db='db_webqtl',host='localhost', user='username',passwd=passwd) db = con.cursor() - print "You have successfully connected to mysql.\n" + print("You have successfully connected to mysql.\n") except: - print "You entered incorrect password.\n" + print("You entered incorrect password.\n") sys.exit(0) time0 = time.time() @@ -55,7 +55,7 @@ time0 = time.time() # generate the gene list of expression data here # ######################################################################### -print 'Checking if each line have same number of members' +print('Checking if each line have same number of members') GeneList = [] isCont = 1 @@ -70,7 +70,7 @@ while line: line2 = string.split(string.strip(line),'\t') line2 = map(string.strip, line2) if len(line2) != nfield: - print "Error : " + line + print(("Error : " + line)) isCont = 0 GeneList.append(line2[0]) @@ -78,7 +78,7 @@ while line: kj+=1 if kj%100000 == 0: - print 'checked ',kj,' lines' + print(('checked ',kj,' lines')) GeneList = map(string.lower, GeneList) GeneList.sort() @@ -87,14 +87,14 @@ if isCont==0: sys.exit(0) -print 'used ',time.time()-time0,' seconds' +print(('used ',time.time()-time0,' seconds')) ######################################################################### # # Check if each strain exist in database # generate the string id list of expression data here # ######################################################################### -print 'Checking if each strain exist in database' +print('Checking 
if each strain exist in database') isCont = 1 fp.seek(0) @@ -109,20 +109,20 @@ for item in header: db.execute('select Id from Strain where Name = "%s"' % item) Ids.append(db.fetchall()[0][0]) except: - print item,'does not exist, check the if the strain name is correct' + print((item,'does not exist, check the if the strain name is correct')) isCont=0 if isCont==0: sys.exit(0) -print 'used ',time.time()-time0,' seconds' +print(('used ',time.time()-time0,' seconds')) ######################################################################## # # Check if each ProbeSet exist in database # ######################################################################## -print 'Check if each ProbeSet exist in database' +print('Check if each ProbeSet exist in database') ##---- find PID is name or target ----## line = fp.readline() @@ -146,7 +146,7 @@ Names = [] for item in results: Names.append(item[0]) -print Names +print(Names) Names = map(string.lower, Names) @@ -170,7 +170,7 @@ while x 0: DataValues = ','.join(DataValues) diff --git a/wqflask/run_gunicorn.py b/wqflask/run_gunicorn.py index adffdca3..58108e03 100644 --- a/wqflask/run_gunicorn.py +++ b/wqflask/run_gunicorn.py @@ -7,7 +7,7 @@ # from flask import Flask # application = Flask(__name__) -print "===> Starting up Gunicorn process" +print("===> Starting up Gunicorn process") from wqflask import app from utility.startup_config import app_config diff --git a/wqflask/utility/startup_config.py b/wqflask/utility/startup_config.py index 817284dd..42ead709 100644 --- a/wqflask/utility/startup_config.py +++ b/wqflask/utility/startup_config.py @@ -27,7 +27,7 @@ def app_config(): port = get_setting_int("SERVER_PORT") if get_setting_bool("USE_GN_SERVER"): - print("GN2 API server URL is ["+BLUE+get_setting("GN_SERVER_URL")+ENDC+"]") + print(("GN2 API server URL is ["+BLUE+get_setting("GN_SERVER_URL")+ENDC+"]")) import requests page = requests.get(get_setting("GN_SERVER_URL")) if page.status_code != 200: @@ -36,4 +36,4 @@ def app_config(): # import utility.elasticsearch_tools as es # es.test_elasticsearch_connection() - print("GN2 is running. Visit %s[http://localhost:%s/%s](%s)" % (BLUE,str(port),ENDC,get_setting("WEBSERVER_URL"))) + print(("GN2 is running. Visit %s[http://localhost:%s/%s](%s)" % (BLUE,str(port),ENDC,get_setting("WEBSERVER_URL")))) diff --git a/wqflask/utility/svg.py b/wqflask/utility/svg.py index db13b9d1..6285ea63 100644 --- a/wqflask/utility/svg.py +++ b/wqflask/utility/svg.py @@ -102,7 +102,7 @@ if use_dom_implementation<>0: from xml.dom import implementation from xml.dom.ext import PrettyPrint except: - raise exceptions.ImportError, "PyXML is required for using the dom implementation" + raise exceptions.ImportError("PyXML is required for using the dom implementation") #The implementation is used for the creating the XML document. 
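All of these hunks follow the same mechanical pattern: Python 2 statement syntax is rewritten as the equivalent call syntax so the modules at least parse under Python 3. A short illustrative sketch of the two idioms involved (not code from the repository):

    # Python 2-only statement forms, rejected by the Python 3 parser:
    #
    #     print "===> Starting up Gunicorn process"
    #     raise exceptions.ImportError, "PyXML is required for using the dom implementation"
    #
    # Portable call forms, as this commit produces:
    print("===> Starting up Gunicorn process")

    try:
        # Python 3 has no `exceptions` module; built-in exceptions are raised directly.
        raise ImportError("PyXML is required for using the dom implementation")
    except ImportError as exc:
        # The doubled parentheses seen in some hunks, e.g. print((a, b)),
        # print a single tuple argument, which keeps the output identical
        # under Python 2 and Python 3.
        print(("caught:", exc))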
#The prettyprint module is used for converting the xml document object to a xml file @@ -1018,12 +1018,12 @@ class drawing: PrettyPrint(root,f) f.close() except: - print "Cannot write SVG file: " + filename + print(("Cannot write SVG file: " + filename)) def validate(self): try: import xml.parsers.xmlproc.xmlval except: - raise exceptions.ImportError,'PyXml is required for validating SVG' + raise exceptions.ImportError('PyXml is required for validating SVG') svg=self.toXml() xv=xml.parsers.xmlproc.xmlval.XMLValidator() try: @@ -1031,7 +1031,7 @@ class drawing: except: raise Exception("SVG is not well formed, see messages above") else: - print "SVG well formed" + print("SVG well formed") if __name__=='__main__': @@ -1065,4 +1065,4 @@ if __name__=='__main__': s.addElement(c) d.setSVG(s) - print d.toXml() + print((d.toXml())) diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py index 77db5d53..f790d424 100644 --- a/wqflask/utility/tools.py +++ b/wqflask/utility/tools.py @@ -214,20 +214,20 @@ ENDC = '\033[0m' def show_settings(): from utility.tools import LOG_LEVEL - print("Set global log level to "+BLUE+LOG_LEVEL+ENDC) + print(("Set global log level to "+BLUE+LOG_LEVEL+ENDC)) log_level = getattr(logging, LOG_LEVEL.upper()) logging.basicConfig(level=log_level) logger.info(OVERRIDES) logger.info(BLUE+"Mr. Mojo Risin 2"+ENDC) - print "runserver.py: ****** Webserver configuration - k,v pairs from app.config ******" keylist = app.config.keys() + print("runserver.py: ****** Webserver configuration - k,v pairs from app.config ******") keylist.sort() for k in keylist: try: - print("%s: %s%s%s%s" % (k,BLUE,BOLD,get_setting(k),ENDC)) + print(("%s: %s%s%s%s" % (k,BLUE,BOLD,get_setting(k),ENDC))) except: - print("%s: %s%s%s%s" % (k,GREEN,BOLD,app.config[k],ENDC)) + print(("%s: %s%s%s%s" % (k,GREEN,BOLD,app.config[k],ENDC))) # Cached values diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py index c5590a85..0a5758af 100644 --- a/wqflask/wqflask/marker_regression/rqtl_mapping.py +++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py @@ -42,7 +42,7 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec png = ro.r["png"] # Map the png function dev_off = ro.r["dev.off"] # Map the device off function - print(r_library("qtl")) # Load R/qtl + print((r_library("qtl"))) # Load R/qtl logger.info("QTL library loaded"); diff --git a/wqflask/wqflask/pbkdf2.py b/wqflask/wqflask/pbkdf2.py index f7f61a09..811c83b0 100644 --- a/wqflask/wqflask/pbkdf2.py +++ b/wqflask/wqflask/pbkdf2.py @@ -92,14 +92,14 @@ def test(): def check(data, salt, iterations, keylen, expected): rv = pbkdf2_hex(data, salt, iterations, keylen) if rv != expected: - print 'Test failed:' - print ' Expected: %s' % expected - print ' Got: %s' % rv - print ' Parameters:' - print ' data=%s' % data - print ' salt=%s' % salt - print ' iterations=%d' % iterations - print + print('Test failed:') + print((' Expected: %s' % expected)) + print((' Got: %s' % rv)) + print(' Parameters:') + print((' data=%s' % data)) + print((' salt=%s' % salt)) + print((' iterations=%d' % iterations)) + print() failed.append(1) # From RFC 6070 diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index dde22bf7..d67f1a2e 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -943,5 +943,5 @@ def json_default_handler(obj): # logger.info("Not going to serialize Dataset") # return None else: - raise TypeError, 'Object of type %s with value of %s is not 
JSON serializable' % ( - type(obj), repr(obj)) + raise TypeError('Object of type %s with value of %s is not JSON serializable' % ( + type(obj), repr(obj))) diff --git a/wqflask/wqflask/wgcna/wgcna_analysis.py b/wqflask/wqflask/wgcna/wgcna_analysis.py index 880a1cb2..d79ad6df 100644 --- a/wqflask/wqflask/wgcna/wgcna_analysis.py +++ b/wqflask/wqflask/wgcna/wgcna_analysis.py @@ -60,7 +60,7 @@ class WGCNA(object): print("Starting WGCNA analysis on dataset") self.r_enableWGCNAThreads() # Enable multi threading self.trait_db_list = [trait.strip() for trait in requestform['trait_list'].split(',')] - print("Retrieved phenotype data from database", requestform['trait_list']) + print(("Retrieved phenotype data from database", requestform['trait_list'])) helper_functions.get_trait_db_obs(self, self.trait_db_list) self.input = {} # self.input contains the phenotype values we need to send to R @@ -101,13 +101,13 @@ class WGCNA(object): if requestform.get('SoftThresholds') is not None: powers = [int(threshold.strip()) for threshold in requestform['SoftThresholds'].rstrip().split(",")] rpow = r_unlist(r_c(powers)) - print "SoftThresholds: {} == {}".format(powers, rpow) + print(("SoftThresholds: {} == {}".format(powers, rpow))) self.sft = self.r_pickSoftThreshold(rM, powerVector = rpow, verbose = 5) - print "PowerEstimate: {}".format(self.sft[0]) + print(("PowerEstimate: {}".format(self.sft[0]))) self.results['PowerEstimate'] = self.sft[0] if self.sft[0][0] is ri.NA_Integer: - print "No power is suitable for the analysis, just use 1" + print("No power is suitable for the analysis, just use 1") self.results['Power'] = 1 # No power could be estimated else: self.results['Power'] = self.sft[0][0] # Use the estimated power @@ -122,7 +122,7 @@ class WGCNA(object): self.results['network'] = network # How many modules and how many gene per module ? - print "WGCNA found {} modules".format(r_table(network[1])) + print(("WGCNA found {} modules".format(r_table(network[1])))) self.results['nmod'] = r_length(r_table(network[1]))[0] # The iconic WCGNA plot of the modules in the hanging tree @@ -135,7 +135,7 @@ class WGCNA(object): sys.stdout.flush() def render_image(self, results): - print("pre-loading imgage results:", self.results['imgloc']) + print(("pre-loading imgage results:", self.results['imgloc'])) imgfile = open(self.results['imgloc'], 'rb') imgdata = imgfile.read() imgB64 = imgdata.encode("base64") -- cgit v1.2.3 From acdc611247c8eaccbba3dec00ad640e10a4c99b1 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Mon, 17 Aug 2020 21:45:04 +0300 Subject: Replace "<>" with "!=" * wqflask/utility/svg.py: Use "!=". 
The operator, "<>" is removed in Python3 --- wqflask/utility/svg.py | 196 ++++++++++++++++++++++++------------------------- 1 file changed, 98 insertions(+), 98 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/svg.py b/wqflask/utility/svg.py index 6285ea63..f737d2f2 100644 --- a/wqflask/utility/svg.py +++ b/wqflask/utility/svg.py @@ -97,7 +97,7 @@ use_dom_implementation=0 import exceptions -if use_dom_implementation<>0: +if use_dom_implementation!=0: try: from xml.dom import implementation from xml.dom.ext import PrettyPrint @@ -398,22 +398,22 @@ class rect(SVGelement): """ def __init__(self,x=None,y=None,width=None,height=None,fill=None,stroke=None,stroke_width=None,**args): if width==None or height==None: - if width<>None: raise ValueError, 'height is required' - if height<>None: raise ValueError, 'width is required' + if width!=None: + if height!=None: else: raise ValueError, 'both height and width are required' SVGelement.__init__(self,'rect',{'width':width,'height':height},**args) - if x<>None: + if x!=None: self.attributes['x']=x - if y<>None: + if y!=None: self.attributes['y']=y - if fill<>None: + if fill!=None: self.attributes['fill']=fill - if stroke<>None: + if stroke!=None: self.attributes['stroke']=stroke - if stroke_width<>None: + if stroke_width!=None: self.attributes['stroke-width']=stroke_width class ellipse(SVGelement): @@ -423,22 +423,22 @@ class ellipse(SVGelement): """ def __init__(self,cx=None,cy=None,rx=None,ry=None,fill=None,stroke=None,stroke_width=None,**args): if rx==None or ry== None: - if rx<>None: raise ValueError, 'rx is required' - if ry<>None: raise ValueError, 'ry is required' + if rx!=None: + if ry!=None: else: raise ValueError, 'both rx and ry are required' SVGelement.__init__(self,'ellipse',{'rx':rx,'ry':ry},**args) - if cx<>None: + if cx!=None: self.attributes['cx']=cx - if cy<>None: + if cy!=None: self.attributes['cy']=cy - if fill<>None: + if fill!=None: self.attributes['fill']=fill - if stroke<>None: + if stroke!=None: self.attributes['stroke']=stroke - if stroke_width<>None: + if stroke_width!=None: self.attributes['stroke-width']=stroke_width @@ -451,15 +451,15 @@ class circle(SVGelement): if r==None: raise ValueError, 'r is required' SVGelement.__init__(self,'circle',{'r':r},**args) - if cx<>None: + if cx!=None: self.attributes['cx']=cx - if cy<>None: + if cy!=None: self.attributes['cy']=cy - if fill<>None: + if fill!=None: self.attributes['fill']=fill - if stroke<>None: + if stroke!=None: self.attributes['stroke']=stroke - if stroke_width<>None: + if stroke_width!=None: self.attributes['stroke-width']=stroke_width class point(circle): @@ -478,17 +478,17 @@ class line(SVGelement): """ def __init__(self,x1=None,y1=None,x2=None,y2=None,stroke=None,stroke_width=None,**args): SVGelement.__init__(self,'line',**args) - if x1<>None: + if x1!=None: self.attributes['x1']=x1 - if y1<>None: + if y1!=None: self.attributes['y1']=y1 - if x2<>None: + if x2!=None: self.attributes['x2']=x2 - if y2<>None: + if y2!=None: self.attributes['y2']=y2 - if stroke_width<>None: + if stroke_width!=None: self.attributes['stroke-width']=stroke_width - if stroke<>None: + if stroke!=None: self.attributes['stroke']=stroke class polyline(SVGelement): @@ -498,11 +498,11 @@ class polyline(SVGelement): """ def __init__(self,points,fill=None,stroke=None,stroke_width=None,**args): SVGelement.__init__(self,'polyline',{'points':_xypointlist(points)},**args) - if fill<>None: + if fill!=None: self.attributes['fill']=fill - if stroke_width<>None: + if 
stroke_width!=None: self.attributes['stroke-width']=stroke_width - if stroke<>None: + if stroke!=None: self.attributes['stroke']=stroke class polygon(SVGelement): @@ -512,11 +512,11 @@ class polygon(SVGelement): """ def __init__(self,points,fill=None,stroke=None,stroke_width=None,**args): SVGelement.__init__(self,'polygon',{'points':_xypointlist(points)},**args) - if fill<>None: + if fill!=None: self.attributes['fill']=fill - if stroke_width<>None: + if stroke_width!=None: self.attributes['stroke-width']=stroke_width - if stroke<>None: + if stroke!=None: self.attributes['stroke']=stroke class path(SVGelement): @@ -526,13 +526,13 @@ class path(SVGelement): """ def __init__(self,pathdata,fill=None,stroke=None,stroke_width=None,id=None,**args): SVGelement.__init__(self,'path',{'d':str(pathdata)},**args) - if stroke<>None: + if stroke!=None: self.attributes['stroke']=stroke - if fill<>None: + if fill!=None: self.attributes['fill']=fill - if stroke_width<>None: + if stroke_width!=None: self.attributes['stroke-width']=stroke_width - if id<>None: + if id!=None: self.attributes['id']=id @@ -543,17 +543,17 @@ class text(SVGelement): """ def __init__(self,x=None,y=None,text=None,font_size=None,font_family=None,text_anchor=None,**args): SVGelement.__init__(self,'text',**args) - if x<>None: + if x!=None: self.attributes['x']=x - if y<>None: + if y!=None: self.attributes['y']=y - if font_size<>None: + if font_size!=None: self.attributes['font-size']=font_size - if font_family<>None: + if font_family!=None: self.attributes['font-family']=font_family - if text<>None: + if text!=None: self.text=text - if text_anchor<>None: + if text_anchor!=None: self.attributes['text-anchor']=text_anchor @@ -564,7 +564,7 @@ class textpath(SVGelement): """ def __init__(self,link,text=None,**args): SVGelement.__init__(self,'textPath',{'xlink:href':link},**args) - if text<>None: + if text!=None: self.text=text class pattern(SVGelement): @@ -576,15 +576,15 @@ class pattern(SVGelement): """ def __init__(self,x=None,y=None,width=None,height=None,patternUnits=None,**args): SVGelement.__init__(self,'pattern',**args) - if x<>None: + if x!=None: self.attributes['x']=x - if y<>None: + if y!=None: self.attributes['y']=y - if width<>None: + if width!=None: self.attributes['width']=width - if height<>None: + if height!=None: self.attributes['height']=height - if patternUnits<>None: + if patternUnits!=None: self.attributes['patternUnits']=patternUnits class title(SVGelement): @@ -595,7 +595,7 @@ class title(SVGelement): """ def __init__(self,text=None,**args): SVGelement.__init__(self,'title',**args) - if text<>None: + if text!=None: self.text=text class description(SVGelement): @@ -606,7 +606,7 @@ class description(SVGelement): """ def __init__(self,text=None,**args): SVGelement.__init__(self,'desc',**args) - if text<>None: + if text!=None: self.text=text class lineargradient(SVGelement): @@ -617,15 +617,15 @@ class lineargradient(SVGelement): """ def __init__(self,x1=None,y1=None,x2=None,y2=None,id=None,**args): SVGelement.__init__(self,'linearGradient',**args) - if x1<>None: + if x1!=None: self.attributes['x1']=x1 - if y1<>None: + if y1!=None: self.attributes['y1']=y1 - if x2<>None: + if x2!=None: self.attributes['x2']=x2 - if y2<>None: + if y2!=None: self.attributes['y2']=y2 - if id<>None: + if id!=None: self.attributes['id']=id class radialgradient(SVGelement): @@ -636,17 +636,17 @@ class radialgradient(SVGelement): """ def __init__(self,cx=None,cy=None,r=None,fx=None,fy=None,id=None,**args): 
SVGelement.__init__(self,'radialGradient',**args) - if cx<>None: + if cx!=None: self.attributes['cx']=cx - if cy<>None: + if cy!=None: self.attributes['cy']=cy - if r<>None: + if r!=None: self.attributes['r']=r - if fx<>None: + if fx!=None: self.attributes['fx']=fx - if fy<>None: + if fy!=None: self.attributes['fy']=fy - if id<>None: + if id!=None: self.attributes['id']=id class stop(SVGelement): @@ -656,7 +656,7 @@ class stop(SVGelement): """ def __init__(self,offset,stop_color=None,**args): SVGelement.__init__(self,'stop',{'offset':offset},**args) - if stop_color<>None: + if stop_color!=None: self.attributes['stop-color']=stop_color class style(SVGelement): @@ -675,16 +675,16 @@ class image(SVGelement): """ def __init__(self,url,x=None,y=None,width=None,height=None,**args): if width==None or height==None: - if width<>None: raise ValueError, 'height is required' - if height<>None: raise ValueError, 'width is required' + if width!=None: + if height!=None: else: raise ValueError, 'both height and width are required' SVGelement.__init__(self,'image',{'xlink:href':url,'width':width,'height':height},**args) - if x<>None: + if x!=None: self.attributes['x']=x - if y<>None: + if y!=None: self.attributes['y']=y class cursor(SVGelement): @@ -704,17 +704,17 @@ class marker(SVGelement): """ def __init__(self,id=None,viewBox=None,refx=None,refy=None,markerWidth=None,markerHeight=None,**args): SVGelement.__init__(self,'marker',**args) - if id<>None: + if id!=None: self.attributes['id']=id - if viewBox<>None: + if viewBox!=None: self.attributes['viewBox']=_viewboxlist(viewBox) - if refx<>None: + if refx!=None: self.attributes['refX']=refx - if refy<>None: + if refy!=None: self.attributes['refY']=refy - if markerWidth<>None: + if markerWidth!=None: self.attributes['markerWidth']=markerWidth - if markerHeight<>None: + if markerHeight!=None: self.attributes['markerHeight']=markerHeight class group(SVGelement): @@ -725,7 +725,7 @@ class group(SVGelement): """ def __init__(self,id=None,**args): SVGelement.__init__(self,'g',**args) - if id<>None: + if id!=None: self.attributes['id']=id class symbol(SVGelement): @@ -739,9 +739,9 @@ class symbol(SVGelement): def __init__(self,id=None,viewBox=None,**args): SVGelement.__init__(self,'symbol',**args) - if id<>None: + if id!=None: self.attributes['id']=id - if viewBox<>None: + if viewBox!=None: self.attributes['viewBox']=_viewboxlist(viewBox) class defs(SVGelement): @@ -770,14 +770,14 @@ class use(SVGelement): """ def __init__(self,link,x=None,y=None,width=None,height=None,**args): SVGelement.__init__(self,'use',{'xlink:href':link},**args) - if x<>None: + if x!=None: self.attributes['x']=x - if y<>None: + if y!=None: self.attributes['y']=y - if width<>None: + if width!=None: self.attributes['width']=width - if height<>None: + if height!=None: self.attributes['height']=height @@ -796,7 +796,7 @@ class view(SVGelement): a view can be used to create a view with different attributes""" def __init__(self,id=None,**args): SVGelement.__init__(self,'view',**args) - if id<>None: + if id!=None: self.attributes['id']=id class script(SVGelement): @@ -815,11 +815,11 @@ class animate(SVGelement): """ def __init__(self,attribute,fr=None,to=None,dur=None,**args): SVGelement.__init__(self,'animate',{'attributeName':attribute},**args) - if fr<>None: + if fr!=None: self.attributes['from']=fr - if to<>None: + if to!=None: self.attributes['to']=to - if dur<>None: + if dur!=None: self.attributes['dur']=dur class animateMotion(SVGelement): @@ -829,9 +829,9 @@ class 
animateMotion(SVGelement): """ def __init__(self,pathdata,dur,**args): SVGelement.__init__(self,'animateMotion',**args) - if pathdata<>None: + if pathdata!=None: self.attributes['path']=str(pathdata) - if dur<>None: + if dur!=None: self.attributes['dur']=dur class animateTransform(SVGelement): @@ -842,13 +842,13 @@ class animateTransform(SVGelement): def __init__(self,type=None,fr=None,to=None,dur=None,**args): SVGelement.__init__(self,'animateTransform',{'attributeName':'transform'},**args) #As far as I know the attributeName is always transform - if type<>None: + if type!=None: self.attributes['type']=type - if fr<>None: + if fr!=None: self.attributes['from']=fr - if to<>None: + if to!=None: self.attributes['to']=to - if dur<>None: + if dur!=None: self.attributes['dur']=dur class animateColor(SVGelement): """ac=animateColor(attribute,type,from,to,dur,**args) @@ -857,13 +857,13 @@ class animateColor(SVGelement): """ def __init__(self,attribute,type=None,fr=None,to=None,dur=None,**args): SVGelement.__init__(self,'animateColor',{'attributeName':attribute},**args) - if type<>None: + if type!=None: self.attributes['type']=type - if fr<>None: + if fr!=None: self.attributes['from']=fr - if to<>None: + if to!=None: self.attributes['to']=to - if dur<>None: + if dur!=None: self.attributes['dur']=dur class set(SVGelement): """st=set(attribute,to,during,**args) @@ -872,9 +872,9 @@ class set(SVGelement): """ def __init__(self,attribute,to=None,dur=None,**args): SVGelement.__init__(self,'set',{'attributeName':attribute},**args) - if to<>None: + if to!=None: self.attributes['to']=to - if dur<>None: + if dur!=None: self.attributes['dur']=dur @@ -896,11 +896,11 @@ class svg(SVGelement): """ def __init__(self,viewBox=None, width=None, height=None,**args): SVGelement.__init__(self,'svg',**args) - if viewBox<>None: + if viewBox!=None: self.attributes['viewBox']=_viewboxlist(viewBox) - if width<>None: + if width!=None: self.attributes['width']=width - if height<>None: + if height!=None: self.attributes['height']=height self.namespace="http://www.w3.org/2000/svg" -- cgit v1.2.3 From 4e10f4bd8fb902810ee033abb8d509ab641308e1 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Tue, 18 Aug 2020 17:09:51 +0300 Subject: Apply pep8 * wqflask/utility/svg.py: Apply pep8 to fix indentation error when running `2to3-3.8 -w .`: ```` RefactoringTool: Can't parse ./wqflask/utility/svg.py: IndentationError: unindent does not match any outer indentation level (, line 403) ```` --- wqflask/utility/svg.py | 321 ++++++++++++++++++++++++++++--------------------- 1 file changed, 184 insertions(+), 137 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/svg.py b/wqflask/utility/svg.py index f737d2f2..c850feb8 100644 --- a/wqflask/utility/svg.py +++ b/wqflask/utility/svg.py @@ -25,54 +25,56 @@ # Last updated by GeneNetwork Core Team 2010/10/20 #!/usr/bin/env python -##Copyright (c) 2002, Fedor Baart & Hans de Wit (Stichting Farmaceutische Kengetallen) -##All rights reserved. +# Copyright (c) 2002, Fedor Baart & Hans de Wit (Stichting Farmaceutische Kengetallen) +# All rights reserved. ## -##Redistribution and use in source and binary forms, with or without modification, -##are permitted provided that the following conditions are met: +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: ## -##Redistributions of source code must retain the above copyright notice, this -##list of conditions and the following disclaimer. 
+# Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. ## -##Redistributions in binary form must reproduce the above copyright notice, -##this list of conditions and the following disclaimer in the documentation and/or -##other materials provided with the distribution. +# Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation and/or +# other materials provided with the distribution. ## -##Neither the name of the Stichting Farmaceutische Kengetallen nor the names of -##its contributors may be used to endorse or promote products derived from this -##software without specific prior written permission. +# Neither the name of the Stichting Farmaceutische Kengetallen nor the names of +# its contributors may be used to endorse or promote products derived from this +# software without specific prior written permission. ## -##THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -##AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -##IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -##DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -##FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -##DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -##SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -##CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -##OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -##OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Thanks to Gerald Rosennfellner for his help and useful comments. -##Thanks to Gerald Rosennfellner for his help and useful comments. - -__doc__="""Use SVGdraw to generate your SVGdrawings. +import sys +import exceptions +__doc__ = """Use SVGdraw to generate your SVGdrawings. SVGdraw uses an object model drawing and a method toXML to create SVG graphics by using easy to use classes and methods usualy you start by creating a drawing eg d=drawing() - #then you create a SVG root element + # then you create a SVG root element s=svg() - #then you add some elements eg a circle and add it to the svg root element + # then you add some elements eg a circle and add it to the svg root element c=circle() - #you can supply attributes by using named arguments. + # you can supply attributes by using named arguments. 
c=circle(fill='red',stroke='blue') - #or by updating the attributes attribute: + # or by updating the attributes attribute: c.attributes['stroke-width']=1 s.addElement(c) - #then you add the svg root element to the drawing + # then you add the svg root element to the drawing d.setSVG(s) - #and finaly you xmlify the drawing + # and finaly you xmlify the drawing d.toXml() @@ -82,7 +84,7 @@ This module was created using the SVG specification of www.w3c.org and the O'Reilly (www.oreilly.com) python books as information sources. A svg viewer is available from www.adobe.com""" -__version__="1.0" +__version__ = "1.0" # there are two possibilities to generate svg: # via a dom implementation and directly using text strings @@ -93,33 +95,34 @@ __version__="1.0" # Note that PyXML is required for using the dom implementation. # It is also possible to use the standard minidom. But I didn't try that one. # Anyway the text based approach is about 60 times faster than using the full dom implementation. -use_dom_implementation=0 +use_dom_implementation = 0 -import exceptions -if use_dom_implementation!=0: +if use_dom_implementation != 0: try: from xml.dom import implementation from xml.dom.ext import PrettyPrint except: - raise exceptions.ImportError("PyXML is required for using the dom implementation") -#The implementation is used for the creating the XML document. -#The prettyprint module is used for converting the xml document object to a xml file + raise exceptions.ImportError( + "PyXML is required for using the dom implementation") +# The implementation is used for the creating the XML document. +# The prettyprint module is used for converting the xml document object to a xml file + +assert sys.version_info[0] >= 2 +if sys.version_info[1] < 2: + True = 1 + False = 0 + file = open + +sys.setrecursionlimit = 50 +# The recursion limit is set conservative so mistakes like s=svg() s.addElement(s) +# won't eat up too much processor time. + +# the following code is pasted form xml.sax.saxutils +# it makes it possible to run the code without the xml sax package installed +# To make it possible to have in your text elements, it is necessary to escape the texts + -import sys -assert sys.version_info[0]>=2 -if sys.version_info[1]<2: - True=1 - False=0 - file=open - -sys.setrecursionlimit=50 -#The recursion limit is set conservative so mistakes like s=svg() s.addElement(s) -#won't eat up too much processor time. - -#the following code is pasted form xml.sax.saxutils -#it makes it possible to run the code without the xml sax package installed -#To make it possible to have in your text elements, it is necessary to escape the texts def _escape(data, entities={}): """Escape &, <, and > in a string of data. @@ -127,13 +130,14 @@ def _escape(data, entities={}): the optional entities parameter. The keys and values must all be strings; each key will be replaced with its corresponding value. """ - #data = data.replace("&", "&") + # data = data.replace("&", "&") data = data.replace("<", "<") data = data.replace(">", ">") for chars, entity in entities.items(): data = data.replace(chars, entity) return data + def _quoteattr(data, entities={}): """Escape and quote an attribute value. 
@@ -156,96 +160,121 @@ def _quoteattr(data, entities={}): return data - def _xypointlist(a): """formats a list of xy pairs""" - s='' - for e in a: #this could be done more elegant - s+=str(e)[1:-1] +' ' + s = '' + for e in a: # this could be done more elegant + s += str(e)[1:-1] + ' ' return s + def _viewboxlist(a): """formats a tuple""" - s='' + s = '' for e in a: - s+=str(e)+' ' + s += str(e)+' ' return s + def _pointlist(a): """formats a list of numbers""" return str(a)[1:-1] + class pathdata: """class used to create a pathdata object which can be used for a path. although most methods are pretty straightforward it might be useful to look at the SVG specification.""" - #I didn't test the methods below. - def __init__(self,x=None,y=None): - self.path=[] + # I didn't test the methods below. + + def __init__(self, x=None, y=None): + self.path = [] if x is not None and y is not None: self.path.append('M '+str(x)+' '+str(y)) + def closepath(self): """ends the path""" self.path.append('z') - def move(self,x,y): + + def move(self, x, y): """move to absolute""" self.path.append('M '+str(x)+' '+str(y)) - def relmove(self,x,y): + + def relmove(self, x, y): """move to relative""" self.path.append('m '+str(x)+' '+str(y)) - def line(self,x,y): + + def line(self, x, y): """line to absolute""" self.path.append('L '+str(x)+' '+str(y)) - def relline(self,x,y): + + def relline(self, x, y): """line to relative""" self.path.append('l '+str(x)+' '+str(y)) - def hline(self,x): + + def hline(self, x): """horizontal line to absolute""" self.path.append('H'+str(x)) - def relhline(self,x): + + def relhline(self, x): """horizontal line to relative""" self.path.append('h'+str(x)) - def vline(self,y): + + def vline(self, y): """verical line to absolute""" self.path.append('V'+str(y)) - def relvline(self,y): + + def relvline(self, y): """vertical line to relative""" self.path.append('v'+str(y)) - def bezier(self,x1,y1,x2,y2,x,y): + + def bezier(self, x1, y1, x2, y2, x, y): """bezier with xy1 and xy2 to xy absolut""" - self.path.append('C'+str(x1)+','+str(y1)+' '+str(x2)+','+str(y2)+' '+str(x)+','+str(y)) - def relbezier(self,x1,y1,x2,y2,x,y): + self.path.append('C'+str(x1)+','+str(y1)+' '+str(x2) + + ','+str(y2)+' '+str(x)+','+str(y)) + + def relbezier(self, x1, y1, x2, y2, x, y): """bezier with xy1 and xy2 to xy relative""" - self.path.append('c'+str(x1)+','+str(y1)+' '+str(x2)+','+str(y2)+' '+str(x)+','+str(y)) - def smbezier(self,x2,y2,x,y): + self.path.append('c'+str(x1)+','+str(y1)+' '+str(x2) + + ','+str(y2)+' '+str(x)+','+str(y)) + + def smbezier(self, x2, y2, x, y): """smooth bezier with xy2 to xy absolut""" self.path.append('S'+str(x2)+','+str(y2)+' '+str(x)+','+str(y)) - def relsmbezier(self,x2,y2,x,y): + + def relsmbezier(self, x2, y2, x, y): """smooth bezier with xy2 to xy relative""" self.path.append('s'+str(x2)+','+str(y2)+' '+str(x)+','+str(y)) - def qbezier(self,x1,y1,x,y): + + def qbezier(self, x1, y1, x, y): """quadratic bezier with xy1 to xy absolut""" self.path.append('Q'+str(x1)+','+str(y1)+' '+str(x)+','+str(y)) - def relqbezier(self,x1,y1,x,y): + + def relqbezier(self, x1, y1, x, y): """quadratic bezier with xy1 to xy relative""" self.path.append('q'+str(x1)+','+str(y1)+' '+str(x)+','+str(y)) - def smqbezier(self,x,y): + + def smqbezier(self, x, y): """smooth quadratic bezier to xy absolut""" self.path.append('T'+str(x)+','+str(y)) - def relsmqbezier(self,x,y): + + def relsmqbezier(self, x, y): """smooth quadratic bezier to xy relative""" self.path.append('t'+str(x)+','+str(y)) - def 
ellarc(self,rx,ry,xrot,laf,sf,x,y): + + def ellarc(self, rx, ry, xrot, laf, sf, x, y): """elliptival arc with rx and ry rotating with xrot using large-arc-flag and sweep-flag to xy absolut""" - self.path.append('A'+str(rx)+','+str(ry)+' '+str(xrot)+' '+str(laf)+' '+str(sf)+' '+str(x)+' '+str(y)) - def relellarc(self,rx,ry,xrot,laf,sf,x,y): + self.path.append('A'+str(rx)+','+str(ry)+' '+str(xrot) + + ' '+str(laf)+' '+str(sf)+' '+str(x)+' '+str(y)) + + def relellarc(self, rx, ry, xrot, laf, sf, x, y): """elliptival arc with rx and ry rotating with xrot using large-arc-flag and sweep-flag to xy relative""" - self.path.append('a'+str(rx)+','+str(ry)+' '+str(xrot)+' '+str(laf)+' '+str(sf)+' '+str(x)+' '+str(y)) + self.path.append('a'+str(rx)+','+str(ry)+' '+str(xrot) + + ' '+str(laf)+' '+str(sf)+' '+str(x)+' '+str(y)) + def __repr__(self): return ' '.join(self.path) - - class SVGelement: """SVGelement(type,attributes,elements,text,namespace,**args) Creates a arbitrary svg element and is intended to be subclassed not used on its own. @@ -256,52 +285,56 @@ class SVGelement: namespace. Note the elements==None, if elements = None:self.elements=[] construction. This is done because if you default to elements=[] every object has a reference to the same empty list.""" - def __init__(self,type='',attributes=None,elements=None,text='',namespace='',cdata=None, **args): - self.type=type - if attributes==None: - self.attributes={} + + def __init__(self, type='', attributes=None, elements=None, text='', namespace='', cdata=None, **args): + self.type = type + if attributes == None: + self.attributes = {} else: - self.attributes=attributes - if elements==None: - self.elements=[] + self.attributes = attributes + if elements == None: + self.elements = [] else: - self.elements=elements - self.text=text - self.namespace=namespace - self.cdata=cdata + self.elements = elements + self.text = text + self.namespace = namespace + self.cdata = cdata for arg in args.keys(): arg2 = arg.replace("__", ":") arg2 = arg2.replace("_", "-") - self.attributes[arg2]=args[arg] - def addElement(self,SVGelement): + self.attributes[arg2] = args[arg] + + def addElement(self, SVGelement): """adds an element to a SVGelement SVGelement.addElement(SVGelement) """ self.elements.append(SVGelement) - def toXml(self,level,f): + def toXml(self, level, f): f.write('\t'*level) f.write('<'+self.type) for attkey in self.attributes.keys(): - f.write(' '+_escape(str(attkey))+'='+_quoteattr(str(self.attributes[attkey]))) + f.write(' '+_escape(str(attkey))+'=' + + _quoteattr(str(self.attributes[attkey]))) if self.namespace: - f.write(' xmlns="'+ _escape(str(self.namespace))+'" xmlns:xlink="http://www.w3.org/1999/xlink"') + f.write(' xmlns="' + _escape(str(self.namespace)) + + '" xmlns:xlink="http://www.w3.org/1999/xlink"') if self.elements or self.text or self.cdata: f.write('>') if self.elements: f.write('\n') for element in self.elements: - element.toXml(level+1,f) + element.toXml(level+1, f) if self.cdata: f.write('\n'+'\t'*(level+1)+'\n') if self.text: - if type(self.text)==type(''): #If the text is only text + if type(self.text) == type(''): # If the text is only text f.write(_escape(str(self.text))) - else: #If the text is a spannedtext class + else: # If the text is a spannedtext class f.write(str(self.text)) if self.elements: f.write('\t'*level+'\n') @@ -312,6 +345,7 @@ class SVGelement: else: f.write('/>\n') + class tspan(SVGelement): """ts=tspan(text='',**args) @@ -323,19 +357,22 @@ class tspan(SVGelement): st.addtspan(ts) t=text(3,5,st) """ 
- def __init__(self,text=None,**args): - SVGelement.__init__(self,'tspan',**args) - if self.text<>None: - self.text=text + + def __init__(self, text=None, **args): + SVGelement.__init__(self, 'tspan', **args) + if self.text <> None: + self.text = text + def __repr__(self): - s=" --- scripts/maintenance/QTL_Reaper_v6.py | 2 +- scripts/maintenance/readProbeSetMean_v7.py | 16 ++++++++-------- scripts/maintenance/readProbeSetSE_v7.py | 16 ++++++++-------- test/requests/link_checker.py | 3 +-- wqflask/utility/webqtlUtil.py | 2 +- wqflask/wqflask/api/router.py | 2 +- wqflask/wqflask/correlation_matrix/show_corr_matrix.py | 4 ++-- wqflask/wqflask/export_traits.py | 2 +- wqflask/wqflask/interval_analyst/GeneUtil.py | 2 +- .../wqflask/marker_regression/display_mapping_results.py | 10 +++++----- wqflask/wqflask/marker_regression/plink_mapping.py | 4 ++-- wqflask/wqflask/pbkdf2.py | 2 +- wqflask/wqflask/snp_browser/snp_browser.py | 6 +++--- 13 files changed, 35 insertions(+), 36 deletions(-) (limited to 'wqflask/utility') diff --git a/scripts/maintenance/QTL_Reaper_v6.py b/scripts/maintenance/QTL_Reaper_v6.py index 7fb56eca..2fbeb53b 100755 --- a/scripts/maintenance/QTL_Reaper_v6.py +++ b/scripts/maintenance/QTL_Reaper_v6.py @@ -23,7 +23,7 @@ for item in results: ProbeSetFreezeIds=sys.argv[1:] if ProbeSetFreezeIds: #####convert the Ids to integer - ProbeSetFreezeIds=map(int, ProbeSetFreezeIds) + ProbeSetFreezeIds=list(map(int, ProbeSetFreezeIds)) else: #####get all of the dataset that need be updated diff --git a/scripts/maintenance/readProbeSetMean_v7.py b/scripts/maintenance/readProbeSetMean_v7.py index fea26731..97767715 100755 --- a/scripts/maintenance/readProbeSetMean_v7.py +++ b/scripts/maintenance/readProbeSetMean_v7.py @@ -61,14 +61,14 @@ GeneList = [] isCont = 1 header = fp.readline() header = string.split(string.strip(header),'\t') -header = map(string.strip, header) +header = list(map(string.strip, header)) nfield = len(header) line = fp.readline() kj=0 while line: line2 = string.split(string.strip(line),'\t') - line2 = map(string.strip, line2) + line2 = list(map(string.strip, line2)) if len(line2) != nfield: print(("Error : " + line)) isCont = 0 @@ -80,7 +80,7 @@ while line: if kj%100000 == 0: print(('checked ',kj,' lines')) -GeneList = map(string.lower, GeneList) +GeneList = list(map(string.lower, GeneList)) GeneList.sort() if isCont==0: @@ -100,8 +100,8 @@ isCont = 1 fp.seek(0) header = fp.readline() header = string.split(string.strip(header),'\t') -header = map(string.strip, header) -header = map(translateAlias, header) +header = list(map(string.strip, header)) +header = list(map(translateAlias, header)) header = header[dataStart:] Ids = [] for item in header: @@ -128,7 +128,7 @@ print('Check if each ProbeSet exist in database') line = fp.readline() line = fp.readline() line2 = string.split(string.strip(line),'\t') -line2 = map(string.strip, line2) +line2 = list(map(string.strip, line2)) PId = line2[0] db.execute('select Id from ProbeSet where Name="%s" and ChipId=%d' % (PId, GeneChipId) ) @@ -148,7 +148,7 @@ for item in results: print(Names) -Names = map(string.lower, Names) +Names = list(map(string.lower, Names)) Names.sort() # -- Fixed the lower case problem of ProbeSets affx-mur_b2_at doesn't exist --# @@ -223,7 +223,7 @@ values1 = [] values2 = [] while line: line2 = string.split(string.strip(line),'\t') - line2 = map(string.strip, line2) + line2 = list(map(string.strip, line2)) PId = line2[0] recordId = NameIds[PId] diff --git a/scripts/maintenance/readProbeSetSE_v7.py 
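# --- Editorial aside (illustrative, not part of the patch) ---
# Why these maintenance scripts wrap map() in list(): in Python 3 map() returns
# a lazy iterator, so code that indexes the result, calls len() on it, sorts it
# in place, or iterates it twice breaks unless the result is materialised.
header = "id\tname\tvalue"
fields = map(str.strip, header.split("\t"))        # Python 3: a map object
fields = list(map(str.strip, header.split("\t")))  # what the 2to3 fixer emits
assert fields[0] == "id" and len(fields) == 3      # indexing/len() need a list
# --- end aside ---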
b/scripts/maintenance/readProbeSetSE_v7.py index 79ed455f..7b2fee87 100755 --- a/scripts/maintenance/readProbeSetSE_v7.py +++ b/scripts/maintenance/readProbeSetSE_v7.py @@ -72,14 +72,14 @@ GeneList = [] isCont = 1 header = fp.readline() header = string.split(string.strip(header), '\t') -header = map(string.strip, header) +header = list(map(string.strip, header)) nfield = len(header) line = fp.readline() kj = 0 while line: line2 = string.split(string.strip(line), '\t') - line2 = map(string.strip, line2) + line2 = list(map(string.strip, line2)) if len(line2) != nfield: isCont = 0 print(("Error : " + line)) @@ -91,7 +91,7 @@ while line: if kj % 100000 == 0: print(('checked ', kj, ' lines')) -GeneList = map(string.lower, GeneList) +GeneList = list(map(string.lower, GeneList)) GeneList.sort() if isCont == 0: @@ -111,8 +111,8 @@ isCont = 1 fp.seek(0) header = fp.readline() header = string.split(string.strip(header), '\t') -header = map(string.strip, header) -header = map(translateAlias, header) +header = list(map(string.strip, header)) +header = list(map(translateAlias, header)) header = header[dataStart:] Ids = [] for item in header: @@ -139,7 +139,7 @@ print('Check if each ProbeSet exist in database') line = fp.readline() line = fp.readline() line2 = string.split(string.strip(line), '\t') -line2 = map(string.strip, line2) +line2 = list(map(string.strip, line2)) PId = line2[0] db.execute('select Id from ProbeSet where Name="%s" and ChipId=%d' % @@ -158,7 +158,7 @@ results = db.fetchall() Names = [] for item in results: Names.append(item[0]) - Names = map(string.lower, Names) + Names = list(map(string.lower, Names)) Names.sort() # -- Fixed the lower case problem of ProbeSets affx-mur_b2_at doesn't exist --# ##---- compare genelist with names ----## @@ -220,7 +220,7 @@ line = fp.readline() kj = 0 while line: line2 = string.split(string.strip(line), '\t') - line2 = map(string.strip, line2) + line2 = list(map(string.strip, line2)) CellId = line2[0] if not ProbeNameId.has_key(CellId): diff --git a/test/requests/link_checker.py b/test/requests/link_checker.py index 715f330c..df4d32d8 100644 --- a/test/requests/link_checker.py +++ b/test/requests/link_checker.py @@ -27,8 +27,7 @@ def get_links(doc): lambda x: not ( is_root_link(x) or is_mailto_link(x)) - , map(lambda y: y.get("href") - , doc.cssselect("a"))) + , [y.get("href") for y in doc.cssselect("a")]) def verify_link(link): if link[0] == "#": diff --git a/wqflask/utility/webqtlUtil.py b/wqflask/utility/webqtlUtil.py index 53661ae4..79991149 100644 --- a/wqflask/utility/webqtlUtil.py +++ b/wqflask/utility/webqtlUtil.py @@ -107,7 +107,7 @@ def hasAccessToConfidentialPhenotypeTrait(privilege, userName, authorized_users) if webqtlConfig.USERDICT[privilege] > webqtlConfig.USERDICT['user']: access_to_confidential_phenotype_trait = 1 else: - AuthorizedUsersList=map(string.strip, string.split(authorized_users, ',')) + AuthorizedUsersList=list(map(string.strip, string.split(authorized_users, ','))) if AuthorizedUsersList.__contains__(userName): access_to_confidential_phenotype_trait = 1 return access_to_confidential_phenotype_trait \ No newline at end of file diff --git a/wqflask/wqflask/api/router.py b/wqflask/wqflask/api/router.py index 6324cabe..3fa1d5ba 100644 --- a/wqflask/wqflask/api/router.py +++ b/wqflask/wqflask/api/router.py @@ -517,7 +517,7 @@ def all_sample_data(dataset_name, file_format = "csv"): line_list.append("x") results_list.append(line_list) - results_list = map(list, zip(*results_list)) + results_list = list(map(list, 
zip(*results_list))) si = StringIO.StringIO() csv_writer = csv.writer(si) diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py index 0ac94139..a912344f 100644 --- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py +++ b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py @@ -278,7 +278,7 @@ def zScore(trait_data_array): stdev = math.sqrt(var/(N-1)) if stdev == 0: stdev = 1e-100 - data2 = map(lambda x:(x-mean)/stdev,data) + data2 = [(x-mean)/stdev for x in data] trait_data_array[i] = data2 i += 1 return trait_data_array @@ -299,7 +299,7 @@ def sortEigenVectors(vector): A.append(item[0]) B.append(item[1]) sum = reduce(lambda x,y: x+y, A, 0.0) - A = map(lambda x:x*100.0/sum, A) + A = [x*100.0/sum for x in A] return [A, B] except: return [] \ No newline at end of file diff --git a/wqflask/wqflask/export_traits.py b/wqflask/wqflask/export_traits.py index 3272c03d..6646cc36 100644 --- a/wqflask/wqflask/export_traits.py +++ b/wqflask/wqflask/export_traits.py @@ -122,7 +122,7 @@ def export_search_results_csv(targs): csv_rows.append(row_contents) - csv_rows = map(list, itertools.izip_longest(*[row for row in csv_rows])) + csv_rows = list(map(list, itertools.izip_longest(*[row for row in csv_rows]))) writer.writerows(csv_rows) csv_data = buff.getvalue() buff.close() diff --git a/wqflask/wqflask/interval_analyst/GeneUtil.py b/wqflask/wqflask/interval_analyst/GeneUtil.py index 2c60dd70..273168a8 100644 --- a/wqflask/wqflask/interval_analyst/GeneUtil.py +++ b/wqflask/wqflask/interval_analyst/GeneUtil.py @@ -24,7 +24,7 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): ##List current Species and other Species speciesId = speciesDict[species] - otherSpecies = map(lambda X: [X, speciesDict[X]], speciesDict.keys()) + otherSpecies = [[X, speciesDict[X]] for X in speciesDict.keys()] otherSpecies.remove([species, speciesId]) results = g.db.execute(""" diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py index bda899fb..7b6e70d2 100644 --- a/wqflask/wqflask/marker_regression/display_mapping_results.py +++ b/wqflask/wqflask/marker_regression/display_mapping_results.py @@ -389,9 +389,9 @@ class DisplayMappingResults(object): Chr_Length.Name in (%s) Order by Chr_Length.OrderId - """ % (self.dataset.group.name, string.join(map(lambda X: "'%s'" % X[0], self.ChrList[1:]), ", "))) + """ % (self.dataset.group.name, string.join(["'%s'" % X[0] for X in self.ChrList[1:]], ", "))) - self.ChrLengthMbList = map(lambda x: x[0]/1000000.0, self.ChrLengthMbList) + self.ChrLengthMbList = [x[0]/1000000.0 for x in self.ChrLengthMbList] self.ChrLengthMbSum = reduce(lambda x, y:x+y, self.ChrLengthMbList, 0.0) if self.ChrLengthMbList: self.MbGraphInterval = self.ChrLengthMbSum/(len(self.ChrLengthMbList)*12) #Empirical Mb interval @@ -1147,8 +1147,8 @@ class DisplayMappingResults(object): tenPercentLength = geneLength*0.0001 SNPdensity = theGO["snpCount"]/geneLength - exonStarts = map(float, theGO['exonStarts'].split(",")[:-1]) - exonEnds = map(float, theGO['exonEnds'].split(",")[:-1]) + exonStarts = list(map(float, theGO['exonStarts'].split(",")[:-1])) + exonEnds = list(map(float, theGO['exonEnds'].split(",")[:-1])) cdsStart = theGO['cdsStart'] cdsEnd = theGO['cdsEnd'] accession = theGO['NM_ID'] @@ -2145,7 +2145,7 @@ class DisplayMappingResults(object): lrsEdgeWidth = 1 else: if self.additiveChecked: - additiveMax = max(map(lambda X : abs(X['additive']), 
self.qtlresults)) + additiveMax = max([abs(X['additive']) for X in self.qtlresults]) lrsEdgeWidth = 3 if zoom == 2: diff --git a/wqflask/wqflask/marker_regression/plink_mapping.py b/wqflask/wqflask/marker_regression/plink_mapping.py index 2f327faf..9571015e 100644 --- a/wqflask/wqflask/marker_regression/plink_mapping.py +++ b/wqflask/wqflask/marker_regression/plink_mapping.py @@ -84,7 +84,7 @@ def get_samples_from_ped_file(dataset): while line: lineList = string.split(string.strip(line), '\t') - lineList = map(string.strip, lineList) + lineList = list(map(string.strip, lineList)) sample_name = lineList[0] sample_list.append(sample_name) @@ -157,6 +157,6 @@ def parse_plink_output(output_filename, species): def build_line_list(line=None): line_list = string.split(string.strip(line),' ')# irregular number of whitespaces between columns line_list = [item for item in line_list if item <>''] - line_list = map(string.strip, line_list) + line_list = list(map(string.strip, line_list)) return line_list \ No newline at end of file diff --git a/wqflask/wqflask/pbkdf2.py b/wqflask/wqflask/pbkdf2.py index 811c83b0..731c8843 100644 --- a/wqflask/wqflask/pbkdf2.py +++ b/wqflask/wqflask/pbkdf2.py @@ -66,7 +66,7 @@ def pbkdf2_bin(data, salt, iterations=1000, keylen=24, hashfunc=None): def _pseudorandom(x, mac=mac): h = mac.copy() h.update(x) - return map(ord, h.digest()) + return list(map(ord, h.digest())) buf = [] for block in xrange(1, -(-keylen // mac.digest_size) + 1): rv = u = _pseudorandom(salt + _pack_int(block)) diff --git a/wqflask/wqflask/snp_browser/snp_browser.py b/wqflask/wqflask/snp_browser/snp_browser.py index 1d28d76a..b18bfc62 100644 --- a/wqflask/wqflask/snp_browser/snp_browser.py +++ b/wqflask/wqflask/snp_browser/snp_browser.py @@ -459,7 +459,7 @@ class SnpBrowser(object): function_list = [] if function_details: function_list = string.split(string.strip(function_details), ",") - function_list = map(string.strip, function_list) + function_list = list(map(string.strip, function_list)) function_list[0] = function_list[0].title() function_details = ", ".join(item for item in function_list) function_details = function_details.replace("_", " ") @@ -725,11 +725,11 @@ def get_effect_details_by_category(effect_name = None, effect_value = None): codon_effect_group_list = ['Start Lost', 'Stop Gained', 'Stop Lost', 'Nonsynonymous', 'Synonymous'] effect_detail_list = string.split(string.strip(effect_value), '|') - effect_detail_list = map(string.strip, effect_detail_list) + effect_detail_list = list(map(string.strip, effect_detail_list)) for index, item in enumerate(effect_detail_list): item_list = string.split(string.strip(item), ',') - item_list = map(string.strip, item_list) + item_list = list(map(string.strip, item_list)) gene_id = item_list[0] gene_name = item_list[1] -- cgit v1.2.3 From bafbb5b7a4b7db2ca230f292eb45be7e67985259 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 19 Aug 2020 02:05:05 +0300 Subject: Remove erroneous `if .. else` branch * wqflask/utility/svg.py [roct, ellipse, SVGelement]: Raise only a single value error if either height or width is not defined. 
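(Editorial aside, illustrative and not part of this commit: the corrected guard keeps a single reachable check and a single exception in place of the old back-to-back raise statements and the malformed if/else that 2to3 could not parse. The helper name below is made up for the sketch.)

    def _check_dimensions(width, height):
        if width is None or height is None:
            raise ValueError('both height and width are required')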
Fixes parsing error when running `2to3-3.8 -f apply -w .` --- wqflask/utility/svg.py | 23 +++++------------------ 1 file changed, 5 insertions(+), 18 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/svg.py b/wqflask/utility/svg.py index c850feb8..d66c954e 100644 --- a/wqflask/utility/svg.py +++ b/wqflask/utility/svg.py @@ -445,12 +445,8 @@ class rect(SVGelement): def __init__(self, x=None, y=None, width=None, height=None, fill=None, stroke=None, stroke_width=None, **args): if width == None or height == None: - raise ValueError, 'height is required' - raise ValueError, 'width is required' - if width!=None: - if height!=None: - else: - raise ValueError, 'both height and width are required' + raise ValueError, 'both height and width are required' + SVGelement.__init__(self,'rect',{'width':width,'height':height},**args) if x!=None: self.attributes['x']=x @@ -470,12 +466,8 @@ class ellipse(SVGelement): """ def __init__(self,cx=None,cy=None,rx=None,ry=None,fill=None,stroke=None,stroke_width=None,**args): if rx==None or ry== None: - raise ValueError, 'rx is required' - raise ValueError, 'ry is required' - if rx!=None: - if ry!=None: - else: - raise ValueError, 'both rx and ry are required' + raise ValueError, 'both rx and ry are required' + SVGelement.__init__(self,'ellipse',{'rx':rx,'ry':ry},**args) if cx!=None: self.attributes['cx']=cx @@ -722,12 +714,7 @@ class image(SVGelement): """ def __init__(self,url,x=None,y=None,width=None,height=None,**args): if width==None or height==None: - raise ValueError, 'height is required' - raise ValueError, 'width is required' - if width!=None: - if height!=None: - else: - raise ValueError, 'both height and width are required' + raise ValueError, 'both height and width are required' SVGelement.__init__(self,'image',{'xlink:href':url,'width':width,'height':height},**args) if x!=None: self.attributes['x']=x -- cgit v1.2.3 From ba123e1e0fe693f9778993c3f8e5a70a28658a4c Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 19 Aug 2020 02:31:31 +0300 Subject: Fix dictionary iteration methods Run `2to3-3.8 -f dict -w .` See: and --- scripts/maintenance/load_genotypes.py | 4 +-- wqflask/base/GeneralObject.py | 8 ++--- wqflask/base/trait.py | 4 +-- wqflask/maintenance/gen_select_dataset.py | 6 ++-- .../maintenance/generate_probesetfreeze_file.py | 2 +- wqflask/utility/__init__.py | 4 +-- wqflask/utility/benchmark.py | 4 +-- wqflask/utility/gen_geno_ob.py | 2 +- wqflask/utility/helper_functions.py | 2 +- wqflask/utility/svg.py | 14 ++++---- wqflask/utility/temp_data.py | 2 +- wqflask/utility/tools.py | 2 +- wqflask/wqflask/api/correlation.py | 16 ++++----- wqflask/wqflask/api/gen_menu.py | 6 ++-- wqflask/wqflask/correlation/corr_scatter_plot.py | 6 ++-- wqflask/wqflask/correlation/show_corr_results.py | 30 ++++++++--------- wqflask/wqflask/ctl/ctl_analysis.py | 2 +- wqflask/wqflask/export_traits.py | 4 +-- wqflask/wqflask/heatmap/heatmap.py | 4 +-- wqflask/wqflask/interval_analyst/GeneUtil.py | 2 +- .../marker_regression/display_mapping_results.py | 38 +++++++++++----------- wqflask/wqflask/marker_regression/run_mapping.py | 24 +++++++------- wqflask/wqflask/resource_manager.py | 2 +- wqflask/wqflask/show_trait/export_trait_data.py | 2 +- wqflask/wqflask/show_trait/show_trait.py | 8 ++--- wqflask/wqflask/views.py | 6 ++-- 26 files changed, 102 insertions(+), 102 deletions(-) (limited to 'wqflask/utility') diff --git a/scripts/maintenance/load_genotypes.py b/scripts/maintenance/load_genotypes.py index c235a31f..51278d48 100755 --- 
a/scripts/maintenance/load_genotypes.py +++ b/scripts/maintenance/load_genotypes.py @@ -19,7 +19,7 @@ def fetch_parameters(config): config_dic['dataid'] = datastructure.get_nextdataid_genotype() config_dic['genofile'] = config.get('config', 'genofile') print("config dictionary:") - for k, v in config_dic.items(): + for k, v in list(config_dic.items()): print(("\t%s: %s" % (k, v))) return config_dic @@ -42,7 +42,7 @@ def parse_genofile(config, config_dic): if line.lower().startswith("chr"): # print("geno file meta dictionary:") - for k, v in meta_dic.items(): + for k, v in list(meta_dic.items()): print(("\t%s: %s" % (k, v))) # print(("geno file head:\n\t%s" % line)) diff --git a/wqflask/base/GeneralObject.py b/wqflask/base/GeneralObject.py index 0fccaab3..707569db 100644 --- a/wqflask/base/GeneralObject.py +++ b/wqflask/base/GeneralObject.py @@ -33,7 +33,7 @@ class GeneralObject: def __init__(self, *args, **kw): self.contents = list(args) - for name, value in kw.items(): + for name, value in list(kw.items()): setattr(self, name, value) def __setitem__(self, key, value): @@ -50,16 +50,16 @@ class GeneralObject: def __str__(self): s = '' - for key in self.__dict__.keys(): + for key in list(self.__dict__.keys()): if key != 'contents': s += '%s = %s\n' % (key, self.__dict__[key]) return s def __repr__(self): s = '' - for key in self.__dict__.keys(): + for key in list(self.__dict__.keys()): s += '%s = %s\n' % (key, self.__dict__[key]) return s def __cmp__(self, other): - return len(self.__dict__.keys()).__cmp__(len(other.__dict__.keys())) + return len(list(self.__dict__.keys())).__cmp__(len(list(other.__dict__.keys()))) diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index 7666348e..e82df226 100644 --- a/wqflask/base/trait.py +++ b/wqflask/base/trait.py @@ -118,7 +118,7 @@ class GeneralTrait(object): vals = [] the_vars = [] sample_aliases = [] - for sample_name, sample_data in self.data.items(): + for sample_name, sample_data in list(self.data.items()): if sample_data.value != None: if not include_variance or sample_data.variance != None: samples.append(sample_name) @@ -260,7 +260,7 @@ def get_sample_data(): trait_dict['pubmed_link'] = trait_ob.pubmed_link trait_dict['pubmed_text'] = trait_ob.pubmed_text - return json.dumps([trait_dict, {key: value.value for key, value in trait_ob.data.iteritems() }]) + return json.dumps([trait_dict, {key: value.value for key, value in list(trait_ob.data.items()) }]) else: return None diff --git a/wqflask/maintenance/gen_select_dataset.py b/wqflask/maintenance/gen_select_dataset.py index 647e58a2..78217587 100644 --- a/wqflask/maintenance/gen_select_dataset.py +++ b/wqflask/maintenance/gen_select_dataset.py @@ -108,7 +108,7 @@ def get_types(groups): """Build types list""" types = {} #print("Groups: ", pf(groups)) - for species, group_dict in groups.iteritems(): + for species, group_dict in list(groups.items()): types[species] = {} for group_name, _group_full_name in group_dict: # make group an alias to shorten the code @@ -195,9 +195,9 @@ def build_types(species, group): def get_datasets(types): """Build datasets list""" datasets = {} - for species, group_dict in types.iteritems(): + for species, group_dict in list(types.items()): datasets[species] = {} - for group, type_list in group_dict.iteritems(): + for group, type_list in list(group_dict.items()): datasets[species][group] = {} for type_name in type_list: these_datasets = build_datasets(species, group, type_name[0]) diff --git a/wqflask/maintenance/generate_probesetfreeze_file.py 
b/wqflask/maintenance/generate_probesetfreeze_file.py index b7b2dc8e..4231cc7c 100644 --- a/wqflask/maintenance/generate_probesetfreeze_file.py +++ b/wqflask/maintenance/generate_probesetfreeze_file.py @@ -82,7 +82,7 @@ def get_probeset_vals(cursor, dataset_name): def trim_strains(strains, probeset_vals): trimmed_strains = [] #print("probeset_vals is:", pf(probeset_vals)) - first_probeset = list(probeset_vals.itervalues())[0] + first_probeset = list(probeset_vals.values())[0] print("\n**** first_probeset is:", pf(first_probeset)) for strain in strains: print("\n**** strain is:", pf(strain)) diff --git a/wqflask/utility/__init__.py b/wqflask/utility/__init__.py index d9856eed..204ff59a 100644 --- a/wqflask/utility/__init__.py +++ b/wqflask/utility/__init__.py @@ -19,7 +19,7 @@ class Struct(object): ''' def __init__(self, obj): - for k, v in obj.iteritems(): + for k, v in list(obj.items()): if isinstance(v, dict): setattr(self, k, Struct(v)) else: @@ -30,6 +30,6 @@ class Struct(object): def __repr__(self): return '{%s}' % str(', '.join('%s : %s' % (k, repr(v)) for - (k, v) in self.__dict__.iteritems())) + (k, v) in list(self.__dict__.items()))) diff --git a/wqflask/utility/benchmark.py b/wqflask/utility/benchmark.py index 8f1c916b..221e5151 100644 --- a/wqflask/utility/benchmark.py +++ b/wqflask/utility/benchmark.py @@ -38,9 +38,9 @@ class Bench(object): @classmethod def report(cls): - total_time = sum((time_taken for time_taken in cls.entries.itervalues())) + total_time = sum((time_taken for time_taken in list(cls.entries.values()))) print("\nTiming report\n") - for name, time_taken in cls.entries.iteritems(): + for name, time_taken in list(cls.entries.items()): percent = int(round((time_taken/total_time) * 100)) print("[{}%] {}: {}".format(percent, name, time_taken)) print() diff --git a/wqflask/utility/gen_geno_ob.py b/wqflask/utility/gen_geno_ob.py index 23b0b650..ae42f834 100644 --- a/wqflask/utility/gen_geno_ob.py +++ b/wqflask/utility/gen_geno_ob.py @@ -175,7 +175,7 @@ class Locus(object): start_pos = 3 for allele in marker_row[start_pos:]: - if allele in geno_table.keys(): + if allele in list(geno_table.keys()): self.genotype.append(geno_table[allele]) else: #ZS: Some genotype appears that isn't specified in the metadata, make it unknown self.genotype.append("U") \ No newline at end of file diff --git a/wqflask/utility/helper_functions.py b/wqflask/utility/helper_functions.py index 9ce809b6..9a4a235a 100644 --- a/wqflask/utility/helper_functions.py +++ b/wqflask/utility/helper_functions.py @@ -13,7 +13,7 @@ logger = logging.getLogger(__name__ ) def get_species_dataset_trait(self, start_vars): #assert type(read_genotype) == type(bool()), "Expecting boolean value for read_genotype" - if "temp_trait" in start_vars.keys(): + if "temp_trait" in list(start_vars.keys()): if start_vars['temp_trait'] == "True": self.dataset = data_set.create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = start_vars['group']) else: diff --git a/wqflask/utility/svg.py b/wqflask/utility/svg.py index d66c954e..c6a5c260 100644 --- a/wqflask/utility/svg.py +++ b/wqflask/utility/svg.py @@ -133,7 +133,7 @@ def _escape(data, entities={}): # data = data.replace("&", "&") data = data.replace("<", "<") data = data.replace(">", ">") - for chars, entity in entities.items(): + for chars, entity in list(entities.items()): data = data.replace(chars, entity) return data @@ -299,7 +299,7 @@ class SVGelement: self.text = text self.namespace = namespace self.cdata = cdata - for arg in args.keys(): + for arg in 
list(args.keys()): arg2 = arg.replace("__", ":") arg2 = arg2.replace("_", "-") self.attributes[arg2] = args[arg] @@ -314,7 +314,7 @@ class SVGelement: def toXml(self, level, f): f.write('\t'*level) f.write('<'+self.type) - for attkey in self.attributes.keys(): + for attkey in list(self.attributes.keys()): f.write(' '+_escape(str(attkey))+'=' + _quoteattr(str(self.attributes[attkey]))) if self.namespace: @@ -365,7 +365,7 @@ class tspan(SVGelement): def __repr__(self): s = "\n" % (item, self.entity[item])) xml.write("]") xml.write(">\n") @@ -1015,7 +1015,7 @@ class drawing: if element.text: textnode=root.createTextNode(element.text) e.appendChild(textnode) - for attribute in element.attributes.keys(): #in element.attributes is supported from python 2.2 + for attribute in list(element.attributes.keys()): #in element.attributes is supported from python 2.2 e.setAttribute(attribute,str(element.attributes[attribute])) if element.elements: for el in element.elements: diff --git a/wqflask/utility/temp_data.py b/wqflask/utility/temp_data.py index 5bf700c9..2f2726c6 100644 --- a/wqflask/utility/temp_data.py +++ b/wqflask/utility/temp_data.py @@ -20,6 +20,6 @@ class TempData(object): if __name__ == "__main__": redis = Redis() - for key in redis.keys(): + for key in list(redis.keys()): for field in redis.hkeys(key): print("{}.{}={}".format(key, field, redis.hget(key, field))) diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py index f790d424..51a87fe1 100644 --- a/wqflask/utility/tools.py +++ b/wqflask/utility/tools.py @@ -220,7 +220,7 @@ def show_settings(): logger.info(OVERRIDES) logger.info(BLUE+"Mr. Mojo Risin 2"+ENDC) - keylist = app.config.keys() + keylist = list(app.config.keys()) print("runserver.py: ****** Webserver configuration - k,v pairs from app.config ******") keylist.sort() for k in keylist: diff --git a/wqflask/wqflask/api/correlation.py b/wqflask/wqflask/api/correlation.py index 7f5312c1..eb05645e 100644 --- a/wqflask/wqflask/api/correlation.py +++ b/wqflask/wqflask/api/correlation.py @@ -36,7 +36,7 @@ def do_correlation(start_vars): #corr_results = collections.OrderedDict(sorted(corr_results.items(), key=lambda t: -abs(t[1][0]))) final_results = [] - for _trait_counter, trait in enumerate(corr_results.keys()[:corr_params['return_count']]): + for _trait_counter, trait in enumerate(list(corr_results.keys())[:corr_params['return_count']]): if corr_params['type'] == "tissue": [sample_r, num_overlap, sample_p, symbol] = corr_results[trait] result_dict = { @@ -76,20 +76,20 @@ def calculate_results(this_trait, this_dataset, target_dataset, corr_params): if corr_params['type'] == "tissue": trait_symbol_dict = this_dataset.retrieve_genes("Symbol") corr_results = do_tissue_correlation_for_all_traits(this_trait, trait_symbol_dict, corr_params) - sorted_results = collections.OrderedDict(sorted(corr_results.items(), + sorted_results = collections.OrderedDict(sorted(list(corr_results.items()), key=lambda t: -abs(t[1][1]))) elif corr_params['type'] == "literature" or corr_params['type'] == "lit": #ZS: Just so a user can use either "lit" or "literature" trait_geneid_dict = this_dataset.retrieve_genes("GeneId") corr_results = do_literature_correlation_for_all_traits(this_trait, this_dataset, trait_geneid_dict, corr_params) - sorted_results = collections.OrderedDict(sorted(corr_results.items(), + sorted_results = collections.OrderedDict(sorted(list(corr_results.items()), key=lambda t: -abs(t[1][1]))) else: - for target_trait, target_vals in target_dataset.trait_data.iteritems(): + for 
target_trait, target_vals in list(target_dataset.trait_data.items()): result = get_sample_r_and_p_values(this_trait, this_dataset, target_vals, target_dataset, corr_params['type']) if result is not None: corr_results[target_trait] = result - sorted_results = collections.OrderedDict(sorted(corr_results.items(), key=lambda t: -abs(t[1][0]))) + sorted_results = collections.OrderedDict(sorted(list(corr_results.items()), key=lambda t: -abs(t[1][0]))) return sorted_results @@ -100,10 +100,10 @@ def do_tissue_correlation_for_all_traits(this_trait, trait_symbol_dict, corr_par if this_trait.symbol.lower() in primary_trait_tissue_vals_dict: primary_trait_tissue_values = primary_trait_tissue_vals_dict[this_trait.symbol.lower()] - corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(symbol_list=trait_symbol_dict.values()) + corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(symbol_list=list(trait_symbol_dict.values())) tissue_corr_data = {} - for trait, symbol in trait_symbol_dict.iteritems(): + for trait, symbol in list(trait_symbol_dict.items()): if symbol and symbol.lower() in corr_result_tissue_vals_dict: this_trait_tissue_values = corr_result_tissue_vals_dict[symbol.lower()] @@ -119,7 +119,7 @@ def do_literature_correlation_for_all_traits(this_trait, target_dataset, trait_g input_trait_mouse_gene_id = convert_to_mouse_gene_id(target_dataset.group.species.lower(), this_trait.geneid) lit_corr_data = {} - for trait, gene_id in trait_geneid_dict.iteritems(): + for trait, gene_id in list(trait_geneid_dict.items()): mouse_gene_id = convert_to_mouse_gene_id(target_dataset.group.species.lower(), gene_id) if mouse_gene_id and str(mouse_gene_id).find(";") == -1: diff --git a/wqflask/wqflask/api/gen_menu.py b/wqflask/wqflask/api/gen_menu.py index cc11e14b..71d9ee03 100644 --- a/wqflask/wqflask/api/gen_menu.py +++ b/wqflask/wqflask/api/gen_menu.py @@ -61,7 +61,7 @@ def get_types(groups): """Build types list""" types = {} - for species, group_dict in groups.iteritems(): + for species, group_dict in list(groups.items()): types[species] = {} for group_name, _group_full_name, _family_name in group_dict: if phenotypes_exist(group_name): @@ -136,9 +136,9 @@ def build_types(species, group): def get_datasets(types): """Build datasets list""" datasets = {} - for species, group_dict in types.iteritems(): + for species, group_dict in list(types.items()): datasets[species] = {} - for group, type_list in group_dict.iteritems(): + for group, type_list in list(group_dict.items()): datasets[species][group] = {} for type_name in type_list: these_datasets = build_datasets(species, group, type_name[0]) diff --git a/wqflask/wqflask/correlation/corr_scatter_plot.py b/wqflask/wqflask/correlation/corr_scatter_plot.py index 819836b1..57a8d85f 100644 --- a/wqflask/wqflask/correlation/corr_scatter_plot.py +++ b/wqflask/wqflask/correlation/corr_scatter_plot.py @@ -36,13 +36,13 @@ class CorrScatterPlot(object): samples_1, samples_2, num_overlap = corr_result_helpers.normalize_values_with_samples(self.trait_1.data, self.trait_2.data) self.data = [] - self.indIDs = samples_1.keys() + self.indIDs = list(samples_1.keys()) vals_1 = [] - for sample in samples_1.keys(): + for sample in list(samples_1.keys()): vals_1.append(samples_1[sample].value) self.data.append(vals_1) vals_2 = [] - for sample in samples_2.keys(): + for sample in list(samples_2.keys()): vals_2.append(samples_2[sample].value) self.data.append(vals_2) diff --git 
a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py index de7a1c0c..15a21ee6 100644 --- a/wqflask/wqflask/correlation/show_corr_results.py +++ b/wqflask/wqflask/correlation/show_corr_results.py @@ -145,10 +145,10 @@ class CorrelationResults(object): if corr_samples_group == 'samples_other': primary_samples = [x for x in primary_samples if x not in ( self.dataset.group.parlist + self.dataset.group.f1list)] - self.process_samples(start_vars, self.this_trait.data.keys(), primary_samples) + self.process_samples(start_vars, list(self.this_trait.data.keys()), primary_samples) self.target_dataset = data_set.create_dataset(start_vars['corr_dataset']) - self.target_dataset.get_trait_data(self.sample_data.keys()) + self.target_dataset.get_trait_data(list(self.sample_data.keys())) self.header_fields = get_header_fields(self.target_dataset.type, self.corr_method) @@ -168,41 +168,41 @@ class CorrelationResults(object): tissue_corr_data = self.do_tissue_correlation_for_all_traits() if tissue_corr_data != None: - for trait in tissue_corr_data.keys()[:self.return_number]: + for trait in list(tissue_corr_data.keys())[:self.return_number]: self.get_sample_r_and_p_values(trait, self.target_dataset.trait_data[trait]) else: - for trait, values in self.target_dataset.trait_data.iteritems(): + for trait, values in list(self.target_dataset.trait_data.items()): self.get_sample_r_and_p_values(trait, values) elif self.corr_type == "lit": self.trait_geneid_dict = self.dataset.retrieve_genes("GeneId") lit_corr_data = self.do_lit_correlation_for_all_traits() - for trait in lit_corr_data.keys()[:self.return_number]: + for trait in list(lit_corr_data.keys())[:self.return_number]: self.get_sample_r_and_p_values(trait, self.target_dataset.trait_data[trait]) elif self.corr_type == "sample": - for trait, values in self.target_dataset.trait_data.iteritems(): + for trait, values in list(self.target_dataset.trait_data.items()): self.get_sample_r_and_p_values(trait, values) - self.correlation_data = collections.OrderedDict(sorted(self.correlation_data.items(), + self.correlation_data = collections.OrderedDict(sorted(list(self.correlation_data.items()), key=lambda t: -abs(t[1][0]))) if self.target_dataset.type == "ProbeSet" or self.target_dataset.type == "Geno": #ZS: Convert min/max chromosome to an int for the location range option range_chr_as_int = None - for order_id, chr_info in self.dataset.species.chromosomes.chromosomes.iteritems(): + for order_id, chr_info in list(self.dataset.species.chromosomes.chromosomes.items()): if 'loc_chr' in start_vars: if chr_info.name == self.location_chr: range_chr_as_int = order_id - for _trait_counter, trait in enumerate(self.correlation_data.keys()[:self.return_number]): + for _trait_counter, trait in enumerate(list(self.correlation_data.keys())[:self.return_number]): trait_object = create_trait(dataset=self.target_dataset, name=trait, get_qtl_info=True, get_sample_info=False) if self.target_dataset.type == "ProbeSet" or self.target_dataset.type == "Geno": #ZS: Convert trait chromosome to an int for the location range option chr_as_int = 0 - for order_id, chr_info in self.dataset.species.chromosomes.chromosomes.iteritems(): + for order_id, chr_info in list(self.dataset.species.chromosomes.chromosomes.items()): if chr_info.name == trait_object.chr: chr_as_int = order_id @@ -297,14 +297,14 @@ class CorrelationResults(object): #print("trait_gene_symbols: ", pf(trait_gene_symbols.values())) corr_result_tissue_vals_dict= 
correlation_functions.get_trait_symbol_and_tissue_values( - symbol_list=self.trait_symbol_dict.values()) + symbol_list=list(self.trait_symbol_dict.values())) #print("corr_result_tissue_vals: ", pf(corr_result_tissue_vals_dict)) #print("trait_gene_symbols: ", pf(trait_gene_symbols)) tissue_corr_data = {} - for trait, symbol in self.trait_symbol_dict.iteritems(): + for trait, symbol in list(self.trait_symbol_dict.items()): if symbol and symbol.lower() in corr_result_tissue_vals_dict: this_trait_tissue_values = corr_result_tissue_vals_dict[symbol.lower()] @@ -314,7 +314,7 @@ class CorrelationResults(object): tissue_corr_data[trait] = [symbol, result[0], result[2]] - tissue_corr_data = collections.OrderedDict(sorted(tissue_corr_data.items(), + tissue_corr_data = collections.OrderedDict(sorted(list(tissue_corr_data.items()), key=lambda t: -abs(t[1][1]))) return tissue_corr_data @@ -359,7 +359,7 @@ class CorrelationResults(object): input_trait_mouse_gene_id = self.convert_to_mouse_gene_id(self.dataset.group.species.lower(), self.this_trait.geneid) lit_corr_data = {} - for trait, gene_id in self.trait_geneid_dict.iteritems(): + for trait, gene_id in list(self.trait_geneid_dict.items()): mouse_gene_id = self.convert_to_mouse_gene_id(self.dataset.group.species.lower(), gene_id) if mouse_gene_id and str(mouse_gene_id).find(";") == -1: @@ -387,7 +387,7 @@ class CorrelationResults(object): else: lit_corr_data[trait] = [gene_id, 0] - lit_corr_data = collections.OrderedDict(sorted(lit_corr_data.items(), + lit_corr_data = collections.OrderedDict(sorted(list(lit_corr_data.items()), key=lambda t: -abs(t[1][1]))) return lit_corr_data diff --git a/wqflask/wqflask/ctl/ctl_analysis.py b/wqflask/wqflask/ctl/ctl_analysis.py index 35067036..f0be7a98 100644 --- a/wqflask/wqflask/ctl/ctl_analysis.py +++ b/wqflask/wqflask/ctl/ctl_analysis.py @@ -125,7 +125,7 @@ class CTL(object): gt = create_trait(name = ts[0], dataset_name = ts[1]) gt = retrieve_sample_data(gt, dataset, individuals) for ind in individuals: - if ind in gt.data.keys(): + if ind in list(gt.data.keys()): traits.append(gt.data[ind].value) else: traits.append("-999") diff --git a/wqflask/wqflask/export_traits.py b/wqflask/wqflask/export_traits.py index 6646cc36..28c6593d 100644 --- a/wqflask/wqflask/export_traits.py +++ b/wqflask/wqflask/export_traits.py @@ -61,7 +61,7 @@ def export_search_results_csv(targs): traits_by_group = sort_traits_by_group(trait_list) file_list = [] - for group in traits_by_group.keys(): + for group in list(traits_by_group.keys()): group_traits = traits_by_group[group] buff = StringIO.StringIO() writer = csv.writer(buff) @@ -135,7 +135,7 @@ def export_search_results_csv(targs): def sort_traits_by_group(trait_list=[]): traits_by_group = {} for trait in trait_list: - if trait.dataset.group.name not in traits_by_group.keys(): + if trait.dataset.group.name not in list(traits_by_group.keys()): traits_by_group[trait.dataset.group.name] = [] traits_by_group[trait.dataset.group.name].append(trait) diff --git a/wqflask/wqflask/heatmap/heatmap.py b/wqflask/wqflask/heatmap/heatmap.py index 5098a184..577426b0 100644 --- a/wqflask/wqflask/heatmap/heatmap.py +++ b/wqflask/wqflask/heatmap/heatmap.py @@ -60,7 +60,7 @@ class Heatmap(object): chrnames = [] self.species = species.TheSpecies(dataset=self.trait_list[0][1]) - for key in self.species.chromosomes.chromosomes.keys(): + for key in list(self.species.chromosomes.chromosomes.keys()): chrnames.append([self.species.chromosomes.chromosomes[key].name, 
self.species.chromosomes.chromosomes[key].mb_length]) for trait_db in self.trait_list: @@ -93,7 +93,7 @@ class Heatmap(object): pos = [] markernames = [] - for trait in self.trait_results.keys(): + for trait in list(self.trait_results.keys()): lodnames.append(trait) self.dataset.group.get_markers() diff --git a/wqflask/wqflask/interval_analyst/GeneUtil.py b/wqflask/wqflask/interval_analyst/GeneUtil.py index 273168a8..a39e5d0f 100644 --- a/wqflask/wqflask/interval_analyst/GeneUtil.py +++ b/wqflask/wqflask/interval_analyst/GeneUtil.py @@ -24,7 +24,7 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): ##List current Species and other Species speciesId = speciesDict[species] - otherSpecies = [[X, speciesDict[X]] for X in speciesDict.keys()] + otherSpecies = [[X, speciesDict[X]] for X in list(speciesDict.keys())] otherSpecies.remove([species, speciesId]) results = g.db.execute(""" diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py index 7b6e70d2..0328ce85 100644 --- a/wqflask/wqflask/marker_regression/display_mapping_results.py +++ b/wqflask/wqflask/marker_regression/display_mapping_results.py @@ -229,7 +229,7 @@ class DisplayMappingResults(object): self.manhattan_plot = start_vars['manhattan_plot'] - if 'permCheck' in start_vars.keys(): + if 'permCheck' in list(start_vars.keys()): self.permChecked = start_vars['permCheck'] else: self.permChecked = False @@ -242,46 +242,46 @@ class DisplayMappingResults(object): else: self.nperm = 0 - if 'bootCheck' in start_vars.keys(): + if 'bootCheck' in list(start_vars.keys()): self.bootChecked = start_vars['bootCheck'] else: self.bootChecked = False - if 'num_bootstrap' in start_vars.keys(): + if 'num_bootstrap' in list(start_vars.keys()): self.nboot = int(start_vars['num_bootstrap']) else: self.nboot = 0 - if 'bootstrap_results' in start_vars.keys(): + if 'bootstrap_results' in list(start_vars.keys()): self.bootResult = start_vars['bootstrap_results'] else: self.bootResult = [] - if 'do_control' in start_vars.keys(): + if 'do_control' in list(start_vars.keys()): self.doControl = start_vars['do_control'] else: self.doControl = "false" - if 'control_marker' in start_vars.keys(): + if 'control_marker' in list(start_vars.keys()): self.controlLocus = start_vars['control_marker'] else: self.controlLocus = "" - if 'covariates' in start_vars.keys(): + if 'covariates' in list(start_vars.keys()): self.covariates = start_vars['covariates'] - if 'maf' in start_vars.keys(): + if 'maf' in list(start_vars.keys()): self.maf = start_vars['maf'] else: self.maf = "" - if 'output_files' in start_vars.keys(): + if 'output_files' in list(start_vars.keys()): self.output_files = start_vars['output_files'] - if 'use_loco' in start_vars.keys() and self.mapping_method == "gemma": + if 'use_loco' in list(start_vars.keys()) and self.mapping_method == "gemma": self.use_loco = start_vars['use_loco'] - if 'reaper_version' in start_vars.keys() and self.mapping_method == "reaper": + if 'reaper_version' in list(start_vars.keys()) and self.mapping_method == "reaper": self.reaper_version = start_vars['reaper_version'] if 'output_files' in start_vars: self.output_files = ",".join(start_vars['output_files']) self.categorical_vars = "" self.perm_strata = "" - if 'perm_strata' in start_vars.keys() and 'categorical_vars' in start_vars.keys(): + if 'perm_strata' in list(start_vars.keys()) and 'categorical_vars' in list(start_vars.keys()): self.categorical_vars = start_vars['categorical_vars'] 
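# --- Editorial aside (illustrative, not part of the patch) ---
# The pattern behind this commit: Python 3 removed dict.iteritems(), and
# items()/keys() now return view objects, so the 2to3 `dict` fixer wraps them
# in list() wherever the old code indexed, sliced, or mutated while looping.
start_vars = {'permCheck': 'ON', 'num_perm': '2000'}   # made-up sample input
for key, value in list(start_vars.items()):            # was: start_vars.iteritems()
    print(key, value)
first_key = list(start_vars.keys())[0]                 # a view cannot be indexed
# --- end aside ---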
self.perm_strata = start_vars['perm_strata'] @@ -323,7 +323,7 @@ class DisplayMappingResults(object): self.graphWidth = self.MULT_GRAPH_DEFAULT_WIDTH ## BEGIN HaplotypeAnalyst - if 'haplotypeAnalystCheck' in start_vars.keys(): + if 'haplotypeAnalystCheck' in list(start_vars.keys()): self.haplotypeAnalystChecked = start_vars['haplotypeAnalystCheck'] else: self.haplotypeAnalystChecked = False @@ -331,25 +331,25 @@ class DisplayMappingResults(object): self.graphHeight = self.GRAPH_DEFAULT_HEIGHT self.dominanceChecked = False - if 'LRSCheck' in start_vars.keys(): + if 'LRSCheck' in list(start_vars.keys()): self.LRS_LOD = start_vars['LRSCheck'] else: self.LRS_LOD = start_vars['score_type'] self.intervalAnalystChecked = True self.draw2X = False - if 'additiveCheck' in start_vars.keys(): + if 'additiveCheck' in list(start_vars.keys()): self.additiveChecked = start_vars['additiveCheck'] else: self.additiveChecked = False - if 'viewLegend' in start_vars.keys(): + if 'viewLegend' in list(start_vars.keys()): self.legendChecked = start_vars['viewLegend'] else: self.legendChecked = False - if 'showSNP' in start_vars.keys(): + if 'showSNP' in list(start_vars.keys()): self.SNPChecked = start_vars['showSNP'] else: self.SNPChecked = False - if 'showGenes' in start_vars.keys(): + if 'showGenes' in list(start_vars.keys()): self.geneChecked = start_vars['showGenes'] else: self.geneChecked = False @@ -530,7 +530,7 @@ class DisplayMappingResults(object): showLocusForm = HT.Form(cgi= os.path.join(webqtlConfig.CGIDIR, webqtlConfig.SCRIPTFILE), enctype='multipart/form-data', name=showLocusForm, submit=HT.Input(type='hidden')) hddn = {'FormID':'showDatabase', 'ProbeSetID':'_','database':fd.RISet+"Geno",'CellID':'_', 'RISet':fd.RISet, 'incparentsf1':'ON'} - for key in hddn.keys(): + for key in list(hddn.keys()): showLocusForm.append(HT.Input(name=key, value=hddn[key], type='hidden')) showLocusForm.append(intImg) else: diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py index c9d10f7c..145dbc77 100644 --- a/wqflask/wqflask/marker_regression/run_mapping.py +++ b/wqflask/wqflask/marker_regression/run_mapping.py @@ -347,7 +347,7 @@ class RunMapping(object): if marker['chr1'] > 0 or marker['chr1'] == "X" or marker['chr1'] == "X/Y": if marker['chr1'] > highest_chr or marker['chr1'] == "X" or marker['chr1'] == "X/Y": highest_chr = marker['chr1'] - if 'lod_score' in marker.keys(): + if 'lod_score' in list(marker.keys()): self.qtl_results.append(marker) self.trimmed_markers = results @@ -411,7 +411,7 @@ class RunMapping(object): if marker['chr'] > 0 or marker['chr'] == "X" or marker['chr'] == "X/Y": if marker['chr'] > highest_chr or marker['chr'] == "X" or marker['chr'] == "X/Y": highest_chr = marker['chr'] - if ('lod_score' in marker.keys()) or ('lrs_value' in marker.keys()): + if ('lod_score' in list(marker.keys())) or ('lrs_value' in list(marker.keys())): self.qtl_results.append(marker) with Bench("Exporting Results"): @@ -538,28 +538,28 @@ def export_mapping_results(dataset, trait, markers, results_path, mapping_scale, output_file.write("Mb," + score_type) else: output_file.write("Cm," + score_type) - if "additive" in markers[0].keys(): + if "additive" in list(markers[0].keys()): output_file.write(",Additive") - if "dominance" in markers[0].keys(): + if "dominance" in list(markers[0].keys()): output_file.write(",Dominance") output_file.write("\n") for i, marker in enumerate(markers): output_file.write(marker['name'] + "," + str(marker['chr']) + "," + 
str(marker['Mb']) + ",") - if "lod_score" in marker.keys(): + if "lod_score" in list(marker.keys()): output_file.write(str(marker['lod_score'])) else: output_file.write(str(marker['lrs_value'])) - if "additive" in marker.keys(): + if "additive" in list(marker.keys()): output_file.write("," + str(marker['additive'])) - if "dominance" in marker.keys(): + if "dominance" in list(marker.keys()): output_file.write("," + str(marker['dominance'])) if i < (len(markers) - 1): output_file.write("\n") def trim_markers_for_figure(markers): - if 'p_wald' in markers[0].keys(): + if 'p_wald' in list(markers[0].keys()): score_type = 'p_wald' - elif 'lod_score' in markers[0].keys(): + elif 'lod_score' in list(markers[0].keys()): score_type = 'lod_score' else: score_type = 'lrs_value' @@ -617,7 +617,7 @@ def trim_markers_for_figure(markers): return filtered_markers def trim_markers_for_table(markers): - if 'lod_score' in markers[0].keys(): + if 'lod_score' in list(markers[0].keys()): sorted_markers = sorted(markers, key=lambda k: k['lod_score'], reverse=True) else: sorted_markers = sorted(markers, key=lambda k: k['lrs_value'], reverse=True) @@ -695,10 +695,10 @@ def get_genofile_samplelist(dataset): def get_perm_strata(this_trait, sample_list, categorical_vars, used_samples): perm_strata_strings = [] for sample in used_samples: - if sample in sample_list.sample_attribute_values.keys(): + if sample in list(sample_list.sample_attribute_values.keys()): combined_string = "" for var in categorical_vars: - if var in sample_list.sample_attribute_values[sample].keys(): + if var in list(sample_list.sample_attribute_values[sample].keys()): combined_string += str(sample_list.sample_attribute_values[sample][var]) else: combined_string += "NA" diff --git a/wqflask/wqflask/resource_manager.py b/wqflask/wqflask/resource_manager.py index 39a07310..6b3e00fb 100644 --- a/wqflask/wqflask/resource_manager.py +++ b/wqflask/wqflask/resource_manager.py @@ -125,7 +125,7 @@ def add_group_to_resource(): def get_group_names(group_masks): group_masks_with_names = {} - for group_id, group_mask in group_masks.iteritems(): + for group_id, group_mask in list(group_masks.items()): this_mask = group_mask group_name = get_group_info(group_id)['name'] this_mask['name'] = group_name diff --git a/wqflask/wqflask/show_trait/export_trait_data.py b/wqflask/wqflask/show_trait/export_trait_data.py index 253c887b..68c3ad7d 100644 --- a/wqflask/wqflask/show_trait/export_trait_data.py +++ b/wqflask/wqflask/show_trait/export_trait_data.py @@ -47,7 +47,7 @@ def get_export_metadata(trait_id, dataset_name): def dict_to_sorted_list(dictionary): - sorted_list = [item for item in dictionary.iteritems()] + sorted_list = [item for item in list(dictionary.items())] sorted_list = sorted(sorted_list, cmp=cmp_samples) sorted_values = [item[1] for item in sorted_list] return sorted_values diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index f188fd9d..c156e61b 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -261,7 +261,7 @@ class ShowTrait(object): hddn['export_data'] = "" hddn['export_format'] = "excel" if len(self.scales_in_geno) < 2: - hddn['mapping_scale'] = self.scales_in_geno[self.scales_in_geno.keys()[0]][0][0] + hddn['mapping_scale'] = self.scales_in_geno[list(self.scales_in_geno.keys())[0]][0][0] # We'll need access to this_trait and hddn in the Jinja2 Template, so we put it inside self self.hddn = hddn @@ -405,7 +405,7 @@ class ShowTrait(object): if not 
self.temp_trait: other_sample_names = [] - for sample in self.this_trait.data.keys(): + for sample in list(self.this_trait.data.keys()): if (self.this_trait.data[sample].name2 in primary_sample_names) and (self.this_trait.data[sample].name not in primary_sample_names): primary_sample_names.append(self.this_trait.data[sample].name) primary_sample_names.remove(self.this_trait.data[sample].name2) @@ -558,7 +558,7 @@ def get_table_widths(sample_groups, has_num_cases=False): def has_num_cases(this_trait): has_n = False if this_trait.dataset.type != "ProbeSet" and this_trait.dataset.type != "Geno": - for name, sample in this_trait.data.iteritems(): + for name, sample in list(this_trait.data.items()): if sample.num_cases: has_n = True break @@ -611,7 +611,7 @@ def get_categorical_variables(this_trait, sample_list): if len(sample_list.attributes) > 0: for attribute in sample_list.attributes: attribute_vals = [] - for sample_name in this_trait.data.keys(): + for sample_name in list(this_trait.data.keys()): if sample_list.attributes[attribute].name in this_trait.data[sample_name].extra_attributes: attribute_vals.append(this_trait.data[sample_name].extra_attributes[sample_list.attributes[attribute].name]) else: diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index d67f1a2e..394a9e28 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -533,7 +533,7 @@ def heatmap_page(): result = template_vars.__dict__ - for item in template_vars.__dict__.keys(): + for item in list(template_vars.__dict__.keys()): logger.info(" ---**--- {}: {}".format(type(template_vars.__dict__[item]), item)) pickled_result = pickle.dumps(result, pickle.HIGHEST_PROTOCOL) @@ -637,7 +637,7 @@ def loading_page(): if 'wanted_inputs' in initial_start_vars: wanted = initial_start_vars['wanted_inputs'].split(",") start_vars = {} - for key, value in initial_start_vars.iteritems(): + for key, value in list(initial_start_vars.items()): if key in wanted or key.startswith(('value:')): start_vars[key] = value @@ -737,7 +737,7 @@ def mapping_results_page(): 'transform' ) start_vars = {} - for key, value in initial_start_vars.iteritems(): + for key, value in list(initial_start_vars.items()): if key in wanted or key.startswith(('value:')): start_vars[key] = value #logger.debug("Mapping called with start_vars:", start_vars) -- cgit v1.2.3 From caec08fa1e738fa9bc1b0b6bf626d8325f798712 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 19 Aug 2020 03:08:08 +0300 Subject: Convert the old not-equal syntax, <>, to != Run `2to3-3.8 -f ne -w .` See: --- wqflask/utility/svg.py | 2 +- wqflask/wqflask/marker_regression/plink_mapping.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/svg.py b/wqflask/utility/svg.py index c6a5c260..c7356e57 100644 --- a/wqflask/utility/svg.py +++ b/wqflask/utility/svg.py @@ -360,7 +360,7 @@ class tspan(SVGelement): def __init__(self, text=None, **args): SVGelement.__init__(self, 'tspan', **args) - if self.text <> None: + if self.text != None: self.text = text def __repr__(self): diff --git a/wqflask/wqflask/marker_regression/plink_mapping.py b/wqflask/wqflask/marker_regression/plink_mapping.py index 38ef7190..d4ee6fe6 100644 --- a/wqflask/wqflask/marker_regression/plink_mapping.py +++ b/wqflask/wqflask/marker_regression/plink_mapping.py @@ -156,7 +156,7 @@ def parse_plink_output(output_filename, species): ####################################################### def build_line_list(line=None): line_list = 
string.split(string.strip(line),' ')# irregular number of whitespaces between columns - line_list = [item for item in line_list if item <>''] + line_list = [item for item in line_list if item !=''] line_list = list(map(string.strip, line_list)) return line_list \ No newline at end of file -- cgit v1.2.3 From 7e60647223017220747d248ed1c986cc8374435e Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 19 Aug 2020 03:13:53 +0300 Subject: Wrap `raise` statements in parenthesis Run `2to3-3.8 -f raise -w .` See: --- wqflask/base/trait.py | 2 +- wqflask/utility/svg.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index e82df226..c2d260e3 100644 --- a/wqflask/base/trait.py +++ b/wqflask/base/trait.py @@ -605,6 +605,6 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): if trait.lrs != "": trait.LRS_score_repr = LRS_score_repr = '%3.1f' % trait.lrs else: - raise KeyError, `trait.name`+' information is not found in the database.' + raise KeyError(`trait.name`+' information is not found in the database.') return trait \ No newline at end of file diff --git a/wqflask/utility/svg.py b/wqflask/utility/svg.py index c7356e57..872f22fe 100644 --- a/wqflask/utility/svg.py +++ b/wqflask/utility/svg.py @@ -445,7 +445,7 @@ class rect(SVGelement): def __init__(self, x=None, y=None, width=None, height=None, fill=None, stroke=None, stroke_width=None, **args): if width == None or height == None: - raise ValueError, 'both height and width are required' + raise ValueError('both height and width are required') SVGelement.__init__(self,'rect',{'width':width,'height':height},**args) if x!=None: @@ -466,7 +466,7 @@ class ellipse(SVGelement): """ def __init__(self,cx=None,cy=None,rx=None,ry=None,fill=None,stroke=None,stroke_width=None,**args): if rx==None or ry== None: - raise ValueError, 'both rx and ry are required' + raise ValueError('both rx and ry are required') SVGelement.__init__(self,'ellipse',{'rx':rx,'ry':ry},**args) if cx!=None: @@ -488,7 +488,7 @@ class circle(SVGelement): """ def __init__(self,cx=None,cy=None,r=None,fill=None,stroke=None,stroke_width=None,**args): if r==None: - raise ValueError, 'r is required' + raise ValueError('r is required') SVGelement.__init__(self,'circle',{'r':r},**args) if cx!=None: self.attributes['cx']=cx @@ -714,7 +714,7 @@ class image(SVGelement): """ def __init__(self,url,x=None,y=None,width=None,height=None,**args): if width==None or height==None: - raise ValueError, 'both height and width are required' + raise ValueError('both height and width are required') SVGelement.__init__(self,'image',{'xlink:href':url,'width':width,'height':height},**args) if x!=None: self.attributes['x']=x -- cgit v1.2.3 From 9d5dff44fb4d07f926659dde0c6205bf12a1ca5b Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 19 Aug 2020 03:36:24 +0300 Subject: Rename xrange() to range() and wrap existing range() calls with list See: --- wqflask/maintenance/quantile_normalize.py | 2 +- wqflask/utility/Plot.py | 2 +- wqflask/wqflask/correlation/correlation_functions.py | 4 ++-- wqflask/wqflask/correlation_matrix/show_corr_matrix.py | 6 +++--- wqflask/wqflask/marker_regression/qtlreaper_mapping.py | 2 +- wqflask/wqflask/network_graph/network_graph.py | 4 ++-- wqflask/wqflask/pbkdf2.py | 4 ++-- wqflask/wqflask/search_results.py | 2 +- 8 files changed, 13 insertions(+), 13 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/maintenance/quantile_normalize.py 
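# --- Editorial aside (illustrative, not part of the patch) ---
# In Python 3, range() replaces xrange() and is itself lazy; the fixer applied
# in this commit wraps it in list() at call sites that want a concrete list.
total = 0
for block in range(1, 5):        # was: xrange(1, 5)
    total += block
usecols = list(range(1, 4))      # a real list: [1, 2, 3]
assert total == 10 and usecols == [1, 2, 3]
# --- end aside ---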
b/wqflask/maintenance/quantile_normalize.py index 34886f44..82b695f4 100644 --- a/wqflask/maintenance/quantile_normalize.py +++ b/wqflask/maintenance/quantile_normalize.py @@ -37,7 +37,7 @@ def create_dataframe(input_file): with open(input_file) as f: ncols = len(f.readline().split("\t")) - input_array = np.loadtxt(open(input_file, "rb"), delimiter="\t", skiprows=1, usecols=range(1, ncols)) + input_array = np.loadtxt(open(input_file, "rb"), delimiter="\t", skiprows=1, usecols=list(range(1, ncols))) return pd.DataFrame(input_array) #This function taken from https://github.com/ShawnLYU/Quantile_Normalize diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py index 82bf6070..c9053dde 100644 --- a/wqflask/utility/Plot.py +++ b/wqflask/utility/Plot.py @@ -86,7 +86,7 @@ def frange(start, end=None, inc=1.0): # Need to adjust the count. AFAICT, it always comes up one short. count += 1 L = [start] * count - for i in xrange(1, count): + for i in range(1, count): L[i] = start + i * inc return L diff --git a/wqflask/wqflask/correlation/correlation_functions.py b/wqflask/wqflask/correlation/correlation_functions.py index 06dec795..abaa212f 100644 --- a/wqflask/wqflask/correlation/correlation_functions.py +++ b/wqflask/wqflask/correlation/correlation_functions.py @@ -50,12 +50,12 @@ from flask import Flask, g def cal_zero_order_corr_for_tiss (primaryValue=[], targetValue=[], method='pearson'): - R_primary = rpy2.robjects.FloatVector(range(len(primaryValue))) + R_primary = rpy2.robjects.FloatVector(list(range(len(primaryValue)))) N = len(primaryValue) for i in range(len(primaryValue)): R_primary[i] = primaryValue[i] - R_target = rpy2.robjects.FloatVector(range(len(targetValue))) + R_target = rpy2.robjects.FloatVector(list(range(len(targetValue)))) for i in range(len(targetValue)): R_target[i]=targetValue[i] diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py index e6c817e7..832746bb 100644 --- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py +++ b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py @@ -194,7 +194,7 @@ class CorrelationMatrix(object): if self.do_PCA == True: self.pca_works = "True" self.pca_trait_ids = [] - pca = self.calculate_pca(range(len(self.traits)), corr_eigen_value, corr_eigen_vectors) + pca = self.calculate_pca(list(range(len(self.traits))), corr_eigen_value, corr_eigen_vectors) self.loadings_array = self.process_loadings() else: self.pca_works = "False" @@ -203,8 +203,8 @@ class CorrelationMatrix(object): self.js_data = dict(traits = [trait.name for trait in self.traits], groups = groups, - cols = range(len(self.traits)), - rows = range(len(self.traits)), + cols = list(range(len(self.traits))), + rows = list(range(len(self.traits))), samples = self.all_sample_list, sample_data = self.sample_data,) # corr_results = [result[1] for result in result_row for result_row in self.corr_results]) diff --git a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py index 0c560582..189c1985 100644 --- a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py +++ b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py @@ -228,4 +228,4 @@ def natural_sort(marker_list): """ convert = lambda text: int(text) if text.isdigit() else text.lower() alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', str(marker_list[key]['chr'])) ] - return sorted(range(len(marker_list)), key = alphanum_key) \ No newline at end of file + return 
sorted(list(range(len(marker_list))), key = alphanum_key) \ No newline at end of file diff --git a/wqflask/wqflask/network_graph/network_graph.py b/wqflask/wqflask/network_graph/network_graph.py index f61c40b4..ac2ff017 100644 --- a/wqflask/wqflask/network_graph/network_graph.py +++ b/wqflask/wqflask/network_graph/network_graph.py @@ -202,8 +202,8 @@ class NetworkGraph(object): self.js_data = dict(traits = [trait.name for trait in self.traits], groups = groups, - cols = range(len(self.traits)), - rows = range(len(self.traits)), + cols = list(range(len(self.traits))), + rows = list(range(len(self.traits))), samples = self.all_sample_list, sample_data = self.sample_data, elements = self.elements,) diff --git a/wqflask/wqflask/pbkdf2.py b/wqflask/wqflask/pbkdf2.py index 0ed50790..917b9d31 100644 --- a/wqflask/wqflask/pbkdf2.py +++ b/wqflask/wqflask/pbkdf2.py @@ -68,9 +68,9 @@ def pbkdf2_bin(data, salt, iterations=1000, keylen=24, hashfunc=None): h.update(x) return list(map(ord, h.digest())) buf = [] - for block in xrange(1, -(-keylen // mac.digest_size) + 1): + for block in range(1, -(-keylen // mac.digest_size) + 1): rv = u = _pseudorandom(salt + _pack_int(block)) - for i in xrange(iterations - 1): + for i in range(iterations - 1): u = _pseudorandom(''.join(map(chr, u))) rv = list(starmap(xor, zip(rv, u))) buf.extend(rv) diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py index de4b01eb..5b3946e3 100644 --- a/wqflask/wqflask/search_results.py +++ b/wqflask/wqflask/search_results.py @@ -266,7 +266,7 @@ def get_GO_symbols(a_search): def insert_newlines(string, every=64): """ This is because it is seemingly impossible to change the width of the description column, so I'm just manually adding line breaks """ lines = [] - for i in xrange(0, len(string), every): + for i in range(0, len(string), every): lines.append(string[i:i+every]) return '\n'.join(lines) -- cgit v1.2.3 From 3aaa28ea762c496eeb84e09e45194e3fd2a51673 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 19 Aug 2020 03:45:49 +0300 Subject: Make Python more idiomatic Run `2to3-3.8 -f idioms -w .` See: --- scripts/maintenance/readProbeSetMean_v7.py | 6 ++---- scripts/maintenance/readProbeSetSE_v7.py | 6 ++---- wqflask/base/data_set.py | 4 ++-- wqflask/utility/authentication_tools.py | 2 +- wqflask/utility/svg.py | 2 +- wqflask/wqflask/ctl/ctl_analysis.py | 2 +- wqflask/wqflask/show_trait/SampleList.py | 2 +- wqflask/wqflask/show_trait/show_trait.py | 2 +- wqflask/wqflask/user_login.py | 2 +- 9 files changed, 12 insertions(+), 16 deletions(-) (limited to 'wqflask/utility') diff --git a/scripts/maintenance/readProbeSetMean_v7.py b/scripts/maintenance/readProbeSetMean_v7.py index 97767715..864b4e08 100755 --- a/scripts/maintenance/readProbeSetMean_v7.py +++ b/scripts/maintenance/readProbeSetMean_v7.py @@ -80,8 +80,7 @@ while line: if kj%100000 == 0: print(('checked ',kj,' lines')) -GeneList = list(map(string.lower, GeneList)) -GeneList.sort() +GeneList = sorted(map(string.lower, GeneList)) if isCont==0: sys.exit(0) @@ -148,9 +147,8 @@ for item in results: print(Names) -Names = list(map(string.lower, Names)) +Names = sorted(map(string.lower, Names)) -Names.sort() # -- Fixed the lower case problem of ProbeSets affx-mur_b2_at doesn't exist --# ##---- compare genelist with names ----## diff --git a/scripts/maintenance/readProbeSetSE_v7.py b/scripts/maintenance/readProbeSetSE_v7.py index 833c3f5f..20a846a4 100755 --- a/scripts/maintenance/readProbeSetSE_v7.py +++ b/scripts/maintenance/readProbeSetSE_v7.py @@ 
-91,8 +91,7 @@ while line: if kj % 100000 == 0: print(('checked ', kj, ' lines')) -GeneList = list(map(string.lower, GeneList)) -GeneList.sort() +GeneList = sorted(map(string.lower, GeneList)) if isCont == 0: sys.exit(0) @@ -158,8 +157,7 @@ results = db.fetchall() Names = [] for item in results: Names.append(item[0]) - Names = list(map(string.lower, Names)) - Names.sort() # -- Fixed the lower case problem of ProbeSets affx-mur_b2_at doesn't exist --# + Names = sorted(map(string.lower, Names)) ##---- compare genelist with names ----## x = y = 0 diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py index b0119b58..06e1c551 100644 --- a/wqflask/base/data_set.py +++ b/wqflask/base/data_set.py @@ -254,7 +254,7 @@ class Markers(object): logger.debug("length of self.markers:", len(self.markers)) logger.debug("length of p_values:", len(p_values)) - if type(p_values) is list: + if isinstance(p_values, list): # THIS IS only needed for the case when we are limiting the number of p-values calculated # if len(self.markers) > len(p_values): # self.markers = self.markers[:len(p_values)] @@ -270,7 +270,7 @@ class Markers(object): marker['lod_score'] = -math.log10(marker['p_value']) # Using -log(p) for the LRS; need to ask Rob how he wants to get LRS from p-values marker['lrs_value'] = -math.log10(marker['p_value']) * 4.61 - elif type(p_values) is dict: + elif isinstance(p_values, dict): filtered_markers = [] for marker in self.markers: #logger.debug("marker[name]", marker['name']) diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index ece7022c..bc03eb55 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -17,7 +17,7 @@ logger = logging.getLogger(__name__ ) def check_resource_availability(dataset, trait_id=None): #At least for now assume temporary entered traits are accessible - if type(dataset) == str: + if isinstance(dataset, str): return webqtlConfig.DEFAULT_PRIVILEGES if dataset.type == "Temp": return webqtlConfig.DEFAULT_PRIVILEGES diff --git a/wqflask/utility/svg.py b/wqflask/utility/svg.py index 872f22fe..874ada9d 100644 --- a/wqflask/utility/svg.py +++ b/wqflask/utility/svg.py @@ -332,7 +332,7 @@ class SVGelement: f.write('\n'+'\t'*(level+2)+line) f.write('\n'+'\t'*(level+1)+']]>\n') if self.text: - if type(self.text) == type(''): # If the text is only text + if isinstance(self.text, type('')): # If the text is only text f.write(_escape(str(self.text))) else: # If the text is a spannedtext class f.write(str(self.text)) diff --git a/wqflask/wqflask/ctl/ctl_analysis.py b/wqflask/wqflask/ctl/ctl_analysis.py index f0be7a98..e58a7b87 100644 --- a/wqflask/wqflask/ctl/ctl_analysis.py +++ b/wqflask/wqflask/ctl/ctl_analysis.py @@ -175,7 +175,7 @@ class CTL(object): sys.stdout.flush() # Create the interactive graph for cytoscape visualization (Nodes and Edges) - if not type(significant) == ri.RNULLType: + if not isinstance(significant, ri.RNULLType): for x in range(len(significant[0])): logger.debug(significant[0][x], significant[1][x], significant[2][x]) # Debug to console tsS = significant[0][x].split(':') # Source diff --git a/wqflask/wqflask/show_trait/SampleList.py b/wqflask/wqflask/show_trait/SampleList.py index ad78ebcc..21ba7f63 100644 --- a/wqflask/wqflask/show_trait/SampleList.py +++ b/wqflask/wqflask/show_trait/SampleList.py @@ -43,7 +43,7 @@ class SampleList(object): for counter, sample_name in enumerate(sample_names, 1): sample_name = sample_name.replace("_2nd_", "") - if 
type(self.this_trait) is list: #ZS: self.this_trait will be a list if it is a Temp trait + if isinstance(self.this_trait, list): #ZS: self.this_trait will be a list if it is a Temp trait if counter <= len(self.this_trait) and str(self.this_trait[counter-1]).upper() != 'X': sample = webqtlCaseData.webqtlCaseData(name=sample_name, value=float(self.this_trait[counter-1])) else: diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index c156e61b..65899ab2 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -625,7 +625,7 @@ def get_categorical_variables(this_trait, sample_list): def get_genotype_scales(genofiles): geno_scales = {} - if type(genofiles) is list: + if isinstance(genofiles, list): for the_file in genofiles: file_location = the_file['location'] geno_scales[file_location] = get_scales_from_genofile(file_location) diff --git a/wqflask/wqflask/user_login.py b/wqflask/wqflask/user_login.py index cfee0079..04672b45 100644 --- a/wqflask/wqflask/user_login.py +++ b/wqflask/wqflask/user_login.py @@ -193,7 +193,7 @@ def login(): if user_details: submitted_password = params['password'] pwfields = user_details['password'] - if type(pwfields) is str: + if isinstance(pwfields, str): pwfields = json.loads(pwfields) encrypted_pass_fields = encode_password(pwfields, submitted_password) password_match = pbkdf2.safe_str_cmp(encrypted_pass_fields['password'], pwfields['password']) -- cgit v1.2.3 From db41cd49b6d8ccd2c3318209118ffe098bc9293e Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 19 Aug 2020 03:57:05 +0300 Subject: Remove extra whitespace(or add it) from comma separated items See: --- etc/default_settings.py | 2 +- scripts/maintenance/QTL_Reaper_v6.py | 4 +- scripts/maintenance/readProbeSetMean_v7.py | 28 ++-- setup.py | 2 +- wqflask/base/trait.py | 6 +- wqflask/db/call.py | 10 +- wqflask/db/webqtlDatabaseFunction.py | 8 +- wqflask/maintenance/gen_select_dataset.py | 2 +- wqflask/maintenance/quantile_normalize.py | 2 +- wqflask/maintenance/set_resource_defaults.py | 2 +- wqflask/utility/Plot.py | 48 +++--- wqflask/utility/elasticsearch_tools.py | 2 +- wqflask/utility/logger.py | 26 +-- wqflask/utility/pillow_utils.py | 4 +- wqflask/utility/startup_config.py | 2 +- wqflask/utility/svg.py | 130 +++++++------- wqflask/utility/tools.py | 32 ++-- wqflask/utility/webqtlUtil.py | 16 +- wqflask/wqflask/api/router.py | 8 +- wqflask/wqflask/correlation/show_corr_results.py | 12 +- .../wqflask/correlation_matrix/show_corr_matrix.py | 8 +- wqflask/wqflask/do_search.py | 4 +- .../wqflask/external_tools/send_to_geneweaver.py | 8 +- .../marker_regression/display_mapping_results.py | 186 ++++++++++----------- wqflask/wqflask/marker_regression/plink_mapping.py | 6 +- wqflask/wqflask/marker_regression/run_mapping.py | 2 +- wqflask/wqflask/search_results.py | 2 +- wqflask/wqflask/show_trait/SampleList.py | 2 +- wqflask/wqflask/user_manager.py | 2 +- wqflask/wqflask/views.py | 8 +- 30 files changed, 287 insertions(+), 287 deletions(-) (limited to 'wqflask/utility') diff --git a/etc/default_settings.py b/etc/default_settings.py index f368237b..82c605da 100644 --- a/etc/default_settings.py +++ b/etc/default_settings.py @@ -24,7 +24,7 @@ import os import sys -GN_VERSION = open("../etc/VERSION","r").read() +GN_VERSION = open("../etc/VERSION", "r").read() GN_SERVER_URL = "http://localhost:8880/" # REST API server # ---- MySQL diff --git a/scripts/maintenance/QTL_Reaper_v6.py b/scripts/maintenance/QTL_Reaper_v6.py 
index 2fbeb53b..35f2d1a1 100755 --- a/scripts/maintenance/QTL_Reaper_v6.py +++ b/scripts/maintenance/QTL_Reaper_v6.py @@ -7,7 +7,7 @@ import reaper import MySQLdb import time -con = MySQLdb.Connect(db='db_webqtl',user='username',passwd='', host="localhost") +con = MySQLdb.Connect(db='db_webqtl', user='username', passwd='', host="localhost") cursor = con.cursor() genotypeDir = '/gnshare/gn/web/genotypes/' @@ -102,7 +102,7 @@ for ProbeSetFreezeId in ProbeSetFreezeIds: kj += 1 if kj%1000==0: - print((ProbeSetFreezeId, InbredSets[InbredSetId],kj)) + print((ProbeSetFreezeId, InbredSets[InbredSetId], kj)) print(ProbeSetFreezeIds) diff --git a/scripts/maintenance/readProbeSetMean_v7.py b/scripts/maintenance/readProbeSetMean_v7.py index 864b4e08..59a51cf9 100755 --- a/scripts/maintenance/readProbeSetMean_v7.py +++ b/scripts/maintenance/readProbeSetMean_v7.py @@ -39,7 +39,7 @@ fp = open("%s" % input_file_name, 'rb') try: passwd = getpass.getpass('Please enter mysql password here : ') - con = MySQLdb.Connect(db='db_webqtl',host='localhost', user='username',passwd=passwd) + con = MySQLdb.Connect(db='db_webqtl', host='localhost', user='username', passwd=passwd) db = con.cursor() print("You have successfully connected to mysql.\n") @@ -60,14 +60,14 @@ print('Checking if each line have same number of members') GeneList = [] isCont = 1 header = fp.readline() -header = string.split(string.strip(header),'\t') +header = string.split(string.strip(header), '\t') header = list(map(string.strip, header)) nfield = len(header) line = fp.readline() kj=0 while line: - line2 = string.split(string.strip(line),'\t') + line2 = string.split(string.strip(line), '\t') line2 = list(map(string.strip, line2)) if len(line2) != nfield: print(("Error : " + line)) @@ -78,7 +78,7 @@ while line: kj+=1 if kj%100000 == 0: - print(('checked ',kj,' lines')) + print(('checked ', kj, ' lines')) GeneList = sorted(map(string.lower, GeneList)) @@ -86,7 +86,7 @@ if isCont==0: sys.exit(0) -print(('used ',time.time()-time0,' seconds')) +print(('used ', time.time()-time0, ' seconds')) ######################################################################### # # Check if each strain exist in database @@ -98,7 +98,7 @@ print('Checking if each strain exist in database') isCont = 1 fp.seek(0) header = fp.readline() -header = string.split(string.strip(header),'\t') +header = string.split(string.strip(header), '\t') header = list(map(string.strip, header)) header = list(map(translateAlias, header)) header = header[dataStart:] @@ -108,14 +108,14 @@ for item in header: db.execute('select Id from Strain where Name = "%s"' % item) Ids.append(db.fetchall()[0][0]) except: - print((item,'does not exist, check the if the strain name is correct')) + print((item, 'does not exist, check the if the strain name is correct')) isCont=0 if isCont==0: sys.exit(0) -print(('used ',time.time()-time0,' seconds')) +print(('used ', time.time()-time0, ' seconds')) ######################################################################## # # Check if each ProbeSet exist in database @@ -126,7 +126,7 @@ print('Check if each ProbeSet exist in database') ##---- find PID is name or target ----## line = fp.readline() line = fp.readline() -line2 = string.split(string.strip(line),'\t') +line2 = string.split(string.strip(line), '\t') line2 = list(map(string.strip, line2)) PId = line2[0] @@ -185,7 +185,7 @@ if isCont==0: sys.exit(0) -print(('used ',time.time()-time0,' seconds')) +print(('used ', time.time()-time0, ' seconds')) 
######################################################################### # # Insert data into database @@ -200,7 +200,7 @@ results = db.fetchall() NameIds = {} for item in results: NameIds[item[0]] = item[1] -print(('used ',time.time()-time0,' seconds')) +print(('used ', time.time()-time0, ' seconds')) print('inserting data') @@ -220,7 +220,7 @@ kj = 0 values1 = [] values2 = [] while line: - line2 = string.split(string.strip(line),'\t') + line2 = string.split(string.strip(line), '\t') line2 = list(map(string.strip, line2)) PId = line2[0] recordId = NameIds[PId] @@ -253,8 +253,8 @@ while line: values1=[] values2=[] - print(('Inserted ', kj,' lines')) - print(('used ',time.time()-time0,' seconds')) + print(('Inserted ', kj, ' lines')) + print(('used ', time.time()-time0, ' seconds')) line = fp.readline() diff --git a/setup.py b/setup.py index a9b71fab..8436dcd3 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ setup(name='genenetwork2', url = "https://github.com/genenetwork/genenetwork2/blob/master/README.md", description = 'Website and tools for genetics.', include_package_data=True, - packages=['wqflask','etc'], + packages=['wqflask', 'etc'], scripts=['bin/genenetwork2'], # package_data = { # 'etc': ['*.py'] diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index 2b8f2e72..05b272c3 100644 --- a/wqflask/base/trait.py +++ b/wqflask/base/trait.py @@ -193,7 +193,7 @@ class GeneralTrait(object): ''' if self.chr and self.mb: - self.location = 'Chr %s @ %s Mb' % (self.chr,self.mb) + self.location = 'Chr %s @ %s Mb' % (self.chr, self.mb) elif self.chr: self.location = 'Chr %s @ Unknown position' % (self.chr) else: @@ -440,7 +440,7 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): #XZ, 05/08/2009: We also should use Geno.Id to find marker instead of just using Geno.Name # to avoid the problem of same marker name from different species. elif dataset.type == 'Geno': - display_fields_string = string.join(dataset.display_fields,',Geno.') + display_fields_string = string.join(dataset.display_fields, ',Geno.') display_fields_string = 'Geno.' 
+ display_fields_string query = """ SELECT %s @@ -459,7 +459,7 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): query = """SELECT %s FROM %s WHERE Name = %s""" logger.sql(query) trait_info = g.db.execute(query, - (string.join(dataset.display_fields,','), + (string.join(dataset.display_fields, ','), dataset.type, trait.name)).fetchone() if trait_info: diff --git a/wqflask/db/call.py b/wqflask/db/call.py index 82cfebb4..3b8f782e 100644 --- a/wqflask/db/call.py +++ b/wqflask/db/call.py @@ -26,8 +26,8 @@ GN_SERVER result when set (which should return a Tuple) else: res2 = result, if LOG_SQL: - logger.debug("Replaced SQL call",query) - logger.debug(path,res2) + logger.debug("Replaced SQL call", query) + logger.debug(path, res2) return res2 else: return fetchone(query) @@ -37,7 +37,7 @@ def fetchone(query): original fetchone, but with logging) """ - with Bench("SQL",LOG_SQL): + with Bench("SQL", LOG_SQL): def helper(query): res = g.db.execute(query) return res.fetchone() @@ -48,7 +48,7 @@ def fetchall(query): original fetchall, but with logging) """ - with Bench("SQL",LOG_SQL): + with Bench("SQL", LOG_SQL): def helper(query): res = g.db.execute(query) return res.fetchall() @@ -58,7 +58,7 @@ def gn_server(path): """Return JSON record by calling GN_SERVER """ - with Bench("GN_SERVER",LOG_SQL): + with Bench("GN_SERVER", LOG_SQL): res = urllib.request.urlopen(GN_SERVER_URL+path) rest = res.read() res2 = json.loads(rest) diff --git a/wqflask/db/webqtlDatabaseFunction.py b/wqflask/db/webqtlDatabaseFunction.py index 8a9dc79d..2805febd 100644 --- a/wqflask/db/webqtlDatabaseFunction.py +++ b/wqflask/db/webqtlDatabaseFunction.py @@ -35,13 +35,13 @@ def retrieve_species(group): """Get the species of a group (e.g. returns string "mouse" on "BXD" """ - result = fetch1("select Species.Name from Species, InbredSet where InbredSet.Name = '%s' and InbredSet.SpeciesId = Species.Id" % (group),"/cross/"+group+".json",lambda r: (r["species"],))[0] - logger.debug("retrieve_species result:",result) + result = fetch1("select Species.Name from Species, InbredSet where InbredSet.Name = '%s' and InbredSet.SpeciesId = Species.Id" % (group), "/cross/"+group+".json", lambda r: (r["species"],))[0] + logger.debug("retrieve_species result:", result) return result def retrieve_species_id(group): - result = fetch1("select SpeciesId from InbredSet where Name = '%s'" % (group),"/cross/"+group+".json",lambda r: (r["species_id"],))[0] - logger.debug("retrieve_species_id result:",result) + result = fetch1("select SpeciesId from InbredSet where Name = '%s'" % (group), "/cross/"+group+".json", lambda r: (r["species_id"],))[0] + logger.debug("retrieve_species_id result:", result) return result diff --git a/wqflask/maintenance/gen_select_dataset.py b/wqflask/maintenance/gen_select_dataset.py index 78217587..d12b328f 100644 --- a/wqflask/maintenance/gen_select_dataset.py +++ b/wqflask/maintenance/gen_select_dataset.py @@ -41,7 +41,7 @@ from __future__ import print_function, division import sys # NEW: Note we prepend the current path - otherwise a guix instance of GN2 may be used instead -sys.path.insert(0,'./') +sys.path.insert(0, './') # NEW: import app to avoid a circular dependency on utility.tools from wqflask import app diff --git a/wqflask/maintenance/quantile_normalize.py b/wqflask/maintenance/quantile_normalize.py index 82b695f4..43edfd13 100644 --- a/wqflask/maintenance/quantile_normalize.py +++ b/wqflask/maintenance/quantile_normalize.py @@ -1,7 +1,7 @@ from __future__ import absolute_import, print_function, 
division import sys -sys.path.insert(0,'./') +sys.path.insert(0, './') diff --git a/wqflask/maintenance/set_resource_defaults.py b/wqflask/maintenance/set_resource_defaults.py index 54fd8e7e..d53a255b 100644 --- a/wqflask/maintenance/set_resource_defaults.py +++ b/wqflask/maintenance/set_resource_defaults.py @@ -22,7 +22,7 @@ import sys import json # NEW: Note we prepend the current path - otherwise a guix instance of GN2 may be used instead -sys.path.insert(0,'./') +sys.path.insert(0, './') # NEW: import app to avoid a circular dependency on utility.tools from wqflask import app diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py index c9053dde..4f6e694e 100644 --- a/wqflask/utility/Plot.py +++ b/wqflask/utility/Plot.py @@ -58,7 +58,7 @@ def cformat(d, rank=0): strD = "%2.6f" % d if rank == 0: - while strD[-1] in ('0','.'): + while strD[-1] in ('0', '.'): if strD[-1] == '0' and strD[-2] == '.' and len(strD) <= 4: break elif strD[-1] == '.': @@ -162,7 +162,7 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab j = int((item-xLow)/step) Count[j] += 1 - yLow, yTop, stepY=detScale(0,max(Count)) + yLow, yTop, stepY=detScale(0, max(Count)) #draw data xScale = plotWidth/(xTop-xLow) @@ -174,7 +174,7 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab xc = (dataXY[i]-xLow)*xScale+xLeftOffset yc =-(count-yLow)*yScale+yTopOffset+plotHeight im_drawer.rectangle( - xy=((xc+2,yc),(xc+barWidth-2,yTopOffset+plotHeight)), + xy=((xc+2, yc), (xc+barWidth-2, yTopOffset+plotHeight)), outline=barColor, fill=barColor) #draw drawing region @@ -183,81 +183,81 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab ) #draw scale - scaleFont=ImageFont.truetype(font=COUR_FILE,size=11) + scaleFont=ImageFont.truetype(font=COUR_FILE, size=11) x=xLow for i in range(int(stepX)+1): xc=xLeftOffset+(x-xLow)*xScale im_drawer.line( - xy=((xc,yTopOffset+plotHeight),(xc,yTopOffset+plotHeight+5)), + xy=((xc, yTopOffset+plotHeight), (xc, yTopOffset+plotHeight+5)), fill=axesColor) strX = cformat(d=x, rank=0) im_drawer.text( text=strX, - xy=(xc-im_drawer.textsize(strX,font=scaleFont)[0]/2, - yTopOffset+plotHeight+14),font=scaleFont) + xy=(xc-im_drawer.textsize(strX, font=scaleFont)[0]/2, + yTopOffset+plotHeight+14), font=scaleFont) x+= (xTop - xLow)/stepX y=yLow for i in range(int(stepY)+1): yc=yTopOffset+plotHeight-(y-yLow)*yScale - im_drawer.line(xy=((xLeftOffset,yc),(xLeftOffset-5,yc)), fill=axesColor) + im_drawer.line(xy=((xLeftOffset, yc), (xLeftOffset-5, yc)), fill=axesColor) strY = "%d" %y im_drawer.text( text=strY, - xy=(xLeftOffset-im_drawer.textsize(strY,font=scaleFont)[0]-6,yc+5), + xy=(xLeftOffset-im_drawer.textsize(strY, font=scaleFont)[0]-6, yc+5), font=scaleFont) y+= (yTop - yLow)/stepY #draw label - labelFont=ImageFont.truetype(font=TAHOMA_FILE,size=17) + labelFont=ImageFont.truetype(font=TAHOMA_FILE, size=17) if XLabel: im_drawer.text( text=XLabel, xy=(xLeftOffset+( - plotWidth-im_drawer.textsize(XLabel,font=labelFont)[0])/2.0, + plotWidth-im_drawer.textsize(XLabel, font=labelFont)[0])/2.0, yTopOffset+plotHeight+yBottomOffset-10), - font=labelFont,fill=labelColor) + font=labelFont, fill=labelColor) if YLabel: draw_rotated_text(canvas, text=YLabel, xy=(19, yTopOffset+plotHeight-( plotHeight-im_drawer.textsize( - YLabel,font=labelFont)[0])/2.0), + YLabel, font=labelFont)[0])/2.0), font=labelFont, fill=labelColor, angle=90) - labelFont=ImageFont.truetype(font=VERDANA_FILE,size=16) + 
labelFont=ImageFont.truetype(font=VERDANA_FILE, size=16) if title: im_drawer.text( text=title, xy=(xLeftOffset+(plotWidth-im_drawer.textsize( - title,font=labelFont)[0])/2.0, + title, font=labelFont)[0])/2.0, 20), - font=labelFont,fill=labelColor) + font=labelFont, fill=labelColor) # This function determines the scale of the plot -def detScaleOld(min,max): +def detScaleOld(min, max): if min>=max: return None elif min == -1.0 and max == 1.0: - return [-1.2,1.2,12] + return [-1.2, 1.2, 12] else: a=max-min b=floor(log10(a)) - c=pow(10.0,b) + c=pow(10.0, b) if a < c*5.0: c/=2.0 #print a,b,c low=c*floor(min/c) high=c*ceil(max/c) - return [low,high,round((high-low)/c)] + return [low, high, round((high-low)/c)] def detScale(min=0,max=0): if min>=max: return None elif min == -1.0 and max == 1.0: - return [-1.2,1.2,12] + return [-1.2, 1.2, 12] else: a=max-min if max != 0: @@ -269,7 +269,7 @@ def detScale(min=0,max=0): min -= 0.1*a a=max-min b=floor(log10(a)) - c=pow(10.0,b) + c=pow(10.0, b) low=c*floor(min/c) high=c*ceil(max/c) n = round((high-low)/c) @@ -287,7 +287,7 @@ def detScale(min=0,max=0): high=c*ceil(max/c) n = round((high-low)/c) - return [low,high,n] + return [low, high, n] def bluefunc(x): return 1.0 / (1.0 + exp(-10*(x-0.6))) @@ -296,7 +296,7 @@ def redfunc(x): return 1.0 / (1.0 + exp(10*(x-0.5))) def greenfunc(x): - return 1 - pow(redfunc(x+0.2),2) - bluefunc(x-0.3) + return 1 - pow(redfunc(x+0.2), 2) - bluefunc(x-0.3) def colorSpectrum(n=100): multiple = 10 diff --git a/wqflask/utility/elasticsearch_tools.py b/wqflask/utility/elasticsearch_tools.py index 15cdd0bc..a5580811 100644 --- a/wqflask/utility/elasticsearch_tools.py +++ b/wqflask/utility/elasticsearch_tools.py @@ -59,7 +59,7 @@ def get_elasticsearch_connection(for_user=True): try: assert(ELASTICSEARCH_HOST) assert(ELASTICSEARCH_PORT) - logger.info("ES HOST",ELASTICSEARCH_HOST) + logger.info("ES HOST", ELASTICSEARCH_HOST) es = Elasticsearch([{ "host": ELASTICSEARCH_HOST, "port": ELASTICSEARCH_PORT diff --git a/wqflask/utility/logger.py b/wqflask/utility/logger.py index 510b1041..e904eb94 100644 --- a/wqflask/utility/logger.py +++ b/wqflask/utility/logger.py @@ -42,10 +42,10 @@ class GNLogger: """ - def __init__(self,name): + def __init__(self, name): self.logger = logging.getLogger(name) - def setLevel(self,value): + def setLevel(self, value): """Set the undelying log level""" self.logger.setLevel(value) @@ -54,7 +54,7 @@ class GNLogger: level=num to filter on LOG_LEVEL_DEBUG. """ - self.collect(self.logger.debug,*args) + self.collect(self.logger.debug, *args) def debug20(self,*args): """Call logging.debug for multiple args. Use level=num to filter on @@ -63,15 +63,15 @@ LOG_LEVEL_DEBUG (NYI). """ if level <= LOG_LEVEL_DEBUG: if self.logger.getEffectiveLevel() < 20: - self.collect(self.logger.debug,*args) + self.collect(self.logger.debug, *args) def info(self,*args): """Call logging.info for multiple args""" - self.collect(self.logger.info,*args) + self.collect(self.logger.info, *args) def warning(self,*args): """Call logging.warning for multiple args""" - self.collect(self.logger.warning,*args) + self.collect(self.logger.warning, *args) # self.logger.warning(self.collect(*args)) def error(self,*args): @@ -79,13 +79,13 @@ LOG_LEVEL_DEBUG (NYI). 
now = datetime.datetime.utcnow() time_str = now.strftime('%H:%M:%S UTC %Y%m%d') l = [time_str]+list(args) - self.collect(self.logger.error,*l) + self.collect(self.logger.error, *l) def infof(self,*args): """Call logging.info for multiple args lazily""" # only evaluate function when logging if self.logger.getEffectiveLevel() < 30: - self.collectf(self.logger.debug,*args) + self.collectf(self.logger.debug, *args) def debugf(self,level=0,*args): """Call logging.debug for multiple args lazily and handle @@ -95,15 +95,15 @@ LOG_LEVEL_DEBUG (NYI). # only evaluate function when logging if level <= LOG_LEVEL_DEBUG: if self.logger.getEffectiveLevel() < 20: - self.collectf(self.logger.debug,*args) + self.collectf(self.logger.debug, *args) def sql(self, sqlcommand, fun = None): """Log SQL command, optionally invoking a timed fun""" if LOG_SQL: caller = stack()[1][3] - if caller in ['fetchone','fetch1','fetchall']: + if caller in ['fetchone', 'fetch1', 'fetchall']: caller = stack()[2][3] - self.info(caller,sqlcommand) + self.info(caller, sqlcommand) if fun: result = fun(sqlcommand) if LOG_SQL: @@ -119,7 +119,7 @@ LOG_LEVEL_DEBUG (NYI). if isinstance(a, str): out = out + a else: - out = out + pf(a,width=160) + out = out + pf(a, width=160) fun(out) def collectf(self,fun,*args): @@ -134,7 +134,7 @@ LOG_LEVEL_DEBUG (NYI). if isinstance(a, str): out = out + a else: - out = out + pf(a,width=160) + out = out + pf(a, width=160) fun(out) # Get the module logger. You can override log levels at the diff --git a/wqflask/utility/pillow_utils.py b/wqflask/utility/pillow_utils.py index dfbf3e19..1e2ed075 100644 --- a/wqflask/utility/pillow_utils.py +++ b/wqflask/utility/pillow_utils.py @@ -10,9 +10,9 @@ WHITE = ImageColor.getrgb("white") def draw_rotated_text(canvas, text, font, xy, fill=BLACK, angle=-90): # type: (Image, str, ImageFont, tuple, ImageColor, int) """Utility function draw rotated text""" - tmp_img = Image.new("RGBA", font.getsize(text), color=(0,0,0,0)) + tmp_img = Image.new("RGBA", font.getsize(text), color=(0, 0, 0, 0)) draw_text = ImageDraw.Draw(tmp_img) - draw_text.text(text=text, xy=(0,0), font=font, fill=fill) + draw_text.text(text=text, xy=(0, 0), font=font, fill=fill) tmp_img2 = tmp_img.rotate(angle, expand=1) tmp_img2.save("/tmp/{}.png".format(text), format="png") canvas.paste(im=tmp_img2, box=tuple([int(i) for i in xy])) diff --git a/wqflask/utility/startup_config.py b/wqflask/utility/startup_config.py index 42ead709..f1aaebb6 100644 --- a/wqflask/utility/startup_config.py +++ b/wqflask/utility/startup_config.py @@ -36,4 +36,4 @@ def app_config(): # import utility.elasticsearch_tools as es # es.test_elasticsearch_connection() - print(("GN2 is running. Visit %s[http://localhost:%s/%s](%s)" % (BLUE,str(port),ENDC,get_setting("WEBSERVER_URL")))) + print(("GN2 is running. 
Visit %s[http://localhost:%s/%s](%s)" % (BLUE, str(port), ENDC, get_setting("WEBSERVER_URL")))) diff --git a/wqflask/utility/svg.py b/wqflask/utility/svg.py index 874ada9d..19eda0ce 100644 --- a/wqflask/utility/svg.py +++ b/wqflask/utility/svg.py @@ -447,7 +447,7 @@ class rect(SVGelement): if width == None or height == None: raise ValueError('both height and width are required') - SVGelement.__init__(self,'rect',{'width':width,'height':height},**args) + SVGelement.__init__(self, 'rect', {'width':width,'height':height}, **args) if x!=None: self.attributes['x']=x if y!=None: @@ -468,7 +468,7 @@ class ellipse(SVGelement): if rx==None or ry== None: raise ValueError('both rx and ry are required') - SVGelement.__init__(self,'ellipse',{'rx':rx,'ry':ry},**args) + SVGelement.__init__(self, 'ellipse', {'rx':rx,'ry':ry}, **args) if cx!=None: self.attributes['cx']=cx if cy!=None: @@ -489,7 +489,7 @@ class circle(SVGelement): def __init__(self,cx=None,cy=None,r=None,fill=None,stroke=None,stroke_width=None,**args): if r==None: raise ValueError('r is required') - SVGelement.__init__(self,'circle',{'r':r},**args) + SVGelement.__init__(self, 'circle', {'r':r}, **args) if cx!=None: self.attributes['cx']=cx if cy!=None: @@ -508,7 +508,7 @@ class point(circle): very small rectangle if you use many points because a circle is difficult to render. """ def __init__(self,x,y,fill='black',**args): - circle.__init__(self,x,y,1,fill,**args) + circle.__init__(self, x, y, 1, fill, **args) class line(SVGelement): """l=line(x1,y1,x2,y2,stroke,stroke_width,**args) @@ -516,7 +516,7 @@ class line(SVGelement): A line is defined by a begin x,y pair and an end x,y pair """ def __init__(self,x1=None,y1=None,x2=None,y2=None,stroke=None,stroke_width=None,**args): - SVGelement.__init__(self,'line',**args) + SVGelement.__init__(self, 'line', **args) if x1!=None: self.attributes['x1']=x1 if y1!=None: @@ -536,7 +536,7 @@ class polyline(SVGelement): a polyline is defined by a list of xy pairs """ def __init__(self,points,fill=None,stroke=None,stroke_width=None,**args): - SVGelement.__init__(self,'polyline',{'points':_xypointlist(points)},**args) + SVGelement.__init__(self, 'polyline', {'points':_xypointlist(points)}, **args) if fill!=None: self.attributes['fill']=fill if stroke_width!=None: @@ -550,7 +550,7 @@ class polygon(SVGelement): a polygon is defined by a list of xy pairs """ def __init__(self,points,fill=None,stroke=None,stroke_width=None,**args): - SVGelement.__init__(self,'polygon',{'points':_xypointlist(points)},**args) + SVGelement.__init__(self, 'polygon', {'points':_xypointlist(points)}, **args) if fill!=None: self.attributes['fill']=fill if stroke_width!=None: @@ -564,7 +564,7 @@ class path(SVGelement): a path is defined by a path object and optional width, stroke and fillcolor """ def __init__(self,pathdata,fill=None,stroke=None,stroke_width=None,id=None,**args): - SVGelement.__init__(self,'path',{'d':str(pathdata)},**args) + SVGelement.__init__(self, 'path', {'d':str(pathdata)}, **args) if stroke!=None: self.attributes['stroke']=stroke if fill!=None: @@ -581,7 +581,7 @@ class text(SVGelement): a text element can bge used for displaying text on the screen """ def __init__(self,x=None,y=None,text=None,font_size=None,font_family=None,text_anchor=None,**args): - SVGelement.__init__(self,'text',**args) + SVGelement.__init__(self, 'text', **args) if x!=None: self.attributes['x']=x if y!=None: @@ -602,7 +602,7 @@ class textpath(SVGelement): a textpath places a text on a path which is referenced by a link. 
""" def __init__(self,link,text=None,**args): - SVGelement.__init__(self,'textPath',{'xlink:href':link},**args) + SVGelement.__init__(self, 'textPath', {'xlink:href':link}, **args) if text!=None: self.text=text @@ -614,7 +614,7 @@ class pattern(SVGelement): in x and y to cover the areas to be painted. """ def __init__(self,x=None,y=None,width=None,height=None,patternUnits=None,**args): - SVGelement.__init__(self,'pattern',**args) + SVGelement.__init__(self, 'pattern', **args) if x!=None: self.attributes['x']=x if y!=None: @@ -633,7 +633,7 @@ class title(SVGelement): add at least one to the root svg element """ def __init__(self,text=None,**args): - SVGelement.__init__(self,'title',**args) + SVGelement.__init__(self, 'title', **args) if text!=None: self.text=text @@ -644,7 +644,7 @@ class description(SVGelement): Add this element before adding other elements. """ def __init__(self,text=None,**args): - SVGelement.__init__(self,'desc',**args) + SVGelement.__init__(self, 'desc', **args) if text!=None: self.text=text @@ -655,7 +655,7 @@ class lineargradient(SVGelement): stop elements van be added to define the gradient colors. """ def __init__(self,x1=None,y1=None,x2=None,y2=None,id=None,**args): - SVGelement.__init__(self,'linearGradient',**args) + SVGelement.__init__(self, 'linearGradient', **args) if x1!=None: self.attributes['x1']=x1 if y1!=None: @@ -674,7 +674,7 @@ class radialgradient(SVGelement): stop elements van be added to define the gradient colors. """ def __init__(self,cx=None,cy=None,r=None,fx=None,fy=None,id=None,**args): - SVGelement.__init__(self,'radialGradient',**args) + SVGelement.__init__(self, 'radialGradient', **args) if cx!=None: self.attributes['cx']=cx if cy!=None: @@ -694,7 +694,7 @@ class stop(SVGelement): Puts a stop color at the specified radius """ def __init__(self,offset,stop_color=None,**args): - SVGelement.__init__(self,'stop',{'offset':offset},**args) + SVGelement.__init__(self, 'stop', {'offset':offset}, **args) if stop_color!=None: self.attributes['stop-color']=stop_color @@ -704,7 +704,7 @@ class style(SVGelement): Add a CDATA element to this element for defing in line stylesheets etc.. """ def __init__(self,type,cdata=None,**args): - SVGelement.__init__(self,'style',{'type':type},cdata=cdata, **args) + SVGelement.__init__(self, 'style', {'type':type}, cdata=cdata, **args) class image(SVGelement): @@ -715,7 +715,7 @@ class image(SVGelement): def __init__(self,url,x=None,y=None,width=None,height=None,**args): if width==None or height==None: raise ValueError('both height and width are required') - SVGelement.__init__(self,'image',{'xlink:href':url,'width':width,'height':height},**args) + SVGelement.__init__(self, 'image', {'xlink:href':url,'width':width,'height':height}, **args) if x!=None: self.attributes['x']=x if y!=None: @@ -727,7 +727,7 @@ class cursor(SVGelement): defines a custom cursor for a element or a drawing """ def __init__(self,url,**args): - SVGelement.__init__(self,'cursor',{'xlink:href':url},**args) + SVGelement.__init__(self, 'cursor', {'xlink:href':url}, **args) class marker(SVGelement): @@ -737,7 +737,7 @@ class marker(SVGelement): add an element to it which should be used as a marker. 
""" def __init__(self,id=None,viewBox=None,refx=None,refy=None,markerWidth=None,markerHeight=None,**args): - SVGelement.__init__(self,'marker',**args) + SVGelement.__init__(self, 'marker', **args) if id!=None: self.attributes['id']=id if viewBox!=None: @@ -758,7 +758,7 @@ class group(SVGelement): g.addElement(SVGelement) """ def __init__(self,id=None,**args): - SVGelement.__init__(self,'g',**args) + SVGelement.__init__(self, 'g', **args) if id!=None: self.attributes['id']=id @@ -772,7 +772,7 @@ class symbol(SVGelement): """ def __init__(self,id=None,viewBox=None,**args): - SVGelement.__init__(self,'symbol',**args) + SVGelement.__init__(self, 'symbol', **args) if id!=None: self.attributes['id']=id if viewBox!=None: @@ -784,7 +784,7 @@ class defs(SVGelement): container for defining elements """ def __init__(self,**args): - SVGelement.__init__(self,'defs',**args) + SVGelement.__init__(self, 'defs', **args) class switch(SVGelement): """sw=switch(**args) @@ -794,7 +794,7 @@ class switch(SVGelement): Refer to the SVG specification for details. """ def __init__(self,**args): - SVGelement.__init__(self,'switch',**args) + SVGelement.__init__(self, 'switch', **args) class use(SVGelement): @@ -803,7 +803,7 @@ class use(SVGelement): references a symbol by linking to its id and its position, height and width """ def __init__(self,link,x=None,y=None,width=None,height=None,**args): - SVGelement.__init__(self,'use',{'xlink:href':link},**args) + SVGelement.__init__(self, 'use', {'xlink:href':link}, **args) if x!=None: self.attributes['x']=x if y!=None: @@ -822,14 +822,14 @@ class link(SVGelement): a.addElement(SVGelement) """ def __init__(self,link='',**args): - SVGelement.__init__(self,'a',{'xlink:href':link},**args) + SVGelement.__init__(self, 'a', {'xlink:href':link}, **args) class view(SVGelement): """v=view(id,**args) a view can be used to create a view with different attributes""" def __init__(self,id=None,**args): - SVGelement.__init__(self,'view',**args) + SVGelement.__init__(self, 'view', **args) if id!=None: self.attributes['id']=id @@ -840,7 +840,7 @@ class script(SVGelement): """ def __init__(self,type,cdata=None,**args): - SVGelement.__init__(self,'script',{'type':type},cdata=cdata,**args) + SVGelement.__init__(self, 'script', {'type':type}, cdata=cdata, **args) class animate(SVGelement): """an=animate(attribute,from,to,during,**args) @@ -848,7 +848,7 @@ class animate(SVGelement): animates an attribute. """ def __init__(self,attribute,fr=None,to=None,dur=None,**args): - SVGelement.__init__(self,'animate',{'attributeName':attribute},**args) + SVGelement.__init__(self, 'animate', {'attributeName':attribute}, **args) if fr!=None: self.attributes['from']=fr if to!=None: @@ -862,7 +862,7 @@ class animateMotion(SVGelement): animates a SVGelement over the given path in dur seconds """ def __init__(self,pathdata,dur,**args): - SVGelement.__init__(self,'animateMotion',**args) + SVGelement.__init__(self, 'animateMotion', **args) if pathdata!=None: self.attributes['path']=str(pathdata) if dur!=None: @@ -874,7 +874,7 @@ class animateTransform(SVGelement): transform an element from and to a value. 
""" def __init__(self,type=None,fr=None,to=None,dur=None,**args): - SVGelement.__init__(self,'animateTransform',{'attributeName':'transform'},**args) + SVGelement.__init__(self, 'animateTransform', {'attributeName':'transform'}, **args) # As far as I know the attributeName is always transform if type!=None: self.attributes['type']=type @@ -890,7 +890,7 @@ class animateColor(SVGelement): Animates the color of a element """ def __init__(self,attribute,type=None,fr=None,to=None,dur=None,**args): - SVGelement.__init__(self,'animateColor',{'attributeName':attribute},**args) + SVGelement.__init__(self, 'animateColor', {'attributeName':attribute}, **args) if type!=None: self.attributes['type']=type if fr!=None: @@ -905,7 +905,7 @@ class set(SVGelement): sets an attribute to a value for a """ def __init__(self,attribute,to=None,dur=None,**args): - SVGelement.__init__(self,'set',{'attributeName':attribute},**args) + SVGelement.__init__(self, 'set', {'attributeName':attribute}, **args) if to!=None: self.attributes['to']=to if dur!=None: @@ -929,7 +929,7 @@ class svg(SVGelement): d.toXml() """ def __init__(self,viewBox=None, width=None, height=None,**args): - SVGelement.__init__(self,'svg',**args) + SVGelement.__init__(self, 'svg', **args) if viewBox!=None: self.attributes['viewBox']=_viewboxlist(viewBox) if width!=None: @@ -952,7 +952,7 @@ class drawing: def __init__(self, entity={}): self.svg=None self.entity = entity - def setSVG(self,svg): + def setSVG(self, svg): self.svg=svg # Voeg een element toe aan de grafiek toe. if use_dom_implementation==0: @@ -967,12 +967,12 @@ class drawing: xml.write("\n" % (item, self.entity[item])) xml.write("]") xml.write(">\n") - self.svg.toXml(0,xml) + self.svg.toXml(0, xml) if not filename: if compress: import gzip f=cStringIO.StringIO() - zf=gzip.GzipFile(fileobj=f,mode='wb') + zf=gzip.GzipFile(fileobj=f, mode='wb') zf.write(xml.getvalue()) zf.close() f.seek(0) @@ -982,11 +982,11 @@ class drawing: else: if filename[-4:]=='svgz': import gzip - f=gzip.GzipFile(filename=filename,mode="wb", compresslevel=9) + f=gzip.GzipFile(filename=filename, mode="wb", compresslevel=9) f.write(xml.getvalue()) f.close() else: - f=file(filename,'w') + f=file(filename, 'w') f.write(xml.getvalue()) f.close() @@ -997,40 +997,40 @@ class drawing: writes a svg drawing to the screen or to a file compresses if filename ends with svgz or if compress is true """ - doctype = implementation.createDocumentType('svg',"-//W3C//DTD SVG 1.0//EN""",'http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd ') + doctype = implementation.createDocumentType('svg', "-//W3C//DTD SVG 1.0//EN""", 'http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd ') global root # root is defined global so it can be used by the appender. Its also possible to use it as an arugument but # that is a bit messy. - root=implementation.createDocument(None,None,doctype) + root=implementation.createDocument(None, None, doctype) # Create the xml document. global appender - def appender(element,elementroot): + def appender(element, elementroot): """This recursive function appends elements to an element and sets the attributes and type. 
It stops when alle elements have been appended""" if element.namespace: - e=root.createElementNS(element.namespace,element.type) + e=root.createElementNS(element.namespace, element.type) else: e=root.createElement(element.type) if element.text: textnode=root.createTextNode(element.text) e.appendChild(textnode) for attribute in list(element.attributes.keys()): #in element.attributes is supported from python 2.2 - e.setAttribute(attribute,str(element.attributes[attribute])) + e.setAttribute(attribute, str(element.attributes[attribute])) if element.elements: for el in element.elements: - e=appender(el,e) + e=appender(el, e) elementroot.appendChild(e) return elementroot - root=appender(self.svg,root) + root=appender(self.svg, root) if not filename: import cStringIO xml=cStringIO.StringIO() - PrettyPrint(root,xml) + PrettyPrint(root, xml) if compress: import gzip f=cStringIO.StringIO() - zf=gzip.GzipFile(fileobj=f,mode='wb') + zf=gzip.GzipFile(fileobj=f, mode='wb') zf.write(xml.getvalue()) zf.close() f.seek(0) @@ -1043,13 +1043,13 @@ class drawing: import gzip import cStringIO xml=cStringIO.StringIO() - PrettyPrint(root,xml) - f=gzip.GzipFile(filename=filename,mode='wb',compresslevel=9) + PrettyPrint(root, xml) + f=gzip.GzipFile(filename=filename, mode='wb', compresslevel=9) f.write(xml.getvalue()) f.close() else: - f=open(filename,'w') - PrettyPrint(root,f) + f=open(filename, 'w') + PrettyPrint(root, f) f.close() except: print(("Cannot write SVG file: " + filename)) @@ -1070,32 +1070,32 @@ if __name__=='__main__': d=drawing() - s=svg((0,0,100,100)) - r=rect(-100,-100,300,300,'cyan') + s=svg((0, 0, 100, 100)) + r=rect(-100, -100, 300, 300, 'cyan') s.addElement(r) t=title('SVGdraw Demo') s.addElement(t) g=group('animations') - e=ellipse(0,0,5,2) + e=ellipse(0, 0, 5, 2) g.addElement(e) - c=circle(0,0,1,'red') + c=circle(0, 0, 1, 'red') g.addElement(c) - pd=pathdata(0,-10) + pd=pathdata(0, -10) for i in range(6): - pd.relsmbezier(10,5,0,10) - pd.relsmbezier(-10,5,0,10) - an=animateMotion(pd,10) + pd.relsmbezier(10, 5, 0, 10) + pd.relsmbezier(-10, 5, 0, 10) + an=animateMotion(pd, 10) an.attributes['rotate']='auto-reverse' an.attributes['repeatCount']="indefinite" g.addElement(an) s.addElement(g) - for i in range(20,120,20): - u=use('#animations',i,0) + for i in range(20, 120, 20): + u=use('#animations', i, 0) s.addElement(u) - for i in range(0,120,20): - for j in range(5,105,10): - c=circle(i,j,1,'red','black',.5) + for i in range(0, 120, 20): + for j in range(5, 105, 10): + c=circle(i, j, 1, 'red', 'black', .5) s.addElement(c) d.setSVG(s) diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py index 51a87fe1..68ef0f04 100644 --- a/wqflask/utility/tools.py +++ b/wqflask/utility/tools.py @@ -15,7 +15,7 @@ OVERRIDES = {} def app_set(command_id, value): """Set application wide value""" - app.config.setdefault(command_id,value) + app.config.setdefault(command_id, value) return value def get_setting(command_id,guess=None): @@ -45,7 +45,7 @@ def get_setting(command_id,guess=None): def value(command): if command: # sys.stderr.write("Found "+command+"\n") - app_set(command_id,command) + app_set(command_id, command) return command else: return None @@ -68,7 +68,7 @@ def get_setting(command_id,guess=None): def get_setting_bool(id): v = get_setting(id) - if v not in [0,False,'False','FALSE',None]: + if v not in [0, False, 'False', 'FALSE', None]: return True return False @@ -108,16 +108,16 @@ def js_path(module=None): raise "No JS path found for "+module+" (if not in Guix check JS_GN_PATH)" def 
reaper_command(guess=None): - return get_setting("REAPER_COMMAND",guess) + return get_setting("REAPER_COMMAND", guess) def gemma_command(guess=None): - return assert_bin(get_setting("GEMMA_COMMAND",guess)) + return assert_bin(get_setting("GEMMA_COMMAND", guess)) def gemma_wrapper_command(guess=None): - return assert_bin(get_setting("GEMMA_WRAPPER_COMMAND",guess)) + return assert_bin(get_setting("GEMMA_WRAPPER_COMMAND", guess)) def plink_command(guess=None): - return assert_bin(get_setting("PLINK_COMMAND",guess)) + return assert_bin(get_setting("PLINK_COMMAND", guess)) def flat_file_exists(subdir): base = get_setting("GENENETWORK_FILES") @@ -180,7 +180,7 @@ def locate(name, subdir=None): raise Exception("Can not locate "+name+" in "+base) def locate_phewas(name, subdir=None): - return locate(name,'/phewas/'+subdir) + return locate(name, '/phewas/'+subdir) def locate_ignore_error(name, subdir=None): """ @@ -204,7 +204,7 @@ def tempdir(): """ Get UNIX TMPDIR by default """ - return valid_path(get_setting("TMPDIR","/tmp")) + return valid_path(get_setting("TMPDIR", "/tmp")) BLUE = '\033[94m' GREEN = '\033[92m' @@ -225,9 +225,9 @@ def show_settings(): keylist.sort() for k in keylist: try: - print(("%s: %s%s%s%s" % (k,BLUE,BOLD,get_setting(k),ENDC))) + print(("%s: %s%s%s%s" % (k, BLUE, BOLD, get_setting(k), ENDC))) except: - print(("%s: %s%s%s%s" % (k,GREEN,BOLD,app.config[k],ENDC))) + print(("%s: %s%s%s%s" % (k, GREEN, BOLD, app.config[k], ENDC))) # Cached values @@ -279,10 +279,10 @@ SMTP_CONNECT = get_setting('SMTP_CONNECT') SMTP_USERNAME = get_setting('SMTP_USERNAME') SMTP_PASSWORD = get_setting('SMTP_PASSWORD') -REAPER_COMMAND = app_set("REAPER_COMMAND",reaper_command()) -GEMMA_COMMAND = app_set("GEMMA_COMMAND",gemma_command()) +REAPER_COMMAND = app_set("REAPER_COMMAND", reaper_command()) +GEMMA_COMMAND = app_set("GEMMA_COMMAND", gemma_command()) assert(GEMMA_COMMAND is not None) -PLINK_COMMAND = app_set("PLINK_COMMAND",plink_command()) +PLINK_COMMAND = app_set("PLINK_COMMAND", plink_command()) GEMMA_WRAPPER_COMMAND = gemma_wrapper_command() TEMPDIR = tempdir() # defaults to UNIX TMPDIR assert_dir(TEMPDIR) @@ -295,11 +295,11 @@ assert_dir(JS_GUIX_PATH+'/cytoscape-panzoom') CSS_PATH = JS_GUIX_PATH # The CSS is bundled together with the JS # assert_dir(JS_PATH) -JS_TWITTER_POST_FETCHER_PATH = get_setting("JS_TWITTER_POST_FETCHER_PATH",js_path("javascript-twitter-post-fetcher")) +JS_TWITTER_POST_FETCHER_PATH = get_setting("JS_TWITTER_POST_FETCHER_PATH", js_path("javascript-twitter-post-fetcher")) assert_dir(JS_TWITTER_POST_FETCHER_PATH) assert_file(JS_TWITTER_POST_FETCHER_PATH+"/js/twitterFetcher_min.js") -JS_CYTOSCAPE_PATH = get_setting("JS_CYTOSCAPE_PATH",js_path("cytoscape")) +JS_CYTOSCAPE_PATH = get_setting("JS_CYTOSCAPE_PATH", js_path("cytoscape")) assert_dir(JS_CYTOSCAPE_PATH) assert_file(JS_CYTOSCAPE_PATH+'/cytoscape.min.js') diff --git a/wqflask/utility/webqtlUtil.py b/wqflask/utility/webqtlUtil.py index 79991149..77cc3416 100644 --- a/wqflask/utility/webqtlUtil.py +++ b/wqflask/utility/webqtlUtil.py @@ -41,22 +41,22 @@ ParInfo ={ 'C57BL-6JxC57BL-6NJF2':['', '', 'C57BL/6J', 'C57BL/6NJ'], 'BXD300':['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'], 'B6BTBRF2':['B6BTBRF1', 'BTBRB6F1', 'C57BL/6J', 'BTBRT<+>tf/J'], -'BHHBF2':['B6HF2','HB6F2','C57BL/6J','C3H/HeJ'], -'BHF2':['B6HF2','HB6F2','C57BL/6J','C3H/HeJ'], +'BHHBF2':['B6HF2', 'HB6F2', 'C57BL/6J', 'C3H/HeJ'], +'BHF2':['B6HF2', 'HB6F2', 'C57BL/6J', 'C3H/HeJ'], 'B6D2F2':['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'], 'BDF2-1999':['B6D2F2', 'D2B6F2', 
'C57BL/6J', 'DBA/2J'], 'BDF2-2005':['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'], -'CTB6F2':['CTB6F2','B6CTF2','C57BL/6J','Castaneous'], +'CTB6F2':['CTB6F2', 'B6CTF2', 'C57BL/6J', 'Castaneous'], 'CXB':['CBF1', 'BCF1', 'C57BL/6ByJ', 'BALB/cByJ'], 'AXBXA':['ABF1', 'BAF1', 'C57BL/6J', 'A/J'], 'AXB':['ABF1', 'BAF1', 'C57BL/6J', 'A/J'], 'BXA':['BAF1', 'ABF1', 'C57BL/6J', 'A/J'], 'LXS':['LSF1', 'SLF1', 'ISS', 'ILS'], 'HXBBXH':['SHR_BNF1', 'BN_SHRF1', 'BN-Lx/Cub', 'SHR/OlaIpcv'], -'BayXSha':['BayXShaF1', 'ShaXBayF1', 'Bay-0','Shahdara'], -'ColXBur':['ColXBurF1', 'BurXColF1', 'Col-0','Bur-0'], -'ColXCvi':['ColXCviF1', 'CviXColF1', 'Col-0','Cvi'], -'SXM':['SMF1', 'MSF1', 'Steptoe','Morex'], +'BayXSha':['BayXShaF1', 'ShaXBayF1', 'Bay-0', 'Shahdara'], +'ColXBur':['ColXBurF1', 'BurXColF1', 'Col-0', 'Bur-0'], +'ColXCvi':['ColXCviF1', 'CviXColF1', 'Col-0', 'Cvi'], +'SXM':['SMF1', 'MSF1', 'Steptoe', 'Morex'], 'HRDP':['SHR_BNF1', 'BN_SHRF1', 'BN-Lx/Cub', 'SHR/OlaIpcv'] } @@ -91,7 +91,7 @@ def readLineCSV(line): ### dcrowell July 2008 returnList[0]=returnList[0][1:] return returnList -def cmpEigenValue(A,B): +def cmpEigenValue(A, B): try: if A[0] > B[0]: return -1 diff --git a/wqflask/wqflask/api/router.py b/wqflask/wqflask/api/router.py index 3fa1d5ba..b81da0dc 100644 --- a/wqflask/wqflask/api/router.py +++ b/wqflask/wqflask/api/router.py @@ -558,10 +558,10 @@ def trait_sample_data(dataset_name, trait_name, file_format = "json"): sample_list = [] for sample in sample_data: sample_dict = { - "sample_name" : sample[0], - "sample_name_2" : sample[1], - "value" : sample[2], - "data_id" : sample[3], + "sample_name": sample[0], + "sample_name_2": sample[1], + "value": sample[2], + "data_id": sample[3], } if sample[4]: sample_dict["se"] = sample[4] diff --git a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py index 15a21ee6..4116e2df 100644 --- a/wqflask/wqflask/correlation/show_corr_results.py +++ b/wqflask/wqflask/correlation/show_corr_results.py @@ -108,17 +108,17 @@ class CorrelationResults(object): self.sample_data = {} self.corr_type = start_vars['corr_type'] self.corr_method = start_vars['corr_sample_method'] - self.min_expr = get_float(start_vars,'min_expr') - self.p_range_lower = get_float(start_vars,'p_range_lower',-1.0) - self.p_range_upper = get_float(start_vars,'p_range_upper',1.0) + self.min_expr = get_float(start_vars, 'min_expr') + self.p_range_lower = get_float(start_vars, 'p_range_lower', -1.0) + self.p_range_upper = get_float(start_vars, 'p_range_upper', 1.0) if ('loc_chr' in start_vars and 'min_loc_mb' in start_vars and 'max_loc_mb' in start_vars): - self.location_chr = get_string(start_vars,'loc_chr') - self.min_location_mb = get_int(start_vars,'min_loc_mb') - self.max_location_mb = get_int(start_vars,'max_loc_mb') + self.location_chr = get_string(start_vars, 'loc_chr') + self.min_location_mb = get_int(start_vars, 'min_loc_mb') + self.max_location_mb = get_int(start_vars, 'max_loc_mb') else: self.location_chr = self.min_location_mb = self.max_location_mb = None diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py index 832746bb..b582cd23 100644 --- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py +++ b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py @@ -275,8 +275,8 @@ def zScore(trait_data_array): i = 0 for data in trait_data_array: N = len(data) - S = reduce(lambda x,y: x+y, data, 0.) - SS = reduce(lambda x,y: x+y*y, data, 0.) 
+ S = reduce(lambda x, y: x+y, data, 0.) + SS = reduce(lambda x, y: x+y*y, data, 0.) mean = S/N var = SS - S*S/N stdev = math.sqrt(var/(N-1)) @@ -294,7 +294,7 @@ def sortEigenVectors(vector): combines = [] i = 0 for item in eigenValues: - combines.append([eigenValues[i],eigenVectors[i]]) + combines.append([eigenValues[i], eigenVectors[i]]) i += 1 combines.sort(webqtlUtil.cmpEigenValue) A = [] @@ -302,7 +302,7 @@ def sortEigenVectors(vector): for item in combines: A.append(item[0]) B.append(item[1]) - sum = reduce(lambda x,y: x+y, A, 0.0) + sum = reduce(lambda x, y: x+y, A, 0.0) A = [x*100.0/sum for x in A] return [A, B] except: diff --git a/wqflask/wqflask/do_search.py b/wqflask/wqflask/do_search.py index 1e15d28f..cc9c1860 100644 --- a/wqflask/wqflask/do_search.py +++ b/wqflask/wqflask/do_search.py @@ -46,8 +46,8 @@ class DoSearch(object): def handle_wildcard(self, str): keyword = str.strip() - keyword = keyword.replace("*",".*") - keyword = keyword.replace("?",".") + keyword = keyword.replace("*", ".*") + keyword = keyword.replace("?", ".") return keyword diff --git a/wqflask/wqflask/external_tools/send_to_geneweaver.py b/wqflask/wqflask/external_tools/send_to_geneweaver.py index 7a5dba73..9844bab4 100644 --- a/wqflask/wqflask/external_tools/send_to_geneweaver.py +++ b/wqflask/wqflask/external_tools/send_to_geneweaver.py @@ -54,10 +54,10 @@ class SendToGeneWeaver(object): trait_name_list = get_trait_name_list(self.trait_list) self.hidden_vars = { - 'client' : "genenetwork", - 'species' : species_name, - 'idtype' : self.chip_name, - 'list' : string.join(trait_name_list, ","), + 'client': "genenetwork", + 'species': species_name, + 'idtype': self.chip_name, + 'list': string.join(trait_name_list, ","), } def get_trait_name_list(trait_list): diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py index c8b9c405..f282b010 100644 --- a/wqflask/wqflask/marker_regression/display_mapping_results.py +++ b/wqflask/wqflask/marker_regression/display_mapping_results.py @@ -27,8 +27,8 @@ import datetime import string from math import * -from PIL import (Image,ImageDraw,ImageFont,ImageColor) -import sys,os +from PIL import (Image, ImageDraw, ImageFont, ImageColor) +import sys, os import cPickle import httplib import json @@ -426,7 +426,7 @@ class DisplayMappingResults(object): else: continue samplelist = list(self.genotype.prgy) - for j,_geno in enumerate (self.genotype[0][1].genotype): + for j, _geno in enumerate (self.genotype[0][1].genotype): for item in smd: if item.name == samplelist[j]: self.NR_INDIVIDUALS = self.NR_INDIVIDUALS + 1 @@ -518,7 +518,7 @@ class DisplayMappingResults(object): #Scales plot differently for high resolution if self.draw2X: - intCanvasX2 = Image.new("RGBA", size=(self.graphWidth*2,self.graphHeight*2)) + intCanvasX2 = Image.new("RGBA", size=(self.graphWidth*2, self.graphHeight*2)) gifmapX2 = self.plotIntMapping(intCanvasX2, startMb = self.startMb, endMb = self.endMb, showLocusForm= showLocusForm, zoom=2) intCanvasX2.save( "{}.png".format( @@ -745,17 +745,17 @@ class DisplayMappingResults(object): bootScale = bootScale[:-1] + [highestPercent] bootOffset = 50*fontZoom - bootScaleFont=ImageFont.truetype(font=VERDANA_FILE,size=13*fontZoom) + bootScaleFont=ImageFont.truetype(font=VERDANA_FILE, size=13*fontZoom) im_drawer.rectangle( xy=((canvas.size[0]-bootOffset, yZero-bootHeightThresh), - (canvas.size[0]-bootOffset-15*zoom,yZero)), + (canvas.size[0]-bootOffset-15*zoom, yZero)), fill = YELLOW, 
outline=BLACK) im_drawer.line( xy=((canvas.size[0]-bootOffset+4, yZero), (canvas.size[0]-bootOffset, yZero)), fill=BLACK) TEXT_Y_DISPLACEMENT = -8 - im_drawer.text(xy=(canvas.size[0]-bootOffset+10,yZero+TEXT_Y_DISPLACEMENT), text='0%', + im_drawer.text(xy=(canvas.size[0]-bootOffset+10, yZero+TEXT_Y_DISPLACEMENT), text='0%', font=bootScaleFont, fill=BLACK) for item in bootScale: @@ -763,10 +763,10 @@ class DisplayMappingResults(object): continue bootY = yZero-bootHeightThresh*item/highestPercent im_drawer.line( - xy=((canvas.size[0]-bootOffset+4,bootY), - (canvas.size[0]-bootOffset,bootY)), + xy=((canvas.size[0]-bootOffset+4, bootY), + (canvas.size[0]-bootOffset, bootY)), fill=BLACK) - im_drawer.text(xy=(canvas.size[0]-bootOffset+10,bootY+TEXT_Y_DISPLACEMENT), + im_drawer.text(xy=(canvas.size[0]-bootOffset+10, bootY+TEXT_Y_DISPLACEMENT), text='%2.1f'%item, font=bootScaleFont, fill=BLACK) if self.legendChecked: @@ -775,7 +775,7 @@ class DisplayMappingResults(object): smallLabelFont = ImageFont.truetype(font=TREBUC_FILE, size=12*fontZoom) leftOffset = xLeftOffset+(nCol-1)*200 im_drawer.rectangle( - xy=((leftOffset,startPosY-6), (leftOffset+12,startPosY+6)), + xy=((leftOffset, startPosY-6), (leftOffset+12, startPosY+6)), fill=YELLOW, outline=BLACK) im_drawer.text(xy=(leftOffset+ 20, startPosY+TEXT_Y_DISPLACEMENT), text='Frequency of the Peak LRS', @@ -872,7 +872,7 @@ class DisplayMappingResults(object): TEXT_Y_DISPLACEMENT = -8 im_drawer.text( text="Sequence Site", - xy=(leftOffset+15,startPosY+TEXT_Y_DISPLACEMENT), font=smallLabelFont, + xy=(leftOffset+15, startPosY+TEXT_Y_DISPLACEMENT), font=smallLabelFont, fill=self.TOP_RIGHT_INFO_COLOR) def drawSNPTrackNew(self, canvas, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None): @@ -922,7 +922,7 @@ class DisplayMappingResults(object): def drawMultiTraitName(self, fd, canvas, gifmap, showLocusForm, offset= (40, 120, 80, 10), zoom = 1): nameWidths = [] yPaddingTop = 10 - colorFont=ImageFont.truetype(font=TREBUC_FILE,size=12) + colorFont=ImageFont.truetype(font=TREBUC_FILE, size=12) if len(self.qtlresults) >20 and self.selectedChr > -1: rightShift = 20 rightShiftStep = 60 @@ -941,20 +941,20 @@ class DisplayMappingResults(object): rightShift += rightShiftStep name = thisTrait.displayName() - nameWidth, nameHeight = im_drawer.textsize(name,font=colorFont) + nameWidth, nameHeight = im_drawer.textsize(name, font=colorFont) nameWidths.append(nameWidth) im_drawer.rectangle( - xy=((rightShift,yPaddingTop+kstep*15), - (rectWidth+rightShift,yPaddingTop+10+kstep*15)), + xy=((rightShift, yPaddingTop+kstep*15), + (rectWidth+rightShift, yPaddingTop+10+kstep*15)), fill=thisLRSColor, outline=BLACK) im_drawer.text( - text=name,xy=(rectWidth+2+rightShift,yPaddingTop+10+kstep*15), - font=colorFont,fill=BLACK) + text=name, xy=(rectWidth+2+rightShift, yPaddingTop+10+kstep*15), + font=colorFont, fill=BLACK) if thisTrait.db: - COORDS = "%d,%d,%d,%d" %(rectWidth+2+rightShift,yPaddingTop+kstep*15,rectWidth+2+rightShift+nameWidth,yPaddingTop+10+kstep*15,) + COORDS = "%d,%d,%d,%d" %(rectWidth+2+rightShift, yPaddingTop+kstep*15, rectWidth+2+rightShift+nameWidth, yPaddingTop+10+kstep*15,) HREF= "javascript:showDatabase3('%s','%s','%s','');" % (showLocusForm, thisTrait.db.name, thisTrait.name) - Areas = HT.Area(shape='rect',coords=COORDS,href=HREF) + Areas = HT.Area(shape='rect', coords=COORDS, href=HREF) gifmap.areas.append(Areas) def drawLegendPanel(self, canvas, offset= (40, 120, 80, 10), zoom = 1): @@ -968,80 +968,80 @@ class DisplayMappingResults(object): if 
zoom == 2: fontZoom = 1.5 - labelFont=ImageFont.truetype(font=TREBUC_FILE,size=12*fontZoom) + labelFont=ImageFont.truetype(font=TREBUC_FILE, size=12*fontZoom) startPosY = 15 stepPosY = 12*fontZoom if self.manhattan_plot != True: im_drawer.line( - xy=((xLeftOffset,startPosY),(xLeftOffset+32,startPosY)), + xy=((xLeftOffset, startPosY), (xLeftOffset+32, startPosY)), fill=self.LRS_COLOR, width=2) im_drawer.text( - text=self.LRS_LOD, xy=(xLeftOffset+40,startPosY+TEXT_Y_DISPLACEMENT), - font=labelFont,fill=BLACK) + text=self.LRS_LOD, xy=(xLeftOffset+40, startPosY+TEXT_Y_DISPLACEMENT), + font=labelFont, fill=BLACK) startPosY += stepPosY if self.additiveChecked: startPosX = xLeftOffset im_drawer.line( - xy=((startPosX,startPosY),(startPosX+17,startPosY)), + xy=((startPosX, startPosY), (startPosX+17, startPosY)), fill=self.ADDITIVE_COLOR_POSITIVE, width=2) im_drawer.line( - xy=((startPosX+18,startPosY),(startPosX+32,startPosY)), + xy=((startPosX+18, startPosY), (startPosX+32, startPosY)), fill=self.ADDITIVE_COLOR_NEGATIVE, width=2) im_drawer.text( - text='Additive Effect',xy=(startPosX+40,startPosY+TEXT_Y_DISPLACEMENT), - font=labelFont,fill=BLACK) + text='Additive Effect', xy=(startPosX+40, startPosY+TEXT_Y_DISPLACEMENT), + font=labelFont, fill=BLACK) if self.genotype.type == 'intercross' and self.dominanceChecked: startPosX = xLeftOffset startPosY += stepPosY im_drawer.line( - xy=((startPosX,startPosY),(startPosX+17,startPosY)), + xy=((startPosX, startPosY), (startPosX+17, startPosY)), fill=self.DOMINANCE_COLOR_POSITIVE, width=4) im_drawer.line( - xy=((startPosX+18,startPosY),(startPosX+35,startPosY)), + xy=((startPosX+18, startPosY), (startPosX+35, startPosY)), fill=self.DOMINANCE_COLOR_NEGATIVE, width=4) im_drawer.text( - text='Dominance Effect', xy=(startPosX+42,startPosY+5), - font=labelFont,fill=BLACK) + text='Dominance Effect', xy=(startPosX+42, startPosY+5), + font=labelFont, fill=BLACK) if self.haplotypeAnalystChecked: startPosY += stepPosY startPosX = xLeftOffset im_drawer.line( - xy=((startPosX,startPosY),(startPosX+17,startPosY)), + xy=((startPosX, startPosY), (startPosX+17, startPosY)), fill=self.HAPLOTYPE_POSITIVE, width=4) im_drawer.line( - xy=((startPosX+18,startPosY),(startPosX+35,startPosY)), + xy=((startPosX+18, startPosY), (startPosX+35, startPosY)), fill=self.HAPLOTYPE_NEGATIVE, width=4) im_drawer.line( - xy=((startPosX+36,startPosY),(startPosX+53,startPosY)), + xy=((startPosX+36, startPosY), (startPosX+53, startPosY)), fill=self.HAPLOTYPE_HETEROZYGOUS, width=4) im_drawer.line( - xy=((startPosX+54,startPosY),(startPosX+67,startPosY)), + xy=((startPosX+54, startPosY), (startPosX+67, startPosY)), fill=self.HAPLOTYPE_RECOMBINATION, width=4) im_drawer.text( text='Haplotypes (Pat, Mat, Het, Unk)', - xy=(startPosX+76,startPosY+5),font=labelFont,fill=BLACK) + xy=(startPosX+76, startPosY+5), font=labelFont, fill=BLACK) if self.permChecked and self.nperm > 0: startPosY += stepPosY startPosX = xLeftOffset im_drawer.line( - xy=((startPosX, startPosY),( startPosX + 32, startPosY)), + xy=((startPosX, startPosY), ( startPosX + 32, startPosY)), fill=self.SIGNIFICANT_COLOR, width=self.SIGNIFICANT_WIDTH) im_drawer.line( - xy=((startPosX, startPosY + stepPosY),( startPosX + 32, startPosY + stepPosY)), + xy=((startPosX, startPosY + stepPosY), ( startPosX + 32, startPosY + stepPosY)), fill=self.SUGGESTIVE_COLOR, width=self.SUGGESTIVE_WIDTH) im_drawer.text( - text='Significant %s = %2.2f' % (self.LRS_LOD,self.significant), - 
xy=(xLeftOffset+42,startPosY+TEXT_Y_DISPLACEMENT),font=labelFont,fill=BLACK) + text='Significant %s = %2.2f' % (self.LRS_LOD, self.significant), + xy=(xLeftOffset+42, startPosY+TEXT_Y_DISPLACEMENT), font=labelFont, fill=BLACK) im_drawer.text( text='Suggestive %s = %2.2f' % (self.LRS_LOD, self.suggestive), - xy=(xLeftOffset+42,startPosY + TEXT_Y_DISPLACEMENT +stepPosY),font=labelFont, + xy=(xLeftOffset+42, startPosY + TEXT_Y_DISPLACEMENT +stepPosY), font=labelFont, fill=BLACK) - labelFont = ImageFont.truetype(font=VERDANA_FILE,size=12*fontZoom) + labelFont = ImageFont.truetype(font=VERDANA_FILE, size=12*fontZoom) labelColor = BLACK if self.dataset.type == "Publish" or self.dataset.type == "Geno": dataset_label = self.dataset.fullname @@ -1109,22 +1109,22 @@ class DisplayMappingResults(object): im_drawer.textsize(string2, font=labelFont)[0]) im_drawer.text( text=identification, - xy=(canvas.size[0] - xRightOffset-d,20*fontZoom),font=labelFont, + xy=(canvas.size[0] - xRightOffset-d, 20*fontZoom), font=labelFont, fill=labelColor) else: d = 4+ max( im_drawer.textsize(string1, font=labelFont)[0], im_drawer.textsize(string2, font=labelFont)[0]) im_drawer.text( - text=string1,xy=(canvas.size[0] - xRightOffset-d,35*fontZoom), - font=labelFont,fill=labelColor) + text=string1, xy=(canvas.size[0] - xRightOffset-d, 35*fontZoom), + font=labelFont, fill=labelColor) im_drawer.text( - text=string2,xy=(canvas.size[0] - xRightOffset-d,50*fontZoom), - font=labelFont,fill=labelColor) + text=string2, xy=(canvas.size[0] - xRightOffset-d, 50*fontZoom), + font=labelFont, fill=labelColor) if string3 != '': im_drawer.text( - text=string3,xy=(canvas.size[0] - xRightOffset-d,65*fontZoom), - font=labelFont,fill=labelColor) + text=string3, xy=(canvas.size[0] - xRightOffset-d, 65*fontZoom), + font=labelFont, fill=labelColor) def drawGeneBand(self, canvas, gifmap, plotXScale, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None): @@ -1345,7 +1345,7 @@ class DisplayMappingResults(object): labelText = "3'" im_drawer.text( text=labelText, - xy=(utrEndPix+2,geneYLocation+self.EACH_GENE_HEIGHT), + xy=(utrEndPix+2, geneYLocation+self.EACH_GENE_HEIGHT), font=ImageFont.truetype(font=ARIAL_FILE, size=2)) #draw the genes as rectangles @@ -1357,7 +1357,7 @@ class DisplayMappingResults(object): COORDS = "%d, %d, %d, %d" %(geneStartPix, geneYLocation, geneEndPix, (geneYLocation + self.EACH_GENE_HEIGHT)) # NL: 06-02-2011 Rob required to display NCBI info in a new window - gifmap.areas.append(HT.Area(shape='rect',coords=COORDS,href=HREF, title=TITLE,target="_blank")) + gifmap.areas.append(HT.Area(shape='rect', coords=COORDS, href=HREF, title=TITLE, target="_blank")) ## BEGIN HaplotypeAnalyst def drawHaplotypeBand(self, canvas, gifmap, plotXScale, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None): @@ -1490,7 +1490,7 @@ class DisplayMappingResults(object): counter = counter + 1 if item.name == samplelist[k]: ind = counter - maxind=max(ind,maxind) + maxind=max(ind, maxind) # lines if (oldgeno[k] == -1 and _geno == -1): @@ -1523,7 +1523,7 @@ class DisplayMappingResults(object): COORDS = "%d, %d, %d, %d" %(geneStartPix, geneYLocation+ind*self.EACH_GENE_HEIGHT, geneEndPix+1, (geneYLocation + ind*self.EACH_GENE_HEIGHT)) TITLE = "Strain: %s, marker (%s) \n Position %2.3f Mb." 
% (samplelist[k], _chr[j].name, float(txStart)) HREF = '' - gifmap.areas.append(HT.Area(shape='rect',coords=COORDS,href=HREF, title=TITLE)) + gifmap.areas.append(HT.Area(shape='rect', coords=COORDS, href=HREF, title=TITLE)) # if there are no more markers in a chromosome, the plotRight value calculated above will be before the plotWidth # resulting in some empty space on the right side of the plot area. This draws an "unknown" bar from plotRight to the edge. @@ -1642,14 +1642,14 @@ class DisplayMappingResults(object): WEBQTL_HREF = "javascript:rangeView('%s', %f, %f)" % (self.selectedChr - 1, max(0, (calBase-webqtlZoomWidth))/1000000.0, (calBase+webqtlZoomWidth)/1000000.0) WEBQTL_TITLE = "Click to view this section of the genome in WebQTL" - gifmap.areas.append(HT.Area(shape='rect',coords=WEBQTL_COORDS,href=WEBQTL_HREF, title=WEBQTL_TITLE)) + gifmap.areas.append(HT.Area(shape='rect', coords=WEBQTL_COORDS, href=WEBQTL_HREF, title=WEBQTL_TITLE)) im_drawer.rectangle( xy=((xBrowse1, paddingTop), (xBrowse2, (paddingTop + self.BAND_HEIGHT))), outline=self.CLICKABLE_WEBQTL_REGION_COLOR, fill=self.CLICKABLE_WEBQTL_REGION_COLOR) im_drawer.line( - xy=((xBrowse1, paddingTop),( xBrowse1, (paddingTop + self.BAND_HEIGHT))), + xy=((xBrowse1, paddingTop), ( xBrowse1, (paddingTop + self.BAND_HEIGHT))), fill=self.CLICKABLE_WEBQTL_REGION_OUTLINE_COLOR) if self.dataset.group.species == "mouse" or self.dataset.group.species == "rat": @@ -1659,14 +1659,14 @@ class DisplayMappingResults(object): else: PHENOGEN_HREF = "https://phenogen.org/gene.jsp?speciesCB=Mm&auto=Y&geneTxt=chr%s:%d-%d&genomeVer=mm10" % (self.selectedChr, max(0, calBase-flankingWidthInBases), calBase+flankingWidthInBases) PHENOGEN_TITLE = "Click to view this section of the genome in PhenoGen" - gifmap.areas.append(HT.Area(shape='rect',coords=PHENOGEN_COORDS,href=PHENOGEN_HREF, title=PHENOGEN_TITLE)) + gifmap.areas.append(HT.Area(shape='rect', coords=PHENOGEN_COORDS, href=PHENOGEN_HREF, title=PHENOGEN_TITLE)) im_drawer.rectangle( xy=((xBrowse1, phenogenPaddingTop), (xBrowse2, (phenogenPaddingTop+self.BAND_HEIGHT))), outline=self.CLICKABLE_PHENOGEN_REGION_COLOR, fill=self.CLICKABLE_PHENOGEN_REGION_COLOR) im_drawer.line( - xy=((xBrowse1, phenogenPaddingTop),( xBrowse1, (phenogenPaddingTop+self.BAND_HEIGHT))), + xy=((xBrowse1, phenogenPaddingTop), ( xBrowse1, (phenogenPaddingTop+self.BAND_HEIGHT))), fill=self.CLICKABLE_PHENOGEN_REGION_OUTLINE_COLOR) UCSC_COORDS = "%d, %d, %d, %d" %(xBrowse1, ucscPaddingTop, xBrowse2, (ucscPaddingTop+self.BAND_HEIGHT)) @@ -1675,7 +1675,7 @@ class DisplayMappingResults(object): else: UCSC_HREF = "http://genome.ucsc.edu/cgi-bin/hgTracks?db=%s&position=chr%s:%d-%d" % (self._ucscDb, self.selectedChr, max(0, calBase-flankingWidthInBases), calBase+flankingWidthInBases) UCSC_TITLE = "Click to view this section of the genome in the UCSC Genome Browser" - gifmap.areas.append(HT.Area(shape='rect',coords=UCSC_COORDS,href=UCSC_HREF, title=UCSC_TITLE)) + gifmap.areas.append(HT.Area(shape='rect', coords=UCSC_COORDS, href=UCSC_HREF, title=UCSC_TITLE)) im_drawer.rectangle( xy=((xBrowse1, ucscPaddingTop), (xBrowse2, (ucscPaddingTop+self.BAND_HEIGHT))), @@ -1692,7 +1692,7 @@ class DisplayMappingResults(object): else: ENSEMBL_HREF = "http://www.ensembl.org/Rattus_norvegicus/contigview?chr=%s&start=%d&end=%d" % (self.selectedChr, max(0, calBase-flankingWidthInBases), calBase+flankingWidthInBases) ENSEMBL_TITLE = "Click to view this section of the genome in the Ensembl Genome Browser" - 
gifmap.areas.append(HT.Area(shape='rect',coords=ENSEMBL_COORDS,href=ENSEMBL_HREF, title=ENSEMBL_TITLE)) + gifmap.areas.append(HT.Area(shape='rect', coords=ENSEMBL_COORDS, href=ENSEMBL_HREF, title=ENSEMBL_TITLE)) im_drawer.rectangle( xy=((xBrowse1, ensemblPaddingTop), (xBrowse2, (ensemblPaddingTop+self.BAND_HEIGHT))), @@ -1789,8 +1789,8 @@ class DisplayMappingResults(object): continue Xc = xLeftOffset + plotXScale*(_Mb - startMb) if counter % NUM_MINOR_TICKS == 0: # Draw a MAJOR mark, not just a minor tick mark - im_drawer.line(xy=((Xc,yZero), - (Xc,yZero+xMajorTickHeight)), + im_drawer.line(xy=((Xc, yZero), + (Xc, yZero+xMajorTickHeight)), fill=xAxisTickMarkColor, width=X_MAJOR_TICK_THICKNESS) # Draw the MAJOR tick mark labelStr = str(formatStr % _Mb) # What Mbase location to put on the label @@ -1800,8 +1800,8 @@ class DisplayMappingResults(object): text=labelStr, font=MBLabelFont, fill=xAxisLabelColor) else: - im_drawer.line(xy=((Xc,yZero), - (Xc,yZero+xMinorTickHeight)), + im_drawer.line(xy=((Xc, yZero), + (Xc, yZero+xMinorTickHeight)), fill=xAxisTickMarkColor, width=X_MINOR_TICK_THICKNESS) # Draw the MINOR tick mark @@ -1834,7 +1834,7 @@ class DisplayMappingResults(object): text="Megabases", xy=( xLeftOffset+(plotWidth-im_drawer.textsize( - "Megabases",font=megabaseLabelFont)[0])/2, + "Megabases", font=megabaseLabelFont)[0])/2, strYLoc+MBLabelFont.font.height+10*(zoom%2)), font=megabaseLabelFont, fill=BLACK) pass @@ -1889,7 +1889,7 @@ class DisplayMappingResults(object): for j, ChrInfo in enumerate(ChrAInfo): preLpos = -1 for i, item in enumerate(ChrInfo): - Lname,Lpos = item + Lname, Lpos = item if Lpos != preLpos: offsetA += stepA differ = 1 @@ -1903,17 +1903,17 @@ class DisplayMappingResults(object): Zorder = 0 if differ: im_drawer.line( - xy=((startPosX+Lpos,yZero),(xLeftOffset+offsetA,\ + xy=((startPosX+Lpos, yZero), (xLeftOffset+offsetA,\ yZero+25)), fill=lineColor) im_drawer.line( - xy=((xLeftOffset+offsetA,yZero+25),(xLeftOffset+offsetA,\ + xy=((xLeftOffset+offsetA, yZero+25), (xLeftOffset+offsetA,\ yZero+40+Zorder*(LRectWidth+3))), fill=lineColor) rectColor = ORANGE else: im_drawer.line( - xy=((xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3)-3),(\ + xy=((xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3)-3), (\ xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3))), fill=lineColor) rectColor = DEEPPINK @@ -1921,9 +1921,9 @@ class DisplayMappingResults(object): xy=((xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3)), (xLeftOffset+offsetA-LRectHeight, yZero+40+Zorder*(LRectWidth+3)+LRectWidth)), - outline=rectColor,fill=rectColor,width = 0) + outline=rectColor, fill=rectColor, width = 0) COORDS="%d,%d,%d,%d"%(xLeftOffset+offsetA-LRectHeight, yZero+40+Zorder*(LRectWidth+3),\ - xLeftOffset+offsetA,yZero+40+Zorder*(LRectWidth+3)+LRectWidth) + xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3)+LRectWidth) HREF="/show_trait?trait_id=%s&dataset=%s" % (Lname, self.dataset.group.name+"Geno") #HREF="javascript:showDatabase3('%s','%s','%s','');" % (showLocusForm,fd.RISet+"Geno", Lname) Areas=HT.Area(shape='rect', coords=COORDS, href=HREF, target="_blank", title="Locus : " + Lname) @@ -1931,7 +1931,7 @@ class DisplayMappingResults(object): ##piddle bug if j == 0: im_drawer.line( - xy=((startPosX,yZero),(startPosX,yZero+40)), + xy=((startPosX, yZero), (startPosX, yZero+40)), fill=lineColor) startPosX += (self.ChrLengthDistList[j]+self.GraphInterval)*plotXScale @@ -1943,7 +1943,7 @@ class DisplayMappingResults(object): strYLoc + MBLabelFont.font.height+ 10*(zoom%2)), 
font=centimorganLabelFont, fill=BLACK) - im_drawer.line(xy=((xLeftOffset,yZero), (xLeftOffset+plotWidth,yZero)), + im_drawer.line(xy=((xLeftOffset, yZero), (xLeftOffset+plotWidth, yZero)), fill=BLACK, width=X_AXIS_THICKNESS) # Draw the X axis itself @@ -2079,7 +2079,7 @@ class DisplayMappingResults(object): LRS_LOD_Max = 0.000001 yTopOffset + 30*(zoom - 1) yLRS = yZero - (item/LRS_LOD_Max) * LRSHeightThresh - im_drawer.line(xy=((xLeftOffset,yLRS), (xLeftOffset-4,yLRS)), + im_drawer.line(xy=((xLeftOffset, yLRS), (xLeftOffset-4, yLRS)), fill=self.LRS_COLOR, width=1*zoom) if all_int: scaleStr = "%d" % item @@ -2127,8 +2127,8 @@ class DisplayMappingResults(object): else: sugg_title = "Suggestive LOD = %0.2f" % (self.suggestive/4.61) sig_title = "Significant LOD = %0.2f" % (self.significant/4.61) - Areas1 = HT.Area(shape='rect',coords=sugg_coords,title=sugg_title) - Areas2 = HT.Area(shape='rect',coords=sig_coords,title=sig_title) + Areas1 = HT.Area(shape='rect', coords=sugg_coords, title=sugg_title) + Areas2 = HT.Area(shape='rect', coords=sig_coords, title=sig_title) gifmap.areas.append(Areas1) gifmap.areas.append(Areas2) @@ -2316,7 +2316,7 @@ class DisplayMappingResults(object): im_drawer.text( text="5", xy=( - Xc-im_drawer.textsize("5",font=symbolFont)[0]/2+1, + Xc-im_drawer.textsize("5", font=symbolFont)[0]/2+1, Yc-4), fill=point_color, font=symbolFont) else: @@ -2383,8 +2383,8 @@ class DisplayMappingResults(object): ) else: im_drawer.line( - xy=((Xc0,yZero-(Yc0-yZero)), - (Xc,yZero-(Yc-yZero))), + xy=((Xc0, yZero-(Yc0-yZero)), + (Xc, yZero-(Yc-yZero))), fill=minusColor, width=lineWidth #, clipX=(xLeftOffset, xLeftOffset + plotWidth) ) @@ -2471,8 +2471,8 @@ class DisplayMappingResults(object): ###draw additive scale if not self.multipleInterval and self.additiveChecked: - additiveScaleFont=ImageFont.truetype(font=VERDANA_FILE,size=16*zoom) - additiveScale = Plot.detScaleOld(0,additiveMax) + additiveScaleFont=ImageFont.truetype(font=VERDANA_FILE, size=16*zoom) + additiveScale = Plot.detScaleOld(0, additiveMax) additiveStep = (additiveScale[1]-additiveScale[0])/additiveScale[2] additiveAxisList = Plot.frange(0, additiveScale[1], additiveStep) addPlotScale = AdditiveHeightThresh/additiveMax @@ -2482,18 +2482,18 @@ class DisplayMappingResults(object): for item in additiveAxisList: additiveY = yZero - item*addPlotScale im_drawer.line( - xy=((xLeftOffset + plotWidth,additiveY), - (xLeftOffset+4+ plotWidth,additiveY)), + xy=((xLeftOffset + plotWidth, additiveY), + (xLeftOffset+4+ plotWidth, additiveY)), fill=self.ADDITIVE_COLOR_POSITIVE, width=1*zoom) scaleStr = "%2.3f" % item im_drawer.text( text=scaleStr, - xy=(xLeftOffset + plotWidth +6,additiveY+TEXT_Y_DISPLACEMENT), - font=additiveScaleFont,fill=self.ADDITIVE_COLOR_POSITIVE) + xy=(xLeftOffset + plotWidth +6, additiveY+TEXT_Y_DISPLACEMENT), + font=additiveScaleFont, fill=self.ADDITIVE_COLOR_POSITIVE) im_drawer.line( - xy=((xLeftOffset+plotWidth,additiveY), - (xLeftOffset+plotWidth,yZero)), + xy=((xLeftOffset+plotWidth, additiveY), + (xLeftOffset+plotWidth, yZero)), fill=self.ADDITIVE_COLOR_POSITIVE, width=1*zoom) im_drawer.line( @@ -2553,7 +2553,7 @@ class DisplayMappingResults(object): chrFontZoom = 2 else: chrFontZoom = 1 - chrLabelFont=ImageFont.truetype(font=VERDANA_FILE,size=24*chrFontZoom) + chrLabelFont=ImageFont.truetype(font=VERDANA_FILE, size=24*chrFontZoom) for i, _chr in enumerate(self.genotype): if (i % 2 == 0): @@ -2575,10 +2575,10 @@ class DisplayMappingResults(object): TEXT_Y_DISPLACEMENT = 0 im_drawer.text(xy=(chrStartPix, 
yTopOffset + TEXT_Y_DISPLACEMENT), text=_chr.name, font=chrLabelFont, fill=BLACK) - COORDS = "%d,%d,%d,%d" %(chrStartPix, yTopOffset, chrEndPix,yTopOffset +20) + COORDS = "%d,%d,%d,%d" %(chrStartPix, yTopOffset, chrEndPix, yTopOffset +20) #add by NL 09-03-2010 - HREF = "javascript:chrView(%d,%s);" % (i,self.ChrLengthMbList) + HREF = "javascript:chrView(%d,%s);" % (i, self.ChrLengthMbList) #HREF = "javascript:changeView(%d,%s);" % (i,self.ChrLengthMbList) Areas = HT.Area(shape='rect', coords=COORDS, href=HREF) gifmap.areas.append(Areas) @@ -2720,7 +2720,7 @@ class DisplayMappingResults(object): else: chr_as_int = int(theGO["Chromosome"]) - 1 if refGene: - literatureCorrelationString = str(self.getLiteratureCorrelation(self.cursor,refGene,theGO['GeneID']) or "N/A") + literatureCorrelationString = str(self.getLiteratureCorrelation(self.cursor, refGene, theGO['GeneID']) or "N/A") this_row = [selectCheck.__str__(), str(tableIterationsCnt), @@ -2820,8 +2820,8 @@ class DisplayMappingResults(object): lCorr = None try: query = 'SELECT Value FROM LCorrRamin3 WHERE GeneId1 = %s and GeneId2 = %s' - for x,y in [(geneId1,geneId2),(geneId2,geneId1)]: - cursor.execute(query,(x,y)) + for x, y in [(geneId1, geneId2), (geneId2, geneId1)]: + cursor.execute(query, (x, y)) lCorr = cursor.fetchone() if lCorr: lCorr = lCorr[0] diff --git a/wqflask/wqflask/marker_regression/plink_mapping.py b/wqflask/wqflask/marker_regression/plink_mapping.py index d4ee6fe6..2f282adc 100644 --- a/wqflask/wqflask/marker_regression/plink_mapping.py +++ b/wqflask/wqflask/marker_regression/plink_mapping.py @@ -54,7 +54,7 @@ def gen_pheno_txt_file_plink(this_trait, dataset, vals, pheno_filename = ''): for i, sample in enumerate(ped_sample_list): try: value = vals[i] - value = str(value).replace('value=','') + value = str(value).replace('value=', '') value = value.strip() except: value = -9999 @@ -78,7 +78,7 @@ def gen_pheno_txt_file_plink(this_trait, dataset, vals, pheno_filename = ''): # get strain name from ped file in order def get_samples_from_ped_file(dataset): - ped_file= open("{}{}.ped".format(flat_files('mapping'), dataset.group.name),"r") + ped_file= open("{}{}.ped".format(flat_files('mapping'), dataset.group.name), "r") line = ped_file.readline() sample_list=[] @@ -155,7 +155,7 @@ def parse_plink_output(output_filename, species): # output: lineList list ####################################################### def build_line_list(line=None): - line_list = string.split(string.strip(line),' ')# irregular number of whitespaces between columns + line_list = string.split(string.strip(line), ' ')# irregular number of whitespaces between columns line_list = [item for item in line_list if item !=''] line_list = list(map(string.strip, line_list)) diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py index 145dbc77..1e6dff57 100644 --- a/wqflask/wqflask/marker_regression/run_mapping.py +++ b/wqflask/wqflask/marker_regression/run_mapping.py @@ -707,7 +707,7 @@ def get_perm_strata(this_trait, sample_list, categorical_vars, used_samples): perm_strata_strings.append(combined_string) - d = dict([(y,x+1) for x,y in enumerate(sorted(set(perm_strata_strings)))]) + d = dict([(y, x+1) for x, y in enumerate(sorted(set(perm_strata_strings)))]) list_to_numbers = [d[x] for x in perm_strata_strings] perm_strata = list_to_numbers diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py index 5b3946e3..c07a7670 100644 --- a/wqflask/wqflask/search_results.py +++ 
b/wqflask/wqflask/search_results.py @@ -53,7 +53,7 @@ views.py). search = self.search_terms self.original_search_string = self.search_terms # check for dodgy search terms - rx = re.compile(r'.*\W(href|http|sql|select|update)\W.*',re.IGNORECASE) + rx = re.compile(r'.*\W(href|http|sql|select|update)\W.*', re.IGNORECASE) if rx.match(search): logger.info("Regex failed search") self.search_term_exists = False diff --git a/wqflask/wqflask/show_trait/SampleList.py b/wqflask/wqflask/show_trait/SampleList.py index 21ba7f63..f17e825e 100644 --- a/wqflask/wqflask/show_trait/SampleList.py +++ b/wqflask/wqflask/show_trait/SampleList.py @@ -57,7 +57,7 @@ class SampleList(object): sample = webqtlCaseData.webqtlCaseData(name=sample_name) sample.extra_info = {} - if self.dataset.group.name == 'AXBXA' and sample_name in ('AXB18/19/20','AXB13/14','BXA8/17'): + if self.dataset.group.name == 'AXBXA' and sample_name in ('AXB18/19/20', 'AXB13/14', 'BXA8/17'): sample.extra_info['url'] = "/mouseCross.html#AXB/BXA" sample.extra_info['css_class'] = "fs12" diff --git a/wqflask/wqflask/user_manager.py b/wqflask/wqflask/user_manager.py index a871e91a..232cb8da 100644 --- a/wqflask/wqflask/user_manager.py +++ b/wqflask/wqflask/user_manager.py @@ -867,7 +867,7 @@ def forgot_password_submit(): email_address = params['email_address'] next_page = None if email_address != "": - logger.debug("Wants to send password E-mail to ",email_address) + logger.debug("Wants to send password E-mail to ", email_address) user_details = get_user_by_unique_column("email_address", email_address) if user_details: ForgotPasswordEmail(user_details["email_address"]) diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 394a9e28..92c20fc7 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -55,7 +55,7 @@ from wqflask.ctl import ctl_analysis from wqflask.snp_browser import snp_browser from utility import temp_data -from utility.tools import SQL_URI,TEMPDIR,USE_REDIS,USE_GN_SERVER,GN_SERVER_URL,GN_VERSION,JS_TWITTER_POST_FETCHER_PATH,JS_GUIX_PATH, CSS_PATH +from utility.tools import SQL_URI, TEMPDIR, USE_REDIS, USE_GN_SERVER, GN_SERVER_URL, GN_VERSION, JS_TWITTER_POST_FETCHER_PATH, JS_GUIX_PATH, CSS_PATH from utility.helper_functions import get_species_groups from utility.authentication_tools import check_resource_availability from utility.redis_tools import get_redis_conn @@ -133,10 +133,10 @@ def handle_bad_request(e): list = [fn for fn in os.listdir("./wqflask/static/gif/error") if fn.endswith(".gif") ] animation = random.choice(list) - resp = make_response(render_template("error.html",message=err_msg,stack=formatted_lines,error_image=animation,version=GN_VERSION)) + resp = make_response(render_template("error.html", message=err_msg, stack=formatted_lines, error_image=animation, version=GN_VERSION)) # logger.error("Set cookie %s with %s" % (err_msg, animation)) - resp.set_cookie(err_msg[:32],animation) + resp.set_cookie(err_msg[:32], animation) return resp @app.route("/authentication_needed") @@ -257,7 +257,7 @@ def docedit(): @app.route('/generated/') def generated_file(filename): logger.info(request.url) - return send_from_directory(GENERATED_IMAGE_DIR,filename) + return send_from_directory(GENERATED_IMAGE_DIR, filename) @app.route("/help") def help(): -- cgit v1.2.3 From 1f4fb6b24f3508d80be1f07cd62e38ac9385ef41 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Thu, 20 Aug 2020 16:35:17 +0300 Subject: Handle module renames in the standard library Run: ``` 2to3-3.8 -f imports -w . 
&& \ 2to3-3.8 -f imports2 -w . ``` See: and --- wqflask/base/data_set.py | 2 +- wqflask/maintenance/gen_select_dataset.py | 4 ++-- wqflask/maintenance/quantile_normalize.py | 4 ++-- wqflask/maintenance/set_resource_defaults.py | 4 ++-- wqflask/utility/svg.py | 16 ++++++++-------- wqflask/wqflask/api/router.py | 14 +++++++------- wqflask/wqflask/collect.py | 2 +- .../wqflask/comparison_bar_chart/comparison_bar_chart.py | 2 +- wqflask/wqflask/correlation/show_corr_results.py | 2 +- wqflask/wqflask/correlation_matrix/show_corr_matrix.py | 2 +- wqflask/wqflask/export_traits.py | 4 ++-- wqflask/wqflask/heatmap/heatmap.py | 4 ++-- .../wqflask/marker_regression/display_mapping_results.py | 4 ++-- wqflask/wqflask/marker_regression/run_mapping.py | 2 +- wqflask/wqflask/network_graph/network_graph.py | 2 +- wqflask/wqflask/show_trait/show_trait.py | 2 +- wqflask/wqflask/user_manager.py | 2 +- wqflask/wqflask/views.py | 12 ++++++------ 18 files changed, 42 insertions(+), 42 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py index 43beec26..8151a29d 100644 --- a/wqflask/base/data_set.py +++ b/wqflask/base/data_set.py @@ -44,7 +44,7 @@ import codecs import json import requests import gzip -import cPickle as pickle +import pickle as pickle import itertools from redis import Redis diff --git a/wqflask/maintenance/gen_select_dataset.py b/wqflask/maintenance/gen_select_dataset.py index d12b328f..fd65a52a 100644 --- a/wqflask/maintenance/gen_select_dataset.py +++ b/wqflask/maintenance/gen_select_dataset.py @@ -50,7 +50,7 @@ from utility.tools import locate, locate_ignore_error, TEMPDIR, SQL_URI import MySQLdb import simplejson as json -import urlparse +import urllib.parse #import sqlalchemy as sa @@ -66,7 +66,7 @@ from pprint import pformat as pf def parse_db_uri(): """Converts a database URI to the db name, host name, user name, and password""" - parsed_uri = urlparse.urlparse(SQL_URI) + parsed_uri = urllib.parse.urlparse(SQL_URI) db_conn_info = dict( db = parsed_uri.path[1:], diff --git a/wqflask/maintenance/quantile_normalize.py b/wqflask/maintenance/quantile_normalize.py index 43edfd13..4d6e03bf 100644 --- a/wqflask/maintenance/quantile_normalize.py +++ b/wqflask/maintenance/quantile_normalize.py @@ -6,7 +6,7 @@ sys.path.insert(0, './') import MySQLdb -import urlparse +import urllib.parse import numpy as np import pandas as pd @@ -22,7 +22,7 @@ from utility.tools import ELASTICSEARCH_HOST, ELASTICSEARCH_PORT, SQL_URI def parse_db_uri(): """Converts a database URI to the db name, host name, user name, and password""" - parsed_uri = urlparse.urlparse(SQL_URI) + parsed_uri = urllib.parse.urlparse(SQL_URI) db_conn_info = dict( db = parsed_uri.path[1:], diff --git a/wqflask/maintenance/set_resource_defaults.py b/wqflask/maintenance/set_resource_defaults.py index d53a255b..abd5416c 100644 --- a/wqflask/maintenance/set_resource_defaults.py +++ b/wqflask/maintenance/set_resource_defaults.py @@ -34,7 +34,7 @@ Redis = get_redis_conn() import MySQLdb -import urlparse +import urllib.parse from utility.logger import getLogger logger = getLogger(__name__) @@ -42,7 +42,7 @@ logger = getLogger(__name__) def parse_db_uri(): """Converts a database URI to the db name, host name, user name, and password""" - parsed_uri = urlparse.urlparse(SQL_URI) + parsed_uri = urllib.parse.urlparse(SQL_URI) db_conn_info = dict( db = parsed_uri.path[1:], diff --git a/wqflask/utility/svg.py b/wqflask/utility/svg.py index 19eda0ce..b92cc2d1 100644 --- a/wqflask/utility/svg.py +++ 
b/wqflask/utility/svg.py @@ -957,8 +957,8 @@ class drawing: # Voeg een element toe aan de grafiek toe. if use_dom_implementation==0: def toXml(self, filename='',compress=False): - import cStringIO - xml=cStringIO.StringIO() + import io + xml=io.StringIO() xml.write("\n") xml.write(" 1: - memory_file = StringIO.StringIO() + memory_file = io.StringIO() with ZipFile(memory_file, mode='w', compression=ZIP_DEFLATED) as zf: for the_file in file_list: zf.writestr(the_file[0], the_file[1]) @@ -460,7 +460,7 @@ def export_perm_data(): ["#Comment: Results sorted from low to high peak linkage"] ] - buff = StringIO.StringIO() + buff = io.StringIO() writer = csv.writer(buff) writer.writerows(the_rows) for item in perm_info['perm_data']: -- cgit v1.2.3 From 666a423b0b2f8cf98b9e70ed3dcf1c6dc3896b1d Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Sun, 23 Aug 2020 15:28:41 +0300 Subject: Use Python3 string methods * wqflask/utility/webqtlUtil.py (genRandStr): Replace "string.letters" with "string.ascii_letters". --- wqflask/utility/webqtlUtil.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/webqtlUtil.py b/wqflask/utility/webqtlUtil.py index 77cc3416..d4979011 100644 --- a/wqflask/utility/webqtlUtil.py +++ b/wqflask/utility/webqtlUtil.py @@ -64,7 +64,7 @@ ParInfo ={ # Accessory Functions ######################################### -def genRandStr(prefix = "", length=8, chars=string.letters+string.digits): +def genRandStr(prefix = "", length=8, chars=string.ascii_letters+string.digits): from random import choice _str = prefix[:] for i in range(length): @@ -110,4 +110,4 @@ def hasAccessToConfidentialPhenotypeTrait(privilege, userName, authorized_users) AuthorizedUsersList=list(map(string.strip, string.split(authorized_users, ','))) if AuthorizedUsersList.__contains__(userName): access_to_confidential_phenotype_trait = 1 - return access_to_confidential_phenotype_trait \ No newline at end of file + return access_to_confidential_phenotype_trait -- cgit v1.2.3 From 3d2ef9879305449568962db9be7fa753473ec11e Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 24 Aug 2020 10:49:58 -0500 Subject: The function draw_rotated_text_ was writing an image to /tmp, which GN2 user apparently doesn't have access to, so I replaced it with a link to the dynamically set TEMPDIR * wqflask/utility/pillow_utils.py - Replaced "/tmp/" in image location with TEMPDIR --- wqflask/utility/pillow_utils.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/pillow_utils.py b/wqflask/utility/pillow_utils.py index dfbf3e19..0c2ce7af 100644 --- a/wqflask/utility/pillow_utils.py +++ b/wqflask/utility/pillow_utils.py @@ -1,5 +1,7 @@ from PIL import Image, ImageColor, ImageDraw, ImageFont +from utility.tools import TEMPDIR + import utility.logger logger = utility.logger.getLogger(__name__ ) @@ -14,7 +16,7 @@ def draw_rotated_text(canvas, text, font, xy, fill=BLACK, angle=-90): draw_text = ImageDraw.Draw(tmp_img) draw_text.text(text=text, xy=(0,0), font=font, fill=fill) tmp_img2 = tmp_img.rotate(angle, expand=1) - tmp_img2.save("/tmp/{}.png".format(text), format="png") + tmp_img2.save("/{0}/{1}.png".format(TEMPDIR, text), format="png") canvas.paste(im=tmp_img2, box=tuple([int(i) for i in xy])) # def draw_open_polygon(canvas: Image, xy: tuple, fill: ImageColor=WHITE, outline: ImageColor=BLACK): -- cgit v1.2.3 From b3461095a21fd373656156a983038e4a586085a9 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Mon, 24 Aug 2020 
14:09:26 +0300 Subject: Remove unused imports * wqflask/utility/Plot.py: Remove unused imports like "numarray". "numarray" is does not have py3 support so it's important to remove references to it. --- wqflask/utility/Plot.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py index d4373412..b9b71129 100644 --- a/wqflask/utility/Plot.py +++ b/wqflask/utility/Plot.py @@ -26,16 +26,12 @@ from __future__ import print_function -from PIL import (Image, ImageColor, ImageDraw, ImageFont) -from pprint import pformat as pf +from PIL import ImageColor +from PIL import ImageDraw +from PIL import ImageFont from math import * -import random -import sys, os -from numarray import linear_algebra as la -from numarray import ones, array, dot, swapaxes -import webqtlUtil import corestats from base import webqtlConfig from utility.pillow_utils import draw_rotated_text -- cgit v1.2.3 From 3335bb967e59da17d104624be971e4a3e98e07c8 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 26 Aug 2020 01:33:43 +0300 Subject: Update corestats import * wqflask/utility/Plot.py: import corestats from the utility module. --- wqflask/utility/Plot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py index 7d9d7649..48a5c7ba 100644 --- a/wqflask/utility/Plot.py +++ b/wqflask/utility/Plot.py @@ -32,7 +32,7 @@ from PIL import ImageFont from math import * -import corestats +import utility.corestats as corestats from base import webqtlConfig from utility.pillow_utils import draw_rotated_text import utility.logger -- cgit v1.2.3 From 133db0e8dbc5a0812dc1f06402f2b788aecaef20 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 26 Aug 2020 17:36:23 +0300 Subject: Replace calls to "basestring with "str"" See: --- wqflask/utility/helper_functions.py | 2 +- wqflask/wqflask/collect.py | 2 +- wqflask/wqflask/show_trait/show_trait.py | 2 +- wqflask/wqflask/snp_browser/snp_browser.py | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/helper_functions.py b/wqflask/utility/helper_functions.py index 9a4a235a..107c9ec6 100644 --- a/wqflask/utility/helper_functions.py +++ b/wqflask/utility/helper_functions.py @@ -34,7 +34,7 @@ def get_species_dataset_trait(self, start_vars): #self.genotype = self.dataset.group.genotype def get_trait_db_obs(self, trait_db_list): - if isinstance(trait_db_list, basestring): + if isinstance(trait_db_list, str): trait_db_list = trait_db_list.split(",") self.trait_list = [] diff --git a/wqflask/wqflask/collect.py b/wqflask/wqflask/collect.py index d77567f8..06c00930 100644 --- a/wqflask/wqflask/collect.py +++ b/wqflask/wqflask/collect.py @@ -38,7 +38,7 @@ from utility.logger import getLogger logger = getLogger(__name__) def process_traits(unprocessed_traits): - if isinstance(unprocessed_traits, basestring): + if isinstance(unprocessed_traits, str): unprocessed_traits = unprocessed_traits.split(",") traits = set() for trait in unprocessed_traits: diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index 738aa28a..6a74cada 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -372,7 +372,7 @@ class ShowTrait(object): this_group = self.dataset.group.name # We're checking a string here! 
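
Python 3 drops the `basestring` type, which is why this commit rewrites every `isinstance(..., basestring)` check to target `str` instead. A minimal sketch of the idea, using a hypothetical helper that is not part of the patch but behaves the same on either interpreter version:

```python
# Hypothetical helper illustrating the basestring -> str rename; not part of this patch.
# On Python 2, `basestring` is the common base of `str` and `unicode`;
# Python 3 removed it, so plain `str` is the correct target for isinstance checks.
try:
    string_types = (basestring,)   # Python 2
except NameError:
    string_types = (str,)          # Python 3

def is_stringy(value):
    """Return True for any text value on either Python major version."""
    return isinstance(value, string_types)

# Mirrors the checks in process_traits() and get_trait_db_obs() in this commit.
assert is_stringy("BXD")
assert not is_stringy(["BXD"])
```
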
- assert isinstance(this_group, basestring), "We need a string type thing here" + assert isinstance(this_group, str), "We need a string type thing here" if this_group[:3] == 'BXD' and this_group != "BXD-Harvested": this_group = 'BXD' diff --git a/wqflask/wqflask/snp_browser/snp_browser.py b/wqflask/wqflask/snp_browser/snp_browser.py index b18bfc62..b3d26caf 100644 --- a/wqflask/wqflask/snp_browser/snp_browser.py +++ b/wqflask/wqflask/snp_browser/snp_browser.py @@ -477,7 +477,7 @@ class SnpBrowser(object): the_bases = [] for j, item in enumerate(allele_value_list): - if item and isinstance(item, basestring): + if item and isinstance(item, str): this_base = [str(item), base_color_dict[item]] else: this_base = "" @@ -612,7 +612,7 @@ class SnpBrowser(object): this_allele_list = [] for item in self.allele_list: - if item and isinstance(item, basestring) and (item.lower() not in this_allele_list) and (item != "-"): + if item and isinstance(item, str) and (item.lower() not in this_allele_list) and (item != "-"): this_allele_list.append(item.lower()) total_allele_count = len(this_allele_list) -- cgit v1.2.3 From 303e4b71c2172da5be19c84d4be5a062329ac013 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 26 Aug 2020 19:12:33 +0300 Subject: Remove "from __future__ import new_feature" statements See: --- test/requests/link_checker.py | 1 - test/requests/main_web_functionality.py | 22 ++++++++++++---------- test/requests/mapping_tests.py | 1 - test/requests/navigation_tests.py | 1 - test/requests/test-website.py | 2 +- webtests/browser_run.py | 4 +--- webtests/correlation_matrix_test.py | 2 -- webtests/correlation_test.py | 2 -- webtests/marker_regression_test.py | 2 -- webtests/show_trait_js_test.py | 2 -- webtests/test_runner.py | 4 +--- wqflask/base/data_set.py | 1 - wqflask/base/mrna_assay_tissue_data.py | 4 +--- wqflask/base/species.py | 4 +--- wqflask/base/trait.py | 2 -- wqflask/maintenance/convert_dryad_to_bimbam.py | 3 +-- wqflask/maintenance/convert_geno_to_bimbam.py | 3 +-- wqflask/maintenance/gen_select_dataset.py | 10 +--------- .../maintenance/generate_kinship_from_bimbam.py | 3 +-- .../maintenance/generate_probesetfreeze_file.py | 2 -- wqflask/maintenance/geno_to_json.py | 3 +-- wqflask/maintenance/get_group_samplelists.py | 2 -- wqflask/maintenance/print_benchmark.py | 4 +--- wqflask/maintenance/quantile_normalize.py | 7 +------ wqflask/maintenance/set_resource_defaults.py | 4 +--- wqflask/utility/Plot.py | 2 -- wqflask/utility/after.py | 4 +--- wqflask/utility/authentication_tools.py | 2 -- wqflask/utility/benchmark.py | 2 -- wqflask/utility/chunks.py | 2 -- wqflask/utility/db_tools.py | 2 -- wqflask/utility/gen_geno_ob.py | 4 +--- wqflask/utility/genofile_parser.py | 1 - wqflask/utility/helper_functions.py | 2 -- wqflask/utility/hmac.py | 4 +--- wqflask/utility/redis_tools.py | 4 +--- wqflask/utility/temp_data.py | 1 - wqflask/wqflask/__init__.py | 2 -- wqflask/wqflask/api/correlation.py | 4 +--- wqflask/wqflask/api/gen_menu.py | 2 -- wqflask/wqflask/api/mapping.py | 2 -- wqflask/wqflask/api/router.py | 4 +--- wqflask/wqflask/collect.py | 3 --- .../comparison_bar_chart/comparison_bar_chart.py | 1 - wqflask/wqflask/correlation/corr_scatter_plot.py | 4 +--- .../wqflask/correlation/correlation_functions.py | 4 +--- wqflask/wqflask/correlation/show_corr_results.py | 2 -- .../wqflask/correlation_matrix/show_corr_matrix.py | 2 -- wqflask/wqflask/do_search.py | 2 -- wqflask/wqflask/docs.py | 4 +--- wqflask/wqflask/export_traits.py | 4 +--- wqflask/wqflask/external_tools/send_to_bnw.py | 4 
+--- .../wqflask/external_tools/send_to_geneweaver.py | 4 +--- .../wqflask/external_tools/send_to_webgestalt.py | 4 +--- wqflask/wqflask/group_manager.py | 5 +---- wqflask/wqflask/gsearch.py | 2 -- wqflask/wqflask/heatmap/heatmap.py | 2 -- wqflask/wqflask/interval_analyst/GeneUtil.py | 2 -- wqflask/wqflask/marker_regression/run_mapping.py | 4 +--- wqflask/wqflask/model.py | 4 +--- wqflask/wqflask/network_graph/network_graph.py | 2 -- wqflask/wqflask/news.py | 1 - wqflask/wqflask/parser.py | 2 -- wqflask/wqflask/resource_manager.py | 4 +--- wqflask/wqflask/search_results.py | 2 -- wqflask/wqflask/send_mail.py | 2 -- wqflask/wqflask/show_trait/SampleList.py | 2 -- wqflask/wqflask/show_trait/export_trait_data.py | 4 +--- wqflask/wqflask/show_trait/show_trait.py | 2 -- wqflask/wqflask/snp_browser/snp_browser.py | 2 -- wqflask/wqflask/submit_bnw.py | 4 +--- wqflask/wqflask/update_search_results.py | 2 -- wqflask/wqflask/user_login.py | 4 +--- wqflask/wqflask/user_manager.py | 2 -- wqflask/wqflask/user_session.py | 2 -- wqflask/wqflask/views.py | 2 -- 76 files changed, 45 insertions(+), 190 deletions(-) (limited to 'wqflask/utility') diff --git a/test/requests/link_checker.py b/test/requests/link_checker.py index 5e16a5cd..949a33c8 100644 --- a/test/requests/link_checker.py +++ b/test/requests/link_checker.py @@ -1,4 +1,3 @@ -from __future__ import print_function import re import requests from lxml.html import parse diff --git a/test/requests/main_web_functionality.py b/test/requests/main_web_functionality.py index d070dab9..78030307 100644 --- a/test/requests/main_web_functionality.py +++ b/test/requests/main_web_functionality.py @@ -1,9 +1,7 @@ -from __future__ import print_function -import re import requests from lxml.html import parse from link_checker import check_page -from requests.exceptions import ConnectionError + def check_home(url): doc = parse(url).getroot() @@ -11,20 +9,23 @@ def check_home(url): assert(search_button[0].value == "Search") print("OK") + def check_search_page(host): data = dict( - species="mouse" - , group="BXD" - , type="Hippocampus mRNA" - , dataset="HC_M2_0606_P" - , search_terms_or="" - , search_terms_and="MEAN=(15 16) LRS=(23 46)") + species="mouse", + group="BXD", + type="Hippocampus mRNA", + dataset="HC_M2_0606_P", + search_terms_or="", + search_terms_and="MEAN=(15 16) LRS=(23 46)") result = requests.get(host+"/search", params=data) found = result.text.find("records are shown below") assert(found >= 0) assert(result.status_code == 200) print("OK") - check_traits_page(host, "/show_trait?trait_id=1435395_s_at&dataset=HC_M2_0606_P") + check_traits_page(host, ("/show_trait?trait_id=1435395_" + "s_at&dataset=HC_M2_0606_P")) + def check_traits_page(host, traits_url): doc = parse(host+traits_url).getroot() @@ -33,6 +34,7 @@ def check_traits_page(host, traits_url): print("OK") check_page(host, host+traits_url) + def check_main_web_functionality(args_obj, parser): print("") print("Checking main web functionality...") diff --git a/test/requests/mapping_tests.py b/test/requests/mapping_tests.py index 5748a2a3..19b22c21 100644 --- a/test/requests/mapping_tests.py +++ b/test/requests/mapping_tests.py @@ -1,4 +1,3 @@ -from __future__ import print_function import re import copy import json diff --git a/test/requests/navigation_tests.py b/test/requests/navigation_tests.py index eda27324..6b91c1fd 100644 --- a/test/requests/navigation_tests.py +++ b/test/requests/navigation_tests.py @@ -1,4 +1,3 @@ -from __future__ import print_function import re import requests from lxml.html 
import parse diff --git a/test/requests/test-website.py b/test/requests/test-website.py index f90d1843..8bfb47c2 100755 --- a/test/requests/test-website.py +++ b/test/requests/test-website.py @@ -3,7 +3,7 @@ # env GN2_PROFILE=/home/wrk/opt/gn-latest ./bin/genenetwork2 ./etc/default_settings.py -c ../test/requests/test-website.py http://localhost:5003 # # Mostly to pick up the Guix GN2_PROFILE and python modules -from __future__ import print_function + import argparse from link_checker import check_links from link_checker import check_packaged_js_files diff --git a/webtests/browser_run.py b/webtests/browser_run.py index 2ec299c5..7ee540b7 100644 --- a/webtests/browser_run.py +++ b/webtests/browser_run.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division, print_function - __all__ = ('sleep', 'testmod', 'test') from doctest import testmod @@ -71,4 +69,4 @@ class Test(object): -test = Test() \ No newline at end of file +test = Test() diff --git a/webtests/correlation_matrix_test.py b/webtests/correlation_matrix_test.py index 8529c265..97114890 100644 --- a/webtests/correlation_matrix_test.py +++ b/webtests/correlation_matrix_test.py @@ -65,8 +65,6 @@ text: 0.608\n71 """ -from __future__ import absolute_import, division, print_function - from browser_run import * testmod() diff --git a/webtests/correlation_test.py b/webtests/correlation_test.py index aad3a69f..311bb847 100644 --- a/webtests/correlation_test.py +++ b/webtests/correlation_test.py @@ -44,8 +44,6 @@ text: 1.000 """ -from __future__ import absolute_import, division, print_function - from browser_run import * testmod() diff --git a/webtests/marker_regression_test.py b/webtests/marker_regression_test.py index c4f76f53..9b4a4acb 100644 --- a/webtests/marker_regression_test.py +++ b/webtests/marker_regression_test.py @@ -48,8 +48,6 @@ text: 11.511 """ -from __future__ import absolute_import, division, print_function - from browser_run import * testmod() diff --git a/webtests/show_trait_js_test.py b/webtests/show_trait_js_test.py index 0fd2c16c..34ffd3b7 100644 --- a/webtests/show_trait_js_test.py +++ b/webtests/show_trait_js_test.py @@ -35,8 +35,6 @@ style: display: none; """ -from __future__ import absolute_import, division, print_function - from browser_run import * testmod() diff --git a/webtests/test_runner.py b/webtests/test_runner.py index ef6d0d69..b5b590a6 100644 --- a/webtests/test_runner.py +++ b/webtests/test_runner.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division, print_function - import unittest import doctest import glob @@ -25,4 +23,4 @@ def main(): runner.run(suite) if __name__ == '__main__': - main() \ No newline at end of file + main() diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py index 39a8a2ed..8ac7a279 100644 --- a/wqflask/base/data_set.py +++ b/wqflask/base/data_set.py @@ -18,7 +18,6 @@ # # This module is used by GeneNetwork project (www.genenetwork.org) -from __future__ import absolute_import, print_function, division from db.call import fetchall, fetchone, fetch1 from utility.logger import getLogger from utility.tools import USE_GN_SERVER, USE_REDIS, flat_files, flat_file_exists, GN2_BASE_URL diff --git a/wqflask/base/mrna_assay_tissue_data.py b/wqflask/base/mrna_assay_tissue_data.py index 6fec5dcd..33ce12bd 100644 --- a/wqflask/base/mrna_assay_tissue_data.py +++ b/wqflask/base/mrna_assay_tissue_data.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, print_function, division - import collections from flask import g @@ -92,4 +90,4 @@ class 
MrnaAssayTissueData(object): else: symbol_values_dict[result.Symbol.lower()].append(result.value) - return symbol_values_dict \ No newline at end of file + return symbol_values_dict diff --git a/wqflask/base/species.py b/wqflask/base/species.py index 6d99af65..e3f1bc4a 100644 --- a/wqflask/base/species.py +++ b/wqflask/base/species.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, print_function, division - import collections from flask import Flask, g @@ -59,4 +57,4 @@ class Chromosomes(object): results = g.db.execute(query).fetchall() for item in results: - self.chromosomes[item.OrderId] = IndChromosome(item.Name, item.Length) \ No newline at end of file + self.chromosomes[item.OrderId] = IndChromosome(item.Name, item.Length) diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index 09c41fa7..b20efd2a 100644 --- a/wqflask/base/trait.py +++ b/wqflask/base/trait.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division, print_function - import os import string import resource diff --git a/wqflask/maintenance/convert_dryad_to_bimbam.py b/wqflask/maintenance/convert_dryad_to_bimbam.py index e833b395..12ce35e9 100644 --- a/wqflask/maintenance/convert_dryad_to_bimbam.py +++ b/wqflask/maintenance/convert_dryad_to_bimbam.py @@ -6,7 +6,6 @@ Convert data dryad files to a BIMBAM _geno and _snps file """ -from __future__ import print_function, division, absolute_import import sys sys.path.append("..") @@ -67,4 +66,4 @@ def convert_dryad_to_bimbam(filename): if __name__=="__main__": input_filename = "/home/zas1024/cfw_data/" + sys.argv[1] + ".txt" - convert_dryad_to_bimbam(input_filename) \ No newline at end of file + convert_dryad_to_bimbam(input_filename) diff --git a/wqflask/maintenance/convert_geno_to_bimbam.py b/wqflask/maintenance/convert_geno_to_bimbam.py index 528b98cf..d49742f2 100644 --- a/wqflask/maintenance/convert_geno_to_bimbam.py +++ b/wqflask/maintenance/convert_geno_to_bimbam.py @@ -9,7 +9,6 @@ code """ -from __future__ import print_function, division, absolute_import import sys sys.path.append("..") import os @@ -187,4 +186,4 @@ if __name__=="__main__": #convertob = ConvertGenoFile("/home/zas1024/gene/genotype_files/genotypes/SRxSHRSPF2.geno", "/home/zas1024/gene/genotype_files/new_genotypes/SRxSHRSPF2.json") #convertob.convert() ConvertGenoFile.process_all(Old_Geno_Directory, New_Geno_Directory) - #ConvertGenoFiles(Geno_Directory) \ No newline at end of file + #ConvertGenoFiles(Geno_Directory) diff --git a/wqflask/maintenance/gen_select_dataset.py b/wqflask/maintenance/gen_select_dataset.py index fd65a52a..544e2fd1 100644 --- a/wqflask/maintenance/gen_select_dataset.py +++ b/wqflask/maintenance/gen_select_dataset.py @@ -30,14 +30,6 @@ It needs to be run manually when database has been changed. 
Run it as # # This module is used by GeneNetwork project (www.genenetwork.org) -from __future__ import print_function, division - -#from flask import config -# -#cdict = {} -#config = config.Config(cdict).from_envvar('WQFLASK_SETTINGS') -#print("cdict is:", cdict) - import sys # NEW: Note we prepend the current path - otherwise a guix instance of GN2 may be used instead @@ -319,4 +311,4 @@ def _test_it(): if __name__ == '__main__': Conn = MySQLdb.Connect(**parse_db_uri()) Cursor = Conn.cursor() - main() \ No newline at end of file + main() diff --git a/wqflask/maintenance/generate_kinship_from_bimbam.py b/wqflask/maintenance/generate_kinship_from_bimbam.py index b53f5dda..60257b28 100644 --- a/wqflask/maintenance/generate_kinship_from_bimbam.py +++ b/wqflask/maintenance/generate_kinship_from_bimbam.py @@ -8,7 +8,6 @@ and uses GEMMA to generate their corresponding kinship/relatedness matrix file """ -from __future__ import print_function, division, absolute_import import sys sys.path.append("..") import os @@ -58,4 +57,4 @@ if __name__=="__main__": Bimbam_Directory = """/export/local/home/zas1024/genotype_files/genotype/bimbam/""" GenerateKinshipMatrices.process_all(Geno_Directory, Bimbam_Directory) - #./gemma -g /home/zas1024/genotype_files/genotype/bimbam/BXD_geno.txt -p /home/zas1024/genotype_files/genotype/bimbam/BXD_pheno.txt -gk 1 -o BXD \ No newline at end of file + #./gemma -g /home/zas1024/genotype_files/genotype/bimbam/BXD_geno.txt -p /home/zas1024/genotype_files/genotype/bimbam/BXD_pheno.txt -gk 1 -o BXD diff --git a/wqflask/maintenance/generate_probesetfreeze_file.py b/wqflask/maintenance/generate_probesetfreeze_file.py index 4231cc7c..b1e41e9a 100644 --- a/wqflask/maintenance/generate_probesetfreeze_file.py +++ b/wqflask/maintenance/generate_probesetfreeze_file.py @@ -1,7 +1,5 @@ #!/usr/bin/python -from __future__ import absolute_import, print_function, division - import sys # sys.path.insert(0, "..") - why? 
diff --git a/wqflask/maintenance/geno_to_json.py b/wqflask/maintenance/geno_to_json.py index 9579812a..7e7fd241 100644 --- a/wqflask/maintenance/geno_to_json.py +++ b/wqflask/maintenance/geno_to_json.py @@ -9,7 +9,6 @@ code """ -from __future__ import print_function, division, absolute_import import sys sys.path.append("..") import os @@ -194,4 +193,4 @@ if __name__=="__main__": ConvertGenoFile.process_all(Old_Geno_Directory, New_Geno_Directory) #ConvertGenoFiles(Geno_Directory) - #process_csv(Input_File, Output_File) \ No newline at end of file + #process_csv(Input_File, Output_File) diff --git a/wqflask/maintenance/get_group_samplelists.py b/wqflask/maintenance/get_group_samplelists.py index fb22898a..3f9d0278 100644 --- a/wqflask/maintenance/get_group_samplelists.py +++ b/wqflask/maintenance/get_group_samplelists.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, print_function, division - import os import glob import gzip diff --git a/wqflask/maintenance/print_benchmark.py b/wqflask/maintenance/print_benchmark.py index ae327cf3..b24ce4f1 100644 --- a/wqflask/maintenance/print_benchmark.py +++ b/wqflask/maintenance/print_benchmark.py @@ -1,7 +1,5 @@ #!/usr/bin/python -from __future__ import absolute_import, print_function, division - import time from pprint import pformat as pf @@ -40,4 +38,4 @@ def new_main(): print(pf(TheCounter.Counters)) if __name__ == '__main__': - new_main() \ No newline at end of file + new_main() diff --git a/wqflask/maintenance/quantile_normalize.py b/wqflask/maintenance/quantile_normalize.py index 4d6e03bf..701b2b50 100644 --- a/wqflask/maintenance/quantile_normalize.py +++ b/wqflask/maintenance/quantile_normalize.py @@ -1,10 +1,5 @@ -from __future__ import absolute_import, print_function, division - import sys sys.path.insert(0, './') - - - import MySQLdb import urllib.parse @@ -126,4 +121,4 @@ if __name__ == '__main__': } ) - print(response) \ No newline at end of file + print(response) diff --git a/wqflask/maintenance/set_resource_defaults.py b/wqflask/maintenance/set_resource_defaults.py index abd5416c..4177c124 100644 --- a/wqflask/maintenance/set_resource_defaults.py +++ b/wqflask/maintenance/set_resource_defaults.py @@ -16,8 +16,6 @@ To run: """ -from __future__ import print_function, division - import sys import json @@ -163,4 +161,4 @@ def main(): if __name__ == '__main__': Conn = MySQLdb.Connect(**parse_db_uri()) Cursor = Conn.cursor() - main() \ No newline at end of file + main() diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py index 48a5c7ba..61f408d2 100644 --- a/wqflask/utility/Plot.py +++ b/wqflask/utility/Plot.py @@ -24,8 +24,6 @@ # # Last updated by GeneNetwork Core Team 2010/10/20 -from __future__ import print_function - from PIL import ImageColor from PIL import ImageDraw from PIL import ImageFont diff --git a/wqflask/utility/after.py b/wqflask/utility/after.py index b628a0a4..06091ecb 100644 --- a/wqflask/utility/after.py +++ b/wqflask/utility/after.py @@ -1,5 +1,3 @@ -from __future__ import print_function, division, absolute_import - """ See: http://flask.pocoo.org/docs/patterns/deferredcallbacks/#deferred-callbacks @@ -13,4 +11,4 @@ def after_this_request(f): if not hasattr(g, 'after_request_callbacks'): g.after_request_callbacks = [] g.after_request_callbacks.append(f) - return f \ No newline at end of file + return f diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index bc03eb55..0e499180 100644 --- a/wqflask/utility/authentication_tools.py +++ 
b/wqflask/utility/authentication_tools.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, print_function, division - import json import requests diff --git a/wqflask/utility/benchmark.py b/wqflask/utility/benchmark.py index 221e5151..ea5a0ab6 100644 --- a/wqflask/utility/benchmark.py +++ b/wqflask/utility/benchmark.py @@ -1,5 +1,3 @@ -from __future__ import print_function, division, absolute_import - import collections import inspect import time diff --git a/wqflask/utility/chunks.py b/wqflask/utility/chunks.py index d91b9bf4..9a7db102 100644 --- a/wqflask/utility/chunks.py +++ b/wqflask/utility/chunks.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, print_function, division - import math import time diff --git a/wqflask/utility/db_tools.py b/wqflask/utility/db_tools.py index 4034f39c..92bde505 100644 --- a/wqflask/utility/db_tools.py +++ b/wqflask/utility/db_tools.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, print_function, division - from MySQLdb import escape_string as escape def create_in_clause(items): diff --git a/wqflask/utility/gen_geno_ob.py b/wqflask/utility/gen_geno_ob.py index ae42f834..81085ffe 100644 --- a/wqflask/utility/gen_geno_ob.py +++ b/wqflask/utility/gen_geno_ob.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division, print_function - import utility.logger logger = utility.logger.getLogger(__name__ ) @@ -178,4 +176,4 @@ class Locus(object): if allele in list(geno_table.keys()): self.genotype.append(geno_table[allele]) else: #ZS: Some genotype appears that isn't specified in the metadata, make it unknown - self.genotype.append("U") \ No newline at end of file + self.genotype.append("U") diff --git a/wqflask/utility/genofile_parser.py b/wqflask/utility/genofile_parser.py index af306731..0b736176 100644 --- a/wqflask/utility/genofile_parser.py +++ b/wqflask/utility/genofile_parser.py @@ -1,7 +1,6 @@ # CTL analysis for GN2 # Author / Maintainer: Danny Arends -from __future__ import print_function, division, absolute_import import sys import os import glob diff --git a/wqflask/utility/helper_functions.py b/wqflask/utility/helper_functions.py index 107c9ec6..7eb7f013 100644 --- a/wqflask/utility/helper_functions.py +++ b/wqflask/utility/helper_functions.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, print_function, division - from base import data_set from base.trait import create_trait from base.species import TheSpecies diff --git a/wqflask/utility/hmac.py b/wqflask/utility/hmac.py index b08be97e..73e28790 100644 --- a/wqflask/utility/hmac.py +++ b/wqflask/utility/hmac.py @@ -1,5 +1,3 @@ -from __future__ import print_function, division, absolute_import - import hmac import hashlib @@ -37,4 +35,4 @@ def url_for_hmac(endpoint, **values): return url + combiner + "hm=" + hm app.jinja_env.globals.update(url_for_hmac=url_for_hmac, - data_hmac=data_hmac) \ No newline at end of file + data_hmac=data_hmac) diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py index 1377a564..13ac5cfe 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -1,5 +1,3 @@ -from __future__ import print_function, division, absolute_import - import uuid import simplejson as json import datetime @@ -306,4 +304,4 @@ def change_resource_owner(resource_id, new_owner_id): the_resource['owner_id'] = new_owner_id Redis.delete("resource") - Redis.hset("resources", resource_id, json.dumps(the_resource)) \ No newline at end of file + Redis.hset("resources", resource_id, json.dumps(the_resource)) diff 
--git a/wqflask/utility/temp_data.py b/wqflask/utility/temp_data.py index 2f2726c6..4144ae00 100644 --- a/wqflask/utility/temp_data.py +++ b/wqflask/utility/temp_data.py @@ -1,4 +1,3 @@ -from __future__ import print_function, division, absolute_import from redis import Redis import simplejson as json diff --git a/wqflask/wqflask/__init__.py b/wqflask/wqflask/__init__.py index d729aef5..e73f833f 100644 --- a/wqflask/wqflask/__init__.py +++ b/wqflask/wqflask/__init__.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division, print_function - import sys import jinja2 diff --git a/wqflask/wqflask/api/correlation.py b/wqflask/wqflask/api/correlation.py index eb05645e..7da13121 100644 --- a/wqflask/wqflask/api/correlation.py +++ b/wqflask/wqflask/api/correlation.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division, print_function - import collections import scipy @@ -234,4 +232,4 @@ def init_corr_params(start_vars): 'return_count' : return_count } - return corr_params \ No newline at end of file + return corr_params diff --git a/wqflask/wqflask/api/gen_menu.py b/wqflask/wqflask/api/gen_menu.py index 41966f78..1dcafe1f 100644 --- a/wqflask/wqflask/api/gen_menu.py +++ b/wqflask/wqflask/api/gen_menu.py @@ -1,5 +1,3 @@ -from __future__ import print_function, division - from flask import g diff --git a/wqflask/wqflask/api/mapping.py b/wqflask/wqflask/api/mapping.py index 92c27c9b..d59a69df 100644 --- a/wqflask/wqflask/api/mapping.py +++ b/wqflask/wqflask/api/mapping.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division, print_function - import string from base import data_set diff --git a/wqflask/wqflask/api/router.py b/wqflask/wqflask/api/router.py index 5f9b81b8..acf7ce4a 100644 --- a/wqflask/wqflask/api/router.py +++ b/wqflask/wqflask/api/router.py @@ -1,7 +1,5 @@ # GN2 API -from __future__ import absolute_import, division, print_function - import os, io, csv, json, datetime, requests, yaml import zlib from zipfile import ZipFile, ZIP_DEFLATED @@ -966,4 +964,4 @@ def get_group_id(group_name): if group_id: return group_id[0] else: - return None \ No newline at end of file + return None diff --git a/wqflask/wqflask/collect.py b/wqflask/wqflask/collect.py index 06c00930..15383603 100644 --- a/wqflask/wqflask/collect.py +++ b/wqflask/wqflask/collect.py @@ -1,6 +1,3 @@ -from __future__ import print_function, division, absolute_import - - import os import hashlib import datetime diff --git a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py index 16832621..92de6073 100644 --- a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py +++ b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py @@ -18,7 +18,6 @@ # # This module is used by GeneNetwork project (www.genenetwork.org) -from __future__ import absolute_import, print_function, division from pprint import pformat as pf from base.trait import create_trait diff --git a/wqflask/wqflask/correlation/corr_scatter_plot.py b/wqflask/wqflask/correlation/corr_scatter_plot.py index 57a8d85f..929cd2c9 100644 --- a/wqflask/wqflask/correlation/corr_scatter_plot.py +++ b/wqflask/wqflask/correlation/corr_scatter_plot.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, print_function, division - import math from flask import g @@ -130,4 +128,4 @@ def get_intercept_coords(slope, intercept, x_range, y_range): intercept_coords.append([x1, y1]) intercept_coords.append([x2, y2]) - return intercept_coords \ No newline at end of file + 
return intercept_coords diff --git a/wqflask/wqflask/correlation/correlation_functions.py b/wqflask/wqflask/correlation/correlation_functions.py index abaa212f..b883e361 100644 --- a/wqflask/wqflask/correlation/correlation_functions.py +++ b/wqflask/wqflask/correlation/correlation_functions.py @@ -24,8 +24,6 @@ # # Last updated by NL 2011/03/23 -from __future__ import absolute_import, print_function, division - import math import rpy2.robjects import string @@ -114,4 +112,4 @@ def get_trait_symbol_and_tissue_values(symbol_list=None): tissue_data = MrnaAssayTissueData(gene_symbols=symbol_list) if len(tissue_data.gene_symbols): - return tissue_data.get_symbol_values_pairs() \ No newline at end of file + return tissue_data.get_symbol_values_pairs() diff --git a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py index 5ced30b6..e5b87c6a 100644 --- a/wqflask/wqflask/correlation/show_corr_results.py +++ b/wqflask/wqflask/correlation/show_corr_results.py @@ -18,8 +18,6 @@ # # This module is used by GeneNetwork project (www.genenetwork.org) -from __future__ import absolute_import, print_function, division - import collections import json import scipy diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py index bd5aca1f..7b4bda31 100644 --- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py +++ b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py @@ -18,8 +18,6 @@ # # This module is used by GeneNetwork project (www.genenetwork.org) -from __future__ import absolute_import, print_function, division - import datetime import math import numpy as np diff --git a/wqflask/wqflask/do_search.py b/wqflask/wqflask/do_search.py index cc9c1860..de8e1e78 100644 --- a/wqflask/wqflask/do_search.py +++ b/wqflask/wqflask/do_search.py @@ -1,5 +1,3 @@ -from __future__ import print_function, division - import string import requests import json diff --git a/wqflask/wqflask/docs.py b/wqflask/wqflask/docs.py index 78407e22..8628b81d 100644 --- a/wqflask/wqflask/docs.py +++ b/wqflask/wqflask/docs.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, print_function, division - import codecs from flask import g @@ -42,4 +40,4 @@ def update_text(start_vars): sql = "UPDATE Docs SET content='{0}' WHERE entry='{1}';".format(content, start_vars['entry_type']) g.db.execute(sql) except: - pass \ No newline at end of file + pass diff --git a/wqflask/wqflask/export_traits.py b/wqflask/wqflask/export_traits.py index 2f4e9aac..3a886537 100644 --- a/wqflask/wqflask/export_traits.py +++ b/wqflask/wqflask/export_traits.py @@ -1,5 +1,3 @@ -from __future__ import print_function, division - import csv import xlsxwriter import io @@ -140,4 +138,4 @@ def sort_traits_by_group(trait_list=[]): traits_by_group[trait.dataset.group.name].append(trait) - return traits_by_group \ No newline at end of file + return traits_by_group diff --git a/wqflask/wqflask/external_tools/send_to_bnw.py b/wqflask/wqflask/external_tools/send_to_bnw.py index 68efd10d..efa17f05 100644 --- a/wqflask/wqflask/external_tools/send_to_bnw.py +++ b/wqflask/wqflask/external_tools/send_to_bnw.py @@ -18,8 +18,6 @@ # # This module is used by GeneNetwork project (www.genenetwork.org) -from __future__ import absolute_import, print_function, division - from base.trait import GeneralTrait from utility import helper_functions, corr_result_helpers @@ -69,4 +67,4 @@ class SendToBNW(object): if has_none: continue self.form_value += ",".join(str(cell) for 
cell in row) - self.form_value += ";" \ No newline at end of file + self.form_value += ";" diff --git a/wqflask/wqflask/external_tools/send_to_geneweaver.py b/wqflask/wqflask/external_tools/send_to_geneweaver.py index 9844bab4..93164233 100644 --- a/wqflask/wqflask/external_tools/send_to_geneweaver.py +++ b/wqflask/wqflask/external_tools/send_to_geneweaver.py @@ -18,8 +18,6 @@ # # This module is used by GeneNetwork project (www.genenetwork.org) -from __future__ import absolute_import, print_function, division - import string from flask import Flask, g @@ -109,4 +107,4 @@ def test_chip(trait_list): chip_name = '%s_NA' % result[0] return chip_name - return chip_name \ No newline at end of file + return chip_name diff --git a/wqflask/wqflask/external_tools/send_to_webgestalt.py b/wqflask/wqflask/external_tools/send_to_webgestalt.py index 30ca024f..b255ba95 100644 --- a/wqflask/wqflask/external_tools/send_to_webgestalt.py +++ b/wqflask/wqflask/external_tools/send_to_webgestalt.py @@ -18,8 +18,6 @@ # # This module is used by GeneNetwork project (www.genenetwork.org) -from __future__ import absolute_import, print_function, division - import string from flask import Flask, g @@ -123,4 +121,4 @@ def gen_gene_id_list(trait_list): trait_name_list.append(trait.name) retrieve_trait_info(trait, trait.dataset) gene_id_list.append(str(trait.geneid)) - return trait_name_list, gene_id_list \ No newline at end of file + return trait_name_list, gene_id_list diff --git a/wqflask/wqflask/group_manager.py b/wqflask/wqflask/group_manager.py index 24848ed8..e6079c3e 100644 --- a/wqflask/wqflask/group_manager.py +++ b/wqflask/wqflask/group_manager.py @@ -1,6 +1,3 @@ - -from __future__ import print_function, division, absolute_import - from flask import (Flask, g, render_template, url_for, request, make_response, redirect, flash) @@ -142,4 +139,4 @@ def send_group_invites(group_id, user_email_list = [], user_type="members"): else: send_verification_email(user_details, template_name = "email/group_verification.txt", key_prefix = "verification_code", subject = "You've been invited to join a GeneNetwork user group") -#@app.route() \ No newline at end of file +#@app.route() diff --git a/wqflask/wqflask/gsearch.py b/wqflask/wqflask/gsearch.py index c65a1415..6d797a29 100644 --- a/wqflask/wqflask/gsearch.py +++ b/wqflask/wqflask/gsearch.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, print_function, division - import json from flask import Flask, g diff --git a/wqflask/wqflask/heatmap/heatmap.py b/wqflask/wqflask/heatmap/heatmap.py index 1f1cdb90..cca5a4fc 100644 --- a/wqflask/wqflask/heatmap/heatmap.py +++ b/wqflask/wqflask/heatmap/heatmap.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, print_function, division - import string import os import random diff --git a/wqflask/wqflask/interval_analyst/GeneUtil.py b/wqflask/wqflask/interval_analyst/GeneUtil.py index a39e5d0f..17c8ccbf 100644 --- a/wqflask/wqflask/interval_analyst/GeneUtil.py +++ b/wqflask/wqflask/interval_analyst/GeneUtil.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, print_function, division - import string from flask import Flask, g diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py index 67512bc6..7ae84b16 100644 --- a/wqflask/wqflask/marker_regression/run_mapping.py +++ b/wqflask/wqflask/marker_regression/run_mapping.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, print_function, division - from base.trait import GeneralTrait from base import 
data_set #import create_dataset @@ -711,4 +709,4 @@ def get_perm_strata(this_trait, sample_list, categorical_vars, used_samples): list_to_numbers = [d[x] for x in perm_strata_strings] perm_strata = list_to_numbers - return perm_strata \ No newline at end of file + return perm_strata diff --git a/wqflask/wqflask/model.py b/wqflask/wqflask/model.py index a135761c..772f74e4 100644 --- a/wqflask/wqflask/model.py +++ b/wqflask/wqflask/model.py @@ -1,5 +1,3 @@ -from __future__ import print_function, division, absolute_import - import uuid import datetime @@ -168,4 +166,4 @@ def display_collapsible(number): def user_uuid(): """Unique cookie for a user""" - user_uuid = request.cookies.get('user_uuid') \ No newline at end of file + user_uuid = request.cookies.get('user_uuid') diff --git a/wqflask/wqflask/network_graph/network_graph.py b/wqflask/wqflask/network_graph/network_graph.py index 2a4c4a93..70e5dd58 100644 --- a/wqflask/wqflask/network_graph/network_graph.py +++ b/wqflask/wqflask/network_graph/network_graph.py @@ -18,8 +18,6 @@ # # This module is used by GeneNetwork project (www.genenetwork.org) -from __future__ import absolute_import, print_function, division - import scipy import simplejson as json from pprint import pformat as pf diff --git a/wqflask/wqflask/news.py b/wqflask/wqflask/news.py index 20319756..0675ec4b 100644 --- a/wqflask/wqflask/news.py +++ b/wqflask/wqflask/news.py @@ -1,4 +1,3 @@ -from __future__ import absolute_import, print_function, division from flask import g class News(object): diff --git a/wqflask/wqflask/parser.py b/wqflask/wqflask/parser.py index 1ca5ecff..472dd30e 100644 --- a/wqflask/wqflask/parser.py +++ b/wqflask/wqflask/parser.py @@ -17,8 +17,6 @@ be acceptable.] """ -from __future__ import print_function, division - import re from pprint import pformat as pf diff --git a/wqflask/wqflask/resource_manager.py b/wqflask/wqflask/resource_manager.py index 6b3e00fb..e883d5da 100644 --- a/wqflask/wqflask/resource_manager.py +++ b/wqflask/wqflask/resource_manager.py @@ -1,5 +1,3 @@ -from __future__ import print_function, division, absolute_import - import json from flask import (Flask, g, render_template, url_for, request, make_response, @@ -131,4 +129,4 @@ def get_group_names(group_masks): this_mask['name'] = group_name group_masks_with_names[group_id] = this_mask - return group_masks_with_names \ No newline at end of file + return group_masks_with_names diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py index c07a7670..f6c677a8 100644 --- a/wqflask/wqflask/search_results.py +++ b/wqflask/wqflask/search_results.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, print_function, division - import re import uuid from math import * diff --git a/wqflask/wqflask/send_mail.py b/wqflask/wqflask/send_mail.py index bf5d0dd8..86e8a558 100644 --- a/wqflask/wqflask/send_mail.py +++ b/wqflask/wqflask/send_mail.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division, print_function - import datetime import time diff --git a/wqflask/wqflask/show_trait/SampleList.py b/wqflask/wqflask/show_trait/SampleList.py index f17e825e..6fcf7cec 100644 --- a/wqflask/wqflask/show_trait/SampleList.py +++ b/wqflask/wqflask/show_trait/SampleList.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, print_function, division - from flask import Flask, g from base import webqtlCaseData diff --git a/wqflask/wqflask/show_trait/export_trait_data.py b/wqflask/wqflask/show_trait/export_trait_data.py index 68c3ad7d..2d76b935 100644 --- 
a/wqflask/wqflask/show_trait/export_trait_data.py +++ b/wqflask/wqflask/show_trait/export_trait_data.py @@ -1,5 +1,3 @@ -from __future__ import print_function, division - import simplejson as json from pprint import pformat as pf @@ -71,4 +69,4 @@ def cmp_samples(a, b): else: return 1 else: - return -1 \ No newline at end of file + return -1 diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index 6a74cada..e93b0289 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, print_function, division - import string import os import datetime diff --git a/wqflask/wqflask/snp_browser/snp_browser.py b/wqflask/wqflask/snp_browser/snp_browser.py index b3d26caf..0db7e1fe 100644 --- a/wqflask/wqflask/snp_browser/snp_browser.py +++ b/wqflask/wqflask/snp_browser/snp_browser.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, print_function, division - from flask import Flask, g, url_for import string diff --git a/wqflask/wqflask/submit_bnw.py b/wqflask/wqflask/submit_bnw.py index 59e60dfd..a0e84c8c 100644 --- a/wqflask/wqflask/submit_bnw.py +++ b/wqflask/wqflask/submit_bnw.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, print_function, division - from base.trait import GeneralTrait from base import data_set from utility import helper_functions @@ -8,4 +6,4 @@ import utility.logger logger = utility.logger.getLogger(__name__ ) def get_bnw_input(start_vars): - logger.debug("BNW VARS:", start_vars) \ No newline at end of file + logger.debug("BNW VARS:", start_vars) diff --git a/wqflask/wqflask/update_search_results.py b/wqflask/wqflask/update_search_results.py index 68bea9d6..672f95b1 100644 --- a/wqflask/wqflask/update_search_results.py +++ b/wqflask/wqflask/update_search_results.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, print_function, division - import json from flask import Flask, g diff --git a/wqflask/wqflask/user_login.py b/wqflask/wqflask/user_login.py index 04672b45..10cb7319 100644 --- a/wqflask/wqflask/user_login.py +++ b/wqflask/wqflask/user_login.py @@ -1,5 +1,3 @@ -from __future__ import print_function, division, absolute_import - import os import hashlib import datetime @@ -470,4 +468,4 @@ def register(): @app.errorhandler(401) def unauthorized(error): - return redirect(url_for('login')) \ No newline at end of file + return redirect(url_for('login')) diff --git a/wqflask/wqflask/user_manager.py b/wqflask/wqflask/user_manager.py index 3c41e2b8..24191a15 100644 --- a/wqflask/wqflask/user_manager.py +++ b/wqflask/wqflask/user_manager.py @@ -1,5 +1,3 @@ -from __future__ import print_function, division, absolute_import - import os import hashlib import datetime diff --git a/wqflask/wqflask/user_session.py b/wqflask/wqflask/user_session.py index 71c31c57..c1f38396 100644 --- a/wqflask/wqflask/user_session.py +++ b/wqflask/wqflask/user_session.py @@ -1,5 +1,3 @@ -from __future__ import print_function, division, absolute_import - import datetime import time import uuid diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 83d5202e..b0489e64 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -2,8 +2,6 @@ # # Main routing table for GN2 -from __future__ import absolute_import, division, print_function - import traceback # for error page import os # for error gifs import random # for random error gif -- cgit v1.2.3 From 357ca458695fbc60c97de3d1cdf89034a8722bc5 Mon Sep 17 00:00:00 2001 
From: BonfaceKilz Date: Thu, 27 Aug 2020 01:18:11 +0300 Subject: Replace "string.split" & "string.join" with python's inbuilt methods --- scripts/maintenance/readProbeSetMean_v7.py | 20 ++++++++++---------- scripts/maintenance/readProbeSetSE_v7.py | 14 +++++++------- wqflask/base/data_set.py | 2 +- wqflask/base/trait.py | 14 +++++++------- wqflask/utility/webqtlUtil.py | 4 ++-- wqflask/wqflask/external_tools/send_to_geneweaver.py | 2 +- wqflask/wqflask/external_tools/send_to_webgestalt.py | 2 +- wqflask/wqflask/interval_analyst/GeneUtil.py | 4 ++-- .../marker_regression/display_mapping_results.py | 2 +- wqflask/wqflask/marker_regression/plink_mapping.py | 6 +++--- wqflask/wqflask/show_trait/show_trait.py | 4 ++-- wqflask/wqflask/snp_browser/snp_browser.py | 12 ++++++------ 12 files changed, 43 insertions(+), 43 deletions(-) (limited to 'wqflask/utility') diff --git a/scripts/maintenance/readProbeSetMean_v7.py b/scripts/maintenance/readProbeSetMean_v7.py index a540796a..43f084f4 100755 --- a/scripts/maintenance/readProbeSetMean_v7.py +++ b/scripts/maintenance/readProbeSetMean_v7.py @@ -60,15 +60,15 @@ print('Checking if each line have same number of members') GeneList = [] isCont = 1 header = fp.readline() -header = string.split(string.strip(header), '\t') -header = list(map(string.strip, header)) +header = header.strip().split('\t') +header = [x.strip() for x in header] nfield = len(header) line = fp.readline() kj = 0 while line: - line2 = string.split(string.strip(line), '\t') - line2 = list(map(string.strip, line2)) + line2 = line.strip().split('\t') + line2 = [x.strip() for x in line2] if len(line2) != nfield: print(("Error : " + line)) isCont = 0 @@ -98,8 +98,8 @@ print('Checking if each strain exist in database') isCont = 1 fp.seek(0) header = fp.readline() -header = string.split(string.strip(header), '\t') -header = list(map(string.strip, header)) +header = header.strip().split('\t') +header = [x.strip() for x in header] header = list(map(translateAlias, header)) header = header[dataStart:] Ids = [] @@ -126,8 +126,8 @@ print('Check if each ProbeSet exist in database') ##---- find PID is name or target ----## line = fp.readline() line = fp.readline() -line2 = string.split(string.strip(line), '\t') -line2 = list(map(string.strip, line2)) +line2 = line.strip().split('\t') +line2 = [x.strip() for x in line2] PId = line2[0] db.execute('select Id from ProbeSet where Name="%s" and ChipId=%d' % @@ -222,8 +222,8 @@ kj = 0 values1 = [] values2 = [] while line: - line2 = string.split(string.strip(line), '\t') - line2 = list(map(string.strip, line2)) + line2 = line.strip().split('\t') + line2 = [x.strip() for x in line2] PId = line2[0] recordId = NameIds[PId] diff --git a/scripts/maintenance/readProbeSetSE_v7.py b/scripts/maintenance/readProbeSetSE_v7.py index 20a846a4..edd9e7b0 100755 --- a/scripts/maintenance/readProbeSetSE_v7.py +++ b/scripts/maintenance/readProbeSetSE_v7.py @@ -71,14 +71,14 @@ print('Checking if each line have same number of members') GeneList = [] isCont = 1 header = fp.readline() -header = string.split(string.strip(header), '\t') +header = header.strip().split('\t') header = list(map(string.strip, header)) nfield = len(header) line = fp.readline() kj = 0 while line: - line2 = string.split(string.strip(line), '\t') + line2 = line.strip().split('\t') line2 = list(map(string.strip, line2)) if len(line2) != nfield: isCont = 0 @@ -109,7 +109,7 @@ print('Checking if each strain exist in database') isCont = 1 fp.seek(0) header = fp.readline() -header = 
string.split(string.strip(header), '\t') +header = header.strip().split('\t') header = list(map(string.strip, header)) header = list(map(translateAlias, header)) header = header[dataStart:] @@ -137,8 +137,8 @@ print('Check if each ProbeSet exist in database') ##---- find PID is name or target ----## line = fp.readline() line = fp.readline() -line2 = string.split(string.strip(line), '\t') -line2 = list(map(string.strip, line2)) +line2 = line.strip().split('\t') +line2 = [x.strip() for x in line2] PId = line2[0] db.execute('select Id from ProbeSet where Name="%s" and ChipId=%d' % @@ -217,8 +217,8 @@ line = fp.readline() kj = 0 while line: - line2 = string.split(string.strip(line), '\t') - line2 = list(map(string.strip, line2)) + line2 = line.strip().split('\t') + line2 = [x.strip() for x in line2] CellId = line2[0] if CellId not in ProbeNameId: diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py index 8ac7a279..ce017fb4 100644 --- a/wqflask/base/data_set.py +++ b/wqflask/base/data_set.py @@ -697,7 +697,7 @@ class DataSet(object): else: query = "SELECT {}.Name,".format(escape(dataset_type)) data_start_pos = 1 - query += string.join(temp, ', ') + query += ', '.join(temp) query += ' FROM ({}, {}XRef, {}Freeze) '.format(*mescape(dataset_type, self.type, self.type)) diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index b20efd2a..2fd5d725 100644 --- a/wqflask/base/trait.py +++ b/wqflask/base/trait.py @@ -150,8 +150,8 @@ class GeneralTrait(object): alias = 'Not available' if getattr(self, "alias", None): - alias = string.replace(self.alias, ";", " ") - alias = string.join(string.split(alias), ", ") + alias = self.alias.replace(";", " ") + alias = ", ".join(alias.split()) return alias @@ -437,7 +437,7 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): #XZ, 05/08/2009: We also should use Geno.Id to find marker instead of just using Geno.Name # to avoid the problem of same marker name from different species. elif dataset.type == 'Geno': - display_fields_string = string.join(dataset.display_fields, ',Geno.') + display_fields_string = ',Geno.'.join(dataset.display_fields) display_fields_string = 'Geno.' 
+ display_fields_string query = """ SELECT %s @@ -456,8 +456,8 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): query = """SELECT %s FROM %s WHERE Name = %s""" logger.sql(query) trait_info = g.db.execute(query, - (string.join(dataset.display_fields, ','), - dataset.type, trait.name)).fetchone() + ','.join(dataset.display_fields), + dataset.type, trait.name).fetchone() if trait_info: trait.haveinfo = True @@ -501,8 +501,8 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): trait.pubmed_link = webqtlConfig.PUBMEDLINK_URL % trait.pubmed_id if dataset.type == 'ProbeSet' and dataset.group: - description_string = str(str(trait.description).strip(codecs.BOM_UTF8), 'utf-8') - target_string = str(str(trait.probe_target_description).strip(codecs.BOM_UTF8), 'utf-8') + description_string = trait.description + target_string = trait.probe_target_description if len(description_string) > 1 and description_string != 'None': description_display = description_string diff --git a/wqflask/utility/webqtlUtil.py b/wqflask/utility/webqtlUtil.py index d4979011..5681fadf 100644 --- a/wqflask/utility/webqtlUtil.py +++ b/wqflask/utility/webqtlUtil.py @@ -107,7 +107,7 @@ def hasAccessToConfidentialPhenotypeTrait(privilege, userName, authorized_users) if webqtlConfig.USERDICT[privilege] > webqtlConfig.USERDICT['user']: access_to_confidential_phenotype_trait = 1 else: - AuthorizedUsersList=list(map(string.strip, string.split(authorized_users, ','))) - if AuthorizedUsersList.__contains__(userName): + AuthorizedUsersList=[x.strip() for x in authorized_users.split(',')] + if userName in AuthorizedUsersList: access_to_confidential_phenotype_trait = 1 return access_to_confidential_phenotype_trait diff --git a/wqflask/wqflask/external_tools/send_to_geneweaver.py b/wqflask/wqflask/external_tools/send_to_geneweaver.py index 93164233..4c958a88 100644 --- a/wqflask/wqflask/external_tools/send_to_geneweaver.py +++ b/wqflask/wqflask/external_tools/send_to_geneweaver.py @@ -55,7 +55,7 @@ class SendToGeneWeaver(object): 'client': "genenetwork", 'species': species_name, 'idtype': self.chip_name, - 'list': string.join(trait_name_list, ","), + 'list': ",".join(trait_name_list), } def get_trait_name_list(trait_list): diff --git a/wqflask/wqflask/external_tools/send_to_webgestalt.py b/wqflask/wqflask/external_tools/send_to_webgestalt.py index b255ba95..2f068792 100644 --- a/wqflask/wqflask/external_tools/send_to_webgestalt.py +++ b/wqflask/wqflask/external_tools/send_to_webgestalt.py @@ -47,7 +47,7 @@ class SendToWebGestalt(object): id_type = "entrezgene" self.hidden_vars = { - 'gene_list' : string.join(gene_id_list, "\n"), + 'gene_list' : "\n".join(gene_id_list), 'id_type' : "entrezgene", 'ref_set' : "genome", 'enriched_database_category' : "geneontology", diff --git a/wqflask/wqflask/interval_analyst/GeneUtil.py b/wqflask/wqflask/interval_analyst/GeneUtil.py index 17c8ccbf..d0dd7aea 100644 --- a/wqflask/wqflask/interval_analyst/GeneUtil.py +++ b/wqflask/wqflask/interval_analyst/GeneUtil.py @@ -31,7 +31,7 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): Chromosome = '%s' AND ((TxStart > %f and TxStart <= %f) OR (TxEnd > %f and TxEnd <= %f)) ORDER BY txStart - """ % (string.join(fetchFields, ", "), + """ % (", ".join(fetchFields), speciesId, chrName, startMb, endMb, startMb, endMb)).fetchall() @@ -66,7 +66,7 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): othSpec, othSpecId = item newdict2 = {} - resultsOther = g.db.execute("SELECT %s FROM GeneList WHERE SpeciesId = %d 
AND geneSymbol= '%s' LIMIT 1" % (string.join(fetchFields, ", "), + resultsOther = g.db.execute("SELECT %s FROM GeneList WHERE SpeciesId = %d AND geneSymbol= '%s' LIMIT 1" % (", ".join(fetchFields), othSpecId, newdict["GeneSymbol"])).fetchone() diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py index ccdafa14..dfaa1562 100644 --- a/wqflask/wqflask/marker_regression/display_mapping_results.py +++ b/wqflask/wqflask/marker_regression/display_mapping_results.py @@ -454,7 +454,7 @@ class DisplayMappingResults(object): Chr_Length.Name in (%s) Order by Chr_Length.OrderId - """ % (self.dataset.group.name, string.join(["'%s'" % X[0] for X in self.ChrList[1:]], ", "))) + """ % (self.dataset.group.name, ", ".join(["'%s'" % X[0] for X in self.ChrList[1:]]))) self.ChrLengthMbList = [x[0]/1000000.0 for x in self.ChrLengthMbList] self.ChrLengthMbSum = reduce(lambda x, y:x+y, self.ChrLengthMbList, 0.0) diff --git a/wqflask/wqflask/marker_regression/plink_mapping.py b/wqflask/wqflask/marker_regression/plink_mapping.py index 2f282adc..6c38c34f 100644 --- a/wqflask/wqflask/marker_regression/plink_mapping.py +++ b/wqflask/wqflask/marker_regression/plink_mapping.py @@ -83,7 +83,7 @@ def get_samples_from_ped_file(dataset): sample_list=[] while line: - lineList = string.split(string.strip(line), '\t') + lineList = line.strip().split('\t') lineList = list(map(string.strip, lineList)) sample_name = lineList[0] @@ -155,8 +155,8 @@ def parse_plink_output(output_filename, species): # output: lineList list ####################################################### def build_line_list(line=None): - line_list = string.split(string.strip(line), ' ')# irregular number of whitespaces between columns + line_list = line.strip().split(' ')# irregular number of whitespaces between columns line_list = [item for item in line_list if item !=''] line_list = list(map(string.strip, line_list)) - return line_list \ No newline at end of file + return line_list diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index e93b0289..88cd7dca 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -229,8 +229,8 @@ class ShowTrait(object): hddn = OrderedDict() if self.dataset.group.allsamples: - hddn['allsamples'] = string.join(self.dataset.group.allsamples, ' ') - hddn['primary_samples'] = string.join(self.primary_sample_names, ',') + hddn['allsamples'] = ''.join(self.dataset.group.allsamples) + hddn['primary_samples'] = ''.join(self.primary_sample_names) hddn['trait_id'] = self.trait_id hddn['trait_display_name'] = self.this_trait.display_name hddn['dataset'] = self.dataset.name diff --git a/wqflask/wqflask/snp_browser/snp_browser.py b/wqflask/wqflask/snp_browser/snp_browser.py index 0db7e1fe..2df71b12 100644 --- a/wqflask/wqflask/snp_browser/snp_browser.py +++ b/wqflask/wqflask/snp_browser/snp_browser.py @@ -456,7 +456,7 @@ class SnpBrowser(object): function_list = [] if function_details: - function_list = string.split(string.strip(function_details), ",") + function_list = function_details.strip().split(",") function_list = list(map(string.strip, function_list)) function_list[0] = function_list[0].title() function_details = ", ".join(item for item in function_list) @@ -722,11 +722,11 @@ def get_effect_details_by_category(effect_name = None, effect_value = None): new_codon_group_list = ['Start Gained'] codon_effect_group_list = ['Start Lost', 'Stop Gained', 'Stop 
Lost', 'Nonsynonymous', 'Synonymous'] - effect_detail_list = string.split(string.strip(effect_value), '|') + effect_detail_list = effect_value.strip().split('|') effect_detail_list = list(map(string.strip, effect_detail_list)) for index, item in enumerate(effect_detail_list): - item_list = string.split(string.strip(item), ',') + item_list = item.strip().split(',') item_list = list(map(string.strip, item_list)) gene_id = item_list[0] @@ -746,13 +746,13 @@ def get_effect_details_by_category(effect_name = None, effect_value = None): if effect_name in new_codon_group_list: new_codon = item_list[6] tmp_list = [biotype, new_codon] - function_detail_list.append(string.join(tmp_list, ", ")) + function_detail_list.append(", ".join(tmp_list)) elif effect_name in codon_effect_group_list: old_new_AA = item_list[6] old_new_codon = item_list[7] codon_num = item_list[8] tmp_list = [biotype, old_new_AA, old_new_codon, codon_num] - function_detail_list.append(string.join(tmp_list, ", ")) + function_detail_list.append(", ".join(tmp_list)) else: function_detail_list.append(biotype) @@ -852,7 +852,7 @@ def get_gene_id_name_dict(species_id, gene_name_list): if len(gene_name_list) == 0: return "" gene_name_str_list = ["'" + gene_name + "'" for gene_name in gene_name_list] - gene_name_str = string.join(gene_name_str_list, ",") + gene_name_str = ",".join(gene_name_str_list) query = """ SELECT -- cgit v1.2.3 From 391c1681eaeabfdbe65a64a1bb8b05beca30141e Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Thu, 27 Aug 2020 01:23:29 +0300 Subject: Add global method to convert binary string to plain string * wqflask/utility/db_tools.py: escape_string returns a binary string which introduces a bug when composing sql query string. The escaped strings have to be converted to plain text. --- wqflask/utility/db_tools.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/db_tools.py b/wqflask/utility/db_tools.py index 92bde505..6e19778f 100644 --- a/wqflask/utility/db_tools.py +++ b/wqflask/utility/db_tools.py @@ -1,4 +1,5 @@ -from MySQLdb import escape_string as escape +from MySQLdb import escape_string as escape_ + def create_in_clause(items): """Create an in clause for mysql""" @@ -6,8 +7,11 @@ def create_in_clause(items): in_clause = '( {} )'.format(in_clause) return in_clause + def mescape(*items): """Multiple escape""" - escaped = [escape(str(item)) for item in items] - #print("escaped is:", escaped) - return escaped + return [escape_(str(item)).decode('utf8') for item in items] + + +def escape(string_): + return escape_(string_).decode('utf8') -- cgit v1.2.3 From 46443ec8d2cdfd7c60358a889d90a90e4f7daaf4 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Thu, 27 Aug 2020 01:32:34 +0300 Subject: Replace string arguments to "hmac.new" with bytearray See: --- wqflask/utility/hmac.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/hmac.py b/wqflask/utility/hmac.py index 73e28790..aa21c741 100644 --- a/wqflask/utility/hmac.py +++ b/wqflask/utility/hmac.py @@ -10,7 +10,7 @@ def hmac_creation(stringy): secret = app.config['SECRET_HMAC_CODE'] - hmaced = hmac.new(secret, stringy, hashlib.sha1) + hmaced = hmac.new(bytearray(secret, 'utf8'), bytearray(stringy, 'utf8'), hashlib.sha1) hm = hmaced.hexdigest() # ZS: Leaving the below comment here to ask Pjotr about # "Conventional wisdom is that you don't lose much in terms of security if you throw away up to half of the output." 
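The db_tools and hmac changes above trace back to the same Python 3 str/bytes split: MySQLdb's escape_string hands back a binary string that has to be decoded before it can be spliced into a query, and hmac.new raises a TypeError when handed plain str keys or messages. A minimal sketch of the HMAC side, with a dummy secret standing in for app.config['SECRET_HMAC_CODE'] and a made-up input string:

    import hmac
    import hashlib

    secret = "dummy-secret"            # assumption: the real key comes from the Flask config
    stringy = "SomeDataset:12345"      # hypothetical dataset:trait string
    digest = hmac.new(bytearray(secret, "utf-8"),
                      bytearray(stringy, "utf-8"),
                      hashlib.sha1).hexdigest()
    hm = digest[:20]                   # truncated to 20 hex characters, as in hmac_creation
    token = stringy + ":" + hm         # the data_hmac() shape appended to links and form values

On the database side the equivalent move is escape_(...).decode('utf8'), which is what the new escape() and mescape() helpers do before the result is composed into SQL.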
-- cgit v1.2.3 From 70dbeeb5832711ed5271434e482c18bc7ea095b8 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Thu, 27 Aug 2020 03:28:43 +0300 Subject: Add check for empty group_code when adding a new resource * wqflask/utility/authentication_tools.py(add_new_resource): If group_code is "None", an error is thrown when you try to: `group_code + "_" + str(trait_id)` --- wqflask/utility/authentication_tools.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index 0e499180..73c39399 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -49,7 +49,10 @@ def add_new_resource(dataset, trait_id=None): } if dataset.type == "Publish": - resource_ob['name'] = get_group_code(dataset) + "_" + str(trait_id) + group_code = get_group_code(dataset) + if group_code is None: + group_code = "" + resource_ob['name'] = group_code + "_" + str(trait_id) resource_ob['data'] = { 'dataset': dataset.id, 'trait' : trait_id @@ -74,7 +77,6 @@ def add_new_resource(dataset, trait_id=None): def get_group_code(dataset): results = g.db.execute("SELECT InbredSetCode from InbredSet where Name='{}'".format(dataset.group.name)).fetchone() - return results[0] def check_admin(resource_id=None): -- cgit v1.2.3 From dba31ef2aa9e3f89621848fdb79b915dbd3c56e2 Mon Sep 17 00:00:00 2001 From: zsloan Date: Sat, 29 Aug 2020 13:47:52 -0500 Subject: Fixed issue where exporting traits would throw an error if a dataset's group didn't have an InbredSetCode set in the DB * wqflask/utility/authentication_tools.py - Changed get_group_code to return an empty string instead of None if InbredSetCode doesn't exist for a dataset group * wqflask/wqflask/views.py - Changed zipped export filename to "export_(datetime)" instead of "collection_(datetime)" since this export can occur from both the collection page and the global search page --- wqflask/utility/authentication_tools.py | 6 ++++-- wqflask/wqflask/views.py | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index ece7022c..3553b92b 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -76,8 +76,10 @@ def add_new_resource(dataset, trait_id=None): def get_group_code(dataset): results = g.db.execute("SELECT InbredSetCode from InbredSet where Name='{}'".format(dataset.group.name)).fetchone() - - return results[0] + if results[0]: + return results[0] + else: + return "" def check_admin(resource_id=None): the_url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id) diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index a898de43..91d1a279 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -419,7 +419,7 @@ def export_traits_csv(): if len(file_list) > 1: now = datetime.datetime.now() time_str = now.strftime('%H:%M_%d%B%Y') - filename = "collection_{}".format(time_str) + filename = "export_{}".format(time_str) memory_file = StringIO.StringIO() with ZipFile(memory_file, mode='w', compression=ZIP_DEFLATED) as zf: for the_file in file_list: -- cgit v1.2.3 From e754baaf51150156c159f59c8ba1fda0ba0a7269 Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 31 Aug 2020 11:25:10 -0500 Subject: Fixed issue that was causing updating resource default privileges to not work * 
wqflask/utility/redis_tools.py - There was an issue where resources wouldn't be updated if they already existed. This is because the code didn't yet account for the "update" tag (that is meant to give the option of preventing updating resources when running the script to enter all resources into Redis). I changed the logic to add a resource if "update" is True or the resource doesn't already exist (so it won't if update is False and the resource exists). --- wqflask/utility/redis_tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py index 1377a564..81ba04ea 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -288,7 +288,7 @@ def add_resource(resource_info, update=True): else: resource_id = hmac.hmac_creation('{}:{}'.format(str(resource_info['type']), str(resource_info['data']['dataset']))) - if not Redis.hexists("resources", resource_id): + if update or not Redis.hexists("resources", resource_id): Redis.hset("resources", resource_id, json.dumps(resource_info)) return resource_info -- cgit v1.2.3 From 90475fed0b2d1bd192a641bd417f6dfef79653d0 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Mon, 31 Aug 2020 20:34:31 +0300 Subject: Remove typo at beginning of line * wqflask/utility/authentication_tools.py: Remove accidental char at beginning of line. --- wqflask/utility/authentication_tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index 68bbb276..c52ebafa 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -88,7 +88,7 @@ def check_admin(resource_id=None): response = json.loads(requests.get(the_url).content)['admin'] except: resource_info = get_resource_info(resource_id) -l response = resource_info['default_mask']['admin'] + response = resource_info['default_mask']['admin'] if 'edit-admins' in response: return "edit-admins" -- cgit v1.2.3 From af0ca059da29391db9b1dfca0fe96883ce95f72f Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Tue, 15 Sep 2020 23:43:09 +0300 Subject: Remove unnecessary if branch * wqflask/utility/authentication_tools.py (check_resource_availability): Combine if statements into one boolean check in one if branch. --- wqflask/utility/authentication_tools.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index 3553b92b..954f55fc 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -17,9 +17,7 @@ logger = logging.getLogger(__name__ ) def check_resource_availability(dataset, trait_id=None): #At least for now assume temporary entered traits are accessible - if type(dataset) == str: - return webqtlConfig.DEFAULT_PRIVILEGES - if dataset.type == "Temp": + if type(dataset) == str or dataset.type == "Temp": return webqtlConfig.DEFAULT_PRIVILEGES resource_id = get_resource_id(dataset, trait_id) -- cgit v1.2.3 From 23521ad683beaaff200743cb37dec68cd19cc2d5 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 16 Sep 2020 00:06:04 +0300 Subject: Apply pep8 * wqflask/utility/authentication_tools.py: Apply pep8 formatting to file. 
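The condition folded together in check_resource_availability a couple of commits up works because Python's `or` short-circuits: when dataset arrives as a plain string (the temporary-trait case), the left test is already true and dataset.type is never evaluated, which matters since a str has no type attribute and would otherwise raise an AttributeError. A quick illustration of that evaluation order:

    dataset = "Temp"                                  # temporary traits arrive as plain strings
    type(dataset) == str or dataset.type == "Temp"    # True via the first test; .type never touched

With a real dataset object the first test is false and the Temp check runs exactly as it did before the two branches were merged.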
--- wqflask/utility/authentication_tools.py | 37 ++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 14 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index 954f55fc..239b08e3 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -1,4 +1,6 @@ from __future__ import absolute_import, print_function, division +import logging +from flask import Flask, g, redirect, url_for import json import requests @@ -9,31 +11,31 @@ from utility import hmac from utility.redis_tools import get_redis_conn, get_resource_info, get_resource_id, add_resource Redis = get_redis_conn() -from flask import Flask, g, redirect, url_for -import logging -logger = logging.getLogger(__name__ ) +logger = logging.getLogger(__name__) + def check_resource_availability(dataset, trait_id=None): - #At least for now assume temporary entered traits are accessible + # At least for now assume temporary entered traits are accessible if type(dataset) == str or dataset.type == "Temp": return webqtlConfig.DEFAULT_PRIVILEGES resource_id = get_resource_id(dataset, trait_id) - if resource_id: #ZS: This should never be false, but it's technically possible if a non-Temp dataset somehow had a type other than Publish/ProbeSet/Geno + if resource_id: # ZS: This should never be false, but it's technically possible if a non-Temp dataset somehow had a type other than Publish/ProbeSet/Geno resource_info = get_resource_info(resource_id) - if not resource_info: #ZS: If resource isn't already in redis, add it with default privileges + if not resource_info: # ZS: If resource isn't already in redis, add it with default privileges resource_info = add_new_resource(dataset, trait_id) - #ZS: Check if super-user - we should probably come up with some way to integrate this into the proxy + # ZS: Check if super-user - we should probably come up with some way to integrate this into the proxy if g.user_session.user_id in Redis.smembers("super_users"): - return webqtlConfig.SUPER_PRIVILEGES + return webqtlConfig.SUPER_PRIVILEGES response = None - the_url = "http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id) + the_url = "http://localhost:8080/available?resource={}&user={}".format( + resource_id, g.user_session.user_id) try: response = json.loads(requests.get(the_url).content) except: @@ -41,18 +43,19 @@ def check_resource_availability(dataset, trait_id=None): return response + def add_new_resource(dataset, trait_id=None): resource_ob = { - 'owner_id' : "none", # webqtlConfig.DEFAULT_OWNER_ID, + 'owner_id': "none", # webqtlConfig.DEFAULT_OWNER_ID, 'default_mask': webqtlConfig.DEFAULT_PRIVILEGES, - 'group_masks' : {} + 'group_masks': {} } if dataset.type == "Publish": resource_ob['name'] = get_group_code(dataset) + "_" + str(trait_id) resource_ob['data'] = { 'dataset': dataset.id, - 'trait' : trait_id + 'trait': trait_id } resource_ob['type'] = 'dataset-publish' elif dataset.type == "Geno": @@ -72,15 +75,19 @@ def add_new_resource(dataset, trait_id=None): return resource_info + def get_group_code(dataset): - results = g.db.execute("SELECT InbredSetCode from InbredSet where Name='{}'".format(dataset.group.name)).fetchone() + results = g.db.execute("SELECT InbredSetCode from InbredSet where Name='{}'".format( + dataset.group.name)).fetchone() if results[0]: return results[0] else: return "" + def check_admin(resource_id=None): - the_url = 
"http://localhost:8080/available?resource={}&user={}".format(resource_id, g.user_session.user_id) + the_url = "http://localhost:8080/available?resource={}&user={}".format( + resource_id, g.user_session.user_id) try: response = json.loads(requests.get(the_url).content)['admin'] except: @@ -94,6 +101,7 @@ def check_admin(resource_id=None): else: return "not-admin" + def check_owner(dataset=None, trait_id=None, resource_id=None): if resource_id: resource_info = get_resource_info(resource_id) @@ -108,6 +116,7 @@ def check_owner(dataset=None, trait_id=None, resource_id=None): return False + def check_owner_or_admin(dataset=None, trait_id=None, resource_id=None): if not resource_id: if dataset.type == "Temp": -- cgit v1.2.3 From 225c360d0a5c57957fe2bc3299108e9b39f12929 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Thu, 17 Sep 2020 15:55:35 +0300 Subject: Apply pep8 * wqflask/utility/hmac.py: Apply pep8 and fix typo. --- wqflask/utility/hmac.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/hmac.py b/wqflask/utility/hmac.py index b08be97e..fd75803e 100644 --- a/wqflask/utility/hmac.py +++ b/wqflask/utility/hmac.py @@ -7,11 +7,11 @@ from flask import url_for from wqflask import app + def hmac_creation(stringy): """Helper function to create the actual hmac""" secret = app.config['SECRET_HMAC_CODE'] - hmaced = hmac.new(secret, stringy, hashlib.sha1) hm = hmaced.hexdigest() # ZS: Leaving the below comment here to ask Pjotr about @@ -20,10 +20,12 @@ def hmac_creation(stringy): hm = hm[:20] return hm + def data_hmac(stringy): - """Takes arbitray data string and appends :hmac so we know data hasn't been tampered with""" + """Takes arbitrary data string and appends :hmac so we know data hasn't been tampered with""" return stringy + ":" + hmac_creation(stringy) + def url_for_hmac(endpoint, **values): """Like url_for but adds an hmac at the end to insure the url hasn't been tampered with""" @@ -36,5 +38,6 @@ def url_for_hmac(endpoint, **values): combiner = "?" return url + combiner + "hm=" + hm + app.jinja_env.globals.update(url_for_hmac=url_for_hmac, - data_hmac=data_hmac) \ No newline at end of file + data_hmac=data_hmac) -- cgit v1.2.3 From f4a8789a5f28e1527d4c801b40176f47aa44146c Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Thu, 17 Sep 2020 17:16:28 +0300 Subject: Apply pep8 * wqflask/base/trait.py: Apply pep8. * wqflask/utility/authentication_tools.py: Ditto. 
--- wqflask/base/trait.py | 192 +++++++++++++++++++------------- wqflask/utility/authentication_tools.py | 32 ++++-- 2 files changed, 134 insertions(+), 90 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index 6950cf11..548ccc4c 100644 --- a/wqflask/base/trait.py +++ b/wqflask/base/trait.py @@ -1,36 +1,30 @@ -import os -import string -import resource -import codecs import requests -import random +import simplejson as json +from wqflask import app from base import webqtlConfig from base.webqtlCaseData import webqtlCaseData from base.data_set import create_dataset -from db import webqtlDatabaseFunction -from utility import webqtlUtil from utility import hmac from utility.authentication_tools import check_resource_availability -from utility.tools import GN2_BASE_URL, GN_VERSION -from utility.redis_tools import get_redis_conn, get_resource_id, get_resource_info -Redis = get_redis_conn() +from utility.tools import GN2_BASE_URL +from utility.redis_tools import get_redis_conn, get_resource_id -from wqflask import app - -import simplejson as json from utility.db_tools import escape -from pprint import pformat as pf -from flask import Flask, g, request, url_for, redirect, make_response, render_template +from flask import g, request, url_for from utility.logger import getLogger -logger = getLogger(__name__ ) + +logger = getLogger(__name__) + +Redis = get_redis_conn() + def create_trait(**kw): - assert bool(kw.get('dataset')) != bool(kw.get('dataset_name')), "Needs dataset ob. or name"; + assert bool(kw.get('dataset')) != bool( + kw.get('dataset_name')), "Needs dataset ob. or name" - permitted = True if kw.get('name'): if kw.get('dataset_name'): if kw.get('dataset_name') != "Temp": @@ -40,18 +34,23 @@ def create_trait(**kw): if kw.get('dataset_name') != "Temp": if dataset.type == 'Publish': - permissions = check_resource_availability(dataset, kw.get('name')) + permissions = check_resource_availability( + dataset, kw.get('name')) else: permissions = check_resource_availability(dataset) if "view" in permissions['data']: the_trait = GeneralTrait(**kw) if the_trait.dataset.type != "Temp": - the_trait = retrieve_trait_info(the_trait, the_trait.dataset, get_qtl_info=kw.get('get_qtl_info')) + the_trait = retrieve_trait_info( + the_trait, + the_trait.dataset, + get_qtl_info=kw.get('get_qtl_info')) return the_trait else: return None + class GeneralTrait(object): """ Trait class defines a trait in webqtl, can be either Microarray, @@ -61,12 +60,17 @@ class GeneralTrait(object): def __init__(self, get_qtl_info=False, get_sample_info=True, **kw): # xor assertion - assert bool(kw.get('dataset')) != bool(kw.get('dataset_name')), "Needs dataset ob. or name"; - self.name = kw.get('name') # Trait ID, ProbeSet ID, Published ID, etc. + assert bool(kw.get('dataset')) != bool( + kw.get('dataset_name')), "Needs dataset ob. or name" + # Trait ID, ProbeSet ID, Published ID, etc. 
+ self.name = kw.get('name') if kw.get('dataset_name'): if kw.get('dataset_name') == "Temp": temp_group = self.name.split("_")[2] - self.dataset = create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = temp_group) + self.dataset = create_dataset( + dataset_name="Temp", + dataset_type="Temp", + group_name=temp_group) else: self.dataset = create_dataset(kw.get('dataset_name')) else: @@ -74,7 +78,8 @@ class GeneralTrait(object): self.cellid = kw.get('cellid') self.identification = kw.get('identification', 'un-named trait') self.haveinfo = kw.get('haveinfo', False) - self.sequence = kw.get('sequence') # Blat sequence, available for ProbeSet + # Blat sequence, available for ProbeSet + self.sequence = kw.get('sequence') self.data = kw.get('data', {}) self.view = True @@ -100,9 +105,10 @@ class GeneralTrait(object): elif len(name2) == 3: self.dataset, self.name, self.cellid = name2 - # Todo: These two lines are necessary most of the time, but perhaps not all of the time - # So we could add a simple if statement to short-circuit this if necessary - if get_sample_info != False: + # Todo: These two lines are necessary most of the time, but + # perhaps not all of the time So we could add a simple if + # statement to short-circuit this if necessary + if get_sample_info is not False: self = retrieve_sample_data(self, self.dataset) def export_informative(self, include_variance=0): @@ -116,13 +122,13 @@ class GeneralTrait(object): the_vars = [] sample_aliases = [] for sample_name, sample_data in list(self.data.items()): - if sample_data.value != None: - if not include_variance or sample_data.variance != None: + if sample_data.value is not None: + if not include_variance or sample_data.variance is not None: samples.append(sample_name) vals.append(sample_data.value) the_vars.append(sample_data.variance) sample_aliases.append(sample_data.name2) - return samples, vals, the_vars, sample_aliases + return samples, vals, the_vars, sample_aliases @property def description_fmt(self): @@ -161,12 +167,17 @@ class GeneralTrait(object): alias = 'Not available' if self.symbol: - human_response = requests.get(GN2_BASE_URL + "gn3/gene/aliases/" + self.symbol.upper()) - mouse_response = requests.get(GN2_BASE_URL + "gn3/gene/aliases/" + self.symbol.capitalize()) - other_response = requests.get(GN2_BASE_URL + "gn3/gene/aliases/" + self.symbol.lower()) + human_response = requests.get( + GN2_BASE_URL + "gn3/gene/aliases/" + self.symbol.upper()) + mouse_response = requests.get( + GN2_BASE_URL + "gn3/gene/aliases/" + self.symbol.capitalize()) + other_response = requests.get( + GN2_BASE_URL + "gn3/gene/aliases/" + self.symbol.lower()) if human_response and mouse_response and other_response: - alias_list = json.loads(human_response.content) + json.loads(mouse_response.content) + json.loads(other_response.content) + alias_list = json.loads(human_response.content) + json.loads( + mouse_response.content) + \ + json.loads(other_response.content) filtered_aliases = [] seen = set() @@ -180,33 +191,34 @@ class GeneralTrait(object): return alias - @property def location_fmt(self): '''Return a text formatted location - While we're at it we set self.location in case we need it later (do we?) + While we're at it we set self.location in case we need it + later (do we?) 
''' if self.chr and self.mb: - self.location = 'Chr %s @ %s Mb' % (self.chr, self.mb) + self.location = 'Chr %s @ %s Mb' % (self.chr, self.mb) elif self.chr: self.location = 'Chr %s @ Unknown position' % (self.chr) else: self.location = 'Not available' fmt = self.location - ##XZ: deal with direction + # XZ: deal with direction if self.strand_probe == '+': fmt += (' on the plus strand ') elif self.strand_probe == '-': fmt += (' on the minus strand ') return fmt - + + def retrieve_sample_data(trait, dataset, samplelist=None): - if samplelist == None: + if samplelist is None: samplelist = [] if dataset.type == "Temp": @@ -222,16 +234,19 @@ def retrieve_sample_data(trait, dataset, samplelist=None): all_samples_ordered = dataset.group.all_samples_ordered() for i, item in enumerate(results): try: - trait.data[all_samples_ordered[i]] = webqtlCaseData(all_samples_ordered[i], float(item)) + trait.data[all_samples_ordered[i]] = webqtlCaseData( + all_samples_ordered[i], float(item)) except: pass else: for item in results: name, value, variance, num_cases, name2 = item if not samplelist or (samplelist and name in samplelist): - trait.data[name] = webqtlCaseData(*item) #name, value, variance, num_cases) + # name, value, variance, num_cases) + trait.data[name] = webqtlCaseData(*item) return trait + @app.route("/trait/get_sample_data") def get_sample_data(): params = request.args @@ -247,7 +262,8 @@ def get_sample_data(): trait_dict['group'] = trait_ob.dataset.group.name trait_dict['tissue'] = trait_ob.dataset.tissue trait_dict['species'] = trait_ob.dataset.group.species - trait_dict['url'] = url_for('show_trait_page', trait_id = trait, dataset = dataset) + trait_dict['url'] = url_for( + 'show_trait_page', trait_id=trait, dataset=dataset) trait_dict['description'] = trait_ob.description_display if trait_ob.dataset.type == "ProbeSet": trait_dict['symbol'] = trait_ob.symbol @@ -257,22 +273,27 @@ def get_sample_data(): trait_dict['pubmed_link'] = trait_ob.pubmed_link trait_dict['pubmed_text'] = trait_ob.pubmed_text - return json.dumps([trait_dict, {key: value.value for key, value in list(trait_ob.data.items()) }]) + return json.dumps([trait_dict, {key: value.value for + key, value in list( + trait_ob.data.items())}]) else: return None - + + def jsonable(trait): """Return a dict suitable for using as json Actual turning into json doesn't happen here though""" - dataset = create_dataset(dataset_name = trait.dataset.name, dataset_type = trait.dataset.type, group_name = trait.dataset.group.name) - + dataset = create_dataset(dataset_name=trait.dataset.name, + dataset_type=trait.dataset.type, + group_name=trait.dataset.group.name) + if dataset.type == "ProbeSet": return dict(name=trait.name, symbol=trait.symbol, dataset=dataset.name, - dataset_name = dataset.shortname, + dataset_name=dataset.shortname, description=trait.description_display, mean=trait.mean, location=trait.location_repr, @@ -284,7 +305,7 @@ def jsonable(trait): if trait.pubmed_id: return dict(name=trait.name, dataset=dataset.name, - dataset_name = dataset.shortname, + dataset_name=dataset.shortname, description=trait.description_display, abbreviation=trait.abbreviation, authors=trait.authors, @@ -297,7 +318,7 @@ def jsonable(trait): else: return dict(name=trait.name, dataset=dataset.name, - dataset_name = dataset.shortname, + dataset_name=dataset.shortname, description=trait.description_display, abbreviation=trait.abbreviation, authors=trait.authors, @@ -309,19 +330,20 @@ def jsonable(trait): elif dataset.type == "Geno": return 
dict(name=trait.name, dataset=dataset.name, - dataset_name = dataset.shortname, + dataset_name=dataset.shortname, location=trait.location_repr ) else: return dict() + def jsonable_table_row(trait, dataset_name, index): """Return a list suitable for json and intended to be displayed in a table Actual turning into json doesn't happen here though""" dataset = create_dataset(dataset_name) - + if dataset.type == "ProbeSet": if trait.mean == "": mean = "N/A" @@ -333,11 +355,13 @@ def jsonable_table_row(trait, dataset_name, index): additive = "%.3f" % round(float(trait.additive), 2) return ['', index, - ''+str(trait.name)+'', + ''+str(trait.name)+'', trait.symbol, trait.description_display, trait.location_repr, - mean, + mean, trait.LRS_score_repr, trait.LRS_location_repr, additive] @@ -349,7 +373,9 @@ def jsonable_table_row(trait, dataset_name, index): if trait.pubmed_id: return ['', index, - ''+str(trait.name)+'', + ''+str(trait.name)+'', trait.description_display, trait.authors, '' + trait.pubmed_text + '', @@ -359,7 +385,9 @@ def jsonable_table_row(trait, dataset_name, index): else: return ['', index, - ''+str(trait.name)+'', + ''+str(trait.name)+'', trait.description_display, trait.authors, trait.pubmed_text, @@ -369,7 +397,9 @@ def jsonable_table_row(trait, dataset_name, index): elif dataset.type == "Geno": return ['', index, - ''+str(trait.name)+'', + ''+str(trait.name)+'', trait.location_repr] else: return dict() @@ -380,14 +410,16 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): resource_id = get_resource_id(dataset, trait.name) if dataset.type == 'Publish': - the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view".format(resource_id, g.user_session.user_id) + the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view".format( + resource_id, g.user_session.user_id) else: - the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view&trait={}".format(resource_id, g.user_session.user_id, trait.name) + the_url = "http://localhost:8080/run-action?resource={}&user={}&branch=data&action=view&trait={}".format( + resource_id, g.user_session.user_id, trait.name) try: response = requests.get(the_url).content trait_info = json.loads(response) - except: #ZS: I'm assuming the trait is viewable if the try fails for some reason; it should never reach this point unless the user has privileges, since that's dealt with in create_trait + except: # ZS: I'm assuming the trait is viewable if the try fails for some reason; it should never reach this point unless the user has privileges, since that's dealt with in create_trait if dataset.type == 'Publish': query = """ SELECT @@ -416,8 +448,8 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): logger.sql(query) trait_info = g.db.execute(query).fetchone() - #XZ, 05/08/2009: Xiaodong add this block to use ProbeSet.Id to find the probeset instead of just using ProbeSet.Name - #XZ, 05/08/2009: to avoid the problem of same probeset name from different platforms. + # XZ, 05/08/2009: Xiaodong add this block to use ProbeSet.Id to find the probeset instead of just using ProbeSet.Name + # XZ, 05/08/2009: to avoid the problem of same probeset name from different platforms. elif dataset.type == 'ProbeSet': display_fields_string = ', ProbeSet.'.join(dataset.display_fields) display_fields_string = 'ProbeSet.' 
+ display_fields_string @@ -430,11 +462,11 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): ProbeSetFreeze.Name = '%s' AND ProbeSet.Name = '%s' """ % (escape(display_fields_string), - escape(dataset.name), - escape(str(trait.name))) + escape(dataset.name), + escape(str(trait.name))) logger.sql(query) trait_info = g.db.execute(query).fetchone() - #XZ, 05/08/2009: We also should use Geno.Id to find marker instead of just using Geno.Name + # XZ, 05/08/2009: We also should use Geno.Id to find marker instead of just using Geno.Name # to avoid the problem of same marker name from different species. elif dataset.type == 'Geno': display_fields_string = ',Geno.'.join(dataset.display_fields) @@ -448,11 +480,11 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): GenoFreeze.Name = '%s' AND Geno.Name = '%s' """ % (escape(display_fields_string), - escape(dataset.name), - escape(trait.name)) + escape(dataset.name), + escape(trait.name)) logger.sql(query) trait_info = g.db.execute(query).fetchone() - else: #Temp type + else: # Temp type query = """SELECT %s FROM %s WHERE Name = %s""" logger.sql(query) trait_info = g.db.execute(query, @@ -462,7 +494,7 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): if trait_info: trait.haveinfo = True for i, field in enumerate(dataset.display_fields): - holder = trait_info[i] + holder = trait_info[i] setattr(trait, field, holder) if dataset.type == 'Publish': @@ -475,9 +507,9 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): description = trait.post_publication_description - #If the dataset is confidential and the user has access to confidential - #phenotype traits, then display the pre-publication description instead - #of the post-publication description + # If the dataset is confidential and the user has access to confidential + # phenotype traits, then display the pre-publication description instead + # of the post-publication description if trait.confidential: trait.abbreviation = trait.pre_publication_abbreviation trait.description_display = trait.pre_publication_description @@ -514,15 +546,17 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): trait.location_repr = 'N/A' if trait.chr and trait.mb: - trait.location_repr = 'Chr%s: %.6f' % (trait.chr, float(trait.mb)) + trait.location_repr = 'Chr%s: %.6f' % ( + trait.chr, float(trait.mb)) elif dataset.type == "Geno": trait.location_repr = 'N/A' if trait.chr and trait.mb: - trait.location_repr = 'Chr%s: %.6f' % (trait.chr, float(trait.mb)) + trait.location_repr = 'Chr%s: %.6f' % ( + trait.chr, float(trait.mb)) if get_qtl_info: - #LRS and its location + # LRS and its location trait.LRS_score_repr = "N/A" trait.LRS_location_repr = "N/A" trait.locus = trait.locus_chr = trait.locus_mb = trait.lrs = trait.pvalue = trait.additive = "" @@ -594,10 +628,12 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): trait.locus = trait.lrs = trait.additive = "" if (dataset.type == 'Publish' or dataset.type == "ProbeSet") and trait.locus_chr != "" and trait.locus_mb != "": - trait.LRS_location_repr = LRS_location_repr = 'Chr%s: %.6f' % (trait.locus_chr, float(trait.locus_mb)) + trait.LRS_location_repr = LRS_location_repr = 'Chr%s: %.6f' % ( + trait.locus_chr, float(trait.locus_mb)) if trait.lrs != "": trait.LRS_score_repr = LRS_score_repr = '%3.1f' % trait.lrs else: - raise KeyError(repr(trait.name)+' information is not found in the database.') - + raise KeyError(repr(trait.name) + + ' information is not found in the database.') + return trait diff --git 
a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index 390ad75a..ce0c0749 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -1,30 +1,37 @@ import json import requests -from base import data_set, webqtlConfig - -from utility import hmac -from utility.redis_tools import get_redis_conn, get_resource_info, get_resource_id, add_resource -Redis = get_redis_conn() +from flask import g +from base import webqtlConfig -logger = logging.getLogger(__name__) +from utility.redis_tools import (get_redis_conn, + get_resource_info, + get_resource_id, + add_resource) +Redis = get_redis_conn() def check_resource_availability(dataset, trait_id=None): - # At least for now assume temporary entered traits are accessible if type(dataset) == str or dataset.type == "Temp": return webqtlConfig.DEFAULT_PRIVILEGES resource_id = get_resource_id(dataset, trait_id) - if resource_id: # ZS: This should never be false, but it's technically possible if a non-Temp dataset somehow had a type other than Publish/ProbeSet/Geno + # ZS: This should never be false, but it's technically possible if + # a non-Temp dataset somehow had a type other than + # Publish/ProbeSet/Geno + if resource_id: resource_info = get_resource_info(resource_id) - if not resource_info: # ZS: If resource isn't already in redis, add it with default privileges + + # ZS: If resource isn't already in redis, add it with default + # privileges + if not resource_info: resource_info = add_new_resource(dataset, trait_id) - # ZS: Check if super-user - we should probably come up with some way to integrate this into the proxy + # ZS: Check if super-user - we should probably come up with some + # way to integrate this into the proxy if g.user_session.user_id in Redis.smembers("super_users"): return webqtlConfig.SUPER_PRIVILEGES @@ -76,8 +83,9 @@ def add_new_resource(dataset, trait_id=None): def get_group_code(dataset): - results = g.db.execute("SELECT InbredSetCode from InbredSet where Name='{}'".format( - dataset.group.name)).fetchone() + results = g.db.execute( + "SELECT InbredSetCode from InbredSet where Name='{}'".format( + dataset.group.name)).fetchone() if results[0]: return results[0] else: -- cgit v1.2.3 From 6064148eb2b723a308f0d29595a75ab64f47e1e2 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Thu, 17 Sep 2020 17:34:11 +0300 Subject: Replace string arguments to "hmac.new" with bytearray Same as: https://github.com/genenetwork/genenetwork2/pull/422/commits/46443ec8d2cdfd7c60358a889d90a90e4f7daaf4 --- wqflask/utility/hmac.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/hmac.py b/wqflask/utility/hmac.py index 10387bb0..6623f69a 100644 --- a/wqflask/utility/hmac.py +++ b/wqflask/utility/hmac.py @@ -10,7 +10,9 @@ def hmac_creation(stringy): """Helper function to create the actual hmac""" secret = app.config['SECRET_HMAC_CODE'] - hmaced = hmac.new(secret, stringy, hashlib.sha1) + hmaced = hmac.new(bytearray(secret, "utf-8"), + bytearray(stringy, "utf-8"), + hashlib.sha1) hm = hmaced.hexdigest() # ZS: Leaving the below comment here to ask Pjotr about # "Conventional wisdom is that you don't lose much in terms of security if you throw away up to half of the output." 
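For context on why this change is needed: under Python 3, hmac.new() only accepts bytes-like objects for the key and the message, so passing the str values that worked under Python 2 raises a TypeError. Below is a minimal, self-contained sketch of the call the patched helper ends up making — the secret and the sample input are placeholders, not the real SECRET_HMAC_CODE or a real GeneNetwork identifier:

    import hashlib
    import hmac

    def hmac_creation_sketch(stringy, secret="placeholder-secret"):
        # Both arguments must be bytes-like in Python 3, so encode the str inputs first.
        hmaced = hmac.new(bytearray(secret, "utf-8"),
                          bytearray(stringy, "utf-8"),
                          hashlib.sha1)
        return hmaced.hexdigest()

    print(hmac_creation_sketch("some-trait:some-dataset"))  # prints a 40-character SHA-1 hex digest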
-- cgit v1.2.3 From 75fe1adb2cfaf542bd22f33e8985938ca988661d Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Fri, 18 Sep 2020 02:45:26 +0300 Subject: Apply pep8 formatting --- wqflask/utility/redis_tools.py | 120 ++++++++++++++++++++++++++++------------- 1 file changed, 84 insertions(+), 36 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py index 81ba04ea..ef02268e 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -4,23 +4,21 @@ import uuid import simplejson as json import datetime -import redis # used for collections - -import logging - -from flask import (render_template, flash) - -from utility import hmac +import redis # used for collections +from utility.hmac import hmac_creation from utility.logger import getLogger logger = getLogger(__name__) + def get_redis_conn(): Redis = redis.StrictRedis(port=6379) return Redis + Redis = get_redis_conn() + def is_redis_available(): try: Redis.ping() @@ -28,6 +26,7 @@ def is_redis_available(): return False return True + def get_user_id(column_name, column_value): user_list = Redis.hgetall("users") key_list = [] @@ -38,6 +37,7 @@ def get_user_id(column_name, column_value): return None + def get_user_by_unique_column(column_name, column_value): item_details = None @@ -52,9 +52,11 @@ def get_user_by_unique_column(column_name, column_value): return item_details + def get_users_like_unique_column(column_name, column_value): - """ - Like previous function, but this only checks if the input is a subset of a field and can return multiple results + """Like previous function, but this only checks if the input is a + subset of a field and can return multiple results + """ matched_users = [] @@ -74,7 +76,6 @@ def get_users_like_unique_column(column_name, column_value): return matched_users -# def search_users_by_unique_column(column_name, column_value): def set_user_attribute(user_id, column_name, column_value): user_info = json.loads(Redis.hget("users", user_id)) @@ -82,6 +83,7 @@ def set_user_attribute(user_id, column_name, column_value): Redis.hset("users", user_id, json.dumps(user_info)) + def get_user_collections(user_id): collections = None collections = Redis.hget("collections", user_id) @@ -91,22 +93,27 @@ def get_user_collections(user_id): else: return [] + def save_user(user, user_id): Redis.hset("users", user_id, json.dumps(user)) + def save_collections(user_id, collections_ob): Redis.hset("collections", user_id, collections_ob) + def save_verification_code(user_email, code): Redis.hset("verification_codes", code, user_email) + def check_verification_code(code): email_address = None user_details = None email_address = Redis.hget("verification_codes", code) if email_address: - user_details = get_user_by_unique_column('email_address', email_address) + user_details = get_user_by_unique_column( + 'email_address', email_address) if user_details: return user_details else: @@ -114,10 +121,12 @@ def check_verification_code(code): else: return None + def get_user_groups(user_id): - #ZS: Get the groups where a user is an admin or a member and return lists corresponding to those two sets of groups - admin_group_ids = [] #ZS: Group IDs where user is an admin - user_group_ids = [] #ZS: Group IDs where user is a regular user + # ZS: Get the groups where a user is an admin or a member and + # return lists corresponding to those two sets of groups + admin_group_ids = [] # ZS: Group IDs where user is an admin + user_group_ids = [] # ZS: Group IDs where user is a 
regular user groups_list = Redis.hgetall("groups") for key in groups_list: try: @@ -142,6 +151,7 @@ def get_user_groups(user_id): return admin_groups, user_groups + def get_group_info(group_id): group_json = Redis.hget("groups", group_id) group_info = None @@ -150,6 +160,7 @@ def get_group_info(group_id): return group_info + def get_group_by_unique_column(column_name, column_value): """ Get group by column; not sure if there's a faster way to do this """ @@ -158,7 +169,8 @@ def get_group_by_unique_column(column_name, column_value): all_group_list = Redis.hgetall("groups") for key in all_group_list: group_info = json.loads(all_group_list[key]) - if column_name == "admins" or column_name == "members": #ZS: Since these fields are lists, search in the list + # ZS: Since these fields are lists, search in the list + if column_name == "admins" or column_name == "members": if column_value in group_info[column_name]: matched_groups.append(group_info) else: @@ -167,9 +179,11 @@ def get_group_by_unique_column(column_name, column_value): return matched_groups + def get_groups_like_unique_column(column_name, column_value): - """ - Like previous function, but this only checks if the input is a subset of a field and can return multiple results + """Like previous function, but this only checks if the input is a + subset of a field and can return multiple results + """ matched_groups = [] @@ -178,7 +192,8 @@ def get_groups_like_unique_column(column_name, column_value): if column_name != "group_id": for key in group_list: group_info = json.loads(group_list[key]) - if column_name == "admins" or column_name == "members": #ZS: Since these fields are lists, search in the list + # ZS: Since these fields are lists, search in the list + if column_name == "admins" or column_name == "members": if column_value in group_info[column_name]: matched_groups.append(group_info) else: @@ -190,13 +205,15 @@ def get_groups_like_unique_column(column_name, column_value): return matched_groups -def create_group(admin_user_ids, member_user_ids = [], group_name = "Default Group Name"): + +def create_group(admin_user_ids, member_user_ids=[], + group_name="Default Group Name"): group_id = str(uuid.uuid4()) new_group = { - "id" : group_id, + "id": group_id, "admins": admin_user_ids, - "members" : member_user_ids, - "name" : group_name, + "members": member_user_ids, + "name": group_name, "created_timestamp": datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'), "changed_timestamp": datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p') } @@ -205,8 +222,9 @@ def create_group(admin_user_ids, member_user_ids = [], group_name = "Default Gro return new_group + def delete_group(user_id, group_id): - #ZS: If user is an admin of a group, remove it from the groups hash + # ZS: If user is an admin of a group, remove it from the groups hash group_info = get_group_info(group_id) if user_id in group_info["admins"]: Redis.hdel("groups", group_id) @@ -214,9 +232,15 @@ def delete_group(user_id, group_id): else: None -def add_users_to_group(user_id, group_id, user_emails = [], admins = False): #ZS "admins" is just to indicate whether the users should be added to the groups admins or regular users set + +# ZS "admins" is just to indicate whether the users should be added to +# the groups admins or regular users set +def add_users_to_group(user_id, group_id, user_emails=[], admins=False): group_info = get_group_info(group_id) - if user_id in group_info["admins"]: #ZS: Just to make sure that the user is an admin for the group, even though they 
shouldn't be able to reach this point unless they are + # ZS: Just to make sure that the user is an admin for the group, + # even though they shouldn't be able to reach this point unless + # they are + if user_id in group_info["admins"]: if admins: group_users = set(group_info["admins"]) else: @@ -231,25 +255,36 @@ def add_users_to_group(user_id, group_id, user_emails = [], admins = False): #ZS else: group_info["members"] = list(group_users) - group_info["changed_timestamp"] = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p') + group_info["changed_timestamp"] = datetime.datetime.utcnow().strftime( + '%b %d %Y %I:%M%p') Redis.hset("groups", group_id, json.dumps(group_info)) return group_info else: return None -def remove_users_from_group(user_id, users_to_remove_ids, group_id, user_type = "members"): #ZS: User type is because I assume admins can remove other admins + +# ZS: User type is because I assume admins can remove other admins +def remove_users_from_group(user_id, + users_to_remove_ids, + group_id, + user_type="members"): group_info = get_group_info(group_id) if user_id in group_info["admins"]: users_to_remove_set = set(users_to_remove_ids) - if user_type == "admins" and user_id in users_to_remove_set: #ZS: Make sure an admin can't remove themselves from a group, since I imagine we don't want groups to be able to become admin-less + # ZS: Make sure an admin can't remove themselves from a group, + # since I imagine we don't want groups to be able to become + # admin-less + if user_type == "admins" and user_id in users_to_remove_set: users_to_remove_set.remove(user_id) group_users = set(group_info[user_type]) group_users -= users_to_remove_set group_info[user_type] = list(group_users) - group_info["changed_timestamp"] = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p') + group_info["changed_timestamp"] = datetime.datetime.utcnow().strftime( + '%b %d %Y %I:%M%p') Redis.hset("groups", group_id, json.dumps(group_info)) + def change_group_name(user_id, group_id, new_name): group_info = get_group_info(group_id) if user_id in group_info["admins"]: @@ -259,22 +294,28 @@ def change_group_name(user_id, group_id, new_name): else: return None + def get_resources(): resource_list = Redis.hgetall("resources") return resource_list + def get_resource_id(dataset, trait_id=None): resource_id = False if dataset.type == "Publish": if trait_id: - resource_id = hmac.hmac_creation("{}:{}:{}".format('dataset-publish', dataset.id, trait_id)) + resource_id = hmac_creation("{}:{}:{}".format( + 'dataset-publish', dataset.id, trait_id)) elif dataset.type == "ProbeSet": - resource_id = hmac.hmac_creation("{}:{}".format('dataset-probeset', dataset.id)) + resource_id = hmac_creation( + "{}:{}".format('dataset-probeset', dataset.id)) elif dataset.type == "Geno": - resource_id = hmac.hmac_creation("{}:{}".format('dataset-geno', dataset.id)) + resource_id = hmac_creation( + "{}:{}".format('dataset-geno', dataset.id)) return resource_id + def get_resource_info(resource_id): resource_info = Redis.hget("resources", resource_id) if resource_info: @@ -282,17 +323,23 @@ def get_resource_info(resource_id): else: return None + def add_resource(resource_info, update=True): if 'trait' in resource_info['data']: - resource_id = hmac.hmac_creation('{}:{}:{}'.format(str(resource_info['type']), str(resource_info['data']['dataset']), str(resource_info['data']['trait']))) + resource_id = hmac_creation('{}:{}:{}'.format( + str(resource_info['type']), str( + resource_info['data']['dataset']), + 
str(resource_info['data']['trait']))) else: - resource_id = hmac.hmac_creation('{}:{}'.format(str(resource_info['type']), str(resource_info['data']['dataset']))) + resource_id = hmac_creation('{}:{}'.format( + str(resource_info['type']), str(resource_info['data']['dataset']))) if update or not Redis.hexists("resources", resource_id): Redis.hset("resources", resource_id, json.dumps(resource_info)) return resource_info + def add_access_mask(resource_id, group_id, access_mask): the_resource = get_resource_info(resource_id) the_resource['group_masks'][group_id] = access_mask @@ -301,9 +348,10 @@ def add_access_mask(resource_id, group_id, access_mask): return the_resource + def change_resource_owner(resource_id, new_owner_id): - the_resource= get_resource_info(resource_id) + the_resource = get_resource_info(resource_id) the_resource['owner_id'] = new_owner_id Redis.delete("resource") - Redis.hset("resources", resource_id, json.dumps(the_resource)) \ No newline at end of file + Redis.hset("resources", resource_id, json.dumps(the_resource)) -- cgit v1.2.3 From ca22bbfdd36351e2b7d8f346b5a3ab81c94f7203 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 28 Oct 2020 23:14:35 +0300 Subject: Use latin-1 encoding when converting hmac secret to bytes * wqflask/utility/hmac.py (hmac_creation): Use latin-1 when serializing the hmac secret. This ensures that the behaviour of *verify_cookie* remains the same as in the python2 tip. --- wqflask/utility/hmac.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/hmac.py b/wqflask/utility/hmac.py index 6623f69a..29891677 100644 --- a/wqflask/utility/hmac.py +++ b/wqflask/utility/hmac.py @@ -10,7 +10,7 @@ def hmac_creation(stringy): """Helper function to create the actual hmac""" secret = app.config['SECRET_HMAC_CODE'] - hmaced = hmac.new(bytearray(secret, "utf-8"), + hmaced = hmac.new(bytearray(secret, "latin-1"), bytearray(stringy, "utf-8"), hashlib.sha1) hm = hmaced.hexdigest() -- cgit v1.2.3 From 04e981ba202a6bd9d969c05855a33040928d15d0 Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 2 Nov 2020 13:21:40 -0600 Subject: Created function for encoding the column value as bytes when fetching from the JSON-formatted string pulled from Redis (since this was needed after the Python 3 switchover) * wqflask/utility/redis_tools.py - Created function load_json_from_redis that encodes the key (column_value) when fetching a value from the JSON pulled from Redis --- wqflask/utility/redis_tools.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py index d855a7fa..236cc755 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -25,6 +25,10 @@ def is_redis_available(): return True +def load_json_from_redis(item_list, column_value): + return json.loads(item_list[str.encode(column_value)]) + + def get_user_id(column_name, column_value): user_list = Redis.hgetall("users") key_list = [] @@ -46,7 +50,7 @@ def get_user_by_unique_column(column_name, column_value): if column_name in user_ob and user_ob[column_name] == column_value: item_details = user_ob else: - item_details = json.loads(user_list[column_value]) + item_details = load_json_from_redis(user_list, column_value) return item_details @@ -70,7 +74,7 @@ def get_users_like_unique_column(column_name, column_value): if column_value in user_ob[column_name]: matched_users.append(user_ob) else: - 
matched_users.append(json.loads(user_list[column_value])) + matched_users.append(load_json_from_redis(user_list, column_value)) return matched_users @@ -199,7 +203,7 @@ def get_groups_like_unique_column(column_name, column_value): if column_value in group_info[column_name]: matched_groups.append(group_info) else: - matched_groups.append(json.loads(group_list[column_value])) + matched_groups.append(load_json_from_redis(group_list, column_value)) return matched_groups -- cgit v1.2.3 From fc717c36d334f364bbae8c98f31ddf78e18d64e3 Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 18 Dec 2020 17:34:47 -0600 Subject: Added redirect_uri to the ORCID_AUTH_URL since it wasn't working due to missing this parameter --- wqflask/utility/tools.py | 2 +- wqflask/wqflask/user_login.py | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py index 68ef0f04..65df59c3 100644 --- a/wqflask/utility/tools.py +++ b/wqflask/utility/tools.py @@ -267,7 +267,7 @@ ORCID_CLIENT_SECRET = get_setting('ORCID_CLIENT_SECRET') ORCID_AUTH_URL = None if ORCID_CLIENT_ID != 'UNKNOWN' and ORCID_CLIENT_SECRET: ORCID_AUTH_URL = "https://orcid.org/oauth/authorize?response_type=code&scope=/authenticate&show_login=true&client_id=" + \ - ORCID_CLIENT_ID+"&client_secret="+ORCID_CLIENT_SECRET + ORCID_CLIENT_ID+"&client_secret="+ORCID_CLIENT_SECRET + "&redirect_uri=" + GN2_BRANCH_URL + "n/login/orcid_oauth2" ORCID_TOKEN_URL = get_setting('ORCID_TOKEN_URL') ELASTICSEARCH_HOST = get_setting('ELASTICSEARCH_HOST') diff --git a/wqflask/wqflask/user_login.py b/wqflask/wqflask/user_login.py index b6266b2a..bc608e84 100644 --- a/wqflask/wqflask/user_login.py +++ b/wqflask/wqflask/user_login.py @@ -25,7 +25,7 @@ from utility.logger import getLogger logger = getLogger(__name__) from smtplib import SMTP -from utility.tools import SMTP_CONNECT, SMTP_USERNAME, SMTP_PASSWORD, LOG_SQL_ALCHEMY +from utility.tools import SMTP_CONNECT, SMTP_USERNAME, SMTP_PASSWORD, LOG_SQL_ALCHEMY, GN2_BRANCH_URL THREE_DAYS = 60 * 60 * 24 * 3 @@ -277,9 +277,11 @@ def orcid_oauth2(): data = { "client_id": ORCID_CLIENT_ID, "client_secret": ORCID_CLIENT_SECRET, - "grant_type": "authorization_code", + "grant_type": "authorization_code", + "redirect_uri": GN2_BRANCH_URL + "n/login/orcid_oauth2", "code": code } + result = requests.post(ORCID_TOKEN_URL, data=data) result_dict = json.loads(result.text.encode("utf-8")) -- cgit v1.2.3 From 2030cc56db878b05e5507c7c3d5ddef39ff223cb Mon Sep 17 00:00:00 2001 From: Alexanderkabui Date: Mon, 14 Dec 2020 16:15:01 +0300 Subject: replace vars with vars_obj which is a function name in python --- wqflask/utility/type_checking.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/type_checking.py b/wqflask/utility/type_checking.py index f15b17e2..6b029317 100644 --- a/wqflask/utility/type_checking.py +++ b/wqflask/utility/type_checking.py @@ -23,20 +23,20 @@ def is_str(value): except: return False -def get_float(vars,name,default=None): - if name in vars: - if is_float(vars[name]): - return float(vars[name]) +def get_float(vars_obj,name,default=None): + if name in vars_obj: + if is_float(vars_obj[name]): + return float(vars_obj[name]) return default -def get_int(vars,name,default=None): - if name in vars: - if is_int(vars[name]): - return float(vars[name]) +def get_int(vars_obj,name,default=None): + if name in vars_obj: + if is_int(vars_obj[name]): + return 
float(vars_obj[name]) return default -def get_string(vars,name,default=None): - if name in vars: - if not vars[name] is None: - return str(vars[name]) +def get_string(vars_obj,name,default=None): + if name in vars_obj: + if not vars_obj[name] is None: + return str(vars_obj[name]) return default -- cgit v1.2.3 From 935d618da8986ad85292836f0762e435deb37108 Mon Sep 17 00:00:00 2001 From: zsloan Date: Wed, 17 Feb 2021 19:24:57 +0000 Subject: Fixed error caused by user IDs being encoded as bytes --- wqflask/utility/redis_tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py index 236cc755..f72d9110 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -26,7 +26,7 @@ def is_redis_available(): def load_json_from_redis(item_list, column_value): - return json.loads(item_list[str.encode(column_value)]) + return json.loads(item_list[str.encode(column_value.decode("utf-8"))]) def get_user_id(column_name, column_value): -- cgit v1.2.3 From f1f80b480ea0f71b7ea34f0b23ad136a56e1bc21 Mon Sep 17 00:00:00 2001 From: zsloan Date: Thu, 18 Feb 2021 18:21:23 +0000 Subject: Changed authentication_tools.py to account for encoding str as bytes and dealing with the permissions response either being a list or not a list (this should be temporary until I figure out what is going on here) --- wqflask/utility/authentication_tools.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index ce0c0749..786475a8 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -11,7 +11,6 @@ from utility.redis_tools import (get_redis_conn, add_resource) Redis = get_redis_conn() - def check_resource_availability(dataset, trait_id=None): # At least for now assume temporary entered traits are accessible if type(dataset) == str or dataset.type == "Temp": @@ -32,13 +31,14 @@ def check_resource_availability(dataset, trait_id=None): # ZS: Check if super-user - we should probably come up with some # way to integrate this into the proxy - if g.user_session.user_id in Redis.smembers("super_users"): + if str.encode(g.user_session.user_id) in Redis.smembers("super_users"): return webqtlConfig.SUPER_PRIVILEGES response = None the_url = "http://localhost:8080/available?resource={}&user={}".format( resource_id, g.user_session.user_id) + try: response = json.loads(requests.get(the_url).content) except: @@ -101,12 +101,13 @@ def check_admin(resource_id=None): resource_info = get_resource_info(resource_id) response = resource_info['default_mask']['admin'] - if 'edit-admins' in response: - return "edit-admins" - elif 'edit-access' in response: - return "edit-access" - else: - return "not-admin" + if type(response) is list: + if 'edit-admins' in response: + return 'edit_admins' + elif 'edit-access' in response: + return 'edit-access' + + return response def check_owner(dataset=None, trait_id=None, resource_id=None): @@ -131,7 +132,7 @@ def check_owner_or_admin(dataset=None, trait_id=None, resource_id=None): else: resource_id = get_resource_id(dataset, trait_id) - if g.user_session.user_id in Redis.smembers("super_users"): + if str.encode(g.user_session.user_id) in Redis.smembers("super_users"): return "owner" resource_info = get_resource_info(resource_id) -- cgit v1.2.3 From cafdd7284f3c2f87e1dbb288e181f1e5aeeccc06 Mon 
Sep 17 00:00:00 2001 From: zsloan Date: Thu, 18 Feb 2021 18:37:16 +0000 Subject: Remove the decode since it was throwing an error --- wqflask/utility/redis_tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py index f72d9110..236cc755 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -26,7 +26,7 @@ def is_redis_available(): def load_json_from_redis(item_list, column_value): - return json.loads(item_list[str.encode(column_value.decode("utf-8"))]) + return json.loads(item_list[str.encode(column_value)]) def get_user_id(column_name, column_value): -- cgit v1.2.3 From 7004219e51a6053a03ec4a997d759c52ed36bdfe Mon Sep 17 00:00:00 2001 From: zsloan Date: Tue, 16 Mar 2021 17:10:50 +0000 Subject: Removed unnecessary encoding of user_id from authentication_tools.py --- wqflask/utility/authentication_tools.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index 786475a8..672b36d5 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -31,7 +31,7 @@ def check_resource_availability(dataset, trait_id=None): # ZS: Check if super-user - we should probably come up with some # way to integrate this into the proxy - if str.encode(g.user_session.user_id) in Redis.smembers("super_users"): + if g.user_session.user_id in Redis.smembers("super_users"): return webqtlConfig.SUPER_PRIVILEGES @@ -132,7 +132,7 @@ def check_owner_or_admin(dataset=None, trait_id=None, resource_id=None): else: resource_id = get_resource_id(dataset, trait_id) - if str.encode(g.user_session.user_id) in Redis.smembers("super_users"): + if g.user_session.user_id in Redis.smembers("super_users"): return "owner" resource_info = get_resource_info(resource_id) -- cgit v1.2.3 From 02b14ccc55978742cd115f3c80bf8ef41ac77ae7 Mon Sep 17 00:00:00 2001 From: zsloan Date: Tue, 16 Mar 2021 17:23:45 +0000 Subject: Fixed encoding to make get_user_groups work properly, since the user IDs for groups in Redis are stored as strings --- wqflask/utility/redis_tools.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py index 236cc755..d05de923 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -133,8 +133,8 @@ def get_user_groups(user_id): for key in groups_list: try: group_ob = json.loads(groups_list[key]) - group_admins = set(group_ob['admins']) - group_members = set(group_ob['members']) + group_admins = set([this_admin.encode('utf-8') if this_admin else None for this_admin in group_ob['admins']]) + group_members = set([this_member.encode('utf-8') if this_member else None for this_member in group_ob['members']]) if user_id in group_admins: admin_group_ids.append(group_ob['id']) elif user_id in group_members: -- cgit v1.2.3 From 696585dda43ec238940b2e1cc239028e31684dc3 Mon Sep 17 00:00:00 2001 From: zsloan Date: Tue, 16 Mar 2021 17:25:14 +0000 Subject: Fixed encoding in load_json_from_redis to account for the fact that column values are sometimes stored as bytes and sometimes as strings --- wqflask/utility/redis_tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/redis_tools.py
b/wqflask/utility/redis_tools.py index d05de923..8052035f 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -26,7 +26,7 @@ def is_redis_available(): def load_json_from_redis(item_list, column_value): - return json.loads(item_list[str.encode(column_value)]) + return json.loads(item_list[column_value]) def get_user_id(column_name, column_value): -- cgit v1.2.3 From 68ee0a995fceaf6aefdd3c8f780e46a83b51a0e8 Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 19 Apr 2021 17:11:41 +0000 Subject: Specify only getting the first two items after splitting the trait/dataset input string --- wqflask/utility/helper_functions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/helper_functions.py b/wqflask/utility/helper_functions.py index 7eb7f013..46eeb35d 100644 --- a/wqflask/utility/helper_functions.py +++ b/wqflask/utility/helper_functions.py @@ -40,7 +40,7 @@ def get_trait_db_obs(self, trait_db_list): data, _separator, hmac_string = trait.rpartition(':') data = data.strip() assert hmac_string==hmac.hmac_creation(data), "Data tampering?" - trait_name, dataset_name = data.split(":") + trait_name, dataset_name = data.split(":")[:2] if dataset_name == "Temp": dataset_ob = data_set.create_dataset(dataset_name=dataset_name, dataset_type="Temp", group_name=trait_name.split("_")[2]) else: -- cgit v1.2.3 From b0ccb12682fed83bf72d22ff42f1f442a8e6176e Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Mon, 19 Apr 2021 14:43:16 +0300 Subject: Remove stale comments --- wqflask/base/data_set.py | 11 ---- wqflask/utility/helper_functions.py | 4 -- wqflask/wqflask/show_trait/show_trait.py | 72 +++++++++++++++----------- wqflask/wqflask/templates/index_page_orig.html | 10 ---- wqflask/wqflask/templates/submit_trait.html | 12 ----- wqflask/wqflask/views.py | 23 ++------ 6 files changed, 46 insertions(+), 86 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py index 178234fe..cc5a428c 100644 --- a/wqflask/base/data_set.py +++ b/wqflask/base/data_set.py @@ -262,8 +262,6 @@ class Markers(object): elif isinstance(p_values, dict): filtered_markers = [] for marker in self.markers: - #logger.debug("marker[name]", marker['name']) - #logger.debug("p_values:", p_values) if marker['name'] in p_values: #logger.debug("marker {} IS in p_values".format(i)) marker['p_value'] = p_values[marker['name']] @@ -276,10 +274,6 @@ class Markers(object): marker['lrs_value'] = - \ math.log10(marker['p_value']) * 4.61 filtered_markers.append(marker) - # else: - #logger.debug("marker {} NOT in p_values".format(i)) - # self.markers.remove(marker) - #del self.markers[i] self.markers = filtered_markers @@ -306,7 +300,6 @@ class HumanMarkers(Markers): marker['Mb'] = float(splat[3]) / 1000000 self.markers.append(marker) - #logger.debug("markers is: ", pf(self.markers)) def add_pvalues(self, p_values): super(HumanMarkers, self).add_pvalues(p_values) @@ -520,7 +513,6 @@ def datasets(group_name, this_group=None): break if tissue_already_exists: - #logger.debug("dataset_menu:", dataset_menu[i]['datasets']) dataset_menu[i]['datasets'].append((dataset, dataset_short)) else: dataset_menu.append(dict(tissue=tissue_name, @@ -735,9 +727,6 @@ class PhenotypeDataSet(DataSet): DS_NAME_MAP['Publish'] = 'PhenotypeDataSet' def setup(self): - - #logger.debug("IS A PHENOTYPEDATASET") - # Fields in the database table self.search_fields = ['Phenotype.Post_publication_description', 'Phenotype.Pre_publication_description', 
diff --git a/wqflask/utility/helper_functions.py b/wqflask/utility/helper_functions.py index 7eb7f013..15d5b3ab 100644 --- a/wqflask/utility/helper_functions.py +++ b/wqflask/utility/helper_functions.py @@ -10,7 +10,6 @@ import logging logger = logging.getLogger(__name__ ) def get_species_dataset_trait(self, start_vars): - #assert type(read_genotype) == type(bool()), "Expecting boolean value for read_genotype" if "temp_trait" in list(start_vars.keys()): if start_vars['temp_trait'] == "True": self.dataset = data_set.create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = start_vars['group']) @@ -27,9 +26,6 @@ def get_species_dataset_trait(self, start_vars): get_qtl_info=True) logger.debug("After creating trait") - #if read_genotype: - #self.dataset.group.read_genotype_file() - #self.genotype = self.dataset.group.genotype def get_trait_db_obs(self, trait_db_list): if isinstance(trait_db_list, str): diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index 6892f02b..ed55d473 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -138,17 +138,12 @@ class ShowTrait(object): self.ncbi_summary = get_ncbi_summary(self.this_trait) - #Get nearest marker for composite mapping + # Get nearest marker for composite mapping if not self.temp_trait: if check_if_attr_exists(self.this_trait, 'locus_chr') and self.dataset.type != "Geno" and self.dataset.type != "Publish": self.nearest_marker = get_nearest_marker(self.this_trait, self.dataset) - #self.nearest_marker1 = get_nearest_marker(self.this_trait, self.dataset)[0] - #self.nearest_marker2 = get_nearest_marker(self.this_trait, self.dataset)[1] else: self.nearest_marker = "" - #self.nearest_marker1 = "" - #self.nearest_marker2 = "" - self.make_sample_lists() @@ -168,16 +163,19 @@ class ShowTrait(object): categorical_var_list = [] if not self.temp_trait: - categorical_var_list = get_categorical_variables(self.this_trait, self.sample_groups[0]) #ZS: Only using first samplelist, since I think mapping only uses those samples + # ZS: Only using first samplelist, since I think mapping only uses those samples + categorical_var_list = get_categorical_variables(self.this_trait, self.sample_groups[0]) - #ZS: Get list of chromosomes to select for mapping + # ZS: Get list of chromosomes to select for mapping self.chr_list = [["All", -1]] for i, this_chr in enumerate(self.dataset.species.chromosomes.chromosomes): self.chr_list.append([self.dataset.species.chromosomes.chromosomes[this_chr].name, i]) self.genofiles = self.dataset.group.get_genofiles() - if "QTLReaper" or "R/qtl" in dataset.group.mapping_names: #ZS: No need to grab scales from .geno file unless it's using a mapping method that reads .geno files + # ZS: No need to grab scales from .geno file unless it's using + # a mapping method that reads .geno files + if "QTLReaper" or "R/qtl" in dataset.group.mapping_names: if self.genofiles: self.scales_in_geno = get_genotype_scales(self.genofiles) else: @@ -187,10 +185,15 @@ class ShowTrait(object): self.has_num_cases = has_num_cases(self.this_trait) - #ZS: Needed to know whether to display bar chart + get max sample name length in order to set table column width + # ZS: Needed to know whether to display bar chart + get max + # sample name length in order to set table column width self.num_values = 0 - self.binary = "true" #ZS: So it knows whether to display the Binary R/qtl mapping method, which doesn't work unless all values are 0 or 1 - 
self.negative_vals_exist = "false" #ZS: Since we don't want to show log2 transform option for situations where it doesn't make sense + # ZS: So it knows whether to display the Binary R/qtl mapping + # method, which doesn't work unless all values are 0 or 1 + self.binary = "true" + # ZS: Since we don't want to show log2 transform option for + # situations where it doesn't make sense + self.negative_vals_exist = "false" max_samplename_width = 1 for group in self.sample_groups: for sample in group.sample_list: @@ -203,7 +206,8 @@ class ShowTrait(object): if sample.value < 0: self.negative_vals_exist = "true" - #ZS: Check whether any attributes have few enough distinct values to show the "Block samples by group" option + # ZS: Check whether any attributes have few enough distinct + # values to show the "Block samples by group" option self.categorical_attr_exists = "false" for attribute in self.sample_groups[0].attributes: if len(self.sample_groups[0].attributes[attribute].distinct_values) <= 10: @@ -258,7 +262,6 @@ class ShowTrait(object): if not self.temp_trait: if hasattr(self.this_trait, 'locus_chr') and self.this_trait.locus_chr != "" and self.dataset.type != "Geno" and self.dataset.type != "Publish": hddn['control_marker'] = self.nearest_marker - #hddn['control_marker'] = self.nearest_marker1+","+self.nearest_marker2 hddn['do_control'] = False hddn['maf'] = 0.05 hddn['mapping_scale'] = "physic" @@ -268,7 +271,8 @@ class ShowTrait(object): if len(self.scales_in_geno) < 2: hddn['mapping_scale'] = self.scales_in_geno[list(self.scales_in_geno.keys())[0]][0][0] - # We'll need access to this_trait and hddn in the Jinja2 Template, so we put it inside self + # We'll need access to this_trait and hddn in the Jinja2 + # Template, so we put it inside self self.hddn = hddn js_data = dict(trait_id = self.trait_id, @@ -294,7 +298,8 @@ class ShowTrait(object): self.js_data = js_data def get_external_links(self): - #ZS: There's some weirdness here because some fields don't exist while others are empty strings + # ZS: There's some weirdness here because some fields don't + # exist while others are empty strings self.pubmed_link = webqtlConfig.PUBMEDLINK_URL % self.this_trait.pubmed_id if check_if_attr_exists(self.this_trait, 'pubmed_id') else None self.ncbi_gene_link = webqtlConfig.NCBI_LOCUSID % self.this_trait.geneid if check_if_attr_exists(self.this_trait, 'geneid') else None self.omim_link = webqtlConfig.OMIM_ID % self.this_trait.omim if check_if_attr_exists(self.this_trait, 'omim') else None @@ -320,7 +325,6 @@ class ShowTrait(object): self.panther_link = webqtlConfig.PANTHER_URL % self.this_trait.symbol self.ebi_gwas_link = webqtlConfig.EBIGWAS_URL % self.this_trait.symbol self.protein_atlas_link = webqtlConfig.PROTEIN_ATLAS_URL % self.this_trait.symbol - #self.open_targets_link = webqtlConfig.OPEN_TARGETS_URL % self.this_trait.symbol if self.dataset.group.species == "mouse" or self.dataset.group.species == "human": self.rgd_link = webqtlConfig.RGD_URL % (self.this_trait.symbol, self.dataset.group.species.capitalize()) @@ -429,7 +433,9 @@ class ShowTrait(object): all_samples_ordered.append(sample) other_sample_names.append(sample) - #ZS: CFW is here because the .geno file doesn't properly contain its full list of samples. This should probably be fixed. + # ZS: CFW is here because the .geno file doesn't properly + # contain its full list of samples. This should probably + # be fixed. 
if self.dataset.group.species == "human" or (set(primary_sample_names) == set(parent_f1_samples)) or self.dataset.group.name == "CFW": primary_sample_names += other_sample_names other_sample_names = [] @@ -445,7 +451,8 @@ class ShowTrait(object): sample_group_type='primary', header=primary_header) - #if other_sample_names and self.dataset.group.species != "human" and self.dataset.group.name != "CFW": + # if other_sample_names and self.dataset.group.species != + # "human" and self.dataset.group.name != "CFW": if len(other_sample_names) > 0: other_sample_names.sort() #Sort other samples if parent_f1_samples: @@ -539,7 +546,8 @@ def get_z_scores(sample_groups): def get_nearest_marker(this_trait, this_db): this_chr = this_trait.locus_chr this_mb = this_trait.locus_mb - #One option is to take flanking markers, another is to take the two (or one) closest + # One option is to take flanking markers, another is to take the + # two (or one) closest query = """SELECT Geno.Name FROM Geno, GenoXRef, GenoFreeze WHERE Geno.Chr = '{}' AND @@ -552,7 +560,6 @@ def get_nearest_marker(this_trait, this_db): if result == []: return "" - #return "", "" else: return result[0][0] @@ -617,7 +624,8 @@ def check_if_attr_exists(the_trait, id_type): def get_ncbi_summary(this_trait): if check_if_attr_exists(this_trait, 'geneid'): - #ZS: Need to switch this try/except to something that checks the output later + # ZS: Need to switch this try/except to something that checks + # the output later try: response = requests.get("http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=gene&id=%s&retmode=json" % this_trait.geneid) summary = json.loads(response.content)['result'][this_trait.geneid]['summary'] @@ -661,8 +669,8 @@ def get_genotype_scales(genofiles): def get_scales_from_genofile(file_location): geno_path = locate_ignore_error(file_location, 'genotype') - - if not geno_path: #ZS: This is just to allow the code to run when + # ZS: This is just to allow the code to run when + if not geno_path: return [["physic", "Mb"]] cm_and_mb_cols_exist = True cm_column = None @@ -670,7 +678,9 @@ def get_scales_from_genofile(file_location): with open(geno_path, "r") as geno_fh: for i, line in enumerate(geno_fh): if line[0] == "#" or line[0] == "@": - if "@scale" in line: #ZS: If the scale is made explicit in the metadata, use that + # ZS: If the scale is made explicit in the metadata, + # use that + if "@scale" in line: scale = line.split(":")[1].strip() if scale == "morgan": return [["morgan", "cM"]] @@ -690,12 +700,16 @@ def get_scales_from_genofile(file_location): mb_column = 3 break - #ZS: This attempts to check whether the cM and Mb columns are 'real', since some .geno files have one column be a copy of the other column, or have one column that is all 0s + # ZS: This attempts to check whether the cM and Mb columns are + # 'real', since some .geno files have one column be a copy of + # the other column, or have one column that is all 0s cm_all_zero = True mb_all_zero = True cm_mb_all_equal = True for i, line in enumerate(geno_fh): - if first_marker_line <= i < first_marker_line + 10: #ZS: I'm assuming there won't be more than 10 markers where the position is listed as 0 + # ZS: I'm assuming there won't be more than 10 markers + # where the position is listed as 0 + if first_marker_line <= i < first_marker_line + 10: if cm_column: cm_val = line.split("\t")[cm_column].strip() if cm_val != "0": @@ -711,8 +725,8 @@ def get_scales_from_genofile(file_location): if i > first_marker_line + 10: break - - #ZS: This assumes that both 
won't be all zero, since if that's the case mapping shouldn't be an option to begin with + # ZS: This assumes that both won't be all zero, since if that's + # the case mapping shouldn't be an option to begin with if mb_all_zero: return [["morgan", "cM"]] elif cm_mb_all_equal: diff --git a/wqflask/wqflask/templates/index_page_orig.html b/wqflask/wqflask/templates/index_page_orig.html index 7f82b35c..87cf1b45 100755 --- a/wqflask/wqflask/templates/index_page_orig.html +++ b/wqflask/wqflask/templates/index_page_orig.html @@ -7,16 +7,6 @@ {% endblock %} {% block content %} - - -
    {{ flash_me() }} diff --git a/wqflask/wqflask/templates/submit_trait.html b/wqflask/wqflask/templates/submit_trait.html index 68b06f55..334a608d 100644 --- a/wqflask/wqflask/templates/submit_trait.html +++ b/wqflask/wqflask/templates/submit_trait.html @@ -61,18 +61,6 @@
    -

    Paste or Type Multiple Values: You can enter data by pasting a series of numbers representing trait values into this area. diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 2c0ba586..c4b510d4 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -156,12 +156,6 @@ def index_page(): import_collections = params['import_collections'] if import_collections == "true": g.user_session.import_traits_to_user(params['anon_id']) - #if USE_GN_SERVER: - # # The menu is generated using GN_SERVER - # return render_template("index_page.html", gn_server_url = GN_SERVER_URL, version=GN_VERSION) - #else: - - # Old style static menu (OBSOLETE) return render_template("index_page_orig.html", version=GN_VERSION) @@ -343,14 +337,10 @@ def intro(): @app.route("/tutorials") def tutorials(): - #doc = Docs("links", request.args) - #return render_template("docs.html", **doc.__dict__) return render_template("tutorials.html") @app.route("/credits") def credits(): - #doc = Docs("links", request.args) - #return render_template("docs.html", **doc.__dict__) return render_template("credits.html") @app.route("/update_text", methods=('POST',)) @@ -368,12 +358,9 @@ def submit_trait_form(): @app.route("/create_temp_trait", methods=('POST',)) def create_temp_trait(): logger.info(request.url) - - #template_vars = submit_trait.SubmitTrait(request.form) - doc = Docs("links") return render_template("links.html", **doc.__dict__) - #return render_template("show_trait.html", **template_vars.__dict__) + @app.route('/export_trait_excel', methods=('POST',)) def export_trait_excel(): @@ -487,21 +474,17 @@ def export_perm_data(): mimetype='text/csv', headers={"Content-Disposition":"attachment;filename=" + file_name + ".csv"}) + @app.route("/show_temp_trait", methods=('POST',)) def show_temp_trait_page(): logger.info(request.url) template_vars = show_trait.ShowTrait(request.form) - #logger.info("js_data before dump:", template_vars.js_data) template_vars.js_data = json.dumps(template_vars.js_data, default=json_default_handler, indent=" ") - # Sorting the keys messes up the ordered dictionary, so don't do that - #sort_keys=True) - - #logger.info("js_data after dump:", template_vars.js_data) - #logger.info("show_trait template_vars:", pf(template_vars.__dict__)) return render_template("show_trait.html", **template_vars.__dict__) + @app.route("/show_trait") def show_trait_page(): logger.info(request.url) -- cgit v1.2.3 From 4534daa6fb07c23b90e024560ca64091fc330eed Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Mon, 19 Apr 2021 17:46:38 +0300 Subject: Move looped sql query into one statement in "get_species_groups" It's in-efficient to have a sql query executed in a loop. As data grows, the query becomes slower. It's better to let sql handle such queries. 
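To make the reshaping concrete, here is a small, self-contained sketch of folding the (species, group) rows returned by the single JOIN into the menu structure the callers expect; the sample rows are made up for illustration. One detail worth noting with this pattern: list.append() returns None, so the per-species list should be mutated in place rather than reassigned to the accumulator.

    def group_rows_by_species(rows):
        # rows are (species_menu_name, group_name) tuples, already ordered by the query
        menu = {}
        for species, group_name in rows:
            # mutate the per-species list in place; append() itself returns None
            menu.setdefault(species, []).append(group_name)
        return [{"species": species, "groups": groups}
                for species, groups in menu.items()]

    # made-up sample rows standing in for the JOIN result
    sample_rows = [("Mouse", "BXD"), ("Mouse", "AKXD"), ("Rat", "HSNIH-Palmer")]
    print(group_rows_by_species(sample_rows))
    # [{'species': 'Mouse', 'groups': ['BXD', 'AKXD']},
    #  {'species': 'Rat', 'groups': ['HSNIH-Palmer']}]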
--- wqflask/utility/helper_functions.py | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/helper_functions.py b/wqflask/utility/helper_functions.py index 15d5b3ab..4ba92ed5 100644 --- a/wqflask/utility/helper_functions.py +++ b/wqflask/utility/helper_functions.py @@ -47,19 +47,18 @@ def get_trait_db_obs(self, trait_db_list): if trait_ob: self.trait_list.append((trait_ob, dataset_ob)) -def get_species_groups(): - - species_query = "SELECT SpeciesId, MenuName FROM Species" - species_ids_and_names = g.db.execute(species_query).fetchall() - - species_and_groups = [] - for species_id, species_name in species_ids_and_names: - this_species_groups = {} - this_species_groups['species'] = species_name - groups_query = "SELECT InbredSetName FROM InbredSet WHERE SpeciesId = %s" % (species_id) - groups = [group[0] for group in g.db.execute(groups_query).fetchall()] - this_species_groups['groups'] = groups - species_and_groups.append(this_species_groups) - - return species_and_groups +def get_species_groups(): + """Group each species into a group""" + _menu = {} + for species, group_name in g.db.execute( + "SELECT s.MenuName, i.InbredSetName FROM InbredSet i " + "INNER JOIN Species s ON s.SpeciesId = i.SpeciesId " + "ORDER BY i.SpeciesId ASC, i.Name ASC").fetchall(): + if _menu.get(species): + _menu = _menu[species].append(group_name) + else: + _menu[species] = [group_name] + return [{"species": key, + "groups": value} for key, value in + list(_menu.items())] -- cgit v1.2.3 From d66a8d2ccbf10b53b31ef094835c5ba8ec672a68 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Fri, 23 Apr 2021 17:35:54 +0300 Subject: Apply PEP-8 --- wqflask/utility/helper_functions.py | 28 +++++++++++++++++----------- wqflask/wqflask/views.py | 11 +++++++++-- 2 files changed, 26 insertions(+), 13 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/helper_functions.py b/wqflask/utility/helper_functions.py index 4ba92ed5..e0ae3414 100644 --- a/wqflask/utility/helper_functions.py +++ b/wqflask/utility/helper_functions.py @@ -4,19 +4,23 @@ from base.species import TheSpecies from utility import hmac -from flask import Flask, g +from flask import g import logging -logger = logging.getLogger(__name__ ) +logger = logging.getLogger(__name__) + def get_species_dataset_trait(self, start_vars): if "temp_trait" in list(start_vars.keys()): - if start_vars['temp_trait'] == "True": - self.dataset = data_set.create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = start_vars['group']) - else: - self.dataset = data_set.create_dataset(start_vars['dataset']) + if start_vars['temp_trait'] == "True": + self.dataset = data_set.create_dataset( + dataset_name="Temp", + dataset_type="Temp", + group_name=start_vars['group']) + else: + self.dataset = data_set.create_dataset(start_vars['dataset']) else: - self.dataset = data_set.create_dataset(start_vars['dataset']) + self.dataset = data_set.create_dataset(start_vars['dataset']) logger.debug("After creating dataset") self.species = TheSpecies(dataset=self.dataset) logger.debug("After creating species") @@ -35,15 +39,17 @@ def get_trait_db_obs(self, trait_db_list): for trait in trait_db_list: data, _separator, hmac_string = trait.rpartition(':') data = data.strip() - assert hmac_string==hmac.hmac_creation(data), "Data tampering?" + assert hmac_string == hmac.hmac_creation(data), "Data tampering?" 
trait_name, dataset_name = data.split(":") if dataset_name == "Temp": - dataset_ob = data_set.create_dataset(dataset_name=dataset_name, dataset_type="Temp", group_name=trait_name.split("_")[2]) + dataset_ob = data_set.create_dataset( + dataset_name=dataset_name, dataset_type="Temp", + group_name=trait_name.split("_")[2]) else: dataset_ob = data_set.create_dataset(dataset_name) trait_ob = create_trait(dataset=dataset_ob, - name=trait_name, - cellid=None) + name=trait_name, + cellid=None) if trait_ob: self.trait_list.append((trait_ob, dataset_ob)) diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 156b4772..36033d80 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -334,26 +334,33 @@ def intro(): return render_template("docs.html", **doc.__dict__) - @app.route("/tutorials") def tutorials(): return render_template("tutorials.html") + @app.route("/credits") def credits(): return render_template("credits.html") + @app.route("/update_text", methods=('POST',)) def update_page(): update_text(request.form) doc = Docs(request.form['entry_type'], request.form) return render_template("docs.html", **doc.__dict__) + @app.route("/submit_trait") def submit_trait_form(): logger.info(request.url) species_and_groups = get_species_groups() - return render_template("submit_trait.html", **{'species_and_groups' : species_and_groups, 'gn_server_url' : GN_SERVER_URL, 'version' : GN_VERSION}) + return render_template( + "submit_trait.html", + **{'species_and_groups': species_and_groups, + 'gn_server_url': GN_SERVER_URL, + 'version': GN_VERSION}) + @app.route("/create_temp_trait", methods=('POST',)) def create_temp_trait(): -- cgit v1.2.3 From 23e8e222e8016ea8335d4850070f884ab9b043fe Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Thu, 29 Apr 2021 21:45:51 +0300 Subject: Run `sed -i 's/(object)//g'` See: https://is.gd/pL7IJF Ran: find . 
\( -type d -name .git -prune \) -o -type f -print0 | xargs -0 sed -i 's/(object)//g' --- webtests/browser_run.py | 2 +- wqflask/base/data_set.py | 6 +++--- wqflask/base/mrna_assay_tissue_data.py | 2 +- wqflask/base/species.py | 6 +++--- wqflask/base/trait.py | 2 +- wqflask/maintenance/convert_geno_to_bimbam.py | 4 ++-- .../maintenance/generate_kinship_from_bimbam.py | 2 +- wqflask/maintenance/geno_to_json.py | 4 ++-- wqflask/maintenance/print_benchmark.py | 2 +- wqflask/utility/__init__.py | 4 ++-- wqflask/utility/benchmark.py | 2 +- wqflask/utility/gen_geno_ob.py | 6 +++--- wqflask/utility/genofile_parser.py | 4 ++-- wqflask/utility/temp_data.py | 2 +- .../comparison_bar_chart/comparison_bar_chart.py | 2 +- wqflask/wqflask/correlation/corr_scatter_plot.py | 2 +- wqflask/wqflask/correlation/show_corr_results.py | 2 +- .../wqflask/correlation_matrix/show_corr_matrix.py | 2 +- wqflask/wqflask/ctl/ctl_analysis.py | 2 +- wqflask/wqflask/db_info.py | 2 +- wqflask/wqflask/do_search.py | 2 +- wqflask/wqflask/docs.py | 2 +- wqflask/wqflask/external_tools/send_to_bnw.py | 2 +- .../wqflask/external_tools/send_to_geneweaver.py | 2 +- .../wqflask/external_tools/send_to_webgestalt.py | 2 +- wqflask/wqflask/gsearch.py | 2 +- wqflask/wqflask/heatmap/heatmap.py | 2 +- .../marker_regression/display_mapping_results.py | 2 +- wqflask/wqflask/marker_regression/run_mapping.py | 2 +- wqflask/wqflask/network_graph/network_graph.py | 2 +- wqflask/wqflask/news.py | 2 +- wqflask/wqflask/search_results.py | 2 +- wqflask/wqflask/server_side.py | 2 +- wqflask/wqflask/show_trait/SampleList.py | 2 +- wqflask/wqflask/show_trait/show_trait.py | 6 +++--- wqflask/wqflask/snp_browser/snp_browser.py | 2 +- wqflask/wqflask/update_search_results.py | 2 +- wqflask/wqflask/user_manager.py | 22 +++++++++++----------- wqflask/wqflask/user_session.py | 2 +- wqflask/wqflask/wgcna/wgcna_analysis.py | 2 +- 40 files changed, 62 insertions(+), 62 deletions(-) (limited to 'wqflask/utility') diff --git a/webtests/browser_run.py b/webtests/browser_run.py index 7ee540b7..6cf46de5 100644 --- a/webtests/browser_run.py +++ b/webtests/browser_run.py @@ -9,7 +9,7 @@ from selenium import webdriver from selenium.common.exceptions import NoSuchElementException, ElementNotVisibleException from selenium.webdriver.common.keys import Keys -class Test(object): +class Test: def __init__(self): #self.browser = webdriver.Chrome('/home/gn2/gn2/webtests/chromedriver') self.browser = webdriver.Firefox() diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py index 1ae138d2..3183363b 100644 --- a/wqflask/base/data_set.py +++ b/wqflask/base/data_set.py @@ -220,7 +220,7 @@ def create_datasets_list(): return datasets -class Markers(object): +class Markers: """Todo: Build in cacheing so it saves us reading the same file more than once""" def __init__(self, name): @@ -316,7 +316,7 @@ class HumanMarkers(Markers): super(HumanMarkers, self).add_pvalues(p_values) -class DatasetGroup(object): +class DatasetGroup: """ Each group has multiple datasets; each species has multiple groups. 
@@ -540,7 +540,7 @@ def datasets(group_name, this_group=None): return dataset_menu -class DataSet(object): +class DataSet: """ DataSet class defines a dataset in webqtl, can be either Microarray, Published phenotype, genotype, or user input dataset(temp) diff --git a/wqflask/base/mrna_assay_tissue_data.py b/wqflask/base/mrna_assay_tissue_data.py index f1929518..1f8224af 100644 --- a/wqflask/base/mrna_assay_tissue_data.py +++ b/wqflask/base/mrna_assay_tissue_data.py @@ -11,7 +11,7 @@ from utility.db_tools import escape from utility.logger import getLogger logger = getLogger(__name__ ) -class MrnaAssayTissueData(object): +class MrnaAssayTissueData: def __init__(self, gene_symbols=None): self.gene_symbols = gene_symbols diff --git a/wqflask/base/species.py b/wqflask/base/species.py index 2771d116..eae3325a 100644 --- a/wqflask/base/species.py +++ b/wqflask/base/species.py @@ -6,7 +6,7 @@ from flask import Flask, g from utility.logger import getLogger logger = getLogger(__name__ ) -class TheSpecies(object): +class TheSpecies: def __init__(self, dataset=None, species_name=None): if species_name != None: self.name = species_name @@ -15,7 +15,7 @@ class TheSpecies(object): self.dataset = dataset self.chromosomes = Chromosomes(dataset=self.dataset) -class IndChromosome(object): +class IndChromosome: def __init__(self, name, length): self.name = name self.length = length @@ -25,7 +25,7 @@ class IndChromosome(object): """Chromosome length in megabases""" return self.length / 1000000 -class Chromosomes(object): +class Chromosomes: def __init__(self, dataset=None, species=None): self.chromosomes = collections.OrderedDict() if species != None: diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index a9223a32..968b6d4b 100644 --- a/wqflask/base/trait.py +++ b/wqflask/base/trait.py @@ -51,7 +51,7 @@ def create_trait(**kw): return None -class GeneralTrait(object): +class GeneralTrait: """ Trait class defines a trait in webqtl, can be either Microarray, Published phenotype, genotype, or user input trait diff --git a/wqflask/maintenance/convert_geno_to_bimbam.py b/wqflask/maintenance/convert_geno_to_bimbam.py index d49742f2..0853b3ac 100644 --- a/wqflask/maintenance/convert_geno_to_bimbam.py +++ b/wqflask/maintenance/convert_geno_to_bimbam.py @@ -22,7 +22,7 @@ from pprint import pformat as pf class EmptyConfigurations(Exception): pass -class Marker(object): +class Marker: def __init__(self): self.name = None self.chr = None @@ -30,7 +30,7 @@ class Marker(object): self.Mb = None self.genotypes = [] -class ConvertGenoFile(object): +class ConvertGenoFile: def __init__(self, input_file, output_files): self.input_file = input_file diff --git a/wqflask/maintenance/generate_kinship_from_bimbam.py b/wqflask/maintenance/generate_kinship_from_bimbam.py index 60257b28..3e4d1741 100644 --- a/wqflask/maintenance/generate_kinship_from_bimbam.py +++ b/wqflask/maintenance/generate_kinship_from_bimbam.py @@ -13,7 +13,7 @@ sys.path.append("..") import os import glob -class GenerateKinshipMatrices(object): +class GenerateKinshipMatrices: def __init__(self, group_name, geno_file, pheno_file): self.group_name = group_name self.geno_file = geno_file diff --git a/wqflask/maintenance/geno_to_json.py b/wqflask/maintenance/geno_to_json.py index 7e7fd241..f5f7e0e7 100644 --- a/wqflask/maintenance/geno_to_json.py +++ b/wqflask/maintenance/geno_to_json.py @@ -29,7 +29,7 @@ class EmptyConfigurations(Exception): pass -class Marker(object): +class Marker: def __init__(self): self.name = None self.chr = None @@ -37,7 +37,7 @@ class 
Marker(object): self.Mb = None self.genotypes = [] -class ConvertGenoFile(object): +class ConvertGenoFile: def __init__(self, input_file, output_file): diff --git a/wqflask/maintenance/print_benchmark.py b/wqflask/maintenance/print_benchmark.py index b24ce4f1..a1046c86 100644 --- a/wqflask/maintenance/print_benchmark.py +++ b/wqflask/maintenance/print_benchmark.py @@ -5,7 +5,7 @@ import time from pprint import pformat as pf -class TheCounter(object): +class TheCounter: Counters = {} def __init__(self): diff --git a/wqflask/utility/__init__.py b/wqflask/utility/__init__.py index 204ff59a..df926884 100644 --- a/wqflask/utility/__init__.py +++ b/wqflask/utility/__init__.py @@ -2,7 +2,7 @@ from pprint import pformat as pf # Todo: Move these out of __init__ -class Bunch(object): +class Bunch: """Like a dictionary but using object notation""" def __init__(self, **kw): self.__dict__ = kw @@ -11,7 +11,7 @@ class Bunch(object): return pf(self.__dict__) -class Struct(object): +class Struct: '''The recursive class for building and representing objects with. From http://stackoverflow.com/a/6573827/1175849 diff --git a/wqflask/utility/benchmark.py b/wqflask/utility/benchmark.py index ea5a0ab6..91ea91e8 100644 --- a/wqflask/utility/benchmark.py +++ b/wqflask/utility/benchmark.py @@ -6,7 +6,7 @@ from utility.tools import LOG_BENCH from utility.logger import getLogger logger = getLogger(__name__ ) -class Bench(object): +class Bench: entries = collections.OrderedDict() def __init__(self, name=None, write_output=LOG_BENCH): diff --git a/wqflask/utility/gen_geno_ob.py b/wqflask/utility/gen_geno_ob.py index 81085ffe..0a381c9b 100644 --- a/wqflask/utility/gen_geno_ob.py +++ b/wqflask/utility/gen_geno_ob.py @@ -1,7 +1,7 @@ import utility.logger logger = utility.logger.getLogger(__name__ ) -class genotype(object): +class genotype: """ Replacement for reaper.Dataset so we can remove qtlreaper use while still generating mapping output figure """ @@ -119,7 +119,7 @@ class genotype(object): self.chromosomes.append(chr_ob) -class Chr(object): +class Chr: def __init__(self, name, geno_ob): self.name = name self.loci = [] @@ -140,7 +140,7 @@ class Chr(object): def add_marker(self, marker_row): self.loci.append(Locus(self.geno_ob, marker_row)) -class Locus(object): +class Locus: def __init__(self, geno_ob, marker_row = None): self.chr = None self.name = None diff --git a/wqflask/utility/genofile_parser.py b/wqflask/utility/genofile_parser.py index 0b736176..f8e96d19 100644 --- a/wqflask/utility/genofile_parser.py +++ b/wqflask/utility/genofile_parser.py @@ -12,7 +12,7 @@ import simplejson as json from pprint import pformat as pf -class Marker(object): +class Marker: def __init__(self): self.name = None self.chr = None @@ -21,7 +21,7 @@ class Marker(object): self.genotypes = [] -class ConvertGenoFile(object): +class ConvertGenoFile: def __init__(self, input_file): self.mb_exists = False diff --git a/wqflask/utility/temp_data.py b/wqflask/utility/temp_data.py index 4144ae00..b2cbd458 100644 --- a/wqflask/utility/temp_data.py +++ b/wqflask/utility/temp_data.py @@ -2,7 +2,7 @@ from redis import Redis import simplejson as json -class TempData(object): +class TempData: def __init__(self, temp_uuid): self.temp_uuid = temp_uuid diff --git a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py index 92de6073..5855ccf0 100644 --- a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py +++ b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py @@ 
-31,7 +31,7 @@ from MySQLdb import escape_string as escape from flask import Flask, g -class ComparisonBarChart(object): +class ComparisonBarChart: def __init__(self, start_vars): trait_db_list = [trait.strip() for trait in start_vars['trait_list'].split(',')] diff --git a/wqflask/wqflask/correlation/corr_scatter_plot.py b/wqflask/wqflask/correlation/corr_scatter_plot.py index c87776bb..d5dc26f5 100644 --- a/wqflask/wqflask/correlation/corr_scatter_plot.py +++ b/wqflask/wqflask/correlation/corr_scatter_plot.py @@ -11,7 +11,7 @@ import numpy as np import utility.logger logger = utility.logger.getLogger(__name__ ) -class CorrScatterPlot(object): +class CorrScatterPlot: """Page that displays a correlation scatterplot with a line fitted to it""" def __init__(self, params): diff --git a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py index fb4dc4f4..cb341e79 100644 --- a/wqflask/wqflask/correlation/show_corr_results.py +++ b/wqflask/wqflask/correlation/show_corr_results.py @@ -58,7 +58,7 @@ TISSUE_METHODS = [METHOD_TISSUE_PEARSON, METHOD_TISSUE_RANK] TISSUE_MOUSE_DB = 1 -class CorrelationResults(object): +class CorrelationResults: def __init__(self, start_vars): # get trait list from db (database name) # calculate correlation with Base vector and targets diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py index f77761d8..d0b4a156 100644 --- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py +++ b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py @@ -41,7 +41,7 @@ Redis = get_redis_conn() THIRTY_DAYS = 60 * 60 * 24 * 30 -class CorrelationMatrix(object): +class CorrelationMatrix: def __init__(self, start_vars): trait_db_list = [trait.strip() for trait in start_vars['trait_list'].split(',')] diff --git a/wqflask/wqflask/ctl/ctl_analysis.py b/wqflask/wqflask/ctl/ctl_analysis.py index 72b4f3a3..1556e370 100644 --- a/wqflask/wqflask/ctl/ctl_analysis.py +++ b/wqflask/wqflask/ctl/ctl_analysis.py @@ -39,7 +39,7 @@ r_write_table = ro.r["write.table"] # Map the write.table function r_data_frame = ro.r["data.frame"] # Map the write.table function r_as_numeric = ro.r["as.numeric"] # Map the write.table function -class CTL(object): +class CTL: def __init__(self): logger.info("Initialization of CTL") #log = r_file("/tmp/genenetwork_ctl.log", open = "wt") diff --git a/wqflask/wqflask/db_info.py b/wqflask/wqflask/db_info.py index f420b472..25e624ef 100644 --- a/wqflask/wqflask/db_info.py +++ b/wqflask/wqflask/db_info.py @@ -10,7 +10,7 @@ from utility.logger import getLogger logger = getLogger(__name__) -class InfoPage(object): +class InfoPage: def __init__(self, start_vars): self.info = None self.gn_accession_id = None diff --git a/wqflask/wqflask/do_search.py b/wqflask/wqflask/do_search.py index 00636563..364a3eed 100644 --- a/wqflask/wqflask/do_search.py +++ b/wqflask/wqflask/do_search.py @@ -17,7 +17,7 @@ from utility.logger import getLogger logger = getLogger(__name__) -class DoSearch(object): +class DoSearch: """Parent class containing parameters/functions used for all searches""" # Used to translate search phrases into classes diff --git a/wqflask/wqflask/docs.py b/wqflask/wqflask/docs.py index 23fc3cad..207767c4 100644 --- a/wqflask/wqflask/docs.py +++ b/wqflask/wqflask/docs.py @@ -5,7 +5,7 @@ from flask import g from utility.logger import getLogger logger = getLogger(__name__) -class Docs(object): +class Docs: def __init__(self, entry, start_vars={}): sql = """ diff 
--git a/wqflask/wqflask/external_tools/send_to_bnw.py b/wqflask/wqflask/external_tools/send_to_bnw.py index efa17f05..c5c79e98 100644 --- a/wqflask/wqflask/external_tools/send_to_bnw.py +++ b/wqflask/wqflask/external_tools/send_to_bnw.py @@ -24,7 +24,7 @@ from utility import helper_functions, corr_result_helpers import utility.logger logger = utility.logger.getLogger(__name__ ) -class SendToBNW(object): +class SendToBNW: def __init__(self, start_vars): trait_db_list = [trait.strip() for trait in start_vars['trait_list'].split(',')] helper_functions.get_trait_db_obs(self, trait_db_list) diff --git a/wqflask/wqflask/external_tools/send_to_geneweaver.py b/wqflask/wqflask/external_tools/send_to_geneweaver.py index 4c958a88..47e4c53a 100644 --- a/wqflask/wqflask/external_tools/send_to_geneweaver.py +++ b/wqflask/wqflask/external_tools/send_to_geneweaver.py @@ -29,7 +29,7 @@ from utility import helper_functions, corr_result_helpers import utility.logger logger = utility.logger.getLogger(__name__ ) -class SendToGeneWeaver(object): +class SendToGeneWeaver: def __init__(self, start_vars): trait_db_list = [trait.strip() for trait in start_vars['trait_list'].split(',')] helper_functions.get_trait_db_obs(self, trait_db_list) diff --git a/wqflask/wqflask/external_tools/send_to_webgestalt.py b/wqflask/wqflask/external_tools/send_to_webgestalt.py index 2f068792..e1e5e655 100644 --- a/wqflask/wqflask/external_tools/send_to_webgestalt.py +++ b/wqflask/wqflask/external_tools/send_to_webgestalt.py @@ -29,7 +29,7 @@ from utility import helper_functions, corr_result_helpers import utility.logger logger = utility.logger.getLogger(__name__ ) -class SendToWebGestalt(object): +class SendToWebGestalt: def __init__(self, start_vars): trait_db_list = [trait.strip() for trait in start_vars['trait_list'].split(',')] helper_functions.get_trait_db_obs(self, trait_db_list) diff --git a/wqflask/wqflask/gsearch.py b/wqflask/wqflask/gsearch.py index 907f1180..9bf23d57 100644 --- a/wqflask/wqflask/gsearch.py +++ b/wqflask/wqflask/gsearch.py @@ -18,7 +18,7 @@ from utility.type_checking import is_float, is_int, is_str, get_float, get_int, from utility.logger import getLogger logger = getLogger(__name__) -class GSearch(object): +class GSearch: def __init__(self, kw): assert('type' in kw) diff --git a/wqflask/wqflask/heatmap/heatmap.py b/wqflask/wqflask/heatmap/heatmap.py index cca5a4fc..20e3559a 100644 --- a/wqflask/wqflask/heatmap/heatmap.py +++ b/wqflask/wqflask/heatmap/heatmap.py @@ -14,7 +14,7 @@ Redis = Redis() logger = getLogger(__name__ ) -class Heatmap(object): +class Heatmap: def __init__(self, start_vars, temp_uuid): trait_db_list = [trait.strip() for trait in start_vars['trait_list'].split(',')] diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py index 6a5fe2f6..4074f098 100644 --- a/wqflask/wqflask/marker_regression/display_mapping_results.py +++ b/wqflask/wqflask/marker_regression/display_mapping_results.py @@ -152,7 +152,7 @@ class HtmlGenWrapper: return map_ -class DisplayMappingResults(object): +class DisplayMappingResults: """Inteval Mapping Plot Page""" cMGraphInterval = 5 GRAPH_MIN_WIDTH = 900 diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py index 8f051c14..7dd0bcb6 100644 --- a/wqflask/wqflask/marker_regression/run_mapping.py +++ b/wqflask/wqflask/marker_regression/run_mapping.py @@ -45,7 +45,7 @@ from base.webqtlConfig import TMPDIR, GENERATED_TEXT_DIR import 
utility.logger logger = utility.logger.getLogger(__name__ ) -class RunMapping(object): +class RunMapping: def __init__(self, start_vars, temp_uuid): helper_functions.get_species_dataset_trait(self, start_vars) diff --git a/wqflask/wqflask/network_graph/network_graph.py b/wqflask/wqflask/network_graph/network_graph.py index 1d5316a2..f5ee5303 100644 --- a/wqflask/wqflask/network_graph/network_graph.py +++ b/wqflask/wqflask/network_graph/network_graph.py @@ -28,7 +28,7 @@ from utility import corr_result_helpers from utility.tools import GN2_BRANCH_URL -class NetworkGraph(object): +class NetworkGraph: def __init__(self, start_vars): trait_db_list = [trait.strip() diff --git a/wqflask/wqflask/news.py b/wqflask/wqflask/news.py index 0675ec4b..861a93f2 100644 --- a/wqflask/wqflask/news.py +++ b/wqflask/wqflask/news.py @@ -1,6 +1,6 @@ from flask import g -class News(object): +class News: def __init__(self): sql = """ diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py index f23c0582..36500a8d 100644 --- a/wqflask/wqflask/search_results.py +++ b/wqflask/wqflask/search_results.py @@ -24,7 +24,7 @@ from utility.type_checking import is_str from utility.logger import getLogger logger = getLogger(__name__ ) -class SearchResultPage(object): +class SearchResultPage: #maxReturn = 3000 def __init__(self, kw): diff --git a/wqflask/wqflask/server_side.py b/wqflask/wqflask/server_side.py index 5f764767..48761fa0 100644 --- a/wqflask/wqflask/server_side.py +++ b/wqflask/wqflask/server_side.py @@ -2,7 +2,7 @@ -class ServerSideTable(object): +class ServerSideTable: """ This class is used to do server-side processing on the DataTables table such as paginating, sorting, diff --git a/wqflask/wqflask/show_trait/SampleList.py b/wqflask/wqflask/show_trait/SampleList.py index 857e4456..f955f632 100644 --- a/wqflask/wqflask/show_trait/SampleList.py +++ b/wqflask/wqflask/show_trait/SampleList.py @@ -8,7 +8,7 @@ from pprint import pformat as pf from utility import Plot from utility import Bunch -class SampleList(object): +class SampleList: def __init__(self, dataset, sample_names, diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index e5e94c7e..f9c5fbe6 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -35,8 +35,7 @@ logger = getLogger(__name__) ############################################## -class ShowTrait(object): - +class ShowTrait: def __init__(self, kw): if 'trait_id' in kw and kw['dataset'] != "Temp": self.temp_trait = False @@ -54,7 +53,8 @@ class ShowTrait(object): self.dataset = data_set.create_dataset( dataset_name="Temp", dataset_type="Temp", group_name=self.temp_group) - # Put values in Redis so they can be looked up later if added to a collection + # Put values in Redis so they can be looked up later if + # added to a collection Redis.set(self.trait_id, kw['trait_paste'], ex=ONE_YEAR) self.trait_vals = kw['trait_paste'].split() self.this_trait = create_trait(dataset=self.dataset, diff --git a/wqflask/wqflask/snp_browser/snp_browser.py b/wqflask/wqflask/snp_browser/snp_browser.py index a52399a2..8658abf8 100644 --- a/wqflask/wqflask/snp_browser/snp_browser.py +++ b/wqflask/wqflask/snp_browser/snp_browser.py @@ -9,7 +9,7 @@ logger = getLogger(__name__) from base import species from base import webqtlConfig -class SnpBrowser(object): +class SnpBrowser: def __init__(self, start_vars): self.strain_lists = get_browser_sample_lists() diff --git 
a/wqflask/wqflask/update_search_results.py b/wqflask/wqflask/update_search_results.py index 672f95b1..22a46ef2 100644 --- a/wqflask/wqflask/update_search_results.py +++ b/wqflask/wqflask/update_search_results.py @@ -10,7 +10,7 @@ from utility.benchmark import Bench from utility.logger import getLogger logger = getLogger(__name__) -class GSearch(object): +class GSearch: def __init__(self, kw): self.type = kw['type'] diff --git a/wqflask/wqflask/user_manager.py b/wqflask/wqflask/user_manager.py index 7b25b68e..fcec3b67 100644 --- a/wqflask/wqflask/user_manager.py +++ b/wqflask/wqflask/user_manager.py @@ -58,7 +58,7 @@ def timestamp(): return datetime.datetime.utcnow().isoformat() -class AnonUser(object): +class AnonUser: """Anonymous user handling""" cookie_name = 'anon_user_v1' @@ -158,7 +158,7 @@ def create_signed_cookie(): logger.debug("uuid_signed:", uuid_signed) return the_uuid, uuid_signed -class UserSession(object): +class UserSession: """Logged in user handling""" cookie_name = 'session_id_v1' @@ -353,12 +353,12 @@ def set_cookie(response): response.set_cookie(g.cookie_session.cookie_name, g.cookie_session.cookie) return response -class UsersManager(object): +class UsersManager: def __init__(self): self.users = model.User.query.all() logger.debug("Users are:", self.users) -class UserManager(object): +class UserManager: def __init__(self, kw): self.user_id = kw['user_id'] logger.debug("In UserManager locals are:", pf(locals())) @@ -377,7 +377,7 @@ class UserManager(object): #logger.debug(" ---> self.datasets:", self.datasets) -class RegisterUser(object): +class RegisterUser: def __init__(self, kw): self.thank_you_mode = False self.errors = [] @@ -454,7 +454,7 @@ def set_password(password, user): ) -class VerificationEmail(object): +class VerificationEmail: template_name = "email/verification.txt" key_prefix = "verification_code" subject = "GeneNetwork email verification" @@ -511,7 +511,7 @@ class ForgotPasswordEmail(VerificationEmail): send_email(toaddr, msg.as_string()) -class Password(object): +class Password: def __init__(self, unencrypted_password, salt, iterations, keylength, hashfunc): hashfunc = getattr(hashlib, hashfunc) logger.debug("hashfunc is:", hashfunc) @@ -589,7 +589,7 @@ def password_reset_step2(): return response -class DecodeUser(object): +class DecodeUser: def __init__(self, code_prefix): verify_url_hmac(request.url) @@ -695,7 +695,7 @@ def get_github_user_details(access_token): result = requests.get(GITHUB_API_URL, params={"access_token":access_token}) return result.json() -class LoginUser(object): +class LoginUser: remember_time = 60 * 60 * 24 * 30 # One month in seconds def __init__(self): @@ -1039,12 +1039,12 @@ def send_email(toaddr, msg, fromaddr="no-reply@genenetwork.org"): server.quit() logger.info("Successfully sent email to "+toaddr) -class GroupsManager(object): +class GroupsManager: def __init__(self, kw): self.datasets = create_datasets_list() -class RolesManager(object): +class RolesManager: def __init__(self): self.roles = model.Role.query.all() logger.debug("Roles are:", self.roles) diff --git a/wqflask/wqflask/user_session.py b/wqflask/wqflask/user_session.py index c5a577df..cc0ac744 100644 --- a/wqflask/wqflask/user_session.py +++ b/wqflask/wqflask/user_session.py @@ -63,7 +63,7 @@ def manage_user(): return render_template("admin/manage_user.html", user_details = user_details) -class UserSession(object): +class UserSession: """Logged in user handling""" user_cookie_name = 'session_id_v2' diff --git a/wqflask/wqflask/wgcna/wgcna_analysis.py 
b/wqflask/wqflask/wgcna/wgcna_analysis.py index 6bf75216..21516b30 100644 --- a/wqflask/wqflask/wgcna/wgcna_analysis.py +++ b/wqflask/wqflask/wgcna/wgcna_analysis.py @@ -42,7 +42,7 @@ r_png = ro.r["png"] # Map the png function for plotting r_dev_off = ro.r["dev.off"] # Map the dev.off function -class WGCNA(object): +class WGCNA: def __init__(self): # To log output from stdout/stderr to a file add `r_sink(log)` print("Initialization of WGCNA") -- cgit v1.2.3 From c7e661b8ff9f70955418fbc4527378904beb0cf4 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Fri, 30 Apr 2021 12:16:51 +0300 Subject: autopep8: Fix E20-E27 Run: python -m autopep8 --in-place --recrusive ./ --select\ E20,E211,E22,E224,E224,E225,E226,E227,E228,E231,E241,\ E242,E251,E252,E26,E265,E266,E27 -p 3 --- wqflask/base/data_set.py | 18 +- wqflask/base/mrna_assay_tissue_data.py | 16 +- wqflask/base/species.py | 2 +- wqflask/base/webqtlConfig.py | 40 +- wqflask/db/call.py | 6 +- wqflask/db/gn_server.py | 2 +- wqflask/db/webqtlDatabaseFunction.py | 10 +- wqflask/maintenance/convert_dryad_to_bimbam.py | 6 +- wqflask/maintenance/convert_geno_to_bimbam.py | 10 +- wqflask/maintenance/gen_select_dataset.py | 16 +- .../maintenance/generate_kinship_from_bimbam.py | 4 +- wqflask/maintenance/geno_to_json.py | 26 +- wqflask/maintenance/quantile_normalize.py | 30 +- wqflask/maintenance/set_resource_defaults.py | 26 +- wqflask/tests/unit/wqflask/api/test_correlation.py | 4 +- .../marker_regression/test_gemma_mapping.py | 14 +- .../marker_regression/test_qtlreaper_mapping.py | 24 +- .../wqflask/marker_regression/test_rqtl_mapping.py | 28 +- wqflask/tests/wqflask/show_trait/testSampleList.py | 6 +- wqflask/utility/Plot.py | 180 ++-- wqflask/utility/TDCell.py | 6 +- wqflask/utility/THCell.py | 8 +- wqflask/utility/__init__.py | 2 +- wqflask/utility/benchmark.py | 4 +- wqflask/utility/chunks.py | 2 +- wqflask/utility/corestats.py | 6 +- wqflask/utility/elasticsearch_tools.py | 8 +- wqflask/utility/external.py | 2 +- wqflask/utility/gen_geno_ob.py | 18 +- wqflask/utility/helper_functions.py | 2 +- wqflask/utility/logger.py | 34 +- wqflask/utility/pillow_utils.py | 2 +- wqflask/utility/startup_config.py | 10 +- wqflask/utility/svg.py | 624 +++++------ wqflask/utility/temp_data.py | 2 +- wqflask/utility/tools.py | 112 +- wqflask/utility/type_checking.py | 6 +- wqflask/utility/webqtlUtil.py | 60 +- wqflask/wqflask/__init__.py | 8 +- wqflask/wqflask/api/correlation.py | 44 +- wqflask/wqflask/api/gen_menu.py | 4 +- wqflask/wqflask/api/mapping.py | 12 +- wqflask/wqflask/api/router.py | 196 ++-- wqflask/wqflask/collect.py | 20 +- .../comparison_bar_chart/comparison_bar_chart.py | 12 +- wqflask/wqflask/correlation/corr_scatter_plot.py | 76 +- .../wqflask/correlation/correlation_functions.py | 34 +- wqflask/wqflask/correlation/show_corr_results.py | 38 +- .../wqflask/correlation_matrix/show_corr_matrix.py | 52 +- wqflask/wqflask/ctl/ctl_analysis.py | 88 +- wqflask/wqflask/database.py | 2 +- wqflask/wqflask/do_search.py | 80 +- wqflask/wqflask/export_traits.py | 2 +- wqflask/wqflask/external_tools/send_to_bnw.py | 6 +- .../wqflask/external_tools/send_to_geneweaver.py | 6 +- .../wqflask/external_tools/send_to_webgestalt.py | 26 +- wqflask/wqflask/group_manager.py | 22 +- wqflask/wqflask/gsearch.py | 2 +- wqflask/wqflask/heatmap/heatmap.py | 6 +- wqflask/wqflask/interval_analyst/GeneUtil.py | 24 +- .../marker_regression/display_mapping_results.py | 1114 ++++++++++---------- wqflask/wqflask/marker_regression/gemma_mapping.py | 2 +- 
wqflask/wqflask/marker_regression/plink_mapping.py | 34 +- .../wqflask/marker_regression/qtlreaper_mapping.py | 70 +- wqflask/wqflask/marker_regression/rqtl_mapping.py | 82 +- wqflask/wqflask/marker_regression/run_mapping.py | 134 +-- wqflask/wqflask/model.py | 8 +- wqflask/wqflask/parser.py | 4 +- wqflask/wqflask/resource_manager.py | 10 +- wqflask/wqflask/search_results.py | 16 +- wqflask/wqflask/show_trait/SampleList.py | 14 +- wqflask/wqflask/show_trait/show_trait.py | 18 +- wqflask/wqflask/snp_browser/snp_browser.py | 42 +- wqflask/wqflask/submit_bnw.py | 2 +- wqflask/wqflask/update_search_results.py | 6 +- wqflask/wqflask/user_login.py | 88 +- wqflask/wqflask/user_manager.py | 143 ++- wqflask/wqflask/user_session.py | 38 +- wqflask/wqflask/views.py | 6 +- wqflask/wsgi.py | 2 +- 80 files changed, 1975 insertions(+), 1994 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py index 3183363b..55ab45f5 100644 --- a/wqflask/base/data_set.py +++ b/wqflask/base/data_set.py @@ -150,7 +150,7 @@ class DatasetType: "FROM PublishFreeze, InbredSet " "WHERE InbredSet.Name = '%s' AND " "PublishFreeze.InbredSetId = InbredSet.Id"), - 'geno': ("SELECT GenoFreeze.Id FROM GenoFreeze WHERE " + 'geno': ("SELECT GenoFreeze.Id FROM GenoFreeze WHERE " "GenoFreeze.Name = \"%s\" ") } @@ -215,7 +215,7 @@ def create_datasets_list(): if USE_REDIS: r.set(key, pickle.dumps(datasets, pickle.HIGHEST_PROTOCOL)) - r.expire(key, 60*60) + r.expire(key, 60 * 60) return datasets @@ -239,7 +239,7 @@ class Markers: for line in bimbam_fh: marker = {} marker['name'] = line.split(delimiter)[0].rstrip() - marker['Mb'] = float(line.split(delimiter)[1].rstrip())/1000000 + marker['Mb'] = float(line.split(delimiter)[1].rstrip()) / 1000000 marker['chr'] = line.split(delimiter)[2].rstrip() markers.append(marker) @@ -369,8 +369,8 @@ class DatasetGroup: def get_markers(self): def check_plink_gemma(): if flat_file_exists("mapping"): - MAPPING_PATH = flat_files("mapping")+"/" - if os.path.isfile(MAPPING_PATH+self.name+".bed"): + MAPPING_PATH = flat_files("mapping") + "/" + if os.path.isfile(MAPPING_PATH + self.name + ".bed"): return True return False @@ -416,7 +416,7 @@ class DatasetGroup: else: logger.debug("Cache not hit") - genotype_fn = locate_ignore_error(self.name+".geno", 'genotype') + genotype_fn = locate_ignore_error(self.name + ".geno", 'genotype') if genotype_fn: self.samplelist = get_group_samplelists.get_samplelist( "geno", genotype_fn) @@ -425,7 +425,7 @@ class DatasetGroup: if USE_REDIS: r.set(key, json.dumps(self.samplelist)) - r.expire(key, 60*5) + r.expire(key, 60 * 5) def all_samples_ordered(self): result = [] @@ -531,7 +531,7 @@ def datasets(group_name, this_group=None): if USE_REDIS: r.set(key, pickle.dumps(dataset_menu, pickle.HIGHEST_PROTOCOL)) - r.expire(key, 60*5) + r.expire(key, 60 * 5) if this_group != None: this_group._datasets = dataset_menu @@ -622,7 +622,7 @@ class DataSet: WHERE ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id AND ProbeFreeze.TissueId = Tissue.Id AND (ProbeSetFreeze.Name = '%s' OR ProbeSetFreeze.FullName = '%s' OR ProbeSetFreeze.ShortName = '%s') - """ % (query_args), "/dataset/"+self.name+".json", + """ % (query_args), "/dataset/" + self.name + ".json", lambda r: (r["id"], r["name"], r["full_name"], r["short_name"], r["data_scale"], r["tissue"]) ) diff --git a/wqflask/base/mrna_assay_tissue_data.py b/wqflask/base/mrna_assay_tissue_data.py index 1f8224af..f3264b3d 100644 --- a/wqflask/base/mrna_assay_tissue_data.py +++ 
b/wqflask/base/mrna_assay_tissue_data.py @@ -9,7 +9,7 @@ from utility.db_tools import escape from utility.logger import getLogger -logger = getLogger(__name__ ) +logger = getLogger(__name__) class MrnaAssayTissueData: @@ -20,7 +20,7 @@ class MrnaAssayTissueData: self.data = collections.defaultdict(Bunch) - query = '''select t.Symbol, t.GeneId, t.DataId, t.Chr, t.Mb, t.description, t.Probe_Target_Description + query = '''select t.Symbol, t.GeneId, t.DataId, t.Chr, t.Mb, t.description, t.Probe_Target_Description from ( select Symbol, max(Mean) as maxmean from TissueProbeSetXRef @@ -31,14 +31,14 @@ class MrnaAssayTissueData: # Due to the limit size of TissueProbeSetFreezeId table in DB, # performance of inner join is acceptable.MrnaAssayTissueData(gene_symbols=symbol_list) if len(gene_symbols) == 0: - query += '''Symbol!='' and Symbol Is Not Null group by Symbol) + query += '''Symbol!='' and Symbol Is Not Null group by Symbol) as x inner join TissueProbeSetXRef as t on t.Symbol = x.Symbol and t.Mean = x.maxmean; ''' else: in_clause = db_tools.create_in_clause(gene_symbols) - #ZS: This was in the query, not sure why: http://docs.python.org/2/library/string.html?highlight=lower#string.lower + # ZS: This was in the query, not sure why: http://docs.python.org/2/library/string.html?highlight=lower#string.lower query += ''' Symbol in {} group by Symbol) as x inner join TissueProbeSetXRef as t on t.Symbol = x.Symbol and t.Mean = x.maxmean; @@ -64,11 +64,11 @@ class MrnaAssayTissueData: self.data[symbol].probe_target_description = result.Probe_Target_Description ########################################################################### - #Input: cursor, symbolList (list), dataIdDict(Dict) - #output: symbolValuepairDict (dictionary):one dictionary of Symbol and Value Pair, + # Input: cursor, symbolList (list), dataIdDict(Dict) + # output: symbolValuepairDict (dictionary):one dictionary of Symbol and Value Pair, # key is symbol, value is one list of expression values of one probeSet; - #function: get one dictionary whose key is gene symbol and value is tissue expression data (list type). - #Attention! All keys are lower case! + # function: get one dictionary whose key is gene symbol and value is tissue expression data (list type). + # Attention! All keys are lower case! 
########################################################################### def get_symbol_values_pairs(self): diff --git a/wqflask/base/species.py b/wqflask/base/species.py index eae3325a..cf764d72 100644 --- a/wqflask/base/species.py +++ b/wqflask/base/species.py @@ -4,7 +4,7 @@ from flask import Flask, g from utility.logger import getLogger -logger = getLogger(__name__ ) +logger = getLogger(__name__) class TheSpecies: def __init__(self, dataset=None, species_name=None): diff --git a/wqflask/base/webqtlConfig.py b/wqflask/base/webqtlConfig.py index bb8704a5..872b52eb 100644 --- a/wqflask/base/webqtlConfig.py +++ b/wqflask/base/webqtlConfig.py @@ -1,4 +1,4 @@ -#########################################' +# ' # Environment Variables - public # # Note: much of this needs to handled by the settings/environment @@ -10,35 +10,35 @@ from utility.tools import valid_path, mk_dir, assert_dir, assert_writable_dir, flat_files, TEMPDIR -#Debug Level -#1 for debug, mod python will reload import each time +# Debug Level +# 1 for debug, mod python will reload import each time DEBUG = 1 -#USER privilege -USERDICT = {'guest':1,'user':2, 'admin':3, 'root':4} +# USER privilege +USERDICT = {'guest': 1, 'user': 2, 'admin': 3, 'root':4} -#Set privileges +# Set privileges SUPER_PRIVILEGES = {'data': 'edit', 'metadata': 'edit', 'admin': 'edit-admins'} DEFAULT_PRIVILEGES = {'data': 'view', 'metadata': 'view', 'admin': 'not-admin'} -#minimum number of informative strains +# minimum number of informative strains KMININFORMATIVE = 5 -#Daily download limit from one IP +# Daily download limit from one IP DAILYMAXIMUM = 1000 -#maximum LRS value +# maximum LRS value MAXLRS = 460.0 -#MINIMUM Database public value +# MINIMUM Database public value PUBLICTHRESH = 0 -#EXTERNAL LINK ADDRESSES +# EXTERNAL LINK ADDRESSES PUBMEDLINK_URL = "http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Retrieve&db=PubMed&list_uids=%s&dopt=Abstract" UCSC_BLAT = 'http://genome.ucsc.edu/cgi-bin/hgBlat?org=%s&db=%s&type=0&sort=0&output=0&userSeq=%s' UTHSC_BLAT = 'http://ucscbrowser.genenetwork.org/cgi-bin/hgBlat?org=%s&db=%s&type=0&sort=0&output=0&userSeq=%s' UTHSC_BLAT2 = 'http://ucscbrowserbeta.genenetwork.org/cgi-bin/hgBlat?org=%s&db=%s&type=0&sort=0&output=0&userSeq=%s' -GENOMEBROWSER_URL="https://genome.ucsc.edu/cgi-bin/hgTracks?db=%s&position=%s" +GENOMEBROWSER_URL = "https://genome.ucsc.edu/cgi-bin/hgTracks?db=%s&position=%s" NCBI_LOCUSID = "http://www.ncbi.nlm.nih.gov/gene?cmd=Retrieve&dopt=Graphics&list_uids=%s" GENBANK_ID = "http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?db=Nucleotide&cmd=search&doptcmdl=DocSum&term=%s" OMIM_ID = "http://www.ncbi.nlm.nih.gov/omim/%s" @@ -56,7 +56,7 @@ GEMMA_URL = "http://www.chibi.ubc.ca/Gemma/gene/showGene.html?ncbiid=%s" ABA_URL = "http://mouse.brain-map.org/search/show?search_type=gene&search_term=%s" EBIGWAS_URL = "https://www.ebi.ac.uk/gwas/search?query=%s" WIKI_PI_URL = "http://severus.dbmi.pitt.edu/wiki-pi/index.php/search?q=%s" -ENSEMBLETRANSCRIPT_URL="http://useast.ensembl.org/Mus_musculus/Transcript/Idhistory?t=%s" +ENSEMBLETRANSCRIPT_URL = "http://useast.ensembl.org/Mus_musculus/Transcript/Idhistory?t=%s" DBSNP = 'http://ensembl.org/Mus_musculus/Variation/Population?v=%s' PROTEIN_ATLAS_URL = "http://www.proteinatlas.org/search/%s" OPEN_TARGETS_URL = "https://genetics.opentargets.org/gene/%s" @@ -71,13 +71,13 @@ RRID_RAT_URL = "https://rgd.mcw.edu/rgdweb/report/strain/main.html?id=%s" # want to reach this base dir assert_writable_dir(TEMPDIR) -TMPDIR = mk_dir(TEMPDIR+'/gn2/') +TMPDIR = 
mk_dir(TEMPDIR + '/gn2/') assert_writable_dir(TMPDIR) -CACHEDIR = mk_dir(TMPDIR+'/cache/') +CACHEDIR = mk_dir(TMPDIR + '/cache/') # We can no longer write into the git tree: -GENERATED_IMAGE_DIR = mk_dir(TMPDIR+'generated/') -GENERATED_TEXT_DIR = mk_dir(TMPDIR+'generated_text/') +GENERATED_IMAGE_DIR = mk_dir(TMPDIR + 'generated/') +GENERATED_TEXT_DIR = mk_dir(TMPDIR + 'generated_text/') # Make sure we have permissions to access these assert_writable_dir(CACHEDIR) @@ -85,12 +85,12 @@ assert_writable_dir(GENERATED_IMAGE_DIR) assert_writable_dir(GENERATED_TEXT_DIR) # Flat file directories -GENODIR = flat_files('genotype')+'/' +GENODIR = flat_files('genotype') + '/' assert_dir(GENODIR) # assert_dir(GENODIR+'bimbam') # for gemma # JSON genotypes are OBSOLETE -JSON_GENODIR = flat_files('genotype/json')+'/' +JSON_GENODIR = flat_files('genotype/json') + '/' if not valid_path(JSON_GENODIR): # fall back on old location (move the dir, FIXME) JSON_GENODIR = flat_files('json') @@ -98,4 +98,4 @@ if not valid_path(JSON_GENODIR): # Are we using the following...? PORTADDR = "http://50.16.251.170" INFOPAGEHREF = '/dbdoc/%s.html' -CGIDIR = '/webqtl/' #XZ: The variable name 'CGIDIR' should be changed to 'PYTHONDIR' +CGIDIR = '/webqtl/' # XZ: The variable name 'CGIDIR' should be changed to 'PYTHONDIR' diff --git a/wqflask/db/call.py b/wqflask/db/call.py index 0971d2a2..555878ad 100644 --- a/wqflask/db/call.py +++ b/wqflask/db/call.py @@ -12,7 +12,7 @@ from utility.tools import USE_GN_SERVER, LOG_SQL, GN_SERVER_URL from utility.benchmark import Bench from utility.logger import getLogger -logger = getLogger(__name__ ) +logger = getLogger(__name__) # from inspect import stack @@ -64,9 +64,9 @@ def gn_server(path): with Bench("GN_SERVER", LOG_SQL): res = () try: - res = urllib.request.urlopen(GN_SERVER_URL+path) + res = urllib.request.urlopen(GN_SERVER_URL + path) except: - res = urllib2.urlopen(GN_SERVER_URL+path) + res = urllib2.urlopen(GN_SERVER_URL + path) rest = res.read() res2 = json.loads(rest) logger.debug(res2) diff --git a/wqflask/db/gn_server.py b/wqflask/db/gn_server.py index da224112..6c7383d0 100644 --- a/wqflask/db/gn_server.py +++ b/wqflask/db/gn_server.py @@ -3,7 +3,7 @@ from db.call import gn_server from utility.logger import getLogger -logger = getLogger(__name__ ) +logger = getLogger(__name__) def menu_main(): return gn_server("/int/menu/main.json") diff --git a/wqflask/db/webqtlDatabaseFunction.py b/wqflask/db/webqtlDatabaseFunction.py index 2805febd..18ade405 100644 --- a/wqflask/db/webqtlDatabaseFunction.py +++ b/wqflask/db/webqtlDatabaseFunction.py @@ -24,24 +24,24 @@ from db.call import fetch1 from utility.tools import USE_GN_SERVER from utility.logger import getLogger -logger = getLogger(__name__ ) +logger = getLogger(__name__) ########################################################################### -#output: cursor instance -#function: connect to database and return cursor instance +# output: cursor instance +# function: connect to database and return cursor instance ########################################################################### def retrieve_species(group): """Get the species of a group (e.g. 
returns string "mouse" on "BXD" """ - result = fetch1("select Species.Name from Species, InbredSet where InbredSet.Name = '%s' and InbredSet.SpeciesId = Species.Id" % (group), "/cross/"+group+".json", lambda r: (r["species"],))[0] + result = fetch1("select Species.Name from Species, InbredSet where InbredSet.Name = '%s' and InbredSet.SpeciesId = Species.Id" % (group), "/cross/" + group + ".json", lambda r: (r["species"],))[0] logger.debug("retrieve_species result:", result) return result def retrieve_species_id(group): - result = fetch1("select SpeciesId from InbredSet where Name = '%s'" % (group), "/cross/"+group+".json", lambda r: (r["species_id"],))[0] + result = fetch1("select SpeciesId from InbredSet where Name = '%s'" % (group), "/cross/" + group + ".json", lambda r: (r["species_id"],))[0] logger.debug("retrieve_species_id result:", result) return result diff --git a/wqflask/maintenance/convert_dryad_to_bimbam.py b/wqflask/maintenance/convert_dryad_to_bimbam.py index 12ce35e9..8eab66e8 100644 --- a/wqflask/maintenance/convert_dryad_to_bimbam.py +++ b/wqflask/maintenance/convert_dryad_to_bimbam.py @@ -41,7 +41,7 @@ def read_dryad_file(filename): return geno_rows - #for i, marker in enumerate(marker_list): + # for i, marker in enumerate(marker_list): # this_row = [] # this_row.append(marker) # this_row.append("X") @@ -53,7 +53,7 @@ def read_dryad_file(filename): # print("row: " + str(i)) # geno_rows.append(this_row) # - #return geno_rows + # return geno_rows def write_bimbam_files(geno_rows): with open('/home/zas1024/cfw_data/CFW_geno.txt', 'w') as geno_fh: @@ -64,6 +64,6 @@ def convert_dryad_to_bimbam(filename): geno_file_rows = read_dryad_file(filename) write_bimbam_files(geno_file_rows) -if __name__=="__main__": +if __name__ == "__main__": input_filename = "/home/zas1024/cfw_data/" + sys.argv[1] + ".txt" convert_dryad_to_bimbam(input_filename) diff --git a/wqflask/maintenance/convert_geno_to_bimbam.py b/wqflask/maintenance/convert_geno_to_bimbam.py index 0853b3ac..dc01cbb3 100644 --- a/wqflask/maintenance/convert_geno_to_bimbam.py +++ b/wqflask/maintenance/convert_geno_to_bimbam.py @@ -103,9 +103,9 @@ class ConvertGenoFile: with open(self.output_files[2], "w") as snp_fh: for marker in self.markers: if self.mb_exists: - snp_fh.write(marker['name'] +", " + str(int(float(marker['Mb'])*1000000)) + ", " + marker['chr'] + "\n") + snp_fh.write(marker['name'] + ", " + str(int(float(marker['Mb']) * 1000000)) + ", " + marker['chr'] + "\n") else: - snp_fh.write(marker['name'] +", " + str(int(float(marker['cM'])*1000000)) + ", " + marker['chr'] + "\n") + snp_fh.write(marker['name'] + ", " + str(int(float(marker['cM']) * 1000000)) + ", " + marker['chr'] + "\n") def get_sample_list(self, row_contents): self.sample_list = [] @@ -178,12 +178,12 @@ class ConvertGenoFile: print(" Row is:", convertob.latest_row_value) break -if __name__=="__main__": +if __name__ == "__main__": Old_Geno_Directory = """/export/local/home/zas1024/gn2-zach/genotype_files/genotype""" New_Geno_Directory = """/export/local/home/zas1024/gn2-zach/genotype_files/genotype/bimbam""" #Input_File = """/home/zas1024/gene/genotype_files/genotypes/BXD.geno""" #Output_File = """/home/zas1024/gene/wqflask/wqflask/pylmm/data/bxd.snps""" #convertob = ConvertGenoFile("/home/zas1024/gene/genotype_files/genotypes/SRxSHRSPF2.geno", "/home/zas1024/gene/genotype_files/new_genotypes/SRxSHRSPF2.json") - #convertob.convert() + # convertob.convert() ConvertGenoFile.process_all(Old_Geno_Directory, New_Geno_Directory) - 
#ConvertGenoFiles(Geno_Directory) + # ConvertGenoFiles(Geno_Directory) diff --git a/wqflask/maintenance/gen_select_dataset.py b/wqflask/maintenance/gen_select_dataset.py index 544e2fd1..f480d63f 100644 --- a/wqflask/maintenance/gen_select_dataset.py +++ b/wqflask/maintenance/gen_select_dataset.py @@ -61,10 +61,10 @@ def parse_db_uri(): parsed_uri = urllib.parse.urlparse(SQL_URI) db_conn_info = dict( - db = parsed_uri.path[1:], - host = parsed_uri.hostname, - user = parsed_uri.username, - passwd = parsed_uri.password) + db=parsed_uri.path[1:], + host=parsed_uri.hostname, + user=parsed_uri.username, + passwd=parsed_uri.password) print(db_conn_info) return db_conn_info @@ -120,7 +120,7 @@ def get_types(groups): if not phenotypes_exist(group_name) and not genotypes_exist(group_name): types[species].pop(group_name, None) groups[species] = tuple(group for group in groups[species] if group[0] != group_name) - else: #ZS: This whole else statement might be unnecessary, need to check + else: # ZS: This whole else statement might be unnecessary, need to check types_list = build_types(species, group_name) if len(types_list) > 0: types[species][group_name] = types_list @@ -133,7 +133,7 @@ def get_types(groups): def phenotypes_exist(group_name): #print("group_name:", group_name) Cursor.execute("""select Name from PublishFreeze - where PublishFreeze.Name = '%s'""" % (group_name+"Publish")) + where PublishFreeze.Name = '%s'""" % (group_name + "Publish")) results = Cursor.fetchone() #print("RESULTS:", results) @@ -146,7 +146,7 @@ def phenotypes_exist(group_name): def genotypes_exist(group_name): #print("group_name:", group_name) Cursor.execute("""select Name from GenoFreeze - where GenoFreeze.Name = '%s'""" % (group_name+"Geno")) + where GenoFreeze.Name = '%s'""" % (group_name + "Geno")) results = Cursor.fetchone() #print("RESULTS:", results) @@ -246,7 +246,7 @@ def build_datasets(species, group, type_name): dataset_text = "%s Genotypes" % group datasets.append((dataset_id, dataset_value, dataset_text)) - else: # for mRNA expression/ProbeSet + else: # for mRNA expression/ProbeSet Cursor.execute("""select ProbeSetFreeze.Id, ProbeSetFreeze.Name, ProbeSetFreeze.FullName from ProbeSetFreeze, ProbeFreeze, InbredSet, Tissue, Species where Species.Name = '%s' and Species.Id = InbredSet.SpeciesId and diff --git a/wqflask/maintenance/generate_kinship_from_bimbam.py b/wqflask/maintenance/generate_kinship_from_bimbam.py index 3e4d1741..664e9e48 100644 --- a/wqflask/maintenance/generate_kinship_from_bimbam.py +++ b/wqflask/maintenance/generate_kinship_from_bimbam.py @@ -52,9 +52,9 @@ class GenerateKinshipMatrices: break -if __name__=="__main__": +if __name__ == "__main__": Geno_Directory = """/export/local/home/zas1024/genotype_files/genotype/""" Bimbam_Directory = """/export/local/home/zas1024/genotype_files/genotype/bimbam/""" GenerateKinshipMatrices.process_all(Geno_Directory, Bimbam_Directory) - #./gemma -g /home/zas1024/genotype_files/genotype/bimbam/BXD_geno.txt -p /home/zas1024/genotype_files/genotype/bimbam/BXD_pheno.txt -gk 1 -o BXD + # ./gemma -g /home/zas1024/genotype_files/genotype/bimbam/BXD_geno.txt -p /home/zas1024/genotype_files/genotype/bimbam/BXD_pheno.txt -gk 1 -o BXD diff --git a/wqflask/maintenance/geno_to_json.py b/wqflask/maintenance/geno_to_json.py index f5f7e0e7..fa0dcebd 100644 --- a/wqflask/maintenance/geno_to_json.py +++ b/wqflask/maintenance/geno_to_json.py @@ -66,16 +66,16 @@ class ConvertGenoFile: self.configurations = {} #self.skipped_cols = 3 - #if self.input_file.endswith(".geno.gz"): 
+ # if self.input_file.endswith(".geno.gz"): # print("self.input_file: ", self.input_file) # self.input_fh = gzip.open(self.input_file) - #else: + # else: self.input_fh = open(self.input_file) with open(self.output_file, "w") as self.output_fh: - #if self.file_type == "geno": + # if self.file_type == "geno": self.process_csv() - #elif self.file_type == "snps": + # elif self.file_type == "snps": # self.process_snps_file() @@ -105,7 +105,7 @@ class ConvertGenoFile: this_marker.genotypes.append("NA") #print("this_marker is:", pf(this_marker.__dict__)) - #if this_marker.chr == "14": + # if this_marker.chr == "14": self.markers.append(this_marker.__dict__) with open(self.output_file, 'w') as fh: @@ -115,16 +115,16 @@ class ConvertGenoFile: #self.latest_col_pos = item_count + self.skipped_cols #self.latest_col_value = item - #if item_count != 0: + # if item_count != 0: # self.output_fh.write(" ") - #self.output_fh.write(self.configurations[item.upper()]) + # self.output_fh.write(self.configurations[item.upper()]) - #self.output_fh.write("\n") + # self.output_fh.write("\n") def process_rows(self): for self.latest_row_pos, row in enumerate(self.input_fh): - #if self.input_file.endswith(".geno.gz"): + # if self.input_file.endswith(".geno.gz"): # print("row: ", row) self.latest_row_value = row # Take care of headers @@ -176,21 +176,21 @@ class ConvertGenoFile: print(" Row is:", convertob.latest_row_value) break - #def process_snps_file(cls, snps_file, new_directory): + # def process_snps_file(cls, snps_file, new_directory): # output_file = os.path.join(new_directory, "mouse_families.json") # print("%s -> %s" % (snps_file, output_file)) # convertob = ConvertGenoFile(input_file, output_file) -if __name__=="__main__": +if __name__ == "__main__": Old_Geno_Directory = """/export/local/home/zas1024/gn2-zach/genotype_files/genotype""" New_Geno_Directory = """/export/local/home/zas1024/gn2-zach/genotype_files/genotype/json""" #Input_File = """/home/zas1024/gene/genotype_files/genotypes/BXD.geno""" #Output_File = """/home/zas1024/gene/wqflask/wqflask/pylmm/data/bxd.snps""" #convertob = ConvertGenoFile("/home/zas1024/gene/genotype_files/genotypes/SRxSHRSPF2.geno", "/home/zas1024/gene/genotype_files/new_genotypes/SRxSHRSPF2.json") - #convertob.convert() + # convertob.convert() ConvertGenoFile.process_all(Old_Geno_Directory, New_Geno_Directory) - #ConvertGenoFiles(Geno_Directory) + # ConvertGenoFiles(Geno_Directory) #process_csv(Input_File, Output_File) diff --git a/wqflask/maintenance/quantile_normalize.py b/wqflask/maintenance/quantile_normalize.py index 701b2b50..6751a8e5 100644 --- a/wqflask/maintenance/quantile_normalize.py +++ b/wqflask/maintenance/quantile_normalize.py @@ -20,10 +20,10 @@ def parse_db_uri(): parsed_uri = urllib.parse.urlparse(SQL_URI) db_conn_info = dict( - db = parsed_uri.path[1:], - host = parsed_uri.hostname, - user = parsed_uri.username, - passwd = parsed_uri.password) + db=parsed_uri.path[1:], + host=parsed_uri.hostname, + user=parsed_uri.username, + passwd=parsed_uri.password) print(db_conn_info) return db_conn_info @@ -35,16 +35,16 @@ def create_dataframe(input_file): input_array = np.loadtxt(open(input_file, "rb"), delimiter="\t", skiprows=1, usecols=list(range(1, ncols))) return pd.DataFrame(input_array) -#This function taken from https://github.com/ShawnLYU/Quantile_Normalize +# This function taken from https://github.com/ShawnLYU/Quantile_Normalize def quantileNormalize(df_input): df = df_input.copy() - #compute rank + # compute rank dic = {} for col in df: - dic.update({col : 
sorted(df[col])}) + dic.update({col: sorted(df[col])}) sorted_df = pd.DataFrame(dic) - rank = sorted_df.mean(axis = 1).tolist() - #sort + rank = sorted_df.mean(axis=1).tolist() + # sort for col in df: t = np.searchsorted(np.sort(df[col]), df[col]) df[col] = [rank[i] for i in t] @@ -65,8 +65,8 @@ def set_data(dataset_name): for i, sample in enumerate(sample_names): this_sample = { "name": sample, - "value": line1.split('\t')[i+1], - "qnorm": line2.split('\t')[i+1] + "value": line1.split('\t')[i + 1], + "qnorm": line2.split('\t')[i + 1] } sample_list.append(this_sample) query = """SELECT Species.SpeciesName, InbredSet.InbredSetName, ProbeSetFreeze.FullName @@ -99,9 +99,9 @@ if __name__ == '__main__': Conn = MySQLdb.Connect(**parse_db_uri()) Cursor = Conn.cursor() - #es = Elasticsearch([{ + # es = Elasticsearch([{ # "host": ELASTICSEARCH_HOST, "port": ELASTICSEARCH_PORT - #}], timeout=60) if (ELASTICSEARCH_HOST and ELASTICSEARCH_PORT) else None + # }], timeout=60) if (ELASTICSEARCH_HOST and ELASTICSEARCH_PORT) else None es = get_elasticsearch_connection(for_user=False) @@ -116,8 +116,8 @@ if __name__ == '__main__': success, _ = bulk(es, set_data(sys.argv[1])) response = es.search( - index = "traits", doc_type = "trait", body = { - "query": { "match": { "name": "ENSMUSG00000028982" } } + index="traits", doc_type="trait", body = { + "query": {"match": {"name": "ENSMUSG00000028982"}} } ) diff --git a/wqflask/maintenance/set_resource_defaults.py b/wqflask/maintenance/set_resource_defaults.py index 4177c124..286094dd 100644 --- a/wqflask/maintenance/set_resource_defaults.py +++ b/wqflask/maintenance/set_resource_defaults.py @@ -43,10 +43,10 @@ def parse_db_uri(): parsed_uri = urllib.parse.urlparse(SQL_URI) db_conn_info = dict( - db = parsed_uri.path[1:], - host = parsed_uri.hostname, - user = parsed_uri.username, - passwd = parsed_uri.password) + db=parsed_uri.path[1:], + host=parsed_uri.hostname, + user=parsed_uri.username, + passwd=parsed_uri.password) print(db_conn_info) return db_conn_info @@ -63,14 +63,14 @@ def insert_probeset_resources(default_owner_id): resource_ob = {} resource_ob['name'] = resource[1] resource_ob['owner_id'] = default_owner_id - resource_ob['data'] = { "dataset" : str(resource[0])} + resource_ob['data'] = {"dataset": str(resource[0])} resource_ob['type'] = "dataset-probeset" if resource[2] < 1 and resource[3] > 0: - resource_ob['default_mask'] = { "data": "view", + resource_ob['default_mask'] = {"data": "view", "metadata": "view", "admin": "not-admin"} else: - resource_ob['default_mask'] = { "data": "no-access", + resource_ob['default_mask'] = {"data": "no-access", "metadata": "no-access", "admin": "not-admin"} resource_ob['group_masks'] = {} @@ -97,10 +97,10 @@ def insert_publish_resources(default_owner_id): else: resource_ob['name'] = str(resource[0]) resource_ob['owner_id'] = default_owner_id - resource_ob['data'] = { "dataset" : str(resource[1]) , - "trait" : str(resource[0])} + resource_ob['data'] = {"dataset": str(resource[1]), + "trait": str(resource[0])} resource_ob['type'] = "dataset-publish" - resource_ob['default_mask'] = { "data": "view", + resource_ob['default_mask'] = {"data": "view", "metadata": "view", "admin": "not-admin"} @@ -125,14 +125,14 @@ def insert_geno_resources(default_owner_id): resource_ob['owner_id'] = "c5ce8c56-78a6-474f-bcaf-7129d97f56ae" else: resource_ob['owner_id'] = default_owner_id - resource_ob['data'] = { "dataset" : str(resource[0]) } + resource_ob['data'] = {"dataset": str(resource[0])} resource_ob['type'] = "dataset-geno" if 
resource[2] < 1: - resource_ob['default_mask'] = { "data": "view", + resource_ob['default_mask'] = {"data": "view", "metadata": "view", "admin": "not-admin"} else: - resource_ob['default_mask'] = { "data": "no-access", + resource_ob['default_mask'] = {"data": "no-access", "metadata": "no-access", "admin": "not-admin"} resource_ob['group_masks'] = {} diff --git a/wqflask/tests/unit/wqflask/api/test_correlation.py b/wqflask/tests/unit/wqflask/api/test_correlation.py index d0264b87..bd99838d 100644 --- a/wqflask/tests/unit/wqflask/api/test_correlation.py +++ b/wqflask/tests/unit/wqflask/api/test_correlation.py @@ -127,9 +127,9 @@ class TestCorrelations(unittest.TestCase): expected_pearsonr = [-0.21618688834430866, 0.680771605997119, 6] expected_spearmanr = [-0.11595420713048969, 0.826848213385815, 6] for i, val in enumerate(expected_pearsonr): - self.assertAlmostEqual(val, results_pearsonr[i],4) + self.assertAlmostEqual(val, results_pearsonr[i], 4) for i, val in enumerate(expected_spearmanr): - self.assertAlmostEqual(val, results_spearmanr[i],4) + self.assertAlmostEqual(val, results_spearmanr[i], 4) self.assertEqual(results_num_overlap, None) @mock.patch("wqflask.api.correlation.do_literature_correlation_for_all_traits") diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_gemma_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_gemma_mapping.py index fe2569b8..f194c6c9 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_gemma_mapping.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_gemma_mapping.py @@ -47,11 +47,11 @@ class TestGemmaMapping(unittest.TestCase): @mock.patch("wqflask.marker_regression.run_mapping.random.choice") @mock.patch("wqflask.marker_regression.gemma_mapping.os") @mock.patch("wqflask.marker_regression.gemma_mapping.gen_pheno_txt_file") - def test_run_gemma_firstrun_set_true(self, mock_gen_pheno_txt, mock_os, mock_choice, mock_gen_covar, mock_flat_files,mock_parse_loco): + def test_run_gemma_firstrun_set_true(self, mock_gen_pheno_txt, mock_os, mock_choice, mock_gen_covar, mock_flat_files, mock_parse_loco): """add tests for run_gemma where first run is set to true""" - this_chromosomes={} + this_chromosomes = {} for i in range(1, 5): - this_chromosomes[f'CH{i}']=(AttributeSetter({"name": f"CH{i}"})) + this_chromosomes[f'CH{i}'] = (AttributeSetter({"name": f"CH{i}"})) chromosomes = AttributeSetter({"chromosomes": this_chromosomes}) dataset_group = MockGroup( @@ -68,9 +68,9 @@ class TestGemmaMapping(unittest.TestCase): mock_parse_loco.return_value = [] results = run_gemma(this_trait=trait, this_dataset=dataset, samples=[ ], vals=[], covariates="", use_loco=True) - self.assertEqual(mock_os.system.call_count,2) + self.assertEqual(mock_os.system.call_count, 2) mock_gen_pheno_txt.assert_called_once() - mock_parse_loco.assert_called_once_with(dataset, "GP1_GWA_RRRRRR",True) + mock_parse_loco.assert_called_once_with(dataset, "GP1_GWA_RRRRRR", True) mock_os.path.isfile.assert_called_once_with( ('/home/user/imgfile_output.assoc.txt')) self.assertEqual(mock_flat_files.call_count, 4) @@ -144,7 +144,7 @@ class TestGemmaMapping(unittest.TestCase): "files": [["file_name", "user", "~/file1"], ["file_name", "user", "~/file2"]] } - return_file="""X/Y\tM1\t28.457155\tQ\tE\tA\tMMB\t23.3\tW\t0.9\t0.85\t + return_file = """X/Y\tM1\t28.457155\tQ\tE\tA\tMMB\t23.3\tW\t0.9\t0.85\t chr4\tM2\t12\tQ\tE\tMMB\tR\t24\tW\t0.87\t0.5 Y\tM4\t12\tQ\tE\tMMB\tR\t11.6\tW\t0.21\t0.7 X\tM5\t12\tQ\tE\tMMB\tR\t21.1\tW\t0.65\t0.6""" @@ -159,7 +159,7 @@ 
X\tM5\t12\tQ\tE\tMMB\tR\t21.1\tW\t0.65\t0.6""" mock_open.side_effect = handles results = parse_loco_output( this_dataset={}, gwa_output_filename=".xw/") - expected_results= [ + expected_results = [ {'name': 'M1', 'chr': 'X/Y', 'Mb': 2.8457155e-05, 'p_value': 0.85, 'additive': 23.3, 'lod_score': 0.07058107428570727}, {'name': 'M2', 'chr': 4, 'Mb': 1.2e-05, 'p_value': 0.5, 'additive': 24.0, 'lod_score': 0.3010299956639812}, {'name': 'M4', 'chr': 'Y', 'Mb': 1.2e-05, 'p_value': 0.7, 'additive': 11.6, 'lod_score': 0.1549019599857432}, diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py index b47f877a..bbb79f98 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py @@ -1,20 +1,20 @@ import unittest -from unittest import mock -from wqflask.marker_regression.qtlreaper_mapping import gen_pheno_txt_file +from unittest import mock +from wqflask.marker_regression.qtlreaper_mapping import gen_pheno_txt_file -#issues some methods in genofile object are not defined -#modify samples should equal to vals +# issues some methods in genofile object are not defined +# modify samples should equal to vals class TestQtlReaperMapping(unittest.TestCase): @mock.patch("wqflask.marker_regression.qtlreaper_mapping.TEMPDIR", "/home/user/data") - def test_gen_pheno_txt_file(self): - vals=["V1","x","V4","V3","x"] - samples=["S1","S2","S3","S4","S5"] - trait_filename="trait_file" + def test_gen_pheno_txt_file(self): + vals = ["V1", "x", "V4", "V3","x"] + samples = ["S1", "S2", "S3", "S4","S5"] + trait_filename = "trait_file" with mock.patch("builtins.open", mock.mock_open())as mock_open: - gen_pheno_txt_file(samples=samples,vals=vals,trait_filename=trait_filename) - mock_open.assert_called_once_with("/home/user/data/gn2/trait_file.txt","w") - filehandler=mock_open() - write_calls= [mock.call('Trait\t'),mock.call('S1\tS3\tS4\n'),mock.call('T1\t'),mock.call('V1\tV4\tV3')] + gen_pheno_txt_file(samples=samples, vals=vals, trait_filename=trait_filename) + mock_open.assert_called_once_with("/home/user/data/gn2/trait_file.txt", "w") + filehandler = mock_open() + write_calls = [mock.call('Trait\t'), mock.call('S1\tS3\tS4\n'), mock.call('T1\t'), mock.call('V1\tV4\tV3')] filehandler.write.assert_has_calls(write_calls) diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py index c585f1df..0e617e93 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py @@ -8,7 +8,7 @@ from wqflask.marker_regression.rqtl_mapping import sanitize_rqtl_names class TestRqtlMapping(unittest.TestCase): def setUp(self): - self.app_context=app.app_context() + self.app_context = app.app_context() self.app_context.push() def tearDown(self): @@ -17,28 +17,28 @@ class TestRqtlMapping(unittest.TestCase): @mock.patch("wqflask.marker_regression.rqtl_mapping.g") @mock.patch("wqflask.marker_regression.rqtl_mapping.logger") - def test_get_trait_data(self,mock_logger,mock_db): + def test_get_trait_data(self, mock_logger, mock_db): """test for getting trait data_type return True""" - query_value="""SELECT value FROM TraitMetadata WHERE type='trait_data_type'""" - 
mock_db.db.execute.return_value.fetchone.return_value=["""{"type":"trait_data_type","name":"T1","traid_id":"fer434f"}"""] - results=get_trait_data_type("traid_id") + query_value = """SELECT value FROM TraitMetadata WHERE type='trait_data_type'""" + mock_db.db.execute.return_value.fetchone.return_value = ["""{"type":"trait_data_type","name":"T1","traid_id":"fer434f"}"""] + results = get_trait_data_type("traid_id") mock_db.db.execute.assert_called_with(query_value) - self.assertEqual(results,"fer434f") + self.assertEqual(results, "fer434f") def test_sanitize_rqtl_phenotype(self): """test for sanitizing rqtl phenotype""" - vals=['f',"x","r","x","x"] - results=sanitize_rqtl_phenotype(vals) - expected_phenotype_string='c(f,NA,r,NA,NA)' + vals = ['f', "x", "r", "x","x"] + results = sanitize_rqtl_phenotype(vals) + expected_phenotype_string = 'c(f,NA,r,NA,NA)' - self.assertEqual(results,expected_phenotype_string) + self.assertEqual(results, expected_phenotype_string) def test_sanitize_rqtl_names(self): """test for sanitzing rqtl names""" - vals=['f',"x","r","x","x"] - expected_sanitized_name="c('f',NA,'r',NA,NA)" - results=sanitize_rqtl_names(vals) - self.assertEqual(expected_sanitized_name,results) + vals = ['f', "x", "r", "x","x"] + expected_sanitized_name = "c('f',NA,'r',NA,NA)" + results = sanitize_rqtl_names(vals) + self.assertEqual(expected_sanitized_name, results) diff --git a/wqflask/tests/wqflask/show_trait/testSampleList.py b/wqflask/tests/wqflask/show_trait/testSampleList.py index 34c51e3e..441a88a7 100644 --- a/wqflask/tests/wqflask/show_trait/testSampleList.py +++ b/wqflask/tests/wqflask/show_trait/testSampleList.py @@ -10,7 +10,7 @@ class TestSampleList(unittest.TestCase): characters_list = ["z", "f", "q", "s", "t", "a", "g"] names_list = ["temp1", "publish", "Sample", "Dataset"] - sorted_list_a=natural_sort(characters_list) - sorted_list_b=natural_sort(names_list) + sorted_list_a = natural_sort(characters_list) + sorted_list_b = natural_sort(names_list) self.assertEqual(sorted_list_a, ["a", "f", "g", "q", "s", "t", "z"]) - self.assertEqual(sorted_list_b,["Dataset", "Sample", "publish", "temp1"]) + self.assertEqual(sorted_list_b, ["Dataset", "Sample", "publish", "temp1"]) diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py index 61f408d2..68c2cb72 100644 --- a/wqflask/utility/Plot.py +++ b/wqflask/utility/Plot.py @@ -34,7 +34,7 @@ import utility.corestats as corestats from base import webqtlConfig from utility.pillow_utils import draw_rotated_text import utility.logger -logger = utility.logger.getLogger(__name__ ) +logger = utility.logger.getLogger(__name__) # ---- Define common colours ---- # BLUE = ImageColor.getrgb("blue") @@ -74,7 +74,7 @@ def frange(start, end=None, inc=1.0): end = start + 0.0 start = 0.0 else: - start += 0.0 # force it to be a float + start += 0.0 # force it to be a float count = int((end - start) / inc) if start + count * inc != end: # Need to adjust the count. AFAICT, it always comes up one short. 
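A minimal standalone sketch of the frange() helper reformatted in the hunk above, assuming the list-building tail that the hunk does not show; it illustrates why the computed count is bumped when integer truncation falls one step short:

    def frange(start, end=None, inc=1.0):
        """Float analogue of range(); mirrors the Plot.py helper above (sketch)."""
        if end is None:          # single-argument form: frange(stop)
            end = start + 0.0
            start = 0.0
        else:
            start += 0.0         # force it to be a float
        count = int((end - start) / inc)
        if start + count * inc != end:
            # Integer truncation usually comes up one step short.
            count += 1
        return [start + i * inc for i in range(count)]

    # frange(0, 1, 0.25) -> [0.0, 0.25, 0.5, 0.75]
    # frange(5)          -> [0.0, 1.0, 2.0, 3.0, 4.0]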
@@ -119,13 +119,13 @@ def find_outliers(vals): # parameter: data is either object returned by reaper permutation function (called by MarkerRegressionPage.py) # or the first object returned by direct (pair-scan) permu function (called by DirectPlotPage.py) -def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLabel=None, YLabel=None, title=None, offset= (60, 20, 40, 40), zoom = 1): +def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLabel=None, YLabel=None, title=None, offset=(60, 20, 40, 40), zoom=1): im_drawer = ImageDraw.Draw(canvas) xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset plotWidth = canvas.size[0] - xLeftOffset - xRightOffset plotHeight = canvas.size[1] - yTopOffset - yBottomOffset - if plotHeight<=0 or plotWidth<=0: + if plotHeight <= 0 or plotWidth <= 0: return if len(data) < 2: @@ -133,15 +133,15 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab max_D = max(data) min_D = min(data) - #add by NL 06-20-2011: fix the error: when max_D is infinite, log function in detScale will go wrong - if max_D == float('inf') or max_D>webqtlConfig.MAXLRS: - max_D=webqtlConfig.MAXLRS #maximum LRS value + # add by NL 06-20-2011: fix the error: when max_D is infinite, log function in detScale will go wrong + if max_D == float('inf') or max_D > webqtlConfig.MAXLRS: + max_D = webqtlConfig.MAXLRS # maximum LRS value xLow, xTop, stepX = detScale(min_D, max_D) - #reduce data - #ZS: Used to determine number of bins for permutation output - step = ceil((xTop-xLow)/50.0) + # reduce data + # ZS: Used to determine number of bins for permutation output + step = ceil((xTop - xLow) / 50.0) j = xLow dataXY = [] Count = [] @@ -151,122 +151,122 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab j += step for i, item in enumerate(data): - if item == float('inf') or item>webqtlConfig.MAXLRS: - item = webqtlConfig.MAXLRS #maximum LRS value - j = int((item-xLow)/step) + if item == float('inf') or item > webqtlConfig.MAXLRS: + item = webqtlConfig.MAXLRS # maximum LRS value + j = int((item - xLow) / step) Count[j] += 1 - yLow, yTop, stepY=detScale(0, max(Count)) + yLow, yTop, stepY = detScale(0, max(Count)) - #draw data - xScale = plotWidth/(xTop-xLow) - yScale = plotHeight/(yTop-yLow) - barWidth = xScale*step + # draw data + xScale = plotWidth / (xTop - xLow) + yScale = plotHeight / (yTop - yLow) + barWidth = xScale * step for i, count in enumerate(Count): if count: - xc = (dataXY[i]-xLow)*xScale+xLeftOffset - yc =-(count-yLow)*yScale+yTopOffset+plotHeight + xc = (dataXY[i] - xLow) * xScale + xLeftOffset + yc = -(count - yLow) * yScale + yTopOffset + plotHeight im_drawer.rectangle( - xy=((xc+2, yc), (xc+barWidth-2, yTopOffset+plotHeight)), + xy=((xc + 2, yc), (xc + barWidth - 2, yTopOffset + plotHeight)), outline=barColor, fill=barColor) - #draw drawing region + # draw drawing region im_drawer.rectangle( - xy=((xLeftOffset, yTopOffset), (xLeftOffset+plotWidth, yTopOffset+plotHeight)) + xy=((xLeftOffset, yTopOffset), (xLeftOffset + plotWidth, yTopOffset + plotHeight)) ) - #draw scale - scaleFont=ImageFont.truetype(font=COUR_FILE, size=11) - x=xLow - for i in range(int(stepX)+1): - xc=xLeftOffset+(x-xLow)*xScale + # draw scale + scaleFont = ImageFont.truetype(font=COUR_FILE, size=11) + x = xLow + for i in range(int(stepX) + 1): + xc = xLeftOffset + (x - xLow) * xScale im_drawer.line( - xy=((xc, yTopOffset+plotHeight), (xc, yTopOffset+plotHeight+5)), + xy=((xc, yTopOffset + plotHeight), 
(xc, yTopOffset + plotHeight + 5)), fill=axesColor) strX = cformat(d=x, rank=0) im_drawer.text( text=strX, - xy=(xc-im_drawer.textsize(strX, font=scaleFont)[0]/2, - yTopOffset+plotHeight+14), font=scaleFont) - x+= (xTop - xLow)/stepX - - y=yLow - for i in range(int(stepY)+1): - yc=yTopOffset+plotHeight-(y-yLow)*yScale - im_drawer.line(xy=((xLeftOffset, yc), (xLeftOffset-5, yc)), fill=axesColor) - strY = "%d" %y + xy=(xc - im_drawer.textsize(strX, font=scaleFont)[0] / 2, + yTopOffset + plotHeight + 14), font=scaleFont) + x += (xTop - xLow) / stepX + + y = yLow + for i in range(int(stepY) + 1): + yc = yTopOffset + plotHeight - (y - yLow) * yScale + im_drawer.line(xy=((xLeftOffset, yc), (xLeftOffset - 5, yc)), fill=axesColor) + strY = "%d" % y im_drawer.text( text=strY, - xy=(xLeftOffset-im_drawer.textsize(strY, font=scaleFont)[0]-6, yc+5), + xy=(xLeftOffset - im_drawer.textsize(strY, font=scaleFont)[0] - 6, yc + 5), font=scaleFont) - y+= (yTop - yLow)/stepY + y += (yTop - yLow) / stepY - #draw label - labelFont=ImageFont.truetype(font=TAHOMA_FILE, size=17) + # draw label + labelFont = ImageFont.truetype(font=TAHOMA_FILE, size=17) if XLabel: im_drawer.text( text=XLabel, - xy=(xLeftOffset+( - plotWidth-im_drawer.textsize(XLabel, font=labelFont)[0])/2.0, - yTopOffset+plotHeight+yBottomOffset-10), + xy=(xLeftOffset + ( + plotWidth - im_drawer.textsize(XLabel, font=labelFont)[0]) / 2.0, + yTopOffset + plotHeight + yBottomOffset-10), font=labelFont, fill=labelColor) if YLabel: draw_rotated_text(canvas, text=YLabel, xy=(19, - yTopOffset+plotHeight-( - plotHeight-im_drawer.textsize( - YLabel, font=labelFont)[0])/2.0), + yTopOffset + plotHeight - ( + plotHeight - im_drawer.textsize( + YLabel, font=labelFont)[0]) / 2.0), font=labelFont, fill=labelColor, angle=90) - labelFont=ImageFont.truetype(font=VERDANA_FILE, size=16) + labelFont = ImageFont.truetype(font=VERDANA_FILE, size=16) if title: im_drawer.text( text=title, - xy=(xLeftOffset+(plotWidth-im_drawer.textsize( - title, font=labelFont)[0])/2.0, + xy=(xLeftOffset + (plotWidth - im_drawer.textsize( + title, font=labelFont)[0]) / 2.0, 20), font=labelFont, fill=labelColor) # This function determines the scale of the plot def detScaleOld(min, max): - if min>=max: + if min >= max: return None elif min == -1.0 and max == 1.0: return [-1.2, 1.2, 12] else: - a=max-min - b=floor(log10(a)) - c=pow(10.0, b) - if a < c*5.0: - c/=2.0 - #print a,b,c - low=c*floor(min/c) - high=c*ceil(max/c) - return [low, high, round((high-low)/c)] - -def detScale(min=0,max=0): - - if min>=max: + a = max - min + b = floor(log10(a)) + c = pow(10.0, b) + if a < c * 5.0: + c /= 2.0 + # print a,b,c + low = c * floor(min / c) + high = c * ceil(max / c) + return [low, high, round((high - low) / c)] + +def detScale(min=0, max=0): + + if min >= max: return None elif min == -1.0 and max == 1.0: return [-1.2, 1.2, 12] else: - a=max-min + a = max - min if max != 0: - max += 0.1*a + max += 0.1 * a if min != 0: - if min > 0 and min < 0.1*a: + if min > 0 and min < 0.1 * a: min = 0.0 else: - min -= 0.1*a - a=max-min - b=floor(log10(a)) - c=pow(10.0, b) - low=c*floor(min/c) - high=c*ceil(max/c) - n = round((high-low)/c) + min -= 0.1 * a + a = max - min + b = floor(log10(a)) + c = pow(10.0, b) + low = c * floor(min / c) + high = c * ceil(max / c) + n = round((high - low) / c) div = 2.0 while n < 5 or n > 15: if n < 5: @@ -274,23 +274,23 @@ def detScale(min=0,max=0): else: c *= div if div == 2.0: - div =5.0 + div = 5.0 else: - div =2.0 - low=c*floor(min/c) - high=c*ceil(max/c) - n = 
round((high-low)/c) + div = 2.0 + low = c * floor(min / c) + high = c * ceil(max / c) + n = round((high - low) / c) return [low, high, n] def bluefunc(x): - return 1.0 / (1.0 + exp(-10*(x-0.6))) + return 1.0 / (1.0 + exp(-10 * (x - 0.6))) def redfunc(x): - return 1.0 / (1.0 + exp(10*(x-0.5))) + return 1.0 / (1.0 + exp(10 * (x - 0.5))) def greenfunc(x): - return 1 - pow(redfunc(x+0.2), 2) - bluefunc(x-0.3) + return 1 - pow(redfunc(x + 0.2), 2) - bluefunc(x - 0.3) def colorSpectrum(n=100): multiple = 10 @@ -303,17 +303,17 @@ def colorSpectrum(n=100): return [ImageColor.getrgb("rgb(100%,0%,0%)"), ImageColor.getrgb("rgb(0%,100%,0%)"), ImageColor.getrgb("rgb(0%,0%,100%)")] - N = n*multiple - out = [None]*N; + N = n * multiple + out = [None] * N; for i in range(N): - x = float(i)/N + x = float(i) / N out[i] = ImageColor.getrgb("rgb({}%,{}%,{}%".format( - *[int(i*100) for i in ( + *[int(i * 100) for i in ( redfunc(x), greenfunc(x), bluefunc(x))])) out2 = [out[0]] - step = N/float(n-1) + step = N / float(n - 1) j = 0 - for i in range(n-2): + for i in range(n - 2): j += step out2.append(out[int(j)]) out2.append(out[-1]) @@ -324,5 +324,5 @@ def _test(): doctest.testmod() -if __name__=="__main__": +if __name__ == "__main__": _test() diff --git a/wqflask/utility/TDCell.py b/wqflask/utility/TDCell.py index 8de8e050..4b0f4b1d 100644 --- a/wqflask/utility/TDCell.py +++ b/wqflask/utility/TDCell.py @@ -33,9 +33,9 @@ class TDCell: def __init__(self, html="", text="", val=0.0): - self.html = html #html, for web page - self.text = text #text value, for output to a text file - self.val = val #sort by value + self.html = html # html, for web page + self.text = text # text value, for output to a text file + self.val = val # sort by value def __str__(self): return self.text diff --git a/wqflask/utility/THCell.py b/wqflask/utility/THCell.py index dde221b5..f533dcb8 100644 --- a/wqflask/utility/THCell.py +++ b/wqflask/utility/THCell.py @@ -33,10 +33,10 @@ class THCell: def __init__(self, html="", text="", sort=1, idx=-1): - self.html = html #html, for web page - self.text = text #Column text value - self.sort = sort #0: not sortable, 1: yes - self.idx = idx #sort by value + self.html = html # html, for web page + self.text = text # Column text value + self.sort = sort # 0: not sortable, 1: yes + self.idx = idx # sort by value def __str__(self): return self.text diff --git a/wqflask/utility/__init__.py b/wqflask/utility/__init__.py index df926884..ec7e72d0 100644 --- a/wqflask/utility/__init__.py +++ b/wqflask/utility/__init__.py @@ -4,7 +4,7 @@ from pprint import pformat as pf class Bunch: """Like a dictionary but using object notation""" - def __init__(self, **kw): + def __init__(self, **kw): self.__dict__ = kw def __repr__(self): diff --git a/wqflask/utility/benchmark.py b/wqflask/utility/benchmark.py index 91ea91e8..3d40a3b8 100644 --- a/wqflask/utility/benchmark.py +++ b/wqflask/utility/benchmark.py @@ -4,7 +4,7 @@ import time from utility.tools import LOG_BENCH from utility.logger import getLogger -logger = getLogger(__name__ ) +logger = getLogger(__name__) class Bench: entries = collections.OrderedDict() @@ -39,7 +39,7 @@ class Bench: total_time = sum((time_taken for time_taken in list(cls.entries.values()))) print("\nTiming report\n") for name, time_taken in list(cls.entries.items()): - percent = int(round((time_taken/total_time) * 100)) + percent = int(round((time_taken / total_time) * 100)) print("[{}%] {}: {}".format(percent, name, time_taken)) print() diff --git a/wqflask/utility/chunks.py 
b/wqflask/utility/chunks.py index 9a7db102..484b5de6 100644 --- a/wqflask/utility/chunks.py +++ b/wqflask/utility/chunks.py @@ -26,6 +26,6 @@ def divide_into_chunks(the_list, number_chunks): chunks = [] for counter in range(0, length, chunksize): - chunks.append(the_list[counter:counter+chunksize]) + chunks.append(the_list[counter:counter + chunksize]) return chunks diff --git a/wqflask/utility/corestats.py b/wqflask/utility/corestats.py index 67ca3ad3..15d1cb8d 100644 --- a/wqflask/utility/corestats.py +++ b/wqflask/utility/corestats.py @@ -15,7 +15,7 @@ import sys -#ZS: Should switch to using some third party library for this; maybe scipy has an equivalent +# ZS: Should switch to using some third party library for this; maybe scipy has an equivalent class Stats: def __init__(self, sequence): @@ -63,7 +63,7 @@ class Stats: if len(self.sequence) < 1: value = None elif (percentile >= 100): - sys.stderr.write('ERROR: percentile must be < 100. you supplied: %s\n'% percentile) + sys.stderr.write('ERROR: percentile must be < 100. you supplied: %s\n' % percentile) value = None else: element_idx = int(len(self.sequence) * (percentile / 100.0)) @@ -80,4 +80,4 @@ class Stats: # stats = corestats.Stats(sequence) # print stats.avg() # print stats.percentile(90) -# ------------------------------------------- \ No newline at end of file +# ------------------------------------------- diff --git a/wqflask/utility/elasticsearch_tools.py b/wqflask/utility/elasticsearch_tools.py index a5580811..ae1181e4 100644 --- a/wqflask/utility/elasticsearch_tools.py +++ b/wqflask/utility/elasticsearch_tools.py @@ -48,7 +48,7 @@ logger = getLogger(__name__) from utility.tools import ELASTICSEARCH_HOST, ELASTICSEARCH_PORT def test_elasticsearch_connection(): - es = Elasticsearch(['http://'+ELASTICSEARCH_HOST+":"+str(ELASTICSEARCH_PORT)+'/'], verify_certs=True) + es = Elasticsearch(['http://' + ELASTICSEARCH_HOST + ":" + str(ELASTICSEARCH_PORT) + '/'], verify_certs=True) if not es.ping(): logger.warning("Elasticsearch is DOWN") @@ -97,8 +97,8 @@ def get_item_by_unique_column(es, column_name, column_value, index, doc_type): item_details = None try: response = es.search( - index = index, doc_type = doc_type, body = { - "query": { "match": { column_name: column_value } } + index=index, doc_type=doc_type, body = { + "query": {"match": {column_name: column_value}} }) if len(response["hits"]["hits"]) > 0: item_details = response["hits"]["hits"][0]["_source"] @@ -109,4 +109,4 @@ def get_item_by_unique_column(es, column_name, column_value, index, doc_type): def es_save_data(es, index, doc_type, data_item, data_id,): from time import sleep es.create(index, doc_type, body=data_item, id=data_id) - sleep(1) # Delay 1 second to allow indexing + sleep(1) # Delay 1 second to allow indexing diff --git a/wqflask/utility/external.py b/wqflask/utility/external.py index 50afea08..c1bf4043 100644 --- a/wqflask/utility/external.py +++ b/wqflask/utility/external.py @@ -6,4 +6,4 @@ import subprocess def shell(command): if subprocess.call(command, shell=True) != 0: - raise Exception("ERROR: failed on "+command) + raise Exception("ERROR: failed on " + command) diff --git a/wqflask/utility/gen_geno_ob.py b/wqflask/utility/gen_geno_ob.py index 0a381c9b..9cfa39f9 100644 --- a/wqflask/utility/gen_geno_ob.py +++ b/wqflask/utility/gen_geno_ob.py @@ -1,5 +1,5 @@ import utility.logger -logger = utility.logger.getLogger(__name__ ) +logger = utility.logger.getLogger(__name__) class genotype: """ @@ -18,7 +18,7 @@ class genotype: self.filler = False 
self.mb_exists = False - #ZS: This is because I'm not sure if some files switch the column that contains Mb/cM positions; might be unnecessary + # ZS: This is because I'm not sure if some files switch the column that contains Mb/cM positions; might be unnecessary self.cm_column = 2 self.mb_column = 3 @@ -36,14 +36,14 @@ class genotype: return len(self.chromosomes) def read_rdata_output(self, qtl_results): - #ZS: This is necessary because R/qtl requires centimorgan marker positions, which it normally gets from the .geno file, but that doesn't exist for HET3-ITP (which only has RData), so it needs to read in the marker cM positions from the results - self.chromosomes = [] #ZS: Overwriting since the .geno file's contents are just placeholders + # ZS: This is necessary because R/qtl requires centimorgan marker positions, which it normally gets from the .geno file, but that doesn't exist for HET3-ITP (which only has RData), so it needs to read in the marker cM positions from the results + self.chromosomes = [] # ZS: Overwriting since the .geno file's contents are just placeholders - this_chr = "" #ZS: This is so it can track when the chromosome changes as it iterates through markers + this_chr = "" # ZS: This is so it can track when the chromosome changes as it iterates through markers chr_ob = None for marker in qtl_results: locus = Locus(self) - if (str(marker['chr']) != this_chr) and this_chr != "X": #ZS: This is really awkward but works as a temporary fix + if (str(marker['chr']) != this_chr) and this_chr != "X": # ZS: This is really awkward but works as a temporary fix if this_chr != "": self.chromosomes.append(chr_ob) this_chr = str(marker['chr']) @@ -68,7 +68,7 @@ class genotype: with open(filename, 'r') as geno_file: lines = geno_file.readlines() - this_chr = "" #ZS: This is so it can track when the chromosome changes as it iterates through markers + this_chr = "" # ZS: This is so it can track when the chromosome changes as it iterates through markers chr_ob = None for line in lines: if line[0] == "#": @@ -141,7 +141,7 @@ class Chr: self.loci.append(Locus(self.geno_ob, marker_row)) class Locus: - def __init__(self, geno_ob, marker_row = None): + def __init__(self, geno_ob, marker_row=None): self.chr = None self.name = None self.cM = None @@ -175,5 +175,5 @@ class Locus: for allele in marker_row[start_pos:]: if allele in list(geno_table.keys()): self.genotype.append(geno_table[allele]) - else: #ZS: Some genotype appears that isn't specified in the metadata, make it unknown + else: # ZS: Some genotype appears that isn't specified in the metadata, make it unknown self.genotype.append("U") diff --git a/wqflask/utility/helper_functions.py b/wqflask/utility/helper_functions.py index 12fd6be5..ecc075ae 100644 --- a/wqflask/utility/helper_functions.py +++ b/wqflask/utility/helper_functions.py @@ -39,7 +39,7 @@ def get_trait_db_obs(self, trait_db_list): for trait in trait_db_list: data, _separator, hmac_string = trait.rpartition(':') data = data.strip() - assert hmac_string==hmac.hmac_creation(data), "Data tampering?" + assert hmac_string == hmac.hmac_creation(data), "Data tampering?" 
trait_name, dataset_name = data.split(":")[:2] if dataset_name == "Temp": dataset_ob = data_set.create_dataset( diff --git a/wqflask/utility/logger.py b/wqflask/utility/logger.py index e904eb94..16912e58 100644 --- a/wqflask/utility/logger.py +++ b/wqflask/utility/logger.py @@ -49,14 +49,14 @@ class GNLogger: """Set the undelying log level""" self.logger.setLevel(value) - def debug(self,*args): + def debug(self, *args): """Call logging.debug for multiple args. Use (lazy) debugf and level=num to filter on LOG_LEVEL_DEBUG. """ self.collect(self.logger.debug, *args) - def debug20(self,*args): + def debug20(self, *args): """Call logging.debug for multiple args. Use level=num to filter on LOG_LEVEL_DEBUG (NYI). @@ -65,29 +65,29 @@ LOG_LEVEL_DEBUG (NYI). if self.logger.getEffectiveLevel() < 20: self.collect(self.logger.debug, *args) - def info(self,*args): + def info(self, *args): """Call logging.info for multiple args""" self.collect(self.logger.info, *args) - def warning(self,*args): + def warning(self, *args): """Call logging.warning for multiple args""" self.collect(self.logger.warning, *args) # self.logger.warning(self.collect(*args)) - def error(self,*args): + def error(self, *args): """Call logging.error for multiple args""" now = datetime.datetime.utcnow() time_str = now.strftime('%H:%M:%S UTC %Y%m%d') - l = [time_str]+list(args) + l = [time_str] + list(args) self.collect(self.logger.error, *l) - def infof(self,*args): + def infof(self, *args): """Call logging.info for multiple args lazily""" # only evaluate function when logging if self.logger.getEffectiveLevel() < 30: self.collectf(self.logger.debug, *args) - def debugf(self,level=0,*args): + def debugf(self, level=0, *args): """Call logging.debug for multiple args lazily and handle LOG_LEVEL_DEBUG correctly @@ -97,7 +97,7 @@ LOG_LEVEL_DEBUG (NYI). if self.logger.getEffectiveLevel() < 20: self.collectf(self.logger.debug, *args) - def sql(self, sqlcommand, fun = None): + def sql(self, sqlcommand, fun=None): """Log SQL command, optionally invoking a timed fun""" if LOG_SQL: caller = stack()[1][3] @@ -110,11 +110,11 @@ LOG_LEVEL_DEBUG (NYI). self.info(result) return result - def collect(self,fun,*args): + def collect(self, fun, *args): """Collect arguments and use fun to output""" - out = "."+stack()[2][3] + out = "." + stack()[2][3] for a in args: - if len(out)>1: + if len(out) > 1: out += ": " if isinstance(a, str): out = out + a @@ -122,11 +122,11 @@ LOG_LEVEL_DEBUG (NYI). out = out + pf(a, width=160) fun(out) - def collectf(self,fun,*args): + def collectf(self, fun, *args): """Collect arguments and use fun to output one by one""" - out = "."+stack()[2][3] + out = "." + stack()[2][3] for a in args: - if len(out)>1: + if len(out) > 1: out += ": " if isfunction(a): out += a() @@ -139,7 +139,7 @@ LOG_LEVEL_DEBUG (NYI). # Get the module logger. 
You can override log levels at the # module level -def getLogger(name, level = None): +def getLogger(name, level=None): gnlogger = GNLogger(name) logger = gnlogger.logger @@ -148,5 +148,5 @@ def getLogger(name, level = None): else: logger.setLevel(LOG_LEVEL) - logger.info("Log level of "+name+" set to "+logging.getLevelName(logger.getEffectiveLevel())) + logger.info("Log level of " + name + " set to " + logging.getLevelName(logger.getEffectiveLevel())) return gnlogger diff --git a/wqflask/utility/pillow_utils.py b/wqflask/utility/pillow_utils.py index c486abba..6e95beb0 100644 --- a/wqflask/utility/pillow_utils.py +++ b/wqflask/utility/pillow_utils.py @@ -3,7 +3,7 @@ from PIL import Image, ImageColor, ImageDraw, ImageFont from utility.tools import TEMPDIR import utility.logger -logger = utility.logger.getLogger(__name__ ) +logger = utility.logger.getLogger(__name__) BLACK = ImageColor.getrgb("black") WHITE = ImageColor.getrgb("white") diff --git a/wqflask/utility/startup_config.py b/wqflask/utility/startup_config.py index f1aaebb6..f22f4b14 100644 --- a/wqflask/utility/startup_config.py +++ b/wqflask/utility/startup_config.py @@ -3,12 +3,12 @@ from wqflask import app from utility.tools import WEBSERVER_MODE, show_settings, get_setting_int, get_setting, get_setting_bool import utility.logger -logger = utility.logger.getLogger(__name__ ) +logger = utility.logger.getLogger(__name__) -BLUE = '\033[94m' +BLUE = '\033[94m' GREEN = '\033[92m' -BOLD = '\033[1m' -ENDC = '\033[0m' +BOLD = '\033[1m' +ENDC = '\033[0m' def app_config(): app.config['SESSION_TYPE'] = 'filesystem' @@ -27,7 +27,7 @@ def app_config(): port = get_setting_int("SERVER_PORT") if get_setting_bool("USE_GN_SERVER"): - print(("GN2 API server URL is ["+BLUE+get_setting("GN_SERVER_URL")+ENDC+"]")) + print(("GN2 API server URL is [" + BLUE + get_setting("GN_SERVER_URL") + ENDC + "]")) import requests page = requests.get(get_setting("GN_SERVER_URL")) if page.status_code != 200: diff --git a/wqflask/utility/svg.py b/wqflask/utility/svg.py index b92cc2d1..4c478c36 100644 --- a/wqflask/utility/svg.py +++ b/wqflask/utility/svg.py @@ -172,7 +172,7 @@ def _viewboxlist(a): """formats a tuple""" s = '' for e in a: - s += str(e)+' ' + s += str(e) + ' ' return s @@ -189,7 +189,7 @@ class pathdata: def __init__(self, x=None, y=None): self.path = [] if x is not None and y is not None: - self.path.append('M '+str(x)+' '+str(y)) + self.path.append('M ' + str(x) + ' ' + str(y)) def closepath(self): """ends the path""" @@ -197,79 +197,79 @@ class pathdata: def move(self, x, y): """move to absolute""" - self.path.append('M '+str(x)+' '+str(y)) + self.path.append('M ' + str(x) + ' ' + str(y)) def relmove(self, x, y): """move to relative""" - self.path.append('m '+str(x)+' '+str(y)) + self.path.append('m ' + str(x) + ' ' + str(y)) def line(self, x, y): """line to absolute""" - self.path.append('L '+str(x)+' '+str(y)) + self.path.append('L ' + str(x) + ' ' + str(y)) def relline(self, x, y): """line to relative""" - self.path.append('l '+str(x)+' '+str(y)) + self.path.append('l ' + str(x) + ' ' + str(y)) def hline(self, x): """horizontal line to absolute""" - self.path.append('H'+str(x)) + self.path.append('H' + str(x)) def relhline(self, x): """horizontal line to relative""" - self.path.append('h'+str(x)) + self.path.append('h' + str(x)) def vline(self, y): """verical line to absolute""" - self.path.append('V'+str(y)) + self.path.append('V' + str(y)) def relvline(self, y): """vertical line to relative""" - self.path.append('v'+str(y)) + 
self.path.append('v' + str(y)) def bezier(self, x1, y1, x2, y2, x, y): """bezier with xy1 and xy2 to xy absolut""" - self.path.append('C'+str(x1)+','+str(y1)+' '+str(x2) + - ','+str(y2)+' '+str(x)+','+str(y)) + self.path.append('C' + str(x1) + ','+str(y1)+' '+str(x2) + + ',' + str(y2) + ' '+str(x)+','+str(y)) def relbezier(self, x1, y1, x2, y2, x, y): """bezier with xy1 and xy2 to xy relative""" - self.path.append('c'+str(x1)+','+str(y1)+' '+str(x2) + - ','+str(y2)+' '+str(x)+','+str(y)) + self.path.append('c' + str(x1) + ','+str(y1)+' '+str(x2) + + ',' + str(y2) + ' '+str(x)+','+str(y)) def smbezier(self, x2, y2, x, y): """smooth bezier with xy2 to xy absolut""" - self.path.append('S'+str(x2)+','+str(y2)+' '+str(x)+','+str(y)) + self.path.append('S' + str(x2) + ',' + str(y2) + ' ' + str(x) + ',' + str(y)) def relsmbezier(self, x2, y2, x, y): """smooth bezier with xy2 to xy relative""" - self.path.append('s'+str(x2)+','+str(y2)+' '+str(x)+','+str(y)) + self.path.append('s' + str(x2) + ',' + str(y2) + ' ' + str(x) + ',' + str(y)) def qbezier(self, x1, y1, x, y): """quadratic bezier with xy1 to xy absolut""" - self.path.append('Q'+str(x1)+','+str(y1)+' '+str(x)+','+str(y)) + self.path.append('Q' + str(x1) + ',' + str(y1) + ' ' + str(x) + ',' + str(y)) def relqbezier(self, x1, y1, x, y): """quadratic bezier with xy1 to xy relative""" - self.path.append('q'+str(x1)+','+str(y1)+' '+str(x)+','+str(y)) + self.path.append('q' + str(x1) + ',' + str(y1) + ' ' + str(x) + ',' + str(y)) def smqbezier(self, x, y): """smooth quadratic bezier to xy absolut""" - self.path.append('T'+str(x)+','+str(y)) + self.path.append('T' + str(x) + ',' + str(y)) def relsmqbezier(self, x, y): """smooth quadratic bezier to xy relative""" - self.path.append('t'+str(x)+','+str(y)) + self.path.append('t' + str(x) + ',' + str(y)) def ellarc(self, rx, ry, xrot, laf, sf, x, y): """elliptival arc with rx and ry rotating with xrot using large-arc-flag and sweep-flag to xy absolut""" - self.path.append('A'+str(rx)+','+str(ry)+' '+str(xrot) + - ' '+str(laf)+' '+str(sf)+' '+str(x)+' '+str(y)) + self.path.append('A' + str(rx) + ','+str(ry)+' '+str(xrot) + + ' ' + str(laf) + ' '+str(sf)+' '+str(x)+' '+str(y)) def relellarc(self, rx, ry, xrot, laf, sf, x, y): """elliptival arc with rx and ry rotating with xrot using large-arc-flag and sweep-flag to xy relative""" - self.path.append('a'+str(rx)+','+str(ry)+' '+str(xrot) + - ' '+str(laf)+' '+str(sf)+' '+str(x)+' '+str(y)) + self.path.append('a' + str(rx) + ','+str(ry)+' '+str(xrot) + + ' ' + str(laf) + ' '+str(sf)+' '+str(x)+' '+str(y)) def __repr__(self): return ' '.join(self.path) @@ -312,10 +312,10 @@ class SVGelement: self.elements.append(SVGelement) def toXml(self, level, f): - f.write('\t'*level) - f.write('<'+self.type) + f.write('\t' * level) + f.write('<' + self.type) for attkey in list(self.attributes.keys()): - f.write(' '+_escape(str(attkey))+'=' + + f.write(' ' + _escape(str(attkey)) + '=' + _quoteattr(str(self.attributes[attkey]))) if self.namespace: f.write(' xmlns="' + _escape(str(self.namespace)) + @@ -325,23 +325,23 @@ class SVGelement: if self.elements: f.write('\n') for element in self.elements: - element.toXml(level+1, f) + element.toXml(level + 1, f) if self.cdata: - f.write('\n'+'\t'*(level+1)+'\n') + f.write('\n' + '\t' * (level + 2) + line) + f.write('\n' + '\t' * (level + 1) + ']]>\n') if self.text: if isinstance(self.text, type('')): # If the text is only text f.write(_escape(str(self.text))) else: # If the text is a spannedtext class f.write(str(self.text)) if 
self.elements: - f.write('\t'*level+'\n') + f.write('\t' * level + '\n') elif self.text: - f.write('\n') + f.write('\n') elif self.cdata: - f.write('\t'*level+'\n') + f.write('\t' * level + '\n') else: f.write('/>\n') @@ -447,38 +447,38 @@ class rect(SVGelement): if width == None or height == None: raise ValueError('both height and width are required') - SVGelement.__init__(self, 'rect', {'width':width,'height':height}, **args) - if x!=None: - self.attributes['x']=x - if y!=None: - self.attributes['y']=y - if fill!=None: - self.attributes['fill']=fill - if stroke!=None: - self.attributes['stroke']=stroke - if stroke_width!=None: - self.attributes['stroke-width']=stroke_width + SVGelement.__init__(self, 'rect', {'width': width, 'height': height}, **args) + if x != None: + self.attributes['x'] = x + if y != None: + self.attributes['y'] = y + if fill != None: + self.attributes['fill'] = fill + if stroke != None: + self.attributes['stroke'] = stroke + if stroke_width != None: + self.attributes['stroke-width'] = stroke_width class ellipse(SVGelement): """e=ellipse(rx,ry,x,y,fill,stroke,stroke_width,**args) an ellipse is defined as a center and a x and y radius. """ - def __init__(self,cx=None,cy=None,rx=None,ry=None,fill=None,stroke=None,stroke_width=None,**args): - if rx==None or ry== None: + def __init__(self, cx=None, cy=None, rx=None, ry=None,fill=None,stroke=None,stroke_width=None,**args): + if rx == None or ry == None: raise ValueError('both rx and ry are required') - SVGelement.__init__(self, 'ellipse', {'rx':rx,'ry':ry}, **args) - if cx!=None: - self.attributes['cx']=cx - if cy!=None: - self.attributes['cy']=cy - if fill!=None: - self.attributes['fill']=fill - if stroke!=None: - self.attributes['stroke']=stroke - if stroke_width!=None: - self.attributes['stroke-width']=stroke_width + SVGelement.__init__(self, 'ellipse', {'rx': rx, 'ry': ry}, **args) + if cx != None: + self.attributes['cx'] = cx + if cy != None: + self.attributes['cy'] = cy + if fill != None: + self.attributes['fill'] = fill + if stroke != None: + self.attributes['stroke'] = stroke + if stroke_width != None: + self.attributes['stroke-width'] = stroke_width class circle(SVGelement): @@ -486,20 +486,20 @@ class circle(SVGelement): The circle creates an element using a x, y and radius values eg """ - def __init__(self,cx=None,cy=None,r=None,fill=None,stroke=None,stroke_width=None,**args): - if r==None: + def __init__(self, cx=None, cy=None, r=None, fill=None,stroke=None,stroke_width=None,**args): + if r == None: raise ValueError('r is required') - SVGelement.__init__(self, 'circle', {'r':r}, **args) - if cx!=None: - self.attributes['cx']=cx - if cy!=None: - self.attributes['cy']=cy - if fill!=None: - self.attributes['fill']=fill - if stroke!=None: - self.attributes['stroke']=stroke - if stroke_width!=None: - self.attributes['stroke-width']=stroke_width + SVGelement.__init__(self, 'circle', {'r': r}, **args) + if cx != None: + self.attributes['cx'] = cx + if cy != None: + self.attributes['cy'] = cy + if fill != None: + self.attributes['fill'] = fill + if stroke != None: + self.attributes['stroke'] = stroke + if stroke_width != None: + self.attributes['stroke-width'] = stroke_width class point(circle): """p=point(x,y,color) @@ -507,7 +507,7 @@ class point(circle): A point is defined as a circle with a size 1 radius. It may be more efficient to use a very small rectangle if you use many points because a circle is difficult to render. 
""" - def __init__(self,x,y,fill='black',**args): + def __init__(self, x, y, fill='black', **args): circle.__init__(self, x, y, 1, fill, **args) class line(SVGelement): @@ -515,64 +515,64 @@ class line(SVGelement): A line is defined by a begin x,y pair and an end x,y pair """ - def __init__(self,x1=None,y1=None,x2=None,y2=None,stroke=None,stroke_width=None,**args): + def __init__(self, x1=None, y1=None, x2=None, y2=None,stroke=None,stroke_width=None,**args): SVGelement.__init__(self, 'line', **args) - if x1!=None: - self.attributes['x1']=x1 - if y1!=None: - self.attributes['y1']=y1 - if x2!=None: - self.attributes['x2']=x2 - if y2!=None: - self.attributes['y2']=y2 - if stroke_width!=None: - self.attributes['stroke-width']=stroke_width - if stroke!=None: - self.attributes['stroke']=stroke + if x1 != None: + self.attributes['x1'] = x1 + if y1 != None: + self.attributes['y1'] = y1 + if x2 != None: + self.attributes['x2'] = x2 + if y2 != None: + self.attributes['y2'] = y2 + if stroke_width != None: + self.attributes['stroke-width'] = stroke_width + if stroke != None: + self.attributes['stroke'] = stroke class polyline(SVGelement): """pl=polyline([[x1,y1],[x2,y2],...],fill,stroke,stroke_width,**args) a polyline is defined by a list of xy pairs """ - def __init__(self,points,fill=None,stroke=None,stroke_width=None,**args): - SVGelement.__init__(self, 'polyline', {'points':_xypointlist(points)}, **args) - if fill!=None: - self.attributes['fill']=fill - if stroke_width!=None: - self.attributes['stroke-width']=stroke_width - if stroke!=None: - self.attributes['stroke']=stroke + def __init__(self, points, fill=None, stroke=None, stroke_width=None,**args): + SVGelement.__init__(self, 'polyline', {'points': _xypointlist(points)}, **args) + if fill != None: + self.attributes['fill'] = fill + if stroke_width != None: + self.attributes['stroke-width'] = stroke_width + if stroke != None: + self.attributes['stroke'] = stroke class polygon(SVGelement): """pl=polyline([[x1,y1],[x2,y2],...],fill,stroke,stroke_width,**args) a polygon is defined by a list of xy pairs """ - def __init__(self,points,fill=None,stroke=None,stroke_width=None,**args): - SVGelement.__init__(self, 'polygon', {'points':_xypointlist(points)}, **args) - if fill!=None: - self.attributes['fill']=fill - if stroke_width!=None: - self.attributes['stroke-width']=stroke_width - if stroke!=None: - self.attributes['stroke']=stroke + def __init__(self, points, fill=None, stroke=None, stroke_width=None,**args): + SVGelement.__init__(self, 'polygon', {'points': _xypointlist(points)}, **args) + if fill != None: + self.attributes['fill'] = fill + if stroke_width != None: + self.attributes['stroke-width'] = stroke_width + if stroke != None: + self.attributes['stroke'] = stroke class path(SVGelement): """p=path(path,fill,stroke,stroke_width,**args) a path is defined by a path object and optional width, stroke and fillcolor """ - def __init__(self,pathdata,fill=None,stroke=None,stroke_width=None,id=None,**args): - SVGelement.__init__(self, 'path', {'d':str(pathdata)}, **args) - if stroke!=None: - self.attributes['stroke']=stroke - if fill!=None: - self.attributes['fill']=fill - if stroke_width!=None: - self.attributes['stroke-width']=stroke_width - if id!=None: - self.attributes['id']=id + def __init__(self, pathdata, fill=None, stroke=None, stroke_width=None,id=None,**args): + SVGelement.__init__(self, 'path', {'d': str(pathdata)}, **args) + if stroke != None: + self.attributes['stroke'] = stroke + if fill != None: + self.attributes['fill'] = fill + if 
stroke_width != None: + self.attributes['stroke-width'] = stroke_width + if id != None: + self.attributes['id'] = id class text(SVGelement): @@ -580,20 +580,20 @@ class text(SVGelement): a text element can bge used for displaying text on the screen """ - def __init__(self,x=None,y=None,text=None,font_size=None,font_family=None,text_anchor=None,**args): + def __init__(self, x=None, y=None, text=None, font_size=None,font_family=None,text_anchor=None,**args): SVGelement.__init__(self, 'text', **args) - if x!=None: - self.attributes['x']=x - if y!=None: - self.attributes['y']=y - if font_size!=None: - self.attributes['font-size']=font_size - if font_family!=None: - self.attributes['font-family']=font_family - if text!=None: - self.text=text - if text_anchor!=None: - self.attributes['text-anchor']=text_anchor + if x != None: + self.attributes['x'] = x + if y != None: + self.attributes['y'] = y + if font_size != None: + self.attributes['font-size'] = font_size + if font_family != None: + self.attributes['font-family'] = font_family + if text != None: + self.text = text + if text_anchor != None: + self.attributes['text-anchor'] = text_anchor class textpath(SVGelement): @@ -601,10 +601,10 @@ class textpath(SVGelement): a textpath places a text on a path which is referenced by a link. """ - def __init__(self,link,text=None,**args): - SVGelement.__init__(self, 'textPath', {'xlink:href':link}, **args) - if text!=None: - self.text=text + def __init__(self, link, text=None, **args): + SVGelement.__init__(self, 'textPath', {'xlink:href': link}, **args) + if text != None: + self.text = text class pattern(SVGelement): """p=pattern(x,y,width,height,patternUnits,**args) @@ -613,18 +613,18 @@ class pattern(SVGelement): graphic object which can be replicated ("tiled") at fixed intervals in x and y to cover the areas to be painted. """ - def __init__(self,x=None,y=None,width=None,height=None,patternUnits=None,**args): + def __init__(self, x=None, y=None, width=None, height=None,patternUnits=None,**args): SVGelement.__init__(self, 'pattern', **args) - if x!=None: - self.attributes['x']=x - if y!=None: - self.attributes['y']=y - if width!=None: - self.attributes['width']=width - if height!=None: - self.attributes['height']=height - if patternUnits!=None: - self.attributes['patternUnits']=patternUnits + if x != None: + self.attributes['x'] = x + if y != None: + self.attributes['y'] = y + if width != None: + self.attributes['width'] = width + if height != None: + self.attributes['height'] = height + if patternUnits != None: + self.attributes['patternUnits'] = patternUnits class title(SVGelement): """t=title(text,**args) @@ -632,10 +632,10 @@ class title(SVGelement): a title is a text element. The text is displayed in the title bar add at least one to the root svg element """ - def __init__(self,text=None,**args): + def __init__(self, text=None, **args): SVGelement.__init__(self, 'title', **args) - if text!=None: - self.text=text + if text != None: + self.text = text class description(SVGelement): """d=description(text,**args) @@ -643,10 +643,10 @@ class description(SVGelement): a description can be added to any element and is used for a tooltip Add this element before adding other elements. 
""" - def __init__(self,text=None,**args): + def __init__(self, text=None, **args): SVGelement.__init__(self, 'desc', **args) - if text!=None: - self.text=text + if text != None: + self.text = text class lineargradient(SVGelement): """lg=lineargradient(x1,y1,x2,y2,id,**args) @@ -654,18 +654,18 @@ class lineargradient(SVGelement): defines a lineargradient using two xy pairs. stop elements van be added to define the gradient colors. """ - def __init__(self,x1=None,y1=None,x2=None,y2=None,id=None,**args): + def __init__(self, x1=None, y1=None, x2=None, y2=None,id=None,**args): SVGelement.__init__(self, 'linearGradient', **args) - if x1!=None: - self.attributes['x1']=x1 - if y1!=None: - self.attributes['y1']=y1 - if x2!=None: - self.attributes['x2']=x2 - if y2!=None: - self.attributes['y2']=y2 - if id!=None: - self.attributes['id']=id + if x1 != None: + self.attributes['x1'] = x1 + if y1 != None: + self.attributes['y1'] = y1 + if x2 != None: + self.attributes['x2'] = x2 + if y2 != None: + self.attributes['y2'] = y2 + if id != None: + self.attributes['id'] = id class radialgradient(SVGelement): """rg=radialgradient(cx,cy,r,fx,fy,id,**args) @@ -673,38 +673,38 @@ class radialgradient(SVGelement): defines a radial gradient using a outer circle which are defined by a cx,cy and r and by using a focalpoint. stop elements van be added to define the gradient colors. """ - def __init__(self,cx=None,cy=None,r=None,fx=None,fy=None,id=None,**args): + def __init__(self, cx=None, cy=None, r=None, fx=None,fy=None,id=None,**args): SVGelement.__init__(self, 'radialGradient', **args) - if cx!=None: - self.attributes['cx']=cx - if cy!=None: - self.attributes['cy']=cy - if r!=None: - self.attributes['r']=r - if fx!=None: - self.attributes['fx']=fx - if fy!=None: - self.attributes['fy']=fy - if id!=None: - self.attributes['id']=id + if cx != None: + self.attributes['cx'] = cx + if cy != None: + self.attributes['cy'] = cy + if r != None: + self.attributes['r'] = r + if fx != None: + self.attributes['fx'] = fx + if fy != None: + self.attributes['fy'] = fy + if id != None: + self.attributes['id'] = id class stop(SVGelement): """st=stop(offset,stop_color,**args) Puts a stop color at the specified radius """ - def __init__(self,offset,stop_color=None,**args): - SVGelement.__init__(self, 'stop', {'offset':offset}, **args) - if stop_color!=None: - self.attributes['stop-color']=stop_color + def __init__(self, offset, stop_color=None, **args): + SVGelement.__init__(self, 'stop', {'offset': offset}, **args) + if stop_color != None: + self.attributes['stop-color'] = stop_color class style(SVGelement): """st=style(type,cdata=None,**args) Add a CDATA element to this element for defing in line stylesheets etc.. """ - def __init__(self,type,cdata=None,**args): - SVGelement.__init__(self, 'style', {'type':type}, cdata=cdata, **args) + def __init__(self, type, cdata=None, **args): + SVGelement.__init__(self, 'style', {'type': type}, cdata=cdata, **args) class image(SVGelement): @@ -712,22 +712,22 @@ class image(SVGelement): adds an image to the drawing. Supported formats are .png, .jpg and .svg. 
""" - def __init__(self,url,x=None,y=None,width=None,height=None,**args): - if width==None or height==None: + def __init__(self, url, x=None, y=None, width=None,height=None,**args): + if width == None or height == None: raise ValueError('both height and width are required') - SVGelement.__init__(self, 'image', {'xlink:href':url,'width':width,'height':height}, **args) - if x!=None: - self.attributes['x']=x - if y!=None: - self.attributes['y']=y + SVGelement.__init__(self, 'image', {'xlink:href': url, 'width': width, 'height':height}, **args) + if x != None: + self.attributes['x'] = x + if y != None: + self.attributes['y'] = y class cursor(SVGelement): """c=cursor(url,**args) defines a custom cursor for a element or a drawing """ - def __init__(self,url,**args): - SVGelement.__init__(self, 'cursor', {'xlink:href':url}, **args) + def __init__(self, url, **args): + SVGelement.__init__(self, 'cursor', {'xlink:href': url}, **args) class marker(SVGelement): @@ -736,20 +736,20 @@ class marker(SVGelement): defines a marker which can be used as an endpoint for a line or other pathtypes add an element to it which should be used as a marker. """ - def __init__(self,id=None,viewBox=None,refx=None,refy=None,markerWidth=None,markerHeight=None,**args): + def __init__(self, id=None, viewBox=None, refx=None, refy=None,markerWidth=None,markerHeight=None,**args): SVGelement.__init__(self, 'marker', **args) - if id!=None: - self.attributes['id']=id - if viewBox!=None: - self.attributes['viewBox']=_viewboxlist(viewBox) - if refx!=None: - self.attributes['refX']=refx - if refy!=None: - self.attributes['refY']=refy - if markerWidth!=None: - self.attributes['markerWidth']=markerWidth - if markerHeight!=None: - self.attributes['markerHeight']=markerHeight + if id != None: + self.attributes['id'] = id + if viewBox != None: + self.attributes['viewBox'] = _viewboxlist(viewBox) + if refx != None: + self.attributes['refX'] = refx + if refy != None: + self.attributes['refY'] = refy + if markerWidth != None: + self.attributes['markerWidth'] = markerWidth + if markerHeight != None: + self.attributes['markerHeight'] = markerHeight class group(SVGelement): """g=group(id,**args) @@ -757,10 +757,10 @@ class group(SVGelement): a group is defined by an id and is used to contain elements g.addElement(SVGelement) """ - def __init__(self,id=None,**args): + def __init__(self, id=None, **args): SVGelement.__init__(self, 'g', **args) - if id!=None: - self.attributes['id']=id + if id != None: + self.attributes['id'] = id class symbol(SVGelement): """sy=symbol(id,viewbox,**args) @@ -771,19 +771,19 @@ class symbol(SVGelement): sy.addElement(SVGelement) """ - def __init__(self,id=None,viewBox=None,**args): + def __init__(self, id=None, viewBox=None, **args): SVGelement.__init__(self, 'symbol', **args) - if id!=None: - self.attributes['id']=id - if viewBox!=None: - self.attributes['viewBox']=_viewboxlist(viewBox) + if id != None: + self.attributes['id'] = id + if viewBox != None: + self.attributes['viewBox'] = _viewboxlist(viewBox) class defs(SVGelement): """d=defs(**args) container for defining elements """ - def __init__(self,**args): + def __init__(self, **args): SVGelement.__init__(self, 'defs', **args) class switch(SVGelement): @@ -793,7 +793,7 @@ class switch(SVGelement): requiredFeatures, requiredExtensions and systemLanguage. Refer to the SVG specification for details. 
""" - def __init__(self,**args): + def __init__(self, **args): SVGelement.__init__(self, 'switch', **args) @@ -802,17 +802,17 @@ class use(SVGelement): references a symbol by linking to its id and its position, height and width """ - def __init__(self,link,x=None,y=None,width=None,height=None,**args): - SVGelement.__init__(self, 'use', {'xlink:href':link}, **args) - if x!=None: - self.attributes['x']=x - if y!=None: - self.attributes['y']=y + def __init__(self, link, x=None, y=None, width=None,height=None,**args): + SVGelement.__init__(self, 'use', {'xlink:href': link}, **args) + if x != None: + self.attributes['x'] = x + if y != None: + self.attributes['y'] = y - if width!=None: - self.attributes['width']=width - if height!=None: - self.attributes['height']=height + if width != None: + self.attributes['width'] = width + if height != None: + self.attributes['height'] = height class link(SVGelement): @@ -821,17 +821,17 @@ class link(SVGelement): a link is defined by a hyperlink. add elements which have to be linked a.addElement(SVGelement) """ - def __init__(self,link='',**args): - SVGelement.__init__(self, 'a', {'xlink:href':link}, **args) + def __init__(self, link='', **args): + SVGelement.__init__(self, 'a', {'xlink:href': link}, **args) class view(SVGelement): """v=view(id,**args) a view can be used to create a view with different attributes""" - def __init__(self,id=None,**args): + def __init__(self, id=None, **args): SVGelement.__init__(self, 'view', **args) - if id!=None: - self.attributes['id']=id + if id != None: + self.attributes['id'] = id class script(SVGelement): """sc=script(type,type,cdata,**args) @@ -839,77 +839,77 @@ class script(SVGelement): adds a script element which contains CDATA to the SVG drawing """ - def __init__(self,type,cdata=None,**args): - SVGelement.__init__(self, 'script', {'type':type}, cdata=cdata, **args) + def __init__(self, type, cdata=None, **args): + SVGelement.__init__(self, 'script', {'type': type}, cdata=cdata, **args) class animate(SVGelement): """an=animate(attribute,from,to,during,**args) animates an attribute. """ - def __init__(self,attribute,fr=None,to=None,dur=None,**args): - SVGelement.__init__(self, 'animate', {'attributeName':attribute}, **args) - if fr!=None: - self.attributes['from']=fr - if to!=None: - self.attributes['to']=to - if dur!=None: - self.attributes['dur']=dur + def __init__(self, attribute, fr=None, to=None, dur=None,**args): + SVGelement.__init__(self, 'animate', {'attributeName': attribute}, **args) + if fr != None: + self.attributes['from'] = fr + if to != None: + self.attributes['to'] = to + if dur != None: + self.attributes['dur'] = dur class animateMotion(SVGelement): """an=animateMotion(pathdata,dur,**args) animates a SVGelement over the given path in dur seconds """ - def __init__(self,pathdata,dur,**args): + def __init__(self, pathdata, dur, **args): SVGelement.__init__(self, 'animateMotion', **args) - if pathdata!=None: - self.attributes['path']=str(pathdata) - if dur!=None: - self.attributes['dur']=dur + if pathdata != None: + self.attributes['path'] = str(pathdata) + if dur != None: + self.attributes['dur'] = dur class animateTransform(SVGelement): """antr=animateTransform(type,from,to,dur,**args) transform an element from and to a value. 
""" - def __init__(self,type=None,fr=None,to=None,dur=None,**args): - SVGelement.__init__(self, 'animateTransform', {'attributeName':'transform'}, **args) + def __init__(self, type=None, fr=None, to=None, dur=None,**args): + SVGelement.__init__(self, 'animateTransform', {'attributeName': 'transform'}, **args) # As far as I know the attributeName is always transform - if type!=None: - self.attributes['type']=type - if fr!=None: - self.attributes['from']=fr - if to!=None: - self.attributes['to']=to - if dur!=None: - self.attributes['dur']=dur + if type != None: + self.attributes['type'] = type + if fr != None: + self.attributes['from'] = fr + if to != None: + self.attributes['to'] = to + if dur != None: + self.attributes['dur'] = dur class animateColor(SVGelement): """ac=animateColor(attribute,type,from,to,dur,**args) Animates the color of a element """ - def __init__(self,attribute,type=None,fr=None,to=None,dur=None,**args): - SVGelement.__init__(self, 'animateColor', {'attributeName':attribute}, **args) - if type!=None: - self.attributes['type']=type - if fr!=None: - self.attributes['from']=fr - if to!=None: - self.attributes['to']=to - if dur!=None: - self.attributes['dur']=dur + def __init__(self, attribute, type=None, fr=None, to=None,dur=None,**args): + SVGelement.__init__(self, 'animateColor', {'attributeName': attribute}, **args) + if type != None: + self.attributes['type'] = type + if fr != None: + self.attributes['from'] = fr + if to != None: + self.attributes['to'] = to + if dur != None: + self.attributes['dur'] = dur class set(SVGelement): """st=set(attribute,to,during,**args) sets an attribute to a value for a """ - def __init__(self,attribute,to=None,dur=None,**args): - SVGelement.__init__(self, 'set', {'attributeName':attribute}, **args) - if to!=None: - self.attributes['to']=to - if dur!=None: - self.attributes['dur']=dur + def __init__(self, attribute, to=None, dur=None, **args): + SVGelement.__init__(self, 'set', {'attributeName': attribute}, **args) + if to != None: + self.attributes['to'] = to + if dur != None: + self.attributes['dur'] = dur @@ -928,15 +928,15 @@ class svg(SVGelement): d.setSVG(s) d.toXml() """ - def __init__(self,viewBox=None, width=None, height=None,**args): + def __init__(self, viewBox=None, width=None, height=None, **args): SVGelement.__init__(self, 'svg', **args) - if viewBox!=None: - self.attributes['viewBox']=_viewboxlist(viewBox) - if width!=None: - self.attributes['width']=width - if height!=None: - self.attributes['height']=height - self.namespace="http://www.w3.org/2000/svg" + if viewBox != None: + self.attributes['viewBox'] = _viewboxlist(viewBox) + if width != None: + self.attributes['width'] = width + if height != None: + self.attributes['height'] = height + self.namespace = "http://www.w3.org/2000/svg" class drawing: """d=drawing() @@ -950,15 +950,15 @@ class drawing: """ def __init__(self, entity={}): - self.svg=None + self.svg = None self.entity = entity def setSVG(self, svg): - self.svg=svg + self.svg = svg # Voeg een element toe aan de grafiek toe. - if use_dom_implementation==0: - def toXml(self, filename='',compress=False): + if use_dom_implementation == 0: + def toXml(self, filename='', compress=False): import io - xml=io.StringIO() + xml = io.StringIO() xml.write("\n") xml.write("to the screen drawing.toXml(filename)---->to the file writes a svg drawing to the screen or to a file @@ -1002,35 +1002,35 @@ class drawing: global root # root is defined global so it can be used by the appender. 
Its also possible to use it as an arugument but # that is a bit messy. - root=implementation.createDocument(None, None, doctype) + root = implementation.createDocument(None, None, doctype) # Create the xml document. global appender def appender(element, elementroot): """This recursive function appends elements to an element and sets the attributes and type. It stops when alle elements have been appended""" if element.namespace: - e=root.createElementNS(element.namespace, element.type) + e = root.createElementNS(element.namespace, element.type) else: - e=root.createElement(element.type) + e = root.createElement(element.type) if element.text: - textnode=root.createTextNode(element.text) + textnode = root.createTextNode(element.text) e.appendChild(textnode) - for attribute in list(element.attributes.keys()): #in element.attributes is supported from python 2.2 + for attribute in list(element.attributes.keys()): # in element.attributes is supported from python 2.2 e.setAttribute(attribute, str(element.attributes[attribute])) if element.elements: for el in element.elements: - e=appender(el, e) + e = appender(el, e) elementroot.appendChild(e) return elementroot - root=appender(self.svg, root) + root = appender(self.svg, root) if not filename: import io - xml=io.StringIO() + xml = io.StringIO() PrettyPrint(root, xml) if compress: import gzip - f=io.StringIO() - zf=gzip.GzipFile(fileobj=f, mode='wb') + f = io.StringIO() + zf = gzip.GzipFile(fileobj=f, mode='wb') zf.write(xml.getvalue()) zf.close() f.seek(0) @@ -1039,16 +1039,16 @@ class drawing: return xml.getvalue() else: try: - if filename[-4:]=='svgz': + if filename[-4:] == 'svgz': import gzip import io - xml=io.StringIO() + xml = io.StringIO() PrettyPrint(root, xml) - f=gzip.GzipFile(filename=filename, mode='wb', compresslevel=9) + f = gzip.GzipFile(filename=filename, mode='wb', compresslevel=9) f.write(xml.getvalue()) f.close() else: - f=open(filename, 'w') + f = open(filename, 'w') PrettyPrint(root, f) f.close() except: @@ -1058,44 +1058,44 @@ class drawing: import xml.parsers.xmlproc.xmlval except: raise exceptions.ImportError('PyXml is required for validating SVG') - svg=self.toXml() - xv=xml.parsers.xmlproc.xmlval.XMLValidator() + svg = self.toXml() + xv = xml.parsers.xmlproc.xmlval.XMLValidator() try: xv.feed(svg) except: raise Exception("SVG is not well formed, see messages above") else: print("SVG well formed") -if __name__=='__main__': +if __name__ == '__main__': - d=drawing() - s=svg((0, 0, 100, 100)) - r=rect(-100, -100, 300, 300, 'cyan') + d = drawing() + s = svg((0, 0, 100, 100)) + r = rect(-100, -100, 300, 300, 'cyan') s.addElement(r) - t=title('SVGdraw Demo') + t = title('SVGdraw Demo') s.addElement(t) - g=group('animations') - e=ellipse(0, 0, 5, 2) + g = group('animations') + e = ellipse(0, 0, 5, 2) g.addElement(e) - c=circle(0, 0, 1, 'red') + c = circle(0, 0, 1, 'red') g.addElement(c) - pd=pathdata(0, -10) + pd = pathdata(0, -10) for i in range(6): pd.relsmbezier(10, 5, 0, 10) pd.relsmbezier(-10, 5, 0, 10) - an=animateMotion(pd, 10) - an.attributes['rotate']='auto-reverse' - an.attributes['repeatCount']="indefinite" + an = animateMotion(pd, 10) + an.attributes['rotate'] = 'auto-reverse' + an.attributes['repeatCount'] = "indefinite" g.addElement(an) s.addElement(g) for i in range(20, 120, 20): - u=use('#animations', i, 0) + u = use('#animations', i, 0) s.addElement(u) for i in range(0, 120, 20): for j in range(5, 105, 10): - c=circle(i, j, 1, 'red', 'black', .5) + c = circle(i, j, 1, 'red', 'black', .5) s.addElement(c) 
d.setSVG(s) diff --git a/wqflask/utility/temp_data.py b/wqflask/utility/temp_data.py index b2cbd458..2088ba9a 100644 --- a/wqflask/utility/temp_data.py +++ b/wqflask/utility/temp_data.py @@ -11,7 +11,7 @@ class TempData: def store(self, field, value): self.redis.hset(self.key, field, value) - self.redis.expire(self.key, 60*15) # Expire in 15 minutes + self.redis.expire(self.key, 60 * 15) # Expire in 15 minutes def get_all(self): return self.redis.hgetall(self.key) diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py index 65df59c3..9b751344 100644 --- a/wqflask/utility/tools.py +++ b/wqflask/utility/tools.py @@ -9,7 +9,7 @@ from wqflask import app # Use the standard logger here to avoid a circular dependency import logging -logger = logging.getLogger(__name__ ) +logger = logging.getLogger(__name__) OVERRIDES = {} @@ -18,7 +18,7 @@ def app_set(command_id, value): app.config.setdefault(command_id, value) return value -def get_setting(command_id,guess=None): +def get_setting(command_id, guess=None): """Resolve a setting from the environment or the global settings in app.config, with valid_path is a function checking whether the path points to an expected directory and returns the full path to @@ -54,7 +54,7 @@ def get_setting(command_id,guess=None): # print("Looking for "+command_id+"\n") command = value(os.environ.get(command_id)) if command is None or command == "": - command = OVERRIDES.get(command_id) # currently not in use + command = OVERRIDES.get(command_id) # currently not in use if command is None: # ---- Check whether setting exists in app command = value(app.config.get(command_id)) @@ -62,7 +62,7 @@ def get_setting(command_id,guess=None): command = value(guess) if command is None or command == "": # print command - raise Exception(command_id+' setting unknown or faulty (update default_settings.py?).') + raise Exception(command_id + ' setting unknown or faulty (update default_settings.py?).') # print("Set "+command_id+"="+str(command)) return command @@ -99,13 +99,13 @@ def js_path(module=None): """ Find the JS module in the two paths """ - try_gn = get_setting("JS_GN_PATH")+"/"+module + try_gn = get_setting("JS_GN_PATH") + "/" + module if valid_path(try_gn): return try_gn - try_guix = get_setting("JS_GUIX_PATH")+"/"+module + try_guix = get_setting("JS_GUIX_PATH") + "/" + module if valid_path(try_guix): return try_guix - raise "No JS path found for "+module+" (if not in Guix check JS_GN_PATH)" + raise "No JS path found for " + module + " (if not in Guix check JS_GN_PATH)" def reaper_command(guess=None): return get_setting("REAPER_COMMAND", guess) @@ -121,28 +121,28 @@ def plink_command(guess=None): def flat_file_exists(subdir): base = get_setting("GENENETWORK_FILES") - return valid_path(base+"/"+subdir) + return valid_path(base + "/" + subdir) def flat_files(subdir=None): base = get_setting("GENENETWORK_FILES") if subdir: - return assert_dir(base+"/"+subdir) + return assert_dir(base + "/" + subdir) return assert_dir(base) def assert_bin(fn): if not valid_bin(fn): - raise Exception("ERROR: can not find binary "+fn) + raise Exception("ERROR: can not find binary " + fn) return fn def assert_dir(dir): if not valid_path(dir): - raise Exception("ERROR: can not find directory "+dir) + raise Exception("ERROR: can not find directory " + dir) return dir def assert_writable_dir(dir): try: fn = dir + "/test.txt" - fh = open( fn, 'w' ) + fh = open(fn, 'w') fh.write("I am writing this text to the file\n") fh.close() os.remove(fn) @@ -152,7 +152,7 @@ def assert_writable_dir(dir): def 
assert_file(fn): if not valid_file(fn): - raise Exception('Unable to find file '+fn) + raise Exception('Unable to find file ' + fn) return fn def mk_dir(dir): @@ -168,19 +168,19 @@ def locate(name, subdir=None): """ base = get_setting("GENENETWORK_FILES") if subdir: - base = base+"/"+subdir + base = base + "/" + subdir if valid_path(base): lookfor = base + "/" + name if valid_file(lookfor): - logger.info("Found: file "+lookfor+"\n") + logger.info("Found: file " + lookfor + "\n") return lookfor else: - raise Exception("Can not locate "+lookfor) + raise Exception("Can not locate " + lookfor) if subdir: sys.stderr.write(subdir) - raise Exception("Can not locate "+name+" in "+base) + raise Exception("Can not locate " + name + " in " + base) def locate_phewas(name, subdir=None): - return locate(name, '/phewas/'+subdir) + return locate(name, '/phewas/' + subdir) def locate_ignore_error(name, subdir=None): """ @@ -191,13 +191,13 @@ def locate_ignore_error(name, subdir=None): """ base = get_setting("GENENETWORK_FILES") if subdir: - base = base+"/"+subdir + base = base + "/" + subdir if valid_path(base): lookfor = base + "/" + name if valid_file(lookfor): - logger.debug("Found: file "+name+"\n") + logger.debug("Found: file " + name + "\n") return lookfor - logger.info("WARNING: file "+name+" not found\n") + logger.info("WARNING: file " + name + " not found\n") return None def tempdir(): @@ -206,20 +206,20 @@ def tempdir(): """ return valid_path(get_setting("TMPDIR", "/tmp")) -BLUE = '\033[94m' +BLUE = '\033[94m' GREEN = '\033[92m' -BOLD = '\033[1m' -ENDC = '\033[0m' +BOLD = '\033[1m' +ENDC = '\033[0m' def show_settings(): from utility.tools import LOG_LEVEL - print(("Set global log level to "+BLUE+LOG_LEVEL+ENDC)) + print(("Set global log level to " + BLUE + LOG_LEVEL + ENDC)) log_level = getattr(logging, LOG_LEVEL.upper()) logging.basicConfig(level=log_level) logger.info(OVERRIDES) - logger.info(BLUE+"Mr. Mojo Risin 2"+ENDC) + logger.info(BLUE + "Mr. 
Mojo Risin 2" + ENDC) keylist = list(app.config.keys()) print("runserver.py: ****** Webserver configuration - k,v pairs from app.config ******") keylist.sort() @@ -231,35 +231,35 @@ def show_settings(): # Cached values -GN_VERSION = get_setting('GN_VERSION') -HOME = get_setting('HOME') -SERVER_PORT = get_setting('SERVER_PORT') -WEBSERVER_MODE = get_setting('WEBSERVER_MODE') -GN2_BASE_URL = get_setting('GN2_BASE_URL') -GN2_BRANCH_URL = get_setting('GN2_BRANCH_URL') -GN_SERVER_URL = get_setting('GN_SERVER_URL') -SERVER_PORT = get_setting_int('SERVER_PORT') -SQL_URI = get_setting('SQL_URI') -LOG_LEVEL = get_setting('LOG_LEVEL') -LOG_LEVEL_DEBUG = get_setting_int('LOG_LEVEL_DEBUG') -LOG_SQL = get_setting_bool('LOG_SQL') -LOG_SQL_ALCHEMY = get_setting_bool('LOG_SQL_ALCHEMY') -LOG_BENCH = get_setting_bool('LOG_BENCH') -LOG_FORMAT = "%(message)s" # not yet in use -USE_REDIS = get_setting_bool('USE_REDIS') -USE_GN_SERVER = get_setting_bool('USE_GN_SERVER') - -GENENETWORK_FILES = get_setting('GENENETWORK_FILES') -JS_GUIX_PATH = get_setting('JS_GUIX_PATH') +GN_VERSION = get_setting('GN_VERSION') +HOME = get_setting('HOME') +SERVER_PORT = get_setting('SERVER_PORT') +WEBSERVER_MODE = get_setting('WEBSERVER_MODE') +GN2_BASE_URL = get_setting('GN2_BASE_URL') +GN2_BRANCH_URL = get_setting('GN2_BRANCH_URL') +GN_SERVER_URL = get_setting('GN_SERVER_URL') +SERVER_PORT = get_setting_int('SERVER_PORT') +SQL_URI = get_setting('SQL_URI') +LOG_LEVEL = get_setting('LOG_LEVEL') +LOG_LEVEL_DEBUG = get_setting_int('LOG_LEVEL_DEBUG') +LOG_SQL = get_setting_bool('LOG_SQL') +LOG_SQL_ALCHEMY = get_setting_bool('LOG_SQL_ALCHEMY') +LOG_BENCH = get_setting_bool('LOG_BENCH') +LOG_FORMAT = "%(message)s" # not yet in use +USE_REDIS = get_setting_bool('USE_REDIS') +USE_GN_SERVER = get_setting_bool('USE_GN_SERVER') + +GENENETWORK_FILES = get_setting('GENENETWORK_FILES') +JS_GUIX_PATH = get_setting('JS_GUIX_PATH') assert_dir(JS_GUIX_PATH) -JS_GN_PATH = get_setting('JS_GN_PATH') +JS_GN_PATH = get_setting('JS_GN_PATH') # assert_dir(JS_GN_PATH) GITHUB_CLIENT_ID = get_setting('GITHUB_CLIENT_ID') GITHUB_CLIENT_SECRET = get_setting('GITHUB_CLIENT_SECRET') if GITHUB_CLIENT_ID != 'UNKNOWN' and GITHUB_CLIENT_SECRET: GITHUB_AUTH_URL = "https://github.com/login/oauth/authorize?client_id=" + \ - GITHUB_CLIENT_ID+"&client_secret="+GITHUB_CLIENT_SECRET + GITHUB_CLIENT_ID + "&client_secret=" + GITHUB_CLIENT_SECRET GITHUB_API_URL = get_setting('GITHUB_API_URL') ORCID_CLIENT_ID = get_setting('ORCID_CLIENT_ID') @@ -267,7 +267,7 @@ ORCID_CLIENT_SECRET = get_setting('ORCID_CLIENT_SECRET') ORCID_AUTH_URL = None if ORCID_CLIENT_ID != 'UNKNOWN' and ORCID_CLIENT_SECRET: ORCID_AUTH_URL = "https://orcid.org/oauth/authorize?response_type=code&scope=/authenticate&show_login=true&client_id=" + \ - ORCID_CLIENT_ID+"&client_secret="+ORCID_CLIENT_SECRET + "&redirect_uri=" + GN2_BRANCH_URL + "n/login/orcid_oauth2" + ORCID_CLIENT_ID + "&client_secret=" + ORCID_CLIENT_SECRET + "&redirect_uri=" + GN2_BRANCH_URL + "n/login/orcid_oauth2" ORCID_TOKEN_URL = get_setting('ORCID_TOKEN_URL') ELASTICSEARCH_HOST = get_setting('ELASTICSEARCH_HOST') @@ -279,28 +279,28 @@ SMTP_CONNECT = get_setting('SMTP_CONNECT') SMTP_USERNAME = get_setting('SMTP_USERNAME') SMTP_PASSWORD = get_setting('SMTP_PASSWORD') -REAPER_COMMAND = app_set("REAPER_COMMAND", reaper_command()) -GEMMA_COMMAND = app_set("GEMMA_COMMAND", gemma_command()) +REAPER_COMMAND = app_set("REAPER_COMMAND", reaper_command()) +GEMMA_COMMAND = app_set("GEMMA_COMMAND", gemma_command()) assert(GEMMA_COMMAND is not None) 
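A minimal sketch of how the helpers above are meant to be used, assuming a configured GN2 app so the import-time settings resolve; the "genotype" subdirectory and "BXD.geno" filename are illustrative only. get_setting() checks the process environment first, then OVERRIDES, then app.config, then the optional guess, and raises if nothing usable is found; flat_files() and locate() build on it to reach files under GENENETWORK_FILES:

from utility.tools import get_setting, flat_files, locate

base = get_setting("GENENETWORK_FILES")      # environment wins over OVERRIDES, app.config, guess
geno_dir = flat_files("genotype")            # asserts that base + "/genotype" exists
geno_file = locate("BXD.geno", "genotype")   # raises unless the file is found under that directory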
-PLINK_COMMAND = app_set("PLINK_COMMAND", plink_command()) +PLINK_COMMAND = app_set("PLINK_COMMAND", plink_command()) GEMMA_WRAPPER_COMMAND = gemma_wrapper_command() -TEMPDIR = tempdir() # defaults to UNIX TMPDIR +TEMPDIR = tempdir() # defaults to UNIX TMPDIR assert_dir(TEMPDIR) # ---- Handle specific JS modules JS_GUIX_PATH = get_setting("JS_GUIX_PATH") assert_dir(JS_GUIX_PATH) -assert_dir(JS_GUIX_PATH+'/cytoscape-panzoom') +assert_dir(JS_GUIX_PATH + '/cytoscape-panzoom') CSS_PATH = JS_GUIX_PATH # The CSS is bundled together with the JS # assert_dir(JS_PATH) JS_TWITTER_POST_FETCHER_PATH = get_setting("JS_TWITTER_POST_FETCHER_PATH", js_path("javascript-twitter-post-fetcher")) assert_dir(JS_TWITTER_POST_FETCHER_PATH) -assert_file(JS_TWITTER_POST_FETCHER_PATH+"/js/twitterFetcher_min.js") +assert_file(JS_TWITTER_POST_FETCHER_PATH + "/js/twitterFetcher_min.js") JS_CYTOSCAPE_PATH = get_setting("JS_CYTOSCAPE_PATH", js_path("cytoscape")) assert_dir(JS_CYTOSCAPE_PATH) -assert_file(JS_CYTOSCAPE_PATH+'/cytoscape.min.js') +assert_file(JS_CYTOSCAPE_PATH + '/cytoscape.min.js') # assert_file(PHEWAS_FILES+"/auwerx/PheWAS_pval_EMMA_norm.RData") diff --git a/wqflask/utility/type_checking.py b/wqflask/utility/type_checking.py index 6b029317..662bf794 100644 --- a/wqflask/utility/type_checking.py +++ b/wqflask/utility/type_checking.py @@ -23,19 +23,19 @@ def is_str(value): except: return False -def get_float(vars_obj,name,default=None): +def get_float(vars_obj, name, default=None): if name in vars_obj: if is_float(vars_obj[name]): return float(vars_obj[name]) return default -def get_int(vars_obj,name,default=None): +def get_int(vars_obj, name, default=None): if name in vars_obj: if is_int(vars_obj[name]): return float(vars_obj[name]) return default -def get_string(vars_obj,name,default=None): +def get_string(vars_obj, name, default=None): if name in vars_obj: if not vars_obj[name] is None: return str(vars_obj[name]) diff --git a/wqflask/utility/webqtlUtil.py b/wqflask/utility/webqtlUtil.py index 5681fadf..ed59b0eb 100644 --- a/wqflask/utility/webqtlUtil.py +++ b/wqflask/utility/webqtlUtil.py @@ -33,38 +33,38 @@ from math import * from base import webqtlConfig # NL, 07/27/2010. 
moved from webqtlForm.py -#Dict of Parents and F1 information, In the order of [F1, Mat, Pat] -ParInfo ={ -'BXH':['BHF1', 'HBF1', 'C57BL/6J', 'C3H/HeJ'], -'AKXD':['AKF1', 'KAF1', 'AKR/J', 'DBA/2J'], -'BXD':['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'], -'C57BL-6JxC57BL-6NJF2':['', '', 'C57BL/6J', 'C57BL/6NJ'], -'BXD300':['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'], -'B6BTBRF2':['B6BTBRF1', 'BTBRB6F1', 'C57BL/6J', 'BTBRT<+>tf/J'], -'BHHBF2':['B6HF2', 'HB6F2', 'C57BL/6J', 'C3H/HeJ'], -'BHF2':['B6HF2', 'HB6F2', 'C57BL/6J', 'C3H/HeJ'], -'B6D2F2':['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'], -'BDF2-1999':['B6D2F2', 'D2B6F2', 'C57BL/6J', 'DBA/2J'], -'BDF2-2005':['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'], -'CTB6F2':['CTB6F2', 'B6CTF2', 'C57BL/6J', 'Castaneous'], -'CXB':['CBF1', 'BCF1', 'C57BL/6ByJ', 'BALB/cByJ'], -'AXBXA':['ABF1', 'BAF1', 'C57BL/6J', 'A/J'], -'AXB':['ABF1', 'BAF1', 'C57BL/6J', 'A/J'], -'BXA':['BAF1', 'ABF1', 'C57BL/6J', 'A/J'], -'LXS':['LSF1', 'SLF1', 'ISS', 'ILS'], -'HXBBXH':['SHR_BNF1', 'BN_SHRF1', 'BN-Lx/Cub', 'SHR/OlaIpcv'], -'BayXSha':['BayXShaF1', 'ShaXBayF1', 'Bay-0', 'Shahdara'], -'ColXBur':['ColXBurF1', 'BurXColF1', 'Col-0', 'Bur-0'], -'ColXCvi':['ColXCviF1', 'CviXColF1', 'Col-0', 'Cvi'], -'SXM':['SMF1', 'MSF1', 'Steptoe', 'Morex'], -'HRDP':['SHR_BNF1', 'BN_SHRF1', 'BN-Lx/Cub', 'SHR/OlaIpcv'] +# Dict of Parents and F1 information, In the order of [F1, Mat, Pat] +ParInfo = { +'BXH': ['BHF1', 'HBF1', 'C57BL/6J', 'C3H/HeJ'], +'AKXD': ['AKF1', 'KAF1', 'AKR/J', 'DBA/2J'], +'BXD': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'], +'C57BL-6JxC57BL-6NJF2': ['', '', 'C57BL/6J', 'C57BL/6NJ'], +'BXD300': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'], +'B6BTBRF2': ['B6BTBRF1', 'BTBRB6F1', 'C57BL/6J', 'BTBRT<+>tf/J'], +'BHHBF2': ['B6HF2', 'HB6F2', 'C57BL/6J', 'C3H/HeJ'], +'BHF2': ['B6HF2', 'HB6F2', 'C57BL/6J', 'C3H/HeJ'], +'B6D2F2': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'], +'BDF2-1999': ['B6D2F2', 'D2B6F2', 'C57BL/6J', 'DBA/2J'], +'BDF2-2005': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'], +'CTB6F2': ['CTB6F2', 'B6CTF2', 'C57BL/6J', 'Castaneous'], +'CXB': ['CBF1', 'BCF1', 'C57BL/6ByJ', 'BALB/cByJ'], +'AXBXA': ['ABF1', 'BAF1', 'C57BL/6J', 'A/J'], +'AXB': ['ABF1', 'BAF1', 'C57BL/6J', 'A/J'], +'BXA': ['BAF1', 'ABF1', 'C57BL/6J', 'A/J'], +'LXS': ['LSF1', 'SLF1', 'ISS', 'ILS'], +'HXBBXH': ['SHR_BNF1', 'BN_SHRF1', 'BN-Lx/Cub', 'SHR/OlaIpcv'], +'BayXSha': ['BayXShaF1', 'ShaXBayF1', 'Bay-0', 'Shahdara'], +'ColXBur': ['ColXBurF1', 'BurXColF1', 'Col-0', 'Bur-0'], +'ColXCvi': ['ColXCviF1', 'CviXColF1', 'Col-0', 'Cvi'], +'SXM': ['SMF1', 'MSF1', 'Steptoe', 'Morex'], +'HRDP': ['SHR_BNF1', 'BN_SHRF1', 'BN-Lx/Cub', 'SHR/OlaIpcv'] } ######################################### # Accessory Functions ######################################### -def genRandStr(prefix = "", length=8, chars=string.ascii_letters+string.digits): +def genRandStr(prefix="", length=8, chars=string.ascii_letters + string.digits): from random import choice _str = prefix[:] for i in range(length): @@ -83,12 +83,12 @@ def ListNotNull(lst): return 1 return None -def readLineCSV(line): ### dcrowell July 2008 +def readLineCSV(line): # dcrowell July 2008 """Parses a CSV string of text and returns a list containing each element as a string. 
Used by correlationPage""" returnList = line.split('","') - returnList[-1]=returnList[-1][:-2] - returnList[0]=returnList[0][1:] + returnList[-1] = returnList[-1][:-2] + returnList[0] = returnList[0][1:] return returnList def cmpEigenValue(A, B): @@ -107,7 +107,7 @@ def hasAccessToConfidentialPhenotypeTrait(privilege, userName, authorized_users) if webqtlConfig.USERDICT[privilege] > webqtlConfig.USERDICT['user']: access_to_confidential_phenotype_trait = 1 else: - AuthorizedUsersList=[x.strip() for x in authorized_users.split(',')] + AuthorizedUsersList = [x.strip() for x in authorized_users.split(',')] if userName in AuthorizedUsersList: access_to_confidential_phenotype_trait = 1 return access_to_confidential_phenotype_trait diff --git a/wqflask/wqflask/__init__.py b/wqflask/wqflask/__init__.py index 0564cfa7..bab87115 100644 --- a/wqflask/wqflask/__init__.py +++ b/wqflask/wqflask/__init__.py @@ -7,11 +7,11 @@ from flask import g from flask import Flask from utility import formatting from wqflask.markdown_routes import glossary_blueprint -from wqflask.markdown_routes import references_blueprint -from wqflask.markdown_routes import links_blueprint +from wqflask.markdown_routes import references_blueprint +from wqflask.markdown_routes import links_blueprint from wqflask.markdown_routes import policies_blueprint -from wqflask.markdown_routes import environments_blueprint -from wqflask.markdown_routes import facilities_blueprint +from wqflask.markdown_routes import environments_blueprint +from wqflask.markdown_routes import facilities_blueprint app = Flask(__name__) diff --git a/wqflask/wqflask/api/correlation.py b/wqflask/wqflask/api/correlation.py index f5b50dcd..e0b7fea0 100644 --- a/wqflask/wqflask/api/correlation.py +++ b/wqflask/wqflask/api/correlation.py @@ -16,16 +16,16 @@ from utility import webqtlUtil, helper_functions, corr_result_helpers from utility.benchmark import Bench import utility.logger -logger = utility.logger.getLogger(__name__ ) +logger = utility.logger.getLogger(__name__) def do_correlation(start_vars): assert('db' in start_vars) assert('target_db' in start_vars) assert('trait_id' in start_vars) - this_dataset = data_set.create_dataset(dataset_name = start_vars['db']) - target_dataset = data_set.create_dataset(dataset_name = start_vars['target_db']) - this_trait = create_trait(dataset = this_dataset, name = start_vars['trait_id']) + this_dataset = data_set.create_dataset(dataset_name=start_vars['db']) + target_dataset = data_set.create_dataset(dataset_name=start_vars['target_db']) + this_trait = create_trait(dataset=this_dataset, name=start_vars['trait_id']) this_trait = retrieve_sample_data(this_trait, this_dataset) corr_params = init_corr_params(start_vars) @@ -38,26 +38,26 @@ def do_correlation(start_vars): if corr_params['type'] == "tissue": [sample_r, num_overlap, sample_p, symbol] = corr_results[trait] result_dict = { - "trait" : trait, - "sample_r" : sample_r, - "#_strains" : num_overlap, - "p_value" : sample_p, - "symbol" : symbol + "trait": trait, + "sample_r": sample_r, + "#_strains": num_overlap, + "p_value": sample_p, + "symbol": symbol } elif corr_params['type'] == "literature" or corr_params['type'] == "lit": [gene_id, sample_r] = corr_results[trait] result_dict = { - "trait" : trait, - "sample_r" : sample_r, - "gene_id" : gene_id + "trait": trait, + "sample_r": sample_r, + "gene_id": gene_id } else: [sample_r, sample_p, num_overlap] = corr_results[trait] result_dict = { - "trait" : trait, - "sample_r" : sample_r, - "#_strains" : num_overlap, - "p_value" : 
sample_p + "trait": trait, + "sample_r": sample_r, + "#_strains": num_overlap, + "p_value": sample_p } final_results.append(result_dict) @@ -76,7 +76,7 @@ def calculate_results(this_trait, this_dataset, target_dataset, corr_params): corr_results = do_tissue_correlation_for_all_traits(this_trait, trait_symbol_dict, corr_params) sorted_results = collections.OrderedDict(sorted(list(corr_results.items()), key=lambda t: -abs(t[1][1]))) - elif corr_params['type'] == "literature" or corr_params['type'] == "lit": #ZS: Just so a user can use either "lit" or "literature" + elif corr_params['type'] == "literature" or corr_params['type'] == "lit": # ZS: Just so a user can use either "lit" or "literature" trait_geneid_dict = this_dataset.retrieve_genes("GeneId") corr_results = do_literature_correlation_for_all_traits(this_trait, this_dataset, trait_geneid_dict, corr_params) sorted_results = collections.OrderedDict(sorted(list(corr_results.items()), @@ -92,8 +92,8 @@ def calculate_results(this_trait, this_dataset, target_dataset, corr_params): return sorted_results def do_tissue_correlation_for_all_traits(this_trait, trait_symbol_dict, corr_params, tissue_dataset_id=1): - #Gets tissue expression values for the primary trait - primary_trait_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(symbol_list = [this_trait.symbol]) + # Gets tissue expression values for the primary trait + primary_trait_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(symbol_list=[this_trait.symbol]) if this_trait.symbol.lower() in primary_trait_tissue_vals_dict: primary_trait_tissue_values = primary_trait_tissue_vals_dict[this_trait.symbol.lower()] @@ -227,9 +227,9 @@ def init_corr_params(start_vars): return_count = int(start_vars['return_count']) corr_params = { - 'method' : method, - 'type' : type, - 'return_count' : return_count + 'method': method, + 'type': type, + 'return_count': return_count } return corr_params diff --git a/wqflask/wqflask/api/gen_menu.py b/wqflask/wqflask/api/gen_menu.py index eaddecd7..a64524dd 100644 --- a/wqflask/wqflask/api/gen_menu.py +++ b/wqflask/wqflask/api/gen_menu.py @@ -87,14 +87,14 @@ def phenotypes_exist(group_name): results = g.db.execute( ("SELECT Name FROM PublishFreeze " "WHERE PublishFreeze.Name = " - "'{}'").format(group_name+"Publish")).fetchone() + "'{}'").format(group_name + "Publish")).fetchone() return bool(results) def genotypes_exist(group_name): results = g.db.execute( ("SELECT Name FROM GenoFreeze " + - "WHERE GenoFreeze.Name = '{}'").format(group_name+"Geno")).fetchone() + "WHERE GenoFreeze.Name = '{}'").format(group_name + "Geno")).fetchone() return bool(results) diff --git a/wqflask/wqflask/api/mapping.py b/wqflask/wqflask/api/mapping.py index d59a69df..fbfbc879 100644 --- a/wqflask/wqflask/api/mapping.py +++ b/wqflask/wqflask/api/mapping.py @@ -8,15 +8,15 @@ from utility import helper_functions from wqflask.marker_regression import gemma_mapping, rqtl_mapping, qtlreaper_mapping, plink_mapping import utility.logger -logger = utility.logger.getLogger(__name__ ) +logger = utility.logger.getLogger(__name__) def do_mapping_for_api(start_vars): assert('db' in start_vars) assert('trait_id' in start_vars) - dataset = data_set.create_dataset(dataset_name = start_vars['db']) + dataset = data_set.create_dataset(dataset_name=start_vars['db']) dataset.group.get_markers() - this_trait = create_trait(dataset = dataset, name = start_vars['trait_id']) + this_trait = create_trait(dataset=dataset, name=start_vars['trait_id']) this_trait = 
retrieve_sample_data(this_trait, dataset) samples = [] @@ -36,11 +36,11 @@ def do_mapping_for_api(start_vars): mapping_params = initialize_parameters(start_vars, dataset, this_trait) - covariates = "" #ZS: It seems to take an empty string as default. This should probably be changed. + covariates = "" # ZS: It seems to take an empty string as default. This should probably be changed. if mapping_params['mapping_method'] == "gemma": header_row = ["name", "chr", "Mb", "lod_score", "p_value"] - if mapping_params['use_loco'] == "True": #ZS: gemma_mapping returns both results and the filename for LOCO, so need to only grab the former for api + if mapping_params['use_loco'] == "True": # ZS: gemma_mapping returns both results and the filename for LOCO, so need to only grab the former for api result_markers = gemma_mapping.run_gemma(this_trait, dataset, samples, vals, covariates, mapping_params['use_loco'], mapping_params['maf'])[0] else: result_markers = gemma_mapping.run_gemma(this_trait, dataset, samples, vals, covariates, mapping_params['use_loco'], mapping_params['maf']) @@ -118,7 +118,7 @@ def initialize_parameters(start_vars, dataset, this_trait): mapping_params['maf'] = 0.01 if 'maf' in start_vars: - mapping_params['maf'] = start_vars['maf'] # Minor allele frequency + mapping_params['maf'] = start_vars['maf'] # Minor allele frequency mapping_params['use_loco'] = True if 'use_loco' in start_vars: diff --git a/wqflask/wqflask/api/router.py b/wqflask/wqflask/api/router.py index 60e163f2..b0559a07 100644 --- a/wqflask/wqflask/api/router.py +++ b/wqflask/wqflask/api/router.py @@ -23,13 +23,13 @@ from wqflask.api import correlation, mapping, gen_menu from utility.tools import flat_files import utility.logger -logger = utility.logger.getLogger(__name__ ) +logger = utility.logger.getLogger(__name__) version = "pre1" @app.route("/api/v_{}/".format(version)) def hello_world(): - return flask.jsonify({"hello":"world"}) + return flask.jsonify({"hello": "world"}) @app.route("/api/v_{}/species".format(version)) def get_species_list(): @@ -38,10 +38,10 @@ def get_species_list(): species_list = [] for species in the_species: species_dict = { - "Id" : species[0], - "Name" : species[1], - "FullName" : species[2], - "TaxonomyId" : species[3] + "Id": species[0], + "Name": species[1], + "FullName": species[2], + "TaxonomyId": species[3] } species_list.append(species_dict) @@ -49,17 +49,17 @@ def get_species_list(): @app.route("/api/v_{}/species/".format(version)) @app.route("/api/v_{}/species/.".format(version)) -def get_species_info(species_name, file_format = "json"): +def get_species_info(species_name, file_format="json"): results = g.db.execute("""SELECT SpeciesId, Name, FullName, TaxonomyId FROM Species WHERE (Name="{0}" OR FullName="{0}" OR SpeciesName="{0}");""".format(species_name)) the_species = results.fetchone() species_dict = { - "Id" : the_species[0], - "Name" : the_species[1], - "FullName" : the_species[2], - "TaxonomyId" : the_species[3] + "Id": the_species[0], + "Name": the_species[1], + "FullName": the_species[2], + "TaxonomyId": the_species[3] } return flask.jsonify(species_dict) @@ -87,14 +87,14 @@ def get_groups_list(species_name=None): groups_list = [] for group in the_groups: group_dict = { - "Id" : group[0], - "SpeciesId" : group[1], - "DisplayName" : group[2], - "Name" : group[3], - "FullName" : group[4], - "public" : group[5], - "MappingMethodId" : group[6], - "GeneticType" : group[7] + "Id": group[0], + "SpeciesId": group[1], + "DisplayName": group[2], + "Name": group[3], + "FullName": 
group[4], + "public": group[5], + "MappingMethodId": group[6], + "GeneticType": group[7] } groups_list.append(group_dict) @@ -106,7 +106,7 @@ def get_groups_list(species_name=None): @app.route("/api/v_{}/group/.".format(version)) @app.route("/api/v_{}/group//".format(version)) @app.route("/api/v_{}/group//.".format(version)) -def get_group_info(group_name, species_name = None, file_format = "json"): +def get_group_info(group_name, species_name=None, file_format="json"): if species_name: results = g.db.execute("""SELECT InbredSet.InbredSetId, InbredSet.SpeciesId, InbredSet.InbredSetName, InbredSet.Name, InbredSet.FullName, InbredSet.public, @@ -131,14 +131,14 @@ def get_group_info(group_name, species_name = None, file_format = "json"): group = results.fetchone() if group: group_dict = { - "Id" : group[0], - "SpeciesId" : group[1], - "DisplayName" : group[2], - "Name" : group[3], - "FullName" : group[4], - "public" : group[5], - "MappingMethodId" : group[6], - "GeneticType" : group[7] + "Id": group[0], + "SpeciesId": group[1], + "DisplayName": group[2], + "Name": group[3], + "FullName": group[4], + "public": group[5], + "MappingMethodId": group[6], + "GeneticType": group[7] } return flask.jsonify(group_dict) @@ -179,17 +179,17 @@ def get_datasets_for_group(group_name, species_name=None): datasets_list = [] for dataset in the_datasets: dataset_dict = { - "Id" : dataset[0], - "ProbeFreezeId" : dataset[1], - "AvgID" : dataset[2], - "Short_Abbreviation" : dataset[3], - "Long_Abbreviation" : dataset[4], - "FullName" : dataset[5], - "ShortName" : dataset[6], - "CreateTime" : dataset[7], - "public" : dataset[8], - "confidentiality" : dataset[9], - "DataScale" : dataset[10] + "Id": dataset[0], + "ProbeFreezeId": dataset[1], + "AvgID": dataset[2], + "Short_Abbreviation": dataset[3], + "Long_Abbreviation": dataset[4], + "FullName": dataset[5], + "ShortName": dataset[6], + "CreateTime": dataset[7], + "public": dataset[8], + "confidentiality": dataset[9], + "DataScale": dataset[10] } datasets_list.append(dataset_dict) @@ -201,10 +201,10 @@ def get_datasets_for_group(group_name, species_name=None): @app.route("/api/v_{}/dataset/.".format(version)) @app.route("/api/v_{}/dataset//".format(version)) @app.route("/api/v_{}/dataset//.".format(version)) -def get_dataset_info(dataset_name, group_name = None, file_format="json"): - #ZS: First get ProbeSet (mRNA expression) datasets and then get Phenotype datasets +def get_dataset_info(dataset_name, group_name=None, file_format="json"): + # ZS: First get ProbeSet (mRNA expression) datasets and then get Phenotype datasets - datasets_list = [] #ZS: I figure I might as well return a list if there are multiple matches, though I don"t know if this will actually happen in practice + datasets_list = [] # ZS: I figure I might as well return a list if there are multiple matches, though I don"t know if this will actually happen in practice probeset_query = """ SELECT ProbeSetFreeze.Id, ProbeSetFreeze.Name, ProbeSetFreeze.FullName, @@ -235,16 +235,16 @@ def get_dataset_info(dataset_name, group_name = None, file_format="json"): if dataset: dataset_dict = { - "dataset_type" : "mRNA expression", - "id" : dataset[0], - "name" : dataset[1], - "full_name" : dataset[2], - "short_name" : dataset[3], - "data_scale" : dataset[4], - "tissue_id" : dataset[5], - "tissue" : dataset[6], - "public" : dataset[7], - "confidential" : dataset[8] + "dataset_type": "mRNA expression", + "id": dataset[0], + "name": dataset[1], + "full_name": dataset[2], + "short_name": dataset[3], + "data_scale": 
dataset[4], + "tissue_id": dataset[5], + "tissue": dataset[6], + "public": dataset[7], + "confidential": dataset[8] } datasets_list.append(dataset_dict) @@ -272,25 +272,25 @@ def get_dataset_info(dataset_name, group_name = None, file_format="json"): if dataset: if dataset[5]: dataset_dict = { - "dataset_type" : "phenotype", - "id" : dataset[0], - "name" : dataset[1], - "description" : dataset[2], - "pubmed_id" : dataset[5], - "title" : dataset[6], - "year" : dataset[7] + "dataset_type": "phenotype", + "id": dataset[0], + "name": dataset[1], + "description": dataset[2], + "pubmed_id": dataset[5], + "title": dataset[6], + "year": dataset[7] } elif dataset[4]: dataset_dict = { - "dataset_type" : "phenotype", - "id" : dataset[0], - "name" : dataset[3], - "description" : dataset[4] + "dataset_type": "phenotype", + "id": dataset[0], + "name": dataset[3], + "description": dataset[4] } else: dataset_dict = { - "dataset_type" : "phenotype", - "id" : dataset[0] + "dataset_type": "phenotype", + "id": dataset[0] } datasets_list.append(dataset_dict) @@ -304,7 +304,7 @@ def get_dataset_info(dataset_name, group_name = None, file_format="json"): @app.route("/api/v_{}/traits/".format(version), methods=("GET",)) @app.route("/api/v_{}/traits/.".format(version), methods=("GET",)) -def fetch_traits(dataset_name, file_format = "json"): +def fetch_traits(dataset_name, file_format="json"): trait_ids, trait_names, data_type, dataset_id = get_dataset_trait_ids(dataset_name, request.args) if ("ids_only" in request.args) and (len(trait_ids) > 0): if file_format == "json": @@ -432,7 +432,7 @@ def fetch_traits(dataset_name, file_format = "json"): @app.route("/api/v_{}/sample_data/".format(version)) @app.route("/api/v_{}/sample_data/.".format(version)) -def all_sample_data(dataset_name, file_format = "csv"): +def all_sample_data(dataset_name, file_format="csv"): trait_ids, trait_names, data_type, dataset_id = get_dataset_trait_ids(dataset_name, request.args) if len(trait_ids) > 0: @@ -538,7 +538,7 @@ def all_sample_data(dataset_name, file_format = "csv"): @app.route("/api/v_{}/sample_data//".format(version)) @app.route("/api/v_{}/sample_data//.".format(version)) -def trait_sample_data(dataset_name, trait_name, file_format = "json"): +def trait_sample_data(dataset_name, trait_name, file_format="json"): probeset_query = """ SELECT Strain.Name, Strain.Name2, ProbeSetData.value, ProbeSetData.Id, ProbeSetSE.error @@ -610,10 +610,10 @@ def trait_sample_data(dataset_name, trait_name, file_format = "json"): sample_list = [] for sample in sample_data: sample_dict = { - "sample_name" : sample[0], - "sample_name_2" : sample[1], - "value" : sample[2], - "data_id" : sample[3] + "sample_name": sample[0], + "sample_name_2": sample[1], + "value": sample[2], + "data_id": sample[3] } if sample[4]: sample_dict["se"] = sample[4] @@ -629,7 +629,7 @@ def trait_sample_data(dataset_name, trait_name, file_format = "json"): @app.route("/api/v_{}/trait//.".format(version)) @app.route("/api/v_{}/trait_info//".format(version)) @app.route("/api/v_{}/trait_info//.".format(version)) -def get_trait_info(dataset_name, trait_name, file_format = "json"): +def get_trait_info(dataset_name, trait_name, file_format="json"): probeset_query = """ SELECT ProbeSet.Id, ProbeSet.Name, ProbeSet.Symbol, ProbeSet.description, ProbeSet.Chr, ProbeSet.Mb, ProbeSet.alias, @@ -648,24 +648,24 @@ def get_trait_info(dataset_name, trait_name, file_format = "json"): trait_info = probeset_results.fetchone() if trait_info: trait_dict = { - "id" : trait_info[0], - "name" : 
trait_info[1], - "symbol" : trait_info[2], - "description" : trait_info[3], - "chr" : trait_info[4], - "mb" : trait_info[5], - "alias" :trait_info[6], - "mean" : trait_info[7], - "se" : trait_info[8], - "locus" : trait_info[9], - "lrs" : trait_info[10], - "p_value" : trait_info[11], - "additive" : trait_info[12] + "id": trait_info[0], + "name": trait_info[1], + "symbol": trait_info[2], + "description": trait_info[3], + "chr": trait_info[4], + "mb": trait_info[5], + "alias": trait_info[6], + "mean": trait_info[7], + "se": trait_info[8], + "locus": trait_info[9], + "lrs": trait_info[10], + "p_value": trait_info[11], + "additive": trait_info[12] } return flask.jsonify(trait_dict) else: - if "Publish" in dataset_name: #ZS: Check if the user input the dataset_name as BXDPublish, etc (which is always going to be the group name + "Publish" + if "Publish" in dataset_name: # ZS: Check if the user input the dataset_name as BXDPublish, etc (which is always going to be the group name + "Publish" dataset_name = dataset_name.replace("Publish", "") group_id = get_group_id(dataset_name) @@ -684,10 +684,10 @@ def get_trait_info(dataset_name, trait_name, file_format = "json"): trait_info = pheno_results.fetchone() if trait_info: trait_dict = { - "id" : trait_info[0], - "locus" : trait_info[1], - "lrs" : trait_info[2], - "additive" : trait_info[3] + "id": trait_info[0], + "locus": trait_info[1], + "lrs": trait_info[2], + "additive": trait_info[3] } return flask.jsonify(trait_dict) @@ -699,7 +699,7 @@ def get_corr_results(): results = correlation.do_correlation(request.args) if len(results) > 0: - return flask.jsonify(results) #ZS: I think flask.jsonify expects a dict/list instead of JSON + return flask.jsonify(results) # ZS: I think flask.jsonify expects a dict/list instead of JSON else: return return_error(code=204, source=request.url_rule.rule, title="No Results", details="") @@ -754,7 +754,7 @@ def get_genotypes(group_name, file_format="csv", dataset_name=None): output_lines.append(line.split()) i += 1 - csv_writer = csv.writer(si, delimiter = "\t", escapechar = "\\", quoting = csv.QUOTE_NONE) + csv_writer = csv.writer(si, delimiter="\t", escapechar="\\", quoting = csv.QUOTE_NONE) else: return return_error(code=204, source=request.url_rule.rule, title="No Results", details="") elif file_format == "rqtl2": @@ -802,7 +802,7 @@ def get_genotypes(group_name, file_format="csv", dataset_name=None): output_lines.append([line.strip() for line in line.split(",")]) i += 1 - csv_writer = csv.writer(si, delimiter = ",") + csv_writer = csv.writer(si, delimiter=",") else: return return_error(code=204, source=request.url_rule.rule, title="No Results", details="") @@ -826,8 +826,8 @@ def return_error(code, source, title, details): json_ob = {"errors": [ { "status": code, - "source": { "pointer": source }, - "title" : title, + "source": {"pointer": source}, + "title": title, "detail": details } ]} @@ -842,8 +842,8 @@ def get_dataset_trait_ids(dataset_name, start_vars): limit_string = "" if "Geno" in dataset_name: - data_type = "Geno" #ZS: Need to pass back the dataset type - query = """ + data_type = "Geno" # ZS: Need to pass back the dataset type + query = """ SELECT GenoXRef.GenoId, Geno.Name, GenoXRef.GenoFreezeId FROM diff --git a/wqflask/wqflask/collect.py b/wqflask/wqflask/collect.py index 0291f2b8..6a1b88ca 100644 --- a/wqflask/wqflask/collect.py +++ b/wqflask/wqflask/collect.py @@ -68,18 +68,18 @@ def collections_add(): uc_id = g.user_session.add_collection(collection_name, set()) collections = 
g.user_session.user_collections - #ZS: One of these might be unnecessary + # ZS: One of these might be unnecessary if 'traits' in request.args: - traits=request.args['traits'] + traits = request.args['traits'] return render_template("collections/add.html", - traits = traits, - collections = collections, + traits=traits, + collections=collections, ) else: hash = request.args['hash'] return render_template("collections/add.html", - hash = hash, - collections = collections, + hash=hash, + collections=collections, ) @app.route("/collections/new") @@ -139,8 +139,8 @@ def list_collections(): user_collections = list(g.user_session.user_collections) return render_template("collections/list.html", - params = params, - collections = user_collections, + params=params, + collections=user_collections, ) @app.route("/collections/remove", methods=('POST',)) @@ -196,7 +196,7 @@ def view_collection(): name, dataset_name = atrait.split(':') if dataset_name == "Temp": group = name.split("_")[2] - dataset = create_dataset(dataset_name, dataset_type = "Temp", group_name = group) + dataset = create_dataset(dataset_name, dataset_type="Temp", group_name=group) trait_ob = create_trait(name=name, dataset=dataset) else: dataset = create_dataset(dataset_name) @@ -207,7 +207,7 @@ def view_collection(): json_version.append(jsonable(trait_ob)) collection_info = dict(trait_obs=trait_obs, - uc = uc) + uc=uc) if "json" in params: return json.dumps(json_version) diff --git a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py index 5855ccf0..c135faa3 100644 --- a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py +++ b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py @@ -1,4 +1,4 @@ -## Copyright (C) University of Tennessee Health Science Center, Memphis, TN. +# Copyright (C) University of Tennessee Health Science Center, Memphis, TN. # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License @@ -23,7 +23,7 @@ from pprint import pformat as pf from base.trait import create_trait from base import data_set from utility import webqtlUtil, helper_functions, corr_result_helpers -import utility.webqtlUtil #this is for parallel computing only. +import utility.webqtlUtil # this is for parallel computing only. 
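As the collection code above shows, a selected trait travels as a single "trait_name:dataset_name" string and is split on ':' before the factory functions are called. A minimal sketch of that convention, assuming a live request context and using a made-up id pair:

from base.data_set import create_dataset
from base.trait import create_trait

name, dataset_name = "1427571_at:HC_M2_0606_P".split(':')   # illustrative trait:dataset pair
dataset = create_dataset(dataset_name)
trait_ob = create_trait(name=name, dataset=dataset)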
from wqflask.correlation import correlation_functions from MySQLdb import escape_string as escape @@ -41,7 +41,7 @@ class ComparisonBarChart: self.all_sample_list = [] self.traits = [] self.insufficient_shared_samples = False - this_group = self.trait_list[0][1].group.name #ZS: Getting initial group name before verifying all traits are in the same group in the following loop + this_group = self.trait_list[0][1].group.name # ZS: Getting initial group name before verifying all traits are in the same group in the following loop for trait_db in self.trait_list: if trait_db[1].group.name != this_group: @@ -74,9 +74,9 @@ class ComparisonBarChart: this_trait_vals.append('') self.sample_data.append(this_trait_vals) - self.js_data = dict(traits = [trait.name for trait in self.traits], - samples = self.all_sample_list, - sample_data = self.sample_data,) + self.js_data = dict(traits=[trait.name for trait in self.traits], + samples=self.all_sample_list, + sample_data=self.sample_data,) def get_trait_db_obs(self, trait_db_list): diff --git a/wqflask/wqflask/correlation/corr_scatter_plot.py b/wqflask/wqflask/correlation/corr_scatter_plot.py index d5dc26f5..22941ad5 100644 --- a/wqflask/wqflask/correlation/corr_scatter_plot.py +++ b/wqflask/wqflask/correlation/corr_scatter_plot.py @@ -9,18 +9,18 @@ from scipy import stats import numpy as np import utility.logger -logger = utility.logger.getLogger(__name__ ) +logger = utility.logger.getLogger(__name__) class CorrScatterPlot: """Page that displays a correlation scatterplot with a line fitted to it""" def __init__(self, params): if "Temp" in params['dataset_1']: - self.dataset_1 = data_set.create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = params['dataset_1'].split("_")[1]) + self.dataset_1 = data_set.create_dataset(dataset_name="Temp", dataset_type="Temp", group_name = params['dataset_1'].split("_")[1]) else: self.dataset_1 = data_set.create_dataset(params['dataset_1']) if "Temp" in params['dataset_2']: - self.dataset_2 = data_set.create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = params['dataset_2'].split("_")[1]) + self.dataset_2 = data_set.create_dataset(dataset_name="Temp", dataset_type="Temp", group_name = params['dataset_2'].split("_")[1]) else: self.dataset_2 = data_set.create_dataset(params['dataset_2']) @@ -60,8 +60,8 @@ class CorrScatterPlot: else: slope_string = '%.3f' % slope - x_buffer = (max(vals_1) - min(vals_1))*0.1 - y_buffer = (max(vals_2) - min(vals_2))*0.1 + x_buffer = (max(vals_1) - min(vals_1)) * 0.1 + y_buffer = (max(vals_2) - min(vals_2)) * 0.1 x_range = [min(vals_1) - x_buffer, max(vals_1) + x_buffer] y_range = [min(vals_2) - y_buffer, max(vals_2) + y_buffer] @@ -80,8 +80,8 @@ class CorrScatterPlot: else: srslope_string = '%.3f' % srslope - x_buffer = (max(rx) - min(rx))*0.1 - y_buffer = (max(ry) - min(ry))*0.1 + x_buffer = (max(rx) - min(rx)) * 0.1 + y_buffer = (max(ry) - min(ry)) * 0.1 sr_range = [min(rx) - x_buffer, max(rx) + x_buffer] @@ -92,33 +92,33 @@ class CorrScatterPlot: self.collections_exist = "True" self.js_data = dict( - data = self.data, - rdata = self.rdata, - indIDs = self.indIDs, - trait_1 = self.trait_1.dataset.name + ": " + str(self.trait_1.name), - trait_2 = self.trait_2.dataset.name + ": " + str(self.trait_2.name), - samples_1 = samples_1, - samples_2 = samples_2, - num_overlap = num_overlap, - vals_1 = vals_1, - vals_2 = vals_2, - x_range = x_range, - y_range = y_range, - sr_range = sr_range, - intercept_coords = intercept_coords, - sr_intercept_coords = 
sr_intercept_coords, - - slope = slope, - slope_string = slope_string, - intercept = intercept, - r_value = r_value, - p_value = p_value, - - srslope = srslope, - srslope_string = srslope_string, - srintercept = srintercept, - srr_value = srr_value, - srp_value = srp_value + data=self.data, + rdata=self.rdata, + indIDs=self.indIDs, + trait_1=self.trait_1.dataset.name + ": " + str(self.trait_1.name), + trait_2=self.trait_2.dataset.name + ": " + str(self.trait_2.name), + samples_1=samples_1, + samples_2=samples_2, + num_overlap=num_overlap, + vals_1=vals_1, + vals_2=vals_2, + x_range=x_range, + y_range=y_range, + sr_range=sr_range, + intercept_coords=intercept_coords, + sr_intercept_coords=sr_intercept_coords, + + slope=slope, + slope_string=slope_string, + intercept=intercept, + r_value=r_value, + p_value=p_value, + + srslope=srslope, + srslope_string=srslope_string, + srintercept=srintercept, + srr_value=srr_value, + srp_value=srp_value #trait3 = self.trait_3.data, #vals_3 = vals_3 @@ -129,10 +129,10 @@ class CorrScatterPlot: def get_intercept_coords(slope, intercept, x_range, y_range): intercept_coords = [] - y1 = slope*x_range[0] + intercept - y2 = slope*x_range[1] + intercept - x1 = (y1-intercept)/slope - x2 = (y2-intercept)/slope + y1 = slope * x_range[0] + intercept + y2 = slope * x_range[1] + intercept + x1 = (y1 - intercept) / slope + x2 = (y2 - intercept) / slope intercept_coords.append([x1, y1]) intercept_coords.append([x2, y2]) diff --git a/wqflask/wqflask/correlation/correlation_functions.py b/wqflask/wqflask/correlation/correlation_functions.py index fd7691d4..0f24241a 100644 --- a/wqflask/wqflask/correlation/correlation_functions.py +++ b/wqflask/wqflask/correlation/correlation_functions.py @@ -34,19 +34,19 @@ from flask import Flask, g ##################################################################################### -#Input: primaryValue(list): one list of expression values of one probeSet, +# Input: primaryValue(list): one list of expression values of one probeSet, # targetValue(list): one list of expression values of one probeSet, # method(string): indicate correlation method ('pearson' or 'spearman') -#Output: corr_result(list): first item is Correlation Value, second item is tissue number, +# Output: corr_result(list): first item is Correlation Value, second item is tissue number, # third item is PValue -#Function: get correlation value,Tissue quantity ,p value result by using R; -#Note : This function is special case since both primaryValue and targetValue are from -#the same dataset. So the length of these two parameters is the same. They are pairs. -#Also, in the datatable TissueProbeSetData, all Tissue values are loaded based on -#the same tissue order +# Function: get correlation value,Tissue quantity ,p value result by using R; +# Note : This function is special case since both primaryValue and targetValue are from +# the same dataset. So the length of these two parameters is the same. They are pairs. 
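A minimal usage sketch for the function documented here, assuming rpy2 and an R runtime with cor.test available; the values are made up. The call returns a three-item list of [correlation estimate, N, p-value]:

from wqflask.correlation import correlation_functions

primary = [9.2, 8.7, 10.1, 9.9, 8.5]   # illustrative tissue expression values
target = [8.8, 8.5, 9.7, 10.0, 8.9]    # paired values for a candidate symbol
corr, n, p = correlation_functions.cal_zero_order_corr_for_tiss(primary, target, method='spearman')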
+# Also, in the datatable TissueProbeSetData, all Tissue values are loaded based on +# the same tissue order ##################################################################################### -def cal_zero_order_corr_for_tiss (primaryValue=[], targetValue=[], method='pearson'): +def cal_zero_order_corr_for_tiss(primaryValue=[], targetValue=[], method='pearson'): R_primary = rpy2.robjects.FloatVector(list(range(len(primaryValue)))) N = len(primaryValue) @@ -55,27 +55,27 @@ def cal_zero_order_corr_for_tiss (primaryValue=[], targetValue=[], method='pears R_target = rpy2.robjects.FloatVector(list(range(len(targetValue)))) for i in range(len(targetValue)): - R_target[i]=targetValue[i] + R_target[i] = targetValue[i] R_corr_test = rpy2.robjects.r['cor.test'] - if method =='spearman': + if method == 'spearman': R_result = R_corr_test(R_primary, R_target, method='spearman') else: R_result = R_corr_test(R_primary, R_target) - corr_result =[] - corr_result.append( R_result[3][0]) - corr_result.append( N ) - corr_result.append( R_result[2][0]) + corr_result = [] + corr_result.append(R_result[3][0]) + corr_result.append(N) + corr_result.append(R_result[2][0]) return corr_result ######################################################################################################## -#input: cursor, symbolList (list), dataIdDict(Dict): key is symbol -#output: SymbolValuePairDict(dictionary):one dictionary of Symbol and Value Pair. +# input: cursor, symbolList (list), dataIdDict(Dict): key is symbol +# output: SymbolValuePairDict(dictionary):one dictionary of Symbol and Value Pair. # key is symbol, value is one list of expression values of one probeSet. -#function: wrapper function for getSymbolValuePairDict function +# function: wrapper function for getSymbolValuePairDict function # build gene symbol list if necessary, cut it into small lists if necessary, # then call getSymbolValuePairDict function and merge the results. ######################################################################################################## diff --git a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py index cb341e79..051ac1cb 100644 --- a/wqflask/wqflask/correlation/show_corr_results.py +++ b/wqflask/wqflask/correlation/show_corr_results.py @@ -1,4 +1,4 @@ -## Copyright (C) University of Tennessee Health Science Center, Memphis, TN. +# Copyright (C) University of Tennessee Health Science Center, Memphis, TN. 
# # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License @@ -78,7 +78,7 @@ class CorrelationResults: with Bench("Doing correlations"): if start_vars['dataset'] == "Temp": - self.dataset = data_set.create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = start_vars['group']) + self.dataset = data_set.create_dataset(dataset_name="Temp", dataset_type="Temp", group_name = start_vars['group']) self.trait_id = start_vars['trait_id'] self.this_trait = create_trait(dataset=self.dataset, name=self.trait_id, @@ -109,8 +109,8 @@ class CorrelationResults: self.get_formatted_corr_type() self.return_number = int(start_vars['corr_return_results']) - #The two if statements below append samples to the sample list based upon whether the user - #rselected Primary Samples Only, Other Samples Only, or All Samples + # The two if statements below append samples to the sample list based upon whether the user + # rselected Primary Samples Only, Other Samples Only, or All Samples primary_samples = self.dataset.group.samplelist if self.dataset.group.parlist != None: @@ -118,13 +118,13 @@ class CorrelationResults: if self.dataset.group.f1list != None: primary_samples += self.dataset.group.f1list - #If either BXD/whatever Only or All Samples, append all of that group's samplelist + # If either BXD/whatever Only or All Samples, append all of that group's samplelist if corr_samples_group != 'samples_other': self.process_samples(start_vars, primary_samples) - #If either Non-BXD/whatever or All Samples, get all samples from this_trait.data and - #exclude the primary samples (because they would have been added in the previous - #if statement if the user selected All Samples) + # If either Non-BXD/whatever or All Samples, get all samples from this_trait.data and + # exclude the primary samples (because they would have been added in the previous + # if statement if the user selected All Samples) if corr_samples_group != 'samples_primary': if corr_samples_group == 'samples_other': primary_samples = [x for x in primary_samples if x not in ( @@ -173,7 +173,7 @@ class CorrelationResults: key=lambda t: -abs(t[1][0]))) - #ZS: Convert min/max chromosome to an int for the location range option + # ZS: Convert min/max chromosome to an int for the location range option range_chr_as_int = None for order_id, chr_info in list(self.dataset.species.chromosomes.chromosomes.items()): if 'loc_chr' in start_vars: @@ -259,15 +259,15 @@ class CorrelationResults: def do_tissue_correlation_for_trait_list(self, tissue_dataset_id=1): """Given a list of correlation results (self.correlation_results), gets the tissue correlation value for each""" - #Gets tissue expression values for the primary trait + # Gets tissue expression values for the primary trait primary_trait_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values( - symbol_list = [self.this_trait.symbol]) + symbol_list=[self.this_trait.symbol]) if self.this_trait.symbol.lower() in primary_trait_tissue_vals_dict: primary_trait_tissue_values = primary_trait_tissue_vals_dict[self.this_trait.symbol.lower()] gene_symbol_list = [trait.symbol for trait in self.correlation_results if trait.symbol] - corr_result_tissue_vals_dict= correlation_functions.get_trait_symbol_and_tissue_values( + corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values( symbol_list=gene_symbol_list) for trait in self.correlation_results: @@ -282,15 +282,15 @@ class 
CorrelationResults: trait.tissue_pvalue = result[2] def do_tissue_correlation_for_all_traits(self, tissue_dataset_id=1): - #Gets tissue expression values for the primary trait + # Gets tissue expression values for the primary trait primary_trait_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values( - symbol_list = [self.this_trait.symbol]) + symbol_list=[self.this_trait.symbol]) if self.this_trait.symbol.lower() in primary_trait_tissue_vals_dict: primary_trait_tissue_values = primary_trait_tissue_vals_dict[self.this_trait.symbol.lower()] #print("trait_gene_symbols: ", pf(trait_gene_symbols.values())) - corr_result_tissue_vals_dict= correlation_functions.get_trait_symbol_and_tissue_values( + corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values( symbol_list=list(self.trait_symbol_dict.values())) #print("corr_result_tissue_vals: ", pf(corr_result_tissue_vals_dict)) @@ -443,7 +443,7 @@ class CorrelationResults: self.this_trait_vals, target_vals, num_overlap = corr_result_helpers.normalize_values(self.this_trait_vals, target_vals) if num_overlap > 5: - #ZS: 2015 could add biweight correlation, see http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3465711/ + # ZS: 2015 could add biweight correlation, see http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3465711/ if self.corr_method == 'bicor': sample_r, sample_p = do_bicor(self.this_trait_vals, target_vals) elif self.corr_method == 'pearson': @@ -475,16 +475,16 @@ def do_bicor(this_trait_vals, target_trait_vals): r_library("WGCNA") r_bicor = ro.r["bicorAndPvalue"] # Map the bicorAndPvalue function - r_options(stringsAsFactors = False) + r_options(stringsAsFactors=False) this_vals = ro.Vector(this_trait_vals) target_vals = ro.Vector(target_trait_vals) - the_r, the_p, _fisher_transform, _the_t, _n_obs = [numpy.asarray(x) for x in r_bicor(x = this_vals, y = target_vals)] + the_r, the_p, _fisher_transform, _the_t, _n_obs = [numpy.asarray(x) for x in r_bicor(x=this_vals, y=target_vals)] return the_r, the_p -def generate_corr_json(corr_results, this_trait, dataset, target_dataset, for_api = False): +def generate_corr_json(corr_results, this_trait, dataset, target_dataset, for_api=False): results_list = [] for i, trait in enumerate(corr_results): if trait.view == False: diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py index d0b4a156..94c8931f 100644 --- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py +++ b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py @@ -1,4 +1,4 @@ -## Copyright (C) University of Tennessee Health Science Center, Memphis, TN. +# Copyright (C) University of Tennessee Health Science Center, Memphis, TN. 
# # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License @@ -52,7 +52,7 @@ class CorrelationMatrix: self.traits = [] self.insufficient_shared_samples = False self.do_PCA = True - this_group = self.trait_list[0][1].group.name #ZS: Getting initial group name before verifying all traits are in the same group in the following loop + this_group = self.trait_list[0][1].group.name # ZS: Getting initial group name before verifying all traits are in the same group in the following loop for trait_db in self.trait_list: this_group = trait_db[1].group.name this_trait = trait_db[0] @@ -76,10 +76,10 @@ class CorrelationMatrix: this_trait_vals.append('') self.sample_data.append(this_trait_vals) - if len(this_trait_vals) < len(self.trait_list): #Shouldn't do PCA if there are more traits than observations/samples + if len(this_trait_vals) < len(self.trait_list): # Shouldn't do PCA if there are more traits than observations/samples self.do_PCA = False - self.lowest_overlap = 8 #ZS: Variable set to the lowest overlapping samples in order to notify user, or 8, whichever is lower (since 8 is when we want to display warning) + self.lowest_overlap = 8 # ZS: Variable set to the lowest overlapping samples in order to notify user, or 8, whichever is lower (since 8 is when we want to display warning) self.corr_results = [] self.pca_corr_results = [] @@ -93,7 +93,7 @@ class CorrelationMatrix: corr_result_row = [] pca_corr_result_row = [] - is_spearman = False #ZS: To determine if it's above or below the diagonal + is_spearman = False # ZS: To determine if it's above or below the diagonal for target in self.trait_list: target_trait = target[0] target_db = target[1] @@ -168,12 +168,12 @@ class CorrelationMatrix: except: self.pca_works = "False" - self.js_data = dict(traits = [trait.name for trait in self.traits], - groups = groups, - cols = list(range(len(self.traits))), - rows = list(range(len(self.traits))), - samples = self.all_sample_list, - sample_data = self.sample_data,) + self.js_data = dict(traits=[trait.name for trait in self.traits], + groups=groups, + cols=list(range(len(self.traits))), + rows=list(range(len(self.traits))), + samples=self.all_sample_list, + sample_data=self.sample_data,) def calculate_pca(self, cols, corr_eigen_value, corr_eigen_vectors): base = importr('base') @@ -183,7 +183,7 @@ class CorrelationMatrix: m = robjects.r.matrix(corr_results_to_list, nrow=len(cols)) eigen = base.eigen(m) - pca = stats.princomp(m, cor = "TRUE") + pca = stats.princomp(m, cor="TRUE") self.loadings = pca.rx('loadings') self.scores = pca.rx('scores') self.scale = pca.rx('scale') @@ -193,15 +193,15 @@ class CorrelationMatrix: pca_traits = [] for i, vector in enumerate(trait_array_vectors): - #ZS: Check if below check is necessary - #if corr_eigen_value[i-1] > 100.0/len(self.trait_list): - pca_traits.append((vector*-1.0).tolist()) + # ZS: Check if below check is necessary + # if corr_eigen_value[i-1] > 100.0/len(self.trait_list): + pca_traits.append((vector * -1.0).tolist()) this_group_name = self.trait_list[0][1].group.name - temp_dataset = data_set.create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = this_group_name) + temp_dataset = data_set.create_dataset(dataset_name="Temp", dataset_type="Temp", group_name = this_group_name) temp_dataset.group.get_samplelist() for i, pca_trait in enumerate(pca_traits): - trait_id = "PCA" + str(i+1) + "_" + temp_dataset.group.species + "_" + this_group_name + "_" + 
datetime.datetime.now().strftime("%m%d%H%M%S") + trait_id = "PCA" + str(i + 1) + "_" + temp_dataset.group.species + "_" + this_group_name + "_" + datetime.datetime.now().strftime("%m%d%H%M%S") this_vals_string = "" position = 0 for sample in temp_dataset.group.all_samples_ordered(): @@ -228,7 +228,7 @@ class CorrelationMatrix: else: the_range = 2 for j in range(the_range): - position = i + len(self.trait_list)*j + position = i + len(self.trait_list) * j loadings_row.append(self.loadings[0][position]) loadings_array.append(loadings_row) return loadings_array @@ -271,14 +271,14 @@ def zScore(trait_data_array): i = 0 for data in trait_data_array: N = len(data) - S = reduce(lambda x, y: x+y, data, 0.) - SS = reduce(lambda x, y: x+y*y, data, 0.) - mean = S/N - var = SS - S*S/N - stdev = math.sqrt(var/(N-1)) + S = reduce(lambda x, y: x + y, data, 0.) + SS = reduce(lambda x, y: x + y * y, data, 0.) + mean = S / N + var = SS - S * S / N + stdev = math.sqrt(var / (N - 1)) if stdev == 0: stdev = 1e-100 - data2 = [(x-mean)/stdev for x in data] + data2 = [(x - mean) / stdev for x in data] trait_data_array[i] = data2 i += 1 return trait_data_array @@ -298,8 +298,8 @@ def sortEigenVectors(vector): for item in combines: A.append(item[0]) B.append(item[1]) - sum = reduce(lambda x, y: x+y, A, 0.0) - A = [x*100.0/sum for x in A] + sum = reduce(lambda x, y: x + y, A, 0.0) + A = [x * 100.0 / sum for x in A] return [A, B] except: return [] diff --git a/wqflask/wqflask/ctl/ctl_analysis.py b/wqflask/wqflask/ctl/ctl_analysis.py index 1556e370..2fa90a93 100644 --- a/wqflask/wqflask/ctl/ctl_analysis.py +++ b/wqflask/wqflask/ctl/ctl_analysis.py @@ -25,33 +25,33 @@ from utility.tools import locate, GN2_BRANCH_URL from rpy2.robjects.packages import importr import utility.logger -logger = utility.logger.getLogger(__name__ ) - -## Get pointers to some common R functions -r_library = ro.r["library"] # Map the library function -r_options = ro.r["options"] # Map the options function -r_t = ro.r["t"] # Map the t function -r_unlist = ro.r["unlist"] # Map the unlist function -r_list = ro.r.list # Map the list function -r_png = ro.r["png"] # Map the png function for plotting -r_dev_off = ro.r["dev.off"] # Map the dev.off function -r_write_table = ro.r["write.table"] # Map the write.table function -r_data_frame = ro.r["data.frame"] # Map the write.table function -r_as_numeric = ro.r["as.numeric"] # Map the write.table function +logger = utility.logger.getLogger(__name__) + +# Get pointers to some common R functions +r_library = ro.r["library"] # Map the library function +r_options = ro.r["options"] # Map the options function +r_t = ro.r["t"] # Map the t function +r_unlist = ro.r["unlist"] # Map the unlist function +r_list = ro.r.list # Map the list function +r_png = ro.r["png"] # Map the png function for plotting +r_dev_off = ro.r["dev.off"] # Map the dev.off function +r_write_table = ro.r["write.table"] # Map the write.table function +r_data_frame = ro.r["data.frame"] # Map the write.table function +r_as_numeric = ro.r["as.numeric"] # Map the write.table function class CTL: def __init__(self): logger.info("Initialization of CTL") #log = r_file("/tmp/genenetwork_ctl.log", open = "wt") - #r_sink(log) # Uncomment the r_sink() commands to log output from stdout/stderr to a file + # r_sink(log) # Uncomment the r_sink() commands to log output from stdout/stderr to a file #r_sink(log, type = "message") r_library("ctl") # Load CTL - Should only be done once, since it is quite expensive - r_options(stringsAsFactors = False) + 
r_options(stringsAsFactors=False) logger.info("Initialization of CTL done, package loaded in R session") - self.r_CTLscan = ro.r["CTLscan"] # Map the CTLscan function - self.r_CTLsignificant = ro.r["CTLsignificant"] # Map the CTLsignificant function - self.r_lineplot = ro.r["ctl.lineplot"] # Map the ctl.lineplot function - self.r_plotCTLobject = ro.r["plot.CTLobject"] # Map the CTLsignificant function + self.r_CTLscan = ro.r["CTLscan"] # Map the CTLscan function + self.r_CTLsignificant = ro.r["CTLsignificant"] # Map the CTLsignificant function + self.r_lineplot = ro.r["ctl.lineplot"] # Map the ctl.lineplot function + self.r_plotCTLobject = ro.r["plot.CTLobject"] # Map the CTLsignificant function self.nodes_list = [] self.edges_list = [] logger.info("Obtained pointers to CTL functions") @@ -59,23 +59,23 @@ class CTL: self.gn2_url = GN2_BRANCH_URL def addNode(self, gt): - node_dict = { 'data' : {'id' : str(gt.name) + ":" + str(gt.dataset.name), - 'sid' : str(gt.name), - 'dataset' : str(gt.dataset.name), - 'label' : gt.name, - 'symbol' : gt.symbol, - 'geneid' : gt.geneid, - 'omim' : gt.omim } } + node_dict = {'data': {'id': str(gt.name) + ":" + str(gt.dataset.name), + 'sid': str(gt.name), + 'dataset': str(gt.dataset.name), + 'label': gt.name, + 'symbol': gt.symbol, + 'geneid': gt.geneid, + 'omim': gt.omim}} self.nodes_list.append(node_dict) def addEdge(self, gtS, gtT, significant, x): - edge_data = {'id' : str(gtS.symbol) + '_' + significant[1][x] + '_' + str(gtT.symbol), - 'source' : str(gtS.name) + ":" + str(gtS.dataset.name), - 'target' : str(gtT.name) + ":" + str(gtT.dataset.name), - 'lod' : significant[3][x], - 'color' : "#ff0000", - 'width' : significant[3][x] } - edge_dict = { 'data' : edge_data } + edge_data = {'id': str(gtS.symbol) + '_' + significant[1][x] + '_' + str(gtT.symbol), + 'source': str(gtS.name) + ":" + str(gtS.dataset.name), + 'target': str(gtT.name) + ":" + str(gtT.dataset.name), + 'lod': significant[3][x], + 'color': "#ff0000", + 'width': significant[3][x]} + edge_dict = {'data': edge_data} self.edges_list.append(edge_dict) def run_analysis(self, requestform): @@ -114,7 +114,7 @@ class CTL: genotypes = list(itertools.chain(*markers)) logger.debug(len(genotypes) / len(individuals), "==", len(parser.markers)) - rGeno = r_t(ro.r.matrix(r_unlist(genotypes), nrow=len(markernames), ncol=len(individuals), dimnames = r_list(markernames, individuals), byrow=True)) + rGeno = r_t(ro.r.matrix(r_unlist(genotypes), nrow=len(markernames), ncol=len(individuals), dimnames=r_list(markernames, individuals), byrow=True)) # Create a phenotype matrix traits = [] @@ -122,7 +122,7 @@ class CTL: logger.debug("retrieving data for", trait) if trait != "": ts = trait.split(':') - gt = create_trait(name = ts[0], dataset_name = ts[1]) + gt = create_trait(name=ts[0], dataset_name=ts[1]) gt = retrieve_sample_data(gt, dataset, individuals) for ind in individuals: if ind in list(gt.data.keys()): @@ -130,23 +130,23 @@ class CTL: else: traits.append("-999") - rPheno = r_t(ro.r.matrix(r_as_numeric(r_unlist(traits)), nrow=len(self.trait_db_list), ncol=len(individuals), dimnames = r_list(self.trait_db_list, individuals), byrow=True)) + rPheno = r_t(ro.r.matrix(r_as_numeric(r_unlist(traits)), nrow=len(self.trait_db_list), ncol=len(individuals), dimnames=r_list(self.trait_db_list, individuals), byrow=True)) logger.debug(rPheno) # Use a data frame to store the objects - rPheno = r_data_frame(rPheno, check_names = False) - rGeno = r_data_frame(rGeno, check_names = False) + rPheno = r_data_frame(rPheno, 
check_names=False) + rGeno = r_data_frame(rGeno, check_names=False) # Debug: Print the genotype and phenotype files to disk #r_write_table(rGeno, "~/outputGN/geno.csv") #r_write_table(rPheno, "~/outputGN/pheno.csv") # Perform the CTL scan - res = self.r_CTLscan(rGeno, rPheno, strategy = strategy, nperm = nperm, parametric = parametric, nthreads=6) + res = self.r_CTLscan(rGeno, rPheno, strategy=strategy, nperm=nperm, parametric = parametric, nthreads=6) # Get significant interactions - significant = self.r_CTLsignificant(res, significance = significance) + significant = self.r_CTLsignificant(res, significance=significance) # Create an image for output self.results = {} @@ -158,7 +158,7 @@ class CTL: # Create the lineplot r_png(self.results['imgloc1'], width=1000, height=600, type='cairo-png') - self.r_lineplot(res, significance = significance) + self.r_lineplot(res, significance=significance) r_dev_off() n = 2 # We start from 2, since R starts from 1 :) @@ -167,7 +167,7 @@ class CTL: self.results['imgurl' + str(n)] = webqtlUtil.genRandStr("CTL_") + ".png" self.results['imgloc' + str(n)] = GENERATED_IMAGE_DIR + self.results['imgurl' + str(n)] r_png(self.results['imgloc' + str(n)], width=1000, height=600, type='cairo-png') - self.r_plotCTLobject(res, (n-1), significance = significance, main='Phenotype ' + trait) + self.r_plotCTLobject(res, (n - 1), significance=significance, main='Phenotype ' + trait) r_dev_off() n = n + 1 @@ -180,8 +180,8 @@ class CTL: logger.debug(significant[0][x], significant[1][x], significant[2][x]) # Debug to console tsS = significant[0][x].split(':') # Source tsT = significant[2][x].split(':') # Target - gtS = create_trait(name = tsS[0], dataset_name = tsS[1]) # Retrieve Source info from the DB - gtT = create_trait(name = tsT[0], dataset_name = tsT[1]) # Retrieve Target info from the DB + gtS = create_trait(name=tsS[0], dataset_name=tsS[1]) # Retrieve Source info from the DB + gtT = create_trait(name=tsT[0], dataset_name=tsT[1]) # Retrieve Target info from the DB self.addNode(gtS) self.addNode(gtT) self.addEdge(gtS, gtT, significant, x) diff --git a/wqflask/wqflask/database.py b/wqflask/wqflask/database.py index adeed6ad..b6e85494 100644 --- a/wqflask/wqflask/database.py +++ b/wqflask/wqflask/database.py @@ -6,7 +6,7 @@ from sqlalchemy.ext.declarative import declarative_base from utility.tools import SQL_URI import utility.logger -logger = utility.logger.getLogger(__name__ ) +logger = utility.logger.getLogger(__name__) engine = create_engine(SQL_URI, encoding="latin1") diff --git a/wqflask/wqflask/do_search.py b/wqflask/wqflask/do_search.py index 364a3eed..115182e4 100644 --- a/wqflask/wqflask/do_search.py +++ b/wqflask/wqflask/do_search.py @@ -32,7 +32,7 @@ class DoSearch: self.search_type = search_type if self.dataset: - #Get group information for dataset and the species id + # Get group information for dataset and the species id self.species_id = webqtlDatabaseFunction.retrieve_species_id(self.dataset.group.name) def execute(self, query): @@ -137,7 +137,7 @@ class MrnaAssaySearch(DoSearch): return where_clause - def compile_final_query(self, from_clause = '', where_clause = ''): + def compile_final_query(self, from_clause='', where_clause=''): """Generates the final query string""" from_clause = self.normalize_spaces(from_clause) @@ -153,7 +153,7 @@ class MrnaAssaySearch(DoSearch): escape(str(self.dataset.id)))) return query - def run_combined(self, from_clause = '', where_clause = ''): + def run_combined(self, from_clause='', where_clause=''): """Generates and 
runs a combined search of an mRNA expression dataset""" logger.debug("Running ProbeSetSearch") @@ -218,10 +218,10 @@ class PhenotypeSearch(DoSearch): def get_where_clause(self): """Generate clause for WHERE portion of query""" - #Todo: Zach will figure out exactly what both these lines mean - #and comment here + # Todo: Zach will figure out exactly what both these lines mean + # and comment here - #if "'" not in self.search_term[0]: + # if "'" not in self.search_term[0]: search_term = "[[:<:]]" + self.handle_wildcard(self.search_term[0]) + "[[:>:]]" if "_" in self.search_term[0]: if len(self.search_term[0].split("_")[0]) == 3: @@ -236,7 +236,7 @@ class PhenotypeSearch(DoSearch): return where_clause - def compile_final_query(self, from_clause = '', where_clause = ''): + def compile_final_query(self, from_clause='', where_clause=''): """Generates the final query string""" from_clause = self.normalize_spaces(from_clause) @@ -292,7 +292,7 @@ class PhenotypeSearch(DoSearch): def run(self): """Generates and runs a simple search of a phenotype dataset""" - query = self.compile_final_query(where_clause = self.get_where_clause()) + query = self.compile_final_query(where_clause=self.get_where_clause()) return self.execute(query) @@ -334,7 +334,7 @@ class GenotypeSearch(DoSearch): return where_clause - def compile_final_query(self, from_clause = '', where_clause = ''): + def compile_final_query(self, from_clause='', where_clause=''): """Generates the final query string""" from_clause = self.normalize_spaces(from_clause) @@ -344,26 +344,26 @@ class GenotypeSearch(DoSearch): query = (self.base_query + """WHERE Geno.Id = GenoXRef.GenoId and GenoXRef.GenoFreezeId = GenoFreeze.Id - and GenoFreeze.Id = %s"""% (escape(str(self.dataset.id)))) + and GenoFreeze.Id = %s""" % (escape(str(self.dataset.id)))) else: query = (self.base_query + """WHERE %s and Geno.Id = GenoXRef.GenoId and GenoXRef.GenoFreezeId = GenoFreeze.Id - and GenoFreeze.Id = %s"""% (where_clause, + and GenoFreeze.Id = %s""" % (where_clause, escape(str(self.dataset.id)))) return query def run(self): """Generates and runs a simple search of a genotype dataset""" - #Todo: Zach will figure out exactly what both these lines mean - #and comment here + # Todo: Zach will figure out exactly what both these lines mean + # and comment here if self.search_term[0] == "*": self.query = self.compile_final_query() else: - self.query = self.compile_final_query(where_clause = self.get_where_clause()) + self.query = self.compile_final_query(where_clause=self.get_where_clause()) return self.execute(self.query) @@ -393,7 +393,7 @@ class RifSearch(MrnaAssaySearch): class WikiSearch(MrnaAssaySearch): """Searches GeneWiki for traits other people have annotated""" - DoSearch.search_types['ProbeSet_WIKI'] = "WikiSearch" + DoSearch.search_types['ProbeSet_WIKI'] = "WikiSearch" def get_from_clause(self): return ", GeneRIF " @@ -403,7 +403,7 @@ class WikiSearch(MrnaAssaySearch): and GeneRIF.versionId=0 and GeneRIF.display>0 and (GeneRIF.comment REGEXP '%s' or GeneRIF.initial = '%s') """ % (self.dataset.type, - "[[:<:]]"+str(self.search_term[0])+"[[:>:]]", + "[[:<:]]" + str(self.search_term[0]) + "[[:>:]]", str(self.search_term[0])) return where_clause @@ -418,7 +418,7 @@ class WikiSearch(MrnaAssaySearch): class GoSearch(MrnaAssaySearch): """Searches for synapse-associated genes listed in the Gene Ontology.""" - DoSearch.search_types['ProbeSet_GO'] = "GoSearch" + DoSearch.search_types['ProbeSet_GO'] = "GoSearch" def get_from_clause(self): from_clause = """, 
db_GeneOntology.term as GOterm, @@ -429,7 +429,7 @@ class GoSearch(MrnaAssaySearch): def get_where_clause(self): field = 'GOterm.acc' - go_id = 'GO:' + ('0000000'+self.search_term[0])[-7:] + go_id = 'GO:' + ('0000000' + self.search_term[0])[-7:] statements = ("""%s.symbol=GOgene_product.symbol and GOassociation.gene_product_id=GOgene_product.id and @@ -448,7 +448,7 @@ class GoSearch(MrnaAssaySearch): return self.execute(query) -#ZS: Not sure what the best way to deal with LRS searches is +# ZS: Not sure what the best way to deal with LRS searches is class LrsSearch(DoSearch): """Searches for genes with a QTL within the given LRS values @@ -486,8 +486,8 @@ class LrsSearch(DoSearch): assert isinstance(self.search_term, (list, tuple)) lrs_min, lrs_max = self.search_term[:2] if self.search_type == "LOD": - lrs_min = lrs_min*4.61 - lrs_max = lrs_max*4.61 + lrs_min = lrs_min * 4.61 + lrs_max = lrs_max * 4.61 where_clause = """ %sXRef.LRS > %s and %sXRef.LRS < %s """ % self.mescape(self.dataset.type, @@ -496,7 +496,7 @@ class LrsSearch(DoSearch): max(lrs_min, lrs_max)) if len(self.search_term) > 2: - #If the user typed, for example "Chr4", the "Chr" substring needs to be removed so that all search elements can be converted to floats + # If the user typed, for example "Chr4", the "Chr" substring needs to be removed so that all search elements can be converted to floats chr_num = self.search_term[2] if "chr" in self.search_term[2].lower(): chr_num = self.search_term[2].lower().replace("chr", "") @@ -518,7 +518,7 @@ class LrsSearch(DoSearch): logger.debug("self.search_term is:", self.search_term) lrs_val = self.search_term[0] if self.search_type == "LOD": - lrs_val = lrs_val*4.61 + lrs_val = lrs_val * 4.61 where_clause = """ %sXRef.LRS %s %s """ % self.mescape(self.dataset.type, self.search_operator, @@ -546,7 +546,7 @@ class MrnaLrsSearch(LrsSearch, MrnaAssaySearch): self.from_clause = self.get_from_clause() self.where_clause = self.get_where_clause() - self.query = self.compile_final_query(from_clause = self.from_clause, where_clause = self.where_clause) + self.query = self.compile_final_query(from_clause=self.from_clause, where_clause=self.where_clause) return self.execute(self.query) @@ -560,7 +560,7 @@ class PhenotypeLrsSearch(LrsSearch, PhenotypeSearch): self.from_clause = self.get_from_clause() self.where_clause = self.get_where_clause() - self.query = self.compile_final_query(from_clause = self.from_clause, where_clause = self.where_clause) + self.query = self.compile_final_query(from_clause=self.from_clause, where_clause=self.where_clause) return self.execute(self.query) @@ -599,7 +599,7 @@ class CisTransLrsSearch(DoSearch): lrs_max = lrs_max * 4.61 sub_clause = """ %sXRef.LRS > %s and - %sXRef.LRS < %s and """ % ( + %sXRef.LRS < %s and """ % ( escape(self.dataset.type), escape(str(min(lrs_min, lrs_max))), escape(self.dataset.type), @@ -607,7 +607,7 @@ class CisTransLrsSearch(DoSearch): ) else: # Deal with >, <, >=, and <= - sub_clause = """ %sXRef.LRS %s %s and """ % ( + sub_clause = """ %sXRef.LRS %s %s and """ % ( escape(self.dataset.type), escape(self.search_operator), escape(self.search_term[0]) @@ -667,7 +667,7 @@ class CisLrsSearch(CisTransLrsSearch, MrnaAssaySearch): """ for search_key in ('LRS', 'LOD'): - DoSearch.search_types['ProbeSet_CIS'+search_key] = "CisLrsSearch" + DoSearch.search_types['ProbeSet_CIS' + search_key] = "CisLrsSearch" def get_where_clause(self): return CisTransLrsSearch.get_where_clause(self, "cis") @@ -697,7 +697,7 @@ class TransLrsSearch(CisTransLrsSearch, 
MrnaAssaySearch): """ for search_key in ('LRS', 'LOD'): - DoSearch.search_types['ProbeSet_TRANS'+search_key] = "TransLrsSearch" + DoSearch.search_types['ProbeSet_TRANS' + search_key] = "TransLrsSearch" def get_where_clause(self): return CisTransLrsSearch.get_where_clause(self, "trans") @@ -740,7 +740,7 @@ class MeanSearch(MrnaAssaySearch): self.where_clause = self.get_where_clause() logger.debug("where_clause is:", pf(self.where_clause)) - self.query = self.compile_final_query(where_clause = self.where_clause) + self.query = self.compile_final_query(where_clause=self.where_clause) return self.execute(self.query) @@ -775,7 +775,7 @@ class RangeSearch(MrnaAssaySearch): def run(self): self.where_clause = self.get_where_clause() - self.query = self.compile_final_query(where_clause = self.where_clause) + self.query = self.compile_final_query(where_clause=self.where_clause) return self.execute(self.query) @@ -815,7 +815,7 @@ class PositionSearch(DoSearch): def run(self): self.get_where_clause() - self.query = self.compile_final_query(where_clause = self.where_clause) + self.query = self.compile_final_query(where_clause=self.where_clause) return self.execute(self.query) @@ -823,12 +823,12 @@ class MrnaPositionSearch(PositionSearch, MrnaAssaySearch): """Searches for genes located within a specified range on a specified chromosome""" for search_key in ('POSITION', 'POS', 'MB'): - DoSearch.search_types['ProbeSet_'+search_key] = "MrnaPositionSearch" + DoSearch.search_types['ProbeSet_' + search_key] = "MrnaPositionSearch" def run(self): self.where_clause = self.get_where_clause() - self.query = self.compile_final_query(where_clause = self.where_clause) + self.query = self.compile_final_query(where_clause=self.where_clause) return self.execute(self.query) @@ -836,12 +836,12 @@ class GenotypePositionSearch(PositionSearch, GenotypeSearch): """Searches for genes located within a specified range on a specified chromosome""" for search_key in ('POSITION', 'POS', 'MB'): - DoSearch.search_types['Geno_'+search_key] = "GenotypePositionSearch" + DoSearch.search_types['Geno_' + search_key] = "GenotypePositionSearch" def run(self): self.where_clause = self.get_where_clause() - self.query = self.compile_final_query(where_clause = self.where_clause) + self.query = self.compile_final_query(where_clause=self.where_clause) return self.execute(self.query) @@ -873,7 +873,7 @@ class PvalueSearch(MrnaAssaySearch): logger.debug("where_clause is:", pf(self.where_clause)) - self.query = self.compile_final_query(where_clause = self.where_clause) + self.query = self.compile_final_query(where_clause=self.where_clause) logger.sql(self.query) return self.execute(self.query) @@ -888,7 +888,7 @@ class AuthorSearch(PhenotypeSearch): self.where_clause = """ Publication.Authors REGEXP "[[:<:]]%s[[:>:]]" and """ % (self.search_term[0]) - self.query = self.compile_final_query(where_clause = self.where_clause) + self.query = self.compile_final_query(where_clause=self.where_clause) return self.execute(self.query) @@ -924,8 +924,8 @@ def get_aliases(symbol, species): return filtered_aliases if __name__ == "__main__": - ### Usually this will be used as a library, but call it from the command line for testing - ### And it runs the code below + # Usually this will be used as a library, but call it from the command line for testing + # And it runs the code below import MySQLdb import sys diff --git a/wqflask/wqflask/export_traits.py b/wqflask/wqflask/export_traits.py index 6fb760e0..95c20673 100644 --- a/wqflask/wqflask/export_traits.py +++ 
b/wqflask/wqflask/export_traits.py @@ -13,7 +13,7 @@ from base.trait import create_trait, retrieve_trait_info from pprint import pformat as pf from utility.logger import getLogger -logger = getLogger(__name__ ) +logger = getLogger(__name__) def export_search_results_csv(targs): diff --git a/wqflask/wqflask/external_tools/send_to_bnw.py b/wqflask/wqflask/external_tools/send_to_bnw.py index c5c79e98..9836eb9c 100644 --- a/wqflask/wqflask/external_tools/send_to_bnw.py +++ b/wqflask/wqflask/external_tools/send_to_bnw.py @@ -1,4 +1,4 @@ -## Copyright (C) University of Tennessee Health Science Center, Memphis, TN. +# Copyright (C) University of Tennessee Health Science Center, Memphis, TN. # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License @@ -22,7 +22,7 @@ from base.trait import GeneralTrait from utility import helper_functions, corr_result_helpers import utility.logger -logger = utility.logger.getLogger(__name__ ) +logger = utility.logger.getLogger(__name__) class SendToBNW: def __init__(self, start_vars): @@ -40,7 +40,7 @@ class SendToBNW: shared_samples = list(set(trait_samples_list[0]).intersection(*trait_samples_list)) - self.form_value = "" #ZS: string that is passed to BNW through form + self.form_value = "" # ZS: string that is passed to BNW through form values_list = [] for trait_db in self.trait_list: this_trait = trait_db[0] diff --git a/wqflask/wqflask/external_tools/send_to_geneweaver.py b/wqflask/wqflask/external_tools/send_to_geneweaver.py index 47e4c53a..36f1b8e5 100644 --- a/wqflask/wqflask/external_tools/send_to_geneweaver.py +++ b/wqflask/wqflask/external_tools/send_to_geneweaver.py @@ -1,4 +1,4 @@ -## Copyright (C) University of Tennessee Health Science Center, Memphis, TN. +# Copyright (C) University of Tennessee Health Science Center, Memphis, TN. # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License @@ -27,7 +27,7 @@ from base.species import TheSpecies from utility import helper_functions, corr_result_helpers import utility.logger -logger = utility.logger.getLogger(__name__ ) +logger = utility.logger.getLogger(__name__) class SendToGeneWeaver: def __init__(self, start_vars): @@ -74,7 +74,7 @@ def test_chip(trait_list): FROM GeneChip, ProbeFreeze, ProbeSetFreeze WHERE GeneChip.Id = ProbeFreeze.ChipId and ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id and - ProbeSetFreeze.Name = '%s'""" % dataset.name).fetchone() + ProbeSetFreeze.Name = '%s'""" % dataset.name).fetchone() if result: chip_name = result[0] diff --git a/wqflask/wqflask/external_tools/send_to_webgestalt.py b/wqflask/wqflask/external_tools/send_to_webgestalt.py index e1e5e655..f50eeb8b 100644 --- a/wqflask/wqflask/external_tools/send_to_webgestalt.py +++ b/wqflask/wqflask/external_tools/send_to_webgestalt.py @@ -1,4 +1,4 @@ -## Copyright (C) University of Tennessee Health Science Center, Memphis, TN. +# Copyright (C) University of Tennessee Health Science Center, Memphis, TN. 
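The test_chip helper in send_to_geneweaver.py above (and repeated in send_to_webgestalt.py below) resolves a dataset name to its chip name by joining GeneChip, ProbeFreeze and ProbeSetFreeze. For illustration only, the same lookup written against a plain DB-API cursor with a bound parameter (a sketch assuming a MySQL cursor `cur`; not part of this patch):

    CHIP_FOR_DATASET = """
        SELECT GeneChip.Name
        FROM GeneChip, ProbeFreeze, ProbeSetFreeze
        WHERE GeneChip.Id = ProbeFreeze.ChipId
          AND ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id
          AND ProbeSetFreeze.Name = %s"""

    def chip_name_for_dataset(cur, dataset_name):
        # The %s placeholder lets the driver handle quoting/escaping
        cur.execute(CHIP_FOR_DATASET, (dataset_name,))
        row = cur.fetchone()
        return row[0] if row else None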
# # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License @@ -27,7 +27,7 @@ from base.species import TheSpecies from utility import helper_functions, corr_result_helpers import utility.logger -logger = utility.logger.getLogger(__name__ ) +logger = utility.logger.getLogger(__name__) class SendToWebGestalt: def __init__(self, start_vars): @@ -47,16 +47,16 @@ class SendToWebGestalt: id_type = "entrezgene" self.hidden_vars = { - 'gene_list' : "\n".join(gene_id_list), - 'id_type' : "entrezgene", - 'ref_set' : "genome", - 'enriched_database_category' : "geneontology", - 'enriched_database_name' : "Biological_Process", - 'sig_method' : "fdr", - 'sig_value' : "0.05", - 'enrich_method' : "ORA", - 'fdr_method' : "BH", - 'min_num' : "2" + 'gene_list': "\n".join(gene_id_list), + 'id_type': "entrezgene", + 'ref_set': "genome", + 'enriched_database_category': "geneontology", + 'enriched_database_name': "Biological_Process", + 'sig_method': "fdr", + 'sig_value': "0.05", + 'enrich_method': "ORA", + 'fdr_method': "BH", + 'min_num': "2" } species = self.trait_list[0][1].group.species @@ -78,7 +78,7 @@ def test_chip(trait_list): FROM GeneChip, ProbeFreeze, ProbeSetFreeze WHERE GeneChip.Id = ProbeFreeze.ChipId and ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id and - ProbeSetFreeze.Name = '%s'""" % dataset.name).fetchone() + ProbeSetFreeze.Name = '%s'""" % dataset.name).fetchone() if result: chip_name = result[0] diff --git a/wqflask/wqflask/group_manager.py b/wqflask/wqflask/group_manager.py index 69ee9623..f29c0e4d 100644 --- a/wqflask/wqflask/group_manager.py +++ b/wqflask/wqflask/group_manager.py @@ -40,8 +40,8 @@ def view_group(): user_info = get_user_by_unique_column("user_id", user_id) members_info.append(user_info) - #ZS: This whole part might not scale well with many resources - resources_info = [] + # ZS: This whole part might not scale well with many resources + resources_info = [] all_resources = get_resources() for resource_id in all_resources: resource_info = get_resource_info(resource_id) @@ -82,10 +82,10 @@ def add_users(user_type='members'): group_id = request.form['group_id'] if user_type == "admins": user_emails = request.form['admin_emails_to_add'].split(",") - add_users_to_group(g.user_session.user_id, group_id, user_emails, admins = True) + add_users_to_group(g.user_session.user_id, group_id, user_emails, admins=True) elif user_type == "members": user_emails = request.form['member_emails_to_add'].split(",") - add_users_to_group(g.user_session.user_id, group_id, user_emails, admins = False) + add_users_to_group(g.user_session.user_id, group_id, user_emails, admins=False) return redirect(url_for('view_group', id=group_id)) @@ -103,7 +103,7 @@ def add_or_edit_group(): if "group_name" in params: member_user_ids = set() admin_user_ids = set() - admin_user_ids.add(g.user_session.user_id) #ZS: Always add the user creating the group as an admin + admin_user_ids.add(g.user_session.user_id) # ZS: Always add the user creating the group as an admin if "admin_emails_to_add" in params: admin_emails = params['admin_emails_to_add'].split(",") for email in admin_emails: @@ -124,22 +124,22 @@ def add_or_edit_group(): else: return render_template("admin/create_group.html") -#ZS: Will integrate this later, for now just letting users be added directly -def send_group_invites(group_id, user_email_list = [], user_type="members"): +# ZS: Will integrate this later, for now just letting users be added directly +def 
send_group_invites(group_id, user_email_list=[], user_type="members"): for user_email in user_email_list: user_details = get_user_by_unique_column("email_address", user_email) if user_details: group_info = get_group_info(group_id) - #ZS: Probably not necessary since the group should normally always exist if group_id is being passed here, + # ZS: Probably not necessary since the group should normally always exist if group_id is being passed here, # but it's technically possible to hit it if Redis is cleared out before submitting the new users or something if group_info: - #ZS: Don't add user if they're already an admin or if they're being added a regular user and are already a regular user, + # ZS: Don't add user if they're already an admin or if they're being added a regular user and are already a regular user, # but do add them if they're a regular user and are added as an admin if (user_details['user_id'] in group_info['admins']) or \ ((user_type == "members") and (user_details['user_id'] in group_info['members'])): continue else: - send_verification_email(user_details, template_name = "email/group_verification.txt", key_prefix = "verification_code", subject = "You've been invited to join a GeneNetwork user group") + send_verification_email(user_details, template_name="email/group_verification.txt", key_prefix="verification_code", subject = "You've been invited to join a GeneNetwork user group") else: temp_password = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) user_details = { @@ -152,4 +152,4 @@ def send_group_invites(group_id, user_email_list = [], user_type="members"): save_user(user_details, user_details['user_id']) send_invitation_email(user_email, temp_password) -#@app.route() +# @app.route() diff --git a/wqflask/wqflask/gsearch.py b/wqflask/wqflask/gsearch.py index 9bf23d57..8cb81dcc 100644 --- a/wqflask/wqflask/gsearch.py +++ b/wqflask/wqflask/gsearch.py @@ -102,7 +102,7 @@ class GSearch: this_trait['locus_chr'] = line[16] this_trait['locus_mb'] = line[17] - dataset_ob = SimpleNamespace(id=this_trait["dataset_id"], type="ProbeSet",species=this_trait["species"]) + dataset_ob = SimpleNamespace(id=this_trait["dataset_id"], type="ProbeSet", species=this_trait["species"]) if dataset_ob.id not in dataset_to_permissions: permissions = check_resource_availability(dataset_ob) dataset_to_permissions[dataset_ob.id] = permissions diff --git a/wqflask/wqflask/heatmap/heatmap.py b/wqflask/wqflask/heatmap/heatmap.py index 20e3559a..f8ef7028 100644 --- a/wqflask/wqflask/heatmap/heatmap.py +++ b/wqflask/wqflask/heatmap/heatmap.py @@ -12,7 +12,7 @@ from utility.logger import getLogger Redis = Redis() -logger = getLogger(__name__ ) +logger = getLogger(__name__) class Heatmap: @@ -24,7 +24,7 @@ class Heatmap: self.num_permutations = 5000 self.dataset = self.trait_list[0][1] - self.json_data = {} #The dictionary that will be used to create the json object that contains all the data needed to create the figure + self.json_data = {} # The dictionary that will be used to create the json object that contains all the data needed to create the figure self.all_sample_list = [] self.traits = [] @@ -83,7 +83,7 @@ class Heatmap: self.json_data[trait] = self.trait_results[trait] self.js_data = dict( - json_data = self.json_data + json_data=self.json_data ) def gen_reaper_results(self): diff --git a/wqflask/wqflask/interval_analyst/GeneUtil.py b/wqflask/wqflask/interval_analyst/GeneUtil.py index d0dd7aea..9779878e 100644 --- a/wqflask/wqflask/interval_analyst/GeneUtil.py +++ 
b/wqflask/wqflask/interval_analyst/GeneUtil.py @@ -2,14 +2,14 @@ import string from flask import Flask, g -#Just return a list of dictionaries -#each dictionary contains sub-dictionary +# Just return a list of dictionaries +# each dictionary contains sub-dictionary def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): fetchFields = ['SpeciesId', 'Id', 'GeneSymbol', 'GeneDescription', 'Chromosome', 'TxStart', 'TxEnd', 'Strand', 'GeneID', 'NM_ID', 'kgID', 'GenBankID', 'UnigenID', 'ProteinID', 'AlignID', 'exonCount', 'exonStarts', 'exonEnds', 'cdsStart', 'cdsEnd'] - ##List All Species in the Gene Table + # List All Species in the Gene Table speciesDict = {} results = g.db.execute(""" SELECT Species.Name, GeneList.SpeciesId @@ -20,7 +20,7 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): for item in results: speciesDict[item[0]] = item[1] - ##List current Species and other Species + # List current Species and other Species speciesId = speciesDict[species] otherSpecies = [[X, speciesDict[X]] for X in list(speciesDict.keys())] otherSpecies.remove([species, speciesId]) @@ -43,8 +43,8 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): newdict = {} for j, item in enumerate(fetchFields): newdict[item] = result[j] - #count SNPs if possible - if diffCol and species=='mouse': + # count SNPs if possible + if diffCol and species == 'mouse': newdict["snpCount"] = g.db.execute(""" SELECT count(*) FROM BXDSnpPosition @@ -52,16 +52,16 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): Mb >= %2.6f AND Mb < %2.6f AND StrainId1 = %d AND StrainId2 = %d """ % (chrName, newdict["TxStart"], newdict["TxEnd"], diffCol[0], diffCol[1])).fetchone()[0] - newdict["snpDensity"] = newdict["snpCount"]/(newdict["TxEnd"]-newdict["TxStart"])/1000.0 + newdict["snpDensity"] = newdict["snpCount"] / (newdict["TxEnd"] - newdict["TxStart"]) / 1000.0 else: newdict["snpDensity"] = newdict["snpCount"] = 0 try: - newdict['GeneLength'] = 1000.0*(newdict['TxEnd'] - newdict['TxStart']) + newdict['GeneLength'] = 1000.0 * (newdict['TxEnd'] - newdict['TxStart']) except: pass - #load gene from other Species by the same name + # load gene from other Species by the same name for item in otherSpecies: othSpec, othSpecId = item newdict2 = {} @@ -74,7 +74,7 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): for j, item in enumerate(fetchFields): newdict2[item] = resultsOther[j] - #count SNPs if possible, could be a separate function + # count SNPs if possible, could be a separate function if diffCol and othSpec == 'mouse': newdict2["snpCount"] = g.db.execute(""" SELECT count(*) @@ -84,12 +84,12 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): StrainId1 = %d AND StrainId2 = %d """ % (chrName, newdict["TxStart"], newdict["TxEnd"], diffCol[0], diffCol[1])).fetchone()[0] - newdict2["snpDensity"] = newdict2["snpCount"]/(newdict2["TxEnd"]-newdict2["TxStart"])/1000.0 + newdict2["snpDensity"] = newdict2["snpCount"] / (newdict2["TxEnd"] - newdict2["TxStart"]) / 1000.0 else: newdict2["snpDensity"] = newdict2["snpCount"] = 0 try: - newdict2['GeneLength'] = 1000.0*(newdict2['TxEnd'] - newdict2['TxStart']) + newdict2['GeneLength'] = 1000.0 * (newdict2['TxEnd'] - newdict2['TxStart']) except: pass diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py index 4074f098..3753d1ce 100644 --- a/wqflask/wqflask/marker_regression/display_mapping_results.py +++ 
b/wqflask/wqflask/marker_regression/display_mapping_results.py @@ -51,7 +51,7 @@ try: # Only import this for Python3 from functools import reduce except: pass -logger = utility.logger.getLogger(__name__ ) +logger = utility.logger.getLogger(__name__) RED = ImageColor.getrgb("red") BLUE = ImageColor.getrgb("blue") @@ -390,24 +390,24 @@ class DisplayMappingResults: else: self.genotype = self.dataset.group.read_genotype_file() - #Darwing Options + # Darwing Options try: if self.selectedChr > -1: - self.graphWidth = min(self.GRAPH_MAX_WIDTH, max(self.GRAPH_MIN_WIDTH, int(start_vars['graphWidth']))) + self.graphWidth = min(self.GRAPH_MAX_WIDTH, max(self.GRAPH_MIN_WIDTH, int(start_vars['graphWidth']))) else: - self.graphWidth = min(self.GRAPH_MAX_WIDTH, max(self.MULT_GRAPH_MIN_WIDTH, int(start_vars['graphWidth']))) + self.graphWidth = min(self.GRAPH_MAX_WIDTH, max(self.MULT_GRAPH_MIN_WIDTH, int(start_vars['graphWidth']))) except: if self.selectedChr > -1: - self.graphWidth = self.GRAPH_DEFAULT_WIDTH + self.graphWidth = self.GRAPH_DEFAULT_WIDTH else: - self.graphWidth = self.MULT_GRAPH_DEFAULT_WIDTH + self.graphWidth = self.MULT_GRAPH_DEFAULT_WIDTH -## BEGIN HaplotypeAnalyst +# BEGIN HaplotypeAnalyst if 'haplotypeAnalystCheck' in list(start_vars.keys()): self.haplotypeAnalystChecked = start_vars['haplotypeAnalystCheck'] else: self.haplotypeAnalystChecked = False -## END HaplotypeAnalyst +# END HaplotypeAnalyst self.graphHeight = self.GRAPH_DEFAULT_HEIGHT self.dominanceChecked = False @@ -446,7 +446,7 @@ class DisplayMappingResults: except: self.lrsMax = 0 - #Trait Infos + # Trait Infos self.identification = "" ################################################################ @@ -471,10 +471,10 @@ class DisplayMappingResults: Chr_Length.OrderId """ % (self.dataset.group.name, ", ".join(["'%s'" % X[0] for X in self.ChrList[1:]]))) - self.ChrLengthMbList = [x[0]/1000000.0 for x in self.ChrLengthMbList] - self.ChrLengthMbSum = reduce(lambda x, y:x+y, self.ChrLengthMbList, 0.0) + self.ChrLengthMbList = [x[0] / 1000000.0 for x in self.ChrLengthMbList] + self.ChrLengthMbSum = reduce(lambda x, y: x + y, self.ChrLengthMbList, 0.0) if self.ChrLengthMbList: - self.MbGraphInterval = self.ChrLengthMbSum/(len(self.ChrLengthMbList)*12) #Empirical Mb interval + self.MbGraphInterval = self.ChrLengthMbSum / (len(self.ChrLengthMbList) * 12) # Empirical Mb interval else: self.MbGraphInterval = 1 @@ -482,18 +482,18 @@ class DisplayMappingResults: for i, _chr in enumerate(self.genotype): self.ChrLengthCMList.append(_chr[-1].cM - _chr[0].cM) - self.ChrLengthCMSum = reduce(lambda x, y:x+y, self.ChrLengthCMList, 0.0) + self.ChrLengthCMSum = reduce(lambda x, y: x + y, self.ChrLengthCMList, 0.0) if self.plotScale == 'physic': - self.GraphInterval = self.MbGraphInterval #Mb + self.GraphInterval = self.MbGraphInterval # Mb else: - self.GraphInterval = self.cMGraphInterval #cM + self.GraphInterval = self.cMGraphInterval # cM -## BEGIN HaplotypeAnalyst -## count the amount of individuals to be plotted, and increase self.graphHeight +# BEGIN HaplotypeAnalyst +# count the amount of individuals to be plotted, and increase self.graphHeight if self.haplotypeAnalystChecked and self.selectedChr > -1: thisTrait = self.this_trait - smd=[] + smd = [] for sample in self.sample_vals_dict.keys(): if self.sample_vals_dict[sample] != "x": temp = GeneralObject(name=sample, value=float(self.sample_vals_dict[sample])) @@ -501,19 +501,19 @@ class DisplayMappingResults: else: continue samplelist = list(self.genotype.prgy) - for j, _geno in 
enumerate (self.genotype[0][1].genotype): + for j, _geno in enumerate(self.genotype[0][1].genotype): for item in smd: if item.name == samplelist[j]: self.NR_INDIVIDUALS = self.NR_INDIVIDUALS + 1 # default: - self.graphHeight = self.graphHeight + 2 * (self.NR_INDIVIDUALS+10) * self.EACH_GENE_HEIGHT -## END HaplotypeAnalyst + self.graphHeight = self.graphHeight + 2 * (self.NR_INDIVIDUALS + 10) * self.EACH_GENE_HEIGHT +# END HaplotypeAnalyst ######################### - ## Get the sorting column + # Get the sorting column ######################### RISet = self.dataset.group.name if RISet in ('AXB', 'BXA', 'AXBXA'): @@ -529,7 +529,7 @@ class DisplayMappingResults: elif RISet in ('LXS'): self.diffCol = ['ILS', 'ISS'] else: - self.diffCol= [] + self.diffCol = [] for i, strain in enumerate(self.diffCol): self.diffCol[i] = g.db.execute("select Id from Strain where Symbol = %s", strain).fetchone()[0] @@ -546,7 +546,7 @@ class DisplayMappingResults: geneTable = "" self.geneCol = None - if self.plotScale == 'physic' and self.selectedChr > -1 and (self.intervalAnalystChecked or self.geneChecked): + if self.plotScale == 'physic' and self.selectedChr > -1 and (self.intervalAnalystChecked or self.geneChecked): # Draw the genes for this chromosome / region of this chromosome webqtldatabase = self.dataset.name @@ -580,11 +580,11 @@ class DisplayMappingResults: showLocusForm = "" intCanvas = Image.new("RGBA", size=(self.graphWidth, self.graphHeight)) with Bench("Drawing Plot"): - gifmap = self.plotIntMapping(intCanvas, startMb = self.startMb, endMb = self.endMb, showLocusForm= showLocusForm) + gifmap = self.plotIntMapping(intCanvas, startMb=self.startMb, endMb=self.endMb, showLocusForm= showLocusForm) self.gifmap = gifmap.__str__() - self.filename= webqtlUtil.genRandStr("Itvl_") + self.filename = webqtlUtil.genRandStr("Itvl_") intCanvas.save( "{}.png".format( os.path.join(webqtlConfig.GENERATED_IMAGE_DIR, self.filename)), @@ -594,20 +594,20 @@ class DisplayMappingResults: border="0", usemap='#WebQTLImageMap' ) - #Scales plot differently for high resolution + # Scales plot differently for high resolution if self.draw2X: - intCanvasX2 = Image.new("RGBA", size=(self.graphWidth*2, self.graphHeight*2)) - gifmapX2 = self.plotIntMapping(intCanvasX2, startMb = self.startMb, endMb = self.endMb, showLocusForm= showLocusForm, zoom=2) + intCanvasX2 = Image.new("RGBA", size=(self.graphWidth * 2, self.graphHeight * 2)) + gifmapX2 = self.plotIntMapping(intCanvasX2, startMb=self.startMb, endMb=self.endMb, showLocusForm= showLocusForm, zoom=2) intCanvasX2.save( "{}.png".format( os.path.join(webqtlConfig.GENERATED_IMAGE_DIR, - self.filename+"X2")), + self.filename + "X2")), format='png') ################################################################ # Outputs goes here ################################################################ - #this form is used for opening Locus page or trait page, only available for genetic mapping + # this form is used for opening Locus page or trait page, only available for genetic mapping if showLocusForm: showLocusForm = HtmlGenWrapper.create_form_tag( cgi=os.path.join(webqtlConfig.CGIDIR, webqtlConfig.SCRIPTFILE), @@ -615,7 +615,7 @@ class DisplayMappingResults: name=showLocusForm, submit=HtmlGenWrapper.create_input_tag(type_='hidden')) - hddn = {'FormID':'showDatabase', 'ProbeSetID':'_','database':fd.RISet+"Geno",'CellID':'_', 'RISet':fd.RISet, 'incparentsf1':'ON'} + hddn = {'FormID': 'showDatabase', 'ProbeSetID': '_', 'database': fd.RISet+"Geno",'CellID':'_', 'RISet':fd.RISet, 
'incparentsf1':'ON'} for key in hddn.keys(): showLocusForm.append(HtmlGenWrapper.create_input_tag( name=key, value=hddn[key], type_='hidden')) @@ -636,9 +636,9 @@ class DisplayMappingResults: btminfo.append(HtmlGenWrapper.create_br_tag()) btminfo.append('Mapping using genotype data as a trait will result in infinity LRS at one locus. In order to display the result properly, all LRSs higher than 100 are capped at 100.') - def plotIntMapping(self, canvas, offset= (80, 120, 90, 100), zoom = 1, startMb = None, endMb = None, showLocusForm = ""): + def plotIntMapping(self, canvas, offset=(80, 120, 90, 100), zoom=1, startMb= None, endMb = None, showLocusForm = ""): im_drawer = ImageDraw.Draw(canvas) - #calculating margins + # calculating margins xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset if self.multipleInterval: yTopOffset = max(90, yTopOffset) @@ -659,34 +659,34 @@ class DisplayMappingResults: xLeftOffset += 20 fontZoom = 1.5 - xLeftOffset = int(xLeftOffset*fontZoom) - xRightOffset = int(xRightOffset*fontZoom) - yBottomOffset = int(yBottomOffset*fontZoom) + xLeftOffset = int(xLeftOffset * fontZoom) + xRightOffset = int(xRightOffset * fontZoom) + yBottomOffset = int(yBottomOffset * fontZoom) cWidth = canvas.size[0] cHeight = canvas.size[1] plotWidth = cWidth - xLeftOffset - xRightOffset plotHeight = cHeight - yTopOffset - yBottomOffset - #Drawing Area Height + # Drawing Area Height drawAreaHeight = plotHeight if self.plotScale == 'physic' and self.selectedChr > -1: if self.dataset.group.species == "mouse" or self.dataset.group.species == "rat": - drawAreaHeight -= 4*self.BAND_HEIGHT + 4*self.BAND_SPACING+ 10*zoom + drawAreaHeight -= 4 * self.BAND_HEIGHT + 4 * self.BAND_SPACING + 10 * zoom else: - drawAreaHeight -= 3*self.BAND_HEIGHT + 3*self.BAND_SPACING+ 10*zoom + drawAreaHeight -= 3 * self.BAND_HEIGHT + 3 * self.BAND_SPACING + 10 * zoom if self.geneChecked: - drawAreaHeight -= self.NUM_GENE_ROWS*self.EACH_GENE_HEIGHT + 3*self.BAND_SPACING + 10*zoom + drawAreaHeight -= self.NUM_GENE_ROWS * self.EACH_GENE_HEIGHT + 3 * self.BAND_SPACING + 10 * zoom else: if self.selectedChr > -1: drawAreaHeight -= 20 else: drawAreaHeight -= 30 -## BEGIN HaplotypeAnalyst +# BEGIN HaplotypeAnalyst if self.haplotypeAnalystChecked and self.selectedChr > -1: - drawAreaHeight -= self.EACH_GENE_HEIGHT * (self.NR_INDIVIDUALS+10) * 2 * zoom -## END HaplotypeAnalyst + drawAreaHeight -= self.EACH_GENE_HEIGHT * (self.NR_INDIVIDUALS + 10) * 2 * zoom +# END HaplotypeAnalyst if zoom == 2: drawAreaHeight -= 60 @@ -696,42 +696,42 @@ class DisplayMappingResults: newoffset = (xLeftOffset, xRightOffset, yTopOffset, yBottomOffset) # Draw the alternating-color background first and get plotXScale - plotXScale = self.drawGraphBackground(canvas, gifmap, offset=newoffset, zoom= zoom, startMb=startMb, endMb = endMb) + plotXScale = self.drawGraphBackground(canvas, gifmap, offset=newoffset, zoom=zoom, startMb=startMb, endMb=endMb) - #draw bootstap + # draw bootstap if self.bootChecked and not self.multipleInterval: - self.drawBootStrapResult(canvas, self.nboot, drawAreaHeight, plotXScale, offset=newoffset, zoom= zoom, startMb=startMb, endMb = endMb) + self.drawBootStrapResult(canvas, self.nboot, drawAreaHeight, plotXScale, offset=newoffset, zoom=zoom, startMb=startMb, endMb=endMb) # Draw clickable region and gene band if selected if self.plotScale == 'physic' and self.selectedChr > -1: - self.drawClickBand(canvas, gifmap, plotXScale, offset=newoffset, zoom = zoom, startMb=startMb, endMb = endMb) + 
self.drawClickBand(canvas, gifmap, plotXScale, offset=newoffset, zoom=zoom, startMb=startMb, endMb=endMb) if self.geneChecked and self.geneCol: - self.drawGeneBand(canvas, gifmap, plotXScale, offset=newoffset, zoom = zoom, startMb=startMb, endMb = endMb) + self.drawGeneBand(canvas, gifmap, plotXScale, offset=newoffset, zoom=zoom, startMb=startMb, endMb=endMb) if self.SNPChecked: - self.drawSNPTrackNew(canvas, offset=newoffset, zoom = 2*zoom, startMb=startMb, endMb = endMb) -## BEGIN HaplotypeAnalyst + self.drawSNPTrackNew(canvas, offset=newoffset, zoom=2 * zoom, startMb=startMb, endMb = endMb) +# BEGIN HaplotypeAnalyst if self.haplotypeAnalystChecked: - self.drawHaplotypeBand(canvas, gifmap, plotXScale, offset=newoffset, zoom = zoom, startMb=startMb, endMb = endMb) -## END HaplotypeAnalyst + self.drawHaplotypeBand(canvas, gifmap, plotXScale, offset=newoffset, zoom=zoom, startMb=startMb, endMb=endMb) +# END HaplotypeAnalyst # Draw X axis - self.drawXAxis(canvas, drawAreaHeight, gifmap, plotXScale, showLocusForm, offset=newoffset, zoom = zoom, startMb=startMb, endMb = endMb) + self.drawXAxis(canvas, drawAreaHeight, gifmap, plotXScale, showLocusForm, offset=newoffset, zoom=zoom, startMb=startMb, endMb=endMb) # Draw QTL curve - self.drawQTL(canvas, drawAreaHeight, gifmap, plotXScale, offset=newoffset, zoom= zoom, startMb=startMb, endMb = endMb) + self.drawQTL(canvas, drawAreaHeight, gifmap, plotXScale, offset=newoffset, zoom=zoom, startMb=startMb, endMb=endMb) - #draw legend + # draw legend if self.multipleInterval: self.drawMultiTraitName(fd, canvas, gifmap, showLocusForm, offset=newoffset) elif self.legendChecked: - self.drawLegendPanel(canvas, offset=newoffset, zoom = zoom) + self.drawLegendPanel(canvas, offset=newoffset, zoom=zoom) else: pass - #draw position, no need to use a separate function - self.drawProbeSetPosition(canvas, plotXScale, offset=newoffset, zoom = zoom) + # draw position, no need to use a separate function + self.drawProbeSetPosition(canvas, plotXScale, offset=newoffset, zoom=zoom) return gifmap - def drawBootStrapResult(self, canvas, nboot, drawAreaHeight, plotXScale, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None): + def drawBootStrapResult(self, canvas, nboot, drawAreaHeight, plotXScale, offset=(40, 120, 80, 10), zoom=1, startMb= None, endMb = None): im_drawer = ImageDraw.Draw(canvas) xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset plotWidth = canvas.size[0] - xLeftOffset - xRightOffset @@ -741,9 +741,9 @@ class DisplayMappingResults: if zoom == 2: fontZoom = 1.5 - bootHeightThresh = drawAreaHeight*3/4 + bootHeightThresh = drawAreaHeight * 3 / 4 - #break bootstrap result into groups + # break bootstrap result into groups BootCoord = [] i = 0 previous_chr = None @@ -751,7 +751,7 @@ class DisplayMappingResults: startX = xLeftOffset BootChrCoord = [] - if self.selectedChr == -1: #ZS: If viewing full genome/all chromosomes + if self.selectedChr == -1: # ZS: If viewing full genome/all chromosomes for i, result in enumerate(self.qtlresults): if result['chr'] != previous_chr: previous_chr = result['chr'] @@ -759,28 +759,28 @@ class DisplayMappingResults: if previous_chr_as_int != 1: BootCoord.append(BootChrCoord) BootChrCoord = [] - startX += (self.ChrLengthDistList[previous_chr_as_int - 2] + self.GraphInterval)*plotXScale + startX += (self.ChrLengthDistList[previous_chr_as_int - 2] + self.GraphInterval) * plotXScale if self.plotScale == 'physic': - Xc = startX + (result['Mb']-self.startMb)*plotXScale + Xc = startX + (result['Mb'] - 
self.startMb) * plotXScale else: - Xc = startX + (result['cM']-self.qtlresults[0]['cM'])*plotXScale + Xc = startX + (result['cM'] - self.qtlresults[0]['cM']) * plotXScale BootChrCoord.append([Xc, self.bootResult[i]]) else: for i, result in enumerate(self.qtlresults): if str(result['chr']) == str(self.ChrList[self.selectedChr][0]): if self.plotScale == 'physic': - Xc = startX + (result['Mb']-self.startMb)*plotXScale + Xc = startX + (result['Mb'] - self.startMb) * plotXScale else: - Xc = startX + (result['cM']-self.qtlresults[0]['cM'])*plotXScale + Xc = startX + (result['cM'] - self.qtlresults[0]['cM']) * plotXScale BootChrCoord.append([Xc, self.bootResult[i]]) BootCoord = [BootChrCoord] - #reduce bootResult + # reduce bootResult if self.selectedChr > -1: maxBootBar = 80.0 else: maxBootBar = 200.0 - stepBootStrap = plotWidth/maxBootBar + stepBootStrap = plotWidth / maxBootBar reducedBootCoord = [] maxBootCount = 0 @@ -801,8 +801,8 @@ class DisplayMappingResults: bootCount = BootChrCoord[i][1] # end else # end for - #add last piece - if BootChrCoord[-1][0] - bootStartPixX > stepBootStrap/2.0: + # add last piece + if BootChrCoord[-1][0] - bootStartPixX > stepBootStrap / 2.0: reducedBootCoord.append([bootStartPixX, BootChrCoord[-1][0], bootCount]) else: reducedBootCoord[-1][2] += bootCount @@ -815,64 +815,64 @@ class DisplayMappingResults: if item[2] > 0: if item[0] < xLeftOffset: item[0] = xLeftOffset - if item[0] > xLeftOffset+plotWidth: - item[0] = xLeftOffset+plotWidth + if item[0] > xLeftOffset + plotWidth: + item[0] = xLeftOffset + plotWidth if item[1] < xLeftOffset: item[1] = xLeftOffset - if item[1] > xLeftOffset+plotWidth: - item[1] = xLeftOffset+plotWidth + if item[1] > xLeftOffset + plotWidth: + item[1] = xLeftOffset + plotWidth if item[0] != item[1]: im_drawer.rectangle( xy=((item[0], yZero), - (item[1], yZero - item[2]*bootHeightThresh/maxBootCount)), + (item[1], yZero - item[2] * bootHeightThresh / maxBootCount)), fill=self.BOOTSTRAP_BOX_COLOR, outline=BLACK) - ###draw boot scale - highestPercent = (maxBootCount*100.0)/nboot + # draw boot scale + highestPercent = (maxBootCount * 100.0) / nboot bootScale = Plot.detScale(0, highestPercent) - bootScale = Plot.frange(bootScale[0], bootScale[1], bootScale[1]/bootScale[2]) + bootScale = Plot.frange(bootScale[0], bootScale[1], bootScale[1] / bootScale[2]) bootScale = bootScale[:-1] + [highestPercent] - bootOffset = 50*fontZoom - bootScaleFont=ImageFont.truetype(font=VERDANA_FILE, size=13*fontZoom) + bootOffset = 50 * fontZoom + bootScaleFont = ImageFont.truetype(font=VERDANA_FILE, size=13 * fontZoom) im_drawer.rectangle( - xy=((canvas.size[0]-bootOffset, yZero-bootHeightThresh), - (canvas.size[0]-bootOffset-15*zoom, yZero)), - fill = YELLOW, outline=BLACK) + xy=((canvas.size[0] - bootOffset, yZero - bootHeightThresh), + (canvas.size[0] - bootOffset - 15*zoom, yZero)), + fill=YELLOW, outline=BLACK) im_drawer.line( - xy=((canvas.size[0]-bootOffset+4, yZero), - (canvas.size[0]-bootOffset, yZero)), + xy=((canvas.size[0] - bootOffset + 4, yZero), + (canvas.size[0] - bootOffset, yZero)), fill=BLACK) TEXT_Y_DISPLACEMENT = -8 - im_drawer.text(xy=(canvas.size[0]-bootOffset+10, yZero+TEXT_Y_DISPLACEMENT), text='0%', + im_drawer.text(xy=(canvas.size[0] - bootOffset + 10, yZero+TEXT_Y_DISPLACEMENT), text='0%', font=bootScaleFont, fill=BLACK) for item in bootScale: if item == 0: continue - bootY = yZero-bootHeightThresh*item/highestPercent + bootY = yZero - bootHeightThresh * item / highestPercent im_drawer.line( - xy=((canvas.size[0]-bootOffset+4, 
bootY), - (canvas.size[0]-bootOffset, bootY)), + xy=((canvas.size[0] - bootOffset + 4, bootY), + (canvas.size[0] - bootOffset, bootY)), fill=BLACK) - im_drawer.text(xy=(canvas.size[0]-bootOffset+10, bootY+TEXT_Y_DISPLACEMENT), - text='%2.1f'%item, font=bootScaleFont, fill=BLACK) + im_drawer.text(xy=(canvas.size[0] - bootOffset + 10, bootY+TEXT_Y_DISPLACEMENT), + text='%2.1f' % item, font=bootScaleFont, fill=BLACK) if self.legendChecked: if hasattr(self.traitList[0], 'chr') and hasattr(self.traitList[0], 'mb'): startPosY = 30 else: startPosY = 15 - smallLabelFont = ImageFont.truetype(font=TREBUC_FILE, size=12*fontZoom) + smallLabelFont = ImageFont.truetype(font=TREBUC_FILE, size=12 * fontZoom) leftOffset = canvas.size[0] - xRightOffset - 190 im_drawer.rectangle( - xy=((leftOffset, startPosY-6), (leftOffset+12, startPosY+6)), + xy=((leftOffset, startPosY - 6), (leftOffset + 12, startPosY + 6)), fill=YELLOW, outline=BLACK) - im_drawer.text(xy=(canvas.size[0] - xRightOffset - 170, startPosY+TEXT_Y_DISPLACEMENT), + im_drawer.text(xy=(canvas.size[0] - xRightOffset - 170, startPosY + TEXT_Y_DISPLACEMENT), text='Frequency of the Peak LRS', font=smallLabelFont, fill=BLACK) - def drawProbeSetPosition(self, canvas, plotXScale, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None): + def drawProbeSetPosition(self, canvas, plotXScale, offset=(40, 120, 80, 10), zoom=1, startMb= None, endMb = None): im_drawer = ImageDraw.Draw(canvas) if len(self.traitList) != 1: return @@ -896,21 +896,21 @@ class DisplayMappingResults: if self.plotScale == "physic": this_chr = str(self.ChrList[self.selectedChr][0]) else: - this_chr = str(self.ChrList[self.selectedChr][1]+1) + this_chr = str(self.ChrList[self.selectedChr][1] + 1) if self.plotScale == 'physic': if self.selectedChr > -1: if this_chr != Chr or Mb < self.startMb or Mb > self.endMb: return else: - locPixel = xLeftOffset + (Mb-self.startMb)*plotXScale + locPixel = xLeftOffset + (Mb - self.startMb) * plotXScale else: locPixel = xLeftOffset for i, _chr in enumerate(self.ChrList[1:]): if _chr[0] != Chr: - locPixel += (self.ChrLengthDistList[i] + self.GraphInterval)*plotXScale + locPixel += (self.ChrLengthDistList[i] + self.GraphInterval) * plotXScale else: - locPixel += Mb*plotXScale + locPixel += Mb * plotXScale break else: if self.selectedChr > -1: @@ -918,33 +918,33 @@ class DisplayMappingResults: if qtlresult['chr'] != self.selectedChr: continue - if i==0 and qtlresult['Mb'] >= Mb: - locPixel=-1 + if i == 0 and qtlresult['Mb'] >= Mb: + locPixel = -1 break - #the trait's position is between two traits - if i > 0 and self.qtlresults[i-1]['Mb'] < Mb and qtlresult['Mb'] >= Mb: - locPixel = xLeftOffset + plotXScale*(self.qtlresults[i-1]['Mb']+(qtlresult['Mb']-self.qtlresults[i-1]['Mb'])*(Mb - self.qtlresults[i-1]['Mb'])/(qtlresult['Mb']-self.qtlresults[i-1]['Mb'])) + # the trait's position is between two traits + if i > 0 and self.qtlresults[i - 1]['Mb'] < Mb and qtlresult['Mb'] >= Mb: + locPixel = xLeftOffset + plotXScale * (self.qtlresults[i - 1]['Mb'] + (qtlresult['Mb'] - self.qtlresults[i - 1]['Mb']) * (Mb - self.qtlresults[i - 1]['Mb']) / (qtlresult['Mb'] - self.qtlresults[i - 1]['Mb'])) break - #the trait's position is on the right of the last genotype - if i==len(self.qtlresults) and Mb>=qtlresult['Mb']: + # the trait's position is on the right of the last genotype + if i == len(self.qtlresults) and Mb >= qtlresult['Mb']: locPixel = -1 else: locPixel = xLeftOffset for i, _chr in enumerate(self.ChrList): - if i < (len(self.ChrList)-1): + if i < 
(len(self.ChrList) - 1): if _chr != Chr: - locPixel += (self.ChrLengthDistList[i] + self.GraphInterval)*plotXScale + locPixel += (self.ChrLengthDistList[i] + self.GraphInterval) * plotXScale else: - locPixel += (Mb*(_chr[-1].cM-_chr[0].cM)/self.ChrLengthCMList[i])*plotXScale + locPixel += (Mb * (_chr[-1].cM - _chr[0].cM) / self.ChrLengthCMList[i]) * plotXScale break if locPixel >= 0 and self.plotScale == 'physic': - traitPixel = ((locPixel, yZero), (locPixel-7, yZero+14), (locPixel+7, yZero+14)) + traitPixel = ((locPixel, yZero), (locPixel - 7, yZero + 14), (locPixel + 7, yZero + 14)) draw_open_polygon(canvas, xy=traitPixel, outline=BLACK, fill=self.TRANSCRIPT_LOCATION_COLOR) - def drawSNPTrackNew(self, canvas, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None): + def drawSNPTrackNew(self, canvas, offset=(40, 120, 80, 10), zoom=1, startMb= None, endMb = None): im_drawer = ImageDraw.Draw(canvas) if self.plotScale != 'physic' or self.selectedChr == -1 or not self.diffCol: return @@ -963,36 +963,36 @@ class DisplayMappingResults: #chrName = self.genotype[0].name chrName = self.ChrList[self.selectedChr][0] - stepMb = (endMb-startMb)/plotWidth + stepMb = (endMb - startMb) / plotWidth strainId1, strainId2 = self.diffCol SNPCounts = [] - while startMb= %2.6f AND Mb < %2.6f AND StrainId1 = %d AND StrainId2 = %d - """ % (chrName, startMb, startMb+stepMb, strainId1, strainId2)).fetchone()[0] + """ % (chrName, startMb, startMb + stepMb, strainId1, strainId2)).fetchone()[0] SNPCounts.append(snp_count) startMb += stepMb if (len(SNPCounts) > 0): maxCount = max(SNPCounts) - if maxCount>0: + if maxCount > 0: for i in range(xLeftOffset, xLeftOffset + plotWidth): - snpDensity = float(SNPCounts[i-xLeftOffset]*SNP_HEIGHT_MODIFIER/maxCount) + snpDensity = float(SNPCounts[i - xLeftOffset] * SNP_HEIGHT_MODIFIER / maxCount) im_drawer.line( - xy=((i, drawSNPLocationY+(snpDensity)*zoom), - (i, drawSNPLocationY-(snpDensity)*zoom)), + xy=((i, drawSNPLocationY + (snpDensity) * zoom), + (i, drawSNPLocationY - (snpDensity) * zoom)), fill=self.SNP_COLOR, width=1) - def drawMultiTraitName(self, fd, canvas, gifmap, showLocusForm, offset= (40, 120, 80, 10), zoom = 1): + def drawMultiTraitName(self, fd, canvas, gifmap, showLocusForm, offset=(40, 120, 80, 10), zoom=1): nameWidths = [] yPaddingTop = 10 - colorFont=ImageFont.truetype(font=TREBUC_FILE, size=12) - if len(self.qtlresults) >20 and self.selectedChr > -1: + colorFont = ImageFont.truetype(font=TREBUC_FILE, size=12) + if len(self.qtlresults) > 20 and self.selectedChr > -1: rightShift = 20 rightShiftStep = 60 rectWidth = 10 @@ -1004,7 +1004,7 @@ class DisplayMappingResults: for k, thisTrait in enumerate(self.traitList): thisLRSColor = self.colorCollection[k] kstep = k % 4 - if k!=0 and kstep==0: + if k != 0 and kstep == 0: if nameWidths: rightShiftStep = max(nameWidths[-4:]) + rectWidth + 20 rightShift += rightShiftStep @@ -1014,19 +1014,19 @@ class DisplayMappingResults: nameWidths.append(nameWidth) im_drawer.rectangle( - xy=((rightShift, yPaddingTop+kstep*15), - (rectWidth+rightShift, yPaddingTop+10+kstep*15)), + xy=((rightShift, yPaddingTop + kstep * 15), + (rectWidth + rightShift, yPaddingTop + 10+kstep*15)), fill=thisLRSColor, outline=BLACK) im_drawer.text( - text=name, xy=(rectWidth+2+rightShift, yPaddingTop+10+kstep*15), + text=name, xy=(rectWidth + 2 + rightShift, yPaddingTop + 10 + kstep * 15), font=colorFont, fill=BLACK) if thisTrait.db: - COORDS = "%d,%d,%d,%d" %(rectWidth+2+rightShift, yPaddingTop+kstep*15, rectWidth+2+rightShift+nameWidth, 
yPaddingTop+10+kstep*15,) - HREF= "javascript:showDatabase3('%s','%s','%s','');" % (showLocusForm, thisTrait.db.name, thisTrait.name) + COORDS = "%d,%d,%d,%d" % (rectWidth + 2 + rightShift, yPaddingTop + kstep * 15, rectWidth + 2 + rightShift + nameWidth, yPaddingTop + 10 + kstep * 15,) + HREF = "javascript:showDatabase3('%s','%s','%s','');" % (showLocusForm, thisTrait.db.name, thisTrait.name) Areas = HtmlGenWrapper.create_area_tag(shape='rect', coords=COORDS, href=HREF) - gifmap.append(Areas) ### TODO + gifmap.append(Areas) # TODO - def drawLegendPanel(self, canvas, offset= (40, 120, 80, 10), zoom = 1): + def drawLegendPanel(self, canvas, offset=(40, 120, 80, 10), zoom=1): im_drawer = ImageDraw.Draw(canvas) xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset plotWidth = canvas.size[0] - xLeftOffset - xRightOffset @@ -1037,80 +1037,80 @@ class DisplayMappingResults: if zoom == 2: fontZoom = 1.5 - labelFont=ImageFont.truetype(font=TREBUC_FILE, size=12*fontZoom) + labelFont = ImageFont.truetype(font=TREBUC_FILE, size=12 * fontZoom) startPosY = 15 - stepPosY = 12*fontZoom + stepPosY = 12 * fontZoom startPosX = canvas.size[0] - xRightOffset - 415 if hasattr(self.traitList[0], 'chr') and hasattr(self.traitList[0], 'mb'): startPosY = 15 nCol = 2 - smallLabelFont = ImageFont.truetype(font=TREBUC_FILE, size=12*fontZoom) + smallLabelFont = ImageFont.truetype(font=TREBUC_FILE, size=12 * fontZoom) leftOffset = canvas.size[0] - xRightOffset - 190 draw_open_polygon( canvas, xy=( - (leftOffset + 6, startPosY-7), - (leftOffset - 1, startPosY+7), - (leftOffset + 13, startPosY+7)), + (leftOffset + 6, startPosY - 7), + (leftOffset - 1, startPosY + 7), + (leftOffset + 13, startPosY + 7)), outline=BLACK, fill=self.TRANSCRIPT_LOCATION_COLOR ) TEXT_Y_DISPLACEMENT = -8 im_drawer.text( text="Sequence Site", - xy=(leftOffset + 20, startPosY+TEXT_Y_DISPLACEMENT), font=smallLabelFont, + xy=(leftOffset + 20, startPosY + TEXT_Y_DISPLACEMENT), font=smallLabelFont, fill=self.TOP_RIGHT_INFO_COLOR) if self.manhattan_plot != True: im_drawer.line( - xy=((startPosX, startPosY), (startPosX+32, startPosY)), + xy=((startPosX, startPosY), (startPosX + 32, startPosY)), fill=self.LRS_COLOR, width=2) im_drawer.text( - text=self.LRS_LOD, xy=(startPosX+40, startPosY+TEXT_Y_DISPLACEMENT), + text=self.LRS_LOD, xy=(startPosX + 40, startPosY + TEXT_Y_DISPLACEMENT), font=labelFont, fill=BLACK) startPosY += stepPosY if self.additiveChecked: im_drawer.line( - xy=((startPosX, startPosY), (startPosX+17, startPosY)), + xy=((startPosX, startPosY), (startPosX + 17, startPosY)), fill=self.ADDITIVE_COLOR_POSITIVE, width=2) im_drawer.line( - xy=((startPosX+18, startPosY), (startPosX+32, startPosY)), + xy=((startPosX + 18, startPosY), (startPosX + 32, startPosY)), fill=self.ADDITIVE_COLOR_NEGATIVE, width=2) im_drawer.text( - text='Additive Effect', xy=(startPosX+40, startPosY+TEXT_Y_DISPLACEMENT), + text='Additive Effect', xy=(startPosX + 40, startPosY + TEXT_Y_DISPLACEMENT), font=labelFont, fill=BLACK) startPosY += stepPosY if self.genotype.type == 'intercross' and self.dominanceChecked: im_drawer.line( - xy=((startPosX, startPosY), (startPosX+17, startPosY)), + xy=((startPosX, startPosY), (startPosX + 17, startPosY)), fill=self.DOMINANCE_COLOR_POSITIVE, width=4) im_drawer.line( - xy=((startPosX+18, startPosY), (startPosX+35, startPosY)), + xy=((startPosX + 18, startPosY), (startPosX + 35, startPosY)), fill=self.DOMINANCE_COLOR_NEGATIVE, width=4) im_drawer.text( - text='Dominance Effect', xy=(startPosX+42, startPosY+5), + text='Dominance 
Effect', xy=(startPosX + 42, startPosY + 5), font=labelFont, fill=BLACK) startPosY += stepPosY if self.haplotypeAnalystChecked: im_drawer.line( - xy=((startPosX-34, startPosY), (startPosX-17, startPosY)), + xy=((startPosX - 34, startPosY), (startPosX - 17, startPosY)), fill=self.HAPLOTYPE_POSITIVE, width=4) im_drawer.line( - xy=((startPosX-17, startPosY), (startPosX, startPosY)), + xy=((startPosX - 17, startPosY), (startPosX, startPosY)), fill=self.HAPLOTYPE_NEGATIVE, width=4) im_drawer.line( - xy=((startPosX, startPosY), (startPosX+17, startPosY)), + xy=((startPosX, startPosY), (startPosX + 17, startPosY)), fill=self.HAPLOTYPE_HETEROZYGOUS, width=4) im_drawer.line( - xy=((startPosX+17, startPosY), (startPosX+34, startPosY)), + xy=((startPosX + 17, startPosY), (startPosX + 34, startPosY)), fill=self.HAPLOTYPE_RECOMBINATION, width=4) im_drawer.text( text='Haplotypes (Pat, Mat, Het, Unk)', - xy=(startPosX+41, startPosY+TEXT_Y_DISPLACEMENT), font=labelFont, fill=BLACK) + xy=(startPosX + 41, startPosY + TEXT_Y_DISPLACEMENT), font=labelFont, fill=BLACK) startPosY += stepPosY if self.permChecked and self.nperm > 0: @@ -1118,20 +1118,20 @@ class DisplayMappingResults: if self.multipleInterval and not self.bootChecked: thisStartX = canvas.size[0] - xRightOffset - 205 im_drawer.line( - xy=((thisStartX, startPosY), ( startPosX + 32, startPosY)), + xy=((thisStartX, startPosY), (startPosX + 32, startPosY)), fill=self.SIGNIFICANT_COLOR, width=self.SIGNIFICANT_WIDTH) im_drawer.line( - xy=((thisStartX, startPosY + stepPosY), ( startPosX + 32, startPosY + stepPosY)), + xy=((thisStartX, startPosY + stepPosY), (startPosX + 32, startPosY + stepPosY)), fill=self.SUGGESTIVE_COLOR, width=self.SUGGESTIVE_WIDTH) im_drawer.text( text='Significant %s = %2.2f' % (self.LRS_LOD, self.significant), - xy=(thisStartX+40, startPosY+TEXT_Y_DISPLACEMENT), font=labelFont, fill=BLACK) + xy=(thisStartX + 40, startPosY + TEXT_Y_DISPLACEMENT), font=labelFont, fill=BLACK) im_drawer.text( text='Suggestive %s = %2.2f' % (self.LRS_LOD, self.suggestive), - xy=(thisStartX+40, startPosY + TEXT_Y_DISPLACEMENT +stepPosY), font=labelFont, + xy=(thisStartX + 40, startPosY + TEXT_Y_DISPLACEMENT + stepPosY), font=labelFont, fill=BLACK) - labelFont = ImageFont.truetype(font=VERDANA_FILE, size=12*fontZoom) + labelFont = ImageFont.truetype(font=VERDANA_FILE, size=12 * fontZoom) labelColor = BLACK if self.dataset.type == "Publish" or self.dataset.type == "Geno": @@ -1195,17 +1195,17 @@ class DisplayMappingResults: identification += "Trait: %s" % (self.this_trait.name) identification += " with %s samples" % (self.n_samples) - d = 4+ max( + d = 4 + max( im_drawer.textsize(identification, font=labelFont)[0], im_drawer.textsize(string1, font=labelFont)[0], im_drawer.textsize(string2, font=labelFont)[0]) im_drawer.text( text=identification, - xy=(xLeftOffset, y_constant*fontZoom), font=labelFont, + xy=(xLeftOffset, y_constant * fontZoom), font=labelFont, fill=labelColor) y_constant += 15 else: - d = 4+ max( + d = 4 + max( im_drawer.textsize(string1, font=labelFont)[0], im_drawer.textsize(string2, font=labelFont)[0]) @@ -1223,28 +1223,28 @@ class DisplayMappingResults: transform_text += "Invert +/-" im_drawer.text( - text=transform_text, xy=(xLeftOffset, y_constant*fontZoom), + text=transform_text, xy=(xLeftOffset, y_constant * fontZoom), font=labelFont, fill=labelColor) y_constant += 15 im_drawer.text( - text=string1, xy=(xLeftOffset, y_constant*fontZoom), + text=string1, xy=(xLeftOffset, y_constant * fontZoom), font=labelFont, fill=labelColor) 
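# A minimal, self-contained sketch of the stacked-label pattern used in this
# hunk (illustrative only, not part of the committed change): each info string
# is drawn at y_constant * fontZoom, then y_constant is bumped by 15 so the
# next string lands on its own row. The font path and strings below are
# placeholders, not values from the patch.
from PIL import Image, ImageDraw, ImageFont

canvas = Image.new("RGB", (600, 120), "white")
im_drawer = ImageDraw.Draw(canvas)
labelFont = ImageFont.truetype(font="Verdana.ttf", size=12)  # assumed font file

fontZoom = 1
y_constant = 25
for info_line in ["Trait: example_trait with 30 samples",   # placeholder text
                  "Dataset: ExamplePublish",
                  "Transform: log2"]:
    im_drawer.text(text=info_line, xy=(10, y_constant * fontZoom),
                   font=labelFont, fill="black")
    y_constant += 15  # move down one row for the next label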
y_constant += 15 im_drawer.text( - text=string2, xy=(xLeftOffset, y_constant*fontZoom), + text=string2, xy=(xLeftOffset, y_constant * fontZoom), font=labelFont, fill=labelColor) y_constant += 15 if string3 != '': im_drawer.text( - text=string3, xy=(xLeftOffset, y_constant*fontZoom), + text=string3, xy=(xLeftOffset, y_constant * fontZoom), font=labelFont, fill=labelColor) y_constant += 15 if string4 != '': im_drawer.text( - text=string4, xy=(xLeftOffset, y_constant*fontZoom), + text=string4, xy=(xLeftOffset, y_constant * fontZoom), font=labelFont, fill=labelColor) - def drawGeneBand(self, canvas, gifmap, plotXScale, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None): + def drawGeneBand(self, canvas, gifmap, plotXScale, offset=(40, 120, 80, 10), zoom=1, startMb= None, endMb = None): im_drawer = ImageDraw.Draw(canvas) if self.plotScale != 'physic' or self.selectedChr == -1 or not self.geneCol: return @@ -1264,9 +1264,9 @@ class DisplayMappingResults: if self.dataset.group.species == "mouse": txStart = theGO["TxStart"] txEnd = theGO["TxEnd"] - geneLength = (txEnd - txStart)*1000.0 - tenPercentLength = geneLength*0.0001 - SNPdensity = theGO["snpCount"]/geneLength + geneLength = (txEnd - txStart) * 1000.0 + tenPercentLength = geneLength * 0.0001 + SNPdensity = theGO["snpCount"] / geneLength exonStarts = list(map(float, theGO['exonStarts'].split(",")[:-1])) exonEnds = list(map(float, theGO['exonEnds'].split(",")[:-1])) @@ -1277,23 +1277,23 @@ class DisplayMappingResults: strand = theGO["Strand"] exonCount = theGO["exonCount"] - geneStartPix = xLeftOffset + plotXScale*(float(txStart) - startMb) - geneEndPix = xLeftOffset + plotXScale*(float(txEnd) - startMb) #at least one pixel + geneStartPix = xLeftOffset + plotXScale * (float(txStart) - startMb) + geneEndPix = xLeftOffset + plotXScale * (float(txEnd) - startMb) # at least one pixel if (geneEndPix < xLeftOffset): - return; # this gene is not on the screen + return; # this gene is not on the screen elif (geneEndPix > xLeftOffset + plotWidth): - geneEndPix = xLeftOffset + plotWidth; # clip the last in-range gene + geneEndPix = xLeftOffset + plotWidth; # clip the last in-range gene if (geneStartPix > xLeftOffset + plotWidth): - return; # we are outside the valid on-screen range, so stop drawing genes + return; # we are outside the valid on-screen range, so stop drawing genes elif (geneStartPix < xLeftOffset): - geneStartPix = xLeftOffset; # clip the first in-range gene + geneStartPix = xLeftOffset; # clip the first in-range gene - #color the gene based on SNP density - #found earlier, needs to be recomputed as snps are added - #always apply colors now, even if SNP Track not checked - Zach 11/24/2010 + # color the gene based on SNP density + # found earlier, needs to be recomputed as snps are added + # always apply colors now, even if SNP Track not checked - Zach 11/24/2010 - densities=[1.0000000000000001e-05, 0.094094033555233408, 0.3306166377816987, 0.88246026851027781, 2.6690084029581951, 4.1, 61.0] + densities = [1.0000000000000001e-05, 0.094094033555233408, 0.3306166377816987, 0.88246026851027781, 2.6690084029581951, 4.1, 61.0] if SNPdensity < densities[0]: myColor = BLACK elif SNPdensity < densities[1]: @@ -1310,11 +1310,11 @@ class DisplayMappingResults: myColor = DARKRED outlineColor = myColor - fillColor = myColor + fillColor = myColor TITLE = "Gene: %s (%s)\nFrom %2.3f to %2.3f Mb (%s)\nNum. exons: %d." 
% (geneSymbol, accession, float(txStart), float(txEnd), strand, exonCount) # NL: 06-02-2011 Rob required to change this link for gene related - HREF=geneNCBILink %geneSymbol + HREF = geneNCBILink % geneSymbol elif self.dataset.group.species == "rat": exonStarts = [] @@ -1327,85 +1327,85 @@ class DisplayMappingResults: strand = theGO["Strand"] exonCount = 0 - geneStartPix = xLeftOffset + plotXScale*(float(txStart) - startMb) - geneEndPix = xLeftOffset + plotXScale*(float(txEnd) - startMb) #at least one pixel + geneStartPix = xLeftOffset + plotXScale * (float(txStart) - startMb) + geneEndPix = xLeftOffset + plotXScale * (float(txEnd) - startMb) # at least one pixel if (geneEndPix < xLeftOffset): - return; # this gene is not on the screen + return; # this gene is not on the screen elif (geneEndPix > xLeftOffset + plotWidth): - geneEndPix = xLeftOffset + plotWidth; # clip the last in-range gene + geneEndPix = xLeftOffset + plotWidth; # clip the last in-range gene if (geneStartPix > xLeftOffset + plotWidth): - return; # we are outside the valid on-screen range, so stop drawing genes + return; # we are outside the valid on-screen range, so stop drawing genes elif (geneStartPix < xLeftOffset): - geneStartPix = xLeftOffset; # clip the first in-range gene + geneStartPix = xLeftOffset; # clip the first in-range gene outlineColor = DARKBLUE fillColor = DARKBLUE TITLE = "Gene: %s\nFrom %2.3f to %2.3f Mb (%s)" % (geneSymbol, float(txStart), float(txEnd), strand) # NL: 06-02-2011 Rob required to change this link for gene related - HREF=geneNCBILink %geneSymbol + HREF = geneNCBILink % geneSymbol else: outlineColor = ORANGE fillColor = ORANGE TITLE = "Gene: %s" % geneSymbol - #Draw Genes - geneYLocation = yPaddingTop + (gIndex % self.NUM_GENE_ROWS) * self.EACH_GENE_HEIGHT*zoom + # Draw Genes + geneYLocation = yPaddingTop + (gIndex % self.NUM_GENE_ROWS) * self.EACH_GENE_HEIGHT * zoom if self.dataset.group.species == "mouse" or self.dataset.group.species == "rat": - geneYLocation += 4*self.BAND_HEIGHT + 4*self.BAND_SPACING + geneYLocation += 4 * self.BAND_HEIGHT + 4 * self.BAND_SPACING else: - geneYLocation += 3*self.BAND_HEIGHT + 3*self.BAND_SPACING + geneYLocation += 3 * self.BAND_HEIGHT + 3 * self.BAND_SPACING - #draw the detail view + # draw the detail view if self.endMb - self.startMb <= self.DRAW_DETAIL_MB and geneEndPix - geneStartPix > self.EACH_GENE_ARROW_SPACING * 3: utrColor = ImageColor.getrgb("rgb(66%, 66%, 66%)") arrowColor = ImageColor.getrgb("rgb(70%, 70%, 70%)") - #draw the line that runs the entire length of the gene + # draw the line that runs the entire length of the gene im_drawer.line( xy=( - (geneStartPix, geneYLocation + self.EACH_GENE_HEIGHT/2*zoom), - ( geneEndPix, geneYLocation + self.EACH_GENE_HEIGHT/2*zoom)), + (geneStartPix, geneYLocation + self.EACH_GENE_HEIGHT / 2 * zoom), + (geneEndPix, geneYLocation + self.EACH_GENE_HEIGHT / 2 *zoom)), fill=outlineColor, width=1) - #draw the arrows + # draw the arrows if geneEndPix - geneStartPix < 1: genePixRange = 1 else: genePixRange = int(geneEndPix - geneStartPix) for xCoord in range(0, genePixRange): - if (xCoord % self.EACH_GENE_ARROW_SPACING == 0 and xCoord + self.EACH_GENE_ARROW_SPACING < geneEndPix-geneStartPix) or xCoord == 0: + if (xCoord % self.EACH_GENE_ARROW_SPACING == 0 and xCoord + self.EACH_GENE_ARROW_SPACING < geneEndPix - geneStartPix) or xCoord == 0: if strand == "+": im_drawer.line( xy=((geneStartPix + xCoord, geneYLocation), (geneStartPix + xCoord + self.EACH_GENE_ARROW_WIDTH, - geneYLocation +(self.EACH_GENE_HEIGHT / 
2)*zoom)), + geneYLocation + (self.EACH_GENE_HEIGHT / 2) * zoom)), fill=arrowColor, width=1) im_drawer.line( xy=((geneStartPix + xCoord, - geneYLocation + self.EACH_GENE_HEIGHT*zoom), - (geneStartPix + xCoord+self.EACH_GENE_ARROW_WIDTH, + geneYLocation + self.EACH_GENE_HEIGHT * zoom), + (geneStartPix + xCoord + self.EACH_GENE_ARROW_WIDTH, geneYLocation + (self.EACH_GENE_HEIGHT / 2) * zoom)), fill=arrowColor, width=1) else: im_drawer.line( xy=((geneStartPix + xCoord + self.EACH_GENE_ARROW_WIDTH, geneYLocation), - ( geneStartPix + xCoord, - geneYLocation +(self.EACH_GENE_HEIGHT / 2)*zoom)), + (geneStartPix + xCoord, + geneYLocation + (self.EACH_GENE_HEIGHT / 2) * zoom)), fill=arrowColor, width=1) im_drawer.line( xy=((geneStartPix + xCoord + self.EACH_GENE_ARROW_WIDTH, - geneYLocation + self.EACH_GENE_HEIGHT*zoom), - ( geneStartPix + xCoord, - geneYLocation + (self.EACH_GENE_HEIGHT / 2)*zoom)), + geneYLocation + self.EACH_GENE_HEIGHT * zoom), + (geneStartPix + xCoord, + geneYLocation + (self.EACH_GENE_HEIGHT / 2) * zoom)), fill=arrowColor, width=1) - #draw the blocks for the exon regions + # draw the blocks for the exon regions for i in range(0, len(exonStarts)): - exonStartPix = (exonStarts[i]-startMb)*plotXScale + xLeftOffset - exonEndPix = (exonEnds[i]-startMb)*plotXScale + xLeftOffset + exonStartPix = (exonStarts[i] - startMb) * plotXScale + xLeftOffset + exonEndPix = (exonEnds[i] - startMb) * plotXScale + xLeftOffset if (exonStartPix < xLeftOffset): exonStartPix = xLeftOffset if (exonEndPix < xLeftOffset): @@ -1416,13 +1416,13 @@ class DisplayMappingResults: exonStartPix = xLeftOffset + plotWidth im_drawer.rectangle( xy=((exonStartPix, geneYLocation), - (exonEndPix, (geneYLocation + self.EACH_GENE_HEIGHT*zoom))), - outline = outlineColor, fill = fillColor) + (exonEndPix, (geneYLocation + self.EACH_GENE_HEIGHT * zoom))), + outline=outlineColor, fill=fillColor) - #draw gray blocks for 3' and 5' UTR blocks + # draw gray blocks for 3' and 5' UTR blocks if cdsStart and cdsEnd: - utrStartPix = (txStart-startMb)*plotXScale + xLeftOffset - utrEndPix = (cdsStart-startMb)*plotXScale + xLeftOffset + utrStartPix = (txStart - startMb) * plotXScale + xLeftOffset + utrEndPix = (cdsStart - startMb) * plotXScale + xLeftOffset if (utrStartPix < xLeftOffset): utrStartPix = xLeftOffset if (utrEndPix < xLeftOffset): @@ -1439,13 +1439,13 @@ class DisplayMappingResults: labelText = "5'" im_drawer.text( text=labelText, - xy=(utrStartPix-9, geneYLocation+self.EACH_GENE_HEIGHT), + xy=(utrStartPix - 9, geneYLocation + self.EACH_GENE_HEIGHT), font=ImageFont.truetype(font=ARIAL_FILE, size=2)) - #the second UTR region + # the second UTR region - utrStartPix = (cdsEnd-startMb)*plotXScale + xLeftOffset - utrEndPix = (txEnd-startMb)*plotXScale + xLeftOffset + utrStartPix = (cdsEnd - startMb) * plotXScale + xLeftOffset + utrEndPix = (txEnd - startMb) * plotXScale + xLeftOffset if (utrStartPix < xLeftOffset): utrStartPix = xLeftOffset if (utrEndPix < xLeftOffset): @@ -1462,17 +1462,17 @@ class DisplayMappingResults: labelText = "3'" im_drawer.text( text=labelText, - xy=(utrEndPix+2, geneYLocation+self.EACH_GENE_HEIGHT), + xy=(utrEndPix + 2, geneYLocation + self.EACH_GENE_HEIGHT), font=ImageFont.truetype(font=ARIAL_FILE, size=2)) - #draw the genes as rectangles + # draw the genes as rectangles else: im_drawer.rectangle( xy=((geneStartPix, geneYLocation), - (geneEndPix, (geneYLocation + self.EACH_GENE_HEIGHT*zoom))), - outline= outlineColor, fill = fillColor) + (geneEndPix, (geneYLocation + self.EACH_GENE_HEIGHT * zoom))), 
+ outline=outlineColor, fill=fillColor) - COORDS = "%d, %d, %d, %d" %(geneStartPix, geneYLocation, geneEndPix, (geneYLocation + self.EACH_GENE_HEIGHT)) + COORDS = "%d, %d, %d, %d" % (geneStartPix, geneYLocation, geneEndPix, (geneYLocation + self.EACH_GENE_HEIGHT)) # NL: 06-02-2011 Rob required to display NCBI info in a new window gifmap.append( HtmlGenWrapper.create_area_tag( @@ -1482,8 +1482,8 @@ class DisplayMappingResults: title=TITLE, target="_blank")) -## BEGIN HaplotypeAnalyst - def drawHaplotypeBand(self, canvas, gifmap, plotXScale, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None): +# BEGIN HaplotypeAnalyst + def drawHaplotypeBand(self, canvas, gifmap, plotXScale, offset=(40, 120, 80, 10), zoom=1, startMb= None, endMb = None): if self.plotScale != 'physic' or self.selectedChr == -1 or not self.geneCol: return @@ -1496,7 +1496,7 @@ class DisplayMappingResults: samplelist = list(self.genotype.prgy) - smd=[] + smd = [] for sample in self.sample_vals_dict.keys(): if self.sample_vals_dict[sample] != "x" and sample in samplelist: temp = GeneralObject(name=sample, value=float(self.sample_vals_dict[sample])) @@ -1504,24 +1504,24 @@ class DisplayMappingResults: else: continue - smd.sort(key = lambda A: A.value) + smd.sort(key=lambda A: A.value) smd.reverse() oldgeneEndPix = -1 - #Initializing plotRight, error before + # Initializing plotRight, error before plotRight = xRightOffset im_drawer = ImageDraw.Draw(canvas) -#### find out PlotRight +# find out PlotRight for _chr in self.genotype: if _chr.name == self.ChrList[self.selectedChr][0]: for i, _locus in enumerate(_chr): txStart = _chr[i].Mb - txEnd = _chr[i].Mb + txEnd = _chr[i].Mb - geneStartPix = xLeftOffset + plotXScale*(float(txStart) - startMb) - 0 - geneEndPix = xLeftOffset + plotXScale*(float(txEnd) - startMb) - 0 + geneStartPix = xLeftOffset + plotXScale * (float(txStart) - startMb) - 0 + geneEndPix = xLeftOffset + plotXScale * (float(txEnd) - startMb) - 0 drawit = 1 if (geneStartPix < xLeftOffset): @@ -1530,27 +1530,27 @@ class DisplayMappingResults: drawit = 0; if drawit == 1: - if _chr[i].name != " - " : + if _chr[i].name != " - ": plotRight = geneEndPix + 4 -#### end find out PlotRight +# end find out PlotRight firstGene = 1 lastGene = 0 - #Sets the length to the length of the strain list. Beforehand, "oldgeno = self.genotype[0][i].genotype" - #was the only place it was initialized, which worked as long as the very start (startMb = None/0) wasn't being mapped. - #Now there should always be some value set for "oldgeno" - Zach 12/14/2010 - oldgeno = [None]*len(self.strainlist) + # Sets the length to the length of the strain list. Beforehand, "oldgeno = self.genotype[0][i].genotype" + # was the only place it was initialized, which worked as long as the very start (startMb = None/0) wasn't being mapped. 
+ # Now there should always be some value set for "oldgeno" - Zach 12/14/2010 + oldgeno = [None] * len(self.strainlist) for i, _chr in enumerate(self.genotype): if _chr.name == self.ChrList[self.selectedChr][0]: for j, _locus in enumerate(_chr): txStart = _chr[j].Mb - txEnd = _chr[j].Mb + txEnd = _chr[j].Mb - geneStartPix = xLeftOffset + plotXScale*(float(txStart) - startMb) - 0 - geneEndPix = xLeftOffset + plotXScale*(float(txEnd) - startMb) + 0 + geneStartPix = xLeftOffset + plotXScale * (float(txStart) - startMb) - 0 + geneEndPix = xLeftOffset + plotXScale * (float(txEnd) - startMb) + 0 if oldgeneEndPix >= xLeftOffset: drawStart = oldgeneEndPix + 4 @@ -1582,36 +1582,36 @@ class DisplayMappingResults: if drawit == 1: myColor = DARKBLUE outlineColor = myColor - fillColor = myColor + fillColor = myColor - maxind=0 + maxind = 0 - #Draw Genes + # Draw Genes - geneYLocation = yPaddingTop + self.NUM_GENE_ROWS * (self.EACH_GENE_HEIGHT)*zoom + geneYLocation = yPaddingTop + self.NUM_GENE_ROWS * (self.EACH_GENE_HEIGHT) * zoom if self.dataset.group.species == "mouse" or self.dataset.group.species == "rat": - geneYLocation += 4*self.BAND_HEIGHT + 4*self.BAND_SPACING + geneYLocation += 4 * self.BAND_HEIGHT + 4 * self.BAND_SPACING else: - geneYLocation += 3*self.BAND_HEIGHT + 3*self.BAND_SPACING + geneYLocation += 3 * self.BAND_HEIGHT + 3 * self.BAND_SPACING - if _chr[j].name != " - " : + if _chr[j].name != " - ": if (firstGene == 1) and (lastGene != 1): oldgeneEndPix = drawStart = xLeftOffset oldgeno = _chr[j].genotype continue - for k, _geno in enumerate (_chr[j].genotype): - plotbxd=0 + for k, _geno in enumerate(_chr[j].genotype): + plotbxd = 0 if samplelist[k] in [item.name for item in smd]: - plotbxd=1 + plotbxd = 1 if (plotbxd == 1): ind = 0 if samplelist[k] in [item.name for item in smd]: ind = [item.name for item in smd].index(samplelist[k]) - maxind=max(ind, maxind) + maxind = max(ind, maxind) # lines if (oldgeno[k] == -1 and _geno == -1): @@ -1621,27 +1621,27 @@ class DisplayMappingResults: elif (oldgeno[k] == 0 and _geno == 0): mylineColor = self.HAPLOTYPE_HETEROZYGOUS else: - mylineColor = self.HAPLOTYPE_RECOMBINATION # XZ: Unknown + mylineColor = self.HAPLOTYPE_RECOMBINATION # XZ: Unknown im_drawer.line( xy=((drawStart, - geneYLocation+7+2*ind*self.EACH_GENE_HEIGHT*zoom), + geneYLocation + 7 + 2*ind*self.EACH_GENE_HEIGHT*zoom), (drawEnd, - geneYLocation+7+2*ind*self.EACH_GENE_HEIGHT*zoom)), - fill= mylineColor, width=zoom*(self.EACH_GENE_HEIGHT+2)) + geneYLocation + 7 + 2*ind*self.EACH_GENE_HEIGHT*zoom)), + fill= mylineColor, width=zoom * (self.EACH_GENE_HEIGHT + 2)) - fillColor=BLACK - outlineColor=BLACK + fillColor = BLACK + outlineColor = BLACK if lastGene == 0: im_drawer.rectangle( xy=((geneStartPix, - geneYLocation+2*ind*self.EACH_GENE_HEIGHT*zoom), + geneYLocation + 2 * ind*self.EACH_GENE_HEIGHT*zoom), (geneEndPix, - geneYLocation+2*ind*self.EACH_GENE_HEIGHT+ 2*self.EACH_GENE_HEIGHT*zoom)), + geneYLocation + 2 *ind*self.EACH_GENE_HEIGHT + 2*self.EACH_GENE_HEIGHT*zoom)), outline=outlineColor, fill=fillColor) - COORDS = "%d, %d, %d, %d" %(geneStartPix, geneYLocation+ind*self.EACH_GENE_HEIGHT, geneEndPix+1, (geneYLocation + ind*self.EACH_GENE_HEIGHT)) + COORDS = "%d, %d, %d, %d" % (geneStartPix, geneYLocation + ind * self.EACH_GENE_HEIGHT, geneEndPix + 1, (geneYLocation + ind * self.EACH_GENE_HEIGHT)) TITLE = "Strain: %s, marker (%s) \n Position %2.3f Mb." 
% (samplelist[k], _chr[j].name, float(txStart)) HREF = '' gifmap.append( @@ -1658,10 +1658,10 @@ class DisplayMappingResults: mylineColor = self.HAPLOTYPE_RECOMBINATION im_drawer.line( xy=((plotRight, - geneYLocation+7+2*ind*self.EACH_GENE_HEIGHT*zoom), + geneYLocation + 7 + 2*ind*self.EACH_GENE_HEIGHT*zoom), (drawEnd, - geneYLocation+7+2*ind*self.EACH_GENE_HEIGHT*zoom)), - fill= mylineColor, width=zoom*(self.EACH_GENE_HEIGHT+2)) + geneYLocation + 7 + 2*ind*self.EACH_GENE_HEIGHT*zoom)), + fill= mylineColor, width=zoom * (self.EACH_GENE_HEIGHT + 2)) if lastGene == 0: @@ -1670,7 +1670,7 @@ class DisplayMappingResults: font=ImageFont.truetype(font=VERDANA_FILE, size=12), xy=(geneStartPix, - geneYLocation+17+2*maxind*self.EACH_GENE_HEIGHT*zoom), + geneYLocation + 17 + 2*maxind*self.EACH_GENE_HEIGHT*zoom), fill=BLACK, angle=-90) oldgeneEndPix = geneEndPix; @@ -1683,9 +1683,9 @@ class DisplayMappingResults: if _chr.name == self.ChrList[self.selectedChr][0]: for j, _geno in enumerate(_chr[1].genotype): - plotbxd=0 + plotbxd = 0 if samplelist[j] in [item.name for item in smd]: - plotbxd=1 + plotbxd = 1 if (plotbxd == 1): ind = [item.name for item in smd].index(samplelist[j]) - 1 @@ -1695,19 +1695,19 @@ class DisplayMappingResults: im_drawer.text( text="%s" % (samplelist[j]), xy=((xLeftOffset + plotWidth + 10), - geneYLocation+11+2*ind*self.EACH_GENE_HEIGHT*zoom), + geneYLocation + 11 + 2*ind*self.EACH_GENE_HEIGHT*zoom), font=ImageFont.truetype(font=VERDANA_FILE, size=12), fill=BLACK) im_drawer.text( text="%2.2f" % (expr), xy=((xLeftOffset + plotWidth + 60), - geneYLocation+11+2*ind*self.EACH_GENE_HEIGHT*zoom), + geneYLocation + 11 + 2*ind*self.EACH_GENE_HEIGHT*zoom), font=ImageFont.truetype(font=VERDANA_FILE, size=12), fill=BLACK) -## END HaplotypeAnalyst +# END HaplotypeAnalyst - def drawClickBand(self, canvas, gifmap, plotXScale, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None): + def drawClickBand(self, canvas, gifmap, plotXScale, offset=(40, 120, 80, 10), zoom=1, startMb= None, endMb = None): im_drawer = ImageDraw.Draw(canvas) if self.plotScale != 'physic' or self.selectedChr == -1: return @@ -1724,12 +1724,12 @@ class DisplayMappingResults: # but it makes the HTML huge, and takes forever to render the page in the first place) # Draw the bands that you can click on to go to UCSC / Ensembl MAX_CLICKABLE_REGION_DIVISIONS = 100 - clickableRegionLabelFont=ImageFont.truetype(font=VERDANA_FILE, size=9) - pixelStep = max(5, int(float(plotWidth)/MAX_CLICKABLE_REGION_DIVISIONS)) + clickableRegionLabelFont = ImageFont.truetype(font=VERDANA_FILE, size=9) + pixelStep = max(5, int(float(plotWidth) / MAX_CLICKABLE_REGION_DIVISIONS)) # pixelStep: every N pixels, we make a new clickable area for the user to go to that area of the genome. - numBasesCurrentlyOnScreen = self.kONE_MILLION*abs(startMb - endMb) # Number of bases on screen now - flankingWidthInBases = int ( min( (float(numBasesCurrentlyOnScreen) / 2.0), (5*self.kONE_MILLION) ) ) + numBasesCurrentlyOnScreen = self.kONE_MILLION * abs(startMb - endMb) # Number of bases on screen now + flankingWidthInBases = int (min((float(numBasesCurrentlyOnScreen) / 2.0), (5*self.kONE_MILLION))) webqtlZoomWidth = numBasesCurrentlyOnScreen / 16.0 # Flanking width should be such that we either zoom in to a 10 million base region, or we show the clicked region at the same scale as we are currently seeing. 
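The flanking-width comment above is easier to follow with concrete numbers. A small illustrative calculation (a sketch only; the plot width and Mb window are chosen arbitrarily, while kONE_MILLION and MAX_CLICKABLE_REGION_DIVISIONS mirror the constants used in the hunk):

kONE_MILLION = 1000000
MAX_CLICKABLE_REGION_DIVISIONS = 100

plotWidth, startMb, endMb = 700, 10.0, 30.0
pixelStep = max(5, int(float(plotWidth) / MAX_CLICKABLE_REGION_DIVISIONS))   # 7 px per clickable strip
numBasesCurrentlyOnScreen = kONE_MILLION * abs(startMb - endMb)              # 20,000,000 bases on screen
flankingWidthInBases = int(min(numBasesCurrentlyOnScreen / 2.0,
                               5 * kONE_MILLION))                            # capped at 5,000,000 per side
webqtlZoomWidth = numBasesCurrentlyOnScreen / 16.0                           # 1,250,000 -> the "8x expanded" window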
@@ -1739,22 +1739,22 @@ class DisplayMappingResults: paddingTop = yTopOffset if self.dataset.group.species == "mouse" or self.dataset.group.species == "rat": phenogenPaddingTop = paddingTop + (self.BAND_HEIGHT + self.BAND_SPACING) - ucscPaddingTop = paddingTop + 2*(self.BAND_HEIGHT + self.BAND_SPACING) - ensemblPaddingTop = paddingTop + 3*(self.BAND_HEIGHT + self.BAND_SPACING) + ucscPaddingTop = paddingTop + 2 * (self.BAND_HEIGHT + self.BAND_SPACING) + ensemblPaddingTop = paddingTop + 3 * (self.BAND_HEIGHT + self.BAND_SPACING) else: ucscPaddingTop = paddingTop + (self.BAND_HEIGHT + self.BAND_SPACING) - ensemblPaddingTop = paddingTop + 2*(self.BAND_HEIGHT + self.BAND_SPACING) + ensemblPaddingTop = paddingTop + 2 * (self.BAND_HEIGHT + self.BAND_SPACING) if zoom == 1: for pixel in range(xLeftOffset, xLeftOffset + plotWidth, pixelStep): - calBase = self.kONE_MILLION*(startMb + (endMb-startMb)*(pixel-xLeftOffset-0.0)/plotWidth) + calBase = self.kONE_MILLION * (startMb + (endMb - startMb) * (pixel - xLeftOffset - 0.0) / plotWidth) xBrowse1 = pixel xBrowse2 = min(xLeftOffset + plotWidth, (pixel + pixelStep - 1)) - WEBQTL_COORDS = "%d, %d, %d, %d" % (xBrowse1, paddingTop, xBrowse2, (paddingTop+self.BAND_HEIGHT)) - WEBQTL_HREF = "javascript:rangeView('%s', %f, %f)" % (self.selectedChr - 1, max(0, (calBase-webqtlZoomWidth))/1000000.0, (calBase+webqtlZoomWidth)/1000000.0) + WEBQTL_COORDS = "%d, %d, %d, %d" % (xBrowse1, paddingTop, xBrowse2, (paddingTop + self.BAND_HEIGHT)) + WEBQTL_HREF = "javascript:rangeView('%s', %f, %f)" % (self.selectedChr - 1, max(0, (calBase - webqtlZoomWidth)) / 1000000.0, (calBase + webqtlZoomWidth) / 1000000.0) WEBQTL_TITLE = "Click to view this section of the genome in WebQTL" gifmap.append( @@ -1769,15 +1769,15 @@ class DisplayMappingResults: outline=self.CLICKABLE_WEBQTL_REGION_COLOR, fill=self.CLICKABLE_WEBQTL_REGION_COLOR) im_drawer.line( - xy=((xBrowse1, paddingTop), ( xBrowse1, (paddingTop + self.BAND_HEIGHT))), + xy=((xBrowse1, paddingTop), (xBrowse1, (paddingTop + self.BAND_HEIGHT))), fill=self.CLICKABLE_WEBQTL_REGION_OUTLINE_COLOR) if self.dataset.group.species == "mouse" or self.dataset.group.species == "rat": - PHENOGEN_COORDS = "%d, %d, %d, %d" % (xBrowse1, phenogenPaddingTop, xBrowse2, (phenogenPaddingTop+self.BAND_HEIGHT)) + PHENOGEN_COORDS = "%d, %d, %d, %d" % (xBrowse1, phenogenPaddingTop, xBrowse2, (phenogenPaddingTop + self.BAND_HEIGHT)) if self.dataset.group.species == "mouse": - PHENOGEN_HREF = "https://phenogen.org/gene.jsp?speciesCB=Mm&auto=Y&geneTxt=chr%s:%d-%d&genomeVer=mm10" % (self.selectedChr, max(0, calBase-flankingWidthInBases), calBase+flankingWidthInBases) + PHENOGEN_HREF = "https://phenogen.org/gene.jsp?speciesCB=Mm&auto=Y&geneTxt=chr%s:%d-%d&genomeVer=mm10" % (self.selectedChr, max(0, calBase - flankingWidthInBases), calBase + flankingWidthInBases) else: - PHENOGEN_HREF = "https://phenogen.org/gene.jsp?speciesCB=Mm&auto=Y&geneTxt=chr%s:%d-%d&genomeVer=mm10" % (self.selectedChr, max(0, calBase-flankingWidthInBases), calBase+flankingWidthInBases) + PHENOGEN_HREF = "https://phenogen.org/gene.jsp?speciesCB=Mm&auto=Y&geneTxt=chr%s:%d-%d&genomeVer=mm10" % (self.selectedChr, max(0, calBase - flankingWidthInBases), calBase + flankingWidthInBases) PHENOGEN_TITLE = "Click to view this section of the genome in PhenoGen" gifmap.append( HtmlGenWrapper.create_area_tag( @@ -1787,18 +1787,18 @@ class DisplayMappingResults: title=PHENOGEN_TITLE)) im_drawer.rectangle( xy=((xBrowse1, phenogenPaddingTop), - (xBrowse2, (phenogenPaddingTop+self.BAND_HEIGHT))), + 
(xBrowse2, (phenogenPaddingTop + self.BAND_HEIGHT))), outline=self.CLICKABLE_PHENOGEN_REGION_COLOR, fill=self.CLICKABLE_PHENOGEN_REGION_COLOR) im_drawer.line( - xy=((xBrowse1, phenogenPaddingTop), ( xBrowse1, (phenogenPaddingTop+self.BAND_HEIGHT))), + xy=((xBrowse1, phenogenPaddingTop), (xBrowse1, (phenogenPaddingTop + self.BAND_HEIGHT))), fill=self.CLICKABLE_PHENOGEN_REGION_OUTLINE_COLOR) - UCSC_COORDS = "%d, %d, %d, %d" %(xBrowse1, ucscPaddingTop, xBrowse2, (ucscPaddingTop+self.BAND_HEIGHT)) + UCSC_COORDS = "%d, %d, %d, %d" % (xBrowse1, ucscPaddingTop, xBrowse2, (ucscPaddingTop + self.BAND_HEIGHT)) if self.dataset.group.species == "mouse": - UCSC_HREF = "http://genome.ucsc.edu/cgi-bin/hgTracks?db=%s&position=chr%s:%d-%d&hgt.customText=%s/snp/chr%s" % (self._ucscDb, self.selectedChr, max(0, calBase-flankingWidthInBases), calBase+flankingWidthInBases, webqtlConfig.PORTADDR, self.selectedChr) + UCSC_HREF = "http://genome.ucsc.edu/cgi-bin/hgTracks?db=%s&position=chr%s:%d-%d&hgt.customText=%s/snp/chr%s" % (self._ucscDb, self.selectedChr, max(0, calBase - flankingWidthInBases), calBase + flankingWidthInBases, webqtlConfig.PORTADDR, self.selectedChr) else: - UCSC_HREF = "http://genome.ucsc.edu/cgi-bin/hgTracks?db=%s&position=chr%s:%d-%d" % (self._ucscDb, self.selectedChr, max(0, calBase-flankingWidthInBases), calBase+flankingWidthInBases) + UCSC_HREF = "http://genome.ucsc.edu/cgi-bin/hgTracks?db=%s&position=chr%s:%d-%d" % (self._ucscDb, self.selectedChr, max(0, calBase - flankingWidthInBases), calBase + flankingWidthInBases) UCSC_TITLE = "Click to view this section of the genome in the UCSC Genome Browser" gifmap.append( HtmlGenWrapper.create_area_tag( @@ -1808,19 +1808,19 @@ class DisplayMappingResults: title=UCSC_TITLE)) im_drawer.rectangle( xy=((xBrowse1, ucscPaddingTop), - (xBrowse2, (ucscPaddingTop+self.BAND_HEIGHT))), + (xBrowse2, (ucscPaddingTop + self.BAND_HEIGHT))), outline=self.CLICKABLE_UCSC_REGION_COLOR, fill=self.CLICKABLE_UCSC_REGION_COLOR) im_drawer.line( xy=((xBrowse1, ucscPaddingTop), - (xBrowse1, (ucscPaddingTop+self.BAND_HEIGHT))), + (xBrowse1, (ucscPaddingTop + self.BAND_HEIGHT))), fill=self.CLICKABLE_UCSC_REGION_OUTLINE_COLOR) - ENSEMBL_COORDS = "%d, %d, %d, %d" %(xBrowse1, ensemblPaddingTop, xBrowse2, (ensemblPaddingTop+self.BAND_HEIGHT)) + ENSEMBL_COORDS = "%d, %d, %d, %d" % (xBrowse1, ensemblPaddingTop, xBrowse2, (ensemblPaddingTop + self.BAND_HEIGHT)) if self.dataset.group.species == "mouse": - ENSEMBL_HREF = "http://www.ensembl.org/Mus_musculus/contigview?highlight=&chr=%s&vc_start=%d&vc_end=%d&x=35&y=12" % (self.selectedChr, max(0, calBase-flankingWidthInBases), calBase+flankingWidthInBases) + ENSEMBL_HREF = "http://www.ensembl.org/Mus_musculus/contigview?highlight=&chr=%s&vc_start=%d&vc_end=%d&x=35&y=12" % (self.selectedChr, max(0, calBase - flankingWidthInBases), calBase + flankingWidthInBases) else: - ENSEMBL_HREF = "http://www.ensembl.org/Rattus_norvegicus/contigview?chr=%s&start=%d&end=%d" % (self.selectedChr, max(0, calBase-flankingWidthInBases), calBase+flankingWidthInBases) + ENSEMBL_HREF = "http://www.ensembl.org/Rattus_norvegicus/contigview?chr=%s&start=%d&end=%d" % (self.selectedChr, max(0, calBase - flankingWidthInBases), calBase + flankingWidthInBases) ENSEMBL_TITLE = "Click to view this section of the genome in the Ensembl Genome Browser" gifmap.append(HtmlGenWrapper.create_area_tag( shape='rect', @@ -1829,36 +1829,36 @@ class DisplayMappingResults: title=ENSEMBL_TITLE)) im_drawer.rectangle( xy=((xBrowse1, ensemblPaddingTop), - (xBrowse2, 
(ensemblPaddingTop+self.BAND_HEIGHT))), + (xBrowse2, (ensemblPaddingTop + self.BAND_HEIGHT))), outline=self.CLICKABLE_ENSEMBL_REGION_COLOR, fill=self.CLICKABLE_ENSEMBL_REGION_COLOR) im_drawer.line( xy=((xBrowse1, ensemblPaddingTop), - (xBrowse1, (ensemblPaddingTop+self.BAND_HEIGHT))), + (xBrowse1, (ensemblPaddingTop + self.BAND_HEIGHT))), fill=self.CLICKABLE_ENSEMBL_REGION_OUTLINE_COLOR) # end for im_drawer.text( text="Click to view the corresponding section of the genome in an 8x expanded WebQTL map", - xy=((xLeftOffset + 10), paddingTop),# + self.BAND_HEIGHT/2), + xy=((xLeftOffset + 10), paddingTop), # + self.BAND_HEIGHT/2), font=clickableRegionLabelFont, fill=self.CLICKABLE_WEBQTL_TEXT_COLOR) if self.dataset.group.species == "mouse" or self.dataset.group.species == "rat": im_drawer.text( text="Click to view the corresponding section of the genome in PhenoGen", - xy=((xLeftOffset + 10), phenogenPaddingTop),# + self.BAND_HEIGHT/2), + xy=((xLeftOffset + 10), phenogenPaddingTop), # + self.BAND_HEIGHT/2), font=clickableRegionLabelFont, fill=self.CLICKABLE_PHENOGEN_TEXT_COLOR) im_drawer.text( text="Click to view the corresponding section of the genome in the UCSC Genome Browser", - xy=((xLeftOffset + 10), ucscPaddingTop),# + self.BAND_HEIGHT/2), + xy=((xLeftOffset + 10), ucscPaddingTop), # + self.BAND_HEIGHT/2), font=clickableRegionLabelFont, fill=self.CLICKABLE_UCSC_TEXT_COLOR) im_drawer.text( text="Click to view the corresponding section of the genome in the Ensembl Genome Browser", - xy=((xLeftOffset+10), ensemblPaddingTop),# + self.BAND_HEIGHT/2), + xy=((xLeftOffset + 10), ensemblPaddingTop), # + self.BAND_HEIGHT/2), font=clickableRegionLabelFont, fill=self.CLICKABLE_ENSEMBL_TEXT_COLOR) - #draw the gray text - chrFont = ImageFont.truetype(font=VERDANA_BOLD_FILE, size=26*zoom) + # draw the gray text + chrFont = ImageFont.truetype(font=VERDANA_BOLD_FILE, size=26 * zoom) chrX = xLeftOffset + plotWidth - 2 - im_drawer.textsize( "Chr %s" % self.ChrList[self.selectedChr][0], font=chrFont)[0] im_drawer.text( @@ -1866,17 +1866,17 @@ class DisplayMappingResults: xy=(chrX, phenogenPaddingTop), font=chrFont, fill=GRAY) # end of drawBrowserClickableRegions else: - #draw the gray text - chrFont = ImageFont.truetype(font=VERDANA_FILE, size=26*zoom) + # draw the gray text + chrFont = ImageFont.truetype(font=VERDANA_FILE, size=26 * zoom) chrX = xLeftOffset + (plotWidth - im_drawer.textsize( - "Chr %s" % currentChromosome, font=chrFont)[0])/2 + "Chr %s" % currentChromosome, font=chrFont)[0]) / 2 im_drawer.text( text="Chr %s" % currentChromosome, xy=(chrX, 32), font=chrFont, fill=GRAY) # end of drawBrowserClickableRegions pass - def drawXAxis(self, canvas, drawAreaHeight, gifmap, plotXScale, showLocusForm, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None): + def drawXAxis(self, canvas, drawAreaHeight, gifmap, plotXScale, showLocusForm, offset=(40, 120, 80, 10), zoom=1, startMb= None, endMb = None): im_drawer = ImageDraw.Draw(canvas) xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset plotWidth = canvas.size[0] - xLeftOffset - xRightOffset @@ -1886,33 +1886,33 @@ class DisplayMappingResults: if zoom == 2: fontZoom = 1.5 - #Parameters - NUM_MINOR_TICKS = 5 # Number of minor ticks between major ticks + # Parameters + NUM_MINOR_TICKS = 5 # Number of minor ticks between major ticks X_MAJOR_TICK_THICKNESS = 3 X_MINOR_TICK_THICKNESS = 1 - X_AXIS_THICKNESS = 1*zoom + X_AXIS_THICKNESS = 1 * zoom # ======= Alex: Draw the X-axis labels (megabase location) - MBLabelFont = 
ImageFont.truetype(font=VERDANA_FILE, size=15*zoom) - xMajorTickHeight = 10 * zoom # How high the tick extends below the axis - xMinorTickHeight = 5*zoom + MBLabelFont = ImageFont.truetype(font=VERDANA_FILE, size=15 * zoom) + xMajorTickHeight = 10 * zoom # How high the tick extends below the axis + xMinorTickHeight = 5 * zoom xAxisTickMarkColor = BLACK xAxisLabelColor = BLACK - fontHeight = 12*fontZoom # How tall the font that we're using is + fontHeight = 12 * fontZoom # How tall the font that we're using is spacingFromLabelToAxis = 10 if self.plotScale == 'physic': - strYLoc = yZero + MBLabelFont.font.height/2 - ###Physical single chromosome view + strYLoc = yZero + MBLabelFont.font.height / 2 + # Physical single chromosome view if self.selectedChr > -1: XScale = Plot.detScale(startMb, endMb) XStart, XEnd, XStep = XScale if XStep < 8: XStep *= 2 - spacingAmtX = spacingAmt = (XEnd-XStart)/XStep + spacingAmtX = spacingAmt = (XEnd - XStart) / XStep j = 0 - while abs(spacingAmtX -int(spacingAmtX)) >= spacingAmtX/100.0 and j < 6: + while abs(spacingAmtX - int(spacingAmtX)) >= spacingAmtX / 100.0 and j < 6: j += 1 spacingAmtX *= 10 @@ -1921,13 +1921,13 @@ class DisplayMappingResults: for counter, _Mb in enumerate(Plot.frange(XStart, XEnd, spacingAmt / NUM_MINOR_TICKS)): if _Mb < startMb or _Mb > endMb: continue - Xc = xLeftOffset + plotXScale*(_Mb - startMb) - if counter % NUM_MINOR_TICKS == 0: # Draw a MAJOR mark, not just a minor tick mark + Xc = xLeftOffset + plotXScale * (_Mb - startMb) + if counter % NUM_MINOR_TICKS == 0: # Draw a MAJOR mark, not just a minor tick mark im_drawer.line(xy=((Xc, yZero), - (Xc, yZero+xMajorTickHeight)), + (Xc, yZero + xMajorTickHeight)), fill=xAxisTickMarkColor, - width=X_MAJOR_TICK_THICKNESS) # Draw the MAJOR tick mark - labelStr = str(formatStr % _Mb) # What Mbase location to put on the label + width=X_MAJOR_TICK_THICKNESS) # Draw the MAJOR tick mark + labelStr = str(formatStr % _Mb) # What Mbase location to put on the label strWidth, strHeight = im_drawer.textsize(labelStr, font=MBLabelFont) drawStringXc = (Xc - (strWidth / 2.0)) im_drawer.text(xy=(drawStringXc, strYLoc), @@ -1935,16 +1935,16 @@ class DisplayMappingResults: fill=xAxisLabelColor) else: im_drawer.line(xy=((Xc, yZero), - (Xc, yZero+xMinorTickHeight)), + (Xc, yZero + xMinorTickHeight)), fill=xAxisTickMarkColor, - width=X_MINOR_TICK_THICKNESS) # Draw the MINOR tick mark + width=X_MINOR_TICK_THICKNESS) # Draw the MINOR tick mark - ###Physical genome wide view + # Physical genome wide view else: distScale = 0 startPosX = xLeftOffset for i, distLen in enumerate(self.ChrLengthDistList): - if distScale == 0: #universal scale in whole genome mapping + if distScale == 0: # universal scale in whole genome mapping if distLen > 75: distScale = 25 elif distLen > 30: @@ -1953,51 +1953,51 @@ class DisplayMappingResults: distScale = 5 for j, tickdists in enumerate(range(distScale, int(ceil(distLen)), distScale)): im_drawer.line( - xy=((startPosX+tickdists*plotXScale, yZero), - (startPosX+tickdists*plotXScale, yZero + 7)), - fill=BLACK, width=1*zoom) + xy=((startPosX + tickdists * plotXScale, yZero), + (startPosX + tickdists * plotXScale, yZero + 7)), + fill=BLACK, width=1 * zoom) if j % 2 == 0: draw_rotated_text( canvas, text=str(tickdists), font=MBLabelFont, - xy=(startPosX+tickdists*plotXScale, - yZero+10*zoom), fill=BLACK, angle=270) - startPosX += (self.ChrLengthDistList[i]+self.GraphInterval)*plotXScale + xy=(startPosX + tickdists * plotXScale, + yZero + 10 * zoom), fill=BLACK, angle=270) + startPosX += 
(self.ChrLengthDistList[i] + self.GraphInterval) * plotXScale - megabaseLabelFont = ImageFont.truetype(font=VERDANA_FILE, size=int(18*zoom*1.5)) + megabaseLabelFont = ImageFont.truetype(font=VERDANA_FILE, size=int(18 * zoom * 1.5)) im_drawer.text( text="Megabases", xy=( - xLeftOffset+(plotWidth-im_drawer.textsize( - "Megabases", font=megabaseLabelFont)[0])/2, - strYLoc+MBLabelFont.font.height+10*(zoom%2)), + xLeftOffset + (plotWidth - im_drawer.textsize( + "Megabases", font=megabaseLabelFont)[0]) / 2, + strYLoc + MBLabelFont.font.height + 10*(zoom%2)), font=megabaseLabelFont, fill=BLACK) pass else: - strYLoc = yZero + spacingFromLabelToAxis + MBLabelFont.font.height/2 + strYLoc = yZero + spacingFromLabelToAxis + MBLabelFont.font.height / 2 ChrAInfo = [] preLpos = -1 distinctCount = 0.0 - if self.selectedChr == -1: #ZS: If viewing full genome/all chromosomes + if self.selectedChr == -1: # ZS: If viewing full genome/all chromosomes for i, _chr in enumerate(self.genotype): thisChr = [] Locus0CM = _chr[0].cM nLoci = len(_chr) - if nLoci <= 8: + if nLoci <= 8: for _locus in _chr: if _locus.name != ' - ': if _locus.cM != preLpos: distinctCount += 1 preLpos = _locus.cM - thisChr.append([_locus.name, _locus.cM-Locus0CM]) + thisChr.append([_locus.name, _locus.cM - Locus0CM]) else: - for j in (0, nLoci/4, nLoci/2, nLoci*3/4, -1): + for j in (0, nLoci / 4, nLoci / 2, nLoci*3/4, -1): while _chr[j].name == ' - ': j += 1 if _chr[j].cM != preLpos: distinctCount += 1 preLpos = _chr[j].cM - thisChr.append([_chr[j].name, _chr[j].cM-Locus0CM]) + thisChr.append([_chr[j].name, _chr[j].cM - Locus0CM]) ChrAInfo.append(thisChr) else: for i, _chr in enumerate(self.genotype): @@ -2009,10 +2009,10 @@ class DisplayMappingResults: if _locus.cM != preLpos: distinctCount += 1 preLpos = _locus.cM - thisChr.append([_locus.name, _locus.cM-Locus0CM]) + thisChr.append([_locus.name, _locus.cM - Locus0CM]) ChrAInfo.append(thisChr) - stepA = (plotWidth+0.0)/distinctCount + stepA = (plotWidth + 0.0) / distinctCount LRectWidth = 10 LRectHeight = 3 @@ -2037,28 +2037,28 @@ class DisplayMappingResults: Zorder = 0 if differ: im_drawer.line( - xy=((startPosX+Lpos, yZero), (xLeftOffset+offsetA,\ - yZero+25)), + xy=((startPosX + Lpos, yZero), (xLeftOffset + offsetA,\ + yZero + 25)), fill=lineColor) im_drawer.line( - xy=((xLeftOffset+offsetA, yZero+25), (xLeftOffset+offsetA,\ - yZero+40+Zorder*(LRectWidth+3))), + xy=((xLeftOffset + offsetA, yZero + 25), (xLeftOffset+offsetA,\ + yZero + 40 + Zorder*(LRectWidth+3))), fill=lineColor) rectColor = ORANGE else: im_drawer.line( - xy=((xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3)-3), (\ - xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3))), + xy=((xLeftOffset + offsetA, yZero + 40+Zorder*(LRectWidth+3)-3), (\ + xLeftOffset + offsetA, yZero + 40+Zorder*(LRectWidth+3))), fill=lineColor) rectColor = DEEPPINK im_drawer.rectangle( - xy=((xLeftOffset+offsetA, yZero+40+Zorder*(LRectWidth+3)), - (xLeftOffset+offsetA-LRectHeight, - yZero+40+Zorder*(LRectWidth+3)+LRectWidth)), - outline=rectColor, fill=rectColor, width = 0) - COORDS="%d,%d,%d,%d"%(xLeftOffset+offsetA-LRectHeight, yZero+40+Zorder*(LRectWidth+3),\ - xLeftOffset+offsetA,yZero+40+Zorder*(LRectWidth+3)+LRectWidth) - HREF = "/show_trait?trait_id=%s&dataset=%s" % (Lname, self.dataset.group.name+"Geno") + xy=((xLeftOffset + offsetA, yZero + 40+Zorder*(LRectWidth+3)), + (xLeftOffset + offsetA - LRectHeight, + yZero + 40 + Zorder*(LRectWidth+3)+LRectWidth)), + outline=rectColor, fill=rectColor, width=0) + COORDS = "%d,%d,%d,%d" % 
(xLeftOffset+offsetA-LRectHeight, yZero+40+Zorder*(LRectWidth+3),\ + xLeftOffset + offsetA, yZero +40+Zorder*(LRectWidth+3)+LRectWidth) + HREF = "/show_trait?trait_id=%s&dataset=%s" % (Lname, self.dataset.group.name + "Geno") #HREF="javascript:showDatabase3('%s','%s','%s','');" % (showLocusForm,fd.RISet+"Geno", Lname) Areas = HtmlGenWrapper.create_area_tag( shape='rect', @@ -2067,26 +2067,26 @@ class DisplayMappingResults: target="_blank", title="Locus : {}".format(Lname)) gifmap.append(Areas) - ##piddle bug + # piddle bug if j == 0: im_drawer.line( - xy=((startPosX, yZero), (startPosX, yZero+40)), + xy=((startPosX, yZero), (startPosX, yZero + 40)), fill=lineColor) - startPosX += (self.ChrLengthDistList[j]+self.GraphInterval)*plotXScale + startPosX += (self.ChrLengthDistList[j] + self.GraphInterval) * plotXScale - centimorganLabelFont = ImageFont.truetype(font=VERDANA_FILE, size=int(18*zoom*1.5)) + centimorganLabelFont = ImageFont.truetype(font=VERDANA_FILE, size=int(18 * zoom * 1.5)) im_drawer.text( text="Centimorgans", - xy=(xLeftOffset+(plotWidth-im_drawer.textsize( - "Centimorgans", font=centimorganLabelFont)[0])/2, - strYLoc + MBLabelFont.font.height+ 10*(zoom%2)), + xy=(xLeftOffset + (plotWidth - im_drawer.textsize( + "Centimorgans", font=centimorganLabelFont)[0]) / 2, + strYLoc + MBLabelFont.font.height + 10 * (zoom %2)), font=centimorganLabelFont, fill=BLACK) - im_drawer.line(xy=((xLeftOffset, yZero), (xLeftOffset+plotWidth, yZero)), - fill=BLACK, width=X_AXIS_THICKNESS) # Draw the X axis itself + im_drawer.line(xy=((xLeftOffset, yZero), (xLeftOffset + plotWidth, yZero)), + fill=BLACK, width=X_AXIS_THICKNESS) # Draw the X axis itself - def drawQTL(self, canvas, drawAreaHeight, gifmap, plotXScale, offset= (40, 120, 80, 10), zoom = 1, startMb = None, endMb = None): + def drawQTL(self, canvas, drawAreaHeight, gifmap, plotXScale, offset=(40, 120, 80, 10), zoom=1, startMb= None, endMb = None): im_drawer = ImageDraw.Draw(canvas) xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset plotWidth = canvas.size[0] - xLeftOffset - xRightOffset @@ -2095,16 +2095,16 @@ class DisplayMappingResults: if zoom == 2: fontZoom = 1.5 - INTERCROSS = (self.genotype.type=="intercross") + INTERCROSS = (self.genotype.type == "intercross") - #draw the LRS scale - #We first determine whether or not we are using a sliding scale. - #If so, we need to compute the maximum LRS value to determine where the max y-value should be, and call this LRS_LOD_Max. - #LRSTop is then defined to be above the LRS_LOD_Max by enough to add one additional LRSScale increment. - #if we are using a set-scale, then we set LRSTop to be the user's value, and LRS_LOD_Max doesn't matter. + # draw the LRS scale + # We first determine whether or not we are using a sliding scale. + # If so, we need to compute the maximum LRS value to determine where the max y-value should be, and call this LRS_LOD_Max. + # LRSTop is then defined to be above the LRS_LOD_Max by enough to add one additional LRSScale increment. + # if we are using a set-scale, then we set LRSTop to be the user's value, and LRS_LOD_Max doesn't matter. 
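# Simplified sketch of the scale selection described in the comments above
# (illustrative only, not taken from the patch): with no user-supplied maximum
# the axis top slides to the largest mapped score, capped at 460 (the MAXLRS
# limit referenced in this file); 4.61 ~= 2*ln(10) is the LRS<->LOD factor
# applied further down. Names and values here are made up.
def axis_maximum(scores, user_max=0.0, hard_cap=460.0):
    if user_max <= 0:                  # sliding scale: follow the data
        return min(max(scores), hard_cap)
    return user_max                    # set scale: trust the user's value

lrs_values = [12.3, 31.8, 19.4]        # hypothetical mapping results
lrs_axis_max = axis_maximum(lrs_values)        # 31.8
lod_axis_max = lrs_axis_max / 4.61             # ~6.9 when the y-axis is in LOD units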
- #ZS: This is a mess, but I don't know a better way to account for different mapping methods returning results in different formats + the option to change between LRS and LOD - if self.lrsMax <= 0: #sliding scale + # ZS: This is a mess, but I don't know a better way to account for different mapping methods returning results in different formats + the option to change between LRS and LOD + if self.lrsMax <= 0: # sliding scale if "lrs_value" in self.qtlresults[0]: LRS_LOD_Max = max([result['lrs_value'] for result in self.qtlresults]) if self.LRS_LOD == "LOD" or self.LRS_LOD == "-logP": @@ -2135,34 +2135,34 @@ class DisplayMappingResults: if self.permChecked and self.nperm > 0 and not self.multipleInterval: LRS_LOD_Max = max(self.significant, LRS_LOD_Max) - #genotype trait will give infinite LRS + # genotype trait will give infinite LRS LRS_LOD_Max = min(LRS_LOD_Max, webqtlConfig.MAXLRS) else: LRS_LOD_Max = self.lrsMax - #ZS: Needed to pass to genome browser + # ZS: Needed to pass to genome browser js_data = json.loads(self.js_data) if self.LRS_LOD == "LRS": - js_data['max_score'] = LRS_LOD_Max/4.61 + js_data['max_score'] = LRS_LOD_Max / 4.61 else: js_data['max_score'] = LRS_LOD_Max self.js_data = json.dumps(js_data) - LRSScaleFont=ImageFont.truetype(font=VERDANA_FILE, size=16*zoom) - LRSLODFont=ImageFont.truetype(font=VERDANA_FILE, size=int(18*zoom*1.5)) + LRSScaleFont = ImageFont.truetype(font=VERDANA_FILE, size=16 * zoom) + LRSLODFont = ImageFont.truetype(font=VERDANA_FILE, size=int(18 * zoom * 1.5)) yZero = yTopOffset + plotHeight # LRSHeightThresh = drawAreaHeight # AdditiveHeightThresh = drawAreaHeight/2 # DominanceHeightThresh = drawAreaHeight/2 if self.selectedChr == 1: - LRSHeightThresh = drawAreaHeight - yTopOffset + 30*(zoom - 1) - AdditiveHeightThresh = LRSHeightThresh/2 - DominanceHeightThresh = LRSHeightThresh/2 + LRSHeightThresh = drawAreaHeight - yTopOffset + 30 * (zoom - 1) + AdditiveHeightThresh = LRSHeightThresh / 2 + DominanceHeightThresh = LRSHeightThresh / 2 else: LRSHeightThresh = drawAreaHeight - AdditiveHeightThresh = drawAreaHeight/2 - DominanceHeightThresh = drawAreaHeight/2 + AdditiveHeightThresh = drawAreaHeight / 2 + DominanceHeightThresh = drawAreaHeight / 2 # LRSHeightThresh = (yZero - yTopOffset + 30*(zoom - 1)) # AdditiveHeightThresh = LRSHeightThresh/2 # DominanceHeightThresh = LRSHeightThresh/2 @@ -2178,7 +2178,7 @@ class DisplayMappingResults: LRSAxisList = Plot.frange(LRSScale, LRS_LOD_Max, LRSScale) - #ZS: Convert to int if all axis values are whole numbers + # ZS: Convert to int if all axis values are whole numbers all_int = True for item in LRSAxisList: if isinstance(item, int): @@ -2192,9 +2192,9 @@ class DisplayMappingResults: # else: # max_lrs_width = canvas.stringWidth("%2.1f" % LRS_LOD_Max, font=LRSScaleFont) + 30 - #draw the "LRS" or "LOD" string to the left of the axis - LRSScaleFont=ImageFont.truetype(font=VERDANA_FILE, size=16*zoom) - LRSLODFont=ImageFont.truetype(font=VERDANA_FILE, size=int(18*zoom*1.5)) + # draw the "LRS" or "LOD" string to the left of the axis + LRSScaleFont = ImageFont.truetype(font=VERDANA_FILE, size=16 * zoom) + LRSLODFont = ImageFont.truetype(font=VERDANA_FILE, size=int(18 * zoom * 1.5)) yZero = yTopOffset + plotHeight # TEXT_X_DISPLACEMENT = -20 @@ -2210,64 +2210,64 @@ class DisplayMappingResults: draw_rotated_text( canvas, text=self.LRS_LOD, font=LRSLODFont, xy=(xLeftOffset - im_drawer.textsize( - "999.99", font=LRSScaleFont)[0] - 15*(zoom-1) + TEXT_X_DISPLACEMENT, - yZero + TEXT_Y_DISPLACEMENT - 300*(zoom - 1)), + 
"999.99", font=LRSScaleFont)[0] - 15 * (zoom - 1) + TEXT_X_DISPLACEMENT, + yZero + TEXT_Y_DISPLACEMENT - 300 * (zoom - 1)), fill=BLACK, angle=90) for item in LRSAxisList: if LRS_LOD_Max == 0.0: LRS_LOD_Max = 0.000001 - yTopOffset + 30*(zoom - 1) - yLRS = yZero - (item/LRS_LOD_Max) * LRSHeightThresh - im_drawer.line(xy=((xLeftOffset, yLRS), (xLeftOffset-4, yLRS)), - fill=self.LRS_COLOR, width=1*zoom) + yTopOffset + 30 * (zoom - 1) + yLRS = yZero - (item / LRS_LOD_Max) * LRSHeightThresh + im_drawer.line(xy=((xLeftOffset, yLRS), (xLeftOffset - 4, yLRS)), + fill=self.LRS_COLOR, width=1 * zoom) if all_int: scaleStr = "%d" % item else: scaleStr = "%2.1f" % item - #Draw the LRS/LOD Y axis label + # Draw the LRS/LOD Y axis label TEXT_Y_DISPLACEMENT = -10 im_drawer.text( text=scaleStr, - xy=(xLeftOffset-4-im_drawer.textsize(scaleStr, font=LRSScaleFont)[0]-5, - yLRS+TEXT_Y_DISPLACEMENT), + xy=(xLeftOffset - 4 - im_drawer.textsize(scaleStr, font=LRSScaleFont)[0]-5, + yLRS + TEXT_Y_DISPLACEMENT), font=LRSScaleFont, fill=self.LRS_COLOR) if self.permChecked and self.nperm > 0 and not self.multipleInterval: - significantY = yZero - self.significant*LRSHeightThresh/LRS_LOD_Max - suggestiveY = yZero - self.suggestive*LRSHeightThresh/LRS_LOD_Max + significantY = yZero - self.significant * LRSHeightThresh / LRS_LOD_Max + suggestiveY = yZero - self.suggestive * LRSHeightThresh / LRS_LOD_Max # significantY = yZero - self.significant*LRSHeightThresh/LRSAxisList[-1] # suggestiveY = yZero - self.suggestive*LRSHeightThresh/LRSAxisList[-1] startPosX = xLeftOffset - #"Significant" and "Suggestive" Drawing Routine + # "Significant" and "Suggestive" Drawing Routine # ======= Draw the thick lines for "Significant" and "Suggestive" ===== (crowell: I tried to make the SNPs draw over these lines, but piddle wouldn't have it...) 
- #ZS: I don't know if what I did here with this inner function is clever or overly complicated, but it's the only way I could think of to avoid duplicating the code inside this function + # ZS: I don't know if what I did here with this inner function is clever or overly complicated, but it's the only way I could think of to avoid duplicating the code inside this function def add_suggestive_significant_lines_and_legend(start_pos_x, chr_length_dist): - rightEdge = int(start_pos_x + chr_length_dist*plotXScale - self.SUGGESTIVE_WIDTH/1.5) + rightEdge = int(start_pos_x + chr_length_dist * plotXScale - self.SUGGESTIVE_WIDTH / 1.5) im_drawer.line( - xy=((start_pos_x+self.SUGGESTIVE_WIDTH/1.5, suggestiveY), + xy=((start_pos_x + self.SUGGESTIVE_WIDTH / 1.5, suggestiveY), (rightEdge, suggestiveY)), - fill=self.SUGGESTIVE_COLOR, width=self.SUGGESTIVE_WIDTH*zoom - #,clipX=(xLeftOffset, xLeftOffset + plotWidth-2) + fill=self.SUGGESTIVE_COLOR, width=self.SUGGESTIVE_WIDTH * zoom + # ,clipX=(xLeftOffset, xLeftOffset + plotWidth-2) ) im_drawer.line( - xy=((start_pos_x+self.SUGGESTIVE_WIDTH/1.5, significantY), + xy=((start_pos_x + self.SUGGESTIVE_WIDTH / 1.5, significantY), (rightEdge, significantY)), fill=self.SIGNIFICANT_COLOR, - width=self.SIGNIFICANT_WIDTH*zoom - #, clipX=(xLeftOffset, xLeftOffset + plotWidth-2) + width=self.SIGNIFICANT_WIDTH * zoom + # , clipX=(xLeftOffset, xLeftOffset + plotWidth-2) ) - sugg_coords = "%d, %d, %d, %d" % (start_pos_x, suggestiveY-2, rightEdge + 2*zoom, suggestiveY+2) - sig_coords = "%d, %d, %d, %d" % (start_pos_x, significantY-2, rightEdge + 2*zoom, significantY+2) + sugg_coords = "%d, %d, %d, %d" % (start_pos_x, suggestiveY - 2, rightEdge + 2 * zoom, suggestiveY + 2) + sig_coords = "%d, %d, %d, %d" % (start_pos_x, significantY - 2, rightEdge + 2 * zoom, significantY + 2) if self.LRS_LOD == 'LRS': sugg_title = "Suggestive LRS = %0.2f" % self.suggestive sig_title = "Significant LRS = %0.2f" % self.significant else: - sugg_title = "Suggestive LOD = %0.2f" % (self.suggestive/4.61) - sig_title = "Significant LOD = %0.2f" % (self.significant/4.61) + sugg_title = "Suggestive LOD = %0.2f" % (self.suggestive / 4.61) + sig_title = "Significant LOD = %0.2f" % (self.significant / 4.61) Areas1 = HtmlGenWrapper.create_area_tag( shape='rect', coords=sugg_coords, @@ -2279,7 +2279,7 @@ class DisplayMappingResults: gifmap.append(Areas1) gifmap.append(Areas2) - start_pos_x += (chr_length_dist+self.GraphInterval)*plotXScale + start_pos_x += (chr_length_dist + self.GraphInterval) * plotXScale return start_pos_x for i, _chr in enumerate(self.genotype): @@ -2306,7 +2306,7 @@ class DisplayMappingResults: AdditiveCoordXY = [] DominanceCoordXY = [] - symbolFont = ImageFont.truetype(font=FNT_BS_FILE, size=5) #ZS: For Manhattan Plot + symbolFont = ImageFont.truetype(font=FNT_BS_FILE, size=5) # ZS: For Manhattan Plot previous_chr = 1 previous_chr_as_int = 0 @@ -2332,128 +2332,128 @@ class DisplayMappingResults: minusColor = self.ADDITIVE_COLOR_NEGATIVE for k, aPoint in enumerate(AdditiveCoordXY): if k > 0: - Xc0, Yc0 = AdditiveCoordXY[k-1] + Xc0, Yc0 = AdditiveCoordXY[k - 1] Xc, Yc = aPoint - if (Yc0-yZero)*(Yc-yZero) < 0: - if Xc == Xc0: #genotype , locus distance is 0 + if (Yc0 - yZero) * (Yc-yZero) < 0: + if Xc == Xc0: # genotype , locus distance is 0 Xcm = Xc else: - Xcm = (yZero-Yc0)/((Yc-Yc0)/(Xc-Xc0)) +Xc0 + Xcm = (yZero - Yc0) / ((Yc - Yc0) / (Xc - Xc0)) + Xc0 if Yc0 < yZero: im_drawer.line( xy=((Xc0, Yc0), (Xcm, yZero)), fill=plusColor, width=lineWidth - #, clipX=(xLeftOffset, 
xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) im_drawer.line( - xy=((Xcm, yZero), (Xc, yZero-(Yc-yZero))), + xy=((Xcm, yZero), (Xc, yZero - (Yc - yZero))), fill=minusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) else: im_drawer.line( - xy=((Xc0, yZero-(Yc0-yZero)), + xy=((Xc0, yZero - (Yc0 - yZero)), (Xcm, yZero)), fill=minusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) im_drawer.line( xy=((Xcm, yZero), (Xc, Yc)), fill=plusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) - elif (Yc0-yZero)*(Yc-yZero) > 0: + elif (Yc0 - yZero) * (Yc-yZero) > 0: if Yc < yZero: im_drawer.line( xy=((Xc0, Yc0), (Xc, Yc)), fill=plusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) else: im_drawer.line( - xy=((Xc0, yZero - (Yc0-yZero)), - (Xc, yZero - (Yc-yZero))), + xy=((Xc0, yZero - (Yc0 - yZero)), + (Xc, yZero - (Yc - yZero))), fill=minusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) else: - minYc = min(Yc-yZero, Yc0-yZero) + minYc = min(Yc - yZero, Yc0 - yZero) if minYc < 0: im_drawer.line( xy=((Xc0, Yc0), (Xc, Yc)), fill=plusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) else: im_drawer.line( - xy=((Xc0, yZero - (Yc0-yZero)), - (Xc, yZero - (Yc-yZero))), + xy=((Xc0, yZero - (Yc0 - yZero)), + (Xc, yZero - (Yc - yZero))), fill=minusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) LRSCoordXY = [] AdditiveCoordXY = [] previous_chr = qtlresult['chr'] previous_chr_as_int += 1 - newStartPosX = (self.ChrLengthDistList[previous_chr_as_int - 1]+self.GraphInterval)*plotXScale + newStartPosX = (self.ChrLengthDistList[previous_chr_as_int - 1] + self.GraphInterval) * plotXScale if newStartPosX != oldStartPosX: startPosX += newStartPosX oldStartPosX = newStartPosX - #ZS: This is because the chromosome value stored in qtlresult['chr'] can be (for example) either X or 20 depending upon the mapping method/scale used + # ZS: This is because the chromosome value stored in qtlresult['chr'] can be (for example) either X or 20 depending upon the mapping method/scale used this_chr = str(self.ChrList[self.selectedChr][0]) if self.plotScale != "physic": - this_chr = str(self.ChrList[self.selectedChr][1]+1) + this_chr = str(self.ChrList[self.selectedChr][1] + 1) if self.selectedChr == -1 or str(qtlresult['chr']) == this_chr: if self.plotScale != "physic" and self.mapping_method == "reaper" and not self.manhattan_plot: - Xc = startPosX + (qtlresult['cM']-startMb)*plotXScale + Xc = startPosX + (qtlresult['cM'] - startMb) * plotXScale if hasattr(self.genotype, "filler"): if self.genotype.filler: if self.selectedChr != -1: start_cm = self.genotype[self.selectedChr - 1][0].cM - Xc = startPosX + (qtlresult['Mb'] - start_cm)*plotXScale + Xc = startPosX + (qtlresult['Mb'] - start_cm) * plotXScale else: start_cm = self.genotype[previous_chr_as_int][0].cM - Xc = startPosX + ((qtlresult['Mb']-start_cm-startMb)*plotXScale)*(((qtlresult['Mb']-start_cm-startMb)*plotXScale)/((qtlresult['Mb']-start_cm-startMb+self.GraphInterval)*plotXScale)) + Xc = startPosX + ((qtlresult['Mb'] - 
start_cm - startMb) * plotXScale) * (((qtlresult['Mb'] - start_cm - startMb) * plotXScale) / ((qtlresult['Mb'] - start_cm - startMb + self.GraphInterval) * plotXScale)) else: - Xc = startPosX + (qtlresult['Mb']-startMb)*plotXScale + Xc = startPosX + (qtlresult['Mb'] - startMb) * plotXScale # updated by NL 06-18-2011: # fix the over limit LRS graph issue since genotype trait may give infinite LRS; # for any lrs is over than 460(LRS max in this system), it will be reset to 460 - yLRS = yZero - (item/LRS_LOD_Max) * LRSHeightThresh + yLRS = yZero - (item / LRS_LOD_Max) * LRSHeightThresh if 'lrs_value' in qtlresult: if self.LRS_LOD == "LOD" or self.LRS_LOD == "-logP": - if qtlresult['lrs_value'] > 460 or qtlresult['lrs_value']=='inf': + if qtlresult['lrs_value'] > 460 or qtlresult['lrs_value'] == 'inf': #Yc = yZero - webqtlConfig.MAXLRS*LRSHeightThresh/(LRSAxisList[-1]*self.LODFACTOR) - Yc = yZero - webqtlConfig.MAXLRS*LRSHeightThresh/(LRS_LOD_Max*self.LODFACTOR) + Yc = yZero - webqtlConfig.MAXLRS * LRSHeightThresh / (LRS_LOD_Max * self.LODFACTOR) else: #Yc = yZero - qtlresult['lrs_value']*LRSHeightThresh/(LRSAxisList[-1]*self.LODFACTOR) - Yc = yZero - qtlresult['lrs_value']*LRSHeightThresh/(LRS_LOD_Max*self.LODFACTOR) + Yc = yZero - qtlresult['lrs_value'] * LRSHeightThresh / (LRS_LOD_Max * self.LODFACTOR) else: - if qtlresult['lrs_value'] > 460 or qtlresult['lrs_value']=='inf': + if qtlresult['lrs_value'] > 460 or qtlresult['lrs_value'] == 'inf': #Yc = yZero - webqtlConfig.MAXLRS*LRSHeightThresh/LRSAxisList[-1] - Yc = yZero - webqtlConfig.MAXLRS*LRSHeightThresh/LRS_LOD_Max + Yc = yZero - webqtlConfig.MAXLRS * LRSHeightThresh / LRS_LOD_Max else: #Yc = yZero - qtlresult['lrs_value']*LRSHeightThresh/LRSAxisList[-1] - Yc = yZero - qtlresult['lrs_value']*LRSHeightThresh/LRS_LOD_Max + Yc = yZero - qtlresult['lrs_value'] * LRSHeightThresh / LRS_LOD_Max else: - if qtlresult['lod_score'] > 100 or qtlresult['lod_score']=='inf': + if qtlresult['lod_score'] > 100 or qtlresult['lod_score'] == 'inf': #Yc = yZero - webqtlConfig.MAXLRS*LRSHeightThresh/LRSAxisList[-1] - Yc = yZero - webqtlConfig.MAXLRS*LRSHeightThresh/LRS_LOD_Max + Yc = yZero - webqtlConfig.MAXLRS * LRSHeightThresh / LRS_LOD_Max else: if self.LRS_LOD == "LRS": #Yc = yZero - qtlresult['lod_score']*self.LODFACTOR*LRSHeightThresh/LRSAxisList[-1] - Yc = yZero - qtlresult['lod_score']*self.LODFACTOR*LRSHeightThresh/LRS_LOD_Max + Yc = yZero - qtlresult['lod_score'] * self.LODFACTOR * LRSHeightThresh / LRS_LOD_Max else: #Yc = yZero - qtlresult['lod_score']*LRSHeightThresh/LRSAxisList[-1] - Yc = yZero - qtlresult['lod_score']*LRSHeightThresh/LRS_LOD_Max + Yc = yZero - qtlresult['lod_score'] * LRSHeightThresh / LRS_LOD_Max if self.manhattan_plot == True: if self.color_scheme == "single": @@ -2469,8 +2469,8 @@ class DisplayMappingResults: im_drawer.text( text="5", xy=( - Xc-im_drawer.textsize("5", font=symbolFont)[0]/2+1, - Yc-4), + Xc - im_drawer.textsize("5", font=symbolFont)[0] / 2 + 1, + Yc - 4), fill=point_color, font=symbolFont) else: LRSCoordXY.append((Xc, Yc)) @@ -2478,7 +2478,7 @@ class DisplayMappingResults: if not self.multipleInterval and self.additiveChecked: if additiveMax == 0.0: additiveMax = 0.000001 - Yc = yZero - qtlresult['additive']*AdditiveHeightThresh/additiveMax + Yc = yZero - qtlresult['additive'] * AdditiveHeightThresh / additiveMax AdditiveCoordXY.append((Xc, Yc)) m += 1 @@ -2497,64 +2497,64 @@ class DisplayMappingResults: minusColor = self.ADDITIVE_COLOR_NEGATIVE for k, aPoint in enumerate(AdditiveCoordXY): if k > 0: - Xc0, 
Yc0 = AdditiveCoordXY[k-1] + Xc0, Yc0 = AdditiveCoordXY[k - 1] Xc, Yc = aPoint - if (Yc0-yZero)*(Yc-yZero) < 0: - if Xc == Xc0: #genotype , locus distance is 0 + if (Yc0 - yZero) * (Yc-yZero) < 0: + if Xc == Xc0: # genotype , locus distance is 0 Xcm = Xc else: - Xcm = (yZero-Yc0)/((Yc-Yc0)/(Xc-Xc0)) +Xc0 + Xcm = (yZero - Yc0) / ((Yc - Yc0) / (Xc - Xc0)) + Xc0 if Yc0 < yZero: im_drawer.line( xy=((Xc0, Yc0), (Xcm, yZero)), fill=plusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) im_drawer.line( - xy=((Xcm, yZero), (Xc, yZero-(Yc-yZero))), + xy=((Xcm, yZero), (Xc, yZero - (Yc - yZero))), fill=minusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) else: im_drawer.line( - xy=((Xc0, yZero - (Yc0-yZero)), + xy=((Xc0, yZero - (Yc0 - yZero)), (Xcm, yZero)), fill=minusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) im_drawer.line( xy=((Xcm, yZero), (Xc, Yc)), fill=plusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) - elif (Yc0-yZero)*(Yc-yZero) > 0: + elif (Yc0 - yZero) * (Yc-yZero) > 0: if Yc < yZero: im_drawer.line( xy=((Xc0, Yc0), (Xc, Yc)), fill=plusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) else: im_drawer.line( - xy=((Xc0, yZero-(Yc0-yZero)), - (Xc, yZero-(Yc-yZero))), + xy=((Xc0, yZero - (Yc0 - yZero)), + (Xc, yZero - (Yc - yZero))), fill=minusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) else: - minYc = min(Yc-yZero, Yc0-yZero) + minYc = min(Yc - yZero, Yc0 - yZero) if minYc < 0: im_drawer.line( xy=((Xc0, Yc0), (Xc, Yc)), fill=plusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) else: im_drawer.line( - xy=((Xc0, yZero - (Yc0-yZero)), - (Xc, yZero - (Yc-yZero))), + xy=((Xc0, yZero - (Yc0 - yZero)), + (Xc, yZero - (Yc - yZero))), fill=minusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) if not self.multipleInterval and INTERCROSS and self.dominanceChecked: @@ -2562,114 +2562,114 @@ class DisplayMappingResults: minusColor = self.DOMINANCE_COLOR_NEGATIVE for k, aPoint in enumerate(DominanceCoordXY): if k > 0: - Xc0, Yc0 = DominanceCoordXY[k-1] + Xc0, Yc0 = DominanceCoordXY[k - 1] Xc, Yc = aPoint - if (Yc0-yZero)*(Yc-yZero) < 0: - if Xc == Xc0: #genotype , locus distance is 0 + if (Yc0 - yZero) * (Yc-yZero) < 0: + if Xc == Xc0: # genotype , locus distance is 0 Xcm = Xc else: - Xcm = (yZero-Yc0)/((Yc-Yc0)/(Xc-Xc0)) +Xc0 + Xcm = (yZero - Yc0) / ((Yc - Yc0) / (Xc - Xc0)) + Xc0 if Yc0 < yZero: im_drawer.line( xy=((Xc0, Yc0), (Xcm, yZero)), fill=plusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) im_drawer.line( - xy=((Xcm, yZero), (Xc, yZero-(Yc-yZero))), + xy=((Xcm, yZero), (Xc, yZero - (Yc - yZero))), fill=minusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) else: im_drawer.line( - xy=((Xc0, yZero - (Yc0-yZero)), (Xcm, yZero)), + xy=((Xc0, yZero - (Yc0 - yZero)), (Xcm, yZero)), fill=minusColor, width=lineWidth - #, 
clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) im_drawer.line( xy=((Xcm, yZero), (Xc, Yc)), fill=plusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) - elif (Yc0-yZero)*(Yc-yZero) > 0: + elif (Yc0 - yZero) * (Yc-yZero) > 0: if Yc < yZero: im_drawer.line( xy=((Xc0, Yc0), (Xc, Yc)), fill=plusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) else: im_drawer.line( - xy=((Xc0, yZero - (Yc0-yZero)), - (Xc, yZero - (Yc-yZero))), + xy=((Xc0, yZero - (Yc0 - yZero)), + (Xc, yZero - (Yc - yZero))), fill=minusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) else: - minYc = min(Yc-yZero, Yc0-yZero) + minYc = min(Yc - yZero, Yc0 - yZero) if minYc < 0: im_drawer.line( xy=((Xc0, Yc0), (Xc, Yc)), fill=plusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) else: im_drawer.line( - xy=((Xc0, yZero - (Yc0-yZero)), - (Xc, yZero - (Yc-yZero))), fill=minusColor, + xy=((Xc0, yZero - (Yc0 - yZero)), + (Xc, yZero - (Yc - yZero))), fill=minusColor, width=lineWidth - #, clipX=(xLeftOffset, xLeftOffset + plotWidth) + # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) - ###draw additive scale + # draw additive scale if not self.multipleInterval and self.additiveChecked: - additiveScaleFont=ImageFont.truetype(font=VERDANA_FILE, size=16*zoom) + additiveScaleFont = ImageFont.truetype(font=VERDANA_FILE, size=16 * zoom) additiveScale = Plot.detScaleOld(0, additiveMax) - additiveStep = (additiveScale[1]-additiveScale[0])/additiveScale[2] + additiveStep = (additiveScale[1] - additiveScale[0]) / additiveScale[2] additiveAxisList = Plot.frange(0, additiveScale[1], additiveStep) - addPlotScale = AdditiveHeightThresh/additiveMax + addPlotScale = AdditiveHeightThresh / additiveMax TEXT_Y_DISPLACEMENT = -8 additiveAxisList.append(additiveScale[1]) for item in additiveAxisList: - additiveY = yZero - item*addPlotScale + additiveY = yZero - item * addPlotScale im_drawer.line( xy=((xLeftOffset + plotWidth, additiveY), - (xLeftOffset+4+ plotWidth, additiveY)), - fill=self.ADDITIVE_COLOR_POSITIVE, width=1*zoom) + (xLeftOffset + 4 + plotWidth, additiveY)), + fill=self.ADDITIVE_COLOR_POSITIVE, width=1 * zoom) scaleStr = "%2.3f" % item im_drawer.text( text=scaleStr, - xy=(xLeftOffset + plotWidth +6, additiveY+TEXT_Y_DISPLACEMENT), + xy= (xLeftOffset + plotWidth + 6, additiveY + TEXT_Y_DISPLACEMENT), font=additiveScaleFont, fill=self.ADDITIVE_COLOR_POSITIVE) im_drawer.line( - xy=((xLeftOffset+plotWidth, additiveY), - (xLeftOffset+plotWidth, yZero)), - fill=self.ADDITIVE_COLOR_POSITIVE, width=1*zoom) + xy=((xLeftOffset + plotWidth, additiveY), + (xLeftOffset + plotWidth, yZero)), + fill=self.ADDITIVE_COLOR_POSITIVE, width=1 * zoom) im_drawer.line( - xy=((xLeftOffset, yZero), (xLeftOffset, yTopOffset + 30*(zoom - 1))), - fill=self.LRS_COLOR, width=1*zoom) #the blue line running up the y axis + xy=((xLeftOffset, yZero), (xLeftOffset, yTopOffset + 30 * (zoom - 1))), + fill=self.LRS_COLOR, width=1 * zoom) # the blue line running up the y axis - def drawGraphBackground(self, canvas, gifmap, offset= (80, 120, 80, 50), zoom = 1, startMb = None, endMb = None): - ##conditions - ##multiple Chromosome view - ##single Chromosome Physical - ##single Chromosome Genetic + def drawGraphBackground(self, canvas, 
gifmap, offset=(80, 120, 80, 50), zoom=1, startMb= None, endMb = None): + # conditions + # multiple Chromosome view + # single Chromosome Physical + # single Chromosome Genetic im_drawer = ImageDraw.Draw(canvas) xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset plotWidth = canvas.size[0] - xLeftOffset - xRightOffset plotHeight = canvas.size[1] - yTopOffset - yBottomOffset - yBottom = yTopOffset+plotHeight + yBottom = yTopOffset + plotHeight fontZoom = zoom if zoom == 2: fontZoom = 1.5 yTopOffset += 30 - #calculate plot scale + # calculate plot scale if self.plotScale != 'physic': self.ChrLengthDistList = self.ChrLengthCMList drawRegionDistance = self.ChrLengthCMSum @@ -2677,10 +2677,10 @@ class DisplayMappingResults: self.ChrLengthDistList = self.ChrLengthMbList drawRegionDistance = self.ChrLengthMbSum - if self.selectedChr > -1: #single chromosome view - spacingAmt = plotWidth/13.5 + if self.selectedChr > -1: # single chromosome view + spacingAmt = plotWidth / 13.5 i = 0 - for startPix in Plot.frange(xLeftOffset, xLeftOffset+plotWidth, spacingAmt): + for startPix in Plot.frange(xLeftOffset, xLeftOffset + plotWidth, spacingAmt): if (i % 2 == 0): theBackColor = self.GRAPH_BACK_DARK_COLOR else: @@ -2688,25 +2688,25 @@ class DisplayMappingResults: i += 1 im_drawer.rectangle( [(startPix, yTopOffset), - (min(startPix+spacingAmt, xLeftOffset+plotWidth), yBottom)], + (min(startPix + spacingAmt, xLeftOffset + plotWidth), yBottom)], outline=theBackColor, fill=theBackColor) drawRegionDistance = self.ChrLengthDistList[self.ChrList[self.selectedChr][1]] self.ChrLengthDistList = [drawRegionDistance] if self.plotScale == 'physic': - plotXScale = plotWidth / (endMb-startMb) + plotXScale = plotWidth / (endMb - startMb) else: plotXScale = plotWidth / drawRegionDistance - else: #multiple chromosome view - plotXScale = plotWidth / ((len(self.genotype)-1)*self.GraphInterval + drawRegionDistance) + else: # multiple chromosome view + plotXScale = plotWidth / ((len(self.genotype) - 1) * self.GraphInterval + drawRegionDistance) startPosX = xLeftOffset if fontZoom == 1.5: chrFontZoom = 2 else: chrFontZoom = 1 - chrLabelFont=ImageFont.truetype(font=VERDANA_FILE, size=24*chrFontZoom) + chrLabelFont = ImageFont.truetype(font=VERDANA_FILE, size=24 * chrFontZoom) for i, _chr in enumerate(self.genotype): if (i % 2 == 0): @@ -2714,23 +2714,23 @@ class DisplayMappingResults: else: theBackColor = self.GRAPH_BACK_LIGHT_COLOR - #draw the shaded boxes and the sig/sug thick lines + # draw the shaded boxes and the sig/sug thick lines im_drawer.rectangle( ((startPosX, yTopOffset), - (startPosX + self.ChrLengthDistList[i]*plotXScale, yBottom)), + (startPosX + self.ChrLengthDistList[i] * plotXScale, yBottom)), outline=GAINSBORO, fill=theBackColor) chrNameWidth, chrNameHeight = im_drawer.textsize(_chr.name, font=chrLabelFont) - chrStartPix = startPosX + (self.ChrLengthDistList[i]*plotXScale -chrNameWidth)/2 - chrEndPix = startPosX + (self.ChrLengthDistList[i]*plotXScale +chrNameWidth)/2 + chrStartPix = startPosX + (self.ChrLengthDistList[i] * plotXScale - chrNameWidth) / 2 + chrEndPix = startPosX + (self.ChrLengthDistList[i] * plotXScale + chrNameWidth) / 2 TEXT_Y_DISPLACEMENT = 0 im_drawer.text(xy=(chrStartPix, yTopOffset + TEXT_Y_DISPLACEMENT), text=_chr.name, font=chrLabelFont, fill=BLACK) - COORDS = "%d,%d,%d,%d" %(chrStartPix, yTopOffset, chrEndPix, yTopOffset +20) + COORDS = "%d,%d,%d,%d" % (chrStartPix, yTopOffset, chrEndPix, yTopOffset + 20) - #add by NL 09-03-2010 + # add by NL 09-03-2010 HREF = 
"javascript:chrView(%d,%s);" % (i, self.ChrLengthMbList) #HREF = "javascript:changeView(%d,%s);" % (i,self.ChrLengthMbList) Areas = HtmlGenWrapper.create_area_tag( @@ -2738,7 +2738,7 @@ class DisplayMappingResults: coords=COORDS, href=HREF) gifmap.append(Areas) - startPosX += (self.ChrLengthDistList[i]+self.GraphInterval)*plotXScale + startPosX += (self.ChrLengthDistList[i] + self.GraphInterval) * plotXScale return plotXScale @@ -2748,15 +2748,15 @@ class DisplayMappingResults: ######################################### myCanvas = Image.new("RGBA", size=(500, 300)) if 'lod_score' in self.qtlresults[0] and self.LRS_LOD == "LRS": - perm_output = [value*4.61 for value in self.perm_output] + perm_output = [value * 4.61 for value in self.perm_output] elif 'lod_score' not in self.qtlresults[0] and self.LRS_LOD == "LOD": - perm_output = [value/4.61 for value in self.perm_output] + perm_output = [value / 4.61 for value in self.perm_output] else: perm_output = self.perm_output - filename= webqtlUtil.genRandStr("Reg_") + filename = webqtlUtil.genRandStr("Reg_") Plot.plotBar(myCanvas, perm_output, XLabel=self.LRS_LOD, YLabel='Frequency', title=' Histogram of Permutation Test') - myCanvas.save("{}.gif".format(GENERATED_IMAGE_DIR+filename), + myCanvas.save("{}.gif".format(GENERATED_IMAGE_DIR + filename), format='gif') return filename @@ -2821,18 +2821,18 @@ class DisplayMappingResults: tableIterationsCnt = tableIterationsCnt + 1 - this_row = [] #container for the cells of each row + this_row = [] # container for the cells of each row selectCheck = HtmlGenWrapper.create_input_tag( type_="checkbox", name="selectCheck", value=theGO["GeneSymbol"], Class="checkbox trait_checkbox") # checkbox for each row - geneLength = (theGO["TxEnd"] - theGO["TxStart"])*1000.0 - tenPercentLength = geneLength*0.0001 + geneLength = (theGO["TxEnd"] - theGO["TxStart"]) * 1000.0 + tenPercentLength = geneLength * 0.0001 txStart = theGO["TxStart"] txEnd = theGO["TxEnd"] - theGO["snpDensity"] = theGO["snpCount"]/geneLength + theGO["snpDensity"] = theGO["snpCount"] / geneLength if self.ALEX_DEBUG_BOOL_PRINT_GENE_LIST: geneIdString = 'http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?db=gene&cmd=Retrieve&dopt=Graphics&list_uids=%s' % theGO["GeneID"] @@ -2844,16 +2844,16 @@ class DisplayMappingResults: f"end={theGO['TxEnd']}&" f"geneName={theGO['GeneSymbol']}&" f"s1={self.diffCol[0]}&s2=%d"), - str(theGO["snpCount"]) # The text to display + str(theGO["snpCount"]) # The text to display ) snpString.set_blank_target() snpString.set_attribute("class", "normalsize") else: snpString = 0 - mouseStartString = "http://genome.ucsc.edu/cgi-bin/hgTracks?clade=vertebrate&org=Mouse&db=mm9&position=chr" + theGO["Chromosome"] + "%3A" + str(int(theGO["TxStart"] * 1000000.0)) + "-" + str(int(theGO["TxEnd"]*1000000.0)) +"&pix=620&Submit=submit" + mouseStartString = "http://genome.ucsc.edu/cgi-bin/hgTracks?clade=vertebrate&org=Mouse&db=mm9&position=chr" + theGO["Chromosome"] + "%3A" + str(int(theGO["TxStart"] * 1000000.0)) + "-" + str(int(theGO["TxEnd"] * 1000000.0)) + "&pix=620&Submit=submit" - #the chromosomes for human 1 are 1qXX.XX + # the chromosomes for human 1 are 1qXX.XX if theGO['humanGene']: if theGO['humanGene']["TxStart"] == '': humanStartDisplay = "" @@ -2863,20 +2863,20 @@ class DisplayMappingResults: humanChr = theGO['humanGene']["Chromosome"] humanTxStart = theGO['humanGene']["TxStart"] - humanStartString = "http://genome.ucsc.edu/cgi-bin/hgTracks?clade=vertebrate&org=Human&db=hg17&position=chr%s:%d-%d" % (humanChr, 
int(1000000*theGO['humanGene']["TxStart"]), int(1000000*theGO['humanGene']["TxEnd"])) + humanStartString = "http://genome.ucsc.edu/cgi-bin/hgTracks?clade=vertebrate&org=Human&db=hg17&position=chr%s:%d-%d" % (humanChr, int(1000000 * theGO['humanGene']["TxStart"]), int(1000000 * theGO['humanGene']["TxEnd"])) else: humanStartString = humanChr = humanStartDisplay = "--" geneDescription = theGO["GeneDescription"] if len(geneDescription) > 70: - geneDescription = geneDescription[:70]+"..." + geneDescription = geneDescription[:70] + "..." if theGO["snpDensity"] < 0.000001: snpDensityStr = "0" else: snpDensityStr = "%0.6f" % theGO["snpDensity"] - avgExpr = [] #theGO["avgExprVal"] + avgExpr = [] # theGO["avgExprVal"] if avgExpr in ([], None): avgExpr = "--" else: @@ -2905,8 +2905,8 @@ class DisplayMappingResults: str(HtmlGenWrapper.create_link_tag( "javascript:rangeView('{}', {:f}, {:f})".format( str(chr_as_int), - txStart-tenPercentLength, - txEnd+tenPercentLength), + txStart - tenPercentLength, + txEnd + tenPercentLength), "{:.3f}".format(geneLength))), snpString, snpDensityStr, @@ -2931,8 +2931,8 @@ class DisplayMappingResults: str(HtmlGenWrapper.create_link_tag( "javascript:rangeView('{}', {:f}, {:f})".format( str(chr_as_int), - txStart-tenPercentLength, - txEnd+tenPercentLength), + txStart - tenPercentLength, + txEnd + tenPercentLength), "{:.3f}".format(geneLength))), snpString, snpDensityStr, @@ -2969,7 +2969,7 @@ class DisplayMappingResults: chr_as_int = int(theGO["Chromosome"]) - 1 geneLength = (float(theGO["TxEnd"]) - float(theGO["TxStart"])) - geneLengthURL = "javascript:rangeView('%s', %f, %f)" % (theGO["Chromosome"], float(theGO["TxStart"])-(geneLength*0.1), float(theGO["TxEnd"])+(geneLength*0.1)) + geneLengthURL = "javascript:rangeView('%s', %f, %f)" % (theGO["Chromosome"], float(theGO["TxStart"]) - (geneLength * 0.1), float(theGO["TxEnd"]) + (geneLength * 0.1)) avgExprVal = [] if avgExprVal != "" and avgExprVal: @@ -2977,14 +2977,14 @@ class DisplayMappingResults: else: avgExprVal = "" - #Mouse Gene + # Mouse Gene if theGO['mouseGene']: mouseChr = theGO['mouseGene']["Chromosome"] mouseTxStart = "%0.6f" % theGO['mouseGene']["TxStart"] else: mouseChr = mouseTxStart = "" - #the chromosomes for human 1 are 1qXX.XX + # the chromosomes for human 1 are 1qXX.XX if theGO['humanGene']: humanChr = theGO['humanGene']["Chromosome"] humanTxStart = "%0.6f" % theGO['humanGene']["TxStart"] @@ -2996,12 +2996,12 @@ class DisplayMappingResults: geneDesc = "" this_row = [selectCheck.__str__(), - str(gIndex+1), + str(gIndex + 1), geneSymbolNCBI, "%0.6f" % theGO["TxStart"], str(HtmlGenWrapper.create_link_tag( geneLengthURL, - "{:.3f}".format(geneLength*1000.0))), + "{:.3f}".format(geneLength * 1000.0))), avgExprVal, mouseChr, mouseTxStart, @@ -3013,7 +3013,7 @@ class DisplayMappingResults: return gene_table_body - def getLiteratureCorrelation(cursor,geneId1=None,geneId2=None): + def getLiteratureCorrelation(cursor, geneId1=None, geneId2=None): if not geneId1 or not geneId2: return None if geneId1 == geneId2: @@ -3025,9 +3025,9 @@ class DisplayMappingResults: query = 'SELECT Value FROM LCorrRamin3 WHERE GeneId1 = %s and GeneId2 = %s' for x, y in [(geneId1, geneId2), (geneId2, geneId1)]: cursor.execute(query, (x, y)) - lCorr = cursor.fetchone() + lCorr = cursor.fetchone() if lCorr: lCorr = lCorr[0] break - except: raise #lCorr = None + except: raise # lCorr = None return lCorr diff --git a/wqflask/wqflask/marker_regression/gemma_mapping.py b/wqflask/wqflask/marker_regression/gemma_mapping.py index 
06c9300a..68689104 100644
--- a/wqflask/wqflask/marker_regression/gemma_mapping.py
+++ b/wqflask/wqflask/marker_regression/gemma_mapping.py
@@ -149,7 +149,7 @@ def gen_covariates_file(this_dataset, covariates, samples):
        dataset_name = covariate.split(":")[1]
        if dataset_name == "Temp":
            temp_group = trait_name.split("_")[2]
-           dataset_ob = create_dataset(dataset_name = "Temp", dataset_type = "Temp", group_name = temp_group)
+           dataset_ob = create_dataset(dataset_name="Temp", dataset_type="Temp", group_name = temp_group)
        else:
            dataset_ob = create_dataset(covariate.split(":")[1])
        trait_ob = create_trait(dataset=dataset_ob,
diff --git a/wqflask/wqflask/marker_regression/plink_mapping.py b/wqflask/wqflask/marker_regression/plink_mapping.py
index 5d675c38..e6c78536 100644
--- a/wqflask/wqflask/marker_regression/plink_mapping.py
+++ b/wqflask/wqflask/marker_regression/plink_mapping.py
@@ -6,7 +6,7 @@ from utility import webqtlUtil
 from utility.tools import flat_files, PLINK_COMMAND

 import utility.logger
-logger = utility.logger.getLogger(__name__ )
+logger = utility.logger.getLogger(__name__)

 def run_plink(this_trait, dataset, species, vals, maf):
     plink_output_filename = webqtlUtil.genRandStr(f"{dataset.group.name}_{this_trait.name}_")
@@ -34,7 +34,7 @@ def gen_pheno_txt_file(this_dataset, vals):
        split_line = line.split()
        current_file_data.append(split_line)

-    with open(f"{flat_files('mapping')}/{this_dataset.group.name}.fam","w") as outfile:
+    with open(f"{flat_files('mapping')}/{this_dataset.group.name}.fam", "w") as outfile:
        for i, line in enumerate(current_file_data):
            if vals[i] == "x":
                this_val = -9
@@ -42,7 +42,7 @@
                this_val = vals[i]
            outfile.write("0 " + line[1] + " " + line[2] + " " + line[3] + " " + line[4] + " " + str(this_val) + "\n")

-def gen_pheno_txt_file_plink(this_trait, dataset, vals, pheno_filename = ''):
+def gen_pheno_txt_file_plink(this_trait, dataset, vals, pheno_filename=''):
    ped_sample_list = get_samples_from_ped_file(dataset)
    output_file = open(f"{TMPDIR}{pheno_filename}.txt", "wb")
    header = f"FID\tIID\t{this_trait.name}\n"
@@ -50,7 +50,7 @@
    new_value_list = []

-    #if valueDict does not include some strain, value will be set to -9999 as missing value
+    # if valueDict does not include some strain, value will be set to -9999 as missing value
    for i, sample in enumerate(ped_sample_list):
        try:
            value = vals[i]
@@ -63,11 +63,11 @@
    new_line = ''
    for i, sample in enumerate(ped_sample_list):
-        j = i+1
+        j = i + 1
        value = new_value_list[i]
        new_line += f"{sample}\t{sample}\t{value}\n"

-        if j%1000 == 0:
+        if j % 1000 == 0:
            output_file.write(newLine)
            new_line = ''

@@ -78,9 +78,9 @@
 # get strain name from ped file in order
 def get_samples_from_ped_file(dataset):
-    ped_file= open(f"{flat_files('mapping')}{dataset.group.name}.ped","r")
+    ped_file = open(f"{flat_files('mapping')}{dataset.group.name}.ped", "r")
    line = ped_file.readline()
-    sample_list=[]
+    sample_list = []

    while line:
        lineList = line.strip().split('\t')
@@ -94,24 +94,24 @@
    return sample_list

 def parse_plink_output(output_filename, species):
-    plink_results={}
+    plink_results = {}

    threshold_p_value = 1
-    result_fp = open(f"{TMPDIR}{output_filename}.qassoc","rb")
+    result_fp = open(f"{TMPDIR}{output_filename}.qassoc", "rb")

    line = result_fp.readline()

-    value_list = [] # initialize value list, this list will include snp, bp and pvalue info
+    value_list = []  # initialize value list, this list will include snp, bp and pvalue info
    p_value_dict = {}
    count = 0

    while line:
-        #convert line from str to list
+        # convert line from str to list
        line_list = build_line_list(line=line)

        # only keep the records whose chromosome name is in db
-        if int(line_list[0]) in species.chromosomes.chromosomes and line_list[-1] and line_list[-1].strip()!='NA':
+        if int(line_list[0]) in species.chromosomes.chromosomes and line_list[-1] and line_list[-1].strip() != 'NA':

            chr_name = species.chromosomes.chromosomes[int(line_list[0])]
            snp = line_list[1]
@@ -125,7 +125,7 @@ def parse_plink_output(output_filename, species):
                value_list = plink_results[chr_name]

                # pvalue range is [0,1]
-                if threshold_p_value >=0 and threshold_p_value <= 1:
+                if threshold_p_value >= 0 and threshold_p_value <= 1:
                    if p_value < threshold_p_value:
                        value_list.append((snp, BP, p_value))
                        count += 1
@@ -141,7 +141,7 @@
                if value_list:
                    plink_results[chr_name] = value_list
-                    value_list=[]
+                    value_list = []

            line = result_fp.readline()
        else:
@@ -155,8 +155,8 @@
 # output: lineList list
 #######################################################
 def build_line_list(line=""):
-    line_list = line.strip().split(' ')# irregular number of whitespaces between columns
-    line_list = [item for item in line_list if item !='']
+    line_list = line.strip().split(' ')  # irregular number of whitespaces between columns
+    line_list = [item for item in line_list if item != '']
    line_list = [item.strip() for item in line_list]

    return line_list
diff --git a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py
index 8341ee55..dd044cb0 100644
--- a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py
+++ b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py
@@ -6,7 +6,7 @@ from base.data_set import create_dataset
 from utility.tools import flat_files, REAPER_COMMAND, TEMPDIR

 import utility.logger
-logger = utility.logger.getLogger(__name__ )
+logger = utility.logger.getLogger(__name__)

 def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boot_check, num_bootstrap, do_control, control_marker, manhattan_plot, first_run=True, output_files=None):
     """Generates p-values for each marker using qtlreaper"""
@@ -17,10 +17,10 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo
    else:
        genofile_name = this_dataset.group.name

-    trait_filename =f"{str(this_trait.name)}_{str(this_dataset.name)}_pheno"
+    trait_filename = f"{str(this_trait.name)}_{str(this_dataset.name)}_pheno"
    gen_pheno_txt_file(samples, vals, trait_filename)

-    output_filename = (f"{this_dataset.group.name}_GWA_"+
+    output_filename = (f"{this_dataset.group.name}_GWA_" +
                       ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
                       )
    bootstrap_filename = None
@@ -36,7 +36,7 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo
        opt_list.append(f"--n_bootstrap {str(num_bootstrap)}")
        opt_list.append(f"--bootstrap_output {webqtlConfig.GENERATED_IMAGE_DIR}{bootstrap_filename}.txt")
    if num_perm > 0:
-        permu_filename =("{this_dataset.group.name}_PERM_" +
+        permu_filename = ("{this_dataset.group.name}_PERM_" +
                         ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
                         )
@@ -67,8 +67,8 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo
    suggestive = 0
    significant = 0
    if len(permu_vals) > 0:
-        suggestive = permu_vals[int(num_perm*0.37-1)]
-        significant = permu_vals[int(num_perm*0.95-1)]
+        suggestive = permu_vals[int(num_perm * 0.37 - 1)]
+        significant = permu_vals[int(num_perm * 0.95 - 1)]

    return (marker_obs, permu_vals, suggestive, significant, bootstrap_vals, [output_filename, permu_filename, bootstrap_filename])
@@ -76,7 +76,7 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo

 def gen_pheno_txt_file(samples, vals, trait_filename):
     """Generates phenotype file for GEMMA"""
-    with open(f"{TEMPDIR}/gn2/{trait_filename}.txt","w") as outfile:
+    with open(f"{TEMPDIR}/gn2/{trait_filename}.txt", "w") as outfile:
        outfile.write("Trait\t")

        filtered_sample_list = []
@@ -121,7 +121,7 @@ def parse_reaper_output(gwa_filename, permu_filename, bootstrap_filename):
                    marker['cM'] = float(line.split("\t")[3])
                else:
                    if float(line.split("\t")[3]) > 1000:
-                        marker['Mb'] = float(line.split("\t")[3])/1000000
+                        marker['Mb'] = float(line.split("\t")[3]) / 1000000
                    else:
                        marker['Mb'] = float(line.split("\t")[3])
                if float(line.split("\t")[6]) != 1:
@@ -132,7 +132,7 @@ def parse_reaper_output(gwa_filename, permu_filename, bootstrap_filename):
                else:
                    marker['cM'] = float(line.split("\t")[3])
                if float(line.split("\t")[4]) > 1000:
-                    marker['Mb'] = float(line.split("\t")[4])/1000000
+                    marker['Mb'] = float(line.split("\t")[4]) / 1000000
                else:
                    marker['Mb'] = float(line.split("\t")[4])
                if float(line.split("\t")[7]) != 1:
@@ -142,7 +142,7 @@ def parse_reaper_output(gwa_filename, permu_filename, bootstrap_filename):
                    marker['additive'] = float(line.split("\t")[6])
            marker_obs.append(marker)

-    #ZS: Results have to be reordered because the new reaper returns results sorted alphabetically by chr for some reason, resulting in chr 1 being followed by 10, etc
+    # ZS: Results have to be reordered because the new reaper returns results sorted alphabetically by chr for some reason, resulting in chr 1 being followed by 10, etc
    sorted_indices = natural_sort(marker_obs)

    permu_vals = []
@@ -185,18 +185,18 @@ def run_original_reaper(this_trait, dataset, samples_before, trait_vals, json_da
        suggestive = 0
        significant = 0
    else:
-        perm_output = genotype.permutation(strains = trimmed_samples, trait = trimmed_values, nperm=num_perm)
-        suggestive = perm_output[int(num_perm*0.37-1)]
-        significant = perm_output[int(num_perm*0.95-1)]
-        #highly_significant = perm_output[int(num_perm*0.99-1)] #ZS: Currently not used, but leaving it here just in case
+        perm_output = genotype.permutation(strains=trimmed_samples, trait=trimmed_values, nperm=num_perm)
+        suggestive = perm_output[int(num_perm * 0.37 - 1)]
+        significant = perm_output[int(num_perm * 0.95 - 1)]
+        # highly_significant = perm_output[int(num_perm*0.99-1)] #ZS: Currently not used, but leaving it here just in case

    json_data['suggestive'] = suggestive
    json_data['significant'] = significant

    if control_marker != "" and do_control == "true":
-        reaper_results = genotype.regression(strains = trimmed_samples,
-                                             trait = trimmed_values,
-                                             control = str(control_marker))
+        reaper_results = genotype.regression(strains=trimmed_samples,
+                                             trait=trimmed_values,
+                                             control=str(control_marker))
        if bootCheck:
            control_geno = []
            control_geno2 = []
@@ -215,31 +215,31 @@ def run_original_reaper(this_trait, dataset, samples_before, trait_vals, json_da
                    _idx = _prgy.index(_strain)
                    control_geno.append(control_geno2[_idx])

-            bootstrap_results = genotype.bootstrap(strains = trimmed_samples,
-                                                   trait = trimmed_values,
-                                                   control = control_geno,
-                                                   nboot = num_bootstrap)
+            bootstrap_results = genotype.bootstrap(strains=trimmed_samples,
+                                                   trait=trimmed_values,
+                                                   control=control_geno,
+                                                   nboot=num_bootstrap)
    else:
-        reaper_results = genotype.regression(strains = trimmed_samples,
-                                             trait = trimmed_values)
+        reaper_results = genotype.regression(strains=trimmed_samples,
+                                             trait=trimmed_values)

        if bootCheck:
-            bootstrap_results = genotype.bootstrap(strains = trimmed_samples,
-                                                   trait = trimmed_values,
-                                                   nboot = num_bootstrap)
+            bootstrap_results = genotype.bootstrap(strains=trimmed_samples,
+                                                   trait=trimmed_values,
+                                                   nboot=num_bootstrap)

    json_data['chr'] = []
    json_data['pos'] = []
    json_data['lod.hk'] = []
    json_data['markernames'] = []
-    #if self.additive:
+    # if self.additive:
    #    self.json_data['additive'] = []

-    #Need to convert the QTL objects that qtl reaper returns into a json serializable dictionary
+    # Need to convert the QTL objects that qtl reaper returns into a json serializable dictionary
    qtl_results = []
    for qtl in reaper_results:
        reaper_locus = qtl.locus
-        #ZS: Convert chr to int
+        # ZS: Convert chr to int
        converted_chr = reaper_locus.chr
        if reaper_locus.chr != "X" and reaper_locus.chr != "X/Y":
            converted_chr = int(reaper_locus.chr)
@@ -247,11 +247,11 @@ def run_original_reaper(this_trait, dataset, samples_before, trait_vals, json_da
        json_data['pos'].append(reaper_locus.Mb)
        json_data['lod.hk'].append(qtl.lrs)
        json_data['markernames'].append(reaper_locus.name)
-        #if self.additive:
+        # if self.additive:
        #    self.json_data['additive'].append(qtl.additive)
-        locus = {"name":reaper_locus.name, "chr":reaper_locus.chr, "cM":reaper_locus.cM, "Mb":reaper_locus.Mb}
-        qtl = {"lrs_value": qtl.lrs, "chr":converted_chr, "Mb":reaper_locus.Mb,
-               "cM":reaper_locus.cM, "name":reaper_locus.name, "additive":qtl.additive, "dominance":qtl.dominance}
+        locus = {"name": reaper_locus.name, "chr": reaper_locus.chr, "cM": reaper_locus.cM, "Mb": reaper_locus.Mb}
+        qtl = {"lrs_value": qtl.lrs, "chr": converted_chr, "Mb": reaper_locus.Mb,
+               "cM": reaper_locus.cM, "name": reaper_locus.name, "additive": qtl.additive, "dominance": qtl.dominance}
        qtl_results.append(qtl)

    return qtl_results, json_data, perm_output, suggestive, significant, bootstrap_results
@@ -261,5 +261,5 @@ def natural_sort(marker_list):
    Changed to return indices instead of values, though, since the same reordering needs to be applied to bootstrap results
    """
    convert = lambda text: int(text) if text.isdigit() else text.lower()
-    alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', str(marker_list[key]['chr'])) ]
-    return sorted(list(range(len(marker_list))), key = alphanum_key)
\ No newline at end of file
+    alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', str(marker_list[key]['chr']))]
+    return sorted(list(range(len(marker_list))), key=alphanum_key)
diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py
index 4117a0e5..2bd94512 100644
--- a/wqflask/wqflask/marker_regression/rqtl_mapping.py
+++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py
@@ -13,7 +13,7 @@ from utility.tools import locate, TEMPDIR
 from flask import g

 import utility.logger
-logger = utility.logger.getLogger(__name__ )
+logger = utility.logger.getLogger(__name__)

 # Get a trait's type (numeric, categorical, etc) from the DB
 def get_trait_data_type(trait_db_string):
@@ -35,29 +35,29 @@ def
get_trait_data_type(trait_db_string): # Run qtl mapping using R/qtl def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permCheck, num_perm, perm_strata_list, do_control, control_marker, manhattan_plot, pair_scan, cofactors): logger.info("Start run_rqtl_geno"); - ## Get pointers to some common R functions - r_library = ro.r["library"] # Map the library function - r_c = ro.r["c"] # Map the c function - plot = ro.r["plot"] # Map the plot function - png = ro.r["png"] # Map the png function - dev_off = ro.r["dev.off"] # Map the device off function + # Get pointers to some common R functions + r_library = ro.r["library"] # Map the library function + r_c = ro.r["c"] # Map the c function + plot = ro.r["plot"] # Map the plot function + png = ro.r["png"] # Map the png function + dev_off = ro.r["dev.off"] # Map the device off function print((r_library("qtl"))) # Load R/qtl logger.info("QTL library loaded"); - ## Get pointers to some R/qtl functions - scanone = ro.r["scanone"] # Map the scanone function - scantwo = ro.r["scantwo"] # Map the scantwo function - calc_genoprob = ro.r["calc.genoprob"] # Map the calc.genoprob function + # Get pointers to some R/qtl functions + scanone = ro.r["scanone"] # Map the scanone function + scantwo = ro.r["scantwo"] # Map the scantwo function + calc_genoprob = ro.r["calc.genoprob"] # Map the calc.genoprob function crossname = dataset.group.name - #try: + # try: # generate_cross_from_rdata(dataset) # read_cross_from_rdata = ro.r["generate_cross_from_rdata"] # Map the local read_cross_from_rdata function # genofilelocation = locate(crossname + ".RData", "genotype/rdata") # cross_object = read_cross_from_rdata(genofilelocation) # Map the local GENOtoCSVR function - #except: + # except: if mapping_scale == "morgan": scale_units = "cM" @@ -65,10 +65,10 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec scale_units = "Mb" generate_cross_from_geno(dataset, scale_units) - GENOtoCSVR = ro.r["GENOtoCSVR"] # Map the local GENOtoCSVR function + GENOtoCSVR = ro.r["GENOtoCSVR"] # Map the local GENOtoCSVR function crossfilelocation = TMPDIR + crossname + ".cross" if dataset.group.genofile: - genofilelocation = locate(dataset.group.genofile, "genotype") + genofilelocation = locate(dataset.group.genofile, "genotype") else: genofilelocation = locate(dataset.group.name + ".geno", "genotype") logger.info("Going to create a cross from geno"); @@ -95,38 +95,38 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec else: ro.r('all_covars <- marker_covars') covars = ro.r['all_covars'] - #DEBUG to save the session object to file + # DEBUG to save the session object to file if pair_scan: if do_control == "true": - logger.info("Using covariate"); result_data_frame = scantwo(cross_object, pheno = "the_pheno", addcovar = covars, model=model, method=method, n_cluster = 16) + logger.info("Using covariate"); result_data_frame = scantwo(cross_object, pheno="the_pheno", addcovar=covars, model=model, method=method, n_cluster = 16) else: - logger.info("No covariates"); result_data_frame = scantwo(cross_object, pheno = "the_pheno", model=model, method=method, n_cluster = 16) + logger.info("No covariates"); result_data_frame = scantwo(cross_object, pheno="the_pheno", model=model, method=method, n_cluster=16) pair_scan_filename = webqtlUtil.genRandStr("scantwo_") + ".png" - png(file=TEMPDIR+pair_scan_filename) + png(file=TEMPDIR + pair_scan_filename) plot(result_data_frame) dev_off() return 
process_pair_scan_results(result_data_frame) else: if do_control == "true" or cofactors != "": - logger.info("Using covariate"); result_data_frame = scanone(cross_object, pheno = "the_pheno", addcovar = covars, model=model, method=method) + logger.info("Using covariate"); result_data_frame = scanone(cross_object, pheno="the_pheno", addcovar=covars, model=model, method=method) ro.r('save.image(file = "/home/zas1024/gn2-zach/itp_cofactor_test.RData")') else: - logger.info("No covariates"); result_data_frame = scanone(cross_object, pheno = "the_pheno", model=model, method=method) + logger.info("No covariates"); result_data_frame = scanone(cross_object, pheno="the_pheno", model=model, method=method) if num_perm > 0 and permCheck == "ON": # Do permutation (if requested by user) - if len(perm_strata_list) > 0: #ZS: The strata list would only be populated if "Stratified" was checked on before mapping + if len(perm_strata_list) > 0: # ZS: The strata list would only be populated if "Stratified" was checked on before mapping cross_object, strata_ob = add_perm_strata(cross_object, perm_strata_list) if do_control == "true" or cofactors != "": - perm_data_frame = scanone(cross_object, pheno_col = "the_pheno", addcovar = covars, n_perm = int(num_perm), perm_strata = strata_ob, model=model, method=method) + perm_data_frame = scanone(cross_object, pheno_col="the_pheno", addcovar=covars, n_perm = int(num_perm), perm_strata = strata_ob, model=model, method=method) else: - perm_data_frame = scanone(cross_object, pheno_col = "the_pheno", n_perm = num_perm, perm_strata = strata_ob, model=model, method=method) + perm_data_frame = scanone(cross_object, pheno_col="the_pheno", n_perm=num_perm, perm_strata = strata_ob, model=model, method=method) else: if do_control == "true" or cofactors != "": - perm_data_frame = scanone(cross_object, pheno_col = "the_pheno", addcovar = covars, n_perm = int(num_perm), model=model, method=method) + perm_data_frame = scanone(cross_object, pheno_col="the_pheno", addcovar=covars, n_perm = int(num_perm), model=model, method=method) else: - perm_data_frame = scanone(cross_object, pheno_col = "the_pheno", n_perm = num_perm, model=model, method=method) + perm_data_frame = scanone(cross_object, pheno_col="the_pheno", n_perm=num_perm, model=model, method=method) perm_output, suggestive, significant = process_rqtl_perm_results(num_perm, perm_data_frame) # Functions that sets the thresholds for the webinterface return perm_output, suggestive, significant, process_rqtl_results(result_data_frame, dataset.group.species) @@ -134,7 +134,7 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec return process_rqtl_results(result_data_frame, dataset.group.species) def generate_cross_from_rdata(dataset): - rdata_location = locate(dataset.group.name + ".RData", "genotype/rdata") + rdata_location = locate(dataset.group.name + ".RData", "genotype/rdata") ro.r(""" generate_cross_from_rdata <- function(filename = '%s') { load(file=filename) @@ -206,7 +206,7 @@ def sanitize_rqtl_phenotype(vals): for i, val in enumerate(vals): if val == "x": if i < (len(vals) - 1): - pheno_as_string += "NA," + pheno_as_string += "NA," else: pheno_as_string += "NA" else: @@ -223,7 +223,7 @@ def sanitize_rqtl_names(vals): for i, val in enumerate(vals): if val == "x": if i < (len(vals) - 1): - pheno_as_string += "NA," + pheno_as_string += "NA," else: pheno_as_string += "NA" else: @@ -238,7 +238,7 @@ def sanitize_rqtl_names(vals): def add_phenotype(cross, pheno_as_string, col_name): 
ro.globalenv["the_cross"] = cross ro.r('pheno <- data.frame(pull.pheno(the_cross))') - ro.r('the_cross$pheno <- cbind(pheno, ' + col_name + ' = as.numeric('+ pheno_as_string +'))') + ro.r('the_cross$pheno <- cbind(pheno, ' + col_name + ' = as.numeric(' + pheno_as_string + '))') return ro.r["the_cross"] def add_categorical_covar(cross, covar_as_string, i): @@ -256,8 +256,8 @@ def add_categorical_covar(cross, covar_as_string, i): nCol = int(nCol[0]) logger.info("nCol python int:" + str(nCol)); col_names = [] - #logger.info("loop") - for x in range(1, (nCol+1)): + # logger.info("loop") + for x in range(1, (nCol + 1)): #logger.info("loop" + str(x)); col_name = "covar_" + str(i) + "_" + str(x) #logger.info("col_name" + col_name); @@ -272,12 +272,12 @@ def add_categorical_covar(cross, covar_as_string, i): def add_names(cross, names_as_string, col_name): ro.globalenv["the_cross"] = cross ro.r('pheno <- data.frame(pull.pheno(the_cross))') - ro.r('the_cross$pheno <- cbind(pheno, ' + col_name + ' = '+ names_as_string +')') + ro.r('the_cross$pheno <- cbind(pheno, ' + col_name + ' = ' + names_as_string + ')') return ro.r["the_cross"] def pull_var(var_name, cross, var_string): ro.globalenv["the_cross"] = cross - ro.r(var_name +' <- pull.pheno(the_cross, ' + var_string + ')') + ro.r(var_name + ' <- pull.pheno(the_cross, ' + var_string + ')') return ro.r[var_name] @@ -317,15 +317,15 @@ def add_cofactors(cross, this_dataset, covariates, samples): datatype = get_trait_data_type(covariate) logger.info("Covariate: " + covariate + " is of type: " + datatype); - if(datatype == "categorical"): # Cat variable + if(datatype == "categorical"): # Cat variable logger.info("call of add_categorical_covar"); - cross, col_names = add_categorical_covar(cross, covar_as_string, i) # Expand and add it to the cross + cross, col_names = add_categorical_covar(cross, covar_as_string, i) # Expand and add it to the cross logger.info("add_categorical_covar returned"); - for z, col_name in enumerate(col_names): # Go through the additional covar names + for z, col_name in enumerate(col_names): # Go through the additional covar names if i < (len(covariate_list) - 1): covar_name_string += '"' + col_name + '", ' else: - if(z < (len(col_names) -1)): + if(z < (len(col_names) - 1)): covar_name_string += '"' + col_name + '", ' else: covar_name_string += '"' + col_name + '"' @@ -376,7 +376,7 @@ def process_pair_scan_results(result): def process_rqtl_perm_results(num_perm, results): perm_vals = [] - for line in str(results).split("\n")[1:(num_perm+1)]: + for line in str(results).split("\n")[1:(num_perm + 1)]: #print("R/qtl permutation line:", line.split()) perm_vals.append(float(line.split()[1])) @@ -393,7 +393,7 @@ def process_rqtl_results(result, species_name): # TODO: how to make this for i, line in enumerate(result.iter_row()): marker = {} marker['name'] = result.rownames[i] - if species_name == "mouse" and output[i][0] == 20: #ZS: This is awkward, but I'm not sure how to change the 20s to Xs in the RData file + if species_name == "mouse" and output[i][0] == 20: # ZS: This is awkward, but I'm not sure how to change the 20s to Xs in the RData file marker['chr'] = "X" else: marker['chr'] = output[i][0] @@ -402,4 +402,4 @@ def process_rqtl_results(result, species_name): # TODO: how to make this marker['lod_score'] = output[i][2] qtl_results.append(marker) - return qtl_results \ No newline at end of file + return qtl_results diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py index 
7dd0bcb6..f1665570 100644 --- a/wqflask/wqflask/marker_regression/run_mapping.py +++ b/wqflask/wqflask/marker_regression/run_mapping.py @@ -1,5 +1,5 @@ from base.trait import GeneralTrait -from base import data_set #import create_dataset +from base import data_set # import create_dataset from pprint import pformat as pf @@ -43,16 +43,16 @@ from utility.external import shell from base.webqtlConfig import TMPDIR, GENERATED_TEXT_DIR import utility.logger -logger = utility.logger.getLogger(__name__ ) +logger = utility.logger.getLogger(__name__) class RunMapping: def __init__(self, start_vars, temp_uuid): helper_functions.get_species_dataset_trait(self, start_vars) - self.temp_uuid = temp_uuid #needed to pass temp_uuid to gn1 mapping code (marker_regression_gn1.py) + self.temp_uuid = temp_uuid # needed to pass temp_uuid to gn1 mapping code (marker_regression_gn1.py) - #ZS: Needed to zoom in or remap temp traits like PCA traits + # ZS: Needed to zoom in or remap temp traits like PCA traits if "temp_trait" in start_vars and start_vars['temp_trait'] != "False": self.temp_trait = "True" self.group = self.dataset.group.name @@ -60,7 +60,7 @@ class RunMapping: self.json_data = {} self.json_data['lodnames'] = ['lod.hk'] - #ZS: Sometimes a group may have a genofile that only includes a subset of samples + # ZS: Sometimes a group may have a genofile that only includes a subset of samples genofile_samplelist = [] if 'genofile' in start_vars: if start_vars['genofile'] != "": @@ -93,7 +93,7 @@ class RunMapping: else: self.n_samples = len([val for val in self.vals if val != "x"]) - #ZS: Check if genotypes exist in the DB in order to create links for markers + # ZS: Check if genotypes exist in the DB in order to create links for markers self.geno_db_exists = geno_db_exists(self.dataset) @@ -114,19 +114,19 @@ class RunMapping: self.manhattan_single_color = start_vars['manhattan_single_color'] self.manhattan_plot = True - self.maf = start_vars['maf'] # Minor allele frequency + self.maf = start_vars['maf'] # Minor allele frequency if "use_loco" in start_vars: self.use_loco = start_vars['use_loco'] else: self.use_loco = None self.suggestive = "" self.significant = "" - self.pair_scan = False # Initializing this since it is checked in views to determine which template to use + self.pair_scan = False # Initializing this since it is checked in views to determine which template to use if 'transform' in start_vars: self.transform = start_vars['transform'] else: self.transform = "" - self.score_type = "LRS" #ZS: LRS or LOD + self.score_type = "LRS" # ZS: LRS or LOD self.mapping_scale = "physic" if "mapping_scale" in start_vars: self.mapping_scale = start_vars['mapping_scale'] @@ -136,10 +136,10 @@ class RunMapping: self.covariates = start_vars['covariates'] if "covariates" in start_vars else "" self.categorical_vars = [] - #ZS: This is passed to GN1 code for single chr mapping + # ZS: This is passed to GN1 code for single chr mapping self.selected_chr = -1 if "selected_chr" in start_vars: - if int(start_vars['selected_chr']) != -1: #ZS: Needs to be -1 if showing full map; there's probably a better way to fix this + if int(start_vars['selected_chr']) != -1: # ZS: Needs to be -1 if showing full map; there's probably a better way to fix this self.selected_chr = int(start_vars['selected_chr']) + 1 else: self.selected_chr = int(start_vars['selected_chr']) @@ -153,7 +153,7 @@ class RunMapping: self.lrsMax = start_vars['lrsMax'] if "haplotypeAnalystCheck" in start_vars: self.haplotypeAnalystCheck = 
start_vars['haplotypeAnalystCheck'] - if "startMb" in start_vars: #ZS: This is to ensure showGenes, Legend, etc are checked the first time you open the mapping page, since startMb will only not be set during the first load + if "startMb" in start_vars: # ZS: This is to ensure showGenes, Legend, etc are checked the first time you open the mapping page, since startMb will only not be set during the first load if "permCheck" in start_vars: self.permCheck = "ON" else: @@ -191,13 +191,13 @@ class RunMapping: self.showGenes = "ON" self.viewLegend = "ON" - #self.dataset.group.get_markers() + # self.dataset.group.get_markers() if self.mapping_method == "gemma": self.first_run = True self.output_files = None if 'output_files' in start_vars: self.output_files = start_vars['output_files'] - if 'first_run' in start_vars: #ZS: check if first run so existing result files can be used if it isn't (for example zooming on a chromosome, etc) + if 'first_run' in start_vars: # ZS: check if first run so existing result files can be used if it isn't (for example zooming on a chromosome, etc) self.first_run = False self.score_type = "-logP" self.manhattan_plot = True @@ -214,9 +214,9 @@ class RunMapping: if "perm_strata" in start_vars and "categorical_vars" in start_vars: self.categorical_vars = start_vars["categorical_vars"].split(",") if len(self.categorical_vars) and start_vars["perm_strata"] == "True": - primary_samples = SampleList(dataset = self.dataset, - sample_names = self.samples, - this_trait = self.this_trait) + primary_samples = SampleList(dataset=self.dataset, + sample_names=self.samples, + this_trait=self.this_trait) perm_strata = get_perm_strata(self.this_trait, primary_samples, self.categorical_vars, self.samples) self.score_type = "LOD" @@ -227,14 +227,14 @@ class RunMapping: else: self.method = "em" self.model = start_vars['mapmodel_rqtl_geno'] - #if start_vars['pair_scan'] == "true": + # if start_vars['pair_scan'] == "true": # self.pair_scan = True if self.permCheck and self.num_perm > 0: - self.perm_output, self.suggestive, self.significant, results= rqtl_mapping.run_rqtl_geno(self.vals, self.samples, self.dataset, self.mapping_scale, self.method, self.model, self.permCheck, self.num_perm, perm_strata, self.do_control, self.control_marker, self.manhattan_plot, self.pair_scan, self.covariates) + self.perm_output, self.suggestive, self.significant, results = rqtl_mapping.run_rqtl_geno(self.vals, self.samples, self.dataset, self.mapping_scale, self.method, self.model, self.permCheck, self.num_perm, perm_strata, self.do_control, self.control_marker, self.manhattan_plot, self.pair_scan, self.covariates) else: results = rqtl_mapping.run_rqtl_geno(self.vals, self.samples, self.dataset, self.mapping_scale, self.method, self.model, self.permCheck, self.num_perm, perm_strata, self.do_control, self.control_marker, self.manhattan_plot, self.pair_scan, self.covariates) elif self.mapping_method == "reaper": - if "startMb" in start_vars: #ZS: Check if first time page loaded, so it can default to ON + if "startMb" in start_vars: # ZS: Check if first time page loaded, so it can default to ON if "additiveCheck" in start_vars: self.additiveCheck = start_vars['additiveCheck'] else: @@ -267,7 +267,7 @@ class RunMapping: if self.reaper_version == "new": self.first_run = True self.output_files = None - if 'first_run' in start_vars: #ZS: check if first run so existing result files can be used if it isn't (for example zooming on a chromosome, etc) + if 'first_run' in start_vars: # ZS: check if first run so existing 
result files can be used if it isn't (for example zooming on a chromosome, etc) self.first_run = False if 'output_files' in start_vars: self.output_files = start_vars['output_files'].split(",") @@ -311,7 +311,7 @@ class RunMapping: else: if self.pair_scan == True: self.qtl_results = [] - highest_chr = 1 #This is needed in order to convert the highest chr to X/Y + highest_chr = 1 # This is needed in order to convert the highest chr to X/Y for marker in results: if marker['chr1'] > 0 or marker['chr1'] == "X" or marker['chr1'] == "X/Y": if marker['chr1'] > highest_chr or marker['chr1'] == "X" or marker['chr1'] == "X/Y": @@ -328,51 +328,51 @@ class RunMapping: self.json_data['markernames'].append(qtl['name']) self.js_data = dict( - json_data = self.json_data, - this_trait = self.this_trait.name, - data_set = self.dataset.name, - maf = self.maf, - manhattan_plot = self.manhattan_plot, - mapping_scale = self.mapping_scale, - qtl_results = self.qtl_results + json_data=self.json_data, + this_trait=self.this_trait.name, + data_set=self.dataset.name, + maf=self.maf, + manhattan_plot=self.manhattan_plot, + mapping_scale=self.mapping_scale, + qtl_results=self.qtl_results ) else: self.qtl_results = [] self.results_for_browser = [] self.annotations_for_browser = [] - highest_chr = 1 #This is needed in order to convert the highest chr to X/Y + highest_chr = 1 # This is needed in order to convert the highest chr to X/Y for marker in results: if 'Mb' in marker: - this_ps = marker['Mb']*1000000 + this_ps = marker['Mb'] * 1000000 else: - this_ps = marker['cM']*1000000 + this_ps = marker['cM'] * 1000000 browser_marker = dict( - chr = str(marker['chr']), - rs = marker['name'], - ps = this_ps, - url = "/show_trait?trait_id=" + marker['name'] + "&dataset=" + self.dataset.group.name + "Geno" + chr=str(marker['chr']), + rs=marker['name'], + ps=this_ps, + url="/show_trait?trait_id=" + marker['name'] + "&dataset=" + self.dataset.group.name + "Geno" ) if self.geno_db_exists == "True": annot_marker = dict( - name = str(marker['name']), - chr = str(marker['chr']), - rs = marker['name'], - pos = this_ps, - url = "/show_trait?trait_id=" + marker['name'] + "&dataset=" + self.dataset.group.name + "Geno" + name=str(marker['name']), + chr=str(marker['chr']), + rs=marker['name'], + pos=this_ps, + url="/show_trait?trait_id=" + marker['name'] + "&dataset=" + self.dataset.group.name + "Geno" ) else: annot_marker = dict( - name = str(marker['name']), - chr = str(marker['chr']), - rs = marker['name'], - pos = this_ps + name=str(marker['name']), + chr=str(marker['chr']), + rs=marker['name'], + pos=this_ps ) if 'lrs_value' in marker and marker['lrs_value'] > 0: - browser_marker['p_wald'] = 10**-(marker['lrs_value']/4.61) + browser_marker['p_wald'] = 10**-(marker['lrs_value'] / 4.61) elif 'lod_score' in marker and marker['lod_score'] > 0: browser_marker['p_wald'] = 10**-(marker['lod_score']) else: @@ -417,7 +417,7 @@ class RunMapping: chr_lengths = get_chr_lengths(self.mapping_scale, self.mapping_method, self.dataset, self.qtl_results) - #ZS: For zooming into genome browser, need to pass chromosome name instead of number + # ZS: For zooming into genome browser, need to pass chromosome name instead of number if self.dataset.group.species == "mouse": if self.selected_chr == 20: this_chr = "X" @@ -451,29 +451,29 @@ class RunMapping: #mapping_scale = self.mapping_scale, #chromosomes = chromosome_mb_lengths, #qtl_results = self.qtl_results, - categorical_vars = self.categorical_vars, - chr_lengths = chr_lengths, - num_perm = 
self.num_perm, - perm_results = self.perm_output, - significant = significant_for_browser, - browser_files = browser_files, - selected_chr = this_chr, - total_markers = total_markers + categorical_vars=self.categorical_vars, + chr_lengths=chr_lengths, + num_perm=self.num_perm, + perm_results=self.perm_output, + significant=significant_for_browser, + browser_files=browser_files, + selected_chr=this_chr, + total_markers=total_markers ) else: self.js_data = dict( - chr_lengths = chr_lengths, - browser_files = browser_files, - selected_chr = this_chr, - total_markers = total_markers + chr_lengths=chr_lengths, + browser_files=browser_files, + selected_chr=this_chr, + total_markers=total_markers ) def run_rqtl_plink(self): # os.chdir("") never do this inside a webserver!! - output_filename = webqtlUtil.genRandStr("%s_%s_"%(self.dataset.group.name, self.this_trait.name)) + output_filename = webqtlUtil.genRandStr("%s_%s_" % (self.dataset.group.name, self.this_trait.name)) - plink_mapping.gen_pheno_txt_file_plink(self.this_trait, self.dataset, self.vals, pheno_filename = output_filename) + plink_mapping.gen_pheno_txt_file_plink(self.this_trait, self.dataset, self.vals, pheno_filename=output_filename) rqtl_command = './plink --noweb --ped %s.ped --no-fid --no-parents --no-sex --no-pheno --map %s.map --pheno %s/%s.txt --pheno-name %s --maf %s --missing-phenotype -9999 --out %s%s --assoc ' % (self.dataset.group.name, self.dataset.group.name, TMPDIR, plink_output_filename, self.this_trait.name, self.maf, TMPDIR, plink_output_filename) @@ -612,11 +612,11 @@ def trim_markers_for_figure(markers): if low_counter % 20 == 0: filtered_markers.append(marker) low_counter += 1 - elif 4.61 <= marker[score_type] < (2*4.61): + elif 4.61 <= marker[score_type] < (2 * 4.61): if med_counter % 10 == 0: filtered_markers.append(marker) med_counter += 1 - elif (2*4.61) <= marker[score_type] <= (3*4.61): + elif (2 * 4.61) <= marker[score_type] <= (3 * 4.61): if high_counter % 2 == 0: filtered_markers.append(marker) high_counter += 1 @@ -630,7 +630,7 @@ def trim_markers_for_table(markers): else: sorted_markers = sorted(markers, key=lambda k: k['lrs_value'], reverse=True) - #ZS: So we end up with a list of just 2000 markers + # ZS: So we end up with a list of just 2000 markers if len(sorted_markers) >= 2000: trimmed_sorted_markers = sorted_markers[:2000] return trimmed_sorted_markers @@ -682,9 +682,9 @@ def get_chr_lengths(mapping_scale, mapping_method, dataset, qtl_results): highest_pos = float(result['cM']) * 1000000 else: highest_pos = float(result['Mb']) * 1000000 - chr_lengths.append({ "chr": str(this_chr), "size": str(highest_pos)}) + chr_lengths.append({"chr": str(this_chr), "size": str(highest_pos)}) else: - chr_lengths.append({ "chr": str(this_chr), "size": str(highest_pos)}) + chr_lengths.append({"chr": str(this_chr), "size": str(highest_pos)}) this_chr = chr_as_num else: if mapping_method == "reaper": @@ -721,7 +721,7 @@ def get_perm_strata(this_trait, sample_list, categorical_vars, used_samples): perm_strata_strings.append(combined_string) - d = dict([(y, x+1) for x, y in enumerate(sorted(set(perm_strata_strings)))]) + d = dict([(y, x + 1) for x, y in enumerate(sorted(set(perm_strata_strings)))]) list_to_numbers = [d[x] for x in perm_strata_strings] perm_strata = list_to_numbers diff --git a/wqflask/wqflask/model.py b/wqflask/wqflask/model.py index 772f74e4..8abd6516 100644 --- a/wqflask/wqflask/model.py +++ b/wqflask/wqflask/model.py @@ -29,14 +29,14 @@ class User(Base): registration_info = Column(Text) # json 
detailing when they were registered, etc. - confirmed = Column(Text) # json detailing when they confirmed, etc. + confirmed = Column(Text) # json detailing when they confirmed, etc. - superuser = Column(Text) # json detailing when they became a superuser, otherwise empty + superuser = Column(Text) # json detailing when they became a superuser, otherwise empty # if not superuser logins = relationship("Login", order_by="desc(Login.timestamp)", - lazy='dynamic', # Necessary for filter in login_count + lazy='dynamic', # Necessary for filter in login_count foreign_keys="Login.user", ) @@ -67,7 +67,7 @@ class User(Base): def get_collection_by_name(self, collection_name): try: collect = self.user_collections.filter_by(name=collection_name).first() - except sqlalchemy.orm.exc.NoResultFound: + except sqlalchemy.orm.exc.NoResultFound: collect = None return collect diff --git a/wqflask/wqflask/parser.py b/wqflask/wqflask/parser.py index dcd328c9..dfd374e2 100644 --- a/wqflask/wqflask/parser.py +++ b/wqflask/wqflask/parser.py @@ -22,7 +22,7 @@ import re from pprint import pformat as pf from utility.logger import getLogger -logger = getLogger(__name__ ) +logger = getLogger(__name__) def parse(pstring): """ @@ -52,7 +52,7 @@ def parse(pstring): if '(' in value or '[' in value: assert value.startswith(("(", "[")), "Invalid token" assert value.endswith((")", "]")), "Invalid token" - value = value[1:-1] # Get rid of the parenthesis + value = value[1:-1] # Get rid of the parenthesis values = re.split(r"""\s+|,""", value) value = [value.strip() for value in values if value.strip()] else: diff --git a/wqflask/wqflask/resource_manager.py b/wqflask/wqflask/resource_manager.py index 7d51a83d..a1fe0f8f 100644 --- a/wqflask/wqflask/resource_manager.py +++ b/wqflask/wqflask/resource_manager.py @@ -26,7 +26,7 @@ def manage_resource(): owner_display_name = None if owner_id != "none": - try: #ZS: User IDs are sometimes stored in Redis as bytes and sometimes as strings, so this is just to avoid any errors for the time being + try: # ZS: User IDs are sometimes stored in Redis as bytes and sometimes as strings, so this is just to avoid any errors for the time being owner_id = str.encode(owner_id) except: pass @@ -38,7 +38,7 @@ def manage_resource(): elif 'email_address' in owner_info: owner_display_name = owner_info['email_address'] - return render_template("admin/manage_resource.html", owner_name = owner_display_name, resource_id = resource_id, resource_info=resource_info, default_mask=default_mask, group_masks=group_masks_with_names, admin_status=admin_status) + return render_template("admin/manage_resource.html", owner_name=owner_display_name, resource_id=resource_id, resource_info=resource_info, default_mask=default_mask, group_masks=group_masks_with_names, admin_status=admin_status) @app.route("/search_for_users", methods=('POST',)) def search_for_user(): @@ -79,7 +79,7 @@ def change_owner(): flash("You lack the permissions to make this change.", "error") return redirect(url_for("manage_resource", resource_id=resource_id)) else: - return render_template("admin/change_resource_owner.html", resource_id = resource_id) + return render_template("admin/change_resource_owner.html", resource_id=resource_id) @app.route("/resources/change_default_privileges", methods=('POST',)) def change_default_privileges(): @@ -108,7 +108,7 @@ def add_group_to_resource(): group_id = request.form['selected_group'] resource_info = get_resource_info(resource_id) default_privileges = resource_info['default_mask'] - return 
render_template("admin/set_group_privileges.html", resource_id = resource_id, group_id = group_id, default_privileges = default_privileges) + return render_template("admin/set_group_privileges.html", resource_id=resource_id, group_id=group_id, default_privileges = default_privileges) elif all(key in request.form for key in ('data_privilege', 'metadata_privilege', 'admin_privilege')): group_id = request.form['group_id'] group_name = get_group_info(group_id)['name'] @@ -121,7 +121,7 @@ def add_group_to_resource(): flash("Privileges have been added for group {}.".format(group_name), "alert-info") return redirect(url_for("manage_resource", resource_id=resource_id)) else: - return render_template("admin/search_for_groups.html", resource_id = resource_id) + return render_template("admin/search_for_groups.html", resource_id=resource_id) else: return redirect(url_for("no_access_page")) diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py index fd7c132b..16eb1864 100644 --- a/wqflask/wqflask/search_results.py +++ b/wqflask/wqflask/search_results.py @@ -22,7 +22,7 @@ from utility.tools import GN2_BASE_URL from utility.type_checking import is_str from utility.logger import getLogger -logger = getLogger(__name__ ) +logger = getLogger(__name__) class SearchResultPage: #maxReturn = 3000 @@ -39,7 +39,7 @@ class SearchResultPage: self.uc_id = uuid.uuid4() self.go_term = None - logger.debug("uc_id:", self.uc_id) # contains a unique id + logger.debug("uc_id:", self.uc_id) # contains a unique id logger.debug("kw is:", kw) # dict containing search terms if kw['search_terms_or']: @@ -72,7 +72,7 @@ class SearchResultPage: self.dataset = create_dataset(kw['dataset'], dataset_type) logger.debug("search_terms:", self.search_terms) - #ZS: I don't like using try/except, but it seems like the easiest way to account for all possible bad searches here + # ZS: I don't like using try/except, but it seems like the easiest way to account for all possible bad searches here try: self.search() except: @@ -183,7 +183,7 @@ class SearchResultPage: combined_from_clause = "" combined_where_clause = "" - previous_from_clauses = [] #The same table can't be referenced twice in the from clause + previous_from_clauses = [] # The same table can't be referenced twice in the from clause logger.debug("len(search_terms)>1") symbol_list = [] @@ -231,7 +231,7 @@ class SearchResultPage: combined_from_clause += from_clause where_clause = the_search.get_where_clause() combined_where_clause += "(" + where_clause + ")" - if (i+1) < len(self.search_terms): + if (i + 1) < len(self.search_terms): if self.and_or == "and": combined_where_clause += "AND" else: @@ -291,7 +291,7 @@ def insert_newlines(string, every=64): """ This is because it is seemingly impossible to change the width of the description column, so I'm just manually adding line breaks """ lines = [] for i in range(0, len(string), every): - lines.append(string[i:i+every]) + lines.append(string[i:i + every]) return '\n'.join(lines) def get_aliases(symbol_list, species): @@ -322,9 +322,9 @@ def get_aliases(symbol_list, species): search_terms = [] for alias in filtered_aliases: - the_search_term = {'key': None, + the_search_term = {'key': None, 'search_term': [alias], - 'separator' : None} + 'separator': None} search_terms.append(the_search_term) return search_terms diff --git a/wqflask/wqflask/show_trait/SampleList.py b/wqflask/wqflask/show_trait/SampleList.py index f955f632..388f831f 100644 --- a/wqflask/wqflask/show_trait/SampleList.py +++ 
b/wqflask/wqflask/show_trait/SampleList.py @@ -36,16 +36,16 @@ class SampleList: if isinstance(self.this_trait, list): sample = webqtlCaseData.webqtlCaseData(name=sample_name) if counter <= len(self.this_trait): - if isinstance(self.this_trait[counter-1], (bytes, bytearray)): - if (self.this_trait[counter-1].decode("utf-8").lower() != 'x'): + if isinstance(self.this_trait[counter - 1], (bytes, bytearray)): + if (self.this_trait[counter - 1].decode("utf-8").lower() != 'x'): sample = webqtlCaseData.webqtlCaseData( name=sample_name, - value=float(self.this_trait[counter-1])) + value=float(self.this_trait[counter - 1])) else: - if (self.this_trait[counter-1].lower() != 'x'): + if (self.this_trait[counter - 1].lower() != 'x'): sample = webqtlCaseData.webqtlCaseData( name=sample_name, - value=float(self.this_trait[counter-1])) + value=float(self.this_trait[counter - 1])) else: # ZS - If there's no value for the sample/strain, # create the sample object (so samples with no value @@ -69,7 +69,7 @@ class SampleList: sample.extra_attributes = self.sample_attribute_values.get( sample_name, {}) - #ZS: Add a url so RRID case attributes can be displayed as links + # ZS: Add a url so RRID case attributes can be displayed as links if 'rrid' in sample.extra_attributes: if self.dataset.group.species == "mouse": if len(sample.extra_attributes['rrid'].split(":")) > 1: @@ -129,7 +129,7 @@ class SampleList: self.attributes[key].name = name self.attributes[key].distinct_values = [ item.Value for item in values] - self.attributes[key].distinct_values=natural_sort(self.attributes[key].distinct_values) + self.attributes[key].distinct_values = natural_sort(self.attributes[key].distinct_values) all_numbers = True for value in self.attributes[key].distinct_values: try: diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index f9c5fbe6..18cadea4 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -46,7 +46,7 @@ class ShowTrait: resource_id=self.resource_id) elif 'group' in kw: self.temp_trait = True - self.trait_id = "Temp_"+kw['species'] + "_" + kw['group'] + \ + self.trait_id = "Temp_" + kw['species'] + "_" + kw['group'] + \ "_" + datetime.datetime.now().strftime("%m%d%H%M%S") self.temp_species = kw['species'] self.temp_group = kw['group'] @@ -368,8 +368,8 @@ class ShowTrait: chr = transcript_start = transcript_end = None if chr and transcript_start and transcript_end and self.this_trait.refseq_transcriptid: - transcript_start = int(transcript_start*1000000) - transcript_end = int(transcript_end*1000000) + transcript_start = int(transcript_start * 1000000) + transcript_end = int(transcript_end * 1000000) self.ucsc_blat_link = webqtlConfig.UCSC_REFSEQ % ( 'mm10', self.this_trait.refseq_transcriptid, chr, transcript_start, transcript_end) @@ -393,8 +393,8 @@ class ShowTrait: if chr and transcript_start and transcript_end and kgId: # Convert to bases from megabases - transcript_start = int(transcript_start*1000000) - transcript_end = int(transcript_end*1000000) + transcript_start = int(transcript_start * 1000000) + transcript_end = int(transcript_end * 1000000) self.ucsc_blat_link = webqtlConfig.UCSC_REFSEQ % ( 'rn6', kgId, chr, transcript_start, transcript_end) @@ -515,13 +515,13 @@ def quantile_normalize_vals(sample_groups): ranked_vals = ss.rankdata(trait_vals) p_list = [] for i, val in enumerate(trait_vals): - p_list.append(((i+1) - 0.5)/len(trait_vals)) + p_list.append(((i + 1) - 0.5) / len(trait_vals)) z = 
ss.norm.ppf(p_list) normed_vals = [] for rank in ranked_vals: - normed_vals.append("%0.3f" % z[int(rank)-1]) + normed_vals.append("%0.3f" % z[int(rank) - 1]) return normed_vals @@ -585,7 +585,7 @@ def get_nearest_marker(this_trait, this_db): GenoXRef.GenoId = Geno.Id AND GenoFreeze.Id = GenoXRef.GenoFreezeId AND GenoFreeze.Name = '{}' - ORDER BY ABS( Geno.Mb - {}) LIMIT 1""".format(this_chr, this_db.group.name+"Geno", this_mb) + ORDER BY ABS( Geno.Mb - {}) LIMIT 1""".format(this_chr, this_db.group.name + "Geno", this_mb) logger.sql(query) result = g.db.execute(query).fetchall() @@ -605,7 +605,7 @@ def get_table_widths(sample_groups, sample_column_width, has_num_cases=False): trait_table_width += 80 if has_num_cases: trait_table_width += 80 - trait_table_width += len(sample_groups[0].attributes)*88 + trait_table_width += len(sample_groups[0].attributes) * 88 trait_table_width = str(trait_table_width) + "px" diff --git a/wqflask/wqflask/snp_browser/snp_browser.py b/wqflask/wqflask/snp_browser/snp_browser.py index 8658abf8..7a0df94b 100644 --- a/wqflask/wqflask/snp_browser/snp_browser.py +++ b/wqflask/wqflask/snp_browser/snp_browser.py @@ -26,9 +26,9 @@ class SnpBrowser: self.table_rows = [] if self.limit_strains == "true": - self.header_fields, self.empty_field_count, self.header_data_names = get_header_list(variant_type = self.variant_type, strains = self.chosen_strains, empty_columns = self.empty_columns) + self.header_fields, self.empty_field_count, self.header_data_names = get_header_list(variant_type=self.variant_type, strains=self.chosen_strains, empty_columns = self.empty_columns) else: - self.header_fields, self.empty_field_count, self.header_data_names = get_header_list(variant_type = self.variant_type, strains = self.strain_lists, species = self.species_name, empty_columns = self.empty_columns) + self.header_fields, self.empty_field_count, self.header_data_names = get_header_list(variant_type=self.variant_type, strains=self.strain_lists, species = self.species_name, empty_columns = self.empty_columns) def initialize_parameters(self, start_vars): if 'first_run' in start_vars: @@ -249,7 +249,7 @@ class SnpBrowser: def filter_results(self, results): filtered_results = [] - strain_index_list = [] #ZS: List of positions of selected strains in strain list + strain_index_list = [] # ZS: List of positions of selected strains in strain list last_mb = -1 if self.limit_strains == "true" and len(self.chosen_strains) > 0: @@ -272,9 +272,9 @@ class SnpBrowser: if self.limit_strains == "true" and len(self.chosen_strains) > 0: for index in strain_index_list: if self.species_id == 1: - display_strains.append(result[29+index]) + display_strains.append(result[29 + index]) elif self.species_id == 2: - display_strains.append(result[31+index]) + display_strains.append(result[31 + index]) self.allele_list = display_strains effect_info_dict = get_effect_info(effect_list) @@ -300,7 +300,7 @@ class SnpBrowser: else: gene = check_if_in_gene(species_id, chr, mb) transcript = exon = function = function_details = '' - if self.redundant == "false" or last_mb != mb: # filter redundant + if self.redundant == "false" or last_mb != mb: # filter redundant if self.include_record(domain, function, snp_source, conservation_score): info_list = [snp_name, rs, chr, mb, alleles, gene, transcript, exon, domain, function, function_details, snp_source, conservation_score, snp_id] info_list.extend(self.allele_list) @@ -366,7 +366,7 @@ class SnpBrowser: if len(gene_name_list) > 0: gene_id_name_dict = 
get_gene_id_name_dict(self.species_id, gene_name_list) - #ZS: list of booleans representing which columns are entirely empty, so they aren't displayed on the page; only including ones that are sometimes empty (since there's always a location, etc) + # ZS: list of booleans representing which columns are entirely empty, so they aren't displayed on the page; only including ones that are sometimes empty (since there's always a location, etc) self.empty_columns = { "snp_source": "false", "conservation_score": "false", @@ -389,8 +389,8 @@ class SnpBrowser: snp_name = rs else: rs = "" - start_bp = int(mb*1000000 - 100) - end_bp = int(mb*1000000 + 100) + start_bp = int(mb * 1000000 - 100) + end_bp = int(mb * 1000000 + 100) position_info = "chr%s:%d-%d" % (chr, start_bp, end_bp) if self.species_id == 2: snp_url = webqtlConfig.GENOMEBROWSER_URL % ("rn6", position_info) @@ -434,7 +434,7 @@ class SnpBrowser: transcript_link = "" if exon: - exon = exon[1] # exon[0] is exon_id, exon[1] is exon_rank + exon = exon[1] # exon[0] is exon_id, exon[1] is exon_rank self.empty_columns['exon'] = "true" else: exon = "" @@ -575,7 +575,7 @@ class SnpBrowser: if conservation_score: score_as_float = float(conservation_score) try: - input_score_float = float(self.score) # the user-input score + input_score_float = float(self.score) # the user-input score except: input_score_float = 0.0 @@ -628,17 +628,17 @@ class SnpBrowser: left_offset, right_offset, top_offset, bottom_offset = (30, 30, 40, 50) plot_width = canvas_width - left_offset - right_offset plot_height = canvas_height - top_offset - bottom_offset - y_zero = top_offset + plot_height/2 + y_zero = top_offset + plot_height / 2 - x_scale = plot_width/(self.end_mb - self.start_mb) + x_scale = plot_width / (self.end_mb - self.start_mb) - #draw clickable image map at some point + # draw clickable image map at some point n_click = 80.0 - click_step = plot_width/n_click - click_mb_step = (self.end_mb - self.start_mb)/n_click + click_step = plot_width / n_click + click_mb_step = (self.end_mb - self.start_mb) / n_click - #for i in range(n_click): + # for i in range(n_click): # href = url_for('snp_browser', first_run="false", chosen_strains_mouse=self.chosen_strains_mouse, chosen_strains_rat=self.chosen_strains_rat, variant=self.variant_type, species=self.species_name, gene_name=self.gene_name, chr=self.chr, start_mb=self.start_mb, end_mb=self.end_mb, limit_strains=self.limit_strains, domain=self.domain, function=self.function, criteria=self.criteria, score=self.score, diff_alleles=self.diff_alleles) def get_browser_sample_lists(species_id=1): @@ -660,7 +660,7 @@ def get_browser_sample_lists(species_id=1): return strain_lists -def get_header_list(variant_type, strains, species = None, empty_columns = None): +def get_header_list(variant_type, strains, species=None, empty_columns=None): if species == "Mouse": strain_list = strains['mouse'] elif species == "Rat": @@ -668,7 +668,7 @@ def get_header_list(variant_type, strains, species = None, empty_columns = None) else: strain_list = strains - empty_field_count = 0 #ZS: This is an awkward way of letting the javascript know the index where the allele value columns start; there's probably a better way of doing this + empty_field_count = 0 # ZS: This is an awkward way of letting the javascript know the index where the allele value columns start; there's probably a better way of doing this header_fields = [] header_data_names = [] @@ -715,7 +715,7 @@ def get_header_list(variant_type, strains, species = None, empty_columns = None) 
return header_fields, empty_field_count, header_data_names -def get_effect_details_by_category(effect_name = None, effect_value = None): +def get_effect_details_by_category(effect_name=None, effect_value=None): gene_list = [] transcript_list = [] exon_list = [] @@ -878,7 +878,7 @@ def get_gene_id_name_dict(species_id, gene_name_list): return gene_id_name_dict def check_if_in_gene(species_id, chr, mb): - if species_id != 0: #ZS: Check if this is necessary + if species_id != 0: # ZS: Check if this is necessary query = """SELECT geneId, geneSymbol FROM GeneList WHERE SpeciesId = {0} AND chromosome = '{1}' AND diff --git a/wqflask/wqflask/submit_bnw.py b/wqflask/wqflask/submit_bnw.py index a0e84c8c..4ad6f9e3 100644 --- a/wqflask/wqflask/submit_bnw.py +++ b/wqflask/wqflask/submit_bnw.py @@ -3,7 +3,7 @@ from base import data_set from utility import helper_functions import utility.logger -logger = utility.logger.getLogger(__name__ ) +logger = utility.logger.getLogger(__name__) def get_bnw_input(start_vars): logger.debug("BNW VARS:", start_vars) diff --git a/wqflask/wqflask/update_search_results.py b/wqflask/wqflask/update_search_results.py index 22a46ef2..08b4f9f5 100644 --- a/wqflask/wqflask/update_search_results.py +++ b/wqflask/wqflask/update_search_results.py @@ -53,7 +53,7 @@ class GSearch: for line in re: dataset = create_dataset(line[3], "ProbeSet", get_samplelist=False) trait_id = line[4] - #with Bench("Building trait object"): + # with Bench("Building trait object"): this_trait = GeneralTrait(dataset=dataset, name=trait_id, get_qtl_info=True, get_sample_info=False) self.trait_list.append(this_trait) @@ -108,8 +108,8 @@ class GSearch: json_dict['data'] = [] for i, trait in enumerate(self.trait_list): - trait_row = { "checkbox": "".format(trait.name, trait.dataset.name), - "index": i+1, + trait_row = {"checkbox": "".format(trait.name, trait.dataset.name), + "index": i + 1, "species": trait.dataset.group.species, "group": trait.dataset.group.name, "tissue": trait.dataset.tissue, diff --git a/wqflask/wqflask/user_login.py b/wqflask/wqflask/user_login.py index b6e7973f..3f5b43ee 100644 --- a/wqflask/wqflask/user_login.py +++ b/wqflask/wqflask/user_login.py @@ -33,9 +33,9 @@ def timestamp(): return datetime.datetime.utcnow().isoformat() def basic_info(): - return dict(timestamp = timestamp(), - ip_address = request.remote_addr, - user_agent = request.headers.get('User-Agent')) + return dict(timestamp=timestamp(), + ip_address=request.remote_addr, + user_agent=request.headers.get('User-Agent')) def encode_password(pass_gen_fields, unencrypted_password): @@ -77,31 +77,31 @@ def get_signed_session_id(user): session_id_signature = hmac.hmac_creation(session_id) session_id_signed = session_id + ":" + session_id_signature - #ZS: Need to check if this is ever actually used or exists + # ZS: Need to check if this is ever actually used or exists if 'user_id' not in user: user['user_id'] = str(uuid.uuid4()) save_user(user, user['user_id']) if 'github_id' in user: - session = dict(login_time = time.time(), - user_type = "github", - user_id = user['user_id'], - github_id = user['github_id'], - user_name = user['name'], - user_url = user['user_url']) + session = dict(login_time=time.time(), + user_type="github", + user_id=user['user_id'], + github_id=user['github_id'], + user_name=user['name'], + user_url=user['user_url']) elif 'orcid' in user: - session = dict(login_time = time.time(), - user_type = "orcid", - user_id = user['user_id'], - github_id = user['orcid'], - user_name = user['name'], - user_url = 
user['user_url']) + session = dict(login_time=time.time(), + user_type="orcid", + user_id=user['user_id'], + github_id=user['orcid'], + user_name=user['name'], + user_url=user['user_url']) else: - session = dict(login_time = time.time(), - user_type = "gn2", - user_id = user['user_id'], - user_name = user['full_name'], - user_email_address = user['email_address']) + session = dict(login_time=time.time(), + user_type="gn2", + user_id=user['user_id'], + user_name=user['full_name'], + user_email_address=user['email_address']) key = UserSession.user_cookie_name + ":" + session_id Redis.hmset(key, session) @@ -123,23 +123,23 @@ def send_email(toaddr, msg, fromaddr="no-reply@genenetwork.org"): server.login(SMTP_USERNAME, SMTP_PASSWORD) server.sendmail(fromaddr, toaddr, msg) server.quit() - logger.info("Successfully sent email to "+toaddr) + logger.info("Successfully sent email to " + toaddr) -def send_verification_email(user_details, template_name = "email/user_verification.txt", key_prefix = "verification_code", subject = "GeneNetwork e-mail verification"): +def send_verification_email(user_details, template_name="email/user_verification.txt", key_prefix="verification_code", subject = "GeneNetwork e-mail verification"): verification_code = str(uuid.uuid4()) key = key_prefix + ":" + verification_code - data = json.dumps(dict(id=user_details['user_id'], timestamp = timestamp())) + data = json.dumps(dict(id=user_details['user_id'], timestamp=timestamp())) Redis.set(key, data) Redis.expire(key, THREE_DAYS) recipient = user_details['email_address'] - body = render_template(template_name, verification_code = verification_code) + body = render_template(template_name, verification_code=verification_code) send_email(recipient, subject, body) return {"recipient": recipient, "subject": subject, "body": body} -def send_invitation_email(user_email, temp_password, template_name = "email/user_invitation.txt", subject = "You've been added to a GeneNetwork user group"): +def send_invitation_email(user_email, temp_password, template_name="email/user_invitation.txt", subject= "You've been added to a GeneNetwork user group"): recipient = user_email body = render_template(template_name, temp_password) send_email(recipient, subject, body) @@ -154,7 +154,7 @@ def verify_email(): # We might as well log them in session_id_signed = get_signed_session_id(user_details) flash("Thank you for logging in {}.".format(user_details['full_name']), "alert-success") - response = make_response(redirect(url_for('index_page', import_collections = import_col, anon_id = anon_id))) + response = make_response(redirect(url_for('index_page', import_collections=import_col, anon_id=anon_id))) response.set_cookie(UserSession.user_cookie_name, session_id_signed, max_age=None) return response else: @@ -165,15 +165,15 @@ def login(): params = request.form if request.form else request.args logger.debug("in login params are:", params) - if not params: #ZS: If coming to page for first time + if not params: # ZS: If coming to page for first time from utility.tools import GITHUB_AUTH_URL, GITHUB_CLIENT_ID, ORCID_AUTH_URL, ORCID_CLIENT_ID external_login = {} if GITHUB_AUTH_URL and GITHUB_CLIENT_ID != 'UNKNOWN': external_login["github"] = GITHUB_AUTH_URL if ORCID_AUTH_URL and ORCID_CLIENT_ID != 'UNKNOWN': external_login["orcid"] = ORCID_AUTH_URL - return render_template("new_security/login_user.html", external_login = external_login, redis_is_available=is_redis_available()) - else: #ZS: After clicking sign-in + return 
render_template("new_security/login_user.html", external_login=external_login, redis_is_available=is_redis_available()) + else: # ZS: After clicking sign-in if 'type' in params and 'uid' in params: user_details = get_user_by_unique_column("user_id", params['uid']) if user_details: @@ -204,13 +204,13 @@ def login(): encrypted_pass_fields = encode_password(pwfields, submitted_password) password_match = pbkdf2.safe_str_cmp(encrypted_pass_fields['password'], pwfields['password']) - else: # Invalid e-mail + else: # Invalid e-mail flash("Invalid e-mail address. Please try again.", "alert-danger") response = make_response(redirect(url_for('login'))) return response - if password_match: # If password correct - if user_details['confirmed']: # If account confirmed + if password_match: # If password correct + if user_details['confirmed']: # If account confirmed import_col = "false" anon_id = "" if 'import_collections' in params: @@ -219,14 +219,14 @@ def login(): session_id_signed = get_signed_session_id(user_details) flash("Thank you for logging in {}.".format(user_details['full_name']), "alert-success") - response = make_response(redirect(url_for('index_page', import_collections = import_col, anon_id = anon_id))) + response = make_response(redirect(url_for('index_page', import_collections=import_col, anon_id=anon_id))) response.set_cookie(UserSession.user_cookie_name, session_id_signed, max_age=None) return response else: - email_ob = send_verification_email(user_details, template_name = "email/user_verification.txt") + email_ob = send_verification_email(user_details, template_name="email/user_verification.txt") return render_template("newsecurity/verification_still_needed.html", subject=email_ob['subject']) - else: # Incorrect password - #ZS: It previously seemed to store that there was an incorrect log-in attempt here, but it did so in the MySQL DB so this might need to be reproduced with Redis + else: # Incorrect password + # ZS: It previously seemed to store that there was an incorrect log-in attempt here, but it did so in the MySQL DB so this might need to be reproduced with Redis flash("Invalid password. 
Please try again.", "alert-danger") response = make_response(redirect(url_for('login'))) @@ -243,7 +243,7 @@ def github_oauth2(): } result = requests.post("https://github.com/login/oauth/access_token", json=data) - result_dict = {arr[0]:arr[1] for arr in [tok.split("=") for tok in result.text.split("&")]} + result_dict = {arr[0]: arr[1] for arr in [tok.split("=") for tok in result.text.split("&")]} github_user = get_github_user_details(result_dict["access_token"]) @@ -261,12 +261,12 @@ def github_oauth2(): } save_user(user_details, user_details["user_id"]) - url = "/n/login?type=github&uid="+user_details["user_id"] + url = "/n/login?type=github&uid=" + user_details["user_id"] return redirect(url) def get_github_user_details(access_token): from utility.tools import GITHUB_API_URL - result = requests.get(GITHUB_API_URL, headers = {'Authorization':'token ' + access_token }).content + result = requests.get(GITHUB_API_URL, headers={'Authorization': 'token ' + access_token}).content return json.loads(result) @@ -303,14 +303,14 @@ def orcid_oauth2(): } save_user(user_details, user_details["user_id"]) - url = "/n/login?type=orcid&uid="+user_details["user_id"] + url = "/n/login?type=orcid&uid=" + user_details["user_id"] else: flash("There was an error getting code from ORCID") return redirect(url) def get_github_user_details(access_token): from utility.tools import GITHUB_API_URL - result = requests.get(GITHUB_API_URL, headers = {'Authorization':'token ' + access_token }).content + result = requests.get(GITHUB_API_URL, headers={'Authorization': 'token ' + access_token}).content return json.loads(result) @@ -337,7 +337,7 @@ def send_forgot_password_email(verification_email): from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText - template_name = "email/forgot_password.txt" + template_name = "email/forgot_password.txt" key_prefix = "forgot_password_code" subject = "GeneNetwork password reset" fromaddr = "no-reply@genenetwork.org" @@ -353,7 +353,7 @@ def send_forgot_password_email(verification_email): save_verification_code(verification_email, verification_code) - body = render_template(template_name, verification_code = verification_code) + body = render_template(template_name, verification_code=verification_code) msg = MIMEMultipart() msg["To"] = verification_email diff --git a/wqflask/wqflask/user_manager.py b/wqflask/wqflask/user_manager.py index fcec3b67..013920f9 100644 --- a/wqflask/wqflask/user_manager.py +++ b/wqflask/wqflask/user_manager.py @@ -74,11 +74,11 @@ class AnonUser: self.key = "anon_collection:v1:{}".format(self.anon_id) def add_collection(self, new_collection): - collection_dict = dict(name = new_collection.name, - created_timestamp = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'), - changed_timestamp = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'), - num_members = new_collection.num_members, - members = new_collection.get_members()) + collection_dict = dict(name=new_collection.name, + created_timestamp=datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'), + changed_timestamp=datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'), + num_members=new_collection.num_members, + members=new_collection.get_members()) Redis.set(self.key, json.dumps(collection_dict)) Redis.expire(self.key, 60 * 60 * 24 * 365) @@ -111,7 +111,7 @@ class AnonUser: collection['created_timestamp'] = datetime.datetime.strptime(collection['created_timestamp'], '%b %d %Y %I:%M%p') collection['changed_timestamp'] = 
datetime.datetime.strptime(collection['changed_timestamp'], '%b %d %Y %I:%M%p') - collections = sorted(collections, key = lambda i: i['changed_timestamp'], reverse = True) + collections = sorted(collections, key=lambda i: i['changed_timestamp'], reverse=True) return collections def import_traits_to_user(self): @@ -182,13 +182,13 @@ class UserSession: # weekend and the site hasn't been visited by the user self.logged_in = False - ########### Grrr...this won't work because of the way flask handles cookies + # Grrr...this won't work because of the way flask handles cookies # Delete the cookie #response = make_response(redirect(url_for('login'))) #response.set_cookie(self.cookie_name, '', expires=0) - #flash( + # flash( # "Due to inactivity your session has expired. If you'd like please login again.") - #return response + # return response return if Redis.ttl(self.redis_key) < THREE_DAYS: @@ -213,7 +213,7 @@ class UserSession: user_email = self.record['user_email_address'] - #ZS: Get user's collections if they exist + # ZS: Get user's collections if they exist user_id = None user_id = get_user_id("email_address", user_email) return user_id @@ -230,7 +230,7 @@ class UserSession: def user_collections(self): """List of user's collections""" - #ZS: Get user's collections if they exist + # ZS: Get user's collections if they exist collections = get_user_collections(self.redis_user_id) return collections @@ -248,7 +248,7 @@ class UserSession: 'created_timestamp': datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'), 'changed_timestamp': datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'), 'num_members': len(traits), - 'members': list(traits) } + 'members': list(traits)} current_collections = self.user_collections current_collections.append(collection_dict) @@ -347,7 +347,7 @@ def get_cookie(): g.user_session = UserSession() g.cookie_session = AnonUser() -#@app.after_request +# @app.after_request def set_cookie(response): if not request.cookies.get(g.cookie_session.cookie_name): response.set_cookie(g.cookie_session.cookie_name, g.cookie_session.cookie) @@ -455,7 +455,7 @@ def set_password(password, user): class VerificationEmail: - template_name = "email/verification.txt" + template_name = "email/verification.txt" key_prefix = "verification_code" subject = "GeneNetwork email verification" @@ -473,7 +473,7 @@ class VerificationEmail: to = user.email_address subject = self.subject body = render_template(self.template_name, - verification_code = verification_code) + verification_code=verification_code) send_email(to, subject, body) class ForgotPasswordEmail(VerificationEmail): @@ -500,7 +500,7 @@ class ForgotPasswordEmail(VerificationEmail): subject = self.subject body = render_template( self.template_name, - verification_code = verification_code) + verification_code=verification_code) msg = MIMEMultipart() msg["To"] = toaddr @@ -525,11 +525,11 @@ class Password: def basic_info(): - return dict(timestamp = timestamp(), - ip_address = request.remote_addr, - user_agent = request.headers.get('User-Agent')) + return dict(timestamp=timestamp(), + ip_address=request.remote_addr, + user_agent=request.headers.get('User-Agent')) -#@app.route("/manage/verify_email") +# @app.route("/manage/verify_email") def verify_email(): user = DecodeUser(VerificationEmail.key_prefix).user user.confirmed = json.dumps(basic_info(), sort_keys=True) @@ -543,7 +543,7 @@ def verify_email(): response.set_cookie(UserSession.cookie_name, session_id_signed) return response -#@app.route("/n/password_reset", methods=['GET']) +# 
@app.route("/n/password_reset", methods=['GET']) def password_reset(): """Entry point after user clicks link in E-mail""" logger.debug("in password_reset request.url is:", request.url) @@ -567,7 +567,7 @@ def password_reset(): else: return redirect(url_for("login")) -#@app.route("/n/password_reset_step2", methods=('POST',)) +# @app.route("/n/password_reset_step2", methods=('POST',)) def password_reset_step2(): """Handle confirmation E-mail for password reset""" logger.debug("in password_reset request.url is:", request.url) @@ -611,7 +611,7 @@ class DecodeUser: logger.debug("data is:", data) return model.User.query.get(data['id']) -#@app.route("/n/login", methods=('GET', 'POST')) +# @app.route("/n/login", methods=('GET', 'POST')) def login(): lu = LoginUser() login_type = request.args.get("type") @@ -621,7 +621,7 @@ def login(): else: return lu.standard_login() -#@app.route("/n/login/github_oauth2", methods=('GET', 'POST')) +# @app.route("/n/login/github_oauth2", methods=('GET', 'POST')) def github_oauth2(): from utility.tools import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET code = request.args.get("code") @@ -631,28 +631,21 @@ def github_oauth2(): "code": code } result = requests.post("https://github.com/login/oauth/access_token", json=data) - result_dict = {arr[0]:arr[1] for arr in [tok.split("=") for tok in [token.encode("utf-8") for token in result.text.split("&")]]} + result_dict = {arr[0]: arr[1] for arr in [tok.split("=") for tok in [token.encode("utf-8") for token in result.text.split("&")]]} github_user = get_github_user_details(result_dict["access_token"]) user_details = get_user_by_unique_column("github_id", github_user["id"]) if user_details == None: user_details = { - "user_id": str(uuid.uuid4()) - , "name": github_user["name"].encode("utf-8") - , "github_id": github_user["id"] - , "user_url": github_user["html_url"].encode("utf-8") - , "login_type": "github" - , "organization": "" - , "active": 1 - , "confirmed": 1 + "user_id": str(uuid.uuid4()), "name": github_user["name"].encode("utf-8"), "github_id": github_user["id"], "user_url": github_user["html_url"].encode("utf-8") , "login_type": "github" , "organization": "" , "active": 1 , "confirmed": 1 } save_user(user_details, user_details["user_id"]) - url = "/n/login?type=github&uid="+user_details["user_id"] + url = "/n/login?type=github&uid=" + user_details["user_id"] return redirect(url) -#@app.route("/n/login/orcid_oauth2", methods=('GET', 'POST')) +# @app.route("/n/login/orcid_oauth2", methods=('GET', 'POST')) def orcid_oauth2(): from uuid import uuid4 from utility.tools import ORCID_CLIENT_ID, ORCID_CLIENT_SECRET, ORCID_TOKEN_URL, ORCID_AUTH_URL @@ -661,10 +654,7 @@ def orcid_oauth2(): url = "/n/login" if code: data = { - "client_id": ORCID_CLIENT_ID - , "client_secret": ORCID_CLIENT_SECRET - , "grant_type": "authorization_code" - , "code": code + "client_id": ORCID_CLIENT_ID, "client_secret": ORCID_CLIENT_SECRET, "grant_type": "authorization_code", "code": code } result = requests.post(ORCID_TOKEN_URL, data=data) result_dict = json.loads(result.text.encode("utf-8")) @@ -672,31 +662,24 @@ def orcid_oauth2(): user_details = get_user_by_unique_column("orcid", result_dict["orcid"]) if user_details == None: user_details = { - "user_id": str(uuid4()) - , "name": result_dict["name"] - , "orcid": result_dict["orcid"] - , "user_url": "%s/%s" % ( + "user_id": str(uuid4()), "name": result_dict["name"], "orcid": result_dict["orcid"], "user_url": "%s/%s" % ( "/".join(ORCID_AUTH_URL.split("/")[:-2]), - result_dict["orcid"]) - , 
"login_type": "orcid" - , "organization": "" - , "active": 1 - , "confirmed": 1 + result_dict["orcid"]), "login_type": "orcid", "organization": "", "active": 1 , "confirmed": 1 } save_user(user_details, user_details["user_id"]) - url = "/n/login?type=orcid&uid="+user_details["user_id"] + url = "/n/login?type=orcid&uid=" + user_details["user_id"] else: flash("There was an error getting code from ORCID") return redirect(url) def get_github_user_details(access_token): from utility.tools import GITHUB_API_URL - result = requests.get(GITHUB_API_URL, params={"access_token":access_token}) + result = requests.get(GITHUB_API_URL, params={"access_token": access_token}) return result.json() class LoginUser: - remember_time = 60 * 60 * 24 * 30 # One month in seconds + remember_time = 60 * 60 * 24 * 30 # One month in seconds def __init__(self): self.remember_me = False @@ -730,9 +713,7 @@ class LoginUser: external_login["orcid"] = ORCID_AUTH_URL return render_template( - "new_security/login_user.html" - , external_login=external_login - , redis_is_available = is_redis_available()) + "new_security/login_user.html", external_login=external_login, redis_is_available=is_redis_available()) else: user_details = get_user_by_unique_column("email_address", params["email_address"]) #user_details = get_user_by_unique_column(es, "email_address", params["email_address"]) @@ -770,7 +751,7 @@ class LoginUser: else: import_col = "false" - #g.cookie_session.import_traits_to_user() + # g.cookie_session.import_traits_to_user() self.logged_in = True @@ -810,10 +791,10 @@ class LoginUser: if not user.id: user.id = '' - session = dict(login_time = time.time(), - user_id = user.id, - user_name = user.full_name, - user_email_address = user.email_address) + session = dict(login_time=time.time(), + user_id=user.id, + user_name=user.full_name, + user_email_address=user.email_address) key = UserSession.cookie_name + ":" + login_rec.session_id logger.debug("Key when signing:", key) @@ -832,7 +813,7 @@ class LoginUser: db_session.add(login_rec) db_session.commit() -#@app.route("/n/logout") +# @app.route("/n/logout") def logout(): logger.debug("Logging out...") UserSession().delete_session() @@ -843,7 +824,7 @@ def logout(): return response -#@app.route("/n/forgot_password", methods=['GET']) +# @app.route("/n/forgot_password", methods=['GET']) def forgot_password(): """Entry point for forgotten password""" print("ARGS: ", request.args) @@ -851,7 +832,7 @@ def forgot_password(): print("ERRORS: ", errors) return render_template("new_security/forgot_password.html", errors=errors) -#@app.route("/n/forgot_password_submit", methods=('POST',)) +# @app.route("/n/forgot_password_submit", methods=('POST',)) def forgot_password_submit(): """When a forgotten password form is submitted we get here""" params = request.form @@ -886,7 +867,7 @@ def is_redis_available(): ### # ZS: The following 6 functions require the old MySQL User accounts; I'm leaving them commented out just in case we decide to reimplement them using ElasticSearch ### -#def super_only(): +# def super_only(): # try: # superuser = g.user_session.user_ob.superuser # except AttributeError: @@ -895,26 +876,26 @@ def is_redis_available(): # flash("You must be a superuser to access that page.", "alert-error") # abort(401) -#@app.route("/manage/users") -#def manage_users(): +# @app.route("/manage/users") +# def manage_users(): # super_only() # template_vars = UsersManager() # return render_template("admin/user_manager.html", **template_vars.__dict__) -#@app.route("/manage/user") -#def 
manage_user(): +# @app.route("/manage/user") +# def manage_user(): # super_only() # template_vars = UserManager(request.args) # return render_template("admin/ind_user_manager.html", **template_vars.__dict__) -#@app.route("/manage/groups") -#def manage_groups(): +# @app.route("/manage/groups") +# def manage_groups(): # super_only() # template_vars = GroupsManager(request.args) # return render_template("admin/group_manager.html", **template_vars.__dict__) -#@app.route("/manage/make_superuser") -#def make_superuser(): +# @app.route("/manage/make_superuser") +# def make_superuser(): # super_only() # params = request.args # user_id = params['user_id'] @@ -926,8 +907,8 @@ def is_redis_available(): # flash("We've made {} a superuser!".format(user.name_and_org)) # return redirect(url_for("manage_users")) -#@app.route("/manage/assume_identity") -#def assume_identity(): +# @app.route("/manage/assume_identity") +# def assume_identity(): # super_only() # params = request.args # user_id = params['user_id'] @@ -936,7 +917,7 @@ def is_redis_available(): # return LoginUser().actual_login(user, assumed_by=assumed_by) -#@app.route("/n/register", methods=('GET', 'POST')) +# @app.route("/n/register", methods=('GET', 'POST')) def register(): params = None errors = None @@ -1023,21 +1004,21 @@ def send_email(toaddr, msg, fromaddr="no-reply@genenetwork.org"): """ if SMTP_USERNAME == 'UNKNOWN': - logger.debug("SMTP: connecting with host "+SMTP_CONNECT) + logger.debug("SMTP: connecting with host " + SMTP_CONNECT) server = SMTP(SMTP_CONNECT) server.sendmail(fromaddr, toaddr, msg) else: - logger.debug("SMTP: connecting TLS with host "+SMTP_CONNECT) + logger.debug("SMTP: connecting TLS with host " + SMTP_CONNECT) server = SMTP(SMTP_CONNECT) server.starttls() - logger.debug("SMTP: login with user "+SMTP_USERNAME) + logger.debug("SMTP: login with user " + SMTP_USERNAME) server.login(SMTP_USERNAME, SMTP_PASSWORD) - logger.debug("SMTP: "+fromaddr) - logger.debug("SMTP: "+toaddr) - logger.debug("SMTP: "+msg) + logger.debug("SMTP: " + fromaddr) + logger.debug("SMTP: " + toaddr) + logger.debug("SMTP: " + msg) server.sendmail(fromaddr, toaddr, msg) server.quit() - logger.info("Successfully sent email to "+toaddr) + logger.info("Successfully sent email to " + toaddr) class GroupsManager: def __init__(self, kw): diff --git a/wqflask/wqflask/user_session.py b/wqflask/wqflask/user_session.py index cc0ac744..78db7bd2 100644 --- a/wqflask/wqflask/user_session.py +++ b/wqflask/wqflask/user_session.py @@ -24,7 +24,7 @@ THIRTY_DAYS = 60 * 60 * 24 * 30 def get_user_session(): logger.info("@app.before_request get_session") g.user_session = UserSession() - #ZS: I think this should solve the issue of deleting the cookie and redirecting to the home page when a user's session has expired + # ZS: I think this should solve the issue of deleting the cookie and redirecting to the home page when a user's session has expired if not g.user_session: response = make_response(redirect(url_for('login'))) response.set_cookie('session_id_v2', '', expires=0) @@ -51,7 +51,7 @@ def create_signed_cookie(): logger.debug("uuid_signed:", uuid_signed) return the_uuid, uuid_signed -@app.route("/user/manage", methods=('GET','POST')) +@app.route("/user/manage", methods=('GET', 'POST')) def manage_user(): params = request.form if request.form else request.args if 'new_full_name' in params: @@ -61,7 +61,7 @@ def manage_user(): user_details = get_user_by_unique_column("user_id", g.user_session.user_id) - return render_template("admin/manage_user.html", user_details 
= user_details) + return render_template("admin/manage_user.html", user_details=user_details) class UserSession: """Logged in user handling""" @@ -89,25 +89,25 @@ class UserSession: self.session_id = session_id self.record = Redis.hgetall(self.redis_key) - #ZS: If user correctled logged in but their session expired - #ZS: Need to test this by setting the time-out to be really short or something + # ZS: If user correctled logged in but their session expired + # ZS: Need to test this by setting the time-out to be really short or something if not self.record or self.record == []: if user_cookie: self.logged_in = False - self.record = dict(login_time = time.time(), - user_type = "anon", - user_id = str(uuid.uuid4())) + self.record = dict(login_time=time.time(), + user_type="anon", + user_id=str(uuid.uuid4())) Redis.hmset(self.redis_key, self.record) Redis.expire(self.redis_key, THIRTY_DAYS) - ########### Grrr...this won't work because of the way flask handles cookies + # Grrr...this won't work because of the way flask handles cookies # Delete the cookie flash("Due to inactivity your session has expired. If you'd like please login again.") return None else: - self.record = dict(login_time = time.time(), - user_type = "anon", - user_id = str(uuid.uuid4())) + self.record = dict(login_time=time.time(), + user_type="anon", + user_id=str(uuid.uuid4())) Redis.hmset(self.redis_key, self.record) Redis.expire(self.redis_key, THIRTY_DAYS) else: @@ -138,13 +138,13 @@ class UserSession: def redis_user_id(self): """User id from Redis (need to check if this is the same as the id stored in self.records)""" - #ZS: This part is a bit weird. Some accounts used to not have saved user ids, and in the process of testing I think I created some duplicate accounts for myself. - #ZS: Accounts should automatically generate user_ids if they don't already have one now, so this might not be necessary for anything other than my account's collections + # ZS: This part is a bit weird. Some accounts used to not have saved user ids, and in the process of testing I think I created some duplicate accounts for myself. 
+ # ZS: Accounts should automatically generate user_ids if they don't already have one now, so this might not be necessary for anything other than my account's collections if 'user_email_address' in self.record: user_email = self.record['user_email_address'] - #ZS: Get user's collections if they exist + # ZS: Get user's collections if they exist user_id = None user_id = get_user_id("email_address", user_email) elif 'user_id' in self.record: @@ -153,7 +153,7 @@ class UserSession: user_github_id = self.record['github_id'] user_id = None user_id = get_user_id("github_id", user_github_id) - else: #ZS: Anonymous user + else: # ZS: Anonymous user return None return user_id @@ -170,9 +170,9 @@ class UserSession: def user_collections(self): """List of user's collections""" - #ZS: Get user's collections if they exist + # ZS: Get user's collections if they exist collections = get_user_collections(self.user_id) - collections = [item for item in collections if item['name'] != "Your Default Collection"] + [item for item in collections if item['name'] == "Your Default Collection"] #ZS: Ensure Default Collection is last in list + collections = [item for item in collections if item['name'] != "Your Default Collection"] + [item for item in collections if item['name'] == "Your Default Collection"] # ZS: Ensure Default Collection is last in list return collections @property @@ -189,7 +189,7 @@ class UserSession: 'created_timestamp': datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'), 'changed_timestamp': datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p'), 'num_members': len(traits), - 'members': list(traits) } + 'members': list(traits)} current_collections = self.user_collections current_collections.append(collection_dict) diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 37f2094f..2c53012a 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -141,7 +141,7 @@ def handle_bad_request(e): now = datetime.datetime.utcnow() time_str = now.strftime('%l:%M%p UTC %b %d, %Y') formatted_lines = [request.url + - " ("+time_str+")"]+traceback.format_exc().splitlines() + " (" + time_str + ")"]+traceback.format_exc().splitlines() # Handle random animations # Use a cookie to have one animation on refresh @@ -240,7 +240,7 @@ def search_page(): if USE_REDIS and valid_search: Redis.set(key, pickle.dumps(result, pickle.HIGHEST_PROTOCOL)) - Redis.expire(key, 60*60) + Redis.expire(key, 60 * 60) if valid_search: return render_template("search_result_page.html", **result) @@ -601,7 +601,7 @@ def heatmap_page(): pickled_result = pickle.dumps(result, pickle.HIGHEST_PROTOCOL) logger.info("pickled result length:", len(pickled_result)) Redis.set(key, pickled_result) - Redis.expire(key, 60*60) + Redis.expire(key, 60 * 60) with Bench("Rendering template"): rendered_template = render_template("heatmap.html", **result) diff --git a/wqflask/wsgi.py b/wqflask/wsgi.py index be9c7b37..755da333 100644 --- a/wqflask/wsgi.py +++ b/wqflask/wsgi.py @@ -1,4 +1,4 @@ -from run_gunicorn import app as application # expect application as a name +from run_gunicorn import app as application # expect application as a name if __name__ == "__main__": application.run() -- cgit v1.2.3 From 406eb27859cca232a562c722cbbd37aca2e3be84 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Fri, 30 Apr 2021 12:26:19 +0300 Subject: autopep8: Fix E301,E302,E303,E304,E305,E306 --- etc/default_settings.py | 21 +++---- wqflask/base/data_set.py | 1 - wqflask/base/mrna_assay_tissue_data.py | 1 + wqflask/base/species.py | 3 + 
wqflask/base/webqtlCaseData.py | 3 +- wqflask/db/call.py | 4 ++ wqflask/db/gn_server.py | 1 + wqflask/db/webqtlDatabaseFunction.py | 1 + wqflask/maintenance/convert_dryad_to_bimbam.py | 3 + wqflask/maintenance/convert_geno_to_bimbam.py | 4 ++ wqflask/maintenance/gen_select_dataset.py | 5 ++ .../maintenance/generate_kinship_from_bimbam.py | 1 + .../maintenance/generate_probesetfreeze_file.py | 7 +++ wqflask/maintenance/geno_to_json.py | 6 +- wqflask/maintenance/get_group_samplelists.py | 3 + wqflask/maintenance/print_benchmark.py | 4 ++ wqflask/maintenance/quantile_normalize.py | 6 ++ wqflask/maintenance/set_resource_defaults.py | 7 +++ wqflask/run_gunicorn.py | 2 + wqflask/tests/unit/base/test_webqtl_case_data.py | 1 + .../unit/utility/test_authentication_tools.py | 1 + wqflask/tests/unit/utility/test_chunks.py | 1 + .../test_display_mapping_results.py | 2 + .../marker_regression/test_plink_mapping.py | 3 +- .../marker_regression/test_qtlreaper_mapping.py | 2 + .../wqflask/marker_regression/test_rqtl_mapping.py | 2 +- wqflask/tests/unit/wqflask/test_collect.py | 2 + .../tests/wqflask/show_trait/test_show_trait.py | 1 - wqflask/utility/Plot.py | 13 +++++ wqflask/utility/__init__.py | 2 + wqflask/utility/after.py | 1 + wqflask/utility/authentication_tools.py | 1 + wqflask/utility/benchmark.py | 1 + wqflask/utility/corestats.py | 2 + wqflask/utility/elasticsearch_tools.py | 7 +++ wqflask/utility/external.py | 1 + wqflask/utility/gen_geno_ob.py | 3 + wqflask/utility/genofile_parser.py | 1 + wqflask/utility/logger.py | 3 + wqflask/utility/pillow_utils.py | 4 ++ wqflask/utility/startup_config.py | 1 + wqflask/utility/svg.py | 67 +++++++++++++++++++++- wqflask/utility/temp_data.py | 1 + wqflask/utility/tools.py | 25 ++++++++ wqflask/utility/type_checking.py | 5 ++ wqflask/utility/webqtlUtil.py | 5 ++ wqflask/wqflask/__init__.py | 1 + wqflask/wqflask/api/correlation.py | 7 +++ wqflask/wqflask/api/mapping.py | 2 +- wqflask/wqflask/api/router.py | 20 +++++++ wqflask/wqflask/collect.py | 8 +++ wqflask/wqflask/correlation/corr_scatter_plot.py | 1 + wqflask/wqflask/correlation/show_corr_results.py | 4 +- .../wqflask/correlation_matrix/show_corr_matrix.py | 3 + wqflask/wqflask/ctl/ctl_analysis.py | 1 + wqflask/wqflask/database.py | 2 + wqflask/wqflask/db_info.py | 1 + wqflask/wqflask/do_search.py | 21 ++++++- wqflask/wqflask/docs.py | 2 +- wqflask/wqflask/export_traits.py | 2 + wqflask/wqflask/external_tools/send_to_bnw.py | 1 + .../wqflask/external_tools/send_to_geneweaver.py | 3 + .../wqflask/external_tools/send_to_webgestalt.py | 3 + wqflask/wqflask/group_manager.py | 9 +++ wqflask/wqflask/gsearch.py | 1 + wqflask/wqflask/heatmap/heatmap.py | 3 + wqflask/wqflask/interval_analyst/GeneUtil.py | 2 + .../marker_regression/display_mapping_results.py | 9 --- wqflask/wqflask/marker_regression/plink_mapping.py | 9 ++- .../wqflask/marker_regression/qtlreaper_mapping.py | 5 ++ wqflask/wqflask/marker_regression/rqtl_mapping.py | 15 +++++ wqflask/wqflask/marker_regression/run_mapping.py | 9 +++ wqflask/wqflask/model.py | 7 ++- wqflask/wqflask/network_graph/network_graph.py | 1 + wqflask/wqflask/news.py | 1 + wqflask/wqflask/parser.py | 1 + wqflask/wqflask/pbkdf2.py | 2 + wqflask/wqflask/resource_manager.py | 7 +++ wqflask/wqflask/search_results.py | 4 ++ wqflask/wqflask/send_mail.py | 3 +- wqflask/wqflask/server_side.py | 1 - wqflask/wqflask/show_trait/SampleList.py | 1 + wqflask/wqflask/show_trait/export_trait_data.py | 3 + wqflask/wqflask/snp_browser/snp_browser.py | 9 ++- wqflask/wqflask/submit_bnw.py | 1 + 
wqflask/wqflask/update_search_results.py | 1 + wqflask/wqflask/user_login.py | 21 +++++++ wqflask/wqflask/user_manager.py | 38 +++++++++++- wqflask/wqflask/user_session.py | 6 ++ 89 files changed, 446 insertions(+), 46 deletions(-) (limited to 'wqflask/utility') diff --git a/etc/default_settings.py b/etc/default_settings.py index 27522187..a194b10e 100644 --- a/etc/default_settings.py +++ b/etc/default_settings.py @@ -73,23 +73,24 @@ SMTP_PASSWORD = "UNKNOWN" # ---- Behavioural settings (defaults) note that logger and log levels can # be overridden at the module level and with enviroment settings -WEBSERVER_MODE = 'DEV' # Python webserver mode (DEBUG|DEV|PROD) +WEBSERVER_MODE = 'DEV' # Python webserver mode (DEBUG|DEV|PROD) WEBSERVER_BRANDING = None # Set the branding (nyi) WEBSERVER_DEPLOY = None # Deployment specifics (nyi) -WEBSERVER_URL = "http://localhost:"+str(SERVER_PORT)+"/" # external URL +WEBSERVER_URL = "http://localhost:" + str(SERVER_PORT) + "/" # external URL -LOG_LEVEL = 'WARNING' # Logger mode (DEBUG|INFO|WARNING|ERROR|CRITICAL) +LOG_LEVEL = 'WARNING' # Logger mode (DEBUG|INFO|WARNING|ERROR|CRITICAL) LOG_LEVEL_DEBUG = '0' # logger.debugf log level (0-5, 5 = show all) -LOG_SQL = 'False' # Log SQL/backend and GN_SERVER calls +LOG_SQL = 'False' # Log SQL/backend and GN_SERVER calls LOG_SQL_ALCHEMY = 'False' -LOG_BENCH = True # Log bench marks +LOG_BENCH = True # Log bench marks -USE_REDIS = True # REDIS caching (note that redis will be phased out) -USE_GN_SERVER = 'False' # Use GN_SERVER SQL calls -HOME = os.environ['HOME'] +USE_REDIS = True # REDIS caching (note that redis will be phased out) +USE_GN_SERVER = 'False' # Use GN_SERVER SQL calls +HOME = os.environ['HOME'] # ---- Default locations -GENENETWORK_FILES = HOME+"/genotype_files" # base dir for all static data files +# base dir for all static data files +GENENETWORK_FILES = HOME + "/genotype_files" # ---- Path overrides for Genenetwork - the defaults are normally # picked up from Guix or in the HOME directory @@ -98,7 +99,7 @@ GENENETWORK_FILES = HOME+"/genotype_files" # base dir for all static data fil # PRIVATE_FILES = HOME+"/gn2_private_data" # private static data files (unused) # ---- Local path to JS libraries - for development modules (only) -JS_GN_PATH = os.environ['HOME']+"/genenetwork/javascript" +JS_GN_PATH = os.environ['HOME'] + "/genenetwork/javascript" # ---- GN2 Executables (overwrite for testing only) # PLINK_COMMAND = str.strip(os.popen("which plink2").read()) diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py index 55ab45f5..c14808d6 100644 --- a/wqflask/base/data_set.py +++ b/wqflask/base/data_set.py @@ -311,7 +311,6 @@ class HumanMarkers(Markers): marker['Mb'] = float(splat[3]) / 1000000 self.markers.append(marker) - def add_pvalues(self, p_values): super(HumanMarkers, self).add_pvalues(p_values) diff --git a/wqflask/base/mrna_assay_tissue_data.py b/wqflask/base/mrna_assay_tissue_data.py index f3264b3d..cbc05738 100644 --- a/wqflask/base/mrna_assay_tissue_data.py +++ b/wqflask/base/mrna_assay_tissue_data.py @@ -11,6 +11,7 @@ from utility.db_tools import escape from utility.logger import getLogger logger = getLogger(__name__) + class MrnaAssayTissueData: def __init__(self, gene_symbols=None): diff --git a/wqflask/base/species.py b/wqflask/base/species.py index cf764d72..44f133b5 100644 --- a/wqflask/base/species.py +++ b/wqflask/base/species.py @@ -6,6 +6,7 @@ from flask import Flask, g from utility.logger import getLogger logger = getLogger(__name__) + class TheSpecies: def 
__init__(self, dataset=None, species_name=None): if species_name != None: @@ -15,6 +16,7 @@ class TheSpecies: self.dataset = dataset self.chromosomes = Chromosomes(dataset=self.dataset) + class IndChromosome: def __init__(self, name, length): self.name = name @@ -25,6 +27,7 @@ class IndChromosome: """Chromosome length in megabases""" return self.length / 1000000 + class Chromosomes: def __init__(self, dataset=None, species=None): self.chromosomes = collections.OrderedDict() diff --git a/wqflask/base/webqtlCaseData.py b/wqflask/base/webqtlCaseData.py index aa55470f..2d07ab9d 100644 --- a/wqflask/base/webqtlCaseData.py +++ b/wqflask/base/webqtlCaseData.py @@ -28,6 +28,7 @@ import utility.tools utility.tools.show_settings() + class webqtlCaseData: """one case data in one trait""" @@ -78,4 +79,4 @@ class webqtlCaseData: def display_num_cases(self): if self.num_cases is not None: return "%s" % self.num_cases - return "x" \ No newline at end of file + return "x" diff --git a/wqflask/db/call.py b/wqflask/db/call.py index 555878ad..9412b376 100644 --- a/wqflask/db/call.py +++ b/wqflask/db/call.py @@ -16,6 +16,7 @@ logger = getLogger(__name__) # from inspect import stack + def fetch1(query, path=None, func=None): """Fetch one result as a Tuple using either a SQL query or the URI path to GN_SERVER (when USE_GN_SERVER is True). Apply func to @@ -35,6 +36,7 @@ GN_SERVER result when set (which should return a Tuple) else: return fetchone(query) + def fetchone(query): """Return tuple containing one row by calling SQL directly (the original fetchone, but with logging) @@ -46,6 +48,7 @@ original fetchone, but with logging) return res.fetchone() return logger.sql(query, helper) + def fetchall(query): """Return row iterator by calling SQL directly (the original fetchall, but with logging) @@ -57,6 +60,7 @@ original fetchall, but with logging) return res.fetchall() return logger.sql(query, helper) + def gn_server(path): """Return JSON record by calling GN_SERVER diff --git a/wqflask/db/gn_server.py b/wqflask/db/gn_server.py index 6c7383d0..f9b01658 100644 --- a/wqflask/db/gn_server.py +++ b/wqflask/db/gn_server.py @@ -5,5 +5,6 @@ from db.call import gn_server from utility.logger import getLogger logger = getLogger(__name__) + def menu_main(): return gn_server("/int/menu/main.json") diff --git a/wqflask/db/webqtlDatabaseFunction.py b/wqflask/db/webqtlDatabaseFunction.py index 18ade405..50ac06fd 100644 --- a/wqflask/db/webqtlDatabaseFunction.py +++ b/wqflask/db/webqtlDatabaseFunction.py @@ -31,6 +31,7 @@ logger = getLogger(__name__) # function: connect to database and return cursor instance ########################################################################### + def retrieve_species(group): """Get the species of a group (e.g. 
returns string "mouse" on "BXD" diff --git a/wqflask/maintenance/convert_dryad_to_bimbam.py b/wqflask/maintenance/convert_dryad_to_bimbam.py index 8eab66e8..e417c280 100644 --- a/wqflask/maintenance/convert_dryad_to_bimbam.py +++ b/wqflask/maintenance/convert_dryad_to_bimbam.py @@ -55,15 +55,18 @@ def read_dryad_file(filename): # # return geno_rows + def write_bimbam_files(geno_rows): with open('/home/zas1024/cfw_data/CFW_geno.txt', 'w') as geno_fh: for row in geno_rows: geno_fh.write(", ".join(row) + "\n") + def convert_dryad_to_bimbam(filename): geno_file_rows = read_dryad_file(filename) write_bimbam_files(geno_file_rows) + if __name__ == "__main__": input_filename = "/home/zas1024/cfw_data/" + sys.argv[1] + ".txt" convert_dryad_to_bimbam(input_filename) diff --git a/wqflask/maintenance/convert_geno_to_bimbam.py b/wqflask/maintenance/convert_geno_to_bimbam.py index dc01cbb3..5b2369c9 100644 --- a/wqflask/maintenance/convert_geno_to_bimbam.py +++ b/wqflask/maintenance/convert_geno_to_bimbam.py @@ -20,8 +20,10 @@ import simplejson as json from pprint import pformat as pf + class EmptyConfigurations(Exception): pass + class Marker: def __init__(self): self.name = None @@ -30,6 +32,7 @@ class Marker: self.Mb = None self.genotypes = [] + class ConvertGenoFile: def __init__(self, input_file, output_files): @@ -178,6 +181,7 @@ class ConvertGenoFile: print(" Row is:", convertob.latest_row_value) break + if __name__ == "__main__": Old_Geno_Directory = """/export/local/home/zas1024/gn2-zach/genotype_files/genotype""" New_Geno_Directory = """/export/local/home/zas1024/gn2-zach/genotype_files/genotype/bimbam""" diff --git a/wqflask/maintenance/gen_select_dataset.py b/wqflask/maintenance/gen_select_dataset.py index f480d63f..583a06e1 100644 --- a/wqflask/maintenance/gen_select_dataset.py +++ b/wqflask/maintenance/gen_select_dataset.py @@ -55,6 +55,7 @@ from pprint import pformat as pf #conn = Engine.connect() + def parse_db_uri(): """Converts a database URI to the db name, host name, user name, and password""" @@ -143,6 +144,7 @@ def phenotypes_exist(group_name): else: return False + def genotypes_exist(group_name): #print("group_name:", group_name) Cursor.execute("""select Name from GenoFreeze @@ -156,6 +158,7 @@ def genotypes_exist(group_name): else: return False + def build_types(species, group): """Fetches tissues @@ -184,6 +187,7 @@ def build_types(species, group): return results + def get_datasets(types): """Build datasets list""" datasets = {} @@ -308,6 +312,7 @@ def _test_it(): datasets = build_datasets("Mouse", "BXD", "Hippocampus") #print("build_datasets:", pf(datasets)) + if __name__ == '__main__': Conn = MySQLdb.Connect(**parse_db_uri()) Cursor = Conn.cursor() diff --git a/wqflask/maintenance/generate_kinship_from_bimbam.py b/wqflask/maintenance/generate_kinship_from_bimbam.py index 664e9e48..7cc60c9e 100644 --- a/wqflask/maintenance/generate_kinship_from_bimbam.py +++ b/wqflask/maintenance/generate_kinship_from_bimbam.py @@ -13,6 +13,7 @@ sys.path.append("..") import os import glob + class GenerateKinshipMatrices: def __init__(self, group_name, geno_file, pheno_file): self.group_name = group_name diff --git a/wqflask/maintenance/generate_probesetfreeze_file.py b/wqflask/maintenance/generate_probesetfreeze_file.py index b1e41e9a..bd9c2ab4 100644 --- a/wqflask/maintenance/generate_probesetfreeze_file.py +++ b/wqflask/maintenance/generate_probesetfreeze_file.py @@ -23,10 +23,12 @@ def get_cursor(): cursor = con.cursor() return cursor + def show_progress(process, counter): if counter % 1000 
== 0: print("{}: {}".format(process, counter)) + def get_strains(cursor): cursor.execute("""select Strain.Name from Strain, StrainXRef, InbredSet @@ -42,6 +44,7 @@ def get_strains(cursor): return strains + def get_probeset_vals(cursor, dataset_name): cursor.execute(""" select ProbeSet.Id, ProbeSet.Name from ProbeSetXRef, @@ -77,6 +80,7 @@ def get_probeset_vals(cursor, dataset_name): return probeset_vals + def trim_strains(strains, probeset_vals): trimmed_strains = [] #print("probeset_vals is:", pf(probeset_vals)) @@ -89,6 +93,7 @@ def trim_strains(strains, probeset_vals): print("trimmed_strains:", pf(trimmed_strains)) return trimmed_strains + def write_data_matrix_file(strains, probeset_vals, filename): with open(filename, "wb") as fh: csv_writer = csv.writer(fh, delimiter=",", quoting=csv.QUOTE_ALL) @@ -103,6 +108,7 @@ def write_data_matrix_file(strains, probeset_vals, filename): csv_writer.writerow(row_data) show_progress("Writing", counter) + def main(): filename = os.path.expanduser("~/gene/wqflask/maintenance/" + "ProbeSetFreezeId_210_FullName_Eye_AXBXA_Illumina_V6.2" + @@ -117,5 +123,6 @@ def main(): trimmed_strains = trim_strains(strains, probeset_vals) write_data_matrix_file(trimmed_strains, probeset_vals, filename) + if __name__ == '__main__': main() diff --git a/wqflask/maintenance/geno_to_json.py b/wqflask/maintenance/geno_to_json.py index fa0dcebd..ad3f2b72 100644 --- a/wqflask/maintenance/geno_to_json.py +++ b/wqflask/maintenance/geno_to_json.py @@ -25,10 +25,10 @@ from pprint import pformat as pf #from utility.tools import flat_files + class EmptyConfigurations(Exception): pass - class Marker: def __init__(self): self.name = None @@ -37,6 +37,7 @@ class Marker: self.Mb = None self.genotypes = [] + class ConvertGenoFile: def __init__(self, input_file, output_file): @@ -78,7 +79,6 @@ class ConvertGenoFile: # elif self.file_type == "snps": # self.process_snps_file() - def process_csv(self): for row_count, row in enumerate(self.process_rows()): row_items = row.split("\t") @@ -121,7 +121,6 @@ class ConvertGenoFile: # self.output_fh.write("\n") - def process_rows(self): for self.latest_row_pos, row in enumerate(self.input_fh): # if self.input_file.endswith(".geno.gz"): @@ -182,7 +181,6 @@ class ConvertGenoFile: # convertob = ConvertGenoFile(input_file, output_file) - if __name__ == "__main__": Old_Geno_Directory = """/export/local/home/zas1024/gn2-zach/genotype_files/genotype""" New_Geno_Directory = """/export/local/home/zas1024/gn2-zach/genotype_files/genotype/json""" diff --git a/wqflask/maintenance/get_group_samplelists.py b/wqflask/maintenance/get_group_samplelists.py index 3f9d0278..0a450d3f 100644 --- a/wqflask/maintenance/get_group_samplelists.py +++ b/wqflask/maintenance/get_group_samplelists.py @@ -4,12 +4,14 @@ import gzip from base import webqtlConfig + def get_samplelist(file_type, geno_file): if file_type == "geno": return get_samplelist_from_geno(geno_file) elif file_type == "plink": return get_samplelist_from_plink(geno_file) + def get_samplelist_from_geno(genofilename): if os.path.isfile(genofilename + '.gz'): genofilename += '.gz' @@ -33,6 +35,7 @@ def get_samplelist_from_geno(genofilename): samplelist = headers[3:] return samplelist + def get_samplelist_from_plink(genofilename): genofile = open(genofilename) diff --git a/wqflask/maintenance/print_benchmark.py b/wqflask/maintenance/print_benchmark.py index a1046c86..9d12da8a 100644 --- a/wqflask/maintenance/print_benchmark.py +++ b/wqflask/maintenance/print_benchmark.py @@ -15,15 +15,18 @@ class TheCounter: 
self.time_took = time.time() - start_time TheCounter.Counters[self.__class__.__name__] = self.time_took + class PrintAll(TheCounter): def print_it(self, counter): print(counter) + class PrintSome(TheCounter): def print_it(self, counter): if counter % 1000 == 0: print(counter) + class PrintNone(TheCounter): def print_it(self, counter): pass @@ -37,5 +40,6 @@ def new_main(): print(pf(TheCounter.Counters)) + if __name__ == '__main__': new_main() diff --git a/wqflask/maintenance/quantile_normalize.py b/wqflask/maintenance/quantile_normalize.py index 6751a8e5..1896bc52 100644 --- a/wqflask/maintenance/quantile_normalize.py +++ b/wqflask/maintenance/quantile_normalize.py @@ -14,6 +14,7 @@ from wqflask import app from utility.elasticsearch_tools import get_elasticsearch_connection from utility.tools import ELASTICSEARCH_HOST, ELASTICSEARCH_PORT, SQL_URI + def parse_db_uri(): """Converts a database URI to the db name, host name, user name, and password""" @@ -28,6 +29,7 @@ def parse_db_uri(): print(db_conn_info) return db_conn_info + def create_dataframe(input_file): with open(input_file) as f: ncols = len(f.readline().split("\t")) @@ -36,6 +38,8 @@ def create_dataframe(input_file): return pd.DataFrame(input_array) # This function taken from https://github.com/ShawnLYU/Quantile_Normalize + + def quantileNormalize(df_input): df = df_input.copy() # compute rank @@ -50,6 +54,7 @@ def quantileNormalize(df_input): df[col] = [rank[i] for i in t] return df + def set_data(dataset_name): orig_file = "/home/zas1024/cfw_data/" + dataset_name + ".txt" @@ -95,6 +100,7 @@ def set_data(dataset_name): } } + if __name__ == '__main__': Conn = MySQLdb.Connect(**parse_db_uri()) Cursor = Conn.cursor() diff --git a/wqflask/maintenance/set_resource_defaults.py b/wqflask/maintenance/set_resource_defaults.py index 286094dd..c6c4f44c 100644 --- a/wqflask/maintenance/set_resource_defaults.py +++ b/wqflask/maintenance/set_resource_defaults.py @@ -37,6 +37,7 @@ import urllib.parse from utility.logger import getLogger logger = getLogger(__name__) + def parse_db_uri(): """Converts a database URI to the db name, host name, user name, and password""" @@ -51,6 +52,7 @@ def parse_db_uri(): print(db_conn_info) return db_conn_info + def insert_probeset_resources(default_owner_id): current_resources = Redis.hgetall("resources") Cursor.execute(""" SELECT @@ -77,6 +79,7 @@ def insert_probeset_resources(default_owner_id): add_resource(resource_ob, update=False) + def insert_publish_resources(default_owner_id): current_resources = Redis.hgetall("resources") Cursor.execute(""" SELECT @@ -110,6 +113,7 @@ def insert_publish_resources(default_owner_id): else: continue + def insert_geno_resources(default_owner_id): current_resources = Redis.hgetall("resources") Cursor.execute(""" SELECT @@ -139,6 +143,7 @@ def insert_geno_resources(default_owner_id): add_resource(resource_ob, update=False) + def insert_resources(default_owner_id): current_resources = get_resources() print("START") @@ -149,6 +154,7 @@ def insert_resources(default_owner_id): insert_probeset_resources(default_owner_id) print("AFTER PROBESET") + def main(): """Generates and outputs (as json file) the data for the main dropdown menus on the home page""" @@ -158,6 +164,7 @@ def main(): insert_resources(owner_id) + if __name__ == '__main__': Conn = MySQLdb.Connect(**parse_db_uri()) Cursor = Conn.cursor() diff --git a/wqflask/run_gunicorn.py b/wqflask/run_gunicorn.py index 58108e03..03f310eb 100644 --- a/wqflask/run_gunicorn.py +++ b/wqflask/run_gunicorn.py @@ -14,9 +14,11 @@ from 
utility.startup_config import app_config
 
 app_config()
 
+
 @app.route("/gunicorn")
 def hello():
     return "

    Hello There!

    " + if __name__ == "__main__": app.run(host='0.0.0.0') diff --git a/wqflask/tests/unit/base/test_webqtl_case_data.py b/wqflask/tests/unit/base/test_webqtl_case_data.py index 8e8ba482..cebd41ce 100644 --- a/wqflask/tests/unit/base/test_webqtl_case_data.py +++ b/wqflask/tests/unit/base/test_webqtl_case_data.py @@ -4,6 +4,7 @@ import unittest from wqflask import app # Required because of utility.tools in webqtlCaseData.py from base.webqtlCaseData import webqtlCaseData + class TestWebqtlCaseData(unittest.TestCase): """Tests for WebqtlCaseData class""" diff --git a/wqflask/tests/unit/utility/test_authentication_tools.py b/wqflask/tests/unit/utility/test_authentication_tools.py index 42dcae88..024ab43f 100644 --- a/wqflask/tests/unit/utility/test_authentication_tools.py +++ b/wqflask/tests/unit/utility/test_authentication_tools.py @@ -5,6 +5,7 @@ from unittest import mock from utility.authentication_tools import check_resource_availability from utility.authentication_tools import add_new_resource + class TestResponse: """Mock Test Response after a request""" @property diff --git a/wqflask/tests/unit/utility/test_chunks.py b/wqflask/tests/unit/utility/test_chunks.py index 8d90a1ec..1d349193 100644 --- a/wqflask/tests/unit/utility/test_chunks.py +++ b/wqflask/tests/unit/utility/test_chunks.py @@ -7,6 +7,7 @@ from utility.chunks import divide_into_chunks class TestChunks(unittest.TestCase): "Test Utility method for chunking" + def test_divide_into_chunks(self): "Check that a list is chunked correctly" self.assertEqual(divide_into_chunks([1, 2, 7, 3, 22, 8, 5, 22, 333], 3), diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_display_mapping_results.py b/wqflask/tests/unit/wqflask/marker_regression/test_display_mapping_results.py index 8ae0f09f..219a6a29 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_display_mapping_results.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_display_mapping_results.py @@ -9,6 +9,7 @@ from wqflask.marker_regression.display_mapping_results import ( class TestDisplayMappingResults(unittest.TestCase): """Basic Methods to test Mapping Results""" + def test_pil_colors(self): """Test that colors use PILLOW color format""" self.assertEqual(DisplayMappingResults.CLICKABLE_WEBQTL_REGION_COLOR, @@ -17,6 +18,7 @@ class TestDisplayMappingResults(unittest.TestCase): class TestHtmlGenWrapper(unittest.TestCase): """Test Wrapper around HTMLGen""" + def test_create_image(self): """Test HT.Image method""" self.assertEqual( diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_plink_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_plink_mapping.py index 5eec93f1..fd21a825 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_plink_mapping.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_plink_mapping.py @@ -12,9 +12,10 @@ class AttributeSetter: def __init__(self, obj): for key, val in obj.items(): setattr(self, key, val) -class TestPlinkMapping(unittest.TestCase): +class TestPlinkMapping(unittest.TestCase): + def test_build_line_list(self): """test for building line list""" line_1 = "this is line one test" diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py index bbb79f98..5cc8fd0f 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py @@ -4,6 +4,8 @@ from wqflask.marker_regression.qtlreaper_mapping 
import gen_pheno_txt_file # issues some methods in genofile object are not defined # modify samples should equal to vals + + class TestQtlReaperMapping(unittest.TestCase): @mock.patch("wqflask.marker_regression.qtlreaper_mapping.TEMPDIR", "/home/user/data") def test_gen_pheno_txt_file(self): diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py index 0e617e93..6267ce9a 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py @@ -5,6 +5,7 @@ from wqflask.marker_regression.rqtl_mapping import get_trait_data_type from wqflask.marker_regression.rqtl_mapping import sanitize_rqtl_phenotype from wqflask.marker_regression.rqtl_mapping import sanitize_rqtl_names + class TestRqtlMapping(unittest.TestCase): def setUp(self): @@ -14,7 +15,6 @@ class TestRqtlMapping(unittest.TestCase): def tearDown(self): self.app_context.pop() - @mock.patch("wqflask.marker_regression.rqtl_mapping.g") @mock.patch("wqflask.marker_regression.rqtl_mapping.logger") def test_get_trait_data(self, mock_logger, mock_db): diff --git a/wqflask/tests/unit/wqflask/test_collect.py b/wqflask/tests/unit/wqflask/test_collect.py index 9a36132d..2a914fb2 100644 --- a/wqflask/tests/unit/wqflask/test_collect.py +++ b/wqflask/tests/unit/wqflask/test_collect.py @@ -11,6 +11,7 @@ app = Flask(__name__) class MockSession: """Helper class for mocking wqflask.collect.g.user_session.logged_in""" + def __init__(self, is_logged_in=False): self.is_logged_in = is_logged_in @@ -21,6 +22,7 @@ class MockSession: class MockFlaskG: """Helper class for mocking wqflask.collect.g.user_session""" + def __init__(self, is_logged_in=False): self.is_logged_in = is_logged_in diff --git a/wqflask/tests/wqflask/show_trait/test_show_trait.py b/wqflask/tests/wqflask/show_trait/test_show_trait.py index 8c866874..24c3923e 100644 --- a/wqflask/tests/wqflask/show_trait/test_show_trait.py +++ b/wqflask/tests/wqflask/show_trait/test_show_trait.py @@ -242,7 +242,6 @@ class TestTraits(unittest.TestCase): self.assertEqual(get_genotype_scales(file_location), expected_results) mock_get_scales.assert_called_once_with(file_location) - @mock.patch("wqflask.show_trait.show_trait.locate_ignore_error") def test_get_scales_from_genofile_found(self, mock_ignore_location): """"add test for get scales from genofile where file is found""" diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py index 68c2cb72..d35b2089 100644 --- a/wqflask/utility/Plot.py +++ b/wqflask/utility/Plot.py @@ -47,6 +47,7 @@ COUR_FILE = "./wqflask/static/fonts/courbd.ttf" TAHOMA_FILE = "./wqflask/static/fonts/tahoma.ttf" # ---- END: FONT FILES ---- # + def cformat(d, rank=0): 'custom string format' strD = "%2.6f" % d @@ -68,6 +69,7 @@ def cformat(d, rank=0): strD = '0.0' return strD + def frange(start, end=None, inc=1.0): "A faster range-like function that does accept float increments..." 
if end == None: @@ -84,6 +86,7 @@ def frange(start, end=None, inc=1.0): L[i] = start + i * inc return L + def find_outliers(vals): """Calculates the upper and lower bounds of a set of sample/case values @@ -119,6 +122,8 @@ def find_outliers(vals): # parameter: data is either object returned by reaper permutation function (called by MarkerRegressionPage.py) # or the first object returned by direct (pair-scan) permu function (called by DirectPlotPage.py) + + def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLabel=None, YLabel=None, title=None, offset=(60, 20, 40, 40), zoom=1): im_drawer = ImageDraw.Draw(canvas) xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset @@ -230,6 +235,8 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab font=labelFont, fill=labelColor) # This function determines the scale of the plot + + def detScaleOld(min, max): if min >= max: return None @@ -246,6 +253,7 @@ def detScaleOld(min, max): high = c * ceil(max / c) return [low, high, round((high - low) / c)] + def detScale(min=0, max=0): if min >= max: @@ -283,15 +291,19 @@ def detScale(min=0, max=0): return [low, high, n] + def bluefunc(x): return 1.0 / (1.0 + exp(-10 * (x - 0.6))) + def redfunc(x): return 1.0 / (1.0 + exp(10 * (x - 0.5))) + def greenfunc(x): return 1 - pow(redfunc(x + 0.2), 2) - bluefunc(x - 0.3) + def colorSpectrum(n=100): multiple = 10 if n == 1: @@ -319,6 +331,7 @@ def colorSpectrum(n=100): out2.append(out[-1]) return out2 + def _test(): import doctest doctest.testmod() diff --git a/wqflask/utility/__init__.py b/wqflask/utility/__init__.py index ec7e72d0..d540c96e 100644 --- a/wqflask/utility/__init__.py +++ b/wqflask/utility/__init__.py @@ -2,8 +2,10 @@ from pprint import pformat as pf # Todo: Move these out of __init__ + class Bunch: """Like a dictionary but using object notation""" + def __init__(self, **kw): self.__dict__ = kw diff --git a/wqflask/utility/after.py b/wqflask/utility/after.py index 06091ecb..2b560e48 100644 --- a/wqflask/utility/after.py +++ b/wqflask/utility/after.py @@ -7,6 +7,7 @@ from flask import g from wqflask import app + def after_this_request(f): if not hasattr(g, 'after_request_callbacks'): g.after_request_callbacks = [] diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index 672b36d5..57dbf8ba 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -11,6 +11,7 @@ from utility.redis_tools import (get_redis_conn, add_resource) Redis = get_redis_conn() + def check_resource_availability(dataset, trait_id=None): # At least for now assume temporary entered traits are accessible if type(dataset) == str or dataset.type == "Temp": diff --git a/wqflask/utility/benchmark.py b/wqflask/utility/benchmark.py index 3d40a3b8..48ab1dc0 100644 --- a/wqflask/utility/benchmark.py +++ b/wqflask/utility/benchmark.py @@ -6,6 +6,7 @@ from utility.tools import LOG_BENCH from utility.logger import getLogger logger = getLogger(__name__) + class Bench: entries = collections.OrderedDict() diff --git a/wqflask/utility/corestats.py b/wqflask/utility/corestats.py index 15d1cb8d..523280a1 100644 --- a/wqflask/utility/corestats.py +++ b/wqflask/utility/corestats.py @@ -16,6 +16,8 @@ import sys # ZS: Should switch to using some third party library for this; maybe scipy has an equivalent + + class Stats: def __init__(self, sequence): diff --git a/wqflask/utility/elasticsearch_tools.py b/wqflask/utility/elasticsearch_tools.py index 
ae1181e4..9415cef0 100644 --- a/wqflask/utility/elasticsearch_tools.py +++ b/wqflask/utility/elasticsearch_tools.py @@ -47,11 +47,13 @@ logger = getLogger(__name__) from utility.tools import ELASTICSEARCH_HOST, ELASTICSEARCH_PORT + def test_elasticsearch_connection(): es = Elasticsearch(['http://' + ELASTICSEARCH_HOST + ":" + str(ELASTICSEARCH_PORT) + '/'], verify_certs=True) if not es.ping(): logger.warning("Elasticsearch is DOWN") + def get_elasticsearch_connection(for_user=True): """Return a connection to ES. Returns None on failure""" logger.info("get_elasticsearch_connection") @@ -77,6 +79,7 @@ def get_elasticsearch_connection(for_user=True): return es + def setup_users_index(es_connection): if es_connection: index_settings = { @@ -87,12 +90,15 @@ def setup_users_index(es_connection): es_connection.indices.create(index='users', ignore=400) es_connection.indices.put_mapping(body=index_settings, index="users", doc_type="local") + def get_user_by_unique_column(es, column_name, column_value, index="users", doc_type="local"): return get_item_by_unique_column(es, column_name, column_value, index=index, doc_type=doc_type) + def save_user(es, user, user_id): es_save_data(es, "users", "local", user, user_id) + def get_item_by_unique_column(es, column_name, column_value, index, doc_type): item_details = None try: @@ -106,6 +112,7 @@ def get_item_by_unique_column(es, column_name, column_value, index, doc_type): pass return item_details + def es_save_data(es, index, doc_type, data_item, data_id,): from time import sleep es.create(index, doc_type, body=data_item, id=data_id) diff --git a/wqflask/utility/external.py b/wqflask/utility/external.py index c1bf4043..805d2ffe 100644 --- a/wqflask/utility/external.py +++ b/wqflask/utility/external.py @@ -4,6 +4,7 @@ import os import sys import subprocess + def shell(command): if subprocess.call(command, shell=True) != 0: raise Exception("ERROR: failed on " + command) diff --git a/wqflask/utility/gen_geno_ob.py b/wqflask/utility/gen_geno_ob.py index 9cfa39f9..24604e58 100644 --- a/wqflask/utility/gen_geno_ob.py +++ b/wqflask/utility/gen_geno_ob.py @@ -1,6 +1,7 @@ import utility.logger logger = utility.logger.getLogger(__name__) + class genotype: """ Replacement for reaper.Dataset so we can remove qtlreaper use while still generating mapping output figure @@ -119,6 +120,7 @@ class genotype: self.chromosomes.append(chr_ob) + class Chr: def __init__(self, name, geno_ob): self.name = name @@ -140,6 +142,7 @@ class Chr: def add_marker(self, marker_row): self.loci.append(Locus(self.geno_ob, marker_row)) + class Locus: def __init__(self, geno_ob, marker_row=None): self.chr = None diff --git a/wqflask/utility/genofile_parser.py b/wqflask/utility/genofile_parser.py index f8e96d19..94a08c17 100644 --- a/wqflask/utility/genofile_parser.py +++ b/wqflask/utility/genofile_parser.py @@ -12,6 +12,7 @@ import simplejson as json from pprint import pformat as pf + class Marker: def __init__(self): self.name = None diff --git a/wqflask/utility/logger.py b/wqflask/utility/logger.py index 16912e58..47079818 100644 --- a/wqflask/utility/logger.py +++ b/wqflask/utility/logger.py @@ -35,6 +35,7 @@ import datetime from utility.tools import LOG_LEVEL, LOG_LEVEL_DEBUG, LOG_SQL + class GNLogger: """A logger class with some additional functionality, such as multiple parameter logging, SQL logging, timing, colors, and lazy @@ -139,6 +140,8 @@ LOG_LEVEL_DEBUG (NYI). # Get the module logger. 
You can override log levels at the # module level + + def getLogger(name, level=None): gnlogger = GNLogger(name) logger = gnlogger.logger diff --git a/wqflask/utility/pillow_utils.py b/wqflask/utility/pillow_utils.py index 6e95beb0..5713e155 100644 --- a/wqflask/utility/pillow_utils.py +++ b/wqflask/utility/pillow_utils.py @@ -9,6 +9,8 @@ BLACK = ImageColor.getrgb("black") WHITE = ImageColor.getrgb("white") # def draw_rotated_text(canvas: Image, text: str, font: ImageFont, xy: tuple, fill: ImageColor=BLACK, angle: int=-90): + + def draw_rotated_text(canvas, text, font, xy, fill=BLACK, angle=-90): # type: (Image, str, ImageFont, tuple, ImageColor, int) """Utility function draw rotated text""" @@ -20,6 +22,8 @@ def draw_rotated_text(canvas, text, font, xy, fill=BLACK, angle=-90): canvas.paste(im=tmp_img2, box=tuple([int(i) for i in xy])) # def draw_open_polygon(canvas: Image, xy: tuple, fill: ImageColor=WHITE, outline: ImageColor=BLACK): + + def draw_open_polygon(canvas, xy, fill=None, outline=BLACK, width=0): # type: (Image, tuple, ImageColor, ImageColor) draw_ctx = ImageDraw.Draw(canvas) diff --git a/wqflask/utility/startup_config.py b/wqflask/utility/startup_config.py index f22f4b14..92f944bc 100644 --- a/wqflask/utility/startup_config.py +++ b/wqflask/utility/startup_config.py @@ -10,6 +10,7 @@ GREEN = '\033[92m' BOLD = '\033[1m' ENDC = '\033[0m' + def app_config(): app.config['SESSION_TYPE'] = 'filesystem' if not app.config.get('SECRET_KEY'): diff --git a/wqflask/utility/svg.py b/wqflask/utility/svg.py index 4c478c36..f5ef81e1 100644 --- a/wqflask/utility/svg.py +++ b/wqflask/utility/svg.py @@ -459,11 +459,13 @@ class rect(SVGelement): if stroke_width != None: self.attributes['stroke-width'] = stroke_width + class ellipse(SVGelement): """e=ellipse(rx,ry,x,y,fill,stroke,stroke_width,**args) an ellipse is defined as a center and a x and y radius. """ + def __init__(self, cx=None, cy=None, rx=None, ry=None,fill=None,stroke=None,stroke_width=None,**args): if rx == None or ry == None: raise ValueError('both rx and ry are required') @@ -486,6 +488,7 @@ class circle(SVGelement): The circle creates an element using a x, y and radius values eg """ + def __init__(self, cx=None, cy=None, r=None, fill=None,stroke=None,stroke_width=None,**args): if r == None: raise ValueError('r is required') @@ -501,20 +504,24 @@ class circle(SVGelement): if stroke_width != None: self.attributes['stroke-width'] = stroke_width + class point(circle): """p=point(x,y,color) A point is defined as a circle with a size 1 radius. It may be more efficient to use a very small rectangle if you use many points because a circle is difficult to render. 
""" + def __init__(self, x, y, fill='black', **args): circle.__init__(self, x, y, 1, fill, **args) + class line(SVGelement): """l=line(x1,y1,x2,y2,stroke,stroke_width,**args) A line is defined by a begin x,y pair and an end x,y pair """ + def __init__(self, x1=None, y1=None, x2=None, y2=None,stroke=None,stroke_width=None,**args): SVGelement.__init__(self, 'line', **args) if x1 != None: @@ -530,11 +537,13 @@ class line(SVGelement): if stroke != None: self.attributes['stroke'] = stroke + class polyline(SVGelement): """pl=polyline([[x1,y1],[x2,y2],...],fill,stroke,stroke_width,**args) a polyline is defined by a list of xy pairs """ + def __init__(self, points, fill=None, stroke=None, stroke_width=None,**args): SVGelement.__init__(self, 'polyline', {'points': _xypointlist(points)}, **args) if fill != None: @@ -544,11 +553,13 @@ class polyline(SVGelement): if stroke != None: self.attributes['stroke'] = stroke + class polygon(SVGelement): """pl=polyline([[x1,y1],[x2,y2],...],fill,stroke,stroke_width,**args) a polygon is defined by a list of xy pairs """ + def __init__(self, points, fill=None, stroke=None, stroke_width=None,**args): SVGelement.__init__(self, 'polygon', {'points': _xypointlist(points)}, **args) if fill != None: @@ -558,11 +569,13 @@ class polygon(SVGelement): if stroke != None: self.attributes['stroke'] = stroke + class path(SVGelement): """p=path(path,fill,stroke,stroke_width,**args) a path is defined by a path object and optional width, stroke and fillcolor """ + def __init__(self, pathdata, fill=None, stroke=None, stroke_width=None,id=None,**args): SVGelement.__init__(self, 'path', {'d': str(pathdata)}, **args) if stroke != None: @@ -580,6 +593,7 @@ class text(SVGelement): a text element can bge used for displaying text on the screen """ + def __init__(self, x=None, y=None, text=None, font_size=None,font_family=None,text_anchor=None,**args): SVGelement.__init__(self, 'text', **args) if x != None: @@ -601,11 +615,13 @@ class textpath(SVGelement): a textpath places a text on a path which is referenced by a link. """ + def __init__(self, link, text=None, **args): SVGelement.__init__(self, 'textPath', {'xlink:href': link}, **args) if text != None: self.text = text + class pattern(SVGelement): """p=pattern(x,y,width,height,patternUnits,**args) @@ -613,6 +629,7 @@ class pattern(SVGelement): graphic object which can be replicated ("tiled") at fixed intervals in x and y to cover the areas to be painted. """ + def __init__(self, x=None, y=None, width=None, height=None,patternUnits=None,**args): SVGelement.__init__(self, 'pattern', **args) if x != None: @@ -626,34 +643,40 @@ class pattern(SVGelement): if patternUnits != None: self.attributes['patternUnits'] = patternUnits + class title(SVGelement): """t=title(text,**args) a title is a text element. The text is displayed in the title bar add at least one to the root svg element """ + def __init__(self, text=None, **args): SVGelement.__init__(self, 'title', **args) if text != None: self.text = text + class description(SVGelement): """d=description(text,**args) a description can be added to any element and is used for a tooltip Add this element before adding other elements. """ + def __init__(self, text=None, **args): SVGelement.__init__(self, 'desc', **args) if text != None: self.text = text + class lineargradient(SVGelement): """lg=lineargradient(x1,y1,x2,y2,id,**args) defines a lineargradient using two xy pairs. stop elements van be added to define the gradient colors. 
""" + def __init__(self, x1=None, y1=None, x2=None, y2=None,id=None,**args): SVGelement.__init__(self, 'linearGradient', **args) if x1 != None: @@ -667,12 +690,14 @@ class lineargradient(SVGelement): if id != None: self.attributes['id'] = id + class radialgradient(SVGelement): """rg=radialgradient(cx,cy,r,fx,fy,id,**args) defines a radial gradient using a outer circle which are defined by a cx,cy and r and by using a focalpoint. stop elements van be added to define the gradient colors. """ + def __init__(self, cx=None, cy=None, r=None, fx=None,fy=None,id=None,**args): SVGelement.__init__(self, 'radialGradient', **args) if cx != None: @@ -688,21 +713,25 @@ class radialgradient(SVGelement): if id != None: self.attributes['id'] = id + class stop(SVGelement): """st=stop(offset,stop_color,**args) Puts a stop color at the specified radius """ + def __init__(self, offset, stop_color=None, **args): SVGelement.__init__(self, 'stop', {'offset': offset}, **args) if stop_color != None: self.attributes['stop-color'] = stop_color + class style(SVGelement): """st=style(type,cdata=None,**args) Add a CDATA element to this element for defing in line stylesheets etc.. """ + def __init__(self, type, cdata=None, **args): SVGelement.__init__(self, 'style', {'type': type}, cdata=cdata, **args) @@ -712,6 +741,7 @@ class image(SVGelement): adds an image to the drawing. Supported formats are .png, .jpg and .svg. """ + def __init__(self, url, x=None, y=None, width=None,height=None,**args): if width == None or height == None: raise ValueError('both height and width are required') @@ -721,11 +751,13 @@ class image(SVGelement): if y != None: self.attributes['y'] = y + class cursor(SVGelement): """c=cursor(url,**args) defines a custom cursor for a element or a drawing """ + def __init__(self, url, **args): SVGelement.__init__(self, 'cursor', {'xlink:href': url}, **args) @@ -736,6 +768,7 @@ class marker(SVGelement): defines a marker which can be used as an endpoint for a line or other pathtypes add an element to it which should be used as a marker. """ + def __init__(self, id=None, viewBox=None, refx=None, refy=None,markerWidth=None,markerHeight=None,**args): SVGelement.__init__(self, 'marker', **args) if id != None: @@ -751,17 +784,20 @@ class marker(SVGelement): if markerHeight != None: self.attributes['markerHeight'] = markerHeight + class group(SVGelement): """g=group(id,**args) a group is defined by an id and is used to contain elements g.addElement(SVGelement) """ + def __init__(self, id=None, **args): SVGelement.__init__(self, 'g', **args) if id != None: self.attributes['id'] = id + class symbol(SVGelement): """sy=symbol(id,viewbox,**args) @@ -778,14 +814,17 @@ class symbol(SVGelement): if viewBox != None: self.attributes['viewBox'] = _viewboxlist(viewBox) + class defs(SVGelement): """d=defs(**args) container for defining elements """ + def __init__(self, **args): SVGelement.__init__(self, 'defs', **args) + class switch(SVGelement): """sw=switch(**args) @@ -793,6 +832,7 @@ class switch(SVGelement): requiredFeatures, requiredExtensions and systemLanguage. Refer to the SVG specification for details. 
""" + def __init__(self, **args): SVGelement.__init__(self, 'switch', **args) @@ -802,6 +842,7 @@ class use(SVGelement): references a symbol by linking to its id and its position, height and width """ + def __init__(self, link, x=None, y=None, width=None,height=None,**args): SVGelement.__init__(self, 'use', {'xlink:href': link}, **args) if x != None: @@ -821,32 +862,39 @@ class link(SVGelement): a link is defined by a hyperlink. add elements which have to be linked a.addElement(SVGelement) """ + def __init__(self, link='', **args): SVGelement.__init__(self, 'a', {'xlink:href': link}, **args) + class view(SVGelement): """v=view(id,**args) a view can be used to create a view with different attributes""" + def __init__(self, id=None, **args): SVGelement.__init__(self, 'view', **args) if id != None: self.attributes['id'] = id + class script(SVGelement): """sc=script(type,type,cdata,**args) adds a script element which contains CDATA to the SVG drawing """ + def __init__(self, type, cdata=None, **args): SVGelement.__init__(self, 'script', {'type': type}, cdata=cdata, **args) + class animate(SVGelement): """an=animate(attribute,from,to,during,**args) animates an attribute. """ + def __init__(self, attribute, fr=None, to=None, dur=None,**args): SVGelement.__init__(self, 'animate', {'attributeName': attribute}, **args) if fr != None: @@ -856,11 +904,13 @@ class animate(SVGelement): if dur != None: self.attributes['dur'] = dur + class animateMotion(SVGelement): """an=animateMotion(pathdata,dur,**args) animates a SVGelement over the given path in dur seconds """ + def __init__(self, pathdata, dur, **args): SVGelement.__init__(self, 'animateMotion', **args) if pathdata != None: @@ -868,11 +918,13 @@ class animateMotion(SVGelement): if dur != None: self.attributes['dur'] = dur + class animateTransform(SVGelement): """antr=animateTransform(type,from,to,dur,**args) transform an element from and to a value. 
""" + def __init__(self, type=None, fr=None, to=None, dur=None,**args): SVGelement.__init__(self, 'animateTransform', {'attributeName': 'transform'}, **args) # As far as I know the attributeName is always transform @@ -884,11 +936,14 @@ class animateTransform(SVGelement): self.attributes['to'] = to if dur != None: self.attributes['dur'] = dur + + class animateColor(SVGelement): """ac=animateColor(attribute,type,from,to,dur,**args) Animates the color of a element """ + def __init__(self, attribute, type=None, fr=None, to=None,dur=None,**args): SVGelement.__init__(self, 'animateColor', {'attributeName': attribute}, **args) if type != None: @@ -899,11 +954,14 @@ class animateColor(SVGelement): self.attributes['to'] = to if dur != None: self.attributes['dur'] = dur + + class set(SVGelement): """st=set(attribute,to,during,**args) sets an attribute to a value for a """ + def __init__(self, attribute, to=None, dur=None, **args): SVGelement.__init__(self, 'set', {'attributeName': attribute}, **args) if to != None: @@ -912,7 +970,6 @@ class set(SVGelement): self.attributes['dur'] = dur - class svg(SVGelement): """s=svg(viewbox,width,height,**args) @@ -928,6 +985,7 @@ class svg(SVGelement): d.setSVG(s) d.toXml() """ + def __init__(self, viewBox=None, width=None, height=None, **args): SVGelement.__init__(self, 'svg', **args) if viewBox != None: @@ -938,6 +996,7 @@ class svg(SVGelement): self.attributes['height'] = height self.namespace = "http://www.w3.org/2000/svg" + class drawing: """d=drawing() @@ -952,6 +1011,7 @@ class drawing: def __init__(self, entity={}): self.svg = None self.entity = entity + def setSVG(self, svg): self.svg = svg # Voeg een element toe aan de grafiek toe. @@ -1005,6 +1065,7 @@ class drawing: root = implementation.createDocument(None, None, doctype) # Create the xml document. global appender + def appender(element, elementroot): """This recursive function appends elements to an element and sets the attributes and type. 
It stops when alle elements have been appended""" @@ -1053,6 +1114,7 @@ class drawing: f.close() except: print(("Cannot write SVG file: " + filename)) + def validate(self): try: import xml.parsers.xmlproc.xmlval @@ -1066,9 +1128,10 @@ class drawing: raise Exception("SVG is not well formed, see messages above") else: print("SVG well formed") -if __name__ == '__main__': +if __name__ == '__main__': + d = drawing() s = svg((0, 0, 100, 100)) r = rect(-100, -100, 300, 300, 'cyan') diff --git a/wqflask/utility/temp_data.py b/wqflask/utility/temp_data.py index 2088ba9a..07c5a318 100644 --- a/wqflask/utility/temp_data.py +++ b/wqflask/utility/temp_data.py @@ -2,6 +2,7 @@ from redis import Redis import simplejson as json + class TempData: def __init__(self, temp_uuid): diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py index 9b751344..4fe4db08 100644 --- a/wqflask/utility/tools.py +++ b/wqflask/utility/tools.py @@ -13,11 +13,13 @@ logger = logging.getLogger(__name__) OVERRIDES = {} + def app_set(command_id, value): """Set application wide value""" app.config.setdefault(command_id, value) return value + def get_setting(command_id, guess=None): """Resolve a setting from the environment or the global settings in app.config, with valid_path is a function checking whether the @@ -66,12 +68,14 @@ def get_setting(command_id, guess=None): # print("Set "+command_id+"="+str(command)) return command + def get_setting_bool(id): v = get_setting(id) if v not in [0, False, 'False', 'FALSE', None]: return True return False + def get_setting_int(id): v = get_setting(id) if isinstance(v, str): @@ -80,21 +84,25 @@ def get_setting_int(id): return 0 return v + def valid_bin(bin): if os.path.islink(bin) or valid_file(bin): return bin return None + def valid_file(fn): if os.path.isfile(fn): return fn return None + def valid_path(dir): if os.path.isdir(dir): return dir return None + def js_path(module=None): """ Find the JS module in the two paths @@ -107,38 +115,47 @@ def js_path(module=None): return try_guix raise "No JS path found for " + module + " (if not in Guix check JS_GN_PATH)" + def reaper_command(guess=None): return get_setting("REAPER_COMMAND", guess) + def gemma_command(guess=None): return assert_bin(get_setting("GEMMA_COMMAND", guess)) + def gemma_wrapper_command(guess=None): return assert_bin(get_setting("GEMMA_WRAPPER_COMMAND", guess)) + def plink_command(guess=None): return assert_bin(get_setting("PLINK_COMMAND", guess)) + def flat_file_exists(subdir): base = get_setting("GENENETWORK_FILES") return valid_path(base + "/" + subdir) + def flat_files(subdir=None): base = get_setting("GENENETWORK_FILES") if subdir: return assert_dir(base + "/" + subdir) return assert_dir(base) + def assert_bin(fn): if not valid_bin(fn): raise Exception("ERROR: can not find binary " + fn) return fn + def assert_dir(dir): if not valid_path(dir): raise Exception("ERROR: can not find directory " + dir) return dir + def assert_writable_dir(dir): try: fn = dir + "/test.txt" @@ -150,16 +167,19 @@ def assert_writable_dir(dir): raise Exception('Unable to write test.txt to directory ' + dir) return dir + def assert_file(fn): if not valid_file(fn): raise Exception('Unable to find file ' + fn) return fn + def mk_dir(dir): if not valid_path(dir): os.makedirs(dir) return assert_dir(dir) + def locate(name, subdir=None): """ Locate a static flat file in the GENENETWORK_FILES environment. 
@@ -179,9 +199,11 @@ def locate(name, subdir=None): if subdir: sys.stderr.write(subdir) raise Exception("Can not locate " + name + " in " + base) + def locate_phewas(name, subdir=None): return locate(name, '/phewas/' + subdir) + def locate_ignore_error(name, subdir=None): """ Locate a static flat file in the GENENETWORK_FILES environment. @@ -200,17 +222,20 @@ def locate_ignore_error(name, subdir=None): logger.info("WARNING: file " + name + " not found\n") return None + def tempdir(): """ Get UNIX TMPDIR by default """ return valid_path(get_setting("TMPDIR", "/tmp")) + BLUE = '\033[94m' GREEN = '\033[92m' BOLD = '\033[1m' ENDC = '\033[0m' + def show_settings(): from utility.tools import LOG_LEVEL diff --git a/wqflask/utility/type_checking.py b/wqflask/utility/type_checking.py index 662bf794..00f14ba9 100644 --- a/wqflask/utility/type_checking.py +++ b/wqflask/utility/type_checking.py @@ -7,6 +7,7 @@ def is_float(value): except: return False + def is_int(value): try: int(value) @@ -14,6 +15,7 @@ def is_int(value): except: return False + def is_str(value): if value is None: return False @@ -23,18 +25,21 @@ def is_str(value): except: return False + def get_float(vars_obj, name, default=None): if name in vars_obj: if is_float(vars_obj[name]): return float(vars_obj[name]) return default + def get_int(vars_obj, name, default=None): if name in vars_obj: if is_int(vars_obj[name]): return float(vars_obj[name]) return default + def get_string(vars_obj, name, default=None): if name in vars_obj: if not vars_obj[name] is None: diff --git a/wqflask/utility/webqtlUtil.py b/wqflask/utility/webqtlUtil.py index ed59b0eb..f355a865 100644 --- a/wqflask/utility/webqtlUtil.py +++ b/wqflask/utility/webqtlUtil.py @@ -64,6 +64,7 @@ ParInfo = { # Accessory Functions ######################################### + def genRandStr(prefix="", length=8, chars=string.ascii_letters + string.digits): from random import choice _str = prefix[:] @@ -71,6 +72,7 @@ def genRandStr(prefix="", length=8, chars=string.ascii_letters + string.digits): _str += choice(chars) return _str + def ListNotNull(lst): '''Obsolete - Use built in function any (or all or whatever) @@ -83,6 +85,7 @@ def ListNotNull(lst): return 1 return None + def readLineCSV(line): # dcrowell July 2008 """Parses a CSV string of text and returns a list containing each element as a string. 
Used by correlationPage""" @@ -91,6 +94,7 @@ def readLineCSV(line): # dcrowell July 2008 returnList[0] = returnList[0][1:] return returnList + def cmpEigenValue(A, B): try: if A[0] > B[0]: @@ -102,6 +106,7 @@ def cmpEigenValue(A, B): except: return 0 + def hasAccessToConfidentialPhenotypeTrait(privilege, userName, authorized_users): access_to_confidential_phenotype_trait = 0 if webqtlConfig.USERDICT[privilege] > webqtlConfig.USERDICT['user']: diff --git a/wqflask/wqflask/__init__.py b/wqflask/wqflask/__init__.py index bab87115..712517a3 100644 --- a/wqflask/wqflask/__init__.py +++ b/wqflask/wqflask/__init__.py @@ -30,6 +30,7 @@ app.register_blueprint(policies_blueprint, url_prefix="/policies") app.register_blueprint(environments_blueprint, url_prefix="/environments") app.register_blueprint(facilities_blueprint, url_prefix="/facilities") + @app.before_request def before_request(): g.request_start_time = time.time() diff --git a/wqflask/wqflask/api/correlation.py b/wqflask/wqflask/api/correlation.py index e0b7fea0..f1dd148f 100644 --- a/wqflask/wqflask/api/correlation.py +++ b/wqflask/wqflask/api/correlation.py @@ -18,6 +18,7 @@ from utility.benchmark import Bench import utility.logger logger = utility.logger.getLogger(__name__) + def do_correlation(start_vars): assert('db' in start_vars) assert('target_db' in start_vars) @@ -66,6 +67,7 @@ def do_correlation(start_vars): return final_results + def calculate_results(this_trait, this_dataset, target_dataset, corr_params): corr_results = {} @@ -91,6 +93,7 @@ def calculate_results(this_trait, this_dataset, target_dataset, corr_params): return sorted_results + def do_tissue_correlation_for_all_traits(this_trait, trait_symbol_dict, corr_params, tissue_dataset_id=1): # Gets tissue expression values for the primary trait primary_trait_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(symbol_list=[this_trait.symbol]) @@ -113,6 +116,7 @@ def do_tissue_correlation_for_all_traits(this_trait, trait_symbol_dict, corr_par return tissue_corr_data + def do_literature_correlation_for_all_traits(this_trait, target_dataset, trait_geneid_dict, corr_params): input_trait_mouse_gene_id = convert_to_mouse_gene_id(target_dataset.group.species.lower(), this_trait.geneid) @@ -145,6 +149,7 @@ def do_literature_correlation_for_all_traits(this_trait, target_dataset, trait_g return lit_corr_data + def get_sample_r_and_p_values(this_trait, this_dataset, target_vals, target_dataset, type): """ Calculates the sample r (or rho) and p-value @@ -176,6 +181,7 @@ def get_sample_r_and_p_values(this_trait, this_dataset, target_vals, target_data else: return [sample_r, sample_p, num_overlap] + def convert_to_mouse_gene_id(species=None, gene_id=None): """If the species is rat or human, translate the gene_id to the mouse geneid @@ -212,6 +218,7 @@ def convert_to_mouse_gene_id(species=None, gene_id=None): return mouse_gene_id + def init_corr_params(start_vars): method = "pearson" if 'method' in start_vars: diff --git a/wqflask/wqflask/api/mapping.py b/wqflask/wqflask/api/mapping.py index fbfbc879..662090d5 100644 --- a/wqflask/wqflask/api/mapping.py +++ b/wqflask/wqflask/api/mapping.py @@ -10,6 +10,7 @@ from wqflask.marker_regression import gemma_mapping, rqtl_mapping, qtlreaper_map import utility.logger logger = utility.logger.getLogger(__name__) + def do_mapping_for_api(start_vars): assert('db' in start_vars) assert('trait_id' in start_vars) @@ -74,7 +75,6 @@ def do_mapping_for_api(start_vars): return result_markers, None - def 
initialize_parameters(start_vars, dataset, this_trait): mapping_params = {} diff --git a/wqflask/wqflask/api/router.py b/wqflask/wqflask/api/router.py index b0559a07..4f9cc6e5 100644 --- a/wqflask/wqflask/api/router.py +++ b/wqflask/wqflask/api/router.py @@ -27,10 +27,12 @@ logger = utility.logger.getLogger(__name__) version = "pre1" + @app.route("/api/v_{}/".format(version)) def hello_world(): return flask.jsonify({"hello": "world"}) + @app.route("/api/v_{}/species".format(version)) def get_species_list(): results = g.db.execute("SELECT SpeciesId, Name, FullName, TaxonomyId FROM Species;") @@ -47,6 +49,7 @@ def get_species_list(): return flask.jsonify(species_list) + @app.route("/api/v_{}/species/".format(version)) @app.route("/api/v_{}/species/.".format(version)) def get_species_info(species_name, file_format="json"): @@ -64,6 +67,7 @@ def get_species_info(species_name, file_format="json"): return flask.jsonify(species_dict) + @app.route("/api/v_{}/groups".format(version)) @app.route("/api/v_{}/groups/".format(version)) def get_groups_list(species_name=None): @@ -102,6 +106,7 @@ def get_groups_list(species_name=None): else: return return_error(code=204, source=request.url_rule.rule, title="No Results", details="") + @app.route("/api/v_{}/group/".format(version)) @app.route("/api/v_{}/group/.".format(version)) @app.route("/api/v_{}/group//".format(version)) @@ -145,6 +150,7 @@ def get_group_info(group_name, species_name=None, file_format="json"): else: return return_error(code=204, source=request.url_rule.rule, title="No Results", details="") + @app.route("/api/v_{}/datasets/".format(version)) @app.route("/api/v_{}/datasets//".format(version)) def get_datasets_for_group(group_name, species_name=None): @@ -197,6 +203,7 @@ def get_datasets_for_group(group_name, species_name=None): else: return return_error(code=204, source=request.url_rule.rule, title="No Results", details="") + @app.route("/api/v_{}/dataset/".format(version)) @app.route("/api/v_{}/dataset/.".format(version)) @app.route("/api/v_{}/dataset//".format(version)) @@ -302,6 +309,7 @@ def get_dataset_info(dataset_name, group_name=None, file_format="json"): else: return return_error(code=204, source=request.url_rule.rule, title="No Results", details="") + @app.route("/api/v_{}/traits/".format(version), methods=("GET",)) @app.route("/api/v_{}/traits/.".format(version), methods=("GET",)) def fetch_traits(dataset_name, file_format="json"): @@ -430,6 +438,7 @@ def fetch_traits(dataset_name, file_format="json"): else: return return_error(code=204, source=request.url_rule.rule, title="No Results", details="") + @app.route("/api/v_{}/sample_data/".format(version)) @app.route("/api/v_{}/sample_data/.".format(version)) def all_sample_data(dataset_name, file_format="csv"): @@ -536,6 +545,7 @@ def all_sample_data(dataset_name, file_format="csv"): else: return return_error(code=204, source=request.url_rule.rule, title="No Results", details="") + @app.route("/api/v_{}/sample_data//".format(version)) @app.route("/api/v_{}/sample_data//.".format(version)) def trait_sample_data(dataset_name, trait_name, file_format="json"): @@ -625,6 +635,7 @@ def trait_sample_data(dataset_name, trait_name, file_format="json"): else: return return_error(code=204, source=request.url_rule.rule, title="No Results", details="") + @app.route("/api/v_{}/trait//".format(version)) @app.route("/api/v_{}/trait//.".format(version)) @app.route("/api/v_{}/trait_info//".format(version)) @@ -694,6 +705,7 @@ def get_trait_info(dataset_name, trait_name, file_format="json"): else: 
return return_error(code=204, source=request.url_rule.rule, title="No Results", details="") + @app.route("/api/v_{}/correlation".format(version), methods=("GET",)) def get_corr_results(): results = correlation.do_correlation(request.args) @@ -703,6 +715,7 @@ def get_corr_results(): else: return return_error(code=204, source=request.url_rule.rule, title="No Results", details="") + @app.route("/api/v_{}/mapping".format(version), methods=("GET",)) def get_mapping_results(): results, format = mapping.do_mapping_for_api(request.args) @@ -726,6 +739,7 @@ def get_mapping_results(): else: return return_error(code=204, source=request.url_rule.rule, title="No Results", details="") + @app.route("/api/v_{}/genotypes///.zip".format(version)) @app.route("/api/v_{}/genotypes///".format(version)) @app.route("/api/v_{}/genotypes//.zip".format(version)) @@ -813,6 +827,7 @@ def get_genotypes(group_name, file_format="csv", dataset_name=None): return output + @app.route("/api/v_{}/gen_dropdown".format(version), methods=("GET",)) def gen_dropdown_menu(): results = gen_menu.gen_dropdown_json() @@ -822,6 +837,7 @@ def gen_dropdown_menu(): else: return return_error(code=500, source=request.url_rule.rule, title="Some error occurred", details="") + def return_error(code, source, title, details): json_ob = {"errors": [ { @@ -834,6 +850,7 @@ def return_error(code, source, title, details): return flask.jsonify(json_ob) + def get_dataset_trait_ids(dataset_name, start_vars): if 'limit_to' in start_vars: @@ -906,6 +923,7 @@ def get_dataset_trait_ids(dataset_name, start_vars): dataset_id = results[0][2] return trait_ids, trait_names, data_type, dataset_id + def get_samplelist(dataset_name): group_id = get_group_id_from_dataset(dataset_name) @@ -922,6 +940,7 @@ def get_samplelist(dataset_name): return samplelist + def get_group_id_from_dataset(dataset_name): if "Publish" in dataset_name: query = """ @@ -962,6 +981,7 @@ def get_group_id_from_dataset(dataset_name): else: return None + def get_group_id(group_name): query = """ SELECT InbredSet.Id diff --git a/wqflask/wqflask/collect.py b/wqflask/wqflask/collect.py index 6a1b88ca..61f73106 100644 --- a/wqflask/wqflask/collect.py +++ b/wqflask/wqflask/collect.py @@ -40,6 +40,7 @@ def process_traits(unprocessed_traits): return traits + def report_change(len_before, len_now): new_length = len_now - len_before if new_length: @@ -48,6 +49,7 @@ def report_change(len_before, len_now): else: logger.debug("No new traits were added.") + @app.route("/collections/store_trait_list", methods=('POST',)) def store_traits_list(): params = request.form @@ -59,6 +61,7 @@ def store_traits_list(): return hash + @app.route("/collections/add") def collections_add(): @@ -82,6 +85,7 @@ def collections_add(): collections=collections, ) + @app.route("/collections/new") def collections_new(): params = request.args @@ -118,6 +122,7 @@ def collections_new(): # CauseAnError pass + def create_new(collection_name): params = request.args @@ -133,6 +138,7 @@ def create_new(collection_name): return redirect(url_for('view_collection', uc_id=uc_id)) + @app.route("/collections/list") def list_collections(): params = request.args @@ -143,6 +149,7 @@ def list_collections(): collections=user_collections, ) + @app.route("/collections/remove", methods=('POST',)) def remove_traits(): params = request.form @@ -216,6 +223,7 @@ def view_collection(): **collection_info ) + @app.route("/collections/change_name", methods=('POST',)) def change_collection_name(): params = request.form diff --git 
a/wqflask/wqflask/correlation/corr_scatter_plot.py b/wqflask/wqflask/correlation/corr_scatter_plot.py index 22941ad5..6afea715 100644 --- a/wqflask/wqflask/correlation/corr_scatter_plot.py +++ b/wqflask/wqflask/correlation/corr_scatter_plot.py @@ -11,6 +11,7 @@ import numpy as np import utility.logger logger = utility.logger.getLogger(__name__) + class CorrScatterPlot: """Page that displays a correlation scatterplot with a line fitted to it""" diff --git a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py index 051ac1cb..e2fe1ff4 100644 --- a/wqflask/wqflask/correlation/show_corr_results.py +++ b/wqflask/wqflask/correlation/show_corr_results.py @@ -172,7 +172,6 @@ class CorrelationResults: self.correlation_data = collections.OrderedDict(sorted(list(self.correlation_data.items()), key=lambda t: -abs(t[1][0]))) - # ZS: Convert min/max chromosome to an int for the location range option range_chr_as_int = None for order_id, chr_info in list(self.dataset.species.chromosomes.chromosomes.items()): @@ -348,7 +347,6 @@ class CorrelationResults: else: trait.lit_corr = 0 - def do_lit_correlation_for_all_traits(self): input_trait_mouse_gene_id = self.convert_to_mouse_gene_id(self.dataset.group.species.lower(), self.this_trait.geneid) @@ -484,6 +482,7 @@ def do_bicor(this_trait_vals, target_trait_vals): return the_r, the_p + def generate_corr_json(corr_results, this_trait, dataset, target_dataset, for_api=False): results_list = [] for i, trait in enumerate(corr_results): @@ -574,6 +573,7 @@ def generate_corr_json(corr_results, this_trait, dataset, target_dataset, for_ap return json.dumps(results_list) + def get_header_fields(data_type, corr_method): if data_type == "ProbeSet": if corr_method == "spearman": diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py index 94c8931f..331cb1dc 100644 --- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py +++ b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py @@ -233,6 +233,7 @@ class CorrelationMatrix: loadings_array.append(loadings_row) return loadings_array + def export_corr_matrix(corr_results): corr_matrix_filename = "corr_matrix_" + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) matrix_export_path = "{}{}.csv".format(GENERATED_TEXT_DIR, corr_matrix_filename) @@ -263,6 +264,7 @@ def export_corr_matrix(corr_results): return corr_matrix_filename, matrix_export_path + def zScore(trait_data_array): NN = len(trait_data_array[0]) if NN < 10: @@ -283,6 +285,7 @@ def zScore(trait_data_array): i += 1 return trait_data_array + def sortEigenVectors(vector): try: eigenValues = vector[0].tolist() diff --git a/wqflask/wqflask/ctl/ctl_analysis.py b/wqflask/wqflask/ctl/ctl_analysis.py index 2fa90a93..ec66e59f 100644 --- a/wqflask/wqflask/ctl/ctl_analysis.py +++ b/wqflask/wqflask/ctl/ctl_analysis.py @@ -39,6 +39,7 @@ r_write_table = ro.r["write.table"] # Map the write.table function r_data_frame = ro.r["data.frame"] # Map the write.table function r_as_numeric = ro.r["as.numeric"] # Map the write.table function + class CTL: def __init__(self): logger.info("Initialization of CTL") diff --git a/wqflask/wqflask/database.py b/wqflask/wqflask/database.py index b6e85494..e743c4b3 100644 --- a/wqflask/wqflask/database.py +++ b/wqflask/wqflask/database.py @@ -17,6 +17,7 @@ db_session = scoped_session(sessionmaker(autocommit=False, Base = declarative_base() Base.query = db_session.query_property() + def init_db(): # 
import all modules here that might define models so that # they will be registered properly on the metadata. Otherwise @@ -27,4 +28,5 @@ def init_db(): Base.metadata.create_all(bind=engine) logger.info("Done creating all model metadata") + init_db() diff --git a/wqflask/wqflask/db_info.py b/wqflask/wqflask/db_info.py index 25e624ef..c7558ed8 100644 --- a/wqflask/wqflask/db_info.py +++ b/wqflask/wqflask/db_info.py @@ -90,6 +90,7 @@ class InfoPage: except Exception as e: pass + def process_query_results(results): info_ob = { 'info_page_name': results[0], diff --git a/wqflask/wqflask/do_search.py b/wqflask/wqflask/do_search.py index 115182e4..7442dc72 100644 --- a/wqflask/wqflask/do_search.py +++ b/wqflask/wqflask/do_search.py @@ -73,6 +73,7 @@ class DoSearch: else: return None + class MrnaAssaySearch(DoSearch): """A search within an expression dataset, including mRNA, protein, SNP, but not phenotype or metabolites""" @@ -296,6 +297,7 @@ class PhenotypeSearch(DoSearch): return self.execute(query) + class GenotypeSearch(DoSearch): """A search within a genotype dataset""" @@ -339,7 +341,6 @@ class GenotypeSearch(DoSearch): from_clause = self.normalize_spaces(from_clause) - if self.search_term[0] == "*": query = (self.base_query + """WHERE Geno.Id = GenoXRef.GenoId @@ -367,6 +368,7 @@ class GenotypeSearch(DoSearch): return self.execute(self.query) + class RifSearch(MrnaAssaySearch): """Searches for traits with a Gene RIF entry including the search term.""" @@ -390,6 +392,7 @@ class RifSearch(MrnaAssaySearch): return self.execute(query) + class WikiSearch(MrnaAssaySearch): """Searches GeneWiki for traits other people have annotated""" @@ -415,6 +418,7 @@ class WikiSearch(MrnaAssaySearch): return self.execute(query) + class GoSearch(MrnaAssaySearch): """Searches for synapse-associated genes listed in the Gene Ontology.""" @@ -449,6 +453,8 @@ class GoSearch(MrnaAssaySearch): return self.execute(query) # ZS: Not sure what the best way to deal with LRS searches is + + class LrsSearch(DoSearch): """Searches for genes with a QTL within the given LRS values @@ -526,7 +532,6 @@ class LrsSearch(DoSearch): return where_clause - def run(self): self.from_clause = self.get_from_clause() @@ -550,6 +555,7 @@ class MrnaLrsSearch(LrsSearch, MrnaAssaySearch): return self.execute(self.query) + class PhenotypeLrsSearch(LrsSearch, PhenotypeSearch): for search_key in ('LRS', 'LOD'): @@ -649,6 +655,7 @@ class CisTransLrsSearch(DoSearch): return where_clause + class CisLrsSearch(CisTransLrsSearch, MrnaAssaySearch): """ Searches for genes on a particular chromosome with a cis-eQTL within the given LRS values @@ -680,6 +687,7 @@ class CisLrsSearch(CisTransLrsSearch, MrnaAssaySearch): return self.execute(self.query) + class TransLrsSearch(CisTransLrsSearch, MrnaAssaySearch): """Searches for genes on a particular chromosome with a cis-eQTL within the given LRS values @@ -744,6 +752,7 @@ class MeanSearch(MrnaAssaySearch): return self.execute(self.query) + class RangeSearch(MrnaAssaySearch): """Searches for genes with a range of expression varying between two values""" @@ -779,6 +788,7 @@ class RangeSearch(MrnaAssaySearch): return self.execute(self.query) + class PositionSearch(DoSearch): """Searches for genes/markers located within a specified range on a specified chromosome""" @@ -800,7 +810,6 @@ class PositionSearch(DoSearch): self.dataset.type, max(self.mb_min, self.mb_max)) - return where_clause def get_chr(self): @@ -819,6 +828,7 @@ class PositionSearch(DoSearch): return self.execute(self.query) + class 
MrnaPositionSearch(PositionSearch, MrnaAssaySearch): """Searches for genes located within a specified range on a specified chromosome""" @@ -832,6 +842,7 @@ class MrnaPositionSearch(PositionSearch, MrnaAssaySearch): return self.execute(self.query) + class GenotypePositionSearch(PositionSearch, GenotypeSearch): """Searches for genes located within a specified range on a specified chromosome""" @@ -845,6 +856,7 @@ class GenotypePositionSearch(PositionSearch, GenotypeSearch): return self.execute(self.query) + class PvalueSearch(MrnaAssaySearch): """Searches for traits with a permutationed p-value between low and high""" @@ -878,6 +890,7 @@ class PvalueSearch(MrnaAssaySearch): logger.sql(self.query) return self.execute(self.query) + class AuthorSearch(PhenotypeSearch): """Searches for phenotype traits with specified author(s)""" @@ -900,6 +913,7 @@ def is_number(s): except ValueError: return False + def get_aliases(symbol, species): if species == "mouse": symbol_string = symbol.capitalize() @@ -923,6 +937,7 @@ def get_aliases(symbol, species): return filtered_aliases + if __name__ == "__main__": # Usually this will be used as a library, but call it from the command line for testing # And it runs the code below diff --git a/wqflask/wqflask/docs.py b/wqflask/wqflask/docs.py index 207767c4..81424b9c 100644 --- a/wqflask/wqflask/docs.py +++ b/wqflask/wqflask/docs.py @@ -5,6 +5,7 @@ from flask import g from utility.logger import getLogger logger = getLogger(__name__) + class Docs: def __init__(self, entry, start_vars={}): @@ -23,7 +24,6 @@ class Docs: self.title = result[0] self.content = result[1].decode("utf-8") - self.editable = "false" # ZS: Removing option to edit to see if text still gets vandalized try: diff --git a/wqflask/wqflask/export_traits.py b/wqflask/wqflask/export_traits.py index 95c20673..d0745ef7 100644 --- a/wqflask/wqflask/export_traits.py +++ b/wqflask/wqflask/export_traits.py @@ -15,6 +15,7 @@ from pprint import pformat as pf from utility.logger import getLogger logger = getLogger(__name__) + def export_search_results_csv(targs): table_data = json.loads(targs['export_data']) @@ -132,6 +133,7 @@ def export_search_results_csv(targs): return file_list + def sort_traits_by_group(trait_list=[]): traits_by_group = {} for trait in trait_list: diff --git a/wqflask/wqflask/external_tools/send_to_bnw.py b/wqflask/wqflask/external_tools/send_to_bnw.py index 9836eb9c..1556c6a0 100644 --- a/wqflask/wqflask/external_tools/send_to_bnw.py +++ b/wqflask/wqflask/external_tools/send_to_bnw.py @@ -24,6 +24,7 @@ from utility import helper_functions, corr_result_helpers import utility.logger logger = utility.logger.getLogger(__name__) + class SendToBNW: def __init__(self, start_vars): trait_db_list = [trait.strip() for trait in start_vars['trait_list'].split(',')] diff --git a/wqflask/wqflask/external_tools/send_to_geneweaver.py b/wqflask/wqflask/external_tools/send_to_geneweaver.py index 36f1b8e5..c55c43e6 100644 --- a/wqflask/wqflask/external_tools/send_to_geneweaver.py +++ b/wqflask/wqflask/external_tools/send_to_geneweaver.py @@ -29,6 +29,7 @@ from utility import helper_functions, corr_result_helpers import utility.logger logger = utility.logger.getLogger(__name__) + class SendToGeneWeaver: def __init__(self, start_vars): trait_db_list = [trait.strip() for trait in start_vars['trait_list'].split(',')] @@ -58,6 +59,7 @@ class SendToGeneWeaver: 'list': ",".join(trait_name_list), } + def get_trait_name_list(trait_list): name_list = [] for trait_db in trait_list: @@ -65,6 +67,7 @@ def 
get_trait_name_list(trait_list): return name_list + def test_chip(trait_list): final_chip_name = "" diff --git a/wqflask/wqflask/external_tools/send_to_webgestalt.py b/wqflask/wqflask/external_tools/send_to_webgestalt.py index f50eeb8b..6b78725c 100644 --- a/wqflask/wqflask/external_tools/send_to_webgestalt.py +++ b/wqflask/wqflask/external_tools/send_to_webgestalt.py @@ -29,6 +29,7 @@ from utility import helper_functions, corr_result_helpers import utility.logger logger = utility.logger.getLogger(__name__) + class SendToWebGestalt: def __init__(self, start_vars): trait_db_list = [trait.strip() for trait in start_vars['trait_list'].split(',')] @@ -69,6 +70,7 @@ class SendToWebGestalt: else: self.hidden_vars['organism'] = "others" + def test_chip(trait_list): final_chip_name = "" @@ -113,6 +115,7 @@ def test_chip(trait_list): return chip_name + def gen_gene_id_list(trait_list): trait_name_list = [] gene_id_list = [] diff --git a/wqflask/wqflask/group_manager.py b/wqflask/wqflask/group_manager.py index f29c0e4d..92a65112 100644 --- a/wqflask/wqflask/group_manager.py +++ b/wqflask/wqflask/group_manager.py @@ -12,6 +12,7 @@ from utility.redis_tools import get_user_groups, get_group_info, save_user, crea from utility.logger import getLogger logger = getLogger(__name__) + @app.route("/groups/manage", methods=('GET', 'POST')) def manage_groups(): params = request.form if request.form else request.args @@ -21,6 +22,7 @@ def manage_groups(): admin_groups, member_groups = get_user_groups(g.user_session.user_id) return render_template("admin/group_manager.html", admin_groups=admin_groups, member_groups=member_groups) + @app.route("/groups/view", methods=('GET', 'POST')) def view_group(): params = request.form if request.form else request.args @@ -58,6 +60,7 @@ def view_group(): return render_template("admin/view_group.html", group_info=group_info, admins=admins_info, members=members_info, user_is_admin=user_is_admin, resources=resources_info) + @app.route("/groups/remove", methods=('POST',)) def remove_groups(): group_ids_to_remove = request.form['selected_group_ids'] @@ -66,6 +69,7 @@ def remove_groups(): return redirect(url_for('manage_groups')) + @app.route("/groups/remove_users", methods=('POST',)) def remove_users(): group_id = request.form['group_id'] @@ -77,6 +81,7 @@ def remove_users(): return redirect(url_for('view_group', id=group_id)) + @app.route("/groups/add_", methods=('POST',)) def add_users(user_type='members'): group_id = request.form['group_id'] @@ -89,6 +94,7 @@ def add_users(user_type='members'): return redirect(url_for('view_group', id=group_id)) + @app.route("/groups/change_name", methods=('POST',)) def change_name(): group_id = request.form['group_id'] @@ -97,6 +103,7 @@ def change_name(): return new_name + @app.route("/groups/create", methods=('GET', 'POST')) def add_or_edit_group(): params = request.form if request.form else request.args @@ -125,6 +132,8 @@ def add_or_edit_group(): return render_template("admin/create_group.html") # ZS: Will integrate this later, for now just letting users be added directly + + def send_group_invites(group_id, user_email_list=[], user_type="members"): for user_email in user_email_list: user_details = get_user_by_unique_column("email_address", user_email) diff --git a/wqflask/wqflask/gsearch.py b/wqflask/wqflask/gsearch.py index 8cb81dcc..a21dae84 100644 --- a/wqflask/wqflask/gsearch.py +++ b/wqflask/wqflask/gsearch.py @@ -18,6 +18,7 @@ from utility.type_checking import is_float, is_int, is_str, get_float, get_int, from utility.logger import 
getLogger logger = getLogger(__name__) + class GSearch: def __init__(self, kw): diff --git a/wqflask/wqflask/heatmap/heatmap.py b/wqflask/wqflask/heatmap/heatmap.py index f8ef7028..0b477446 100644 --- a/wqflask/wqflask/heatmap/heatmap.py +++ b/wqflask/wqflask/heatmap/heatmap.py @@ -14,6 +14,7 @@ Redis = Redis() logger = getLogger(__name__) + class Heatmap: def __init__(self, start_vars, temp_uuid): @@ -132,6 +133,7 @@ class Heatmap: else: self.trait_results[this_trait.name].append(float(qtl['lrs_value'])) + def gen_pheno_txt_file(samples, vals, filename): """Generates phenotype file for GEMMA""" @@ -151,6 +153,7 @@ def gen_pheno_txt_file(samples, vals, filename): values_string = "\t".join(filtered_vals_list) outfile.write(values_string) + def parse_reaper_output(gwa_filename): included_markers = [] p_values = [] diff --git a/wqflask/wqflask/interval_analyst/GeneUtil.py b/wqflask/wqflask/interval_analyst/GeneUtil.py index 9779878e..8dd1c7c0 100644 --- a/wqflask/wqflask/interval_analyst/GeneUtil.py +++ b/wqflask/wqflask/interval_analyst/GeneUtil.py @@ -4,6 +4,8 @@ from flask import Flask, g # Just return a list of dictionaries # each dictionary contains sub-dictionary + + def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): fetchFields = ['SpeciesId', 'Id', 'GeneSymbol', 'GeneDescription', 'Chromosome', 'TxStart', 'TxEnd', 'Strand', 'GeneID', 'NM_ID', 'kgID', 'GenBankID', 'UnigenID', 'ProteinID', 'AlignID', diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py index 3753d1ce..5c7b81dd 100644 --- a/wqflask/wqflask/marker_regression/display_mapping_results.py +++ b/wqflask/wqflask/marker_regression/display_mapping_results.py @@ -509,9 +509,6 @@ class DisplayMappingResults: self.graphHeight = self.graphHeight + 2 * (self.NR_INDIVIDUALS + 10) * self.EACH_GENE_HEIGHT # END HaplotypeAnalyst - - - ######################### # Get the sorting column ######################### @@ -1640,7 +1637,6 @@ class DisplayMappingResults: geneYLocation + 2 *ind*self.EACH_GENE_HEIGHT + 2*self.EACH_GENE_HEIGHT*zoom)), outline=outlineColor, fill=fillColor) - COORDS = "%d, %d, %d, %d" % (geneStartPix, geneYLocation + ind * self.EACH_GENE_HEIGHT, geneEndPix + 1, (geneYLocation + ind * self.EACH_GENE_HEIGHT)) TITLE = "Strain: %s, marker (%s) \n Position %2.3f Mb." 
% (samplelist[k], _chr[j].name, float(txStart)) HREF = '' @@ -1663,7 +1659,6 @@ class DisplayMappingResults: geneYLocation + 7 + 2*ind*self.EACH_GENE_HEIGHT*zoom)), fill= mylineColor, width=zoom * (self.EACH_GENE_HEIGHT + 2)) - if lastGene == 0: draw_rotated_text( canvas, text="%s" % (_chr[j].name), @@ -2085,7 +2080,6 @@ class DisplayMappingResults: im_drawer.line(xy=((xLeftOffset, yZero), (xLeftOffset + plotWidth, yZero)), fill=BLACK, width=X_AXIS_THICKNESS) # Draw the X axis itself - def drawQTL(self, canvas, drawAreaHeight, gifmap, plotXScale, offset=(40, 120, 80, 10), zoom=1, startMb= None, endMb = None): im_drawer = ImageDraw.Draw(canvas) xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset @@ -2427,7 +2421,6 @@ class DisplayMappingResults: yLRS = yZero - (item / LRS_LOD_Max) * LRSHeightThresh - if 'lrs_value' in qtlresult: if self.LRS_LOD == "LOD" or self.LRS_LOD == "-logP": if qtlresult['lrs_value'] > 460 or qtlresult['lrs_value'] == 'inf': @@ -2621,7 +2614,6 @@ class DisplayMappingResults: # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) - # draw additive scale if not self.multipleInterval and self.additiveChecked: additiveScaleFont = ImageFont.truetype(font=VERDANA_FILE, size=16 * zoom) @@ -2653,7 +2645,6 @@ class DisplayMappingResults: xy=((xLeftOffset, yZero), (xLeftOffset, yTopOffset + 30 * (zoom - 1))), fill=self.LRS_COLOR, width=1 * zoom) # the blue line running up the y axis - def drawGraphBackground(self, canvas, gifmap, offset=(80, 120, 80, 50), zoom=1, startMb= None, endMb = None): # conditions # multiple Chromosome view diff --git a/wqflask/wqflask/marker_regression/plink_mapping.py b/wqflask/wqflask/marker_regression/plink_mapping.py index e6c78536..22a50bb8 100644 --- a/wqflask/wqflask/marker_regression/plink_mapping.py +++ b/wqflask/wqflask/marker_regression/plink_mapping.py @@ -8,11 +8,11 @@ from utility.tools import flat_files, PLINK_COMMAND import utility.logger logger = utility.logger.getLogger(__name__) + def run_plink(this_trait, dataset, species, vals, maf): plink_output_filename = webqtlUtil.genRandStr(f"{dataset.group.name}_{this_trait.name}_") gen_pheno_txt_file(dataset, vals) - plink_command = f"{PLINK_COMMAND} --noweb --bfile {flat_files('mapping')}/{dataset.group.name} --no-pheno --no-fid --no-parents --no-sex --maf {maf} --out { TMPDIR}{plink_output_filename} --assoc " logger.debug("plink_command:", plink_command) @@ -25,6 +25,7 @@ def run_plink(this_trait, dataset, species, vals, maf): return dataset.group.markers.markers + def gen_pheno_txt_file(this_dataset, vals): """Generates phenotype file for GEMMA/PLINK""" @@ -42,6 +43,7 @@ def gen_pheno_txt_file(this_dataset, vals): this_val = vals[i] outfile.write("0 " + line[1] + " " + line[2] + " " + line[3] + " " + line[4] + " " + str(this_val) + "\n") + def gen_pheno_txt_file_plink(this_trait, dataset, vals, pheno_filename=''): ped_sample_list = get_samples_from_ped_file(dataset) output_file = open(f"{TMPDIR}{pheno_filename}.txt", "wb") @@ -77,6 +79,8 @@ def gen_pheno_txt_file_plink(this_trait, dataset, vals, pheno_filename=''): output_file.close() # get strain name from ped file in order + + def get_samples_from_ped_file(dataset): ped_file = open(f"{flat_files('mapping')}{dataset.group.name}.ped", "r") line = ped_file.readline() @@ -93,6 +97,7 @@ def get_samples_from_ped_file(dataset): return sample_list + def parse_plink_output(output_filename, species): plink_results = {} @@ -154,6 +159,8 @@ def parse_plink_output(output_filename, species): # function: convert line from str to list; # output: 
lineList list ####################################################### + + def build_line_list(line=""): line_list = line.strip().split(' ') # irregular number of whitespaces between columns line_list = [item for item in line_list if item != ''] diff --git a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py index dd044cb0..313c40ca 100644 --- a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py +++ b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py @@ -8,6 +8,7 @@ from utility.tools import flat_files, REAPER_COMMAND, TEMPDIR import utility.logger logger = utility.logger.getLogger(__name__) + def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boot_check, num_bootstrap, do_control, control_marker, manhattan_plot, first_run=True, output_files=None): """Generates p-values for each marker using qtlreaper""" @@ -73,6 +74,7 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo return (marker_obs, permu_vals, suggestive, significant, bootstrap_vals, [output_filename, permu_filename, bootstrap_filename]) + def gen_pheno_txt_file(samples, vals, trait_filename): """Generates phenotype file for GEMMA""" @@ -92,6 +94,7 @@ def gen_pheno_txt_file(samples, vals, trait_filename): values_string = "\t".join(filtered_vals_list) outfile.write(values_string) + def parse_reaper_output(gwa_filename, permu_filename, bootstrap_filename): included_markers = [] p_values = [] @@ -163,6 +166,7 @@ def parse_reaper_output(gwa_filename, permu_filename, bootstrap_filename): return marker_obs, permu_vals, bootstrap_vals + def run_original_reaper(this_trait, dataset, samples_before, trait_vals, json_data, num_perm, bootCheck, num_bootstrap, do_control, control_marker, manhattan_plot): genotype = dataset.group.read_genotype_file(use_reaper=True) @@ -255,6 +259,7 @@ def run_original_reaper(this_trait, dataset, samples_before, trait_vals, json_da qtl_results.append(qtl) return qtl_results, json_data, perm_output, suggestive, significant, bootstrap_results + def natural_sort(marker_list): """ Function to naturally sort numbers + strings, adopted from user Mark Byers here: https://stackoverflow.com/questions/4836710/does-python-have-a-built-in-function-for-string-natural-sort diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py index 2bd94512..588600f5 100644 --- a/wqflask/wqflask/marker_regression/rqtl_mapping.py +++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py @@ -16,6 +16,8 @@ import utility.logger logger = utility.logger.getLogger(__name__) # Get a trait's type (numeric, categorical, etc) from the DB + + def get_trait_data_type(trait_db_string): logger.info("get_trait_data_type"); the_query = "SELECT value FROM TraitMetadata WHERE type='trait_data_type'" @@ -133,6 +135,7 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec else: return process_rqtl_results(result_data_frame, dataset.group.species) + def generate_cross_from_rdata(dataset): rdata_location = locate(dataset.group.name + ".RData", "genotype/rdata") ro.r(""" @@ -143,6 +146,7 @@ def generate_cross_from_rdata(dataset): } """ % (rdata_location)) + def generate_cross_from_geno(dataset, scale_units): # TODO: Need to figure out why some genofiles have the wrong format and don't convert properly ro.r(""" @@ -187,6 +191,7 @@ def generate_cross_from_geno(dataset, scale_units): # TODO: Need to figur } """ % (dataset.group.genofile, scale_units)) + def 
add_perm_strata(cross, perm_strata): col_string = 'c("the_strata")' perm_strata_string = "c(" @@ -201,6 +206,7 @@ def add_perm_strata(cross, perm_strata): return cross, strata_ob + def sanitize_rqtl_phenotype(vals): pheno_as_string = "c(" for i, val in enumerate(vals): @@ -218,6 +224,7 @@ def sanitize_rqtl_phenotype(vals): return pheno_as_string + def sanitize_rqtl_names(vals): pheno_as_string = "c(" for i, val in enumerate(vals): @@ -235,12 +242,14 @@ def sanitize_rqtl_names(vals): return pheno_as_string + def add_phenotype(cross, pheno_as_string, col_name): ro.globalenv["the_cross"] = cross ro.r('pheno <- data.frame(pull.pheno(the_cross))') ro.r('the_cross$pheno <- cbind(pheno, ' + col_name + ' = as.numeric(' + pheno_as_string + '))') return ro.r["the_cross"] + def add_categorical_covar(cross, covar_as_string, i): ro.globalenv["the_cross"] = cross logger.info("cross set"); @@ -275,12 +284,14 @@ def add_names(cross, names_as_string, col_name): ro.r('the_cross$pheno <- cbind(pheno, ' + col_name + ' = ' + names_as_string + ')') return ro.r["the_cross"] + def pull_var(var_name, cross, var_string): ro.globalenv["the_cross"] = cross ro.r(var_name + ' <- pull.pheno(the_cross, ' + var_string + ')') return ro.r[var_name] + def add_cofactors(cross, this_dataset, covariates, samples): ro.numpy2ri.activate() @@ -341,6 +352,7 @@ def add_cofactors(cross, this_dataset, covariates, samples): covars_ob = pull_var("trait_covars", cross, covar_name_string) return cross, covars_ob + def create_marker_covariates(control_marker, cross): ro.globalenv["the_cross"] = cross ro.r('genotypes <- pull.geno(the_cross)') # Get the genotype matrix @@ -358,6 +370,7 @@ def create_marker_covariates(control_marker, cross): # TODO: Create a design matrix from the marker covars for the markers in case of an F2, 4way, etc return ro.r["marker_covars"] + def process_pair_scan_results(result): pair_scan_results = [] @@ -374,6 +387,7 @@ def process_pair_scan_results(result): return pair_scan_results + def process_rqtl_perm_results(num_perm, results): perm_vals = [] for line in str(results).split("\n")[1:(num_perm + 1)]: @@ -386,6 +400,7 @@ def process_rqtl_perm_results(num_perm, results): return perm_output, suggestive, significant + def process_rqtl_results(result, species_name): # TODO: how to make this a one liner and not copy the stuff in a loop qtl_results = [] output = [tuple([result[j][i] for j in range(result.ncol)]) for i in range(result.nrow)] diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py index f1665570..31c58083 100644 --- a/wqflask/wqflask/marker_regression/run_mapping.py +++ b/wqflask/wqflask/marker_regression/run_mapping.py @@ -45,6 +45,7 @@ from base.webqtlConfig import TMPDIR, GENERATED_TEXT_DIR import utility.logger logger = utility.logger.getLogger(__name__) + class RunMapping: def __init__(self, start_vars, temp_uuid): @@ -504,6 +505,7 @@ class RunMapping: trimmed_genotype_data.append(new_genotypes) return trimmed_genotype_data + def export_mapping_results(dataset, trait, markers, results_path, mapping_scale, score_type, transform, covariates, n_samples): with open(results_path, "w+") as output_file: output_file.write("Time/Date: " + datetime.datetime.now().strftime("%x / %X") + "\n") @@ -564,6 +566,7 @@ def export_mapping_results(dataset, trait, markers, results_path, mapping_scale, if i < (len(markers) - 1): output_file.write("\n") + def trim_markers_for_figure(markers): if 'p_wald' in list(markers[0].keys()): score_type = 'p_wald' @@ -624,6 
+627,7 @@ def trim_markers_for_figure(markers): filtered_markers.append(marker) return filtered_markers + def trim_markers_for_table(markers): if 'lod_score' in list(markers[0].keys()): sorted_markers = sorted(markers, key=lambda k: k['lod_score'], reverse=True) @@ -637,6 +641,7 @@ def trim_markers_for_table(markers): else: return sorted_markers + def write_input_for_browser(this_dataset, gwas_results, annotations): file_base = this_dataset.group.name + "_" + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) gwas_filename = file_base + "_GWAS" @@ -650,6 +655,7 @@ def write_input_for_browser(this_dataset, gwas_results, annotations): return [gwas_filename, annot_filename] + def geno_db_exists(this_dataset): geno_db_name = this_dataset.group.name + "Geno" try: @@ -658,6 +664,7 @@ def geno_db_exists(this_dataset): except: return "False" + def get_chr_lengths(mapping_scale, mapping_method, dataset, qtl_results): chr_lengths = [] if mapping_scale == "physic": @@ -696,6 +703,7 @@ def get_chr_lengths(mapping_scale, mapping_method, dataset, qtl_results): return chr_lengths + def get_genofile_samplelist(dataset): genofile_samplelist = [] @@ -706,6 +714,7 @@ def get_genofile_samplelist(dataset): return genofile_samplelist + def get_perm_strata(this_trait, sample_list, categorical_vars, used_samples): perm_strata_strings = [] for sample in used_samples: diff --git a/wqflask/wqflask/model.py b/wqflask/wqflask/model.py index 8abd6516..d7c9ef95 100644 --- a/wqflask/wqflask/model.py +++ b/wqflask/wqflask/model.py @@ -14,6 +14,7 @@ from sqlalchemy.orm import relationship from wqflask.database import Base, init_db + class User(Base): __tablename__ = "user" id = Column(Unicode(36), primary_key=True, default=lambda: str(uuid.uuid4())) @@ -63,7 +64,6 @@ class User(Base): print("Couldn't display_num_collections:", why) return "" - def get_collection_by_name(self, collection_name): try: collect = self.user_collections.filter_by(name=collection_name).first() @@ -83,7 +83,6 @@ class User(Base): def login_count(self): return self.logins.filter_by(successful=True).count() - @property def confirmed_at(self): if self.confirmed: @@ -116,6 +115,7 @@ class User(Base): except IndexError: return None + class Login(Base): __tablename__ = "login" id = Column(Unicode(36), primary_key=True, default=lambda: str(uuid.uuid4())) @@ -134,6 +134,7 @@ class Login(Base): ################################################################################################## + class UserCollection(Base): __tablename__ = "user_collection" id = Column(Unicode(36), primary_key=True, default=lambda: str(uuid.uuid4())) @@ -158,12 +159,14 @@ class UserCollection(Base): def members_as_set(self): return set(json.loads(self.members)) + def display_collapsible(number): if number: return number else: return "" + def user_uuid(): """Unique cookie for a user""" user_uuid = request.cookies.get('user_uuid') diff --git a/wqflask/wqflask/network_graph/network_graph.py b/wqflask/wqflask/network_graph/network_graph.py index 93785a3a..e089643e 100644 --- a/wqflask/wqflask/network_graph/network_graph.py +++ b/wqflask/wqflask/network_graph/network_graph.py @@ -27,6 +27,7 @@ from utility import helper_functions from utility import corr_result_helpers from utility.tools import GN2_BRANCH_URL + class NetworkGraph: def __init__(self, start_vars): diff --git a/wqflask/wqflask/news.py b/wqflask/wqflask/news.py index 861a93f2..e262dd51 100644 --- a/wqflask/wqflask/news.py +++ b/wqflask/wqflask/news.py @@ -1,5 +1,6 @@ from flask import g + 
class News: def __init__(self): diff --git a/wqflask/wqflask/parser.py b/wqflask/wqflask/parser.py index dfd374e2..6b836e20 100644 --- a/wqflask/wqflask/parser.py +++ b/wqflask/wqflask/parser.py @@ -24,6 +24,7 @@ from pprint import pformat as pf from utility.logger import getLogger logger = getLogger(__name__) + def parse(pstring): """ diff --git a/wqflask/wqflask/pbkdf2.py b/wqflask/wqflask/pbkdf2.py index 6346df03..1a965fc5 100644 --- a/wqflask/wqflask/pbkdf2.py +++ b/wqflask/wqflask/pbkdf2.py @@ -4,6 +4,8 @@ from werkzeug.security import safe_str_cmp as ssc # Replace this because it just wraps around Python3's internal # functions. Added this during migration. + + def pbkdf2_hex(data, salt, iterations=1000, keylen=24, hashfunc="sha1"): """Wrapper function of python's hashlib.pbkdf2_hmac. """ diff --git a/wqflask/wqflask/resource_manager.py b/wqflask/wqflask/resource_manager.py index a1fe0f8f..4591710c 100644 --- a/wqflask/wqflask/resource_manager.py +++ b/wqflask/wqflask/resource_manager.py @@ -11,6 +11,7 @@ from utility.redis_tools import get_resource_info, get_group_info, get_groups_li from utility.logger import getLogger logger = getLogger(__name__) + @app.route("/resources/manage", methods=('GET', 'POST')) def manage_resource(): params = request.form if request.form else request.args @@ -40,6 +41,7 @@ def manage_resource(): return render_template("admin/manage_resource.html", owner_name=owner_display_name, resource_id=resource_id, resource_info=resource_info, default_mask=default_mask, group_masks=group_masks_with_names, admin_status=admin_status) + @app.route("/search_for_users", methods=('POST',)) def search_for_user(): params = request.form @@ -49,6 +51,7 @@ def search_for_user(): return json.dumps(user_list) + @app.route("/search_for_groups", methods=('POST',)) def search_for_groups(): params = request.form @@ -65,6 +68,7 @@ def search_for_groups(): return json.dumps(group_list) + @app.route("/resources/change_owner", methods=('POST',)) def change_owner(): resource_id = request.form['resource_id'] @@ -81,6 +85,7 @@ def change_owner(): else: return render_template("admin/change_resource_owner.html", resource_id=resource_id) + @app.route("/resources/change_default_privileges", methods=('POST',)) def change_default_privileges(): resource_id = request.form['resource_id'] @@ -99,6 +104,7 @@ def change_default_privileges(): else: return redirect(url_for("no_access_page")) + @app.route("/resources/add_group", methods=('POST',)) def add_group_to_resource(): resource_id = request.form['resource_id'] @@ -125,6 +131,7 @@ def add_group_to_resource(): else: return redirect(url_for("no_access_page")) + def get_group_names(group_masks): group_masks_with_names = {} for group_id, group_mask in list(group_masks.items()): diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py index 16eb1864..ed5f9bad 100644 --- a/wqflask/wqflask/search_results.py +++ b/wqflask/wqflask/search_results.py @@ -24,6 +24,7 @@ from utility.type_checking import is_str from utility.logger import getLogger logger = getLogger(__name__) + class SearchResultPage: #maxReturn = 3000 @@ -270,6 +271,7 @@ class SearchResultPage: else: return None + def get_GO_symbols(a_search): query = """SELECT genes FROM GORef @@ -287,6 +289,7 @@ def get_GO_symbols(a_search): return new_terms + def insert_newlines(string, every=64): """ This is because it is seemingly impossible to change the width of the description column, so I'm just manually adding line breaks """ lines = [] @@ -294,6 +297,7 @@ def 
insert_newlines(string, every=64): lines.append(string[i:i + every]) return '\n'.join(lines) + def get_aliases(symbol_list, species): updated_symbols = [] diff --git a/wqflask/wqflask/send_mail.py b/wqflask/wqflask/send_mail.py index 86e8a558..299c866a 100644 --- a/wqflask/wqflask/send_mail.py +++ b/wqflask/wqflask/send_mail.py @@ -8,10 +8,12 @@ Redis = StrictRedis() import mailer + def timestamp(): ts = datetime.datetime.utcnow() return ts.isoformat() + def main(): while True: print("I'm alive!") @@ -31,7 +33,6 @@ def main(): process_message(msg) - def process_message(msg): msg = json.loads(msg) diff --git a/wqflask/wqflask/server_side.py b/wqflask/wqflask/server_side.py index 48761fa0..8b3a4faa 100644 --- a/wqflask/wqflask/server_side.py +++ b/wqflask/wqflask/server_side.py @@ -1,7 +1,6 @@ # handles server side table processing - class ServerSideTable: """ This class is used to do server-side processing diff --git a/wqflask/wqflask/show_trait/SampleList.py b/wqflask/wqflask/show_trait/SampleList.py index 388f831f..496dee57 100644 --- a/wqflask/wqflask/show_trait/SampleList.py +++ b/wqflask/wqflask/show_trait/SampleList.py @@ -8,6 +8,7 @@ from pprint import pformat as pf from utility import Plot from utility import Bunch + class SampleList: def __init__(self, dataset, diff --git a/wqflask/wqflask/show_trait/export_trait_data.py b/wqflask/wqflask/show_trait/export_trait_data.py index 379b746c..f0fcd27d 100644 --- a/wqflask/wqflask/show_trait/export_trait_data.py +++ b/wqflask/wqflask/show_trait/export_trait_data.py @@ -5,6 +5,7 @@ from functools import cmp_to_key from base.trait import create_trait from base import data_set + def export_sample_table(targs): sample_data = json.loads(targs['export_data']) @@ -28,6 +29,7 @@ def export_sample_table(targs): return trait_name, final_sample_data + def get_export_metadata(trait_id, dataset_name): dataset = data_set.create_dataset(dataset_name) this_trait = create_trait(dataset=dataset, @@ -64,6 +66,7 @@ def dict_to_sorted_list(dictionary): sorted_values = [item[1] for item in sorted_list] return sorted_values + def cmp_samples(a, b): if b[0] == 'name': return 1 diff --git a/wqflask/wqflask/snp_browser/snp_browser.py b/wqflask/wqflask/snp_browser/snp_browser.py index 7a0df94b..e5c67165 100644 --- a/wqflask/wqflask/snp_browser/snp_browser.py +++ b/wqflask/wqflask/snp_browser/snp_browser.py @@ -9,6 +9,7 @@ logger = getLogger(__name__) from base import species from base import webqtlConfig + class SnpBrowser: def __init__(self, start_vars): @@ -472,7 +473,6 @@ class SnpBrowser: base_color_dict = {"A": "#C33232", "C": "#1569C7", "T": "#CFCF32", "G": "#32C332", "t": "#FF6", "c": "#5CB3FF", "a": "#F66", "g": "#CF9", ":": "#FFFFFF", "-": "#FFFFFF", "?": "#FFFFFF"} - the_bases = [] for j, item in enumerate(allele_value_list): if item and isinstance(item, str): @@ -641,6 +641,7 @@ class SnpBrowser: # for i in range(n_click): # href = url_for('snp_browser', first_run="false", chosen_strains_mouse=self.chosen_strains_mouse, chosen_strains_rat=self.chosen_strains_rat, variant=self.variant_type, species=self.species_name, gene_name=self.gene_name, chr=self.chr, start_mb=self.start_mb, end_mb=self.end_mb, limit_strains=self.limit_strains, domain=self.domain, function=self.function, criteria=self.criteria, score=self.score, diff_alleles=self.diff_alleles) + def get_browser_sample_lists(species_id=1): strain_lists = {} mouse_strain_list = [] @@ -660,6 +661,7 @@ def get_browser_sample_lists(species_id=1): return strain_lists + def get_header_list(variant_type, 
strains, species=None, empty_columns=None): if species == "Mouse": strain_list = strains['mouse'] @@ -715,6 +717,7 @@ def get_header_list(variant_type, strains, species=None, empty_columns=None): return header_fields, empty_field_count, header_data_names + def get_effect_details_by_category(effect_name=None, effect_value=None): gene_list = [] transcript_list = [] @@ -764,6 +767,7 @@ def get_effect_details_by_category(effect_name=None, effect_value=None): return [gene_list, transcript_list, exon_list, function_list, function_detail_list] + def get_effect_info(effect_list): domain = "" effect_detail_list = [] @@ -836,6 +840,7 @@ def get_effect_info(effect_list): return effect_info_dict + def get_gene_id(species_id, gene_name): query = """ SELECT @@ -853,6 +858,7 @@ def get_gene_id(species_id, gene_name): else: return "" + def get_gene_id_name_dict(species_id, gene_name_list): gene_id_name_dict = {} if len(gene_name_list) == 0: @@ -877,6 +883,7 @@ def get_gene_id_name_dict(species_id, gene_name_list): return gene_id_name_dict + def check_if_in_gene(species_id, chr, mb): if species_id != 0: # ZS: Check if this is necessary query = """SELECT geneId, geneSymbol diff --git a/wqflask/wqflask/submit_bnw.py b/wqflask/wqflask/submit_bnw.py index 4ad6f9e3..b21a88cc 100644 --- a/wqflask/wqflask/submit_bnw.py +++ b/wqflask/wqflask/submit_bnw.py @@ -5,5 +5,6 @@ from utility import helper_functions import utility.logger logger = utility.logger.getLogger(__name__) + def get_bnw_input(start_vars): logger.debug("BNW VARS:", start_vars) diff --git a/wqflask/wqflask/update_search_results.py b/wqflask/wqflask/update_search_results.py index 08b4f9f5..07073d6a 100644 --- a/wqflask/wqflask/update_search_results.py +++ b/wqflask/wqflask/update_search_results.py @@ -10,6 +10,7 @@ from utility.benchmark import Bench from utility.logger import getLogger logger = getLogger(__name__) + class GSearch: def __init__(self, kw): diff --git a/wqflask/wqflask/user_login.py b/wqflask/wqflask/user_login.py index 3f5b43ee..2a2f8484 100644 --- a/wqflask/wqflask/user_login.py +++ b/wqflask/wqflask/user_login.py @@ -29,9 +29,11 @@ from utility.tools import SMTP_CONNECT, SMTP_USERNAME, SMTP_PASSWORD, LOG_SQL_AL THREE_DAYS = 60 * 60 * 24 * 3 + def timestamp(): return datetime.datetime.utcnow().isoformat() + def basic_info(): return dict(timestamp=timestamp(), ip_address=request.remote_addr, @@ -54,6 +56,7 @@ def encode_password(pass_gen_fields, unencrypted_password): return pass_gen_fields + def set_password(password): pass_gen_fields = { "unencrypted_password": password, @@ -71,6 +74,7 @@ def set_password(password): return encoded_password + def get_signed_session_id(user): session_id = str(uuid.uuid4()) @@ -109,6 +113,7 @@ def get_signed_session_id(user): return session_id_signed + def send_email(toaddr, msg, fromaddr="no-reply@genenetwork.org"): """Send an E-mail through SMTP_CONNECT host. 
If SMTP_USERNAME is not 'UNKNOWN' TLS is used @@ -125,6 +130,7 @@ def send_email(toaddr, msg, fromaddr="no-reply@genenetwork.org"): server.quit() logger.info("Successfully sent email to " + toaddr) + def send_verification_email(user_details, template_name="email/user_verification.txt", key_prefix="verification_code", subject = "GeneNetwork e-mail verification"): verification_code = str(uuid.uuid4()) key = key_prefix + ":" + verification_code @@ -139,12 +145,14 @@ def send_verification_email(user_details, template_name="email/user_verification send_email(recipient, subject, body) return {"recipient": recipient, "subject": subject, "body": body} + def send_invitation_email(user_email, temp_password, template_name="email/user_invitation.txt", subject= "You've been added to a GeneNetwork user group"): recipient = user_email body = render_template(template_name, temp_password) send_email(recipient, subject, body) return {"recipient": recipient, "subject": subject, "body": body} + @app.route("/manage/verify_email") def verify_email(): if 'code' in request.args: @@ -160,6 +168,7 @@ def verify_email(): else: flash("Invalid code: Password reset code does not exist or might have expired!", "error") + @app.route("/n/login", methods=('GET', 'POST')) def login(): params = request.form if request.form else request.args @@ -232,6 +241,7 @@ def login(): return response + @app.route("/n/login/github_oauth2", methods=('GET', 'POST')) def github_oauth2(): from utility.tools import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET, GITHUB_AUTH_URL @@ -264,12 +274,14 @@ def github_oauth2(): url = "/n/login?type=github&uid=" + user_details["user_id"] return redirect(url) + def get_github_user_details(access_token): from utility.tools import GITHUB_API_URL result = requests.get(GITHUB_API_URL, headers={'Authorization': 'token ' + access_token}).content return json.loads(result) + @app.route("/n/login/orcid_oauth2", methods=('GET', 'POST')) def orcid_oauth2(): from uuid import uuid4 @@ -308,6 +320,7 @@ def orcid_oauth2(): flash("There was an error getting code from ORCID") return redirect(url) + def get_github_user_details(access_token): from utility.tools import GITHUB_API_URL result = requests.get(GITHUB_API_URL, headers={'Authorization': 'token ' + access_token}).content @@ -325,6 +338,7 @@ def logout(): response.set_cookie(UserSession.user_cookie_name, '', expires=0) return response + @app.route("/n/forgot_password", methods=['GET']) def forgot_password(): """Entry point for forgotten password""" @@ -333,6 +347,7 @@ def forgot_password(): print("ERRORS: ", errors) return render_template("new_security/forgot_password.html", errors=errors) + def send_forgot_password_email(verification_email): from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText @@ -365,6 +380,7 @@ def send_forgot_password_email(verification_email): return subject + @app.route("/n/forgot_password_submit", methods=('POST',)) def forgot_password_submit(): """When a forgotten password form is submitted we get here""" @@ -386,6 +402,7 @@ def forgot_password_submit(): flash("You MUST provide an email", "alert-danger") return redirect(url_for("forgot_password")) + @app.route("/n/password_reset", methods=['GET']) def password_reset(): """Entry point after user clicks link in E-mail""" @@ -405,6 +422,7 @@ def password_reset(): else: return redirect(url_for("login")) + @app.route("/n/password_reset_step2", methods=('POST',)) def password_reset_step2(): """Handle confirmation E-mail for password reset""" @@ -422,6 +440,7 @@ def 
password_reset_step2(): flash("Password changed successfully. You can now sign in.", "alert-info") return redirect(url_for('login')) + def register_user(params): thank_you_mode = False errors = [] @@ -461,6 +480,7 @@ def register_user(params): return errors + @app.route("/n/register", methods=('GET', 'POST')) def register(): errors = [] @@ -478,6 +498,7 @@ def register(): return render_template("new_security/register_user.html", values=params, errors=errors) + @app.errorhandler(401) def unauthorized(error): return redirect(url_for('login')) diff --git a/wqflask/wqflask/user_manager.py b/wqflask/wqflask/user_manager.py index 013920f9..a9bd65e6 100644 --- a/wqflask/wqflask/user_manager.py +++ b/wqflask/wqflask/user_manager.py @@ -151,6 +151,7 @@ def verify_cookie(cookie): assert the_signature == actual_hmac_creation(the_uuid), "Uh-oh, someone tampering with the cookie?" return the_uuid + def create_signed_cookie(): the_uuid = str(uuid.uuid4()) signature = actual_hmac_creation(the_uuid) @@ -158,6 +159,7 @@ def create_signed_cookie(): logger.debug("uuid_signed:", uuid_signed) return the_uuid, uuid_signed + class UserSession: """Logged in user handling""" @@ -341,6 +343,7 @@ class UserSession: Redis.delete(self.cookie_name) logger.debug("At end of delete_session") + @app.before_request def get_cookie(): logger.info("@app.before_request get cookie") @@ -348,16 +351,20 @@ def get_cookie(): g.cookie_session = AnonUser() # @app.after_request + + def set_cookie(response): if not request.cookies.get(g.cookie_session.cookie_name): response.set_cookie(g.cookie_session.cookie_name, g.cookie_session.cookie) return response + class UsersManager: def __init__(self): self.users = model.User.query.all() logger.debug("Users are:", self.users) + class UserManager: def __init__(self, kw): self.user_id = kw['user_id'] @@ -419,6 +426,7 @@ class RegisterUser: self.user.registration_info = json.dumps(basic_info(), sort_keys=True) save_user(self.user.__dict__, self.user.user_id) + def set_password(password, user): pwfields = Bunch() @@ -476,6 +484,7 @@ class VerificationEmail: verification_code=verification_code) send_email(to, subject, body) + class ForgotPasswordEmail(VerificationEmail): template_name = "email/forgot_password.txt" key_prefix = "forgot_password_code" @@ -496,7 +505,6 @@ class ForgotPasswordEmail(VerificationEmail): save_verification_code(toaddr, verification_code) - subject = self.subject body = render_template( self.template_name, @@ -530,6 +538,8 @@ def basic_info(): user_agent=request.headers.get('User-Agent')) # @app.route("/manage/verify_email") + + def verify_email(): user = DecodeUser(VerificationEmail.key_prefix).user user.confirmed = json.dumps(basic_info(), sort_keys=True) @@ -544,6 +554,8 @@ def verify_email(): return response # @app.route("/n/password_reset", methods=['GET']) + + def password_reset(): """Entry point after user clicks link in E-mail""" logger.debug("in password_reset request.url is:", request.url) @@ -568,6 +580,8 @@ def password_reset(): return redirect(url_for("login")) # @app.route("/n/password_reset_step2", methods=('POST',)) + + def password_reset_step2(): """Handle confirmation E-mail for password reset""" logger.debug("in password_reset request.url is:", request.url) @@ -577,7 +591,6 @@ def password_reset_step2(): logger.debug("locals are:", locals()) - user = Bunch() password = request.form['password'] set_password(password, user) @@ -589,6 +602,7 @@ def password_reset_step2(): return response + class DecodeUser: def __init__(self, code_prefix): @@ -612,6 +626,8 
@@ class DecodeUser: return model.User.query.get(data['id']) # @app.route("/n/login", methods=('GET', 'POST')) + + def login(): lu = LoginUser() login_type = request.args.get("type") @@ -622,6 +638,8 @@ def login(): return lu.standard_login() # @app.route("/n/login/github_oauth2", methods=('GET', 'POST')) + + def github_oauth2(): from utility.tools import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET code = request.args.get("code") @@ -646,6 +664,8 @@ def github_oauth2(): return redirect(url) # @app.route("/n/login/orcid_oauth2", methods=('GET', 'POST')) + + def orcid_oauth2(): from uuid import uuid4 from utility.tools import ORCID_CLIENT_ID, ORCID_CLIENT_SECRET, ORCID_TOKEN_URL, ORCID_AUTH_URL @@ -673,11 +693,13 @@ def orcid_oauth2(): flash("There was an error getting code from ORCID") return redirect(url) + def get_github_user_details(access_token): from utility.tools import GITHUB_API_URL result = requests.get(GITHUB_API_URL, params={"access_token": access_token}) return result.json() + class LoginUser: remember_time = 60 * 60 * 24 * 30 # One month in seconds @@ -814,6 +836,8 @@ class LoginUser: db_session.commit() # @app.route("/n/logout") + + def logout(): logger.debug("Logging out...") UserSession().delete_session() @@ -833,6 +857,8 @@ def forgot_password(): return render_template("new_security/forgot_password.html", errors=errors) # @app.route("/n/forgot_password_submit", methods=('POST',)) + + def forgot_password_submit(): """When a forgotten password form is submitted we get here""" params = request.form @@ -853,10 +879,12 @@ def forgot_password_submit(): flash("You MUST provide an email", "alert-danger") return redirect(url_for("forgot_password")) + @app.errorhandler(401) def unauthorized(error): return redirect(url_for('login')) + def is_redis_available(): try: Redis.ping() @@ -922,7 +950,6 @@ def register(): params = None errors = None - params = request.form if request.form else request.args params = params.to_dict(flat=True) @@ -952,6 +979,7 @@ def url_for_hmac(endpoint, **values): combiner = "?" return url + combiner + "hm=" + hm + def data_hmac(stringy): """Takes arbitray data string and appends :hmac so we know data hasn't been tampered with""" return stringy + ":" + actual_hmac_creation(stringy) @@ -974,6 +1002,7 @@ def verify_url_hmac(url): assert hm == hmac, "Unexpected url (stage 3)" + def actual_hmac_creation(stringy): """Helper function to create the actual hmac""" @@ -986,6 +1015,7 @@ def actual_hmac_creation(stringy): hm = hm[:20] return hm + app.jinja_env.globals.update(url_for_hmac=url_for_hmac, data_hmac=data_hmac) @@ -998,6 +1028,7 @@ app.jinja_env.globals.update(url_for_hmac=url_for_hmac, # Body=body)) # Redis.rpush("mail_queue", msg) + def send_email(toaddr, msg, fromaddr="no-reply@genenetwork.org"): """Send an E-mail through SMTP_CONNECT host. 
If SMTP_USERNAME is not 'UNKNOWN' TLS is used @@ -1020,6 +1051,7 @@ def send_email(toaddr, msg, fromaddr="no-reply@genenetwork.org"): server.quit() logger.info("Successfully sent email to " + toaddr) + class GroupsManager: def __init__(self, kw): self.datasets = create_datasets_list() diff --git a/wqflask/wqflask/user_session.py b/wqflask/wqflask/user_session.py index 78db7bd2..f0f0d60c 100644 --- a/wqflask/wqflask/user_session.py +++ b/wqflask/wqflask/user_session.py @@ -20,6 +20,7 @@ logger = getLogger(__name__) THREE_DAYS = 60 * 60 * 24 * 3 THIRTY_DAYS = 60 * 60 * 24 * 30 + @app.before_request def get_user_session(): logger.info("@app.before_request get_session") @@ -30,6 +31,7 @@ def get_user_session(): response.set_cookie('session_id_v2', '', expires=0) return response + @app.after_request def set_user_session(response): if hasattr(g, 'user_session'): @@ -37,6 +39,7 @@ def set_user_session(response): response.set_cookie(g.user_session.cookie_name, g.user_session.cookie) return response + def verify_cookie(cookie): the_uuid, separator, the_signature = cookie.partition(':') assert len(the_uuid) == 36, "Is session_id a uuid?" @@ -44,6 +47,7 @@ def verify_cookie(cookie): assert the_signature == hmac.hmac_creation(the_uuid), "Uh-oh, someone tampering with the cookie?" return the_uuid + def create_signed_cookie(): the_uuid = str(uuid.uuid4()) signature = hmac.hmac_creation(the_uuid) @@ -51,6 +55,7 @@ def create_signed_cookie(): logger.debug("uuid_signed:", uuid_signed) return the_uuid, uuid_signed + @app.route("/user/manage", methods=('GET', 'POST')) def manage_user(): params = request.form if request.form else request.args @@ -63,6 +68,7 @@ def manage_user(): return render_template("admin/manage_user.html", user_details=user_details) + class UserSession: """Logged in user handling""" -- cgit v1.2.3 From 03b6bcee689c1910bd850c6109cc37adc509cf5a Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Fri, 30 Apr 2021 12:45:59 +0300 Subject: autopep8: Fix E501 --- wqflask/base/data_set.py | 6 +- wqflask/base/mrna_assay_tissue_data.py | 3 +- wqflask/base/species.py | 3 +- wqflask/base/webqtlCaseData.py | 6 +- wqflask/db/webqtlDatabaseFunction.py | 6 +- wqflask/maintenance/convert_geno_to_bimbam.py | 21 +- wqflask/maintenance/gen_select_dataset.py | 6 +- .../maintenance/generate_kinship_from_bimbam.py | 13 +- wqflask/maintenance/geno_to_json.py | 3 +- wqflask/maintenance/quantile_normalize.py | 3 +- .../test_display_mapping_results.py | 3 +- .../marker_regression/test_gemma_mapping.py | 15 +- .../marker_regression/test_qtlreaper_mapping.py | 6 +- .../wqflask/marker_regression/test_rqtl_mapping.py | 3 +- .../wqflask/marker_regression/test_run_mapping.py | 3 +- wqflask/tests/unit/wqflask/test_server_side.py | 9 +- wqflask/tests/wqflask/show_trait/testSampleList.py | 3 +- .../tests/wqflask/show_trait/test_show_trait.py | 3 +- wqflask/utility/Plot.py | 9 +- wqflask/utility/benchmark.py | 9 +- wqflask/utility/corestats.py | 3 +- wqflask/utility/elasticsearch_tools.py | 6 +- wqflask/utility/gen_geno_ob.py | 12 +- wqflask/utility/genofile_parser.py | 3 +- wqflask/utility/logger.py | 3 +- wqflask/utility/redis_tools.py | 9 +- wqflask/utility/startup_config.py | 6 +- wqflask/utility/svg.py | 60 ++- wqflask/utility/tools.py | 12 +- wqflask/wqflask/api/correlation.py | 54 +- wqflask/wqflask/api/mapping.py | 15 +- wqflask/wqflask/api/router.py | 48 +- wqflask/wqflask/collect.py | 15 +- .../comparison_bar_chart/comparison_bar_chart.py | 6 +- wqflask/wqflask/correlation/corr_scatter_plot.py | 33 +- 
wqflask/wqflask/correlation/show_corr_results.py | 75 ++- .../wqflask/correlation_matrix/show_corr_matrix.py | 64 ++- wqflask/wqflask/ctl/ctl_analysis.py | 72 ++- wqflask/wqflask/do_search.py | 69 ++- wqflask/wqflask/docs.py | 6 +- wqflask/wqflask/export_traits.py | 24 +- wqflask/wqflask/external_tools/send_to_bnw.py | 6 +- .../wqflask/external_tools/send_to_geneweaver.py | 3 +- .../wqflask/external_tools/send_to_webgestalt.py | 3 +- wqflask/wqflask/group_manager.py | 24 +- wqflask/wqflask/gsearch.py | 41 +- wqflask/wqflask/heatmap/heatmap.py | 19 +- wqflask/wqflask/interval_analyst/GeneUtil.py | 9 +- .../marker_regression/display_mapping_results.py | 548 ++++++++++++++------- wqflask/wqflask/marker_regression/gemma_mapping.py | 3 +- wqflask/wqflask/marker_regression/plink_mapping.py | 9 +- .../wqflask/marker_regression/qtlreaper_mapping.py | 27 +- wqflask/wqflask/marker_regression/rqtl_mapping.py | 103 ++-- wqflask/wqflask/marker_regression/run_mapping.py | 126 +++-- wqflask/wqflask/model.py | 30 +- wqflask/wqflask/resource_manager.py | 9 +- wqflask/wqflask/search_results.py | 33 +- wqflask/wqflask/show_trait/SampleList.py | 24 +- wqflask/wqflask/show_trait/export_trait_data.py | 21 +- wqflask/wqflask/snp_browser/snp_browser.py | 150 ++++-- wqflask/wqflask/update_search_results.py | 9 +- wqflask/wqflask/user_login.py | 87 ++-- wqflask/wqflask/user_manager.py | 96 ++-- wqflask/wqflask/user_session.py | 28 +- 64 files changed, 1431 insertions(+), 705 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py index c14808d6..8ec0aaad 100644 --- a/wqflask/base/data_set.py +++ b/wqflask/base/data_set.py @@ -168,7 +168,8 @@ class DatasetType: results = g.db.execute(sql_query_mapping[t] % group_name).fetchone() if results: self.datasets[name] = dataset_name_mapping[t] - self.redis_instance.set("dataset_structure", json.dumps(self.datasets)) + self.redis_instance.set( + "dataset_structure", json.dumps(self.datasets)) return True return None @@ -239,7 +240,8 @@ class Markers: for line in bimbam_fh: marker = {} marker['name'] = line.split(delimiter)[0].rstrip() - marker['Mb'] = float(line.split(delimiter)[1].rstrip()) / 1000000 + marker['Mb'] = float(line.split(delimiter)[ + 1].rstrip()) / 1000000 marker['chr'] = line.split(delimiter)[2].rstrip() markers.append(marker) diff --git a/wqflask/base/mrna_assay_tissue_data.py b/wqflask/base/mrna_assay_tissue_data.py index cbc05738..9bb29664 100644 --- a/wqflask/base/mrna_assay_tissue_data.py +++ b/wqflask/base/mrna_assay_tissue_data.py @@ -88,6 +88,7 @@ class MrnaAssayTissueData: if result.Symbol.lower() not in symbol_values_dict: symbol_values_dict[result.Symbol.lower()] = [result.value] else: - symbol_values_dict[result.Symbol.lower()].append(result.value) + symbol_values_dict[result.Symbol.lower()].append( + result.value) return symbol_values_dict diff --git a/wqflask/base/species.py b/wqflask/base/species.py index 44f133b5..e3c29916 100644 --- a/wqflask/base/species.py +++ b/wqflask/base/species.py @@ -55,4 +55,5 @@ class Chromosomes: results = g.db.execute(query).fetchall() for item in results: - self.chromosomes[item.OrderId] = IndChromosome(item.Name, item.Length) + self.chromosomes[item.OrderId] = IndChromosome( + item.Name, item.Length) diff --git a/wqflask/base/webqtlCaseData.py b/wqflask/base/webqtlCaseData.py index 2d07ab9d..25b6cb8a 100644 --- a/wqflask/base/webqtlCaseData.py +++ b/wqflask/base/webqtlCaseData.py @@ -34,12 +34,14 @@ class webqtlCaseData: def __init__(self, name, value=None, 
variance=None, num_cases=None, name2=None): self.name = name - self.name2 = name2 # Other name (for traits like BXD65a) + # Other name (for traits like BXD65a) + self.name2 = name2 self.value = value # Trait Value self.variance = variance # Trait Variance self.num_cases = num_cases # Number of individuals/cases self.extra_attributes = None - self.this_id = None # Set a sane default (can't be just "id" cause that's a reserved word) + # Set a sane default (can't be just "id" cause that's a reserved word) + self.this_id = None self.outlier = None # Not set to True/False until later def __repr__(self): diff --git a/wqflask/db/webqtlDatabaseFunction.py b/wqflask/db/webqtlDatabaseFunction.py index 50ac06fd..29112949 100644 --- a/wqflask/db/webqtlDatabaseFunction.py +++ b/wqflask/db/webqtlDatabaseFunction.py @@ -36,13 +36,15 @@ def retrieve_species(group): """Get the species of a group (e.g. returns string "mouse" on "BXD" """ - result = fetch1("select Species.Name from Species, InbredSet where InbredSet.Name = '%s' and InbredSet.SpeciesId = Species.Id" % (group), "/cross/" + group + ".json", lambda r: (r["species"],))[0] + result = fetch1("select Species.Name from Species, InbredSet where InbredSet.Name = '%s' and InbredSet.SpeciesId = Species.Id" % ( + group), "/cross/" + group + ".json", lambda r: (r["species"],))[0] logger.debug("retrieve_species result:", result) return result def retrieve_species_id(group): - result = fetch1("select SpeciesId from InbredSet where Name = '%s'" % (group), "/cross/" + group + ".json", lambda r: (r["species_id"],))[0] + result = fetch1("select SpeciesId from InbredSet where Name = '%s'" % ( + group), "/cross/" + group + ".json", lambda r: (r["species_id"],))[0] logger.debug("retrieve_species_id result:", result) return result diff --git a/wqflask/maintenance/convert_geno_to_bimbam.py b/wqflask/maintenance/convert_geno_to_bimbam.py index 5b2369c9..a1712500 100644 --- a/wqflask/maintenance/convert_geno_to_bimbam.py +++ b/wqflask/maintenance/convert_geno_to_bimbam.py @@ -83,7 +83,8 @@ class ConvertGenoFile: genotypes = row_items[2:] for item_count, genotype in enumerate(genotypes): if genotype.upper().strip() in self.configurations: - this_marker.genotypes.append(self.configurations[genotype.upper().strip()]) + this_marker.genotypes.append( + self.configurations[genotype.upper().strip()]) else: this_marker.genotypes.append("NA") @@ -106,9 +107,11 @@ class ConvertGenoFile: with open(self.output_files[2], "w") as snp_fh: for marker in self.markers: if self.mb_exists: - snp_fh.write(marker['name'] + ", " + str(int(float(marker['Mb']) * 1000000)) + ", " + marker['chr'] + "\n") + snp_fh.write( + marker['name'] + ", " + str(int(float(marker['Mb']) * 1000000)) + ", " + marker['chr'] + "\n") else: - snp_fh.write(marker['name'] + ", " + str(int(float(marker['cM']) * 1000000)) + ", " + marker['chr'] + "\n") + snp_fh.write( + marker['name'] + ", " + str(int(float(marker['cM']) * 1000000)) + ", " + marker['chr'] + "\n") def get_sample_list(self, row_contents): self.sample_list = [] @@ -160,10 +163,14 @@ class ConvertGenoFile: group_name = ".".join(input_file.split('.')[:-1]) if group_name == "HSNIH-Palmer": continue - geno_output_file = os.path.join(new_directory, group_name + "_geno.txt") - pheno_output_file = os.path.join(new_directory, group_name + "_pheno.txt") - snp_output_file = os.path.join(new_directory, group_name + "_snps.txt") - output_files = [geno_output_file, pheno_output_file, snp_output_file] + geno_output_file = os.path.join( + new_directory, group_name + 
"_geno.txt") + pheno_output_file = os.path.join( + new_directory, group_name + "_pheno.txt") + snp_output_file = os.path.join( + new_directory, group_name + "_snps.txt") + output_files = [geno_output_file, + pheno_output_file, snp_output_file] print("%s -> %s" % ( os.path.join(old_directory, input_file), geno_output_file)) convertob = ConvertGenoFile(input_file, output_files) diff --git a/wqflask/maintenance/gen_select_dataset.py b/wqflask/maintenance/gen_select_dataset.py index 583a06e1..484336a6 100644 --- a/wqflask/maintenance/gen_select_dataset.py +++ b/wqflask/maintenance/gen_select_dataset.py @@ -120,14 +120,16 @@ def get_types(groups): else: if not phenotypes_exist(group_name) and not genotypes_exist(group_name): types[species].pop(group_name, None) - groups[species] = tuple(group for group in groups[species] if group[0] != group_name) + groups[species] = tuple( + group for group in groups[species] if group[0] != group_name) else: # ZS: This whole else statement might be unnecessary, need to check types_list = build_types(species, group_name) if len(types_list) > 0: types[species][group_name] = types_list else: types[species].pop(group_name, None) - groups[species] = tuple(group for group in groups[species] if group[0] != group_name) + groups[species] = tuple( + group for group in groups[species] if group[0] != group_name) return types diff --git a/wqflask/maintenance/generate_kinship_from_bimbam.py b/wqflask/maintenance/generate_kinship_from_bimbam.py index 7cc60c9e..bed634fa 100644 --- a/wqflask/maintenance/generate_kinship_from_bimbam.py +++ b/wqflask/maintenance/generate_kinship_from_bimbam.py @@ -21,7 +21,9 @@ class GenerateKinshipMatrices: self.pheno_file = pheno_file def generate_kinship(self): - gemma_command = "/gnu/store/xhzgjr0jvakxv6h3blj8z496xjig69b0-profile/bin/gemma -g " + self.geno_file + " -p " + self.pheno_file + " -gk 1 -outdir /home/zas1024/genotype_files/genotype/bimbam/ -o " + self.group_name + gemma_command = "/gnu/store/xhzgjr0jvakxv6h3blj8z496xjig69b0-profile/bin/gemma -g " + self.geno_file + \ + " -p " + self.pheno_file + \ + " -gk 1 -outdir /home/zas1024/genotype_files/genotype/bimbam/ -o " + self.group_name print("command:", gemma_command) os.system(gemma_command) @@ -34,9 +36,12 @@ class GenerateKinshipMatrices: group_name = ".".join(input_file.split('.')[:-1]) if group_name == "HSNIH-Palmer": continue - geno_input_file = os.path.join(bimbam_dir, group_name + "_geno.txt") - pheno_input_file = os.path.join(bimbam_dir, group_name + "_pheno.txt") - convertob = GenerateKinshipMatrices(group_name, geno_input_file, pheno_input_file) + geno_input_file = os.path.join( + bimbam_dir, group_name + "_geno.txt") + pheno_input_file = os.path.join( + bimbam_dir, group_name + "_pheno.txt") + convertob = GenerateKinshipMatrices( + group_name, geno_input_file, pheno_input_file) try: convertob.generate_kinship() except EmptyConfigurations as why: diff --git a/wqflask/maintenance/geno_to_json.py b/wqflask/maintenance/geno_to_json.py index ad3f2b72..7bdf2b53 100644 --- a/wqflask/maintenance/geno_to_json.py +++ b/wqflask/maintenance/geno_to_json.py @@ -100,7 +100,8 @@ class ConvertGenoFile: genotypes = row_items[2:] for item_count, genotype in enumerate(genotypes): if genotype.upper() in self.configurations: - this_marker.genotypes.append(self.configurations[genotype.upper()]) + this_marker.genotypes.append( + self.configurations[genotype.upper()]) else: this_marker.genotypes.append("NA") diff --git a/wqflask/maintenance/quantile_normalize.py 
b/wqflask/maintenance/quantile_normalize.py index 1896bc52..ac7689f5 100644 --- a/wqflask/maintenance/quantile_normalize.py +++ b/wqflask/maintenance/quantile_normalize.py @@ -34,7 +34,8 @@ def create_dataframe(input_file): with open(input_file) as f: ncols = len(f.readline().split("\t")) - input_array = np.loadtxt(open(input_file, "rb"), delimiter="\t", skiprows=1, usecols=list(range(1, ncols))) + input_array = np.loadtxt(open( + input_file, "rb"), delimiter="\t", skiprows=1, usecols=list(range(1, ncols))) return pd.DataFrame(input_array) # This function taken from https://github.com/ShawnLYU/Quantile_Normalize diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_display_mapping_results.py b/wqflask/tests/unit/wqflask/marker_regression/test_display_mapping_results.py index 219a6a29..f4869c45 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_display_mapping_results.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_display_mapping_results.py @@ -39,7 +39,8 @@ class TestHtmlGenWrapper(unittest.TestCase): cgi="/testing/", enctype='multipart/form-data', name="formName", - submit=HtmlGenWrapper.create_input_tag(type_='hidden', name='Default_Name') + submit=HtmlGenWrapper.create_input_tag( + type_='hidden', name='Default_Name') ) test_image = HtmlGenWrapper.create_image_tag( src="test.png", diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_gemma_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_gemma_mapping.py index f194c6c9..5cbaf0e0 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_gemma_mapping.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_gemma_mapping.py @@ -70,7 +70,8 @@ class TestGemmaMapping(unittest.TestCase): ], vals=[], covariates="", use_loco=True) self.assertEqual(mock_os.system.call_count, 2) mock_gen_pheno_txt.assert_called_once() - mock_parse_loco.assert_called_once_with(dataset, "GP1_GWA_RRRRRR", True) + mock_parse_loco.assert_called_once_with( + dataset, "GP1_GWA_RRRRRR", True) mock_os.path.isfile.assert_called_once_with( ('/home/user/imgfile_output.assoc.txt')) self.assertEqual(mock_flat_files.call_count, 4) @@ -102,7 +103,8 @@ class TestGemmaMapping(unittest.TestCase): create_trait_side_effect = [] for i in range(4): - create_dataset_side_effect.append(AttributeSetter({"name": f'name_{i}'})) + create_dataset_side_effect.append( + AttributeSetter({"name": f'name_{i}'})) create_trait_side_effect.append( AttributeSetter({"data": [f'data_{i}']})) @@ -160,9 +162,12 @@ X\tM5\t12\tQ\tE\tMMB\tR\t21.1\tW\t0.65\t0.6""" results = parse_loco_output( this_dataset={}, gwa_output_filename=".xw/") expected_results = [ - {'name': 'M1', 'chr': 'X/Y', 'Mb': 2.8457155e-05, 'p_value': 0.85, 'additive': 23.3, 'lod_score': 0.07058107428570727}, - {'name': 'M2', 'chr': 4, 'Mb': 1.2e-05, 'p_value': 0.5, 'additive': 24.0, 'lod_score': 0.3010299956639812}, - {'name': 'M4', 'chr': 'Y', 'Mb': 1.2e-05, 'p_value': 0.7, 'additive': 11.6, 'lod_score': 0.1549019599857432}, + {'name': 'M1', 'chr': 'X/Y', 'Mb': 2.8457155e-05, 'p_value': 0.85, + 'additive': 23.3, 'lod_score': 0.07058107428570727}, + {'name': 'M2', 'chr': 4, 'Mb': 1.2e-05, 'p_value': 0.5, + 'additive': 24.0, 'lod_score': 0.3010299956639812}, + {'name': 'M4', 'chr': 'Y', 'Mb': 1.2e-05, 'p_value': 0.7, + 'additive': 11.6, 'lod_score': 0.1549019599857432}, {'name': 'M5', 'chr': 'X', 'Mb': 1.2e-05, 'p_value': 0.6, 'additive': 21.1, 'lod_score': 0.22184874961635637}] self.assertEqual(expected_results, results) diff --git 
a/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py index 5cc8fd0f..c762982b 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py @@ -13,10 +13,12 @@ class TestQtlReaperMapping(unittest.TestCase): samples = ["S1", "S2", "S3", "S4","S5"] trait_filename = "trait_file" with mock.patch("builtins.open", mock.mock_open())as mock_open: - gen_pheno_txt_file(samples=samples, vals=vals, trait_filename=trait_filename) + gen_pheno_txt_file(samples=samples, vals=vals, + trait_filename=trait_filename) mock_open.assert_called_once_with("/home/user/data/gn2/trait_file.txt", "w") filehandler = mock_open() - write_calls = [mock.call('Trait\t'), mock.call('S1\tS3\tS4\n'), mock.call('T1\t'), mock.call('V1\tV4\tV3')] + write_calls = [mock.call('Trait\t'), mock.call( + 'S1\tS3\tS4\n'), mock.call('T1\t'), mock.call('V1\tV4\tV3')] filehandler.write.assert_has_calls(write_calls) diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py index 6267ce9a..6996c275 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py @@ -20,7 +20,8 @@ class TestRqtlMapping(unittest.TestCase): def test_get_trait_data(self, mock_logger, mock_db): """test for getting trait data_type return True""" query_value = """SELECT value FROM TraitMetadata WHERE type='trait_data_type'""" - mock_db.db.execute.return_value.fetchone.return_value = ["""{"type":"trait_data_type","name":"T1","traid_id":"fer434f"}"""] + mock_db.db.execute.return_value.fetchone.return_value = [ + """{"type":"trait_data_type","name":"T1","traid_id":"fer434f"}"""] results = get_trait_data_type("traid_id") mock_db.db.execute.assert_called_with(query_value) self.assertEqual(results, "fer434f") diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_run_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_run_mapping.py index a29d8cfb..78cd3be9 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_run_mapping.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_run_mapping.py @@ -187,7 +187,8 @@ class TestRunMapping(unittest.TestCase): mock.call('Time/Date: 09/01/19 / 10:12:12\n'), mock.call('Population: Human GP1_\n'), mock.call( 'Data Set: dataser_1\n'), - mock.call('N Samples: 100\n'), mock.call('Transform - Quantile Normalized\n'), + mock.call('N Samples: 100\n'), mock.call( + 'Transform - Quantile Normalized\n'), mock.call('Gene Symbol: IGFI\n'), mock.call( 'Location: X1 @ 123313 Mb\n'), mock.call('Cofactors (dataset - trait):\n'), diff --git a/wqflask/tests/unit/wqflask/test_server_side.py b/wqflask/tests/unit/wqflask/test_server_side.py index 4f91d8ca..69977146 100644 --- a/wqflask/tests/unit/wqflask/test_server_side.py +++ b/wqflask/tests/unit/wqflask/test_server_side.py @@ -22,10 +22,13 @@ class TestServerSideTableTests(unittest.TestCase): {'first': 'c', 'second': 1, 'third': 'ss'}, ] headers = ['first', 'second', 'third'] - request_args = {'sEcho': '1', 'iSortCol_0': '1', 'iSortingCols': '1', 'sSortDir_0': 'asc', 'iDisplayStart': '0', 'iDisplayLength': '3'} + request_args = {'sEcho': '1', 'iSortCol_0': '1', 'iSortingCols': '1', + 'sSortDir_0': 'asc', 'iDisplayStart': '0', 'iDisplayLength': '3'} - test_page = ServerSideTable(rows_count, table_rows, headers, 
request_args).get_page() + test_page = ServerSideTable( + rows_count, table_rows, headers, request_args).get_page() self.assertEqual(test_page['sEcho'], '1') self.assertEqual(test_page['iTotalRecords'], 'nan') self.assertEqual(test_page['iTotalDisplayRecords'], '3') - self.assertEqual(test_page['data'], [{'first': 'b', 'second': 2, 'third': 'aa'}, {'first': 'c', 'second': 1, 'third': 'ss'}, {'first': 'd', 'second': 4, 'third': 'zz'}]) + self.assertEqual(test_page['data'], [{'first': 'b', 'second': 2, 'third': 'aa'}, { + 'first': 'c', 'second': 1, 'third': 'ss'}, {'first': 'd', 'second': 4, 'third': 'zz'}]) diff --git a/wqflask/tests/wqflask/show_trait/testSampleList.py b/wqflask/tests/wqflask/show_trait/testSampleList.py index 441a88a7..305586ce 100644 --- a/wqflask/tests/wqflask/show_trait/testSampleList.py +++ b/wqflask/tests/wqflask/show_trait/testSampleList.py @@ -13,4 +13,5 @@ class TestSampleList(unittest.TestCase): sorted_list_a = natural_sort(characters_list) sorted_list_b = natural_sort(names_list) self.assertEqual(sorted_list_a, ["a", "f", "g", "q", "s", "t", "z"]) - self.assertEqual(sorted_list_b, ["Dataset", "Sample", "publish", "temp1"]) + self.assertEqual( + sorted_list_b, ["Dataset", "Sample", "publish", "temp1"]) diff --git a/wqflask/tests/wqflask/show_trait/test_show_trait.py b/wqflask/tests/wqflask/show_trait/test_show_trait.py index 24c3923e..63df2ba5 100644 --- a/wqflask/tests/wqflask/show_trait/test_show_trait.py +++ b/wqflask/tests/wqflask/show_trait/test_show_trait.py @@ -72,7 +72,8 @@ class TestTraits(unittest.TestCase): mock_get.return_value = get_return_obj results = get_ncbi_summary(trait) mock_exists.assert_called_once() - mock_get.assert_called_once_with(f"http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=gene&id={trait.geneid}&retmode=json") + mock_get.assert_called_once_with( + f"http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=gene&id={trait.geneid}&retmode=json") self.assertEqual(results, "this is a summary of the geneid") diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py index d35b2089..4f5691c1 100644 --- a/wqflask/utility/Plot.py +++ b/wqflask/utility/Plot.py @@ -178,7 +178,8 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab # draw drawing region im_drawer.rectangle( - xy=((xLeftOffset, yTopOffset), (xLeftOffset + plotWidth, yTopOffset + plotHeight)) + xy=((xLeftOffset, yTopOffset), + (xLeftOffset + plotWidth, yTopOffset + plotHeight)) ) # draw scale @@ -199,11 +200,13 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab y = yLow for i in range(int(stepY) + 1): yc = yTopOffset + plotHeight - (y - yLow) * yScale - im_drawer.line(xy=((xLeftOffset, yc), (xLeftOffset - 5, yc)), fill=axesColor) + im_drawer.line( + xy=((xLeftOffset, yc), (xLeftOffset - 5, yc)), fill=axesColor) strY = "%d" % y im_drawer.text( text=strY, - xy=(xLeftOffset - im_drawer.textsize(strY, font=scaleFont)[0] - 6, yc + 5), + xy=(xLeftOffset - im_drawer.textsize(strY, + font=scaleFont)[0] - 6, yc + 5), font=scaleFont) y += (yTop - yLow) / stepY diff --git a/wqflask/utility/benchmark.py b/wqflask/utility/benchmark.py index 48ab1dc0..6ece2f21 100644 --- a/wqflask/utility/benchmark.py +++ b/wqflask/utility/benchmark.py @@ -19,7 +19,8 @@ class Bench: if self.name: logger.debug("Starting benchmark: %s" % (self.name)) else: - logger.debug("Starting benchmark at: %s [%i]" % (inspect.stack()[1][3], inspect.stack()[1][2])) + logger.debug("Starting benchmark at: %s [%i]" % ( + 
inspect.stack()[1][3], inspect.stack()[1][2])) self.start_time = time.time() def __exit__(self, type, value, traceback): @@ -33,11 +34,13 @@ class Bench: logger.info(" %s took: %f seconds" % (name, (time_taken))) if self.name: - Bench.entries[self.name] = Bench.entries.get(self.name, 0) + time_taken + Bench.entries[self.name] = Bench.entries.get( + self.name, 0) + time_taken @classmethod def report(cls): - total_time = sum((time_taken for time_taken in list(cls.entries.values()))) + total_time = sum( + (time_taken for time_taken in list(cls.entries.values()))) print("\nTiming report\n") for name, time_taken in list(cls.entries.items()): percent = int(round((time_taken / total_time) * 100)) diff --git a/wqflask/utility/corestats.py b/wqflask/utility/corestats.py index 523280a1..da0a21db 100644 --- a/wqflask/utility/corestats.py +++ b/wqflask/utility/corestats.py @@ -65,7 +65,8 @@ class Stats: if len(self.sequence) < 1: value = None elif (percentile >= 100): - sys.stderr.write('ERROR: percentile must be < 100. you supplied: %s\n' % percentile) + sys.stderr.write( + 'ERROR: percentile must be < 100. you supplied: %s\n' % percentile) value = None else: element_idx = int(len(self.sequence) * (percentile / 100.0)) diff --git a/wqflask/utility/elasticsearch_tools.py b/wqflask/utility/elasticsearch_tools.py index 9415cef0..55907dd5 100644 --- a/wqflask/utility/elasticsearch_tools.py +++ b/wqflask/utility/elasticsearch_tools.py @@ -49,7 +49,8 @@ from utility.tools import ELASTICSEARCH_HOST, ELASTICSEARCH_PORT def test_elasticsearch_connection(): - es = Elasticsearch(['http://' + ELASTICSEARCH_HOST + ":" + str(ELASTICSEARCH_PORT) + '/'], verify_certs=True) + es = Elasticsearch(['http://' + ELASTICSEARCH_HOST + \ + ":" + str(ELASTICSEARCH_PORT) + '/'], verify_certs=True) if not es.ping(): logger.warning("Elasticsearch is DOWN") @@ -88,7 +89,8 @@ def setup_users_index(es_connection): "type": "keyword"}}} es_connection.indices.create(index='users', ignore=400) - es_connection.indices.put_mapping(body=index_settings, index="users", doc_type="local") + es_connection.indices.put_mapping( + body=index_settings, index="users", doc_type="local") def get_user_by_unique_column(es, column_name, column_value, index="users", doc_type="local"): diff --git a/wqflask/utility/gen_geno_ob.py b/wqflask/utility/gen_geno_ob.py index 24604e58..e619b7b6 100644 --- a/wqflask/utility/gen_geno_ob.py +++ b/wqflask/utility/gen_geno_ob.py @@ -38,13 +38,15 @@ class genotype: def read_rdata_output(self, qtl_results): # ZS: This is necessary because R/qtl requires centimorgan marker positions, which it normally gets from the .geno file, but that doesn't exist for HET3-ITP (which only has RData), so it needs to read in the marker cM positions from the results - self.chromosomes = [] # ZS: Overwriting since the .geno file's contents are just placeholders + # ZS: Overwriting since the .geno file's contents are just placeholders + self.chromosomes = [] this_chr = "" # ZS: This is so it can track when the chromosome changes as it iterates through markers chr_ob = None for marker in qtl_results: locus = Locus(self) - if (str(marker['chr']) != this_chr) and this_chr != "X": # ZS: This is really awkward but works as a temporary fix + # ZS: This is really awkward but works as a temporary fix + if (str(marker['chr']) != this_chr) and this_chr != "X": if this_chr != "": self.chromosomes.append(chr_ob) this_chr = str(marker['chr']) @@ -156,9 +158,11 @@ class Locus: try: self.cM = float(marker_row[geno_ob.cm_column]) except: - self.cM = 
float(marker_row[geno_ob.mb_column]) if geno_ob.mb_exists else 0 + self.cM = float( + marker_row[geno_ob.mb_column]) if geno_ob.mb_exists else 0 try: - self.Mb = float(marker_row[geno_ob.mb_column]) if geno_ob.mb_exists else None + self.Mb = float( + marker_row[geno_ob.mb_column]) if geno_ob.mb_exists else None except: self.Mb = self.cM diff --git a/wqflask/utility/genofile_parser.py b/wqflask/utility/genofile_parser.py index 94a08c17..09100bd9 100644 --- a/wqflask/utility/genofile_parser.py +++ b/wqflask/utility/genofile_parser.py @@ -92,7 +92,8 @@ class ConvertGenoFile: genotypes = row_items[2:] for item_count, genotype in enumerate(genotypes): if genotype.upper().strip() in self.configurations: - this_marker.genotypes.append(self.configurations[genotype.upper().strip()]) + this_marker.genotypes.append( + self.configurations[genotype.upper().strip()]) else: print("WARNING:", genotype.upper()) this_marker.genotypes.append("NA") diff --git a/wqflask/utility/logger.py b/wqflask/utility/logger.py index 47079818..d706e32a 100644 --- a/wqflask/utility/logger.py +++ b/wqflask/utility/logger.py @@ -151,5 +151,6 @@ def getLogger(name, level=None): else: logger.setLevel(LOG_LEVEL) - logger.info("Log level of " + name + " set to " + logging.getLevelName(logger.getEffectiveLevel())) + logger.info("Log level of " + name + " set to " + \ + logging.getLevelName(logger.getEffectiveLevel())) return gnlogger diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py index 8052035f..96a4be12 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -133,8 +133,10 @@ def get_user_groups(user_id): for key in groups_list: try: group_ob = json.loads(groups_list[key]) - group_admins = set([this_admin.encode('utf-8') if this_admin else None for this_admin in group_ob['admins']]) - group_members = set([this_member.encode('utf-8') if this_member else None for this_member in group_ob['members']]) + group_admins = set([this_admin.encode( + 'utf-8') if this_admin else None for this_admin in group_ob['admins']]) + group_members = set([this_member.encode( + 'utf-8') if this_member else None for this_member in group_ob['members']]) if user_id in group_admins: admin_group_ids.append(group_ob['id']) elif user_id in group_members: @@ -203,7 +205,8 @@ def get_groups_like_unique_column(column_name, column_value): if column_value in group_info[column_name]: matched_groups.append(group_info) else: - matched_groups.append(load_json_from_redis(group_list, column_value)) + matched_groups.append( + load_json_from_redis(group_list, column_value)) return matched_groups diff --git a/wqflask/utility/startup_config.py b/wqflask/utility/startup_config.py index 92f944bc..05f8a2b0 100644 --- a/wqflask/utility/startup_config.py +++ b/wqflask/utility/startup_config.py @@ -28,7 +28,8 @@ def app_config(): port = get_setting_int("SERVER_PORT") if get_setting_bool("USE_GN_SERVER"): - print(("GN2 API server URL is [" + BLUE + get_setting("GN_SERVER_URL") + ENDC + "]")) + print( + ("GN2 API server URL is [" + BLUE + get_setting("GN_SERVER_URL") + ENDC + "]")) import requests page = requests.get(get_setting("GN_SERVER_URL")) if page.status_code != 200: @@ -37,4 +38,5 @@ def app_config(): # import utility.elasticsearch_tools as es # es.test_elasticsearch_connection() - print(("GN2 is running. Visit %s[http://localhost:%s/%s](%s)" % (BLUE, str(port), ENDC, get_setting("WEBSERVER_URL")))) + print(("GN2 is running. 
Visit %s[http://localhost:%s/%s](%s)" % + (BLUE, str(port), ENDC, get_setting("WEBSERVER_URL")))) diff --git a/wqflask/utility/svg.py b/wqflask/utility/svg.py index f5ef81e1..8d2e13ab 100644 --- a/wqflask/utility/svg.py +++ b/wqflask/utility/svg.py @@ -239,19 +239,23 @@ class pathdata: def smbezier(self, x2, y2, x, y): """smooth bezier with xy2 to xy absolut""" - self.path.append('S' + str(x2) + ',' + str(y2) + ' ' + str(x) + ',' + str(y)) + self.path.append('S' + str(x2) + ',' + str(y2) + \ + ' ' + str(x) + ',' + str(y)) def relsmbezier(self, x2, y2, x, y): """smooth bezier with xy2 to xy relative""" - self.path.append('s' + str(x2) + ',' + str(y2) + ' ' + str(x) + ',' + str(y)) + self.path.append('s' + str(x2) + ',' + str(y2) + \ + ' ' + str(x) + ',' + str(y)) def qbezier(self, x1, y1, x, y): """quadratic bezier with xy1 to xy absolut""" - self.path.append('Q' + str(x1) + ',' + str(y1) + ' ' + str(x) + ',' + str(y)) + self.path.append('Q' + str(x1) + ',' + str(y1) + \ + ' ' + str(x) + ',' + str(y)) def relqbezier(self, x1, y1, x, y): """quadratic bezier with xy1 to xy relative""" - self.path.append('q' + str(x1) + ',' + str(y1) + ' ' + str(x) + ',' + str(y)) + self.path.append('q' + str(x1) + ',' + str(y1) + \ + ' ' + str(x) + ',' + str(y)) def smqbezier(self, x, y): """smooth quadratic bezier to xy absolut""" @@ -447,7 +451,8 @@ class rect(SVGelement): if width == None or height == None: raise ValueError('both height and width are required') - SVGelement.__init__(self, 'rect', {'width': width, 'height': height}, **args) + SVGelement.__init__( + self, 'rect', {'width': width, 'height': height}, **args) if x != None: self.attributes['x'] = x if y != None: @@ -545,7 +550,8 @@ class polyline(SVGelement): """ def __init__(self, points, fill=None, stroke=None, stroke_width=None,**args): - SVGelement.__init__(self, 'polyline', {'points': _xypointlist(points)}, **args) + SVGelement.__init__(self, 'polyline', { + 'points': _xypointlist(points)}, **args) if fill != None: self.attributes['fill'] = fill if stroke_width != None: @@ -561,7 +567,8 @@ class polygon(SVGelement): """ def __init__(self, points, fill=None, stroke=None, stroke_width=None,**args): - SVGelement.__init__(self, 'polygon', {'points': _xypointlist(points)}, **args) + SVGelement.__init__( + self, 'polygon', {'points': _xypointlist(points)}, **args) if fill != None: self.attributes['fill'] = fill if stroke_width != None: @@ -745,7 +752,8 @@ class image(SVGelement): def __init__(self, url, x=None, y=None, width=None,height=None,**args): if width == None or height == None: raise ValueError('both height and width are required') - SVGelement.__init__(self, 'image', {'xlink:href': url, 'width': width, 'height':height}, **args) + SVGelement.__init__( + self, 'image', {'xlink:href': url, 'width': width, 'height':height}, **args) if x != None: self.attributes['x'] = x if y != None: @@ -886,7 +894,8 @@ class script(SVGelement): """ def __init__(self, type, cdata=None, **args): - SVGelement.__init__(self, 'script', {'type': type}, cdata=cdata, **args) + SVGelement.__init__( + self, 'script', {'type': type}, cdata=cdata, **args) class animate(SVGelement): @@ -896,7 +905,8 @@ class animate(SVGelement): """ def __init__(self, attribute, fr=None, to=None, dur=None,**args): - SVGelement.__init__(self, 'animate', {'attributeName': attribute}, **args) + SVGelement.__init__( + self, 'animate', {'attributeName': attribute}, **args) if fr != None: self.attributes['from'] = fr if to != None: @@ -926,7 +936,8 @@ class animateTransform(SVGelement): """ 
def __init__(self, type=None, fr=None, to=None, dur=None,**args): - SVGelement.__init__(self, 'animateTransform', {'attributeName': 'transform'}, **args) + SVGelement.__init__(self, 'animateTransform', { + 'attributeName': 'transform'}, **args) # As far as I know the attributeName is always transform if type != None: self.attributes['type'] = type @@ -945,7 +956,8 @@ class animateColor(SVGelement): """ def __init__(self, attribute, type=None, fr=None, to=None,dur=None,**args): - SVGelement.__init__(self, 'animateColor', {'attributeName': attribute}, **args) + SVGelement.__init__(self, 'animateColor', { + 'attributeName': attribute}, **args) if type != None: self.attributes['type'] = type if fr != None: @@ -1020,11 +1032,13 @@ class drawing: import io xml = io.StringIO() xml.write("\n") - xml.write("\n" % (item, self.entity[item])) + xml.write("\n" % + (item, self.entity[item])) xml.write("]") xml.write(">\n") self.svg.toXml(0, xml) @@ -1042,7 +1056,8 @@ class drawing: else: if filename[-4:] == 'svgz': import gzip - f = gzip.GzipFile(filename=filename, mode="wb", compresslevel=9) + f = gzip.GzipFile(filename=filename, + mode="wb", compresslevel=9) f.write(xml.getvalue()) f.close() else: @@ -1057,7 +1072,8 @@ class drawing: writes a svg drawing to the screen or to a file compresses if filename ends with svgz or if compress is true """ - doctype = implementation.createDocumentType('svg', "-//W3C//DTD SVG 1.0//EN""", 'http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd ') + doctype = implementation.createDocumentType( + 'svg', "-//W3C//DTD SVG 1.0//EN""", 'http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd ') global root # root is defined global so it can be used by the appender. Its also possible to use it as an arugument but @@ -1076,8 +1092,10 @@ class drawing: if element.text: textnode = root.createTextNode(element.text) e.appendChild(textnode) - for attribute in list(element.attributes.keys()): # in element.attributes is supported from python 2.2 - e.setAttribute(attribute, str(element.attributes[attribute])) + # in element.attributes is supported from python 2.2 + for attribute in list(element.attributes.keys()): + e.setAttribute(attribute, str( + element.attributes[attribute])) if element.elements: for el in element.elements: e = appender(el, e) @@ -1105,7 +1123,8 @@ class drawing: import io xml = io.StringIO() PrettyPrint(root, xml) - f = gzip.GzipFile(filename=filename, mode='wb', compresslevel=9) + f = gzip.GzipFile(filename=filename, + mode='wb', compresslevel=9) f.write(xml.getvalue()) f.close() else: @@ -1119,7 +1138,8 @@ class drawing: try: import xml.parsers.xmlproc.xmlval except: - raise exceptions.ImportError('PyXml is required for validating SVG') + raise exceptions.ImportError( + 'PyXml is required for validating SVG') svg = self.toXml() xv = xml.parsers.xmlproc.xmlval.XMLValidator() try: diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py index 4fe4db08..263c3948 100644 --- a/wqflask/utility/tools.py +++ b/wqflask/utility/tools.py @@ -64,7 +64,8 @@ def get_setting(command_id, guess=None): command = value(guess) if command is None or command == "": # print command - raise Exception(command_id + ' setting unknown or faulty (update default_settings.py?).') + raise Exception( + command_id + ' setting unknown or faulty (update default_settings.py?).') # print("Set "+command_id+"="+str(command)) return command @@ -113,7 +114,8 @@ def js_path(module=None): try_guix = get_setting("JS_GUIX_PATH") + "/" + module if valid_path(try_guix): return try_guix - raise 
"No JS path found for " + module + " (if not in Guix check JS_GN_PATH)" + raise "No JS path found for " + module + \ + " (if not in Guix check JS_GN_PATH)" def reaper_command(guess=None): @@ -292,7 +294,8 @@ ORCID_CLIENT_SECRET = get_setting('ORCID_CLIENT_SECRET') ORCID_AUTH_URL = None if ORCID_CLIENT_ID != 'UNKNOWN' and ORCID_CLIENT_SECRET: ORCID_AUTH_URL = "https://orcid.org/oauth/authorize?response_type=code&scope=/authenticate&show_login=true&client_id=" + \ - ORCID_CLIENT_ID + "&client_secret=" + ORCID_CLIENT_SECRET + "&redirect_uri=" + GN2_BRANCH_URL + "n/login/orcid_oauth2" + ORCID_CLIENT_ID + "&client_secret=" + ORCID_CLIENT_SECRET + \ + "&redirect_uri=" + GN2_BRANCH_URL + "n/login/orcid_oauth2" ORCID_TOKEN_URL = get_setting('ORCID_TOKEN_URL') ELASTICSEARCH_HOST = get_setting('ELASTICSEARCH_HOST') @@ -320,7 +323,8 @@ assert_dir(JS_GUIX_PATH + '/cytoscape-panzoom') CSS_PATH = JS_GUIX_PATH # The CSS is bundled together with the JS # assert_dir(JS_PATH) -JS_TWITTER_POST_FETCHER_PATH = get_setting("JS_TWITTER_POST_FETCHER_PATH", js_path("javascript-twitter-post-fetcher")) +JS_TWITTER_POST_FETCHER_PATH = get_setting( + "JS_TWITTER_POST_FETCHER_PATH", js_path("javascript-twitter-post-fetcher")) assert_dir(JS_TWITTER_POST_FETCHER_PATH) assert_file(JS_TWITTER_POST_FETCHER_PATH + "/js/twitterFetcher_min.js") diff --git a/wqflask/wqflask/api/correlation.py b/wqflask/wqflask/api/correlation.py index f1dd148f..52026a82 100644 --- a/wqflask/wqflask/api/correlation.py +++ b/wqflask/wqflask/api/correlation.py @@ -25,13 +25,16 @@ def do_correlation(start_vars): assert('trait_id' in start_vars) this_dataset = data_set.create_dataset(dataset_name=start_vars['db']) - target_dataset = data_set.create_dataset(dataset_name=start_vars['target_db']) - this_trait = create_trait(dataset=this_dataset, name=start_vars['trait_id']) + target_dataset = data_set.create_dataset( + dataset_name=start_vars['target_db']) + this_trait = create_trait(dataset=this_dataset, + name=start_vars['trait_id']) this_trait = retrieve_sample_data(this_trait, this_dataset) corr_params = init_corr_params(start_vars) - corr_results = calculate_results(this_trait, this_dataset, target_dataset, corr_params) + corr_results = calculate_results( + this_trait, this_dataset, target_dataset, corr_params) #corr_results = collections.OrderedDict(sorted(corr_results.items(), key=lambda t: -abs(t[1][0]))) final_results = [] @@ -75,54 +78,66 @@ def calculate_results(this_trait, this_dataset, target_dataset, corr_params): if corr_params['type'] == "tissue": trait_symbol_dict = this_dataset.retrieve_genes("Symbol") - corr_results = do_tissue_correlation_for_all_traits(this_trait, trait_symbol_dict, corr_params) + corr_results = do_tissue_correlation_for_all_traits( + this_trait, trait_symbol_dict, corr_params) sorted_results = collections.OrderedDict(sorted(list(corr_results.items()), key=lambda t: -abs(t[1][1]))) - elif corr_params['type'] == "literature" or corr_params['type'] == "lit": # ZS: Just so a user can use either "lit" or "literature" + # ZS: Just so a user can use either "lit" or "literature" + elif corr_params['type'] == "literature" or corr_params['type'] == "lit": trait_geneid_dict = this_dataset.retrieve_genes("GeneId") - corr_results = do_literature_correlation_for_all_traits(this_trait, this_dataset, trait_geneid_dict, corr_params) + corr_results = do_literature_correlation_for_all_traits( + this_trait, this_dataset, trait_geneid_dict, corr_params) sorted_results = collections.OrderedDict(sorted(list(corr_results.items()), 
key=lambda t: -abs(t[1][1]))) else: for target_trait, target_vals in list(target_dataset.trait_data.items()): - result = get_sample_r_and_p_values(this_trait, this_dataset, target_vals, target_dataset, corr_params['type']) + result = get_sample_r_and_p_values( + this_trait, this_dataset, target_vals, target_dataset, corr_params['type']) if result is not None: corr_results[target_trait] = result - sorted_results = collections.OrderedDict(sorted(list(corr_results.items()), key=lambda t: -abs(t[1][0]))) + sorted_results = collections.OrderedDict( + sorted(list(corr_results.items()), key=lambda t: -abs(t[1][0]))) return sorted_results def do_tissue_correlation_for_all_traits(this_trait, trait_symbol_dict, corr_params, tissue_dataset_id=1): # Gets tissue expression values for the primary trait - primary_trait_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(symbol_list=[this_trait.symbol]) + primary_trait_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values( + symbol_list=[this_trait.symbol]) if this_trait.symbol.lower() in primary_trait_tissue_vals_dict: - primary_trait_tissue_values = primary_trait_tissue_vals_dict[this_trait.symbol.lower()] + primary_trait_tissue_values = primary_trait_tissue_vals_dict[this_trait.symbol.lower( + )] - corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(symbol_list=list(trait_symbol_dict.values())) + corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values( + symbol_list=list(trait_symbol_dict.values())) tissue_corr_data = {} for trait, symbol in list(trait_symbol_dict.items()): if symbol and symbol.lower() in corr_result_tissue_vals_dict: - this_trait_tissue_values = corr_result_tissue_vals_dict[symbol.lower()] + this_trait_tissue_values = corr_result_tissue_vals_dict[symbol.lower( + )] result = correlation_functions.cal_zero_order_corr_for_tiss(primary_trait_tissue_values, this_trait_tissue_values, corr_params['method']) - tissue_corr_data[trait] = [result[0], result[1], result[2], symbol] + tissue_corr_data[trait] = [ + result[0], result[1], result[2], symbol] return tissue_corr_data def do_literature_correlation_for_all_traits(this_trait, target_dataset, trait_geneid_dict, corr_params): - input_trait_mouse_gene_id = convert_to_mouse_gene_id(target_dataset.group.species.lower(), this_trait.geneid) + input_trait_mouse_gene_id = convert_to_mouse_gene_id( + target_dataset.group.species.lower(), this_trait.geneid) lit_corr_data = {} for trait, gene_id in list(trait_geneid_dict.items()): - mouse_gene_id = convert_to_mouse_gene_id(target_dataset.group.species.lower(), gene_id) + mouse_gene_id = convert_to_mouse_gene_id( + target_dataset.group.species.lower(), gene_id) if mouse_gene_id and str(mouse_gene_id).find(";") == -1: result = g.db.execute( @@ -168,12 +183,15 @@ def get_sample_r_and_p_values(this_trait, this_dataset, target_vals, target_data this_trait_vals.append(this_sample_value) shared_target_vals.append(target_sample_value) - this_trait_vals, shared_target_vals, num_overlap = corr_result_helpers.normalize_values(this_trait_vals, shared_target_vals) + this_trait_vals, shared_target_vals, num_overlap = corr_result_helpers.normalize_values( + this_trait_vals, shared_target_vals) if type == 'pearson': - sample_r, sample_p = scipy.stats.pearsonr(this_trait_vals, shared_target_vals) + sample_r, sample_p = scipy.stats.pearsonr( + this_trait_vals, shared_target_vals) else: - sample_r, sample_p = scipy.stats.spearmanr(this_trait_vals, 
shared_target_vals) + sample_r, sample_p = scipy.stats.spearmanr( + this_trait_vals, shared_target_vals) if num_overlap > 5: if scipy.isnan(sample_r): diff --git a/wqflask/wqflask/api/mapping.py b/wqflask/wqflask/api/mapping.py index 662090d5..c22b44a9 100644 --- a/wqflask/wqflask/api/mapping.py +++ b/wqflask/wqflask/api/mapping.py @@ -37,20 +37,25 @@ def do_mapping_for_api(start_vars): mapping_params = initialize_parameters(start_vars, dataset, this_trait) - covariates = "" # ZS: It seems to take an empty string as default. This should probably be changed. + # ZS: It seems to take an empty string as default. This should probably be changed. + covariates = "" if mapping_params['mapping_method'] == "gemma": header_row = ["name", "chr", "Mb", "lod_score", "p_value"] - if mapping_params['use_loco'] == "True": # ZS: gemma_mapping returns both results and the filename for LOCO, so need to only grab the former for api - result_markers = gemma_mapping.run_gemma(this_trait, dataset, samples, vals, covariates, mapping_params['use_loco'], mapping_params['maf'])[0] + # ZS: gemma_mapping returns both results and the filename for LOCO, so need to only grab the former for api + if mapping_params['use_loco'] == "True": + result_markers = gemma_mapping.run_gemma( + this_trait, dataset, samples, vals, covariates, mapping_params['use_loco'], mapping_params['maf'])[0] else: - result_markers = gemma_mapping.run_gemma(this_trait, dataset, samples, vals, covariates, mapping_params['use_loco'], mapping_params['maf']) + result_markers = gemma_mapping.run_gemma( + this_trait, dataset, samples, vals, covariates, mapping_params['use_loco'], mapping_params['maf']) elif mapping_params['mapping_method'] == "rqtl": header_row = ["name", "chr", "cM", "lod_score"] if mapping_params['num_perm'] > 0: _sperm_output, _suggestive, _significant, result_markers = rqtl_mapping.run_rqtl_geno(vals, dataset, mapping_params['rqtl_method'], mapping_params['rqtl_model'], mapping_params['perm_check'], mapping_params['num_perm'], - mapping_params['do_control'], mapping_params['control_marker'], + mapping_params['do_control'], mapping_params[ + 'control_marker'], mapping_params['manhattan_plot'], mapping_params['pair_scan']) else: result_markers = rqtl_mapping.run_rqtl_geno(vals, dataset, mapping_params['rqtl_method'], mapping_params['rqtl_model'], diff --git a/wqflask/wqflask/api/router.py b/wqflask/wqflask/api/router.py index 4f9cc6e5..e7dfa4e0 100644 --- a/wqflask/wqflask/api/router.py +++ b/wqflask/wqflask/api/router.py @@ -35,7 +35,8 @@ def hello_world(): @app.route("/api/v_{}/species".format(version)) def get_species_list(): - results = g.db.execute("SELECT SpeciesId, Name, FullName, TaxonomyId FROM Species;") + results = g.db.execute( + "SELECT SpeciesId, Name, FullName, TaxonomyId FROM Species;") the_species = results.fetchall() species_list = [] for species in the_species: @@ -313,7 +314,8 @@ def get_dataset_info(dataset_name, group_name=None, file_format="json"): @app.route("/api/v_{}/traits/".format(version), methods=("GET",)) @app.route("/api/v_{}/traits/.".format(version), methods=("GET",)) def fetch_traits(dataset_name, file_format="json"): - trait_ids, trait_names, data_type, dataset_id = get_dataset_trait_ids(dataset_name, request.args) + trait_ids, trait_names, data_type, dataset_id = get_dataset_trait_ids( + dataset_name, request.args) if ("ids_only" in request.args) and (len(trait_ids) > 0): if file_format == "json": filename = dataset_name + "_trait_ids.json" @@ -361,7 +363,8 @@ def fetch_traits(dataset_name, 
file_format="json"): ProbeSet.Id """ - field_list = ["Id", "Name", "Symbol", "Description", "Chr", "Mb", "Aliases", "Mean", "SE", "Locus", "LRS", "P-Value", "Additive", "h2"] + field_list = ["Id", "Name", "Symbol", "Description", "Chr", "Mb", + "Aliases", "Mean", "SE", "Locus", "LRS", "P-Value", "Additive", "h2"] elif data_type == "Geno": query = """ SELECT @@ -378,7 +381,8 @@ def fetch_traits(dataset_name, file_format="json"): Geno.Id """ - field_list = ["Id", "Name", "Marker_Name", "Chr", "Mb", "Sequence", "Source"] + field_list = ["Id", "Name", "Marker_Name", + "Chr", "Mb", "Sequence", "Source"] else: query = """ SELECT @@ -394,7 +398,8 @@ def fetch_traits(dataset_name, file_format="json"): PublishXRef.Id """ - field_list = ["Id", "PhenotypeId", "PublicationId", "Locus", "LRS", "Additive", "Sequence"] + field_list = ["Id", "PhenotypeId", "PublicationId", + "Locus", "LRS", "Additive", "Sequence"] if 'limit_to' in request.args: limit_number = request.args['limit_to'] @@ -442,7 +447,8 @@ def fetch_traits(dataset_name, file_format="json"): @app.route("/api/v_{}/sample_data/".format(version)) @app.route("/api/v_{}/sample_data/.".format(version)) def all_sample_data(dataset_name, file_format="csv"): - trait_ids, trait_names, data_type, dataset_id = get_dataset_trait_ids(dataset_name, request.args) + trait_ids, trait_names, data_type, dataset_id = get_dataset_trait_ids( + dataset_name, request.args) if len(trait_ids) > 0: sample_list = get_samplelist(dataset_name) @@ -676,7 +682,8 @@ def get_trait_info(dataset_name, trait_name, file_format="json"): return flask.jsonify(trait_dict) else: - if "Publish" in dataset_name: # ZS: Check if the user input the dataset_name as BXDPublish, etc (which is always going to be the group name + "Publish" + # ZS: Check if the user input the dataset_name as BXDPublish, etc (which is always going to be the group name + "Publish" + if "Publish" in dataset_name: dataset_name = dataset_name.replace("Publish", "") group_id = get_group_id(dataset_name) @@ -711,7 +718,8 @@ def get_corr_results(): results = correlation.do_correlation(request.args) if len(results) > 0: - return flask.jsonify(results) # ZS: I think flask.jsonify expects a dict/list instead of JSON + # ZS: I think flask.jsonify expects a dict/list instead of JSON + return flask.jsonify(results) else: return return_error(code=204, source=request.url_rule.rule, title="No Results", details="") @@ -768,7 +776,8 @@ def get_genotypes(group_name, file_format="csv", dataset_name=None): output_lines.append(line.split()) i += 1 - csv_writer = csv.writer(si, delimiter="\t", escapechar="\\", quoting = csv.QUOTE_NONE) + csv_writer = csv.writer( + si, delimiter="\t", escapechar="\\", quoting = csv.QUOTE_NONE) else: return return_error(code=204, source=request.url_rule.rule, title="No Results", details="") elif file_format == "rqtl2": @@ -779,18 +788,23 @@ def get_genotypes(group_name, file_format="csv", dataset_name=None): filename = group_name if os.path.isfile("{0}/{1}_geno.csv".format(flat_files("genotype/rqtl2"), group_name)): - yaml_file = json.load(open("{0}/{1}.json".format(flat_files("genotype/rqtl2"), group_name))) + yaml_file = json.load( + open("{0}/{1}.json".format(flat_files("genotype/rqtl2"), group_name))) yaml_file["geno"] = filename + "_geno.csv" yaml_file["gmap"] = filename + "_gmap.csv" yaml_file["pheno"] = filename + "_pheno.csv" config_file = [filename + ".json", json.dumps(yaml_file)] #config_file = [filename + ".yaml", open("{0}/{1}.yaml".format(flat_files("genotype/rqtl2"), group_name))] - 
geno_file = [filename + "_geno.csv", open("{0}/{1}_geno.csv".format(flat_files("genotype/rqtl2"), group_name))] - gmap_file = [filename + "_gmap.csv", open("{0}/{1}_gmap.csv".format(flat_files("genotype/rqtl2"), group_name))] + geno_file = [filename + "_geno.csv", + open("{0}/{1}_geno.csv".format(flat_files("genotype/rqtl2"), group_name))] + gmap_file = [filename + "_gmap.csv", + open("{0}/{1}_gmap.csv".format(flat_files("genotype/rqtl2"), group_name))] if dataset_name: - phenotypes = requests.get("http://gn2.genenetwork.org/api/v_pre1/sample_data/" + dataset_name) + phenotypes = requests.get( + "http://gn2.genenetwork.org/api/v_pre1/sample_data/" + dataset_name) else: - phenotypes = requests.get("http://gn2.genenetwork.org/api/v_pre1/sample_data/" + group_name + "Publish") + phenotypes = requests.get( + "http://gn2.genenetwork.org/api/v_pre1/sample_data/" + group_name + "Publish") with ZipFile(memory_file, 'w', compression=ZIP_DEFLATED) as zf: zf.writestr(config_file[0], config_file[1]) @@ -813,7 +827,8 @@ def get_genotypes(group_name, file_format="csv", dataset_name=None): for line in genofile: if limit_num and i >= limit_num: break - output_lines.append([line.strip() for line in line.split(",")]) + output_lines.append([line.strip() + for line in line.split(",")]) i += 1 csv_writer = csv.writer(si, delimiter=",") @@ -898,7 +913,8 @@ def get_dataset_trait_ids(dataset_name, start_vars): results = g.db.execute(query).fetchall() trait_ids = [result[0] for result in results] - trait_names = [str(result[2]) + "_" + str(result[1]) for result in results] + trait_names = [str(result[2]) + "_" + str(result[1]) + for result in results] return trait_ids, trait_names, data_type, dataset_id diff --git a/wqflask/wqflask/collect.py b/wqflask/wqflask/collect.py index 61f73106..1fcf15f0 100644 --- a/wqflask/wqflask/collect.py +++ b/wqflask/wqflask/collect.py @@ -158,7 +158,8 @@ def remove_traits(): traits_to_remove = params['trait_list'] traits_to_remove = process_traits(traits_to_remove) - members_now = g.user_session.remove_traits_from_collection(uc_id, traits_to_remove) + members_now = g.user_session.remove_traits_from_collection( + uc_id, traits_to_remove) return redirect(url_for("view_collection", uc_id=uc_id)) @@ -181,7 +182,8 @@ def delete_collection(): else: flash("We've deleted the selected collection.", "alert-info") else: - flash("We've deleted the collection: {}.".format(collection_name), "alert-info") + flash("We've deleted the collection: {}.".format( + collection_name), "alert-info") return redirect(url_for('list_collections')) @@ -191,7 +193,8 @@ def view_collection(): params = request.args uc_id = params['uc_id'] - uc = next((collection for collection in g.user_session.user_collections if collection["id"] == uc_id)) + uc = next( + (collection for collection in g.user_session.user_collections if collection["id"] == uc_id)) traits = uc["members"] trait_obs = [] @@ -203,12 +206,14 @@ def view_collection(): name, dataset_name = atrait.split(':') if dataset_name == "Temp": group = name.split("_")[2] - dataset = create_dataset(dataset_name, dataset_type="Temp", group_name=group) + dataset = create_dataset( + dataset_name, dataset_type="Temp", group_name=group) trait_ob = create_trait(name=name, dataset=dataset) else: dataset = create_dataset(dataset_name) trait_ob = create_trait(name=name, dataset=dataset) - trait_ob = retrieve_trait_info(trait_ob, dataset, get_qtl_info=True) + trait_ob = retrieve_trait_info( + trait_ob, dataset, get_qtl_info=True) trait_obs.append(trait_ob) 
json_version.append(jsonable(trait_ob)) diff --git a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py index c135faa3..0fabb833 100644 --- a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py +++ b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py @@ -34,14 +34,16 @@ from flask import Flask, g class ComparisonBarChart: def __init__(self, start_vars): - trait_db_list = [trait.strip() for trait in start_vars['trait_list'].split(',')] + trait_db_list = [trait.strip() + for trait in start_vars['trait_list'].split(',')] helper_functions.get_trait_db_obs(self, trait_db_list) self.all_sample_list = [] self.traits = [] self.insufficient_shared_samples = False - this_group = self.trait_list[0][1].group.name # ZS: Getting initial group name before verifying all traits are in the same group in the following loop + # ZS: Getting initial group name before verifying all traits are in the same group in the following loop + this_group = self.trait_list[0][1].group.name for trait_db in self.trait_list: if trait_db[1].group.name != this_group: diff --git a/wqflask/wqflask/correlation/corr_scatter_plot.py b/wqflask/wqflask/correlation/corr_scatter_plot.py index 6afea715..f9a0ea11 100644 --- a/wqflask/wqflask/correlation/corr_scatter_plot.py +++ b/wqflask/wqflask/correlation/corr_scatter_plot.py @@ -17,17 +17,21 @@ class CorrScatterPlot: def __init__(self, params): if "Temp" in params['dataset_1']: - self.dataset_1 = data_set.create_dataset(dataset_name="Temp", dataset_type="Temp", group_name = params['dataset_1'].split("_")[1]) + self.dataset_1 = data_set.create_dataset( + dataset_name="Temp", dataset_type="Temp", group_name = params['dataset_1'].split("_")[1]) else: self.dataset_1 = data_set.create_dataset(params['dataset_1']) if "Temp" in params['dataset_2']: - self.dataset_2 = data_set.create_dataset(dataset_name="Temp", dataset_type="Temp", group_name = params['dataset_2'].split("_")[1]) + self.dataset_2 = data_set.create_dataset( + dataset_name="Temp", dataset_type="Temp", group_name = params['dataset_2'].split("_")[1]) else: self.dataset_2 = data_set.create_dataset(params['dataset_2']) #self.dataset_3 = data_set.create_dataset(params['dataset_3']) - self.trait_1 = create_trait(name=params['trait_1'], dataset=self.dataset_1) - self.trait_2 = create_trait(name=params['trait_2'], dataset=self.dataset_2) + self.trait_1 = create_trait( + name=params['trait_1'], dataset=self.dataset_1) + self.trait_2 = create_trait( + name=params['trait_2'], dataset=self.dataset_2) #self.trait_3 = create_trait(name=params['trait_3'], dataset=self.dataset_3) self.method = params['method'] @@ -38,10 +42,13 @@ class CorrScatterPlot: if self.dataset_1.group.f1list != None: primary_samples += self.dataset_1.group.f1list - self.trait_1 = retrieve_sample_data(self.trait_1, self.dataset_1, primary_samples) - self.trait_2 = retrieve_sample_data(self.trait_2, self.dataset_2, primary_samples) + self.trait_1 = retrieve_sample_data( + self.trait_1, self.dataset_1, primary_samples) + self.trait_2 = retrieve_sample_data( + self.trait_2, self.dataset_2, primary_samples) - samples_1, samples_2, num_overlap = corr_result_helpers.normalize_values_with_samples(self.trait_1.data, self.trait_2.data) + samples_1, samples_2, num_overlap = corr_result_helpers.normalize_values_with_samples( + self.trait_1.data, self.trait_2.data) self.data = [] self.indIDs = list(samples_1.keys()) @@ -54,7 +61,8 @@ class CorrScatterPlot: 
             vals_2.append(samples_2[sample].value)
         self.data.append(vals_2)
-        slope, intercept, r_value, p_value, std_err = stats.linregress(vals_1, vals_2)
+        slope, intercept, r_value, p_value, std_err = stats.linregress(
+            vals_1, vals_2)
         if slope < 0.001:
             slope_string = '%.3E' % slope
@@ -67,14 +75,16 @@ class CorrScatterPlot:
         x_range = [min(vals_1) - x_buffer, max(vals_1) + x_buffer]
         y_range = [min(vals_2) - y_buffer, max(vals_2) + y_buffer]
-        intercept_coords = get_intercept_coords(slope, intercept, x_range, y_range)
+        intercept_coords = get_intercept_coords(
+            slope, intercept, x_range, y_range)
         rx = stats.rankdata(vals_1)
         ry = stats.rankdata(vals_2)
         self.rdata = []
         self.rdata.append(rx.tolist())
         self.rdata.append(ry.tolist())
-        srslope, srintercept, srr_value, srp_value, srstd_err = stats.linregress(rx, ry)
+        srslope, srintercept, srr_value, srp_value, srstd_err = stats.linregress(
+            rx, ry)
         if srslope < 0.001:
             srslope_string = '%.3E' % srslope
@@ -86,7 +96,8 @@ class CorrScatterPlot:
         sr_range = [min(rx) - x_buffer, max(rx) + x_buffer]
-        sr_intercept_coords = get_intercept_coords(srslope, srintercept, sr_range, sr_range)
+        sr_intercept_coords = get_intercept_coords(
+            srslope, srintercept, sr_range, sr_range)
         self.collections_exist = "False"
         if g.user_session.num_collections > 0:
diff --git a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py
index e2fe1ff4..e75c4a85 100644
--- a/wqflask/wqflask/correlation/show_corr_results.py
+++ b/wqflask/wqflask/correlation/show_corr_results.py
@@ -78,7 +78,8 @@ class CorrelationResults:
         with Bench("Doing correlations"):
             if start_vars['dataset'] == "Temp":
-                self.dataset = data_set.create_dataset(dataset_name="Temp", dataset_type="Temp", group_name = start_vars['group'])
+                self.dataset = data_set.create_dataset(
+                    dataset_name="Temp", dataset_type="Temp", group_name = start_vars['group'])
                 self.trait_id = start_vars['trait_id']
                 self.this_trait = create_trait(dataset=self.dataset,
                                                name=self.trait_id,
@@ -129,12 +130,15 @@ class CorrelationResults:
             if corr_samples_group == 'samples_other':
                 primary_samples = [x for x in primary_samples if x not in (
                     self.dataset.group.parlist + self.dataset.group.f1list)]
-            self.process_samples(start_vars, list(self.this_trait.data.keys()), primary_samples)
+            self.process_samples(start_vars, list(
+                self.this_trait.data.keys()), primary_samples)
-            self.target_dataset = data_set.create_dataset(start_vars['corr_dataset'])
+            self.target_dataset = data_set.create_dataset(
+                start_vars['corr_dataset'])
             self.target_dataset.get_trait_data(list(self.sample_data.keys()))
-            self.header_fields = get_header_fields(self.target_dataset.type, self.corr_method)
+            self.header_fields = get_header_fields(
+                self.target_dataset.type, self.corr_method)
             if self.target_dataset.type == "ProbeSet":
                 self.filter_cols = [7, 6]
@@ -153,7 +157,8 @@ class CorrelationResults:
                     tissue_corr_data = self.do_tissue_correlation_for_all_traits()
                     if tissue_corr_data != None:
                         for trait in list(tissue_corr_data.keys())[:self.return_number]:
-                            self.get_sample_r_and_p_values(trait, self.target_dataset.trait_data[trait])
+                            self.get_sample_r_and_p_values(
+                                trait, self.target_dataset.trait_data[trait])
                     else:
                         for trait, values in list(self.target_dataset.trait_data.items()):
                             self.get_sample_r_and_p_values(trait, values)
@@ -163,7 +168,8 @@ class CorrelationResults:
                     lit_corr_data = self.do_lit_correlation_for_all_traits()
                     for trait in list(lit_corr_data.keys())[:self.return_number]:
-                        self.get_sample_r_and_p_values(trait, self.target_dataset.trait_data[trait])
+                        self.get_sample_r_and_p_values(
+                            trait, self.target_dataset.trait_data[trait])
             elif self.corr_type == "sample":
                 for trait, values in list(self.target_dataset.trait_data.items()):
@@ -180,7 +186,8 @@ class CorrelationResults:
                     range_chr_as_int = order_id
             for _trait_counter, trait in enumerate(list(self.correlation_data.keys())[:self.return_number]):
-                trait_object = create_trait(dataset=self.target_dataset, name=trait, get_qtl_info=True, get_sample_info=False)
+                trait_object = create_trait(
+                    dataset=self.target_dataset, name=trait, get_qtl_info=True, get_sample_info=False)
                 if not trait_object:
                     continue
@@ -235,7 +242,8 @@ class CorrelationResults:
             if self.corr_type != "tissue" and self.dataset.type == "ProbeSet" and self.target_dataset.type == "ProbeSet":
                 self.do_tissue_correlation_for_trait_list()
-        self.json_results = generate_corr_json(self.correlation_results, self.this_trait, self.dataset, self.target_dataset)
+        self.json_results = generate_corr_json(
+            self.correlation_results, self.this_trait, self.dataset, self.target_dataset)
 ############################################################################################################################################
@@ -263,15 +271,18 @@ class CorrelationResults:
             symbol_list=[self.this_trait.symbol])
         if self.this_trait.symbol.lower() in primary_trait_tissue_vals_dict:
-            primary_trait_tissue_values = primary_trait_tissue_vals_dict[self.this_trait.symbol.lower()]
-            gene_symbol_list = [trait.symbol for trait in self.correlation_results if trait.symbol]
+            primary_trait_tissue_values = primary_trait_tissue_vals_dict[self.this_trait.symbol.lower(
+            )]
+            gene_symbol_list = [
+                trait.symbol for trait in self.correlation_results if trait.symbol]
             corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(
                 symbol_list=gene_symbol_list)
             for trait in self.correlation_results:
                 if trait.symbol and trait.symbol.lower() in corr_result_tissue_vals_dict:
-                    this_trait_tissue_values = corr_result_tissue_vals_dict[trait.symbol.lower()]
+                    this_trait_tissue_values = corr_result_tissue_vals_dict[trait.symbol.lower(
+                    )]
                     result = correlation_functions.cal_zero_order_corr_for_tiss(primary_trait_tissue_values,
                                                                                 this_trait_tissue_values,
@@ -286,7 +297,8 @@ class CorrelationResults:
             symbol_list=[self.this_trait.symbol])
         if self.this_trait.symbol.lower() in primary_trait_tissue_vals_dict:
-            primary_trait_tissue_values = primary_trait_tissue_vals_dict[self.this_trait.symbol.lower()]
+            primary_trait_tissue_values = primary_trait_tissue_vals_dict[self.this_trait.symbol.lower(
+            )]
             #print("trait_gene_symbols: ", pf(trait_gene_symbols.values()))
             corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(
@@ -299,7 +311,8 @@ class CorrelationResults:
             tissue_corr_data = {}
             for trait, symbol in list(self.trait_symbol_dict.items()):
                 if symbol and symbol.lower() in corr_result_tissue_vals_dict:
-                    this_trait_tissue_values = corr_result_tissue_vals_dict[symbol.lower()]
+                    this_trait_tissue_values = corr_result_tissue_vals_dict[symbol.lower(
+                    )]
                     result = correlation_functions.cal_zero_order_corr_for_tiss(primary_trait_tissue_values,
                                                                                 this_trait_tissue_values,
@@ -314,12 +327,14 @@ class CorrelationResults:
     def do_lit_correlation_for_trait_list(self):
-        input_trait_mouse_gene_id = self.convert_to_mouse_gene_id(self.dataset.group.species.lower(), self.this_trait.geneid)
+        input_trait_mouse_gene_id = self.convert_to_mouse_gene_id(
+            self.dataset.group.species.lower(), self.this_trait.geneid)
         for trait in self.correlation_results:
             if trait.geneid:
-                trait.mouse_gene_id = self.convert_to_mouse_gene_id(self.dataset.group.species.lower(), trait.geneid)
+                trait.mouse_gene_id = self.convert_to_mouse_gene_id(
+                    self.dataset.group.species.lower(), trait.geneid)
             else:
                 trait.mouse_gene_id = None
@@ -348,11 +363,13 @@ class CorrelationResults:
                 trait.lit_corr = 0
     def do_lit_correlation_for_all_traits(self):
-        input_trait_mouse_gene_id = self.convert_to_mouse_gene_id(self.dataset.group.species.lower(), self.this_trait.geneid)
+        input_trait_mouse_gene_id = self.convert_to_mouse_gene_id(
+            self.dataset.group.species.lower(), self.this_trait.geneid)
         lit_corr_data = {}
         for trait, gene_id in list(self.trait_geneid_dict.items()):
-            mouse_gene_id = self.convert_to_mouse_gene_id(self.dataset.group.species.lower(), gene_id)
+            mouse_gene_id = self.convert_to_mouse_gene_id(
+                self.dataset.group.species.lower(), gene_id)
             if mouse_gene_id and str(mouse_gene_id).find(";") == -1:
                 #print("gene_symbols:", input_trait_mouse_gene_id + " / " + mouse_gene_id)
@@ -438,21 +455,26 @@ class CorrelationResults:
                     self.this_trait_vals.append(sample_value)
                     target_vals.append(target_sample_value)
-        self.this_trait_vals, target_vals, num_overlap = corr_result_helpers.normalize_values(self.this_trait_vals, target_vals)
+        self.this_trait_vals, target_vals, num_overlap = corr_result_helpers.normalize_values(
+            self.this_trait_vals, target_vals)
         if num_overlap > 5:  # ZS: 2015 could add biweight correlation, see http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3465711/
             if self.corr_method == 'bicor':
-                sample_r, sample_p = do_bicor(self.this_trait_vals, target_vals)
+                sample_r, sample_p = do_bicor(
+                    self.this_trait_vals, target_vals)
             elif self.corr_method == 'pearson':
-                sample_r, sample_p = scipy.stats.pearsonr(self.this_trait_vals, target_vals)
+                sample_r, sample_p = scipy.stats.pearsonr(
+                    self.this_trait_vals, target_vals)
             else:
-                sample_r, sample_p = scipy.stats.spearmanr(self.this_trait_vals, target_vals)
+                sample_r, sample_p = scipy.stats.spearmanr(
+                    self.this_trait_vals, target_vals)
             if numpy.isnan(sample_r):
                 pass
             else:
-                self.correlation_data[trait] = [sample_r, sample_p, num_overlap]
+                self.correlation_data[trait] = [
+                    sample_r, sample_p, num_overlap]
     def process_samples(self, start_vars, sample_names, excluded_samples=None):
         if not excluded_samples:
@@ -478,7 +500,8 @@ def do_bicor(this_trait_vals, target_trait_vals):
     this_vals = ro.Vector(this_trait_vals)
     target_vals = ro.Vector(target_trait_vals)
-    the_r, the_p, _fisher_transform, _the_t, _n_obs = [numpy.asarray(x) for x in r_bicor(x=this_vals, y=target_vals)]
+    the_r, the_p, _fisher_transform, _the_t, _n_obs = [
+        numpy.asarray(x) for x in r_bicor(x=this_vals, y=target_vals)]
     return the_r, the_p
@@ -492,7 +515,8 @@ def generate_corr_json(corr_results, this_trait, dataset, target_dataset, for_ap
         results_dict['index'] = i + 1
         results_dict['trait_id'] = trait.name
         results_dict['dataset'] = trait.dataset.name
-        results_dict['hmac'] = hmac.data_hmac('{}:{}'.format(trait.name, trait.dataset.name))
+        results_dict['hmac'] = hmac.data_hmac(
+            '{}:{}'.format(trait.name, trait.dataset.name))
         if target_dataset.type == "ProbeSet":
             results_dict['symbol'] = trait.symbol
             results_dict['description'] = "N/A"
@@ -543,7 +567,8 @@ def generate_corr_json(corr_results, this_trait, dataset, target_dataset, for_ap
             if bool(trait.authors):
                 authors_list = trait.authors.split(',')
                 if len(authors_list) > 6:
-                    results_dict['authors_display'] = ", ".join(authors_list[:6]) + ", et al."
+                    results_dict['authors_display'] = ", ".join(
+                        authors_list[:6]) + ", et al."
                 else:
                     results_dict['authors_display'] = trait.authors
             if bool(trait.pubmed_id):
diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py
index 331cb1dc..aefb4453 100644
--- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py
+++ b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py
@@ -44,7 +44,8 @@ THIRTY_DAYS = 60 * 60 * 24 * 30
 class CorrelationMatrix:
     def __init__(self, start_vars):
-        trait_db_list = [trait.strip() for trait in start_vars['trait_list'].split(',')]
+        trait_db_list = [trait.strip()
+                         for trait in start_vars['trait_list'].split(',')]
         helper_functions.get_trait_db_obs(self, trait_db_list)
@@ -52,7 +53,8 @@ class CorrelationMatrix:
         self.traits = []
         self.insufficient_shared_samples = False
         self.do_PCA = True
-        this_group = self.trait_list[0][1].group.name # ZS: Getting initial group name before verifying all traits are in the same group in the following loop
+        # ZS: Getting initial group name before verifying all traits are in the same group in the following loop
+        this_group = self.trait_list[0][1].group.name
         for trait_db in self.trait_list:
             this_group = trait_db[1].group.name
             this_trait = trait_db[0]
@@ -76,10 +78,12 @@ class CorrelationMatrix:
                     this_trait_vals.append('')
             self.sample_data.append(this_trait_vals)
-            if len(this_trait_vals) < len(self.trait_list): # Shouldn't do PCA if there are more traits than observations/samples
+            # Shouldn't do PCA if there are more traits than observations/samples
+            if len(this_trait_vals) < len(self.trait_list):
                 self.do_PCA = False
-        self.lowest_overlap = 8 # ZS: Variable set to the lowest overlapping samples in order to notify user, or 8, whichever is lower (since 8 is when we want to display warning)
+        # ZS: Variable set to the lowest overlapping samples in order to notify user, or 8, whichever is lower (since 8 is when we want to display warning)
+        self.lowest_overlap = 8
         self.corr_results = []
         self.pca_corr_results = []
@@ -112,7 +116,8 @@ class CorrelationMatrix:
                     if sample in self.shared_samples_list:
                         self.shared_samples_list.remove(sample)
-                this_trait_vals, target_vals, num_overlap = corr_result_helpers.normalize_values(this_trait_vals, target_vals)
+                this_trait_vals, target_vals, num_overlap = corr_result_helpers.normalize_values(
+                    this_trait_vals, target_vals)
                 if num_overlap < self.lowest_overlap:
                     self.lowest_overlap = num_overlap
@@ -120,21 +125,25 @@ class CorrelationMatrix:
                     corr_result_row.append([target_trait, 0, num_overlap])
                     pca_corr_result_row.append(0)
                 else:
-                    pearson_r, pearson_p = scipy.stats.pearsonr(this_trait_vals, target_vals)
+                    pearson_r, pearson_p = scipy.stats.pearsonr(
+                        this_trait_vals, target_vals)
                     if is_spearman == False:
                         sample_r, sample_p = pearson_r, pearson_p
                         if sample_r == 1:
                             is_spearman = True
                     else:
-                        sample_r, sample_p = scipy.stats.spearmanr(this_trait_vals, target_vals)
+                        sample_r, sample_p = scipy.stats.spearmanr(
+                            this_trait_vals, target_vals)
-                    corr_result_row.append([target_trait, sample_r, num_overlap])
+                    corr_result_row.append(
+                        [target_trait, sample_r, num_overlap])
                     pca_corr_result_row.append(pearson_r)
             self.corr_results.append(corr_result_row)
             self.pca_corr_results.append(pca_corr_result_row)
-        self.export_filename, self.export_filepath = export_corr_matrix(self.corr_results)
+        self.export_filename, self.export_filepath = export_corr_matrix(
+            self.corr_results)
         self.trait_data_array = []
         for trait_db in self.trait_list:
@@ -156,12 +165,14 @@ class CorrelationMatrix: try: corr_result_eigen = np.linalg.eig(np.array(self.pca_corr_results)) - corr_eigen_value, corr_eigen_vectors = sortEigenVectors(corr_result_eigen) + corr_eigen_value, corr_eigen_vectors = sortEigenVectors( + corr_result_eigen) if self.do_PCA == True: self.pca_works = "True" self.pca_trait_ids = [] - pca = self.calculate_pca(list(range(len(self.traits))), corr_eigen_value, corr_eigen_vectors) + pca = self.calculate_pca( + list(range(len(self.traits))), corr_eigen_value, corr_eigen_vectors) self.loadings_array = self.process_loadings() else: self.pca_works = "False" @@ -179,7 +190,8 @@ class CorrelationMatrix: base = importr('base') stats = importr('stats') - corr_results_to_list = robjects.FloatVector([item for sublist in self.pca_corr_results for item in sublist]) + corr_results_to_list = robjects.FloatVector( + [item for sublist in self.pca_corr_results for item in sublist]) m = robjects.r.matrix(corr_results_to_list, nrow=len(cols)) eigen = base.eigen(m) @@ -198,10 +210,12 @@ class CorrelationMatrix: pca_traits.append((vector * -1.0).tolist()) this_group_name = self.trait_list[0][1].group.name - temp_dataset = data_set.create_dataset(dataset_name="Temp", dataset_type="Temp", group_name = this_group_name) + temp_dataset = data_set.create_dataset( + dataset_name="Temp", dataset_type="Temp", group_name = this_group_name) temp_dataset.group.get_samplelist() for i, pca_trait in enumerate(pca_traits): - trait_id = "PCA" + str(i + 1) + "_" + temp_dataset.group.species + "_" + this_group_name + "_" + datetime.datetime.now().strftime("%m%d%H%M%S") + trait_id = "PCA" + str(i + 1) + "_" + temp_dataset.group.species + "_" + \ + this_group_name + "_" + datetime.datetime.now().strftime("%m%d%H%M%S") this_vals_string = "" position = 0 for sample in temp_dataset.group.all_samples_ordered(): @@ -235,17 +249,23 @@ class CorrelationMatrix: def export_corr_matrix(corr_results): - corr_matrix_filename = "corr_matrix_" + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) - matrix_export_path = "{}{}.csv".format(GENERATED_TEXT_DIR, corr_matrix_filename) + corr_matrix_filename = "corr_matrix_" + \ + ''.join(random.choice(string.ascii_uppercase + string.digits) + for _ in range(6)) + matrix_export_path = "{}{}.csv".format( + GENERATED_TEXT_DIR, corr_matrix_filename) with open(matrix_export_path, "w+") as output_file: - output_file.write("Time/Date: " + datetime.datetime.now().strftime("%x / %X") + "\n") + output_file.write( + "Time/Date: " + datetime.datetime.now().strftime("%x / %X") + "\n") output_file.write("\n") output_file.write("Correlation ") for i, item in enumerate(corr_results[0]): - output_file.write("Trait" + str(i + 1) + ": " + str(item[0].dataset.name) + "::" + str(item[0].name) + "\t") + output_file.write("Trait" + str(i + 1) + ": " + \ + str(item[0].dataset.name) + "::" + str(item[0].name) + "\t") output_file.write("\n") for i, row in enumerate(corr_results): - output_file.write("Trait" + str(i + 1) + ": " + str(row[0][0].dataset.name) + "::" + str(row[0][0].name) + "\t") + output_file.write("Trait" + str(i + 1) + ": " + \ + str(row[0][0].dataset.name) + "::" + str(row[0][0].name) + "\t") for item in row: output_file.write(str(item[1]) + "\t") output_file.write("\n") @@ -254,10 +274,12 @@ def export_corr_matrix(corr_results): output_file.write("\n") output_file.write("N ") for i, item in enumerate(corr_results[0]): - output_file.write("Trait" + str(i) + ": " + str(item[0].dataset.name) + "::" + str(item[0].name) + 
"\t") + output_file.write("Trait" + str(i) + ": " + \ + str(item[0].dataset.name) + "::" + str(item[0].name) + "\t") output_file.write("\n") for i, row in enumerate(corr_results): - output_file.write("Trait" + str(i) + ": " + str(row[0][0].dataset.name) + "::" + str(row[0][0].name) + "\t") + output_file.write("Trait" + str(i) + ": " + \ + str(row[0][0].dataset.name) + "::" + str(row[0][0].name) + "\t") for item in row: output_file.write(str(item[2]) + "\t") output_file.write("\n") diff --git a/wqflask/wqflask/ctl/ctl_analysis.py b/wqflask/wqflask/ctl/ctl_analysis.py index ec66e59f..48a82435 100644 --- a/wqflask/wqflask/ctl/ctl_analysis.py +++ b/wqflask/wqflask/ctl/ctl_analysis.py @@ -46,13 +46,18 @@ class CTL: #log = r_file("/tmp/genenetwork_ctl.log", open = "wt") # r_sink(log) # Uncomment the r_sink() commands to log output from stdout/stderr to a file #r_sink(log, type = "message") - r_library("ctl") # Load CTL - Should only be done once, since it is quite expensive + # Load CTL - Should only be done once, since it is quite expensive + r_library("ctl") r_options(stringsAsFactors=False) logger.info("Initialization of CTL done, package loaded in R session") - self.r_CTLscan = ro.r["CTLscan"] # Map the CTLscan function - self.r_CTLsignificant = ro.r["CTLsignificant"] # Map the CTLsignificant function - self.r_lineplot = ro.r["ctl.lineplot"] # Map the ctl.lineplot function - self.r_plotCTLobject = ro.r["plot.CTLobject"] # Map the CTLsignificant function + # Map the CTLscan function + self.r_CTLscan = ro.r["CTLscan"] + # Map the CTLsignificant function + self.r_CTLsignificant = ro.r["CTLsignificant"] + # Map the ctl.lineplot function + self.r_lineplot = ro.r["ctl.lineplot"] + # Map the CTLsignificant function + self.r_plotCTLobject = ro.r["plot.CTLobject"] self.nodes_list = [] self.edges_list = [] logger.info("Obtained pointers to CTL functions") @@ -81,7 +86,8 @@ class CTL: def run_analysis(self, requestform): logger.info("Starting CTL analysis on dataset") - self.trait_db_list = [trait.strip() for trait in requestform['trait_list'].split(',')] + self.trait_db_list = [trait.strip() + for trait in requestform['trait_list'].split(',')] self.trait_db_list = [x for x in self.trait_db_list if x] logger.debug("strategy:", requestform.get("strategy")) @@ -113,9 +119,11 @@ class CTL: markers.append(marker["genotypes"]) genotypes = list(itertools.chain(*markers)) - logger.debug(len(genotypes) / len(individuals), "==", len(parser.markers)) + logger.debug(len(genotypes) / len(individuals), + "==", len(parser.markers)) - rGeno = r_t(ro.r.matrix(r_unlist(genotypes), nrow=len(markernames), ncol=len(individuals), dimnames=r_list(markernames, individuals), byrow=True)) + rGeno = r_t(ro.r.matrix(r_unlist(genotypes), nrow=len(markernames), ncol=len( + individuals), dimnames=r_list(markernames, individuals), byrow=True)) # Create a phenotype matrix traits = [] @@ -131,7 +139,8 @@ class CTL: else: traits.append("-999") - rPheno = r_t(ro.r.matrix(r_as_numeric(r_unlist(traits)), nrow=len(self.trait_db_list), ncol=len(individuals), dimnames=r_list(self.trait_db_list, individuals), byrow=True)) + rPheno = r_t(ro.r.matrix(r_as_numeric(r_unlist(traits)), nrow=len(self.trait_db_list), ncol=len( + individuals), dimnames=r_list(self.trait_db_list, individuals), byrow=True)) logger.debug(rPheno) @@ -144,7 +153,8 @@ class CTL: #r_write_table(rPheno, "~/outputGN/pheno.csv") # Perform the CTL scan - res = self.r_CTLscan(rGeno, rPheno, strategy=strategy, nperm=nperm, parametric = parametric, nthreads=6) + res = 
self.r_CTLscan(rGeno, rPheno, strategy=strategy, + nperm=nperm, parametric = parametric, nthreads=6) # Get significant interactions significant = self.r_CTLsignificant(res, significance=significance) @@ -155,20 +165,27 @@ class CTL: self.results['imgloc1'] = GENERATED_IMAGE_DIR + self.results['imgurl1'] self.results['ctlresult'] = significant - self.results['requestform'] = requestform # Store the user specified parameters for the output page + # Store the user specified parameters for the output page + self.results['requestform'] = requestform # Create the lineplot - r_png(self.results['imgloc1'], width=1000, height=600, type='cairo-png') + r_png(self.results['imgloc1'], width=1000, + height=600, type='cairo-png') self.r_lineplot(res, significance=significance) r_dev_off() - n = 2 # We start from 2, since R starts from 1 :) + # We start from 2, since R starts from 1 :) + n = 2 for trait in self.trait_db_list: # Create the QTL like CTL plots - self.results['imgurl' + str(n)] = webqtlUtil.genRandStr("CTL_") + ".png" - self.results['imgloc' + str(n)] = GENERATED_IMAGE_DIR + self.results['imgurl' + str(n)] - r_png(self.results['imgloc' + str(n)], width=1000, height=600, type='cairo-png') - self.r_plotCTLobject(res, (n - 1), significance=significance, main='Phenotype ' + trait) + self.results['imgurl' + \ + str(n)] = webqtlUtil.genRandStr("CTL_") + ".png" + self.results['imgloc' + str(n)] = GENERATED_IMAGE_DIR + \ + self.results['imgurl' + str(n)] + r_png(self.results['imgloc' + str(n)], + width=1000, height=600, type='cairo-png') + self.r_plotCTLobject( + res, (n - 1), significance=significance, main='Phenotype ' + trait) r_dev_off() n = n + 1 @@ -178,17 +195,24 @@ class CTL: # Create the interactive graph for cytoscape visualization (Nodes and Edges) if not isinstance(significant, ri.RNULLType): for x in range(len(significant[0])): - logger.debug(significant[0][x], significant[1][x], significant[2][x]) # Debug to console - tsS = significant[0][x].split(':') # Source - tsT = significant[2][x].split(':') # Target - gtS = create_trait(name=tsS[0], dataset_name=tsS[1]) # Retrieve Source info from the DB - gtT = create_trait(name=tsT[0], dataset_name=tsT[1]) # Retrieve Target info from the DB + logger.debug(significant[0][x], significant[1] + [x], significant[2][x]) # Debug to console + # Source + tsS = significant[0][x].split(':') + # Target + tsT = significant[2][x].split(':') + # Retrieve Source info from the DB + gtS = create_trait(name=tsS[0], dataset_name=tsS[1]) + # Retrieve Target info from the DB + gtT = create_trait(name=tsT[0], dataset_name=tsT[1]) self.addNode(gtS) self.addNode(gtT) self.addEdge(gtS, gtT, significant, x) - significant[0][x] = "{} ({})".format(gtS.symbol, gtS.name) # Update the trait name for the displayed table - significant[2][x] = "{} ({})".format(gtT.symbol, gtT.name) # Update the trait name for the displayed table + # Update the trait name for the displayed table + significant[0][x] = "{} ({})".format(gtS.symbol, gtS.name) + # Update the trait name for the displayed table + significant[2][x] = "{} ({})".format(gtT.symbol, gtT.name) self.elements = json.dumps(self.nodes_list + self.edges_list) diff --git a/wqflask/wqflask/do_search.py b/wqflask/wqflask/do_search.py index 7442dc72..e50ff50b 100644 --- a/wqflask/wqflask/do_search.py +++ b/wqflask/wqflask/do_search.py @@ -26,14 +26,16 @@ class DoSearch: def __init__(self, search_term, search_operator=None, dataset=None, search_type=None): self.search_term = search_term # Make sure search_operator is something we 
expect - assert search_operator in (None, "=", "<", ">", "<=", ">="), "Bad search operator" + assert search_operator in ( + None, "=", "<", ">", "<=", ">="), "Bad search operator" self.search_operator = search_operator self.dataset = dataset self.search_type = search_type if self.dataset: # Get group information for dataset and the species id - self.species_id = webqtlDatabaseFunction.retrieve_species_id(self.dataset.group.name) + self.species_id = webqtlDatabaseFunction.retrieve_species_id( + self.dataset.group.name) def execute(self, query): """Executes query and returns results""" @@ -104,7 +106,8 @@ class MrnaAssaySearch(DoSearch): search_string = escape(self.search_term[0]) if self.search_term[0] != "*": - match_clause = """((MATCH (ProbeSet.symbol) AGAINST ('%s' IN BOOLEAN MODE))) and """ % (search_string) + match_clause = """((MATCH (ProbeSet.symbol) AGAINST ('%s' IN BOOLEAN MODE))) and """ % ( + search_string) else: match_clause = "" @@ -223,16 +226,19 @@ class PhenotypeSearch(DoSearch): # and comment here # if "'" not in self.search_term[0]: - search_term = "[[:<:]]" + self.handle_wildcard(self.search_term[0]) + "[[:>:]]" + search_term = "[[:<:]]" + \ + self.handle_wildcard(self.search_term[0]) + "[[:>:]]" if "_" in self.search_term[0]: if len(self.search_term[0].split("_")[0]) == 3: - search_term = "[[:<:]]" + self.handle_wildcard(self.search_term[0].split("_")[1]) + "[[:>:]]" + search_term = "[[:<:]]" + self.handle_wildcard( + self.search_term[0].split("_")[1]) + "[[:>:]]" # This adds a clause to the query that matches the search term # against each field in the search_fields tuple where_clause_list = [] for field in self.search_fields: - where_clause_list.append('''%s REGEXP "%s"''' % (field, search_term)) + where_clause_list.append('''%s REGEXP "%s"''' % + (field, search_term)) where_clause = "(%s) " % ' OR '.join(where_clause_list) return where_clause @@ -364,7 +370,8 @@ class GenotypeSearch(DoSearch): if self.search_term[0] == "*": self.query = self.compile_final_query() else: - self.query = self.compile_final_query(where_clause=self.get_where_clause()) + self.query = self.compile_final_query( + where_clause=self.get_where_clause()) return self.execute(self.query) @@ -497,7 +504,8 @@ class LrsSearch(DoSearch): where_clause = """ %sXRef.LRS > %s and %sXRef.LRS < %s """ % self.mescape(self.dataset.type, - min(lrs_min, lrs_max), + min(lrs_min, + lrs_max), self.dataset.type, max(lrs_min, lrs_max)) @@ -537,7 +545,8 @@ class LrsSearch(DoSearch): self.from_clause = self.get_from_clause() self.where_clause = self.get_where_clause() - self.query = self.compile_final_query(self.from_clause, self.where_clause) + self.query = self.compile_final_query( + self.from_clause, self.where_clause) return self.execute(self.query) @@ -551,7 +560,8 @@ class MrnaLrsSearch(LrsSearch, MrnaAssaySearch): self.from_clause = self.get_from_clause() self.where_clause = self.get_where_clause() - self.query = self.compile_final_query(from_clause=self.from_clause, where_clause=self.where_clause) + self.query = self.compile_final_query( + from_clause=self.from_clause, where_clause=self.where_clause) return self.execute(self.query) @@ -566,7 +576,8 @@ class PhenotypeLrsSearch(LrsSearch, PhenotypeSearch): self.from_clause = self.get_from_clause() self.where_clause = self.get_where_clause() - self.query = self.compile_final_query(from_clause=self.from_clause, where_clause=self.where_clause) + self.query = self.compile_final_query( + from_clause=self.from_clause, where_clause=self.where_clause) return 
self.execute(self.query) @@ -593,7 +604,8 @@ class CisTransLrsSearch(DoSearch): elif len(self.search_term) == 3: lrs_min, lrs_max, self.mb_buffer = self.search_term elif len(self.search_term) == 4: - lrs_min, lrs_max, self.mb_buffer = [float(value) for value in self.search_term[:3]] + lrs_min, lrs_max, self.mb_buffer = [ + float(value) for value in self.search_term[:3]] chromosome = self.search_term[3] if "Chr" in chromosome or "chr" in chromosome: chromosome = int(chromosome[3:]) @@ -636,14 +648,19 @@ class CisTransLrsSearch(DoSearch): if chromosome: location_clause = "(%s.Chr = '%s' and %s.Chr = Geno.Chr and ABS(%s.Mb-Geno.Mb) %s %s) or (%s.Chr != Geno.Chr and Geno.Chr = '%s')" % (escape(self.dataset.type), chromosome, - escape(self.dataset.type), - escape(self.dataset.type), + escape( + self.dataset.type), + escape( + self.dataset.type), the_operator, - escape(str(self.mb_buffer)), - escape(self.dataset.type), + escape( + str(self.mb_buffer)), + escape( + self.dataset.type), chromosome) else: - location_clause = "(ABS(%s.Mb-Geno.Mb) %s %s and %s.Chr = Geno.Chr) or (%s.Chr != Geno.Chr)" % (escape(self.dataset.type), the_operator, escape(str(self.mb_buffer)), escape(self.dataset.type), escape(self.dataset.type)) + location_clause = "(ABS(%s.Mb-Geno.Mb) %s %s and %s.Chr = Geno.Chr) or (%s.Chr != Geno.Chr)" % (escape( + self.dataset.type), the_operator, escape(str(self.mb_buffer)), escape(self.dataset.type), escape(self.dataset.type)) where_clause = sub_clause + """ %sXRef.Locus = Geno.name and Geno.SpeciesId = %s and @@ -683,7 +700,8 @@ class CisLrsSearch(CisTransLrsSearch, MrnaAssaySearch): self.from_clause = self.get_from_clause() self.where_clause = self.get_where_clause() - self.query = self.compile_final_query(self.from_clause, self.where_clause) + self.query = self.compile_final_query( + self.from_clause, self.where_clause) return self.execute(self.query) @@ -714,7 +732,8 @@ class TransLrsSearch(CisTransLrsSearch, MrnaAssaySearch): self.from_clause = self.get_from_clause() self.where_clause = self.get_where_clause() - self.query = self.compile_final_query(self.from_clause, self.where_clause) + self.query = self.compile_final_query( + self.from_clause, self.where_clause) return self.execute(self.query) @@ -733,7 +752,8 @@ class MeanSearch(MrnaAssaySearch): where_clause = """ %sXRef.mean > %s and %sXRef.mean < %s """ % self.mescape(self.dataset.type, - min(self.mean_min, self.mean_max), + min(self.mean_min, + self.mean_max), self.dataset.type, max(self.mean_min, self.mean_max)) else: @@ -796,7 +816,8 @@ class PositionSearch(DoSearch): DoSearch.search_types[search_key] = "PositionSearch" def get_where_clause(self): - self.search_term = [float(value) if is_number(value) else value for value in self.search_term] + self.search_term = [float(value) if is_number( + value) else value for value in self.search_term] chr, self.mb_min, self.mb_max = self.search_term[:3] self.chr = str(chr).lower() self.get_chr() @@ -806,7 +827,8 @@ class PositionSearch(DoSearch): %s.Mb < %s """ % self.mescape(self.dataset.type, self.chr, self.dataset.type, - min(self.mb_min, self.mb_max), + min(self.mb_min, + self.mb_max), self.dataset.type, max(self.mb_min, self.mb_max)) @@ -923,7 +945,8 @@ def get_aliases(symbol, species): return [] filtered_aliases = [] - response = requests.get(GN2_BASE_URL + "/gn3/gene/aliases/" + symbol_string) + response = requests.get( + GN2_BASE_URL + "/gn3/gene/aliases/" + symbol_string) if response: alias_list = json.loads(response.content) diff --git a/wqflask/wqflask/docs.py 
b/wqflask/wqflask/docs.py index 81424b9c..fc93248a 100644 --- a/wqflask/wqflask/docs.py +++ b/wqflask/wqflask/docs.py @@ -35,11 +35,13 @@ class Docs: def update_text(start_vars): content = start_vars['ckcontent'] - content = content.replace('%', '%%').replace('"', '\\"').replace("'", "\\'") + content = content.replace('%', '%%').replace( + '"', '\\"').replace("'", "\\'") try: if g.user_session.record['user_email_address'] == "zachary.a.sloan@gmail.com" or g.user_session.record['user_email_address'] == "labwilliams@gmail.com": - sql = "UPDATE Docs SET content='{0}' WHERE entry='{1}';".format(content, start_vars['entry_type']) + sql = "UPDATE Docs SET content='{0}' WHERE entry='{1}';".format( + content, start_vars['entry_type']) g.db.execute(sql) except: pass diff --git a/wqflask/wqflask/export_traits.py b/wqflask/wqflask/export_traits.py index d0745ef7..5bd54f9d 100644 --- a/wqflask/wqflask/export_traits.py +++ b/wqflask/wqflask/export_traits.py @@ -35,9 +35,12 @@ def export_search_results_csv(targs): metadata.append(["Data Set: " + targs['database_name']]) if 'accession_id' in targs: if targs['accession_id'] != "None": - metadata.append(["Metadata Link: http://genenetwork.org/webqtl/main.py?FormID=sharinginfo&GN_AccessionId=" + targs['accession_id']]) - metadata.append(["Export Date: " + datetime.datetime.now().strftime("%B %d, %Y")]) - metadata.append(["Export Time: " + datetime.datetime.now().strftime("%H:%M GMT")]) + metadata.append( + ["Metadata Link: http://genenetwork.org/webqtl/main.py?FormID=sharinginfo&GN_AccessionId=" + targs['accession_id']]) + metadata.append( + ["Export Date: " + datetime.datetime.now().strftime("%B %d, %Y")]) + metadata.append( + ["Export Time: " + datetime.datetime.now().strftime("%H:%M GMT")]) if 'search_string' in targs: if targs['search_string'] != "None": metadata.append(["Search Query: " + targs['search_string']]) @@ -52,10 +55,12 @@ def export_search_results_csv(targs): for trait in table_rows: trait_name, dataset_name, _hash = trait.split(":") trait_ob = create_trait(name=trait_name, dataset_name=dataset_name) - trait_ob = retrieve_trait_info(trait_ob, trait_ob.dataset, get_qtl_info=True) + trait_ob = retrieve_trait_info( + trait_ob, trait_ob.dataset, get_qtl_info=True) trait_list.append(trait_ob) - table_headers = ['Index', 'URL', 'Species', 'Group', 'Dataset', 'Record ID', 'Symbol', 'Description', 'ProbeTarget', 'PubMed_ID', 'Chr', 'Mb', 'Alias', 'Gene_ID', 'Homologene_ID', 'UniGene_ID', 'Strand_Probe', 'Probe_set_specificity', 'Probe_set_BLAT_score', 'Probe_set_BLAT_Mb_start', 'Probe_set_BLAT_Mb_end', 'QTL_Chr', 'QTL_Mb', 'Locus_at_Peak', 'Max_LRS', 'P_value_of_MAX', 'Mean_Expression'] + table_headers = ['Index', 'URL', 'Species', 'Group', 'Dataset', 'Record ID', 'Symbol', 'Description', 'ProbeTarget', 'PubMed_ID', 'Chr', 'Mb', 'Alias', 'Gene_ID', 'Homologene_ID', 'UniGene_ID', + 'Strand_Probe', 'Probe_set_specificity', 'Probe_set_BLAT_score', 'Probe_set_BLAT_Mb_start', 'Probe_set_BLAT_Mb_end', 'QTL_Chr', 'QTL_Mb', 'Locus_at_Peak', 'Max_LRS', 'P_value_of_MAX', 'Mean_Expression'] traits_by_group = sort_traits_by_group(trait_list) @@ -87,7 +92,8 @@ def export_search_results_csv(targs): trait_symbol = "N/A" row_contents = [ i + 1, - "https://genenetwork.org/show_trait?trait_id=" + str(trait.name) + "&dataset=" + str(trait.dataset.name), + "https://genenetwork.org/show_trait?trait_id=" + \ + str(trait.name) + "&dataset=" + str(trait.dataset.name), trait.dataset.group.species, trait.dataset.group.name, trait.dataset.name, @@ -117,13 +123,15 @@ def 
export_search_results_csv(targs): for sample in trait.dataset.group.samplelist: if sample in trait.data: - row_contents += [trait.data[sample].value, trait.data[sample].variance] + row_contents += [trait.data[sample].value, + trait.data[sample].variance] else: row_contents += ["x", "x"] csv_rows.append(row_contents) - csv_rows = list(map(list, itertools.zip_longest(*[row for row in csv_rows]))) + csv_rows = list( + map(list, itertools.zip_longest(*[row for row in csv_rows]))) writer.writerows(csv_rows) csv_data = buff.getvalue() buff.close() diff --git a/wqflask/wqflask/external_tools/send_to_bnw.py b/wqflask/wqflask/external_tools/send_to_bnw.py index 1556c6a0..3c0f2ca7 100644 --- a/wqflask/wqflask/external_tools/send_to_bnw.py +++ b/wqflask/wqflask/external_tools/send_to_bnw.py @@ -27,7 +27,8 @@ logger = utility.logger.getLogger(__name__) class SendToBNW: def __init__(self, start_vars): - trait_db_list = [trait.strip() for trait in start_vars['trait_list'].split(',')] + trait_db_list = [trait.strip() + for trait in start_vars['trait_list'].split(',')] helper_functions.get_trait_db_obs(self, trait_db_list) trait_samples_list = [] @@ -39,7 +40,8 @@ class SendToBNW: trait1_samples = list(this_sample_data.keys()) trait_samples_list.append(trait1_samples) - shared_samples = list(set(trait_samples_list[0]).intersection(*trait_samples_list)) + shared_samples = list( + set(trait_samples_list[0]).intersection(*trait_samples_list)) self.form_value = "" # ZS: string that is passed to BNW through form values_list = [] diff --git a/wqflask/wqflask/external_tools/send_to_geneweaver.py b/wqflask/wqflask/external_tools/send_to_geneweaver.py index c55c43e6..8af9bee9 100644 --- a/wqflask/wqflask/external_tools/send_to_geneweaver.py +++ b/wqflask/wqflask/external_tools/send_to_geneweaver.py @@ -32,7 +32,8 @@ logger = utility.logger.getLogger(__name__) class SendToGeneWeaver: def __init__(self, start_vars): - trait_db_list = [trait.strip() for trait in start_vars['trait_list'].split(',')] + trait_db_list = [trait.strip() + for trait in start_vars['trait_list'].split(',')] helper_functions.get_trait_db_obs(self, trait_db_list) self.chip_name = test_chip(self.trait_list) diff --git a/wqflask/wqflask/external_tools/send_to_webgestalt.py b/wqflask/wqflask/external_tools/send_to_webgestalt.py index 6b78725c..fd12562f 100644 --- a/wqflask/wqflask/external_tools/send_to_webgestalt.py +++ b/wqflask/wqflask/external_tools/send_to_webgestalt.py @@ -32,7 +32,8 @@ logger = utility.logger.getLogger(__name__) class SendToWebGestalt: def __init__(self, start_vars): - trait_db_list = [trait.strip() for trait in start_vars['trait_list'].split(',')] + trait_db_list = [trait.strip() + for trait in start_vars['trait_list'].split(',')] helper_functions.get_trait_db_obs(self, trait_db_list) self.chip_name = test_chip(self.trait_list) diff --git a/wqflask/wqflask/group_manager.py b/wqflask/wqflask/group_manager.py index 69ce3e7a..55a7da0e 100644 --- a/wqflask/wqflask/group_manager.py +++ b/wqflask/wqflask/group_manager.py @@ -77,8 +77,10 @@ def remove_users(): admin_ids_to_remove = request.form['selected_admin_ids'] member_ids_to_remove = request.form['selected_member_ids'] - remove_users_from_group(g.user_session.user_id, admin_ids_to_remove.split(":"), group_id, user_type="admins") - remove_users_from_group(g.user_session.user_id, member_ids_to_remove.split(":"), group_id, user_type="members") + remove_users_from_group(g.user_session.user_id, admin_ids_to_remove.split( + ":"), group_id, user_type="admins") + 
remove_users_from_group(g.user_session.user_id, member_ids_to_remove.split( + ":"), group_id, user_type="members") return redirect(url_for('view_group', id=group_id)) @@ -88,10 +90,12 @@ def add_users(user_type='members'): group_id = request.form['group_id'] if user_type == "admins": user_emails = request.form['admin_emails_to_add'].split(",") - add_users_to_group(g.user_session.user_id, group_id, user_emails, admins=True) + add_users_to_group(g.user_session.user_id, group_id, + user_emails, admins=True) elif user_type == "members": user_emails = request.form['member_emails_to_add'].split(",") - add_users_to_group(g.user_session.user_id, group_id, user_emails, admins=False) + add_users_to_group(g.user_session.user_id, group_id, + user_emails, admins=False) return redirect(url_for('view_group', id=group_id)) @@ -111,7 +115,8 @@ def add_or_edit_group(): if "group_name" in params: member_user_ids = set() admin_user_ids = set() - admin_user_ids.add(g.user_session.user_id) # ZS: Always add the user creating the group as an admin + # ZS: Always add the user creating the group as an admin + admin_user_ids.add(g.user_session.user_id) if "admin_emails_to_add" in params: admin_emails = params['admin_emails_to_add'].split(",") for email in admin_emails: @@ -127,7 +132,8 @@ def add_or_edit_group(): member_user_ids.add(user_details['user_id']) #send_group_invites(params['group_id'], user_email_list = user_emails, user_type="members") - create_group(list(admin_user_ids), list(member_user_ids), params['group_name']) + create_group(list(admin_user_ids), list( + member_user_ids), params['group_name']) return redirect(url_for('manage_groups')) else: return render_template("admin/create_group.html") @@ -149,9 +155,11 @@ def send_group_invites(group_id, user_email_list=[], user_type="members"): ((user_type == "members") and (user_details['user_id'] in group_info['members'])): continue else: - send_verification_email(user_details, template_name="email/group_verification.txt", key_prefix="verification_code", subject = "You've been invited to join a GeneNetwork user group") + send_verification_email(user_details, template_name="email/group_verification.txt", + key_prefix="verification_code", subject = "You've been invited to join a GeneNetwork user group") else: - temp_password = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) + temp_password = ''.join(random.choice( + string.ascii_uppercase + string.digits) for _ in range(6)) user_details = { 'user_id': str(uuid.uuid4()), 'email_address': user_email, diff --git a/wqflask/wqflask/gsearch.py b/wqflask/wqflask/gsearch.py index a21dae84..9548d130 100644 --- a/wqflask/wqflask/gsearch.py +++ b/wqflask/wqflask/gsearch.py @@ -77,18 +77,21 @@ class GSearch: this_trait['name'] = line[5] this_trait['dataset'] = line[3] this_trait['dataset_fullname'] = line[4] - this_trait['hmac'] = hmac.data_hmac('{}:{}'.format(line[5], line[3])) + this_trait['hmac'] = hmac.data_hmac( + '{}:{}'.format(line[5], line[3])) this_trait['species'] = line[0] this_trait['group'] = line[1] this_trait['tissue'] = line[2] this_trait['symbol'] = line[6] if line[7]: - this_trait['description'] = line[7].decode('utf-8', 'replace') + this_trait['description'] = line[7].decode( + 'utf-8', 'replace') else: this_trait['description'] = "N/A" this_trait['location_repr'] = 'N/A' if (line[8] != "NULL" and line[8] != "") and (line[9] != 0): - this_trait['location_repr'] = 'Chr%s: %.6f' % (line[8], float(line[9])) + this_trait['location_repr'] = 'Chr%s: %.6f' % ( + line[8], 
float(line[9])) try: this_trait['mean'] = '%.3f' % line[10] except: @@ -103,7 +106,8 @@ class GSearch: this_trait['locus_chr'] = line[16] this_trait['locus_mb'] = line[17] - dataset_ob = SimpleNamespace(id=this_trait["dataset_id"], type="ProbeSet", species=this_trait["species"]) + dataset_ob = SimpleNamespace( + id=this_trait["dataset_id"], type="ProbeSet", species=this_trait["species"]) if dataset_ob.id not in dataset_to_permissions: permissions = check_resource_availability(dataset_ob) dataset_to_permissions[dataset_ob.id] = permissions @@ -118,7 +122,9 @@ class GSearch: max_lrs_text = "N/A" if this_trait['locus_chr'] != None and this_trait['locus_mb'] != None: - max_lrs_text = "Chr" + str(this_trait['locus_chr']) + ": " + str(this_trait['locus_mb']) + max_lrs_text = "Chr" + \ + str(this_trait['locus_chr']) + \ + ": " + str(this_trait['locus_mb']) this_trait['max_lrs_text'] = max_lrs_text trait_list.append(this_trait) @@ -146,7 +152,8 @@ class GSearch: if "_" in self.terms: if len(self.terms.split("_")[0]) == 3: search_term = self.terms.split("_")[1] - group_clause = "AND InbredSet.`InbredSetCode` = '{}'".format(self.terms.split("_")[0]) + group_clause = "AND InbredSet.`InbredSetCode` = '{}'".format( + self.terms.split("_")[0]) sql = """ SELECT Species.`Name`, @@ -192,18 +199,22 @@ class GSearch: this_trait['index'] = i + 1 this_trait['name'] = str(line[4]) if len(str(line[12])) == 3: - this_trait['display_name'] = str(line[12]) + "_" + this_trait['name'] + this_trait['display_name'] = str( + line[12]) + "_" + this_trait['name'] else: this_trait['display_name'] = this_trait['name'] this_trait['dataset'] = line[2] this_trait['dataset_fullname'] = line[3] - this_trait['hmac'] = hmac.data_hmac('{}:{}'.format(line[4], line[2])) + this_trait['hmac'] = hmac.data_hmac( + '{}:{}'.format(line[4], line[2])) this_trait['species'] = line[0] this_trait['group'] = line[1] if line[9] != None and line[6] != None: - this_trait['description'] = line[6].decode('utf-8', 'replace') + this_trait['description'] = line[6].decode( + 'utf-8', 'replace') elif line[5] != None: - this_trait['description'] = line[5].decode('utf-8', 'replace') + this_trait['description'] = line[5].decode( + 'utf-8', 'replace') else: this_trait['description'] = "N/A" if line[13] != None and line[13] != "": @@ -221,7 +232,8 @@ class GSearch: else: this_trait['pubmed_link'] = "N/A" if line[12]: - this_trait['display_name'] = line[12] + "_" + str(this_trait['name']) + this_trait['display_name'] = line[12] + \ + "_" + str(this_trait['name']) this_trait['LRS_score_repr'] = "N/A" if line[10] != "" and line[10] != None: this_trait['LRS_score_repr'] = '%3.1f' % line[10] @@ -230,13 +242,16 @@ class GSearch: this_trait['additive'] = '%.3f' % line[11] this_trait['max_lrs_text'] = "N/A" - trait_ob = create_trait(dataset_name=this_trait['dataset'], name=this_trait['name'], get_qtl_info=True, get_sample_info=False) + trait_ob = create_trait( + dataset_name=this_trait['dataset'], name=this_trait['name'], get_qtl_info=True, get_sample_info=False) if not trait_ob: continue if this_trait['dataset'] == this_trait['group'] + "Publish": try: if trait_ob.locus_chr != "" and trait_ob.locus_mb != "": - this_trait['max_lrs_text'] = "Chr" + str(trait_ob.locus_chr) + ": " + str(trait_ob.locus_mb) + this_trait['max_lrs_text'] = "Chr" + \ + str(trait_ob.locus_chr) + \ + ": " + str(trait_ob.locus_mb) except: this_trait['max_lrs_text'] = "N/A" diff --git a/wqflask/wqflask/heatmap/heatmap.py b/wqflask/wqflask/heatmap/heatmap.py index 0b477446..aa11caa8 100644 --- 
a/wqflask/wqflask/heatmap/heatmap.py +++ b/wqflask/wqflask/heatmap/heatmap.py @@ -18,7 +18,8 @@ logger = getLogger(__name__) class Heatmap: def __init__(self, start_vars, temp_uuid): - trait_db_list = [trait.strip() for trait in start_vars['trait_list'].split(',')] + trait_db_list = [trait.strip() + for trait in start_vars['trait_list'].split(',')] helper_functions.get_trait_db_obs(self, trait_db_list) self.temp_uuid = temp_uuid @@ -33,7 +34,8 @@ class Heatmap: chrnames = [] self.species = species.TheSpecies(dataset=self.trait_list[0][1]) for key in list(self.species.chromosomes.chromosomes.keys()): - chrnames.append([self.species.chromosomes.chromosomes[key].name, self.species.chromosomes.chromosomes[key].mb_length]) + chrnames.append([self.species.chromosomes.chromosomes[key].name, + self.species.chromosomes.chromosomes[key].mb_length]) for trait_db in self.trait_list: @@ -108,10 +110,13 @@ class Heatmap: trimmed_samples.append(str(samples[i])) trimmed_values.append(values[i]) - trait_filename = str(this_trait.name) + "_" + str(self.dataset.name) + "_pheno" + trait_filename = str(this_trait.name) + "_" + \ + str(self.dataset.name) + "_pheno" gen_pheno_txt_file(trimmed_samples, trimmed_values, trait_filename) - output_filename = self.dataset.group.name + "_GWA_" + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) + output_filename = self.dataset.group.name + "_GWA_" + \ + ''.join(random.choice(string.ascii_uppercase + string.digits) + for _ in range(6)) reaper_command = REAPER_COMMAND + ' --geno {0}/{1}.geno --traits {2}/gn2/{3}.txt -n 1000 -o {4}{5}.txt'.format(flat_files('genotype'), genofile_name, @@ -129,9 +134,11 @@ class Heatmap: self.trait_results[this_trait.name] = [] for qtl in reaper_results: if qtl['additive'] > 0: - self.trait_results[this_trait.name].append(-float(qtl['lrs_value'])) + self.trait_results[this_trait.name].append( + -float(qtl['lrs_value'])) else: - self.trait_results[this_trait.name].append(float(qtl['lrs_value'])) + self.trait_results[this_trait.name].append( + float(qtl['lrs_value'])) def gen_pheno_txt_file(samples, vals, filename): diff --git a/wqflask/wqflask/interval_analyst/GeneUtil.py b/wqflask/wqflask/interval_analyst/GeneUtil.py index 8dd1c7c0..cadff080 100644 --- a/wqflask/wqflask/interval_analyst/GeneUtil.py +++ b/wqflask/wqflask/interval_analyst/GeneUtil.py @@ -54,7 +54,8 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): Mb >= %2.6f AND Mb < %2.6f AND StrainId1 = %d AND StrainId2 = %d """ % (chrName, newdict["TxStart"], newdict["TxEnd"], diffCol[0], diffCol[1])).fetchone()[0] - newdict["snpDensity"] = newdict["snpCount"] / (newdict["TxEnd"] - newdict["TxStart"]) / 1000.0 + newdict["snpDensity"] = newdict["snpCount"] / \ + (newdict["TxEnd"] - newdict["TxStart"]) / 1000.0 else: newdict["snpDensity"] = newdict["snpCount"] = 0 @@ -86,12 +87,14 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): StrainId1 = %d AND StrainId2 = %d """ % (chrName, newdict["TxStart"], newdict["TxEnd"], diffCol[0], diffCol[1])).fetchone()[0] - newdict2["snpDensity"] = newdict2["snpCount"] / (newdict2["TxEnd"] - newdict2["TxStart"]) / 1000.0 + newdict2["snpDensity"] = newdict2["snpCount"] / \ + (newdict2["TxEnd"] - newdict2["TxStart"]) / 1000.0 else: newdict2["snpDensity"] = newdict2["snpCount"] = 0 try: - newdict2['GeneLength'] = 1000.0 * (newdict2['TxEnd'] - newdict2['TxStart']) + newdict2['GeneLength'] = 1000.0 * \ + (newdict2['TxEnd'] - newdict2['TxStart']) except: pass diff --git 
a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py index 5c7b81dd..cde822e8 100644 --- a/wqflask/wqflask/marker_regression/display_mapping_results.py +++ b/wqflask/wqflask/marker_regression/display_mapping_results.py @@ -307,7 +307,8 @@ class DisplayMappingResults: if 'color_scheme' in start_vars: self.color_scheme = start_vars['color_scheme'] if self.color_scheme == "single": - self.manhattan_single_color = ImageColor.getrgb("#" + start_vars['manhattan_single_color']) + self.manhattan_single_color = ImageColor.getrgb( + "#" + start_vars['manhattan_single_color']) if 'permCheck' in list(start_vars.keys()): self.permChecked = start_vars['permCheck'] @@ -357,7 +358,8 @@ class DisplayMappingResults: if 'reaper_version' in list(start_vars.keys()) and self.mapping_method == "reaper": self.reaper_version = start_vars['reaper_version'] if 'output_files' in start_vars: - self.output_files = ",".join([(the_file if the_file is not None else "") for the_file in start_vars['output_files']]) + self.output_files = ",".join( + [(the_file if the_file is not None else "") for the_file in start_vars['output_files']]) self.categorical_vars = "" self.perm_strata = "" @@ -386,16 +388,19 @@ class DisplayMappingResults: self.dataset.group.genofile = self.genofile_string.split(":")[0] if self.mapping_method == "reaper" and self.manhattan_plot != True: - self.genotype = self.dataset.group.read_genotype_file(use_reaper=True) + self.genotype = self.dataset.group.read_genotype_file( + use_reaper=True) else: self.genotype = self.dataset.group.read_genotype_file() # Darwing Options try: if self.selectedChr > -1: - self.graphWidth = min(self.GRAPH_MAX_WIDTH, max(self.GRAPH_MIN_WIDTH, int(start_vars['graphWidth']))) + self.graphWidth = min(self.GRAPH_MAX_WIDTH, max( + self.GRAPH_MIN_WIDTH, int(start_vars['graphWidth']))) else: - self.graphWidth = min(self.GRAPH_MAX_WIDTH, max(self.MULT_GRAPH_MIN_WIDTH, int(start_vars['graphWidth']))) + self.graphWidth = min(self.GRAPH_MAX_WIDTH, max( + self.MULT_GRAPH_MIN_WIDTH, int(start_vars['graphWidth']))) except: if self.selectedChr > -1: self.graphWidth = self.GRAPH_DEFAULT_WIDTH @@ -472,9 +477,11 @@ class DisplayMappingResults: """ % (self.dataset.group.name, ", ".join(["'%s'" % X[0] for X in self.ChrList[1:]]))) self.ChrLengthMbList = [x[0] / 1000000.0 for x in self.ChrLengthMbList] - self.ChrLengthMbSum = reduce(lambda x, y: x + y, self.ChrLengthMbList, 0.0) + self.ChrLengthMbSum = reduce( + lambda x, y: x + y, self.ChrLengthMbList, 0.0) if self.ChrLengthMbList: - self.MbGraphInterval = self.ChrLengthMbSum / (len(self.ChrLengthMbList) * 12) # Empirical Mb interval + self.MbGraphInterval = self.ChrLengthMbSum / \ + (len(self.ChrLengthMbList) * 12) # Empirical Mb interval else: self.MbGraphInterval = 1 @@ -482,7 +489,8 @@ class DisplayMappingResults: for i, _chr in enumerate(self.genotype): self.ChrLengthCMList.append(_chr[-1].cM - _chr[0].cM) - self.ChrLengthCMSum = reduce(lambda x, y: x + y, self.ChrLengthCMList, 0.0) + self.ChrLengthCMSum = reduce( + lambda x, y: x + y, self.ChrLengthCMList, 0.0) if self.plotScale == 'physic': self.GraphInterval = self.MbGraphInterval # Mb @@ -496,7 +504,8 @@ class DisplayMappingResults: smd = [] for sample in self.sample_vals_dict.keys(): if self.sample_vals_dict[sample] != "x": - temp = GeneralObject(name=sample, value=float(self.sample_vals_dict[sample])) + temp = GeneralObject(name=sample, value=float( + self.sample_vals_dict[sample])) smd.append(temp) else: continue 
@@ -506,7 +515,8 @@ class DisplayMappingResults: if item.name == samplelist[j]: self.NR_INDIVIDUALS = self.NR_INDIVIDUALS + 1 # default: - self.graphHeight = self.graphHeight + 2 * (self.NR_INDIVIDUALS + 10) * self.EACH_GENE_HEIGHT + self.graphHeight = self.graphHeight + 2 * \ + (self.NR_INDIVIDUALS + 10) * self.EACH_GENE_HEIGHT # END HaplotypeAnalyst ######################### @@ -529,7 +539,8 @@ class DisplayMappingResults: self.diffCol = [] for i, strain in enumerate(self.diffCol): - self.diffCol[i] = g.db.execute("select Id from Strain where Symbol = %s", strain).fetchone()[0] + self.diffCol[i] = g.db.execute( + "select Id from Strain where Symbol = %s", strain).fetchone()[0] ################################################################ # GeneCollection goes here @@ -552,13 +563,15 @@ class DisplayMappingResults: chrName = "X" else: chrName = self.selectedChr - self.geneCol = GeneUtil.loadGenes(chrName, self.diffCol, self.startMb, self.endMb, "mouse") + self.geneCol = GeneUtil.loadGenes( + chrName, self.diffCol, self.startMb, self.endMb, "mouse") elif self.dataset.group.species == "rat": if self.selectedChr == 21: chrName = "X" else: chrName = self.selectedChr - self.geneCol = GeneUtil.loadGenes(chrName, self.diffCol, self.startMb, self.endMb, "rat") + self.geneCol = GeneUtil.loadGenes( + chrName, self.diffCol, self.startMb, self.endMb, "rat") if self.geneCol and self.intervalAnalystChecked: ####################################################################### @@ -577,7 +590,8 @@ class DisplayMappingResults: showLocusForm = "" intCanvas = Image.new("RGBA", size=(self.graphWidth, self.graphHeight)) with Bench("Drawing Plot"): - gifmap = self.plotIntMapping(intCanvas, startMb=self.startMb, endMb=self.endMb, showLocusForm= showLocusForm) + gifmap = self.plotIntMapping( + intCanvas, startMb=self.startMb, endMb=self.endMb, showLocusForm= showLocusForm) self.gifmap = gifmap.__str__() @@ -593,8 +607,10 @@ class DisplayMappingResults: # Scales plot differently for high resolution if self.draw2X: - intCanvasX2 = Image.new("RGBA", size=(self.graphWidth * 2, self.graphHeight * 2)) - gifmapX2 = self.plotIntMapping(intCanvasX2, startMb=self.startMb, endMb=self.endMb, showLocusForm= showLocusForm, zoom=2) + intCanvasX2 = Image.new("RGBA", size=( + self.graphWidth * 2, self.graphHeight * 2)) + gifmapX2 = self.plotIntMapping( + intCanvasX2, startMb=self.startMb, endMb=self.endMb, showLocusForm= showLocusForm, zoom=2) intCanvasX2.save( "{}.png".format( os.path.join(webqtlConfig.GENERATED_IMAGE_DIR, @@ -612,7 +628,8 @@ class DisplayMappingResults: name=showLocusForm, submit=HtmlGenWrapper.create_input_tag(type_='hidden')) - hddn = {'FormID': 'showDatabase', 'ProbeSetID': '_', 'database': fd.RISet+"Geno",'CellID':'_', 'RISet':fd.RISet, 'incparentsf1':'ON'} + hddn = {'FormID': 'showDatabase', 'ProbeSetID': '_', 'database': fd.RISet+ \ + "Geno",'CellID':'_', 'RISet':fd.RISet, 'incparentsf1':'ON'} for key in hddn.keys(): showLocusForm.append(HtmlGenWrapper.create_input_tag( name=key, value=hddn[key], type_='hidden')) @@ -631,7 +648,8 @@ class DisplayMappingResults: if self.traitList and self.traitList[0].dataset and self.traitList[0].dataset.type == 'Geno': btminfo.append(HtmlGenWrapper.create_br_tag()) - btminfo.append('Mapping using genotype data as a trait will result in infinity LRS at one locus. In order to display the result properly, all LRSs higher than 100 are capped at 100.') + btminfo.append( + 'Mapping using genotype data as a trait will result in infinity LRS at one locus. 
In order to display the result properly, all LRSs higher than 100 are capped at 100.') def plotIntMapping(self, canvas, offset=(80, 120, 90, 100), zoom=1, startMb= None, endMb = None, showLocusForm = ""): im_drawer = ImageDraw.Draw(canvas) @@ -673,7 +691,8 @@ class DisplayMappingResults: else: drawAreaHeight -= 3 * self.BAND_HEIGHT + 3 * self.BAND_SPACING + 10 * zoom if self.geneChecked: - drawAreaHeight -= self.NUM_GENE_ROWS * self.EACH_GENE_HEIGHT + 3 * self.BAND_SPACING + 10 * zoom + drawAreaHeight -= self.NUM_GENE_ROWS * \ + self.EACH_GENE_HEIGHT + 3 * self.BAND_SPACING + 10 * zoom else: if self.selectedChr > -1: drawAreaHeight -= 20 @@ -682,7 +701,8 @@ class DisplayMappingResults: # BEGIN HaplotypeAnalyst if self.haplotypeAnalystChecked and self.selectedChr > -1: - drawAreaHeight -= self.EACH_GENE_HEIGHT * (self.NR_INDIVIDUALS + 10) * 2 * zoom + drawAreaHeight -= self.EACH_GENE_HEIGHT * \ + (self.NR_INDIVIDUALS + 10) * 2 * zoom # END HaplotypeAnalyst if zoom == 2: @@ -693,38 +713,48 @@ class DisplayMappingResults: newoffset = (xLeftOffset, xRightOffset, yTopOffset, yBottomOffset) # Draw the alternating-color background first and get plotXScale - plotXScale = self.drawGraphBackground(canvas, gifmap, offset=newoffset, zoom=zoom, startMb=startMb, endMb=endMb) + plotXScale = self.drawGraphBackground( + canvas, gifmap, offset=newoffset, zoom=zoom, startMb=startMb, endMb=endMb) # draw bootstap if self.bootChecked and not self.multipleInterval: - self.drawBootStrapResult(canvas, self.nboot, drawAreaHeight, plotXScale, offset=newoffset, zoom=zoom, startMb=startMb, endMb=endMb) + self.drawBootStrapResult(canvas, self.nboot, drawAreaHeight, plotXScale, + offset=newoffset, zoom=zoom, startMb=startMb, endMb=endMb) # Draw clickable region and gene band if selected if self.plotScale == 'physic' and self.selectedChr > -1: - self.drawClickBand(canvas, gifmap, plotXScale, offset=newoffset, zoom=zoom, startMb=startMb, endMb=endMb) + self.drawClickBand(canvas, gifmap, plotXScale, offset=newoffset, + zoom=zoom, startMb=startMb, endMb=endMb) if self.geneChecked and self.geneCol: - self.drawGeneBand(canvas, gifmap, plotXScale, offset=newoffset, zoom=zoom, startMb=startMb, endMb=endMb) + self.drawGeneBand(canvas, gifmap, plotXScale, offset=newoffset, + zoom=zoom, startMb=startMb, endMb=endMb) if self.SNPChecked: - self.drawSNPTrackNew(canvas, offset=newoffset, zoom=2 * zoom, startMb=startMb, endMb = endMb) + self.drawSNPTrackNew( + canvas, offset=newoffset, zoom=2 * zoom, startMb=startMb, endMb = endMb) # BEGIN HaplotypeAnalyst if self.haplotypeAnalystChecked: - self.drawHaplotypeBand(canvas, gifmap, plotXScale, offset=newoffset, zoom=zoom, startMb=startMb, endMb=endMb) + self.drawHaplotypeBand( + canvas, gifmap, plotXScale, offset=newoffset, zoom=zoom, startMb=startMb, endMb=endMb) # END HaplotypeAnalyst # Draw X axis - self.drawXAxis(canvas, drawAreaHeight, gifmap, plotXScale, showLocusForm, offset=newoffset, zoom=zoom, startMb=startMb, endMb=endMb) + self.drawXAxis(canvas, drawAreaHeight, gifmap, plotXScale, showLocusForm, + offset=newoffset, zoom=zoom, startMb=startMb, endMb=endMb) # Draw QTL curve - self.drawQTL(canvas, drawAreaHeight, gifmap, plotXScale, offset=newoffset, zoom=zoom, startMb=startMb, endMb=endMb) + self.drawQTL(canvas, drawAreaHeight, gifmap, plotXScale, + offset=newoffset, zoom=zoom, startMb=startMb, endMb=endMb) # draw legend if self.multipleInterval: - self.drawMultiTraitName(fd, canvas, gifmap, showLocusForm, offset=newoffset) + self.drawMultiTraitName( + fd, canvas, gifmap, 
showLocusForm, offset=newoffset) elif self.legendChecked: self.drawLegendPanel(canvas, offset=newoffset, zoom=zoom) else: pass # draw position, no need to use a separate function - self.drawProbeSetPosition(canvas, plotXScale, offset=newoffset, zoom=zoom) + self.drawProbeSetPosition( + canvas, plotXScale, offset=newoffset, zoom=zoom) return gifmap @@ -756,19 +786,24 @@ class DisplayMappingResults: if previous_chr_as_int != 1: BootCoord.append(BootChrCoord) BootChrCoord = [] - startX += (self.ChrLengthDistList[previous_chr_as_int - 2] + self.GraphInterval) * plotXScale + startX += ( + self.ChrLengthDistList[previous_chr_as_int - 2] + self.GraphInterval) * plotXScale if self.plotScale == 'physic': Xc = startX + (result['Mb'] - self.startMb) * plotXScale else: - Xc = startX + (result['cM'] - self.qtlresults[0]['cM']) * plotXScale + Xc = startX + \ + (result['cM'] - self.qtlresults[0]['cM']) * plotXScale BootChrCoord.append([Xc, self.bootResult[i]]) else: for i, result in enumerate(self.qtlresults): if str(result['chr']) == str(self.ChrList[self.selectedChr][0]): if self.plotScale == 'physic': - Xc = startX + (result['Mb'] - self.startMb) * plotXScale + Xc = startX + (result['Mb'] - \ + self.startMb) * plotXScale else: - Xc = startX + (result['cM'] - self.qtlresults[0]['cM']) * plotXScale + Xc = startX + \ + (result['cM'] - self.qtlresults[0] + ['cM']) * plotXScale BootChrCoord.append([Xc, self.bootResult[i]]) BootCoord = [BootChrCoord] @@ -793,14 +828,16 @@ class DisplayMappingResults: if maxBootCount < bootCount: maxBootCount = bootCount # end if - reducedBootCoord.append([bootStartPixX, BootChrCoord[i][0], bootCount]) + reducedBootCoord.append( + [bootStartPixX, BootChrCoord[i][0], bootCount]) bootStartPixX = BootChrCoord[i][0] bootCount = BootChrCoord[i][1] # end else # end for # add last piece if BootChrCoord[-1][0] - bootStartPixX > stepBootStrap / 2.0: - reducedBootCoord.append([bootStartPixX, BootChrCoord[-1][0], bootCount]) + reducedBootCoord.append( + [bootStartPixX, BootChrCoord[-1][0], bootCount]) else: reducedBootCoord[-1][2] += bootCount reducedBootCoord[-1][1] = BootChrCoord[-1][0] @@ -827,11 +864,13 @@ class DisplayMappingResults: # draw boot scale highestPercent = (maxBootCount * 100.0) / nboot bootScale = Plot.detScale(0, highestPercent) - bootScale = Plot.frange(bootScale[0], bootScale[1], bootScale[1] / bootScale[2]) + bootScale = Plot.frange( + bootScale[0], bootScale[1], bootScale[1] / bootScale[2]) bootScale = bootScale[:-1] + [highestPercent] bootOffset = 50 * fontZoom - bootScaleFont = ImageFont.truetype(font=VERDANA_FILE, size=13 * fontZoom) + bootScaleFont = ImageFont.truetype( + font=VERDANA_FILE, size=13 * fontZoom) im_drawer.rectangle( xy=((canvas.size[0] - bootOffset, yZero - bootHeightThresh), (canvas.size[0] - bootOffset - 15*zoom, yZero)), @@ -860,10 +899,12 @@ class DisplayMappingResults: startPosY = 30 else: startPosY = 15 - smallLabelFont = ImageFont.truetype(font=TREBUC_FILE, size=12 * fontZoom) + smallLabelFont = ImageFont.truetype( + font=TREBUC_FILE, size=12 * fontZoom) leftOffset = canvas.size[0] - xRightOffset - 190 im_drawer.rectangle( - xy=((leftOffset, startPosY - 6), (leftOffset + 12, startPosY + 6)), + xy=((leftOffset, startPosY - 6), + (leftOffset + 12, startPosY + 6)), fill=YELLOW, outline=BLACK) im_drawer.text(xy=(canvas.size[0] - xRightOffset - 170, startPosY + TEXT_Y_DISPLACEMENT), text='Frequency of the Peak LRS', @@ -905,7 +946,8 @@ class DisplayMappingResults: locPixel = xLeftOffset for i, _chr in enumerate(self.ChrList[1:]): if _chr[0] 
!= Chr: - locPixel += (self.ChrLengthDistList[i] + self.GraphInterval) * plotXScale + locPixel += (self.ChrLengthDistList[i] + \ + self.GraphInterval) * plotXScale else: locPixel += Mb * plotXScale break @@ -921,7 +963,8 @@ class DisplayMappingResults: # the trait's position is between two traits if i > 0 and self.qtlresults[i - 1]['Mb'] < Mb and qtlresult['Mb'] >= Mb: - locPixel = xLeftOffset + plotXScale * (self.qtlresults[i - 1]['Mb'] + (qtlresult['Mb'] - self.qtlresults[i - 1]['Mb']) * (Mb - self.qtlresults[i - 1]['Mb']) / (qtlresult['Mb'] - self.qtlresults[i - 1]['Mb'])) + locPixel = xLeftOffset + plotXScale * (self.qtlresults[i - 1]['Mb'] + (qtlresult['Mb'] - self.qtlresults[i - 1]['Mb']) * ( + Mb - self.qtlresults[i - 1]['Mb']) / (qtlresult['Mb'] - self.qtlresults[i - 1]['Mb'])) break # the trait's position is on the right of the last genotype @@ -932,12 +975,15 @@ class DisplayMappingResults: for i, _chr in enumerate(self.ChrList): if i < (len(self.ChrList) - 1): if _chr != Chr: - locPixel += (self.ChrLengthDistList[i] + self.GraphInterval) * plotXScale + locPixel += (self.ChrLengthDistList[i] + \ + self.GraphInterval) * plotXScale else: - locPixel += (Mb * (_chr[-1].cM - _chr[0].cM) / self.ChrLengthCMList[i]) * plotXScale + locPixel += (Mb * (_chr[-1].cM - _chr[0].cM) / \ + self.ChrLengthCMList[i]) * plotXScale break if locPixel >= 0 and self.plotScale == 'physic': - traitPixel = ((locPixel, yZero), (locPixel - 7, yZero + 14), (locPixel + 7, yZero + 14)) + traitPixel = ((locPixel, yZero), (locPixel - 7, + yZero + 14), (locPixel + 7, yZero + 14)) draw_open_polygon(canvas, xy=traitPixel, outline=BLACK, fill=self.TRANSCRIPT_LOCATION_COLOR) @@ -979,7 +1025,8 @@ class DisplayMappingResults: maxCount = max(SNPCounts) if maxCount > 0: for i in range(xLeftOffset, xLeftOffset + plotWidth): - snpDensity = float(SNPCounts[i - xLeftOffset] * SNP_HEIGHT_MODIFIER / maxCount) + snpDensity = float( + SNPCounts[i - xLeftOffset] * SNP_HEIGHT_MODIFIER / maxCount) im_drawer.line( xy=((i, drawSNPLocationY + (snpDensity) * zoom), (i, drawSNPLocationY - (snpDensity) * zoom)), @@ -1015,12 +1062,16 @@ class DisplayMappingResults: (rectWidth + rightShift, yPaddingTop + 10+kstep*15)), fill=thisLRSColor, outline=BLACK) im_drawer.text( - text=name, xy=(rectWidth + 2 + rightShift, yPaddingTop + 10 + kstep * 15), + text=name, xy=(rectWidth + 2 + rightShift, + yPaddingTop + 10 + kstep * 15), font=colorFont, fill=BLACK) if thisTrait.db: - COORDS = "%d,%d,%d,%d" % (rectWidth + 2 + rightShift, yPaddingTop + kstep * 15, rectWidth + 2 + rightShift + nameWidth, yPaddingTop + 10 + kstep * 15,) - HREF = "javascript:showDatabase3('%s','%s','%s','');" % (showLocusForm, thisTrait.db.name, thisTrait.name) - Areas = HtmlGenWrapper.create_area_tag(shape='rect', coords=COORDS, href=HREF) + COORDS = "%d,%d,%d,%d" % (rectWidth + 2 + rightShift, yPaddingTop + kstep * \ + 15, rectWidth + 2 + rightShift + nameWidth, yPaddingTop + 10 + kstep * 15,) + HREF = "javascript:showDatabase3('%s','%s','%s','');" % ( + showLocusForm, thisTrait.db.name, thisTrait.name) + Areas = HtmlGenWrapper.create_area_tag( + shape='rect', coords=COORDS, href=HREF) gifmap.append(Areas) # TODO def drawLegendPanel(self, canvas, offset=(40, 120, 80, 10), zoom=1): @@ -1042,7 +1093,8 @@ class DisplayMappingResults: if hasattr(self.traitList[0], 'chr') and hasattr(self.traitList[0], 'mb'): startPosY = 15 nCol = 2 - smallLabelFont = ImageFont.truetype(font=TREBUC_FILE, size=12 * fontZoom) + smallLabelFont = ImageFont.truetype( + font=TREBUC_FILE, size=12 * 
fontZoom) leftOffset = canvas.size[0] - xRightOffset - 190 draw_open_polygon( @@ -1064,7 +1116,8 @@ class DisplayMappingResults: xy=((startPosX, startPosY), (startPosX + 32, startPosY)), fill=self.LRS_COLOR, width=2) im_drawer.text( - text=self.LRS_LOD, xy=(startPosX + 40, startPosY + TEXT_Y_DISPLACEMENT), + text=self.LRS_LOD, xy=( + startPosX + 40, startPosY + TEXT_Y_DISPLACEMENT), font=labelFont, fill=BLACK) startPosY += stepPosY @@ -1118,10 +1171,12 @@ class DisplayMappingResults: xy=((thisStartX, startPosY), (startPosX + 32, startPosY)), fill=self.SIGNIFICANT_COLOR, width=self.SIGNIFICANT_WIDTH) im_drawer.line( - xy=((thisStartX, startPosY + stepPosY), (startPosX + 32, startPosY + stepPosY)), + xy=((thisStartX, startPosY + stepPosY), + (startPosX + 32, startPosY + stepPosY)), fill=self.SUGGESTIVE_COLOR, width=self.SUGGESTIVE_WIDTH) im_drawer.text( - text='Significant %s = %2.2f' % (self.LRS_LOD, self.significant), + text='Significant %s = %2.2f' % ( + self.LRS_LOD, self.significant), xy=(thisStartX + 40, startPosY + TEXT_Y_DISPLACEMENT), font=labelFont, fill=BLACK) im_drawer.text( text='Suggestive %s = %2.2f' % (self.LRS_LOD, self.suggestive), @@ -1134,7 +1189,8 @@ class DisplayMappingResults: if self.dataset.type == "Publish" or self.dataset.type == "Geno": dataset_label = self.dataset.fullname else: - dataset_label = "%s - %s" % (self.dataset.group.name, self.dataset.fullname) + dataset_label = "%s - %s" % (self.dataset.group.name, + self.dataset.fullname) string1 = 'Dataset: %s' % (dataset_label) @@ -1151,7 +1207,8 @@ class DisplayMappingResults: string3 = 'Using GEMMA mapping method with ' if self.covariates != "": string3 += 'the cofactors below:' - cofactor_names = ", ".join([covar.split(":")[0] for covar in self.covariates.split(",")]) + cofactor_names = ", ".join( + [covar.split(":")[0] for covar in self.covariates.split(",")]) string4 = cofactor_names else: string3 += 'no cofactors' @@ -1159,7 +1216,8 @@ class DisplayMappingResults: string3 = 'Using R/qtl mapping method with ' if self.covariates != "": string3 += 'the cofactors below:' - cofactor_names = ", ".join([covar.split(":")[0] for covar in self.covariates.split(",")]) + cofactor_names = ", ".join( + [covar.split(":")[0] for covar in self.covariates.split(",")]) string4 = cofactor_names elif self.controlLocus and self.doControl != "false": string3 += '%s as control' % self.controlLocus @@ -1177,15 +1235,19 @@ class DisplayMappingResults: if self.selectedChr == -1: identification = "Mapping on All Chromosomes for " else: - identification = "Mapping on Chromosome %s for " % (self.ChrList[self.selectedChr][0]) + identification = "Mapping on Chromosome %s for " % ( + self.ChrList[self.selectedChr][0]) if self.this_trait.symbol: - identification += "Trait: %s - %s" % (self.this_trait.name, self.this_trait.symbol) + identification += "Trait: %s - %s" % ( + self.this_trait.name, self.this_trait.symbol) elif self.dataset.type == "Publish": if self.this_trait.post_publication_abbreviation: - identification += "Trait: %s - %s" % (self.this_trait.name, self.this_trait.post_publication_abbreviation) + identification += "Trait: %s - %s" % ( + self.this_trait.name, self.this_trait.post_publication_abbreviation) elif self.this_trait.pre_publication_abbreviation: - identification += "Trait: %s - %s" % (self.this_trait.name, self.this_trait.pre_publication_abbreviation) + identification += "Trait: %s - %s" % ( + self.this_trait.name, self.this_trait.pre_publication_abbreviation) else: identification += "Trait: %s" % 
(self.this_trait.name) else: @@ -1265,7 +1327,8 @@ class DisplayMappingResults: tenPercentLength = geneLength * 0.0001 SNPdensity = theGO["snpCount"] / geneLength - exonStarts = list(map(float, theGO['exonStarts'].split(",")[:-1])) + exonStarts = list( + map(float, theGO['exonStarts'].split(",")[:-1])) exonEnds = list(map(float, theGO['exonEnds'].split(",")[:-1])) cdsStart = theGO['cdsStart'] cdsEnd = theGO['cdsEnd'] @@ -1274,8 +1337,10 @@ class DisplayMappingResults: strand = theGO["Strand"] exonCount = theGO["exonCount"] - geneStartPix = xLeftOffset + plotXScale * (float(txStart) - startMb) - geneEndPix = xLeftOffset + plotXScale * (float(txEnd) - startMb) # at least one pixel + geneStartPix = xLeftOffset + \ + plotXScale * (float(txStart) - startMb) + geneEndPix = xLeftOffset + plotXScale * \ + (float(txEnd) - startMb) # at least one pixel if (geneEndPix < xLeftOffset): return; # this gene is not on the screen @@ -1290,7 +1355,8 @@ class DisplayMappingResults: # found earlier, needs to be recomputed as snps are added # always apply colors now, even if SNP Track not checked - Zach 11/24/2010 - densities = [1.0000000000000001e-05, 0.094094033555233408, 0.3306166377816987, 0.88246026851027781, 2.6690084029581951, 4.1, 61.0] + densities = [1.0000000000000001e-05, 0.094094033555233408, + 0.3306166377816987, 0.88246026851027781, 2.6690084029581951, 4.1, 61.0] if SNPdensity < densities[0]: myColor = BLACK elif SNPdensity < densities[1]: @@ -1309,7 +1375,8 @@ class DisplayMappingResults: outlineColor = myColor fillColor = myColor - TITLE = "Gene: %s (%s)\nFrom %2.3f to %2.3f Mb (%s)\nNum. exons: %d." % (geneSymbol, accession, float(txStart), float(txEnd), strand, exonCount) + TITLE = "Gene: %s (%s)\nFrom %2.3f to %2.3f Mb (%s)\nNum. exons: %d." % ( + geneSymbol, accession, float(txStart), float(txEnd), strand, exonCount) # NL: 06-02-2011 Rob required to change this link for gene related HREF = geneNCBILink % geneSymbol @@ -1324,8 +1391,10 @@ class DisplayMappingResults: strand = theGO["Strand"] exonCount = 0 - geneStartPix = xLeftOffset + plotXScale * (float(txStart) - startMb) - geneEndPix = xLeftOffset + plotXScale * (float(txEnd) - startMb) # at least one pixel + geneStartPix = xLeftOffset + \ + plotXScale * (float(txStart) - startMb) + geneEndPix = xLeftOffset + plotXScale * \ + (float(txEnd) - startMb) # at least one pixel if (geneEndPix < xLeftOffset): return; # this gene is not on the screen @@ -1338,7 +1407,8 @@ class DisplayMappingResults: outlineColor = DARKBLUE fillColor = DARKBLUE - TITLE = "Gene: %s\nFrom %2.3f to %2.3f Mb (%s)" % (geneSymbol, float(txStart), float(txEnd), strand) + TITLE = "Gene: %s\nFrom %2.3f to %2.3f Mb (%s)" % ( + geneSymbol, float(txStart), float(txEnd), strand) # NL: 06-02-2011 Rob required to change this link for gene related HREF = geneNCBILink % geneSymbol else: @@ -1347,7 +1417,8 @@ class DisplayMappingResults: TITLE = "Gene: %s" % geneSymbol # Draw Genes - geneYLocation = yPaddingTop + (gIndex % self.NUM_GENE_ROWS) * self.EACH_GENE_HEIGHT * zoom + geneYLocation = yPaddingTop + \ + (gIndex % self.NUM_GENE_ROWS) * self.EACH_GENE_HEIGHT * zoom if self.dataset.group.species == "mouse" or self.dataset.group.species == "rat": geneYLocation += 4 * self.BAND_HEIGHT + 4 * self.BAND_SPACING else: @@ -1361,7 +1432,8 @@ class DisplayMappingResults: # draw the line that runs the entire length of the gene im_drawer.line( xy=( - (geneStartPix, geneYLocation + self.EACH_GENE_HEIGHT / 2 * zoom), + (geneStartPix, geneYLocation + \ + self.EACH_GENE_HEIGHT / 2 * zoom), 
(geneEndPix, geneYLocation + self.EACH_GENE_HEIGHT / 2 *zoom)), fill=outlineColor, width=1) @@ -1401,8 +1473,10 @@ class DisplayMappingResults: # draw the blocks for the exon regions for i in range(0, len(exonStarts)): - exonStartPix = (exonStarts[i] - startMb) * plotXScale + xLeftOffset - exonEndPix = (exonEnds[i] - startMb) * plotXScale + xLeftOffset + exonStartPix = ( + exonStarts[i] - startMb) * plotXScale + xLeftOffset + exonEndPix = (exonEnds[i] - startMb) * \ + plotXScale + xLeftOffset if (exonStartPix < xLeftOffset): exonStartPix = xLeftOffset if (exonEndPix < xLeftOffset): @@ -1418,7 +1492,8 @@ class DisplayMappingResults: # draw gray blocks for 3' and 5' UTR blocks if cdsStart and cdsEnd: - utrStartPix = (txStart - startMb) * plotXScale + xLeftOffset + utrStartPix = (txStart - startMb) * \ + plotXScale + xLeftOffset utrEndPix = (cdsStart - startMb) * plotXScale + xLeftOffset if (utrStartPix < xLeftOffset): utrStartPix = xLeftOffset @@ -1436,7 +1511,8 @@ class DisplayMappingResults: labelText = "5'" im_drawer.text( text=labelText, - xy=(utrStartPix - 9, geneYLocation + self.EACH_GENE_HEIGHT), + xy=(utrStartPix - 9, geneYLocation + \ + self.EACH_GENE_HEIGHT), font=ImageFont.truetype(font=ARIAL_FILE, size=2)) # the second UTR region @@ -1459,7 +1535,8 @@ class DisplayMappingResults: labelText = "3'" im_drawer.text( text=labelText, - xy=(utrEndPix + 2, geneYLocation + self.EACH_GENE_HEIGHT), + xy=(utrEndPix + 2, geneYLocation + \ + self.EACH_GENE_HEIGHT), font=ImageFont.truetype(font=ARIAL_FILE, size=2)) # draw the genes as rectangles @@ -1469,7 +1546,8 @@ class DisplayMappingResults: (geneEndPix, (geneYLocation + self.EACH_GENE_HEIGHT * zoom))), outline=outlineColor, fill=fillColor) - COORDS = "%d, %d, %d, %d" % (geneStartPix, geneYLocation, geneEndPix, (geneYLocation + self.EACH_GENE_HEIGHT)) + COORDS = "%d, %d, %d, %d" % ( + geneStartPix, geneYLocation, geneEndPix, (geneYLocation + self.EACH_GENE_HEIGHT)) # NL: 06-02-2011 Rob required to display NCBI info in a new window gifmap.append( HtmlGenWrapper.create_area_tag( @@ -1496,7 +1574,8 @@ class DisplayMappingResults: smd = [] for sample in self.sample_vals_dict.keys(): if self.sample_vals_dict[sample] != "x" and sample in samplelist: - temp = GeneralObject(name=sample, value=float(self.sample_vals_dict[sample])) + temp = GeneralObject(name=sample, value=float( + self.sample_vals_dict[sample])) smd.append(temp) else: continue @@ -1517,8 +1596,10 @@ class DisplayMappingResults: txStart = _chr[i].Mb txEnd = _chr[i].Mb - geneStartPix = xLeftOffset + plotXScale * (float(txStart) - startMb) - 0 - geneEndPix = xLeftOffset + plotXScale * (float(txEnd) - startMb) - 0 + geneStartPix = xLeftOffset + plotXScale * \ + (float(txStart) - startMb) - 0 + geneEndPix = xLeftOffset + plotXScale * \ + (float(txEnd) - startMb) - 0 drawit = 1 if (geneStartPix < xLeftOffset): @@ -1546,8 +1627,10 @@ class DisplayMappingResults: txStart = _chr[j].Mb txEnd = _chr[j].Mb - geneStartPix = xLeftOffset + plotXScale * (float(txStart) - startMb) - 0 - geneEndPix = xLeftOffset + plotXScale * (float(txEnd) - startMb) + 0 + geneStartPix = xLeftOffset + plotXScale * \ + (float(txStart) - startMb) - 0 + geneEndPix = xLeftOffset + plotXScale * \ + (float(txEnd) - startMb) + 0 if oldgeneEndPix >= xLeftOffset: drawStart = oldgeneEndPix + 4 @@ -1585,7 +1668,8 @@ class DisplayMappingResults: # Draw Genes - geneYLocation = yPaddingTop + self.NUM_GENE_ROWS * (self.EACH_GENE_HEIGHT) * zoom + geneYLocation = yPaddingTop + self.NUM_GENE_ROWS * \ + (self.EACH_GENE_HEIGHT) * zoom 
if self.dataset.group.species == "mouse" or self.dataset.group.species == "rat": geneYLocation += 4 * self.BAND_HEIGHT + 4 * self.BAND_SPACING else: @@ -1606,7 +1690,8 @@ class DisplayMappingResults: if (plotbxd == 1): ind = 0 if samplelist[k] in [item.name for item in smd]: - ind = [item.name for item in smd].index(samplelist[k]) + ind = [item.name for item in smd].index( + samplelist[k]) maxind = max(ind, maxind) @@ -1637,8 +1722,10 @@ class DisplayMappingResults: geneYLocation + 2 *ind*self.EACH_GENE_HEIGHT + 2*self.EACH_GENE_HEIGHT*zoom)), outline=outlineColor, fill=fillColor) - COORDS = "%d, %d, %d, %d" % (geneStartPix, geneYLocation + ind * self.EACH_GENE_HEIGHT, geneEndPix + 1, (geneYLocation + ind * self.EACH_GENE_HEIGHT)) - TITLE = "Strain: %s, marker (%s) \n Position %2.3f Mb." % (samplelist[k], _chr[j].name, float(txStart)) + COORDS = "%d, %d, %d, %d" % ( + geneStartPix, geneYLocation + ind * self.EACH_GENE_HEIGHT, geneEndPix + 1, (geneYLocation + ind * self.EACH_GENE_HEIGHT)) + TITLE = "Strain: %s, marker (%s) \n Position %2.3f Mb." % ( + samplelist[k], _chr[j].name, float(txStart)) HREF = '' gifmap.append( HtmlGenWrapper.create_area_tag( @@ -1683,7 +1770,8 @@ class DisplayMappingResults: plotbxd = 1 if (plotbxd == 1): - ind = [item.name for item in smd].index(samplelist[j]) - 1 + ind = [item.name for item in smd].index( + samplelist[j]) - 1 expr = smd[ind].value # Place where font is hardcoded @@ -1691,13 +1779,15 @@ class DisplayMappingResults: text="%s" % (samplelist[j]), xy=((xLeftOffset + plotWidth + 10), geneYLocation + 11 + 2*ind*self.EACH_GENE_HEIGHT*zoom), - font=ImageFont.truetype(font=VERDANA_FILE, size=12), + font=ImageFont.truetype( + font=VERDANA_FILE, size=12), fill=BLACK) im_drawer.text( text="%2.2f" % (expr), xy=((xLeftOffset + plotWidth + 60), geneYLocation + 11 + 2*ind*self.EACH_GENE_HEIGHT*zoom), - font=ImageFont.truetype(font=VERDANA_FILE, size=12), + font=ImageFont.truetype( + font=VERDANA_FILE, size=12), fill=BLACK) # END HaplotypeAnalyst @@ -1719,12 +1809,16 @@ class DisplayMappingResults: # but it makes the HTML huge, and takes forever to render the page in the first place) # Draw the bands that you can click on to go to UCSC / Ensembl MAX_CLICKABLE_REGION_DIVISIONS = 100 - clickableRegionLabelFont = ImageFont.truetype(font=VERDANA_FILE, size=9) - pixelStep = max(5, int(float(plotWidth) / MAX_CLICKABLE_REGION_DIVISIONS)) + clickableRegionLabelFont = ImageFont.truetype( + font=VERDANA_FILE, size=9) + pixelStep = max( + 5, int(float(plotWidth) / MAX_CLICKABLE_REGION_DIVISIONS)) # pixelStep: every N pixels, we make a new clickable area for the user to go to that area of the genome. - numBasesCurrentlyOnScreen = self.kONE_MILLION * abs(startMb - endMb) # Number of bases on screen now - flankingWidthInBases = int (min((float(numBasesCurrentlyOnScreen) / 2.0), (5*self.kONE_MILLION))) + numBasesCurrentlyOnScreen = self.kONE_MILLION * \ + abs(startMb - endMb) # Number of bases on screen now + flankingWidthInBases = int ( + min((float(numBasesCurrentlyOnScreen) / 2.0), (5*self.kONE_MILLION))) webqtlZoomWidth = numBasesCurrentlyOnScreen / 16.0 # Flanking width should be such that we either zoom in to a 10 million base region, or we show the clicked region at the same scale as we are currently seeing. 
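The hunk above reworks the arithmetic that turns the currently visible genome region into the zoom window used by the clickable bands. A minimal sketch of that logic, with hypothetical standalone names; the bases-per-Mb factor, the 5 Mb flank cap, the half-screen flank, and the 1/16 in-app zoom width mirror the constants in the diff:

    ONE_MILLION = 1_000_000  # bases per Mb, as in self.kONE_MILLION

    def click_window_mb(start_mb, end_mb, clicked_mb):
        """Window (in Mb) opened when a clickable band at clicked_mb is selected."""
        bases_on_screen = ONE_MILLION * abs(end_mb - start_mb)
        # Flank the clicked base by half the visible region, capped at 5 Mb per side.
        flank = int(min(bases_on_screen / 2.0, 5 * ONE_MILLION))
        # (The in-app rangeView link instead uses a width of bases_on_screen / 16.)
        base = clicked_mb * ONE_MILLION
        return max(0, base - flank) / ONE_MILLION, (base + flank) / ONE_MILLION

    # Example: viewing 0-40 Mb and clicking at 12 Mb gives roughly the 7.0-17.0 Mb window.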
@@ -1733,23 +1827,33 @@ class DisplayMappingResults: paddingTop = yTopOffset if self.dataset.group.species == "mouse" or self.dataset.group.species == "rat": - phenogenPaddingTop = paddingTop + (self.BAND_HEIGHT + self.BAND_SPACING) - ucscPaddingTop = paddingTop + 2 * (self.BAND_HEIGHT + self.BAND_SPACING) - ensemblPaddingTop = paddingTop + 3 * (self.BAND_HEIGHT + self.BAND_SPACING) + phenogenPaddingTop = paddingTop + \ + (self.BAND_HEIGHT + self.BAND_SPACING) + ucscPaddingTop = paddingTop + 2 * \ + (self.BAND_HEIGHT + self.BAND_SPACING) + ensemblPaddingTop = paddingTop + 3 * \ + (self.BAND_HEIGHT + self.BAND_SPACING) else: - ucscPaddingTop = paddingTop + (self.BAND_HEIGHT + self.BAND_SPACING) - ensemblPaddingTop = paddingTop + 2 * (self.BAND_HEIGHT + self.BAND_SPACING) + ucscPaddingTop = paddingTop + \ + (self.BAND_HEIGHT + self.BAND_SPACING) + ensemblPaddingTop = paddingTop + 2 * \ + (self.BAND_HEIGHT + self.BAND_SPACING) if zoom == 1: for pixel in range(xLeftOffset, xLeftOffset + plotWidth, pixelStep): - calBase = self.kONE_MILLION * (startMb + (endMb - startMb) * (pixel - xLeftOffset - 0.0) / plotWidth) + calBase = self.kONE_MILLION * \ + (startMb + (endMb - startMb) * \ + (pixel - xLeftOffset - 0.0) / plotWidth) xBrowse1 = pixel - xBrowse2 = min(xLeftOffset + plotWidth, (pixel + pixelStep - 1)) + xBrowse2 = min(xLeftOffset + plotWidth, + (pixel + pixelStep - 1)) - WEBQTL_COORDS = "%d, %d, %d, %d" % (xBrowse1, paddingTop, xBrowse2, (paddingTop + self.BAND_HEIGHT)) - WEBQTL_HREF = "javascript:rangeView('%s', %f, %f)" % (self.selectedChr - 1, max(0, (calBase - webqtlZoomWidth)) / 1000000.0, (calBase + webqtlZoomWidth) / 1000000.0) + WEBQTL_COORDS = "%d, %d, %d, %d" % ( + xBrowse1, paddingTop, xBrowse2, (paddingTop + self.BAND_HEIGHT)) + WEBQTL_HREF = "javascript:rangeView('%s', %f, %f)" % (self.selectedChr - 1, max( + 0, (calBase - webqtlZoomWidth)) / 1000000.0, (calBase + webqtlZoomWidth) / 1000000.0) WEBQTL_TITLE = "Click to view this section of the genome in WebQTL" gifmap.append( @@ -1764,15 +1868,19 @@ class DisplayMappingResults: outline=self.CLICKABLE_WEBQTL_REGION_COLOR, fill=self.CLICKABLE_WEBQTL_REGION_COLOR) im_drawer.line( - xy=((xBrowse1, paddingTop), (xBrowse1, (paddingTop + self.BAND_HEIGHT))), + xy=((xBrowse1, paddingTop), (xBrowse1, + (paddingTop + self.BAND_HEIGHT))), fill=self.CLICKABLE_WEBQTL_REGION_OUTLINE_COLOR) if self.dataset.group.species == "mouse" or self.dataset.group.species == "rat": - PHENOGEN_COORDS = "%d, %d, %d, %d" % (xBrowse1, phenogenPaddingTop, xBrowse2, (phenogenPaddingTop + self.BAND_HEIGHT)) + PHENOGEN_COORDS = "%d, %d, %d, %d" % ( + xBrowse1, phenogenPaddingTop, xBrowse2, (phenogenPaddingTop + self.BAND_HEIGHT)) if self.dataset.group.species == "mouse": - PHENOGEN_HREF = "https://phenogen.org/gene.jsp?speciesCB=Mm&auto=Y&geneTxt=chr%s:%d-%d&genomeVer=mm10" % (self.selectedChr, max(0, calBase - flankingWidthInBases), calBase + flankingWidthInBases) + PHENOGEN_HREF = "https://phenogen.org/gene.jsp?speciesCB=Mm&auto=Y&geneTxt=chr%s:%d-%d&genomeVer=mm10" % ( + self.selectedChr, max(0, calBase - flankingWidthInBases), calBase + flankingWidthInBases) else: - PHENOGEN_HREF = "https://phenogen.org/gene.jsp?speciesCB=Mm&auto=Y&geneTxt=chr%s:%d-%d&genomeVer=mm10" % (self.selectedChr, max(0, calBase - flankingWidthInBases), calBase + flankingWidthInBases) + PHENOGEN_HREF = "https://phenogen.org/gene.jsp?speciesCB=Mm&auto=Y&geneTxt=chr%s:%d-%d&genomeVer=mm10" % ( + self.selectedChr, max(0, calBase - flankingWidthInBases), calBase + flankingWidthInBases) 
PHENOGEN_TITLE = "Click to view this section of the genome in PhenoGen" gifmap.append( HtmlGenWrapper.create_area_tag( @@ -1786,14 +1894,18 @@ class DisplayMappingResults: outline=self.CLICKABLE_PHENOGEN_REGION_COLOR, fill=self.CLICKABLE_PHENOGEN_REGION_COLOR) im_drawer.line( - xy=((xBrowse1, phenogenPaddingTop), (xBrowse1, (phenogenPaddingTop + self.BAND_HEIGHT))), + xy=((xBrowse1, phenogenPaddingTop), (xBrowse1, + (phenogenPaddingTop + self.BAND_HEIGHT))), fill=self.CLICKABLE_PHENOGEN_REGION_OUTLINE_COLOR) - UCSC_COORDS = "%d, %d, %d, %d" % (xBrowse1, ucscPaddingTop, xBrowse2, (ucscPaddingTop + self.BAND_HEIGHT)) + UCSC_COORDS = "%d, %d, %d, %d" % ( + xBrowse1, ucscPaddingTop, xBrowse2, (ucscPaddingTop + self.BAND_HEIGHT)) if self.dataset.group.species == "mouse": - UCSC_HREF = "http://genome.ucsc.edu/cgi-bin/hgTracks?db=%s&position=chr%s:%d-%d&hgt.customText=%s/snp/chr%s" % (self._ucscDb, self.selectedChr, max(0, calBase - flankingWidthInBases), calBase + flankingWidthInBases, webqtlConfig.PORTADDR, self.selectedChr) + UCSC_HREF = "http://genome.ucsc.edu/cgi-bin/hgTracks?db=%s&position=chr%s:%d-%d&hgt.customText=%s/snp/chr%s" % ( + self._ucscDb, self.selectedChr, max(0, calBase - flankingWidthInBases), calBase + flankingWidthInBases, webqtlConfig.PORTADDR, self.selectedChr) else: - UCSC_HREF = "http://genome.ucsc.edu/cgi-bin/hgTracks?db=%s&position=chr%s:%d-%d" % (self._ucscDb, self.selectedChr, max(0, calBase - flankingWidthInBases), calBase + flankingWidthInBases) + UCSC_HREF = "http://genome.ucsc.edu/cgi-bin/hgTracks?db=%s&position=chr%s:%d-%d" % ( + self._ucscDb, self.selectedChr, max(0, calBase - flankingWidthInBases), calBase + flankingWidthInBases) UCSC_TITLE = "Click to view this section of the genome in the UCSC Genome Browser" gifmap.append( HtmlGenWrapper.create_area_tag( @@ -1811,11 +1923,14 @@ class DisplayMappingResults: (xBrowse1, (ucscPaddingTop + self.BAND_HEIGHT))), fill=self.CLICKABLE_UCSC_REGION_OUTLINE_COLOR) - ENSEMBL_COORDS = "%d, %d, %d, %d" % (xBrowse1, ensemblPaddingTop, xBrowse2, (ensemblPaddingTop + self.BAND_HEIGHT)) + ENSEMBL_COORDS = "%d, %d, %d, %d" % ( + xBrowse1, ensemblPaddingTop, xBrowse2, (ensemblPaddingTop + self.BAND_HEIGHT)) if self.dataset.group.species == "mouse": - ENSEMBL_HREF = "http://www.ensembl.org/Mus_musculus/contigview?highlight=&chr=%s&vc_start=%d&vc_end=%d&x=35&y=12" % (self.selectedChr, max(0, calBase - flankingWidthInBases), calBase + flankingWidthInBases) + ENSEMBL_HREF = "http://www.ensembl.org/Mus_musculus/contigview?highlight=&chr=%s&vc_start=%d&vc_end=%d&x=35&y=12" % ( + self.selectedChr, max(0, calBase - flankingWidthInBases), calBase + flankingWidthInBases) else: - ENSEMBL_HREF = "http://www.ensembl.org/Rattus_norvegicus/contigview?chr=%s&start=%d&end=%d" % (self.selectedChr, max(0, calBase - flankingWidthInBases), calBase + flankingWidthInBases) + ENSEMBL_HREF = "http://www.ensembl.org/Rattus_norvegicus/contigview?chr=%s&start=%d&end=%d" % ( + self.selectedChr, max(0, calBase - flankingWidthInBases), calBase + flankingWidthInBases) ENSEMBL_TITLE = "Click to view this section of the genome in the Ensembl Genome Browser" gifmap.append(HtmlGenWrapper.create_area_tag( shape='rect', @@ -1841,19 +1956,23 @@ class DisplayMappingResults: if self.dataset.group.species == "mouse" or self.dataset.group.species == "rat": im_drawer.text( text="Click to view the corresponding section of the genome in PhenoGen", - xy=((xLeftOffset + 10), phenogenPaddingTop), # + self.BAND_HEIGHT/2), + # + self.BAND_HEIGHT/2), + xy=((xLeftOffset + 10), 
phenogenPaddingTop), font=clickableRegionLabelFont, fill=self.CLICKABLE_PHENOGEN_TEXT_COLOR) im_drawer.text( text="Click to view the corresponding section of the genome in the UCSC Genome Browser", - xy=((xLeftOffset + 10), ucscPaddingTop), # + self.BAND_HEIGHT/2), + # + self.BAND_HEIGHT/2), + xy=((xLeftOffset + 10), ucscPaddingTop), font=clickableRegionLabelFont, fill=self.CLICKABLE_UCSC_TEXT_COLOR) im_drawer.text( text="Click to view the corresponding section of the genome in the Ensembl Genome Browser", - xy=((xLeftOffset + 10), ensemblPaddingTop), # + self.BAND_HEIGHT/2), + # + self.BAND_HEIGHT/2), + xy=((xLeftOffset + 10), ensemblPaddingTop), font=clickableRegionLabelFont, fill=self.CLICKABLE_ENSEMBL_TEXT_COLOR) # draw the gray text - chrFont = ImageFont.truetype(font=VERDANA_BOLD_FILE, size=26 * zoom) + chrFont = ImageFont.truetype( + font=VERDANA_BOLD_FILE, size=26 * zoom) chrX = xLeftOffset + plotWidth - 2 - im_drawer.textsize( "Chr %s" % self.ChrList[self.selectedChr][0], font=chrFont)[0] im_drawer.text( @@ -1922,8 +2041,10 @@ class DisplayMappingResults: (Xc, yZero + xMajorTickHeight)), fill=xAxisTickMarkColor, width=X_MAJOR_TICK_THICKNESS) # Draw the MAJOR tick mark - labelStr = str(formatStr % _Mb) # What Mbase location to put on the label - strWidth, strHeight = im_drawer.textsize(labelStr, font=MBLabelFont) + # What Mbase location to put on the label + labelStr = str(formatStr % _Mb) + strWidth, strHeight = im_drawer.textsize( + labelStr, font=MBLabelFont) drawStringXc = (Xc - (strWidth / 2.0)) im_drawer.text(xy=(drawStringXc, strYLoc), text=labelStr, font=MBLabelFont, @@ -1956,9 +2077,11 @@ class DisplayMappingResults: canvas, text=str(tickdists), font=MBLabelFont, xy=(startPosX + tickdists * plotXScale, yZero + 10 * zoom), fill=BLACK, angle=270) - startPosX += (self.ChrLengthDistList[i] + self.GraphInterval) * plotXScale + startPosX += (self.ChrLengthDistList[i] + \ + self.GraphInterval) * plotXScale - megabaseLabelFont = ImageFont.truetype(font=VERDANA_FILE, size=int(18 * zoom * 1.5)) + megabaseLabelFont = ImageFont.truetype( + font=VERDANA_FILE, size=int(18 * zoom * 1.5)) im_drawer.text( text="Megabases", xy=( @@ -1984,7 +2107,8 @@ class DisplayMappingResults: if _locus.cM != preLpos: distinctCount += 1 preLpos = _locus.cM - thisChr.append([_locus.name, _locus.cM - Locus0CM]) + thisChr.append( + [_locus.name, _locus.cM - Locus0CM]) else: for j in (0, nLoci / 4, nLoci / 2, nLoci*3/4, -1): while _chr[j].name == ' - ': @@ -1992,7 +2116,8 @@ class DisplayMappingResults: if _chr[j].cM != preLpos: distinctCount += 1 preLpos = _chr[j].cM - thisChr.append([_chr[j].name, _chr[j].cM - Locus0CM]) + thisChr.append( + [_chr[j].name, _chr[j].cM - Locus0CM]) ChrAInfo.append(thisChr) else: for i, _chr in enumerate(self.genotype): @@ -2004,7 +2129,8 @@ class DisplayMappingResults: if _locus.cM != preLpos: distinctCount += 1 preLpos = _locus.cM - thisChr.append([_locus.name, _locus.cM - Locus0CM]) + thisChr.append( + [_locus.name, _locus.cM - Locus0CM]) ChrAInfo.append(thisChr) stepA = (plotWidth + 0.0) / distinctCount @@ -2053,7 +2179,8 @@ class DisplayMappingResults: outline=rectColor, fill=rectColor, width=0) COORDS = "%d,%d,%d,%d" % (xLeftOffset+offsetA-LRectHeight, yZero+40+Zorder*(LRectWidth+3),\ xLeftOffset + offsetA, yZero +40+Zorder*(LRectWidth+3)+LRectWidth) - HREF = "/show_trait?trait_id=%s&dataset=%s" % (Lname, self.dataset.group.name + "Geno") + HREF = "/show_trait?trait_id=%s&dataset=%s" % ( + Lname, self.dataset.group.name + "Geno") 
#HREF="javascript:showDatabase3('%s','%s','%s','');" % (showLocusForm,fd.RISet+"Geno", Lname) Areas = HtmlGenWrapper.create_area_tag( shape='rect', @@ -2067,9 +2194,11 @@ class DisplayMappingResults: im_drawer.line( xy=((startPosX, yZero), (startPosX, yZero + 40)), fill=lineColor) - startPosX += (self.ChrLengthDistList[j] + self.GraphInterval) * plotXScale + startPosX += (self.ChrLengthDistList[j] + \ + self.GraphInterval) * plotXScale - centimorganLabelFont = ImageFont.truetype(font=VERDANA_FILE, size=int(18 * zoom * 1.5)) + centimorganLabelFont = ImageFont.truetype( + font=VERDANA_FILE, size=int(18 * zoom * 1.5)) im_drawer.text( text="Centimorgans", xy=(xLeftOffset + (plotWidth - im_drawer.textsize( @@ -2100,29 +2229,39 @@ class DisplayMappingResults: # ZS: This is a mess, but I don't know a better way to account for different mapping methods returning results in different formats + the option to change between LRS and LOD if self.lrsMax <= 0: # sliding scale if "lrs_value" in self.qtlresults[0]: - LRS_LOD_Max = max([result['lrs_value'] for result in self.qtlresults]) + LRS_LOD_Max = max([result['lrs_value'] + for result in self.qtlresults]) if self.LRS_LOD == "LOD" or self.LRS_LOD == "-logP": LRS_LOD_Max = LRS_LOD_Max / self.LODFACTOR if self.permChecked and self.nperm > 0 and not self.multipleInterval: - self.significant = min(self.significant / self.LODFACTOR, webqtlConfig.MAXLRS) - self.suggestive = min(self.suggestive / self.LODFACTOR, webqtlConfig.MAXLRS) + self.significant = min( + self.significant / self.LODFACTOR, webqtlConfig.MAXLRS) + self.suggestive = min( + self.suggestive / self.LODFACTOR, webqtlConfig.MAXLRS) else: if self.permChecked and self.nperm > 0 and not self.multipleInterval: - self.significant = min(self.significant, webqtlConfig.MAXLRS) - self.suggestive = min(self.suggestive, webqtlConfig.MAXLRS) + self.significant = min( + self.significant, webqtlConfig.MAXLRS) + self.suggestive = min( + self.suggestive, webqtlConfig.MAXLRS) else: pass else: - LRS_LOD_Max = max([result['lod_score'] for result in self.qtlresults]) + LRS_LOD_Max = max([result['lod_score'] + for result in self.qtlresults]) if self.LRS_LOD == "LRS": LRS_LOD_Max = LRS_LOD_Max * self.LODFACTOR if self.permChecked and self.nperm > 0 and not self.multipleInterval: - self.significant = min(self.significant * self.LODFACTOR, webqtlConfig.MAXLRS) - self.suggestive = min(self.suggestive * self.LODFACTOR, webqtlConfig.MAXLRS) + self.significant = min( + self.significant * self.LODFACTOR, webqtlConfig.MAXLRS) + self.suggestive = min( + self.suggestive * self.LODFACTOR, webqtlConfig.MAXLRS) else: if self.permChecked and self.nperm > 0 and not self.multipleInterval: - self.significant = min(self.significant, webqtlConfig.MAXLRS) - self.suggestive = min(self.suggestive, webqtlConfig.MAXLRS) + self.significant = min( + self.significant, webqtlConfig.MAXLRS) + self.suggestive = min( + self.suggestive, webqtlConfig.MAXLRS) else: pass @@ -2143,7 +2282,8 @@ class DisplayMappingResults: self.js_data = json.dumps(js_data) LRSScaleFont = ImageFont.truetype(font=VERDANA_FILE, size=16 * zoom) - LRSLODFont = ImageFont.truetype(font=VERDANA_FILE, size=int(18 * zoom * 1.5)) + LRSLODFont = ImageFont.truetype( + font=VERDANA_FILE, size=int(18 * zoom * 1.5)) yZero = yTopOffset + plotHeight # LRSHeightThresh = drawAreaHeight @@ -2188,7 +2328,8 @@ class DisplayMappingResults: # draw the "LRS" or "LOD" string to the left of the axis LRSScaleFont = ImageFont.truetype(font=VERDANA_FILE, size=16 * zoom) - LRSLODFont = 
ImageFont.truetype(font=VERDANA_FILE, size=int(18 * zoom * 1.5)) + LRSLODFont = ImageFont.truetype( + font=VERDANA_FILE, size=int(18 * zoom * 1.5)) yZero = yTopOffset + plotHeight # TEXT_X_DISPLACEMENT = -20 @@ -2239,7 +2380,8 @@ class DisplayMappingResults: # ZS: I don't know if what I did here with this inner function is clever or overly complicated, but it's the only way I could think of to avoid duplicating the code inside this function def add_suggestive_significant_lines_and_legend(start_pos_x, chr_length_dist): - rightEdge = int(start_pos_x + chr_length_dist * plotXScale - self.SUGGESTIVE_WIDTH / 1.5) + rightEdge = int(start_pos_x + chr_length_dist * \ + plotXScale - self.SUGGESTIVE_WIDTH / 1.5) im_drawer.line( xy=((start_pos_x + self.SUGGESTIVE_WIDTH / 1.5, suggestiveY), (rightEdge, suggestiveY)), @@ -2253,15 +2395,19 @@ class DisplayMappingResults: width=self.SIGNIFICANT_WIDTH * zoom # , clipX=(xLeftOffset, xLeftOffset + plotWidth-2) ) - sugg_coords = "%d, %d, %d, %d" % (start_pos_x, suggestiveY - 2, rightEdge + 2 * zoom, suggestiveY + 2) - sig_coords = "%d, %d, %d, %d" % (start_pos_x, significantY - 2, rightEdge + 2 * zoom, significantY + 2) + sugg_coords = "%d, %d, %d, %d" % ( + start_pos_x, suggestiveY - 2, rightEdge + 2 * zoom, suggestiveY + 2) + sig_coords = "%d, %d, %d, %d" % ( + start_pos_x, significantY - 2, rightEdge + 2 * zoom, significantY + 2) if self.LRS_LOD == 'LRS': sugg_title = "Suggestive LRS = %0.2f" % self.suggestive sig_title = "Significant LRS = %0.2f" % self.significant else: - sugg_title = "Suggestive LOD = %0.2f" % (self.suggestive / 4.61) - sig_title = "Significant LOD = %0.2f" % (self.significant / 4.61) + sugg_title = "Suggestive LOD = %0.2f" % ( + self.suggestive / 4.61) + sig_title = "Significant LOD = %0.2f" % ( + self.significant / 4.61) Areas1 = HtmlGenWrapper.create_area_tag( shape='rect', coords=sugg_coords, @@ -2273,24 +2419,28 @@ class DisplayMappingResults: gifmap.append(Areas1) gifmap.append(Areas2) - start_pos_x += (chr_length_dist + self.GraphInterval) * plotXScale + start_pos_x += (chr_length_dist + \ + self.GraphInterval) * plotXScale return start_pos_x for i, _chr in enumerate(self.genotype): if self.selectedChr != -1: if _chr.name == self.ChrList[self.selectedChr][0]: - startPosX = add_suggestive_significant_lines_and_legend(startPosX, self.ChrLengthDistList[0]) + startPosX = add_suggestive_significant_lines_and_legend( + startPosX, self.ChrLengthDistList[0]) break else: continue else: - startPosX = add_suggestive_significant_lines_and_legend(startPosX, self.ChrLengthDistList[i]) + startPosX = add_suggestive_significant_lines_and_legend( + startPosX, self.ChrLengthDistList[i]) if self.multipleInterval: lrsEdgeWidth = 1 else: if self.additiveChecked: - additiveMax = max([abs(X['additive']) for X in self.qtlresults]) + additiveMax = max([abs(X['additive']) + for X in self.qtlresults]) lrsEdgeWidth = 3 if zoom == 2: @@ -2300,7 +2450,8 @@ class DisplayMappingResults: AdditiveCoordXY = [] DominanceCoordXY = [] - symbolFont = ImageFont.truetype(font=FNT_BS_FILE, size=5) # ZS: For Manhattan Plot + symbolFont = ImageFont.truetype( + font=FNT_BS_FILE, size=5) # ZS: For Manhattan Plot previous_chr = 1 previous_chr_as_int = 0 @@ -2332,7 +2483,8 @@ class DisplayMappingResults: if Xc == Xc0: # genotype , locus distance is 0 Xcm = Xc else: - Xcm = (yZero - Yc0) / ((Yc - Yc0) / (Xc - Xc0)) + Xc0 + Xcm = (yZero - Yc0) / \ + ((Yc - Yc0) / (Xc - Xc0)) + Xc0 if Yc0 < yZero: im_drawer.line( xy=((Xc0, Yc0), (Xcm, yZero)), @@ -2340,7 +2492,8 @@ class 
DisplayMappingResults: # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) im_drawer.line( - xy=((Xcm, yZero), (Xc, yZero - (Yc - yZero))), + xy=((Xcm, yZero), + (Xc, yZero - (Yc - yZero))), fill=minusColor, width=lineWidth # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) @@ -2391,7 +2544,8 @@ class DisplayMappingResults: AdditiveCoordXY = [] previous_chr = qtlresult['chr'] previous_chr_as_int += 1 - newStartPosX = (self.ChrLengthDistList[previous_chr_as_int - 1] + self.GraphInterval) * plotXScale + newStartPosX = ( + self.ChrLengthDistList[previous_chr_as_int - 1] + self.GraphInterval) * plotXScale if newStartPosX != oldStartPosX: startPosX += newStartPosX oldStartPosX = newStartPosX @@ -2408,10 +2562,12 @@ class DisplayMappingResults: if self.genotype.filler: if self.selectedChr != -1: start_cm = self.genotype[self.selectedChr - 1][0].cM - Xc = startPosX + (qtlresult['Mb'] - start_cm) * plotXScale + Xc = startPosX + \ + (qtlresult['Mb'] - start_cm) * plotXScale else: start_cm = self.genotype[previous_chr_as_int][0].cM - Xc = startPosX + ((qtlresult['Mb'] - start_cm - startMb) * plotXScale) * (((qtlresult['Mb'] - start_cm - startMb) * plotXScale) / ((qtlresult['Mb'] - start_cm - startMb + self.GraphInterval) * plotXScale)) + Xc = startPosX + ((qtlresult['Mb'] - start_cm - startMb) * plotXScale) * ( + ((qtlresult['Mb'] - start_cm - startMb) * plotXScale) / ((qtlresult['Mb'] - start_cm - startMb + self.GraphInterval) * plotXScale)) else: Xc = startPosX + (qtlresult['Mb'] - startMb) * plotXScale @@ -2425,17 +2581,23 @@ class DisplayMappingResults: if self.LRS_LOD == "LOD" or self.LRS_LOD == "-logP": if qtlresult['lrs_value'] > 460 or qtlresult['lrs_value'] == 'inf': #Yc = yZero - webqtlConfig.MAXLRS*LRSHeightThresh/(LRSAxisList[-1]*self.LODFACTOR) - Yc = yZero - webqtlConfig.MAXLRS * LRSHeightThresh / (LRS_LOD_Max * self.LODFACTOR) + Yc = yZero - webqtlConfig.MAXLRS * \ + LRSHeightThresh / \ + (LRS_LOD_Max * self.LODFACTOR) else: #Yc = yZero - qtlresult['lrs_value']*LRSHeightThresh/(LRSAxisList[-1]*self.LODFACTOR) - Yc = yZero - qtlresult['lrs_value'] * LRSHeightThresh / (LRS_LOD_Max * self.LODFACTOR) + Yc = yZero - \ + qtlresult['lrs_value'] * LRSHeightThresh / \ + (LRS_LOD_Max * self.LODFACTOR) else: if qtlresult['lrs_value'] > 460 or qtlresult['lrs_value'] == 'inf': #Yc = yZero - webqtlConfig.MAXLRS*LRSHeightThresh/LRSAxisList[-1] Yc = yZero - webqtlConfig.MAXLRS * LRSHeightThresh / LRS_LOD_Max else: #Yc = yZero - qtlresult['lrs_value']*LRSHeightThresh/LRSAxisList[-1] - Yc = yZero - qtlresult['lrs_value'] * LRSHeightThresh / LRS_LOD_Max + Yc = yZero - \ + qtlresult['lrs_value'] * \ + LRSHeightThresh / LRS_LOD_Max else: if qtlresult['lod_score'] > 100 or qtlresult['lod_score'] == 'inf': #Yc = yZero - webqtlConfig.MAXLRS*LRSHeightThresh/LRSAxisList[-1] @@ -2443,10 +2605,14 @@ class DisplayMappingResults: else: if self.LRS_LOD == "LRS": #Yc = yZero - qtlresult['lod_score']*self.LODFACTOR*LRSHeightThresh/LRSAxisList[-1] - Yc = yZero - qtlresult['lod_score'] * self.LODFACTOR * LRSHeightThresh / LRS_LOD_Max + Yc = yZero - \ + qtlresult['lod_score'] * self.LODFACTOR * \ + LRSHeightThresh / LRS_LOD_Max else: #Yc = yZero - qtlresult['lod_score']*LRSHeightThresh/LRSAxisList[-1] - Yc = yZero - qtlresult['lod_score'] * LRSHeightThresh / LRS_LOD_Max + Yc = yZero - \ + qtlresult['lod_score'] * \ + LRSHeightThresh / LRS_LOD_Max if self.manhattan_plot == True: if self.color_scheme == "single": @@ -2462,7 +2628,8 @@ class DisplayMappingResults: im_drawer.text( text="5", xy=( - Xc - 
im_drawer.textsize("5", font=symbolFont)[0] / 2 + 1, + Xc - im_drawer.textsize("5", + font=symbolFont)[0] / 2 + 1, Yc - 4), fill=point_color, font=symbolFont) else: @@ -2471,7 +2638,8 @@ class DisplayMappingResults: if not self.multipleInterval and self.additiveChecked: if additiveMax == 0.0: additiveMax = 0.000001 - Yc = yZero - qtlresult['additive'] * AdditiveHeightThresh / additiveMax + Yc = yZero - qtlresult['additive'] * \ + AdditiveHeightThresh / additiveMax AdditiveCoordXY.append((Xc, Yc)) m += 1 @@ -2496,7 +2664,8 @@ class DisplayMappingResults: if Xc == Xc0: # genotype , locus distance is 0 Xcm = Xc else: - Xcm = (yZero - Yc0) / ((Yc - Yc0) / (Xc - Xc0)) + Xc0 + Xcm = (yZero - Yc0) / \ + ((Yc - Yc0) / (Xc - Xc0)) + Xc0 if Yc0 < yZero: im_drawer.line( xy=((Xc0, Yc0), (Xcm, yZero)), @@ -2561,7 +2730,8 @@ class DisplayMappingResults: if Xc == Xc0: # genotype , locus distance is 0 Xcm = Xc else: - Xcm = (yZero - Yc0) / ((Yc - Yc0) / (Xc - Xc0)) + Xc0 + Xcm = (yZero - Yc0) / \ + ((Yc - Yc0) / (Xc - Xc0)) + Xc0 if Yc0 < yZero: im_drawer.line( xy=((Xc0, Yc0), (Xcm, yZero)), @@ -2616,9 +2786,11 @@ class DisplayMappingResults: # draw additive scale if not self.multipleInterval and self.additiveChecked: - additiveScaleFont = ImageFont.truetype(font=VERDANA_FILE, size=16 * zoom) + additiveScaleFont = ImageFont.truetype( + font=VERDANA_FILE, size=16 * zoom) additiveScale = Plot.detScaleOld(0, additiveMax) - additiveStep = (additiveScale[1] - additiveScale[0]) / additiveScale[2] + additiveStep = (additiveScale[1] - \ + additiveScale[0]) / additiveScale[2] additiveAxisList = Plot.frange(0, additiveScale[1], additiveStep) addPlotScale = AdditiveHeightThresh / additiveMax TEXT_Y_DISPLACEMENT = -8 @@ -2633,7 +2805,8 @@ class DisplayMappingResults: scaleStr = "%2.3f" % item im_drawer.text( text=scaleStr, - xy= (xLeftOffset + plotWidth + 6, additiveY + TEXT_Y_DISPLACEMENT), + xy= (xLeftOffset + plotWidth + 6, + additiveY + TEXT_Y_DISPLACEMENT), font=additiveScaleFont, fill=self.ADDITIVE_COLOR_POSITIVE) im_drawer.line( @@ -2690,14 +2863,16 @@ class DisplayMappingResults: plotXScale = plotWidth / drawRegionDistance else: # multiple chromosome view - plotXScale = plotWidth / ((len(self.genotype) - 1) * self.GraphInterval + drawRegionDistance) + plotXScale = plotWidth / \ + ((len(self.genotype) - 1) * self.GraphInterval + drawRegionDistance) startPosX = xLeftOffset if fontZoom == 1.5: chrFontZoom = 2 else: chrFontZoom = 1 - chrLabelFont = ImageFont.truetype(font=VERDANA_FILE, size=24 * chrFontZoom) + chrLabelFont = ImageFont.truetype( + font=VERDANA_FILE, size=24 * chrFontZoom) for i, _chr in enumerate(self.genotype): if (i % 2 == 0): @@ -2712,14 +2887,18 @@ class DisplayMappingResults: outline=GAINSBORO, fill=theBackColor) - chrNameWidth, chrNameHeight = im_drawer.textsize(_chr.name, font=chrLabelFont) - chrStartPix = startPosX + (self.ChrLengthDistList[i] * plotXScale - chrNameWidth) / 2 - chrEndPix = startPosX + (self.ChrLengthDistList[i] * plotXScale + chrNameWidth) / 2 + chrNameWidth, chrNameHeight = im_drawer.textsize( + _chr.name, font=chrLabelFont) + chrStartPix = startPosX + \ + (self.ChrLengthDistList[i] * plotXScale - chrNameWidth) / 2 + chrEndPix = startPosX + \ + (self.ChrLengthDistList[i] * plotXScale + chrNameWidth) / 2 TEXT_Y_DISPLACEMENT = 0 im_drawer.text(xy=(chrStartPix, yTopOffset + TEXT_Y_DISPLACEMENT), text=_chr.name, font=chrLabelFont, fill=BLACK) - COORDS = "%d,%d,%d,%d" % (chrStartPix, yTopOffset, chrEndPix, yTopOffset + 20) + COORDS = "%d,%d,%d,%d" % ( + chrStartPix, 
yTopOffset, chrEndPix, yTopOffset + 20) # add by NL 09-03-2010 HREF = "javascript:chrView(%d,%s);" % (i, self.ChrLengthMbList) @@ -2729,7 +2908,8 @@ class DisplayMappingResults: coords=COORDS, href=HREF) gifmap.append(Areas) - startPosX += (self.ChrLengthDistList[i] + self.GraphInterval) * plotXScale + startPosX += (self.ChrLengthDistList[i] + \ + self.GraphInterval) * plotXScale return plotXScale @@ -2746,7 +2926,8 @@ class DisplayMappingResults: perm_output = self.perm_output filename = webqtlUtil.genRandStr("Reg_") - Plot.plotBar(myCanvas, perm_output, XLabel=self.LRS_LOD, YLabel='Frequency', title=' Histogram of Permutation Test') + Plot.plotBar(myCanvas, perm_output, XLabel=self.LRS_LOD, + YLabel='Frequency', title=' Histogram of Permutation Test') myCanvas.save("{}.gif".format(GENERATED_IMAGE_DIR + filename), format='gif') @@ -2825,7 +3006,8 @@ class DisplayMappingResults: txEnd = theGO["TxEnd"] theGO["snpDensity"] = theGO["snpCount"] / geneLength if self.ALEX_DEBUG_BOOL_PRINT_GENE_LIST: - geneIdString = 'http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?db=gene&cmd=Retrieve&dopt=Graphics&list_uids=%s' % theGO["GeneID"] + geneIdString = 'http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?db=gene&cmd=Retrieve&dopt=Graphics&list_uids=%s' % theGO[ + "GeneID"] if theGO["snpCount"]: snpString = HT.Link( @@ -2842,7 +3024,9 @@ class DisplayMappingResults: else: snpString = 0 - mouseStartString = "http://genome.ucsc.edu/cgi-bin/hgTracks?clade=vertebrate&org=Mouse&db=mm9&position=chr" + theGO["Chromosome"] + "%3A" + str(int(theGO["TxStart"] * 1000000.0)) + "-" + str(int(theGO["TxEnd"] * 1000000.0)) + "&pix=620&Submit=submit" + mouseStartString = "http://genome.ucsc.edu/cgi-bin/hgTracks?clade=vertebrate&org=Mouse&db=mm9&position=chr" + \ + theGO["Chromosome"] + "%3A" + str(int(theGO["TxStart"] * 1000000.0)) + "-" + str( + int(theGO["TxEnd"] * 1000000.0)) + "&pix=620&Submit=submit" # the chromosomes for human 1 are 1qXX.XX if theGO['humanGene']: @@ -2854,7 +3038,8 @@ class DisplayMappingResults: humanChr = theGO['humanGene']["Chromosome"] humanTxStart = theGO['humanGene']["TxStart"] - humanStartString = "http://genome.ucsc.edu/cgi-bin/hgTracks?clade=vertebrate&org=Human&db=hg17&position=chr%s:%d-%d" % (humanChr, int(1000000 * theGO['humanGene']["TxStart"]), int(1000000 * theGO['humanGene']["TxEnd"])) + humanStartString = "http://genome.ucsc.edu/cgi-bin/hgTracks?clade=vertebrate&org=Human&db=hg17&position=chr%s:%d-%d" % ( + humanChr, int(1000000 * theGO['humanGene']["TxStart"]), int(1000000 * theGO['humanGene']["TxEnd"])) else: humanStartString = humanChr = humanStartDisplay = "--" @@ -2879,7 +3064,8 @@ class DisplayMappingResults: else: chr_as_int = int(theGO["Chromosome"]) - 1 if refGene: - literatureCorrelationString = str(self.getLiteratureCorrelation(self.cursor, refGene, theGO['GeneID']) or "N/A") + literatureCorrelationString = str(self.getLiteratureCorrelation( + self.cursor, refGene, theGO['GeneID']) or "N/A") this_row = [selectCheck.__str__(), str(tableIterationsCnt), @@ -2947,7 +3133,8 @@ class DisplayMappingResults: if theGO["GeneID"] != "": geneSymbolNCBI = str(HtmlGenWrapper.create_link_tag( - "http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?db=gene&cmd=Retrieve&dopt=Graphics&list_uids={}".format(theGO["GeneID"]), + "http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?db=gene&cmd=Retrieve&dopt=Graphics&list_uids={}".format( + theGO["GeneID"]), theGO["GeneSymbol"], Class="normalsize", target="_blank")) @@ -2960,7 +3147,8 @@ class DisplayMappingResults: chr_as_int = int(theGO["Chromosome"]) - 1 
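# Illustrative sketch, not part of this patch: how the UCSC links assembled above
# turn Mb coordinates into a browser position string. The helper name and defaults
# are hypothetical; the mm9 database, the 1e6 Mb-to-bp scaling, the %3A-encoded
# colon and the pix=620 parameter mirror the strings in the diff.
def ucsc_track_url(chromosome, start_mb, end_mb, db="mm9", org="Mouse"):
    start_bp, end_bp = int(start_mb * 1_000_000), int(end_mb * 1_000_000)
    return ("http://genome.ucsc.edu/cgi-bin/hgTracks?clade=vertebrate"
            f"&org={org}&db={db}&position=chr{chromosome}"
            f"%3A{start_bp}-{end_bp}&pix=620&Submit=submit")
# e.g. ucsc_track_url("1", 3.2, 3.5) -> ...position=chr1%3A3200000-3500000... (mm9)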
geneLength = (float(theGO["TxEnd"]) - float(theGO["TxStart"])) - geneLengthURL = "javascript:rangeView('%s', %f, %f)" % (theGO["Chromosome"], float(theGO["TxStart"]) - (geneLength * 0.1), float(theGO["TxEnd"]) + (geneLength * 0.1)) + geneLengthURL = "javascript:rangeView('%s', %f, %f)" % (theGO["Chromosome"], float( + theGO["TxStart"]) - (geneLength * 0.1), float(theGO["TxEnd"]) + (geneLength * 0.1)) avgExprVal = [] if avgExprVal != "" and avgExprVal: diff --git a/wqflask/wqflask/marker_regression/gemma_mapping.py b/wqflask/wqflask/marker_regression/gemma_mapping.py index 68689104..289f1d5c 100644 --- a/wqflask/wqflask/marker_regression/gemma_mapping.py +++ b/wqflask/wqflask/marker_regression/gemma_mapping.py @@ -149,7 +149,8 @@ def gen_covariates_file(this_dataset, covariates, samples): dataset_name = covariate.split(":")[1] if dataset_name == "Temp": temp_group = trait_name.split("_")[2] - dataset_ob = create_dataset(dataset_name="Temp", dataset_type="Temp", group_name = temp_group) + dataset_ob = create_dataset( + dataset_name="Temp", dataset_type="Temp", group_name = temp_group) else: dataset_ob = create_dataset(covariate.split(":")[1]) trait_ob = create_trait(dataset=dataset_ob, diff --git a/wqflask/wqflask/marker_regression/plink_mapping.py b/wqflask/wqflask/marker_regression/plink_mapping.py index 22a50bb8..2fa80841 100644 --- a/wqflask/wqflask/marker_regression/plink_mapping.py +++ b/wqflask/wqflask/marker_regression/plink_mapping.py @@ -10,7 +10,8 @@ logger = utility.logger.getLogger(__name__) def run_plink(this_trait, dataset, species, vals, maf): - plink_output_filename = webqtlUtil.genRandStr(f"{dataset.group.name}_{this_trait.name}_") + plink_output_filename = webqtlUtil.genRandStr( + f"{dataset.group.name}_{this_trait.name}_") gen_pheno_txt_file(dataset, vals) plink_command = f"{PLINK_COMMAND} --noweb --bfile {flat_files('mapping')}/{dataset.group.name} --no-pheno --no-fid --no-parents --no-sex --maf {maf} --out { TMPDIR}{plink_output_filename} --assoc " @@ -41,7 +42,8 @@ def gen_pheno_txt_file(this_dataset, vals): this_val = -9 else: this_val = vals[i] - outfile.write("0 " + line[1] + " " + line[2] + " " + line[3] + " " + line[4] + " " + str(this_val) + "\n") + outfile.write("0 " + line[1] + " " + line[2] + " " + \ + line[3] + " " + line[4] + " " + str(this_val) + "\n") def gen_pheno_txt_file_plink(this_trait, dataset, vals, pheno_filename=''): @@ -162,7 +164,8 @@ def parse_plink_output(output_filename, species): def build_line_list(line=""): - line_list = line.strip().split(' ') # irregular number of whitespaces between columns + # irregular number of whitespaces between columns + line_list = line.strip().split(' ') line_list = [item for item in line_list if item != ''] line_list = [item.strip() for item in line_list] diff --git a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py index b8fe2c37..f932498f 100644 --- a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py +++ b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py @@ -26,7 +26,8 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo gen_pheno_txt_file(samples, vals, trait_filename) output_filename = (f"{this_dataset.group.name}_GWA_" + - ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) + ''.join(random.choice(string.ascii_uppercase + string.digits) + for _ in range(6)) ) bootstrap_filename = None permu_filename = None @@ -34,19 +35,22 @@ def run_reaper(this_trait, this_dataset, samples, 
vals, json_data, num_perm, boo opt_list = [] if boot_check and num_bootstrap > 0: bootstrap_filename = (f"{this_dataset.group.name}_BOOTSTRAP_" + - ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) + ''.join(random.choice(string.ascii_uppercase + string.digits) + for _ in range(6)) ) opt_list.append("-b") opt_list.append(f"--n_bootstrap {str(num_bootstrap)}") - opt_list.append(f"--bootstrap_output {webqtlConfig.GENERATED_IMAGE_DIR}{bootstrap_filename}.txt") + opt_list.append( + f"--bootstrap_output {webqtlConfig.GENERATED_IMAGE_DIR}{bootstrap_filename}.txt") if num_perm > 0: permu_filename = ("{this_dataset.group.name}_PERM_" + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) ) opt_list.append("-n " + str(num_perm)) - opt_list.append("--permu_output " + webqtlConfig.GENERATED_IMAGE_DIR + permu_filename + ".txt") + opt_list.append( + "--permu_output " + webqtlConfig.GENERATED_IMAGE_DIR + permu_filename + ".txt") if control_marker != "" and do_control == "true": opt_list.append("-c " + control_marker) if manhattan_plot != True: @@ -58,7 +62,8 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo genofile_name, TEMPDIR, trait_filename, - " ".join(opt_list), + " ".join( + opt_list), webqtlConfig.GENERATED_IMAGE_DIR, output_filename)) @@ -67,7 +72,8 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo else: output_filename, permu_filename, bootstrap_filename = output_files - marker_obs, permu_vals, bootstrap_vals = parse_reaper_output(output_filename, permu_filename, bootstrap_filename) + marker_obs, permu_vals, bootstrap_vals = parse_reaper_output( + output_filename, permu_filename, bootstrap_filename) suggestive = 0 significant = 0 @@ -193,7 +199,8 @@ def run_original_reaper(this_trait, dataset, samples_before, trait_vals, json_da suggestive = 0 significant = 0 else: - perm_output = genotype.permutation(strains=trimmed_samples, trait=trimmed_values, nperm=num_perm) + perm_output = genotype.permutation( + strains=trimmed_samples, trait=trimmed_values, nperm=num_perm) suggestive = perm_output[int(num_perm * 0.37 - 1)] significant = perm_output[int(num_perm * 0.95 - 1)] # highly_significant = perm_output[int(num_perm*0.99-1)] #ZS: Currently not used, but leaving it here just in case @@ -257,7 +264,8 @@ def run_original_reaper(this_trait, dataset, samples_before, trait_vals, json_da json_data['markernames'].append(reaper_locus.name) # if self.additive: # self.json_data['additive'].append(qtl.additive) - locus = {"name": reaper_locus.name, "chr": reaper_locus.chr, "cM": reaper_locus.cM, "Mb": reaper_locus.Mb} + locus = {"name": reaper_locus.name, "chr": reaper_locus.chr, + "cM": reaper_locus.cM, "Mb": reaper_locus.Mb} qtl = {"lrs_value": qtl.lrs, "chr": converted_chr, "Mb": reaper_locus.Mb, "cM": reaper_locus.cM, "name": reaper_locus.name, "additive": qtl.additive, "dominance": qtl.dominance} qtl_results.append(qtl) @@ -270,5 +278,6 @@ def natural_sort(marker_list): Changed to return indices instead of values, though, since the same reordering needs to be applied to bootstrap results """ convert = lambda text: int(text) if text.isdigit() else text.lower() - alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', str(marker_list[key]['chr']))] + alphanum_key = lambda key: [convert(c) for c in re.split( + '([0-9]+)', str(marker_list[key]['chr']))] return sorted(list(range(len(marker_list))), key=alphanum_key) diff --git 
a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py index 588600f5..c2b165a4 100644 --- a/wqflask/wqflask/marker_regression/rqtl_mapping.py +++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py @@ -51,7 +51,8 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec # Get pointers to some R/qtl functions scanone = ro.r["scanone"] # Map the scanone function scantwo = ro.r["scantwo"] # Map the scantwo function - calc_genoprob = ro.r["calc.genoprob"] # Map the calc.genoprob function + # Map the calc.genoprob function + calc_genoprob = ro.r["calc.genoprob"] crossname = dataset.group.name # try: @@ -67,14 +68,16 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec scale_units = "Mb" generate_cross_from_geno(dataset, scale_units) - GENOtoCSVR = ro.r["GENOtoCSVR"] # Map the local GENOtoCSVR function + # Map the local GENOtoCSVR function + GENOtoCSVR = ro.r["GENOtoCSVR"] crossfilelocation = TMPDIR + crossname + ".cross" if dataset.group.genofile: genofilelocation = locate(dataset.group.genofile, "genotype") else: genofilelocation = locate(dataset.group.name + ".geno", "genotype") logger.info("Going to create a cross from geno"); - cross_object = GENOtoCSVR(genofilelocation, crossfilelocation) # TODO: Add the SEX if that is available + # TODO: Add the SEX if that is available + cross_object = GENOtoCSVR(genofilelocation, crossfilelocation) logger.info("before calc_genoprob"); if manhattan_plot: cross_object = calc_genoprob(cross_object) @@ -85,14 +88,19 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec logger.info("phenostring done"); names_string = sanitize_rqtl_names(samples) logger.info("sanitized pheno and names"); - cross_object = add_phenotype(cross_object, pheno_string, "the_pheno") # Add the phenotype - cross_object = add_names(cross_object, names_string, "the_names") # Add the phenotype + # Add the phenotype + cross_object = add_phenotype(cross_object, pheno_string, "the_pheno") + # Add the phenotype + cross_object = add_names(cross_object, names_string, "the_names") logger.info("Added pheno and names"); - marker_covars = create_marker_covariates(control_marker, cross_object) # Create the additive covariate markers + # Create the additive covariate markers + marker_covars = create_marker_covariates(control_marker, cross_object) logger.info("Marker covars done"); if cofactors != "": logger.info("Cofactors: " + cofactors); - cross_object, trait_covars = add_cofactors(cross_object, dataset, cofactors, samples) # Create the covariates from selected traits + # Create the covariates from selected traits + cross_object, trait_covars = add_cofactors( + cross_object, dataset, cofactors, samples) ro.r('all_covars <- cbind(marker_covars, trait_covars)') else: ro.r('all_covars <- marker_covars') @@ -100,9 +108,11 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec # DEBUG to save the session object to file if pair_scan: if do_control == "true": - logger.info("Using covariate"); result_data_frame = scantwo(cross_object, pheno="the_pheno", addcovar=covars, model=model, method=method, n_cluster = 16) + logger.info("Using covariate"); result_data_frame = scantwo( + cross_object, pheno="the_pheno", addcovar=covars, model=model, method=method, n_cluster = 16) else: - logger.info("No covariates"); result_data_frame = scantwo(cross_object, pheno="the_pheno", model=model, method=method, n_cluster=16) + logger.info("No covariates"); 
result_data_frame = scantwo( + cross_object, pheno="the_pheno", model=model, method=method, n_cluster=16) pair_scan_filename = webqtlUtil.genRandStr("scantwo_") + ".png" png(file=TEMPDIR + pair_scan_filename) @@ -112,25 +122,36 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec return process_pair_scan_results(result_data_frame) else: if do_control == "true" or cofactors != "": - logger.info("Using covariate"); result_data_frame = scanone(cross_object, pheno="the_pheno", addcovar=covars, model=model, method=method) + logger.info("Using covariate"); result_data_frame = scanone( + cross_object, pheno="the_pheno", addcovar=covars, model=model, method=method) ro.r('save.image(file = "/home/zas1024/gn2-zach/itp_cofactor_test.RData")') else: - logger.info("No covariates"); result_data_frame = scanone(cross_object, pheno="the_pheno", model=model, method=method) - - if num_perm > 0 and permCheck == "ON": # Do permutation (if requested by user) - if len(perm_strata_list) > 0: # ZS: The strata list would only be populated if "Stratified" was checked on before mapping - cross_object, strata_ob = add_perm_strata(cross_object, perm_strata_list) + logger.info("No covariates"); result_data_frame = scanone( + cross_object, pheno="the_pheno", model=model, method=method) + + # Do permutation (if requested by user) + if num_perm > 0 and permCheck == "ON": + # ZS: The strata list would only be populated if "Stratified" was checked on before mapping + if len(perm_strata_list) > 0: + cross_object, strata_ob = add_perm_strata( + cross_object, perm_strata_list) if do_control == "true" or cofactors != "": - perm_data_frame = scanone(cross_object, pheno_col="the_pheno", addcovar=covars, n_perm = int(num_perm), perm_strata = strata_ob, model=model, method=method) + perm_data_frame = scanone(cross_object, pheno_col="the_pheno", addcovar=covars, n_perm = int( + num_perm), perm_strata = strata_ob, model=model, method=method) else: - perm_data_frame = scanone(cross_object, pheno_col="the_pheno", n_perm=num_perm, perm_strata = strata_ob, model=model, method=method) + perm_data_frame = scanone( + cross_object, pheno_col="the_pheno", n_perm=num_perm, perm_strata = strata_ob, model=model, method=method) else: if do_control == "true" or cofactors != "": - perm_data_frame = scanone(cross_object, pheno_col="the_pheno", addcovar=covars, n_perm = int(num_perm), model=model, method=method) + perm_data_frame = scanone(cross_object, pheno_col="the_pheno", addcovar=covars, n_perm = int( + num_perm), model=model, method=method) else: - perm_data_frame = scanone(cross_object, pheno_col="the_pheno", n_perm=num_perm, model=model, method=method) + perm_data_frame = scanone( + cross_object, pheno_col="the_pheno", n_perm=num_perm, model=model, method=method) - perm_output, suggestive, significant = process_rqtl_perm_results(num_perm, perm_data_frame) # Functions that sets the thresholds for the webinterface + # Functions that sets the thresholds for the webinterface + perm_output, suggestive, significant = process_rqtl_perm_results( + num_perm, perm_data_frame) return perm_output, suggestive, significant, process_rqtl_results(result_data_frame, dataset.group.species) else: return process_rqtl_results(result_data_frame, dataset.group.species) @@ -147,7 +168,8 @@ def generate_cross_from_rdata(dataset): """ % (rdata_location)) -def generate_cross_from_geno(dataset, scale_units): # TODO: Need to figure out why some genofiles have the wrong format and don't convert properly +# TODO: Need to figure out why some 
genofiles have the wrong format and don't convert properly +def generate_cross_from_geno(dataset, scale_units): ro.r(""" trim <- function( x ) { gsub("(^[[:space:]]+|[[:space:]]+$)", "", x) } @@ -246,7 +268,8 @@ def sanitize_rqtl_names(vals): def add_phenotype(cross, pheno_as_string, col_name): ro.globalenv["the_cross"] = cross ro.r('pheno <- data.frame(pull.pheno(the_cross))') - ro.r('the_cross$pheno <- cbind(pheno, ' + col_name + ' = as.numeric(' + pheno_as_string + '))') + ro.r('the_cross$pheno <- cbind(pheno, ' + col_name + \ + ' = as.numeric(' + pheno_as_string + '))') return ro.r["the_cross"] @@ -270,7 +293,8 @@ def add_categorical_covar(cross, covar_as_string, i): #logger.info("loop" + str(x)); col_name = "covar_" + str(i) + "_" + str(x) #logger.info("col_name" + col_name); - ro.r('the_cross$pheno <- cbind(pheno, ' + col_name + ' = newcovar[,' + str(x) + '])') + ro.r('the_cross$pheno <- cbind(pheno, ' + \ + col_name + ' = newcovar[,' + str(x) + '])') col_names.append(col_name) #logger.info("loop" + str(x) + "done"); @@ -281,7 +305,8 @@ def add_categorical_covar(cross, covar_as_string, i): def add_names(cross, names_as_string, col_name): ro.globalenv["the_cross"] = cross ro.r('pheno <- data.frame(pull.pheno(the_cross))') - ro.r('the_cross$pheno <- cbind(pheno, ' + col_name + ' = ' + names_as_string + ')') + ro.r('the_cross$pheno <- cbind(pheno, ' + \ + col_name + ' = ' + names_as_string + ')') return ro.r["the_cross"] @@ -330,9 +355,11 @@ def add_cofactors(cross, this_dataset, covariates, samples): logger.info("Covariate: " + covariate + " is of type: " + datatype); if(datatype == "categorical"): # Cat variable logger.info("call of add_categorical_covar"); - cross, col_names = add_categorical_covar(cross, covar_as_string, i) # Expand and add it to the cross + cross, col_names = add_categorical_covar( + cross, covar_as_string, i) # Expand and add it to the cross logger.info("add_categorical_covar returned"); - for z, col_name in enumerate(col_names): # Go through the additional covar names + # Go through the additional covar names + for z, col_name in enumerate(col_names): if i < (len(covariate_list) - 1): covar_name_string += '"' + col_name + '", ' else: @@ -355,18 +382,22 @@ def add_cofactors(cross, this_dataset, covariates, samples): def create_marker_covariates(control_marker, cross): ro.globalenv["the_cross"] = cross - ro.r('genotypes <- pull.geno(the_cross)') # Get the genotype matrix - userinput_sanitized = control_marker.replace(" ", "").split(",") # TODO: sanitize user input, Never Ever trust a user + # Get the genotype matrix + ro.r('genotypes <- pull.geno(the_cross)') + # TODO: sanitize user input, Never Ever trust a user + userinput_sanitized = control_marker.replace(" ", "").split(",") logger.debug(userinput_sanitized) if len(userinput_sanitized) > 0: - covariate_names = ', '.join('"{0}"'.format(w) for w in userinput_sanitized) + covariate_names = ', '.join('"{0}"'.format(w) + for w in userinput_sanitized) ro.r('covnames <- c(' + covariate_names + ')') else: ro.r('covnames <- c()') ro.r('covInGeno <- which(covnames %in% colnames(genotypes))') ro.r('covnames <- covnames[covInGeno]') ro.r("cat('covnames (purged): ', covnames,'\n')") - ro.r('marker_covars <- genotypes[,covnames]') # Get the covariate matrix by using the marker name as index to the genotype file + # Get the covariate matrix by using the marker name as index to the genotype file + ro.r('marker_covars <- genotypes[,covnames]') # TODO: Create a design matrix from the marker covars for the markers in case of an 
F2, 4way, etc return ro.r["marker_covars"] @@ -375,7 +406,8 @@ def process_pair_scan_results(result): pair_scan_results = [] result = result[1] - output = [tuple([result[j][i] for j in range(result.ncol)]) for i in range(result.nrow)] + output = [tuple([result[j][i] for j in range(result.ncol)]) + for i in range(result.nrow)] for i, line in enumerate(result.iter_row()): marker = {} @@ -401,14 +433,17 @@ def process_rqtl_perm_results(num_perm, results): return perm_output, suggestive, significant -def process_rqtl_results(result, species_name): # TODO: how to make this a one liner and not copy the stuff in a loop +# TODO: how to make this a one liner and not copy the stuff in a loop +def process_rqtl_results(result, species_name): qtl_results = [] - output = [tuple([result[j][i] for j in range(result.ncol)]) for i in range(result.nrow)] + output = [tuple([result[j][i] for j in range(result.ncol)]) + for i in range(result.nrow)] for i, line in enumerate(result.iter_row()): marker = {} marker['name'] = result.rownames[i] - if species_name == "mouse" and output[i][0] == 20: # ZS: This is awkward, but I'm not sure how to change the 20s to Xs in the RData file + # ZS: This is awkward, but I'm not sure how to change the 20s to Xs in the RData file + if species_name == "mouse" and output[i][0] == 20: marker['chr'] = "X" else: marker['chr'] = output[i][0] diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py index 31c58083..d9b28fba 100644 --- a/wqflask/wqflask/marker_regression/run_mapping.py +++ b/wqflask/wqflask/marker_regression/run_mapping.py @@ -51,7 +51,8 @@ class RunMapping: def __init__(self, start_vars, temp_uuid): helper_functions.get_species_dataset_trait(self, start_vars) - self.temp_uuid = temp_uuid # needed to pass temp_uuid to gn1 mapping code (marker_regression_gn1.py) + # needed to pass temp_uuid to gn1 mapping code (marker_regression_gn1.py) + self.temp_uuid = temp_uuid # ZS: Needed to zoom in or remap temp traits like PCA traits if "temp_trait" in start_vars and start_vars['temp_trait'] != "False": @@ -102,8 +103,11 @@ class RunMapping: if "results_path" in start_vars: self.mapping_results_path = start_vars['results_path'] else: - mapping_results_filename = self.dataset.group.name + "_" + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) - self.mapping_results_path = "{}{}.csv".format(webqtlConfig.GENERATED_IMAGE_DIR, mapping_results_filename) + mapping_results_filename = self.dataset.group.name + "_" + \ + ''.join(random.choice(string.ascii_uppercase + string.digits) + for _ in range(6)) + self.mapping_results_path = "{}{}.csv".format( + webqtlConfig.GENERATED_IMAGE_DIR, mapping_results_filename) self.manhattan_plot = False if 'manhattan_plot' in start_vars: @@ -122,7 +126,8 @@ class RunMapping: self.use_loco = None self.suggestive = "" self.significant = "" - self.pair_scan = False # Initializing this since it is checked in views to determine which template to use + # Initializing this since it is checked in views to determine which template to use + self.pair_scan = False if 'transform' in start_vars: self.transform = start_vars['transform'] else: @@ -140,7 +145,8 @@ class RunMapping: # ZS: This is passed to GN1 code for single chr mapping self.selected_chr = -1 if "selected_chr" in start_vars: - if int(start_vars['selected_chr']) != -1: # ZS: Needs to be -1 if showing full map; there's probably a better way to fix this + # ZS: Needs to be -1 if showing full map; there's probably a 
better way to fix this + if int(start_vars['selected_chr']) != -1: self.selected_chr = int(start_vars['selected_chr']) + 1 else: self.selected_chr = int(start_vars['selected_chr']) @@ -198,28 +204,33 @@ class RunMapping: self.output_files = None if 'output_files' in start_vars: self.output_files = start_vars['output_files'] - if 'first_run' in start_vars: # ZS: check if first run so existing result files can be used if it isn't (for example zooming on a chromosome, etc) + # ZS: check if first run so existing result files can be used if it isn't (for example zooming on a chromosome, etc) + if 'first_run' in start_vars: self.first_run = False self.score_type = "-logP" self.manhattan_plot = True with Bench("Running GEMMA"): if self.use_loco == "True": - marker_obs, self.output_files = gemma_mapping.run_gemma(self.this_trait, self.dataset, self.samples, self.vals, self.covariates, self.use_loco, self.maf, self.first_run, self.output_files) + marker_obs, self.output_files = gemma_mapping.run_gemma( + self.this_trait, self.dataset, self.samples, self.vals, self.covariates, self.use_loco, self.maf, self.first_run, self.output_files) else: - marker_obs, self.output_files = gemma_mapping.run_gemma(self.this_trait, self.dataset, self.samples, self.vals, self.covariates, self.use_loco, self.maf, self.first_run, self.output_files) + marker_obs, self.output_files = gemma_mapping.run_gemma( + self.this_trait, self.dataset, self.samples, self.vals, self.covariates, self.use_loco, self.maf, self.first_run, self.output_files) results = marker_obs elif self.mapping_method == "rqtl_plink": results = self.run_rqtl_plink() elif self.mapping_method == "rqtl_geno": perm_strata = [] if "perm_strata" in start_vars and "categorical_vars" in start_vars: - self.categorical_vars = start_vars["categorical_vars"].split(",") + self.categorical_vars = start_vars["categorical_vars"].split( + ",") if len(self.categorical_vars) and start_vars["perm_strata"] == "True": primary_samples = SampleList(dataset=self.dataset, sample_names=self.samples, this_trait=self.this_trait) - perm_strata = get_perm_strata(self.this_trait, primary_samples, self.categorical_vars, self.samples) + perm_strata = get_perm_strata( + self.this_trait, primary_samples, self.categorical_vars, self.samples) self.score_type = "LOD" self.control_marker = start_vars['control_marker'] self.do_control = start_vars['do_control'] @@ -231,9 +242,11 @@ class RunMapping: # if start_vars['pair_scan'] == "true": # self.pair_scan = True if self.permCheck and self.num_perm > 0: - self.perm_output, self.suggestive, self.significant, results = rqtl_mapping.run_rqtl_geno(self.vals, self.samples, self.dataset, self.mapping_scale, self.method, self.model, self.permCheck, self.num_perm, perm_strata, self.do_control, self.control_marker, self.manhattan_plot, self.pair_scan, self.covariates) + self.perm_output, self.suggestive, self.significant, results = rqtl_mapping.run_rqtl_geno( + self.vals, self.samples, self.dataset, self.mapping_scale, self.method, self.model, self.permCheck, self.num_perm, perm_strata, self.do_control, self.control_marker, self.manhattan_plot, self.pair_scan, self.covariates) else: - results = rqtl_mapping.run_rqtl_geno(self.vals, self.samples, self.dataset, self.mapping_scale, self.method, self.model, self.permCheck, self.num_perm, perm_strata, self.do_control, self.control_marker, self.manhattan_plot, self.pair_scan, self.covariates) + results = rqtl_mapping.run_rqtl_geno(self.vals, self.samples, self.dataset, self.mapping_scale, self.method, 
self.model, self.permCheck, + self.num_perm, perm_strata, self.do_control, self.control_marker, self.manhattan_plot, self.pair_scan, self.covariates) elif self.mapping_method == "reaper": if "startMb" in start_vars: # ZS: Check if first time page loaded, so it can default to ON if "additiveCheck" in start_vars: @@ -268,10 +281,12 @@ class RunMapping: if self.reaper_version == "new": self.first_run = True self.output_files = None - if 'first_run' in start_vars: # ZS: check if first run so existing result files can be used if it isn't (for example zooming on a chromosome, etc) + # ZS: check if first run so existing result files can be used if it isn't (for example zooming on a chromosome, etc) + if 'first_run' in start_vars: self.first_run = False if 'output_files' in start_vars: - self.output_files = start_vars['output_files'].split(",") + self.output_files = start_vars['output_files'].split( + ",") results, self.perm_output, self.suggestive, self.significant, self.bootstrap_results, self.output_files = qtlreaper_mapping.run_reaper(self.this_trait, self.dataset, @@ -301,7 +316,8 @@ class RunMapping: elif self.mapping_method == "plink": self.score_type = "-logP" self.manhattan_plot = True - results = plink_mapping.run_plink(self.this_trait, self.dataset, self.species, self.vals, self.maf) + results = plink_mapping.run_plink( + self.this_trait, self.dataset, self.species, self.vals, self.maf) #results = self.run_plink() else: logger.debug("RUNNING NOTHING") @@ -353,7 +369,9 @@ class RunMapping: chr=str(marker['chr']), rs=marker['name'], ps=this_ps, - url="/show_trait?trait_id=" + marker['name'] + "&dataset=" + self.dataset.group.name + "Geno" + url="/show_trait?trait_id=" + \ + marker['name'] + "&dataset=" + \ + self.dataset.group.name + "Geno" ) if self.geno_db_exists == "True": @@ -362,7 +380,9 @@ class RunMapping: chr=str(marker['chr']), rs=marker['name'], pos=this_ps, - url="/show_trait?trait_id=" + marker['name'] + "&dataset=" + self.dataset.group.name + "Geno" + url="/show_trait?trait_id=" + \ + marker['name'] + "&dataset=" + \ + self.dataset.group.name + "Geno" ) else: annot_marker = dict( @@ -373,7 +393,8 @@ class RunMapping: ) if 'lrs_value' in marker and marker['lrs_value'] > 0: - browser_marker['p_wald'] = 10**-(marker['lrs_value'] / 4.61) + browser_marker['p_wald'] = 10**- \ + (marker['lrs_value'] / 4.61) elif 'lod_score' in marker and marker['lod_score'] > 0: browser_marker['p_wald'] = 10**-(marker['lod_score']) else: @@ -386,9 +407,13 @@ class RunMapping: highest_chr = marker['chr'] if ('lod_score' in marker.keys()) or ('lrs_value' in marker.keys()): if 'Mb' in marker.keys(): - marker['display_pos'] = "Chr" + str(marker['chr']) + ": " + "{:.6f}".format(marker['Mb']) + marker['display_pos'] = "Chr" + \ + str(marker['chr']) + ": " + \ + "{:.6f}".format(marker['Mb']) elif 'cM' in marker.keys(): - marker['display_pos'] = "Chr" + str(marker['chr']) + ": " + "{:.3f}".format(marker['cM']) + marker['display_pos'] = "Chr" + \ + str(marker['chr']) + ": " + \ + "{:.3f}".format(marker['cM']) else: marker['display_pos'] = "N/A" self.qtl_results.append(marker) @@ -396,12 +421,15 @@ class RunMapping: total_markers = len(self.qtl_results) with Bench("Exporting Results"): - export_mapping_results(self.dataset, self.this_trait, self.qtl_results, self.mapping_results_path, self.mapping_scale, self.score_type, self.transform, self.covariates, self.n_samples) + export_mapping_results(self.dataset, self.this_trait, self.qtl_results, self.mapping_results_path, + self.mapping_scale, self.score_type, 
self.transform, self.covariates, self.n_samples) with Bench("Trimming Markers for Figure"): if len(self.qtl_results) > 30000: - self.qtl_results = trim_markers_for_figure(self.qtl_results) - self.results_for_browser = trim_markers_for_figure(self.results_for_browser) + self.qtl_results = trim_markers_for_figure( + self.qtl_results) + self.results_for_browser = trim_markers_for_figure( + self.results_for_browser) filtered_annotations = [] for marker in self.results_for_browser: for annot_marker in self.annotations_for_browser: @@ -409,14 +437,17 @@ class RunMapping: filtered_annotations.append(annot_marker) break self.annotations_for_browser = filtered_annotations - browser_files = write_input_for_browser(self.dataset, self.results_for_browser, self.annotations_for_browser) + browser_files = write_input_for_browser( + self.dataset, self.results_for_browser, self.annotations_for_browser) else: - browser_files = write_input_for_browser(self.dataset, self.results_for_browser, self.annotations_for_browser) + browser_files = write_input_for_browser( + self.dataset, self.results_for_browser, self.annotations_for_browser) with Bench("Trimming Markers for Table"): self.trimmed_markers = trim_markers_for_table(results) - chr_lengths = get_chr_lengths(self.mapping_scale, self.mapping_method, self.dataset, self.qtl_results) + chr_lengths = get_chr_lengths( + self.mapping_scale, self.mapping_method, self.dataset, self.qtl_results) # ZS: For zooming into genome browser, need to pass chromosome name instead of number if self.dataset.group.species == "mouse": @@ -472,11 +503,14 @@ class RunMapping: def run_rqtl_plink(self): # os.chdir("") never do this inside a webserver!! - output_filename = webqtlUtil.genRandStr("%s_%s_" % (self.dataset.group.name, self.this_trait.name)) + output_filename = webqtlUtil.genRandStr("%s_%s_" % ( + self.dataset.group.name, self.this_trait.name)) - plink_mapping.gen_pheno_txt_file_plink(self.this_trait, self.dataset, self.vals, pheno_filename=output_filename) + plink_mapping.gen_pheno_txt_file_plink( + self.this_trait, self.dataset, self.vals, pheno_filename=output_filename) - rqtl_command = './plink --noweb --ped %s.ped --no-fid --no-parents --no-sex --no-pheno --map %s.map --pheno %s/%s.txt --pheno-name %s --maf %s --missing-phenotype -9999 --out %s%s --assoc ' % (self.dataset.group.name, self.dataset.group.name, TMPDIR, plink_output_filename, self.this_trait.name, self.maf, TMPDIR, plink_output_filename) + rqtl_command = './plink --noweb --ped %s.ped --no-fid --no-parents --no-sex --no-pheno --map %s.map --pheno %s/%s.txt --pheno-name %s --maf %s --missing-phenotype -9999 --out %s%s --assoc ' % ( + self.dataset.group.name, self.dataset.group.name, TMPDIR, plink_output_filename, self.this_trait.name, self.maf, TMPDIR, plink_output_filename) os.system(rqtl_command) @@ -508,8 +542,10 @@ class RunMapping: def export_mapping_results(dataset, trait, markers, results_path, mapping_scale, score_type, transform, covariates, n_samples): with open(results_path, "w+") as output_file: - output_file.write("Time/Date: " + datetime.datetime.now().strftime("%x / %X") + "\n") - output_file.write("Population: " + dataset.group.species.title() + " " + dataset.group.name + "\n") + output_file.write( + "Time/Date: " + datetime.datetime.now().strftime("%x / %X") + "\n") + output_file.write( + "Population: " + dataset.group.species.title() + " " + dataset.group.name + "\n") output_file.write("Data Set: " + dataset.fullname + "\n") output_file.write("N Samples: " + str(n_samples) + "\n") if 
len(transform) > 0: @@ -529,7 +565,8 @@ def export_mapping_results(dataset, trait, markers, results_path, mapping_scale, output_file.write(transform_text + "\n") if dataset.type == "ProbeSet": output_file.write("Gene Symbol: " + trait.symbol + "\n") - output_file.write("Location: " + str(trait.chr) + " @ " + str(trait.mb) + " Mb\n") + output_file.write("Location: " + str(trait.chr) + \ + " @ " + str(trait.mb) + " Mb\n") if len(covariates) > 0: output_file.write("Cofactors (dataset - trait):\n") for covariate in covariates.split(","): @@ -630,9 +667,11 @@ def trim_markers_for_figure(markers): def trim_markers_for_table(markers): if 'lod_score' in list(markers[0].keys()): - sorted_markers = sorted(markers, key=lambda k: k['lod_score'], reverse=True) + sorted_markers = sorted( + markers, key=lambda k: k['lod_score'], reverse=True) else: - sorted_markers = sorted(markers, key=lambda k: k['lrs_value'], reverse=True) + sorted_markers = sorted( + markers, key=lambda k: k['lrs_value'], reverse=True) # ZS: So we end up with a list of just 2000 markers if len(sorted_markers) >= 2000: @@ -643,7 +682,9 @@ def trim_markers_for_table(markers): def write_input_for_browser(this_dataset, gwas_results, annotations): - file_base = this_dataset.group.name + "_" + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) + file_base = this_dataset.group.name + "_" + \ + ''.join(random.choice(string.ascii_uppercase + string.digits) + for _ in range(6)) gwas_filename = file_base + "_GWAS" annot_filename = file_base + "_ANNOT" gwas_path = "{}/gn2/".format(TEMPDIR) + gwas_filename @@ -659,7 +700,8 @@ def write_input_for_browser(this_dataset, gwas_results, annotations): def geno_db_exists(this_dataset): geno_db_name = this_dataset.group.name + "Geno" try: - geno_db = data_set.create_dataset(dataset_name=geno_db_name, get_samplelist=False) + geno_db = data_set.create_dataset( + dataset_name=geno_db_name, get_samplelist=False) return "True" except: return "False" @@ -689,9 +731,11 @@ def get_chr_lengths(mapping_scale, mapping_method, dataset, qtl_results): highest_pos = float(result['cM']) * 1000000 else: highest_pos = float(result['Mb']) * 1000000 - chr_lengths.append({"chr": str(this_chr), "size": str(highest_pos)}) + chr_lengths.append( + {"chr": str(this_chr), "size": str(highest_pos)}) else: - chr_lengths.append({"chr": str(this_chr), "size": str(highest_pos)}) + chr_lengths.append( + {"chr": str(this_chr), "size": str(highest_pos)}) this_chr = chr_as_num else: if mapping_method == "reaper": @@ -722,7 +766,8 @@ def get_perm_strata(this_trait, sample_list, categorical_vars, used_samples): combined_string = "" for var in categorical_vars: if var in list(sample_list.sample_attribute_values[sample].keys()): - combined_string += str(sample_list.sample_attribute_values[sample][var]) + combined_string += str( + sample_list.sample_attribute_values[sample][var]) else: combined_string += "NA" else: @@ -730,7 +775,8 @@ def get_perm_strata(this_trait, sample_list, categorical_vars, used_samples): perm_strata_strings.append(combined_string) - d = dict([(y, x + 1) for x, y in enumerate(sorted(set(perm_strata_strings)))]) + d = dict([(y, x + 1) + for x, y in enumerate(sorted(set(perm_strata_strings)))]) list_to_numbers = [d[x] for x in perm_strata_strings] perm_strata = list_to_numbers diff --git a/wqflask/wqflask/model.py b/wqflask/wqflask/model.py index d7c9ef95..7b9ff8fe 100644 --- a/wqflask/wqflask/model.py +++ b/wqflask/wqflask/model.py @@ -17,7 +17,8 @@ from wqflask.database import Base, init_db 
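Aside (not part of the patch): get_perm_strata in the run_mapping.py hunk above encodes each sample's combination of categorical covariate values as a single string and then numbers the distinct strings 1..k, so the permutation code can shuffle phenotypes only within matching strata. A minimal stand-alone sketch of that encoding, using made-up sample data (the real values come from SampleList.sample_attribute_values):

    # Hypothetical samples and categorical variables for illustration only.
    samples = {
        "BXD1": {"sex": "M", "batch": "A"},
        "BXD2": {"sex": "F", "batch": "A"},
        "BXD3": {"sex": "M", "batch": "B"},
    }
    categorical_vars = ["sex", "batch"]

    # Join each sample's categorical values into one string ("NA" when missing)...
    strata_strings = [
        "".join(str(samples[name].get(var, "NA")) for var in categorical_vars)
        for name in samples
    ]

    # ...then map the distinct strings to 1..k, mirroring the sorted(set(...))
    # dict built in get_perm_strata.
    stratum_of = {s: i + 1 for i, s in enumerate(sorted(set(strata_strings)))}
    perm_strata = [stratum_of[s] for s in strata_strings]
    print(perm_strata)  # [2, 1, 3] -- one integer stratum per sample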
class User(Base): __tablename__ = "user" - id = Column(Unicode(36), primary_key=True, default=lambda: str(uuid.uuid4())) + id = Column(Unicode(36), primary_key=True, + default=lambda: str(uuid.uuid4())) email_address = Column(Unicode(50), unique=True, nullable=False) # Todo: Turn on strict mode for Mysql @@ -28,11 +29,13 @@ class User(Base): active = Column(Boolean(), nullable=False, default=True) - registration_info = Column(Text) # json detailing when they were registered, etc. + # json detailing when they were registered, etc. + registration_info = Column(Text) confirmed = Column(Text) # json detailing when they confirmed, etc. - superuser = Column(Text) # json detailing when they became a superuser, otherwise empty + # json detailing when they became a superuser, otherwise empty + superuser = Column(Text) # if not superuser logins = relationship("Login", @@ -66,7 +69,8 @@ class User(Base): def get_collection_by_name(self, collection_name): try: - collect = self.user_collections.filter_by(name=collection_name).first() + collect = self.user_collections.filter_by( + name=collection_name).first() except sqlalchemy.orm.exc.NoResultFound: collect = None return collect @@ -118,12 +122,15 @@ class User(Base): class Login(Base): __tablename__ = "login" - id = Column(Unicode(36), primary_key=True, default=lambda: str(uuid.uuid4())) + id = Column(Unicode(36), primary_key=True, + default=lambda: str(uuid.uuid4())) user = Column(Unicode(36), ForeignKey('user.id')) timestamp = Column(DateTime(), default=lambda: datetime.datetime.utcnow()) ip_address = Column(Unicode(39)) - successful = Column(Boolean(), nullable=False) # False if wrong password was entered - session_id = Column(Text) # Set only if successfully logged in, otherwise should be blank + # False if wrong password was entered + successful = Column(Boolean(), nullable=False) + # Set only if successfully logged in, otherwise should be blank + session_id = Column(Text) # Set to user who assumes identity if this was a login for debugging purposes by a superuser assumed_by = Column(Unicode(36), ForeignKey('user.id')) @@ -137,13 +144,16 @@ class Login(Base): class UserCollection(Base): __tablename__ = "user_collection" - id = Column(Unicode(36), primary_key=True, default=lambda: str(uuid.uuid4())) + id = Column(Unicode(36), primary_key=True, + default=lambda: str(uuid.uuid4())) user = Column(Unicode(36), ForeignKey('user.id')) # I'd prefer this to not have a length, but for the index below it needs one name = Column(Unicode(50)) - created_timestamp = Column(DateTime(), default=lambda: datetime.datetime.utcnow()) - changed_timestamp = Column(DateTime(), default=lambda: datetime.datetime.utcnow()) + created_timestamp = Column( + DateTime(), default=lambda: datetime.datetime.utcnow()) + changed_timestamp = Column( + DateTime(), default=lambda: datetime.datetime.utcnow()) members = Column(Text) # We're going to store them as a json list # This index ensures a user doesn't have more than one collection with the same name diff --git a/wqflask/wqflask/resource_manager.py b/wqflask/wqflask/resource_manager.py index 4591710c..36d4cd61 100644 --- a/wqflask/wqflask/resource_manager.py +++ b/wqflask/wqflask/resource_manager.py @@ -47,7 +47,8 @@ def search_for_user(): params = request.form user_list = [] user_list += get_users_like_unique_column("full_name", params['user_name']) - user_list += get_users_like_unique_column("email_address", params['user_email']) + user_list += get_users_like_unique_column( + "email_address", params['user_email']) return 
json.dumps(user_list) @@ -61,7 +62,8 @@ def search_for_groups(): user_list = [] user_list += get_users_like_unique_column("full_name", params['user_name']) - user_list += get_users_like_unique_column("email_address", params['user_email']) + user_list += get_users_like_unique_column( + "email_address", params['user_email']) for user in user_list: group_list += get_groups_like_unique_column("admins", user['user_id']) group_list += get_groups_like_unique_column("members", user['user_id']) @@ -124,7 +126,8 @@ def add_group_to_resource(): 'admin': request.form['admin_privilege'] } add_access_mask(resource_id, group_id, access_mask) - flash("Privileges have been added for group {}.".format(group_name), "alert-info") + flash("Privileges have been added for group {}.".format( + group_name), "alert-info") return redirect(url_for("manage_resource", resource_id=resource_id)) else: return render_template("admin/search_for_groups.html", resource_id=resource_id) diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py index ed5f9bad..273a97a4 100644 --- a/wqflask/wqflask/search_results.py +++ b/wqflask/wqflask/search_results.py @@ -52,7 +52,8 @@ class SearchResultPage: search = self.search_terms self.original_search_string = self.search_terms # check for dodgy search terms - rx = re.compile(r'.*\W(href|http|sql|select|update)\W.*', re.IGNORECASE) + rx = re.compile( + r'.*\W(href|http|sql|select|update)\W.*', re.IGNORECASE) if rx.match(search): logger.info("Regex failed search") self.search_term_exists = False @@ -96,7 +97,8 @@ class SearchResultPage: trait_list = [] json_trait_list = [] - species = webqtlDatabaseFunction.retrieve_species(self.dataset.group.name) + species = webqtlDatabaseFunction.retrieve_species( + self.dataset.group.name) # result_set represents the results for each search term; a search of # "shh grin2b" would have two sets of results, one for each term logger.debug("self.results is:", pf(self.results)) @@ -109,7 +111,8 @@ class SearchResultPage: trait_dict = {} trait_id = result[0] - this_trait = create_trait(dataset=self.dataset, name=trait_id, get_qtl_info=True, get_sample_info=False) + this_trait = create_trait( + dataset=self.dataset, name=trait_id, get_qtl_info=True, get_sample_info=False) if this_trait: trait_dict['index'] = index + 1 trait_dict['name'] = this_trait.name @@ -118,7 +121,8 @@ class SearchResultPage: else: trait_dict['display_name'] = this_trait.name trait_dict['dataset'] = this_trait.dataset.name - trait_dict['hmac'] = hmac.data_hmac('{}:{}'.format(this_trait.name, this_trait.dataset.name)) + trait_dict['hmac'] = hmac.data_hmac( + '{}:{}'.format(this_trait.name, this_trait.dataset.name)) if this_trait.dataset.type == "ProbeSet": trait_dict['symbol'] = this_trait.symbol if this_trait.symbol else "N/A" trait_dict['description'] = "N/A" @@ -168,9 +172,11 @@ class SearchResultPage: self.trait_list = trait_list if self.dataset.type == "ProbeSet": - self.header_data_names = ['index', 'display_name', 'symbol', 'description', 'location', 'mean', 'lrs_score', 'lrs_location', 'additive'] + self.header_data_names = ['index', 'display_name', 'symbol', 'description', + 'location', 'mean', 'lrs_score', 'lrs_location', 'additive'] elif self.dataset.type == "Publish": - self.header_data_names = ['index', 'display_name', 'description', 'mean', 'authors', 'pubmed_text', 'lrs_score', 'lrs_location', 'additive'] + self.header_data_names = ['index', 'display_name', 'description', 'mean', + 'authors', 'pubmed_text', 'lrs_score', 'lrs_location', 'additive'] 
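Aside (not part of the patch): SearchResultPage above screens the raw search string with a regex before any SQL is built, treating href/http/sql/select/update surrounded by non-word characters as a sign of an injection attempt. A small sketch of how that pre-filter behaves; the pattern is copied from the hunk, the sample queries are made up:

    import re

    # Same pattern as in search_results.py: match the keywords only as whole
    # tokens, case-insensitively.
    rx = re.compile(r'.*\W(href|http|sql|select|update)\W.*', re.IGNORECASE)

    for term in ["shh grin2b", "LRS=(9 99 Chr4)", "foo; SELECT * FROM user"]:
        if rx.match(term):
            print("rejected:", term)  # search_term_exists is set to False for these
        else:
            print("accepted:", term)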
elif self.dataset.type == "Geno": self.header_data_names = ['index', 'display_name', 'location'] @@ -184,7 +190,8 @@ class SearchResultPage: combined_from_clause = "" combined_where_clause = "" - previous_from_clauses = [] # The same table can't be referenced twice in the from clause + # The same table can't be referenced twice in the from clause + previous_from_clauses = [] logger.debug("len(search_terms)>1") symbol_list = [] @@ -198,7 +205,8 @@ class SearchResultPage: for i, a_search in enumerate(alias_terms): the_search = self.get_search_ob(a_search) if the_search != None: - get_from_clause = getattr(the_search, "get_from_clause", None) + get_from_clause = getattr( + the_search, "get_from_clause", None) if callable(get_from_clause): from_clause = the_search.get_from_clause() if from_clause in previous_from_clauses: @@ -222,7 +230,8 @@ class SearchResultPage: else: the_search = self.get_search_ob(a_search) if the_search != None: - get_from_clause = getattr(the_search, "get_from_clause", None) + get_from_clause = getattr( + the_search, "get_from_clause", None) if callable(get_from_clause): from_clause = the_search.get_from_clause() if from_clause in previous_from_clauses: @@ -241,7 +250,8 @@ class SearchResultPage: self.search_term_exists = False if self.search_term_exists: combined_where_clause = "(" + combined_where_clause + ")" - final_query = the_search.compile_final_query(combined_from_clause, combined_where_clause) + final_query = the_search.compile_final_query( + combined_from_clause, combined_where_clause) results = the_search.execute(final_query) self.results.extend(results) @@ -312,7 +322,8 @@ def get_aliases(symbol_list, species): symbols_string = ",".join(updated_symbols) filtered_aliases = [] - response = requests.get(GN2_BASE_URL + "/gn3/gene/aliases2/" + symbols_string) + response = requests.get( + GN2_BASE_URL + "/gn3/gene/aliases2/" + symbols_string) if response: alias_lists = json.loads(response.content) seen = set() diff --git a/wqflask/wqflask/show_trait/SampleList.py b/wqflask/wqflask/show_trait/SampleList.py index 496dee57..6419335e 100644 --- a/wqflask/wqflask/show_trait/SampleList.py +++ b/wqflask/wqflask/show_trait/SampleList.py @@ -74,14 +74,20 @@ class SampleList: if 'rrid' in sample.extra_attributes: if self.dataset.group.species == "mouse": if len(sample.extra_attributes['rrid'].split(":")) > 1: - the_rrid = sample.extra_attributes['rrid'].split(":")[1] - sample.extra_attributes['rrid'] = [sample.extra_attributes['rrid']] - sample.extra_attributes['rrid'].append(webqtlConfig.RRID_MOUSE_URL % the_rrid) + the_rrid = sample.extra_attributes['rrid'].split(":")[ + 1] + sample.extra_attributes['rrid'] = [ + sample.extra_attributes['rrid']] + sample.extra_attributes['rrid'].append( + webqtlConfig.RRID_MOUSE_URL % the_rrid) elif self.dataset.group.species == "rat": if len(str(sample.extra_attributes['rrid'])): - the_rrid = sample.extra_attributes['rrid'].split("_")[1] - sample.extra_attributes['rrid'] = [sample.extra_attributes['rrid']] - sample.extra_attributes['rrid'].append(webqtlConfig.RRID_RAT_URL % the_rrid) + the_rrid = sample.extra_attributes['rrid'].split("_")[ + 1] + sample.extra_attributes['rrid'] = [ + sample.extra_attributes['rrid']] + sample.extra_attributes['rrid'].append( + webqtlConfig.RRID_RAT_URL % the_rrid) self.sample_list.append(sample) @@ -130,7 +136,8 @@ class SampleList: self.attributes[key].name = name self.attributes[key].distinct_values = [ item.Value for item in values] - self.attributes[key].distinct_values = 
natural_sort(self.attributes[key].distinct_values) + self.attributes[key].distinct_values = natural_sort( + self.attributes[key].distinct_values) all_numbers = True for value in self.attributes[key].distinct_values: try: @@ -170,7 +177,8 @@ class SampleList: except ValueError: pass - attribute_values[self.attributes[item.Id].name.lower()] = attribute_value + attribute_values[self.attributes[item.Id].name.lower( + )] = attribute_value self.sample_attribute_values[sample_name] = attribute_values def get_first_attr_col(self): diff --git a/wqflask/wqflask/show_trait/export_trait_data.py b/wqflask/wqflask/show_trait/export_trait_data.py index f0fcd27d..81e7903b 100644 --- a/wqflask/wqflask/show_trait/export_trait_data.py +++ b/wqflask/wqflask/show_trait/export_trait_data.py @@ -40,16 +40,23 @@ def get_export_metadata(trait_id, dataset_name): metadata = [] if dataset.type == "Publish": metadata.append(["Phenotype ID: " + trait_id]) - metadata.append(["Phenotype URL: " + "http://genenetwork.org/show_trait?trait_id=" + trait_id + "&dataset=" + dataset_name]) + metadata.append(["Phenotype URL: " + "http://genenetwork.org/show_trait?trait_id=" + \ + trait_id + "&dataset=" + dataset_name]) metadata.append(["Group: " + dataset.group.name]) - metadata.append(["Phenotype: " + this_trait.description_display.replace(",", "\",\"")]) - metadata.append(["Authors: " + (this_trait.authors if this_trait.authors else "N/A")]) - metadata.append(["Title: " + (this_trait.title if this_trait.title else "N/A")]) - metadata.append(["Journal: " + (this_trait.journal if this_trait.journal else "N/A")]) - metadata.append(["Dataset Link: http://gn1.genenetwork.org/webqtl/main.py?FormID=sharinginfo&InfoPageName=" + dataset.name]) + metadata.append( + ["Phenotype: " + this_trait.description_display.replace(",", "\",\"")]) + metadata.append( + ["Authors: " + (this_trait.authors if this_trait.authors else "N/A")]) + metadata.append( + ["Title: " + (this_trait.title if this_trait.title else "N/A")]) + metadata.append( + ["Journal: " + (this_trait.journal if this_trait.journal else "N/A")]) + metadata.append( + ["Dataset Link: http://gn1.genenetwork.org/webqtl/main.py?FormID=sharinginfo&InfoPageName=" + dataset.name]) else: metadata.append(["Record ID: " + trait_id]) - metadata.append(["Trait URL: " + "http://genenetwork.org/show_trait?trait_id=" + trait_id + "&dataset=" + dataset_name]) + metadata.append(["Trait URL: " + "http://genenetwork.org/show_trait?trait_id=" + \ + trait_id + "&dataset=" + dataset_name]) if this_trait.symbol: metadata.append(["Symbol: " + this_trait.symbol]) metadata.append(["Dataset: " + dataset.name]) diff --git a/wqflask/wqflask/snp_browser/snp_browser.py b/wqflask/wqflask/snp_browser/snp_browser.py index e5c67165..7fcbe984 100644 --- a/wqflask/wqflask/snp_browser/snp_browser.py +++ b/wqflask/wqflask/snp_browser/snp_browser.py @@ -27,9 +27,11 @@ class SnpBrowser: self.table_rows = [] if self.limit_strains == "true": - self.header_fields, self.empty_field_count, self.header_data_names = get_header_list(variant_type=self.variant_type, strains=self.chosen_strains, empty_columns = self.empty_columns) + self.header_fields, self.empty_field_count, self.header_data_names = get_header_list( + variant_type=self.variant_type, strains=self.chosen_strains, empty_columns = self.empty_columns) else: - self.header_fields, self.empty_field_count, self.header_data_names = get_header_list(variant_type=self.variant_type, strains=self.strain_lists, species = self.species_name, empty_columns = self.empty_columns) + 
self.header_fields, self.empty_field_count, self.header_data_names = get_header_list( + variant_type=self.variant_type, strains=self.strain_lists, species = self.species_name, empty_columns = self.empty_columns) def initialize_parameters(self, start_vars): if 'first_run' in start_vars: @@ -53,10 +55,12 @@ class SnpBrowser: self.rat_chr_list = [] mouse_species_ob = species.TheSpecies(species_name="Mouse") for key in mouse_species_ob.chromosomes.chromosomes: - self.mouse_chr_list.append(mouse_species_ob.chromosomes.chromosomes[key].name) + self.mouse_chr_list.append( + mouse_species_ob.chromosomes.chromosomes[key].name) rat_species_ob = species.TheSpecies(species_name="Rat") for key in rat_species_ob.chromosomes.chromosomes: - self.rat_chr_list.append(rat_species_ob.chromosomes.chromosomes[key].name) + self.rat_chr_list.append( + rat_species_ob.chromosomes.chromosomes[key].name) if self.species_id == 1: self.this_chr_list = self.mouse_chr_list @@ -109,9 +113,11 @@ class SnpBrowser: "CAST/EiJ"] self.chosen_strains_rat = ["BN", "F344", "WLI", "WMI"] if 'chosen_strains_mouse' in start_vars: - self.chosen_strains_mouse = start_vars['chosen_strains_mouse'].split(",") + self.chosen_strains_mouse = start_vars['chosen_strains_mouse'].split( + ",") if 'chosen_strains_rat' in start_vars: - self.chosen_strains_rat = start_vars['chosen_strains_rat'].split(",") + self.chosen_strains_rat = start_vars['chosen_strains_rat'].split( + ",") if self.species_id == 1: self.chosen_strains = self.chosen_strains_mouse @@ -150,9 +156,11 @@ class SnpBrowser: if self.gene_name != "": if self.species_id != 0: - query = "SELECT geneSymbol, chromosome, txStart, txEnd FROM GeneList WHERE SpeciesId = %s AND geneSymbol = '%s'" % (self.species_id, self.gene_name) + query = "SELECT geneSymbol, chromosome, txStart, txEnd FROM GeneList WHERE SpeciesId = %s AND geneSymbol = '%s'" % ( + self.species_id, self.gene_name) else: - query = "SELECT geneSymbol, chromosome, txStart, txEnd FROM GeneList WHERE geneSymbol = '%s'" % (self.gene_name) + query = "SELECT geneSymbol, chromosome, txStart, txEnd FROM GeneList WHERE geneSymbol = '%s'" % ( + self.gene_name) result = g.db.execute(query).fetchone() if result: self.gene_name, self.chr, self.start_mb, self.end_mb = result @@ -163,9 +171,11 @@ class SnpBrowser: query = "SELECT Id, Chromosome, Position, Position+0.000001 FROM SnpAll WHERE Rs = '%s'" % self.gene_name else: if self.species_id != 0: - query = "SELECT Id, Chromosome, Position, Position+0.000001 FROM SnpAll where SpeciesId = %s AND SnpName = '%s'" % (self.species_id, self.gene_name) + query = "SELECT Id, Chromosome, Position, Position+0.000001 FROM SnpAll where SpeciesId = %s AND SnpName = '%s'" % ( + self.species_id, self.gene_name) else: - query = "SELECT Id, Chromosome, Position, Position+0.000001 FROM SnpAll where SnpName = '%s'" % (self.gene_name) + query = "SELECT Id, Chromosome, Position, Position+0.000001 FROM SnpAll where SnpName = '%s'" % ( + self.gene_name) result_snp = g.db.execute(query).fetchall() if result_snp: self.snp_list = [item[0] for item in result_snp] @@ -177,9 +187,11 @@ class SnpBrowser: elif self.variant_type == "InDel": if self.gene_name[0] == "I": if self.species_id != 0: - query = "SELECT Id, Chromosome, Mb_start, Mb_end FROM IndelAll WHERE SpeciesId = %s AND Name = '%s'" % (self.species_id, self.gene_name) + query = "SELECT Id, Chromosome, Mb_start, Mb_end FROM IndelAll WHERE SpeciesId = %s AND Name = '%s'" % ( + self.species_id, self.gene_name) else: - query = "SELECT Id, Chromosome, Mb_start, 
Mb_end FROM IndelAll WHERE Name = '%s'" % (self.gene_name) + query = "SELECT Id, Chromosome, Mb_start, Mb_end FROM IndelAll WHERE Name = '%s'" % ( + self.gene_name) result_snp = g.db.execute(query).fetchall() if result_snp: self.snp_list = [item[0] for item in result_snp] @@ -255,7 +267,8 @@ class SnpBrowser: if self.limit_strains == "true" and len(self.chosen_strains) > 0: for item in self.chosen_strains: - index = self.strain_lists[self.species_name.lower()].index(item) + index = self.strain_lists[self.species_name.lower()].index( + item) strain_index_list.append(index) for seq, result in enumerate(results): @@ -263,7 +276,8 @@ class SnpBrowser: if self.variant_type == "SNP": display_strains = [] - snp_id, species_id, snp_name, rs, chr, mb, mb_2016, alleles, snp_source, conservation_score = result[:10] + snp_id, species_id, snp_name, rs, chr, mb, mb_2016, alleles, snp_source, conservation_score = result[ + :10] effect_list = result[10:28] if self.species_id == 1: self.allele_list = result[30:] @@ -279,7 +293,8 @@ class SnpBrowser: self.allele_list = display_strains effect_info_dict = get_effect_info(effect_list) - coding_domain_list = ['Start Gained', 'Start Lost', 'Stop Gained', 'Stop Lost', 'Nonsynonymous', 'Synonymous'] + coding_domain_list = ['Start Gained', 'Start Lost', + 'Stop Gained', 'Stop Lost', 'Nonsynonymous', 'Synonymous'] intron_domain_list = ['Splice Site', 'Nonsplice Site'] for key in effect_info_dict: @@ -296,19 +311,22 @@ class SnpBrowser: if 'Intergenic' in domain: if self.gene_name != "": - gene_id = get_gene_id(self.species_id, self.gene_name) + gene_id = get_gene_id( + self.species_id, self.gene_name) gene = [gene_id, self.gene_name] else: gene = check_if_in_gene(species_id, chr, mb) transcript = exon = function = function_details = '' if self.redundant == "false" or last_mb != mb: # filter redundant if self.include_record(domain, function, snp_source, conservation_score): - info_list = [snp_name, rs, chr, mb, alleles, gene, transcript, exon, domain, function, function_details, snp_source, conservation_score, snp_id] + info_list = [snp_name, rs, chr, mb, alleles, gene, transcript, exon, domain, + function, function_details, snp_source, conservation_score, snp_id] info_list.extend(self.allele_list) filtered_results.append(info_list) last_mb = mb else: - gene_list, transcript_list, exon_list, function_list, function_details_list = effect_info_dict[key] + gene_list, transcript_list, exon_list, function_list, function_details_list = effect_info_dict[ + key] for index, item in enumerate(gene_list): gene = item transcript = transcript_list[index] @@ -325,13 +343,15 @@ class SnpBrowser: function = "" if function_details_list: - function_details = "Biotype: " + function_details_list[index] + function_details = "Biotype: " + \ + function_details_list[index] else: function_details = "" if self.redundant == "false" or last_mb != mb: if self.include_record(domain, function, snp_source, conservation_score): - info_list = [snp_name, rs, chr, mb, alleles, gene, transcript, exon, domain, function, function_details, snp_source, conservation_score, snp_id] + info_list = [snp_name, rs, chr, mb, alleles, gene, transcript, exon, domain, + function, function_details, snp_source, conservation_score, snp_id] info_list.extend(self.allele_list) filtered_results.append(info_list) last_mb = mb @@ -345,7 +365,8 @@ class SnpBrowser: gene = "No Gene" domain = conservation_score = snp_id = snp_name = rs = flank_3 = flank_5 = ncbi = function = "" if self.include_record(domain, function, 
source_name, conservation_score): - filtered_results.append([indel_name, indel_chr, indel_mb_start, indel_mb_end, indel_strand, indel_type, indel_size, indel_sequence, source_name]) + filtered_results.append([indel_name, indel_chr, indel_mb_start, indel_mb_end, + indel_strand, indel_type, indel_size, indel_sequence, source_name]) last_mb = indel_mb_start else: @@ -365,7 +386,8 @@ class SnpBrowser: if gene_name and (gene_name not in gene_name_list): gene_name_list.append(gene_name) if len(gene_name_list) > 0: - gene_id_name_dict = get_gene_id_name_dict(self.species_id, gene_name_list) + gene_id_name_dict = get_gene_id_name_dict( + self.species_id, gene_name_list) # ZS: list of booleans representing which columns are entirely empty, so they aren't displayed on the page; only including ones that are sometimes empty (since there's always a location, etc) self.empty_columns = { @@ -383,7 +405,8 @@ class SnpBrowser: for i, result in enumerate(self.filtered_results): this_row = {} if self.variant_type == "SNP": - snp_name, rs, chr, mb, alleles, gene, transcript, exon, domain, function, function_details, snp_source, conservation_score, snp_id = result[:14] + snp_name, rs, chr, mb, alleles, gene, transcript, exon, domain, function, function_details, snp_source, conservation_score, snp_id = result[ + :14] allele_value_list = result[14:] if rs: snp_url = webqtlConfig.DBSNP % (rs) @@ -394,9 +417,11 @@ class SnpBrowser: end_bp = int(mb * 1000000 + 100) position_info = "chr%s:%d-%d" % (chr, start_bp, end_bp) if self.species_id == 2: - snp_url = webqtlConfig.GENOMEBROWSER_URL % ("rn6", position_info) + snp_url = webqtlConfig.GENOMEBROWSER_URL % ( + "rn6", position_info) else: - snp_url = webqtlConfig.GENOMEBROWSER_URL % ("mm10", position_info) + snp_url = webqtlConfig.GENOMEBROWSER_URL % ( + "mm10", position_info) mb = float(mb) mb_formatted = "%2.6f" % mb @@ -429,7 +454,8 @@ class SnpBrowser: gene_link = "" if transcript: - transcript_link = webqtlConfig.ENSEMBLETRANSCRIPT_URL % (transcript) + transcript_link = webqtlConfig.ENSEMBLETRANSCRIPT_URL % ( + transcript) self.empty_columns['transcript'] = "true" else: transcript_link = "" @@ -460,7 +486,8 @@ class SnpBrowser: function_list = function_details.strip().split(",") function_list = [item.strip() for item in function_list] function_list[0] = function_list[0].title() - function_details = ", ".join(item for item in function_list) + function_details = ", ".join( + item for item in function_list) function_details = function_details.replace("_", " ") function_details = function_details.replace("/", " -> ") if function_details == "Biotype: Protein Coding": @@ -675,8 +702,10 @@ def get_header_list(variant_type, strains, species=None, empty_columns=None): header_fields = [] header_data_names = [] if variant_type == "SNP": - header_fields.append(['Index', 'SNP ID', 'Chr', 'Mb', 'Alleles', 'Source', 'ConScore', 'Gene', 'Transcript', 'Exon', 'Domain 1', 'Domain 2', 'Function', 'Details']) - header_data_names = ['index', 'snp_name', 'chr', 'mb_formatted', 'alleles', 'snp_source', 'conservation_score', 'gene_name', 'transcript', 'exon', 'domain_1', 'domain_2', 'function', 'function_details'] + header_fields.append(['Index', 'SNP ID', 'Chr', 'Mb', 'Alleles', 'Source', 'ConScore', + 'Gene', 'Transcript', 'Exon', 'Domain 1', 'Domain 2', 'Function', 'Details']) + header_data_names = ['index', 'snp_name', 'chr', 'mb_formatted', 'alleles', 'snp_source', 'conservation_score', + 'gene_name', 'transcript', 'exon', 'domain_1', 'domain_2', 'function', 'function_details'] 
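Aside (not part of the patch): when a SNP has no rs identifier, the snp_browser.py hunk above links it to a genome-browser window of roughly 100 bp around its megabase position. A worked sketch of that arithmetic; the URL template and positions are illustrative (the real template is webqtlConfig.GENOMEBROWSER_URL), and since only the +100 end offset is visible in the hunk, the symmetric -100 start is an assumption:

    # Assumed UCSC-style position template; GN2 keeps the real one in webqtlConfig.
    GENOMEBROWSER_URL = "http://genome.ucsc.edu/cgi-bin/hgTracks?db=%s&position=%s"

    chrom = "1"
    mb = 52.123456                       # SNP position in Mb (hypothetical)
    start_bp = int(mb * 1000000 - 100)   # assumed symmetric to the +100 shown above
    end_bp = int(mb * 1000000 + 100)
    position_info = "chr%s:%d-%d" % (chrom, start_bp, end_bp)
    print(GENOMEBROWSER_URL % ("mm10", position_info))
    # -> ...?db=mm10&position=chr1:52123356-52123556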
header_fields.append(strain_list) header_data_names += strain_list @@ -712,8 +741,10 @@ def get_header_list(variant_type, strains, species=None, empty_columns=None): header_data_names.remove(col) elif variant_type == "InDel": - header_fields = ['Index', 'ID', 'Type', 'InDel Chr', 'Mb Start', 'Mb End', 'Strand', 'Size', 'Sequence', 'Source'] - header_data_names = ['index', 'indel_name', 'indel_type', 'indel_chr', 'indel_mb_s', 'indel_mb_e', 'indel_strand', 'indel_size', 'indel_sequence', 'source_name'] + header_fields = ['Index', 'ID', 'Type', 'InDel Chr', + 'Mb Start', 'Mb End', 'Strand', 'Size', 'Sequence', 'Source'] + header_data_names = ['index', 'indel_name', 'indel_type', 'indel_chr', 'indel_mb_s', + 'indel_mb_e', 'indel_strand', 'indel_size', 'indel_sequence', 'source_name'] return header_fields, empty_field_count, header_data_names @@ -726,10 +757,13 @@ def get_effect_details_by_category(effect_name=None, effect_value=None): function_detail_list = [] tmp_list = [] - gene_group_list = ['Upstream', 'Downstream', 'Splice Site', 'Nonsplice Site', '3\' UTR'] - biotype_group_list = ['Unknown Effect In Exon', 'Start Gained', 'Start Lost', 'Stop Gained', 'Stop Lost', 'Nonsynonymous', 'Synonymous'] + gene_group_list = ['Upstream', 'Downstream', + 'Splice Site', 'Nonsplice Site', '3\' UTR'] + biotype_group_list = ['Unknown Effect In Exon', 'Start Gained', + 'Start Lost', 'Stop Gained', 'Stop Lost', 'Nonsynonymous', 'Synonymous'] new_codon_group_list = ['Start Gained'] - codon_effect_group_list = ['Start Lost', 'Stop Gained', 'Stop Lost', 'Nonsynonymous', 'Synonymous'] + codon_effect_group_list = [ + 'Start Lost', 'Stop Gained', 'Stop Lost', 'Nonsynonymous', 'Synonymous'] effect_detail_list = effect_value.strip().split('|') effect_detail_list = [item.strip() for item in effect_detail_list] @@ -773,8 +807,10 @@ def get_effect_info(effect_list): effect_detail_list = [] effect_info_dict = {} - prime3_utr, prime5_utr, upstream, downstream, intron, nonsplice_site, splice_site, intergenic = effect_list[:8] - exon, non_synonymous_coding, synonymous_coding, start_gained, start_lost, stop_gained, stop_lost, unknown_effect_in_exon = effect_list[8:16] + prime3_utr, prime5_utr, upstream, downstream, intron, nonsplice_site, splice_site, intergenic = effect_list[ + :8] + exon, non_synonymous_coding, synonymous_coding, start_gained, start_lost, stop_gained, stop_lost, unknown_effect_in_exon = effect_list[ + 8:16] if intergenic: domain = "Intergenic" @@ -783,59 +819,72 @@ def get_effect_info(effect_list): # if not exon, get gene list/transcript list info if upstream: domain = "Upstream" - effect_detail_list = get_effect_details_by_category(effect_name='Upstream', effect_value=upstream) + effect_detail_list = get_effect_details_by_category( + effect_name='Upstream', effect_value=upstream) effect_info_dict[domain] = effect_detail_list if downstream: domain = "Downstream" - effect_detail_list = get_effect_details_by_category(effect_name='Downstream', effect_value=downstream) + effect_detail_list = get_effect_details_by_category( + effect_name='Downstream', effect_value=downstream) effect_info_dict[domain] = effect_detail_list if intron: if splice_site: domain = "Splice Site" - effect_detail_list = get_effect_details_by_category(effect_name='Splice Site', effect_value=splice_site) + effect_detail_list = get_effect_details_by_category( + effect_name='Splice Site', effect_value=splice_site) effect_info_dict[domain] = effect_detail_list if nonsplice_site: domain = "Nonsplice Site" - effect_detail_list = 
get_effect_details_by_category(effect_name='Nonsplice Site', effect_value=nonsplice_site) + effect_detail_list = get_effect_details_by_category( + effect_name='Nonsplice Site', effect_value=nonsplice_site) effect_info_dict[domain] = effect_detail_list # get gene, transcript_list, and exon info if prime3_utr: domain = "3\' UTR" - effect_detail_list = get_effect_details_by_category(effect_name='3\' UTR', effect_value=prime3_utr) + effect_detail_list = get_effect_details_by_category( + effect_name='3\' UTR', effect_value=prime3_utr) effect_info_dict[domain] = effect_detail_list if prime5_utr: domain = "5\' UTR" - effect_detail_list = get_effect_details_by_category(effect_name='5\' UTR', effect_value=prime5_utr) + effect_detail_list = get_effect_details_by_category( + effect_name='5\' UTR', effect_value=prime5_utr) effect_info_dict[domain] = effect_detail_list if start_gained: domain = "Start Gained" - effect_detail_list = get_effect_details_by_category(effect_name='Start Gained', effect_value=start_gained) + effect_detail_list = get_effect_details_by_category( + effect_name='Start Gained', effect_value=start_gained) effect_info_dict[domain] = effect_detail_list if unknown_effect_in_exon: domain = "Unknown Effect In Exon" - effect_detail_list = get_effect_details_by_category(effect_name='Unknown Effect In Exon', effect_value=unknown_effect_in_exon) + effect_detail_list = get_effect_details_by_category( + effect_name='Unknown Effect In Exon', effect_value=unknown_effect_in_exon) effect_info_dict[domain] = effect_detail_list if start_lost: domain = "Start Lost" - effect_detail_list = get_effect_details_by_category(effect_name='Start Lost', effect_value=start_lost) + effect_detail_list = get_effect_details_by_category( + effect_name='Start Lost', effect_value=start_lost) effect_info_dict[domain] = effect_detail_list if stop_gained: domain = "Stop Gained" - effect_detail_list = get_effect_details_by_category(effect_name='Stop Gained', effect_value=stop_gained) + effect_detail_list = get_effect_details_by_category( + effect_name='Stop Gained', effect_value=stop_gained) effect_info_dict[domain] = effect_detail_list if stop_lost: domain = "Stop Lost" - effect_detail_list = get_effect_details_by_category(effect_name='Stop Lost', effect_value=stop_lost) + effect_detail_list = get_effect_details_by_category( + effect_name='Stop Lost', effect_value=stop_lost) effect_info_dict[domain] = effect_detail_list if non_synonymous_coding: domain = "Nonsynonymous" - effect_detail_list = get_effect_details_by_category(effect_name='Nonsynonymous', effect_value=non_synonymous_coding) + effect_detail_list = get_effect_details_by_category( + effect_name='Nonsynonymous', effect_value=non_synonymous_coding) effect_info_dict[domain] = effect_detail_list if synonymous_coding: domain = "Synonymous" - effect_detail_list = get_effect_details_by_category(effect_name='Synonymous', effect_value=synonymous_coding) + effect_detail_list = get_effect_details_by_category( + effect_name='Synonymous', effect_value=synonymous_coding) effect_info_dict[domain] = effect_detail_list return effect_info_dict @@ -863,7 +912,8 @@ def get_gene_id_name_dict(species_id, gene_name_list): gene_id_name_dict = {} if len(gene_name_list) == 0: return "" - gene_name_str_list = ["'" + gene_name + "'" for gene_name in gene_name_list] + gene_name_str_list = ["'" + gene_name + \ + "'" for gene_name in gene_name_list] gene_name_str = ",".join(gene_name_str_list) query = """ diff --git a/wqflask/wqflask/update_search_results.py 
b/wqflask/wqflask/update_search_results.py index 07073d6a..2e467dc8 100644 --- a/wqflask/wqflask/update_search_results.py +++ b/wqflask/wqflask/update_search_results.py @@ -52,10 +52,12 @@ class GSearch: self.trait_list = [] with Bench("Creating trait objects"): for line in re: - dataset = create_dataset(line[3], "ProbeSet", get_samplelist=False) + dataset = create_dataset( + line[3], "ProbeSet", get_samplelist=False) trait_id = line[4] # with Bench("Building trait object"): - this_trait = GeneralTrait(dataset=dataset, name=trait_id, get_qtl_info=True, get_sample_info=False) + this_trait = GeneralTrait( + dataset=dataset, name=trait_id, get_qtl_info=True, get_sample_info=False) self.trait_list.append(this_trait) elif self.type == "phenotype": @@ -97,7 +99,8 @@ class GSearch: for line in re: dataset = create_dataset(line[2], "Publish") trait_id = line[3] - this_trait = GeneralTrait(dataset=dataset, name=trait_id, get_qtl_info=True, get_sample_info=False) + this_trait = GeneralTrait( + dataset=dataset, name=trait_id, get_qtl_info=True, get_sample_info=False) self.trait_list.append(this_trait) self.results = self.convert_to_json() diff --git a/wqflask/wqflask/user_login.py b/wqflask/wqflask/user_login.py index 2a2f8484..708d43d2 100644 --- a/wqflask/wqflask/user_login.py +++ b/wqflask/wqflask/user_login.py @@ -70,7 +70,8 @@ def set_password(password): assert len(password) >= 6, "Password shouldn't be shorter than 6 characters" - encoded_password = encode_password(pass_gen_fields, pass_gen_fields['unencrypted_password']) + encoded_password = encode_password( + pass_gen_fields, pass_gen_fields['unencrypted_password']) return encoded_password @@ -161,12 +162,16 @@ def verify_email(): # As long as they have access to the email account # We might as well log them in session_id_signed = get_signed_session_id(user_details) - flash("Thank you for logging in {}.".format(user_details['full_name']), "alert-success") - response = make_response(redirect(url_for('index_page', import_collections=import_col, anon_id=anon_id))) - response.set_cookie(UserSession.user_cookie_name, session_id_signed, max_age=None) + flash("Thank you for logging in {}.".format( + user_details['full_name']), "alert-success") + response = make_response(redirect( + url_for('index_page', import_collections=import_col, anon_id=anon_id))) + response.set_cookie(UserSession.user_cookie_name, + session_id_signed, max_age=None) return response else: - flash("Invalid code: Password reset code does not exist or might have expired!", "error") + flash( + "Invalid code: Password reset code does not exist or might have expired!", "error") @app.route("/n/login", methods=('GET', 'POST')) @@ -195,23 +200,28 @@ def login(): display_id = user_details['orcid'] else: display_id = "" - flash("Thank you for logging in {}.".format(display_id), "alert-success") + flash("Thank you for logging in {}.".format( + display_id), "alert-success") response = make_response(redirect(url_for('index_page'))) - response.set_cookie(UserSession.user_cookie_name, session_id_signed, max_age=None) + response.set_cookie( + UserSession.user_cookie_name, session_id_signed, max_age=None) else: flash("Something went unexpectedly wrong.", "alert-danger") response = make_response(redirect(url_for('index_page'))) return response else: - user_details = get_user_by_unique_column("email_address", params['email_address']) + user_details = get_user_by_unique_column( + "email_address", params['email_address']) password_match = False if user_details: submitted_password = params['password'] 
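Aside (not part of the patch): the login path in user_login.py re-encodes the submitted password with the salt and work-factor parameters stored alongside the user record, then compares the result to the stored hash with a constant-time comparison (pbkdf2.safe_str_cmp). A minimal stand-alone sketch of the same pattern using only the standard library; the field names and parameters here are hypothetical, not the ones encode_password() actually stores:

    import hashlib
    import hmac
    import os

    def make_password_fields(password, iterations=100000):
        salt = os.urandom(16)
        derived = hashlib.pbkdf2_hmac("sha256", password.encode(), salt, iterations)
        return {"salt": salt.hex(), "iterations": iterations, "password": derived.hex()}

    def check_password(pwfields, submitted_password):
        # Re-derive the hash with the stored salt and iteration count...
        derived = hashlib.pbkdf2_hmac(
            "sha256",
            submitted_password.encode(),
            bytes.fromhex(pwfields["salt"]),
            pwfields["iterations"],
        )
        # ...and compare in constant time, analogous to pbkdf2.safe_str_cmp above.
        return hmac.compare_digest(derived.hex(), pwfields["password"])

    stored = make_password_fields("correct horse battery staple")
    print(check_password(stored, "correct horse battery staple"))  # True
    print(check_password(stored, "wrong password"))                # False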
pwfields = user_details['password'] if isinstance(pwfields, str): pwfields = json.loads(pwfields) - encrypted_pass_fields = encode_password(pwfields, submitted_password) - password_match = pbkdf2.safe_str_cmp(encrypted_pass_fields['password'], pwfields['password']) + encrypted_pass_fields = encode_password( + pwfields, submitted_password) + password_match = pbkdf2.safe_str_cmp( + encrypted_pass_fields['password'], pwfields['password']) else: # Invalid e-mail flash("Invalid e-mail address. Please try again.", "alert-danger") @@ -227,12 +237,16 @@ def login(): anon_id = params['anon_id'] session_id_signed = get_signed_session_id(user_details) - flash("Thank you for logging in {}.".format(user_details['full_name']), "alert-success") - response = make_response(redirect(url_for('index_page', import_collections=import_col, anon_id=anon_id))) - response.set_cookie(UserSession.user_cookie_name, session_id_signed, max_age=None) + flash("Thank you for logging in {}.".format( + user_details['full_name']), "alert-success") + response = make_response(redirect( + url_for('index_page', import_collections=import_col, anon_id=anon_id))) + response.set_cookie( + UserSession.user_cookie_name, session_id_signed, max_age=None) return response else: - email_ob = send_verification_email(user_details, template_name="email/user_verification.txt") + email_ob = send_verification_email( + user_details, template_name="email/user_verification.txt") return render_template("newsecurity/verification_still_needed.html", subject=email_ob['subject']) else: # Incorrect password # ZS: It previously seemed to store that there was an incorrect log-in attempt here, but it did so in the MySQL DB so this might need to be reproduced with Redis @@ -252,8 +266,10 @@ def github_oauth2(): "code": code } - result = requests.post("https://github.com/login/oauth/access_token", json=data) - result_dict = {arr[0]: arr[1] for arr in [tok.split("=") for tok in result.text.split("&")]} + result = requests.post( + "https://github.com/login/oauth/access_token", json=data) + result_dict = {arr[0]: arr[1] + for arr in [tok.split("=") for tok in result.text.split("&")]} github_user = get_github_user_details(result_dict["access_token"]) @@ -277,7 +293,8 @@ def github_oauth2(): def get_github_user_details(access_token): from utility.tools import GITHUB_API_URL - result = requests.get(GITHUB_API_URL, headers={'Authorization': 'token ' + access_token}).content + result = requests.get(GITHUB_API_URL, headers={ + 'Authorization': 'token ' + access_token}).content return json.loads(result) @@ -323,7 +340,8 @@ def orcid_oauth2(): def get_github_user_details(access_token): from utility.tools import GITHUB_API_URL - result = requests.get(GITHUB_API_URL, headers={'Authorization': 'token ' + access_token}).content + result = requests.get(GITHUB_API_URL, headers={ + 'Authorization': 'token ' + access_token}).content return json.loads(result) @@ -389,13 +407,16 @@ def forgot_password_submit(): next_page = None if email_address != "": logger.debug("Wants to send password E-mail to ", email_address) - user_details = get_user_by_unique_column("email_address", email_address) + user_details = get_user_by_unique_column( + "email_address", email_address) if user_details: - email_subject = send_forgot_password_email(user_details["email_address"]) + email_subject = send_forgot_password_email( + user_details["email_address"]) return render_template("new_security/forgot_password_step2.html", subject=email_subject) else: - flash("The e-mail entered is not associated with an 
account.", "alert-danger") + flash("The e-mail entered is not associated with an account.", + "alert-danger") return redirect(url_for("forgot_password")) else: @@ -417,7 +438,8 @@ def password_reset(): return render_template( "new_security/password_reset.html", user_encode=user_details["email_address"]) else: - flash("Invalid code: Password reset code does not exist or might have expired!", "error") + flash( + "Invalid code: Password reset code does not exist or might have expired!", "error") return redirect(url_for("login")) else: return redirect(url_for("login")) @@ -446,21 +468,27 @@ def register_user(params): errors = [] user_details = {} - user_details['email_address'] = params.get('email_address', '').encode("utf-8").strip() + user_details['email_address'] = params.get( + 'email_address', '').encode("utf-8").strip() if not (5 <= len(user_details['email_address']) <= 50): - errors.append('Email Address needs to be between 5 and 50 characters.') + errors.append( + 'Email Address needs to be between 5 and 50 characters.') else: - email_exists = get_user_by_unique_column("email_address", user_details['email_address']) + email_exists = get_user_by_unique_column( + "email_address", user_details['email_address']) if email_exists: errors.append('User already exists with that email') - user_details['full_name'] = params.get('full_name', '').encode("utf-8").strip() + user_details['full_name'] = params.get( + 'full_name', '').encode("utf-8").strip() if not (5 <= len(user_details['full_name']) <= 50): errors.append('Full Name needs to be between 5 and 50 characters.') - user_details['organization'] = params.get('organization', '').encode("utf-8").strip() + user_details['organization'] = params.get( + 'organization', '').encode("utf-8").strip() if user_details['organization'] and not (5 <= len(user_details['organization']) <= 50): - errors.append('Organization needs to be empty or between 5 and 50 characters.') + errors.append( + 'Organization needs to be empty or between 5 and 50 characters.') password = str(params.get('password', '')) if not (6 <= len(password)): @@ -493,7 +521,8 @@ def register(): errors = register_user(params) if len(errors) == 0: - flash("Registration successful. You may login with your new account", "alert-info") + flash( + "Registration successful. 
You may login with your new account", "alert-info") return redirect(url_for("login")) return render_template("new_security/register_user.html", values=params, errors=errors) diff --git a/wqflask/wqflask/user_manager.py b/wqflask/wqflask/user_manager.py index a9bd65e6..5610833b 100644 --- a/wqflask/wqflask/user_manager.py +++ b/wqflask/wqflask/user_manager.py @@ -93,8 +93,10 @@ class AnonUser: this_collection = {} this_collection['id'] = collection['id'] this_collection['name'] = collection['name'] - this_collection['created_timestamp'] = collection['created_timestamp'].strftime('%b %d %Y %I:%M%p') - this_collection['changed_timestamp'] = collection['changed_timestamp'].strftime('%b %d %Y %I:%M%p') + this_collection['created_timestamp'] = collection['created_timestamp'].strftime( + '%b %d %Y %I:%M%p') + this_collection['changed_timestamp'] = collection['changed_timestamp'].strftime( + '%b %d %Y %I:%M%p') this_collection['num_members'] = collection['num_members'] this_collection['members'] = collection['members'] updated_collections.append(this_collection) @@ -108,21 +110,26 @@ class AnonUser: else: collections = json.loads(json_collections) for collection in collections: - collection['created_timestamp'] = datetime.datetime.strptime(collection['created_timestamp'], '%b %d %Y %I:%M%p') - collection['changed_timestamp'] = datetime.datetime.strptime(collection['changed_timestamp'], '%b %d %Y %I:%M%p') + collection['created_timestamp'] = datetime.datetime.strptime( + collection['created_timestamp'], '%b %d %Y %I:%M%p') + collection['changed_timestamp'] = datetime.datetime.strptime( + collection['changed_timestamp'], '%b %d %Y %I:%M%p') - collections = sorted(collections, key=lambda i: i['changed_timestamp'], reverse=True) + collections = sorted( + collections, key=lambda i: i['changed_timestamp'], reverse=True) return collections def import_traits_to_user(self): result = Redis.get(self.key) collections_list = json.loads(result if result else "[]") for collection in collections_list: - collection_exists = g.user_session.get_collection_by_name(collection['name']) + collection_exists = g.user_session.get_collection_by_name( + collection['name']) if collection_exists: continue else: - g.user_session.add_collection(collection['name'], collection['members']) + g.user_session.add_collection( + collection['name'], collection['members']) def display_num_collections(self): """ @@ -148,7 +155,8 @@ def verify_cookie(cookie): the_uuid, separator, the_signature = cookie.partition(':') assert len(the_uuid) == 36, "Is session_id a uuid?" assert separator == ":", "Expected a : here" - assert the_signature == actual_hmac_creation(the_uuid), "Uh-oh, someone tampering with the cookie?" + assert the_signature == actual_hmac_creation( + the_uuid), "Uh-oh, someone tampering with the cookie?" 
return the_uuid @@ -282,7 +290,8 @@ class UserSession: updated_collection['members'] = updated_traits updated_collection['num_members'] = len(updated_traits) - updated_collection['changed_timestamp'] = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p') + updated_collection['changed_timestamp'] = datetime.datetime.utcnow().strftime( + '%b %d %Y %I:%M%p') updated_collections = [] for collection in self.user_collections: @@ -308,7 +317,8 @@ class UserSession: updated_collection['members'] = updated_traits updated_collection['num_members'] = len(updated_traits) - updated_collection['changed_timestamp'] = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p') + updated_collection['changed_timestamp'] = datetime.datetime.utcnow().strftime( + '%b %d %Y %I:%M%p') updated_collections = [] for collection in self.user_collections: @@ -355,7 +365,8 @@ def get_cookie(): def set_cookie(response): if not request.cookies.get(g.cookie_session.cookie_name): - response.set_cookie(g.cookie_session.cookie_name, g.cookie_session.cookie) + response.set_cookie(g.cookie_session.cookie_name, + g.cookie_session.cookie) return response @@ -390,22 +401,28 @@ class RegisterUser: self.errors = [] self.user = Bunch() - self.user.email_address = kw.get('email_address', '').encode("utf-8").strip() + self.user.email_address = kw.get( + 'email_address', '').encode("utf-8").strip() if not (5 <= len(self.user.email_address) <= 50): - self.errors.append('Email Address needs to be between 5 and 50 characters.') + self.errors.append( + 'Email Address needs to be between 5 and 50 characters.') else: - email_exists = get_user_by_unique_column("email_address", self.user.email_address) + email_exists = get_user_by_unique_column( + "email_address", self.user.email_address) #email_exists = get_user_by_unique_column(es, "email_address", self.user.email_address) if email_exists: self.errors.append('User already exists with that email') self.user.full_name = kw.get('full_name', '').encode("utf-8").strip() if not (5 <= len(self.user.full_name) <= 50): - self.errors.append('Full Name needs to be between 5 and 50 characters.') + self.errors.append( + 'Full Name needs to be between 5 and 50 characters.') - self.user.organization = kw.get('organization', '').encode("utf-8").strip() + self.user.organization = kw.get( + 'organization', '').encode("utf-8").strip() if self.user.organization and not (5 <= len(self.user.organization) <= 50): - self.errors.append('Organization needs to be empty or between 5 and 50 characters.') + self.errors.append( + 'Organization needs to be empty or between 5 and 50 characters.') password = str(kw.get('password', '')) if not (6 <= len(password)): @@ -568,14 +585,16 @@ def password_reset(): if verification_code: user_email = check_verification_code(verification_code) if user_email: - user_details = get_user_by_unique_column('email_address', user_email) + user_details = get_user_by_unique_column( + 'email_address', user_email) if user_details: return render_template( "new_security/password_reset.html", user_encode=user_details["user_id"]) else: flash("Invalid code: User no longer exists!", "error") else: - flash("Invalid code: Password reset code does not exist or might have expired!", "error") + flash( + "Invalid code: Password reset code does not exist or might have expired!", "error") else: return redirect(url_for("login")) @@ -648,8 +667,10 @@ def github_oauth2(): "client_secret": GITHUB_CLIENT_SECRET, "code": code } - result = requests.post("https://github.com/login/oauth/access_token", json=data) - 
result_dict = {arr[0]: arr[1] for arr in [tok.split("=") for tok in [token.encode("utf-8") for token in result.text.split("&")]]} + result = requests.post( + "https://github.com/login/oauth/access_token", json=data) + result_dict = {arr[0]: arr[1] for arr in [tok.split( + "=") for tok in [token.encode("utf-8") for token in result.text.split("&")]]} github_user = get_github_user_details(result_dict["access_token"]) @@ -696,7 +717,8 @@ def orcid_oauth2(): def get_github_user_details(access_token): from utility.tools import GITHUB_API_URL - result = requests.get(GITHUB_API_URL, params={"access_token": access_token}) + result = requests.get(GITHUB_API_URL, params={ + "access_token": access_token}) return result.json() @@ -737,7 +759,8 @@ class LoginUser: return render_template( "new_security/login_user.html", external_login=external_login, redis_is_available=is_redis_available()) else: - user_details = get_user_by_unique_column("email_address", params["email_address"]) + user_details = get_user_by_unique_column( + "email_address", params["email_address"]) #user_details = get_user_by_unique_column(es, "email_address", params["email_address"]) user = None valid = None @@ -755,8 +778,10 @@ class LoginUser: pwfields.iterations, pwfields.keylength, pwfields.hashfunc) - logger.debug("\n\nComparing:\n{}\n{}\n".format(encrypted.password, pwfields.password)) - valid = pbkdf2.safe_str_cmp(encrypted.password, pwfields.password) + logger.debug("\n\nComparing:\n{}\n{}\n".format( + encrypted.password, pwfields.password)) + valid = pbkdf2.safe_str_cmp( + encrypted.password, pwfields.password) logger.debug("valid is:", valid) if valid and not user.confirmed: @@ -782,7 +807,8 @@ class LoginUser: else: if user: self.unsuccessful_login(user) - flash("Invalid email-address or password. Please try again.", "alert-danger") + flash("Invalid email-address or password. 
Please try again.", + "alert-danger") response = make_response(redirect(url_for('login'))) return response @@ -790,14 +816,17 @@ class LoginUser: def actual_login(self, user, assumed_by=None, import_collections=None): """The meat of the logging in process""" session_id_signed = self.successful_login(user, assumed_by) - flash("Thank you for logging in {}.".format(user.full_name), "alert-success") - response = make_response(redirect(url_for('index_page', import_collections=import_collections))) + flash("Thank you for logging in {}.".format( + user.full_name), "alert-success") + response = make_response( + redirect(url_for('index_page', import_collections=import_collections))) if self.remember_me: max_age = self.remember_time else: max_age = None - response.set_cookie(UserSession.cookie_name, session_id_signed, max_age=max_age) + response.set_cookie(UserSession.cookie_name, + session_id_signed, max_age=max_age) return response def successful_login(self, user, assumed_by=None): @@ -866,13 +895,15 @@ def forgot_password_submit(): next_page = None if email_address != "": logger.debug("Wants to send password E-mail to ", email_address) - user_details = get_user_by_unique_column("email_address", email_address) + user_details = get_user_by_unique_column( + "email_address", email_address) if user_details: ForgotPasswordEmail(user_details["email_address"]) return render_template("new_security/forgot_password_step2.html", subject=ForgotPasswordEmail.subject) else: - flash("The e-mail entered is not associated with an account.", "alert-danger") + flash("The e-mail entered is not associated with an account.", + "alert-danger") return redirect(url_for("forgot_password")) else: @@ -959,7 +990,8 @@ def register(): errors = result.errors if len(errors) == 0: - flash("Registration successful. You may login with your new account", "alert-info") + flash( + "Registration successful. You may login with your new account", "alert-info") return redirect(url_for("login")) return render_template("new_security/register_user.html", values=params, errors=errors) diff --git a/wqflask/wqflask/user_session.py b/wqflask/wqflask/user_session.py index f0f0d60c..6ccb2e80 100644 --- a/wqflask/wqflask/user_session.py +++ b/wqflask/wqflask/user_session.py @@ -36,7 +36,8 @@ def get_user_session(): def set_user_session(response): if hasattr(g, 'user_session'): if not request.cookies.get(g.user_session.cookie_name): - response.set_cookie(g.user_session.cookie_name, g.user_session.cookie) + response.set_cookie(g.user_session.cookie_name, + g.user_session.cookie) return response @@ -44,7 +45,8 @@ def verify_cookie(cookie): the_uuid, separator, the_signature = cookie.partition(':') assert len(the_uuid) == 36, "Is session_id a uuid?" assert separator == ":", "Expected a : here" - assert the_signature == hmac.hmac_creation(the_uuid), "Uh-oh, someone tampering with the cookie?" + assert the_signature == hmac.hmac_creation( + the_uuid), "Uh-oh, someone tampering with the cookie?" 
return the_uuid @@ -60,9 +62,11 @@ def create_signed_cookie(): def manage_user(): params = request.form if request.form else request.args if 'new_full_name' in params: - set_user_attribute(g.user_session.user_id, 'full_name', params['new_full_name']) + set_user_attribute(g.user_session.user_id, + 'full_name', params['new_full_name']) if 'new_organization' in params: - set_user_attribute(g.user_session.user_id, 'organization', params['new_organization']) + set_user_attribute(g.user_session.user_id, + 'organization', params['new_organization']) user_details = get_user_by_unique_column("user_id", g.user_session.user_id) @@ -108,7 +112,8 @@ class UserSession: # Grrr...this won't work because of the way flask handles cookies # Delete the cookie - flash("Due to inactivity your session has expired. If you'd like please login again.") + flash( + "Due to inactivity your session has expired. If you'd like please login again.") return None else: self.record = dict(login_time=time.time(), @@ -178,7 +183,9 @@ class UserSession: # ZS: Get user's collections if they exist collections = get_user_collections(self.user_id) - collections = [item for item in collections if item['name'] != "Your Default Collection"] + [item for item in collections if item['name'] == "Your Default Collection"] # ZS: Ensure Default Collection is last in list + collections = [item for item in collections if item['name'] != "Your Default Collection"] + \ + [item for item in collections if item['name'] == + "Your Default Collection"] # ZS: Ensure Default Collection is last in list return collections @property @@ -234,12 +241,14 @@ class UserSession: this_collection = self.get_collection_by_id(collection_id) updated_collection = this_collection - current_members_minus_new = [member for member in this_collection['members'] if member not in traits_to_add] + current_members_minus_new = [ + member for member in this_collection['members'] if member not in traits_to_add] updated_traits = traits_to_add + current_members_minus_new updated_collection['members'] = updated_traits updated_collection['num_members'] = len(updated_traits) - updated_collection['changed_timestamp'] = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p') + updated_collection['changed_timestamp'] = datetime.datetime.utcnow().strftime( + '%b %d %Y %I:%M%p') updated_collections = [] for collection in self.user_collections: @@ -265,7 +274,8 @@ class UserSession: updated_collection['members'] = updated_traits updated_collection['num_members'] = len(updated_traits) - updated_collection['changed_timestamp'] = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p') + updated_collection['changed_timestamp'] = datetime.datetime.utcnow().strftime( + '%b %d %Y %I:%M%p') updated_collections = [] for collection in self.user_collections: -- cgit v1.2.3 From d1bc52a0d8e1219f377e804c3f27a3543d234fcb Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Fri, 30 Apr 2021 12:47:43 +0300 Subject: autopep8: Fix E70 and E701 --- wqflask/maintenance/convert_geno_to_bimbam.py | 3 +- wqflask/maintenance/geno_to_json.py | 3 +- wqflask/utility/Plot.py | 2 +- wqflask/utility/tools.py | 3 +- .../marker_regression/display_mapping_results.py | 25 ++++---- wqflask/wqflask/marker_regression/rqtl_mapping.py | 66 ++++++++++++---------- wqflask/wqflask/snp_browser/snp_browser.py | 4 +- wqflask/wqflask/user_manager.py | 4 +- 8 files changed, 59 insertions(+), 51 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/maintenance/convert_geno_to_bimbam.py 
b/wqflask/maintenance/convert_geno_to_bimbam.py index a1712500..a2ede1f9 100644 --- a/wqflask/maintenance/convert_geno_to_bimbam.py +++ b/wqflask/maintenance/convert_geno_to_bimbam.py @@ -21,7 +21,8 @@ import simplejson as json from pprint import pformat as pf -class EmptyConfigurations(Exception): pass +class EmptyConfigurations(Exception): + pass class Marker: diff --git a/wqflask/maintenance/geno_to_json.py b/wqflask/maintenance/geno_to_json.py index 7bdf2b53..76a0fc98 100644 --- a/wqflask/maintenance/geno_to_json.py +++ b/wqflask/maintenance/geno_to_json.py @@ -26,7 +26,8 @@ from pprint import pformat as pf #from utility.tools import flat_files -class EmptyConfigurations(Exception): pass +class EmptyConfigurations(Exception): + pass class Marker: diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py index 4f5691c1..00658d10 100644 --- a/wqflask/utility/Plot.py +++ b/wqflask/utility/Plot.py @@ -319,7 +319,7 @@ def colorSpectrum(n=100): ImageColor.getrgb("rgb(0%,100%,0%)"), ImageColor.getrgb("rgb(0%,0%,100%)")] N = n * multiple - out = [None] * N; + out = [None] * N for i in range(N): x = float(i) / N out[i] = ImageColor.getrgb("rgb({}%,{}%,{}%".format( diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py index 263c3948..d82e478d 100644 --- a/wqflask/utility/tools.py +++ b/wqflask/utility/tools.py @@ -198,7 +198,8 @@ def locate(name, subdir=None): return lookfor else: raise Exception("Can not locate " + lookfor) - if subdir: sys.stderr.write(subdir) + if subdir: + sys.stderr.write(subdir) raise Exception("Can not locate " + name + " in " + base) diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py index cde822e8..8d0134d8 100644 --- a/wqflask/wqflask/marker_regression/display_mapping_results.py +++ b/wqflask/wqflask/marker_regression/display_mapping_results.py @@ -1343,13 +1343,13 @@ class DisplayMappingResults: (float(txEnd) - startMb) # at least one pixel if (geneEndPix < xLeftOffset): - return; # this gene is not on the screen + return # this gene is not on the screen elif (geneEndPix > xLeftOffset + plotWidth): - geneEndPix = xLeftOffset + plotWidth; # clip the last in-range gene + geneEndPix = xLeftOffset + plotWidth # clip the last in-range gene if (geneStartPix > xLeftOffset + plotWidth): - return; # we are outside the valid on-screen range, so stop drawing genes + return # we are outside the valid on-screen range, so stop drawing genes elif (geneStartPix < xLeftOffset): - geneStartPix = xLeftOffset; # clip the first in-range gene + geneStartPix = xLeftOffset # clip the first in-range gene # color the gene based on SNP density # found earlier, needs to be recomputed as snps are added @@ -1397,13 +1397,13 @@ class DisplayMappingResults: (float(txEnd) - startMb) # at least one pixel if (geneEndPix < xLeftOffset): - return; # this gene is not on the screen + return # this gene is not on the screen elif (geneEndPix > xLeftOffset + plotWidth): - geneEndPix = xLeftOffset + plotWidth; # clip the last in-range gene + geneEndPix = xLeftOffset + plotWidth # clip the last in-range gene if (geneStartPix > xLeftOffset + plotWidth): - return; # we are outside the valid on-screen range, so stop drawing genes + return # we are outside the valid on-screen range, so stop drawing genes elif (geneStartPix < xLeftOffset): - geneStartPix = xLeftOffset; # clip the first in-range gene + geneStartPix = xLeftOffset # clip the first in-range gene outlineColor = DARKBLUE fillColor = DARKBLUE @@ -1603,9 
+1603,9 @@ class DisplayMappingResults: drawit = 1 if (geneStartPix < xLeftOffset): - drawit = 0; + drawit = 0 if (geneStartPix > xLeftOffset + plotWidth): - drawit = 0; + drawit = 0 if drawit == 1: if _chr[i].name != " - ": @@ -1755,7 +1755,7 @@ class DisplayMappingResults: geneYLocation + 17 + 2*maxind*self.EACH_GENE_HEIGHT*zoom), fill=BLACK, angle=-90) - oldgeneEndPix = geneEndPix; + oldgeneEndPix = geneEndPix oldgeno = _chr[j].genotype firstGene = 0 else: @@ -3208,5 +3208,6 @@ class DisplayMappingResults: if lCorr: lCorr = lCorr[0] break - except: raise # lCorr = None + except: + raise # lCorr = None return lCorr diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py index c2b165a4..741d6c23 100644 --- a/wqflask/wqflask/marker_regression/rqtl_mapping.py +++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py @@ -19,24 +19,24 @@ logger = utility.logger.getLogger(__name__) def get_trait_data_type(trait_db_string): - logger.info("get_trait_data_type"); + logger.info("get_trait_data_type") the_query = "SELECT value FROM TraitMetadata WHERE type='trait_data_type'" - logger.info("the_query done"); + logger.info("the_query done") results_json = g.db.execute(the_query).fetchone() - logger.info("the_query executed"); + logger.info("the_query executed") results_ob = json.loads(results_json[0]) - logger.info("json results loaded"); + logger.info("json results loaded") if trait_db_string in results_ob: - logger.info("found"); + logger.info("found") return results_ob[trait_db_string] else: - logger.info("not found"); + logger.info("not found") return "numeric" # Run qtl mapping using R/qtl def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permCheck, num_perm, perm_strata_list, do_control, control_marker, manhattan_plot, pair_scan, cofactors): - logger.info("Start run_rqtl_geno"); + logger.info("Start run_rqtl_geno") # Get pointers to some common R functions r_library = ro.r["library"] # Map the library function r_c = ro.r["c"] # Map the c function @@ -46,7 +46,7 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec print((r_library("qtl"))) # Load R/qtl - logger.info("QTL library loaded"); + logger.info("QTL library loaded") # Get pointers to some R/qtl functions scanone = ro.r["scanone"] # Map the scanone function @@ -75,29 +75,29 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec genofilelocation = locate(dataset.group.genofile, "genotype") else: genofilelocation = locate(dataset.group.name + ".geno", "genotype") - logger.info("Going to create a cross from geno"); + logger.info("Going to create a cross from geno") # TODO: Add the SEX if that is available cross_object = GENOtoCSVR(genofilelocation, crossfilelocation) - logger.info("before calc_genoprob"); + logger.info("before calc_genoprob") if manhattan_plot: cross_object = calc_genoprob(cross_object) else: cross_object = calc_genoprob(cross_object, step=5, stepwidth="max") - logger.info("after calc_genoprob"); + logger.info("after calc_genoprob") pheno_string = sanitize_rqtl_phenotype(vals) - logger.info("phenostring done"); + logger.info("phenostring done") names_string = sanitize_rqtl_names(samples) - logger.info("sanitized pheno and names"); + logger.info("sanitized pheno and names") # Add the phenotype cross_object = add_phenotype(cross_object, pheno_string, "the_pheno") # Add the phenotype cross_object = add_names(cross_object, names_string, "the_names") - logger.info("Added pheno and names"); + 
logger.info("Added pheno and names") # Create the additive covariate markers marker_covars = create_marker_covariates(control_marker, cross_object) - logger.info("Marker covars done"); + logger.info("Marker covars done") if cofactors != "": - logger.info("Cofactors: " + cofactors); + logger.info("Cofactors: " + cofactors) # Create the covariates from selected traits cross_object, trait_covars = add_cofactors( cross_object, dataset, cofactors, samples) @@ -108,10 +108,12 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec # DEBUG to save the session object to file if pair_scan: if do_control == "true": - logger.info("Using covariate"); result_data_frame = scantwo( + logger.info("Using covariate") + result_data_frame = scantwo( cross_object, pheno="the_pheno", addcovar=covars, model=model, method=method, n_cluster = 16) else: - logger.info("No covariates"); result_data_frame = scantwo( + logger.info("No covariates") + result_data_frame = scantwo( cross_object, pheno="the_pheno", model=model, method=method, n_cluster=16) pair_scan_filename = webqtlUtil.genRandStr("scantwo_") + ".png" @@ -122,11 +124,13 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec return process_pair_scan_results(result_data_frame) else: if do_control == "true" or cofactors != "": - logger.info("Using covariate"); result_data_frame = scanone( + logger.info("Using covariate") + result_data_frame = scanone( cross_object, pheno="the_pheno", addcovar=covars, model=model, method=method) ro.r('save.image(file = "/home/zas1024/gn2-zach/itp_cofactor_test.RData")') else: - logger.info("No covariates"); result_data_frame = scanone( + logger.info("No covariates") + result_data_frame = scanone( cross_object, pheno="the_pheno", model=model, method=method) # Do permutation (if requested by user) @@ -275,18 +279,18 @@ def add_phenotype(cross, pheno_as_string, col_name): def add_categorical_covar(cross, covar_as_string, i): ro.globalenv["the_cross"] = cross - logger.info("cross set"); + logger.info("cross set") ro.r('covar <- as.factor(' + covar_as_string + ')') - logger.info("covar set"); + logger.info("covar set") ro.r('newcovar <- model.matrix(~covar-1)') - logger.info("model.matrix finished"); + logger.info("model.matrix finished") ro.r('cat("new covar columns", ncol(newcovar), "\n")') nCol = ro.r('ncol(newcovar)') - logger.info("ncol covar done: " + str(nCol[0])); + logger.info("ncol covar done: " + str(nCol[0])) ro.r('pheno <- data.frame(pull.pheno(the_cross))') - logger.info("pheno pulled from cross"); + logger.info("pheno pulled from cross") nCol = int(nCol[0]) - logger.info("nCol python int:" + str(nCol)); + logger.info("nCol python int:" + str(nCol)) col_names = [] # logger.info("loop") for x in range(1, (nCol + 1)): @@ -298,7 +302,7 @@ def add_categorical_covar(cross, covar_as_string, i): col_names.append(col_name) #logger.info("loop" + str(x) + "done"); - logger.info("returning from add_categorical_covar"); + logger.info("returning from add_categorical_covar") return ro.r["the_cross"], col_names @@ -323,7 +327,7 @@ def add_cofactors(cross, this_dataset, covariates, samples): covariate_list = covariates.split(",") covar_name_string = "c(" for i, covariate in enumerate(covariate_list): - logger.info("Covariate: " + covariate); + logger.info("Covariate: " + covariate) this_covar_data = [] covar_as_string = "c(" trait_name = covariate.split(":")[0] @@ -352,12 +356,12 @@ def add_cofactors(cross, this_dataset, covariates, samples): covar_as_string += ")" datatype = 
get_trait_data_type(covariate) - logger.info("Covariate: " + covariate + " is of type: " + datatype); + logger.info("Covariate: " + covariate + " is of type: " + datatype) if(datatype == "categorical"): # Cat variable - logger.info("call of add_categorical_covar"); + logger.info("call of add_categorical_covar") cross, col_names = add_categorical_covar( cross, covar_as_string, i) # Expand and add it to the cross - logger.info("add_categorical_covar returned"); + logger.info("add_categorical_covar returned") # Go through the additional covar names for z, col_name in enumerate(col_names): if i < (len(covariate_list) - 1): diff --git a/wqflask/wqflask/snp_browser/snp_browser.py b/wqflask/wqflask/snp_browser/snp_browser.py index 7fcbe984..e98cfb71 100644 --- a/wqflask/wqflask/snp_browser/snp_browser.py +++ b/wqflask/wqflask/snp_browser/snp_browser.py @@ -673,13 +673,13 @@ def get_browser_sample_lists(species_id=1): strain_lists = {} mouse_strain_list = [] query = "SHOW COLUMNS FROM SnpPattern;" - results = g.db.execute(query).fetchall(); + results = g.db.execute(query).fetchall() for result in results[1:]: mouse_strain_list.append(result[0]) rat_strain_list = [] query = "SHOW COLUMNS FROM RatSnpPattern;" - results = g.db.execute(query).fetchall(); + results = g.db.execute(query).fetchall() for result in results[2:]: rat_strain_list.append(result[0]) diff --git a/wqflask/wqflask/user_manager.py b/wqflask/wqflask/user_manager.py index 5610833b..9ebec405 100644 --- a/wqflask/wqflask/user_manager.py +++ b/wqflask/wqflask/user_manager.py @@ -765,10 +765,10 @@ class LoginUser: user = None valid = None if user_details: - user = model.User(); + user = model.User() for key in user_details: user.__dict__[key] = user_details[key] - valid = False; + valid = False submitted_password = params['password'] pwfields = Struct(json.loads(user.password)) -- cgit v1.2.3 From bd702e59d7a426fe351d34367bf824683c655696 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Fri, 30 Apr 2021 12:52:55 +0300 Subject: autopep8: Fix W291, W292, W293, W391 --- wqflask/base/mrna_assay_tissue_data.py | 2 +- wqflask/maintenance/convert_dryad_to_bimbam.py | 2 +- wqflask/maintenance/convert_geno_to_bimbam.py | 4 +-- .../maintenance/generate_kinship_from_bimbam.py | 8 ++--- wqflask/maintenance/geno_to_json.py | 34 +++++++++--------- wqflask/tests/unit/wqflask/api/test_correlation.py | 4 +-- .../correlation/test_correlation_functions.py | 2 +- .../marker_regression/test_qtlreaper_mapping.py | 4 +-- .../wqflask/marker_regression/test_rqtl_mapping.py | 7 ---- .../unit/wqflask/snp_browser/test_snp_browser.py | 8 ++--- wqflask/tests/unit/wqflask/test_server_side.py | 4 +-- wqflask/utility/__init__.py | 2 -- wqflask/utility/genofile_parser.py | 1 - wqflask/wqflask/__init__.py | 2 +- wqflask/wqflask/api/mapping.py | 2 -- wqflask/wqflask/api/router.py | 14 ++++---- wqflask/wqflask/collect.py | 1 - .../comparison_bar_chart/comparison_bar_chart.py | 7 ++-- wqflask/wqflask/correlation/corr_scatter_plot.py | 4 +-- wqflask/wqflask/correlation/show_corr_results.py | 1 - .../wqflask/correlation_matrix/show_corr_matrix.py | 2 +- wqflask/wqflask/ctl/ctl_analysis.py | 3 +- wqflask/wqflask/db_info.py | 2 -- wqflask/wqflask/docs.py | 2 +- wqflask/wqflask/export_traits.py | 4 +-- .../wqflask/external_tools/send_to_webgestalt.py | 2 +- wqflask/wqflask/heatmap/heatmap.py | 2 +- wqflask/wqflask/interval_analyst/GeneUtil.py | 24 ++++++------- .../wqflask/marker_regression/qtlreaper_mapping.py | 10 +++--- wqflask/wqflask/marker_regression/rqtl_mapping.py | 2 +- 
wqflask/wqflask/resource_manager.py | 2 +- wqflask/wqflask/search_results.py | 1 - wqflask/wqflask/server_side.py | 2 +- wqflask/wqflask/snp_browser/snp_browser.py | 7 ++-- wqflask/wqflask/user_login.py | 42 +++++++++++----------- wqflask/wqflask/user_session.py | 2 -- 36 files changed, 98 insertions(+), 124 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/base/mrna_assay_tissue_data.py b/wqflask/base/mrna_assay_tissue_data.py index 9bb29664..882ae911 100644 --- a/wqflask/base/mrna_assay_tissue_data.py +++ b/wqflask/base/mrna_assay_tissue_data.py @@ -74,7 +74,7 @@ class MrnaAssayTissueData: def get_symbol_values_pairs(self): id_list = [self.data[symbol].data_id for symbol in self.data] - + symbol_values_dict = {} if len(id_list) > 0: diff --git a/wqflask/maintenance/convert_dryad_to_bimbam.py b/wqflask/maintenance/convert_dryad_to_bimbam.py index e417c280..18fbb8a1 100644 --- a/wqflask/maintenance/convert_dryad_to_bimbam.py +++ b/wqflask/maintenance/convert_dryad_to_bimbam.py @@ -52,7 +52,7 @@ def read_dryad_file(filename): # this_row.append(line.split(" ")[i+2]) # print("row: " + str(i)) # geno_rows.append(this_row) - # + # # return geno_rows diff --git a/wqflask/maintenance/convert_geno_to_bimbam.py b/wqflask/maintenance/convert_geno_to_bimbam.py index a2ede1f9..c5af1ca6 100644 --- a/wqflask/maintenance/convert_geno_to_bimbam.py +++ b/wqflask/maintenance/convert_geno_to_bimbam.py @@ -91,7 +91,7 @@ class ConvertGenoFile: self.markers.append(this_marker.__dict__) - self.write_to_bimbam() + self.write_to_bimbam() def write_to_bimbam(self): with open(self.output_files[0], "w") as geno_fh: @@ -126,7 +126,7 @@ class ConvertGenoFile: self.sample_list = row_contents[3:] else: self.sample_list = row_contents[2:] - + def process_rows(self): for self.latest_row_pos, row in enumerate(self.input_fh): self.latest_row_value = row diff --git a/wqflask/maintenance/generate_kinship_from_bimbam.py b/wqflask/maintenance/generate_kinship_from_bimbam.py index bed634fa..cd39fceb 100644 --- a/wqflask/maintenance/generate_kinship_from_bimbam.py +++ b/wqflask/maintenance/generate_kinship_from_bimbam.py @@ -19,7 +19,7 @@ class GenerateKinshipMatrices: self.group_name = group_name self.geno_file = geno_file self.pheno_file = pheno_file - + def generate_kinship(self): gemma_command = "/gnu/store/xhzgjr0jvakxv6h3blj8z496xjig69b0-profile/bin/gemma -g " + self.geno_file + \ " -p " + self.pheno_file + \ @@ -56,11 +56,11 @@ class GenerateKinshipMatrices: print(" Column is:", convertob.latest_col_value) print(" Row is:", convertob.latest_row_value) break - - + + if __name__ == "__main__": Geno_Directory = """/export/local/home/zas1024/genotype_files/genotype/""" Bimbam_Directory = """/export/local/home/zas1024/genotype_files/genotype/bimbam/""" GenerateKinshipMatrices.process_all(Geno_Directory, Bimbam_Directory) - + # ./gemma -g /home/zas1024/genotype_files/genotype/bimbam/BXD_geno.txt -p /home/zas1024/genotype_files/genotype/bimbam/BXD_pheno.txt -gk 1 -o BXD diff --git a/wqflask/maintenance/geno_to_json.py b/wqflask/maintenance/geno_to_json.py index 76a0fc98..27eb6553 100644 --- a/wqflask/maintenance/geno_to_json.py +++ b/wqflask/maintenance/geno_to_json.py @@ -29,7 +29,7 @@ from pprint import pformat as pf class EmptyConfigurations(Exception): pass - + class Marker: def __init__(self): self.name = None @@ -42,20 +42,20 @@ class Marker: class ConvertGenoFile: def __init__(self, input_file, output_file): - + self.input_file = input_file self.output_file = output_file - + self.mb_exists = False 
self.cm_exists = False self.markers = [] - + self.latest_row_pos = None self.latest_col_pos = None - + self.latest_row_value = None self.latest_col_value = None - + def convert(self): self.haplotype_notation = { @@ -64,16 +64,16 @@ class ConvertGenoFile: '@het': "0.5", '@unk': "NA" } - + self.configurations = {} #self.skipped_cols = 3 - + # if self.input_file.endswith(".geno.gz"): # print("self.input_file: ", self.input_file) # self.input_fh = gzip.open(self.input_file) # else: self.input_fh = open(self.input_file) - + with open(self.output_file, "w") as self.output_fh: # if self.file_type == "geno": self.process_csv() @@ -105,22 +105,22 @@ class ConvertGenoFile: self.configurations[genotype.upper()]) else: this_marker.genotypes.append("NA") - - #print("this_marker is:", pf(this_marker.__dict__)) + + #print("this_marker is:", pf(this_marker.__dict__)) # if this_marker.chr == "14": self.markers.append(this_marker.__dict__) with open(self.output_file, 'w') as fh: json.dump(self.markers, fh, indent=" ", sort_keys=True) - + # print('configurations:', str(configurations)) #self.latest_col_pos = item_count + self.skipped_cols #self.latest_col_value = item - + # if item_count != 0: # self.output_fh.write(" ") # self.output_fh.write(self.configurations[item.upper()]) - + # self.output_fh.write("\n") def process_rows(self): @@ -176,12 +176,12 @@ class ConvertGenoFile: print(" Column is:", convertob.latest_col_value) print(" Row is:", convertob.latest_row_value) break - + # def process_snps_file(cls, snps_file, new_directory): # output_file = os.path.join(new_directory, "mouse_families.json") # print("%s -> %s" % (snps_file, output_file)) # convertob = ConvertGenoFile(input_file, output_file) - + if __name__ == "__main__": Old_Geno_Directory = """/export/local/home/zas1024/gn2-zach/genotype_files/genotype""" @@ -192,5 +192,5 @@ if __name__ == "__main__": # convertob.convert() ConvertGenoFile.process_all(Old_Geno_Directory, New_Geno_Directory) # ConvertGenoFiles(Geno_Directory) - + #process_csv(Input_File, Output_File) diff --git a/wqflask/tests/unit/wqflask/api/test_correlation.py b/wqflask/tests/unit/wqflask/api/test_correlation.py index bd99838d..34ffa9ef 100644 --- a/wqflask/tests/unit/wqflask/api/test_correlation.py +++ b/wqflask/tests/unit/wqflask/api/test_correlation.py @@ -105,9 +105,9 @@ class TestCorrelations(unittest.TestCase): target_dataset = AttributeSetter({"group": group}) target_vals = [3.4, 6.2, 4.1, 3.4, 1.2, 5.6] - trait_data = {"S1": AttributeSetter({"value": 2.3}), "S2": AttributeSetter({"value": 1.1}), + trait_data = {"S1": AttributeSetter({"value": 2.3}), "S2": AttributeSetter({"value": 1.1}), "S3": AttributeSetter( - {"value": 6.3}), "S4": AttributeSetter({"value": 3.6}), "S5": AttributeSetter({"value": 4.1}), + {"value": 6.3}), "S4": AttributeSetter({"value": 3.6}), "S5": AttributeSetter({"value": 4.1}), "S6": AttributeSetter({"value": 5.0})} this_trait = AttributeSetter({"data": trait_data}) mock_normalize.return_value = ([2.3, 1.1, 6.3, 3.6, 4.1, 5.0], diff --git a/wqflask/tests/unit/wqflask/correlation/test_correlation_functions.py b/wqflask/tests/unit/wqflask/correlation/test_correlation_functions.py index 44d2e0fc..2bbeab1f 100644 --- a/wqflask/tests/unit/wqflask/correlation/test_correlation_functions.py +++ b/wqflask/tests/unit/wqflask/correlation/test_correlation_functions.py @@ -5,7 +5,7 @@ from wqflask.correlation.correlation_functions import cal_zero_order_corr_for_ti class TestCorrelationFunctions(unittest.TestCase): - + 
@mock.patch("wqflask.correlation.correlation_functions.MrnaAssayTissueData") def test_get_trait_symbol_and_tissue_values(self, mock_class): """test for getting trait symbol and tissue_values""" diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py index c762982b..1198740d 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py @@ -8,7 +8,7 @@ from wqflask.marker_regression.qtlreaper_mapping import gen_pheno_txt_file class TestQtlReaperMapping(unittest.TestCase): @mock.patch("wqflask.marker_regression.qtlreaper_mapping.TEMPDIR", "/home/user/data") - def test_gen_pheno_txt_file(self): + def test_gen_pheno_txt_file(self): vals = ["V1", "x", "V4", "V3","x"] samples = ["S1", "S2", "S3", "S4","S5"] trait_filename = "trait_file" @@ -21,5 +21,3 @@ class TestQtlReaperMapping(unittest.TestCase): 'S1\tS3\tS4\n'), mock.call('T1\t'), mock.call('V1\tV4\tV3')] filehandler.write.assert_has_calls(write_calls) - - diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py index 6996c275..d69a20d3 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py @@ -40,10 +40,3 @@ class TestRqtlMapping(unittest.TestCase): expected_sanitized_name = "c('f',NA,'r',NA,NA)" results = sanitize_rqtl_names(vals) self.assertEqual(expected_sanitized_name, results) - - - - - - - diff --git a/wqflask/tests/unit/wqflask/snp_browser/test_snp_browser.py b/wqflask/tests/unit/wqflask/snp_browser/test_snp_browser.py index ce3e7b83..8823e1fc 100644 --- a/wqflask/tests/unit/wqflask/snp_browser/test_snp_browser.py +++ b/wqflask/tests/unit/wqflask/snp_browser/test_snp_browser.py @@ -21,9 +21,9 @@ class TestSnpBrowser(unittest.TestCase): "transcript": "false", "exon": "false", "domain_2": "true", "function": "false", "function_details": "true"} strains = {"mouse": ["S1", "S2", "S3", "S4", "S5"], "rat": []} expected_results = ([['Index', 'SNP ID', 'Chr', 'Mb', 'Alleles', 'ConScore', - 'Domain 1', 'Domain 2', 'Details'], - ['S1', 'S2', 'S3', 'S4', 'S5']], 5, - ['index', 'snp_name', 'chr', 'mb_formatted', 'alleles', + 'Domain 1', 'Domain 2', 'Details'], + ['S1', 'S2', 'S3', 'S4', 'S5']], 5, + ['index', 'snp_name', 'chr', 'mb_formatted', 'alleles', 'conservation_score', 'domain_1', 'domain_2', 'function_details', 'S1', 'S2', 'S3', 'S4', 'S5']) @@ -33,7 +33,7 @@ class TestSnpBrowser(unittest.TestCase): variant_type="InDel", strains=strains, species="rat", empty_columns=[]) expected_results_with_indel = ( ['Index', 'ID', 'Type', 'InDel Chr', 'Mb Start', - 'Mb End', 'Strand', 'Size', 'Sequence', 'Source'], 0, + 'Mb End', 'Strand', 'Size', 'Sequence', 'Source'], 0, ['index', 'indel_name', 'indel_type', 'indel_chr', 'indel_mb_s', 'indel_mb_e', 'indel_strand', 'indel_size', 'indel_sequence', 'source_name']) diff --git a/wqflask/tests/unit/wqflask/test_server_side.py b/wqflask/tests/unit/wqflask/test_server_side.py index 69977146..9d988aea 100644 --- a/wqflask/tests/unit/wqflask/test_server_side.py +++ b/wqflask/tests/unit/wqflask/test_server_side.py @@ -17,8 +17,8 @@ class TestServerSideTableTests(unittest.TestCase): def test_get_page(self): rows_count = 3 table_rows = [ - {'first': 'd', 'second': 4, 'third': 'zz'}, - {'first': 'b', 'second': 2, 'third': 'aa'}, + 
{'first': 'd', 'second': 4, 'third': 'zz'}, + {'first': 'b', 'second': 2, 'third': 'aa'}, {'first': 'c', 'second': 1, 'third': 'ss'}, ] headers = ['first', 'second', 'third'] diff --git a/wqflask/utility/__init__.py b/wqflask/utility/__init__.py index d540c96e..816bc4df 100644 --- a/wqflask/utility/__init__.py +++ b/wqflask/utility/__init__.py @@ -33,5 +33,3 @@ class Struct: def __repr__(self): return '{%s}' % str(', '.join('%s : %s' % (k, repr(v)) for (k, v) in list(self.__dict__.items()))) - - diff --git a/wqflask/utility/genofile_parser.py b/wqflask/utility/genofile_parser.py index 09100bd9..c0629b5d 100644 --- a/wqflask/utility/genofile_parser.py +++ b/wqflask/utility/genofile_parser.py @@ -98,4 +98,3 @@ class ConvertGenoFile: print("WARNING:", genotype.upper()) this_marker.genotypes.append("NA") self.markers.append(this_marker.__dict__) - diff --git a/wqflask/wqflask/__init__.py b/wqflask/wqflask/__init__.py index 712517a3..a2bf3085 100644 --- a/wqflask/wqflask/__init__.py +++ b/wqflask/wqflask/__init__.py @@ -50,4 +50,4 @@ from wqflask import db_info from wqflask import user_login from wqflask import user_session -import wqflask.views +import wqflask.views diff --git a/wqflask/wqflask/api/mapping.py b/wqflask/wqflask/api/mapping.py index c22b44a9..e4a3fb77 100644 --- a/wqflask/wqflask/api/mapping.py +++ b/wqflask/wqflask/api/mapping.py @@ -140,5 +140,3 @@ def initialize_parameters(start_vars, dataset, this_trait): mapping_params['perm_check'] = False return mapping_params - - diff --git a/wqflask/wqflask/api/router.py b/wqflask/wqflask/api/router.py index e7dfa4e0..f7d52ca3 100644 --- a/wqflask/wqflask/api/router.py +++ b/wqflask/wqflask/api/router.py @@ -59,13 +59,13 @@ def get_species_info(species_name, file_format="json"): WHERE (Name="{0}" OR FullName="{0}" OR SpeciesName="{0}");""".format(species_name)) the_species = results.fetchone() - species_dict = { + species_dict = { "Id": the_species[0], "Name": the_species[1], "FullName": the_species[2], "TaxonomyId": the_species[3] } - + return flask.jsonify(species_dict) @@ -639,7 +639,7 @@ def trait_sample_data(dataset_name, trait_name, file_format="json"): return flask.jsonify(sample_list) else: - return return_error(code=204, source=request.url_rule.rule, title="No Results", details="") + return return_error(code=204, source=request.url_rule.rule, title="No Results", details="") @app.route("/api/v_{}/trait//".format(version)) @@ -685,7 +685,7 @@ def get_trait_info(dataset_name, trait_name, file_format="json"): # ZS: Check if the user input the dataset_name as BXDPublish, etc (which is always going to be the group name + "Publish" if "Publish" in dataset_name: dataset_name = dataset_name.replace("Publish", "") - + group_id = get_group_id(dataset_name) pheno_query = """ SELECT @@ -898,7 +898,7 @@ def get_dataset_trait_ids(dataset_name, start_vars): data_type = "Publish" dataset_name = dataset_name.replace("Publish", "") dataset_id = get_group_id(dataset_name) - + query = """ SELECT PublishXRef.PhenotypeId, PublishXRef.Id, InbredSet.InbredSetCode @@ -949,9 +949,9 @@ def get_samplelist(dataset_name): WHERE StrainXRef.StrainId = Strain.Id AND StrainXRef.InbredSetId = {} """.format(group_id) - + results = g.db.execute(query).fetchall() - + samplelist = [result[0] for result in results] return samplelist diff --git a/wqflask/wqflask/collect.py b/wqflask/wqflask/collect.py index 1fcf15f0..9fd89524 100644 --- a/wqflask/wqflask/collect.py +++ b/wqflask/wqflask/collect.py @@ -239,4 +239,3 @@ def change_collection_name(): 
g.user_session.change_collection_name(collection_id, new_name) return new_name - diff --git a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py index 0fabb833..d86c8e16 100644 --- a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py +++ b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py @@ -45,7 +45,7 @@ class ComparisonBarChart: # ZS: Getting initial group name before verifying all traits are in the same group in the following loop this_group = self.trait_list[0][1].group.name for trait_db in self.trait_list: - + if trait_db[1].group.name != this_group: self.insufficient_shared_samples = True break @@ -53,7 +53,7 @@ class ComparisonBarChart: this_group = trait_db[1].group.name this_trait = trait_db[0] self.traits.append(this_trait) - + this_sample_data = this_trait.data for sample in this_sample_data: @@ -79,7 +79,7 @@ class ComparisonBarChart: self.js_data = dict(traits=[trait.name for trait in self.traits], samples=self.all_sample_list, sample_data=self.sample_data,) - + def get_trait_db_obs(self, trait_db_list): self.trait_list = [] @@ -95,4 +95,3 @@ class ComparisonBarChart: self.trait_list.append((trait_ob, dataset_ob)) #print("trait_list:", self.trait_list) - diff --git a/wqflask/wqflask/correlation/corr_scatter_plot.py b/wqflask/wqflask/correlation/corr_scatter_plot.py index f9a0ea11..4f756f58 100644 --- a/wqflask/wqflask/correlation/corr_scatter_plot.py +++ b/wqflask/wqflask/correlation/corr_scatter_plot.py @@ -68,7 +68,7 @@ class CorrScatterPlot: slope_string = '%.3E' % slope else: slope_string = '%.3f' % slope - + x_buffer = (max(vals_1) - min(vals_1)) * 0.1 y_buffer = (max(vals_2) - min(vals_2)) * 0.1 @@ -82,7 +82,7 @@ class CorrScatterPlot: ry = stats.rankdata(vals_2) self.rdata = [] self.rdata.append(rx.tolist()) - self.rdata.append(ry.tolist()) + self.rdata.append(ry.tolist()) srslope, srintercept, srr_value, srp_value, srstd_err = stats.linregress( rx, ry) diff --git a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py index e75c4a85..8ee24246 100644 --- a/wqflask/wqflask/correlation/show_corr_results.py +++ b/wqflask/wqflask/correlation/show_corr_results.py @@ -680,4 +680,3 @@ def get_header_fields(data_type, corr_method): 'Sample p(r)'] return header_fields - diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py index aefb4453..59469428 100644 --- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py +++ b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py @@ -324,7 +324,7 @@ def sortEigenVectors(vector): A.append(item[0]) B.append(item[1]) sum = reduce(lambda x, y: x + y, A, 0.0) - A = [x * 100.0 / sum for x in A] + A = [x * 100.0 / sum for x in A] return [A, B] except: return [] diff --git a/wqflask/wqflask/ctl/ctl_analysis.py b/wqflask/wqflask/ctl/ctl_analysis.py index 48a82435..f4eafbe7 100644 --- a/wqflask/wqflask/ctl/ctl_analysis.py +++ b/wqflask/wqflask/ctl/ctl_analysis.py @@ -66,7 +66,7 @@ class CTL: def addNode(self, gt): node_dict = {'data': {'id': str(gt.name) + ":" + str(gt.dataset.name), - 'sid': str(gt.name), + 'sid': str(gt.name), 'dataset': str(gt.dataset.name), 'label': gt.name, 'symbol': gt.symbol, @@ -238,4 +238,3 @@ class CTL: self.render_image(self.results) sys.stdout.flush() return(dict(template_vars)) - diff --git a/wqflask/wqflask/db_info.py b/wqflask/wqflask/db_info.py index c7558ed8..8d28fef0 100644 --- 
a/wqflask/wqflask/db_info.py +++ b/wqflask/wqflask/db_info.py @@ -135,5 +135,3 @@ def process_query_results(results): } return info_ob - - diff --git a/wqflask/wqflask/docs.py b/wqflask/wqflask/docs.py index fc93248a..0a1a597d 100644 --- a/wqflask/wqflask/docs.py +++ b/wqflask/wqflask/docs.py @@ -20,7 +20,7 @@ class Docs: self.title = self.entry.capitalize() self.content = "" else: - + self.title = result[0] self.content = result[1].decode("utf-8") diff --git a/wqflask/wqflask/export_traits.py b/wqflask/wqflask/export_traits.py index 5bd54f9d..2c180d49 100644 --- a/wqflask/wqflask/export_traits.py +++ b/wqflask/wqflask/export_traits.py @@ -1,6 +1,6 @@ import csv import xlsxwriter -import io +import io import datetime import itertools @@ -20,7 +20,7 @@ def export_search_results_csv(targs): table_data = json.loads(targs['export_data']) table_rows = table_data['rows'] - + now = datetime.datetime.now() time_str = now.strftime('%H:%M_%d%B%Y') if 'file_name' in targs: diff --git a/wqflask/wqflask/external_tools/send_to_webgestalt.py b/wqflask/wqflask/external_tools/send_to_webgestalt.py index fd12562f..fcd943ba 100644 --- a/wqflask/wqflask/external_tools/send_to_webgestalt.py +++ b/wqflask/wqflask/external_tools/send_to_webgestalt.py @@ -48,7 +48,7 @@ class SendToWebGestalt: id_type = "entrezgene" - self.hidden_vars = { + self.hidden_vars = { 'gene_list': "\n".join(gene_id_list), 'id_type': "entrezgene", 'ref_set': "genome", diff --git a/wqflask/wqflask/heatmap/heatmap.py b/wqflask/wqflask/heatmap/heatmap.py index aa11caa8..02eb66e5 100644 --- a/wqflask/wqflask/heatmap/heatmap.py +++ b/wqflask/wqflask/heatmap/heatmap.py @@ -125,7 +125,7 @@ class Heatmap: webqtlConfig.GENERATED_IMAGE_DIR, output_filename) - os.system(reaper_command) + os.system(reaper_command) reaper_results = parse_reaper_output(output_filename) diff --git a/wqflask/wqflask/interval_analyst/GeneUtil.py b/wqflask/wqflask/interval_analyst/GeneUtil.py index cadff080..e624a146 100644 --- a/wqflask/wqflask/interval_analyst/GeneUtil.py +++ b/wqflask/wqflask/interval_analyst/GeneUtil.py @@ -10,7 +10,7 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): fetchFields = ['SpeciesId', 'Id', 'GeneSymbol', 'GeneDescription', 'Chromosome', 'TxStart', 'TxEnd', 'Strand', 'GeneID', 'NM_ID', 'kgID', 'GenBankID', 'UnigenID', 'ProteinID', 'AlignID', 'exonCount', 'exonStarts', 'exonEnds', 'cdsStart', 'cdsEnd'] - + # List All Species in the Gene Table speciesDict = {} results = g.db.execute(""" @@ -21,7 +21,7 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): for item in results: speciesDict[item[0]] = item[1] - + # List current Species and other Species speciesId = speciesDict[species] otherSpecies = [[X, speciesDict[X]] for X in list(speciesDict.keys())] @@ -45,7 +45,7 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): newdict = {} for j, item in enumerate(fetchFields): newdict[item] = result[j] - # count SNPs if possible + # count SNPs if possible if diffCol and species == 'mouse': newdict["snpCount"] = g.db.execute(""" SELECT count(*) @@ -58,17 +58,17 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): (newdict["TxEnd"] - newdict["TxStart"]) / 1000.0 else: newdict["snpDensity"] = newdict["snpCount"] = 0 - + try: newdict['GeneLength'] = 1000.0 * (newdict['TxEnd'] - newdict['TxStart']) except: pass - + # load gene from other Species by the same name for item in otherSpecies: othSpec, othSpecId = item newdict2 = {} - + resultsOther = g.db.execute("SELECT %s FROM GeneList WHERE 
SpeciesId = %d AND geneSymbol= '%s' LIMIT 1" % (", ".join(fetchFields), othSpecId, newdict["GeneSymbol"])).fetchone() @@ -76,8 +76,8 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): if resultsOther: for j, item in enumerate(fetchFields): newdict2[item] = resultsOther[j] - - # count SNPs if possible, could be a separate function + + # count SNPs if possible, could be a separate function if diffCol and othSpec == 'mouse': newdict2["snpCount"] = g.db.execute(""" SELECT count(*) @@ -91,17 +91,15 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): (newdict2["TxEnd"] - newdict2["TxStart"]) / 1000.0 else: newdict2["snpDensity"] = newdict2["snpCount"] = 0 - + try: newdict2['GeneLength'] = 1000.0 * \ (newdict2['TxEnd'] - newdict2['TxStart']) except: pass - + newdict['%sGene' % othSpec] = newdict2 - + GeneList.append(newdict) return GeneList - - diff --git a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py index f932498f..c51b7a9a 100644 --- a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py +++ b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py @@ -34,7 +34,7 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo opt_list = [] if boot_check and num_bootstrap > 0: - bootstrap_filename = (f"{this_dataset.group.name}_BOOTSTRAP_" + + bootstrap_filename = (f"{this_dataset.group.name}_BOOTSTRAP_" + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) ) @@ -44,8 +44,8 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo opt_list.append( f"--bootstrap_output {webqtlConfig.GENERATED_IMAGE_DIR}{bootstrap_filename}.txt") if num_perm > 0: - permu_filename = ("{this_dataset.group.name}_PERM_" + - ''.join(random.choice(string.ascii_uppercase + + permu_filename = ("{this_dataset.group.name}_PERM_" + + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) ) opt_list.append("-n " + str(num_perm)) @@ -56,7 +56,7 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo if manhattan_plot != True: opt_list.append("--interval 1") - reaper_command = (REAPER_COMMAND + + reaper_command = (REAPER_COMMAND + ' --geno {0}/{1}.geno --traits {2}/gn2/{3}.txt {4} -o {5}{6}.txt'.format(flat_files('genotype'), genofile_name, @@ -81,7 +81,7 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo suggestive = permu_vals[int(num_perm * 0.37 - 1)] significant = permu_vals[int(num_perm * 0.95 - 1)] - return (marker_obs, permu_vals, suggestive, significant, bootstrap_vals, + return (marker_obs, permu_vals, suggestive, significant, bootstrap_vals, [output_filename, permu_filename, bootstrap_filename]) diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py index 741d6c23..b3c9fddf 100644 --- a/wqflask/wqflask/marker_regression/rqtl_mapping.py +++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py @@ -300,7 +300,7 @@ def add_categorical_covar(cross, covar_as_string, i): ro.r('the_cross$pheno <- cbind(pheno, ' + \ col_name + ' = newcovar[,' + str(x) + '])') col_names.append(col_name) - #logger.info("loop" + str(x) + "done"); + #logger.info("loop" + str(x) + "done"); logger.info("returning from add_categorical_covar") return ro.r["the_cross"], col_names diff --git a/wqflask/wqflask/resource_manager.py b/wqflask/wqflask/resource_manager.py index 36d4cd61..61f3b202 100644 --- 
a/wqflask/wqflask/resource_manager.py +++ b/wqflask/wqflask/resource_manager.py @@ -142,5 +142,5 @@ def get_group_names(group_masks): group_name = get_group_info(group_id)['name'] this_mask['name'] = group_name group_masks_with_names[group_id] = this_mask - + return group_masks_with_names diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py index 273a97a4..0d2fb2f8 100644 --- a/wqflask/wqflask/search_results.py +++ b/wqflask/wqflask/search_results.py @@ -343,4 +343,3 @@ def get_aliases(symbol_list, species): search_terms.append(the_search_term) return search_terms - diff --git a/wqflask/wqflask/server_side.py b/wqflask/wqflask/server_side.py index 8b3a4faa..8ca3a9eb 100644 --- a/wqflask/wqflask/server_side.py +++ b/wqflask/wqflask/server_side.py @@ -30,7 +30,7 @@ class ServerSideTable: self.rows_count = rows_count self.table_rows = table_rows self.header_data_names = header_data_names - + self.sort_rows() self.paginate_rows() diff --git a/wqflask/wqflask/snp_browser/snp_browser.py b/wqflask/wqflask/snp_browser/snp_browser.py index e98cfb71..5b7a663c 100644 --- a/wqflask/wqflask/snp_browser/snp_browser.py +++ b/wqflask/wqflask/snp_browser/snp_browser.py @@ -494,10 +494,10 @@ class SnpBrowser: function_details = function_details + ", Coding Region Unknown" self.empty_columns['function_details'] = "true" - + #[snp_href, chr, mb_formatted, alleles, snp_source_cell, conservation_score, gene_name_cell, transcript_href, exon, domain_1, domain_2, function, function_details] - base_color_dict = {"A": "#C33232", "C": "#1569C7", "T": "#CFCF32", "G": "#32C332", + base_color_dict = {"A": "#C33232", "C": "#1569C7", "T": "#CFCF32", "G": "#32C332", "t": "#FF6", "c": "#5CB3FF", "a": "#F66", "g": "#CF9", ":": "#FFFFFF", "-": "#FFFFFF", "?": "#FFFFFF"} the_bases = [] @@ -735,7 +735,7 @@ def get_header_list(variant_type, strains, species=None, empty_columns=None): if empty_columns['function_details'] == "false": empty_field_count += 1 header_fields[0].remove('Details') - + for col in empty_columns.keys(): if empty_columns[col] == "false": header_data_names.remove(col) @@ -952,4 +952,3 @@ def check_if_in_gene(species_id, chr, mb): return [result[0], result[1]] else: return "" - diff --git a/wqflask/wqflask/user_login.py b/wqflask/wqflask/user_login.py index 708d43d2..bfaed9c2 100644 --- a/wqflask/wqflask/user_login.py +++ b/wqflask/wqflask/user_login.py @@ -45,10 +45,10 @@ def encode_password(pass_gen_fields, unencrypted_password): salt = pass_gen_fields['salt'] else: salt = bytes(pass_gen_fields['salt'], "utf-8") - encrypted_password = pbkdf2.pbkdf2_hex(str(unencrypted_password), + encrypted_password = pbkdf2.pbkdf2_hex(str(unencrypted_password), salt, - pass_gen_fields['iterations'], - pass_gen_fields['keylength'], + pass_gen_fields['iterations'], + pass_gen_fields['keylength'], pass_gen_fields['hashfunc']) pass_gen_fields.pop("unencrypted_password", None) @@ -111,7 +111,7 @@ def get_signed_session_id(user): key = UserSession.user_cookie_name + ":" + session_id Redis.hmset(key, session) Redis.expire(key, THREE_DAYS) - + return session_id_signed @@ -207,7 +207,7 @@ def login(): UserSession.user_cookie_name, session_id_signed, max_age=None) else: flash("Something went unexpectedly wrong.", "alert-danger") - response = make_response(redirect(url_for('index_page'))) + response = make_response(redirect(url_for('index_page'))) return response else: user_details = get_user_by_unique_column( @@ -276,13 +276,13 @@ def github_oauth2(): user_details = get_user_by_unique_column("github_id", 
github_user["id"]) if user_details == None: user_details = { - "user_id": str(uuid.uuid4()), - "name": github_user["name"].encode("utf-8") if github_user["name"] else "None", + "user_id": str(uuid.uuid4()), + "name": github_user["name"].encode("utf-8") if github_user["name"] else "None", "github_id": github_user["id"], - "user_url": github_user["html_url"].encode("utf-8"), - "login_type": "github", - "organization": "", - "active": 1, + "user_url": github_user["html_url"].encode("utf-8"), + "login_type": "github", + "organization": "", + "active": 1, "confirmed": 1 } save_user(user_details, user_details["user_id"]) @@ -308,8 +308,8 @@ def orcid_oauth2(): url = "/n/login" if code: data = { - "client_id": ORCID_CLIENT_ID, - "client_secret": ORCID_CLIENT_SECRET, + "client_id": ORCID_CLIENT_ID, + "client_secret": ORCID_CLIENT_SECRET, "grant_type": "authorization_code", "redirect_uri": GN2_BRANCH_URL + "n/login/orcid_oauth2", "code": code @@ -321,13 +321,13 @@ def orcid_oauth2(): user_details = get_user_by_unique_column("orcid", result_dict["orcid"]) if user_details == None: user_details = { - "user_id": str(uuid4()), - "name": result_dict["name"], - "orcid": result_dict["orcid"], - "user_url": "%s/%s" % ("/".join(ORCID_AUTH_URL.split("/")[:-2]), result_dict["orcid"]), - "login_type": "orcid", - "organization": "", - "active": 1, + "user_id": str(uuid4()), + "name": result_dict["name"], + "orcid": result_dict["orcid"], + "user_url": "%s/%s" % ("/".join(ORCID_AUTH_URL.split("/")[:-2]), result_dict["orcid"]), + "login_type": "orcid", + "organization": "", + "active": 1, "confirmed": 1 } save_user(user_details, user_details["user_id"]) @@ -374,7 +374,7 @@ def send_forgot_password_email(verification_email): key_prefix = "forgot_password_code" subject = "GeneNetwork password reset" fromaddr = "no-reply@genenetwork.org" - + verification_code = str(uuid.uuid4()) key = key_prefix + ":" + verification_code diff --git a/wqflask/wqflask/user_session.py b/wqflask/wqflask/user_session.py index 6ccb2e80..963288b3 100644 --- a/wqflask/wqflask/user_session.py +++ b/wqflask/wqflask/user_session.py @@ -318,5 +318,3 @@ class UserSession: # And more importantly delete the redis record Redis.delete(self.redis_key) self.logged_in = False - - -- cgit v1.2.3 From 90ec57905c8afdbd5e9e8c44dcc369bd0e9c2d1b Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Fri, 30 Apr 2021 13:01:02 +0300 Subject: autopep8: Fix W504 --- wqflask/base/GeneralObject.py | 4 +- wqflask/base/data_set.py | 4 +- wqflask/base/trait.py | 10 ++-- .../maintenance/generate_probesetfreeze_file.py | 7 ++- wqflask/tests/unit/base/test_data_set.py | 10 ++-- wqflask/tests/unit/wqflask/api/test_gen_menu.py | 68 +++++++++++----------- wqflask/utility/svg.py | 24 ++++---- wqflask/wqflask/api/gen_menu.py | 10 ++-- wqflask/wqflask/correlation/show_corr_results.py | 10 ++-- wqflask/wqflask/db_info.py | 34 +++++------ wqflask/wqflask/do_search.py | 12 ++-- .../wqflask/marker_regression/qtlreaper_mapping.py | 14 ++--- wqflask/wqflask/show_trait/SampleList.py | 4 +- wqflask/wqflask/show_trait/show_trait.py | 4 +- wqflask/wqflask/user_session.py | 4 +- wqflask/wqflask/views.py | 12 ++-- 16 files changed, 116 insertions(+), 115 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/base/GeneralObject.py b/wqflask/base/GeneralObject.py index 249195e2..ce8e60b8 100644 --- a/wqflask/base/GeneralObject.py +++ b/wqflask/base/GeneralObject.py @@ -62,5 +62,5 @@ class GeneralObject: return s def __eq__(self, other): - return (len(list(self.__dict__.keys())) == - 
len(list(other.__dict__.keys()))) + return (len(list(self.__dict__.keys())) + == len(list(other.__dict__.keys()))) diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py index 8ec0aaad..5eac695e 100644 --- a/wqflask/base/data_set.py +++ b/wqflask/base/data_set.py @@ -1032,8 +1032,8 @@ class MrnaAssayDataSet(DataSet): else: description_display = this_trait.symbol - if (len(description_display) > 1 and description_display != 'N/A' and - len(target_string) > 1 and target_string != 'None'): + if (len(description_display) > 1 and description_display != 'N/A' + and len(target_string) > 1 and target_string != 'None'): description_display = description_display + '; ' + target_string.strip() # Save it for the jinja2 template diff --git a/wqflask/base/trait.py b/wqflask/base/trait.py index 5574128d..d09cfd40 100644 --- a/wqflask/base/trait.py +++ b/wqflask/base/trait.py @@ -479,9 +479,9 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): else: description_display = trait.symbol - if (str(description_display or "") != "" and - description_display != 'N/A' and - str(target_string or "") != "" and target_string != 'None'): + if (str(description_display or "") != "" + and description_display != 'N/A' + and str(target_string or "") != "" and target_string != 'None'): description_display = description_display + '; ' + target_string.strip() # Save it for the jinja2 template @@ -575,6 +575,6 @@ def retrieve_trait_info(trait, dataset, get_qtl_info=False): if str(trait.lrs or "") != "": trait.LRS_score_repr = LRS_score_repr = '%3.1f' % trait.lrs else: - raise KeyError(repr(trait.name) + - ' information is not found in the database.') + raise KeyError(repr(trait.name) + + ' information is not found in the database.') return trait diff --git a/wqflask/maintenance/generate_probesetfreeze_file.py b/wqflask/maintenance/generate_probesetfreeze_file.py index bd9c2ab4..e964c8ed 100644 --- a/wqflask/maintenance/generate_probesetfreeze_file.py +++ b/wqflask/maintenance/generate_probesetfreeze_file.py @@ -110,9 +110,10 @@ def write_data_matrix_file(strains, probeset_vals, filename): def main(): - filename = os.path.expanduser("~/gene/wqflask/maintenance/" + - "ProbeSetFreezeId_210_FullName_Eye_AXBXA_Illumina_V6.2" + - "(Oct08)_RankInv_Beta.txt") + filename = os.path.expanduser( + "~/gene/wqflask/maintenance/" + "ProbeSetFreezeId_210_FullName_Eye_AXBXA_Illumina_V6.2" + "(Oct08)_RankInv_Beta.txt") dataset_name = "Eye_AXBXA_1008_RankInv" cursor = get_cursor() diff --git a/wqflask/tests/unit/base/test_data_set.py b/wqflask/tests/unit/base/test_data_set.py index ee5d6f06..66ad361d 100644 --- a/wqflask/tests/unit/base/test_data_set.py +++ b/wqflask/tests/unit/base/test_data_set.py @@ -88,8 +88,8 @@ class TestDataSetTypes(unittest.TestCase): '"B139_K_1206_R": "ProbeSet", ' '"Test": "ProbeSet"}')) db_mock.db.execute.assert_called_once_with( - ("SELECT ProbeSetFreeze.Id FROM ProbeSetFreeze " + - "WHERE ProbeSetFreeze.Name = \"Test\" ") + ("SELECT ProbeSetFreeze.Id FROM ProbeSetFreeze " + + "WHERE ProbeSetFreeze.Name = \"Test\" ") ) @mock.patch('base.data_set.g') @@ -145,9 +145,9 @@ class TestDataSetTypes(unittest.TestCase): '"Test": "Publish"}')) db_mock.db.execute.assert_called_with( - ("SELECT PublishFreeze.Name " + - "FROM PublishFreeze, InbredSet " + - "WHERE InbredSet.Name = 'Test' AND " + ("SELECT PublishFreeze.Name " + + "FROM PublishFreeze, InbredSet " + + "WHERE InbredSet.Name = 'Test' AND " "PublishFreeze.InbredSetId = InbredSet.Id") ) diff --git a/wqflask/tests/unit/wqflask/api/test_gen_menu.py 
b/wqflask/tests/unit/wqflask/api/test_gen_menu.py index 57eb1650..fd0fe52e 100644 --- a/wqflask/tests/unit/wqflask/api/test_gen_menu.py +++ b/wqflask/tests/unit/wqflask/api/test_gen_menu.py @@ -105,13 +105,13 @@ class TestGenMenu(unittest.TestCase): for name in ["mouse", "human"]: db_mock.db.execute.assert_any_call( ("SELECT InbredSet.Name, InbredSet.FullName, " + - "IFNULL(InbredSet.Family, 'None') " + - "FROM InbredSet, Species WHERE Species.Name " + - "= '{}' AND InbredSet.SpeciesId = Species.Id GROUP by " + - "InbredSet.Name ORDER BY IFNULL(InbredSet.FamilyOrder, " + - "InbredSet.FullName) ASC, IFNULL(InbredSet.Family, " + - "InbredSet.FullName) ASC, InbredSet.FullName ASC, " + - "InbredSet.MenuOrderId ASC").format(name) + "IFNULL(InbredSet.Family, 'None') " + + "FROM InbredSet, Species WHERE Species.Name " + + "= '{}' AND InbredSet.SpeciesId = Species.Id GROUP by " + + "InbredSet.Name ORDER BY IFNULL(InbredSet.FamilyOrder, " + + "InbredSet.FullName) ASC, IFNULL(InbredSet.Family, " + + "InbredSet.FullName) ASC, InbredSet.FullName ASC, " + + "InbredSet.MenuOrderId ASC").format(name) ) @mock.patch('wqflask.api.gen_menu.g') @@ -172,12 +172,12 @@ class TestGenMenu(unittest.TestCase): self.assertEqual(build_datasets("Mouse", "BXD", "Phenotypes"), [['602', "BXDPublish", "BXD Published Phenotypes"]]) db_mock.db.execute.assert_called_with( - "SELECT InfoFiles.GN_AccesionId, PublishFreeze.Name, " + - "PublishFreeze.FullName FROM InfoFiles, PublishFreeze, " + - "InbredSet WHERE InbredSet.Name = 'BXD' AND " + - "PublishFreeze.InbredSetId = InbredSet.Id AND " + - "InfoFiles.InfoPageName = PublishFreeze.Name " + - "ORDER BY PublishFreeze.CreateTime ASC" + "SELECT InfoFiles.GN_AccesionId, PublishFreeze.Name, " + + "PublishFreeze.FullName FROM InfoFiles, PublishFreeze, " + + "InbredSet WHERE InbredSet.Name = 'BXD' AND " + + "PublishFreeze.InbredSetId = InbredSet.Id AND " + + "InfoFiles.InfoPageName = PublishFreeze.Name " + + "ORDER BY PublishFreeze.CreateTime ASC" ) self.assertEqual(build_datasets("Mouse", "MDP", "Phenotypes"), [['602', "BXDPublish", "Mouse Phenome Database"]]) @@ -221,8 +221,8 @@ class TestGenMenu(unittest.TestCase): "SELECT InfoFiles.GN_AccesionId FROM InfoFiles, " "GenoFreeze, InbredSet WHERE InbredSet.Name = 'HLC' AND " "GenoFreeze.InbredSetId = InbredSet.Id AND " - "InfoFiles.InfoPageName = GenoFreeze.ShortName " + - "ORDER BY GenoFreeze.CreateTime DESC" + "InfoFiles.InfoPageName = GenoFreeze.ShortName " + + "ORDER BY GenoFreeze.CreateTime DESC" ) db_mock.db.execute.return_value.fetchone.return_value = () self.assertEqual(build_datasets("Mouse", "HLC", "Genotypes"), @@ -239,16 +239,16 @@ class TestGenMenu(unittest.TestCase): "112", 'HC_M2_0606_P', "Hippocampus Consortium M430v2 (Jun06) PDNN" ]]) db_mock.db.execute.assert_called_once_with( - "SELECT ProbeSetFreeze.Id, ProbeSetFreeze.Name, " + - "ProbeSetFreeze.FullName FROM ProbeSetFreeze, " + - "ProbeFreeze, InbredSet, Tissue, Species WHERE " + - "Species.Name = 'Mouse' AND Species.Id = " + - "InbredSet.SpeciesId AND InbredSet.Name = 'HLC' AND " + - "ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id AND " + - "Tissue.Name = 'mRNA' AND ProbeFreeze.TissueId = " + - "Tissue.Id AND ProbeFreeze.InbredSetId = InbredSet.Id AND " + - "ProbeSetFreeze.public > 0 " + - "ORDER BY -ProbeSetFreeze.OrderList DESC, ProbeSetFreeze.CreateTime DESC") + "SELECT ProbeSetFreeze.Id, ProbeSetFreeze.Name, " + + "ProbeSetFreeze.FullName FROM ProbeSetFreeze, " + + "ProbeFreeze, InbredSet, Tissue, Species WHERE " + + "Species.Name = 'Mouse' AND Species.Id = " 
+ + "InbredSet.SpeciesId AND InbredSet.Name = 'HLC' AND " + + "ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id AND " + + "Tissue.Name = 'mRNA' AND ProbeFreeze.TissueId = " + + "Tissue.Id AND ProbeFreeze.InbredSetId = InbredSet.Id AND " + + "ProbeSetFreeze.public > 0 " + + "ORDER BY -ProbeSetFreeze.OrderList DESC, ProbeSetFreeze.CreateTime DESC") @mock.patch('wqflask.api.gen_menu.build_datasets') @mock.patch('wqflask.api.gen_menu.g') @@ -266,15 +266,15 @@ class TestGenMenu(unittest.TestCase): ['H', 'H', 'Molecular Traits'], ['R', 'R', 'Molecular Traits']]) db_mock.db.execute.assert_called_once_with( - "SELECT DISTINCT Tissue.Name " + - "FROM ProbeFreeze, ProbeSetFreeze, InbredSet, " + - "Tissue, Species WHERE Species.Name = 'mouse' " + - "AND Species.Id = InbredSet.SpeciesId AND " + - "InbredSet.Name = 'random group' AND " + - "ProbeFreeze.TissueId = Tissue.Id AND " + - "ProbeFreeze.InbredSetId = InbredSet.Id AND " + - "ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id " + - "ORDER BY Tissue.Name" + "SELECT DISTINCT Tissue.Name " + + "FROM ProbeFreeze, ProbeSetFreeze, InbredSet, " + + "Tissue, Species WHERE Species.Name = 'mouse' " + + "AND Species.Id = InbredSet.SpeciesId AND " + + "InbredSet.Name = 'random group' AND " + + "ProbeFreeze.TissueId = Tissue.Id AND " + + "ProbeFreeze.InbredSetId = InbredSet.Id AND " + + "ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id " + + "ORDER BY Tissue.Name" ) @mock.patch('wqflask.api.gen_menu.build_types') diff --git a/wqflask/utility/svg.py b/wqflask/utility/svg.py index 8d2e13ab..bc3bc833 100644 --- a/wqflask/utility/svg.py +++ b/wqflask/utility/svg.py @@ -229,13 +229,13 @@ class pathdata: def bezier(self, x1, y1, x2, y2, x, y): """bezier with xy1 and xy2 to xy absolut""" - self.path.append('C' + str(x1) + ','+str(y1)+' '+str(x2) + - ',' + str(y2) + ' '+str(x)+','+str(y)) + self.path.append('C' + str(x1) + ','+str(y1)+' '+str(x2) + + ',' + str(y2) + ' '+str(x)+','+str(y)) def relbezier(self, x1, y1, x2, y2, x, y): """bezier with xy1 and xy2 to xy relative""" - self.path.append('c' + str(x1) + ','+str(y1)+' '+str(x2) + - ',' + str(y2) + ' '+str(x)+','+str(y)) + self.path.append('c' + str(x1) + ','+str(y1)+' '+str(x2) + + ',' + str(y2) + ' '+str(x)+','+str(y)) def smbezier(self, x2, y2, x, y): """smooth bezier with xy2 to xy absolut""" @@ -267,13 +267,13 @@ class pathdata: def ellarc(self, rx, ry, xrot, laf, sf, x, y): """elliptival arc with rx and ry rotating with xrot using large-arc-flag and sweep-flag to xy absolut""" - self.path.append('A' + str(rx) + ','+str(ry)+' '+str(xrot) + - ' ' + str(laf) + ' '+str(sf)+' '+str(x)+' '+str(y)) + self.path.append('A' + str(rx) + ','+str(ry)+' '+str(xrot) + + ' ' + str(laf) + ' '+str(sf)+' '+str(x)+' '+str(y)) def relellarc(self, rx, ry, xrot, laf, sf, x, y): """elliptival arc with rx and ry rotating with xrot using large-arc-flag and sweep-flag to xy relative""" - self.path.append('a' + str(rx) + ','+str(ry)+' '+str(xrot) + - ' ' + str(laf) + ' '+str(sf)+' '+str(x)+' '+str(y)) + self.path.append('a' + str(rx) + ','+str(ry)+' '+str(xrot) + + ' ' + str(laf) + ' '+str(sf)+' '+str(x)+' '+str(y)) def __repr__(self): return ' '.join(self.path) @@ -319,11 +319,11 @@ class SVGelement: f.write('\t' * level) f.write('<' + self.type) for attkey in list(self.attributes.keys()): - f.write(' ' + _escape(str(attkey)) + '=' + - _quoteattr(str(self.attributes[attkey]))) + f.write(' ' + _escape(str(attkey)) + '=' + + _quoteattr(str(self.attributes[attkey]))) if self.namespace: - f.write(' xmlns="' + _escape(str(self.namespace)) + - '" 
xmlns:xlink="http://www.w3.org/1999/xlink"') + f.write(' xmlns="' + _escape(str(self.namespace)) + + '" xmlns:xlink="http://www.w3.org/1999/xlink"') if self.elements or self.text or self.cdata: f.write('>') if self.elements: diff --git a/wqflask/wqflask/api/gen_menu.py b/wqflask/wqflask/api/gen_menu.py index a64524dd..e65b36e4 100644 --- a/wqflask/wqflask/api/gen_menu.py +++ b/wqflask/wqflask/api/gen_menu.py @@ -179,11 +179,11 @@ def build_datasets(species, group, type_name): elif type_name == "Genotypes": results = g.db.execute( ("SELECT InfoFiles.GN_AccesionId " + - "FROM InfoFiles, GenoFreeze, InbredSet " + - "WHERE InbredSet.Name = '{}' AND " + - "GenoFreeze.InbredSetId = InbredSet.Id AND " + - "InfoFiles.InfoPageName = GenoFreeze.ShortName " + - "ORDER BY GenoFreeze.CreateTime DESC").format(group)).fetchone() + "FROM InfoFiles, GenoFreeze, InbredSet " + + "WHERE InbredSet.Name = '{}' AND " + + "GenoFreeze.InbredSetId = InbredSet.Id AND " + + "InfoFiles.InfoPageName = GenoFreeze.ShortName " + + "ORDER BY GenoFreeze.CreateTime DESC").format(group)).fetchone() dataset_id = "None" if bool(results): diff --git a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py index 8ee24246..e8b7b057 100644 --- a/wqflask/wqflask/correlation/show_corr_results.py +++ b/wqflask/wqflask/correlation/show_corr_results.py @@ -96,9 +96,9 @@ class CorrelationResults: self.p_range_lower = get_float(start_vars, 'p_range_lower', -1.0) self.p_range_upper = get_float(start_vars, 'p_range_upper', 1.0) - if ('loc_chr' in start_vars and - 'min_loc_mb' in start_vars and - 'max_loc_mb' in start_vars): + if ('loc_chr' in start_vars + and 'min_loc_mb' in start_vars + and 'max_loc_mb' in start_vars): self.location_type = get_string(start_vars, 'location_type') self.location_chr = get_string(start_vars, 'loc_chr') @@ -200,8 +200,8 @@ class CorrelationResults: if chr_info.name == trait_object.chr: chr_as_int = order_id - if (float(self.correlation_data[trait][0]) >= self.p_range_lower and - float(self.correlation_data[trait][0]) <= self.p_range_upper): + if (float(self.correlation_data[trait][0]) >= self.p_range_lower + and float(self.correlation_data[trait][0]) <= self.p_range_upper): if (self.target_dataset.type == "ProbeSet" or self.target_dataset.type == "Publish") and bool(trait_object.mean): if (self.min_expr != None) and (float(trait_object.mean) < self.min_expr): diff --git a/wqflask/wqflask/db_info.py b/wqflask/wqflask/db_info.py index 8d28fef0..938c453e 100644 --- a/wqflask/wqflask/db_info.py +++ b/wqflask/wqflask/db_info.py @@ -23,23 +23,23 @@ class InfoPage: def get_info(self, create=False): query_base = ("SELECT InfoPageName, GN_AccesionId, Species.MenuName, Species.TaxonomyId, Tissue.Name, InbredSet.Name, " + - "GeneChip.GeneChipName, GeneChip.GeoPlatform, AvgMethod.Name, Datasets.DatasetName, Datasets.GeoSeries, " + - "Datasets.PublicationTitle, DatasetStatus.DatasetStatusName, Datasets.Summary, Datasets.AboutCases, " + - "Datasets.AboutTissue, Datasets.AboutDataProcessing, Datasets.Acknowledgment, Datasets.ExperimentDesign, " + - "Datasets.Contributors, Datasets.Citation, Datasets.Notes, Investigators.FirstName, Investigators.LastName, " + - "Investigators.Address, Investigators.City, Investigators.State, Investigators.ZipCode, Investigators.Country, " + - "Investigators.Phone, Investigators.Email, Investigators.Url, Organizations.OrganizationName, " + - "InvestigatorId, DatasetId, DatasetStatusId, Datasets.AboutPlatform, InfoFileTitle, Specifics " + - "FROM 
InfoFiles " + - "LEFT JOIN Species USING (SpeciesId) " + - "LEFT JOIN Tissue USING (TissueId) " + - "LEFT JOIN InbredSet USING (InbredSetId) " + - "LEFT JOIN GeneChip USING (GeneChipId) " + - "LEFT JOIN AvgMethod USING (AvgMethodId) " + - "LEFT JOIN Datasets USING (DatasetId) " + - "LEFT JOIN Investigators USING (InvestigatorId) " + - "LEFT JOIN Organizations USING (OrganizationId) " + - "LEFT JOIN DatasetStatus USING (DatasetStatusId) WHERE ") + "GeneChip.GeneChipName, GeneChip.GeoPlatform, AvgMethod.Name, Datasets.DatasetName, Datasets.GeoSeries, " + + "Datasets.PublicationTitle, DatasetStatus.DatasetStatusName, Datasets.Summary, Datasets.AboutCases, " + + "Datasets.AboutTissue, Datasets.AboutDataProcessing, Datasets.Acknowledgment, Datasets.ExperimentDesign, " + + "Datasets.Contributors, Datasets.Citation, Datasets.Notes, Investigators.FirstName, Investigators.LastName, " + + "Investigators.Address, Investigators.City, Investigators.State, Investigators.ZipCode, Investigators.Country, " + + "Investigators.Phone, Investigators.Email, Investigators.Url, Organizations.OrganizationName, " + + "InvestigatorId, DatasetId, DatasetStatusId, Datasets.AboutPlatform, InfoFileTitle, Specifics " + + "FROM InfoFiles " + + "LEFT JOIN Species USING (SpeciesId) " + + "LEFT JOIN Tissue USING (TissueId) " + + "LEFT JOIN InbredSet USING (InbredSetId) " + + "LEFT JOIN GeneChip USING (GeneChipId) " + + "LEFT JOIN AvgMethod USING (AvgMethodId) " + + "LEFT JOIN Datasets USING (DatasetId) " + + "LEFT JOIN Investigators USING (InvestigatorId) " + + "LEFT JOIN Organizations USING (OrganizationId) " + + "LEFT JOIN DatasetStatus USING (DatasetStatusId) WHERE ") if self.gn_accession_id: final_query = query_base + \ diff --git a/wqflask/wqflask/do_search.py b/wqflask/wqflask/do_search.py index e50ff50b..48527785 100644 --- a/wqflask/wqflask/do_search.py +++ b/wqflask/wqflask/do_search.py @@ -111,8 +111,8 @@ class MrnaAssaySearch(DoSearch): else: match_clause = "" - where_clause = (match_clause + - """ProbeSet.Id = ProbeSetXRef.ProbeSetId + where_clause = (match_clause + + """ProbeSet.Id = ProbeSetXRef.ProbeSetId and ProbeSetXRef.ProbeSetFreezeId = %s """ % (escape(str(self.dataset.id)))) @@ -134,8 +134,8 @@ class MrnaAssaySearch(DoSearch): else: match_clause = "" - where_clause = (match_clause + - """ProbeSet.Id = ProbeSetXRef.ProbeSetId + where_clause = (match_clause + + """ProbeSet.Id = ProbeSetXRef.ProbeSetId and ProbeSetXRef.ProbeSetFreezeId = %s """ % (escape(str(self.dataset.id)))) @@ -348,8 +348,8 @@ class GenotypeSearch(DoSearch): from_clause = self.normalize_spaces(from_clause) if self.search_term[0] == "*": - query = (self.base_query + - """WHERE Geno.Id = GenoXRef.GenoId + query = (self.base_query + + """WHERE Geno.Id = GenoXRef.GenoId and GenoXRef.GenoFreezeId = GenoFreeze.Id and GenoFreeze.Id = %s""" % (escape(str(self.dataset.id)))) else: diff --git a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py index c51b7a9a..9f9591ad 100644 --- a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py +++ b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py @@ -25,8 +25,8 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo trait_filename = f"{str(this_trait.name)}_{str(this_dataset.name)}_pheno" gen_pheno_txt_file(samples, vals, trait_filename) - output_filename = (f"{this_dataset.group.name}_GWA_" + - ''.join(random.choice(string.ascii_uppercase + string.digits) + output_filename = (f"{this_dataset.group.name}_GWA_" 
+ + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) ) bootstrap_filename = None @@ -34,8 +34,8 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo opt_list = [] if boot_check and num_bootstrap > 0: - bootstrap_filename = (f"{this_dataset.group.name}_BOOTSTRAP_" + - ''.join(random.choice(string.ascii_uppercase + string.digits) + bootstrap_filename = (f"{this_dataset.group.name}_BOOTSTRAP_" + + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) ) @@ -44,9 +44,9 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo opt_list.append( f"--bootstrap_output {webqtlConfig.GENERATED_IMAGE_DIR}{bootstrap_filename}.txt") if num_perm > 0: - permu_filename = ("{this_dataset.group.name}_PERM_" + - ''.join(random.choice(string.ascii_uppercase + - string.digits) for _ in range(6)) + permu_filename = ("{this_dataset.group.name}_PERM_" + + ''.join(random.choice(string.ascii_uppercase + + string.digits) for _ in range(6)) ) opt_list.append("-n " + str(num_perm)) opt_list.append( diff --git a/wqflask/wqflask/show_trait/SampleList.py b/wqflask/wqflask/show_trait/SampleList.py index 6419335e..3a63c84e 100644 --- a/wqflask/wqflask/show_trait/SampleList.py +++ b/wqflask/wqflask/show_trait/SampleList.py @@ -57,8 +57,8 @@ class SampleList: sample = webqtlCaseData.webqtlCaseData(name=sample_name) sample.extra_info = {} - if (self.dataset.group.name == 'AXBXA' and - sample_name in ('AXB18/19/20', 'AXB13/14', 'BXA8/17')): + if (self.dataset.group.name == 'AXBXA' + and sample_name in ('AXB18/19/20', 'AXB13/14', 'BXA8/17')): sample.extra_info['url'] = "/mouseCross.html#AXB/BXA" sample.extra_info['css_class'] = "fs12" diff --git a/wqflask/wqflask/show_trait/show_trait.py b/wqflask/wqflask/show_trait/show_trait.py index 18cadea4..fcebbc4d 100644 --- a/wqflask/wqflask/show_trait/show_trait.py +++ b/wqflask/wqflask/show_trait/show_trait.py @@ -452,8 +452,8 @@ class ShowTrait: for sample in list(self.this_trait.data.keys()): if (self.this_trait.data[sample].name2 != self.this_trait.data[sample].name): - if ((self.this_trait.data[sample].name2 in primary_sample_names) and - (self.this_trait.data[sample].name not in primary_sample_names)): + if ((self.this_trait.data[sample].name2 in primary_sample_names) + and (self.this_trait.data[sample].name not in primary_sample_names)): primary_sample_names.append( self.this_trait.data[sample].name) primary_sample_names.remove( diff --git a/wqflask/wqflask/user_session.py b/wqflask/wqflask/user_session.py index 963288b3..3e543445 100644 --- a/wqflask/wqflask/user_session.py +++ b/wqflask/wqflask/user_session.py @@ -184,8 +184,8 @@ class UserSession: # ZS: Get user's collections if they exist collections = get_user_collections(self.user_id) collections = [item for item in collections if item['name'] != "Your Default Collection"] + \ - [item for item in collections if item['name'] == - "Your Default Collection"] # ZS: Ensure Default Collection is last in list + [item for item in collections if item['name'] + == "Your Default Collection"] # ZS: Ensure Default Collection is last in list return collections @property diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index 2c53012a..f75209ff 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -140,8 +140,8 @@ def handle_bad_request(e): logger.error(traceback.format_exc()) now = datetime.datetime.utcnow() time_str = now.strftime('%l:%M%p UTC %b %d, %Y') - formatted_lines = [request.url + - 
" (" + time_str + ")"]+traceback.format_exc().splitlines() + formatted_lines = [request.url + + " (" + time_str + ")"]+traceback.format_exc().splitlines() # Handle random animations # Use a cookie to have one animation on refresh @@ -518,10 +518,10 @@ def export_perm_data(): ["#N_genotypes: " + str(perm_info['n_genotypes'])], ["#Genotype_file: " + perm_info['genofile']], ["#Units_linkage: " + perm_info['units_linkage']], - ["#Permutation_stratified_by: " + - ", ".join([str(cofactor) for cofactor in perm_info['strat_cofactors']])], - ["#RESULTS_1: Suggestive LRS(p=0.63) = " + - str(np.percentile(np.array(perm_info['perm_data']), 67))], + ["#Permutation_stratified_by: " + + ", ".join([str(cofactor) for cofactor in perm_info['strat_cofactors']])], + ["#RESULTS_1: Suggestive LRS(p=0.63) = " + + str(np.percentile(np.array(perm_info['perm_data']), 67))], ["#RESULTS_2: Significant LRS(p=0.05) = " + str( np.percentile(np.array(perm_info['perm_data']), 95))], ["#RESULTS_3: Highly Significant LRS(p=0.01) = " + str( -- cgit v1.2.3 From 114e7d3395f28ddead0ff3a94c10d0bf534fb493 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Fri, 30 Apr 2021 13:05:21 +0300 Subject: autopep8: Fix E101, E11 --- wqflask/maintenance/geno_to_json.py | 12 +- .../marker_regression/test_qtlreaper_mapping.py | 26 +- .../wqflask/marker_regression/test_rqtl_mapping.py | 64 ++-- wqflask/utility/Plot.py | 98 +++--- wqflask/utility/__init__.py | 2 +- wqflask/utility/genofile_parser.py | 150 ++++----- wqflask/utility/tools.py | 2 +- wqflask/wqflask/collect.py | 12 +- .../wqflask/correlation_matrix/show_corr_matrix.py | 4 +- wqflask/wqflask/ctl/ctl_analysis.py | 88 +++--- wqflask/wqflask/group_manager.py | 238 +++++++------- wqflask/wqflask/gsearch.py | 14 +- wqflask/wqflask/interval_analyst/GeneUtil.py | 130 ++++---- .../marker_regression/display_mapping_results.py | 44 +-- wqflask/wqflask/marker_regression/rqtl_mapping.py | 14 +- wqflask/wqflask/marker_regression/run_mapping.py | 352 ++++++++++----------- wqflask/wqflask/model.py | 2 +- wqflask/wqflask/search_results.py | 2 +- wqflask/wqflask/user_login.py | 70 ++-- 19 files changed, 662 insertions(+), 662 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/maintenance/geno_to_json.py b/wqflask/maintenance/geno_to_json.py index 27eb6553..c74489a8 100644 --- a/wqflask/maintenance/geno_to_json.py +++ b/wqflask/maintenance/geno_to_json.py @@ -113,13 +113,13 @@ class ConvertGenoFile: with open(self.output_file, 'w') as fh: json.dump(self.markers, fh, indent=" ", sort_keys=True) - # print('configurations:', str(configurations)) - #self.latest_col_pos = item_count + self.skipped_cols - #self.latest_col_value = item + # print('configurations:', str(configurations)) + #self.latest_col_pos = item_count + self.skipped_cols + #self.latest_col_value = item - # if item_count != 0: - # self.output_fh.write(" ") - # self.output_fh.write(self.configurations[item.upper()]) + # if item_count != 0: + # self.output_fh.write(" ") + # self.output_fh.write(self.configurations[item.upper()]) # self.output_fh.write("\n") diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py index 1198740d..47377873 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py @@ -7,17 +7,17 @@ from wqflask.marker_regression.qtlreaper_mapping import gen_pheno_txt_file class 
TestQtlReaperMapping(unittest.TestCase): - @mock.patch("wqflask.marker_regression.qtlreaper_mapping.TEMPDIR", "/home/user/data") - def test_gen_pheno_txt_file(self): - vals = ["V1", "x", "V4", "V3","x"] - samples = ["S1", "S2", "S3", "S4","S5"] - trait_filename = "trait_file" - with mock.patch("builtins.open", mock.mock_open())as mock_open: - gen_pheno_txt_file(samples=samples, vals=vals, - trait_filename=trait_filename) - mock_open.assert_called_once_with("/home/user/data/gn2/trait_file.txt", "w") - filehandler = mock_open() - write_calls = [mock.call('Trait\t'), mock.call( - 'S1\tS3\tS4\n'), mock.call('T1\t'), mock.call('V1\tV4\tV3')] + @mock.patch("wqflask.marker_regression.qtlreaper_mapping.TEMPDIR", "/home/user/data") + def test_gen_pheno_txt_file(self): + vals = ["V1", "x", "V4", "V3","x"] + samples = ["S1", "S2", "S3", "S4","S5"] + trait_filename = "trait_file" + with mock.patch("builtins.open", mock.mock_open())as mock_open: + gen_pheno_txt_file(samples=samples, vals=vals, + trait_filename=trait_filename) + mock_open.assert_called_once_with("/home/user/data/gn2/trait_file.txt", "w") + filehandler = mock_open() + write_calls = [mock.call('Trait\t'), mock.call( + 'S1\tS3\tS4\n'), mock.call('T1\t'), mock.call('V1\tV4\tV3')] - filehandler.write.assert_has_calls(write_calls) + filehandler.write.assert_has_calls(write_calls) diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py index d69a20d3..e518ec22 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py @@ -8,35 +8,35 @@ from wqflask.marker_regression.rqtl_mapping import sanitize_rqtl_names class TestRqtlMapping(unittest.TestCase): - def setUp(self): - self.app_context = app.app_context() - self.app_context.push() - - def tearDown(self): - self.app_context.pop() - - @mock.patch("wqflask.marker_regression.rqtl_mapping.g") - @mock.patch("wqflask.marker_regression.rqtl_mapping.logger") - def test_get_trait_data(self, mock_logger, mock_db): - """test for getting trait data_type return True""" - query_value = """SELECT value FROM TraitMetadata WHERE type='trait_data_type'""" - mock_db.db.execute.return_value.fetchone.return_value = [ - """{"type":"trait_data_type","name":"T1","traid_id":"fer434f"}"""] - results = get_trait_data_type("traid_id") - mock_db.db.execute.assert_called_with(query_value) - self.assertEqual(results, "fer434f") - - def test_sanitize_rqtl_phenotype(self): - """test for sanitizing rqtl phenotype""" - vals = ['f', "x", "r", "x","x"] - results = sanitize_rqtl_phenotype(vals) - expected_phenotype_string = 'c(f,NA,r,NA,NA)' - - self.assertEqual(results, expected_phenotype_string) - - def test_sanitize_rqtl_names(self): - """test for sanitzing rqtl names""" - vals = ['f', "x", "r", "x","x"] - expected_sanitized_name = "c('f',NA,'r',NA,NA)" - results = sanitize_rqtl_names(vals) - self.assertEqual(expected_sanitized_name, results) + def setUp(self): + self.app_context = app.app_context() + self.app_context.push() + + def tearDown(self): + self.app_context.pop() + + @mock.patch("wqflask.marker_regression.rqtl_mapping.g") + @mock.patch("wqflask.marker_regression.rqtl_mapping.logger") + def test_get_trait_data(self, mock_logger, mock_db): + """test for getting trait data_type return True""" + query_value = """SELECT value FROM TraitMetadata WHERE type='trait_data_type'""" + mock_db.db.execute.return_value.fetchone.return_value = [ + 
"""{"type":"trait_data_type","name":"T1","traid_id":"fer434f"}"""] + results = get_trait_data_type("traid_id") + mock_db.db.execute.assert_called_with(query_value) + self.assertEqual(results, "fer434f") + + def test_sanitize_rqtl_phenotype(self): + """test for sanitizing rqtl phenotype""" + vals = ['f', "x", "r", "x","x"] + results = sanitize_rqtl_phenotype(vals) + expected_phenotype_string = 'c(f,NA,r,NA,NA)' + + self.assertEqual(results, expected_phenotype_string) + + def test_sanitize_rqtl_names(self): + """test for sanitzing rqtl names""" + vals = ['f', "x", "r", "x","x"] + expected_sanitized_name = "c('f',NA,'r',NA,NA)" + results = sanitize_rqtl_names(vals) + self.assertEqual(expected_sanitized_name, results) diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py index 00658d10..f61e3b88 100644 --- a/wqflask/utility/Plot.py +++ b/wqflask/utility/Plot.py @@ -79,7 +79,7 @@ def frange(start, end=None, inc=1.0): start += 0.0 # force it to be a float count = int((end - start) / inc) if start + count * inc != end: - # Need to adjust the count. AFAICT, it always comes up one short. + # Need to adjust the count. AFAICT, it always comes up one short. count += 1 L = [start] * count for i in range(1, count): @@ -131,16 +131,16 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab plotWidth = canvas.size[0] - xLeftOffset - xRightOffset plotHeight = canvas.size[1] - yTopOffset - yBottomOffset if plotHeight <= 0 or plotWidth <= 0: - return + return if len(data) < 2: - return + return max_D = max(data) min_D = min(data) # add by NL 06-20-2011: fix the error: when max_D is infinite, log function in detScale will go wrong if max_D == float('inf') or max_D > webqtlConfig.MAXLRS: - max_D = webqtlConfig.MAXLRS # maximum LRS value + max_D = webqtlConfig.MAXLRS # maximum LRS value xLow, xTop, stepX = detScale(min_D, max_D) @@ -151,15 +151,15 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab dataXY = [] Count = [] while j <= xTop: - dataXY.append(j) - Count.append(0) - j += step + dataXY.append(j) + Count.append(0) + j += step for i, item in enumerate(data): - if item == float('inf') or item > webqtlConfig.MAXLRS: - item = webqtlConfig.MAXLRS # maximum LRS value - j = int((item - xLow) / step) - Count[j] += 1 + if item == float('inf') or item > webqtlConfig.MAXLRS: + item = webqtlConfig.MAXLRS # maximum LRS value + j = int((item - xLow) / step) + Count[j] += 1 yLow, yTop, stepY = detScale(0, max(Count)) @@ -169,12 +169,12 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab barWidth = xScale * step for i, count in enumerate(Count): - if count: - xc = (dataXY[i] - xLow) * xScale + xLeftOffset - yc = -(count - yLow) * yScale + yTopOffset + plotHeight - im_drawer.rectangle( - xy=((xc + 2, yc), (xc + barWidth - 2, yTopOffset + plotHeight)), - outline=barColor, fill=barColor) + if count: + xc = (dataXY[i] - xLow) * xScale + xLeftOffset + yc = -(count - yLow) * yScale + yTopOffset + plotHeight + im_drawer.rectangle( + xy=((xc + 2, yc), (xc + barWidth - 2, yTopOffset + plotHeight)), + outline=barColor, fill=barColor) # draw drawing region im_drawer.rectangle( @@ -186,39 +186,39 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab scaleFont = ImageFont.truetype(font=COUR_FILE, size=11) x = xLow for i in range(int(stepX) + 1): - xc = xLeftOffset + (x - xLow) * xScale - im_drawer.line( - xy=((xc, yTopOffset + plotHeight), (xc, yTopOffset + plotHeight + 5)), - fill=axesColor) - strX = 
cformat(d=x, rank=0) - im_drawer.text( - text=strX, - xy=(xc - im_drawer.textsize(strX, font=scaleFont)[0] / 2, - yTopOffset + plotHeight + 14), font=scaleFont) - x += (xTop - xLow) / stepX + xc = xLeftOffset + (x - xLow) * xScale + im_drawer.line( + xy=((xc, yTopOffset + plotHeight), (xc, yTopOffset + plotHeight + 5)), + fill=axesColor) + strX = cformat(d=x, rank=0) + im_drawer.text( + text=strX, + xy=(xc - im_drawer.textsize(strX, font=scaleFont)[0] / 2, + yTopOffset + plotHeight + 14), font=scaleFont) + x += (xTop - xLow) / stepX y = yLow for i in range(int(stepY) + 1): - yc = yTopOffset + plotHeight - (y - yLow) * yScale - im_drawer.line( - xy=((xLeftOffset, yc), (xLeftOffset - 5, yc)), fill=axesColor) - strY = "%d" % y - im_drawer.text( - text=strY, - xy=(xLeftOffset - im_drawer.textsize(strY, - font=scaleFont)[0] - 6, yc + 5), - font=scaleFont) - y += (yTop - yLow) / stepY + yc = yTopOffset + plotHeight - (y - yLow) * yScale + im_drawer.line( + xy=((xLeftOffset, yc), (xLeftOffset - 5, yc)), fill=axesColor) + strY = "%d" % y + im_drawer.text( + text=strY, + xy=(xLeftOffset - im_drawer.textsize(strY, + font=scaleFont)[0] - 6, yc + 5), + font=scaleFont) + y += (yTop - yLow) / stepY # draw label labelFont = ImageFont.truetype(font=TAHOMA_FILE, size=17) if XLabel: - im_drawer.text( - text=XLabel, - xy=(xLeftOffset + ( - plotWidth - im_drawer.textsize(XLabel, font=labelFont)[0]) / 2.0, - yTopOffset + plotHeight + yBottomOffset-10), - font=labelFont, fill=labelColor) + im_drawer.text( + text=XLabel, + xy=(xLeftOffset + ( + plotWidth - im_drawer.textsize(XLabel, font=labelFont)[0]) / 2.0, + yTopOffset + plotHeight + yBottomOffset-10), + font=labelFont, fill=labelColor) if YLabel: draw_rotated_text(canvas, text=YLabel, @@ -230,12 +230,12 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab labelFont = ImageFont.truetype(font=VERDANA_FILE, size=16) if title: - im_drawer.text( - text=title, - xy=(xLeftOffset + (plotWidth - im_drawer.textsize( - title, font=labelFont)[0]) / 2.0, - 20), - font=labelFont, fill=labelColor) + im_drawer.text( + text=title, + xy=(xLeftOffset + (plotWidth - im_drawer.textsize( + title, font=labelFont)[0]) / 2.0, + 20), + font=labelFont, fill=labelColor) # This function determines the scale of the plot diff --git a/wqflask/utility/__init__.py b/wqflask/utility/__init__.py index 816bc4df..6c8cd546 100644 --- a/wqflask/utility/__init__.py +++ b/wqflask/utility/__init__.py @@ -7,7 +7,7 @@ class Bunch: """Like a dictionary but using object notation""" def __init__(self, **kw): - self.__dict__ = kw + self.__dict__ = kw def __repr__(self): return pf(self.__dict__) diff --git a/wqflask/utility/genofile_parser.py b/wqflask/utility/genofile_parser.py index c0629b5d..eb545478 100644 --- a/wqflask/utility/genofile_parser.py +++ b/wqflask/utility/genofile_parser.py @@ -14,87 +14,87 @@ from pprint import pformat as pf class Marker: - def __init__(self): - self.name = None - self.chr = None - self.cM = None - self.Mb = None - self.genotypes = [] + def __init__(self): + self.name = None + self.chr = None + self.cM = None + self.Mb = None + self.genotypes = [] class ConvertGenoFile: - def __init__(self, input_file): - self.mb_exists = False - self.cm_exists = False - self.markers = [] + def __init__(self, input_file): + self.mb_exists = False + self.cm_exists = False + self.markers = [] - self.latest_row_pos = None - self.latest_col_pos = None + self.latest_row_pos = None + self.latest_col_pos = None - self.latest_row_value = None - self.latest_col_value 
= None - self.input_fh = open(input_file) - print("!!!!!!!!!!!!!!!!PARSER!!!!!!!!!!!!!!!!!!") - self.haplotype_notation = { - '@mat': "1", - '@pat': "2", - '@het': "-999", - '@unk': "-999" - } - self.configurations = {} + self.latest_row_value = None + self.latest_col_value = None + self.input_fh = open(input_file) + print("!!!!!!!!!!!!!!!!PARSER!!!!!!!!!!!!!!!!!!") + self.haplotype_notation = { + '@mat': "1", + '@pat': "2", + '@het': "-999", + '@unk': "-999" + } + self.configurations = {} - def process_rows(self): - for self.latest_row_pos, row in enumerate(self.input_fh): - self.latest_row_value = row - # Take care of headers - if not row.strip(): - continue - if row.startswith('#'): - continue - if row.startswith('Chr'): - if 'Mb' in row.split(): - self.mb_exists = True - if 'cM' in row.split(): - self.cm_exists = True - skip = 2 + self.cm_exists + self.mb_exists - self.individuals = row.split()[skip:] - continue - if row.startswith('@'): - key, _separater, value = row.partition(':') - key = key.strip() - value = value.strip() - if key in self.haplotype_notation: - self.configurations[value] = self.haplotype_notation[key] - continue - if not len(self.configurations): - raise EmptyConfigurations - yield row + def process_rows(self): + for self.latest_row_pos, row in enumerate(self.input_fh): + self.latest_row_value = row + # Take care of headers + if not row.strip(): + continue + if row.startswith('#'): + continue + if row.startswith('Chr'): + if 'Mb' in row.split(): + self.mb_exists = True + if 'cM' in row.split(): + self.cm_exists = True + skip = 2 + self.cm_exists + self.mb_exists + self.individuals = row.split()[skip:] + continue + if row.startswith('@'): + key, _separater, value = row.partition(':') + key = key.strip() + value = value.strip() + if key in self.haplotype_notation: + self.configurations[value] = self.haplotype_notation[key] + continue + if not len(self.configurations): + raise EmptyConfigurations + yield row - def process_csv(self): - for row in self.process_rows(): - row_items = row.split("\t") + def process_csv(self): + for row in self.process_rows(): + row_items = row.split("\t") - this_marker = Marker() - this_marker.name = row_items[1] - this_marker.chr = row_items[0] - if self.cm_exists and self.mb_exists: - this_marker.cM = row_items[2] - this_marker.Mb = row_items[3] - genotypes = row_items[4:] - elif self.cm_exists: - this_marker.cM = row_items[2] - genotypes = row_items[3:] - elif self.mb_exists: - this_marker.Mb = row_items[2] - genotypes = row_items[3:] - else: - genotypes = row_items[2:] - for item_count, genotype in enumerate(genotypes): - if genotype.upper().strip() in self.configurations: - this_marker.genotypes.append( - self.configurations[genotype.upper().strip()]) - else: - print("WARNING:", genotype.upper()) - this_marker.genotypes.append("NA") - self.markers.append(this_marker.__dict__) + this_marker = Marker() + this_marker.name = row_items[1] + this_marker.chr = row_items[0] + if self.cm_exists and self.mb_exists: + this_marker.cM = row_items[2] + this_marker.Mb = row_items[3] + genotypes = row_items[4:] + elif self.cm_exists: + this_marker.cM = row_items[2] + genotypes = row_items[3:] + elif self.mb_exists: + this_marker.Mb = row_items[2] + genotypes = row_items[3:] + else: + genotypes = row_items[2:] + for item_count, genotype in enumerate(genotypes): + if genotype.upper().strip() in self.configurations: + this_marker.genotypes.append( + self.configurations[genotype.upper().strip()]) + else: + print("WARNING:", genotype.upper()) + 
this_marker.genotypes.append("NA") + self.markers.append(this_marker.__dict__) diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py index d82e478d..4f09176a 100644 --- a/wqflask/utility/tools.py +++ b/wqflask/utility/tools.py @@ -73,7 +73,7 @@ def get_setting(command_id, guess=None): def get_setting_bool(id): v = get_setting(id) if v not in [0, False, 'False', 'FALSE', None]: - return True + return True return False diff --git a/wqflask/wqflask/collect.py b/wqflask/wqflask/collect.py index 9fd89524..b06d84ff 100644 --- a/wqflask/wqflask/collect.py +++ b/wqflask/wqflask/collect.py @@ -35,7 +35,7 @@ def process_traits(unprocessed_traits): data, _separator, the_hmac = trait.rpartition(':') data = data.strip() if g.user_session.logged_in: - assert the_hmac == hmac.hmac_creation(data), "Data tampering?" + assert the_hmac == hmac.hmac_creation(data), "Data tampering?" traits.add(str(data)) return traits @@ -52,14 +52,14 @@ def report_change(len_before, len_now): @app.route("/collections/store_trait_list", methods=('POST',)) def store_traits_list(): - params = request.form + params = request.form - traits = params['traits'] - hash = params['hash'] + traits = params['traits'] + hash = params['hash'] - Redis.set(hash, traits) + Redis.set(hash, traits) - return hash + return hash @app.route("/collections/add") diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py index 59469428..c04b17be 100644 --- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py +++ b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py @@ -238,9 +238,9 @@ class CorrelationMatrix: for i in range(len(self.trait_list)): loadings_row = [] if len(self.trait_list) > 2: - the_range = 3 + the_range = 3 else: - the_range = 2 + the_range = 2 for j in range(the_range): position = i + len(self.trait_list) * j loadings_row.append(self.loadings[0][position]) diff --git a/wqflask/wqflask/ctl/ctl_analysis.py b/wqflask/wqflask/ctl/ctl_analysis.py index f4eafbe7..820e81bc 100644 --- a/wqflask/wqflask/ctl/ctl_analysis.py +++ b/wqflask/wqflask/ctl/ctl_analysis.py @@ -115,8 +115,8 @@ class CTL: markers = [] markernames = [] for marker in parser.markers: - markernames.append(marker["name"]) - markers.append(marker["genotypes"]) + markernames.append(marker["name"]) + markers.append(marker["genotypes"]) genotypes = list(itertools.chain(*markers)) logger.debug(len(genotypes) / len(individuals), @@ -128,16 +128,16 @@ class CTL: # Create a phenotype matrix traits = [] for trait in self.trait_db_list: - logger.debug("retrieving data for", trait) - if trait != "": - ts = trait.split(':') - gt = create_trait(name=ts[0], dataset_name=ts[1]) - gt = retrieve_sample_data(gt, dataset, individuals) - for ind in individuals: - if ind in list(gt.data.keys()): - traits.append(gt.data[ind].value) - else: - traits.append("-999") + logger.debug("retrieving data for", trait) + if trait != "": + ts = trait.split(':') + gt = create_trait(name=ts[0], dataset_name=ts[1]) + gt = retrieve_sample_data(gt, dataset, individuals) + for ind in individuals: + if ind in list(gt.data.keys()): + traits.append(gt.data[ind].value) + else: + traits.append("-999") rPheno = r_t(ro.r.matrix(r_as_numeric(r_unlist(traits)), nrow=len(self.trait_db_list), ncol=len( individuals), dimnames=r_list(self.trait_db_list, individuals), byrow=True)) @@ -177,42 +177,42 @@ class CTL: # We start from 2, since R starts from 1 :) n = 2 for trait in self.trait_db_list: - # Create the QTL like CTL plots - 
self.results['imgurl' + \ - str(n)] = webqtlUtil.genRandStr("CTL_") + ".png" - self.results['imgloc' + str(n)] = GENERATED_IMAGE_DIR + \ - self.results['imgurl' + str(n)] - r_png(self.results['imgloc' + str(n)], - width=1000, height=600, type='cairo-png') - self.r_plotCTLobject( - res, (n - 1), significance=significance, main='Phenotype ' + trait) - r_dev_off() - n = n + 1 + # Create the QTL like CTL plots + self.results['imgurl' + \ + str(n)] = webqtlUtil.genRandStr("CTL_") + ".png" + self.results['imgloc' + str(n)] = GENERATED_IMAGE_DIR + \ + self.results['imgurl' + str(n)] + r_png(self.results['imgloc' + str(n)], + width=1000, height=600, type='cairo-png') + self.r_plotCTLobject( + res, (n - 1), significance=significance, main='Phenotype ' + trait) + r_dev_off() + n = n + 1 # Flush any output from R sys.stdout.flush() # Create the interactive graph for cytoscape visualization (Nodes and Edges) if not isinstance(significant, ri.RNULLType): - for x in range(len(significant[0])): - logger.debug(significant[0][x], significant[1] - [x], significant[2][x]) # Debug to console - # Source - tsS = significant[0][x].split(':') - # Target - tsT = significant[2][x].split(':') - # Retrieve Source info from the DB - gtS = create_trait(name=tsS[0], dataset_name=tsS[1]) - # Retrieve Target info from the DB - gtT = create_trait(name=tsT[0], dataset_name=tsT[1]) - self.addNode(gtS) - self.addNode(gtT) - self.addEdge(gtS, gtT, significant, x) - - # Update the trait name for the displayed table - significant[0][x] = "{} ({})".format(gtS.symbol, gtS.name) - # Update the trait name for the displayed table - significant[2][x] = "{} ({})".format(gtT.symbol, gtT.name) + for x in range(len(significant[0])): + logger.debug(significant[0][x], significant[1] + [x], significant[2][x]) # Debug to console + # Source + tsS = significant[0][x].split(':') + # Target + tsT = significant[2][x].split(':') + # Retrieve Source info from the DB + gtS = create_trait(name=tsS[0], dataset_name=tsS[1]) + # Retrieve Target info from the DB + gtT = create_trait(name=tsT[0], dataset_name=tsT[1]) + self.addNode(gtS) + self.addNode(gtT) + self.addEdge(gtS, gtT, significant, x) + + # Update the trait name for the displayed table + significant[0][x] = "{} ({})".format(gtS.symbol, gtS.name) + # Update the trait name for the displayed table + significant[2][x] = "{} ({})".format(gtT.symbol, gtT.name) self.elements = json.dumps(self.nodes_list + self.edges_list) @@ -227,8 +227,8 @@ class CTL: self.loadImage("imgloc1", "imgdata1") n = 2 for trait in self.trait_db_list: - self.loadImage("imgloc" + str(n), "imgdata" + str(n)) - n = n + 1 + self.loadImage("imgloc" + str(n), "imgdata" + str(n)) + n = n + 1 def process_results(self, results): logger.info("Processing CTL output") diff --git a/wqflask/wqflask/group_manager.py b/wqflask/wqflask/group_manager.py index 55a7da0e..995915a9 100644 --- a/wqflask/wqflask/group_manager.py +++ b/wqflask/wqflask/group_manager.py @@ -16,158 +16,158 @@ logger = getLogger(__name__) @app.route("/groups/manage", methods=('GET', 'POST')) def manage_groups(): - params = request.form if request.form else request.args - if "add_new_group" in params: - return redirect(url_for('add_group')) - else: - admin_groups, member_groups = get_user_groups(g.user_session.user_id) - return render_template("admin/group_manager.html", admin_groups=admin_groups, member_groups=member_groups) + params = request.form if request.form else request.args + if "add_new_group" in params: + return redirect(url_for('add_group')) + else: + 
admin_groups, member_groups = get_user_groups(g.user_session.user_id) + return render_template("admin/group_manager.html", admin_groups=admin_groups, member_groups=member_groups) @app.route("/groups/view", methods=('GET', 'POST')) def view_group(): - params = request.form if request.form else request.args - group_id = params['id'] - group_info = get_group_info(group_id) - admins_info = [] - user_is_admin = False - if g.user_session.user_id in group_info['admins']: - user_is_admin = True - for user_id in group_info['admins']: - if user_id: - user_info = get_user_by_unique_column("user_id", user_id) - admins_info.append(user_info) - members_info = [] - for user_id in group_info['members']: - if user_id: - user_info = get_user_by_unique_column("user_id", user_id) - members_info.append(user_info) - - # ZS: This whole part might not scale well with many resources - resources_info = [] - all_resources = get_resources() - for resource_id in all_resources: - resource_info = get_resource_info(resource_id) - group_masks = resource_info['group_masks'] - if group_id in group_masks: - this_resource = {} - privileges = group_masks[group_id] - this_resource['id'] = resource_id - this_resource['name'] = resource_info['name'] - this_resource['data'] = privileges['data'] - this_resource['metadata'] = privileges['metadata'] - this_resource['admin'] = privileges['admin'] - resources_info.append(this_resource) - - return render_template("admin/view_group.html", group_info=group_info, admins=admins_info, members=members_info, user_is_admin=user_is_admin, resources=resources_info) + params = request.form if request.form else request.args + group_id = params['id'] + group_info = get_group_info(group_id) + admins_info = [] + user_is_admin = False + if g.user_session.user_id in group_info['admins']: + user_is_admin = True + for user_id in group_info['admins']: + if user_id: + user_info = get_user_by_unique_column("user_id", user_id) + admins_info.append(user_info) + members_info = [] + for user_id in group_info['members']: + if user_id: + user_info = get_user_by_unique_column("user_id", user_id) + members_info.append(user_info) + + # ZS: This whole part might not scale well with many resources + resources_info = [] + all_resources = get_resources() + for resource_id in all_resources: + resource_info = get_resource_info(resource_id) + group_masks = resource_info['group_masks'] + if group_id in group_masks: + this_resource = {} + privileges = group_masks[group_id] + this_resource['id'] = resource_id + this_resource['name'] = resource_info['name'] + this_resource['data'] = privileges['data'] + this_resource['metadata'] = privileges['metadata'] + this_resource['admin'] = privileges['admin'] + resources_info.append(this_resource) + + return render_template("admin/view_group.html", group_info=group_info, admins=admins_info, members=members_info, user_is_admin=user_is_admin, resources=resources_info) @app.route("/groups/remove", methods=('POST',)) def remove_groups(): - group_ids_to_remove = request.form['selected_group_ids'] - for group_id in group_ids_to_remove.split(":"): - delete_group(g.user_session.user_id, group_id) + group_ids_to_remove = request.form['selected_group_ids'] + for group_id in group_ids_to_remove.split(":"): + delete_group(g.user_session.user_id, group_id) - return redirect(url_for('manage_groups')) + return redirect(url_for('manage_groups')) @app.route("/groups/remove_users", methods=('POST',)) def remove_users(): - group_id = request.form['group_id'] - admin_ids_to_remove = 
request.form['selected_admin_ids'] - member_ids_to_remove = request.form['selected_member_ids'] + group_id = request.form['group_id'] + admin_ids_to_remove = request.form['selected_admin_ids'] + member_ids_to_remove = request.form['selected_member_ids'] - remove_users_from_group(g.user_session.user_id, admin_ids_to_remove.split( - ":"), group_id, user_type="admins") - remove_users_from_group(g.user_session.user_id, member_ids_to_remove.split( - ":"), group_id, user_type="members") + remove_users_from_group(g.user_session.user_id, admin_ids_to_remove.split( + ":"), group_id, user_type="admins") + remove_users_from_group(g.user_session.user_id, member_ids_to_remove.split( + ":"), group_id, user_type="members") - return redirect(url_for('view_group', id=group_id)) + return redirect(url_for('view_group', id=group_id)) @app.route("/groups/add_", methods=('POST',)) def add_users(user_type='members'): - group_id = request.form['group_id'] - if user_type == "admins": - user_emails = request.form['admin_emails_to_add'].split(",") - add_users_to_group(g.user_session.user_id, group_id, - user_emails, admins=True) - elif user_type == "members": - user_emails = request.form['member_emails_to_add'].split(",") - add_users_to_group(g.user_session.user_id, group_id, - user_emails, admins=False) + group_id = request.form['group_id'] + if user_type == "admins": + user_emails = request.form['admin_emails_to_add'].split(",") + add_users_to_group(g.user_session.user_id, group_id, + user_emails, admins=True) + elif user_type == "members": + user_emails = request.form['member_emails_to_add'].split(",") + add_users_to_group(g.user_session.user_id, group_id, + user_emails, admins=False) - return redirect(url_for('view_group', id=group_id)) + return redirect(url_for('view_group', id=group_id)) @app.route("/groups/change_name", methods=('POST',)) def change_name(): - group_id = request.form['group_id'] - new_name = request.form['new_name'] - group_info = change_group_name(g.user_session.user_id, group_id, new_name) + group_id = request.form['group_id'] + new_name = request.form['new_name'] + group_info = change_group_name(g.user_session.user_id, group_id, new_name) - return new_name + return new_name @app.route("/groups/create", methods=('GET', 'POST')) def add_or_edit_group(): - params = request.form if request.form else request.args - if "group_name" in params: - member_user_ids = set() - admin_user_ids = set() - # ZS: Always add the user creating the group as an admin - admin_user_ids.add(g.user_session.user_id) - if "admin_emails_to_add" in params: - admin_emails = params['admin_emails_to_add'].split(",") - for email in admin_emails: - user_details = get_user_by_unique_column("email_address", email) - if user_details: - admin_user_ids.add(user_details['user_id']) - #send_group_invites(params['group_id'], user_email_list = admin_emails, user_type="admins") - if "member_emails_to_add" in params: - member_emails = params['member_emails_to_add'].split(",") - for email in member_emails: - user_details = get_user_by_unique_column("email_address", email) - if user_details: - member_user_ids.add(user_details['user_id']) - #send_group_invites(params['group_id'], user_email_list = user_emails, user_type="members") - - create_group(list(admin_user_ids), list( - member_user_ids), params['group_name']) - return redirect(url_for('manage_groups')) - else: - return render_template("admin/create_group.html") + params = request.form if request.form else request.args + if "group_name" in params: + member_user_ids = set() + 
admin_user_ids = set() + # ZS: Always add the user creating the group as an admin + admin_user_ids.add(g.user_session.user_id) + if "admin_emails_to_add" in params: + admin_emails = params['admin_emails_to_add'].split(",") + for email in admin_emails: + user_details = get_user_by_unique_column("email_address", email) + if user_details: + admin_user_ids.add(user_details['user_id']) + #send_group_invites(params['group_id'], user_email_list = admin_emails, user_type="admins") + if "member_emails_to_add" in params: + member_emails = params['member_emails_to_add'].split(",") + for email in member_emails: + user_details = get_user_by_unique_column("email_address", email) + if user_details: + member_user_ids.add(user_details['user_id']) + #send_group_invites(params['group_id'], user_email_list = user_emails, user_type="members") + + create_group(list(admin_user_ids), list( + member_user_ids), params['group_name']) + return redirect(url_for('manage_groups')) + else: + return render_template("admin/create_group.html") # ZS: Will integrate this later, for now just letting users be added directly def send_group_invites(group_id, user_email_list=[], user_type="members"): - for user_email in user_email_list: - user_details = get_user_by_unique_column("email_address", user_email) - if user_details: - group_info = get_group_info(group_id) - # ZS: Probably not necessary since the group should normally always exist if group_id is being passed here, - # but it's technically possible to hit it if Redis is cleared out before submitting the new users or something - if group_info: - # ZS: Don't add user if they're already an admin or if they're being added a regular user and are already a regular user, - # but do add them if they're a regular user and are added as an admin - if (user_details['user_id'] in group_info['admins']) or \ - ((user_type == "members") and (user_details['user_id'] in group_info['members'])): - continue - else: - send_verification_email(user_details, template_name="email/group_verification.txt", - key_prefix="verification_code", subject = "You've been invited to join a GeneNetwork user group") - else: - temp_password = ''.join(random.choice( - string.ascii_uppercase + string.digits) for _ in range(6)) - user_details = { - 'user_id': str(uuid.uuid4()), - 'email_address': user_email, - 'registration_info': basic_info(), - 'password': set_password(temp_password), - 'confirmed': 0 - } - save_user(user_details, user_details['user_id']) - send_invitation_email(user_email, temp_password) + for user_email in user_email_list: + user_details = get_user_by_unique_column("email_address", user_email) + if user_details: + group_info = get_group_info(group_id) + # ZS: Probably not necessary since the group should normally always exist if group_id is being passed here, + # but it's technically possible to hit it if Redis is cleared out before submitting the new users or something + if group_info: + # ZS: Don't add user if they're already an admin or if they're being added a regular user and are already a regular user, + # but do add them if they're a regular user and are added as an admin + if (user_details['user_id'] in group_info['admins']) or \ + ((user_type == "members") and (user_details['user_id'] in group_info['members'])): + continue + else: + send_verification_email(user_details, template_name="email/group_verification.txt", + key_prefix="verification_code", subject = "You've been invited to join a GeneNetwork user group") + else: + temp_password = ''.join(random.choice( + string.ascii_uppercase 
+ string.digits) for _ in range(6)) + user_details = { + 'user_id': str(uuid.uuid4()), + 'email_address': user_email, + 'registration_info': basic_info(), + 'password': set_password(temp_password), + 'confirmed': 0 + } + save_user(user_details, user_details['user_id']) + send_invitation_email(user_email, temp_password) # @app.route() diff --git a/wqflask/wqflask/gsearch.py b/wqflask/wqflask/gsearch.py index 9548d130..f02da27c 100644 --- a/wqflask/wqflask/gsearch.py +++ b/wqflask/wqflask/gsearch.py @@ -247,13 +247,13 @@ class GSearch: if not trait_ob: continue if this_trait['dataset'] == this_trait['group'] + "Publish": - try: - if trait_ob.locus_chr != "" and trait_ob.locus_mb != "": - this_trait['max_lrs_text'] = "Chr" + \ - str(trait_ob.locus_chr) + \ - ": " + str(trait_ob.locus_mb) - except: - this_trait['max_lrs_text'] = "N/A" + try: + if trait_ob.locus_chr != "" and trait_ob.locus_mb != "": + this_trait['max_lrs_text'] = "Chr" + \ + str(trait_ob.locus_chr) + \ + ": " + str(trait_ob.locus_mb) + except: + this_trait['max_lrs_text'] = "N/A" trait_list.append(this_trait) diff --git a/wqflask/wqflask/interval_analyst/GeneUtil.py b/wqflask/wqflask/interval_analyst/GeneUtil.py index e624a146..2f1c142c 100644 --- a/wqflask/wqflask/interval_analyst/GeneUtil.py +++ b/wqflask/wqflask/interval_analyst/GeneUtil.py @@ -7,79 +7,79 @@ from flask import Flask, g def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): - fetchFields = ['SpeciesId', 'Id', 'GeneSymbol', 'GeneDescription', 'Chromosome', 'TxStart', 'TxEnd', - 'Strand', 'GeneID', 'NM_ID', 'kgID', 'GenBankID', 'UnigenID', 'ProteinID', 'AlignID', - 'exonCount', 'exonStarts', 'exonEnds', 'cdsStart', 'cdsEnd'] + fetchFields = ['SpeciesId', 'Id', 'GeneSymbol', 'GeneDescription', 'Chromosome', 'TxStart', 'TxEnd', + 'Strand', 'GeneID', 'NM_ID', 'kgID', 'GenBankID', 'UnigenID', 'ProteinID', 'AlignID', + 'exonCount', 'exonStarts', 'exonEnds', 'cdsStart', 'cdsEnd'] - # List All Species in the Gene Table - speciesDict = {} - results = g.db.execute(""" + # List All Species in the Gene Table + speciesDict = {} + results = g.db.execute(""" SELECT Species.Name, GeneList.SpeciesId FROM Species, GeneList WHERE GeneList.SpeciesId = Species.Id GROUP BY GeneList.SpeciesId""").fetchall() - for item in results: - speciesDict[item[0]] = item[1] + for item in results: + speciesDict[item[0]] = item[1] - # List current Species and other Species - speciesId = speciesDict[species] - otherSpecies = [[X, speciesDict[X]] for X in list(speciesDict.keys())] - otherSpecies.remove([species, speciesId]) + # List current Species and other Species + speciesId = speciesDict[species] + otherSpecies = [[X, speciesDict[X]] for X in list(speciesDict.keys())] + otherSpecies.remove([species, speciesId]) - results = g.db.execute(""" + results = g.db.execute(""" SELECT %s FROM GeneList WHERE SpeciesId = %d AND Chromosome = '%s' AND ((TxStart > %f and TxStart <= %f) OR (TxEnd > %f and TxEnd <= %f)) ORDER BY txStart """ % (", ".join(fetchFields), - speciesId, chrName, - startMb, endMb, - startMb, endMb)).fetchall() - - GeneList = [] - - if results: - for result in results: - newdict = {} - for j, item in enumerate(fetchFields): - newdict[item] = result[j] - # count SNPs if possible - if diffCol and species == 'mouse': - newdict["snpCount"] = g.db.execute(""" + speciesId, chrName, + startMb, endMb, + startMb, endMb)).fetchall() + + GeneList = [] + + if results: + for result in results: + newdict = {} + for j, item in enumerate(fetchFields): + newdict[item] = result[j] + # count 
SNPs if possible + if diffCol and species == 'mouse': + newdict["snpCount"] = g.db.execute(""" SELECT count(*) FROM BXDSnpPosition WHERE Chr = '%s' AND Mb >= %2.6f AND Mb < %2.6f AND StrainId1 = %d AND StrainId2 = %d """ % (chrName, newdict["TxStart"], newdict["TxEnd"], diffCol[0], diffCol[1])).fetchone()[0] - newdict["snpDensity"] = newdict["snpCount"] / \ - (newdict["TxEnd"] - newdict["TxStart"]) / 1000.0 - else: - newdict["snpDensity"] = newdict["snpCount"] = 0 - - try: - newdict['GeneLength'] = 1000.0 * (newdict['TxEnd'] - newdict['TxStart']) - except: - pass - - # load gene from other Species by the same name - for item in otherSpecies: - othSpec, othSpecId = item - newdict2 = {} - - resultsOther = g.db.execute("SELECT %s FROM GeneList WHERE SpeciesId = %d AND geneSymbol= '%s' LIMIT 1" % (", ".join(fetchFields), - othSpecId, - newdict["GeneSymbol"])).fetchone() - - if resultsOther: - for j, item in enumerate(fetchFields): - newdict2[item] = resultsOther[j] - - # count SNPs if possible, could be a separate function - if diffCol and othSpec == 'mouse': - newdict2["snpCount"] = g.db.execute(""" + newdict["snpDensity"] = newdict["snpCount"] / \ + (newdict["TxEnd"] - newdict["TxStart"]) / 1000.0 + else: + newdict["snpDensity"] = newdict["snpCount"] = 0 + + try: + newdict['GeneLength'] = 1000.0 * (newdict['TxEnd'] - newdict['TxStart']) + except: + pass + + # load gene from other Species by the same name + for item in otherSpecies: + othSpec, othSpecId = item + newdict2 = {} + + resultsOther = g.db.execute("SELECT %s FROM GeneList WHERE SpeciesId = %d AND geneSymbol= '%s' LIMIT 1" % (", ".join(fetchFields), + othSpecId, + newdict["GeneSymbol"])).fetchone() + + if resultsOther: + for j, item in enumerate(fetchFields): + newdict2[item] = resultsOther[j] + + # count SNPs if possible, could be a separate function + if diffCol and othSpec == 'mouse': + newdict2["snpCount"] = g.db.execute(""" SELECT count(*) FROM BXDSnpPosition WHERE Chr = '%s' AND @@ -87,19 +87,19 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): StrainId1 = %d AND StrainId2 = %d """ % (chrName, newdict["TxStart"], newdict["TxEnd"], diffCol[0], diffCol[1])).fetchone()[0] - newdict2["snpDensity"] = newdict2["snpCount"] / \ - (newdict2["TxEnd"] - newdict2["TxStart"]) / 1000.0 - else: - newdict2["snpDensity"] = newdict2["snpCount"] = 0 + newdict2["snpDensity"] = newdict2["snpCount"] / \ + (newdict2["TxEnd"] - newdict2["TxStart"]) / 1000.0 + else: + newdict2["snpDensity"] = newdict2["snpCount"] = 0 - try: - newdict2['GeneLength'] = 1000.0 * \ - (newdict2['TxEnd'] - newdict2['TxStart']) - except: - pass + try: + newdict2['GeneLength'] = 1000.0 * \ + (newdict2['TxEnd'] - newdict2['TxStart']) + except: + pass - newdict['%sGene' % othSpec] = newdict2 + newdict['%sGene' % othSpec] = newdict2 - GeneList.append(newdict) + GeneList.append(newdict) - return GeneList + return GeneList diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py index 8d0134d8..c68e0fde 100644 --- a/wqflask/wqflask/marker_regression/display_mapping_results.py +++ b/wqflask/wqflask/marker_regression/display_mapping_results.py @@ -395,17 +395,17 @@ class DisplayMappingResults: # Darwing Options try: - if self.selectedChr > -1: - self.graphWidth = min(self.GRAPH_MAX_WIDTH, max( - self.GRAPH_MIN_WIDTH, int(start_vars['graphWidth']))) - else: - self.graphWidth = min(self.GRAPH_MAX_WIDTH, max( - self.MULT_GRAPH_MIN_WIDTH, int(start_vars['graphWidth']))) + if 
self.selectedChr > -1: + self.graphWidth = min(self.GRAPH_MAX_WIDTH, max( + self.GRAPH_MIN_WIDTH, int(start_vars['graphWidth']))) + else: + self.graphWidth = min(self.GRAPH_MAX_WIDTH, max( + self.MULT_GRAPH_MIN_WIDTH, int(start_vars['graphWidth']))) except: - if self.selectedChr > -1: - self.graphWidth = self.GRAPH_DEFAULT_WIDTH - else: - self.graphWidth = self.MULT_GRAPH_DEFAULT_WIDTH + if self.selectedChr > -1: + self.graphWidth = self.GRAPH_DEFAULT_WIDTH + else: + self.graphWidth = self.MULT_GRAPH_DEFAULT_WIDTH # BEGIN HaplotypeAnalyst if 'haplotypeAnalystCheck' in list(start_vars.keys()): @@ -574,15 +574,15 @@ class DisplayMappingResults: chrName, self.diffCol, self.startMb, self.endMb, "rat") if self.geneCol and self.intervalAnalystChecked: - ####################################################################### - #Nick use GENEID as RefGene to get Literature Correlation Informations# - #For Interval Mapping, Literature Correlation isn't useful, so skip it# - #through set GENEID is None # - ####################################################################### + ####################################################################### + #Nick use GENEID as RefGene to get Literature Correlation Informations# + #For Interval Mapping, Literature Correlation isn't useful, so skip it# + #through set GENEID is None # + ####################################################################### - GENEID = None + GENEID = None - self.geneTable(self.geneCol, GENEID) + self.geneTable(self.geneCol, GENEID) ################################################################ # Plots goes here @@ -2636,11 +2636,11 @@ class DisplayMappingResults: LRSCoordXY.append((Xc, Yc)) if not self.multipleInterval and self.additiveChecked: - if additiveMax == 0.0: - additiveMax = 0.000001 - Yc = yZero - qtlresult['additive'] * \ - AdditiveHeightThresh / additiveMax - AdditiveCoordXY.append((Xc, Yc)) + if additiveMax == 0.0: + additiveMax = 0.000001 + Yc = yZero - qtlresult['additive'] * \ + AdditiveHeightThresh / additiveMax + AdditiveCoordXY.append((Xc, Yc)) m += 1 diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py index b3c9fddf..32dbad1f 100644 --- a/wqflask/wqflask/marker_regression/rqtl_mapping.py +++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py @@ -294,13 +294,13 @@ def add_categorical_covar(cross, covar_as_string, i): col_names = [] # logger.info("loop") for x in range(1, (nCol + 1)): - #logger.info("loop" + str(x)); - col_name = "covar_" + str(i) + "_" + str(x) - #logger.info("col_name" + col_name); - ro.r('the_cross$pheno <- cbind(pheno, ' + \ - col_name + ' = newcovar[,' + str(x) + '])') - col_names.append(col_name) - #logger.info("loop" + str(x) + "done"); + #logger.info("loop" + str(x)); + col_name = "covar_" + str(i) + "_" + str(x) + #logger.info("col_name" + col_name); + ro.r('the_cross$pheno <- cbind(pheno, ' + \ + col_name + ' = newcovar[,' + str(x) + '])') + col_names.append(col_name) + #logger.info("loop" + str(x) + "done"); logger.info("returning from add_categorical_covar") return ro.r["the_cross"], col_names diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py index d9b28fba..041f4348 100644 --- a/wqflask/wqflask/marker_regression/run_mapping.py +++ b/wqflask/wqflask/marker_regression/run_mapping.py @@ -65,10 +65,10 @@ class RunMapping: # ZS: Sometimes a group may have a genofile that only includes a subset of samples genofile_samplelist = [] if 'genofile' in 
start_vars: - if start_vars['genofile'] != "": - self.genofile_string = start_vars['genofile'] - self.dataset.group.genofile = self.genofile_string.split(":")[0] - genofile_samplelist = get_genofile_samplelist(self.dataset) + if start_vars['genofile'] != "": + self.genofile_string = start_vars['genofile'] + self.dataset.group.genofile = self.genofile_string.split(":")[0] + genofile_samplelist = get_genofile_samplelist(self.dataset) all_samples_ordered = self.dataset.group.all_samples_ordered() @@ -324,182 +324,182 @@ class RunMapping: self.no_results = False if len(results) == 0: - self.no_results = True + self.no_results = True else: - if self.pair_scan == True: - self.qtl_results = [] - highest_chr = 1 # This is needed in order to convert the highest chr to X/Y - for marker in results: - if marker['chr1'] > 0 or marker['chr1'] == "X" or marker['chr1'] == "X/Y": - if marker['chr1'] > highest_chr or marker['chr1'] == "X" or marker['chr1'] == "X/Y": - highest_chr = marker['chr1'] - if 'lod_score' in list(marker.keys()): - self.qtl_results.append(marker) - - self.trimmed_markers = results - - for qtl in enumerate(self.qtl_results): - self.json_data['chr1'].append(str(qtl['chr1'])) - self.json_data['chr2'].append(str(qtl['chr2'])) - self.json_data['Mb'].append(qtl['Mb']) - self.json_data['markernames'].append(qtl['name']) - - self.js_data = dict( - json_data=self.json_data, - this_trait=self.this_trait.name, - data_set=self.dataset.name, - maf=self.maf, - manhattan_plot=self.manhattan_plot, - mapping_scale=self.mapping_scale, - qtl_results=self.qtl_results - ) - - else: - self.qtl_results = [] - self.results_for_browser = [] - self.annotations_for_browser = [] - highest_chr = 1 # This is needed in order to convert the highest chr to X/Y - for marker in results: - if 'Mb' in marker: - this_ps = marker['Mb'] * 1000000 - else: - this_ps = marker['cM'] * 1000000 - - browser_marker = dict( - chr=str(marker['chr']), - rs=marker['name'], - ps=this_ps, - url="/show_trait?trait_id=" + \ - marker['name'] + "&dataset=" + \ - self.dataset.group.name + "Geno" - ) - - if self.geno_db_exists == "True": - annot_marker = dict( - name=str(marker['name']), - chr=str(marker['chr']), - rs=marker['name'], - pos=this_ps, - url="/show_trait?trait_id=" + \ - marker['name'] + "&dataset=" + \ - self.dataset.group.name + "Geno" - ) - else: - annot_marker = dict( - name=str(marker['name']), - chr=str(marker['chr']), - rs=marker['name'], - pos=this_ps - ) - - if 'lrs_value' in marker and marker['lrs_value'] > 0: - browser_marker['p_wald'] = 10**- \ - (marker['lrs_value'] / 4.61) - elif 'lod_score' in marker and marker['lod_score'] > 0: - browser_marker['p_wald'] = 10**-(marker['lod_score']) - else: - browser_marker['p_wald'] = 0 - - self.results_for_browser.append(browser_marker) - self.annotations_for_browser.append(annot_marker) - if str(marker['chr']) > '0' or str(marker['chr']) == "X" or str(marker['chr']) == "X/Y": - if str(marker['chr']) > str(highest_chr) or str(marker['chr']) == "X" or str(marker['chr']) == "X/Y": - highest_chr = marker['chr'] - if ('lod_score' in marker.keys()) or ('lrs_value' in marker.keys()): - if 'Mb' in marker.keys(): - marker['display_pos'] = "Chr" + \ - str(marker['chr']) + ": " + \ - "{:.6f}".format(marker['Mb']) - elif 'cM' in marker.keys(): - marker['display_pos'] = "Chr" + \ - str(marker['chr']) + ": " + \ - "{:.3f}".format(marker['cM']) - else: - marker['display_pos'] = "N/A" - self.qtl_results.append(marker) - - total_markers = len(self.qtl_results) - - with Bench("Exporting 
Results"): - export_mapping_results(self.dataset, self.this_trait, self.qtl_results, self.mapping_results_path, - self.mapping_scale, self.score_type, self.transform, self.covariates, self.n_samples) - - with Bench("Trimming Markers for Figure"): - if len(self.qtl_results) > 30000: - self.qtl_results = trim_markers_for_figure( - self.qtl_results) - self.results_for_browser = trim_markers_for_figure( - self.results_for_browser) - filtered_annotations = [] - for marker in self.results_for_browser: - for annot_marker in self.annotations_for_browser: - if annot_marker['rs'] == marker['rs']: - filtered_annotations.append(annot_marker) - break - self.annotations_for_browser = filtered_annotations - browser_files = write_input_for_browser( - self.dataset, self.results_for_browser, self.annotations_for_browser) - else: - browser_files = write_input_for_browser( - self.dataset, self.results_for_browser, self.annotations_for_browser) - - with Bench("Trimming Markers for Table"): - self.trimmed_markers = trim_markers_for_table(results) - - chr_lengths = get_chr_lengths( - self.mapping_scale, self.mapping_method, self.dataset, self.qtl_results) - - # ZS: For zooming into genome browser, need to pass chromosome name instead of number - if self.dataset.group.species == "mouse": - if self.selected_chr == 20: - this_chr = "X" - else: - this_chr = str(self.selected_chr) - elif self.dataset.group.species == "rat": - if self.selected_chr == 21: - this_chr = "X" - else: - this_chr = str(self.selected_chr) - else: - if self.selected_chr == 22: - this_chr = "X" - elif self.selected_chr == 23: - this_chr = "Y" - else: - this_chr = str(self.selected_chr) - - if self.mapping_method != "gemma": - if self.score_type == "LRS": - significant_for_browser = self.significant / 4.61 - else: - significant_for_browser = self.significant - - self.js_data = dict( - #result_score_type = self.score_type, - #this_trait = self.this_trait.name, - #data_set = self.dataset.name, - #maf = self.maf, - #manhattan_plot = self.manhattan_plot, - #mapping_scale = self.mapping_scale, - #chromosomes = chromosome_mb_lengths, - #qtl_results = self.qtl_results, - categorical_vars=self.categorical_vars, - chr_lengths=chr_lengths, - num_perm=self.num_perm, - perm_results=self.perm_output, - significant=significant_for_browser, - browser_files=browser_files, - selected_chr=this_chr, - total_markers=total_markers - ) - else: + if self.pair_scan == True: + self.qtl_results = [] + highest_chr = 1 # This is needed in order to convert the highest chr to X/Y + for marker in results: + if marker['chr1'] > 0 or marker['chr1'] == "X" or marker['chr1'] == "X/Y": + if marker['chr1'] > highest_chr or marker['chr1'] == "X" or marker['chr1'] == "X/Y": + highest_chr = marker['chr1'] + if 'lod_score' in list(marker.keys()): + self.qtl_results.append(marker) + + self.trimmed_markers = results + + for qtl in enumerate(self.qtl_results): + self.json_data['chr1'].append(str(qtl['chr1'])) + self.json_data['chr2'].append(str(qtl['chr2'])) + self.json_data['Mb'].append(qtl['Mb']) + self.json_data['markernames'].append(qtl['name']) + self.js_data = dict( - chr_lengths=chr_lengths, - browser_files=browser_files, - selected_chr=this_chr, - total_markers=total_markers + json_data=self.json_data, + this_trait=self.this_trait.name, + data_set=self.dataset.name, + maf=self.maf, + manhattan_plot=self.manhattan_plot, + mapping_scale=self.mapping_scale, + qtl_results=self.qtl_results ) + else: + self.qtl_results = [] + self.results_for_browser = [] + self.annotations_for_browser 
= [] + highest_chr = 1 # This is needed in order to convert the highest chr to X/Y + for marker in results: + if 'Mb' in marker: + this_ps = marker['Mb'] * 1000000 + else: + this_ps = marker['cM'] * 1000000 + + browser_marker = dict( + chr=str(marker['chr']), + rs=marker['name'], + ps=this_ps, + url="/show_trait?trait_id=" + \ + marker['name'] + "&dataset=" + \ + self.dataset.group.name + "Geno" + ) + + if self.geno_db_exists == "True": + annot_marker = dict( + name=str(marker['name']), + chr=str(marker['chr']), + rs=marker['name'], + pos=this_ps, + url="/show_trait?trait_id=" + \ + marker['name'] + "&dataset=" + \ + self.dataset.group.name + "Geno" + ) + else: + annot_marker = dict( + name=str(marker['name']), + chr=str(marker['chr']), + rs=marker['name'], + pos=this_ps + ) + + if 'lrs_value' in marker and marker['lrs_value'] > 0: + browser_marker['p_wald'] = 10**- \ + (marker['lrs_value'] / 4.61) + elif 'lod_score' in marker and marker['lod_score'] > 0: + browser_marker['p_wald'] = 10**-(marker['lod_score']) + else: + browser_marker['p_wald'] = 0 + + self.results_for_browser.append(browser_marker) + self.annotations_for_browser.append(annot_marker) + if str(marker['chr']) > '0' or str(marker['chr']) == "X" or str(marker['chr']) == "X/Y": + if str(marker['chr']) > str(highest_chr) or str(marker['chr']) == "X" or str(marker['chr']) == "X/Y": + highest_chr = marker['chr'] + if ('lod_score' in marker.keys()) or ('lrs_value' in marker.keys()): + if 'Mb' in marker.keys(): + marker['display_pos'] = "Chr" + \ + str(marker['chr']) + ": " + \ + "{:.6f}".format(marker['Mb']) + elif 'cM' in marker.keys(): + marker['display_pos'] = "Chr" + \ + str(marker['chr']) + ": " + \ + "{:.3f}".format(marker['cM']) + else: + marker['display_pos'] = "N/A" + self.qtl_results.append(marker) + + total_markers = len(self.qtl_results) + + with Bench("Exporting Results"): + export_mapping_results(self.dataset, self.this_trait, self.qtl_results, self.mapping_results_path, + self.mapping_scale, self.score_type, self.transform, self.covariates, self.n_samples) + + with Bench("Trimming Markers for Figure"): + if len(self.qtl_results) > 30000: + self.qtl_results = trim_markers_for_figure( + self.qtl_results) + self.results_for_browser = trim_markers_for_figure( + self.results_for_browser) + filtered_annotations = [] + for marker in self.results_for_browser: + for annot_marker in self.annotations_for_browser: + if annot_marker['rs'] == marker['rs']: + filtered_annotations.append(annot_marker) + break + self.annotations_for_browser = filtered_annotations + browser_files = write_input_for_browser( + self.dataset, self.results_for_browser, self.annotations_for_browser) + else: + browser_files = write_input_for_browser( + self.dataset, self.results_for_browser, self.annotations_for_browser) + + with Bench("Trimming Markers for Table"): + self.trimmed_markers = trim_markers_for_table(results) + + chr_lengths = get_chr_lengths( + self.mapping_scale, self.mapping_method, self.dataset, self.qtl_results) + + # ZS: For zooming into genome browser, need to pass chromosome name instead of number + if self.dataset.group.species == "mouse": + if self.selected_chr == 20: + this_chr = "X" + else: + this_chr = str(self.selected_chr) + elif self.dataset.group.species == "rat": + if self.selected_chr == 21: + this_chr = "X" + else: + this_chr = str(self.selected_chr) + else: + if self.selected_chr == 22: + this_chr = "X" + elif self.selected_chr == 23: + this_chr = "Y" + else: + this_chr = str(self.selected_chr) + + if self.mapping_method != 
"gemma": + if self.score_type == "LRS": + significant_for_browser = self.significant / 4.61 + else: + significant_for_browser = self.significant + + self.js_data = dict( + #result_score_type = self.score_type, + #this_trait = self.this_trait.name, + #data_set = self.dataset.name, + #maf = self.maf, + #manhattan_plot = self.manhattan_plot, + #mapping_scale = self.mapping_scale, + #chromosomes = chromosome_mb_lengths, + #qtl_results = self.qtl_results, + categorical_vars=self.categorical_vars, + chr_lengths=chr_lengths, + num_perm=self.num_perm, + perm_results=self.perm_output, + significant=significant_for_browser, + browser_files=browser_files, + selected_chr=this_chr, + total_markers=total_markers + ) + else: + self.js_data = dict( + chr_lengths=chr_lengths, + browser_files=browser_files, + selected_chr=this_chr, + total_markers=total_markers + ) + def run_rqtl_plink(self): # os.chdir("") never do this inside a webserver!! diff --git a/wqflask/wqflask/model.py b/wqflask/wqflask/model.py index 7b9ff8fe..55b0278a 100644 --- a/wqflask/wqflask/model.py +++ b/wqflask/wqflask/model.py @@ -36,7 +36,7 @@ class User(Base): # json detailing when they became a superuser, otherwise empty superuser = Column(Text) - # if not superuser + # if not superuser logins = relationship("Login", order_by="desc(Login.timestamp)", diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py index 0d2fb2f8..2e1cb992 100644 --- a/wqflask/wqflask/search_results.py +++ b/wqflask/wqflask/search_results.py @@ -78,7 +78,7 @@ class SearchResultPage: try: self.search() except: - self.search_term_exists = False + self.search_term_exists = False self.too_many_results = False if self.search_term_exists: diff --git a/wqflask/wqflask/user_login.py b/wqflask/wqflask/user_login.py index bfaed9c2..725e7c9e 100644 --- a/wqflask/wqflask/user_login.py +++ b/wqflask/wqflask/user_login.py @@ -464,49 +464,49 @@ def password_reset_step2(): def register_user(params): - thank_you_mode = False - errors = [] - user_details = {} - - user_details['email_address'] = params.get( - 'email_address', '').encode("utf-8").strip() - if not (5 <= len(user_details['email_address']) <= 50): - errors.append( - 'Email Address needs to be between 5 and 50 characters.') - else: - email_exists = get_user_by_unique_column( - "email_address", user_details['email_address']) - if email_exists: - errors.append('User already exists with that email') + thank_you_mode = False + errors = [] + user_details = {} - user_details['full_name'] = params.get( - 'full_name', '').encode("utf-8").strip() - if not (5 <= len(user_details['full_name']) <= 50): - errors.append('Full Name needs to be between 5 and 50 characters.') + user_details['email_address'] = params.get( + 'email_address', '').encode("utf-8").strip() + if not (5 <= len(user_details['email_address']) <= 50): + errors.append( + 'Email Address needs to be between 5 and 50 characters.') + else: + email_exists = get_user_by_unique_column( + "email_address", user_details['email_address']) + if email_exists: + errors.append('User already exists with that email') - user_details['organization'] = params.get( - 'organization', '').encode("utf-8").strip() - if user_details['organization'] and not (5 <= len(user_details['organization']) <= 50): - errors.append( - 'Organization needs to be empty or between 5 and 50 characters.') + user_details['full_name'] = params.get( + 'full_name', '').encode("utf-8").strip() + if not (5 <= len(user_details['full_name']) <= 50): + errors.append('Full Name needs to be 
between 5 and 50 characters.') - password = str(params.get('password', '')) - if not (6 <= len(password)): - errors.append('Password needs to be at least 6 characters.') + user_details['organization'] = params.get( + 'organization', '').encode("utf-8").strip() + if user_details['organization'] and not (5 <= len(user_details['organization']) <= 50): + errors.append( + 'Organization needs to be empty or between 5 and 50 characters.') - if params.get('password_confirm') != password: - errors.append("Passwords don't match.") + password = str(params.get('password', '')) + if not (6 <= len(password)): + errors.append('Password needs to be at least 6 characters.') - user_details['password'] = set_password(password) - user_details['user_id'] = str(uuid.uuid4()) - user_details['confirmed'] = 1 + if params.get('password_confirm') != password: + errors.append("Passwords don't match.") - user_details['registration_info'] = basic_info() + user_details['password'] = set_password(password) + user_details['user_id'] = str(uuid.uuid4()) + user_details['confirmed'] = 1 - if len(errors) == 0: - save_user(user_details, user_details['user_id']) + user_details['registration_info'] = basic_info() + + if len(errors) == 0: + save_user(user_details, user_details['user_id']) - return errors + return errors @app.route("/n/register", methods=('GET', 'POST')) -- cgit v1.2.3 From 4e65b73a0f903834f8dbd02d11c49b75d7c935c7 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Fri, 30 Apr 2021 13:06:58 +0300 Subject: autopep8: Fix E121,E122,E123,EI24,E125,E126,E127,E128,E129,E131,E133 --- wqflask/base/data_set.py | 2 +- wqflask/maintenance/convert_geno_to_bimbam.py | 6 +- wqflask/maintenance/gen_select_dataset.py | 8 +- .../maintenance/generate_kinship_from_bimbam.py | 4 +- wqflask/maintenance/geno_to_json.py | 4 +- wqflask/maintenance/quantile_normalize.py | 16 +-- wqflask/maintenance/set_resource_defaults.py | 30 ++-- wqflask/tests/unit/base/test_webqtl_case_data.py | 8 +- wqflask/tests/unit/wqflask/api/test_correlation.py | 4 +- .../marker_regression/test_gemma_mapping.py | 14 +- .../marker_regression/test_qtlreaper_mapping.py | 2 +- .../wqflask/marker_regression/test_rqtl_mapping.py | 2 +- .../unit/wqflask/snp_browser/test_snp_browser.py | 12 +- wqflask/tests/unit/wqflask/test_server_side.py | 2 +- wqflask/utility/Plot.py | 2 +- wqflask/utility/__init__.py | 2 +- wqflask/utility/elasticsearch_tools.py | 2 +- wqflask/utility/genofile_parser.py | 10 +- wqflask/utility/startup_config.py | 2 +- wqflask/utility/tools.py | 4 +- wqflask/utility/webqtlUtil.py | 46 +++--- wqflask/wqflask/api/correlation.py | 2 +- wqflask/wqflask/api/mapping.py | 14 +- wqflask/wqflask/api/router.py | 118 ++++++++-------- wqflask/wqflask/collect.py | 14 +- .../comparison_bar_chart/comparison_bar_chart.py | 6 +- wqflask/wqflask/correlation/show_corr_results.py | 122 ++++++++-------- .../wqflask/correlation_matrix/show_corr_matrix.py | 4 +- wqflask/wqflask/ctl/ctl_analysis.py | 18 +-- wqflask/wqflask/do_search.py | 156 ++++++++++----------- wqflask/wqflask/export_traits.py | 6 +- wqflask/wqflask/external_tools/send_to_bnw.py | 2 +- .../wqflask/external_tools/send_to_geneweaver.py | 12 +- .../wqflask/external_tools/send_to_webgestalt.py | 6 +- wqflask/wqflask/group_manager.py | 20 +-- wqflask/wqflask/gsearch.py | 46 +++--- wqflask/wqflask/heatmap/heatmap.py | 16 +-- wqflask/wqflask/interval_analyst/GeneUtil.py | 20 +-- wqflask/wqflask/markdown_routes.py | 2 +- .../marker_regression/display_mapping_results.py | 72 +++++----- 
.../wqflask/marker_regression/qtlreaper_mapping.py | 46 +++--- wqflask/wqflask/marker_regression/rqtl_mapping.py | 4 +- wqflask/wqflask/marker_regression/run_mapping.py | 34 ++--- wqflask/wqflask/model.py | 4 +- wqflask/wqflask/parser.py | 2 +- wqflask/wqflask/search_results.py | 10 +- wqflask/wqflask/server_side.py | 4 +- wqflask/wqflask/show_trait/SampleList.py | 4 +- wqflask/wqflask/show_trait/export_trait_data.py | 4 +- wqflask/wqflask/snp_browser/snp_browser.py | 22 +-- wqflask/wqflask/user_login.py | 34 ++--- wqflask/wqflask/user_manager.py | 2 +- wqflask/wqflask/user_session.py | 8 +- wqflask/wqflask/wgcna/wgcna_analysis.py | 2 +- 54 files changed, 509 insertions(+), 509 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py index 5eac695e..e20f2f98 100644 --- a/wqflask/base/data_set.py +++ b/wqflask/base/data_set.py @@ -151,7 +151,7 @@ class DatasetType: "WHERE InbredSet.Name = '%s' AND " "PublishFreeze.InbredSetId = InbredSet.Id"), 'geno': ("SELECT GenoFreeze.Id FROM GenoFreeze WHERE " - "GenoFreeze.Name = \"%s\" ") + "GenoFreeze.Name = \"%s\" ") } dataset_name_mapping = { diff --git a/wqflask/maintenance/convert_geno_to_bimbam.py b/wqflask/maintenance/convert_geno_to_bimbam.py index c5af1ca6..078be529 100644 --- a/wqflask/maintenance/convert_geno_to_bimbam.py +++ b/wqflask/maintenance/convert_geno_to_bimbam.py @@ -56,7 +56,7 @@ class ConvertGenoFile: '@pat': "0", '@het': "0.5", '@unk': "NA" - } + } self.configurations = {} self.input_fh = open(self.input_file) @@ -171,7 +171,7 @@ class ConvertGenoFile: snp_output_file = os.path.join( new_directory, group_name + "_snps.txt") output_files = [geno_output_file, - pheno_output_file, snp_output_file] + pheno_output_file, snp_output_file] print("%s -> %s" % ( os.path.join(old_directory, input_file), geno_output_file)) convertob = ConvertGenoFile(input_file, output_files) @@ -184,7 +184,7 @@ class ConvertGenoFile: print(" Exception:", why) print(traceback.print_exc()) print(" Found in row %s at tabular column %s" % (convertob.latest_row_pos, - convertob.latest_col_pos)) + convertob.latest_col_pos)) print(" Column is:", convertob.latest_col_value) print(" Row is:", convertob.latest_row_value) break diff --git a/wqflask/maintenance/gen_select_dataset.py b/wqflask/maintenance/gen_select_dataset.py index 484336a6..db65a11f 100644 --- a/wqflask/maintenance/gen_select_dataset.py +++ b/wqflask/maintenance/gen_select_dataset.py @@ -62,10 +62,10 @@ def parse_db_uri(): parsed_uri = urllib.parse.urlparse(SQL_URI) db_conn_info = dict( - db=parsed_uri.path[1:], - host=parsed_uri.hostname, - user=parsed_uri.username, - passwd=parsed_uri.password) + db=parsed_uri.path[1:], + host=parsed_uri.hostname, + user=parsed_uri.username, + passwd=parsed_uri.password) print(db_conn_info) return db_conn_info diff --git a/wqflask/maintenance/generate_kinship_from_bimbam.py b/wqflask/maintenance/generate_kinship_from_bimbam.py index cd39fceb..9f01d094 100644 --- a/wqflask/maintenance/generate_kinship_from_bimbam.py +++ b/wqflask/maintenance/generate_kinship_from_bimbam.py @@ -23,7 +23,7 @@ class GenerateKinshipMatrices: def generate_kinship(self): gemma_command = "/gnu/store/xhzgjr0jvakxv6h3blj8z496xjig69b0-profile/bin/gemma -g " + self.geno_file + \ " -p " + self.pheno_file + \ - " -gk 1 -outdir /home/zas1024/genotype_files/genotype/bimbam/ -o " + self.group_name + " -gk 1 -outdir /home/zas1024/genotype_files/genotype/bimbam/ -o " + self.group_name print("command:", gemma_command) os.system(gemma_command) @@ 
-52,7 +52,7 @@ class GenerateKinshipMatrices: print(" Exception:", why) print(traceback.print_exc()) print(" Found in row %s at tabular column %s" % (convertob.latest_row_pos, - convertob.latest_col_pos)) + convertob.latest_col_pos)) print(" Column is:", convertob.latest_col_value) print(" Row is:", convertob.latest_row_value) break diff --git a/wqflask/maintenance/geno_to_json.py b/wqflask/maintenance/geno_to_json.py index c74489a8..32e0e34b 100644 --- a/wqflask/maintenance/geno_to_json.py +++ b/wqflask/maintenance/geno_to_json.py @@ -63,7 +63,7 @@ class ConvertGenoFile: '@pat': "0", '@het': "0.5", '@unk': "NA" - } + } self.configurations = {} #self.skipped_cols = 3 @@ -172,7 +172,7 @@ class ConvertGenoFile: print(" Exception:", why) print(traceback.print_exc()) print(" Found in row %s at tabular column %s" % (convertob.latest_row_pos, - convertob.latest_col_pos)) + convertob.latest_col_pos)) print(" Column is:", convertob.latest_col_value) print(" Row is:", convertob.latest_row_value) break diff --git a/wqflask/maintenance/quantile_normalize.py b/wqflask/maintenance/quantile_normalize.py index ac7689f5..88bb2cb5 100644 --- a/wqflask/maintenance/quantile_normalize.py +++ b/wqflask/maintenance/quantile_normalize.py @@ -21,10 +21,10 @@ def parse_db_uri(): parsed_uri = urllib.parse.urlparse(SQL_URI) db_conn_info = dict( - db=parsed_uri.path[1:], - host=parsed_uri.hostname, - user=parsed_uri.username, - passwd=parsed_uri.password) + db=parsed_uri.path[1:], + host=parsed_uri.hostname, + user=parsed_uri.username, + passwd=parsed_uri.password) print(db_conn_info) return db_conn_info @@ -70,10 +70,10 @@ def set_data(dataset_name): trait_name = line1.split('\t')[0] for i, sample in enumerate(sample_names): this_sample = { - "name": sample, - "value": line1.split('\t')[i + 1], - "qnorm": line2.split('\t')[i + 1] - } + "name": sample, + "value": line1.split('\t')[i + 1], + "qnorm": line2.split('\t')[i + 1] + } sample_list.append(this_sample) query = """SELECT Species.SpeciesName, InbredSet.InbredSetName, ProbeSetFreeze.FullName FROM Species, InbredSet, ProbeSetFreeze, ProbeFreeze, ProbeSetXRef, ProbeSet diff --git a/wqflask/maintenance/set_resource_defaults.py b/wqflask/maintenance/set_resource_defaults.py index c6c4f44c..0f472494 100644 --- a/wqflask/maintenance/set_resource_defaults.py +++ b/wqflask/maintenance/set_resource_defaults.py @@ -44,10 +44,10 @@ def parse_db_uri(): parsed_uri = urllib.parse.urlparse(SQL_URI) db_conn_info = dict( - db=parsed_uri.path[1:], - host=parsed_uri.hostname, - user=parsed_uri.username, - passwd=parsed_uri.password) + db=parsed_uri.path[1:], + host=parsed_uri.hostname, + user=parsed_uri.username, + passwd=parsed_uri.password) print(db_conn_info) return db_conn_info @@ -69,12 +69,12 @@ def insert_probeset_resources(default_owner_id): resource_ob['type'] = "dataset-probeset" if resource[2] < 1 and resource[3] > 0: resource_ob['default_mask'] = {"data": "view", - "metadata": "view", - "admin": "not-admin"} + "metadata": "view", + "admin": "not-admin"} else: resource_ob['default_mask'] = {"data": "no-access", - "metadata": "no-access", - "admin": "not-admin"} + "metadata": "no-access", + "admin": "not-admin"} resource_ob['group_masks'] = {} add_resource(resource_ob, update=False) @@ -101,11 +101,11 @@ def insert_publish_resources(default_owner_id): resource_ob['name'] = str(resource[0]) resource_ob['owner_id'] = default_owner_id resource_ob['data'] = {"dataset": str(resource[1]), - "trait": str(resource[0])} + "trait": str(resource[0])} resource_ob['type'] = 
"dataset-publish" resource_ob['default_mask'] = {"data": "view", - "metadata": "view", - "admin": "not-admin"} + "metadata": "view", + "admin": "not-admin"} resource_ob['group_masks'] = {} @@ -133,12 +133,12 @@ def insert_geno_resources(default_owner_id): resource_ob['type'] = "dataset-geno" if resource[2] < 1: resource_ob['default_mask'] = {"data": "view", - "metadata": "view", - "admin": "not-admin"} + "metadata": "view", + "admin": "not-admin"} else: resource_ob['default_mask'] = {"data": "no-access", - "metadata": "no-access", - "admin": "not-admin"} + "metadata": "no-access", + "admin": "not-admin"} resource_ob['group_masks'] = {} add_resource(resource_ob, update=False) diff --git a/wqflask/tests/unit/base/test_webqtl_case_data.py b/wqflask/tests/unit/base/test_webqtl_case_data.py index cebd41ce..e1555cb4 100644 --- a/wqflask/tests/unit/base/test_webqtl_case_data.py +++ b/wqflask/tests/unit/base/test_webqtl_case_data.py @@ -10,10 +10,10 @@ class TestWebqtlCaseData(unittest.TestCase): def setUp(self): self.w = webqtlCaseData(name="Test", - value=0, - variance=0.0, - num_cases=10, - name2="Test2") + value=0, + variance=0.0, + num_cases=10, + name2="Test2") def test_webqtl_case_data_repr(self): self.assertEqual( diff --git a/wqflask/tests/unit/wqflask/api/test_correlation.py b/wqflask/tests/unit/wqflask/api/test_correlation.py index 34ffa9ef..1089a36f 100644 --- a/wqflask/tests/unit/wqflask/api/test_correlation.py +++ b/wqflask/tests/unit/wqflask/api/test_correlation.py @@ -106,9 +106,9 @@ class TestCorrelations(unittest.TestCase): target_vals = [3.4, 6.2, 4.1, 3.4, 1.2, 5.6] trait_data = {"S1": AttributeSetter({"value": 2.3}), "S2": AttributeSetter({"value": 1.1}), - "S3": AttributeSetter( + "S3": AttributeSetter( {"value": 6.3}), "S4": AttributeSetter({"value": 3.6}), "S5": AttributeSetter({"value": 4.1}), - "S6": AttributeSetter({"value": 5.0})} + "S6": AttributeSetter({"value": 5.0})} this_trait = AttributeSetter({"data": trait_data}) mock_normalize.return_value = ([2.3, 1.1, 6.3, 3.6, 4.1, 5.0], [3.4, 6.2, 4.1, 3.4, 1.2, 5.6], 6) diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_gemma_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_gemma_mapping.py index 5cbaf0e0..4003d68f 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_gemma_mapping.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_gemma_mapping.py @@ -162,13 +162,13 @@ X\tM5\t12\tQ\tE\tMMB\tR\t21.1\tW\t0.65\t0.6""" results = parse_loco_output( this_dataset={}, gwa_output_filename=".xw/") expected_results = [ - {'name': 'M1', 'chr': 'X/Y', 'Mb': 2.8457155e-05, 'p_value': 0.85, - 'additive': 23.3, 'lod_score': 0.07058107428570727}, - {'name': 'M2', 'chr': 4, 'Mb': 1.2e-05, 'p_value': 0.5, - 'additive': 24.0, 'lod_score': 0.3010299956639812}, - {'name': 'M4', 'chr': 'Y', 'Mb': 1.2e-05, 'p_value': 0.7, - 'additive': 11.6, 'lod_score': 0.1549019599857432}, - {'name': 'M5', 'chr': 'X', 'Mb': 1.2e-05, 'p_value': 0.6, 'additive': 21.1, 'lod_score': 0.22184874961635637}] + {'name': 'M1', 'chr': 'X/Y', 'Mb': 2.8457155e-05, 'p_value': 0.85, + 'additive': 23.3, 'lod_score': 0.07058107428570727}, + {'name': 'M2', 'chr': 4, 'Mb': 1.2e-05, 'p_value': 0.5, + 'additive': 24.0, 'lod_score': 0.3010299956639812}, + {'name': 'M4', 'chr': 'Y', 'Mb': 1.2e-05, 'p_value': 0.7, + 'additive': 11.6, 'lod_score': 0.1549019599857432}, + {'name': 'M5', 'chr': 'X', 'Mb': 1.2e-05, 'p_value': 0.6, 'additive': 21.1, 'lod_score': 0.22184874961635637}] self.assertEqual(expected_results, results) diff --git 
a/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py index 47377873..93848a84 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py @@ -18,6 +18,6 @@ class TestQtlReaperMapping(unittest.TestCase): mock_open.assert_called_once_with("/home/user/data/gn2/trait_file.txt", "w") filehandler = mock_open() write_calls = [mock.call('Trait\t'), mock.call( - 'S1\tS3\tS4\n'), mock.call('T1\t'), mock.call('V1\tV4\tV3')] + 'S1\tS3\tS4\n'), mock.call('T1\t'), mock.call('V1\tV4\tV3')] filehandler.write.assert_has_calls(write_calls) diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py index e518ec22..68686e27 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py @@ -21,7 +21,7 @@ class TestRqtlMapping(unittest.TestCase): """test for getting trait data_type return True""" query_value = """SELECT value FROM TraitMetadata WHERE type='trait_data_type'""" mock_db.db.execute.return_value.fetchone.return_value = [ - """{"type":"trait_data_type","name":"T1","traid_id":"fer434f"}"""] + """{"type":"trait_data_type","name":"T1","traid_id":"fer434f"}"""] results = get_trait_data_type("traid_id") mock_db.db.execute.assert_called_with(query_value) self.assertEqual(results, "fer434f") diff --git a/wqflask/tests/unit/wqflask/snp_browser/test_snp_browser.py b/wqflask/tests/unit/wqflask/snp_browser/test_snp_browser.py index 8823e1fc..89442c47 100644 --- a/wqflask/tests/unit/wqflask/snp_browser/test_snp_browser.py +++ b/wqflask/tests/unit/wqflask/snp_browser/test_snp_browser.py @@ -22,10 +22,10 @@ class TestSnpBrowser(unittest.TestCase): strains = {"mouse": ["S1", "S2", "S3", "S4", "S5"], "rat": []} expected_results = ([['Index', 'SNP ID', 'Chr', 'Mb', 'Alleles', 'ConScore', 'Domain 1', 'Domain 2', 'Details'], - ['S1', 'S2', 'S3', 'S4', 'S5']], 5, - ['index', 'snp_name', 'chr', 'mb_formatted', 'alleles', - 'conservation_score', 'domain_1', 'domain_2', - 'function_details', 'S1', 'S2', 'S3', 'S4', 'S5']) + ['S1', 'S2', 'S3', 'S4', 'S5']], 5, + ['index', 'snp_name', 'chr', 'mb_formatted', 'alleles', + 'conservation_score', 'domain_1', 'domain_2', + 'function_details', 'S1', 'S2', 'S3', 'S4', 'S5']) results_with_snp = get_header_list( variant_type="SNP", strains=strains, species="Mouse", empty_columns=empty_columns) @@ -34,8 +34,8 @@ class TestSnpBrowser(unittest.TestCase): expected_results_with_indel = ( ['Index', 'ID', 'Type', 'InDel Chr', 'Mb Start', 'Mb End', 'Strand', 'Size', 'Sequence', 'Source'], 0, - ['index', 'indel_name', 'indel_type', 'indel_chr', 'indel_mb_s', - 'indel_mb_e', 'indel_strand', 'indel_size', 'indel_sequence', 'source_name']) + ['index', 'indel_name', 'indel_type', 'indel_chr', 'indel_mb_s', + 'indel_mb_e', 'indel_strand', 'indel_size', 'indel_sequence', 'source_name']) self.assertEqual(expected_results, results_with_snp) self.assertEqual(expected_results_with_indel, results_with_indel) diff --git a/wqflask/tests/unit/wqflask/test_server_side.py b/wqflask/tests/unit/wqflask/test_server_side.py index 9d988aea..be7ca2df 100644 --- a/wqflask/tests/unit/wqflask/test_server_side.py +++ b/wqflask/tests/unit/wqflask/test_server_side.py @@ -23,7 +23,7 @@ class TestServerSideTableTests(unittest.TestCase): ] headers = ['first', 
'second', 'third'] request_args = {'sEcho': '1', 'iSortCol_0': '1', 'iSortingCols': '1', - 'sSortDir_0': 'asc', 'iDisplayStart': '0', 'iDisplayLength': '3'} + 'sSortDir_0': 'asc', 'iDisplayStart': '0', 'iDisplayLength': '3'} test_page = ServerSideTable( rows_count, table_rows, headers, request_args).get_page() diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py index f61e3b88..37a8a1a5 100644 --- a/wqflask/utility/Plot.py +++ b/wqflask/utility/Plot.py @@ -206,7 +206,7 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab im_drawer.text( text=strY, xy=(xLeftOffset - im_drawer.textsize(strY, - font=scaleFont)[0] - 6, yc + 5), + font=scaleFont)[0] - 6, yc + 5), font=scaleFont) y += (yTop - yLow) / stepY diff --git a/wqflask/utility/__init__.py b/wqflask/utility/__init__.py index 6c8cd546..25273fa0 100644 --- a/wqflask/utility/__init__.py +++ b/wqflask/utility/__init__.py @@ -32,4 +32,4 @@ class Struct: def __repr__(self): return '{%s}' % str(', '.join('%s : %s' % (k, repr(v)) for - (k, v) in list(self.__dict__.items()))) + (k, v) in list(self.__dict__.items()))) diff --git a/wqflask/utility/elasticsearch_tools.py b/wqflask/utility/elasticsearch_tools.py index 55907dd5..e56c22eb 100644 --- a/wqflask/utility/elasticsearch_tools.py +++ b/wqflask/utility/elasticsearch_tools.py @@ -50,7 +50,7 @@ from utility.tools import ELASTICSEARCH_HOST, ELASTICSEARCH_PORT def test_elasticsearch_connection(): es = Elasticsearch(['http://' + ELASTICSEARCH_HOST + \ - ":" + str(ELASTICSEARCH_PORT) + '/'], verify_certs=True) + ":" + str(ELASTICSEARCH_PORT) + '/'], verify_certs=True) if not es.ping(): logger.warning("Elasticsearch is DOWN") diff --git a/wqflask/utility/genofile_parser.py b/wqflask/utility/genofile_parser.py index eb545478..86d9823e 100644 --- a/wqflask/utility/genofile_parser.py +++ b/wqflask/utility/genofile_parser.py @@ -37,10 +37,10 @@ class ConvertGenoFile: self.input_fh = open(input_file) print("!!!!!!!!!!!!!!!!PARSER!!!!!!!!!!!!!!!!!!") self.haplotype_notation = { - '@mat': "1", - '@pat': "2", - '@het': "-999", - '@unk': "-999" + '@mat': "1", + '@pat': "2", + '@het': "-999", + '@unk': "-999" } self.configurations = {} @@ -93,7 +93,7 @@ class ConvertGenoFile: for item_count, genotype in enumerate(genotypes): if genotype.upper().strip() in self.configurations: this_marker.genotypes.append( - self.configurations[genotype.upper().strip()]) + self.configurations[genotype.upper().strip()]) else: print("WARNING:", genotype.upper()) this_marker.genotypes.append("NA") diff --git a/wqflask/utility/startup_config.py b/wqflask/utility/startup_config.py index 05f8a2b0..6ef759e0 100644 --- a/wqflask/utility/startup_config.py +++ b/wqflask/utility/startup_config.py @@ -39,4 +39,4 @@ def app_config(): # es.test_elasticsearch_connection() print(("GN2 is running. 
Visit %s[http://localhost:%s/%s](%s)" % - (BLUE, str(port), ENDC, get_setting("WEBSERVER_URL")))) + (BLUE, str(port), ENDC, get_setting("WEBSERVER_URL")))) diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py index 4f09176a..e28abb48 100644 --- a/wqflask/utility/tools.py +++ b/wqflask/utility/tools.py @@ -295,8 +295,8 @@ ORCID_CLIENT_SECRET = get_setting('ORCID_CLIENT_SECRET') ORCID_AUTH_URL = None if ORCID_CLIENT_ID != 'UNKNOWN' and ORCID_CLIENT_SECRET: ORCID_AUTH_URL = "https://orcid.org/oauth/authorize?response_type=code&scope=/authenticate&show_login=true&client_id=" + \ - ORCID_CLIENT_ID + "&client_secret=" + ORCID_CLIENT_SECRET + \ - "&redirect_uri=" + GN2_BRANCH_URL + "n/login/orcid_oauth2" + ORCID_CLIENT_ID + "&client_secret=" + ORCID_CLIENT_SECRET + \ + "&redirect_uri=" + GN2_BRANCH_URL + "n/login/orcid_oauth2" ORCID_TOKEN_URL = get_setting('ORCID_TOKEN_URL') ELASTICSEARCH_HOST = get_setting('ELASTICSEARCH_HOST') diff --git a/wqflask/utility/webqtlUtil.py b/wqflask/utility/webqtlUtil.py index f355a865..0cb71567 100644 --- a/wqflask/utility/webqtlUtil.py +++ b/wqflask/utility/webqtlUtil.py @@ -35,29 +35,29 @@ from base import webqtlConfig # NL, 07/27/2010. moved from webqtlForm.py # Dict of Parents and F1 information, In the order of [F1, Mat, Pat] ParInfo = { -'BXH': ['BHF1', 'HBF1', 'C57BL/6J', 'C3H/HeJ'], -'AKXD': ['AKF1', 'KAF1', 'AKR/J', 'DBA/2J'], -'BXD': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'], -'C57BL-6JxC57BL-6NJF2': ['', '', 'C57BL/6J', 'C57BL/6NJ'], -'BXD300': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'], -'B6BTBRF2': ['B6BTBRF1', 'BTBRB6F1', 'C57BL/6J', 'BTBRT<+>tf/J'], -'BHHBF2': ['B6HF2', 'HB6F2', 'C57BL/6J', 'C3H/HeJ'], -'BHF2': ['B6HF2', 'HB6F2', 'C57BL/6J', 'C3H/HeJ'], -'B6D2F2': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'], -'BDF2-1999': ['B6D2F2', 'D2B6F2', 'C57BL/6J', 'DBA/2J'], -'BDF2-2005': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'], -'CTB6F2': ['CTB6F2', 'B6CTF2', 'C57BL/6J', 'Castaneous'], -'CXB': ['CBF1', 'BCF1', 'C57BL/6ByJ', 'BALB/cByJ'], -'AXBXA': ['ABF1', 'BAF1', 'C57BL/6J', 'A/J'], -'AXB': ['ABF1', 'BAF1', 'C57BL/6J', 'A/J'], -'BXA': ['BAF1', 'ABF1', 'C57BL/6J', 'A/J'], -'LXS': ['LSF1', 'SLF1', 'ISS', 'ILS'], -'HXBBXH': ['SHR_BNF1', 'BN_SHRF1', 'BN-Lx/Cub', 'SHR/OlaIpcv'], -'BayXSha': ['BayXShaF1', 'ShaXBayF1', 'Bay-0', 'Shahdara'], -'ColXBur': ['ColXBurF1', 'BurXColF1', 'Col-0', 'Bur-0'], -'ColXCvi': ['ColXCviF1', 'CviXColF1', 'Col-0', 'Cvi'], -'SXM': ['SMF1', 'MSF1', 'Steptoe', 'Morex'], -'HRDP': ['SHR_BNF1', 'BN_SHRF1', 'BN-Lx/Cub', 'SHR/OlaIpcv'] + 'BXH': ['BHF1', 'HBF1', 'C57BL/6J', 'C3H/HeJ'], + 'AKXD': ['AKF1', 'KAF1', 'AKR/J', 'DBA/2J'], + 'BXD': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'], + 'C57BL-6JxC57BL-6NJF2': ['', '', 'C57BL/6J', 'C57BL/6NJ'], + 'BXD300': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'], + 'B6BTBRF2': ['B6BTBRF1', 'BTBRB6F1', 'C57BL/6J', 'BTBRT<+>tf/J'], + 'BHHBF2': ['B6HF2', 'HB6F2', 'C57BL/6J', 'C3H/HeJ'], + 'BHF2': ['B6HF2', 'HB6F2', 'C57BL/6J', 'C3H/HeJ'], + 'B6D2F2': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'], + 'BDF2-1999': ['B6D2F2', 'D2B6F2', 'C57BL/6J', 'DBA/2J'], + 'BDF2-2005': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'], + 'CTB6F2': ['CTB6F2', 'B6CTF2', 'C57BL/6J', 'Castaneous'], + 'CXB': ['CBF1', 'BCF1', 'C57BL/6ByJ', 'BALB/cByJ'], + 'AXBXA': ['ABF1', 'BAF1', 'C57BL/6J', 'A/J'], + 'AXB': ['ABF1', 'BAF1', 'C57BL/6J', 'A/J'], + 'BXA': ['BAF1', 'ABF1', 'C57BL/6J', 'A/J'], + 'LXS': ['LSF1', 'SLF1', 'ISS', 'ILS'], + 'HXBBXH': ['SHR_BNF1', 'BN_SHRF1', 'BN-Lx/Cub', 'SHR/OlaIpcv'], + 'BayXSha': ['BayXShaF1', 
'ShaXBayF1', 'Bay-0', 'Shahdara'], + 'ColXBur': ['ColXBurF1', 'BurXColF1', 'Col-0', 'Bur-0'], + 'ColXCvi': ['ColXCviF1', 'CviXColF1', 'Col-0', 'Cvi'], + 'SXM': ['SMF1', 'MSF1', 'Steptoe', 'Morex'], + 'HRDP': ['SHR_BNF1', 'BN_SHRF1', 'BN-Lx/Cub', 'SHR/OlaIpcv'] } ######################################### diff --git a/wqflask/wqflask/api/correlation.py b/wqflask/wqflask/api/correlation.py index 52026a82..870f3275 100644 --- a/wqflask/wqflask/api/correlation.py +++ b/wqflask/wqflask/api/correlation.py @@ -88,7 +88,7 @@ def calculate_results(this_trait, this_dataset, target_dataset, corr_params): corr_results = do_literature_correlation_for_all_traits( this_trait, this_dataset, trait_geneid_dict, corr_params) sorted_results = collections.OrderedDict(sorted(list(corr_results.items()), - key=lambda t: -abs(t[1][1]))) + key=lambda t: -abs(t[1][1]))) else: for target_trait, target_vals in list(target_dataset.trait_data.items()): result = get_sample_r_and_p_values( diff --git a/wqflask/wqflask/api/mapping.py b/wqflask/wqflask/api/mapping.py index e4a3fb77..cbef96eb 100644 --- a/wqflask/wqflask/api/mapping.py +++ b/wqflask/wqflask/api/mapping.py @@ -53,15 +53,15 @@ def do_mapping_for_api(start_vars): header_row = ["name", "chr", "cM", "lod_score"] if mapping_params['num_perm'] > 0: _sperm_output, _suggestive, _significant, result_markers = rqtl_mapping.run_rqtl_geno(vals, dataset, mapping_params['rqtl_method'], mapping_params['rqtl_model'], - mapping_params['perm_check'], mapping_params['num_perm'], - mapping_params['do_control'], mapping_params[ - 'control_marker'], - mapping_params['manhattan_plot'], mapping_params['pair_scan']) + mapping_params['perm_check'], mapping_params['num_perm'], + mapping_params['do_control'], mapping_params[ + 'control_marker'], + mapping_params['manhattan_plot'], mapping_params['pair_scan']) else: result_markers = rqtl_mapping.run_rqtl_geno(vals, dataset, mapping_params['rqtl_method'], mapping_params['rqtl_model'], - mapping_params['perm_check'], mapping_params['num_perm'], - mapping_params['do_control'], mapping_params['control_marker'], - mapping_params['manhattan_plot'], mapping_params['pair_scan']) + mapping_params['perm_check'], mapping_params['num_perm'], + mapping_params['do_control'], mapping_params['control_marker'], + mapping_params['manhattan_plot'], mapping_params['pair_scan']) if mapping_params['limit_to']: result_markers = result_markers[:mapping_params['limit_to']] diff --git a/wqflask/wqflask/api/router.py b/wqflask/wqflask/api/router.py index f7d52ca3..9d3446db 100644 --- a/wqflask/wqflask/api/router.py +++ b/wqflask/wqflask/api/router.py @@ -41,10 +41,10 @@ def get_species_list(): species_list = [] for species in the_species: species_dict = { - "Id": species[0], - "Name": species[1], - "FullName": species[2], - "TaxonomyId": species[3] + "Id": species[0], + "Name": species[1], + "FullName": species[2], + "TaxonomyId": species[3] } species_list.append(species_dict) @@ -60,10 +60,10 @@ def get_species_info(species_name, file_format="json"): the_species = results.fetchone() species_dict = { - "Id": the_species[0], - "Name": the_species[1], - "FullName": the_species[2], - "TaxonomyId": the_species[3] + "Id": the_species[0], + "Name": the_species[1], + "FullName": the_species[2], + "TaxonomyId": the_species[3] } return flask.jsonify(species_dict) @@ -92,12 +92,12 @@ def get_groups_list(species_name=None): groups_list = [] for group in the_groups: group_dict = { - "Id": group[0], - "SpeciesId": group[1], - "DisplayName": group[2], - "Name": group[3], - 
"FullName": group[4], - "public": group[5], + "Id": group[0], + "SpeciesId": group[1], + "DisplayName": group[2], + "Name": group[3], + "FullName": group[4], + "public": group[5], "MappingMethodId": group[6], "GeneticType": group[7] } @@ -137,12 +137,12 @@ def get_group_info(group_name, species_name=None, file_format="json"): group = results.fetchone() if group: group_dict = { - "Id": group[0], - "SpeciesId": group[1], - "DisplayName": group[2], - "Name": group[3], - "FullName": group[4], - "public": group[5], + "Id": group[0], + "SpeciesId": group[1], + "DisplayName": group[2], + "Name": group[3], + "FullName": group[4], + "public": group[5], "MappingMethodId": group[6], "GeneticType": group[7] } @@ -186,12 +186,12 @@ def get_datasets_for_group(group_name, species_name=None): datasets_list = [] for dataset in the_datasets: dataset_dict = { - "Id": dataset[0], - "ProbeFreezeId": dataset[1], - "AvgID": dataset[2], - "Short_Abbreviation": dataset[3], - "Long_Abbreviation": dataset[4], - "FullName": dataset[5], + "Id": dataset[0], + "ProbeFreezeId": dataset[1], + "AvgID": dataset[2], + "Short_Abbreviation": dataset[3], + "Long_Abbreviation": dataset[4], + "FullName": dataset[5], "ShortName": dataset[6], "CreateTime": dataset[7], "public": dataset[8], @@ -243,12 +243,12 @@ def get_dataset_info(dataset_name, group_name=None, file_format="json"): if dataset: dataset_dict = { - "dataset_type": "mRNA expression", - "id": dataset[0], - "name": dataset[1], - "full_name": dataset[2], - "short_name": dataset[3], - "data_scale": dataset[4], + "dataset_type": "mRNA expression", + "id": dataset[0], + "name": dataset[1], + "full_name": dataset[2], + "short_name": dataset[3], + "data_scale": dataset[4], "tissue_id": dataset[5], "tissue": dataset[6], "public": dataset[7], @@ -280,25 +280,25 @@ def get_dataset_info(dataset_name, group_name=None, file_format="json"): if dataset: if dataset[5]: dataset_dict = { - "dataset_type": "phenotype", - "id": dataset[0], - "name": dataset[1], - "description": dataset[2], - "pubmed_id": dataset[5], - "title": dataset[6], + "dataset_type": "phenotype", + "id": dataset[0], + "name": dataset[1], + "description": dataset[2], + "pubmed_id": dataset[5], + "title": dataset[6], "year": dataset[7] } elif dataset[4]: dataset_dict = { - "dataset_type": "phenotype", - "id": dataset[0], - "name": dataset[3], - "description": dataset[4] + "dataset_type": "phenotype", + "id": dataset[0], + "name": dataset[3], + "description": dataset[4] } else: dataset_dict = { - "dataset_type": "phenotype", - "id": dataset[0] + "dataset_type": "phenotype", + "id": dataset[0] } datasets_list.append(dataset_dict) @@ -364,7 +364,7 @@ def fetch_traits(dataset_name, file_format="json"): """ field_list = ["Id", "Name", "Symbol", "Description", "Chr", "Mb", - "Aliases", "Mean", "SE", "Locus", "LRS", "P-Value", "Additive", "h2"] + "Aliases", "Mean", "SE", "Locus", "LRS", "P-Value", "Additive", "h2"] elif data_type == "Geno": query = """ SELECT @@ -382,7 +382,7 @@ def fetch_traits(dataset_name, file_format="json"): """ field_list = ["Id", "Name", "Marker_Name", - "Chr", "Mb", "Sequence", "Source"] + "Chr", "Mb", "Sequence", "Source"] else: query = """ SELECT @@ -399,7 +399,7 @@ def fetch_traits(dataset_name, file_format="json"): """ field_list = ["Id", "PhenotypeId", "PublicationId", - "Locus", "LRS", "Additive", "Sequence"] + "Locus", "LRS", "Additive", "Sequence"] if 'limit_to' in request.args: limit_number = request.args['limit_to'] @@ -579,10 +579,10 @@ def trait_sample_data(dataset_name, trait_name, 
file_format="json"): sample_list = [] for sample in sample_data: sample_dict = { - "sample_name": sample[0], - "sample_name_2": sample[1], - "value": sample[2], - "data_id": sample[3], + "sample_name": sample[0], + "sample_name_2": sample[1], + "value": sample[2], + "data_id": sample[3], } if sample[4]: sample_dict["se"] = sample[4] @@ -626,10 +626,10 @@ def trait_sample_data(dataset_name, trait_name, file_format="json"): sample_list = [] for sample in sample_data: sample_dict = { - "sample_name": sample[0], - "sample_name_2": sample[1], - "value": sample[2], - "data_id": sample[3] + "sample_name": sample[0], + "sample_name_2": sample[1], + "value": sample[2], + "data_id": sample[3] } if sample[4]: sample_dict["se"] = sample[4] @@ -796,9 +796,9 @@ def get_genotypes(group_name, file_format="csv", dataset_name=None): config_file = [filename + ".json", json.dumps(yaml_file)] #config_file = [filename + ".yaml", open("{0}/{1}.yaml".format(flat_files("genotype/rqtl2"), group_name))] geno_file = [filename + "_geno.csv", - open("{0}/{1}_geno.csv".format(flat_files("genotype/rqtl2"), group_name))] + open("{0}/{1}_geno.csv".format(flat_files("genotype/rqtl2"), group_name))] gmap_file = [filename + "_gmap.csv", - open("{0}/{1}_gmap.csv".format(flat_files("genotype/rqtl2"), group_name))] + open("{0}/{1}_gmap.csv".format(flat_files("genotype/rqtl2"), group_name))] if dataset_name: phenotypes = requests.get( "http://gn2.genenetwork.org/api/v_pre1/sample_data/" + dataset_name) @@ -828,7 +828,7 @@ def get_genotypes(group_name, file_format="csv", dataset_name=None): if limit_num and i >= limit_num: break output_lines.append([line.strip() - for line in line.split(",")]) + for line in line.split(",")]) i += 1 csv_writer = csv.writer(si, delimiter=",") @@ -914,7 +914,7 @@ def get_dataset_trait_ids(dataset_name, start_vars): trait_ids = [result[0] for result in results] trait_names = [str(result[2]) + "_" + str(result[1]) - for result in results] + for result in results] return trait_ids, trait_names, data_type, dataset_id diff --git a/wqflask/wqflask/collect.py b/wqflask/wqflask/collect.py index b06d84ff..58518639 100644 --- a/wqflask/wqflask/collect.py +++ b/wqflask/wqflask/collect.py @@ -75,14 +75,14 @@ def collections_add(): if 'traits' in request.args: traits = request.args['traits'] return render_template("collections/add.html", - traits=traits, - collections=collections, + traits=traits, + collections=collections, ) else: hash = request.args['hash'] return render_template("collections/add.html", - hash=hash, - collections=collections, + hash=hash, + collections=collections, ) @@ -145,8 +145,8 @@ def list_collections(): user_collections = list(g.user_session.user_collections) return render_template("collections/list.html", - params=params, - collections=user_collections, + params=params, + collections=user_collections, ) @@ -225,7 +225,7 @@ def view_collection(): return json.dumps(json_version) else: return render_template("collections/view.html", - **collection_info + **collection_info ) diff --git a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py index d86c8e16..cb88eb53 100644 --- a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py +++ b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py @@ -35,7 +35,7 @@ class ComparisonBarChart: def __init__(self, start_vars): trait_db_list = [trait.strip() - for trait in start_vars['trait_list'].split(',')] + for trait in start_vars['trait_list'].split(',')] 
helper_functions.get_trait_db_obs(self, trait_db_list) @@ -90,8 +90,8 @@ class ComparisonBarChart: #print("dataset_name:", dataset_name) dataset_ob = data_set.create_dataset(dataset_name) trait_ob = create_trait(dataset=dataset_ob, - name=trait_name, - cellid=None) + name=trait_name, + cellid=None) self.trait_list.append((trait_ob, dataset_ob)) #print("trait_list:", self.trait_list) diff --git a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py index e8b7b057..aa39bc5c 100644 --- a/wqflask/wqflask/correlation/show_corr_results.py +++ b/wqflask/wqflask/correlation/show_corr_results.py @@ -82,8 +82,8 @@ class CorrelationResults: dataset_name="Temp", dataset_type="Temp", group_name = start_vars['group']) self.trait_id = start_vars['trait_id'] self.this_trait = create_trait(dataset=self.dataset, - name=self.trait_id, - cellid=None) + name=self.trait_id, + cellid=None) else: helper_functions.get_species_dataset_trait(self, start_vars) @@ -98,7 +98,7 @@ class CorrelationResults: if ('loc_chr' in start_vars and 'min_loc_mb' in start_vars - and 'max_loc_mb' in start_vars): + and 'max_loc_mb' in start_vars): self.location_type = get_string(start_vars, 'location_type') self.location_chr = get_string(start_vars, 'loc_chr') @@ -129,7 +129,7 @@ class CorrelationResults: if corr_samples_group != 'samples_primary': if corr_samples_group == 'samples_other': primary_samples = [x for x in primary_samples if x not in ( - self.dataset.group.parlist + self.dataset.group.f1list)] + self.dataset.group.parlist + self.dataset.group.f1list)] self.process_samples(start_vars, list( self.this_trait.data.keys()), primary_samples) @@ -201,7 +201,7 @@ class CorrelationResults: chr_as_int = order_id if (float(self.correlation_data[trait][0]) >= self.p_range_lower - and float(self.correlation_data[trait][0]) <= self.p_range_upper): + and float(self.correlation_data[trait][0]) <= self.p_range_upper): if (self.target_dataset.type == "ProbeSet" or self.target_dataset.type == "Publish") and bool(trait_object.mean): if (self.min_expr != None) and (float(trait_object.mean) < self.min_expr): @@ -221,8 +221,8 @@ class CorrelationResults: continue (trait_object.sample_r, - trait_object.sample_p, - trait_object.num_overlap) = self.correlation_data[trait] + trait_object.sample_p, + trait_object.num_overlap) = self.correlation_data[trait] # Set some sane defaults trait_object.tissue_corr = 0 @@ -277,7 +277,7 @@ class CorrelationResults: trait.symbol for trait in self.correlation_results if trait.symbol] corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values( - symbol_list=gene_symbol_list) + symbol_list=gene_symbol_list) for trait in self.correlation_results: if trait.symbol and trait.symbol.lower() in corr_result_tissue_vals_dict: @@ -285,8 +285,8 @@ class CorrelationResults: )] result = correlation_functions.cal_zero_order_corr_for_tiss(primary_trait_tissue_values, - this_trait_tissue_values, - self.corr_method) + this_trait_tissue_values, + self.corr_method) trait.tissue_corr = result[0] trait.tissue_pvalue = result[2] @@ -302,7 +302,7 @@ class CorrelationResults: #print("trait_gene_symbols: ", pf(trait_gene_symbols.values())) corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values( - symbol_list=list(self.trait_symbol_dict.values())) + symbol_list=list(self.trait_symbol_dict.values())) #print("corr_result_tissue_vals: ", pf(corr_result_tissue_vals_dict)) @@ -315,13 +315,13 @@ class CorrelationResults: )] result = 
correlation_functions.cal_zero_order_corr_for_tiss(primary_trait_tissue_values, - this_trait_tissue_values, - self.corr_method) + this_trait_tissue_values, + self.corr_method) tissue_corr_data[trait] = [symbol, result[0], result[2]] tissue_corr_data = collections.OrderedDict(sorted(list(tissue_corr_data.items()), - key=lambda t: -abs(t[1][1]))) + key=lambda t: -abs(t[1][1]))) return tissue_corr_data @@ -397,7 +397,7 @@ class CorrelationResults: lit_corr_data[trait] = [gene_id, 0] lit_corr_data = collections.OrderedDict(sorted(list(lit_corr_data.items()), - key=lambda t: -abs(t[1][1]))) + key=lambda t: -abs(t[1][1]))) return lit_corr_data @@ -603,12 +603,12 @@ def get_header_fields(data_type, corr_method): if data_type == "ProbeSet": if corr_method == "spearman": header_fields = ['Index', - 'Record', - 'Symbol', - 'Description', - 'Location', - 'Mean', - 'Sample rho', + 'Record', + 'Symbol', + 'Description', + 'Location', + 'Mean', + 'Sample rho', 'N', 'Sample p(rho)', 'Lit rho', @@ -619,12 +619,12 @@ def get_header_fields(data_type, corr_method): 'Additive Effect'] else: header_fields = ['Index', - 'Record', - 'Symbol', - 'Description', - 'Location', - 'Mean', - 'Sample r', + 'Record', + 'Symbol', + 'Description', + 'Location', + 'Mean', + 'Sample r', 'N', 'Sample p(r)', 'Lit r', @@ -636,47 +636,47 @@ def get_header_fields(data_type, corr_method): elif data_type == "Publish": if corr_method == "spearman": header_fields = ['Index', - 'Record', - 'Abbreviation', - 'Description', - 'Mean', - 'Authors', - 'Year', - 'Sample rho', - 'N', - 'Sample p(rho)', - 'Max LRS', - 'Max LRS Location', - 'Additive Effect'] + 'Record', + 'Abbreviation', + 'Description', + 'Mean', + 'Authors', + 'Year', + 'Sample rho', + 'N', + 'Sample p(rho)', + 'Max LRS', + 'Max LRS Location', + 'Additive Effect'] else: header_fields = ['Index', - 'Record', - 'Abbreviation', - 'Description', - 'Mean', - 'Authors', - 'Year', - 'Sample r', - 'N', - 'Sample p(r)', - 'Max LRS', - 'Max LRS Location', - 'Additive Effect'] + 'Record', + 'Abbreviation', + 'Description', + 'Mean', + 'Authors', + 'Year', + 'Sample r', + 'N', + 'Sample p(r)', + 'Max LRS', + 'Max LRS Location', + 'Additive Effect'] else: if corr_method == "spearman": header_fields = ['Index', - 'ID', - 'Location', - 'Sample rho', - 'N', - 'Sample p(rho)'] + 'ID', + 'Location', + 'Sample rho', + 'N', + 'Sample p(rho)'] else: header_fields = ['Index', - 'ID', - 'Location', - 'Sample r', - 'N', - 'Sample p(r)'] + 'ID', + 'Location', + 'Sample r', + 'N', + 'Sample p(r)'] return header_fields diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py index c04b17be..c1bf3daa 100644 --- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py +++ b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py @@ -45,7 +45,7 @@ class CorrelationMatrix: def __init__(self, start_vars): trait_db_list = [trait.strip() - for trait in start_vars['trait_list'].split(',')] + for trait in start_vars['trait_list'].split(',')] helper_functions.get_trait_db_obs(self, trait_db_list) @@ -215,7 +215,7 @@ class CorrelationMatrix: temp_dataset.group.get_samplelist() for i, pca_trait in enumerate(pca_traits): trait_id = "PCA" + str(i + 1) + "_" + temp_dataset.group.species + "_" + \ - this_group_name + "_" + datetime.datetime.now().strftime("%m%d%H%M%S") + this_group_name + "_" + datetime.datetime.now().strftime("%m%d%H%M%S") this_vals_string = "" position = 0 for sample in temp_dataset.group.all_samples_ordered(): diff --git 
a/wqflask/wqflask/ctl/ctl_analysis.py b/wqflask/wqflask/ctl/ctl_analysis.py index 820e81bc..a0fb34d8 100644 --- a/wqflask/wqflask/ctl/ctl_analysis.py +++ b/wqflask/wqflask/ctl/ctl_analysis.py @@ -66,12 +66,12 @@ class CTL: def addNode(self, gt): node_dict = {'data': {'id': str(gt.name) + ":" + str(gt.dataset.name), - 'sid': str(gt.name), - 'dataset': str(gt.dataset.name), - 'label': gt.name, - 'symbol': gt.symbol, - 'geneid': gt.geneid, - 'omim': gt.omim}} + 'sid': str(gt.name), + 'dataset': str(gt.dataset.name), + 'label': gt.name, + 'symbol': gt.symbol, + 'geneid': gt.geneid, + 'omim': gt.omim}} self.nodes_list.append(node_dict) def addEdge(self, gtS, gtT, significant, x): @@ -87,7 +87,7 @@ class CTL: def run_analysis(self, requestform): logger.info("Starting CTL analysis on dataset") self.trait_db_list = [trait.strip() - for trait in requestform['trait_list'].split(',')] + for trait in requestform['trait_list'].split(',')] self.trait_db_list = [x for x in self.trait_db_list if x] logger.debug("strategy:", requestform.get("strategy")) @@ -179,9 +179,9 @@ class CTL: for trait in self.trait_db_list: # Create the QTL like CTL plots self.results['imgurl' + \ - str(n)] = webqtlUtil.genRandStr("CTL_") + ".png" + str(n)] = webqtlUtil.genRandStr("CTL_") + ".png" self.results['imgloc' + str(n)] = GENERATED_IMAGE_DIR + \ - self.results['imgurl' + str(n)] + self.results['imgurl' + str(n)] r_png(self.results['imgloc' + str(n)], width=1000, height=600, type='cairo-png') self.r_plotCTLobject( diff --git a/wqflask/wqflask/do_search.py b/wqflask/wqflask/do_search.py index 48527785..761ae326 100644 --- a/wqflask/wqflask/do_search.py +++ b/wqflask/wqflask/do_search.py @@ -112,7 +112,7 @@ class MrnaAssaySearch(DoSearch): match_clause = "" where_clause = (match_clause - + """ProbeSet.Id = ProbeSetXRef.ProbeSetId + + """ProbeSet.Id = ProbeSetXRef.ProbeSetId and ProbeSetXRef.ProbeSetFreezeId = %s """ % (escape(str(self.dataset.id)))) @@ -135,7 +135,7 @@ class MrnaAssaySearch(DoSearch): match_clause = "" where_clause = (match_clause - + """ProbeSet.Id = ProbeSetXRef.ProbeSetId + + """ProbeSet.Id = ProbeSetXRef.ProbeSetId and ProbeSetXRef.ProbeSetFreezeId = %s """ % (escape(str(self.dataset.id)))) @@ -147,14 +147,14 @@ class MrnaAssaySearch(DoSearch): from_clause = self.normalize_spaces(from_clause) query = (self.base_query + - """%s + """%s WHERE %s and ProbeSet.Id = ProbeSetXRef.ProbeSetId and ProbeSetXRef.ProbeSetFreezeId = %s ORDER BY ProbeSet.symbol ASC """ % (escape(from_clause), - where_clause, - escape(str(self.dataset.id)))) + where_clause, + escape(str(self.dataset.id)))) return query def run_combined(self, from_clause='', where_clause=''): @@ -166,14 +166,14 @@ class MrnaAssaySearch(DoSearch): from_clause = self.normalize_spaces(from_clause) query = (self.base_query + - """%s + """%s WHERE %s and ProbeSet.Id = ProbeSetXRef.ProbeSetId and ProbeSetXRef.ProbeSetFreezeId = %s ORDER BY ProbeSet.symbol ASC """ % (escape(from_clause), - where_clause, - escape(str(self.dataset.id)))) + where_clause, + escape(str(self.dataset.id)))) return self.execute(query) @@ -199,15 +199,15 @@ class PhenotypeSearch(DoSearch): FROM Phenotype, PublishFreeze, Publication, PublishXRef """ search_fields = ('Phenotype.Post_publication_description', - 'Phenotype.Pre_publication_description', - 'Phenotype.Pre_publication_abbreviation', - 'Phenotype.Post_publication_abbreviation', - 'Phenotype.Lab_code', - 'Publication.PubMed_ID', - 'Publication.Abstract', - 'Publication.Title', - 'Publication.Authors', - 'PublishXRef.Id') + 
'Phenotype.Pre_publication_description', + 'Phenotype.Pre_publication_abbreviation', + 'Phenotype.Post_publication_abbreviation', + 'Phenotype.Lab_code', + 'Publication.PubMed_ID', + 'Publication.Abstract', + 'Publication.Title', + 'Publication.Authors', + 'PublishXRef.Id') header_fields = ['Index', 'Record', @@ -250,28 +250,28 @@ class PhenotypeSearch(DoSearch): if self.search_term[0] == "*": query = (self.base_query + - """%s + """%s WHERE PublishXRef.InbredSetId = %s and PublishXRef.PhenotypeId = Phenotype.Id and PublishXRef.PublicationId = Publication.Id and PublishFreeze.Id = %s ORDER BY PublishXRef.Id""" % ( - from_clause, - escape(str(self.dataset.group.id)), - escape(str(self.dataset.id)))) + from_clause, + escape(str(self.dataset.group.id)), + escape(str(self.dataset.id)))) else: query = (self.base_query + - """%s + """%s WHERE %s and PublishXRef.InbredSetId = %s and PublishXRef.PhenotypeId = Phenotype.Id and PublishXRef.PublicationId = Publication.Id and PublishFreeze.Id = %s ORDER BY PublishXRef.Id""" % ( - from_clause, - where_clause, - escape(str(self.dataset.group.id)), - escape(str(self.dataset.id)))) + from_clause, + where_clause, + escape(str(self.dataset.group.id)), + escape(str(self.dataset.id)))) return query @@ -283,16 +283,16 @@ class PhenotypeSearch(DoSearch): from_clause = self.normalize_spaces(from_clause) query = (self.base_query + - """%s + """%s WHERE %s PublishXRef.InbredSetId = %s and PublishXRef.PhenotypeId = Phenotype.Id and PublishXRef.PublicationId = Publication.Id and PublishFreeze.Id = %s""" % ( - from_clause, - where_clause, - escape(str(self.dataset.group.id)), - escape(str(self.dataset.id)))) + from_clause, + where_clause, + escape(str(self.dataset.group.id)), + escape(str(self.dataset.id)))) return self.execute(query) @@ -336,7 +336,7 @@ class GenotypeSearch(DoSearch): for field in self.search_fields: where_clause.append('''%s REGEXP "%s"''' % ("%s.%s" % self.mescape(self.dataset.type, field), - self.search_term)) + self.search_term)) logger.debug("hello ;where_clause is:", pf(where_clause)) where_clause = "(%s) " % ' OR '.join(where_clause) @@ -349,16 +349,16 @@ class GenotypeSearch(DoSearch): if self.search_term[0] == "*": query = (self.base_query - + """WHERE Geno.Id = GenoXRef.GenoId + + """WHERE Geno.Id = GenoXRef.GenoId and GenoXRef.GenoFreezeId = GenoFreeze.Id and GenoFreeze.Id = %s""" % (escape(str(self.dataset.id)))) else: query = (self.base_query + - """WHERE %s + """WHERE %s and Geno.Id = GenoXRef.GenoId and GenoXRef.GenoFreezeId = GenoFreeze.Id and GenoFreeze.Id = %s""" % (where_clause, - escape(str(self.dataset.id)))) + escape(str(self.dataset.id)))) return query @@ -526,7 +526,7 @@ class LrsSearch(DoSearch): where_clause += """ and %sXRef.Locus = Geno.name and Geno.SpeciesId = %s """ % self.mescape(self.dataset.type, - self.species_id) + self.species_id) else: # Deal with >, <, >=, and <= logger.debug("self.search_term is:", self.search_term) @@ -535,8 +535,8 @@ class LrsSearch(DoSearch): lrs_val = lrs_val * 4.61 where_clause = """ %sXRef.LRS %s %s """ % self.mescape(self.dataset.type, - self.search_operator, - self.search_term[0]) + self.search_operator, + self.search_term[0]) return where_clause @@ -618,18 +618,18 @@ class CisTransLrsSearch(DoSearch): sub_clause = """ %sXRef.LRS > %s and %sXRef.LRS < %s and """ % ( - escape(self.dataset.type), - escape(str(min(lrs_min, lrs_max))), - escape(self.dataset.type), - escape(str(max(lrs_min, lrs_max))) - ) + escape(self.dataset.type), + escape(str(min(lrs_min, lrs_max))), + 
escape(self.dataset.type), + escape(str(max(lrs_min, lrs_max))) + ) else: # Deal with >, <, >=, and <= sub_clause = """ %sXRef.LRS %s %s and """ % ( - escape(self.dataset.type), - escape(self.search_operator), - escape(self.search_term[0]) - ) + escape(self.dataset.type), + escape(self.search_operator), + escape(self.search_term[0]) + ) if cis_trans == "cis": where_clause = sub_clause + """ @@ -637,27 +637,27 @@ class CisTransLrsSearch(DoSearch): %sXRef.Locus = Geno.name and Geno.SpeciesId = %s and %s.Chr = Geno.Chr""" % ( - escape(self.dataset.type), - the_operator, - escape(str(self.mb_buffer)), - escape(self.dataset.type), - escape(str(self.species_id)), - escape(self.dataset.type) - ) + escape(self.dataset.type), + the_operator, + escape(str(self.mb_buffer)), + escape(self.dataset.type), + escape(str(self.species_id)), + escape(self.dataset.type) + ) else: if chromosome: location_clause = "(%s.Chr = '%s' and %s.Chr = Geno.Chr and ABS(%s.Mb-Geno.Mb) %s %s) or (%s.Chr != Geno.Chr and Geno.Chr = '%s')" % (escape(self.dataset.type), - chromosome, - escape( + chromosome, + escape( self.dataset.type), - escape( + escape( self.dataset.type), - the_operator, - escape( + the_operator, + escape( str(self.mb_buffer)), - escape( + escape( self.dataset.type), - chromosome) + chromosome) else: location_clause = "(ABS(%s.Mb-Geno.Mb) %s %s and %s.Chr = Geno.Chr) or (%s.Chr != Geno.Chr)" % (escape( self.dataset.type), the_operator, escape(str(self.mb_buffer)), escape(self.dataset.type), escape(self.dataset.type)) @@ -665,10 +665,10 @@ class CisTransLrsSearch(DoSearch): %sXRef.Locus = Geno.name and Geno.SpeciesId = %s and (%s)""" % ( - escape(self.dataset.type), - escape(str(self.species_id)), - location_clause - ) + escape(self.dataset.type), + escape(str(self.species_id)), + location_clause + ) return where_clause @@ -752,15 +752,15 @@ class MeanSearch(MrnaAssaySearch): where_clause = """ %sXRef.mean > %s and %sXRef.mean < %s """ % self.mescape(self.dataset.type, - min(self.mean_min, - self.mean_max), - self.dataset.type, - max(self.mean_min, self.mean_max)) + min(self.mean_min, + self.mean_max), + self.dataset.type, + max(self.mean_min, self.mean_max)) else: # Deal with >, <, >=, and <= where_clause = """ %sXRef.mean %s %s """ % self.mescape(self.dataset.type, - self.search_operator, - self.search_term[0]) + self.search_operator, + self.search_term[0]) return where_clause @@ -893,17 +893,17 @@ class PvalueSearch(MrnaAssaySearch): self.pvalue_min, self.pvalue_max = self.search_term[:2] self.where_clause = """ %sXRef.pValue > %s and %sXRef.pValue < %s """ % self.mescape( - self.dataset.type, - min(self.pvalue_min, self.pvalue_max), - self.dataset.type, - max(self.pvalue_min, self.pvalue_max)) + self.dataset.type, + min(self.pvalue_min, self.pvalue_max), + self.dataset.type, + max(self.pvalue_min, self.pvalue_max)) else: # Deal with >, <, >=, and <= self.where_clause = """ %sXRef.pValue %s %s """ % self.mescape( - self.dataset.type, - self.search_operator, - self.search_term[0]) + self.dataset.type, + self.search_operator, + self.search_term[0]) logger.debug("where_clause is:", pf(self.where_clause)) diff --git a/wqflask/wqflask/export_traits.py b/wqflask/wqflask/export_traits.py index 2c180d49..a22d6acc 100644 --- a/wqflask/wqflask/export_traits.py +++ b/wqflask/wqflask/export_traits.py @@ -60,7 +60,7 @@ def export_search_results_csv(targs): trait_list.append(trait_ob) table_headers = ['Index', 'URL', 'Species', 'Group', 'Dataset', 'Record ID', 'Symbol', 'Description', 'ProbeTarget', 'PubMed_ID', 
'Chr', 'Mb', 'Alias', 'Gene_ID', 'Homologene_ID', 'UniGene_ID', - 'Strand_Probe', 'Probe_set_specificity', 'Probe_set_BLAT_score', 'Probe_set_BLAT_Mb_start', 'Probe_set_BLAT_Mb_end', 'QTL_Chr', 'QTL_Mb', 'Locus_at_Peak', 'Max_LRS', 'P_value_of_MAX', 'Mean_Expression'] + 'Strand_Probe', 'Probe_set_specificity', 'Probe_set_BLAT_score', 'Probe_set_BLAT_Mb_start', 'Probe_set_BLAT_Mb_end', 'QTL_Chr', 'QTL_Mb', 'Locus_at_Peak', 'Max_LRS', 'P_value_of_MAX', 'Mean_Expression'] traits_by_group = sort_traits_by_group(trait_list) @@ -93,7 +93,7 @@ def export_search_results_csv(targs): row_contents = [ i + 1, "https://genenetwork.org/show_trait?trait_id=" + \ - str(trait.name) + "&dataset=" + str(trait.dataset.name), + str(trait.name) + "&dataset=" + str(trait.dataset.name), trait.dataset.group.species, trait.dataset.group.name, trait.dataset.name, @@ -124,7 +124,7 @@ def export_search_results_csv(targs): for sample in trait.dataset.group.samplelist: if sample in trait.data: row_contents += [trait.data[sample].value, - trait.data[sample].variance] + trait.data[sample].variance] else: row_contents += ["x", "x"] diff --git a/wqflask/wqflask/external_tools/send_to_bnw.py b/wqflask/wqflask/external_tools/send_to_bnw.py index 3c0f2ca7..c1b14ede 100644 --- a/wqflask/wqflask/external_tools/send_to_bnw.py +++ b/wqflask/wqflask/external_tools/send_to_bnw.py @@ -28,7 +28,7 @@ logger = utility.logger.getLogger(__name__) class SendToBNW: def __init__(self, start_vars): trait_db_list = [trait.strip() - for trait in start_vars['trait_list'].split(',')] + for trait in start_vars['trait_list'].split(',')] helper_functions.get_trait_db_obs(self, trait_db_list) trait_samples_list = [] diff --git a/wqflask/wqflask/external_tools/send_to_geneweaver.py b/wqflask/wqflask/external_tools/send_to_geneweaver.py index 8af9bee9..9a4f7150 100644 --- a/wqflask/wqflask/external_tools/send_to_geneweaver.py +++ b/wqflask/wqflask/external_tools/send_to_geneweaver.py @@ -33,7 +33,7 @@ logger = utility.logger.getLogger(__name__) class SendToGeneWeaver: def __init__(self, start_vars): trait_db_list = [trait.strip() - for trait in start_vars['trait_list'].split(',')] + for trait in start_vars['trait_list'].split(',')] helper_functions.get_trait_db_obs(self, trait_db_list) self.chip_name = test_chip(self.trait_list) @@ -54,11 +54,11 @@ class SendToGeneWeaver: trait_name_list = get_trait_name_list(self.trait_list) self.hidden_vars = { - 'client': "genenetwork", - 'species': species_name, - 'idtype': self.chip_name, - 'list': ",".join(trait_name_list), - } + 'client': "genenetwork", + 'species': species_name, + 'idtype': self.chip_name, + 'list': ",".join(trait_name_list), + } def get_trait_name_list(trait_list): diff --git a/wqflask/wqflask/external_tools/send_to_webgestalt.py b/wqflask/wqflask/external_tools/send_to_webgestalt.py index fcd943ba..6e74f4fe 100644 --- a/wqflask/wqflask/external_tools/send_to_webgestalt.py +++ b/wqflask/wqflask/external_tools/send_to_webgestalt.py @@ -33,7 +33,7 @@ logger = utility.logger.getLogger(__name__) class SendToWebGestalt: def __init__(self, start_vars): trait_db_list = [trait.strip() - for trait in start_vars['trait_list'].split(',')] + for trait in start_vars['trait_list'].split(',')] helper_functions.get_trait_db_obs(self, trait_db_list) self.chip_name = test_chip(self.trait_list) @@ -49,7 +49,7 @@ class SendToWebGestalt: id_type = "entrezgene" self.hidden_vars = { - 'gene_list': "\n".join(gene_id_list), + 'gene_list': "\n".join(gene_id_list), 'id_type': "entrezgene", 'ref_set': "genome", 
'enriched_database_category': "geneontology", @@ -59,7 +59,7 @@ class SendToWebGestalt: 'enrich_method': "ORA", 'fdr_method': "BH", 'min_num': "2" - } + } species = self.trait_list[0][1].group.species if species == "rat": diff --git a/wqflask/wqflask/group_manager.py b/wqflask/wqflask/group_manager.py index 995915a9..b7e7e38a 100644 --- a/wqflask/wqflask/group_manager.py +++ b/wqflask/wqflask/group_manager.py @@ -8,7 +8,7 @@ from wqflask import app from wqflask.user_login import send_verification_email, send_invitation_email, basic_info, set_password from utility.redis_tools import get_user_groups, get_group_info, save_user, create_group, delete_group, add_users_to_group, remove_users_from_group, \ - change_group_name, save_verification_code, check_verification_code, get_user_by_unique_column, get_resources, get_resource_info + change_group_name, save_verification_code, check_verification_code, get_user_by_unique_column, get_resources, get_resource_info from utility.logger import getLogger logger = getLogger(__name__) @@ -78,9 +78,9 @@ def remove_users(): member_ids_to_remove = request.form['selected_member_ids'] remove_users_from_group(g.user_session.user_id, admin_ids_to_remove.split( - ":"), group_id, user_type="admins") + ":"), group_id, user_type="admins") remove_users_from_group(g.user_session.user_id, member_ids_to_remove.split( - ":"), group_id, user_type="members") + ":"), group_id, user_type="members") return redirect(url_for('view_group', id=group_id)) @@ -133,7 +133,7 @@ def add_or_edit_group(): #send_group_invites(params['group_id'], user_email_list = user_emails, user_type="members") create_group(list(admin_user_ids), list( - member_user_ids), params['group_name']) + member_user_ids), params['group_name']) return redirect(url_for('manage_groups')) else: return render_template("admin/create_group.html") @@ -159,13 +159,13 @@ def send_group_invites(group_id, user_email_list=[], user_type="members"): key_prefix="verification_code", subject = "You've been invited to join a GeneNetwork user group") else: temp_password = ''.join(random.choice( - string.ascii_uppercase + string.digits) for _ in range(6)) + string.ascii_uppercase + string.digits) for _ in range(6)) user_details = { - 'user_id': str(uuid.uuid4()), - 'email_address': user_email, - 'registration_info': basic_info(), - 'password': set_password(temp_password), - 'confirmed': 0 + 'user_id': str(uuid.uuid4()), + 'email_address': user_email, + 'registration_info': basic_info(), + 'password': set_password(temp_password), + 'confirmed': 0 } save_user(user_details, user_details['user_id']) send_invitation_email(user_email, temp_password) diff --git a/wqflask/wqflask/gsearch.py b/wqflask/wqflask/gsearch.py index f02da27c..fb8bdc55 100644 --- a/wqflask/wqflask/gsearch.py +++ b/wqflask/wqflask/gsearch.py @@ -124,7 +124,7 @@ class GSearch: if this_trait['locus_chr'] != None and this_trait['locus_mb'] != None: max_lrs_text = "Chr" + \ str(this_trait['locus_chr']) + \ - ": " + str(this_trait['locus_mb']) + ": " + str(this_trait['locus_mb']) this_trait['max_lrs_text'] = max_lrs_text trait_list.append(this_trait) @@ -133,18 +133,18 @@ class GSearch: self.trait_list = json.dumps(trait_list) self.header_fields = ['Index', - 'Record', - 'Species', - 'Group', - 'Tissue', - 'Dataset', - 'Symbol', - 'Description', - 'Location', - 'Mean', - 'Max LRS', - 'Max LRS Location', - 'Additive Effect'] + 'Record', + 'Species', + 'Group', + 'Tissue', + 'Dataset', + 'Symbol', + 'Description', + 'Location', + 'Mean', + 'Max LRS', + 'Max LRS Location', + 
'Additive Effect'] elif self.type == "phenotype": search_term = self.terms @@ -251,7 +251,7 @@ class GSearch: if trait_ob.locus_chr != "" and trait_ob.locus_mb != "": this_trait['max_lrs_text'] = "Chr" + \ str(trait_ob.locus_chr) + \ - ": " + str(trait_ob.locus_mb) + ": " + str(trait_ob.locus_mb) except: this_trait['max_lrs_text'] = "N/A" @@ -261,12 +261,12 @@ class GSearch: self.trait_list = json.dumps(trait_list) self.header_fields = ['Index', - 'Species', - 'Group', - 'Record', - 'Description', - 'Authors', - 'Year', - 'Max LRS', - 'Max LRS Location', - 'Additive Effect'] + 'Species', + 'Group', + 'Record', + 'Description', + 'Authors', + 'Year', + 'Max LRS', + 'Max LRS Location', + 'Additive Effect'] diff --git a/wqflask/wqflask/heatmap/heatmap.py b/wqflask/wqflask/heatmap/heatmap.py index 02eb66e5..001bab3b 100644 --- a/wqflask/wqflask/heatmap/heatmap.py +++ b/wqflask/wqflask/heatmap/heatmap.py @@ -19,7 +19,7 @@ class Heatmap: def __init__(self, start_vars, temp_uuid): trait_db_list = [trait.strip() - for trait in start_vars['trait_list'].split(',')] + for trait in start_vars['trait_list'].split(',')] helper_functions.get_trait_db_obs(self, trait_db_list) self.temp_uuid = temp_uuid @@ -35,7 +35,7 @@ class Heatmap: self.species = species.TheSpecies(dataset=self.trait_list[0][1]) for key in list(self.species.chromosomes.chromosomes.keys()): chrnames.append([self.species.chromosomes.chromosomes[key].name, - self.species.chromosomes.chromosomes[key].mb_length]) + self.species.chromosomes.chromosomes[key].mb_length]) for trait_db in self.trait_list: @@ -111,7 +111,7 @@ class Heatmap: trimmed_values.append(values[i]) trait_filename = str(this_trait.name) + "_" + \ - str(self.dataset.name) + "_pheno" + str(self.dataset.name) + "_pheno" gen_pheno_txt_file(trimmed_samples, trimmed_values, trait_filename) output_filename = self.dataset.group.name + "_GWA_" + \ @@ -119,11 +119,11 @@ class Heatmap: for _ in range(6)) reaper_command = REAPER_COMMAND + ' --geno {0}/{1}.geno --traits {2}/gn2/{3}.txt -n 1000 -o {4}{5}.txt'.format(flat_files('genotype'), - genofile_name, - TEMPDIR, - trait_filename, - webqtlConfig.GENERATED_IMAGE_DIR, - output_filename) + genofile_name, + TEMPDIR, + trait_filename, + webqtlConfig.GENERATED_IMAGE_DIR, + output_filename) os.system(reaper_command) diff --git a/wqflask/wqflask/interval_analyst/GeneUtil.py b/wqflask/wqflask/interval_analyst/GeneUtil.py index 2f1c142c..04980281 100644 --- a/wqflask/wqflask/interval_analyst/GeneUtil.py +++ b/wqflask/wqflask/interval_analyst/GeneUtil.py @@ -8,8 +8,8 @@ from flask import Flask, g def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): fetchFields = ['SpeciesId', 'Id', 'GeneSymbol', 'GeneDescription', 'Chromosome', 'TxStart', 'TxEnd', - 'Strand', 'GeneID', 'NM_ID', 'kgID', 'GenBankID', 'UnigenID', 'ProteinID', 'AlignID', - 'exonCount', 'exonStarts', 'exonEnds', 'cdsStart', 'cdsEnd'] + 'Strand', 'GeneID', 'NM_ID', 'kgID', 'GenBankID', 'UnigenID', 'ProteinID', 'AlignID', + 'exonCount', 'exonStarts', 'exonEnds', 'cdsStart', 'cdsEnd'] # List All Species in the Gene Table speciesDict = {} @@ -34,9 +34,9 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): ((TxStart > %f and TxStart <= %f) OR (TxEnd > %f and TxEnd <= %f)) ORDER BY txStart """ % (", ".join(fetchFields), - speciesId, chrName, - startMb, endMb, - startMb, endMb)).fetchall() + speciesId, chrName, + startMb, endMb, + startMb, endMb)).fetchall() GeneList = [] @@ -55,7 +55,7 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): 
StrainId1 = %d AND StrainId2 = %d """ % (chrName, newdict["TxStart"], newdict["TxEnd"], diffCol[0], diffCol[1])).fetchone()[0] newdict["snpDensity"] = newdict["snpCount"] / \ - (newdict["TxEnd"] - newdict["TxStart"]) / 1000.0 + (newdict["TxEnd"] - newdict["TxStart"]) / 1000.0 else: newdict["snpDensity"] = newdict["snpCount"] = 0 @@ -70,8 +70,8 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): newdict2 = {} resultsOther = g.db.execute("SELECT %s FROM GeneList WHERE SpeciesId = %d AND geneSymbol= '%s' LIMIT 1" % (", ".join(fetchFields), - othSpecId, - newdict["GeneSymbol"])).fetchone() + othSpecId, + newdict["GeneSymbol"])).fetchone() if resultsOther: for j, item in enumerate(fetchFields): @@ -88,13 +88,13 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): """ % (chrName, newdict["TxStart"], newdict["TxEnd"], diffCol[0], diffCol[1])).fetchone()[0] newdict2["snpDensity"] = newdict2["snpCount"] / \ - (newdict2["TxEnd"] - newdict2["TxStart"]) / 1000.0 + (newdict2["TxEnd"] - newdict2["TxStart"]) / 1000.0 else: newdict2["snpDensity"] = newdict2["snpCount"] = 0 try: newdict2['GeneLength'] = 1000.0 * \ - (newdict2['TxEnd'] - newdict2['TxStart']) + (newdict2['TxEnd'] - newdict2['TxStart']) except: pass diff --git a/wqflask/wqflask/markdown_routes.py b/wqflask/wqflask/markdown_routes.py index ebf75807..c27ff143 100644 --- a/wqflask/wqflask/markdown_routes.py +++ b/wqflask/wqflask/markdown_routes.py @@ -103,7 +103,7 @@ def environments(): @environments_blueprint.route('/svg-dependency-graph') def svg_graph(): directory, file_name, _ = get_file_from_python_search_path( - "wqflask/dependency-graph.svg").partition("dependency-graph.svg") + "wqflask/dependency-graph.svg").partition("dependency-graph.svg") return send_from_directory(directory, file_name) diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py index c68e0fde..f3b1b1fc 100644 --- a/wqflask/wqflask/marker_regression/display_mapping_results.py +++ b/wqflask/wqflask/marker_regression/display_mapping_results.py @@ -629,7 +629,7 @@ class DisplayMappingResults: submit=HtmlGenWrapper.create_input_tag(type_='hidden')) hddn = {'FormID': 'showDatabase', 'ProbeSetID': '_', 'database': fd.RISet+ \ - "Geno",'CellID':'_', 'RISet':fd.RISet, 'incparentsf1':'ON'} + "Geno",'CellID':'_', 'RISet':fd.RISet, 'incparentsf1':'ON'} for key in hddn.keys(): showLocusForm.append(HtmlGenWrapper.create_input_tag( name=key, value=hddn[key], type_='hidden')) @@ -889,7 +889,7 @@ class DisplayMappingResults: bootY = yZero - bootHeightThresh * item / highestPercent im_drawer.line( xy=((canvas.size[0] - bootOffset + 4, bootY), - (canvas.size[0] - bootOffset, bootY)), + (canvas.size[0] - bootOffset, bootY)), fill=BLACK) im_drawer.text(xy=(canvas.size[0] - bootOffset + 10, bootY+TEXT_Y_DISPLACEMENT), text='%2.1f' % item, font=bootScaleFont, fill=BLACK) @@ -983,7 +983,7 @@ class DisplayMappingResults: break if locPixel >= 0 and self.plotScale == 'physic': traitPixel = ((locPixel, yZero), (locPixel - 7, - yZero + 14), (locPixel + 7, yZero + 14)) + yZero + 14), (locPixel + 7, yZero + 14)) draw_open_polygon(canvas, xy=traitPixel, outline=BLACK, fill=self.TRANSCRIPT_LOCATION_COLOR) @@ -1029,7 +1029,7 @@ class DisplayMappingResults: SNPCounts[i - xLeftOffset] * SNP_HEIGHT_MODIFIER / maxCount) im_drawer.line( xy=((i, drawSNPLocationY + (snpDensity) * zoom), - (i, drawSNPLocationY - (snpDensity) * zoom)), + (i, drawSNPLocationY - (snpDensity) * zoom)), 
fill=self.SNP_COLOR, width=1) def drawMultiTraitName(self, fd, canvas, gifmap, showLocusForm, offset=(40, 120, 80, 10), zoom=1): @@ -1356,7 +1356,7 @@ class DisplayMappingResults: # always apply colors now, even if SNP Track not checked - Zach 11/24/2010 densities = [1.0000000000000001e-05, 0.094094033555233408, - 0.3306166377816987, 0.88246026851027781, 2.6690084029581951, 4.1, 61.0] + 0.3306166377816987, 0.88246026851027781, 2.6690084029581951, 4.1, 61.0] if SNPdensity < densities[0]: myColor = BLACK elif SNPdensity < densities[1]: @@ -1462,13 +1462,13 @@ class DisplayMappingResults: xy=((geneStartPix + xCoord + self.EACH_GENE_ARROW_WIDTH, geneYLocation), (geneStartPix + xCoord, - geneYLocation + (self.EACH_GENE_HEIGHT / 2) * zoom)), + geneYLocation + (self.EACH_GENE_HEIGHT / 2) * zoom)), fill=arrowColor, width=1) im_drawer.line( xy=((geneStartPix + xCoord + self.EACH_GENE_ARROW_WIDTH, geneYLocation + self.EACH_GENE_HEIGHT * zoom), (geneStartPix + xCoord, - geneYLocation + (self.EACH_GENE_HEIGHT / 2) * zoom)), + geneYLocation + (self.EACH_GENE_HEIGHT / 2) * zoom)), fill=arrowColor, width=1) # draw the blocks for the exon regions @@ -1476,7 +1476,7 @@ class DisplayMappingResults: exonStartPix = ( exonStarts[i] - startMb) * plotXScale + xLeftOffset exonEndPix = (exonEnds[i] - startMb) * \ - plotXScale + xLeftOffset + plotXScale + xLeftOffset if (exonStartPix < xLeftOffset): exonStartPix = xLeftOffset if (exonEndPix < xLeftOffset): @@ -1493,7 +1493,7 @@ class DisplayMappingResults: # draw gray blocks for 3' and 5' UTR blocks if cdsStart and cdsEnd: utrStartPix = (txStart - startMb) * \ - plotXScale + xLeftOffset + plotXScale + xLeftOffset utrEndPix = (cdsStart - startMb) * plotXScale + xLeftOffset if (utrStartPix < xLeftOffset): utrStartPix = xLeftOffset @@ -1741,9 +1741,9 @@ class DisplayMappingResults: mylineColor = self.HAPLOTYPE_RECOMBINATION im_drawer.line( xy=((plotRight, - geneYLocation + 7 + 2*ind*self.EACH_GENE_HEIGHT*zoom), + geneYLocation + 7 + 2*ind*self.EACH_GENE_HEIGHT*zoom), (drawEnd, - geneYLocation + 7 + 2*ind*self.EACH_GENE_HEIGHT*zoom)), + geneYLocation + 7 + 2*ind*self.EACH_GENE_HEIGHT*zoom)), fill= mylineColor, width=zoom * (self.EACH_GENE_HEIGHT + 2)) if lastGene == 0: @@ -1869,7 +1869,7 @@ class DisplayMappingResults: fill=self.CLICKABLE_WEBQTL_REGION_COLOR) im_drawer.line( xy=((xBrowse1, paddingTop), (xBrowse1, - (paddingTop + self.BAND_HEIGHT))), + (paddingTop + self.BAND_HEIGHT))), fill=self.CLICKABLE_WEBQTL_REGION_OUTLINE_COLOR) if self.dataset.group.species == "mouse" or self.dataset.group.species == "rat": @@ -1895,7 +1895,7 @@ class DisplayMappingResults: fill=self.CLICKABLE_PHENOGEN_REGION_COLOR) im_drawer.line( xy=((xBrowse1, phenogenPaddingTop), (xBrowse1, - (phenogenPaddingTop + self.BAND_HEIGHT))), + (phenogenPaddingTop + self.BAND_HEIGHT))), fill=self.CLICKABLE_PHENOGEN_REGION_OUTLINE_COLOR) UCSC_COORDS = "%d, %d, %d, %d" % ( @@ -2051,7 +2051,7 @@ class DisplayMappingResults: fill=xAxisLabelColor) else: im_drawer.line(xy=((Xc, yZero), - (Xc, yZero + xMinorTickHeight)), + (Xc, yZero + xMinorTickHeight)), fill=xAxisTickMarkColor, width=X_MINOR_TICK_THICKNESS) # Draw the MINOR tick mark @@ -2159,17 +2159,17 @@ class DisplayMappingResults: if differ: im_drawer.line( xy=((startPosX + Lpos, yZero), (xLeftOffset + offsetA,\ - yZero + 25)), + yZero + 25)), fill=lineColor) im_drawer.line( xy=((xLeftOffset + offsetA, yZero + 25), (xLeftOffset+offsetA,\ - yZero + 40 + Zorder*(LRectWidth+3))), + yZero + 40 + Zorder*(LRectWidth+3))), fill=lineColor) rectColor = ORANGE 
else: im_drawer.line( xy=((xLeftOffset + offsetA, yZero + 40+Zorder*(LRectWidth+3)-3), (\ - xLeftOffset + offsetA, yZero + 40+Zorder*(LRectWidth+3))), + xLeftOffset + offsetA, yZero + 40+Zorder*(LRectWidth+3))), fill=lineColor) rectColor = DEEPPINK im_drawer.rectangle( @@ -2178,7 +2178,7 @@ class DisplayMappingResults: yZero + 40 + Zorder*(LRectWidth+3)+LRectWidth)), outline=rectColor, fill=rectColor, width=0) COORDS = "%d,%d,%d,%d" % (xLeftOffset+offsetA-LRectHeight, yZero+40+Zorder*(LRectWidth+3),\ - xLeftOffset + offsetA, yZero +40+Zorder*(LRectWidth+3)+LRectWidth) + xLeftOffset + offsetA, yZero +40+Zorder*(LRectWidth+3)+LRectWidth) HREF = "/show_trait?trait_id=%s&dataset=%s" % ( Lname, self.dataset.group.name + "Geno") #HREF="javascript:showDatabase3('%s','%s','%s','');" % (showLocusForm,fd.RISet+"Geno", Lname) @@ -2230,7 +2230,7 @@ class DisplayMappingResults: if self.lrsMax <= 0: # sliding scale if "lrs_value" in self.qtlresults[0]: LRS_LOD_Max = max([result['lrs_value'] - for result in self.qtlresults]) + for result in self.qtlresults]) if self.LRS_LOD == "LOD" or self.LRS_LOD == "-logP": LRS_LOD_Max = LRS_LOD_Max / self.LODFACTOR if self.permChecked and self.nperm > 0 and not self.multipleInterval: @@ -2248,7 +2248,7 @@ class DisplayMappingResults: pass else: LRS_LOD_Max = max([result['lod_score'] - for result in self.qtlresults]) + for result in self.qtlresults]) if self.LRS_LOD == "LRS": LRS_LOD_Max = LRS_LOD_Max * self.LODFACTOR if self.permChecked and self.nperm > 0 and not self.multipleInterval: @@ -2390,7 +2390,7 @@ class DisplayMappingResults: ) im_drawer.line( xy=((start_pos_x + self.SUGGESTIVE_WIDTH / 1.5, significantY), - (rightEdge, significantY)), + (rightEdge, significantY)), fill=self.SIGNIFICANT_COLOR, width=self.SIGNIFICANT_WIDTH * zoom # , clipX=(xLeftOffset, xLeftOffset + plotWidth-2) @@ -2440,7 +2440,7 @@ class DisplayMappingResults: else: if self.additiveChecked: additiveMax = max([abs(X['additive']) - for X in self.qtlresults]) + for X in self.qtlresults]) lrsEdgeWidth = 3 if zoom == 2: @@ -2484,7 +2484,7 @@ class DisplayMappingResults: Xcm = Xc else: Xcm = (yZero - Yc0) / \ - ((Yc - Yc0) / (Xc - Xc0)) + Xc0 + ((Yc - Yc0) / (Xc - Xc0)) + Xc0 if Yc0 < yZero: im_drawer.line( xy=((Xc0, Yc0), (Xcm, yZero)), @@ -2583,12 +2583,12 @@ class DisplayMappingResults: #Yc = yZero - webqtlConfig.MAXLRS*LRSHeightThresh/(LRSAxisList[-1]*self.LODFACTOR) Yc = yZero - webqtlConfig.MAXLRS * \ LRSHeightThresh / \ - (LRS_LOD_Max * self.LODFACTOR) + (LRS_LOD_Max * self.LODFACTOR) else: #Yc = yZero - qtlresult['lrs_value']*LRSHeightThresh/(LRSAxisList[-1]*self.LODFACTOR) Yc = yZero - \ qtlresult['lrs_value'] * LRSHeightThresh / \ - (LRS_LOD_Max * self.LODFACTOR) + (LRS_LOD_Max * self.LODFACTOR) else: if qtlresult['lrs_value'] > 460 or qtlresult['lrs_value'] == 'inf': #Yc = yZero - webqtlConfig.MAXLRS*LRSHeightThresh/LRSAxisList[-1] @@ -2597,7 +2597,7 @@ class DisplayMappingResults: #Yc = yZero - qtlresult['lrs_value']*LRSHeightThresh/LRSAxisList[-1] Yc = yZero - \ qtlresult['lrs_value'] * \ - LRSHeightThresh / LRS_LOD_Max + LRSHeightThresh / LRS_LOD_Max else: if qtlresult['lod_score'] > 100 or qtlresult['lod_score'] == 'inf': #Yc = yZero - webqtlConfig.MAXLRS*LRSHeightThresh/LRSAxisList[-1] @@ -2607,12 +2607,12 @@ class DisplayMappingResults: #Yc = yZero - qtlresult['lod_score']*self.LODFACTOR*LRSHeightThresh/LRSAxisList[-1] Yc = yZero - \ qtlresult['lod_score'] * self.LODFACTOR * \ - LRSHeightThresh / LRS_LOD_Max + LRSHeightThresh / LRS_LOD_Max else: #Yc = yZero - 
qtlresult['lod_score']*LRSHeightThresh/LRSAxisList[-1] Yc = yZero - \ qtlresult['lod_score'] * \ - LRSHeightThresh / LRS_LOD_Max + LRSHeightThresh / LRS_LOD_Max if self.manhattan_plot == True: if self.color_scheme == "single": @@ -2665,7 +2665,7 @@ class DisplayMappingResults: Xcm = Xc else: Xcm = (yZero - Yc0) / \ - ((Yc - Yc0) / (Xc - Xc0)) + Xc0 + ((Yc - Yc0) / (Xc - Xc0)) + Xc0 if Yc0 < yZero: im_drawer.line( xy=((Xc0, Yc0), (Xcm, yZero)), @@ -2731,7 +2731,7 @@ class DisplayMappingResults: Xcm = Xc else: Xcm = (yZero - Yc0) / \ - ((Yc - Yc0) / (Xc - Xc0)) + Xc0 + ((Yc - Yc0) / (Xc - Xc0)) + Xc0 if Yc0 < yZero: im_drawer.line( xy=((Xc0, Yc0), (Xcm, yZero)), @@ -2947,12 +2947,12 @@ class DisplayMappingResults: if self.dataset.group.species == "mouse": if refGene: gene_table_header_list = ["Index", - "Symbol", - "Mb Start", - "Length (Kb)", - "SNP Count", - "SNP Density", - "Avg Expr", + "Symbol", + "Mb Start", + "Length (Kb)", + "SNP Count", + "SNP Density", + "Avg Expr", "Human Chr", "Mb Start (hg19)", "Literature Correlation", diff --git a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py index 9f9591ad..5d16abde 100644 --- a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py +++ b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py @@ -26,18 +26,18 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo gen_pheno_txt_file(samples, vals, trait_filename) output_filename = (f"{this_dataset.group.name}_GWA_" - + ''.join(random.choice(string.ascii_uppercase + string.digits) - for _ in range(6)) - ) + + ''.join(random.choice(string.ascii_uppercase + string.digits) + for _ in range(6)) + ) bootstrap_filename = None permu_filename = None opt_list = [] if boot_check and num_bootstrap > 0: bootstrap_filename = (f"{this_dataset.group.name}_BOOTSTRAP_" - + ''.join(random.choice(string.ascii_uppercase + string.digits) - for _ in range(6)) - ) + + ''.join(random.choice(string.ascii_uppercase + string.digits) + for _ in range(6)) + ) opt_list.append("-b") opt_list.append(f"--n_bootstrap {str(num_bootstrap)}") @@ -45,8 +45,8 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo f"--bootstrap_output {webqtlConfig.GENERATED_IMAGE_DIR}{bootstrap_filename}.txt") if num_perm > 0: permu_filename = ("{this_dataset.group.name}_PERM_" - + ''.join(random.choice(string.ascii_uppercase - + string.digits) for _ in range(6)) + + ''.join(random.choice(string.ascii_uppercase + + string.digits) for _ in range(6)) ) opt_list.append("-n " + str(num_perm)) opt_list.append( @@ -57,15 +57,15 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo opt_list.append("--interval 1") reaper_command = (REAPER_COMMAND + - ' --geno {0}/{1}.geno --traits {2}/gn2/{3}.txt {4} -o {5}{6}.txt'.format(flat_files('genotype'), + ' --geno {0}/{1}.geno --traits {2}/gn2/{3}.txt {4} -o {5}{6}.txt'.format(flat_files('genotype'), - genofile_name, - TEMPDIR, - trait_filename, - " ".join( - opt_list), - webqtlConfig.GENERATED_IMAGE_DIR, - output_filename)) + genofile_name, + TEMPDIR, + trait_filename, + " ".join( + opt_list), + webqtlConfig.GENERATED_IMAGE_DIR, + output_filename)) logger.debug("reaper_command:" + reaper_command) os.system(reaper_command) @@ -82,7 +82,7 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo significant = permu_vals[int(num_perm * 0.95 - 1)] return (marker_obs, permu_vals, suggestive, significant, bootstrap_vals, - [output_filename, 
permu_filename, bootstrap_filename]) + [output_filename, permu_filename, bootstrap_filename]) def gen_pheno_txt_file(samples, vals, trait_filename): @@ -231,17 +231,17 @@ def run_original_reaper(this_trait, dataset, samples_before, trait_vals, json_da control_geno.append(control_geno2[_idx]) bootstrap_results = genotype.bootstrap(strains=trimmed_samples, - trait=trimmed_values, - control=control_geno, - nboot=num_bootstrap) + trait=trimmed_values, + control=control_geno, + nboot=num_bootstrap) else: reaper_results = genotype.regression(strains=trimmed_samples, trait=trimmed_values) if bootCheck: bootstrap_results = genotype.bootstrap(strains=trimmed_samples, - trait=trimmed_values, - nboot=num_bootstrap) + trait=trimmed_values, + nboot=num_bootstrap) json_data['chr'] = [] json_data['pos'] = [] @@ -265,7 +265,7 @@ def run_original_reaper(this_trait, dataset, samples_before, trait_vals, json_da # if self.additive: # self.json_data['additive'].append(qtl.additive) locus = {"name": reaper_locus.name, "chr": reaper_locus.chr, - "cM": reaper_locus.cM, "Mb": reaper_locus.Mb} + "cM": reaper_locus.cM, "Mb": reaper_locus.Mb} qtl = {"lrs_value": qtl.lrs, "chr": converted_chr, "Mb": reaper_locus.Mb, "cM": reaper_locus.cM, "name": reaper_locus.name, "additive": qtl.additive, "dominance": qtl.dominance} qtl_results.append(qtl) diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py index 32dbad1f..cf8cf514 100644 --- a/wqflask/wqflask/marker_regression/rqtl_mapping.py +++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py @@ -411,7 +411,7 @@ def process_pair_scan_results(result): result = result[1] output = [tuple([result[j][i] for j in range(result.ncol)]) - for i in range(result.nrow)] + for i in range(result.nrow)] for i, line in enumerate(result.iter_row()): marker = {} @@ -441,7 +441,7 @@ def process_rqtl_perm_results(num_perm, results): def process_rqtl_results(result, species_name): qtl_results = [] output = [tuple([result[j][i] for j in range(result.ncol)]) - for i in range(result.nrow)] + for i in range(result.nrow)] for i, line in enumerate(result.iter_row()): marker = {} diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py index 041f4348..81e0a03f 100644 --- a/wqflask/wqflask/marker_regression/run_mapping.py +++ b/wqflask/wqflask/marker_regression/run_mapping.py @@ -289,18 +289,18 @@ class RunMapping: ",") results, self.perm_output, self.suggestive, self.significant, self.bootstrap_results, self.output_files = qtlreaper_mapping.run_reaper(self.this_trait, - self.dataset, - self.samples, - self.vals, - self.json_data, - self.num_perm, - self.bootCheck, - self.num_bootstrap, - self.do_control, - self.control_marker, - self.manhattan_plot, - self.first_run, - self.output_files) + self.dataset, + self.samples, + self.vals, + self.json_data, + self.num_perm, + self.bootCheck, + self.num_bootstrap, + self.do_control, + self.control_marker, + self.manhattan_plot, + self.first_run, + self.output_files) else: results, self.json_data, self.perm_output, self.suggestive, self.significant, self.bootstrap_results = qtlreaper_mapping.run_original_reaper(self.this_trait, self.dataset, @@ -371,7 +371,7 @@ class RunMapping: ps=this_ps, url="/show_trait?trait_id=" + \ marker['name'] + "&dataset=" + \ - self.dataset.group.name + "Geno" + self.dataset.group.name + "Geno" ) if self.geno_db_exists == "True": @@ -382,7 +382,7 @@ class RunMapping: pos=this_ps, url="/show_trait?trait_id=" + \ 
marker['name'] + "&dataset=" + \ - self.dataset.group.name + "Geno" + self.dataset.group.name + "Geno" ) else: annot_marker = dict( @@ -409,11 +409,11 @@ class RunMapping: if 'Mb' in marker.keys(): marker['display_pos'] = "Chr" + \ str(marker['chr']) + ": " + \ - "{:.6f}".format(marker['Mb']) + "{:.6f}".format(marker['Mb']) elif 'cM' in marker.keys(): marker['display_pos'] = "Chr" + \ str(marker['chr']) + ": " + \ - "{:.3f}".format(marker['cM']) + "{:.3f}".format(marker['cM']) else: marker['display_pos'] = "N/A" self.qtl_results.append(marker) @@ -776,7 +776,7 @@ def get_perm_strata(this_trait, sample_list, categorical_vars, used_samples): perm_strata_strings.append(combined_string) d = dict([(y, x + 1) - for x, y in enumerate(sorted(set(perm_strata_strings)))]) + for x, y in enumerate(sorted(set(perm_strata_strings)))]) list_to_numbers = [d[x] for x in perm_strata_strings] perm_strata = list_to_numbers diff --git a/wqflask/wqflask/model.py b/wqflask/wqflask/model.py index 55b0278a..822900cc 100644 --- a/wqflask/wqflask/model.py +++ b/wqflask/wqflask/model.py @@ -45,8 +45,8 @@ class User(Base): ) user_collections = relationship("UserCollection", - order_by="asc(UserCollection.name)", - lazy='dynamic', + order_by="asc(UserCollection.name)", + lazy='dynamic', ) def display_num_collections(self): diff --git a/wqflask/wqflask/parser.py b/wqflask/wqflask/parser.py index 6b836e20..bd1c4407 100644 --- a/wqflask/wqflask/parser.py +++ b/wqflask/wqflask/parser.py @@ -34,7 +34,7 @@ def parse(pstring): (\w+\s*[=:\>\<][\w\*]+) | # wiki=bar, GO:foobar, etc (".*?") | ('.*?') | # terms in quotes, i.e. "brain weight" ([\w\*\?]+)) # shh, brain, etc """, pstring, - flags=re.VERBOSE) + flags=re.VERBOSE) pstring = [item.strip() for item in pstring if item and item.strip()] diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py index 2e1cb992..fc48959e 100644 --- a/wqflask/wqflask/search_results.py +++ b/wqflask/wqflask/search_results.py @@ -173,10 +173,10 @@ class SearchResultPage: if self.dataset.type == "ProbeSet": self.header_data_names = ['index', 'display_name', 'symbol', 'description', - 'location', 'mean', 'lrs_score', 'lrs_location', 'additive'] + 'location', 'mean', 'lrs_score', 'lrs_location', 'additive'] elif self.dataset.type == "Publish": self.header_data_names = ['index', 'display_name', 'description', 'mean', - 'authors', 'pubmed_text', 'lrs_score', 'lrs_location', 'additive'] + 'authors', 'pubmed_text', 'lrs_score', 'lrs_location', 'additive'] elif self.dataset.type == "Geno": self.header_data_names = ['index', 'display_name', 'location'] @@ -273,9 +273,9 @@ class SearchResultPage: if search_ob: search_class = getattr(do_search, search_ob) the_search = search_class(search_term, - search_operator, - self.dataset, - search_type['key'] + search_operator, + self.dataset, + search_type['key'] ) return the_search else: diff --git a/wqflask/wqflask/server_side.py b/wqflask/wqflask/server_side.py index 8ca3a9eb..7f68efad 100644 --- a/wqflask/wqflask/server_side.py +++ b/wqflask/wqflask/server_side.py @@ -49,8 +49,8 @@ class ServerSideTable: column_name = self.header_data_names[column_number - 1] sort_direction = self.request_values['sSortDir_' + str(i)] self.table_rows = sorted(self.table_rows, - key=lambda x: x[column_name], - reverse=is_reverse(sort_direction)) + key=lambda x: x[column_name], + reverse=is_reverse(sort_direction)) def paginate_rows(self): """ diff --git a/wqflask/wqflask/show_trait/SampleList.py b/wqflask/wqflask/show_trait/SampleList.py index 
3a63c84e..f9d30dba 100644 --- a/wqflask/wqflask/show_trait/SampleList.py +++ b/wqflask/wqflask/show_trait/SampleList.py @@ -75,7 +75,7 @@ class SampleList: if self.dataset.group.species == "mouse": if len(sample.extra_attributes['rrid'].split(":")) > 1: the_rrid = sample.extra_attributes['rrid'].split(":")[ - 1] + 1] sample.extra_attributes['rrid'] = [ sample.extra_attributes['rrid']] sample.extra_attributes['rrid'].append( @@ -83,7 +83,7 @@ class SampleList: elif self.dataset.group.species == "rat": if len(str(sample.extra_attributes['rrid'])): the_rrid = sample.extra_attributes['rrid'].split("_")[ - 1] + 1] sample.extra_attributes['rrid'] = [ sample.extra_attributes['rrid']] sample.extra_attributes['rrid'].append( diff --git a/wqflask/wqflask/show_trait/export_trait_data.py b/wqflask/wqflask/show_trait/export_trait_data.py index 81e7903b..7fabc3f6 100644 --- a/wqflask/wqflask/show_trait/export_trait_data.py +++ b/wqflask/wqflask/show_trait/export_trait_data.py @@ -41,7 +41,7 @@ def get_export_metadata(trait_id, dataset_name): if dataset.type == "Publish": metadata.append(["Phenotype ID: " + trait_id]) metadata.append(["Phenotype URL: " + "http://genenetwork.org/show_trait?trait_id=" + \ - trait_id + "&dataset=" + dataset_name]) + trait_id + "&dataset=" + dataset_name]) metadata.append(["Group: " + dataset.group.name]) metadata.append( ["Phenotype: " + this_trait.description_display.replace(",", "\",\"")]) @@ -56,7 +56,7 @@ def get_export_metadata(trait_id, dataset_name): else: metadata.append(["Record ID: " + trait_id]) metadata.append(["Trait URL: " + "http://genenetwork.org/show_trait?trait_id=" + \ - trait_id + "&dataset=" + dataset_name]) + trait_id + "&dataset=" + dataset_name]) if this_trait.symbol: metadata.append(["Symbol: " + this_trait.symbol]) metadata.append(["Dataset: " + dataset.name]) diff --git a/wqflask/wqflask/snp_browser/snp_browser.py b/wqflask/wqflask/snp_browser/snp_browser.py index 5b7a663c..42fe339e 100644 --- a/wqflask/wqflask/snp_browser/snp_browser.py +++ b/wqflask/wqflask/snp_browser/snp_browser.py @@ -294,7 +294,7 @@ class SnpBrowser: effect_info_dict = get_effect_info(effect_list) coding_domain_list = ['Start Gained', 'Start Lost', - 'Stop Gained', 'Stop Lost', 'Nonsynonymous', 'Synonymous'] + 'Stop Gained', 'Stop Lost', 'Nonsynonymous', 'Synonymous'] intron_domain_list = ['Splice Site', 'Nonsplice Site'] for key in effect_info_dict: @@ -320,7 +320,7 @@ class SnpBrowser: if self.redundant == "false" or last_mb != mb: # filter redundant if self.include_record(domain, function, snp_source, conservation_score): info_list = [snp_name, rs, chr, mb, alleles, gene, transcript, exon, domain, - function, function_details, snp_source, conservation_score, snp_id] + function, function_details, snp_source, conservation_score, snp_id] info_list.extend(self.allele_list) filtered_results.append(info_list) last_mb = mb @@ -351,7 +351,7 @@ class SnpBrowser: if self.redundant == "false" or last_mb != mb: if self.include_record(domain, function, snp_source, conservation_score): info_list = [snp_name, rs, chr, mb, alleles, gene, transcript, exon, domain, - function, function_details, snp_source, conservation_score, snp_id] + function, function_details, snp_source, conservation_score, snp_id] info_list.extend(self.allele_list) filtered_results.append(info_list) last_mb = mb @@ -366,7 +366,7 @@ class SnpBrowser: domain = conservation_score = snp_id = snp_name = rs = flank_3 = flank_5 = ncbi = function = "" if self.include_record(domain, function, source_name, conservation_score): 
filtered_results.append([indel_name, indel_chr, indel_mb_start, indel_mb_end, - indel_strand, indel_type, indel_size, indel_sequence, source_name]) + indel_strand, indel_type, indel_size, indel_sequence, source_name]) last_mb = indel_mb_start else: @@ -703,9 +703,9 @@ def get_header_list(variant_type, strains, species=None, empty_columns=None): header_data_names = [] if variant_type == "SNP": header_fields.append(['Index', 'SNP ID', 'Chr', 'Mb', 'Alleles', 'Source', 'ConScore', - 'Gene', 'Transcript', 'Exon', 'Domain 1', 'Domain 2', 'Function', 'Details']) + 'Gene', 'Transcript', 'Exon', 'Domain 1', 'Domain 2', 'Function', 'Details']) header_data_names = ['index', 'snp_name', 'chr', 'mb_formatted', 'alleles', 'snp_source', 'conservation_score', - 'gene_name', 'transcript', 'exon', 'domain_1', 'domain_2', 'function', 'function_details'] + 'gene_name', 'transcript', 'exon', 'domain_1', 'domain_2', 'function', 'function_details'] header_fields.append(strain_list) header_data_names += strain_list @@ -742,9 +742,9 @@ def get_header_list(variant_type, strains, species=None, empty_columns=None): elif variant_type == "InDel": header_fields = ['Index', 'ID', 'Type', 'InDel Chr', - 'Mb Start', 'Mb End', 'Strand', 'Size', 'Sequence', 'Source'] + 'Mb Start', 'Mb End', 'Strand', 'Size', 'Sequence', 'Source'] header_data_names = ['index', 'indel_name', 'indel_type', 'indel_chr', 'indel_mb_s', - 'indel_mb_e', 'indel_strand', 'indel_size', 'indel_sequence', 'source_name'] + 'indel_mb_e', 'indel_strand', 'indel_size', 'indel_sequence', 'source_name'] return header_fields, empty_field_count, header_data_names @@ -758,9 +758,9 @@ def get_effect_details_by_category(effect_name=None, effect_value=None): tmp_list = [] gene_group_list = ['Upstream', 'Downstream', - 'Splice Site', 'Nonsplice Site', '3\' UTR'] + 'Splice Site', 'Nonsplice Site', '3\' UTR'] biotype_group_list = ['Unknown Effect In Exon', 'Start Gained', - 'Start Lost', 'Stop Gained', 'Stop Lost', 'Nonsynonymous', 'Synonymous'] + 'Start Lost', 'Stop Gained', 'Stop Lost', 'Nonsynonymous', 'Synonymous'] new_codon_group_list = ['Start Gained'] codon_effect_group_list = [ 'Start Lost', 'Stop Gained', 'Stop Lost', 'Nonsynonymous', 'Synonymous'] @@ -913,7 +913,7 @@ def get_gene_id_name_dict(species_id, gene_name_list): if len(gene_name_list) == 0: return "" gene_name_str_list = ["'" + gene_name + \ - "'" for gene_name in gene_name_list] + "'" for gene_name in gene_name_list] gene_name_str = ",".join(gene_name_str_list) query = """ diff --git a/wqflask/wqflask/user_login.py b/wqflask/wqflask/user_login.py index 725e7c9e..0d5f1f3e 100644 --- a/wqflask/wqflask/user_login.py +++ b/wqflask/wqflask/user_login.py @@ -59,12 +59,12 @@ def encode_password(pass_gen_fields, unencrypted_password): def set_password(password): pass_gen_fields = { - "unencrypted_password": password, - "algorithm": "pbkdf2", - "hashfunc": "sha256", - "salt": base64.b64encode(os.urandom(32)), - "iterations": 100000, - "keylength": 32, + "unencrypted_password": password, + "algorithm": "pbkdf2", + "hashfunc": "sha256", + "salt": base64.b64encode(os.urandom(32)), + "iterations": 100000, + "keylength": 32, "created_timestamp": timestamp() } @@ -89,18 +89,18 @@ def get_signed_session_id(user): if 'github_id' in user: session = dict(login_time=time.time(), - user_type="github", - user_id=user['user_id'], - github_id=user['github_id'], - user_name=user['name'], - user_url=user['user_url']) + user_type="github", + user_id=user['user_id'], + github_id=user['github_id'], + user_name=user['name'], + 
user_url=user['user_url']) elif 'orcid' in user: session = dict(login_time=time.time(), - user_type="orcid", - user_id=user['user_id'], - github_id=user['orcid'], - user_name=user['name'], - user_url=user['user_url']) + user_type="orcid", + user_id=user['user_id'], + github_id=user['orcid'], + user_name=user['name'], + user_url=user['user_url']) else: session = dict(login_time=time.time(), user_type="gn2", @@ -269,7 +269,7 @@ def github_oauth2(): result = requests.post( "https://github.com/login/oauth/access_token", json=data) result_dict = {arr[0]: arr[1] - for arr in [tok.split("=") for tok in result.text.split("&")]} + for arr in [tok.split("=") for tok in result.text.split("&")]} github_user = get_github_user_details(result_dict["access_token"]) diff --git a/wqflask/wqflask/user_manager.py b/wqflask/wqflask/user_manager.py index 9ebec405..fb26bfb1 100644 --- a/wqflask/wqflask/user_manager.py +++ b/wqflask/wqflask/user_manager.py @@ -475,7 +475,7 @@ def set_password(password, user): pwfields.encrypt_time = enc_password.encrypt_time user.password = json.dumps(pwfields.__dict__, - sort_keys=True, + sort_keys=True, ) diff --git a/wqflask/wqflask/user_session.py b/wqflask/wqflask/user_session.py index 3e543445..67e2e158 100644 --- a/wqflask/wqflask/user_session.py +++ b/wqflask/wqflask/user_session.py @@ -105,8 +105,8 @@ class UserSession: if user_cookie: self.logged_in = False self.record = dict(login_time=time.time(), - user_type="anon", - user_id=str(uuid.uuid4())) + user_type="anon", + user_id=str(uuid.uuid4())) Redis.hmset(self.redis_key, self.record) Redis.expire(self.redis_key, THIRTY_DAYS) @@ -117,8 +117,8 @@ class UserSession: return None else: self.record = dict(login_time=time.time(), - user_type="anon", - user_id=str(uuid.uuid4())) + user_type="anon", + user_id=str(uuid.uuid4())) Redis.hmset(self.redis_key, self.record) Redis.expire(self.redis_key, THIRTY_DAYS) else: diff --git a/wqflask/wqflask/wgcna/wgcna_analysis.py b/wqflask/wqflask/wgcna/wgcna_analysis.py index 21516b30..f96892a0 100644 --- a/wqflask/wqflask/wgcna/wgcna_analysis.py +++ b/wqflask/wqflask/wgcna/wgcna_analysis.py @@ -70,7 +70,7 @@ class WGCNA: self.trait_db_list = [trait.strip() for trait in requestform['trait_list'].split(',')] print(("Retrieved phenotype data from database", - requestform['trait_list'])) + requestform['trait_list'])) helper_functions.get_trait_db_obs(self, self.trait_db_list) # self.input contains the phenotype values we need to send to R -- cgit v1.2.3 From 99f8bdbb70e4e00df6671c3f20db4857ffa96d6f Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Fri, 30 Apr 2021 13:15:10 +0300 Subject: autopep8: Run autopep8 100 times with target rules Rules used are: E20,E211,E22,E224,E224,E225,E226,E227,E228,E231,E241,E242, E251,E252,E26,E265,E266,E27,E301,E302,E303,E304,E305,E306, E401,E501,E70,E701,W291,W292,W293,W391,W504,E101,E11,E121, E122,E123,E124,E125,E126,E127,E128,E129,E131,E133 --- wqflask/base/webqtlConfig.py | 2 +- wqflask/maintenance/quantile_normalize.py | 2 +- .../marker_regression/test_qtlreaper_mapping.py | 7 +- .../wqflask/marker_regression/test_rqtl_mapping.py | 4 +- wqflask/utility/Plot.py | 5 +- wqflask/utility/elasticsearch_tools.py | 2 +- wqflask/utility/svg.py | 50 ++++----- wqflask/wqflask/api/mapping.py | 3 +- wqflask/wqflask/api/router.py | 30 +++--- wqflask/wqflask/collect.py | 8 +- wqflask/wqflask/correlation/corr_scatter_plot.py | 4 +- wqflask/wqflask/correlation/show_corr_results.py | 34 +++--- .../wqflask/correlation_matrix/show_corr_matrix.py | 2 +- 
wqflask/wqflask/ctl/ctl_analysis.py | 2 +- wqflask/wqflask/group_manager.py | 8 +- wqflask/wqflask/interval_analyst/GeneUtil.py | 3 +- .../marker_regression/display_mapping_results.py | 120 ++++++++++----------- wqflask/wqflask/marker_regression/gemma_mapping.py | 2 +- .../wqflask/marker_regression/qtlreaper_mapping.py | 9 +- wqflask/wqflask/marker_regression/rqtl_mapping.py | 10 +- wqflask/wqflask/marker_regression/run_mapping.py | 3 +- wqflask/wqflask/model.py | 4 +- wqflask/wqflask/resource_manager.py | 2 +- wqflask/wqflask/search_results.py | 2 +- wqflask/wqflask/snp_browser/snp_browser.py | 4 +- wqflask/wqflask/user_login.py | 6 +- wqflask/wqflask/user_manager.py | 6 +- wqflask/wqflask/views.py | 2 +- 28 files changed, 172 insertions(+), 164 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/base/webqtlConfig.py b/wqflask/base/webqtlConfig.py index 872b52eb..39947158 100644 --- a/wqflask/base/webqtlConfig.py +++ b/wqflask/base/webqtlConfig.py @@ -15,7 +15,7 @@ from utility.tools import valid_path, mk_dir, assert_dir, assert_writable_dir, f DEBUG = 1 # USER privilege -USERDICT = {'guest': 1, 'user': 2, 'admin': 3, 'root':4} +USERDICT = {'guest': 1, 'user': 2, 'admin': 3, 'root': 4} # Set privileges SUPER_PRIVILEGES = {'data': 'edit', 'metadata': 'edit', 'admin': 'edit-admins'} diff --git a/wqflask/maintenance/quantile_normalize.py b/wqflask/maintenance/quantile_normalize.py index 88bb2cb5..0cc963e5 100644 --- a/wqflask/maintenance/quantile_normalize.py +++ b/wqflask/maintenance/quantile_normalize.py @@ -123,7 +123,7 @@ if __name__ == '__main__': success, _ = bulk(es, set_data(sys.argv[1])) response = es.search( - index="traits", doc_type="trait", body = { + index="traits", doc_type="trait", body={ "query": {"match": {"name": "ENSMUSG00000028982"}} } ) diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py index 93848a84..8b4337ec 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py @@ -9,13 +9,14 @@ from wqflask.marker_regression.qtlreaper_mapping import gen_pheno_txt_file class TestQtlReaperMapping(unittest.TestCase): @mock.patch("wqflask.marker_regression.qtlreaper_mapping.TEMPDIR", "/home/user/data") def test_gen_pheno_txt_file(self): - vals = ["V1", "x", "V4", "V3","x"] - samples = ["S1", "S2", "S3", "S4","S5"] + vals = ["V1", "x", "V4", "V3", "x"] + samples = ["S1", "S2", "S3", "S4", "S5"] trait_filename = "trait_file" with mock.patch("builtins.open", mock.mock_open())as mock_open: gen_pheno_txt_file(samples=samples, vals=vals, trait_filename=trait_filename) - mock_open.assert_called_once_with("/home/user/data/gn2/trait_file.txt", "w") + mock_open.assert_called_once_with( + "/home/user/data/gn2/trait_file.txt", "w") filehandler = mock_open() write_calls = [mock.call('Trait\t'), mock.call( 'S1\tS3\tS4\n'), mock.call('T1\t'), mock.call('V1\tV4\tV3')] diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py index 68686e27..91d2c587 100644 --- a/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py +++ b/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py @@ -28,7 +28,7 @@ class TestRqtlMapping(unittest.TestCase): def test_sanitize_rqtl_phenotype(self): """test for sanitizing rqtl phenotype""" - vals = ['f', "x", "r", "x","x"] + vals = ['f', "x", "r", 
"x", "x"] results = sanitize_rqtl_phenotype(vals) expected_phenotype_string = 'c(f,NA,r,NA,NA)' @@ -36,7 +36,7 @@ class TestRqtlMapping(unittest.TestCase): def test_sanitize_rqtl_names(self): """test for sanitzing rqtl names""" - vals = ['f', "x", "r", "x","x"] + vals = ['f', "x", "r", "x", "x"] expected_sanitized_name = "c('f',NA,'r',NA,NA)" results = sanitize_rqtl_names(vals) self.assertEqual(expected_sanitized_name, results) diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py index 37a8a1a5..9b2c6735 100644 --- a/wqflask/utility/Plot.py +++ b/wqflask/utility/Plot.py @@ -188,7 +188,8 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab for i in range(int(stepX) + 1): xc = xLeftOffset + (x - xLow) * xScale im_drawer.line( - xy=((xc, yTopOffset + plotHeight), (xc, yTopOffset + plotHeight + 5)), + xy=((xc, yTopOffset + plotHeight), + (xc, yTopOffset + plotHeight + 5)), fill=axesColor) strX = cformat(d=x, rank=0) im_drawer.text( @@ -217,7 +218,7 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab text=XLabel, xy=(xLeftOffset + ( plotWidth - im_drawer.textsize(XLabel, font=labelFont)[0]) / 2.0, - yTopOffset + plotHeight + yBottomOffset-10), + yTopOffset + plotHeight + yBottomOffset - 10), font=labelFont, fill=labelColor) if YLabel: diff --git a/wqflask/utility/elasticsearch_tools.py b/wqflask/utility/elasticsearch_tools.py index e56c22eb..eae3ba03 100644 --- a/wqflask/utility/elasticsearch_tools.py +++ b/wqflask/utility/elasticsearch_tools.py @@ -105,7 +105,7 @@ def get_item_by_unique_column(es, column_name, column_value, index, doc_type): item_details = None try: response = es.search( - index=index, doc_type=doc_type, body = { + index=index, doc_type=doc_type, body={ "query": {"match": {column_name: column_value}} }) if len(response["hits"]["hits"]) > 0: diff --git a/wqflask/utility/svg.py b/wqflask/utility/svg.py index bc3bc833..eddb97da 100644 --- a/wqflask/utility/svg.py +++ b/wqflask/utility/svg.py @@ -229,13 +229,13 @@ class pathdata: def bezier(self, x1, y1, x2, y2, x, y): """bezier with xy1 and xy2 to xy absolut""" - self.path.append('C' + str(x1) + ','+str(y1)+' '+str(x2) - + ',' + str(y2) + ' '+str(x)+','+str(y)) + self.path.append('C' + str(x1) + ',' + str(y1) + ' ' + str(x2) + + ',' + str(y2) + ' ' + str(x) + ',' + str(y)) def relbezier(self, x1, y1, x2, y2, x, y): """bezier with xy1 and xy2 to xy relative""" - self.path.append('c' + str(x1) + ','+str(y1)+' '+str(x2) - + ',' + str(y2) + ' '+str(x)+','+str(y)) + self.path.append('c' + str(x1) + ',' + str(y1) + ' ' + str(x2) + + ',' + str(y2) + ' ' + str(x) + ',' + str(y)) def smbezier(self, x2, y2, x, y): """smooth bezier with xy2 to xy absolut""" @@ -267,13 +267,13 @@ class pathdata: def ellarc(self, rx, ry, xrot, laf, sf, x, y): """elliptival arc with rx and ry rotating with xrot using large-arc-flag and sweep-flag to xy absolut""" - self.path.append('A' + str(rx) + ','+str(ry)+' '+str(xrot) - + ' ' + str(laf) + ' '+str(sf)+' '+str(x)+' '+str(y)) + self.path.append('A' + str(rx) + ',' + str(ry) + ' ' + str(xrot) + + ' ' + str(laf) + ' ' + str(sf) + ' ' + str(x) + ' ' + str(y)) def relellarc(self, rx, ry, xrot, laf, sf, x, y): """elliptival arc with rx and ry rotating with xrot using large-arc-flag and sweep-flag to xy relative""" - self.path.append('a' + str(rx) + ','+str(ry)+' '+str(xrot) - + ' ' + str(laf) + ' '+str(sf)+' '+str(x)+' '+str(y)) + self.path.append('a' + str(rx) + ',' + str(ry) + ' ' + str(xrot) + + ' ' + str(laf) + ' ' + str(sf) + ' ' + 
str(x) + ' ' + str(y)) def __repr__(self): return ' '.join(self.path) @@ -471,7 +471,7 @@ class ellipse(SVGelement): an ellipse is defined as a center and a x and y radius. """ - def __init__(self, cx=None, cy=None, rx=None, ry=None,fill=None,stroke=None,stroke_width=None,**args): + def __init__(self, cx=None, cy=None, rx=None, ry=None, fill=None, stroke=None, stroke_width=None, **args): if rx == None or ry == None: raise ValueError('both rx and ry are required') @@ -494,7 +494,7 @@ class circle(SVGelement): The circle creates an element using a x, y and radius values eg """ - def __init__(self, cx=None, cy=None, r=None, fill=None,stroke=None,stroke_width=None,**args): + def __init__(self, cx=None, cy=None, r=None, fill=None, stroke=None, stroke_width=None, **args): if r == None: raise ValueError('r is required') SVGelement.__init__(self, 'circle', {'r': r}, **args) @@ -527,7 +527,7 @@ class line(SVGelement): A line is defined by a begin x,y pair and an end x,y pair """ - def __init__(self, x1=None, y1=None, x2=None, y2=None,stroke=None,stroke_width=None,**args): + def __init__(self, x1=None, y1=None, x2=None, y2=None, stroke=None, stroke_width=None, **args): SVGelement.__init__(self, 'line', **args) if x1 != None: self.attributes['x1'] = x1 @@ -549,7 +549,7 @@ class polyline(SVGelement): a polyline is defined by a list of xy pairs """ - def __init__(self, points, fill=None, stroke=None, stroke_width=None,**args): + def __init__(self, points, fill=None, stroke=None, stroke_width=None, **args): SVGelement.__init__(self, 'polyline', { 'points': _xypointlist(points)}, **args) if fill != None: @@ -566,7 +566,7 @@ class polygon(SVGelement): a polygon is defined by a list of xy pairs """ - def __init__(self, points, fill=None, stroke=None, stroke_width=None,**args): + def __init__(self, points, fill=None, stroke=None, stroke_width=None, **args): SVGelement.__init__( self, 'polygon', {'points': _xypointlist(points)}, **args) if fill != None: @@ -583,7 +583,7 @@ class path(SVGelement): a path is defined by a path object and optional width, stroke and fillcolor """ - def __init__(self, pathdata, fill=None, stroke=None, stroke_width=None,id=None,**args): + def __init__(self, pathdata, fill=None, stroke=None, stroke_width=None, id=None, **args): SVGelement.__init__(self, 'path', {'d': str(pathdata)}, **args) if stroke != None: self.attributes['stroke'] = stroke @@ -601,7 +601,7 @@ class text(SVGelement): a text element can bge used for displaying text on the screen """ - def __init__(self, x=None, y=None, text=None, font_size=None,font_family=None,text_anchor=None,**args): + def __init__(self, x=None, y=None, text=None, font_size=None, font_family=None, text_anchor=None, **args): SVGelement.__init__(self, 'text', **args) if x != None: self.attributes['x'] = x @@ -637,7 +637,7 @@ class pattern(SVGelement): in x and y to cover the areas to be painted. """ - def __init__(self, x=None, y=None, width=None, height=None,patternUnits=None,**args): + def __init__(self, x=None, y=None, width=None, height=None, patternUnits=None, **args): SVGelement.__init__(self, 'pattern', **args) if x != None: self.attributes['x'] = x @@ -684,7 +684,7 @@ class lineargradient(SVGelement): stop elements van be added to define the gradient colors. 
""" - def __init__(self, x1=None, y1=None, x2=None, y2=None,id=None,**args): + def __init__(self, x1=None, y1=None, x2=None, y2=None, id=None, **args): SVGelement.__init__(self, 'linearGradient', **args) if x1 != None: self.attributes['x1'] = x1 @@ -705,7 +705,7 @@ class radialgradient(SVGelement): stop elements van be added to define the gradient colors. """ - def __init__(self, cx=None, cy=None, r=None, fx=None,fy=None,id=None,**args): + def __init__(self, cx=None, cy=None, r=None, fx=None, fy=None, id=None, **args): SVGelement.__init__(self, 'radialGradient', **args) if cx != None: self.attributes['cx'] = cx @@ -749,11 +749,11 @@ class image(SVGelement): adds an image to the drawing. Supported formats are .png, .jpg and .svg. """ - def __init__(self, url, x=None, y=None, width=None,height=None,**args): + def __init__(self, url, x=None, y=None, width=None, height=None, **args): if width == None or height == None: raise ValueError('both height and width are required') SVGelement.__init__( - self, 'image', {'xlink:href': url, 'width': width, 'height':height}, **args) + self, 'image', {'xlink:href': url, 'width': width, 'height': height}, **args) if x != None: self.attributes['x'] = x if y != None: @@ -777,7 +777,7 @@ class marker(SVGelement): add an element to it which should be used as a marker. """ - def __init__(self, id=None, viewBox=None, refx=None, refy=None,markerWidth=None,markerHeight=None,**args): + def __init__(self, id=None, viewBox=None, refx=None, refy=None, markerWidth=None, markerHeight=None, **args): SVGelement.__init__(self, 'marker', **args) if id != None: self.attributes['id'] = id @@ -851,7 +851,7 @@ class use(SVGelement): references a symbol by linking to its id and its position, height and width """ - def __init__(self, link, x=None, y=None, width=None,height=None,**args): + def __init__(self, link, x=None, y=None, width=None, height=None, **args): SVGelement.__init__(self, 'use', {'xlink:href': link}, **args) if x != None: self.attributes['x'] = x @@ -904,7 +904,7 @@ class animate(SVGelement): animates an attribute. """ - def __init__(self, attribute, fr=None, to=None, dur=None,**args): + def __init__(self, attribute, fr=None, to=None, dur=None, **args): SVGelement.__init__( self, 'animate', {'attributeName': attribute}, **args) if fr != None: @@ -935,7 +935,7 @@ class animateTransform(SVGelement): transform an element from and to a value. 
""" - def __init__(self, type=None, fr=None, to=None, dur=None,**args): + def __init__(self, type=None, fr=None, to=None, dur=None, **args): SVGelement.__init__(self, 'animateTransform', { 'attributeName': 'transform'}, **args) # As far as I know the attributeName is always transform @@ -955,7 +955,7 @@ class animateColor(SVGelement): Animates the color of a element """ - def __init__(self, attribute, type=None, fr=None, to=None,dur=None,**args): + def __init__(self, attribute, type=None, fr=None, to=None, dur=None, **args): SVGelement.__init__(self, 'animateColor', { 'attributeName': attribute}, **args) if type != None: diff --git a/wqflask/wqflask/api/mapping.py b/wqflask/wqflask/api/mapping.py index cbef96eb..f8b0d8bd 100644 --- a/wqflask/wqflask/api/mapping.py +++ b/wqflask/wqflask/api/mapping.py @@ -53,7 +53,8 @@ def do_mapping_for_api(start_vars): header_row = ["name", "chr", "cM", "lod_score"] if mapping_params['num_perm'] > 0: _sperm_output, _suggestive, _significant, result_markers = rqtl_mapping.run_rqtl_geno(vals, dataset, mapping_params['rqtl_method'], mapping_params['rqtl_model'], - mapping_params['perm_check'], mapping_params['num_perm'], + mapping_params['perm_check'], mapping_params[ + 'num_perm'], mapping_params['do_control'], mapping_params[ 'control_marker'], mapping_params['manhattan_plot'], mapping_params['pair_scan']) diff --git a/wqflask/wqflask/api/router.py b/wqflask/wqflask/api/router.py index 9d3446db..aec74c9e 100644 --- a/wqflask/wqflask/api/router.py +++ b/wqflask/wqflask/api/router.py @@ -98,8 +98,8 @@ def get_groups_list(species_name=None): "Name": group[3], "FullName": group[4], "public": group[5], - "MappingMethodId": group[6], - "GeneticType": group[7] + "MappingMethodId": group[6], + "GeneticType": group[7] } groups_list.append(group_dict) @@ -143,8 +143,8 @@ def get_group_info(group_name, species_name=None, file_format="json"): "Name": group[3], "FullName": group[4], "public": group[5], - "MappingMethodId": group[6], - "GeneticType": group[7] + "MappingMethodId": group[6], + "GeneticType": group[7] } return flask.jsonify(group_dict) @@ -192,11 +192,11 @@ def get_datasets_for_group(group_name, species_name=None): "Short_Abbreviation": dataset[3], "Long_Abbreviation": dataset[4], "FullName": dataset[5], - "ShortName": dataset[6], - "CreateTime": dataset[7], - "public": dataset[8], - "confidentiality": dataset[9], - "DataScale": dataset[10] + "ShortName": dataset[6], + "CreateTime": dataset[7], + "public": dataset[8], + "confidentiality": dataset[9], + "DataScale": dataset[10] } datasets_list.append(dataset_dict) @@ -249,10 +249,10 @@ def get_dataset_info(dataset_name, group_name=None, file_format="json"): "full_name": dataset[2], "short_name": dataset[3], "data_scale": dataset[4], - "tissue_id": dataset[5], - "tissue": dataset[6], - "public": dataset[7], - "confidential": dataset[8] + "tissue_id": dataset[5], + "tissue": dataset[6], + "public": dataset[7], + "confidential": dataset[8] } datasets_list.append(dataset_dict) @@ -286,7 +286,7 @@ def get_dataset_info(dataset_name, group_name=None, file_format="json"): "description": dataset[2], "pubmed_id": dataset[5], "title": dataset[6], - "year": dataset[7] + "year": dataset[7] } elif dataset[4]: dataset_dict = { @@ -777,7 +777,7 @@ def get_genotypes(group_name, file_format="csv", dataset_name=None): i += 1 csv_writer = csv.writer( - si, delimiter="\t", escapechar="\\", quoting = csv.QUOTE_NONE) + si, delimiter="\t", escapechar="\\", quoting=csv.QUOTE_NONE) else: return return_error(code=204, 
source=request.url_rule.rule, title="No Results", details="") elif file_format == "rqtl2": diff --git a/wqflask/wqflask/collect.py b/wqflask/wqflask/collect.py index 58518639..01274ba9 100644 --- a/wqflask/wqflask/collect.py +++ b/wqflask/wqflask/collect.py @@ -77,13 +77,13 @@ def collections_add(): return render_template("collections/add.html", traits=traits, collections=collections, - ) + ) else: hash = request.args['hash'] return render_template("collections/add.html", hash=hash, collections=collections, - ) + ) @app.route("/collections/new") @@ -147,7 +147,7 @@ def list_collections(): return render_template("collections/list.html", params=params, collections=user_collections, - ) + ) @app.route("/collections/remove", methods=('POST',)) @@ -226,7 +226,7 @@ def view_collection(): else: return render_template("collections/view.html", **collection_info - ) + ) @app.route("/collections/change_name", methods=('POST',)) diff --git a/wqflask/wqflask/correlation/corr_scatter_plot.py b/wqflask/wqflask/correlation/corr_scatter_plot.py index 4f756f58..cafb9265 100644 --- a/wqflask/wqflask/correlation/corr_scatter_plot.py +++ b/wqflask/wqflask/correlation/corr_scatter_plot.py @@ -18,12 +18,12 @@ class CorrScatterPlot: def __init__(self, params): if "Temp" in params['dataset_1']: self.dataset_1 = data_set.create_dataset( - dataset_name="Temp", dataset_type="Temp", group_name = params['dataset_1'].split("_")[1]) + dataset_name="Temp", dataset_type="Temp", group_name=params['dataset_1'].split("_")[1]) else: self.dataset_1 = data_set.create_dataset(params['dataset_1']) if "Temp" in params['dataset_2']: self.dataset_2 = data_set.create_dataset( - dataset_name="Temp", dataset_type="Temp", group_name = params['dataset_2'].split("_")[1]) + dataset_name="Temp", dataset_type="Temp", group_name=params['dataset_2'].split("_")[1]) else: self.dataset_2 = data_set.create_dataset(params['dataset_2']) diff --git a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py index aa39bc5c..9b0b6118 100644 --- a/wqflask/wqflask/correlation/show_corr_results.py +++ b/wqflask/wqflask/correlation/show_corr_results.py @@ -79,7 +79,7 @@ class CorrelationResults: with Bench("Doing correlations"): if start_vars['dataset'] == "Temp": self.dataset = data_set.create_dataset( - dataset_name="Temp", dataset_type="Temp", group_name = start_vars['group']) + dataset_name="Temp", dataset_type="Temp", group_name=start_vars['group']) self.trait_id = start_vars['trait_id'] self.this_trait = create_trait(dataset=self.dataset, name=self.trait_id, @@ -609,14 +609,14 @@ def get_header_fields(data_type, corr_method): 'Location', 'Mean', 'Sample rho', - 'N', - 'Sample p(rho)', - 'Lit rho', - 'Tissue rho', - 'Tissue p(rho)', - 'Max LRS', - 'Max LRS Location', - 'Additive Effect'] + 'N', + 'Sample p(rho)', + 'Lit rho', + 'Tissue rho', + 'Tissue p(rho)', + 'Max LRS', + 'Max LRS Location', + 'Additive Effect'] else: header_fields = ['Index', 'Record', @@ -625,14 +625,14 @@ def get_header_fields(data_type, corr_method): 'Location', 'Mean', 'Sample r', - 'N', - 'Sample p(r)', - 'Lit r', - 'Tissue r', - 'Tissue p(r)', - 'Max LRS', - 'Max LRS Location', - 'Additive Effect'] + 'N', + 'Sample p(r)', + 'Lit r', + 'Tissue r', + 'Tissue p(r)', + 'Max LRS', + 'Max LRS Location', + 'Additive Effect'] elif data_type == "Publish": if corr_method == "spearman": header_fields = ['Index', diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py index 
c1bf3daa..3a54a218 100644 --- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py +++ b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py @@ -211,7 +211,7 @@ class CorrelationMatrix: this_group_name = self.trait_list[0][1].group.name temp_dataset = data_set.create_dataset( - dataset_name="Temp", dataset_type="Temp", group_name = this_group_name) + dataset_name="Temp", dataset_type="Temp", group_name=this_group_name) temp_dataset.group.get_samplelist() for i, pca_trait in enumerate(pca_traits): trait_id = "PCA" + str(i + 1) + "_" + temp_dataset.group.species + "_" + \ diff --git a/wqflask/wqflask/ctl/ctl_analysis.py b/wqflask/wqflask/ctl/ctl_analysis.py index a0fb34d8..bb928ec5 100644 --- a/wqflask/wqflask/ctl/ctl_analysis.py +++ b/wqflask/wqflask/ctl/ctl_analysis.py @@ -154,7 +154,7 @@ class CTL: # Perform the CTL scan res = self.r_CTLscan(rGeno, rPheno, strategy=strategy, - nperm=nperm, parametric = parametric, nthreads=6) + nperm=nperm, parametric=parametric, nthreads=6) # Get significant interactions significant = self.r_CTLsignificant(res, significance=significance) diff --git a/wqflask/wqflask/group_manager.py b/wqflask/wqflask/group_manager.py index b7e7e38a..04a100ba 100644 --- a/wqflask/wqflask/group_manager.py +++ b/wqflask/wqflask/group_manager.py @@ -120,14 +120,16 @@ def add_or_edit_group(): if "admin_emails_to_add" in params: admin_emails = params['admin_emails_to_add'].split(",") for email in admin_emails: - user_details = get_user_by_unique_column("email_address", email) + user_details = get_user_by_unique_column( + "email_address", email) if user_details: admin_user_ids.add(user_details['user_id']) #send_group_invites(params['group_id'], user_email_list = admin_emails, user_type="admins") if "member_emails_to_add" in params: member_emails = params['member_emails_to_add'].split(",") for email in member_emails: - user_details = get_user_by_unique_column("email_address", email) + user_details = get_user_by_unique_column( + "email_address", email) if user_details: member_user_ids.add(user_details['user_id']) #send_group_invites(params['group_id'], user_email_list = user_emails, user_type="members") @@ -156,7 +158,7 @@ def send_group_invites(group_id, user_email_list=[], user_type="members"): continue else: send_verification_email(user_details, template_name="email/group_verification.txt", - key_prefix="verification_code", subject = "You've been invited to join a GeneNetwork user group") + key_prefix="verification_code", subject="You've been invited to join a GeneNetwork user group") else: temp_password = ''.join(random.choice( string.ascii_uppercase + string.digits) for _ in range(6)) diff --git a/wqflask/wqflask/interval_analyst/GeneUtil.py b/wqflask/wqflask/interval_analyst/GeneUtil.py index 04980281..5e86ae31 100644 --- a/wqflask/wqflask/interval_analyst/GeneUtil.py +++ b/wqflask/wqflask/interval_analyst/GeneUtil.py @@ -60,7 +60,8 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'): newdict["snpDensity"] = newdict["snpCount"] = 0 try: - newdict['GeneLength'] = 1000.0 * (newdict['TxEnd'] - newdict['TxStart']) + newdict['GeneLength'] = 1000.0 * \ + (newdict['TxEnd'] - newdict['TxStart']) except: pass diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py index f3b1b1fc..5bf8822a 100644 --- a/wqflask/wqflask/marker_regression/display_mapping_results.py +++ b/wqflask/wqflask/marker_regression/display_mapping_results.py @@ -591,7 +591,7 @@ class DisplayMappingResults: 
intCanvas = Image.new("RGBA", size=(self.graphWidth, self.graphHeight)) with Bench("Drawing Plot"): gifmap = self.plotIntMapping( - intCanvas, startMb=self.startMb, endMb=self.endMb, showLocusForm= showLocusForm) + intCanvas, startMb=self.startMb, endMb=self.endMb, showLocusForm=showLocusForm) self.gifmap = gifmap.__str__() @@ -610,7 +610,7 @@ class DisplayMappingResults: intCanvasX2 = Image.new("RGBA", size=( self.graphWidth * 2, self.graphHeight * 2)) gifmapX2 = self.plotIntMapping( - intCanvasX2, startMb=self.startMb, endMb=self.endMb, showLocusForm= showLocusForm, zoom=2) + intCanvasX2, startMb=self.startMb, endMb=self.endMb, showLocusForm=showLocusForm, zoom=2) intCanvasX2.save( "{}.png".format( os.path.join(webqtlConfig.GENERATED_IMAGE_DIR, @@ -628,8 +628,8 @@ class DisplayMappingResults: name=showLocusForm, submit=HtmlGenWrapper.create_input_tag(type_='hidden')) - hddn = {'FormID': 'showDatabase', 'ProbeSetID': '_', 'database': fd.RISet+ \ - "Geno",'CellID':'_', 'RISet':fd.RISet, 'incparentsf1':'ON'} + hddn = {'FormID': 'showDatabase', 'ProbeSetID': '_', 'database': fd.RISet + \ + "Geno", 'CellID': '_', 'RISet': fd.RISet, 'incparentsf1': 'ON'} for key in hddn.keys(): showLocusForm.append(HtmlGenWrapper.create_input_tag( name=key, value=hddn[key], type_='hidden')) @@ -651,7 +651,7 @@ class DisplayMappingResults: btminfo.append( 'Mapping using genotype data as a trait will result in infinity LRS at one locus. In order to display the result properly, all LRSs higher than 100 are capped at 100.') - def plotIntMapping(self, canvas, offset=(80, 120, 90, 100), zoom=1, startMb= None, endMb = None, showLocusForm = ""): + def plotIntMapping(self, canvas, offset=(80, 120, 90, 100), zoom=1, startMb=None, endMb=None, showLocusForm=""): im_drawer = ImageDraw.Draw(canvas) # calculating margins xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset @@ -730,7 +730,7 @@ class DisplayMappingResults: zoom=zoom, startMb=startMb, endMb=endMb) if self.SNPChecked: self.drawSNPTrackNew( - canvas, offset=newoffset, zoom=2 * zoom, startMb=startMb, endMb = endMb) + canvas, offset=newoffset, zoom=2 * zoom, startMb=startMb, endMb=endMb) # BEGIN HaplotypeAnalyst if self.haplotypeAnalystChecked: self.drawHaplotypeBand( @@ -758,7 +758,7 @@ class DisplayMappingResults: return gifmap - def drawBootStrapResult(self, canvas, nboot, drawAreaHeight, plotXScale, offset=(40, 120, 80, 10), zoom=1, startMb= None, endMb = None): + def drawBootStrapResult(self, canvas, nboot, drawAreaHeight, plotXScale, offset=(40, 120, 80, 10), zoom=1, startMb=None, endMb=None): im_drawer = ImageDraw.Draw(canvas) xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset plotWidth = canvas.size[0] - xLeftOffset - xRightOffset @@ -873,14 +873,14 @@ class DisplayMappingResults: font=VERDANA_FILE, size=13 * fontZoom) im_drawer.rectangle( xy=((canvas.size[0] - bootOffset, yZero - bootHeightThresh), - (canvas.size[0] - bootOffset - 15*zoom, yZero)), + (canvas.size[0] - bootOffset - 15 * zoom, yZero)), fill=YELLOW, outline=BLACK) im_drawer.line( xy=((canvas.size[0] - bootOffset + 4, yZero), (canvas.size[0] - bootOffset, yZero)), fill=BLACK) TEXT_Y_DISPLACEMENT = -8 - im_drawer.text(xy=(canvas.size[0] - bootOffset + 10, yZero+TEXT_Y_DISPLACEMENT), text='0%', + im_drawer.text(xy=(canvas.size[0] - bootOffset + 10, yZero + TEXT_Y_DISPLACEMENT), text='0%', font=bootScaleFont, fill=BLACK) for item in bootScale: @@ -891,7 +891,7 @@ class DisplayMappingResults: xy=((canvas.size[0] - bootOffset + 4, bootY), (canvas.size[0] - bootOffset, bootY)), 
fill=BLACK) - im_drawer.text(xy=(canvas.size[0] - bootOffset + 10, bootY+TEXT_Y_DISPLACEMENT), + im_drawer.text(xy=(canvas.size[0] - bootOffset + 10, bootY + TEXT_Y_DISPLACEMENT), text='%2.1f' % item, font=bootScaleFont, fill=BLACK) if self.legendChecked: @@ -910,7 +910,7 @@ class DisplayMappingResults: text='Frequency of the Peak LRS', font=smallLabelFont, fill=BLACK) - def drawProbeSetPosition(self, canvas, plotXScale, offset=(40, 120, 80, 10), zoom=1, startMb= None, endMb = None): + def drawProbeSetPosition(self, canvas, plotXScale, offset=(40, 120, 80, 10), zoom=1, startMb=None, endMb=None): im_drawer = ImageDraw.Draw(canvas) if len(self.traitList) != 1: return @@ -987,7 +987,7 @@ class DisplayMappingResults: draw_open_polygon(canvas, xy=traitPixel, outline=BLACK, fill=self.TRANSCRIPT_LOCATION_COLOR) - def drawSNPTrackNew(self, canvas, offset=(40, 120, 80, 10), zoom=1, startMb= None, endMb = None): + def drawSNPTrackNew(self, canvas, offset=(40, 120, 80, 10), zoom=1, startMb=None, endMb=None): im_drawer = ImageDraw.Draw(canvas) if self.plotScale != 'physic' or self.selectedChr == -1 or not self.diffCol: return @@ -1059,7 +1059,7 @@ class DisplayMappingResults: im_drawer.rectangle( xy=((rightShift, yPaddingTop + kstep * 15), - (rectWidth + rightShift, yPaddingTop + 10+kstep*15)), + (rectWidth + rightShift, yPaddingTop + 10 + kstep * 15)), fill=thisLRSColor, outline=BLACK) im_drawer.text( text=name, xy=(rectWidth + 2 + rightShift, @@ -1303,7 +1303,7 @@ class DisplayMappingResults: text=string4, xy=(xLeftOffset, y_constant * fontZoom), font=labelFont, fill=labelColor) - def drawGeneBand(self, canvas, gifmap, plotXScale, offset=(40, 120, 80, 10), zoom=1, startMb= None, endMb = None): + def drawGeneBand(self, canvas, gifmap, plotXScale, offset=(40, 120, 80, 10), zoom=1, startMb=None, endMb=None): im_drawer = ImageDraw.Draw(canvas) if self.plotScale != 'physic' or self.selectedChr == -1 or not self.geneCol: return @@ -1434,7 +1434,7 @@ class DisplayMappingResults: xy=( (geneStartPix, geneYLocation + \ self.EACH_GENE_HEIGHT / 2 * zoom), - (geneEndPix, geneYLocation + self.EACH_GENE_HEIGHT / 2 *zoom)), + (geneEndPix, geneYLocation + self.EACH_GENE_HEIGHT / 2 * zoom)), fill=outlineColor, width=1) # draw the arrows @@ -1558,7 +1558,7 @@ class DisplayMappingResults: target="_blank")) # BEGIN HaplotypeAnalyst - def drawHaplotypeBand(self, canvas, gifmap, plotXScale, offset=(40, 120, 80, 10), zoom=1, startMb= None, endMb = None): + def drawHaplotypeBand(self, canvas, gifmap, plotXScale, offset=(40, 120, 80, 10), zoom=1, startMb=None, endMb=None): if self.plotScale != 'physic' or self.selectedChr == -1 or not self.geneCol: return @@ -1630,7 +1630,7 @@ class DisplayMappingResults: geneStartPix = xLeftOffset + plotXScale * \ (float(txStart) - startMb) - 0 geneEndPix = xLeftOffset + plotXScale * \ - (float(txEnd) - startMb) + 0 + (float(txEnd) - startMb) + 0 if oldgeneEndPix >= xLeftOffset: drawStart = oldgeneEndPix + 4 @@ -1707,19 +1707,19 @@ class DisplayMappingResults: im_drawer.line( xy=((drawStart, - geneYLocation + 7 + 2*ind*self.EACH_GENE_HEIGHT*zoom), + geneYLocation + 7 + 2 * ind * self.EACH_GENE_HEIGHT * zoom), (drawEnd, - geneYLocation + 7 + 2*ind*self.EACH_GENE_HEIGHT*zoom)), - fill= mylineColor, width=zoom * (self.EACH_GENE_HEIGHT + 2)) + geneYLocation + 7 + 2 * ind * self.EACH_GENE_HEIGHT * zoom)), + fill=mylineColor, width=zoom * (self.EACH_GENE_HEIGHT + 2)) fillColor = BLACK outlineColor = BLACK if lastGene == 0: im_drawer.rectangle( xy=((geneStartPix, - geneYLocation + 2 * 
ind*self.EACH_GENE_HEIGHT*zoom), + geneYLocation + 2 * ind * self.EACH_GENE_HEIGHT * zoom), (geneEndPix, - geneYLocation + 2 *ind*self.EACH_GENE_HEIGHT + 2*self.EACH_GENE_HEIGHT*zoom)), + geneYLocation + 2 * ind * self.EACH_GENE_HEIGHT + 2 * self.EACH_GENE_HEIGHT * zoom)), outline=outlineColor, fill=fillColor) COORDS = "%d, %d, %d, %d" % ( @@ -1741,10 +1741,10 @@ class DisplayMappingResults: mylineColor = self.HAPLOTYPE_RECOMBINATION im_drawer.line( xy=((plotRight, - geneYLocation + 7 + 2*ind*self.EACH_GENE_HEIGHT*zoom), + geneYLocation + 7 + 2 * ind * self.EACH_GENE_HEIGHT * zoom), (drawEnd, - geneYLocation + 7 + 2*ind*self.EACH_GENE_HEIGHT*zoom)), - fill= mylineColor, width=zoom * (self.EACH_GENE_HEIGHT + 2)) + geneYLocation + 7 + 2 * ind * self.EACH_GENE_HEIGHT * zoom)), + fill=mylineColor, width=zoom * (self.EACH_GENE_HEIGHT + 2)) if lastGene == 0: draw_rotated_text( @@ -1752,7 +1752,7 @@ class DisplayMappingResults: font=ImageFont.truetype(font=VERDANA_FILE, size=12), xy=(geneStartPix, - geneYLocation + 17 + 2*maxind*self.EACH_GENE_HEIGHT*zoom), + geneYLocation + 17 + 2 * maxind * self.EACH_GENE_HEIGHT * zoom), fill=BLACK, angle=-90) oldgeneEndPix = geneEndPix @@ -1778,21 +1778,21 @@ class DisplayMappingResults: im_drawer.text( text="%s" % (samplelist[j]), xy=((xLeftOffset + plotWidth + 10), - geneYLocation + 11 + 2*ind*self.EACH_GENE_HEIGHT*zoom), + geneYLocation + 11 + 2 * ind * self.EACH_GENE_HEIGHT * zoom), font=ImageFont.truetype( font=VERDANA_FILE, size=12), fill=BLACK) im_drawer.text( text="%2.2f" % (expr), xy=((xLeftOffset + plotWidth + 60), - geneYLocation + 11 + 2*ind*self.EACH_GENE_HEIGHT*zoom), + geneYLocation + 11 + 2 * ind * self.EACH_GENE_HEIGHT * zoom), font=ImageFont.truetype( font=VERDANA_FILE, size=12), fill=BLACK) # END HaplotypeAnalyst - def drawClickBand(self, canvas, gifmap, plotXScale, offset=(40, 120, 80, 10), zoom=1, startMb= None, endMb = None): + def drawClickBand(self, canvas, gifmap, plotXScale, offset=(40, 120, 80, 10), zoom=1, startMb=None, endMb=None): im_drawer = ImageDraw.Draw(canvas) if self.plotScale != 'physic' or self.selectedChr == -1: return @@ -1817,8 +1817,8 @@ class DisplayMappingResults: numBasesCurrentlyOnScreen = self.kONE_MILLION * \ abs(startMb - endMb) # Number of bases on screen now - flankingWidthInBases = int ( - min((float(numBasesCurrentlyOnScreen) / 2.0), (5*self.kONE_MILLION))) + flankingWidthInBases = int( + min((float(numBasesCurrentlyOnScreen) / 2.0), (5 * self.kONE_MILLION))) webqtlZoomWidth = numBasesCurrentlyOnScreen / 16.0 # Flanking width should be such that we either zoom in to a 10 million base region, or we show the clicked region at the same scale as we are currently seeing. 
@@ -1990,7 +1990,7 @@ class DisplayMappingResults: # end of drawBrowserClickableRegions pass - def drawXAxis(self, canvas, drawAreaHeight, gifmap, plotXScale, showLocusForm, offset=(40, 120, 80, 10), zoom=1, startMb= None, endMb = None): + def drawXAxis(self, canvas, drawAreaHeight, gifmap, plotXScale, showLocusForm, offset=(40, 120, 80, 10), zoom=1, startMb=None, endMb=None): im_drawer = ImageDraw.Draw(canvas) xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset plotWidth = canvas.size[0] - xLeftOffset - xRightOffset @@ -2087,7 +2087,7 @@ class DisplayMappingResults: xy=( xLeftOffset + (plotWidth - im_drawer.textsize( "Megabases", font=megabaseLabelFont)[0]) / 2, - strYLoc + MBLabelFont.font.height + 10*(zoom%2)), + strYLoc + MBLabelFont.font.height + 10 * (zoom % 2)), font=megabaseLabelFont, fill=BLACK) pass else: @@ -2110,7 +2110,7 @@ class DisplayMappingResults: thisChr.append( [_locus.name, _locus.cM - Locus0CM]) else: - for j in (0, nLoci / 4, nLoci / 2, nLoci*3/4, -1): + for j in (0, nLoci / 4, nLoci / 2, nLoci * 3 / 4, -1): while _chr[j].name == ' - ': j += 1 if _chr[j].cM != preLpos: @@ -2162,23 +2162,23 @@ class DisplayMappingResults: yZero + 25)), fill=lineColor) im_drawer.line( - xy=((xLeftOffset + offsetA, yZero + 25), (xLeftOffset+offsetA,\ - yZero + 40 + Zorder*(LRectWidth+3))), + xy=((xLeftOffset + offsetA, yZero + 25), (xLeftOffset + offsetA,\ + yZero + 40 + Zorder * (LRectWidth + 3))), fill=lineColor) rectColor = ORANGE else: im_drawer.line( - xy=((xLeftOffset + offsetA, yZero + 40+Zorder*(LRectWidth+3)-3), (\ - xLeftOffset + offsetA, yZero + 40+Zorder*(LRectWidth+3))), + xy=((xLeftOffset + offsetA, yZero + 40 + Zorder * (LRectWidth + 3) - 3), (\ + xLeftOffset + offsetA, yZero + 40 + Zorder * (LRectWidth + 3))), fill=lineColor) rectColor = DEEPPINK im_drawer.rectangle( - xy=((xLeftOffset + offsetA, yZero + 40+Zorder*(LRectWidth+3)), + xy=((xLeftOffset + offsetA, yZero + 40 + Zorder * (LRectWidth + 3)), (xLeftOffset + offsetA - LRectHeight, - yZero + 40 + Zorder*(LRectWidth+3)+LRectWidth)), + yZero + 40 + Zorder * (LRectWidth + 3) + LRectWidth)), outline=rectColor, fill=rectColor, width=0) - COORDS = "%d,%d,%d,%d" % (xLeftOffset+offsetA-LRectHeight, yZero+40+Zorder*(LRectWidth+3),\ - xLeftOffset + offsetA, yZero +40+Zorder*(LRectWidth+3)+LRectWidth) + COORDS = "%d,%d,%d,%d" % (xLeftOffset + offsetA - LRectHeight, yZero + 40 + Zorder * (LRectWidth + 3),\ + xLeftOffset + offsetA, yZero + 40 + Zorder * (LRectWidth + 3) + LRectWidth) HREF = "/show_trait?trait_id=%s&dataset=%s" % ( Lname, self.dataset.group.name + "Geno") #HREF="javascript:showDatabase3('%s','%s','%s','');" % (showLocusForm,fd.RISet+"Geno", Lname) @@ -2203,13 +2203,13 @@ class DisplayMappingResults: text="Centimorgans", xy=(xLeftOffset + (plotWidth - im_drawer.textsize( "Centimorgans", font=centimorganLabelFont)[0]) / 2, - strYLoc + MBLabelFont.font.height + 10 * (zoom %2)), + strYLoc + MBLabelFont.font.height + 10 * (zoom % 2)), font=centimorganLabelFont, fill=BLACK) im_drawer.line(xy=((xLeftOffset, yZero), (xLeftOffset + plotWidth, yZero)), fill=BLACK, width=X_AXIS_THICKNESS) # Draw the X axis itself - def drawQTL(self, canvas, drawAreaHeight, gifmap, plotXScale, offset=(40, 120, 80, 10), zoom=1, startMb= None, endMb = None): + def drawQTL(self, canvas, drawAreaHeight, gifmap, plotXScale, offset=(40, 120, 80, 10), zoom=1, startMb=None, endMb=None): im_drawer = ImageDraw.Draw(canvas) xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset plotWidth = canvas.size[0] - xLeftOffset - xRightOffset 
@@ -2364,7 +2364,7 @@ class DisplayMappingResults: TEXT_Y_DISPLACEMENT = -10 im_drawer.text( text=scaleStr, - xy=(xLeftOffset - 4 - im_drawer.textsize(scaleStr, font=LRSScaleFont)[0]-5, + xy=(xLeftOffset - 4 - im_drawer.textsize(scaleStr, font=LRSScaleFont)[0] - 5, yLRS + TEXT_Y_DISPLACEMENT), font=LRSScaleFont, fill=self.LRS_COLOR) @@ -2479,7 +2479,7 @@ class DisplayMappingResults: if k > 0: Xc0, Yc0 = AdditiveCoordXY[k - 1] Xc, Yc = aPoint - if (Yc0 - yZero) * (Yc-yZero) < 0: + if (Yc0 - yZero) * (Yc - yZero) < 0: if Xc == Xc0: # genotype , locus distance is 0 Xcm = Xc else: @@ -2509,7 +2509,7 @@ class DisplayMappingResults: fill=plusColor, width=lineWidth # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) - elif (Yc0 - yZero) * (Yc-yZero) > 0: + elif (Yc0 - yZero) * (Yc - yZero) > 0: if Yc < yZero: im_drawer.line( xy=((Xc0, Yc0), (Xc, Yc)), @@ -2660,7 +2660,7 @@ class DisplayMappingResults: if k > 0: Xc0, Yc0 = AdditiveCoordXY[k - 1] Xc, Yc = aPoint - if (Yc0 - yZero) * (Yc-yZero) < 0: + if (Yc0 - yZero) * (Yc - yZero) < 0: if Xc == Xc0: # genotype , locus distance is 0 Xcm = Xc else: @@ -2689,7 +2689,7 @@ class DisplayMappingResults: fill=plusColor, width=lineWidth # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) - elif (Yc0 - yZero) * (Yc-yZero) > 0: + elif (Yc0 - yZero) * (Yc - yZero) > 0: if Yc < yZero: im_drawer.line( xy=((Xc0, Yc0), (Xc, Yc)), fill=plusColor, @@ -2726,7 +2726,7 @@ class DisplayMappingResults: if k > 0: Xc0, Yc0 = DominanceCoordXY[k - 1] Xc, Yc = aPoint - if (Yc0 - yZero) * (Yc-yZero) < 0: + if (Yc0 - yZero) * (Yc - yZero) < 0: if Xc == Xc0: # genotype , locus distance is 0 Xcm = Xc else: @@ -2754,7 +2754,7 @@ class DisplayMappingResults: fill=plusColor, width=lineWidth # , clipX=(xLeftOffset, xLeftOffset + plotWidth) ) - elif (Yc0 - yZero) * (Yc-yZero) > 0: + elif (Yc0 - yZero) * (Yc - yZero) > 0: if Yc < yZero: im_drawer.line( xy=((Xc0, Yc0), (Xc, Yc)), @@ -2805,8 +2805,8 @@ class DisplayMappingResults: scaleStr = "%2.3f" % item im_drawer.text( text=scaleStr, - xy= (xLeftOffset + plotWidth + 6, - additiveY + TEXT_Y_DISPLACEMENT), + xy=(xLeftOffset + plotWidth + 6, + additiveY + TEXT_Y_DISPLACEMENT), font=additiveScaleFont, fill=self.ADDITIVE_COLOR_POSITIVE) im_drawer.line( @@ -2818,7 +2818,7 @@ class DisplayMappingResults: xy=((xLeftOffset, yZero), (xLeftOffset, yTopOffset + 30 * (zoom - 1))), fill=self.LRS_COLOR, width=1 * zoom) # the blue line running up the y axis - def drawGraphBackground(self, canvas, gifmap, offset=(80, 120, 80, 50), zoom=1, startMb= None, endMb = None): + def drawGraphBackground(self, canvas, gifmap, offset=(80, 120, 80, 50), zoom=1, startMb=None, endMb=None): # conditions # multiple Chromosome view # single Chromosome Physical @@ -2953,10 +2953,10 @@ class DisplayMappingResults: "SNP Count", "SNP Density", "Avg Expr", - "Human Chr", - "Mb Start (hg19)", - "Literature Correlation", - "Gene Description"] + "Human Chr", + "Mb Start (hg19)", + "Literature Correlation", + "Gene Description"] else: gene_table_header_list = ["", "Index", @@ -3073,12 +3073,12 @@ class DisplayMappingResults: geneIdString, theGO["GeneSymbol"], target="_blank") - ), + ), str(HtmlGenWrapper.create_link_tag( mouseStartString, "{:.6f}".format(txStart), target="_blank") - ), + ), str(HtmlGenWrapper.create_link_tag( "javascript:rangeView('{}', {:f}, {:f})".format( str(chr_as_int), diff --git a/wqflask/wqflask/marker_regression/gemma_mapping.py b/wqflask/wqflask/marker_regression/gemma_mapping.py index 289f1d5c..f88c5ac8 100644 --- 
a/wqflask/wqflask/marker_regression/gemma_mapping.py +++ b/wqflask/wqflask/marker_regression/gemma_mapping.py @@ -150,7 +150,7 @@ def gen_covariates_file(this_dataset, covariates, samples): if dataset_name == "Temp": temp_group = trait_name.split("_")[2] dataset_ob = create_dataset( - dataset_name="Temp", dataset_type="Temp", group_name = temp_group) + dataset_name="Temp", dataset_type="Temp", group_name=temp_group) else: dataset_ob = create_dataset(covariate.split(":")[1]) trait_ob = create_trait(dataset=dataset_ob, diff --git a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py index 5d16abde..4d6715ba 100644 --- a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py +++ b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py @@ -2,7 +2,8 @@ import os import math import string import random -import json, re +import json +import re from base import webqtlConfig from base.trait import GeneralTrait @@ -28,7 +29,7 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo output_filename = (f"{this_dataset.group.name}_GWA_" + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) - ) + ) bootstrap_filename = None permu_filename = None @@ -37,7 +38,7 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo bootstrap_filename = (f"{this_dataset.group.name}_BOOTSTRAP_" + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) - ) + ) opt_list.append("-b") opt_list.append(f"--n_bootstrap {str(num_bootstrap)}") @@ -47,7 +48,7 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo permu_filename = ("{this_dataset.group.name}_PERM_" + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) - ) + ) opt_list.append("-n " + str(num_perm)) opt_list.append( "--permu_output " + webqtlConfig.GENERATED_IMAGE_DIR + permu_filename + ".txt") diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py index cf8cf514..1fa3dffe 100644 --- a/wqflask/wqflask/marker_regression/rqtl_mapping.py +++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py @@ -110,7 +110,7 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec if do_control == "true": logger.info("Using covariate") result_data_frame = scantwo( - cross_object, pheno="the_pheno", addcovar=covars, model=model, method=method, n_cluster = 16) + cross_object, pheno="the_pheno", addcovar=covars, model=model, method=method, n_cluster=16) else: logger.info("No covariates") result_data_frame = scantwo( @@ -140,14 +140,14 @@ def run_rqtl_geno(vals, samples, dataset, mapping_scale, method, model, permChec cross_object, strata_ob = add_perm_strata( cross_object, perm_strata_list) if do_control == "true" or cofactors != "": - perm_data_frame = scanone(cross_object, pheno_col="the_pheno", addcovar=covars, n_perm = int( - num_perm), perm_strata = strata_ob, model=model, method=method) + perm_data_frame = scanone(cross_object, pheno_col="the_pheno", addcovar=covars, n_perm=int( + num_perm), perm_strata=strata_ob, model=model, method=method) else: perm_data_frame = scanone( - cross_object, pheno_col="the_pheno", n_perm=num_perm, perm_strata = strata_ob, model=model, method=method) + cross_object, pheno_col="the_pheno", n_perm=num_perm, perm_strata=strata_ob, model=model, method=method) else: if do_control == "true" or cofactors != "": - perm_data_frame = scanone(cross_object, 
pheno_col="the_pheno", addcovar=covars, n_perm = int( + perm_data_frame = scanone(cross_object, pheno_col="the_pheno", addcovar=covars, n_perm=int( num_perm), model=model, method=method) else: perm_data_frame = scanone( diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py index 81e0a03f..32ccec48 100644 --- a/wqflask/wqflask/marker_regression/run_mapping.py +++ b/wqflask/wqflask/marker_regression/run_mapping.py @@ -67,7 +67,8 @@ class RunMapping: if 'genofile' in start_vars: if start_vars['genofile'] != "": self.genofile_string = start_vars['genofile'] - self.dataset.group.genofile = self.genofile_string.split(":")[0] + self.dataset.group.genofile = self.genofile_string.split(":")[ + 0] genofile_samplelist = get_genofile_samplelist(self.dataset) all_samples_ordered = self.dataset.group.all_samples_ordered() diff --git a/wqflask/wqflask/model.py b/wqflask/wqflask/model.py index 822900cc..a222b87c 100644 --- a/wqflask/wqflask/model.py +++ b/wqflask/wqflask/model.py @@ -36,7 +36,7 @@ class User(Base): # json detailing when they became a superuser, otherwise empty superuser = Column(Text) - # if not superuser + # if not superuser logins = relationship("Login", order_by="desc(Login.timestamp)", @@ -47,7 +47,7 @@ class User(Base): user_collections = relationship("UserCollection", order_by="asc(UserCollection.name)", lazy='dynamic', - ) + ) def display_num_collections(self): """ diff --git a/wqflask/wqflask/resource_manager.py b/wqflask/wqflask/resource_manager.py index 61f3b202..b28c1b04 100644 --- a/wqflask/wqflask/resource_manager.py +++ b/wqflask/wqflask/resource_manager.py @@ -116,7 +116,7 @@ def add_group_to_resource(): group_id = request.form['selected_group'] resource_info = get_resource_info(resource_id) default_privileges = resource_info['default_mask'] - return render_template("admin/set_group_privileges.html", resource_id=resource_id, group_id=group_id, default_privileges = default_privileges) + return render_template("admin/set_group_privileges.html", resource_id=resource_id, group_id=group_id, default_privileges=default_privileges) elif all(key in request.form for key in ('data_privilege', 'metadata_privilege', 'admin_privilege')): group_id = request.form['group_id'] group_name = get_group_info(group_id)['name'] diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py index fc48959e..3cbda3dd 100644 --- a/wqflask/wqflask/search_results.py +++ b/wqflask/wqflask/search_results.py @@ -276,7 +276,7 @@ class SearchResultPage: search_operator, self.dataset, search_type['key'] - ) + ) return the_search else: return None diff --git a/wqflask/wqflask/snp_browser/snp_browser.py b/wqflask/wqflask/snp_browser/snp_browser.py index 42fe339e..c4d0e135 100644 --- a/wqflask/wqflask/snp_browser/snp_browser.py +++ b/wqflask/wqflask/snp_browser/snp_browser.py @@ -28,10 +28,10 @@ class SnpBrowser: if self.limit_strains == "true": self.header_fields, self.empty_field_count, self.header_data_names = get_header_list( - variant_type=self.variant_type, strains=self.chosen_strains, empty_columns = self.empty_columns) + variant_type=self.variant_type, strains=self.chosen_strains, empty_columns=self.empty_columns) else: self.header_fields, self.empty_field_count, self.header_data_names = get_header_list( - variant_type=self.variant_type, strains=self.strain_lists, species = self.species_name, empty_columns = self.empty_columns) + variant_type=self.variant_type, strains=self.strain_lists, species=self.species_name, 
empty_columns=self.empty_columns) def initialize_parameters(self, start_vars): if 'first_run' in start_vars: diff --git a/wqflask/wqflask/user_login.py b/wqflask/wqflask/user_login.py index 0d5f1f3e..ff77982f 100644 --- a/wqflask/wqflask/user_login.py +++ b/wqflask/wqflask/user_login.py @@ -65,7 +65,7 @@ def set_password(password): "salt": base64.b64encode(os.urandom(32)), "iterations": 100000, "keylength": 32, - "created_timestamp": timestamp() + "created_timestamp": timestamp() } assert len(password) >= 6, "Password shouldn't be shorter than 6 characters" @@ -132,7 +132,7 @@ def send_email(toaddr, msg, fromaddr="no-reply@genenetwork.org"): logger.info("Successfully sent email to " + toaddr) -def send_verification_email(user_details, template_name="email/user_verification.txt", key_prefix="verification_code", subject = "GeneNetwork e-mail verification"): +def send_verification_email(user_details, template_name="email/user_verification.txt", key_prefix="verification_code", subject="GeneNetwork e-mail verification"): verification_code = str(uuid.uuid4()) key = key_prefix + ":" + verification_code @@ -147,7 +147,7 @@ def send_verification_email(user_details, template_name="email/user_verification return {"recipient": recipient, "subject": subject, "body": body} -def send_invitation_email(user_email, temp_password, template_name="email/user_invitation.txt", subject= "You've been added to a GeneNetwork user group"): +def send_invitation_email(user_email, temp_password, template_name="email/user_invitation.txt", subject="You've been added to a GeneNetwork user group"): recipient = user_email body = render_template(template_name, temp_password) send_email(recipient, subject, body) diff --git a/wqflask/wqflask/user_manager.py b/wqflask/wqflask/user_manager.py index fb26bfb1..cf84ea73 100644 --- a/wqflask/wqflask/user_manager.py +++ b/wqflask/wqflask/user_manager.py @@ -476,7 +476,7 @@ def set_password(password, user): user.password = json.dumps(pwfields.__dict__, sort_keys=True, - ) + ) class VerificationEmail: @@ -677,7 +677,7 @@ def github_oauth2(): user_details = get_user_by_unique_column("github_id", github_user["id"]) if user_details == None: user_details = { - "user_id": str(uuid.uuid4()), "name": github_user["name"].encode("utf-8"), "github_id": github_user["id"], "user_url": github_user["html_url"].encode("utf-8") , "login_type": "github" , "organization": "" , "active": 1 , "confirmed": 1 + "user_id": str(uuid.uuid4()), "name": github_user["name"].encode("utf-8"), "github_id": github_user["id"], "user_url": github_user["html_url"].encode("utf-8"), "login_type": "github", "organization": "", "active": 1, "confirmed": 1 } save_user(user_details, user_details["user_id"]) @@ -705,7 +705,7 @@ def orcid_oauth2(): user_details = { "user_id": str(uuid4()), "name": result_dict["name"], "orcid": result_dict["orcid"], "user_url": "%s/%s" % ( "/".join(ORCID_AUTH_URL.split("/")[:-2]), - result_dict["orcid"]), "login_type": "orcid", "organization": "", "active": 1 , "confirmed": 1 + result_dict["orcid"]), "login_type": "orcid", "organization": "", "active": 1, "confirmed": 1 } save_user(user_details, user_details["user_id"]) diff --git a/wqflask/wqflask/views.py b/wqflask/wqflask/views.py index f75209ff..f9b8f310 100644 --- a/wqflask/wqflask/views.py +++ b/wqflask/wqflask/views.py @@ -141,7 +141,7 @@ def handle_bad_request(e): now = datetime.datetime.utcnow() time_str = now.strftime('%l:%M%p UTC %b %d, %Y') formatted_lines = [request.url - + " (" + time_str + ")"]+traceback.format_exc().splitlines() + 
+ " (" + time_str + ")"] + traceback.format_exc().splitlines() # Handle random animations # Use a cookie to have one animation on refresh -- cgit v1.2.3 From 1895e953747bb5d9d41f249408590e187027b266 Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 7 May 2021 15:57:05 +0000 Subject: Fixed issue that caused /submit_trait page to not load properly --- wqflask/utility/helper_functions.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/helper_functions.py b/wqflask/utility/helper_functions.py index ecc075ae..50e00421 100644 --- a/wqflask/utility/helper_functions.py +++ b/wqflask/utility/helper_functions.py @@ -57,14 +57,16 @@ def get_trait_db_obs(self, trait_db_list): def get_species_groups(): """Group each species into a group""" _menu = {} + for species, group_name in g.db.execute( "SELECT s.MenuName, i.InbredSetName FROM InbredSet i " "INNER JOIN Species s ON s.SpeciesId = i.SpeciesId " "ORDER BY i.SpeciesId ASC, i.Name ASC").fetchall(): - if _menu.get(species): - _menu = _menu[species].append(group_name) - else: - _menu[species] = [group_name] + if species in _menu: + if _menu.get(species): + _menu = _menu[species].append(group_name) + else: + _menu[species] = [group_name] return [{"species": key, "groups": value} for key, value in list(_menu.items())] -- cgit v1.2.3 From 395135fcebae0a2bca476b058f78f9e1ad8b13ee Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Fri, 7 May 2021 13:00:12 +0300 Subject: utility: startup_config: Remove dead comments --- wqflask/utility/startup_config.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/startup_config.py b/wqflask/utility/startup_config.py index 6ef759e0..07322a62 100644 --- a/wqflask/utility/startup_config.py +++ b/wqflask/utility/startup_config.py @@ -16,12 +16,10 @@ def app_config(): if not app.config.get('SECRET_KEY'): import os app.config['SECRET_KEY'] = str(os.urandom(24)) - mode = WEBSERVER_MODE if mode == "DEV" or mode == "DEBUG": app.config['TEMPLATES_AUTO_RELOAD'] = True - # if mode == "DEBUG": - # app.config['EXPLAIN_TEMPLATE_LOADING'] = True <--- use overriding app param instead + print("==========================================") show_settings() @@ -34,9 +32,5 @@ def app_config(): page = requests.get(get_setting("GN_SERVER_URL")) if page.status_code != 200: raise Exception("API server not found!") - - # import utility.elasticsearch_tools as es - # es.test_elasticsearch_connection() - print(("GN2 is running. 
Visit %s[http://localhost:%s/%s](%s)" % (BLUE, str(port), ENDC, get_setting("WEBSERVER_URL")))) -- cgit v1.2.3 From 74befd19d3d24a89d7d382625648ddade74099f7 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Fri, 7 May 2021 13:06:06 +0300 Subject: utility: startup_config: Break up long import statement --- wqflask/utility/startup_config.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/startup_config.py b/wqflask/utility/startup_config.py index 07322a62..d705a0ff 100644 --- a/wqflask/utility/startup_config.py +++ b/wqflask/utility/startup_config.py @@ -1,9 +1,12 @@ from wqflask import app -from utility.tools import WEBSERVER_MODE, show_settings, get_setting_int, get_setting, get_setting_bool -import utility.logger -logger = utility.logger.getLogger(__name__) +from utility.tools import WEBSERVER_MODE +from utility.tools import show_settings +from utility.tools import get_setting_int +from utility.tools import get_setting +from utility.tools import get_setting_bool + BLUE = '\033[94m' GREEN = '\033[92m' -- cgit v1.2.3 From 809351ae0021f48b8a138212764aab4396f564ef Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Fri, 7 May 2021 13:06:31 +0300 Subject: utility: startup_config: Use python3 f-strings --- wqflask/utility/startup_config.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/startup_config.py b/wqflask/utility/startup_config.py index d705a0ff..56d0af6f 100644 --- a/wqflask/utility/startup_config.py +++ b/wqflask/utility/startup_config.py @@ -24,16 +24,17 @@ def app_config(): app.config['TEMPLATES_AUTO_RELOAD'] = True print("==========================================") + show_settings() port = get_setting_int("SERVER_PORT") if get_setting_bool("USE_GN_SERVER"): - print( - ("GN2 API server URL is [" + BLUE + get_setting("GN_SERVER_URL") + ENDC + "]")) + print(f"GN2 API server URL is [{BLUE}GN_SERVER_URL{ENDC}]") import requests page = requests.get(get_setting("GN_SERVER_URL")) if page.status_code != 200: raise Exception("API server not found!") - print(("GN2 is running. Visit %s[http://localhost:%s/%s](%s)" % - (BLUE, str(port), ENDC, get_setting("WEBSERVER_URL")))) + print(f"GN2 is running. 
Visit {BLUE}" + f"[http://localhost:{str(port)}/{ENDC}]" + f"({get_setting('WEBSERVER_URL')})") -- cgit v1.2.3 From f4ad7bc3ebfa9be6ff98c9f935a9e8576974ddd6 Mon Sep 17 00:00:00 2001 From: zsloan Date: Tue, 15 Jun 2021 17:51:15 +0000 Subject: Fix issue where column values were used as str instead of bytes; pretty sure this was introduced by Python3 --- wqflask/utility/redis_tools.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'wqflask/utility') diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py index 96a4be12..ff125bd2 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -26,6 +26,8 @@ def is_redis_available(): def load_json_from_redis(item_list, column_value): + if type(column_value) == str: + column_value = str.encode(column_value) return json.loads(item_list[column_value]) -- cgit v1.2.3 From cfc738303e7ddd213919a0a15885d1e846277848 Mon Sep 17 00:00:00 2001 From: Alexander Kabui Date: Mon, 14 Jun 2021 00:52:10 +0300 Subject: remove print statements --- wqflask/utility/helper_functions.py | 4 ---- 1 file changed, 4 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/helper_functions.py b/wqflask/utility/helper_functions.py index 50e00421..27dd0729 100644 --- a/wqflask/utility/helper_functions.py +++ b/wqflask/utility/helper_functions.py @@ -21,15 +21,11 @@ def get_species_dataset_trait(self, start_vars): self.dataset = data_set.create_dataset(start_vars['dataset']) else: self.dataset = data_set.create_dataset(start_vars['dataset']) - logger.debug("After creating dataset") self.species = TheSpecies(dataset=self.dataset) - logger.debug("After creating species") self.this_trait = create_trait(dataset=self.dataset, name=start_vars['trait_id'], cellid=None, get_qtl_info=True) - logger.debug("After creating trait") - def get_trait_db_obs(self, trait_db_list): if isinstance(trait_db_list, str): -- cgit v1.2.3 From 48ac21fda375b8255bec86e83496d730bb57ffff Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 12 Jul 2021 17:49:09 +0000 Subject: Encode user_id as bytestring if not already bytestring to account for some user_ids being stored as different encoding --- wqflask/utility/authentication_tools.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index 57dbf8ba..6802d689 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -11,7 +11,6 @@ from utility.redis_tools import (get_redis_conn, add_resource) Redis = get_redis_conn() - def check_resource_availability(dataset, trait_id=None): # At least for now assume temporary entered traits are accessible if type(dataset) == str or dataset.type == "Temp": @@ -133,12 +132,17 @@ def check_owner_or_admin(dataset=None, trait_id=None, resource_id=None): else: resource_id = get_resource_id(dataset, trait_id) - if g.user_session.user_id in Redis.smembers("super_users"): + try: + user_id = g.user_session.user_id.encode('utf-8') + except: + user_id = g.user_session.user_id + + if user_id in Redis.smembers("super_users"): return "owner" resource_info = get_resource_info(resource_id) if resource_info: - if g.user_session.user_id == resource_info['owner_id']: + if user_id == resource_info['owner_id']: return "owner" else: return check_admin(resource_id) -- cgit v1.2.3 From 0fb00c0ce2e0c668508859e056f1132d67ad9243 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Thu, 19 Aug 2021 11:37:23 +0300 
Subject: startup_config.py: Add DebugToolbarExtension for wsgi DEBUG mode * wqflask/utility/startup_config.py (app_config): Add DebugToolbarExtension --- wqflask/utility/startup_config.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/startup_config.py b/wqflask/utility/startup_config.py index 56d0af6f..778fb64d 100644 --- a/wqflask/utility/startup_config.py +++ b/wqflask/utility/startup_config.py @@ -20,8 +20,12 @@ def app_config(): import os app.config['SECRET_KEY'] = str(os.urandom(24)) mode = WEBSERVER_MODE - if mode == "DEV" or mode == "DEBUG": + if mode in ["DEV", "DEBUG"]: app.config['TEMPLATES_AUTO_RELOAD'] = True + if mode == "DEBUG": + from flask_debugtoolbar import DebugToolbarExtension + app.debug = True + toolbar = DebugToolbarExtension(app) print("==========================================") -- cgit v1.2.3 From fff90d1a1b194fe5e65e99506bc4fe6fffc9a255 Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 23 Aug 2021 19:06:01 +0000 Subject: Account for situations where the minimum permutation value is also above webqtlConfig.MAXLRS; previously threw an error --- wqflask/utility/Plot.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py index 9b2c6735..d4256a46 100644 --- a/wqflask/utility/Plot.py +++ b/wqflask/utility/Plot.py @@ -139,7 +139,7 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab max_D = max(data) min_D = min(data) # add by NL 06-20-2011: fix the error: when max_D is infinite, log function in detScale will go wrong - if max_D == float('inf') or max_D > webqtlConfig.MAXLRS: + if (max_D == float('inf') or max_D > webqtlConfig.MAXLRS) and min_D < webqtlConfig.MAXLRS: max_D = webqtlConfig.MAXLRS # maximum LRS value xLow, xTop, stepX = detScale(min_D, max_D) @@ -156,7 +156,7 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab j += step for i, item in enumerate(data): - if item == float('inf') or item > webqtlConfig.MAXLRS: + if (item == float('inf') or item > webqtlConfig.MAXLRS) and min_D < webqtlConfig.MAXLRS: item = webqtlConfig.MAXLRS # maximum LRS value j = int((item - xLow) / step) Count[j] += 1 -- cgit v1.2.3 From f51569c80a4e00ad7dcd99ed9bf3ed6d9aaf4051 Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 10 Sep 2021 18:54:44 +0000 Subject: Removed encoding, since it's apparently no longer needed after the Python 3 switchover (and was causing there to be no matches between user IDs and groups) --- wqflask/utility/redis_tools.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py index ff125bd2..99295c67 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -135,10 +135,8 @@ def get_user_groups(user_id): for key in groups_list: try: group_ob = json.loads(groups_list[key]) - group_admins = set([this_admin.encode( - 'utf-8') if this_admin else None for this_admin in group_ob['admins']]) - group_members = set([this_member.encode( - 'utf-8') if this_member else None for this_member in group_ob['members']]) + group_admins = set([this_admin if this_admin else None for this_admin in group_ob['admins']]) + group_members = set([this_member if this_member else None for this_member in group_ob['members']]) if user_id in group_admins: admin_group_ids.append(group_ob['id']) elif user_id in group_members: -- cgit
v1.2.3 From 5b116de4aaf796be138ee0ad06c2242b3f3c33c7 Mon Sep 17 00:00:00 2001 From: zsloan Date: Mon, 4 Oct 2021 21:02:37 +0000 Subject: Changed get_user_groups to pull both the ID and details in the for loop from group_list by using group_list.items() --- wqflask/utility/redis_tools.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py index 99295c67..de9dde46 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -127,20 +127,20 @@ def check_verification_code(code): def get_user_groups(user_id): - # ZS: Get the groups where a user is an admin or a member and + # Get the groups where a user is an admin or a member and # return lists corresponding to those two sets of groups - admin_group_ids = [] # ZS: Group IDs where user is an admin - user_group_ids = [] # ZS: Group IDs where user is a regular user + admin_group_ids = [] # Group IDs where user is an admin + user_group_ids = [] # Group IDs where user is a regular user groups_list = Redis.hgetall("groups") - for key in groups_list: + for group_id, group_details in groups_list.items(): try: - group_ob = json.loads(groups_list[key]) - group_admins = set([this_admin if this_admin else None for this_admin in group_ob['admins']]) - group_members = set([this_member if this_member else None for this_member in group_ob['members']]) + _details = json.loads(group_details) + group_admins = set([this_admin if this_admin else None for this_admin in _details['admins']]) + group_members = set([this_member if this_member else None for this_member in _details['members']]) if user_id in group_admins: - admin_group_ids.append(group_ob['id']) + admin_group_ids.append(group_id) elif user_id in group_members: - user_group_ids.append(group_ob['id']) + user_group_ids.append(group_id) else: continue except: -- cgit v1.2.3 From cc32cc4f1472ddaf63a5b9428e8a67b8ba139282 Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 22 Oct 2021 18:36:45 +0000 Subject: Added proxy and local GN3 URLs in tools.py (which should be set in the settings file) --- wqflask/utility/tools.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'wqflask/utility') diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py index e28abb48..1219993a 100644 --- a/wqflask/utility/tools.py +++ b/wqflask/utility/tools.py @@ -266,6 +266,8 @@ WEBSERVER_MODE = get_setting('WEBSERVER_MODE') GN2_BASE_URL = get_setting('GN2_BASE_URL') GN2_BRANCH_URL = get_setting('GN2_BRANCH_URL') GN_SERVER_URL = get_setting('GN_SERVER_URL') +GN_PROXY_URL = get_setting('GN_PROXY_URL') +GN3_LOCAL_URL = get_setting('GN_LOCAL_URL') SERVER_PORT = get_setting_int('SERVER_PORT') SQL_URI = get_setting('SQL_URI') LOG_LEVEL = get_setting('LOG_LEVEL') -- cgit v1.2.3 From a05d0e4eb9412d9495a0f96980df27acb1526a03 Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 22 Oct 2021 18:45:10 +0000 Subject: Replace hardcoded GN proxy URLs with one pulled from settings --- wqflask/utility/authentication_tools.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index 6802d689..afea69e1 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -4,11 +4,12 @@ import requests from flask import g from base import webqtlConfig - from utility.redis_tools import (get_redis_conn, get_resource_info, 
get_resource_id, add_resource) +from utility.tools import GN_PROXY_URL + Redis = get_redis_conn() def check_resource_availability(dataset, trait_id=None): @@ -24,19 +25,19 @@ def check_resource_availability(dataset, trait_id=None): if resource_id: resource_info = get_resource_info(resource_id) - # ZS: If resource isn't already in redis, add it with default + # If resource isn't already in redis, add it with default # privileges if not resource_info: resource_info = add_new_resource(dataset, trait_id) - # ZS: Check if super-user - we should probably come up with some + # Check if super-user - we should probably come up with some # way to integrate this into the proxy if g.user_session.user_id in Redis.smembers("super_users"): return webqtlConfig.SUPER_PRIVILEGES response = None - the_url = "http://localhost:8080/available?resource={}&user={}".format( + the_url = GN_PROXY_URL + "available?resource={}&user={}".format( resource_id, g.user_session.user_id) try: @@ -93,7 +94,7 @@ def get_group_code(dataset): def check_admin(resource_id=None): - the_url = "http://localhost:8080/available?resource={}&user={}".format( + the_url = GN_PROXY_URL + "available?resource={}&user={}".format( resource_id, g.user_session.user_id) try: response = json.loads(requests.get(the_url).content)['admin'] -- cgit v1.2.3 From b8f2df1f08c423923aeb72a1b2c2316e6da232dc Mon Sep 17 00:00:00 2001 From: zsloan Date: Fri, 22 Oct 2021 18:51:50 +0000 Subject: Fix line pulling GN3_LOCAL_URL from settings --- wqflask/utility/tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py index 1219993a..0efe8ca9 100644 --- a/wqflask/utility/tools.py +++ b/wqflask/utility/tools.py @@ -267,7 +267,7 @@ GN2_BASE_URL = get_setting('GN2_BASE_URL') GN2_BRANCH_URL = get_setting('GN2_BRANCH_URL') GN_SERVER_URL = get_setting('GN_SERVER_URL') GN_PROXY_URL = get_setting('GN_PROXY_URL') -GN3_LOCAL_URL = get_setting('GN_LOCAL_URL') +GN3_LOCAL_URL = get_setting('GN3_LOCAL_URL') SERVER_PORT = get_setting_int('SERVER_PORT') SQL_URI = get_setting('SQL_URI') LOG_LEVEL = get_setting('LOG_LEVEL') -- cgit v1.2.3 From eb2e43434c137a070db5943db21d3b92afdd9a91 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Tue, 12 Oct 2021 14:19:26 +0300 Subject: Mark `get_resource_info` as deprecated --- wqflask/utility/redis_tools.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'wqflask/utility') diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py index de9dde46..e68397ab 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -4,6 +4,7 @@ import datetime import redis # used for collections +from deprecated import deprecated from utility.hmac import hmac_creation from utility.logger import getLogger logger = getLogger(__name__) @@ -321,6 +322,7 @@ def get_resource_id(dataset, trait_id=None): return resource_id +@deprecated def get_resource_info(resource_id): resource_info = Redis.hget("resources", resource_id) if resource_info: -- cgit v1.2.3 From 217fc72eee688aed4409120d964cb2140ce11961 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Fri, 15 Oct 2021 15:53:07 +0300 Subject: utility.hmac: Label "hmac_creation" as deprecated This function is coupled to "wqflask.app", therefore requiring its import at the module level. This may lead to circular import issues when working with blueprints.
--- wqflask/utility/hmac.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'wqflask/utility') diff --git a/wqflask/utility/hmac.py b/wqflask/utility/hmac.py index 29891677..d6e515ed 100644 --- a/wqflask/utility/hmac.py +++ b/wqflask/utility/hmac.py @@ -1,11 +1,14 @@ import hmac import hashlib +from deprecated import deprecated from flask import url_for from wqflask import app +@deprecated("This function leads to circular imports. " + "If possible use wqflask.decorators.create_hmac instead.") def hmac_creation(stringy): """Helper function to create the actual hmac""" -- cgit v1.2.3 From 8ba81937fec81c7e9790db422a52d99f4234a194 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Mon, 18 Oct 2021 12:02:51 +0300 Subject: authentication_tools: Mark `check_owner_or_admin` as deprecated Use the new auth proxy tools instead. --- wqflask/utility/authentication_tools.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'wqflask/utility') diff --git a/wqflask/utility/authentication_tools.py b/wqflask/utility/authentication_tools.py index afea69e1..c4801c8c 100644 --- a/wqflask/utility/authentication_tools.py +++ b/wqflask/utility/authentication_tools.py @@ -1,6 +1,7 @@ import json import requests +from deprecated import deprecated from flask import g from base import webqtlConfig @@ -126,6 +127,7 @@ def check_owner(dataset=None, trait_id=None, resource_id=None): return False +@deprecated def check_owner_or_admin(dataset=None, trait_id=None, resource_id=None): if not resource_id: if dataset.type == "Temp": -- cgit v1.2.3 From 935270b1cc1e265b785958cf5805bf155d8ae859 Mon Sep 17 00:00:00 2001 From: BonfaceKilz Date: Wed, 20 Oct 2021 14:38:11 +0300 Subject: utility: redis_tools: Remove dead functions --- wqflask/utility/redis_tools.py | 78 ------------------------------------------ 1 file changed, 78 deletions(-) (limited to 'wqflask/utility') diff --git a/wqflask/utility/redis_tools.py b/wqflask/utility/redis_tools.py index e68397ab..c2a3b057 100644 --- a/wqflask/utility/redis_tools.py +++ b/wqflask/utility/redis_tools.py @@ -58,30 +58,6 @@ def get_user_by_unique_column(column_name, column_value): return item_details -def get_users_like_unique_column(column_name, column_value): - """Like previous function, but this only checks if the input is a - subset of a field and can return multiple results - - """ - matched_users = [] - - if column_value != "": - user_list = Redis.hgetall("users") - if column_name != "user_id": - for key in user_list: - user_ob = json.loads(user_list[key]) - if "user_id" not in user_ob: - set_user_attribute(key, "user_id", key) - user_ob["user_id"] = key - if column_name in user_ob: - if column_value in user_ob[column_name]: - matched_users.append(user_ob) - else: - matched_users.append(load_json_from_redis(user_list, column_value)) - - return matched_users - - def set_user_attribute(user_id, column_name, column_value): user_info = json.loads(Redis.hget("users", user_id)) user_info[column_name] = column_value @@ -166,52 +142,6 @@ def get_group_info(group_id): return group_info -def get_group_by_unique_column(column_name, column_value): - """ Get group by column; not sure if there's a faster way to do this """ - - matched_groups = [] - - all_group_list = Redis.hgetall("groups") - for key in all_group_list: - group_info = json.loads(all_group_list[key]) - # ZS: Since these fields are lists, search in the list - if column_name == "admins" or column_name == "members": - if column_value in group_info[column_name]: - matched_groups.append(group_info) - else: - if 
group_info[column_name] == column_value: - matched_groups.append(group_info) - - return matched_groups - - -def get_groups_like_unique_column(column_name, column_value): - """Like previous function, but this only checks if the input is a - subset of a field and can return multiple results - - """ - matched_groups = [] - - if column_value != "": - group_list = Redis.hgetall("groups") - if column_name != "group_id": - for key in group_list: - group_info = json.loads(group_list[key]) - # ZS: Since these fields are lists, search in the list - if column_name == "admins" or column_name == "members": - if column_value in group_info[column_name]: - matched_groups.append(group_info) - else: - if column_name in group_info: - if column_value in group_info[column_name]: - matched_groups.append(group_info) - else: - matched_groups.append( - load_json_from_redis(group_list, column_value)) - - return matched_groups - - def create_group(admin_user_ids, member_user_ids=[], group_name="Default Group Name"): group_id = str(uuid.uuid4()) @@ -354,11 +284,3 @@ def add_access_mask(resource_id, group_id, access_mask): Redis.hset("resources", resource_id, json.dumps(the_resource)) return the_resource - - -def change_resource_owner(resource_id, new_owner_id): - the_resource = get_resource_info(resource_id) - the_resource['owner_id'] = new_owner_id - - Redis.delete("resource") - Redis.hset("resources", resource_id, json.dumps(the_resource)) -- cgit v1.2.3
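A recurring theme in the redis_tools and authentication_tools commits above is reconciling str and bytes: redis-py returns hash keys, hash values, and set members as bytes unless the client is created with decode_responses=True, so comparisons against str user IDs or column values silently fail. The sketch below is not taken from any of these patches; it is a minimal illustration of the two possible conventions, and the is_super_user helper and the decode_responses client are assumptions added purely for illustration.

# Illustrative sketch only -- not part of the commits above.
import redis
import simplejson as json

# Convention 1 (assumed alternative): let redis-py decode everything to str
# up front. hget()/hgetall()/smembers() then return str, so no str.encode()
# or .decode() shims are needed at call sites.
conn = redis.StrictRedis(port=6380, decode_responses=True)  # same non-default port as get_redis_conn()

def load_json_from_redis(item_list, column_value):
    # With decode_responses=True both the dict keys and column_value are str.
    return json.loads(item_list[column_value])

# Convention 2 (closer to what the patches actually do): keep the default
# bytes-returning client and normalise at each comparison site.
raw_conn = redis.StrictRedis(port=6380)

def is_super_user(user_id):
    # Hypothetical helper mirroring the super-user check in
    # authentication_tools.py: smembers() returns bytes, so encode the str id.
    if isinstance(user_id, str):
        user_id = user_id.encode("utf-8")
    return user_id in raw_conn.smembers("super_users")

Either convention works on its own; mixing the two is what appears to have produced the back-and-forth .encode() fixes in the commits above, so settling on one for the whole module is the simpler route.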