-rw-r--r--  wqflask/base/data_set.py | 2
-rw-r--r--  wqflask/maintenance/convert_geno_to_bimbam.py | 6
-rw-r--r--  wqflask/maintenance/gen_select_dataset.py | 8
-rw-r--r--  wqflask/maintenance/generate_kinship_from_bimbam.py | 4
-rw-r--r--  wqflask/maintenance/geno_to_json.py | 4
-rw-r--r--  wqflask/maintenance/quantile_normalize.py | 16
-rw-r--r--  wqflask/maintenance/set_resource_defaults.py | 30
-rw-r--r--  wqflask/tests/unit/base/test_webqtl_case_data.py | 8
-rw-r--r--  wqflask/tests/unit/wqflask/api/test_correlation.py | 4
-rw-r--r--  wqflask/tests/unit/wqflask/marker_regression/test_gemma_mapping.py | 14
-rw-r--r--  wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py | 2
-rw-r--r--  wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py | 2
-rw-r--r--  wqflask/tests/unit/wqflask/snp_browser/test_snp_browser.py | 12
-rw-r--r--  wqflask/tests/unit/wqflask/test_server_side.py | 2
-rw-r--r--  wqflask/utility/Plot.py | 2
-rw-r--r--  wqflask/utility/__init__.py | 2
-rw-r--r--  wqflask/utility/elasticsearch_tools.py | 2
-rw-r--r--  wqflask/utility/genofile_parser.py | 10
-rw-r--r--  wqflask/utility/startup_config.py | 2
-rw-r--r--  wqflask/utility/tools.py | 4
-rw-r--r--  wqflask/utility/webqtlUtil.py | 46
-rw-r--r--  wqflask/wqflask/api/correlation.py | 2
-rw-r--r--  wqflask/wqflask/api/mapping.py | 14
-rw-r--r--  wqflask/wqflask/api/router.py | 118
-rw-r--r--  wqflask/wqflask/collect.py | 14
-rw-r--r--  wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py | 6
-rw-r--r--  wqflask/wqflask/correlation/show_corr_results.py | 122
-rw-r--r--  wqflask/wqflask/correlation_matrix/show_corr_matrix.py | 4
-rw-r--r--  wqflask/wqflask/ctl/ctl_analysis.py | 18
-rw-r--r--  wqflask/wqflask/do_search.py | 156
-rw-r--r--  wqflask/wqflask/export_traits.py | 6
-rw-r--r--  wqflask/wqflask/external_tools/send_to_bnw.py | 2
-rw-r--r--  wqflask/wqflask/external_tools/send_to_geneweaver.py | 12
-rw-r--r--  wqflask/wqflask/external_tools/send_to_webgestalt.py | 6
-rw-r--r--  wqflask/wqflask/group_manager.py | 20
-rw-r--r--  wqflask/wqflask/gsearch.py | 46
-rw-r--r--  wqflask/wqflask/heatmap/heatmap.py | 16
-rw-r--r--  wqflask/wqflask/interval_analyst/GeneUtil.py | 20
-rw-r--r--  wqflask/wqflask/markdown_routes.py | 2
-rw-r--r--  wqflask/wqflask/marker_regression/display_mapping_results.py | 72
-rw-r--r--  wqflask/wqflask/marker_regression/qtlreaper_mapping.py | 46
-rw-r--r--  wqflask/wqflask/marker_regression/rqtl_mapping.py | 4
-rw-r--r--  wqflask/wqflask/marker_regression/run_mapping.py | 34
-rw-r--r--  wqflask/wqflask/model.py | 4
-rw-r--r--  wqflask/wqflask/parser.py | 2
-rw-r--r--  wqflask/wqflask/search_results.py | 10
-rw-r--r--  wqflask/wqflask/server_side.py | 4
-rw-r--r--  wqflask/wqflask/show_trait/SampleList.py | 4
-rw-r--r--  wqflask/wqflask/show_trait/export_trait_data.py | 4
-rw-r--r--  wqflask/wqflask/snp_browser/snp_browser.py | 22
-rw-r--r--  wqflask/wqflask/user_login.py | 34
-rw-r--r--  wqflask/wqflask/user_manager.py | 2
-rw-r--r--  wqflask/wqflask/user_session.py | 8
-rw-r--r--  wqflask/wqflask/wgcna/wgcna_analysis.py | 2
54 files changed, 509 insertions, 509 deletions
diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py
index 5eac695e..e20f2f98 100644
--- a/wqflask/base/data_set.py
+++ b/wqflask/base/data_set.py
@@ -151,7 +151,7 @@ class DatasetType:
"WHERE InbredSet.Name = '%s' AND "
"PublishFreeze.InbredSetId = InbredSet.Id"),
'geno': ("SELECT GenoFreeze.Id FROM GenoFreeze WHERE "
- "GenoFreeze.Name = \"%s\" ")
+ "GenoFreeze.Name = \"%s\" ")
}
dataset_name_mapping = {
diff --git a/wqflask/maintenance/convert_geno_to_bimbam.py b/wqflask/maintenance/convert_geno_to_bimbam.py
index c5af1ca6..078be529 100644
--- a/wqflask/maintenance/convert_geno_to_bimbam.py
+++ b/wqflask/maintenance/convert_geno_to_bimbam.py
@@ -56,7 +56,7 @@ class ConvertGenoFile:
'@pat': "0",
'@het': "0.5",
'@unk': "NA"
- }
+ }
self.configurations = {}
self.input_fh = open(self.input_file)
@@ -171,7 +171,7 @@ class ConvertGenoFile:
snp_output_file = os.path.join(
new_directory, group_name + "_snps.txt")
output_files = [geno_output_file,
- pheno_output_file, snp_output_file]
+ pheno_output_file, snp_output_file]
print("%s -> %s" % (
os.path.join(old_directory, input_file), geno_output_file))
convertob = ConvertGenoFile(input_file, output_files)
@@ -184,7 +184,7 @@ class ConvertGenoFile:
print(" Exception:", why)
print(traceback.print_exc())
print(" Found in row %s at tabular column %s" % (convertob.latest_row_pos,
- convertob.latest_col_pos))
+ convertob.latest_col_pos))
print(" Column is:", convertob.latest_col_value)
print(" Row is:", convertob.latest_row_value)
break
diff --git a/wqflask/maintenance/gen_select_dataset.py b/wqflask/maintenance/gen_select_dataset.py
index 484336a6..db65a11f 100644
--- a/wqflask/maintenance/gen_select_dataset.py
+++ b/wqflask/maintenance/gen_select_dataset.py
@@ -62,10 +62,10 @@ def parse_db_uri():
parsed_uri = urllib.parse.urlparse(SQL_URI)
db_conn_info = dict(
- db=parsed_uri.path[1:],
- host=parsed_uri.hostname,
- user=parsed_uri.username,
- passwd=parsed_uri.password)
+ db=parsed_uri.path[1:],
+ host=parsed_uri.hostname,
+ user=parsed_uri.username,
+ passwd=parsed_uri.password)
print(db_conn_info)
return db_conn_info
diff --git a/wqflask/maintenance/generate_kinship_from_bimbam.py b/wqflask/maintenance/generate_kinship_from_bimbam.py
index cd39fceb..9f01d094 100644
--- a/wqflask/maintenance/generate_kinship_from_bimbam.py
+++ b/wqflask/maintenance/generate_kinship_from_bimbam.py
@@ -23,7 +23,7 @@ class GenerateKinshipMatrices:
def generate_kinship(self):
gemma_command = "/gnu/store/xhzgjr0jvakxv6h3blj8z496xjig69b0-profile/bin/gemma -g " + self.geno_file + \
" -p " + self.pheno_file + \
- " -gk 1 -outdir /home/zas1024/genotype_files/genotype/bimbam/ -o " + self.group_name
+ " -gk 1 -outdir /home/zas1024/genotype_files/genotype/bimbam/ -o " + self.group_name
print("command:", gemma_command)
os.system(gemma_command)
@@ -52,7 +52,7 @@ class GenerateKinshipMatrices:
print(" Exception:", why)
print(traceback.print_exc())
print(" Found in row %s at tabular column %s" % (convertob.latest_row_pos,
- convertob.latest_col_pos))
+ convertob.latest_col_pos))
print(" Column is:", convertob.latest_col_value)
print(" Row is:", convertob.latest_row_value)
break
diff --git a/wqflask/maintenance/geno_to_json.py b/wqflask/maintenance/geno_to_json.py
index c74489a8..32e0e34b 100644
--- a/wqflask/maintenance/geno_to_json.py
+++ b/wqflask/maintenance/geno_to_json.py
@@ -63,7 +63,7 @@ class ConvertGenoFile:
'@pat': "0",
'@het': "0.5",
'@unk': "NA"
- }
+ }
self.configurations = {}
#self.skipped_cols = 3
@@ -172,7 +172,7 @@ class ConvertGenoFile:
print(" Exception:", why)
print(traceback.print_exc())
print(" Found in row %s at tabular column %s" % (convertob.latest_row_pos,
- convertob.latest_col_pos))
+ convertob.latest_col_pos))
print(" Column is:", convertob.latest_col_value)
print(" Row is:", convertob.latest_row_value)
break
diff --git a/wqflask/maintenance/quantile_normalize.py b/wqflask/maintenance/quantile_normalize.py
index ac7689f5..88bb2cb5 100644
--- a/wqflask/maintenance/quantile_normalize.py
+++ b/wqflask/maintenance/quantile_normalize.py
@@ -21,10 +21,10 @@ def parse_db_uri():
parsed_uri = urllib.parse.urlparse(SQL_URI)
db_conn_info = dict(
- db=parsed_uri.path[1:],
- host=parsed_uri.hostname,
- user=parsed_uri.username,
- passwd=parsed_uri.password)
+ db=parsed_uri.path[1:],
+ host=parsed_uri.hostname,
+ user=parsed_uri.username,
+ passwd=parsed_uri.password)
print(db_conn_info)
return db_conn_info
@@ -70,10 +70,10 @@ def set_data(dataset_name):
trait_name = line1.split('\t')[0]
for i, sample in enumerate(sample_names):
this_sample = {
- "name": sample,
- "value": line1.split('\t')[i + 1],
- "qnorm": line2.split('\t')[i + 1]
- }
+ "name": sample,
+ "value": line1.split('\t')[i + 1],
+ "qnorm": line2.split('\t')[i + 1]
+ }
sample_list.append(this_sample)
query = """SELECT Species.SpeciesName, InbredSet.InbredSetName, ProbeSetFreeze.FullName
FROM Species, InbredSet, ProbeSetFreeze, ProbeFreeze, ProbeSetXRef, ProbeSet
diff --git a/wqflask/maintenance/set_resource_defaults.py b/wqflask/maintenance/set_resource_defaults.py
index c6c4f44c..0f472494 100644
--- a/wqflask/maintenance/set_resource_defaults.py
+++ b/wqflask/maintenance/set_resource_defaults.py
@@ -44,10 +44,10 @@ def parse_db_uri():
parsed_uri = urllib.parse.urlparse(SQL_URI)
db_conn_info = dict(
- db=parsed_uri.path[1:],
- host=parsed_uri.hostname,
- user=parsed_uri.username,
- passwd=parsed_uri.password)
+ db=parsed_uri.path[1:],
+ host=parsed_uri.hostname,
+ user=parsed_uri.username,
+ passwd=parsed_uri.password)
print(db_conn_info)
return db_conn_info
@@ -69,12 +69,12 @@ def insert_probeset_resources(default_owner_id):
resource_ob['type'] = "dataset-probeset"
if resource[2] < 1 and resource[3] > 0:
resource_ob['default_mask'] = {"data": "view",
- "metadata": "view",
- "admin": "not-admin"}
+ "metadata": "view",
+ "admin": "not-admin"}
else:
resource_ob['default_mask'] = {"data": "no-access",
- "metadata": "no-access",
- "admin": "not-admin"}
+ "metadata": "no-access",
+ "admin": "not-admin"}
resource_ob['group_masks'] = {}
add_resource(resource_ob, update=False)
@@ -101,11 +101,11 @@ def insert_publish_resources(default_owner_id):
resource_ob['name'] = str(resource[0])
resource_ob['owner_id'] = default_owner_id
resource_ob['data'] = {"dataset": str(resource[1]),
- "trait": str(resource[0])}
+ "trait": str(resource[0])}
resource_ob['type'] = "dataset-publish"
resource_ob['default_mask'] = {"data": "view",
- "metadata": "view",
- "admin": "not-admin"}
+ "metadata": "view",
+ "admin": "not-admin"}
resource_ob['group_masks'] = {}
@@ -133,12 +133,12 @@ def insert_geno_resources(default_owner_id):
resource_ob['type'] = "dataset-geno"
if resource[2] < 1:
resource_ob['default_mask'] = {"data": "view",
- "metadata": "view",
- "admin": "not-admin"}
+ "metadata": "view",
+ "admin": "not-admin"}
else:
resource_ob['default_mask'] = {"data": "no-access",
- "metadata": "no-access",
- "admin": "not-admin"}
+ "metadata": "no-access",
+ "admin": "not-admin"}
resource_ob['group_masks'] = {}
add_resource(resource_ob, update=False)
diff --git a/wqflask/tests/unit/base/test_webqtl_case_data.py b/wqflask/tests/unit/base/test_webqtl_case_data.py
index cebd41ce..e1555cb4 100644
--- a/wqflask/tests/unit/base/test_webqtl_case_data.py
+++ b/wqflask/tests/unit/base/test_webqtl_case_data.py
@@ -10,10 +10,10 @@ class TestWebqtlCaseData(unittest.TestCase):
def setUp(self):
self.w = webqtlCaseData(name="Test",
- value=0,
- variance=0.0,
- num_cases=10,
- name2="Test2")
+ value=0,
+ variance=0.0,
+ num_cases=10,
+ name2="Test2")
def test_webqtl_case_data_repr(self):
self.assertEqual(
diff --git a/wqflask/tests/unit/wqflask/api/test_correlation.py b/wqflask/tests/unit/wqflask/api/test_correlation.py
index 34ffa9ef..1089a36f 100644
--- a/wqflask/tests/unit/wqflask/api/test_correlation.py
+++ b/wqflask/tests/unit/wqflask/api/test_correlation.py
@@ -106,9 +106,9 @@ class TestCorrelations(unittest.TestCase):
target_vals = [3.4, 6.2, 4.1, 3.4, 1.2, 5.6]
trait_data = {"S1": AttributeSetter({"value": 2.3}), "S2": AttributeSetter({"value": 1.1}),
- "S3": AttributeSetter(
+ "S3": AttributeSetter(
{"value": 6.3}), "S4": AttributeSetter({"value": 3.6}), "S5": AttributeSetter({"value": 4.1}),
- "S6": AttributeSetter({"value": 5.0})}
+ "S6": AttributeSetter({"value": 5.0})}
this_trait = AttributeSetter({"data": trait_data})
mock_normalize.return_value = ([2.3, 1.1, 6.3, 3.6, 4.1, 5.0],
[3.4, 6.2, 4.1, 3.4, 1.2, 5.6], 6)
diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_gemma_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_gemma_mapping.py
index 5cbaf0e0..4003d68f 100644
--- a/wqflask/tests/unit/wqflask/marker_regression/test_gemma_mapping.py
+++ b/wqflask/tests/unit/wqflask/marker_regression/test_gemma_mapping.py
@@ -162,13 +162,13 @@ X\tM5\t12\tQ\tE\tMMB\tR\t21.1\tW\t0.65\t0.6"""
results = parse_loco_output(
this_dataset={}, gwa_output_filename=".xw/")
expected_results = [
- {'name': 'M1', 'chr': 'X/Y', 'Mb': 2.8457155e-05, 'p_value': 0.85,
- 'additive': 23.3, 'lod_score': 0.07058107428570727},
- {'name': 'M2', 'chr': 4, 'Mb': 1.2e-05, 'p_value': 0.5,
- 'additive': 24.0, 'lod_score': 0.3010299956639812},
- {'name': 'M4', 'chr': 'Y', 'Mb': 1.2e-05, 'p_value': 0.7,
- 'additive': 11.6, 'lod_score': 0.1549019599857432},
- {'name': 'M5', 'chr': 'X', 'Mb': 1.2e-05, 'p_value': 0.6, 'additive': 21.1, 'lod_score': 0.22184874961635637}]
+ {'name': 'M1', 'chr': 'X/Y', 'Mb': 2.8457155e-05, 'p_value': 0.85,
+ 'additive': 23.3, 'lod_score': 0.07058107428570727},
+ {'name': 'M2', 'chr': 4, 'Mb': 1.2e-05, 'p_value': 0.5,
+ 'additive': 24.0, 'lod_score': 0.3010299956639812},
+ {'name': 'M4', 'chr': 'Y', 'Mb': 1.2e-05, 'p_value': 0.7,
+ 'additive': 11.6, 'lod_score': 0.1549019599857432},
+ {'name': 'M5', 'chr': 'X', 'Mb': 1.2e-05, 'p_value': 0.6, 'additive': 21.1, 'lod_score': 0.22184874961635637}]
self.assertEqual(expected_results, results)
diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py
index 47377873..93848a84 100644
--- a/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py
+++ b/wqflask/tests/unit/wqflask/marker_regression/test_qtlreaper_mapping.py
@@ -18,6 +18,6 @@ class TestQtlReaperMapping(unittest.TestCase):
mock_open.assert_called_once_with("/home/user/data/gn2/trait_file.txt", "w")
filehandler = mock_open()
write_calls = [mock.call('Trait\t'), mock.call(
- 'S1\tS3\tS4\n'), mock.call('T1\t'), mock.call('V1\tV4\tV3')]
+ 'S1\tS3\tS4\n'), mock.call('T1\t'), mock.call('V1\tV4\tV3')]
filehandler.write.assert_has_calls(write_calls)
diff --git a/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py b/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py
index e518ec22..68686e27 100644
--- a/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py
+++ b/wqflask/tests/unit/wqflask/marker_regression/test_rqtl_mapping.py
@@ -21,7 +21,7 @@ class TestRqtlMapping(unittest.TestCase):
"""test for getting trait data_type return True"""
query_value = """SELECT value FROM TraitMetadata WHERE type='trait_data_type'"""
mock_db.db.execute.return_value.fetchone.return_value = [
- """{"type":"trait_data_type","name":"T1","traid_id":"fer434f"}"""]
+ """{"type":"trait_data_type","name":"T1","traid_id":"fer434f"}"""]
results = get_trait_data_type("traid_id")
mock_db.db.execute.assert_called_with(query_value)
self.assertEqual(results, "fer434f")
diff --git a/wqflask/tests/unit/wqflask/snp_browser/test_snp_browser.py b/wqflask/tests/unit/wqflask/snp_browser/test_snp_browser.py
index 8823e1fc..89442c47 100644
--- a/wqflask/tests/unit/wqflask/snp_browser/test_snp_browser.py
+++ b/wqflask/tests/unit/wqflask/snp_browser/test_snp_browser.py
@@ -22,10 +22,10 @@ class TestSnpBrowser(unittest.TestCase):
strains = {"mouse": ["S1", "S2", "S3", "S4", "S5"], "rat": []}
expected_results = ([['Index', 'SNP ID', 'Chr', 'Mb', 'Alleles', 'ConScore',
'Domain 1', 'Domain 2', 'Details'],
- ['S1', 'S2', 'S3', 'S4', 'S5']], 5,
- ['index', 'snp_name', 'chr', 'mb_formatted', 'alleles',
- 'conservation_score', 'domain_1', 'domain_2',
- 'function_details', 'S1', 'S2', 'S3', 'S4', 'S5'])
+ ['S1', 'S2', 'S3', 'S4', 'S5']], 5,
+ ['index', 'snp_name', 'chr', 'mb_formatted', 'alleles',
+ 'conservation_score', 'domain_1', 'domain_2',
+ 'function_details', 'S1', 'S2', 'S3', 'S4', 'S5'])
results_with_snp = get_header_list(
variant_type="SNP", strains=strains, species="Mouse", empty_columns=empty_columns)
@@ -34,8 +34,8 @@ class TestSnpBrowser(unittest.TestCase):
expected_results_with_indel = (
['Index', 'ID', 'Type', 'InDel Chr', 'Mb Start',
'Mb End', 'Strand', 'Size', 'Sequence', 'Source'], 0,
- ['index', 'indel_name', 'indel_type', 'indel_chr', 'indel_mb_s',
- 'indel_mb_e', 'indel_strand', 'indel_size', 'indel_sequence', 'source_name'])
+ ['index', 'indel_name', 'indel_type', 'indel_chr', 'indel_mb_s',
+ 'indel_mb_e', 'indel_strand', 'indel_size', 'indel_sequence', 'source_name'])
self.assertEqual(expected_results, results_with_snp)
self.assertEqual(expected_results_with_indel, results_with_indel)
diff --git a/wqflask/tests/unit/wqflask/test_server_side.py b/wqflask/tests/unit/wqflask/test_server_side.py
index 9d988aea..be7ca2df 100644
--- a/wqflask/tests/unit/wqflask/test_server_side.py
+++ b/wqflask/tests/unit/wqflask/test_server_side.py
@@ -23,7 +23,7 @@ class TestServerSideTableTests(unittest.TestCase):
]
headers = ['first', 'second', 'third']
request_args = {'sEcho': '1', 'iSortCol_0': '1', 'iSortingCols': '1',
- 'sSortDir_0': 'asc', 'iDisplayStart': '0', 'iDisplayLength': '3'}
+ 'sSortDir_0': 'asc', 'iDisplayStart': '0', 'iDisplayLength': '3'}
test_page = ServerSideTable(
rows_count, table_rows, headers, request_args).get_page()
diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py
index f61e3b88..37a8a1a5 100644
--- a/wqflask/utility/Plot.py
+++ b/wqflask/utility/Plot.py
@@ -206,7 +206,7 @@ def plotBar(canvas, data, barColor=BLUE, axesColor=BLACK, labelColor=BLACK, XLab
im_drawer.text(
text=strY,
xy=(xLeftOffset - im_drawer.textsize(strY,
- font=scaleFont)[0] - 6, yc + 5),
+ font=scaleFont)[0] - 6, yc + 5),
font=scaleFont)
y += (yTop - yLow) / stepY
diff --git a/wqflask/utility/__init__.py b/wqflask/utility/__init__.py
index 6c8cd546..25273fa0 100644
--- a/wqflask/utility/__init__.py
+++ b/wqflask/utility/__init__.py
@@ -32,4 +32,4 @@ class Struct:
def __repr__(self):
return '{%s}' % str(', '.join('%s : %s' % (k, repr(v)) for
- (k, v) in list(self.__dict__.items())))
+ (k, v) in list(self.__dict__.items())))
diff --git a/wqflask/utility/elasticsearch_tools.py b/wqflask/utility/elasticsearch_tools.py
index 55907dd5..e56c22eb 100644
--- a/wqflask/utility/elasticsearch_tools.py
+++ b/wqflask/utility/elasticsearch_tools.py
@@ -50,7 +50,7 @@ from utility.tools import ELASTICSEARCH_HOST, ELASTICSEARCH_PORT
def test_elasticsearch_connection():
es = Elasticsearch(['http://' + ELASTICSEARCH_HOST + \
- ":" + str(ELASTICSEARCH_PORT) + '/'], verify_certs=True)
+ ":" + str(ELASTICSEARCH_PORT) + '/'], verify_certs=True)
if not es.ping():
logger.warning("Elasticsearch is DOWN")
diff --git a/wqflask/utility/genofile_parser.py b/wqflask/utility/genofile_parser.py
index eb545478..86d9823e 100644
--- a/wqflask/utility/genofile_parser.py
+++ b/wqflask/utility/genofile_parser.py
@@ -37,10 +37,10 @@ class ConvertGenoFile:
self.input_fh = open(input_file)
print("!!!!!!!!!!!!!!!!PARSER!!!!!!!!!!!!!!!!!!")
self.haplotype_notation = {
- '@mat': "1",
- '@pat': "2",
- '@het': "-999",
- '@unk': "-999"
+ '@mat': "1",
+ '@pat': "2",
+ '@het': "-999",
+ '@unk': "-999"
}
self.configurations = {}
@@ -93,7 +93,7 @@ class ConvertGenoFile:
for item_count, genotype in enumerate(genotypes):
if genotype.upper().strip() in self.configurations:
this_marker.genotypes.append(
- self.configurations[genotype.upper().strip()])
+ self.configurations[genotype.upper().strip()])
else:
print("WARNING:", genotype.upper())
this_marker.genotypes.append("NA")
diff --git a/wqflask/utility/startup_config.py b/wqflask/utility/startup_config.py
index 05f8a2b0..6ef759e0 100644
--- a/wqflask/utility/startup_config.py
+++ b/wqflask/utility/startup_config.py
@@ -39,4 +39,4 @@ def app_config():
# es.test_elasticsearch_connection()
print(("GN2 is running. Visit %s[http://localhost:%s/%s](%s)" %
- (BLUE, str(port), ENDC, get_setting("WEBSERVER_URL"))))
+ (BLUE, str(port), ENDC, get_setting("WEBSERVER_URL"))))
diff --git a/wqflask/utility/tools.py b/wqflask/utility/tools.py
index 4f09176a..e28abb48 100644
--- a/wqflask/utility/tools.py
+++ b/wqflask/utility/tools.py
@@ -295,8 +295,8 @@ ORCID_CLIENT_SECRET = get_setting('ORCID_CLIENT_SECRET')
ORCID_AUTH_URL = None
if ORCID_CLIENT_ID != 'UNKNOWN' and ORCID_CLIENT_SECRET:
ORCID_AUTH_URL = "https://orcid.org/oauth/authorize?response_type=code&scope=/authenticate&show_login=true&client_id=" + \
- ORCID_CLIENT_ID + "&client_secret=" + ORCID_CLIENT_SECRET + \
- "&redirect_uri=" + GN2_BRANCH_URL + "n/login/orcid_oauth2"
+ ORCID_CLIENT_ID + "&client_secret=" + ORCID_CLIENT_SECRET + \
+ "&redirect_uri=" + GN2_BRANCH_URL + "n/login/orcid_oauth2"
ORCID_TOKEN_URL = get_setting('ORCID_TOKEN_URL')
ELASTICSEARCH_HOST = get_setting('ELASTICSEARCH_HOST')
diff --git a/wqflask/utility/webqtlUtil.py b/wqflask/utility/webqtlUtil.py
index f355a865..0cb71567 100644
--- a/wqflask/utility/webqtlUtil.py
+++ b/wqflask/utility/webqtlUtil.py
@@ -35,29 +35,29 @@ from base import webqtlConfig
# NL, 07/27/2010. moved from webqtlForm.py
# Dict of Parents and F1 information, In the order of [F1, Mat, Pat]
ParInfo = {
-'BXH': ['BHF1', 'HBF1', 'C57BL/6J', 'C3H/HeJ'],
-'AKXD': ['AKF1', 'KAF1', 'AKR/J', 'DBA/2J'],
-'BXD': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'],
-'C57BL-6JxC57BL-6NJF2': ['', '', 'C57BL/6J', 'C57BL/6NJ'],
-'BXD300': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'],
-'B6BTBRF2': ['B6BTBRF1', 'BTBRB6F1', 'C57BL/6J', 'BTBRT<+>tf/J'],
-'BHHBF2': ['B6HF2', 'HB6F2', 'C57BL/6J', 'C3H/HeJ'],
-'BHF2': ['B6HF2', 'HB6F2', 'C57BL/6J', 'C3H/HeJ'],
-'B6D2F2': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'],
-'BDF2-1999': ['B6D2F2', 'D2B6F2', 'C57BL/6J', 'DBA/2J'],
-'BDF2-2005': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'],
-'CTB6F2': ['CTB6F2', 'B6CTF2', 'C57BL/6J', 'Castaneous'],
-'CXB': ['CBF1', 'BCF1', 'C57BL/6ByJ', 'BALB/cByJ'],
-'AXBXA': ['ABF1', 'BAF1', 'C57BL/6J', 'A/J'],
-'AXB': ['ABF1', 'BAF1', 'C57BL/6J', 'A/J'],
-'BXA': ['BAF1', 'ABF1', 'C57BL/6J', 'A/J'],
-'LXS': ['LSF1', 'SLF1', 'ISS', 'ILS'],
-'HXBBXH': ['SHR_BNF1', 'BN_SHRF1', 'BN-Lx/Cub', 'SHR/OlaIpcv'],
-'BayXSha': ['BayXShaF1', 'ShaXBayF1', 'Bay-0', 'Shahdara'],
-'ColXBur': ['ColXBurF1', 'BurXColF1', 'Col-0', 'Bur-0'],
-'ColXCvi': ['ColXCviF1', 'CviXColF1', 'Col-0', 'Cvi'],
-'SXM': ['SMF1', 'MSF1', 'Steptoe', 'Morex'],
-'HRDP': ['SHR_BNF1', 'BN_SHRF1', 'BN-Lx/Cub', 'SHR/OlaIpcv']
+ 'BXH': ['BHF1', 'HBF1', 'C57BL/6J', 'C3H/HeJ'],
+ 'AKXD': ['AKF1', 'KAF1', 'AKR/J', 'DBA/2J'],
+ 'BXD': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'],
+ 'C57BL-6JxC57BL-6NJF2': ['', '', 'C57BL/6J', 'C57BL/6NJ'],
+ 'BXD300': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'],
+ 'B6BTBRF2': ['B6BTBRF1', 'BTBRB6F1', 'C57BL/6J', 'BTBRT<+>tf/J'],
+ 'BHHBF2': ['B6HF2', 'HB6F2', 'C57BL/6J', 'C3H/HeJ'],
+ 'BHF2': ['B6HF2', 'HB6F2', 'C57BL/6J', 'C3H/HeJ'],
+ 'B6D2F2': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'],
+ 'BDF2-1999': ['B6D2F2', 'D2B6F2', 'C57BL/6J', 'DBA/2J'],
+ 'BDF2-2005': ['B6D2F1', 'D2B6F1', 'C57BL/6J', 'DBA/2J'],
+ 'CTB6F2': ['CTB6F2', 'B6CTF2', 'C57BL/6J', 'Castaneous'],
+ 'CXB': ['CBF1', 'BCF1', 'C57BL/6ByJ', 'BALB/cByJ'],
+ 'AXBXA': ['ABF1', 'BAF1', 'C57BL/6J', 'A/J'],
+ 'AXB': ['ABF1', 'BAF1', 'C57BL/6J', 'A/J'],
+ 'BXA': ['BAF1', 'ABF1', 'C57BL/6J', 'A/J'],
+ 'LXS': ['LSF1', 'SLF1', 'ISS', 'ILS'],
+ 'HXBBXH': ['SHR_BNF1', 'BN_SHRF1', 'BN-Lx/Cub', 'SHR/OlaIpcv'],
+ 'BayXSha': ['BayXShaF1', 'ShaXBayF1', 'Bay-0', 'Shahdara'],
+ 'ColXBur': ['ColXBurF1', 'BurXColF1', 'Col-0', 'Bur-0'],
+ 'ColXCvi': ['ColXCviF1', 'CviXColF1', 'Col-0', 'Cvi'],
+ 'SXM': ['SMF1', 'MSF1', 'Steptoe', 'Morex'],
+ 'HRDP': ['SHR_BNF1', 'BN_SHRF1', 'BN-Lx/Cub', 'SHR/OlaIpcv']
}
#########################################
diff --git a/wqflask/wqflask/api/correlation.py b/wqflask/wqflask/api/correlation.py
index 52026a82..870f3275 100644
--- a/wqflask/wqflask/api/correlation.py
+++ b/wqflask/wqflask/api/correlation.py
@@ -88,7 +88,7 @@ def calculate_results(this_trait, this_dataset, target_dataset, corr_params):
corr_results = do_literature_correlation_for_all_traits(
this_trait, this_dataset, trait_geneid_dict, corr_params)
sorted_results = collections.OrderedDict(sorted(list(corr_results.items()),
- key=lambda t: -abs(t[1][1])))
+ key=lambda t: -abs(t[1][1])))
else:
for target_trait, target_vals in list(target_dataset.trait_data.items()):
result = get_sample_r_and_p_values(
diff --git a/wqflask/wqflask/api/mapping.py b/wqflask/wqflask/api/mapping.py
index e4a3fb77..cbef96eb 100644
--- a/wqflask/wqflask/api/mapping.py
+++ b/wqflask/wqflask/api/mapping.py
@@ -53,15 +53,15 @@ def do_mapping_for_api(start_vars):
header_row = ["name", "chr", "cM", "lod_score"]
if mapping_params['num_perm'] > 0:
_sperm_output, _suggestive, _significant, result_markers = rqtl_mapping.run_rqtl_geno(vals, dataset, mapping_params['rqtl_method'], mapping_params['rqtl_model'],
- mapping_params['perm_check'], mapping_params['num_perm'],
- mapping_params['do_control'], mapping_params[
- 'control_marker'],
- mapping_params['manhattan_plot'], mapping_params['pair_scan'])
+ mapping_params['perm_check'], mapping_params['num_perm'],
+ mapping_params['do_control'], mapping_params[
+ 'control_marker'],
+ mapping_params['manhattan_plot'], mapping_params['pair_scan'])
else:
result_markers = rqtl_mapping.run_rqtl_geno(vals, dataset, mapping_params['rqtl_method'], mapping_params['rqtl_model'],
- mapping_params['perm_check'], mapping_params['num_perm'],
- mapping_params['do_control'], mapping_params['control_marker'],
- mapping_params['manhattan_plot'], mapping_params['pair_scan'])
+ mapping_params['perm_check'], mapping_params['num_perm'],
+ mapping_params['do_control'], mapping_params['control_marker'],
+ mapping_params['manhattan_plot'], mapping_params['pair_scan'])
if mapping_params['limit_to']:
result_markers = result_markers[:mapping_params['limit_to']]
diff --git a/wqflask/wqflask/api/router.py b/wqflask/wqflask/api/router.py
index f7d52ca3..9d3446db 100644
--- a/wqflask/wqflask/api/router.py
+++ b/wqflask/wqflask/api/router.py
@@ -41,10 +41,10 @@ def get_species_list():
species_list = []
for species in the_species:
species_dict = {
- "Id": species[0],
- "Name": species[1],
- "FullName": species[2],
- "TaxonomyId": species[3]
+ "Id": species[0],
+ "Name": species[1],
+ "FullName": species[2],
+ "TaxonomyId": species[3]
}
species_list.append(species_dict)
@@ -60,10 +60,10 @@ def get_species_info(species_name, file_format="json"):
the_species = results.fetchone()
species_dict = {
- "Id": the_species[0],
- "Name": the_species[1],
- "FullName": the_species[2],
- "TaxonomyId": the_species[3]
+ "Id": the_species[0],
+ "Name": the_species[1],
+ "FullName": the_species[2],
+ "TaxonomyId": the_species[3]
}
return flask.jsonify(species_dict)
@@ -92,12 +92,12 @@ def get_groups_list(species_name=None):
groups_list = []
for group in the_groups:
group_dict = {
- "Id": group[0],
- "SpeciesId": group[1],
- "DisplayName": group[2],
- "Name": group[3],
- "FullName": group[4],
- "public": group[5],
+ "Id": group[0],
+ "SpeciesId": group[1],
+ "DisplayName": group[2],
+ "Name": group[3],
+ "FullName": group[4],
+ "public": group[5],
"MappingMethodId": group[6],
"GeneticType": group[7]
}
@@ -137,12 +137,12 @@ def get_group_info(group_name, species_name=None, file_format="json"):
group = results.fetchone()
if group:
group_dict = {
- "Id": group[0],
- "SpeciesId": group[1],
- "DisplayName": group[2],
- "Name": group[3],
- "FullName": group[4],
- "public": group[5],
+ "Id": group[0],
+ "SpeciesId": group[1],
+ "DisplayName": group[2],
+ "Name": group[3],
+ "FullName": group[4],
+ "public": group[5],
"MappingMethodId": group[6],
"GeneticType": group[7]
}
@@ -186,12 +186,12 @@ def get_datasets_for_group(group_name, species_name=None):
datasets_list = []
for dataset in the_datasets:
dataset_dict = {
- "Id": dataset[0],
- "ProbeFreezeId": dataset[1],
- "AvgID": dataset[2],
- "Short_Abbreviation": dataset[3],
- "Long_Abbreviation": dataset[4],
- "FullName": dataset[5],
+ "Id": dataset[0],
+ "ProbeFreezeId": dataset[1],
+ "AvgID": dataset[2],
+ "Short_Abbreviation": dataset[3],
+ "Long_Abbreviation": dataset[4],
+ "FullName": dataset[5],
"ShortName": dataset[6],
"CreateTime": dataset[7],
"public": dataset[8],
@@ -243,12 +243,12 @@ def get_dataset_info(dataset_name, group_name=None, file_format="json"):
if dataset:
dataset_dict = {
- "dataset_type": "mRNA expression",
- "id": dataset[0],
- "name": dataset[1],
- "full_name": dataset[2],
- "short_name": dataset[3],
- "data_scale": dataset[4],
+ "dataset_type": "mRNA expression",
+ "id": dataset[0],
+ "name": dataset[1],
+ "full_name": dataset[2],
+ "short_name": dataset[3],
+ "data_scale": dataset[4],
"tissue_id": dataset[5],
"tissue": dataset[6],
"public": dataset[7],
@@ -280,25 +280,25 @@ def get_dataset_info(dataset_name, group_name=None, file_format="json"):
if dataset:
if dataset[5]:
dataset_dict = {
- "dataset_type": "phenotype",
- "id": dataset[0],
- "name": dataset[1],
- "description": dataset[2],
- "pubmed_id": dataset[5],
- "title": dataset[6],
+ "dataset_type": "phenotype",
+ "id": dataset[0],
+ "name": dataset[1],
+ "description": dataset[2],
+ "pubmed_id": dataset[5],
+ "title": dataset[6],
"year": dataset[7]
}
elif dataset[4]:
dataset_dict = {
- "dataset_type": "phenotype",
- "id": dataset[0],
- "name": dataset[3],
- "description": dataset[4]
+ "dataset_type": "phenotype",
+ "id": dataset[0],
+ "name": dataset[3],
+ "description": dataset[4]
}
else:
dataset_dict = {
- "dataset_type": "phenotype",
- "id": dataset[0]
+ "dataset_type": "phenotype",
+ "id": dataset[0]
}
datasets_list.append(dataset_dict)
@@ -364,7 +364,7 @@ def fetch_traits(dataset_name, file_format="json"):
"""
field_list = ["Id", "Name", "Symbol", "Description", "Chr", "Mb",
- "Aliases", "Mean", "SE", "Locus", "LRS", "P-Value", "Additive", "h2"]
+ "Aliases", "Mean", "SE", "Locus", "LRS", "P-Value", "Additive", "h2"]
elif data_type == "Geno":
query = """
SELECT
@@ -382,7 +382,7 @@ def fetch_traits(dataset_name, file_format="json"):
"""
field_list = ["Id", "Name", "Marker_Name",
- "Chr", "Mb", "Sequence", "Source"]
+ "Chr", "Mb", "Sequence", "Source"]
else:
query = """
SELECT
@@ -399,7 +399,7 @@ def fetch_traits(dataset_name, file_format="json"):
"""
field_list = ["Id", "PhenotypeId", "PublicationId",
- "Locus", "LRS", "Additive", "Sequence"]
+ "Locus", "LRS", "Additive", "Sequence"]
if 'limit_to' in request.args:
limit_number = request.args['limit_to']
@@ -579,10 +579,10 @@ def trait_sample_data(dataset_name, trait_name, file_format="json"):
sample_list = []
for sample in sample_data:
sample_dict = {
- "sample_name": sample[0],
- "sample_name_2": sample[1],
- "value": sample[2],
- "data_id": sample[3],
+ "sample_name": sample[0],
+ "sample_name_2": sample[1],
+ "value": sample[2],
+ "data_id": sample[3],
}
if sample[4]:
sample_dict["se"] = sample[4]
@@ -626,10 +626,10 @@ def trait_sample_data(dataset_name, trait_name, file_format="json"):
sample_list = []
for sample in sample_data:
sample_dict = {
- "sample_name": sample[0],
- "sample_name_2": sample[1],
- "value": sample[2],
- "data_id": sample[3]
+ "sample_name": sample[0],
+ "sample_name_2": sample[1],
+ "value": sample[2],
+ "data_id": sample[3]
}
if sample[4]:
sample_dict["se"] = sample[4]
@@ -796,9 +796,9 @@ def get_genotypes(group_name, file_format="csv", dataset_name=None):
config_file = [filename + ".json", json.dumps(yaml_file)]
#config_file = [filename + ".yaml", open("{0}/{1}.yaml".format(flat_files("genotype/rqtl2"), group_name))]
geno_file = [filename + "_geno.csv",
- open("{0}/{1}_geno.csv".format(flat_files("genotype/rqtl2"), group_name))]
+ open("{0}/{1}_geno.csv".format(flat_files("genotype/rqtl2"), group_name))]
gmap_file = [filename + "_gmap.csv",
- open("{0}/{1}_gmap.csv".format(flat_files("genotype/rqtl2"), group_name))]
+ open("{0}/{1}_gmap.csv".format(flat_files("genotype/rqtl2"), group_name))]
if dataset_name:
phenotypes = requests.get(
"http://gn2.genenetwork.org/api/v_pre1/sample_data/" + dataset_name)
@@ -828,7 +828,7 @@ def get_genotypes(group_name, file_format="csv", dataset_name=None):
if limit_num and i >= limit_num:
break
output_lines.append([line.strip()
- for line in line.split(",")])
+ for line in line.split(",")])
i += 1
csv_writer = csv.writer(si, delimiter=",")
@@ -914,7 +914,7 @@ def get_dataset_trait_ids(dataset_name, start_vars):
trait_ids = [result[0] for result in results]
trait_names = [str(result[2]) + "_" + str(result[1])
- for result in results]
+ for result in results]
return trait_ids, trait_names, data_type, dataset_id
diff --git a/wqflask/wqflask/collect.py b/wqflask/wqflask/collect.py
index b06d84ff..58518639 100644
--- a/wqflask/wqflask/collect.py
+++ b/wqflask/wqflask/collect.py
@@ -75,14 +75,14 @@ def collections_add():
if 'traits' in request.args:
traits = request.args['traits']
return render_template("collections/add.html",
- traits=traits,
- collections=collections,
+ traits=traits,
+ collections=collections,
)
else:
hash = request.args['hash']
return render_template("collections/add.html",
- hash=hash,
- collections=collections,
+ hash=hash,
+ collections=collections,
)
@@ -145,8 +145,8 @@ def list_collections():
user_collections = list(g.user_session.user_collections)
return render_template("collections/list.html",
- params=params,
- collections=user_collections,
+ params=params,
+ collections=user_collections,
)
@@ -225,7 +225,7 @@ def view_collection():
return json.dumps(json_version)
else:
return render_template("collections/view.html",
- **collection_info
+ **collection_info
)
diff --git a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py
index d86c8e16..cb88eb53 100644
--- a/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py
+++ b/wqflask/wqflask/comparison_bar_chart/comparison_bar_chart.py
@@ -35,7 +35,7 @@ class ComparisonBarChart:
def __init__(self, start_vars):
trait_db_list = [trait.strip()
- for trait in start_vars['trait_list'].split(',')]
+ for trait in start_vars['trait_list'].split(',')]
helper_functions.get_trait_db_obs(self, trait_db_list)
@@ -90,8 +90,8 @@ class ComparisonBarChart:
#print("dataset_name:", dataset_name)
dataset_ob = data_set.create_dataset(dataset_name)
trait_ob = create_trait(dataset=dataset_ob,
- name=trait_name,
- cellid=None)
+ name=trait_name,
+ cellid=None)
self.trait_list.append((trait_ob, dataset_ob))
#print("trait_list:", self.trait_list)
diff --git a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py
index e8b7b057..aa39bc5c 100644
--- a/wqflask/wqflask/correlation/show_corr_results.py
+++ b/wqflask/wqflask/correlation/show_corr_results.py
@@ -82,8 +82,8 @@ class CorrelationResults:
dataset_name="Temp", dataset_type="Temp", group_name = start_vars['group'])
self.trait_id = start_vars['trait_id']
self.this_trait = create_trait(dataset=self.dataset,
- name=self.trait_id,
- cellid=None)
+ name=self.trait_id,
+ cellid=None)
else:
helper_functions.get_species_dataset_trait(self, start_vars)
@@ -98,7 +98,7 @@ class CorrelationResults:
if ('loc_chr' in start_vars
and 'min_loc_mb' in start_vars
- and 'max_loc_mb' in start_vars):
+ and 'max_loc_mb' in start_vars):
self.location_type = get_string(start_vars, 'location_type')
self.location_chr = get_string(start_vars, 'loc_chr')
@@ -129,7 +129,7 @@ class CorrelationResults:
if corr_samples_group != 'samples_primary':
if corr_samples_group == 'samples_other':
primary_samples = [x for x in primary_samples if x not in (
- self.dataset.group.parlist + self.dataset.group.f1list)]
+ self.dataset.group.parlist + self.dataset.group.f1list)]
self.process_samples(start_vars, list(
self.this_trait.data.keys()), primary_samples)
@@ -201,7 +201,7 @@ class CorrelationResults:
chr_as_int = order_id
if (float(self.correlation_data[trait][0]) >= self.p_range_lower
- and float(self.correlation_data[trait][0]) <= self.p_range_upper):
+ and float(self.correlation_data[trait][0]) <= self.p_range_upper):
if (self.target_dataset.type == "ProbeSet" or self.target_dataset.type == "Publish") and bool(trait_object.mean):
if (self.min_expr != None) and (float(trait_object.mean) < self.min_expr):
@@ -221,8 +221,8 @@ class CorrelationResults:
continue
(trait_object.sample_r,
- trait_object.sample_p,
- trait_object.num_overlap) = self.correlation_data[trait]
+ trait_object.sample_p,
+ trait_object.num_overlap) = self.correlation_data[trait]
# Set some sane defaults
trait_object.tissue_corr = 0
@@ -277,7 +277,7 @@ class CorrelationResults:
trait.symbol for trait in self.correlation_results if trait.symbol]
corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(
- symbol_list=gene_symbol_list)
+ symbol_list=gene_symbol_list)
for trait in self.correlation_results:
if trait.symbol and trait.symbol.lower() in corr_result_tissue_vals_dict:
@@ -285,8 +285,8 @@ class CorrelationResults:
)]
result = correlation_functions.cal_zero_order_corr_for_tiss(primary_trait_tissue_values,
- this_trait_tissue_values,
- self.corr_method)
+ this_trait_tissue_values,
+ self.corr_method)
trait.tissue_corr = result[0]
trait.tissue_pvalue = result[2]
@@ -302,7 +302,7 @@ class CorrelationResults:
#print("trait_gene_symbols: ", pf(trait_gene_symbols.values()))
corr_result_tissue_vals_dict = correlation_functions.get_trait_symbol_and_tissue_values(
- symbol_list=list(self.trait_symbol_dict.values()))
+ symbol_list=list(self.trait_symbol_dict.values()))
#print("corr_result_tissue_vals: ", pf(corr_result_tissue_vals_dict))
@@ -315,13 +315,13 @@ class CorrelationResults:
)]
result = correlation_functions.cal_zero_order_corr_for_tiss(primary_trait_tissue_values,
- this_trait_tissue_values,
- self.corr_method)
+ this_trait_tissue_values,
+ self.corr_method)
tissue_corr_data[trait] = [symbol, result[0], result[2]]
tissue_corr_data = collections.OrderedDict(sorted(list(tissue_corr_data.items()),
- key=lambda t: -abs(t[1][1])))
+ key=lambda t: -abs(t[1][1])))
return tissue_corr_data
@@ -397,7 +397,7 @@ class CorrelationResults:
lit_corr_data[trait] = [gene_id, 0]
lit_corr_data = collections.OrderedDict(sorted(list(lit_corr_data.items()),
- key=lambda t: -abs(t[1][1])))
+ key=lambda t: -abs(t[1][1])))
return lit_corr_data
@@ -603,12 +603,12 @@ def get_header_fields(data_type, corr_method):
if data_type == "ProbeSet":
if corr_method == "spearman":
header_fields = ['Index',
- 'Record',
- 'Symbol',
- 'Description',
- 'Location',
- 'Mean',
- 'Sample rho',
+ 'Record',
+ 'Symbol',
+ 'Description',
+ 'Location',
+ 'Mean',
+ 'Sample rho',
'N',
'Sample p(rho)',
'Lit rho',
@@ -619,12 +619,12 @@ def get_header_fields(data_type, corr_method):
'Additive Effect']
else:
header_fields = ['Index',
- 'Record',
- 'Symbol',
- 'Description',
- 'Location',
- 'Mean',
- 'Sample r',
+ 'Record',
+ 'Symbol',
+ 'Description',
+ 'Location',
+ 'Mean',
+ 'Sample r',
'N',
'Sample p(r)',
'Lit r',
@@ -636,47 +636,47 @@ def get_header_fields(data_type, corr_method):
elif data_type == "Publish":
if corr_method == "spearman":
header_fields = ['Index',
- 'Record',
- 'Abbreviation',
- 'Description',
- 'Mean',
- 'Authors',
- 'Year',
- 'Sample rho',
- 'N',
- 'Sample p(rho)',
- 'Max LRS',
- 'Max LRS Location',
- 'Additive Effect']
+ 'Record',
+ 'Abbreviation',
+ 'Description',
+ 'Mean',
+ 'Authors',
+ 'Year',
+ 'Sample rho',
+ 'N',
+ 'Sample p(rho)',
+ 'Max LRS',
+ 'Max LRS Location',
+ 'Additive Effect']
else:
header_fields = ['Index',
- 'Record',
- 'Abbreviation',
- 'Description',
- 'Mean',
- 'Authors',
- 'Year',
- 'Sample r',
- 'N',
- 'Sample p(r)',
- 'Max LRS',
- 'Max LRS Location',
- 'Additive Effect']
+ 'Record',
+ 'Abbreviation',
+ 'Description',
+ 'Mean',
+ 'Authors',
+ 'Year',
+ 'Sample r',
+ 'N',
+ 'Sample p(r)',
+ 'Max LRS',
+ 'Max LRS Location',
+ 'Additive Effect']
else:
if corr_method == "spearman":
header_fields = ['Index',
- 'ID',
- 'Location',
- 'Sample rho',
- 'N',
- 'Sample p(rho)']
+ 'ID',
+ 'Location',
+ 'Sample rho',
+ 'N',
+ 'Sample p(rho)']
else:
header_fields = ['Index',
- 'ID',
- 'Location',
- 'Sample r',
- 'N',
- 'Sample p(r)']
+ 'ID',
+ 'Location',
+ 'Sample r',
+ 'N',
+ 'Sample p(r)']
return header_fields
diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py
index c04b17be..c1bf3daa 100644
--- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py
+++ b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py
@@ -45,7 +45,7 @@ class CorrelationMatrix:
def __init__(self, start_vars):
trait_db_list = [trait.strip()
- for trait in start_vars['trait_list'].split(',')]
+ for trait in start_vars['trait_list'].split(',')]
helper_functions.get_trait_db_obs(self, trait_db_list)
@@ -215,7 +215,7 @@ class CorrelationMatrix:
temp_dataset.group.get_samplelist()
for i, pca_trait in enumerate(pca_traits):
trait_id = "PCA" + str(i + 1) + "_" + temp_dataset.group.species + "_" + \
- this_group_name + "_" + datetime.datetime.now().strftime("%m%d%H%M%S")
+ this_group_name + "_" + datetime.datetime.now().strftime("%m%d%H%M%S")
this_vals_string = ""
position = 0
for sample in temp_dataset.group.all_samples_ordered():
diff --git a/wqflask/wqflask/ctl/ctl_analysis.py b/wqflask/wqflask/ctl/ctl_analysis.py
index 820e81bc..a0fb34d8 100644
--- a/wqflask/wqflask/ctl/ctl_analysis.py
+++ b/wqflask/wqflask/ctl/ctl_analysis.py
@@ -66,12 +66,12 @@ class CTL:
def addNode(self, gt):
node_dict = {'data': {'id': str(gt.name) + ":" + str(gt.dataset.name),
- 'sid': str(gt.name),
- 'dataset': str(gt.dataset.name),
- 'label': gt.name,
- 'symbol': gt.symbol,
- 'geneid': gt.geneid,
- 'omim': gt.omim}}
+ 'sid': str(gt.name),
+ 'dataset': str(gt.dataset.name),
+ 'label': gt.name,
+ 'symbol': gt.symbol,
+ 'geneid': gt.geneid,
+ 'omim': gt.omim}}
self.nodes_list.append(node_dict)
def addEdge(self, gtS, gtT, significant, x):
@@ -87,7 +87,7 @@ class CTL:
def run_analysis(self, requestform):
logger.info("Starting CTL analysis on dataset")
self.trait_db_list = [trait.strip()
- for trait in requestform['trait_list'].split(',')]
+ for trait in requestform['trait_list'].split(',')]
self.trait_db_list = [x for x in self.trait_db_list if x]
logger.debug("strategy:", requestform.get("strategy"))
@@ -179,9 +179,9 @@ class CTL:
for trait in self.trait_db_list:
# Create the QTL like CTL plots
self.results['imgurl' + \
- str(n)] = webqtlUtil.genRandStr("CTL_") + ".png"
+ str(n)] = webqtlUtil.genRandStr("CTL_") + ".png"
self.results['imgloc' + str(n)] = GENERATED_IMAGE_DIR + \
- self.results['imgurl' + str(n)]
+ self.results['imgurl' + str(n)]
r_png(self.results['imgloc' + str(n)],
width=1000, height=600, type='cairo-png')
self.r_plotCTLobject(
diff --git a/wqflask/wqflask/do_search.py b/wqflask/wqflask/do_search.py
index 48527785..761ae326 100644
--- a/wqflask/wqflask/do_search.py
+++ b/wqflask/wqflask/do_search.py
@@ -112,7 +112,7 @@ class MrnaAssaySearch(DoSearch):
match_clause = ""
where_clause = (match_clause
- + """ProbeSet.Id = ProbeSetXRef.ProbeSetId
+ + """ProbeSet.Id = ProbeSetXRef.ProbeSetId
and ProbeSetXRef.ProbeSetFreezeId = %s
""" % (escape(str(self.dataset.id))))
@@ -135,7 +135,7 @@ class MrnaAssaySearch(DoSearch):
match_clause = ""
where_clause = (match_clause
- + """ProbeSet.Id = ProbeSetXRef.ProbeSetId
+ + """ProbeSet.Id = ProbeSetXRef.ProbeSetId
and ProbeSetXRef.ProbeSetFreezeId = %s
""" % (escape(str(self.dataset.id))))
@@ -147,14 +147,14 @@ class MrnaAssaySearch(DoSearch):
from_clause = self.normalize_spaces(from_clause)
query = (self.base_query +
- """%s
+ """%s
WHERE %s
and ProbeSet.Id = ProbeSetXRef.ProbeSetId
and ProbeSetXRef.ProbeSetFreezeId = %s
ORDER BY ProbeSet.symbol ASC
""" % (escape(from_clause),
- where_clause,
- escape(str(self.dataset.id))))
+ where_clause,
+ escape(str(self.dataset.id))))
return query
def run_combined(self, from_clause='', where_clause=''):
@@ -166,14 +166,14 @@ class MrnaAssaySearch(DoSearch):
from_clause = self.normalize_spaces(from_clause)
query = (self.base_query +
- """%s
+ """%s
WHERE %s
and ProbeSet.Id = ProbeSetXRef.ProbeSetId
and ProbeSetXRef.ProbeSetFreezeId = %s
ORDER BY ProbeSet.symbol ASC
""" % (escape(from_clause),
- where_clause,
- escape(str(self.dataset.id))))
+ where_clause,
+ escape(str(self.dataset.id))))
return self.execute(query)
@@ -199,15 +199,15 @@ class PhenotypeSearch(DoSearch):
FROM Phenotype, PublishFreeze, Publication, PublishXRef """
search_fields = ('Phenotype.Post_publication_description',
- 'Phenotype.Pre_publication_description',
- 'Phenotype.Pre_publication_abbreviation',
- 'Phenotype.Post_publication_abbreviation',
- 'Phenotype.Lab_code',
- 'Publication.PubMed_ID',
- 'Publication.Abstract',
- 'Publication.Title',
- 'Publication.Authors',
- 'PublishXRef.Id')
+ 'Phenotype.Pre_publication_description',
+ 'Phenotype.Pre_publication_abbreviation',
+ 'Phenotype.Post_publication_abbreviation',
+ 'Phenotype.Lab_code',
+ 'Publication.PubMed_ID',
+ 'Publication.Abstract',
+ 'Publication.Title',
+ 'Publication.Authors',
+ 'PublishXRef.Id')
header_fields = ['Index',
'Record',
@@ -250,28 +250,28 @@ class PhenotypeSearch(DoSearch):
if self.search_term[0] == "*":
query = (self.base_query +
- """%s
+ """%s
WHERE PublishXRef.InbredSetId = %s
and PublishXRef.PhenotypeId = Phenotype.Id
and PublishXRef.PublicationId = Publication.Id
and PublishFreeze.Id = %s
ORDER BY PublishXRef.Id""" % (
- from_clause,
- escape(str(self.dataset.group.id)),
- escape(str(self.dataset.id))))
+ from_clause,
+ escape(str(self.dataset.group.id)),
+ escape(str(self.dataset.id))))
else:
query = (self.base_query +
- """%s
+ """%s
WHERE %s
and PublishXRef.InbredSetId = %s
and PublishXRef.PhenotypeId = Phenotype.Id
and PublishXRef.PublicationId = Publication.Id
and PublishFreeze.Id = %s
ORDER BY PublishXRef.Id""" % (
- from_clause,
- where_clause,
- escape(str(self.dataset.group.id)),
- escape(str(self.dataset.id))))
+ from_clause,
+ where_clause,
+ escape(str(self.dataset.group.id)),
+ escape(str(self.dataset.id))))
return query
@@ -283,16 +283,16 @@ class PhenotypeSearch(DoSearch):
from_clause = self.normalize_spaces(from_clause)
query = (self.base_query +
- """%s
+ """%s
WHERE %s
PublishXRef.InbredSetId = %s and
PublishXRef.PhenotypeId = Phenotype.Id and
PublishXRef.PublicationId = Publication.Id and
PublishFreeze.Id = %s""" % (
- from_clause,
- where_clause,
- escape(str(self.dataset.group.id)),
- escape(str(self.dataset.id))))
+ from_clause,
+ where_clause,
+ escape(str(self.dataset.group.id)),
+ escape(str(self.dataset.id))))
return self.execute(query)
@@ -336,7 +336,7 @@ class GenotypeSearch(DoSearch):
for field in self.search_fields:
where_clause.append('''%s REGEXP "%s"''' % ("%s.%s" % self.mescape(self.dataset.type,
field),
- self.search_term))
+ self.search_term))
logger.debug("hello ;where_clause is:", pf(where_clause))
where_clause = "(%s) " % ' OR '.join(where_clause)
@@ -349,16 +349,16 @@ class GenotypeSearch(DoSearch):
if self.search_term[0] == "*":
query = (self.base_query
- + """WHERE Geno.Id = GenoXRef.GenoId
+ + """WHERE Geno.Id = GenoXRef.GenoId
and GenoXRef.GenoFreezeId = GenoFreeze.Id
and GenoFreeze.Id = %s""" % (escape(str(self.dataset.id))))
else:
query = (self.base_query +
- """WHERE %s
+ """WHERE %s
and Geno.Id = GenoXRef.GenoId
and GenoXRef.GenoFreezeId = GenoFreeze.Id
and GenoFreeze.Id = %s""" % (where_clause,
- escape(str(self.dataset.id))))
+ escape(str(self.dataset.id))))
return query
@@ -526,7 +526,7 @@ class LrsSearch(DoSearch):
where_clause += """ and %sXRef.Locus = Geno.name and
Geno.SpeciesId = %s
""" % self.mescape(self.dataset.type,
- self.species_id)
+ self.species_id)
else:
# Deal with >, <, >=, and <=
logger.debug("self.search_term is:", self.search_term)
@@ -535,8 +535,8 @@ class LrsSearch(DoSearch):
lrs_val = lrs_val * 4.61
where_clause = """ %sXRef.LRS %s %s """ % self.mescape(self.dataset.type,
- self.search_operator,
- self.search_term[0])
+ self.search_operator,
+ self.search_term[0])
return where_clause
@@ -618,18 +618,18 @@ class CisTransLrsSearch(DoSearch):
sub_clause = """ %sXRef.LRS > %s and
%sXRef.LRS < %s and """ % (
- escape(self.dataset.type),
- escape(str(min(lrs_min, lrs_max))),
- escape(self.dataset.type),
- escape(str(max(lrs_min, lrs_max)))
- )
+ escape(self.dataset.type),
+ escape(str(min(lrs_min, lrs_max))),
+ escape(self.dataset.type),
+ escape(str(max(lrs_min, lrs_max)))
+ )
else:
# Deal with >, <, >=, and <=
sub_clause = """ %sXRef.LRS %s %s and """ % (
- escape(self.dataset.type),
- escape(self.search_operator),
- escape(self.search_term[0])
- )
+ escape(self.dataset.type),
+ escape(self.search_operator),
+ escape(self.search_term[0])
+ )
if cis_trans == "cis":
where_clause = sub_clause + """
@@ -637,27 +637,27 @@ class CisTransLrsSearch(DoSearch):
%sXRef.Locus = Geno.name and
Geno.SpeciesId = %s and
%s.Chr = Geno.Chr""" % (
- escape(self.dataset.type),
- the_operator,
- escape(str(self.mb_buffer)),
- escape(self.dataset.type),
- escape(str(self.species_id)),
- escape(self.dataset.type)
- )
+ escape(self.dataset.type),
+ the_operator,
+ escape(str(self.mb_buffer)),
+ escape(self.dataset.type),
+ escape(str(self.species_id)),
+ escape(self.dataset.type)
+ )
else:
if chromosome:
location_clause = "(%s.Chr = '%s' and %s.Chr = Geno.Chr and ABS(%s.Mb-Geno.Mb) %s %s) or (%s.Chr != Geno.Chr and Geno.Chr = '%s')" % (escape(self.dataset.type),
- chromosome,
- escape(
+ chromosome,
+ escape(
self.dataset.type),
- escape(
+ escape(
self.dataset.type),
- the_operator,
- escape(
+ the_operator,
+ escape(
str(self.mb_buffer)),
- escape(
+ escape(
self.dataset.type),
- chromosome)
+ chromosome)
else:
location_clause = "(ABS(%s.Mb-Geno.Mb) %s %s and %s.Chr = Geno.Chr) or (%s.Chr != Geno.Chr)" % (escape(
self.dataset.type), the_operator, escape(str(self.mb_buffer)), escape(self.dataset.type), escape(self.dataset.type))
@@ -665,10 +665,10 @@ class CisTransLrsSearch(DoSearch):
%sXRef.Locus = Geno.name and
Geno.SpeciesId = %s and
(%s)""" % (
- escape(self.dataset.type),
- escape(str(self.species_id)),
- location_clause
- )
+ escape(self.dataset.type),
+ escape(str(self.species_id)),
+ location_clause
+ )
return where_clause
@@ -752,15 +752,15 @@ class MeanSearch(MrnaAssaySearch):
where_clause = """ %sXRef.mean > %s and
%sXRef.mean < %s """ % self.mescape(self.dataset.type,
- min(self.mean_min,
- self.mean_max),
- self.dataset.type,
- max(self.mean_min, self.mean_max))
+ min(self.mean_min,
+ self.mean_max),
+ self.dataset.type,
+ max(self.mean_min, self.mean_max))
else:
# Deal with >, <, >=, and <=
where_clause = """ %sXRef.mean %s %s """ % self.mescape(self.dataset.type,
- self.search_operator,
- self.search_term[0])
+ self.search_operator,
+ self.search_term[0])
return where_clause
@@ -893,17 +893,17 @@ class PvalueSearch(MrnaAssaySearch):
self.pvalue_min, self.pvalue_max = self.search_term[:2]
self.where_clause = """ %sXRef.pValue > %s and %sXRef.pValue < %s
""" % self.mescape(
- self.dataset.type,
- min(self.pvalue_min, self.pvalue_max),
- self.dataset.type,
- max(self.pvalue_min, self.pvalue_max))
+ self.dataset.type,
+ min(self.pvalue_min, self.pvalue_max),
+ self.dataset.type,
+ max(self.pvalue_min, self.pvalue_max))
else:
# Deal with >, <, >=, and <=
self.where_clause = """ %sXRef.pValue %s %s
""" % self.mescape(
- self.dataset.type,
- self.search_operator,
- self.search_term[0])
+ self.dataset.type,
+ self.search_operator,
+ self.search_term[0])
logger.debug("where_clause is:", pf(self.where_clause))
diff --git a/wqflask/wqflask/export_traits.py b/wqflask/wqflask/export_traits.py
index 2c180d49..a22d6acc 100644
--- a/wqflask/wqflask/export_traits.py
+++ b/wqflask/wqflask/export_traits.py
@@ -60,7 +60,7 @@ def export_search_results_csv(targs):
trait_list.append(trait_ob)
table_headers = ['Index', 'URL', 'Species', 'Group', 'Dataset', 'Record ID', 'Symbol', 'Description', 'ProbeTarget', 'PubMed_ID', 'Chr', 'Mb', 'Alias', 'Gene_ID', 'Homologene_ID', 'UniGene_ID',
- 'Strand_Probe', 'Probe_set_specificity', 'Probe_set_BLAT_score', 'Probe_set_BLAT_Mb_start', 'Probe_set_BLAT_Mb_end', 'QTL_Chr', 'QTL_Mb', 'Locus_at_Peak', 'Max_LRS', 'P_value_of_MAX', 'Mean_Expression']
+ 'Strand_Probe', 'Probe_set_specificity', 'Probe_set_BLAT_score', 'Probe_set_BLAT_Mb_start', 'Probe_set_BLAT_Mb_end', 'QTL_Chr', 'QTL_Mb', 'Locus_at_Peak', 'Max_LRS', 'P_value_of_MAX', 'Mean_Expression']
traits_by_group = sort_traits_by_group(trait_list)
@@ -93,7 +93,7 @@ def export_search_results_csv(targs):
row_contents = [
i + 1,
"https://genenetwork.org/show_trait?trait_id=" + \
- str(trait.name) + "&dataset=" + str(trait.dataset.name),
+ str(trait.name) + "&dataset=" + str(trait.dataset.name),
trait.dataset.group.species,
trait.dataset.group.name,
trait.dataset.name,
@@ -124,7 +124,7 @@ def export_search_results_csv(targs):
for sample in trait.dataset.group.samplelist:
if sample in trait.data:
row_contents += [trait.data[sample].value,
- trait.data[sample].variance]
+ trait.data[sample].variance]
else:
row_contents += ["x", "x"]
diff --git a/wqflask/wqflask/external_tools/send_to_bnw.py b/wqflask/wqflask/external_tools/send_to_bnw.py
index 3c0f2ca7..c1b14ede 100644
--- a/wqflask/wqflask/external_tools/send_to_bnw.py
+++ b/wqflask/wqflask/external_tools/send_to_bnw.py
@@ -28,7 +28,7 @@ logger = utility.logger.getLogger(__name__)
class SendToBNW:
def __init__(self, start_vars):
trait_db_list = [trait.strip()
- for trait in start_vars['trait_list'].split(',')]
+ for trait in start_vars['trait_list'].split(',')]
helper_functions.get_trait_db_obs(self, trait_db_list)
trait_samples_list = []
diff --git a/wqflask/wqflask/external_tools/send_to_geneweaver.py b/wqflask/wqflask/external_tools/send_to_geneweaver.py
index 8af9bee9..9a4f7150 100644
--- a/wqflask/wqflask/external_tools/send_to_geneweaver.py
+++ b/wqflask/wqflask/external_tools/send_to_geneweaver.py
@@ -33,7 +33,7 @@ logger = utility.logger.getLogger(__name__)
class SendToGeneWeaver:
def __init__(self, start_vars):
trait_db_list = [trait.strip()
- for trait in start_vars['trait_list'].split(',')]
+ for trait in start_vars['trait_list'].split(',')]
helper_functions.get_trait_db_obs(self, trait_db_list)
self.chip_name = test_chip(self.trait_list)
@@ -54,11 +54,11 @@ class SendToGeneWeaver:
trait_name_list = get_trait_name_list(self.trait_list)
self.hidden_vars = {
- 'client': "genenetwork",
- 'species': species_name,
- 'idtype': self.chip_name,
- 'list': ",".join(trait_name_list),
- }
+ 'client': "genenetwork",
+ 'species': species_name,
+ 'idtype': self.chip_name,
+ 'list': ",".join(trait_name_list),
+ }
def get_trait_name_list(trait_list):
diff --git a/wqflask/wqflask/external_tools/send_to_webgestalt.py b/wqflask/wqflask/external_tools/send_to_webgestalt.py
index fcd943ba..6e74f4fe 100644
--- a/wqflask/wqflask/external_tools/send_to_webgestalt.py
+++ b/wqflask/wqflask/external_tools/send_to_webgestalt.py
@@ -33,7 +33,7 @@ logger = utility.logger.getLogger(__name__)
class SendToWebGestalt:
def __init__(self, start_vars):
trait_db_list = [trait.strip()
- for trait in start_vars['trait_list'].split(',')]
+ for trait in start_vars['trait_list'].split(',')]
helper_functions.get_trait_db_obs(self, trait_db_list)
self.chip_name = test_chip(self.trait_list)
@@ -49,7 +49,7 @@ class SendToWebGestalt:
id_type = "entrezgene"
self.hidden_vars = {
- 'gene_list': "\n".join(gene_id_list),
+ 'gene_list': "\n".join(gene_id_list),
'id_type': "entrezgene",
'ref_set': "genome",
'enriched_database_category': "geneontology",
@@ -59,7 +59,7 @@ class SendToWebGestalt:
'enrich_method': "ORA",
'fdr_method': "BH",
'min_num': "2"
- }
+ }
species = self.trait_list[0][1].group.species
if species == "rat":
diff --git a/wqflask/wqflask/group_manager.py b/wqflask/wqflask/group_manager.py
index 995915a9..b7e7e38a 100644
--- a/wqflask/wqflask/group_manager.py
+++ b/wqflask/wqflask/group_manager.py
@@ -8,7 +8,7 @@ from wqflask import app
from wqflask.user_login import send_verification_email, send_invitation_email, basic_info, set_password
from utility.redis_tools import get_user_groups, get_group_info, save_user, create_group, delete_group, add_users_to_group, remove_users_from_group, \
- change_group_name, save_verification_code, check_verification_code, get_user_by_unique_column, get_resources, get_resource_info
+ change_group_name, save_verification_code, check_verification_code, get_user_by_unique_column, get_resources, get_resource_info
from utility.logger import getLogger
logger = getLogger(__name__)
@@ -78,9 +78,9 @@ def remove_users():
member_ids_to_remove = request.form['selected_member_ids']
remove_users_from_group(g.user_session.user_id, admin_ids_to_remove.split(
- ":"), group_id, user_type="admins")
+ ":"), group_id, user_type="admins")
remove_users_from_group(g.user_session.user_id, member_ids_to_remove.split(
- ":"), group_id, user_type="members")
+ ":"), group_id, user_type="members")
return redirect(url_for('view_group', id=group_id))
@@ -133,7 +133,7 @@ def add_or_edit_group():
#send_group_invites(params['group_id'], user_email_list = user_emails, user_type="members")
create_group(list(admin_user_ids), list(
- member_user_ids), params['group_name'])
+ member_user_ids), params['group_name'])
return redirect(url_for('manage_groups'))
else:
return render_template("admin/create_group.html")
@@ -159,13 +159,13 @@ def send_group_invites(group_id, user_email_list=[], user_type="members"):
key_prefix="verification_code", subject = "You've been invited to join a GeneNetwork user group")
else:
temp_password = ''.join(random.choice(
- string.ascii_uppercase + string.digits) for _ in range(6))
+ string.ascii_uppercase + string.digits) for _ in range(6))
user_details = {
- 'user_id': str(uuid.uuid4()),
- 'email_address': user_email,
- 'registration_info': basic_info(),
- 'password': set_password(temp_password),
- 'confirmed': 0
+ 'user_id': str(uuid.uuid4()),
+ 'email_address': user_email,
+ 'registration_info': basic_info(),
+ 'password': set_password(temp_password),
+ 'confirmed': 0
}
save_user(user_details, user_details['user_id'])
send_invitation_email(user_email, temp_password)
diff --git a/wqflask/wqflask/gsearch.py b/wqflask/wqflask/gsearch.py
index f02da27c..fb8bdc55 100644
--- a/wqflask/wqflask/gsearch.py
+++ b/wqflask/wqflask/gsearch.py
@@ -124,7 +124,7 @@ class GSearch:
if this_trait['locus_chr'] != None and this_trait['locus_mb'] != None:
max_lrs_text = "Chr" + \
str(this_trait['locus_chr']) + \
- ": " + str(this_trait['locus_mb'])
+ ": " + str(this_trait['locus_mb'])
this_trait['max_lrs_text'] = max_lrs_text
trait_list.append(this_trait)
@@ -133,18 +133,18 @@ class GSearch:
self.trait_list = json.dumps(trait_list)
self.header_fields = ['Index',
- 'Record',
- 'Species',
- 'Group',
- 'Tissue',
- 'Dataset',
- 'Symbol',
- 'Description',
- 'Location',
- 'Mean',
- 'Max LRS',
- 'Max LRS Location',
- 'Additive Effect']
+ 'Record',
+ 'Species',
+ 'Group',
+ 'Tissue',
+ 'Dataset',
+ 'Symbol',
+ 'Description',
+ 'Location',
+ 'Mean',
+ 'Max LRS',
+ 'Max LRS Location',
+ 'Additive Effect']
elif self.type == "phenotype":
search_term = self.terms
@@ -251,7 +251,7 @@ class GSearch:
if trait_ob.locus_chr != "" and trait_ob.locus_mb != "":
this_trait['max_lrs_text'] = "Chr" + \
str(trait_ob.locus_chr) + \
- ": " + str(trait_ob.locus_mb)
+ ": " + str(trait_ob.locus_mb)
except:
this_trait['max_lrs_text'] = "N/A"
@@ -261,12 +261,12 @@ class GSearch:
self.trait_list = json.dumps(trait_list)
self.header_fields = ['Index',
- 'Species',
- 'Group',
- 'Record',
- 'Description',
- 'Authors',
- 'Year',
- 'Max LRS',
- 'Max LRS Location',
- 'Additive Effect']
+ 'Species',
+ 'Group',
+ 'Record',
+ 'Description',
+ 'Authors',
+ 'Year',
+ 'Max LRS',
+ 'Max LRS Location',
+ 'Additive Effect']
diff --git a/wqflask/wqflask/heatmap/heatmap.py b/wqflask/wqflask/heatmap/heatmap.py
index 02eb66e5..001bab3b 100644
--- a/wqflask/wqflask/heatmap/heatmap.py
+++ b/wqflask/wqflask/heatmap/heatmap.py
@@ -19,7 +19,7 @@ class Heatmap:
def __init__(self, start_vars, temp_uuid):
trait_db_list = [trait.strip()
- for trait in start_vars['trait_list'].split(',')]
+ for trait in start_vars['trait_list'].split(',')]
helper_functions.get_trait_db_obs(self, trait_db_list)
self.temp_uuid = temp_uuid
@@ -35,7 +35,7 @@ class Heatmap:
self.species = species.TheSpecies(dataset=self.trait_list[0][1])
for key in list(self.species.chromosomes.chromosomes.keys()):
chrnames.append([self.species.chromosomes.chromosomes[key].name,
- self.species.chromosomes.chromosomes[key].mb_length])
+ self.species.chromosomes.chromosomes[key].mb_length])
for trait_db in self.trait_list:
@@ -111,7 +111,7 @@ class Heatmap:
trimmed_values.append(values[i])
trait_filename = str(this_trait.name) + "_" + \
- str(self.dataset.name) + "_pheno"
+ str(self.dataset.name) + "_pheno"
gen_pheno_txt_file(trimmed_samples, trimmed_values, trait_filename)
output_filename = self.dataset.group.name + "_GWA_" + \
@@ -119,11 +119,11 @@ class Heatmap:
for _ in range(6))
reaper_command = REAPER_COMMAND + ' --geno {0}/{1}.geno --traits {2}/gn2/{3}.txt -n 1000 -o {4}{5}.txt'.format(flat_files('genotype'),
- genofile_name,
- TEMPDIR,
- trait_filename,
- webqtlConfig.GENERATED_IMAGE_DIR,
- output_filename)
+ genofile_name,
+ TEMPDIR,
+ trait_filename,
+ webqtlConfig.GENERATED_IMAGE_DIR,
+ output_filename)
os.system(reaper_command)
diff --git a/wqflask/wqflask/interval_analyst/GeneUtil.py b/wqflask/wqflask/interval_analyst/GeneUtil.py
index 2f1c142c..04980281 100644
--- a/wqflask/wqflask/interval_analyst/GeneUtil.py
+++ b/wqflask/wqflask/interval_analyst/GeneUtil.py
@@ -8,8 +8,8 @@ from flask import Flask, g
def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'):
fetchFields = ['SpeciesId', 'Id', 'GeneSymbol', 'GeneDescription', 'Chromosome', 'TxStart', 'TxEnd',
- 'Strand', 'GeneID', 'NM_ID', 'kgID', 'GenBankID', 'UnigenID', 'ProteinID', 'AlignID',
- 'exonCount', 'exonStarts', 'exonEnds', 'cdsStart', 'cdsEnd']
+ 'Strand', 'GeneID', 'NM_ID', 'kgID', 'GenBankID', 'UnigenID', 'ProteinID', 'AlignID',
+ 'exonCount', 'exonStarts', 'exonEnds', 'cdsStart', 'cdsEnd']
# List All Species in the Gene Table
speciesDict = {}
@@ -34,9 +34,9 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'):
((TxStart > %f and TxStart <= %f) OR (TxEnd > %f and TxEnd <= %f))
ORDER BY txStart
""" % (", ".join(fetchFields),
- speciesId, chrName,
- startMb, endMb,
- startMb, endMb)).fetchall()
+ speciesId, chrName,
+ startMb, endMb,
+ startMb, endMb)).fetchall()
GeneList = []
@@ -55,7 +55,7 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'):
StrainId1 = %d AND StrainId2 = %d
""" % (chrName, newdict["TxStart"], newdict["TxEnd"], diffCol[0], diffCol[1])).fetchone()[0]
newdict["snpDensity"] = newdict["snpCount"] / \
- (newdict["TxEnd"] - newdict["TxStart"]) / 1000.0
+ (newdict["TxEnd"] - newdict["TxStart"]) / 1000.0
else:
newdict["snpDensity"] = newdict["snpCount"] = 0
@@ -70,8 +70,8 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'):
newdict2 = {}
resultsOther = g.db.execute("SELECT %s FROM GeneList WHERE SpeciesId = %d AND geneSymbol= '%s' LIMIT 1" % (", ".join(fetchFields),
- othSpecId,
- newdict["GeneSymbol"])).fetchone()
+ othSpecId,
+ newdict["GeneSymbol"])).fetchone()
if resultsOther:
for j, item in enumerate(fetchFields):
@@ -88,13 +88,13 @@ def loadGenes(chrName, diffCol, startMb, endMb, species='mouse'):
""" % (chrName, newdict["TxStart"], newdict["TxEnd"], diffCol[0], diffCol[1])).fetchone()[0]
newdict2["snpDensity"] = newdict2["snpCount"] / \
- (newdict2["TxEnd"] - newdict2["TxStart"]) / 1000.0
+ (newdict2["TxEnd"] - newdict2["TxStart"]) / 1000.0
else:
newdict2["snpDensity"] = newdict2["snpCount"] = 0
try:
newdict2['GeneLength'] = 1000.0 * \
- (newdict2['TxEnd'] - newdict2['TxStart'])
+ (newdict2['TxEnd'] - newdict2['TxStart'])
except:
pass
diff --git a/wqflask/wqflask/markdown_routes.py b/wqflask/wqflask/markdown_routes.py
index ebf75807..c27ff143 100644
--- a/wqflask/wqflask/markdown_routes.py
+++ b/wqflask/wqflask/markdown_routes.py
@@ -103,7 +103,7 @@ def environments():
@environments_blueprint.route('/svg-dependency-graph')
def svg_graph():
directory, file_name, _ = get_file_from_python_search_path(
- "wqflask/dependency-graph.svg").partition("dependency-graph.svg")
+ "wqflask/dependency-graph.svg").partition("dependency-graph.svg")
return send_from_directory(directory, file_name)
diff --git a/wqflask/wqflask/marker_regression/display_mapping_results.py b/wqflask/wqflask/marker_regression/display_mapping_results.py
index c68e0fde..f3b1b1fc 100644
--- a/wqflask/wqflask/marker_regression/display_mapping_results.py
+++ b/wqflask/wqflask/marker_regression/display_mapping_results.py
@@ -629,7 +629,7 @@ class DisplayMappingResults:
submit=HtmlGenWrapper.create_input_tag(type_='hidden'))
hddn = {'FormID': 'showDatabase', 'ProbeSetID': '_', 'database': fd.RISet+ \
- "Geno",'CellID':'_', 'RISet':fd.RISet, 'incparentsf1':'ON'}
+ "Geno",'CellID':'_', 'RISet':fd.RISet, 'incparentsf1':'ON'}
for key in hddn.keys():
showLocusForm.append(HtmlGenWrapper.create_input_tag(
name=key, value=hddn[key], type_='hidden'))
@@ -889,7 +889,7 @@ class DisplayMappingResults:
bootY = yZero - bootHeightThresh * item / highestPercent
im_drawer.line(
xy=((canvas.size[0] - bootOffset + 4, bootY),
- (canvas.size[0] - bootOffset, bootY)),
+ (canvas.size[0] - bootOffset, bootY)),
fill=BLACK)
im_drawer.text(xy=(canvas.size[0] - bootOffset + 10, bootY+TEXT_Y_DISPLACEMENT),
text='%2.1f' % item, font=bootScaleFont, fill=BLACK)
@@ -983,7 +983,7 @@ class DisplayMappingResults:
break
if locPixel >= 0 and self.plotScale == 'physic':
traitPixel = ((locPixel, yZero), (locPixel - 7,
- yZero + 14), (locPixel + 7, yZero + 14))
+ yZero + 14), (locPixel + 7, yZero + 14))
draw_open_polygon(canvas, xy=traitPixel, outline=BLACK,
fill=self.TRANSCRIPT_LOCATION_COLOR)
@@ -1029,7 +1029,7 @@ class DisplayMappingResults:
SNPCounts[i - xLeftOffset] * SNP_HEIGHT_MODIFIER / maxCount)
im_drawer.line(
xy=((i, drawSNPLocationY + (snpDensity) * zoom),
- (i, drawSNPLocationY - (snpDensity) * zoom)),
+ (i, drawSNPLocationY - (snpDensity) * zoom)),
fill=self.SNP_COLOR, width=1)
def drawMultiTraitName(self, fd, canvas, gifmap, showLocusForm, offset=(40, 120, 80, 10), zoom=1):
@@ -1356,7 +1356,7 @@ class DisplayMappingResults:
# always apply colors now, even if SNP Track not checked - Zach 11/24/2010
densities = [1.0000000000000001e-05, 0.094094033555233408,
- 0.3306166377816987, 0.88246026851027781, 2.6690084029581951, 4.1, 61.0]
+ 0.3306166377816987, 0.88246026851027781, 2.6690084029581951, 4.1, 61.0]
if SNPdensity < densities[0]:
myColor = BLACK
elif SNPdensity < densities[1]:
@@ -1462,13 +1462,13 @@ class DisplayMappingResults:
xy=((geneStartPix + xCoord + self.EACH_GENE_ARROW_WIDTH,
geneYLocation),
(geneStartPix + xCoord,
- geneYLocation + (self.EACH_GENE_HEIGHT / 2) * zoom)),
+ geneYLocation + (self.EACH_GENE_HEIGHT / 2) * zoom)),
fill=arrowColor, width=1)
im_drawer.line(
xy=((geneStartPix + xCoord + self.EACH_GENE_ARROW_WIDTH,
geneYLocation + self.EACH_GENE_HEIGHT * zoom),
(geneStartPix + xCoord,
- geneYLocation + (self.EACH_GENE_HEIGHT / 2) * zoom)),
+ geneYLocation + (self.EACH_GENE_HEIGHT / 2) * zoom)),
fill=arrowColor, width=1)
# draw the blocks for the exon regions
@@ -1476,7 +1476,7 @@ class DisplayMappingResults:
exonStartPix = (
exonStarts[i] - startMb) * plotXScale + xLeftOffset
exonEndPix = (exonEnds[i] - startMb) * \
- plotXScale + xLeftOffset
+ plotXScale + xLeftOffset
if (exonStartPix < xLeftOffset):
exonStartPix = xLeftOffset
if (exonEndPix < xLeftOffset):
@@ -1493,7 +1493,7 @@ class DisplayMappingResults:
# draw gray blocks for 3' and 5' UTR blocks
if cdsStart and cdsEnd:
utrStartPix = (txStart - startMb) * \
- plotXScale + xLeftOffset
+ plotXScale + xLeftOffset
utrEndPix = (cdsStart - startMb) * plotXScale + xLeftOffset
if (utrStartPix < xLeftOffset):
utrStartPix = xLeftOffset
@@ -1741,9 +1741,9 @@ class DisplayMappingResults:
mylineColor = self.HAPLOTYPE_RECOMBINATION
im_drawer.line(
xy=((plotRight,
- geneYLocation + 7 + 2*ind*self.EACH_GENE_HEIGHT*zoom),
+ geneYLocation + 7 + 2*ind*self.EACH_GENE_HEIGHT*zoom),
(drawEnd,
- geneYLocation + 7 + 2*ind*self.EACH_GENE_HEIGHT*zoom)),
+ geneYLocation + 7 + 2*ind*self.EACH_GENE_HEIGHT*zoom)),
fill= mylineColor, width=zoom * (self.EACH_GENE_HEIGHT + 2))
if lastGene == 0:
@@ -1869,7 +1869,7 @@ class DisplayMappingResults:
fill=self.CLICKABLE_WEBQTL_REGION_COLOR)
im_drawer.line(
xy=((xBrowse1, paddingTop), (xBrowse1,
- (paddingTop + self.BAND_HEIGHT))),
+ (paddingTop + self.BAND_HEIGHT))),
fill=self.CLICKABLE_WEBQTL_REGION_OUTLINE_COLOR)
if self.dataset.group.species == "mouse" or self.dataset.group.species == "rat":
@@ -1895,7 +1895,7 @@ class DisplayMappingResults:
fill=self.CLICKABLE_PHENOGEN_REGION_COLOR)
im_drawer.line(
xy=((xBrowse1, phenogenPaddingTop), (xBrowse1,
- (phenogenPaddingTop + self.BAND_HEIGHT))),
+ (phenogenPaddingTop + self.BAND_HEIGHT))),
fill=self.CLICKABLE_PHENOGEN_REGION_OUTLINE_COLOR)
UCSC_COORDS = "%d, %d, %d, %d" % (
@@ -2051,7 +2051,7 @@ class DisplayMappingResults:
fill=xAxisLabelColor)
else:
im_drawer.line(xy=((Xc, yZero),
- (Xc, yZero + xMinorTickHeight)),
+ (Xc, yZero + xMinorTickHeight)),
fill=xAxisTickMarkColor,
width=X_MINOR_TICK_THICKNESS) # Draw the MINOR tick mark
@@ -2159,17 +2159,17 @@ class DisplayMappingResults:
if differ:
im_drawer.line(
xy=((startPosX + Lpos, yZero), (xLeftOffset + offsetA,\
- yZero + 25)),
+ yZero + 25)),
fill=lineColor)
im_drawer.line(
xy=((xLeftOffset + offsetA, yZero + 25), (xLeftOffset+offsetA,\
- yZero + 40 + Zorder*(LRectWidth+3))),
+ yZero + 40 + Zorder*(LRectWidth+3))),
fill=lineColor)
rectColor = ORANGE
else:
im_drawer.line(
xy=((xLeftOffset + offsetA, yZero + 40+Zorder*(LRectWidth+3)-3), (\
- xLeftOffset + offsetA, yZero + 40+Zorder*(LRectWidth+3))),
+ xLeftOffset + offsetA, yZero + 40+Zorder*(LRectWidth+3))),
fill=lineColor)
rectColor = DEEPPINK
im_drawer.rectangle(
@@ -2178,7 +2178,7 @@ class DisplayMappingResults:
yZero + 40 + Zorder*(LRectWidth+3)+LRectWidth)),
outline=rectColor, fill=rectColor, width=0)
COORDS = "%d,%d,%d,%d" % (xLeftOffset+offsetA-LRectHeight, yZero+40+Zorder*(LRectWidth+3),\
- xLeftOffset + offsetA, yZero +40+Zorder*(LRectWidth+3)+LRectWidth)
+ xLeftOffset + offsetA, yZero +40+Zorder*(LRectWidth+3)+LRectWidth)
HREF = "/show_trait?trait_id=%s&dataset=%s" % (
Lname, self.dataset.group.name + "Geno")
#HREF="javascript:showDatabase3('%s','%s','%s','');" % (showLocusForm,fd.RISet+"Geno", Lname)
@@ -2230,7 +2230,7 @@ class DisplayMappingResults:
if self.lrsMax <= 0: # sliding scale
if "lrs_value" in self.qtlresults[0]:
LRS_LOD_Max = max([result['lrs_value']
- for result in self.qtlresults])
+ for result in self.qtlresults])
if self.LRS_LOD == "LOD" or self.LRS_LOD == "-logP":
LRS_LOD_Max = LRS_LOD_Max / self.LODFACTOR
if self.permChecked and self.nperm > 0 and not self.multipleInterval:
@@ -2248,7 +2248,7 @@ class DisplayMappingResults:
pass
else:
LRS_LOD_Max = max([result['lod_score']
- for result in self.qtlresults])
+ for result in self.qtlresults])
if self.LRS_LOD == "LRS":
LRS_LOD_Max = LRS_LOD_Max * self.LODFACTOR
if self.permChecked and self.nperm > 0 and not self.multipleInterval:
@@ -2390,7 +2390,7 @@ class DisplayMappingResults:
)
im_drawer.line(
xy=((start_pos_x + self.SUGGESTIVE_WIDTH / 1.5, significantY),
- (rightEdge, significantY)),
+ (rightEdge, significantY)),
fill=self.SIGNIFICANT_COLOR,
width=self.SIGNIFICANT_WIDTH * zoom
# , clipX=(xLeftOffset, xLeftOffset + plotWidth-2)
@@ -2440,7 +2440,7 @@ class DisplayMappingResults:
else:
if self.additiveChecked:
additiveMax = max([abs(X['additive'])
- for X in self.qtlresults])
+ for X in self.qtlresults])
lrsEdgeWidth = 3
if zoom == 2:
@@ -2484,7 +2484,7 @@ class DisplayMappingResults:
Xcm = Xc
else:
Xcm = (yZero - Yc0) / \
- ((Yc - Yc0) / (Xc - Xc0)) + Xc0
+ ((Yc - Yc0) / (Xc - Xc0)) + Xc0
if Yc0 < yZero:
im_drawer.line(
xy=((Xc0, Yc0), (Xcm, yZero)),
@@ -2583,12 +2583,12 @@ class DisplayMappingResults:
#Yc = yZero - webqtlConfig.MAXLRS*LRSHeightThresh/(LRSAxisList[-1]*self.LODFACTOR)
Yc = yZero - webqtlConfig.MAXLRS * \
LRSHeightThresh / \
- (LRS_LOD_Max * self.LODFACTOR)
+ (LRS_LOD_Max * self.LODFACTOR)
else:
#Yc = yZero - qtlresult['lrs_value']*LRSHeightThresh/(LRSAxisList[-1]*self.LODFACTOR)
Yc = yZero - \
qtlresult['lrs_value'] * LRSHeightThresh / \
- (LRS_LOD_Max * self.LODFACTOR)
+ (LRS_LOD_Max * self.LODFACTOR)
else:
if qtlresult['lrs_value'] > 460 or qtlresult['lrs_value'] == 'inf':
#Yc = yZero - webqtlConfig.MAXLRS*LRSHeightThresh/LRSAxisList[-1]
@@ -2597,7 +2597,7 @@ class DisplayMappingResults:
#Yc = yZero - qtlresult['lrs_value']*LRSHeightThresh/LRSAxisList[-1]
Yc = yZero - \
qtlresult['lrs_value'] * \
- LRSHeightThresh / LRS_LOD_Max
+ LRSHeightThresh / LRS_LOD_Max
else:
if qtlresult['lod_score'] > 100 or qtlresult['lod_score'] == 'inf':
#Yc = yZero - webqtlConfig.MAXLRS*LRSHeightThresh/LRSAxisList[-1]
@@ -2607,12 +2607,12 @@ class DisplayMappingResults:
#Yc = yZero - qtlresult['lod_score']*self.LODFACTOR*LRSHeightThresh/LRSAxisList[-1]
Yc = yZero - \
qtlresult['lod_score'] * self.LODFACTOR * \
- LRSHeightThresh / LRS_LOD_Max
+ LRSHeightThresh / LRS_LOD_Max
else:
#Yc = yZero - qtlresult['lod_score']*LRSHeightThresh/LRSAxisList[-1]
Yc = yZero - \
qtlresult['lod_score'] * \
- LRSHeightThresh / LRS_LOD_Max
+ LRSHeightThresh / LRS_LOD_Max
if self.manhattan_plot == True:
if self.color_scheme == "single":
@@ -2665,7 +2665,7 @@ class DisplayMappingResults:
Xcm = Xc
else:
Xcm = (yZero - Yc0) / \
- ((Yc - Yc0) / (Xc - Xc0)) + Xc0
+ ((Yc - Yc0) / (Xc - Xc0)) + Xc0
if Yc0 < yZero:
im_drawer.line(
xy=((Xc0, Yc0), (Xcm, yZero)),
@@ -2731,7 +2731,7 @@ class DisplayMappingResults:
Xcm = Xc
else:
Xcm = (yZero - Yc0) / \
- ((Yc - Yc0) / (Xc - Xc0)) + Xc0
+ ((Yc - Yc0) / (Xc - Xc0)) + Xc0
if Yc0 < yZero:
im_drawer.line(
xy=((Xc0, Yc0), (Xcm, yZero)),
@@ -2947,12 +2947,12 @@ class DisplayMappingResults:
if self.dataset.group.species == "mouse":
if refGene:
gene_table_header_list = ["Index",
- "Symbol",
- "Mb Start",
- "Length (Kb)",
- "SNP Count",
- "SNP Density",
- "Avg Expr",
+ "Symbol",
+ "Mb Start",
+ "Length (Kb)",
+ "SNP Count",
+ "SNP Density",
+ "Avg Expr",
"Human Chr",
"Mb Start (hg19)",
"Literature Correlation",
diff --git a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py
index 9f9591ad..5d16abde 100644
--- a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py
+++ b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py
@@ -26,18 +26,18 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo
gen_pheno_txt_file(samples, vals, trait_filename)
output_filename = (f"{this_dataset.group.name}_GWA_"
- + ''.join(random.choice(string.ascii_uppercase + string.digits)
- for _ in range(6))
- )
+ + ''.join(random.choice(string.ascii_uppercase + string.digits)
+ for _ in range(6))
+ )
bootstrap_filename = None
permu_filename = None
opt_list = []
if boot_check and num_bootstrap > 0:
bootstrap_filename = (f"{this_dataset.group.name}_BOOTSTRAP_"
- + ''.join(random.choice(string.ascii_uppercase + string.digits)
- for _ in range(6))
- )
+ + ''.join(random.choice(string.ascii_uppercase + string.digits)
+ for _ in range(6))
+ )
opt_list.append("-b")
opt_list.append(f"--n_bootstrap {str(num_bootstrap)}")
@@ -45,8 +45,8 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo
f"--bootstrap_output {webqtlConfig.GENERATED_IMAGE_DIR}{bootstrap_filename}.txt")
if num_perm > 0:
permu_filename = (f"{this_dataset.group.name}_PERM_"
- + ''.join(random.choice(string.ascii_uppercase
- + string.digits) for _ in range(6))
+ + ''.join(random.choice(string.ascii_uppercase
+ + string.digits) for _ in range(6))
)
opt_list.append("-n " + str(num_perm))
opt_list.append(
@@ -57,15 +57,15 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo
opt_list.append("--interval 1")
reaper_command = (REAPER_COMMAND +
-                  ' --geno {0}/{1}.geno --traits {2}/gn2/{3}.txt {4} -o {5}{6}.txt'.format(flat_files('genotype'),
-                                                    genofile_name,
-                                                    TEMPDIR,
-                                                    trait_filename,
-                                                    " ".join(
-                                                        opt_list),
-                                                    webqtlConfig.GENERATED_IMAGE_DIR,
-                                                    output_filename))
+                  ' --geno {0}/{1}.geno --traits {2}/gn2/{3}.txt {4} -o {5}{6}.txt'.format(flat_files('genotype'),
+                                                    genofile_name,
+                                                    TEMPDIR,
+                                                    trait_filename,
+                                                    " ".join(
+                                                        opt_list),
+                                                    webqtlConfig.GENERATED_IMAGE_DIR,
+                                                    output_filename))
logger.debug("reaper_command:" + reaper_command)
os.system(reaper_command)
@@ -82,7 +82,7 @@ def run_reaper(this_trait, this_dataset, samples, vals, json_data, num_perm, boo
significant = permu_vals[int(num_perm * 0.95 - 1)]
return (marker_obs, permu_vals, suggestive, significant, bootstrap_vals,
- [output_filename, permu_filename, bootstrap_filename])
+ [output_filename, permu_filename, bootstrap_filename])
def gen_pheno_txt_file(samples, vals, trait_filename):
@@ -231,17 +231,17 @@ def run_original_reaper(this_trait, dataset, samples_before, trait_vals, json_da
control_geno.append(control_geno2[_idx])
bootstrap_results = genotype.bootstrap(strains=trimmed_samples,
- trait=trimmed_values,
- control=control_geno,
- nboot=num_bootstrap)
+ trait=trimmed_values,
+ control=control_geno,
+ nboot=num_bootstrap)
else:
reaper_results = genotype.regression(strains=trimmed_samples,
trait=trimmed_values)
if bootCheck:
bootstrap_results = genotype.bootstrap(strains=trimmed_samples,
- trait=trimmed_values,
- nboot=num_bootstrap)
+ trait=trimmed_values,
+ nboot=num_bootstrap)
json_data['chr'] = []
json_data['pos'] = []
@@ -265,7 +265,7 @@ def run_original_reaper(this_trait, dataset, samples_before, trait_vals, json_da
# if self.additive:
# self.json_data['additive'].append(qtl.additive)
locus = {"name": reaper_locus.name, "chr": reaper_locus.chr,
- "cM": reaper_locus.cM, "Mb": reaper_locus.Mb}
+ "cM": reaper_locus.cM, "Mb": reaper_locus.Mb}
qtl = {"lrs_value": qtl.lrs, "chr": converted_chr, "Mb": reaper_locus.Mb,
"cM": reaper_locus.cM, "name": reaper_locus.name, "additive": qtl.additive, "dominance": qtl.dominance}
qtl_results.append(qtl)
diff --git a/wqflask/wqflask/marker_regression/rqtl_mapping.py b/wqflask/wqflask/marker_regression/rqtl_mapping.py
index 32dbad1f..cf8cf514 100644
--- a/wqflask/wqflask/marker_regression/rqtl_mapping.py
+++ b/wqflask/wqflask/marker_regression/rqtl_mapping.py
@@ -411,7 +411,7 @@ def process_pair_scan_results(result):
result = result[1]
output = [tuple([result[j][i] for j in range(result.ncol)])
- for i in range(result.nrow)]
+ for i in range(result.nrow)]
for i, line in enumerate(result.iter_row()):
marker = {}
@@ -441,7 +441,7 @@ def process_rqtl_perm_results(num_perm, results):
def process_rqtl_results(result, species_name):
qtl_results = []
output = [tuple([result[j][i] for j in range(result.ncol)])
- for i in range(result.nrow)]
+ for i in range(result.nrow)]
for i, line in enumerate(result.iter_row()):
marker = {}
diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py
index 041f4348..81e0a03f 100644
--- a/wqflask/wqflask/marker_regression/run_mapping.py
+++ b/wqflask/wqflask/marker_regression/run_mapping.py
@@ -289,18 +289,18 @@ class RunMapping:
",")
results, self.perm_output, self.suggestive, self.significant, self.bootstrap_results, self.output_files = qtlreaper_mapping.run_reaper(self.this_trait,
- self.dataset,
- self.samples,
- self.vals,
- self.json_data,
- self.num_perm,
- self.bootCheck,
- self.num_bootstrap,
- self.do_control,
- self.control_marker,
- self.manhattan_plot,
- self.first_run,
- self.output_files)
+ self.dataset,
+ self.samples,
+ self.vals,
+ self.json_data,
+ self.num_perm,
+ self.bootCheck,
+ self.num_bootstrap,
+ self.do_control,
+ self.control_marker,
+ self.manhattan_plot,
+ self.first_run,
+ self.output_files)
else:
results, self.json_data, self.perm_output, self.suggestive, self.significant, self.bootstrap_results = qtlreaper_mapping.run_original_reaper(self.this_trait,
self.dataset,
@@ -371,7 +371,7 @@ class RunMapping:
ps=this_ps,
url="/show_trait?trait_id=" + \
marker['name'] + "&dataset=" + \
- self.dataset.group.name + "Geno"
+ self.dataset.group.name + "Geno"
)
if self.geno_db_exists == "True":
@@ -382,7 +382,7 @@ class RunMapping:
pos=this_ps,
url="/show_trait?trait_id=" + \
marker['name'] + "&dataset=" + \
- self.dataset.group.name + "Geno"
+ self.dataset.group.name + "Geno"
)
else:
annot_marker = dict(
@@ -409,11 +409,11 @@ class RunMapping:
if 'Mb' in marker.keys():
marker['display_pos'] = "Chr" + \
str(marker['chr']) + ": " + \
- "{:.6f}".format(marker['Mb'])
+ "{:.6f}".format(marker['Mb'])
elif 'cM' in marker.keys():
marker['display_pos'] = "Chr" + \
str(marker['chr']) + ": " + \
- "{:.3f}".format(marker['cM'])
+ "{:.3f}".format(marker['cM'])
else:
marker['display_pos'] = "N/A"
self.qtl_results.append(marker)
@@ -776,7 +776,7 @@ def get_perm_strata(this_trait, sample_list, categorical_vars, used_samples):
perm_strata_strings.append(combined_string)
d = dict([(y, x + 1)
- for x, y in enumerate(sorted(set(perm_strata_strings)))])
+ for x, y in enumerate(sorted(set(perm_strata_strings)))])
list_to_numbers = [d[x] for x in perm_strata_strings]
perm_strata = list_to_numbers
diff --git a/wqflask/wqflask/model.py b/wqflask/wqflask/model.py
index 55b0278a..822900cc 100644
--- a/wqflask/wqflask/model.py
+++ b/wqflask/wqflask/model.py
@@ -45,8 +45,8 @@ class User(Base):
)
user_collections = relationship("UserCollection",
- order_by="asc(UserCollection.name)",
- lazy='dynamic',
+ order_by="asc(UserCollection.name)",
+ lazy='dynamic',
)
def display_num_collections(self):
diff --git a/wqflask/wqflask/parser.py b/wqflask/wqflask/parser.py
index 6b836e20..bd1c4407 100644
--- a/wqflask/wqflask/parser.py
+++ b/wqflask/wqflask/parser.py
@@ -34,7 +34,7 @@ def parse(pstring):
(\w+\s*[=:\>\<][\w\*]+) | # wiki=bar, GO:foobar, etc
(".*?") | ('.*?') | # terms in quotes, i.e. "brain weight"
([\w\*\?]+)) # shh, brain, etc """, pstring,
- flags=re.VERBOSE)
+ flags=re.VERBOSE)
pstring = [item.strip() for item in pstring if item and item.strip()]
diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py
index 2e1cb992..fc48959e 100644
--- a/wqflask/wqflask/search_results.py
+++ b/wqflask/wqflask/search_results.py
@@ -173,10 +173,10 @@ class SearchResultPage:
if self.dataset.type == "ProbeSet":
self.header_data_names = ['index', 'display_name', 'symbol', 'description',
- 'location', 'mean', 'lrs_score', 'lrs_location', 'additive']
+ 'location', 'mean', 'lrs_score', 'lrs_location', 'additive']
elif self.dataset.type == "Publish":
self.header_data_names = ['index', 'display_name', 'description', 'mean',
- 'authors', 'pubmed_text', 'lrs_score', 'lrs_location', 'additive']
+ 'authors', 'pubmed_text', 'lrs_score', 'lrs_location', 'additive']
elif self.dataset.type == "Geno":
self.header_data_names = ['index', 'display_name', 'location']
@@ -273,9 +273,9 @@ class SearchResultPage:
if search_ob:
search_class = getattr(do_search, search_ob)
the_search = search_class(search_term,
- search_operator,
- self.dataset,
- search_type['key']
+ search_operator,
+ self.dataset,
+ search_type['key']
)
return the_search
else:
diff --git a/wqflask/wqflask/server_side.py b/wqflask/wqflask/server_side.py
index 8ca3a9eb..7f68efad 100644
--- a/wqflask/wqflask/server_side.py
+++ b/wqflask/wqflask/server_side.py
@@ -49,8 +49,8 @@ class ServerSideTable:
column_name = self.header_data_names[column_number - 1]
sort_direction = self.request_values['sSortDir_' + str(i)]
self.table_rows = sorted(self.table_rows,
- key=lambda x: x[column_name],
- reverse=is_reverse(sort_direction))
+ key=lambda x: x[column_name],
+ reverse=is_reverse(sort_direction))
def paginate_rows(self):
"""
diff --git a/wqflask/wqflask/show_trait/SampleList.py b/wqflask/wqflask/show_trait/SampleList.py
index 3a63c84e..f9d30dba 100644
--- a/wqflask/wqflask/show_trait/SampleList.py
+++ b/wqflask/wqflask/show_trait/SampleList.py
@@ -75,7 +75,7 @@ class SampleList:
if self.dataset.group.species == "mouse":
if len(sample.extra_attributes['rrid'].split(":")) > 1:
the_rrid = sample.extra_attributes['rrid'].split(":")[
- 1]
+ 1]
sample.extra_attributes['rrid'] = [
sample.extra_attributes['rrid']]
sample.extra_attributes['rrid'].append(
@@ -83,7 +83,7 @@ class SampleList:
elif self.dataset.group.species == "rat":
if len(str(sample.extra_attributes['rrid'])):
the_rrid = sample.extra_attributes['rrid'].split("_")[
- 1]
+ 1]
sample.extra_attributes['rrid'] = [
sample.extra_attributes['rrid']]
sample.extra_attributes['rrid'].append(
diff --git a/wqflask/wqflask/show_trait/export_trait_data.py b/wqflask/wqflask/show_trait/export_trait_data.py
index 81e7903b..7fabc3f6 100644
--- a/wqflask/wqflask/show_trait/export_trait_data.py
+++ b/wqflask/wqflask/show_trait/export_trait_data.py
@@ -41,7 +41,7 @@ def get_export_metadata(trait_id, dataset_name):
if dataset.type == "Publish":
metadata.append(["Phenotype ID: " + trait_id])
metadata.append(["Phenotype URL: " + "http://genenetwork.org/show_trait?trait_id=" + \
- trait_id + "&dataset=" + dataset_name])
+ trait_id + "&dataset=" + dataset_name])
metadata.append(["Group: " + dataset.group.name])
metadata.append(
["Phenotype: " + this_trait.description_display.replace(",", "\",\"")])
@@ -56,7 +56,7 @@ def get_export_metadata(trait_id, dataset_name):
else:
metadata.append(["Record ID: " + trait_id])
metadata.append(["Trait URL: " + "http://genenetwork.org/show_trait?trait_id=" + \
- trait_id + "&dataset=" + dataset_name])
+ trait_id + "&dataset=" + dataset_name])
if this_trait.symbol:
metadata.append(["Symbol: " + this_trait.symbol])
metadata.append(["Dataset: " + dataset.name])
diff --git a/wqflask/wqflask/snp_browser/snp_browser.py b/wqflask/wqflask/snp_browser/snp_browser.py
index 5b7a663c..42fe339e 100644
--- a/wqflask/wqflask/snp_browser/snp_browser.py
+++ b/wqflask/wqflask/snp_browser/snp_browser.py
@@ -294,7 +294,7 @@ class SnpBrowser:
effect_info_dict = get_effect_info(effect_list)
coding_domain_list = ['Start Gained', 'Start Lost',
- 'Stop Gained', 'Stop Lost', 'Nonsynonymous', 'Synonymous']
+ 'Stop Gained', 'Stop Lost', 'Nonsynonymous', 'Synonymous']
intron_domain_list = ['Splice Site', 'Nonsplice Site']
for key in effect_info_dict:
@@ -320,7 +320,7 @@ class SnpBrowser:
if self.redundant == "false" or last_mb != mb: # filter redundant
if self.include_record(domain, function, snp_source, conservation_score):
info_list = [snp_name, rs, chr, mb, alleles, gene, transcript, exon, domain,
- function, function_details, snp_source, conservation_score, snp_id]
+ function, function_details, snp_source, conservation_score, snp_id]
info_list.extend(self.allele_list)
filtered_results.append(info_list)
last_mb = mb
@@ -351,7 +351,7 @@ class SnpBrowser:
if self.redundant == "false" or last_mb != mb:
if self.include_record(domain, function, snp_source, conservation_score):
info_list = [snp_name, rs, chr, mb, alleles, gene, transcript, exon, domain,
- function, function_details, snp_source, conservation_score, snp_id]
+ function, function_details, snp_source, conservation_score, snp_id]
info_list.extend(self.allele_list)
filtered_results.append(info_list)
last_mb = mb
@@ -366,7 +366,7 @@ class SnpBrowser:
domain = conservation_score = snp_id = snp_name = rs = flank_3 = flank_5 = ncbi = function = ""
if self.include_record(domain, function, source_name, conservation_score):
filtered_results.append([indel_name, indel_chr, indel_mb_start, indel_mb_end,
- indel_strand, indel_type, indel_size, indel_sequence, source_name])
+ indel_strand, indel_type, indel_size, indel_sequence, source_name])
last_mb = indel_mb_start
else:
@@ -703,9 +703,9 @@ def get_header_list(variant_type, strains, species=None, empty_columns=None):
header_data_names = []
if variant_type == "SNP":
header_fields.append(['Index', 'SNP ID', 'Chr', 'Mb', 'Alleles', 'Source', 'ConScore',
- 'Gene', 'Transcript', 'Exon', 'Domain 1', 'Domain 2', 'Function', 'Details'])
+ 'Gene', 'Transcript', 'Exon', 'Domain 1', 'Domain 2', 'Function', 'Details'])
header_data_names = ['index', 'snp_name', 'chr', 'mb_formatted', 'alleles', 'snp_source', 'conservation_score',
- 'gene_name', 'transcript', 'exon', 'domain_1', 'domain_2', 'function', 'function_details']
+ 'gene_name', 'transcript', 'exon', 'domain_1', 'domain_2', 'function', 'function_details']
header_fields.append(strain_list)
header_data_names += strain_list
@@ -742,9 +742,9 @@ def get_header_list(variant_type, strains, species=None, empty_columns=None):
elif variant_type == "InDel":
header_fields = ['Index', 'ID', 'Type', 'InDel Chr',
- 'Mb Start', 'Mb End', 'Strand', 'Size', 'Sequence', 'Source']
+ 'Mb Start', 'Mb End', 'Strand', 'Size', 'Sequence', 'Source']
header_data_names = ['index', 'indel_name', 'indel_type', 'indel_chr', 'indel_mb_s',
- 'indel_mb_e', 'indel_strand', 'indel_size', 'indel_sequence', 'source_name']
+ 'indel_mb_e', 'indel_strand', 'indel_size', 'indel_sequence', 'source_name']
return header_fields, empty_field_count, header_data_names
@@ -758,9 +758,9 @@ def get_effect_details_by_category(effect_name=None, effect_value=None):
tmp_list = []
gene_group_list = ['Upstream', 'Downstream',
- 'Splice Site', 'Nonsplice Site', '3\' UTR']
+ 'Splice Site', 'Nonsplice Site', '3\' UTR']
biotype_group_list = ['Unknown Effect In Exon', 'Start Gained',
- 'Start Lost', 'Stop Gained', 'Stop Lost', 'Nonsynonymous', 'Synonymous']
+ 'Start Lost', 'Stop Gained', 'Stop Lost', 'Nonsynonymous', 'Synonymous']
new_codon_group_list = ['Start Gained']
codon_effect_group_list = [
'Start Lost', 'Stop Gained', 'Stop Lost', 'Nonsynonymous', 'Synonymous']
@@ -913,7 +913,7 @@ def get_gene_id_name_dict(species_id, gene_name_list):
if len(gene_name_list) == 0:
return ""
gene_name_str_list = ["'" + gene_name + \
- "'" for gene_name in gene_name_list]
+ "'" for gene_name in gene_name_list]
gene_name_str = ",".join(gene_name_str_list)
query = """
diff --git a/wqflask/wqflask/user_login.py b/wqflask/wqflask/user_login.py
index 725e7c9e..0d5f1f3e 100644
--- a/wqflask/wqflask/user_login.py
+++ b/wqflask/wqflask/user_login.py
@@ -59,12 +59,12 @@ def encode_password(pass_gen_fields, unencrypted_password):
def set_password(password):
pass_gen_fields = {
- "unencrypted_password": password,
- "algorithm": "pbkdf2",
- "hashfunc": "sha256",
- "salt": base64.b64encode(os.urandom(32)),
- "iterations": 100000,
- "keylength": 32,
+ "unencrypted_password": password,
+ "algorithm": "pbkdf2",
+ "hashfunc": "sha256",
+ "salt": base64.b64encode(os.urandom(32)),
+ "iterations": 100000,
+ "keylength": 32,
"created_timestamp": timestamp()
}
@@ -89,18 +89,18 @@ def get_signed_session_id(user):
if 'github_id' in user:
session = dict(login_time=time.time(),
- user_type="github",
- user_id=user['user_id'],
- github_id=user['github_id'],
- user_name=user['name'],
- user_url=user['user_url'])
+ user_type="github",
+ user_id=user['user_id'],
+ github_id=user['github_id'],
+ user_name=user['name'],
+ user_url=user['user_url'])
elif 'orcid' in user:
session = dict(login_time=time.time(),
- user_type="orcid",
- user_id=user['user_id'],
- github_id=user['orcid'],
- user_name=user['name'],
- user_url=user['user_url'])
+ user_type="orcid",
+ user_id=user['user_id'],
+ github_id=user['orcid'],
+ user_name=user['name'],
+ user_url=user['user_url'])
else:
session = dict(login_time=time.time(),
user_type="gn2",
@@ -269,7 +269,7 @@ def github_oauth2():
result = requests.post(
"https://github.com/login/oauth/access_token", json=data)
result_dict = {arr[0]: arr[1]
- for arr in [tok.split("=") for tok in result.text.split("&")]}
+ for arr in [tok.split("=") for tok in result.text.split("&")]}
github_user = get_github_user_details(result_dict["access_token"])
diff --git a/wqflask/wqflask/user_manager.py b/wqflask/wqflask/user_manager.py
index 9ebec405..fb26bfb1 100644
--- a/wqflask/wqflask/user_manager.py
+++ b/wqflask/wqflask/user_manager.py
@@ -475,7 +475,7 @@ def set_password(password, user):
pwfields.encrypt_time = enc_password.encrypt_time
user.password = json.dumps(pwfields.__dict__,
- sort_keys=True,
+ sort_keys=True,
)
diff --git a/wqflask/wqflask/user_session.py b/wqflask/wqflask/user_session.py
index 3e543445..67e2e158 100644
--- a/wqflask/wqflask/user_session.py
+++ b/wqflask/wqflask/user_session.py
@@ -105,8 +105,8 @@ class UserSession:
if user_cookie:
self.logged_in = False
self.record = dict(login_time=time.time(),
- user_type="anon",
- user_id=str(uuid.uuid4()))
+ user_type="anon",
+ user_id=str(uuid.uuid4()))
Redis.hmset(self.redis_key, self.record)
Redis.expire(self.redis_key, THIRTY_DAYS)
@@ -117,8 +117,8 @@ class UserSession:
return None
else:
self.record = dict(login_time=time.time(),
- user_type="anon",
- user_id=str(uuid.uuid4()))
+ user_type="anon",
+ user_id=str(uuid.uuid4()))
Redis.hmset(self.redis_key, self.record)
Redis.expire(self.redis_key, THIRTY_DAYS)
else:
diff --git a/wqflask/wqflask/wgcna/wgcna_analysis.py b/wqflask/wqflask/wgcna/wgcna_analysis.py
index 21516b30..f96892a0 100644
--- a/wqflask/wqflask/wgcna/wgcna_analysis.py
+++ b/wqflask/wqflask/wgcna/wgcna_analysis.py
@@ -70,7 +70,7 @@ class WGCNA:
self.trait_db_list = [trait.strip()
for trait in requestform['trait_list'].split(',')]
print(("Retrieved phenotype data from database",
- requestform['trait_list']))
+ requestform['trait_list']))
helper_functions.get_trait_db_obs(self, self.trait_db_list)
# self.input contains the phenotype values we need to send to R