-rw-r--r--  wqflask/maintenance/quantile_normalize.py               2
-rw-r--r--  wqflask/utility/Plot.py                                  2
-rw-r--r--  wqflask/wqflask/correlation/correlation_functions.py     4
-rw-r--r--  wqflask/wqflask/correlation_matrix/show_corr_matrix.py   6
-rw-r--r--  wqflask/wqflask/marker_regression/qtlreaper_mapping.py   2
-rw-r--r--  wqflask/wqflask/network_graph/network_graph.py           4
-rw-r--r--  wqflask/wqflask/pbkdf2.py                                4
-rw-r--r--  wqflask/wqflask/search_results.py                        2
8 files changed, 13 insertions, 13 deletions
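
This diff is a mechanical Python 2 -> 3 pass: xrange() (removed in Python 3) becomes range(), and range() calls are wrapped in list() wherever downstream code may need a real list, since Python 3's range is a lazy sequence rather than a list. A minimal sketch of the difference the wrapping papers over:

r = range(3)
print(r)           # range(0, 3) -- a lazy sequence object, not a list
print(list(r))     # [0, 1, 2]
# r[0] = 10        # TypeError: ranges are immutable, so code that assigns items needs list(range(...))
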
diff --git a/wqflask/maintenance/quantile_normalize.py b/wqflask/maintenance/quantile_normalize.py
index 34886f44..82b695f4 100644
--- a/wqflask/maintenance/quantile_normalize.py
+++ b/wqflask/maintenance/quantile_normalize.py
@@ -37,7 +37,7 @@ def create_dataframe(input_file):
with open(input_file) as f:
ncols = len(f.readline().split("\t"))
- input_array = np.loadtxt(open(input_file, "rb"), delimiter="\t", skiprows=1, usecols=range(1, ncols))
+ input_array = np.loadtxt(open(input_file, "rb"), delimiter="\t", skiprows=1, usecols=list(range(1, ncols)))
return pd.DataFrame(input_array)
#This function taken from https://github.com/ShawnLYU/Quantile_Normalize
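
Here list() is the conservative 2to3 rewrite; np.loadtxt's usecols parameter accepts any sequence of column indices, so a bare range would most likely work too. A self-contained sketch of the same call shape (the TSV content is made up):

import io
import numpy as np

tsv = "id\tA\tB\nx1\t1.0\t2.0\nx2\t3.0\t4.0\n"
ncols = 3
# skip the header row and load every column except the first (the row label)
arr = np.loadtxt(io.StringIO(tsv), delimiter="\t", skiprows=1, usecols=list(range(1, ncols)))
print(arr)    # [[1. 2.]  [3. 4.]]
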
diff --git a/wqflask/utility/Plot.py b/wqflask/utility/Plot.py
index 82bf6070..c9053dde 100644
--- a/wqflask/utility/Plot.py
+++ b/wqflask/utility/Plot.py
@@ -86,7 +86,7 @@ def frange(start, end=None, inc=1.0):
# Need to adjust the count. AFAICT, it always comes up one short.
count += 1
L = [start] * count
- for i in xrange(1, count):
+ for i in range(1, count):
L[i] = start + i * inc
return L
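
Only the xrange -> range swap is needed here, since Python 3's range is already the lazy iterator that xrange used to be and the indices are only used for iteration. As an aside, a float range like the one frange builds is commonly produced with NumPy (a sketch, not what Plot.py does):

import numpy as np
# five evenly spaced values from 0.0 to 1.0 inclusive
print(np.linspace(0.0, 1.0, 5).tolist())    # [0.0, 0.25, 0.5, 0.75, 1.0]
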
diff --git a/wqflask/wqflask/correlation/correlation_functions.py b/wqflask/wqflask/correlation/correlation_functions.py
index 06dec795..abaa212f 100644
--- a/wqflask/wqflask/correlation/correlation_functions.py
+++ b/wqflask/wqflask/correlation/correlation_functions.py
@@ -50,12 +50,12 @@ from flask import Flask, g
def cal_zero_order_corr_for_tiss (primaryValue=[], targetValue=[], method='pearson'):
- R_primary = rpy2.robjects.FloatVector(range(len(primaryValue)))
+ R_primary = rpy2.robjects.FloatVector(list(range(len(primaryValue))))
N = len(primaryValue)
for i in range(len(primaryValue)):
R_primary[i] = primaryValue[i]
- R_target = rpy2.robjects.FloatVector(range(len(targetValue)))
+ R_target = rpy2.robjects.FloatVector(list(range(len(targetValue))))
for i in range(len(targetValue)):
R_target[i]=targetValue[i]
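
The list() wrappers are again the blanket 2to3 rewrite; rpy2's FloatVector accepts any iterable of numbers, so the build-then-fill loops could likely be collapsed into direct construction. A sketch (assumes rpy2 and R are installed; the values are made up):

import rpy2.robjects as robjects

primaryValue = [1.2, 3.4, 5.6]
targetValue = [2.1, 4.3, 6.5]
# construct the R vectors directly from the Python lists instead of filling them index by index
R_primary = robjects.FloatVector(primaryValue)
R_target = robjects.FloatVector(targetValue)
print(list(R_primary), list(R_target))
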
diff --git a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py
index e6c817e7..832746bb 100644
--- a/wqflask/wqflask/correlation_matrix/show_corr_matrix.py
+++ b/wqflask/wqflask/correlation_matrix/show_corr_matrix.py
@@ -194,7 +194,7 @@ class CorrelationMatrix(object):
if self.do_PCA == True:
self.pca_works = "True"
self.pca_trait_ids = []
- pca = self.calculate_pca(range(len(self.traits)), corr_eigen_value, corr_eigen_vectors)
+ pca = self.calculate_pca(list(range(len(self.traits))), corr_eigen_value, corr_eigen_vectors)
self.loadings_array = self.process_loadings()
else:
self.pca_works = "False"
@@ -203,8 +203,8 @@ class CorrelationMatrix(object):
self.js_data = dict(traits = [trait.name for trait in self.traits],
groups = groups,
- cols = range(len(self.traits)),
- rows = range(len(self.traits)),
+ cols = list(range(len(self.traits))),
+ rows = list(range(len(self.traits))),
samples = self.all_sample_list,
sample_data = self.sample_data,)
# corr_results = [result[1] for result in result_row for result_row in self.corr_results])
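
The list() wrap matters for js_data: the dict is presumably serialised to JSON for the template, and in Python 3 a range object is not JSON-serialisable while a list is. The same reasoning applies to the identical js_data change in network_graph.py below. A minimal check:

import json
print(json.dumps({"cols": list(range(3)), "rows": list(range(3))}))   # {"cols": [0, 1, 2], "rows": [0, 1, 2]}
# json.dumps({"cols": range(3)}) raises TypeError in Python 3
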
diff --git a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py
index 0c560582..189c1985 100644
--- a/wqflask/wqflask/marker_regression/qtlreaper_mapping.py
+++ b/wqflask/wqflask/marker_regression/qtlreaper_mapping.py
@@ -228,4 +228,4 @@ def natural_sort(marker_list):
"""
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', str(marker_list[key]['chr'])) ]
- return sorted(range(len(marker_list)), key = alphanum_key) \ No newline at end of file
+ return sorted(list(range(len(marker_list))), key = alphanum_key) \ No newline at end of file
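
sorted() accepts any iterable, so the added list() is redundant here (though harmless); it is simply the mechanical 2to3 rewrite. A standalone check of the natural chromosome ordering the key produces (the marker data is made up):

import re

marker_list = [{'chr': '10'}, {'chr': '2'}, {'chr': 'X'}, {'chr': '1'}]
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', str(marker_list[key]['chr']))]
# indices ordered as chr 1, 2, 10, X rather than the lexicographic 1, 10, 2, X
print(sorted(range(len(marker_list)), key=alphanum_key))    # [3, 1, 0, 2]
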
diff --git a/wqflask/wqflask/network_graph/network_graph.py b/wqflask/wqflask/network_graph/network_graph.py
index f61c40b4..ac2ff017 100644
--- a/wqflask/wqflask/network_graph/network_graph.py
+++ b/wqflask/wqflask/network_graph/network_graph.py
@@ -202,8 +202,8 @@ class NetworkGraph(object):
self.js_data = dict(traits = [trait.name for trait in self.traits],
groups = groups,
- cols = range(len(self.traits)),
- rows = range(len(self.traits)),
+ cols = list(range(len(self.traits))),
+ rows = list(range(len(self.traits))),
samples = self.all_sample_list,
sample_data = self.sample_data,
elements = self.elements,)
diff --git a/wqflask/wqflask/pbkdf2.py b/wqflask/wqflask/pbkdf2.py
index 0ed50790..917b9d31 100644
--- a/wqflask/wqflask/pbkdf2.py
+++ b/wqflask/wqflask/pbkdf2.py
@@ -68,9 +68,9 @@ def pbkdf2_bin(data, salt, iterations=1000, keylen=24, hashfunc=None):
h.update(x)
return list(map(ord, h.digest()))
buf = []
- for block in xrange(1, -(-keylen // mac.digest_size) + 1):
+ for block in range(1, -(-keylen // mac.digest_size) + 1):
rv = u = _pseudorandom(salt + _pack_int(block))
- for i in xrange(iterations - 1):
+ for i in range(iterations - 1):
u = _pseudorandom(''.join(map(chr, u)))
rv = list(starmap(xor, zip(rv, u)))
buf.extend(rv)
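
Worth noting: the xrange fixes alone do not make pbkdf2_bin Python 3-clean, since map(ord, h.digest()) and ''.join(map(chr, u)) still assume str-based digests rather than bytes. Python 3's standard library provides this primitive directly, so the hand-rolled loop could in principle be replaced (a sketch, not a change this commit makes; the password and salt are made up, and SHA-1 is assumed as the default hash):

import hashlib

key = hashlib.pbkdf2_hmac('sha1', b'secret-password', b'salt', 1000, dklen=24)
print(key.hex())
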
diff --git a/wqflask/wqflask/search_results.py b/wqflask/wqflask/search_results.py
index de4b01eb..5b3946e3 100644
--- a/wqflask/wqflask/search_results.py
+++ b/wqflask/wqflask/search_results.py
@@ -266,7 +266,7 @@ def get_GO_symbols(a_search):
def insert_newlines(string, every=64):
""" This is because it is seemingly impossible to change the width of the description column, so I'm just manually adding line breaks """
lines = []
- for i in xrange(0, len(string), every):
+ for i in range(0, len(string), every):
lines.append(string[i:i+every])
return '\n'.join(lines)
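
For reference, the fixed loop simply slices the string into fixed-width chunks; a quick check of the behaviour (the input string is made up):

s = 'abcdefgh'
every = 3
print([s[i:i + every] for i in range(0, len(s), every)])    # ['abc', 'def', 'gh']
print('\n'.join(s[i:i + every] for i in range(0, len(s), every)))
# abc
# def
# gh
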