author     zsloan                2021-11-16 16:58:13 -0600
committer  GitHub                2021-11-16 16:58:13 -0600
commit     7e3a6b1d91321f85f8fd67d7713cdafaaadb9a9a (patch)
tree       03a2adcc4dd2f017637b7ebbcb0ef85f53d06230 /wqflask
parent     92c992f6a311fa4d01e479de1bb8da38a3b798d2 (diff)
parent     f5dd4b9f6b02ebe20733df0e8bd5e1b5e89f9e93 (diff)
Merge pull request #632 from Alexanderlacuna/tempfix/disable_caching
Temporary fix: disable reading from (and writing to) the results cache
Diffstat (limited to 'wqflask')
-rw-r--r--  wqflask/base/data_set.py                          |  6 ++----
-rw-r--r--  wqflask/wqflask/correlation/show_corr_results.py  | 21 +--------------------
2 files changed, 3 insertions(+), 24 deletions(-)
diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py
index 70c58136..768ad49b 100644
--- a/wqflask/base/data_set.py
+++ b/wqflask/base/data_set.py
@@ -754,8 +754,8 @@ class DataSet:
         # Postgres doesn't have that limit, so we can get rid of this after we transition
         chunk_size = 50
         number_chunks = int(math.ceil(len(sample_ids) / chunk_size))
-        cached_results = fetch_cached_results(self.name, self.type)
-        # cached_results = None
+        # cached_results = fetch_cached_results(self.name, self.type)
+        cached_results = None
         if cached_results is None:
             trait_sample_data = []
             for sample_ids_step in chunks.divide_into_chunks(sample_ids, number_chunks):
@@ -800,8 +800,6 @@ class DataSet:
                 results = g.db.execute(query).fetchall()
                 trait_sample_data.append([list(result) for result in results])
-            cache_dataset_results(
-                self.name, self.type, trait_sample_data)
         else:
             trait_sample_data = cached_results
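
With this change, cached_results is always None: every request falls through to the chunked SQL path, and with the cache_dataset_results call removed, nothing is written back. Below is a minimal, runnable sketch of the resulting control flow; the helper bodies are hypothetical stand-ins (only the names fetch_cached_results, cache_dataset_results, and chunks.divide_into_chunks appear in the diff):

import math

CHUNK_SIZE = 50  # MySQL-era limit noted in the hunk's comment


def divide_into_chunks(items, number_chunks):
    # Hypothetical stand-in for chunks.divide_into_chunks.
    size = int(math.ceil(len(items) / max(1, number_chunks)))
    return [items[i:i + size] for i in range(0, len(items), size)]


def get_trait_data_sketch(sample_ids, run_query):
    # run_query(ids_chunk) stands in for building and executing the SQL
    # query against g.db; it should return an iterable of row tuples.
    cached_results = None  # was: fetch_cached_results(self.name, self.type)
    if cached_results is None:
        trait_sample_data = []
        number_chunks = int(math.ceil(len(sample_ids) / CHUNK_SIZE))
        for ids_chunk in divide_into_chunks(sample_ids, number_chunks):
            results = run_query(ids_chunk)
            trait_sample_data.append([list(row) for row in results])
        # was: cache_dataset_results(self.name, self.type, trait_sample_data)
    else:
        trait_sample_data = cached_results
    return trait_sample_data

For example, get_trait_data_sketch(list(range(120)), lambda ids: [(i,) for i in ids]) issues three chunked queries, none larger than the 50-id limit.
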
diff --git a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py
index 42010a1e..55915a74 100644
--- a/wqflask/wqflask/correlation/show_corr_results.py
+++ b/wqflask/wqflask/correlation/show_corr_results.py
@@ -86,30 +86,17 @@ def correlation_json_for_table(correlation_data, this_trait, this_dataset, targe
     corr_results = correlation_data['correlation_results']
     results_list = []
-    file_name = f"{target_dataset['name']}_metadata.json"
-
-    file_path = os.path.join(TMPDIR, file_name)
-    new_traits_metadata = {}
-
-    try:
-        with open(file_path,"r+") as file_handler:
-            dataset_metadata = json.load(file_handler)
-
-    except FileNotFoundError:
-        Path(file_path).touch(exist_ok=True)
-        dataset_metadata = {}
     for i, trait_dict in enumerate(corr_results):
         trait_name = list(trait_dict.keys())[0]
         trait = trait_dict[trait_name]
-        target_trait = dataset_metadata.get(trait_name)
+        target_trait = None
         if target_trait is None:
             target_trait_ob = create_trait(dataset=target_dataset_ob,
                                            name=trait_name,
                                            get_qtl_info=True)
             target_trait = jsonable(target_trait_ob, target_dataset_ob)
-            new_traits_metadata[trait_name] = target_trait
         if target_trait['view'] == False:
             continue
         results_dict = {}
@@ -184,12 +171,6 @@ def correlation_json_for_table(correlation_data, this_trait, this_dataset, targe
         results_list.append(results_dict)
-
-    if bool(new_traits_metadata):
-        # that means new traits exists
-        dataset_metadata.update(new_traits_metadata)
-        with open(file_path,"w+") as file_handler:
-            json.dump(dataset_metadata, file_handler)
     return json.dumps(results_list)
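
The removed block implemented a per-dataset trait-metadata cache as a plain JSON file under TMPDIR. A condensed, runnable reconstruction of that pattern (the TMPDIR value here is a stand-in; in GN2 it comes from the application's settings):

import json
import os
from pathlib import Path

TMPDIR = "/tmp"  # stand-in; GN2 reads this from its configuration


def load_metadata_cache(dataset_name):
    # Reconstruction of the removed read path.
    file_path = os.path.join(TMPDIR, f"{dataset_name}_metadata.json")
    try:
        with open(file_path, "r+") as file_handler:
            return file_path, json.load(file_handler)
    except FileNotFoundError:
        Path(file_path).touch(exist_ok=True)
        return file_path, {}


def save_metadata_cache(file_path, dataset_metadata, new_traits_metadata):
    # Reconstruction of the removed write path: merge any newly computed
    # trait metadata and rewrite the whole file.
    if new_traits_metadata:
        dataset_metadata.update(new_traits_metadata)
        with open(file_path, "w+") as file_handler:
            json.dump(dataset_metadata, file_handler)

Two fragilities in this pattern plausibly motivated disabling it rather than patching it: Path(file_path).touch(exist_ok=True) leaves an empty file behind, so the next request's json.load() raises json.JSONDecodeError, which the except FileNotFoundError clause does not catch; and concurrent requests rewriting the same file with "w+" can interleave and leave truncated JSON on disk. Commenting the cache out trades speed for correctness until a safer store is in place.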