From cddae6a6593a3f65cdb9565207a9a58c338e9024 Mon Sep 17 00:00:00 2001
From: Alexander Kabui
Date: Wed, 17 Nov 2021 01:19:00 +0300
Subject: temp disable query caching

---
 wqflask/base/data_set.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py
index 70c58136..768ad49b 100644
--- a/wqflask/base/data_set.py
+++ b/wqflask/base/data_set.py
@@ -754,8 +754,8 @@ class DataSet:
         # Postgres doesn't have that limit, so we can get rid of this after we transition
         chunk_size = 50
         number_chunks = int(math.ceil(len(sample_ids) / chunk_size))
-        cached_results = fetch_cached_results(self.name, self.type)
-        # cached_results = None
+        # cached_results = fetch_cached_results(self.name, self.type)
+        cached_results = None
         if cached_results is None:
             trait_sample_data = []
             for sample_ids_step in chunks.divide_into_chunks(sample_ids, number_chunks):
@@ -800,8 +800,6 @@ class DataSet:

                 results = g.db.execute(query).fetchall()
                 trait_sample_data.append([list(result) for result in results])
-            cache_dataset_results(
-                self.name, self.type, trait_sample_data)

         else:
             trait_sample_data = cached_results
--
cgit v1.2.3


From f5dd4b9f6b02ebe20733df0e8bd5e1b5e89f9e93 Mon Sep 17 00:00:00 2001
From: Alexander Kabui
Date: Wed, 17 Nov 2021 01:31:38 +0300
Subject: temp disable traits metadata cache

---
 wqflask/wqflask/correlation/show_corr_results.py | 21 +--------------------
 1 file changed, 1 insertion(+), 20 deletions(-)

diff --git a/wqflask/wqflask/correlation/show_corr_results.py b/wqflask/wqflask/correlation/show_corr_results.py
index 42010a1e..55915a74 100644
--- a/wqflask/wqflask/correlation/show_corr_results.py
+++ b/wqflask/wqflask/correlation/show_corr_results.py
@@ -86,30 +86,17 @@ def correlation_json_for_table(correlation_data, this_trait, this_dataset, targe

     corr_results = correlation_data['correlation_results']
     results_list = []

-    file_name = f"{target_dataset['name']}_metadata.json"
-
-    file_path = os.path.join(TMPDIR, file_name)
-    new_traits_metadata = {}
-
-    try:
-        with open(file_path,"r+") as file_handler:
-            dataset_metadata = json.load(file_handler)
-
-    except FileNotFoundError:
-        Path(file_path).touch(exist_ok=True)
-        dataset_metadata = {}

     for i, trait_dict in enumerate(corr_results):
         trait_name = list(trait_dict.keys())[0]
         trait = trait_dict[trait_name]
-        target_trait = dataset_metadata.get(trait_name)
+        target_trait = None
         if target_trait is None:
             target_trait_ob = create_trait(dataset=target_dataset_ob,
                                            name=trait_name,
                                            get_qtl_info=True)
             target_trait = jsonable(target_trait_ob, target_dataset_ob)
-            new_traits_metadata[trait_name] = target_trait
         if target_trait['view'] == False:
             continue
         results_dict = {}
@@ -184,12 +171,6 @@ def correlation_json_for_table(correlation_data, this_trait, this_dataset, targe

         results_list.append(results_dict)

-
-    if bool(new_traits_metadata):
-        # that means new traits exists
-        dataset_metadata.update(new_traits_metadata)
-        with open(file_path,"w+") as file_handler:
-            json.dump(dataset_metadata, file_handler)

     return json.dumps(results_list)
--
cgit v1.2.3
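
For context (not part of the patches above): the first commit stops calling fetch_cached_results / cache_dataset_results at the dataset query site, but the patch does not show those helpers' bodies. The sketch below is a minimal, assumed illustration of what per-dataset JSON caching helpers with those call signatures could look like; the file naming, TMPDIR usage, and error handling here are guesses, not GeneNetwork2's actual implementation.

# Assumed illustration only: the patch shows the call sites
# fetch_cached_results(self.name, self.type) and
# cache_dataset_results(self.name, self.type, trait_sample_data),
# not the helpers themselves.  Paths and error handling are guesses.
import json
import os
import tempfile

TMPDIR = tempfile.gettempdir()  # stand-in; GN2 configures its own temp directory


def _cache_file(dataset_name, dataset_type):
    # One JSON file per (dataset name, dataset type) pair, mirroring the
    # arguments passed at the call sites in data_set.py.
    return os.path.join(TMPDIR, f"{dataset_name}_{dataset_type}_results.json")


def fetch_cached_results(dataset_name, dataset_type):
    """Return previously cached trait sample data, or None on a cache miss."""
    try:
        with open(_cache_file(dataset_name, dataset_type)) as file_handler:
            return json.load(file_handler)
    except (FileNotFoundError, json.JSONDecodeError):
        return None


def cache_dataset_results(dataset_name, dataset_type, trait_sample_data):
    """Persist freshly queried sample data so later requests can skip MySQL."""
    with open(_cache_file(dataset_name, dataset_type), "w") as file_handler:
        json.dump(trait_sample_data, file_handler)

Flipping the two lines at the call site, as the commit does, is the least invasive way to rule caching out while debugging, since the helpers and any existing cache files stay untouched and re-enabling is a one-line revert.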
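The second commit removes a per-dataset trait-metadata cache whose logic is fully visible in the deleted lines: read {dataset}_metadata.json from TMPDIR, fall back to an empty dict when the file is missing, and write back any traits that had to be freshly built during the request. The following is a condensed, self-contained restatement of that pattern; metadata_for_traits and build_trait_metadata are hypothetical names standing in for the inline code and for create_trait(...) plus jsonable(...), and the empty-file case is guarded here even though the removed code only caught FileNotFoundError.

# Condensed restatement of the removed metadata cache.  metadata_for_traits and
# build_trait_metadata are hypothetical names: the original logic was inline in
# correlation_json_for_table and used create_trait(...) + jsonable(...) instead.
import json
import os
import tempfile
from pathlib import Path

TMPDIR = tempfile.gettempdir()  # stand-in; GN2 configures its own temp directory


def build_trait_metadata(trait_name):
    # Hypothetical stand-in for create_trait(...) followed by jsonable(...).
    return {"name": trait_name, "view": True}


def metadata_for_traits(dataset_name, trait_names):
    """Return metadata for each trait, building and caching whatever is missing."""
    file_path = os.path.join(TMPDIR, f"{dataset_name}_metadata.json")
    try:
        with open(file_path, "r+") as file_handler:
            dataset_metadata = json.load(file_handler)
    except FileNotFoundError:
        Path(file_path).touch(exist_ok=True)
        dataset_metadata = {}
    except json.JSONDecodeError:
        # Empty or corrupt cache file; the removed code did not guard this case.
        dataset_metadata = {}

    new_traits_metadata = {}
    for trait_name in trait_names:
        if trait_name not in dataset_metadata:
            new_traits_metadata[trait_name] = build_trait_metadata(trait_name)

    if new_traits_metadata:
        # Only rewrite the cache file when something new was built this request.
        dataset_metadata.update(new_traits_metadata)
        with open(file_path, "w+") as file_handler:
            json.dump(dataset_metadata, file_handler)

    return {name: dataset_metadata[name] for name in trait_names}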