path: root/wqflask/base/data_set.py
author    Alexander Kabui 2021-11-16 15:53:50 +0300
committer Alexander Kabui 2021-11-16 15:53:50 +0300
commit    a8ccaf03ba151f9ceca2f0224af33db230a8c8b3 (patch)
tree      b5cc33624ecf68e218b7eb4e868c7851a6b348c3 /wqflask/base/data_set.py
parent    04452c274d51621a0cab1b8dce5b8101c69496b6 (diff)
download  genenetwork2-a8ccaf03ba151f9ceca2f0224af33db230a8c8b3.tar.gz
test generate new files
Diffstat (limited to 'wqflask/base/data_set.py')
-rw-r--r--  wqflask/base/data_set.py | 8
1 files changed, 5 insertions, 3 deletions
diff --git a/wqflask/base/data_set.py b/wqflask/base/data_set.py
index a3a720ad..cae1a2a7 100644
--- a/wqflask/base/data_set.py
+++ b/wqflask/base/data_set.py
@@ -810,7 +810,7 @@ class DataSet:
                 trait_name = trait_sample_data[0][trait_counter][0]
                 for chunk_counter in range(int(number_chunks)):
                     self.trait_data[trait_name] += (
-                    trait_sample_data[chunk_counter][trait_counter][data_start_pos:])
+                        trait_sample_data[chunk_counter][trait_counter][data_start_pos:])
 
             cache_dataset_results(
                 self.name, self.type, self.trait_data)
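
Note: the hunk above only re-indents the continuation line, but the surrounding loop is the core of the uncached path: sample values for each trait arrive split across several query chunks and are stitched back into one list per trait before being cached. A minimal sketch of that idea (not the project's code; DATA_START_POS and the chunk layout are assumptions) looks like this:

    # trait_sample_data is assumed to be a list of chunks, one per sample-column
    # query, each chunk holding one row per trait; values are re-joined per trait
    # by walking every chunk at the same trait index.
    DATA_START_POS = 1  # assumed: index where sample values begin in each row

    def stitch_chunks(trait_sample_data, data_start_pos=DATA_START_POS):
        trait_data = {}
        number_chunks = len(trait_sample_data)
        for trait_counter, row in enumerate(trait_sample_data[0]):
            trait_name = row[0]
            trait_data[trait_name] = []
            for chunk_counter in range(number_chunks):
                trait_data[trait_name] += (
                    trait_sample_data[chunk_counter][trait_counter][data_start_pos:])
        return trait_data
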
@@ -818,6 +818,8 @@ class DataSet:
         else:
 
             self.trait_data = cached_results
+
+
 class PhenotypeDataSet(DataSet):
     DS_NAME_MAP['Publish'] = 'PhenotypeDataSet'
 
@@ -1291,7 +1293,7 @@ def cache_dataset_results(dataset_name: str, dataset_type: str, query_results: L
 
     results = r.set(f"{dataset_type}timestamp", table_timestamp)
 
-    file_name = generate_hash_file(dataset_name, table_timestamp)
+    file_name = generate_hash_file(dataset_name, dataset_type, table_timestamp)
     file_path = os.path.join(TMPDIR, f"{file_name}.json")
 
     with open(file_path, "w") as file_handler:
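
The substantive change in this hunk is that generate_hash_file now receives dataset_type as well as dataset_name and the table timestamp, so the cache file name is scoped by type. The following is only a sketch of the write path under the assumption that generate_hash_file hashes its arguments into a stable file name; the real helper (and how TMPDIR is configured) may differ, and the real cache_dataset_results reads the timestamp itself rather than taking it as a parameter.

    import hashlib
    import json
    import os

    TMPDIR = "/tmp"  # assumed; the project reads this from its configuration

    def generate_hash_file(dataset_name, dataset_type, table_timestamp):
        # Including dataset_type keeps two datasets with the same name but
        # different types from sharing a cache file.
        key = f"{dataset_name}{dataset_type}{table_timestamp}"
        return hashlib.md5(key.encode()).hexdigest()

    def cache_dataset_results(dataset_name, dataset_type, table_timestamp, trait_data):
        # Simplified: timestamp passed in directly instead of fetched from Redis.
        file_name = generate_hash_file(dataset_name, dataset_type, table_timestamp)
        file_path = os.path.join(TMPDIR, f"{file_name}.json")
        with open(file_path, "w") as file_handler:
            json.dump(trait_data, file_handler)
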
@@ -1308,7 +1310,7 @@ def fetch_cached_results(dataset_name: str, dataset_type: str):
     else:
         table_timestamp = ""
 
-    file_name = generate_hash_file(dataset_name, table_timestamp)
+    file_name = generate_hash_file(dataset_name, dataset_type, table_timestamp)
     file_path = os.path.join(TMPDIR, f"{file_name}.json")
     try:
         with open(file_path, "r") as file_handler:
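
The read path has to derive exactly the same file name as the write path, which is why the same call-site change appears in fetch_cached_results. Reusing generate_hash_file and TMPDIR from the sketch above (and again passing the timestamp in directly, whereas the real function reads it from Redis):

    def fetch_cached_results(dataset_name, dataset_type, table_timestamp):
        file_name = generate_hash_file(dataset_name, dataset_type, table_timestamp)
        file_path = os.path.join(TMPDIR, f"{file_name}.json")
        try:
            with open(file_path, "r") as file_handler:
                return json.load(file_handler)
        except FileNotFoundError:
            return None  # cache miss; caller falls back to querying the database
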