author | Alexander Kabui | 2021-11-18 12:03:48 +0300
---|---|---
committer | Alexander Kabui | 2021-11-18 12:03:48 +0300
commit | 4de623130dca019d15f956e91ec999fddc2e2a0f (patch) |
tree | fdf16f2fc44d6835749807f9800125d9365eb164 /wqflask |
parent | 56b574b903244a64aecaa54e5305b25bb642b254 (diff) |
download | genenetwork2-4de623130dca019d15f956e91ec999fddc2e2a0f.tar.gz |
remove unused functions; rename function names
Diffstat (limited to 'wqflask')
-rw-r--r-- | wqflask/wqflask/correlation/pre_computes.py | 15 |
1 file changed, 4 insertions(+), 11 deletions(-)
diff --git a/wqflask/wqflask/correlation/pre_computes.py b/wqflask/wqflask/correlation/pre_computes.py
index 77592a3a..b95ceba5 100644
--- a/wqflask/wqflask/correlation/pre_computes.py
+++ b/wqflask/wqflask/correlation/pre_computes.py
@@ -9,7 +9,7 @@ from base.webqtlConfig import TMPDIR
 
 
 def fetch_all_cached_metadata(dataset_name):
     """in a gvein dataset fetch all the traits metadata"""
-    file_name = generate_file_name(dataset_name, suffix="metadata")
+    file_name = generate_filename(dataset_name, suffix="metadata")
 
     file_path = os.path.join(TMPDIR, file_name)
@@ -33,20 +33,13 @@ def cache_new_traits_metadata(dataset_metadata: dict, new_traits_metadata, file_
         json.dump(dataset_metadata, file_handler)
 
 
-def generate_file_name(*args, suffix="", file_ext="json"):
+def generate_filename(*args, suffix="", file_ext="json"):
     """given a list of args generate a unique filename"""
 
     string_unicode = f"{*args,}".encode()
     return f"{hashlib.md5(string_unicode).hexdigest()}_{suffix}.{file_ext}"
 
 
-def generate_filename(base_dataset_name, target_dataset_name, base_timestamp, target_dataset_timestamp):
-    """generate unique filename"""
-
-    string_unicode = f"{base_dataset_name}{target_dataset_name}{base_timestamp}{target_dataset_timestamp}sample_corr_compute".encode()
-    return hashlib.md5(string_unicode).hexdigest()
-
-
 def cache_compute_results(base_dataset_type,
                           base_dataset_name,
                           target_dataset_name,
@@ -65,7 +58,7 @@ def cache_compute_results(base_dataset_type,
 
         target_dataset_timestamp = base_timestamp
 
-    file_name = generate_file_name(
+    file_name = generate_filename(
         base_dataset_name, target_dataset_name,
         base_timestamp, target_dataset_timestamp,
         suffix="corr_precomputes")
@@ -100,7 +93,7 @@ def fetch_precompute_results(base_dataset_name,
     base_timestamp = target_dataset_timestamp = query_table_timestamp(
         dataset_type)
 
-    file_name = generate_file_name(
+    file_name = generate_filename(
         base_dataset_name, target_dataset_name,
         base_timestamp, target_dataset_timestamp,
         suffix="corr_precomputes")
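For context, the surviving generate_filename helper builds a deterministic cache key: it stringifies its positional arguments as a tuple, hashes that with MD5, and appends a suffix and file extension. The sketch below is a minimal standalone illustration of that scheme, not part of the commit; the function body is copied from the diff above, while the dataset names and timestamp strings are invented placeholders (in the real code the timestamps come from query_table_timestamp and the file is written under TMPDIR).

```python
# Minimal standalone sketch of the cache-filename scheme kept by this commit.
# generate_filename is copied from wqflask/wqflask/correlation/pre_computes.py;
# the dataset names and timestamps below are hypothetical examples.
import hashlib


def generate_filename(*args, suffix="", file_ext="json"):
    """given a list of args generate a unique filename"""
    string_unicode = f"{*args,}".encode()
    return f"{hashlib.md5(string_unicode).hexdigest()}_{suffix}.{file_ext}"


if __name__ == "__main__":
    # Metadata cache file for a single dataset (cf. fetch_all_cached_metadata).
    print(generate_filename("HC_M2_0606_P", suffix="metadata"))

    # Correlation precompute cache file keyed on both dataset names plus their
    # table timestamps (cf. cache_compute_results / fetch_precompute_results).
    print(generate_filename(
        "HC_M2_0606_P", "BXDPublish",
        "2021-11-18", "2021-11-18",
        suffix="corr_precomputes"))
```

Because identical arguments always hash to the same name, a later request for the same dataset pair with unchanged table timestamps resolves to the same cached JSON file, which is what lets fetch_precompute_results find what cache_compute_results wrote.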