author    zsloan 2021-10-12 16:08:54 -0500
committer GitHub 2021-10-12 16:08:54 -0500
commit    35105c816726b58dc376b2c3925d48077aeca675 (patch)
tree      a7d184e4eea8aeb4b9bf0d17468ba6a1efafef46 /scripts
parent    70023c835bdeeffc48efafe96626ac5b01b5a6d2 (diff)
parent    a212ad123f902b6a9c74bcac1d98bc274cebbdda (diff)
download  genenetwork2-35105c816726b58dc376b2c3925d48077aeca675.tar.gz
Merge branch 'testing' into feature/add_resizeable_columns
Diffstat (limited to 'scripts')
-rw-r--r--  scripts/authentication/group.py     | 49
-rw-r--r--  scripts/authentication/resource.py  |  8
-rw-r--r--  scripts/convert_dol_genotypes.py    | 74
3 files changed, 110 insertions(+), 21 deletions(-)
diff --git a/scripts/authentication/group.py b/scripts/authentication/group.py
index 76c7fb4f..c8c2caad 100644
--- a/scripts/authentication/group.py
+++ b/scripts/authentication/group.py
@@ -29,8 +29,10 @@ import argparse
import datetime
import redis
import json
+import uuid
+
+from typing import Dict, Optional, Set
-from typing import Dict, List, Optional, Set
def create_group_data(users: Dict, target_group: str,
                      members: Optional[str] = None,
@@ -39,6 +41,9 @@ def create_group_data(users: Dict, target_group: str,
"field", and "value" that can be used in a redis hash as follows:
HSET key field value
+ The "field" return value is a unique-id that is used to
+ distinguish the groups.
+
Parameters:
- `users`: a list of users for example:
@@ -71,26 +76,31 @@ def create_group_data(users: Dict, target_group: str,
me@test2.com, me@test3.com"
"""
+    # Emails
+    _members: Set = set("".join(members.split()).split(",")
+                        if members else [])
+    _admins: Set = set("".join(admins.split()).split(",")
+                       if admins else [])
-    _members: List = "".join(members.split()).split(",") if members else []
-    _admins: List = "".join(admins.split()).split(",") if admins else []
+    # Unique IDs
+    member_ids: Set = set()
+    admin_ids: Set = set()
-    user_ids: Dict = dict()
    for user_id, user_details in users.items():
        _details = json.loads(user_details)
-        if _details.get("email_address"):
-            user_ids[_details.get("email_address")] = user_id
-    print(user_ids)
+        if _details.get("email_address") in _members:
+            member_ids.add(user_id)
+        if _details.get("email_address") in _admins:
+            admin_ids.add(user_id)
+
+    timestamp: str = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p')
return {"key": "groups",
- "field": target_group,
+ "field": str(uuid.uuid4()),
"value": json.dumps({
- "id": target_group,
"name": target_group,
- "admins": [user_ids[admin] for admin in _admins
- if admin in user_ids],
- "members": [user_ids[member] for member in _members
- if member in user_ids],
- "changed_timestamp": datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p')
+ "admins": list(admin_ids),
+ "members": list(member_ids),
+ "changed_timestamp": timestamp,
})}
@@ -124,17 +134,18 @@ if __name__ == "__main__":
                             members=members,
                             admins=admins)
-    if not REDIS_CONN.hget("groups", data.get("field", "")):
-        updated_data = json.loads(data["value"])
-        updated_data["created_timestamp"] = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p')
-        data["value"] = json.dumps(updated_data)
+    if not REDIS_CONN.hget("groups", data.get("field")):
+        updated_data = json.loads(data["value"])
+        timestamp = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p')
+        updated_data["created_timestamp"] = timestamp
+        data["value"] = json.dumps(updated_data)
    created_p = REDIS_CONN.hset(data.get("key", ""),
                                data.get("field", ""),
                                data.get("value", ""))
    groups = json.loads(REDIS_CONN.hget("groups",
-                                        args.group_name))  # type: ignore
+                                        data.get("field")))  # type: ignore
    if created_p:
        exit(f"\nSuccessfully created the group: '{args.group_name}'\n"
             f"`HGETALL groups {args.group_name}`: {groups}\n")
diff --git a/scripts/authentication/resource.py b/scripts/authentication/resource.py
index 8fcf09d7..4996f34c 100644
--- a/scripts/authentication/resource.py
+++ b/scripts/authentication/resource.py
@@ -63,12 +63,16 @@ def recover_hash(name: str, file_path: str, set_function) -> bool:
if __name__ == "__main__":
# Initialising the parser CLI arguments
parser = argparse.ArgumentParser()
+ parser.add_argument("--group-id",
+ help="Add the group id to all resources")
parser.add_argument("--restore",
help="Restore from a given backup")
parser.add_argument("--enable-backup", action="store_true",
help="Create a back up before edits")
args = parser.parse_args()
+ if not args.group_id:
+ exit("Please specify the group-id!\n")
    if args.restore:
        if recover_hash(name="resources",
                        file_path=args.back_up,
@@ -92,8 +96,8 @@ if __name__ == "__main__":
    for resource_id, resource in RESOURCES.items():
        _resource = json.loads(resource)  # str -> dict conversion
-        _resource["group_masks"] = {"editors": {"metadata": "edit",
-                                                "data": "edit"}}
+        _resource["group_masks"] = {args.group_id: {"metadata": "edit",
+                                                    "data": "edit"}}
        REDIS_CONN.hset("resources",
                        resource_id,
                        json.dumps(_resource))
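Note on the resource.py change above: group_masks is now keyed by the group id passed via --group-id rather than the literal "editors" key. A rough sketch of the resulting resource entry (the uuid and resource name below are made-up placeholders):

import json

group_id = "6f0b1c2d-0000-4000-8000-000000000000"  # hypothetical group id
resource_entry = {
    "name": "ExampleResource",  # hypothetical resource name
    "group_masks": {group_id: {"metadata": "edit", "data": "edit"}},
}
print(json.dumps(resource_entry, indent=2))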
diff --git a/scripts/convert_dol_genotypes.py b/scripts/convert_dol_genotypes.py
new file mode 100644
index 00000000..81b3bd6d
--- /dev/null
+++ b/scripts/convert_dol_genotypes.py
@@ -0,0 +1,74 @@
+# This is just to convert the Rqtl2 format genotype files for DOL into a .geno file
+# Everything is hard-coded since I doubt this will be re-used and I just wanted to generate the file quickly
+
+import os
+
+geno_dir = "/home/zas1024/gn2-zach/DO_genotypes/"
+markers_file = "/home/zas1024/gn2-zach/DO_genotypes/SNP_Map.txt"
+gn_geno_path = "/home/zas1024/gn2-zach/DO_genotypes/DOL.geno"
+
+# Iterate through the SNP_Map.txt file to get marker positions
+marker_data = {}
+with open(markers_file, "r") as markers_fh:
+    for i, line in enumerate(markers_fh):
+        if i == 0:
+            continue
+        else:
+            line_items = line.split("\t")
+            this_marker = {}
+            this_marker['chr'] = line_items[2] if line_items[2] != "0" else "M"
+            this_marker['pos'] = f'{float(line_items[3])/1000000:.6f}'
+            marker_data[line_items[1]] = this_marker
+
+# Iterate through R/qtl2 format genotype files and pull out the samplelist and genotypes for each marker
+sample_names = []
+for filename in os.listdir(geno_dir):
+ if "gm4qtl2_geno" in filename:
+ with open(geno_dir + "/" + filename, "r") as rqtl_geno_fh:
+ for i, line in enumerate(rqtl_geno_fh):
+ line_items = line.split(",")
+ if i < 3:
+ continue
+ elif not len(sample_names) and i == 3:
+ sample_names = [item.replace("TLB", "TB") for item in line_items[1:]]
+ elif i > 3:
+ marker_data[line_items[0]]['genotypes'] = ["X" if item.strip() == "-" else item.strip() for item in line_items[1:]]
+
+# Generate list of marker obs to iterate through when writing to .geno file
+marker_list = []
+for key, value in marker_data.items():
+    if 'genotypes' in value:
+        this_marker = {
+            'chr': value['chr'],
+            'locus': key,
+            'pos': value['pos'],
+            'genotypes': value['genotypes']
+        }
+        marker_list.append(this_marker)
+
+def sort_func(e):
+ """For ensuring that X/Y chromosomes/mitochondria are sorted to the end correctly"""
+ try:
+ return float((e['chr']))*1000 + float(e['pos'])
+ except:
+ if e['chr'] == "X":
+ return 20000 + float(e['pos'])
+ elif e['chr'] == "Y":
+ return 21000 + float(e['pos'])
+ elif e['chr'] == "M":
+ return 22000 + float(e['pos'])
+
+# Sort markers by chromosome
+marker_list.sort(key=sort_func)
+
+# Write lines to .geno file
+with open(gn_geno_path, "w") as gn_geno_fh:
+    gn_geno_fh.write("\t".join(["Chr", "Locus", "cM", "Mb"] + sample_names) + "\n")
+    for marker in marker_list:
+        row_contents = [
+            marker['chr'],
+            marker['locus'],
+            marker['pos'],
+            marker['pos']
+        ] + marker['genotypes']
+        gn_geno_fh.write("\t".join(row_contents) + "\n")
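For reference, the .geno file written by convert_dol_genotypes.py has a header row (Chr, Locus, cM, Mb, then the sample names) followed by one tab-separated row per marker, with the Mb position reused for the cM column. A small illustration with made-up sample names and genotype calls:

# Illustration only: the sample names and genotypes are invented, but the
# column layout matches what the script writes.
header = ["Chr", "Locus", "cM", "Mb", "TB1001", "TB1002"]
row = ["1", "UNC010001", "3.125000", "3.125000", "B", "H"]
print("\t".join(header))
print("\t".join(row))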