about summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--.github/workflows/main.yml16
-rw-r--r--scripts/authentication/group.py49
-rw-r--r--scripts/authentication/resource.py8
-rw-r--r--scripts/convert_dol_genotypes.py74
-rw-r--r--wqflask/wqflask/marker_regression/run_mapping.py25
-rw-r--r--wqflask/wqflask/static/new/javascript/initialize_show_trait_tables.js2
-rw-r--r--wqflask/wqflask/templates/show_trait.html2
7 files changed, 132 insertions, 44 deletions
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index f279a7e5..8e2c7966 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -55,11 +55,11 @@ jobs:
         GENENETWORK_FILES=/genotype_files/ bin/genenetwork2 \
         etc/default_settings.py -c -m unittest discover -v
 
-    - name: Test for Broken Links
-      run: |
-        env GN2_PROFILE=/gn2-profile \
-        TMPDIR=/tmp\
-        WEBSERVER_MODE=DEBUG LOG_LEVEL=DEBUG \
-        GENENETWORK_FILES=/genotype_files/ bin/genenetwork2 \
-        etc/default_settings.py -c \
-        $PWD/test/requests/links_scraper/genelinks.py
+    # - name: Test for Broken Links
+    #   run: |
+    #     env GN2_PROFILE=/gn2-profile \
+    #     TMPDIR=/tmp\
+    #     WEBSERVER_MODE=DEBUG LOG_LEVEL=DEBUG \
+    #     GENENETWORK_FILES=/genotype_files/ bin/genenetwork2 \
+    #     etc/default_settings.py -c \
+    #     $PWD/test/requests/links_scraper/genelinks.py
diff --git a/scripts/authentication/group.py b/scripts/authentication/group.py
index 76c7fb4f..c8c2caad 100644
--- a/scripts/authentication/group.py
+++ b/scripts/authentication/group.py
@@ -29,8 +29,10 @@ import argparse
 import datetime
 import redis
 import json
+import uuid
+
+from typing import Dict, Optional, Set
 
-from typing import Dict, List, Optional, Set
 
 def create_group_data(users: Dict, target_group: str,
                       members: Optional[str] = None,
@@ -39,6 +41,9 @@ def create_group_data(users: Dict, target_group: str,
     "field", and "value" that can be used in a redis hash as follows:
     HSET key field value
 
+    The "field" return value is a unique-id that is used to
+    distinguish the groups.
+
     Parameters:
 
     - `users`: a list of users for example:
@@ -71,26 +76,31 @@ def create_group_data(users: Dict, target_group: str,
       me@test2.com, me@test3.com"
 
     """
+    # Emails
+    _members: Set = set("".join(members.split()).split(",")
+                        if members else [])
+    _admins: Set = set("".join(admins.split()).split(",")
+                       if admins else [])
 
-    _members: List = "".join(members.split()).split(",") if members else []
-    _admins: List = "".join(admins.split()).split(",") if admins else []
+    # Unique IDs
+    member_ids: Set = set()
+    admin_ids: Set = set()
 
-    user_ids: Dict = dict()
     for user_id, user_details in users.items():
         _details = json.loads(user_details)
-        if _details.get("email_address"):
-            user_ids[_details.get("email_address")] = user_id
-    print(user_ids)
+        if _details.get("email_address") in _members:
+            member_ids.add(user_id)
+        if _details.get("email_address") in _admins:
+            admin_ids.add(user_id)
+
+    timestamp: str = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p')
     return {"key": "groups",
-            "field": target_group,
+            "field": str(uuid.uuid4()),
             "value": json.dumps({
-                "id": target_group,
                 "name": target_group,
-                "admins": [user_ids[admin] for admin in _admins
-                           if admin in user_ids],
-                "members": [user_ids[member] for member in _members
-                            if member in user_ids],
-                "changed_timestamp": datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p')
+                "admins": list(admin_ids),
+                "members": list(member_ids),
+                "changed_timestamp": timestamp,
             })}
 
 
@@ -124,17 +134,18 @@ if __name__ == "__main__":
         members=members,
         admins=admins)
 
-    if not REDIS_CONN.hget("groups", data.get("field", "")):
-      updated_data = json.loads(data["value"])
-      updated_data["created_timestamp"] = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p')
-      data["value"] = json.dumps(updated_data)
+    if not REDIS_CONN.hget("groups", data.get("field")):
+        updated_data = json.loads(data["value"])
+        timestamp = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p')
+        updated_data["created_timestamp"] = timestamp
+        data["value"] = json.dumps(updated_data)
 
     created_p = REDIS_CONN.hset(data.get("key", ""),
                                 data.get("field", ""),
                                 data.get("value", ""))
 
     groups = json.loads(REDIS_CONN.hget("groups",
-                                        args.group_name))  # type: ignore
+                                        data.get("field")))  # type: ignore
     if created_p:
         exit(f"\nSuccessfully created the group: '{args.group_name}'\n"
              f"`HGETALL groups {args.group_name}`: {groups}\n")
diff --git a/scripts/authentication/resource.py b/scripts/authentication/resource.py
index 8fcf09d7..4996f34c 100644
--- a/scripts/authentication/resource.py
+++ b/scripts/authentication/resource.py
@@ -63,12 +63,16 @@ def recover_hash(name: str, file_path: str, set_function) -> bool:
 if __name__ == "__main__":
     # Initialising the parser CLI arguments
     parser = argparse.ArgumentParser()
+    parser.add_argument("--group-id",
+                        help="Add the group id to all resources")
     parser.add_argument("--restore",
                         help="Restore from a given backup")
     parser.add_argument("--enable-backup", action="store_true",
                         help="Create a back up before edits")
     args = parser.parse_args()
 
+    if not args.group_id:
+        exit("Please specify the group-id!\n")
     if args.restore:
         if recover_hash(name="resources",
                         file_path=args.back_up,
@@ -92,8 +96,8 @@ if __name__ == "__main__":
 
     for resource_id, resource in RESOURCES.items():
         _resource = json.loads(resource)  # str -> dict conversion
-        _resource["group_masks"] = {"editors": {"metadata": "edit",
-                                                "data": "edit"}}
+        _resource["group_masks"] = {args.group_id: {"metadata": "edit",
+                                                    "data": "edit"}}
         REDIS_CONN.hset("resources",
                         resource_id,
                         json.dumps(_resource))
diff --git a/scripts/convert_dol_genotypes.py b/scripts/convert_dol_genotypes.py
new file mode 100644
index 00000000..81b3bd6d
--- /dev/null
+++ b/scripts/convert_dol_genotypes.py
@@ -0,0 +1,74 @@
+# This is just to convert the Rqtl2 format genotype files for DOL into a .geno file
+# Everything is hard-coded since I doubt this will be re-used and I just wanted to generate the file quickly
+
+import os
+
+geno_dir = "/home/zas1024/gn2-zach/DO_genotypes/"
+markers_file = "/home/zas1024/gn2-zach/DO_genotypes/SNP_Map.txt"
+gn_geno_path = "/home/zas1024/gn2-zach/DO_genotypes/DOL.geno"
+
+# Iterate through the SNP_Map.txt file to get marker positions
+marker_data = {}
+with open(markers_file, "r") as markers_fh:
+    for i, line in enumerate(markers_fh):
+        if i == 0:
+            continue
+        else:
+            line_items = line.split("\t")
+            this_marker = {}
+            this_marker['chr'] = line_items[2] if line_items[2] != "0" else "M"
+            this_marker['pos'] = f'{float(line_items[3])/1000000:.6f}'
+            marker_data[line_items[1]] = this_marker
+
+# Iterate through R/qtl2 format genotype files and pull out the samplelist and genotypes for each marker
+sample_names = []
+for filename in os.listdir(geno_dir):
+    if "gm4qtl2_geno" in filename:
+        with open(geno_dir + "/" + filename, "r") as rqtl_geno_fh:
+            for i, line in enumerate(rqtl_geno_fh):
+                line_items = line.split(",")
+                if i < 3:
+                    continue
+                elif not len(sample_names) and i == 3:
+                    sample_names = [item.replace("TLB", "TB") for item in line_items[1:]]
+                elif i > 3:
+                    marker_data[line_items[0]]['genotypes'] = ["X" if item.strip() == "-" else item.strip() for item in line_items[1:]]
+
+# Generate list of marker obs to iterate through when writing to .geno file
+marker_list = []
+for key, value in marker_data.items():
+    if 'genotypes' in value:
+        this_marker = {
+            'chr': value['chr'],
+            'locus': key,
+            'pos': value['pos'],
+            'genotypes': value['genotypes']
+        }
+        marker_list.append(this_marker)
+
+def sort_func(e):
+    """For ensuring that X/Y chromosomes/mitochondria are sorted to the end correctly"""
+    try:
+        return float((e['chr']))*1000 + float(e['pos'])
+    except ValueError:
+        if e['chr'] == "X":
+            return 20000 + float(e['pos'])
+        elif e['chr'] == "Y":
+            return 21000 + float(e['pos'])
+        elif e['chr'] == "M":
+            return 22000 + float(e['pos'])
+
+# Sort markers by chromosome
+marker_list.sort(key=sort_func)
+
+# Write lines to .geno file
+with open(gn_geno_path, "w") as gn_geno_fh:
+    gn_geno_fh.write("\t".join((["Chr", "Locus", "cM", "Mb"] + sample_names)))
+    for marker in marker_list:
+        row_contents = [
+            marker['chr'],
+            marker['locus'],
+            marker['pos'],
+            marker['pos']
+        ] + marker['genotypes']
+        gn_geno_fh.write("\t".join(row_contents) + "\n")
diff --git a/wqflask/wqflask/marker_regression/run_mapping.py b/wqflask/wqflask/marker_regression/run_mapping.py
index 290c4a14..80094057 100644
--- a/wqflask/wqflask/marker_regression/run_mapping.py
+++ b/wqflask/wqflask/marker_regression/run_mapping.py
@@ -104,7 +104,7 @@ class RunMapping:
         if "results_path" in start_vars:
             self.mapping_results_path = start_vars['results_path']
         else:
-            mapping_results_filename = "_".join([self.dataset.group.name, self.vals_hash]).replace("/", "_")
+            mapping_results_filename = "_".join([self.dataset.group.name, self.mapping_method, self.vals_hash]).replace("/", "_")
             self.mapping_results_path = "{}{}.csv".format(
                 webqtlConfig.GENERATED_IMAGE_DIR, mapping_results_filename)
 
@@ -405,8 +405,8 @@ class RunMapping:
                 total_markers = len(self.qtl_results)
 
                 with Bench("Exporting Results"):
-                    export_mapping_results(self.dataset, self.this_trait, self.qtl_results,
-                                           self.mapping_results_path, self.mapping_scale, self.score_type,
+                    export_mapping_results(self.dataset, self.this_trait, self.qtl_results, self.mapping_results_path,
+                                           self.mapping_method, self.mapping_scale, self.score_type,
                                            self.transform, self.covariates, self.n_samples, self.vals_hash)
 
                 with Bench("Trimming Markers for Figure"):
@@ -525,7 +525,11 @@ class RunMapping:
         return trimmed_genotype_data
 
 
-def export_mapping_results(dataset, trait, markers, results_path, mapping_scale, score_type, transform, covariates, n_samples, vals_hash):
+def export_mapping_results(dataset, trait, markers, results_path, mapping_method, mapping_scale, score_type, transform, covariates, n_samples, vals_hash):
+    if mapping_scale == "physic":
+        scale_string = "Mb"
+    else:
+        scale_string = "cM"
     with open(results_path, "w+") as output_file:
         output_file.write(
             "Time/Date: " + datetime.datetime.now().strftime("%x / %X") + "\n")
@@ -535,6 +539,7 @@ def export_mapping_results(dataset, trait, markers, results_path, mapping_scale,
         output_file.write("Trait: " + trait.display_name + "\n")
         output_file.write("Trait Hash: " + vals_hash + "\n")
         output_file.write("N Samples: " + str(n_samples) + "\n")
+        output_file.write("Mapping Tool: " + str(mapping_method) + "\n")
         if len(transform) > 0:
             transform_text = "Transform - "
             if transform == "qnorm":
@@ -564,10 +569,7 @@ def export_mapping_results(dataset, trait, markers, results_path, mapping_scale,
         output_file.write("Name,Chr,")
         if score_type.lower() == "-logP":
             score_type = "-logP"
-        if 'Mb' in markers[0]:
-            output_file.write("Mb," + score_type)
-        if 'cM' in markers[0]:
-            output_file.write("Cm," + score_type)
+        output_file.write(scale_string + "," + score_type)
         if "additive" in list(markers[0].keys()):
             output_file.write(",Additive")
         if "dominance" in list(markers[0].keys()):
@@ -575,11 +577,8 @@ def export_mapping_results(dataset, trait, markers, results_path, mapping_scale,
         output_file.write("\n")
         for i, marker in enumerate(markers):
             output_file.write(marker['name'] + "," + str(marker['chr']) + ",")
-            if 'Mb' in marker:
-                output_file.write(str(marker['Mb']) + ",")
-            if 'cM' in marker:
-                output_file.write(str(marker['cM']) + ",")
-            if "lod_score" in marker.keys():
+            output_file.write(str(marker[scale_string]) + ",")
+            if score_type == "-logP":
                 output_file.write(str(marker['lod_score']))
             else:
                 output_file.write(str(marker['lrs_value']))
diff --git a/wqflask/wqflask/static/new/javascript/initialize_show_trait_tables.js b/wqflask/wqflask/static/new/javascript/initialize_show_trait_tables.js
index 4de1b0ac..0a060cdc 100644
--- a/wqflask/wqflask/static/new/javascript/initialize_show_trait_tables.js
+++ b/wqflask/wqflask/static/new/javascript/initialize_show_trait_tables.js
@@ -130,6 +130,7 @@ var primary_table = $('#samples_primary').DataTable( {
       $(row).addClass("value_se");
       if (data.outlier) {
         $(row).addClass("outlier");
+        $(row).attr("style", "background-color: orange;");
       }
       $('td', row).eq(1).addClass("column_name-Index")
       $('td', row).eq(2).addClass("column_name-Sample")
@@ -189,6 +190,7 @@ if (js_data.sample_lists.length > 1){
         $(row).addClass("value_se");
         if (data.outlier) {
           $(row).addClass("outlier");
+          $(row).attr("style", "background-color: orange;");
         }
         $('td', row).eq(1).addClass("column_name-Index")
         $('td', row).eq(2).addClass("column_name-Sample")
diff --git a/wqflask/wqflask/templates/show_trait.html b/wqflask/wqflask/templates/show_trait.html
index 3dbf5f57..f3fa1332 100644
--- a/wqflask/wqflask/templates/show_trait.html
+++ b/wqflask/wqflask/templates/show_trait.html
@@ -254,8 +254,6 @@
                 } );
                 {% endif %}
 
-                $('#samples_primary, #samples_other').find("tr.outlier").css('background-color', 'orange')
-
                 $('.edit_sample_checkbox:checkbox').change(function() {
                     if ($(this).is(":checked")) {
                         if (!$(this).closest('tr').hasClass('selected')) {