author     zsloan   2021-11-11 11:23:39 -0600
committer  GitHub   2021-11-11 11:23:39 -0600
commit     8c77af63efae6f06d7c7c3269fc0e41811a8037a (patch)
tree       9ffa4b84fd36f09e772db3e218bc980999324c41 /gn3
parent     607c6e627c23c1bce3b199b145855182ab51b211 (diff)
parent     249b85102063debfeeb1b0565956059b8a3af1cf (diff)
download   genenetwork3-8c77af63efae6f06d7c7c3269fc0e41811a8037a.tar.gz
Merge branch 'main' into feature/add_rqtl_pairscan
Diffstat (limited to 'gn3')
-rw-r--r--  gn3/api/heatmaps.py                       |   6
-rw-r--r--  gn3/app.py                                |  13
-rw-r--r--  gn3/authentication.py                     | 162
-rw-r--r--  gn3/computations/biweight.py              |  27
-rw-r--r--  gn3/computations/correlations.py          |  41
-rw-r--r--  gn3/computations/partial_correlations.py  | 289
-rw-r--r--  gn3/computations/wgcna.py                 |  49
-rw-r--r--  gn3/data_helpers.py                       |  37
-rw-r--r--  gn3/db/correlations.py                    | 381
-rw-r--r--  gn3/db/species.py                         |  27
-rw-r--r--  gn3/db/traits.py                          |  93
-rw-r--r--  gn3/heatmaps.py                           | 196
-rw-r--r--  gn3/settings.py                           |  18
13 files changed, 1152 insertions(+), 187 deletions(-)
diff --git a/gn3/api/heatmaps.py b/gn3/api/heatmaps.py
index 62ca2ad..633a061 100644
--- a/gn3/api/heatmaps.py
+++ b/gn3/api/heatmaps.py
@@ -17,7 +17,9 @@ def clustered_heatmaps():
Parses the incoming data and responds with the JSON-serialized plotly figure
representing the clustered heatmap.
"""
- traits_names = request.get_json().get("traits_names", tuple())
+ heatmap_request = request.get_json()
+ traits_names = heatmap_request.get("traits_names", tuple())
+ vertical = heatmap_request.get("vertical", False)
if len(traits_names) < 2:
return jsonify({
"message": "You need to provide at least two trait names."
@@ -30,7 +32,7 @@ def clustered_heatmaps():
traits_fullnames = [parse_trait_fullname(trait) for trait in traits_names]
with io.StringIO() as io_str:
- _filename, figure = build_heatmap(traits_fullnames, conn)
+ figure = build_heatmap(traits_fullnames, conn, vertical=vertical)
figure.write_json(io_str)
fig_json = io_str.getvalue()
return fig_json, 200
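
A minimal client-side sketch of the new `vertical` option; the route, host and
trait names below are assumptions for illustration, not values taken from this
change:

    import requests

    # POST the trait names plus the new "vertical" flag; the endpoint
    # replies with a JSON-serialised plotly figure.
    response = requests.post(
        "http://localhost:8080/api/heatmaps/clustered",  # assumed route
        json={
            "traits_names": [
                "UCLA_BXDBXH_CARTILAGE_V2::ILM103710672",  # hypothetical trait
                "UCLA_BXDBXH_CARTILAGE_V2::ILM2260338",    # hypothetical trait
            ],
            "vertical": True,  # new flag: transpose the heatmap layout
        })
    figure_json = response.text
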
diff --git a/gn3/app.py b/gn3/app.py
index a25332c..3d68b3f 100644
--- a/gn3/app.py
+++ b/gn3/app.py
@@ -21,12 +21,6 @@ def create_app(config: Union[Dict, str, None] = None) -> Flask:
# Load default configuration
app.config.from_object("gn3.settings")
- CORS(
- app,
- origins=app.config["CORS_ORIGINS"],
- allow_headers=app.config["CORS_HEADERS"],
- supports_credentials=True, intercept_exceptions=False)
-
# Load environment configuration
if "GN3_CONF" in os.environ:
app.config.from_envvar('GN3_CONF')
@@ -37,6 +31,13 @@ def create_app(config: Union[Dict, str, None] = None) -> Flask:
app.config.update(config)
elif config.endswith(".py"):
app.config.from_pyfile(config)
+
+ CORS(
+ app,
+ origins=app.config["CORS_ORIGINS"],
+ allow_headers=app.config["CORS_HEADERS"],
+ supports_credentials=True, intercept_exceptions=False)
+
app.register_blueprint(general, url_prefix="/api/")
app.register_blueprint(gemma, url_prefix="/api/gemma")
app.register_blueprint(rqtl, url_prefix="/api/rqtl")
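
Moving the `CORS(...)` call below the configuration loading means user-supplied
configuration can now override the CORS settings before flask-cors reads them;
a small sketch, assuming only the defaults from `gn3.settings` otherwise:

    from gn3.app import create_app

    # The dict (or a GN3_CONF file, or a .py config file) is applied first,
    # so a CORS_ORIGINS set here now takes effect.
    app = create_app({"CORS_ORIGINS": ["https://genenetwork.org"]})
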
diff --git a/gn3/authentication.py b/gn3/authentication.py
new file mode 100644
index 0000000..6719631
--- /dev/null
+++ b/gn3/authentication.py
@@ -0,0 +1,162 @@
+"""Methods for interacting with gn-proxy."""
+import functools
+import json
+import uuid
+import datetime
+
+from urllib.parse import urljoin
+from enum import Enum, unique
+from typing import Dict, List, Optional, Union
+
+from redis import Redis
+import requests
+
+
+@functools.total_ordering
+class OrderedEnum(Enum):
+ """A class that ordered Enums in order of position"""
+ @classmethod
+ @functools.lru_cache(None)
+ def _member_list(cls):
+ return list(cls)
+
+ def __lt__(self, other):
+ if self.__class__ is other.__class__:
+ member_list = self.__class__._member_list()
+ return member_list.index(self) < member_list.index(other)
+ return NotImplemented
+
+
+@unique
+class DataRole(OrderedEnum):
+ """Enums for Data Access"""
+ NO_ACCESS = "no-access"
+ VIEW = "view"
+ EDIT = "edit"
+
+
+@unique
+class AdminRole(OrderedEnum):
+ """Enums for Admin status"""
+ NOT_ADMIN = "not-admin"
+ EDIT_ACCESS = "edit-access"
+ EDIT_ADMINS = "edit-admins"
+
+
+def get_user_membership(conn: Redis, user_id: str,
+ group_id: str) -> Dict:
+ """Return a dictionary that indicates whether the `user_id` is a
+ member or admin of `group_id`.
+
+ Args:
+ - conn: a Redis Connection with the responses decoded.
+ - user_id: a user's unique id
+ e.g. '8ad942fe-490d-453e-bd37-56f252e41603'
+ - group_id: a group's unique id
+ e.g. '7fa95d07-0e2d-4bc5-b47c-448fdc1260b2'
+
+ Returns:
+ A dict indicating whether the user is an admin or a member of
+ the group: {"member": True, "admin": False}
+
+ """
+ results = {"member": False, "admin": False}
+ for key, value in conn.hgetall('groups').items():
+ if key == group_id:
+ group_info = json.loads(value)
+ if user_id in group_info.get("admins"):
+ results["admin"] = True
+ if user_id in group_info.get("members"):
+ results["member"] = True
+ break
+ return results
+
+
+def get_highest_user_access_role(
+ resource_id: str,
+ user_id: str,
+ gn_proxy_url: str = "http://localhost:8080") -> Dict:
+ """Get the highest access roles for a given user
+
+ Args:
+ - resource_id: The unique id of a given resource.
+ - user_id: The unique id of a given user.
+ - gn_proxy_url: The URL where gn-proxy is running.
+
+ Returns:
+ A dict indicating the highest access role the user has.
+
+ """
+ role_mapping: Dict[str, Union[DataRole, AdminRole]] = {}
+ for data_role, admin_role in zip(DataRole, AdminRole):
+ role_mapping.update({data_role.value: data_role, })
+ role_mapping.update({admin_role.value: admin_role, })
+ access_role = {}
+ response = requests.get(urljoin(gn_proxy_url,
+ ("available?resource="
+ f"{resource_id}&user={user_id}")))
+ for key, value in json.loads(response.content).items():
+ access_role[key] = max(map(lambda role: role_mapping[role], value))
+ return access_role
+
+
+def get_groups_by_user_uid(user_uid: str, conn: Redis) -> Dict:
+ """Given a user uid, get the groups in which they are a member or admin of.
+
+ Args:
+ - user_uid: A user's unique id
+ - conn: A redis connection
+
+ Returns:
+ - A dictionary containing the list of groups the user is part of e.g.:
+ {"admin": [], "member": ["ce0dddd1-6c50-4587-9eec-6c687a54ad86"]}
+ """
+ admin = []
+ member = []
+ for uuid, group_info in conn.hgetall("groups").items():
+ group_info = json.loads(group_info)
+ group_info["uuid"] = uuid
+ if user_uid in group_info.get('admins'):
+ admin.append(group_info)
+ if user_uid in group_info.get('members'):
+ member.append(group_info)
+ return {
+ "admin": admin,
+ "member": member,
+ }
+
+
+def get_user_info_by_key(key: str, value: str,
+ conn: Redis) -> Optional[Dict]:
+ """Given a key, get a user's information if value is matched"""
+ if key != "user_id":
+ for uuid, user_info in conn.hgetall("users").items():
+ user_info = json.loads(user_info)
+ if (key in user_info and
+ user_info.get(key) == value):
+ user_info["user_id"] = uuid
+ return user_info
+ elif key == "user_id":
+ if user_info := conn.hget("users", value):
+ user_info = json.loads(user_info)
+ user_info["user_id"] = value
+ return user_info
+ return None
+
+
+def create_group(conn: Redis, group_name: Optional[str],
+ admin_user_uids: List = [],
+ member_user_uids: List = []) -> Optional[Dict]:
+ """Create a group given the group name, members and admins of that group."""
+ if group_name and bool(admin_user_uids + member_user_uids):
+ timestamp = datetime.datetime.utcnow().strftime('%b %d %Y %I:%M%p')
+ group = {
+ "id": (group_id := str(uuid.uuid4())),
+ "admins": admin_user_uids,
+ "members": member_user_uids,
+ "name": group_name,
+ "created_timestamp": timestamp,
+ "changed_timestamp": timestamp,
+ }
+ conn.hset("groups", group_id, json.dumps(group))
+ return group
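
A usage sketch for the new module, assuming a local Redis instance with decoded
responses; the user id is hypothetical:

    from redis import Redis
    from gn3.authentication import create_group, get_user_membership

    conn = Redis(decode_responses=True)
    admin_uid = "8ad942fe-490d-453e-bd37-56f252e41603"  # hypothetical user id
    group = create_group(conn, "editors", admin_user_uids=[admin_uid])
    get_user_membership(conn, admin_uid, group["id"])
    # => {"member": False, "admin": True}
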
diff --git a/gn3/computations/biweight.py b/gn3/computations/biweight.py
deleted file mode 100644
index 7accd0c..0000000
--- a/gn3/computations/biweight.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""module contains script to call biweight midcorrelation in R"""
-import subprocess
-
-from typing import List
-from typing import Tuple
-
-from gn3.settings import BIWEIGHT_RSCRIPT
-
-
-def calculate_biweight_corr(trait_vals: List,
- target_vals: List,
- path_to_script: str = BIWEIGHT_RSCRIPT,
- command: str = "Rscript"
- ) -> Tuple[float, float]:
- """biweight function"""
-
- args_1 = ' '.join(str(trait_val) for trait_val in trait_vals)
- args_2 = ' '.join(str(target_val) for target_val in target_vals)
- cmd = [command, path_to_script] + [args_1] + [args_2]
-
- results = subprocess.check_output(cmd, universal_newlines=True)
- try:
- (corr_coeff, p_val) = tuple(
- [float(y.strip()) for y in results.split()])
- return (corr_coeff, p_val)
- except Exception as error:
- raise error
diff --git a/gn3/computations/correlations.py b/gn3/computations/correlations.py
index bb13ff1..c5c56db 100644
--- a/gn3/computations/correlations.py
+++ b/gn3/computations/correlations.py
@@ -1,6 +1,7 @@
"""module contains code for correlations"""
import math
import multiprocessing
+from contextlib import closing
from typing import List
from typing import Tuple
@@ -8,7 +9,7 @@ from typing import Optional
from typing import Callable
import scipy.stats
-from gn3.computations.biweight import calculate_biweight_corr
+import pingouin as pg
def map_shared_keys_to_values(target_sample_keys: List,
@@ -49,13 +50,9 @@ def normalize_values(a_values: List,
([2.3, 4.1, 5], [3.4, 6.2, 4.1], 3)
"""
- a_new = []
- b_new = []
for a_val, b_val in zip(a_values, b_values):
if (a_val and b_val is not None):
- a_new.append(a_val)
- b_new.append(b_val)
- return a_new, b_new, len(a_new)
+ yield a_val, b_val
def compute_corr_coeff_p_value(primary_values: List, target_values: List,
@@ -81,8 +78,10 @@ def compute_sample_r_correlation(trait_name, corr_method, trait_vals,
correlation coeff and p value
"""
- (sanitized_traits_vals, sanitized_target_vals,
- num_overlap) = normalize_values(trait_vals, target_samples_vals)
+
+ sanitized_traits_vals, sanitized_target_vals = list(
+ zip(*list(normalize_values(trait_vals, target_samples_vals))))
+ num_overlap = len(sanitized_traits_vals)
if num_overlap > 5:
@@ -102,11 +101,10 @@ package :not packaged in guix
"""
- try:
- results = calculate_biweight_corr(x_val, y_val)
- return results
- except Exception as error:
- raise error
+ results = pg.corr(x_val, y_val, method="bicor")
+ corr_coeff = results["r"].values[0]
+ p_val = results["p-val"].values[0]
+ return (corr_coeff, p_val)
def filter_shared_sample_keys(this_samplelist,
@@ -115,13 +113,9 @@ def filter_shared_sample_keys(this_samplelist,
filter the values using the shared keys
"""
- this_vals = []
- target_vals = []
for key, value in target_samplelist.items():
if key in this_samplelist:
- target_vals.append(value)
- this_vals.append(this_samplelist[key])
- return (this_vals, target_vals)
+ yield this_samplelist[key], value
def fast_compute_all_sample_correlation(this_trait,
@@ -140,9 +134,10 @@ def fast_compute_all_sample_correlation(this_trait,
for target_trait in target_dataset:
trait_name = target_trait.get("trait_id")
target_trait_data = target_trait["trait_sample_data"]
- processed_values.append((trait_name, corr_method, *filter_shared_sample_keys(
- this_trait_samples, target_trait_data)))
- with multiprocessing.Pool(4) as pool:
+ processed_values.append((trait_name, corr_method,
+ list(zip(*list(filter_shared_sample_keys(
+ this_trait_samples, target_trait_data))))))
+ with closing(multiprocessing.Pool()) as pool:
results = pool.starmap(compute_sample_r_correlation, processed_values)
for sample_correlation in results:
@@ -173,8 +168,8 @@ def compute_all_sample_correlation(this_trait,
for target_trait in target_dataset:
trait_name = target_trait.get("trait_id")
target_trait_data = target_trait["trait_sample_data"]
- this_vals, target_vals = filter_shared_sample_keys(
- this_trait_samples, target_trait_data)
+ this_vals, target_vals = list(zip(*list(filter_shared_sample_keys(
+ this_trait_samples, target_trait_data))))
sample_correlation = compute_sample_r_correlation(
trait_name=trait_name,
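
The biweight midcorrelation is now computed in-process with pingouin instead of
shelling out to an R script; a minimal sketch of the call it relies on, with
made-up values:

    import pingouin as pg

    x = [3.1, 2.8, 4.5, 5.0, 3.9, 4.1]
    y = [2.9, 3.0, 4.2, 5.1, 3.7, 4.4]
    result = pg.corr(x, y, method="bicor")  # returns a pandas DataFrame
    corr_coeff = result["r"].values[0]
    p_val = result["p-val"].values[0]
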
diff --git a/gn3/computations/partial_correlations.py b/gn3/computations/partial_correlations.py
new file mode 100644
index 0000000..07dc16d
--- /dev/null
+++ b/gn3/computations/partial_correlations.py
@@ -0,0 +1,289 @@
+"""
+This module deals with partial correlations.
+
+It is an attempt to migrate over the partial correlations feature from
+GeneNetwork1.
+"""
+
+from functools import reduce
+from typing import Any, Tuple, Sequence
+from scipy.stats import pearsonr, spearmanr
+
+from gn3.settings import TEXTDIR
+from gn3.data_helpers import parse_csv_line
+
+def control_samples(controls: Sequence[dict], sampleslist: Sequence[str]):
+ """
+ Fetches data for the control traits.
+
+ This migrates `web/webqtl/correlation/correlationFunction.controlStrain` in
+ GN1, with a few modifications to the arguments passed in.
+
+ PARAMETERS:
+ controls: A sequence of trait-data dicts, one per control trait. Equivalent to the `cvals`
+ value in the corresponding source function in GN1.
+ sampleslist: A list of samples. Equivalent to `strainlst` in the
+ corresponding source function in GN1
+ """
+ def __process_control__(trait_data):
+ def __process_sample__(acc, sample):
+ if sample in trait_data["data"].keys():
+ sample_item = trait_data["data"][sample]
+ val = sample_item["value"]
+ if val is not None:
+ return (
+ acc[0] + (sample,),
+ acc[1] + (val,),
+ acc[2] + (sample_item["variance"],))
+ return acc
+ return reduce(
+ __process_sample__, sampleslist, (tuple(), tuple(), tuple()))
+
+ return reduce(
+ lambda acc, item: (
+ acc[0] + (item[0],),
+ acc[1] + (item[1],),
+ acc[2] + (item[2],),
+ acc[3] + (len(item[0]),),
+ ),
+ [__process_control__(trait_data) for trait_data in controls],
+ (tuple(), tuple(), tuple(), tuple()))
+
+def dictify_by_samples(samples_vals_vars: Sequence[Sequence]) -> Sequence[dict]:
+ """
+ Build a sequence of dictionaries from a sequence of separate sequences of
+ samples, values and variances.
+
+ This is a partial migration of
+ `web.webqtl.correlation.correlationFunction.fixStrains` function in GN1.
+ This implementation extracts code that will find common use, and that will
+ find use in more than one place.
+ """
+ return tuple(
+ {
+ sample: {"sample_name": sample, "value": val, "variance": var}
+ for sample, val, var in zip(*trait_line)
+ } for trait_line in zip(*(samples_vals_vars[0:3])))
+
+def fix_samples(primary_trait: dict, control_traits: Sequence[dict]) -> Sequence[Sequence[Any]]:
+ """
+ Corrects sample_names, values and variance such that they all contain only
+ those samples that are common to the reference trait and all control traits.
+
+ This is a partial migration of the
+ `web.webqtl.correlation.correlationFunction.fixStrain` function in GN1.
+ """
+ primary_samples = tuple(
+ present[0] for present in
+ ((sample, all(sample in control.keys() for control in control_traits))
+ for sample in primary_trait.keys())
+ if present[1])
+ control_vals_vars: tuple = reduce(
+ lambda acc, x: (acc[0] + (x[0],), acc[1] + (x[1],)),
+ ((item["value"], item["variance"])
+ for sublist in [tuple(control.values()) for control in control_traits]
+ for item in sublist),
+ (tuple(), tuple()))
+ return (
+ primary_samples,
+ tuple(primary_trait[sample]["value"] for sample in primary_samples),
+ control_vals_vars[0],
+ tuple(primary_trait[sample]["variance"] for sample in primary_samples),
+ control_vals_vars[1])
+
+def find_identical_traits(
+ primary_name: str, primary_value: float, control_names: Tuple[str, ...],
+ control_values: Tuple[float, ...]) -> Tuple[str, ...]:
+ """
+ Find traits that have the same value when the values are considered to
+ 3 decimal places.
+
+ This is a migration of the
+ `web.webqtl.correlation.correlationFunction.findIdenticalTraits` function in
+ GN1.
+ """
+ def __merge_identicals__(
+ acc: Tuple[str, ...],
+ ident: Tuple[str, Tuple[str, ...]]) -> Tuple[str, ...]:
+ return acc + ident[1]
+
+ def __dictify_controls__(acc, control_item):
+ ckey = "{:.3f}".format(control_item[0])
+ return {**acc, ckey: acc.get(ckey, tuple()) + (control_item[1],)}
+
+ return (reduce(## for identical control traits
+ __merge_identicals__,
+ (item for item in reduce(# type: ignore[var-annotated]
+ __dictify_controls__, zip(control_values, control_names),
+ {}).items() if len(item[1]) > 1),
+ tuple())
+ or
+ reduce(## If no identical control traits, try primary and controls
+ __merge_identicals__,
+ (item for item in reduce(# type: ignore[var-annotated]
+ __dictify_controls__,
+ zip((primary_value,) + control_values,
+ (primary_name,) + control_names), {}).items()
+ if len(item[1]) > 1),
+ tuple()))
+
+def tissue_correlation(
+ primary_trait_values: Tuple[float, ...],
+ target_trait_values: Tuple[float, ...],
+ method: str) -> Tuple[float, float]:
+ """
+ Compute the correlation between the primary trait values, and the values of
+ a single target trait.
+
+ This migrates the `cal_tissue_corr` function embedded in the larger
+ `web.webqtl.correlation.correlationFunction.batchCalTissueCorr` function in
+ GeneNetwork1.
+ """
+ def spearman_corr(*args):
+ result = spearmanr(*args)
+ return (result.correlation, result.pvalue)
+
+ method_fns = {"pearson": pearsonr, "spearman": spearman_corr}
+
+ assert len(primary_trait_values) == len(target_trait_values), (
+ "The lengths of the `primary_trait_values` and `target_trait_values` "
+ "must be equal")
+ assert method in method_fns.keys(), (
+ "Method must be one of: {}".format(",".join(method_fns.keys())))
+
+ corr, pvalue = method_fns[method](primary_trait_values, target_trait_values)
+ return (round(corr, 10), round(pvalue, 10))
+
+def batch_computed_tissue_correlation(
+ primary_trait_values: Tuple[float, ...], target_traits_dict: dict,
+ method: str) -> Tuple[dict, dict]:
+ """
+ This is a migration of the
+ `web.webqtl.correlation.correlationFunction.batchCalTissueCorr` function in
+ GeneNetwork1
+ """
+ def __corr__(acc, target):
+ corr = tissue_correlation(primary_trait_values, target[1], method)
+ return ({**acc[0], target[0]: corr[0]}, {**acc[1], target[0]: corr[1]})
+ return reduce(__corr__, target_traits_dict.items(), ({}, {}))
+
+def correlations_of_all_tissue_traits(
+ primary_trait_symbol_value_dict: dict, symbol_value_dict: dict,
+ method: str) -> Tuple[dict, dict]:
+ """
+ Computes and returns the correlation of all tissue traits.
+
+ This is a migration of the
+ `web.webqtl.correlation.correlationFunction.calculateCorrOfAllTissueTrait`
+ function in GeneNetwork1.
+ """
+ primary_trait_values = tuple(primary_trait_symbol_value_dict.values())[0]
+ return batch_computed_tissue_correlation(
+ primary_trait_values, symbol_value_dict, method)
+
+def good_dataset_samples_indexes(
+ samples: Tuple[str, ...],
+ samples_from_file: Tuple[str, ...]) -> Tuple[int, ...]:
+ """
+ Return the indexes of the items in `samples_from_file` that are also found
+ in `samples`.
+
+ This is a partial migration of the
+ `web.webqtl.correlation.PartialCorrDBPage.getPartialCorrelationsFast`
+ function in GeneNetwork1.
+ """
+ return tuple(sorted(
+ samples_from_file.index(good) for good in
+ set(samples).intersection(set(samples_from_file))))
+
+def determine_partials(
+ primary_vals, control_vals, all_target_trait_names,
+ all_target_trait_values, method):
+ """
+ This **WILL** be a migration of
+ `web.webqtl.correlation.correlationFunction.determinePartialsByR` function
+ in GeneNetwork1.
+
+ The function in GeneNetwork1 contains code written in R that is then used to
+ compute the partial correlations.
+ """
+ ## This function is not implemented at this stage
+ return (
+ primary_vals, control_vals, all_target_trait_names,
+ all_target_trait_values, method)
+
+def compute_partial_correlations_fast(# pylint: disable=[R0913, R0914]
+ samples, primary_vals, control_vals, database_filename,
+ fetched_correlations, method: str, correlation_type: str) -> Tuple[
+ float, Tuple[float, ...]]:
+ """
+ This is a partial migration of the
+ `web.webqtl.correlation.PartialCorrDBPage.getPartialCorrelationsFast`
+ function in GeneNetwork1.
+ """
+ assert method in ("spearman", "pearson")
+ with open(f"{TEXTDIR}/{database_filename}", "r") as dataset_file:
+ dataset = tuple(dataset_file.readlines())
+
+ good_dataset_samples = good_dataset_samples_indexes(
+ samples, parse_csv_line(dataset[0])[1:])
+
+ def __process_trait_names_and_values__(acc, line):
+ trait_line = parse_csv_line(line)
+ trait_name = trait_line[0]
+ trait_data = trait_line[1:]
+ if trait_name in fetched_correlations.keys():
+ return (
+ acc[0] + (trait_name,),
+ acc[1] + tuple(
+ trait_data[i] if i in good_dataset_samples else None
+ for i in range(len(trait_data))))
+ return acc
+
+ processed_trait_names_values: tuple = reduce(
+ __process_trait_names_and_values__, dataset[1:], (tuple(), tuple()))
+ all_target_trait_names: Tuple[str, ...] = processed_trait_names_values[0]
+ all_target_trait_values: Tuple[float, ...] = processed_trait_names_values[1]
+
+ all_correlations = determine_partials(
+ primary_vals, control_vals, all_target_trait_names,
+ all_target_trait_values, method)
+ ## Line 772 to 779 in GN1 are the cause of the weird complexity in the
+ ## return below. Once the surrounding code is successfully migrated and
+ ## reworked, this complexity might go away, by getting rid of the
+ ## `correlation_type` parameter
+ return len(all_correlations), tuple(
+ corr + (
+ (fetched_correlations[corr[0]],) if correlation_type == "literature"
+ else fetched_correlations[corr[0]][0:2])
+ for idx, corr in enumerate(all_correlations))
+
+def partial_correlation_matrix(
+ xdata: Tuple[float, ...], ydata: Tuple[float, ...],
+ zdata: Tuple[float, ...], method: str = "pearsons",
+ omit_nones: bool = True) -> float:
+ """
+ Computes the partial correlation coefficient using the
+ 'variance-covariance matrix' method
+
+ This is a partial migration of the
+ `web.webqtl.correlation.correlationFunction.determinPartialsByR` function in
+ GeneNetwork1, specifically the `pcor.mat` function written in the R
+ programming language.
+ """
+ return 0
+
+def partial_correlation_recursive(
+ xdata: Tuple[float, ...], ydata: Tuple[float, ...],
+ zdata: Tuple[float, ...], method: str = "pearsons",
+ omit_nones: bool = True) -> float:
+ """
+ Computes the partial correlation coefficient using the 'recursive formula'
+ method
+
+ This is a partial migration of the
+ `web.webqtl.correlation.correlationFunction.determinPartialsByR` function in
+ GeneNetwork1, specifically the `pcor.rec` function written in the R
+ programming language.
+ """
+ return 0
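
A worked example for `tissue_correlation`, with made-up values:

    from gn3.computations.partial_correlations import tissue_correlation

    primary = (12.1, 14.3, 9.8, 16.2)
    target = (11.9, 14.8, 10.1, 15.7)
    corr, pvalue = tissue_correlation(primary, target, method="pearson")
    # both values are rounded to 10 decimal places
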
diff --git a/gn3/computations/wgcna.py b/gn3/computations/wgcna.py
index fd508fa..ab12fe7 100644
--- a/gn3/computations/wgcna.py
+++ b/gn3/computations/wgcna.py
@@ -3,8 +3,11 @@
import os
import json
import uuid
-from gn3.settings import TMPDIR
+import subprocess
+import base64
+
+from gn3.settings import TMPDIR
from gn3.commands import run_cmd
@@ -14,12 +17,46 @@ def dump_wgcna_data(request_data: dict):
temp_file_path = os.path.join(TMPDIR, filename)
+ request_data["TMPDIR"] = TMPDIR
+
with open(temp_file_path, "w") as output_file:
json.dump(request_data, output_file)
return temp_file_path
+def stream_cmd_output(socketio, request_data, cmd: str):
+ """function to stream in realtime"""
+ # xtodo syncing and closing /edge cases
+
+ socketio.emit("output", {"data": f"calling you script {cmd}"},
+ namespace="/", room=request_data["socket_id"])
+ results = subprocess.Popen(
+ cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
+
+ if results.stdout is not None:
+
+ for line in iter(results.stdout.readline, b""):
+ socketio.emit("output",
+ {"data": line.decode("utf-8").rstrip()},
+ namespace="/", room=request_data["socket_id"])
+
+ socketio.emit(
+ "output", {"data":
+ "parsing the output results"}, namespace="/",
+ room=request_data["socket_id"])
+
+
+def process_image(image_loc: str) -> bytes:
+ """encode the image"""
+
+ try:
+ with open(image_loc, "rb") as image_file:
+ return base64.b64encode(image_file.read())
+ except FileNotFoundError:
+ return b""
+
+
def compose_wgcna_cmd(rscript_path: str, temp_file_path: str):
"""function to componse wgcna cmd"""
# (todo):issue relative paths to abs paths
@@ -32,6 +69,8 @@ def call_wgcna_script(rscript_path: str, request_data: dict):
generated_file = dump_wgcna_data(request_data)
cmd = compose_wgcna_cmd(rscript_path, generated_file)
+ # stream_cmd_output(socketio, request_data, cmd)  # streaming of output is disabled for now
+
try:
run_cmd_results = run_cmd(cmd)
@@ -40,8 +79,14 @@ def call_wgcna_script(rscript_path: str, request_data: dict):
if run_cmd_results["code"] != 0:
return run_cmd_results
+
+ output_file_data = json.load(outputfile)
+ output_file_data["output"]["image_data"] = process_image(
+ output_file_data["output"]["imageLoc"]).decode("ascii")
+ # JSON only carries unicode strings; consumers re-decode this field to recover the image bytes
+
return {
- "data": json.load(outputfile),
+ "data": output_file_data,
**run_cmd_results
}
except FileNotFoundError:
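
The image handling added here is a plain base64 round-trip so the R-generated
plot survives the JSON response; a sketch with a hypothetical file path:

    import base64
    from gn3.computations.wgcna import process_image

    encoded = process_image("/tmp/wgcna_heatmap.png").decode("ascii")
    # ...travels inside the JSON payload as "image_data"...
    png_bytes = base64.b64decode(encoded)  # the consumer rebuilds the image
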
diff --git a/gn3/data_helpers.py b/gn3/data_helpers.py
new file mode 100644
index 0000000..d3f942b
--- /dev/null
+++ b/gn3/data_helpers.py
@@ -0,0 +1,37 @@
+"""
+This module will hold generic functions that can operate on a wide-array of
+data structures.
+"""
+
+from math import ceil
+from functools import reduce
+from typing import Any, Tuple, Sequence, Optional
+
+def partition_all(num: int, items: Sequence[Any]) -> Tuple[Tuple[Any, ...], ...]:
+ """
+ Given a sequence `items`, return a tuple of tuples with the data
+ partitioned into sections of `num` items per partition.
+
+ This is an approximation of clojure's `partition-all` function.
+ """
+ def __compute_start_stop__(acc, iteration):
+ start = iteration * num
+ return acc + ((start, start + num),)
+
+ iterations = range(ceil(len(items) / num))
+ return tuple([# type: ignore[misc]
+ tuple(items[start:stop]) for start, stop # type: ignore[has-type]
+ in reduce(
+ __compute_start_stop__, iterations, tuple())])
+
+def parse_csv_line(
+ line: str, delimiter: str = ",",
+ quoting: Optional[str] = '"') -> Tuple[str, ...]:
+ """
+ Parses a line from a CSV file into a tuple of strings.
+
+ This is a migration of the `web.webqtl.utility.webqtlUtil.readLineCSV`
+ function in GeneNetwork1.
+ """
+ return tuple(
+ col.strip("{} \t\n".format(quoting or "")) for col in line.split(delimiter))
diff --git a/gn3/db/correlations.py b/gn3/db/correlations.py
new file mode 100644
index 0000000..06b3310
--- /dev/null
+++ b/gn3/db/correlations.py
@@ -0,0 +1,381 @@
+"""
+This module will hold functions that are used in the (partial) correlations
+feature to access the database to retrieve data needed for computations.
+"""
+
+from functools import reduce
+from typing import Any, Dict, Tuple
+
+from gn3.random import random_string
+from gn3.data_helpers import partition_all
+from gn3.db.species import translate_to_mouse_gene_id
+
+from gn3.computations.partial_correlations import correlations_of_all_tissue_traits
+
+def get_filename(target_db_name: str, conn: Any) -> str:
+ """
+ Retrieve the name of the reference database file with which correlations are
+ computed.
+
+ This is a migration of the
+ `web.webqtl.correlation.CorrelationPage.getFileName` function in
+ GeneNetwork1.
+ """
+ with conn.cursor() as cursor:
+ cursor.execute(
+ "SELECT Id, FullName from ProbeSetFreeze WHERE Name-%s",
+ target_db_name)
+ result = cursor.fetchone()
+ if result:
+ return "ProbeSetFreezeId_{tid}_FullName_{fname}.txt".format(
+ tid=result[0],
+ fname=result[1].replace(' ', '_').replace('/', '_'))
+
+ return ""
+
+def build_temporary_literature_table(
+ species: str, gene_id: int, return_number: int, conn: Any) -> str:
+ """
+ Build and populate a temporary table to hold the literature correlation data
+ to be used in computations.
+
+ "This is a migration of the
+ `web.webqtl.correlation.CorrelationPage.getTempLiteratureTable` function in
+ GeneNetwork1.
+ """
+ def __translated_species_id(row, cursor):
+ if species == "mouse":
+ return row[1]
+ query = {
+ "rat": "SELECT rat FROM GeneIDXRef WHERE mouse=%s",
+ "human": "SELECT human FROM GeneIDXRef WHERE mouse=%d"}
+ if species in query.keys():
+ cursor.execute(query[species], row[1])
+ record = cursor.fetchone()
+ if record:
+ return record[0]
+ return None
+ return None
+
+ temp_table_name = f"TOPLITERATURE{random_string(8)}"
+ with conn.cursor() as cursor:
+ mouse_geneid = translate_to_mouse_gene_id(species, gene_id, conn)
+ data_query = (
+ "SELECT GeneId1, GeneId2, value FROM LCorrRamin3 "
+ "WHERE GeneId1 = %(mouse_gene_id)s "
+ "UNION ALL "
+ "SELECT GeneId2, GeneId1, value FROM LCorrRamin3 "
+ "WHERE GeneId2 = %(mouse_gene_id)s "
+ "AND GeneId1 != %(mouse_gene_id)s")
+ cursor.execute(
+ (f"CREATE TEMPORARY TABLE {temp_table_name} ("
+ "GeneId1 int(12) unsigned, "
+ "GeneId2 int(12) unsigned PRIMARY KEY, "
+ "value double)"))
+ cursor.execute(data_query, mouse_gene_id=mouse_geneid)
+ literature_data = [
+ {"GeneId1": row[0], "GeneId2": row[1], "value": row[2]}
+ for row in cursor.fetchall()
+ if __translated_species_id(row, cursor)]
+
+ cursor.executemany(
+ (f"INSERT INTO {temp_table_name} "
+ "VALUES (%(GeneId1)s, %(GeneId2)s, %(value)s)"),
+ literature_data[0:(2 * return_number)])
+
+ return temp_table_name
+
+def fetch_geno_literature_correlations(temp_table: str) -> str:
+ """
+ Helper function for `fetch_literature_correlations` below, to build query
+ for `Geno*` tables.
+ """
+ return (
+ f"SELECT Geno.Name, {temp_table}.value "
+ "FROM Geno, GenoXRef, GenoFreeze "
+ f"LEFT JOIN {temp_table} ON {temp_table}.GeneId2=ProbeSet.GeneId "
+ "WHERE ProbeSet.GeneId IS NOT NULL "
+ f"AND {temp_table}.value IS NOT NULL "
+ "AND GenoXRef.GenoFreezeId = GenoFreeze.Id "
+ "AND GenoFreeze.Name = %(db_name)s "
+ "AND Geno.Id=GenoXRef.GenoId "
+ "ORDER BY Geno.Id")
+
+def fetch_probeset_literature_correlations(temp_table: str) -> str:
+ """
+ Helper function for `fetch_literature_correlations` below, to build query
+ for `ProbeSet*` tables.
+ """
+ return (
+ f"SELECT ProbeSet.Name, {temp_table}.value "
+ "FROM ProbeSet, ProbeSetXRef, ProbeSetFreeze "
+ "LEFT JOIN {temp_table} ON {temp_table}.GeneId2=ProbeSet.GeneId "
+ "WHERE ProbeSet.GeneId IS NOT NULL "
+ "AND {temp_table}.value IS NOT NULL "
+ "AND ProbeSetXRef.ProbeSetFreezeId = ProbeSetFreeze.Id "
+ "AND ProbeSetFreeze.Name = %(db_name)s "
+ "AND ProbeSet.Id=ProbeSetXRef.ProbeSetId "
+ "ORDER BY ProbeSet.Id")
+
+def fetch_literature_correlations(
+ species: str, gene_id: int, dataset: dict, return_number: int,
+ conn: Any) -> dict:
+ """
+ Gather the literature correlation data and pair it with trait id string(s).
+
+ This is a migration of the
+ `web.webqtl.correlation.CorrelationPage.fetchLitCorrelations` function in
+ GeneNetwork1.
+ """
+ temp_table = build_temporary_literature_table(
+ species, gene_id, return_number, conn)
+ query_fns = {
+ "Geno": fetch_geno_literature_correlations,
+ # "Temp": fetch_temp_literature_correlations,
+ # "Publish": fetch_publish_literature_correlations,
+ "ProbeSet": fetch_probeset_literature_correlations}
+ with conn.cursor() as cursor:
+ cursor.execute(
+ query_fns[dataset["dataset_type"]](temp_table),
+ db_name=dataset["dataset_name"])
+ results = cursor.fetchall()
+ cursor.execute("DROP TEMPORARY TABLE %s", temp_table)
+ return dict(results)
+
+def fetch_symbol_value_pair_dict(
+ symbol_list: Tuple[str, ...], data_id_dict: dict,
+ conn: Any) -> Dict[str, Tuple[float, ...]]:
+ """
+ Map each gene symbol to its corresponding tissue expression data.
+
+ This is a migration of the
+ `web.webqtl.correlation.correlationFunction.getSymbolValuePairDict` function
+ in GeneNetwork1.
+ """
+ data_ids = {
+ symbol: data_id_dict.get(symbol) for symbol in symbol_list
+ if data_id_dict.get(symbol) is not None
+ }
+ query = "SELECT Id, value FROM TissueProbeSetData WHERE Id IN %(data_ids)s"
+ with conn.cursor() as cursor:
+ cursor.execute(
+ query,
+ data_ids=tuple(data_ids.values()))
+ value_results = cursor.fetchall()
+ return {
+ key: tuple(row[1] for row in value_results if row[0] == key)
+ for key in data_ids.keys()
+ }
+
+def fetch_gene_symbol_tissue_value_dict(
+ symbol_list: Tuple[str, ...], data_id_dict: dict, conn: Any,
+ limit_num: int = 1000) -> dict:
+ """
+ Wrapper function for `gn3.db.correlations.fetch_symbol_value_pair_dict`.
+
+ This is a migration of the
+ `web.webqtl.correlation.correlationFunction.getGeneSymbolTissueValueDict` in
+ GeneNetwork1.
+ """
+ count = len(symbol_list)
+ if count != 0 and count <= limit_num:
+ return fetch_symbol_value_pair_dict(symbol_list, data_id_dict, conn)
+
+ if count > limit_num:
+ return {
+ key: value for dct in [
+ fetch_symbol_value_pair_dict(sl, data_id_dict, conn)
+ for sl in partition_all(limit_num, symbol_list)]
+ for key, value in dct.items()
+ }
+
+ return {}
+
+def fetch_tissue_probeset_xref_info(
+ gene_name_list: Tuple[str, ...], probeset_freeze_id: int,
+ conn: Any) -> Tuple[tuple, dict, dict, dict, dict, dict, dict]:
+ """
+ Retrieve the ProbeSet XRef information for tissues.
+
+ This is a migration of the
+ `web.webqtl.correlation.correlationFunction.getTissueProbeSetXRefInfo`
+ function in GeneNetwork1."""
+ with conn.cursor() as cursor:
+ if len(gene_name_list) == 0:
+ query = (
+ "SELECT t.Symbol, t.GeneId, t.DataId, t.Chr, t.Mb, "
+ "t.description, t.Probe_Target_Description "
+ "FROM "
+ "("
+ " SELECT Symbol, max(Mean) AS maxmean "
+ " FROM TissueProbeSetXRef "
+ " WHERE TissueProbeSetFreezeId=%(probeset_freeze_id)s "
+ " AND Symbol != '' "
+ " AND Symbol IS NOT NULL "
+ " GROUP BY Symbol"
+ ") AS x "
+ "INNER JOIN TissueProbeSetXRef AS t ON t.Symbol = x.Symbol "
+ "AND t.Mean = x.maxmean")
+ cursor.execute(query, probeset_freeze_id=probeset_freeze_id)
+ else:
+ query = (
+ "SELECT t.Symbol, t.GeneId, t.DataId, t.Chr, t.Mb, "
+ "t.description, t.Probe_Target_Description "
+ "FROM "
+ "("
+ " SELECT Symbol, max(Mean) AS maxmean "
+ " FROM TissueProbeSetXRef "
+ " WHERE TissueProbeSetFreezeId=%(probeset_freeze_id)s "
+ " AND Symbol in %(symbols)s "
+ " GROUP BY Symbol"
+ ") AS x "
+ "INNER JOIN TissueProbeSetXRef AS t ON t.Symbol = x.Symbol "
+ "AND t.Mean = x.maxmean")
+ cursor.execute(
+ query, probeset_freeze_id=probeset_freeze_id,
+ symbols=tuple(gene_name_list))
+
+ results = cursor.fetchall()
+
+ return reduce(
+ lambda acc, item: (
+ acc[0] + (item[0],),
+ {**acc[1], item[0].lower(): item[1]},
+ {**acc[2], item[0].lower(): item[2]},
+ {**acc[3], item[0].lower(): item[3]},
+ {**acc[4], item[0].lower(): item[4]},
+ {**acc[5], item[0].lower(): item[5]},
+ {**acc[6], item[0].lower(): item[6]}),
+ results or tuple(),
+ (tuple(), {}, {}, {}, {}, {}, {}))
+
+def fetch_gene_symbol_tissue_value_dict_for_trait(
+ gene_name_list: Tuple[str, ...], probeset_freeze_id: int,
+ conn: Any) -> dict:
+ """
+ Fetches a map of the gene symbols to the tissue values.
+
+ This is a migration of the
+ `web.webqtl.correlation.correlationFunction.getGeneSymbolTissueValueDictForTrait`
+ function in GeneNetwork1.
+ """
+ xref_info = fetch_tissue_probeset_xref_info(
+ gene_name_list, probeset_freeze_id, conn)
+ if xref_info[0]:
+ return fetch_gene_symbol_tissue_value_dict(xref_info[0], xref_info[2], conn)
+ return {}
+
+def build_temporary_tissue_correlations_table(
+ trait_symbol: str, probeset_freeze_id: int, method: str,
+ return_number: int, conn: Any) -> str:
+ """
+ Build a temporary table to hold the tissue correlations data.
+
+ This is a migration of the
+ `web.webqtl.correlation.CorrelationPage.getTempTissueCorrTable` function in
+ GeneNetwork1."""
+ # We should probably pass the `correlations_of_all_tissue_traits` function
+ # as an argument to this function and get rid of the one call immediately
+ # following this comment.
+ symbol_corr_dict, symbol_p_value_dict = correlations_of_all_tissue_traits(
+ fetch_gene_symbol_tissue_value_dict_for_trait(
+ (trait_symbol,), probeset_freeze_id, conn),
+ fetch_gene_symbol_tissue_value_dict_for_trait(
+ tuple(), probeset_freeze_id, conn),
+ method)
+
+ symbol_corr_list = sorted(
+ symbol_corr_dict.items(), key=lambda key_val: key_val[1])
+
+ temp_table_name = f"TOPTISSUE{random_string(8)}"
+ create_query = (
+ "CREATE TEMPORARY TABLE {temp_table_name}"
+ "(Symbol varchar(100) PRIMARY KEY, Correlation float, PValue float)")
+ insert_query = (
+ f"INSERT INTO {temp_table_name}(Symbol, Correlation, PValue) "
+ " VALUES (%(symbol)s, %(correlation)s, %(pvalue)s)")
+
+ with conn.cursor() as cursor:
+ cursor.execute(create_query)
+ cursor.executemany(
+ insert_query,
+ tuple({
+ "symbol": symbol,
+ "correlation": corr,
+ "pvalue": symbol_p_value_dict[symbol]
+ } for symbol, corr in symbol_corr_list[0: 2 * return_number]))
+
+ return temp_table_name
+
+def fetch_tissue_correlations(# pylint: disable=R0913
+ dataset: dict, trait_symbol: str, probeset_freeze_id: int, method: str,
+ return_number: int, conn: Any) -> dict:
+ """
+ Pair tissue correlations data with a trait id string.
+
+ This is a migration of the
+ `web.webqtl.correlation.CorrelationPage.fetchTissueCorrelations` function in
+ GeneNetwork1.
+ """
+ temp_table = build_temporary_tissue_correlations_table(
+ trait_symbol, probeset_freeze_id, method, return_number, conn)
+ with conn.cursor() as cursor:
+ cursor.execute(
+ (
+ f"SELECT ProbeSet.Name, {temp_table}.Correlation, "
+ f"{temp_table}.PValue "
+ "FROM (ProbeSet, ProbeSetXRef, ProbeSetFreeze) "
+ "LEFT JOIN {temp_table} ON {temp_table}.Symbol=ProbeSet.Symbol "
+ "WHERE ProbeSetFreeze.Name = %(db_name) "
+ "AND ProbeSetFreeze.Id=ProbeSetXRef.ProbeSetFreezeId "
+ "AND ProbeSet.Id = ProbeSetXRef.ProbeSetId "
+ "AND ProbeSet.Symbol IS NOT NULL "
+ "AND %s.Correlation IS NOT NULL"),
+ db_name=dataset["dataset_name"])
+ results = cursor.fetchall()
+ cursor.execute("DROP TEMPORARY TABLE %s", temp_table)
+ return {
+ trait_name: (tiss_corr, tiss_p_val)
+ for trait_name, tiss_corr, tiss_p_val in results}
+
+def check_for_literature_info(conn: Any, geneid: int) -> bool:
+ """
+ Checks the database to find out whether the trait with `geneid` has any
+ associated literature.
+
+ This is a migration of the
+ `web.webqtl.correlation.CorrelationPage.checkForLitInfo` function in
+ GeneNetwork1.
+ """
+ query = "SELECT 1 FROM LCorrRamin3 WHERE GeneId1=%s LIMIT 1"
+ with conn.cursor() as cursor:
+ cursor.execute(query, geneid)
+ result = cursor.fetchone()
+ if result:
+ return True
+
+ return False
+
+def check_symbol_for_tissue_correlation(
+ conn: Any, tissue_probeset_freeze_id: int, symbol: str = "") -> bool:
+ """
+ Checks whether a symbol has any associated tissue correlations.
+
+ This is a migration of the
+ `web.webqtl.correlation.CorrelationPage.checkSymbolForTissueCorr` function
+ in GeneNetwork1.
+ """
+ query = (
+ "SELECT 1 FROM TissueProbeSetXRef "
+ "WHERE TissueProbeSetFreezeId=%(probeset_freeze_id)s "
+ "AND Symbol=%(symbol)s LIMIT 1")
+ with conn.cursor() as cursor:
+ cursor.execute(
+ query, probeset_freeze_id=tissue_probeset_freeze_id, symbol=symbol)
+ result = cursor.fetchone()
+ if result:
+ return True
+
+ return False
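
A usage sketch for the existence checks at the bottom of this module; the
driver choice, credentials and geneid are assumptions:

    import MySQLdb  # assumption: any DB-API driver for the GN database works
    from gn3.db.correlations import check_for_literature_info

    conn = MySQLdb.connect(db="db_webqtl", user="webqtlout", passwd="...")  # hypothetical
    if check_for_literature_info(conn, 14432):  # hypothetical geneid
        ...  # safe to go on and fetch literature correlations
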
diff --git a/gn3/db/species.py b/gn3/db/species.py
index 0deae4e..702a9a8 100644
--- a/gn3/db/species.py
+++ b/gn3/db/species.py
@@ -30,3 +30,30 @@ def get_chromosome(name: str, is_species: bool, conn: Any) -> Optional[Tuple]:
with conn.cursor() as cursor:
cursor.execute(_sql)
return cursor.fetchall()
+
+def translate_to_mouse_gene_id(species: str, geneid: int, conn: Any) -> int:
+ """
+ Translate rat or human geneid to mouse geneid
+
+ This is a migration of the
+ `web.webqtl.correlation/CorrelationPage.translateToMouseGeneID` function in
+ GN1
+ """
+ assert species in ("rat", "mouse", "human"), "Invalid species"
+ if geneid is None:
+ return 0
+
+ if species == "mouse":
+ return geneid
+
+ with conn.cursor() as cursor:
+ query = {
+ "rat": "SELECT mouse FROM GeneIDXRef WHERE rat = %s",
+ "human": "SELECT mouse FROM GeneIDXRef WHERE human = %s"
+ }
+ cursor.execute(query[species], geneid)
+ translated_gene_id = cursor.fetchone()
+ if translated_gene_id:
+ return translated_gene_id[0]
+
+ return 0 # default if all else fails
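
Usage sketch for the translation helper; the driver, credentials and geneid
are assumptions:

    import MySQLdb  # assumption: any DB-API driver for the GN database works
    from gn3.db.species import translate_to_mouse_gene_id

    conn = MySQLdb.connect(db="db_webqtl", user="webqtlout", passwd="...")  # hypothetical
    translate_to_mouse_gene_id("rat", 25521, conn)  # hypothetical rat geneid
    # => the mapped mouse geneid, or 0 when GeneIDXRef has no entry
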
diff --git a/gn3/db/traits.py b/gn3/db/traits.py
index f2673c8..1c6aaa7 100644
--- a/gn3/db/traits.py
+++ b/gn3/db/traits.py
@@ -1,12 +1,81 @@
"""This class contains functions relating to trait data manipulation"""
import os
+from functools import reduce
from typing import Any, Dict, Union, Sequence
+
from gn3.settings import TMPDIR
from gn3.random import random_string
from gn3.function_helpers import compose
from gn3.db.datasets import retrieve_trait_dataset
+def export_trait_data(
+ trait_data: dict, samplelist: Sequence[str], dtype: str = "val",
+ var_exists: bool = False, n_exists: bool = False):
+ """
+ Export data according to `samplelist`. Mostly used in calculating
+ correlations.
+
+ DESCRIPTION:
+ Migrated from
+ https://github.com/genenetwork/genenetwork1/blob/master/web/webqtl/base/webqtlTrait.py#L166-L211
+
+ PARAMETERS
+ trait: (dict)
+ The dictionary of key-value pairs representing a trait
+ samplelist: (list)
+ A list of sample names
+ dtype: (str)
+ ... verify what this is ...
+ var_exists: (bool)
+ A flag indicating existence of variance
+ n_exists: (bool)
+ A flag indicating existence of ndata
+ """
+ def __export_all_types(tdata, sample):
+ sample_data = []
+ if tdata[sample]["value"]:
+ sample_data.append(tdata[sample]["value"])
+ if var_exists:
+ if tdata[sample]["variance"]:
+ sample_data.append(tdata[sample]["variance"])
+ else:
+ sample_data.append(None)
+ if n_exists:
+ if tdata[sample]["ndata"]:
+ sample_data.append(tdata[sample]["ndata"])
+ else:
+ sample_data.append(None)
+ else:
+ if var_exists and n_exists:
+ sample_data += [None, None, None]
+ elif var_exists or n_exists:
+ sample_data += [None, None]
+ else:
+ sample_data.append(None)
+
+ return tuple(sample_data)
+
+ def __exporter(accumulator, sample):
+ # pylint: disable=[R0911]
+ if sample in trait_data["data"]:
+ if dtype == "val":
+ return accumulator + (trait_data["data"][sample]["value"], )
+ if dtype == "var":
+ return accumulator + (trait_data["data"][sample]["variance"], )
+ if dtype == "N":
+ return accumulator + (trait_data["data"][sample]["ndata"], )
+ if dtype == "all":
+ return accumulator + __export_all_types(trait_data["data"], sample)
+ raise KeyError("Type `%s` is incorrect" % dtype)
+ if var_exists and n_exists:
+ return accumulator + (None, None, None)
+ if var_exists or n_exists:
+ return accumulator + (None, None)
+ return accumulator + (None,)
+
+ return reduce(__exporter, samplelist, tuple())
+
def get_trait_csv_sample_data(conn: Any,
trait_name: int, phenotype_id: int):
"""Fetch a trait and return it as a csv string"""
@@ -674,3 +743,27 @@ def generate_traits_filename(base_path: str = TMPDIR):
"""Generate a unique filename for use with generated traits files."""
return "{}/traits_test_file_{}.txt".format(
os.path.abspath(base_path), random_string(10))
+
+def export_informative(trait_data: dict, inc_var: bool = False) -> tuple:
+ """
+ Export informative strain
+
+ This is a migration of the `exportInformative` function in
+ web/webqtl/base/webqtlTrait.py module in GeneNetwork1.
+
+ There is a chance that the original implementation has a bug, especially
+ in its handling of the `inc_var` value. If `inc_var` is meant to control
+ the inclusion of the `variance` value, then both the current implementation
+ and the one in GN1 have a bug.
+ """
+ def __exporter__(acc, data_item):
+ if not inc_var or data_item["variance"] is not None:
+ return (
+ acc[0] + (data_item["sample_name"],),
+ acc[1] + (data_item["value"],),
+ acc[2] + (data_item["variance"],))
+ return acc
+ return reduce(
+ __exporter__,
+ filter(lambda td: td["value"] is not None, trait_data["data"].values()),
+ (tuple(), tuple(), tuple()))
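
A worked example for `export_informative`, with made-up trait data:

    from gn3.db.traits import export_informative

    trait_data = {"data": {
        "BXD1": {"sample_name": "BXD1", "value": 7.51, "variance": 0.30},
        "BXD2": {"sample_name": "BXD2", "value": None, "variance": 0.12},
        "BXD3": {"sample_name": "BXD3", "value": 6.82, "variance": None},
    }}
    export_informative(trait_data)
    # => (("BXD1", "BXD3"), (7.51, 6.82), (0.3, None))
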
diff --git a/gn3/heatmaps.py b/gn3/heatmaps.py
index adbfbc6..bf9dfd1 100644
--- a/gn3/heatmaps.py
+++ b/gn3/heatmaps.py
@@ -14,6 +14,7 @@ from plotly.subplots import make_subplots # type: ignore
from gn3.settings import TMPDIR
from gn3.random import random_string
from gn3.computations.slink import slink
+from gn3.db.traits import export_trait_data
from gn3.computations.correlations2 import compute_correlation
from gn3.db.genotypes import (
build_genotype_file, load_genotype_samples)
@@ -26,72 +27,6 @@ from gn3.computations.qtlreaper import (
parse_reaper_main_results,
organise_reaper_main_results)
-def export_trait_data(
- trait_data: dict, samplelist: Sequence[str], dtype: str = "val",
- var_exists: bool = False, n_exists: bool = False):
- """
- Export data according to `samplelist`. Mostly used in calculating
- correlations.
-
- DESCRIPTION:
- Migrated from
- https://github.com/genenetwork/genenetwork1/blob/master/web/webqtl/base/webqtlTrait.py#L166-L211
-
- PARAMETERS
- trait: (dict)
- The dictionary of key-value pairs representing a trait
- samplelist: (list)
- A list of sample names
- dtype: (str)
- ... verify what this is ...
- var_exists: (bool)
- A flag indicating existence of variance
- n_exists: (bool)
- A flag indicating existence of ndata
- """
- def __export_all_types(tdata, sample):
- sample_data = []
- if tdata[sample]["value"]:
- sample_data.append(tdata[sample]["value"])
- if var_exists:
- if tdata[sample]["variance"]:
- sample_data.append(tdata[sample]["variance"])
- else:
- sample_data.append(None)
- if n_exists:
- if tdata[sample]["ndata"]:
- sample_data.append(tdata[sample]["ndata"])
- else:
- sample_data.append(None)
- else:
- if var_exists and n_exists:
- sample_data += [None, None, None]
- elif var_exists or n_exists:
- sample_data += [None, None]
- else:
- sample_data.append(None)
-
- return tuple(sample_data)
-
- def __exporter(accumulator, sample):
- # pylint: disable=[R0911]
- if sample in trait_data["data"]:
- if dtype == "val":
- return accumulator + (trait_data["data"][sample]["value"], )
- if dtype == "var":
- return accumulator + (trait_data["data"][sample]["variance"], )
- if dtype == "N":
- return accumulator + (trait_data["data"][sample]["ndata"], )
- if dtype == "all":
- return accumulator + __export_all_types(trait_data["data"], sample)
- raise KeyError("Type `%s` is incorrect" % dtype)
- if var_exists and n_exists:
- return accumulator + (None, None, None)
- if var_exists or n_exists:
- return accumulator + (None, None)
- return accumulator + (None,)
-
- return reduce(__exporter, samplelist, tuple())
def trait_display_name(trait: Dict):
"""
@@ -168,7 +103,9 @@ def get_loci_names(
__get_trait_loci, [v[1] for v in organised.items()], {})
return tuple(loci_dict[_chr] for _chr in chromosome_names)
-def build_heatmap(traits_names, conn: Any):
+def build_heatmap(
+ traits_names: Sequence[str], conn: Any,
+ vertical: bool = False) -> go.Figure:
"""
heatmap function
@@ -220,17 +157,21 @@ def build_heatmap(traits_names, conn: Any):
zip(traits_ids,
[traits[idx]["trait_fullname"] for idx in traits_order]))
- return generate_clustered_heatmap(
+ return clustered_heatmap(
process_traits_data_for_heatmap(
organised, traits_ids, chromosome_names),
clustered,
- "single_heatmap_{}".format(random_string(10)),
- y_axis=tuple(
- ordered_traits_names[traits_ids[order]]
- for order in traits_order),
- y_label="Traits",
- x_axis=chromosome_names,
- x_label="Chromosomes",
+ x_axis={
+ "label": "Chromosomes",
+ "data": chromosome_names
+ },
+ y_axis={
+ "label": "Traits",
+ "data": tuple(
+ ordered_traits_names[traits_ids[order]]
+ for order in traits_order)
+ },
+ vertical=vertical,
loci_names=get_loci_names(organised, chromosome_names))
def compute_traits_order(slink_data, neworder: tuple = tuple()):
@@ -349,68 +290,81 @@ def process_traits_data_for_heatmap(data, trait_names, chromosome_names):
for chr_name in chromosome_names]
return hdata
-def generate_clustered_heatmap(
- data, clustering_data, image_filename_prefix, x_axis=None,
- x_label: str = "", y_axis=None, y_label: str = "",
+def clustered_heatmap(
+ data: Sequence[Sequence[float]], clustering_data: Sequence[float],
+ x_axis,#: Dict[Union[str, int], Union[str, Sequence[str]]],
+ y_axis: Dict[str, Union[str, Sequence[str]]],
loci_names: Sequence[Sequence[str]] = tuple(),
- output_dir: str = TMPDIR,
- colorscale=((0.0, '#0000FF'), (0.5, '#00FF00'), (1.0, '#FF0000'))):
+ vertical: bool = False,
+ colorscale: Sequence[Sequence[Union[float, str]]] = (
+ (0.0, '#0000FF'), (0.5, '#00FF00'), (1.0, '#FF0000'))) -> go.Figure:
"""
Generate a dendrogram, and heatmaps for each chromosome, and put them all
into one plot.
"""
# pylint: disable=[R0913, R0914]
- num_cols = 1 + len(x_axis)
+ x_axis_data = x_axis["data"]
+ y_axis_data = y_axis["data"]
+ num_plots = 1 + len(x_axis_data)
fig = make_subplots(
- rows=1,
- cols=num_cols,
- shared_yaxes="rows",
+ rows=num_plots if vertical else 1,
+ cols=1 if vertical else num_plots,
+ shared_xaxes="columns" if vertical else False,
+ shared_yaxes=False if vertical else "rows",
+ vertical_spacing=0.010,
horizontal_spacing=0.001,
- subplot_titles=["distance"] + x_axis,
+ subplot_titles=["" if vertical else x_axis["label"]] + [
+ "Chromosome: {}".format(chromo) if vertical else chromo
+ for chromo in x_axis_data],
figure=ff.create_dendrogram(
- np.array(clustering_data), orientation="right", labels=y_axis))
+ np.array(clustering_data),
+ orientation="bottom" if vertical else "right",
+ labels=y_axis_data))
hms = [go.Heatmap(
name=chromo,
- x=loci,
- y=y_axis,
+ x=y_axis_data if vertical else loci,
+ y=loci if vertical else y_axis_data,
z=data_array,
+ transpose=vertical,
showscale=False)
for chromo, data_array, loci
- in zip(x_axis, data, loci_names)]
+ in zip(x_axis_data, data, loci_names)]
for i, heatmap in enumerate(hms):
- fig.add_trace(heatmap, row=1, col=(i + 2))
-
- fig.update_layout(
- {
- "width": 1500,
- "height": 800,
- "xaxis": {
+ fig.add_trace(
+ heatmap,
+ row=((i + 2) if vertical else 1),
+ col=(1 if vertical else (i + 2)))
+
+ axes_layouts = {
+ "{axis}axis{count}".format(
+ axis=("y" if vertical else "x"),
+ count=(i+1 if i > 0 else "")): {
"mirror": False,
- "showgrid": True,
- "title": x_label
- },
- "yaxis": {
- "title": y_label
+ "showticklabels": i == 0,
+ "ticks": "outside" if i == 0 else ""
}
- })
+ for i in range(num_plots)}
- x_axes_layouts = {
- "xaxis{}".format(i+1 if i > 0 else ""): {
- "mirror": False,
- "showticklabels": i == 0,
- "ticks": "outside" if i == 0 else ""
- }
- for i in range(num_cols)}
+ print("vertical?: {} ==> {}".format("T" if vertical else "F", axes_layouts))
- fig.update_layout(
- {
- "width": 4000,
- "height": 800,
- "yaxis": {
- "mirror": False,
- "ticks": ""
- },
- **x_axes_layouts})
+ fig.update_layout({
+ "width": 800 if vertical else 4000,
+ "height": 4000 if vertical else 800,
+ "{}axis".format("x" if vertical else "y"): {
+ "mirror": False,
+ "ticks": "",
+ "side": "top" if vertical else "left",
+ "title": y_axis["label"],
+ "tickangle": 90 if vertical else 0,
+ "ticklabelposition": "outside top" if vertical else "outside left"
+ },
+ "{}axis".format("y" if vertical else "x"): {
+ "mirror": False,
+ "showgrid": True,
+ "title": "Distance",
+ "side": "right" if vertical else "top"
+ },
+ **axes_layouts})
fig.update_traces(
showlegend=False,
colorscale=colorscale,
@@ -418,7 +372,5 @@ def generate_clustered_heatmap(
fig.update_traces(
showlegend=True,
showscale=True,
- selector={"name": x_axis[-1]})
- image_filename = "{}/{}.html".format(output_dir, image_filename_prefix)
- fig.write_html(image_filename)
- return image_filename, fig
+ selector={"name": x_axis_data[-1]})
+ return fig
diff --git a/gn3/settings.py b/gn3/settings.py
index 150d96d..57c63df 100644
--- a/gn3/settings.py
+++ b/gn3/settings.py
@@ -22,9 +22,6 @@ SQLALCHEMY_TRACK_MODIFICATIONS = False
GN2_BASE_URL = "http://www.genenetwork.org/"
-# biweight script
-BIWEIGHT_RSCRIPT = "~/genenetwork3/scripts/calculate_biweight.R"
-
# wgcna script
WGCNA_RSCRIPT = "wgcna_analysis.R"
# qtlreaper command
@@ -35,13 +32,24 @@ GENOTYPE_FILES = os.environ.get(
"GENOTYPE_FILES", "{}/genotype_files/genotype".format(os.environ.get("HOME")))
# CROSS-ORIGIN SETUP
-CORS_ORIGINS = [
+def parse_env_cors(default):
+ """Parse comma-separated configuration into list of strings."""
+ origins_str = os.environ.get("CORS_ORIGINS", None)
+ if origins_str:
+ return [
+ origin.strip() for origin in origins_str.split(",") if origin != ""]
+ return default
+
+CORS_ORIGINS = parse_env_cors([
"http://localhost:*",
"http://127.0.0.1:*"
-]
+])
CORS_HEADERS = [
"Content-Type",
"Authorization",
"Access-Control-Allow-Credentials"
]
+
+GNSHARE = os.environ.get("GNSHARE", "/gnshare/gn/")
+TEXTDIR = f"{GNSHARE}/web/ProbeSetFreeze_DataMatrix"