diff options
Diffstat (limited to 'gn3/api')
-rw-r--r--  gn3/api/async_commands.py | 16
-rw-r--r--  gn3/api/correlation.py    | 73
-rw-r--r--  gn3/api/ctl.py            | 24
-rw-r--r--  gn3/api/general.py        |  7
-rw-r--r--  gn3/api/heatmaps.py       | 21
-rw-r--r--  gn3/api/rqtl.py           |  2
6 files changed, 117 insertions, 26 deletions
diff --git a/gn3/api/async_commands.py b/gn3/api/async_commands.py new file mode 100644 index 0000000..c0cf4bb --- /dev/null +++ b/gn3/api/async_commands.py @@ -0,0 +1,16 @@ +"""Endpoints and functions concerning commands run in external processes.""" +import redis +from flask import jsonify, Blueprint + +async_commands = Blueprint("async_commands", __name__) + +@async_commands.route("/state/<command_id>") +def command_state(command_id): + """Respond with the current state of command identified by `command_id`.""" + with redis.Redis(decode_responses=True) as rconn: + state = rconn.hgetall(name=command_id) + if not state: + return jsonify( + status=404, + error="The command id provided does not exist.") + return jsonify(dict(state.items())) diff --git a/gn3/api/correlation.py b/gn3/api/correlation.py index 46121f8..7eb7cd6 100644 --- a/gn3/api/correlation.py +++ b/gn3/api/correlation.py @@ -1,13 +1,21 @@ """Endpoints for running correlations""" +import sys +from functools import reduce + +import redis from flask import jsonify from flask import Blueprint from flask import request +from flask import current_app -from gn3.computations.correlations import compute_all_sample_correlation -from gn3.computations.correlations import compute_all_lit_correlation -from gn3.computations.correlations import compute_tissue_correlation -from gn3.computations.correlations import map_shared_keys_to_values +from gn3.settings import SQL_URI +from gn3.commands import queue_cmd, compose_pcorrs_command from gn3.db_utils import database_connector +from gn3.responses.pcorrs_responses import build_response +from gn3.computations.correlations import map_shared_keys_to_values +from gn3.computations.correlations import compute_tissue_correlation +from gn3.computations.correlations import compute_all_lit_correlation +from gn3.computations.correlations import compute_all_sample_correlation correlation = Blueprint("correlation", __name__) @@ -58,17 +66,15 @@ def compute_lit_corr(species=None, 
gene_id=None): might be needed for actual computing of the correlation results """ - conn, _cursor_object = database_connector() - target_traits_gene_ids = request.get_json() - target_trait_gene_list = list(target_traits_gene_ids.items()) + with database_connector() as conn: + target_traits_gene_ids = request.get_json() + target_trait_gene_list = list(target_traits_gene_ids.items()) - lit_corr_results = compute_all_lit_correlation( - conn=conn, trait_lists=target_trait_gene_list, - species=species, gene_id=gene_id) + lit_corr_results = compute_all_lit_correlation( + conn=conn, trait_lists=target_trait_gene_list, + species=species, gene_id=gene_id) - conn.close() - - return jsonify(lit_corr_results) + return jsonify(lit_corr_results) @correlation.route("/tissue_corr/<string:corr_method>", methods=["POST"]) @@ -83,3 +89,44 @@ def compute_tissue_corr(corr_method="pearson"): corr_method=corr_method) return jsonify(results) + +@correlation.route("/partial", methods=["POST"]) +def partial_correlation(): + """API endpoint for partial correlations.""" + def trait_fullname(trait): + return f"{trait['dataset']}::{trait['trait_name']}" + + def __field_errors__(args): + def __check__(acc, field): + if args.get(field) is None: + return acc + (f"Field '{field}' missing",) + return acc + return __check__ + + def __errors__(request_data, fields): + errors = tuple() + if request_data is None: + return ("No request data",) + + return reduce(__field_errors__(request_data), fields, errors) + + args = request.get_json() + request_errors = __errors__( + args, ("primary_trait", "control_traits", "target_db", "method")) + if request_errors: + return build_response({ + "status": "error", + "messages": request_errors, + "error_type": "Client Error"}) + return build_response({ + "status": "success", + "results": queue_cmd( + conn=redis.Redis(), + cmd=compose_pcorrs_command( + trait_fullname(args["primary_trait"]), + tuple( + trait_fullname(trait) for trait in args["control_traits"]), + 
args["method"], args["target_db"], + int(args.get("criteria", 500))), + job_queue=current_app.config.get("REDIS_JOB_QUEUE"), + env = {"PYTHONPATH": ":".join(sys.path), "SQL_URI": SQL_URI})}) diff --git a/gn3/api/ctl.py b/gn3/api/ctl.py new file mode 100644 index 0000000..ac33d63 --- /dev/null +++ b/gn3/api/ctl.py @@ -0,0 +1,24 @@ +"""module contains endpoints for ctl""" + +from flask import Blueprint +from flask import request +from flask import jsonify + +from gn3.computations.ctl import call_ctl_script + +ctl = Blueprint("ctl", __name__) + + +@ctl.route("/run_ctl", methods=["POST"]) +def run_ctl(): + """endpoint to run ctl + input: request form object + output:json object enum::(response,error) + + """ + ctl_data = request.json + + (cmd_results, response) = call_ctl_script(ctl_data) + return (jsonify({ + "results": response + }), 200) if response is not None else (jsonify({"error": str(cmd_results)}), 401) diff --git a/gn3/api/general.py b/gn3/api/general.py index 69ec343..e0bfc81 100644 --- a/gn3/api/general.py +++ b/gn3/api/general.py @@ -7,7 +7,7 @@ from flask import request from gn3.fs_helpers import extract_uploaded_file from gn3.commands import run_cmd - +from gn3.db import datasets general = Blueprint("general", __name__) @@ -68,3 +68,8 @@ def run_r_qtl(geno_filestr, pheno_filestr): cmd = (f"Rscript {rqtl_wrapper} " f"{geno_filestr} {pheno_filestr}") return jsonify(run_cmd(cmd)), 201 + +@general.route("/dataset/<accession_id>") +def dataset_metadata(accession_id): + """Return info as JSON for dataset with ACCESSION_ID.""" + return jsonify(datasets.dataset_metadata(accession_id)) diff --git a/gn3/api/heatmaps.py b/gn3/api/heatmaps.py index 633a061..80c8ca8 100644 --- a/gn3/api/heatmaps.py +++ b/gn3/api/heatmaps.py @@ -24,15 +24,14 @@ def clustered_heatmaps(): return jsonify({ "message": "You need to provide at least two trait names." 
}), 400 - conn, _cursor = database_connector() - def parse_trait_fullname(trait): - name_parts = trait.split(":") - return "{dataset_name}::{trait_name}".format( - dataset_name=name_parts[1], trait_name=name_parts[0]) - traits_fullnames = [parse_trait_fullname(trait) for trait in traits_names] + with database_connector() as conn: + def parse_trait_fullname(trait): + name_parts = trait.split(":") + return f"{name_parts[1]}::{name_parts[0]}" + traits_fullnames = [parse_trait_fullname(trait) for trait in traits_names] - with io.StringIO() as io_str: - figure = build_heatmap(traits_fullnames, conn, vertical=vertical) - figure.write_json(io_str) - fig_json = io_str.getvalue() - return fig_json, 200 + with io.StringIO() as io_str: + figure = build_heatmap(traits_fullnames, conn, vertical=vertical) + figure.write_json(io_str) + fig_json = io_str.getvalue() + return fig_json, 200 diff --git a/gn3/api/rqtl.py b/gn3/api/rqtl.py index 85b2460..70ebe12 100644 --- a/gn3/api/rqtl.py +++ b/gn3/api/rqtl.py @@ -25,7 +25,7 @@ run the rqtl_wrapper script and return the results as JSON raise FileNotFoundError # Split kwargs by those with values and boolean ones that just convert to True/False - kwargs = ["model", "method", "nperm", "scale", "control_marker"] + kwargs = ["covarstruct", "model", "method", "nperm", "scale", "control_marker"] boolean_kwargs = ["addcovar", "interval", "pstrata", "pairscan"] all_kwargs = kwargs + boolean_kwargs |