Diffstat (limited to 'uploader/phenotypes')
-rw-r--r--  uploader/phenotypes/misc.py     26
-rw-r--r--  uploader/phenotypes/models.py  281
-rw-r--r--  uploader/phenotypes/views.py   235
3 files changed, 493 insertions, 49 deletions
diff --git a/uploader/phenotypes/misc.py b/uploader/phenotypes/misc.py
new file mode 100644
index 0000000..cbe3b7f
--- /dev/null
+++ b/uploader/phenotypes/misc.py
@@ -0,0 +1,26 @@
+"""Miscellaneous functions handling phenotypes and phenotypes data."""
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+def phenotypes_data_differences(
+        filedata: tuple[dict, ...], dbdata: tuple[dict, ...]
+) -> tuple[dict, ...]:
+    """Compute differences between file data and db data"""
+    diff = tuple()
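+    # Pair file rows with db rows by sorting both on (phenotype id, xref id),
+    # then emit a record for every sample whose file value differs from the
+    # value currently in the database.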
+    for filerow, dbrow in zip(
+            sorted(filedata, key=lambda item: (item["phenotype_id"], item["xref_id"])),
+            sorted(dbdata, key=lambda item: (item["PhenotypeId"], item["xref_id"]))):
+        for samplename, value in filerow["data"].items():
+            if value != dbrow["data"].get(samplename, {}).get("value"):
+                diff = diff + ({
+                    "PhenotypeId": filerow["phenotype_id"],
+                    "xref_id": filerow["xref_id"],
+                    "DataId": dbrow["DataId"],
+                    "StrainId": dbrow["data"].get(samplename, {}).get("StrainId"),
+                    "StrainName": samplename,
+                    "value": value
+                },)
+
+    return diff
diff --git a/uploader/phenotypes/models.py b/uploader/phenotypes/models.py
index e1ec0c9..e40155f 100644
--- a/uploader/phenotypes/models.py
+++ b/uploader/phenotypes/models.py
@@ -1,14 +1,31 @@
 """Database and utility functions for phenotypes."""
-from typing import Optional
+import logging
+import tempfile
+from pathlib import Path
 from functools import reduce
 from datetime import datetime
+from typing import Optional, Iterable
 
 import MySQLdb as mdb
 from MySQLdb.cursors import Cursor, DictCursor
-from flask import current_app as app
 
 from gn_libs.mysqldb import debug_query
 
+from functional_tools import take
+
+logger = logging.getLogger(__name__)
+
+
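+# Maps each logical phenotype-data table to its physical table name, the
+# column that holds the value, and the column that holds the data identifier.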
+__PHENO_DATA_TABLES__ = {
+    "PublishData": {
+        "table": "PublishData", "valueCol": "value", "DataIdCol": "Id"},
+    "PublishSE": {
+        "table": "PublishSE", "valueCol": "error", "DataIdCol": "DataId"},
+    "NStrain": {
+        "table": "NStrain", "valueCol": "count", "DataIdCol": "DataId"}
+}
+
+
 def datasets_by_population(
         conn: mdb.Connection,
         species_id: int,
@@ -32,10 +49,10 @@ def dataset_by_id(conn: mdb.Connection,
     """Fetch dataset details by identifier"""
     with conn.cursor(cursorclass=DictCursor) as cursor:
         cursor.execute(
-            "SELECT s.SpeciesId, pf.* FROM Species AS s "
-            "INNER JOIN InbredSet AS iset ON s.Id=iset.SpeciesId "
-            "INNER JOIN PublishFreeze AS pf ON iset.Id=pf.InbredSetId "
-            "WHERE s.Id=%s AND iset.Id=%s AND pf.Id=%s",
+            "SELECT Species.SpeciesId, PublishFreeze.* FROM Species "
+            "INNER JOIN InbredSet ON Species.Id=InbredSet.SpeciesId "
+            "INNER JOIN PublishFreeze ON InbredSet.Id=PublishFreeze.InbredSetId "
+            "WHERE Species.Id=%s AND InbredSet.Id=%s AND PublishFreeze.Id=%s",
             (species_id, population_id, dataset_id))
         return dict(cursor.fetchone())
 
@@ -75,7 +92,8 @@ def dataset_phenotypes(conn: mdb.Connection,
                        limit: Optional[int] = None) -> tuple[dict, ...]:
     """Fetch the actual phenotypes."""
     _query = (
-        "SELECT pheno.*, pxr.Id AS xref_id, ist.InbredSetCode FROM Phenotype AS pheno "
+        "SELECT pheno.*, pxr.Id AS xref_id, pxr.InbredSetId, ist.InbredSetCode "
+        "FROM Phenotype AS pheno "
         "INNER JOIN PublishXRef AS pxr ON pheno.Id=pxr.PhenotypeId "
         "INNER JOIN PublishFreeze AS pf ON pxr.InbredSetId=pf.InbredSetId "
         "INNER JOIN InbredSet AS ist ON pf.InbredSetId=ist.Id "
@@ -83,7 +101,7 @@ def dataset_phenotypes(conn: mdb.Connection,
             f" LIMIT {limit} OFFSET {offset}" if bool(limit) else "")
     with conn.cursor(cursorclass=DictCursor) as cursor:
         cursor.execute(_query, (population_id, dataset_id))
-        debug_query(cursor, app.logger)
+        debug_query(cursor, logger)
         return tuple(dict(row) for row in cursor.fetchall())
 
 
@@ -94,7 +112,7 @@ def __phenotype_se__(cursor: Cursor, xref_id, dataids_and_strainids):
     cursor.execute("SELECT * FROM PublishSE WHERE (DataId, StrainId) IN "
                    f"({paramstr})",
                    flat)
-    debug_query(cursor, app.logger)
+    debug_query(cursor, logger)
     _se = {
         (row["DataId"], row["StrainId"]): {
             "DataId": row["DataId"],
@@ -107,7 +125,7 @@ def __phenotype_se__(cursor: Cursor, xref_id, dataids_and_strainids):
     cursor.execute("SELECT * FROM NStrain WHERE (DataId, StrainId) IN "
                    f"({paramstr})",
                    flat)
-    debug_query(cursor, app.logger)
+    debug_query(cursor, logger)
     _n = {
         (row["DataId"], row["StrainId"]): {
             "DataId": row["DataId"],
@@ -137,6 +155,7 @@ def __organise_by_phenotype__(pheno, row):
             "Pre_publication_abbreviation": row["Pre_publication_abbreviation"],
             "Post_publication_abbreviation": row["Post_publication_abbreviation"],
             "xref_id": row["pxr.Id"],
+            "DataId": row["DataId"],
             "data": {
                 **(_pheno["data"] if bool(_pheno) else {}),
                 (row["DataId"], row["StrainId"]): {
@@ -200,7 +219,7 @@ def phenotype_by_id(
                 ).values())
             }
         if bool(_pheno) and len(_pheno.keys()) > 1:
-            raise Exception(
+            raise Exception(# pylint: disable=[broad-exception-raised]
                 "We found more than one phenotype with the same identifier!")
 
     return None
@@ -225,7 +244,7 @@ def phenotypes_data(conn: mdb.Connection,
                   f" LIMIT {limit} OFFSET {offset}" if bool(limit) else "")
     with conn.cursor(cursorclass=DictCursor) as cursor:
         cursor.execute(_query, (population_id, dataset_id))
-        debug_query(cursor, app.logger)
+        debug_query(cursor, logger)
         return tuple(dict(row) for row in cursor.fetchall())
 
 
@@ -252,5 +271,241 @@ def save_new_dataset(cursor: Cursor,
         "%(created)s, %(public)s, %(population_id)s, %(confidentiality)s, "
         "%(users)s)",
         params)
-    debug_query(cursor, app.logger)
+    debug_query(cursor, logger)
     return {**params, "Id": cursor.lastrowid}
+
+
+def phenotypes_data_by_ids(
+        conn: mdb.Connection,
+        inbred_pheno_xref: tuple[dict, ...]
+) -> tuple[dict, ...]:
+    """Fetch all phenotype data, filtered by the `inbred_pheno_xref` mapping."""
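+    # Each entry in `inbred_pheno_xref` is expected to carry the keys
+    # "population_id", "phenoid" and "xref_id" (see the parameters built below).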
+    _paramstr = ",".join(["(%s, %s, %s)"] * len(inbred_pheno_xref))
+    _query = ("SELECT "
+              "pub.PubMed_ID, pheno.*, pxr.*, pd.*, str.*, iset.InbredSetCode "
+              "FROM Publication AS pub "
+              "RIGHT JOIN PublishXRef AS pxr0 ON pub.Id=pxr0.PublicationId "
+              "INNER JOIN Phenotype AS pheno ON pxr0.PhenotypeId=pheno.id "
+              "INNER JOIN PublishXRef AS pxr ON pheno.Id=pxr.PhenotypeId "
+              "INNER JOIN PublishData AS pd ON pxr.DataId=pd.Id "
+              "INNER JOIN Strain AS str ON pd.StrainId=str.Id "
+              "INNER JOIN StrainXRef AS sxr ON str.Id=sxr.StrainId "
+              "INNER JOIN PublishFreeze AS pf ON sxr.InbredSetId=pf.InbredSetId "
+              "INNER JOIN InbredSet AS iset ON pf.InbredSetId=iset.InbredSetId "
+              f"WHERE (pxr.InbredSetId, pheno.Id, pxr.Id) IN ({_paramstr}) "
+              "ORDER BY pheno.Id")
+    with conn.cursor(cursorclass=DictCursor) as cursor:
+        cursor.execute(_query, tuple(item for row in inbred_pheno_xref
+                                     for item in (row["population_id"],
+                                                  row["phenoid"],
+                                                  row["xref_id"])))
+        debug_query(cursor, logger)
+        return tuple(
+            reduce(__organise_by_phenotype__, cursor.fetchall(), {}).values())
+
+
+def __pre_process_phenotype_data__(row):
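+    """Provide fallback values for optional phenotype metadata fields in `row`."""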
+    _desc = row.get("description", "")
+    _pre_pub_desc = row.get("pre_publication_description", _desc)
+    _orig_desc = row.get("original_description", _desc)
+    _post_pub_desc = row.get("post_publication_description", _orig_desc)
+    _pre_pub_abbr = row.get("pre_publication_abbreviation", row["id"])
+    _post_pub_abbr = row.get("post_publication_abbreviation", _pre_pub_abbr)
+    return {
+        "pre_publication_description": _pre_pub_desc,
+        "post_publication_description": _post_pub_desc,
+        "original_description": _orig_desc,
+        "units": row["units"],
+        "pre_publication_abbreviation": _pre_pub_abbr,
+        "post_publication_abbreviation": _post_pub_abbr
+    }
+
+
+def create_new_phenotypes(# pylint: disable=[too-many-locals]
+        conn: mdb.Connection,
+        population_id: int,
+        publication_id: int,
+        phenotypes: Iterable[dict]
+) -> tuple[dict, ...]:
+    """Add entirely new phenotypes to the database. WARNING: Not thread-safe."""
+    _phenos = tuple()
+    with conn.cursor(cursorclass=DictCursor) as cursor:
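+        # `make_next_id` returns a generator function; calling it produces a
+        # generator that yields sequential identifiers starting just after the
+        # current MAX(idcol) value in `table`.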
+        def make_next_id(idcol, table):
+            cursor.execute(f"SELECT MAX({idcol}) AS last_id FROM {table}")
+            _last_id = int(cursor.fetchone()["last_id"])
+            def __next_id__():
+                _next_id = _last_id + 1
+                while True:
+                    yield _next_id
+                    _next_id = _next_id + 1
+
+            return __next_id__
+
+        ### Bottleneck: Everything below makes this function not         ###
+        ###   thread-safe because we have to retrieve the last IDs from  ###
+        ###   the database and increment those to compute the next IDs.  ###
+        ###   This is an unfortunate result from the current schema that ###
+        ###   has a cross-reference table that requires that a phenotype ###
+        ###   be linked to an existing publication, and have data IDs to ###
+        ###   link to that phenotype's data.                             ###
+        ###   The fact that the IDs are sequential also compounds the    ###
+        ###   bottleneck.                                                ###
+        ###                                                              ###
+        ###   For extra safety, ensure the following tables are locked   ###
+        ###   for `WRITE`:                                               ###
+        ###   - PublishXRef                                              ###
+        ###   - Phenotype                                                ###
+        __next_xref_id = make_next_id("Id", "PublishXRef")()
+        __next_pheno_id__ = make_next_id("Id", "Phenotype")()
+        __next_data_id__ = make_next_id("DataId", "PublishXRef")()
+
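+        # The reducer below builds, in a single pass, both the parameter dicts
+        # for the INSERT statements and the tuple of
+        # "Pre_publication_abbreviation" values used in the uniqueness check.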
+        def __build_params_and_prepubabbrevs__(acc, row):
+            processed = __pre_process_phenotype_data__(row)
+            return (
+                acc[0] + ({
+                    **processed,
+                    "population_id": population_id,
+                    "publication_id": publication_id,
+                    "phenotype_id": next(__next_pheno_id__),
+                    "xref_id": next(__next_xref_id),
+                    "data_id": next(__next_data_id__)
+                },),
+                acc[1] + (processed["pre_publication_abbreviation"],))
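+
+        # Process the `phenotypes` iterable in batches of 1,000 records.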
+        while True:
+            batch = take(phenotypes, 1000)
+            if len(batch) == 0:
+                break
+
+            params, abbrevs = reduce(__build_params_and_prepubabbrevs__,
+                                     batch,
+                                     (tuple(), tuple()))
+            # Check for uniqueness for all "Pre_publication_description" values
+            abbrevs_paramsstr = ", ".join(["%s"] * len(abbrevs))
+            _query = ("SELECT PublishXRef.PhenotypeId, Phenotype.* "
+                      "FROM PublishXRef "
+                      "INNER JOIN Phenotype "
+                      "ON PublishXRef.PhenotypeId=Phenotype.Id "
+                      "WHERE PublishXRef.InbredSetId=%s "
+                      "AND Phenotype.Pre_publication_abbreviation IN "
+                      f"({abbrevs_paramsstr})")
+            cursor.execute(_query,
+                           ((population_id,) + abbrevs))
+            existing = tuple(row["Pre_publication_abbreviation"]
+                             for row in cursor.fetchall())
+            if len(existing) > 0:
+                # Narrow this exception, perhaps?
+                raise Exception(# pylint: disable=[broad-exception-raised]
+                    "Found already existing phenotypes with the following "
+                    "'Pre-publication abbreviations':\n\t" +
+                    "\n\t".join(f"* {item}" for item in existing))
+
+            cursor.executemany(
+                (
+                    "INSERT INTO "
+                    "Phenotype("
+                    "Id, "
+                    "Pre_publication_description, "
+                    "Post_publication_description, "
+                    "Original_description, "
+                    "Units, "
+                    "Pre_publication_abbreviation, "
+                    "Post_publication_abbreviation, "
+                    "Authorized_Users"
+                    ")"
+                    "VALUES ("
+                    "%(phenotype_id)s, "
+                    "%(pre_publication_description)s, "
+                    "%(post_publication_description)s, "
+                    "%(original_description)s, "
+                    "%(units)s, "
+                    "%(pre_publication_abbreviation)s, "
+                    "%(post_publication_abbreviation)s, "
+                    "'robwilliams'"
+                    ")"),
+                params)
+            _comments = f"Created at {datetime.now().isoformat()}"
+            cursor.executemany(
+                ("INSERT INTO PublishXRef("
+                 "Id, "
+                 "InbredSetId, "
+                 "PhenotypeId, "
+                 "PublicationId, "
+                 "DataId, "
+                 "comments"
+                 ")"
+                 "VALUES("
+                 "%(xref_id)s, "
+                 "%(population_id)s, "
+                 "%(phenotype_id)s, "
+                 "%(publication_id)s, "
+                 "%(data_id)s, "
+                 f"'{_comments}'"
+                 ")"),
+                params)
+            _phenos = _phenos + params
+
+    return _phenos
+
+
+def save_phenotypes_data(
+        conn: mdb.Connection,
+        table: str,
+        data: Iterable[dict]
+) -> int:
+    """Save new phenotypes data into the database."""
+    _table_details = __PHENO_DATA_TABLES__[table]
+    with conn.cursor(cursorclass=DictCursor) as cursor:
+        _count = 0
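+        # Insert the data in batches of at most 100,000 rows per
+        # `executemany` call, until the `data` iterable is exhausted.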
+        while True:
+            batch = take(data, 100000)
+            if len(batch) == 0:
+                logger.warning("Got an empty batch. This needs investigation.")
+                break
+
+            logger.debug("Saving batch of %s items.", len(batch))
+            cursor.executemany(
+                (f"INSERT INTO {_table_details['table']}"
+                 f"({_table_details['DataIdCol']}, StrainId, {_table_details['valueCol']}) "
+                 "VALUES "
+                 "(%(data_id)s, %(sample_id)s, %(value)s) "),
+                tuple(batch))
+            debug_query(cursor, logger)
+            _count = _count + len(batch)
+
+    logger.debug("Saved a total of %s data rows", _count)
+    return _count
+
+
+def quick_save_phenotypes_data(
+        conn: mdb.Connection,
+        table: str,
+        dataitems: Iterable[dict],
+        tmpdir: Path
+) -> int:
+    """Save data items to the database, but using `LOAD DATA LOCAL INFILE`."""
+    _table_details = __PHENO_DATA_TABLES__[table]
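+    # Rows are written to a temporary tab-separated file which is then
+    # bulk-loaded with `LOAD DATA LOCAL INFILE`; this assumes `local_infile`
+    # is enabled for both the client connection and the server.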
+    with (tempfile.NamedTemporaryFile(
+            prefix=f"{table}_data", mode="wt", dir=tmpdir) as tmpfile,
+          conn.cursor(cursorclass=DictCursor) as cursor):
+        _count = 0
+        logger.debug("Write data rows to text file.")
+        for row in dataitems:
+            tmpfile.write(
+                f'{row["data_id"]}\t{row["sample_id"]}\t{row["value"]}\n')
+            _count = _count + 1
+        tmpfile.flush()
+
+        logger.debug("Load text file into database (table: %s)",
+                      _table_details["table"])
+        cursor.execute(
+            f"LOAD DATA LOCAL INFILE '{tmpfile.name}' "
+            f"INTO TABLE {_table_details['table']} "
+            "("
+            f"{_table_details['DataIdCol']}, "
+            "StrainId, "
+            f"{_table_details['valueCol']}"
+            ")")
+        debug_query(cursor, logger)
+        return _count
diff --git a/uploader/phenotypes/views.py b/uploader/phenotypes/views.py
index dc2df8f..556b5ff 100644
--- a/uploader/phenotypes/views.py
+++ b/uploader/phenotypes/views.py
@@ -1,19 +1,26 @@
-"""Views handling ('classical') phenotypes."""
+"""Views handling ('classical') phenotypes."""# pylint: disable=[too-many-lines]
 import sys
 import uuid
 import json
-import datetime
+import logging
 from typing import Any
 from pathlib import Path
 from zipfile import ZipFile
 from functools import wraps, reduce
-from logging import INFO, ERROR, DEBUG, FATAL, CRITICAL, WARNING
+from urllib.parse import urljoin, urlparse, ParseResult, urlunparse, urlencode
+
+import datetime
 
 from redis import Redis
 from pymonad.either import Left
 from requests.models import Response
 from MySQLdb.cursors import DictCursor
+
+from gn_libs import sqlite3
+from gn_libs import jobs as gnlibs_jobs
+from gn_libs.jobs.jobs import JobNotFound
 from gn_libs.mysqldb import database_connection
+
 from flask import (flash,
                    request,
                    url_for,
@@ -22,19 +29,23 @@ from flask import (flash,
                    Blueprint,
                    current_app as app)
 
-# from r_qtl import r_qtl2 as rqtl2
 from r_qtl import r_qtl2_qc as rqc
 from r_qtl import exceptions as rqe
 
+
 from uploader import jobs
+from uploader import session
 from uploader.files import save_file#, fullpath
 from uploader.ui import make_template_renderer
 from uploader.oauth2.client import oauth2_post
+from uploader.oauth2.tokens import request_token
 from uploader.authorisation import require_login
+from uploader.oauth2 import client as oauth2client
 from uploader.route_utils import generic_select_population
 from uploader.datautils import safe_int, enumerate_sequence
 from uploader.species.models import all_species, species_by_id
 from uploader.monadic_requests import make_either_error_handler
+from uploader.publications.models import fetch_publication_by_id
 from uploader.request_checks import with_species, with_population
 from uploader.input_validation import (encode_errors,
                                        decode_errors,
@@ -241,12 +252,7 @@ def view_phenotype(# pylint: disable=[unused-argument]
                 if (key in ("PubMed_ID", "Authors", "Title", "Journal")
                     and __non_empty__(val))
             },
-            privileges=(privileges
-                        ### For demo! Do not commit this part
-                            + ("group:resource:edit-resource",
-                               "group:resource:delete-resource",)
-                        ### END: For demo! Do not commit this part
-                            ),
+            privileges=privileges,
             activelink="view-phenotype")
 
     def __fail__(error):
@@ -353,10 +359,17 @@ def process_phenotypes_individual_files(error_uri):
     bundlepath = Path(app.config["UPLOAD_FOLDER"],
                       f"{str(uuid.uuid4()).replace('-', '')}.zip")
     with ZipFile(bundlepath,mode="w") as zfile:
-        for rqtlkey, formkey in (("phenocovar", "phenotype-descriptions"),
-                                 ("pheno", "phenotype-data"),
-                                 ("phenose", "phenotype-se"),
-                                 ("phenonum", "phenotype-n")):
+        for rqtlkey, formkey, _type in (
+                ("phenocovar", "phenotype-descriptions", "mandatory"),
+                ("pheno", "phenotype-data", "mandatory"),
+                ("phenose", "phenotype-se", "optional"),
+                ("phenonum", "phenotype-n", "optional")):
+            if _type == "optional" and not bool(form.get(formkey)):
+                continue # skip if an optional key does not exist.
+
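+            # Record whether this particular file was uploaded in transposed
+            # orientation.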
+            cdata[f"{rqtlkey}_transposed"] = (
+                (form.get(f"{formkey}-transposed") or "off") == "on")
+
             if form.get("resumable-upload", False):
                 # Chunked upload of large files was used
                 filedata = json.loads(form[formkey])
@@ -365,7 +378,7 @@ def process_phenotypes_individual_files(error_uri):
                     arcname=filedata["original-name"])
                 cdata[rqtlkey] = cdata.get(rqtlkey, []) + [filedata["original-name"]]
             else:
-                # TODO: Check this path: fix any bugs.
+                # T0DO: Check this path: fix any bugs.
                 _sentfile = request.files[formkey]
                 if not bool(_sentfile):
                     flash(f"Expected file ('{formkey}') was not provided.",
@@ -379,6 +392,7 @@ def process_phenotypes_individual_files(error_uri):
                     arcname=filepath.name)
                 cdata[rqtlkey] = cdata.get(rqtlkey, []) + [filepath.name]
 
+
         zfile.writestr("control_data.json", data=json.dumps(cdata, indent=2))
 
     return bundlepath
@@ -403,10 +417,7 @@ def add_phenotypes(species: dict, population: dict, dataset: dict, **kwargs):# p
         dataset_id=dataset["Id"]))
     _redisuri = app.config["REDIS_URL"]
     _sqluri = app.config["SQL_URI"]
-    with (Redis.from_url(_redisuri, decode_responses=True) as rconn,
-          # database_connection(_sqluri) as conn,
-          # conn.cursor(cursorclass=DictCursor) as cursor
-          ):
+    with Redis.from_url(_redisuri, decode_responses=True) as rconn:
         if request.method == "GET":
             today = datetime.date.today()
             return render_template(
@@ -441,24 +452,20 @@ def add_phenotypes(species: dict, population: dict, dataset: dict, **kwargs):# p
                 [sys.executable, "-m", "scripts.rqtl2.phenotypes_qc", _sqluri,
                  _redisuri, _namespace, str(_jobid), str(species["SpeciesId"]),
                  str(population["Id"]),
-                 # str(dataset["Id"]),
              str(phenobundle),
                  "--loglevel",
-                 {
-                     INFO: "INFO",
-                     ERROR: "ERROR",
-                     DEBUG: "DEBUG",
-                     FATAL: "FATAL",
-                     CRITICAL: "CRITICAL",
-                     WARNING: "WARNING"
-                 }[app.logger.getEffectiveLevel()],
+                 logging.getLevelName(
+                     app.logger.getEffectiveLevel()
+                 ).lower(),
                  "--redisexpiry",
                  str(_ttl_seconds)], "phenotype_qc", _ttl_seconds,
                 {"job-metadata": json.dumps({
                     "speciesid": species["SpeciesId"],
                     "populationid": population["Id"],
                     "datasetid": dataset["Id"],
-                    "bundle": str(phenobundle.absolute())})}),
+                    "bundle": str(phenobundle.absolute()),
+                    **({"publicationid": request.form["publication-id"]}
+                       if request.form.get("publication-id") else {})})}),
             _redisuri,
             f"{app.config['UPLOAD_FOLDER']}/job_errors")
 
@@ -531,7 +538,8 @@ def review_job_data(
         **kwargs
 ):# pylint: disable=[unused-argument]
     """Review data one more time before entering it into the database."""
-    with Redis.from_url(app.config["REDIS_URL"], decode_responses=True) as rconn:
+    with (Redis.from_url(app.config["REDIS_URL"], decode_responses=True) as rconn,
+          database_connection(app.config["SQL_URI"]) as conn):
         try:
             job = jobs.job(rconn, jobs.jobsnamespace(), str(job_id))
         except jobs.JobNotFound as _jnf:
@@ -579,6 +587,7 @@ def review_job_data(
             filetype: __summarise__(filetype, meta)
             for filetype,meta in metadata.items()
         }
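+        # The publication, if one was selected, was stored in the job's
+        # metadata when the QC job was created in `add_phenotypes` above.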
+        _job_metadata = json.loads(job["job-metadata"])
         return render_template("phenotypes/review-job-data.html",
                                species=species,
                                population=population,
@@ -586,9 +595,103 @@ def review_job_data(
                                job_id=job_id,
                                job=job,
                                summary=summary,
+                               publication=(
+                                   fetch_publication_by_id(
+                                       conn, int(_job_metadata["publicationid"]))
+                                   if _job_metadata.get("publicationid")
+                                   else None),
                                activelink="add-phenotypes")
 
 
+def load_phenotypes_success_handler(job):
+    """Handle loading new phenotypes into the database successfully."""
+    return redirect(url_for(
+        "species.populations.phenotypes.load_data_success",
+        species_id=job["metadata"]["species_id"],
+        population_id=job["metadata"]["population_id"],
+        dataset_id=job["metadata"]["dataset_id"],
+        job_id=job["job_id"]))
+
+
+@phenotypesbp.route(
+    "<int:species_id>/populations/<int:population_id>/phenotypes/datasets"
+    "/<int:dataset_id>/load-data-to-database",
+    methods=["POST"])
+@require_login
+@with_dataset(
+    species_redirect_uri="species.populations.phenotypes.index",
+    population_redirect_uri="species.populations.phenotypes.select_population",
+    redirect_uri="species.populations.phenotypes.list_datasets")
+def load_data_to_database(
+        species: dict,
+        population: dict,
+        dataset: dict,
+        **kwargs
+):# pylint: disable=[unused-argument]
+    """Load the data from the given QC job into the database."""
+    _jobs_db = app.config["ASYNCHRONOUS_JOBS_SQLITE_DB"]
+    with (Redis.from_url(app.config["REDIS_URL"], decode_responses=True) as rconn,
+          sqlite3.connection(_jobs_db) as conn):
+        # T0DO: Maybe break the connection between the jobs here, pass:
+        # - the bundle name (rebuild the full path here.)
+        # - publication details, where separate
+        # - details about the files: e.g. total lines, etc
+        qc_job = jobs.job(rconn, jobs.jobsnamespace(), request.form["data-qc-job-id"])
+        _meta = json.loads(qc_job["job-metadata"])
+        _load_job_id = uuid.uuid4()
+        _loglevel = logging.getLevelName(app.logger.getEffectiveLevel()).lower()
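+        # Build the command that runs `scripts.load_phenotypes_to_db` as a
+        # background job against the main database and the jobs SQLite db.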
+        command = [
+            sys.executable,
+            "-u",
+            "-m",
+            "scripts.load_phenotypes_to_db",
+            app.config["SQL_URI"],
+            _jobs_db,
+            str(_load_job_id),
+            "--log-level",
+            _loglevel
+        ]
+
+        def __handle_error__(resp):
+            return render_template("http-error.html", **resp.json())
+
+        def __handle_success__(load_job):
+            app.logger.debug("The phenotypes loading job: %s", load_job)
+            return redirect(url_for(
+                "background-jobs.job_status", job_id=load_job["job_id"]))
+
+
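+        # Monadic flow: request an auth token for the current user, initialise
+        # the background job (storing the token and metadata with it), then
+        # launch it; failures fall through to `__handle_error__`.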
+        return request_token(
+            token_uri=urljoin(oauth2client.authserver_uri(), "auth/token"),
+            user_id=session.user_details()["user_id"]
+        ).then(
+            lambda token: gnlibs_jobs.initialise_job(
+                conn,
+                _load_job_id,
+                command,
+                "load-new-phenotypes-data",
+                extra_meta={
+                    "species_id": species["SpeciesId"],
+                    "population_id": population["Id"],
+                    "dataset_id": dataset["Id"],
+                    "bundle_file": _meta["bundle"],
+                    "publication_id": _meta["publicationid"],
+                    "authserver": oauth2client.authserver_uri(),
+                    "token": token["access_token"],
+                    "success_handler": (
+                        "uploader.phenotypes.views"
+                        ".load_phenotypes_success_handler")
+                })
+        ).then(
+            lambda job: gnlibs_jobs.launch_job(
+                job,
+                _jobs_db,
+                Path(f"{app.config['UPLOAD_FOLDER']}/job_errors"),
+                worker_manager="gn_libs.jobs.launcher",
+                loglevel=_loglevel)
+        ).either(__handle_error__, __handle_success__)
+
+
 def update_phenotype_metadata(conn, metadata: dict):
     """Update a phenotype's basic metadata values."""
     with conn.cursor(cursorclass=DictCursor) as cursor:
@@ -741,12 +844,7 @@ def edit_phenotype_data(# pylint: disable=[unused-argument]
     def __render__(**kwargs):
         processed_kwargs = {
             **kwargs,
-            "privileges": (kwargs.get("privileges", tuple())
-                           ### For demo! Do not commit this part
-                            + ("group:resource:edit-resource",
-                               "group:resource:delete-resource",)
-                           ### END: For demo! Do not commit this part
-                            )
+            "privileges": kwargs.get("privileges", tuple())
         }
         return render_template(
             "phenotypes/edit-phenotype.html",
@@ -844,3 +942,68 @@ def edit_phenotype_data(# pylint: disable=[unused-argument]
             population_id=population["Id"],
             dataset_id=dataset["Id"],
             xref_id=xref_id))
+
+
+@phenotypesbp.route(
+    "<int:species_id>/populations/<int:population_id>/phenotypes/datasets"
+    "/<int:dataset_id>/load-data-success/<uuid:job_id>",
+    methods=["GET"])
+@require_login
+@with_dataset(
+    species_redirect_uri="species.populations.phenotypes.index",
+    population_redirect_uri="species.populations.phenotypes.select_population",
+    redirect_uri="species.populations.phenotypes.list_datasets")
+def load_data_success(
+        species: dict,
+        population: dict,
+        dataset: dict,
+        job_id: uuid.UUID,
+        **kwargs
+):# pylint: disable=[unused-argument]
+    """Display success page if loading data to database was successful."""
+    with (database_connection(app.config["SQL_URI"]) as conn,
+          sqlite3.connection(app.config["ASYNCHRONOUS_JOBS_SQLITE_DB"])
+          as jobsconn):
+        try:
+            gn2_uri = urlparse(app.config["GN2_SERVER_URL"])
+            job = gnlibs_jobs.job(jobsconn, job_id, fulldetails=True)
+            app.logger.debug("THE JOB: %s", job)
+            _xref_ids = tuple(
+                str(item) for item
+                in json.loads(job["metadata"].get("xref_ids", "[]")))
+            _publication = fetch_publication_by_id(
+                conn, int(job["metadata"].get("publication_id", "0")))
+            _search_terms = (item for item in
+                             (str(_publication["PubMed_ID"] or ""),
+                              _publication["Authors"],
+                              (_publication["Title"] or ""))
+                             if item != "")
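+            # Build a GN2 search URI pre-filled with the new trait xref ids
+            # and the publication's PubMed id/authors/title as search terms.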
+            return render_template("phenotypes/load-phenotypes-success.html",
+                                   species=species,
+                                   population=population,
+                                   dataset=dataset,
+                                   job=job,
+                                   search_page_uri=urlunparse(ParseResult(
+                                       scheme=gn2_uri.scheme,
+                                       netloc=gn2_uri.netloc,
+                                       path="/search",
+                                       params="",
+                                       query=urlencode({
+                                           "species": species["Name"],
+                                           "group": population["Name"],
+                                           "type": "Phenotypes",
+                                           "dataset": dataset["Name"],
+                                           "search_terms_or": (
+                                               # Very long URLs will cause
+                                               # errors.
+                                               " ".join(_xref_ids)
+                                               if len(_xref_ids) <= 100
+                                               else ""),
+                                           "search_terms_and": " ".join(
+                                               _search_terms).strip(),
+                                           "accession_id": "None",
+                                           "FormID": "searchResult"
+                                       }),
+                                       fragment="")))
+        except JobNotFound as _jnf:
+            return render_template("jobs/job-not-found.html", job_id=job_id)