Diffstat (limited to 'uploader')
-rw-r--r--  uploader/db/datasets.py          |  4
-rw-r--r--  uploader/phenotypes/models.py    |  5
-rw-r--r--  uploader/phenotypes/views.py     | 23
-rw-r--r--  uploader/publications/models.py  |  4
-rw-r--r--  uploader/publications/pubmed.py  |  4
-rw-r--r--  uploader/publications/views.py   | 10
-rw-r--r--  uploader/samples/views.py        |  8
-rw-r--r--  uploader/species/models.py       |  2
8 files changed, 33 insertions(+), 27 deletions(-)
diff --git a/uploader/db/datasets.py b/uploader/db/datasets.py
index 767ec41..4b263f5 100644
--- a/uploader/db/datasets.py
+++ b/uploader/db/datasets.py
@@ -53,7 +53,7 @@ def probeset_study_by_id(conn: mdb.Connection, studyid) -> Optional[dict]:
         _study = cursor.fetchone()
         return dict(_study) if bool(_study) else None
 
-def probeset_create_study(conn: mdb.Connection,#pylint: disable=[too-many-arguments]
+def probeset_create_study(conn: mdb.Connection,#pylint: disable=[too-many-arguments, too-many-positional-arguments]
                           populationid: int,
                           platformid: int,
                           tissueid: int,
@@ -87,7 +87,7 @@ def probeset_create_study(conn: mdb.Connection,#pylint: disable=[too-many-argume
                        (studyid, studyid))
         return {**studydata, "studyid": studyid}
 
-def probeset_create_dataset(conn: mdb.Connection,#pylint: disable=[too-many-arguments]
+def probeset_create_dataset(conn: mdb.Connection,#pylint: disable=[too-many-arguments, too-many-positional-arguments]
                             studyid: int,
                             averageid: int,
                             datasetname: str,
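
Note on the pylint pragmas above: pylint 3.3 added too-many-positional-arguments (R0917) alongside the older too-many-arguments (R0913), so functions that previously carried one disable now need both. A hedged alternative sketch (parameter list abridged from the function above): a bare "*" makes the trailing parameters keyword-only, which satisfies R0917 since that check only counts positional parameters.

    # Sketch, not the repo's actual signature: keyword-only parameters do
    # not count towards R0917 (R0913 still counts them, though).
    def probeset_create_study_sketch(conn,
                                     *,
                                     populationid: int,
                                     platformid: int,
                                     tissueid: int):
        ...

The trade-off is that every caller must then pass these arguments by name, which is presumably why the disables were preferred here.
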
diff --git a/uploader/phenotypes/models.py b/uploader/phenotypes/models.py
index 4a1b2d5..4b8b223 100644
--- a/uploader/phenotypes/models.py
+++ b/uploader/phenotypes/models.py
@@ -92,7 +92,8 @@ def dataset_phenotypes(conn: mdb.Connection,
                        limit: Optional[int] = None) -> tuple[dict, ...]:
     """Fetch the actual phenotypes."""
     _query = (
-        "SELECT pheno.*, pxr.Id AS xref_id, pxr.InbredSetId, ist.InbredSetCode FROM Phenotype AS pheno "
+        "SELECT pheno.*, pxr.Id AS xref_id, pxr.InbredSetId, ist.InbredSetCode "
+        "FROM Phenotype AS pheno "
         "INNER JOIN PublishXRef AS pxr ON pheno.Id=pxr.PhenotypeId "
         "INNER JOIN PublishFreeze AS pf ON pxr.InbredSetId=pf.InbredSetId "
         "INNER JOIN InbredSet AS ist ON pf.InbredSetId=ist.Id "
@@ -218,7 +219,7 @@ def phenotype_by_id(
                 ).values())
             }
         if bool(_pheno) and len(_pheno.keys()) > 1:
-            raise Exception(
+            raise Exception(# pylint: disable=[broad-exception-raised]
                 "We found more than one phenotype with the same identifier!")
 
     return None
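
Note on the broad-exception-raised disable above: W0719 exists because a bare Exception gives callers nothing specific to catch. A sketch of the usual alternative, with an illustrative (not the repo's) exception name:

    # Sketch: a narrow, domain-specific exception instead of bare Exception.
    class DuplicatePhenotypeIdentifier(Exception):
        """More than one phenotype matched a single identifier."""

    def assert_unique(pheno: dict):
        if bool(pheno) and len(pheno.keys()) > 1:
            raise DuplicatePhenotypeIdentifier(
                "We found more than one phenotype with the same identifier!")
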
diff --git a/uploader/phenotypes/views.py b/uploader/phenotypes/views.py
index 04cdc3b..0e626ad 100644
--- a/uploader/phenotypes/views.py
+++ b/uploader/phenotypes/views.py
@@ -385,7 +385,7 @@ def process_phenotypes_individual_files(error_uri):
                     arcname=filedata["original-name"])
                 cdata[rqtlkey] = cdata.get(rqtlkey, []) + [filedata["original-name"]]
             else:
-                # TODO: Check this path: fix any bugs.
+                # T0DO: Check this path: fix any bugs.
                 _sentfile = request.files[formkey]
                 if not bool(_sentfile):
                     flash(f"Expected file ('{formkey}') was not provided.",
@@ -640,12 +640,16 @@ def load_data_to_database(
         **kwargs
 ):# pylint: disable=[unused-argument]
     """Load the data from the given QC job into the database."""
-    jobs_db = app.config["ASYNCHRONOUS_JOBS_SQLITE_DB"]
+    _jobs_db = app.config["ASYNCHRONOUS_JOBS_SQLITE_DB"]
     with (Redis.from_url(app.config["REDIS_URL"], decode_responses=True) as rconn,
-          sqlite3.connection(jobs_db) as conn):
+          sqlite3.connection(_jobs_db) as conn):
+        # T0DO: Maybe break the connection between the jobs here, pass:
+        # - the bundle name (rebuild the full path here.)
+        # - publication details, where separate
+        # - details about the files: e.g. total lines, etc
         qc_job = jobs.job(rconn, jobs.jobsnamespace(), request.form["data-qc-job-id"])
         _meta = json.loads(qc_job["job-metadata"])
-        load_job_id = uuid.uuid4()
+        _load_job_id = uuid.uuid4()
         _loglevel = logging.getLevelName(app.logger.getEffectiveLevel()).lower()
         command = [
             sys.executable,
@@ -653,8 +657,8 @@ def load_data_to_database(
             "-m",
             "scripts.load_phenotypes_to_db",
             app.config["SQL_URI"],
-            jobs_db,
-            str(load_job_id),
+            _jobs_db,
+            str(_load_job_id),
             "--log-level",
             _loglevel
         ]
@@ -674,7 +678,7 @@ def load_data_to_database(
         ).then(
             lambda token: gnlibs_jobs.initialise_job(
                 conn,
-                load_job_id,
+                _load_job_id,
                 command,
                 "load-new-phenotypes-data",
                 extra_meta={
@@ -692,7 +696,7 @@ def load_data_to_database(
         ).then(
             lambda job: gnlibs_jobs.launch_job(
                 job,
-                jobs_db,
+                _jobs_db,
                 Path(f"{app.config['UPLOAD_FOLDER']}/job_errors"),
                 worker_manager="gn_libs.jobs.launcher",
                 loglevel=_loglevel)
@@ -972,6 +976,7 @@ def load_data_success(
         job_id: uuid.UUID,
         **kwargs
 ):# pylint: disable=[unused-argument]
+    """Display success page if loading data to database was successful."""
     with (database_connection(app.config["SQL_URI"]) as conn,
           sqlite3.connection(app.config["ASYNCHRONOUS_JOBS_SQLITE_DB"])
           as jobsconn):
@@ -1016,5 +1021,5 @@ def load_data_success(
                                            "FormID": "searchResult"
                                        }),
                                        fragment="")))
-        except JobNotFound as jnf:
+        except JobNotFound as _jnf:
             return render_template("jobs/job-not-found.html", job_id=job_id)
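
Note on the underscore renames above: for "except JobNotFound as _jnf" the prefix does real work, because pylint's unused-variable check (W0612) skips names matching its dummy-variables-rgx, whose default treats a leading underscore as intentional. A self-contained sketch:

    # Sketch: binding an exception that is never used normally draws W0612;
    # the underscore-prefixed name matches pylint's dummy-variables-rgx, so
    # it is exempt. (Dropping the "as" clause entirely would also work.)
    class JobNotFound(Exception):
        """Stand-in for the uploader's real JobNotFound."""

    def job_page(job_id: str) -> str:
        try:
            raise JobNotFound(job_id)
        except JobNotFound as _jnf:  # intentionally unused
            return "jobs/job-not-found.html"

The _jobs_db and _load_job_id locals are used later in the function, so for those the prefix is a naming convention rather than a warning fix.
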
diff --git a/uploader/publications/models.py b/uploader/publications/models.py
index f37b67d..f83be58 100644
--- a/uploader/publications/models.py
+++ b/uploader/publications/models.py
@@ -30,6 +30,7 @@ def create_new_publications(
         conn: Connection,
         publications: tuple[dict, ...]
 ) -> tuple[dict, ...]:
+    """Create new publications in the database."""
     if len(publications) > 0:
         with conn.cursor(cursorclass=DictCursor) as cursor:
             cursor.executemany(
@@ -47,7 +48,8 @@ def create_new_publications(
             return tuple({
                 **row, "publication_id": row["Id"]
             } for row in cursor.fetchall())
-        return tuple()
+
+    return tuple()
 
 
 def update_publications(conn: Connection , publications: tuple[dict, ...]) -> tuple[dict, ...]:
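
Note on the dedented return above: at the old indentation the trailing "return tuple()" sat inside the if-suite, after a with-block that always returns, so it was unreachable, and an empty publications argument made the function implicitly return None despite the declared tuple return type. A minimal model of the corrected control flow:

    # Sketch: the dedented return is now the reachable empty-input fallback.
    def create_sketch(items: tuple) -> tuple:
        if len(items) > 0:
            return tuple({"id": item} for item in items)
        return tuple()

    assert create_sketch(tuple()) == tuple()  # previously: None
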
diff --git a/uploader/publications/pubmed.py b/uploader/publications/pubmed.py
index e8b0584..2531c4a 100644
--- a/uploader/publications/pubmed.py
+++ b/uploader/publications/pubmed.py
@@ -29,9 +29,7 @@ def __journal__(journal: etree.Element) -> dict:
     }
 
 def __author__(author: etree.Element) -> str:
-    return "%s %s" % (
-        author.find("LastName").text,
-        author.find("Initials").text)
+    return f'{author.find("LastName").text} {author.find("Initials").text}'
 
 
 def __pages__(pagination: etree.Element) -> str:
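
Note on the f-string conversion above: the two spellings are equivalent, and the f-string form also satisfies pylint's consider-using-f-string (C0209) check. Illustrative values:

    # Sketch with made-up author data; both forms render "LastName Initials".
    last, initials = "Blake", "RW"
    assert "%s %s" % (last, initials) == f"{last} {initials}" == "Blake RW"
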
diff --git a/uploader/publications/views.py b/uploader/publications/views.py
index e7aa412..a88f754 100644
--- a/uploader/publications/views.py
+++ b/uploader/publications/views.py
@@ -27,21 +27,21 @@ pubbp = Blueprint("publications", __name__)
 @require_login
 def index():
     """Index page for publications."""
-    with database_connection(app.config["SQL_URI"]) as conn:
-        return render_template("publications/index.html")
+    return render_template("publications/index.html")
 
 
 @pubbp.route("/list", methods=["GET"])
 @require_login
 def list_publications():
+    """Fetch publications that fulfill a specific search, or all of them, if
+    there is no search term."""
     # request breakdown:
     # https://datatables.net/manual/server-side
     _page = int(request.args.get("draw"))
     _length = int(request.args.get("length") or '-1')
     _start = int(request.args.get("start") or '0')
     _search = request.args["search[value]"]
-    with (database_connection(app.config["SQL_URI"]) as conn,
-          conn.cursor(cursorclass=DictCursor) as cursor):
+    with database_connection(app.config["SQL_URI"]) as conn:
         _publications, _current_rows, _totalfiltered, _totalrows = fetch_publications(
             conn,
             _search,
@@ -73,7 +73,7 @@ def view_publication(publication_id: int):
 @require_login
 def create_publication():
     """Create a new publication."""
-    if(request.method == "GET"):
+    if request.method == "GET":
         return render_template("publications/create-publication.html")
     form = request.form
     authors = form.get("publication-authors").encode("utf8")
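
Note on the request breakdown in list_publications: per the linked DataTables server-side manual, "draw" is a sequence counter the client uses to match replies to requests (not a page number), and the JSON reply must echo it alongside the row counts. A self-contained sketch of the expected reply shape, with illustrative values:

    # Sketch: field names come from the DataTables manual; values are made up.
    import json

    reply = json.dumps({
        "draw": 1,             # echo of the request's "draw" counter
        "recordsTotal": 100,   # total rows before filtering
        "recordsFiltered": 7,  # rows remaining after the search[value] filter
        "data": [{"Id": 1, "Title": "An example publication"}],
    })
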
diff --git a/uploader/samples/views.py b/uploader/samples/views.py
index c0adb88..4705a96 100644
--- a/uploader/samples/views.py
+++ b/uploader/samples/views.py
@@ -96,7 +96,7 @@ def list_samples(species: dict, population: dict, **kwargs):# pylint: disable=[u
                                activelink="list-samples")
 
 
-def build_sample_upload_job(# pylint: disable=[too-many-arguments]
+def build_sample_upload_job(# pylint: disable=[too-many-arguments, too-many-positional-arguments]
         speciesid: int,
         populationid: int,
         samplesfile: Path,
@@ -159,7 +159,7 @@ def upload_samples(species_id: int, population_id: int):#pylint: disable=[too-ma
               "alert-error")
         return samples_uploads_page
 
-    firstlineheading = (request.form.get("first_line_heading") == "on")
+    firstlineheading = request.form.get("first_line_heading") == "on"
 
     separator = request.form.get("separator", ",")
     if separator == "other":
@@ -172,7 +172,7 @@ def upload_samples(species_id: int, population_id: int):#pylint: disable=[too-ma
 
     redisuri = app.config["REDIS_URL"]
     with Redis.from_url(redisuri, decode_responses=True) as rconn:
-        #TODO: Add a QC step here — what do we check?
+        #T0DO: Add a QC step here — what do we check?
         # 1. Does any sample in the uploaded file exist within the database?
         #    If yes, what is/are its/their species and population?
         # 2. If yes 1. above, provide error with notes on which species and
@@ -251,7 +251,7 @@ def upload_status(species: dict, population: dict, job_id: uuid.UUID, **kwargs):
 @require_login
 @with_population(species_redirect_uri="species.populations.samples.index",
                  redirect_uri="species.populations.samples.select_population")
-def upload_failure(species: dict, population: dict, job_id: uuid.UUID, **kwargs):
+def upload_failure(species: dict, population: dict, job_id: uuid.UUID, **kwargs):# pylint: disable=[unused-argument]
     """Display the errors of the samples upload failure."""
     job = with_redis_connection(lambda rconn: jobs.job(
         rconn, jobs.jobsnamespace(), job_id))
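
Note on the TODO to T0DO respelling above (also in phenotypes/views.py): pylint's fixme check (W0511) flags comments matching its configurable notes list, which defaults to FIXME, XXX and TODO, and the respelling simply falls outside that list. A config-level sketch that keeps the conventional, greppable spelling (assumes stock pylint defaults):

    # Sketch: drop TODO from the trigger list project-wide in .pylintrc:
    #
    #     [MISCELLANEOUS]
    #     notes=FIXME,XXX
    #
    # With that in place, "# TODO: ..." comments no longer report W0511.
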
diff --git a/uploader/species/models.py b/uploader/species/models.py
index db53d48..acfa51e 100644
--- a/uploader/species/models.py
+++ b/uploader/species/models.py
@@ -92,7 +92,7 @@ def save_species(conn: mdb.Connection,
         }
 
 
-def update_species(# pylint: disable=[too-many-arguments]
+def update_species(# pylint: disable=[too-many-arguments, too-many-positional-arguments]
         conn: mdb.Connection,
         species_id: int,
         common_name: str,