-rw-r--r--  mypy.ini                              3
-rw-r--r--  r_qtl/r_qtl2.py                       8
-rw-r--r--  scripts/compute_phenotype_means.py    2
-rw-r--r--  scripts/load_phenotypes_to_db.py     17
-rw-r--r--  scripts/qc_on_rqtl2_bundle.py         9
-rw-r--r--  scripts/rqtl2/install_genotypes.py    6
-rw-r--r--  scripts/rqtl2/install_phenos.py       7
-rw-r--r--  scripts/rqtl2/phenotypes_qc.py        3
-rw-r--r--  scripts/run_qtlreaper.py             27
-rw-r--r--  uploader/__init__.py                  4
-rw-r--r--  uploader/background_jobs.py          12
-rw-r--r--  uploader/default_settings.py          2
-rw-r--r--  uploader/jobs.py                      8
-rw-r--r--  uploader/oauth2/client.py             5
-rw-r--r--  uploader/phenotypes/misc.py           2
-rw-r--r--  uploader/phenotypes/models.py        13
-rw-r--r--  uploader/phenotypes/views.py          2
-rw-r--r--  uploader/population/views.py          2
-rw-r--r--  uploader/publications/datatables.py   2
-rw-r--r--  uploader/publications/misc.py         4
-rw-r--r--  uploader/publications/pubmed.py       3
-rw-r--r--  uploader/publications/views.py       14
-rw-r--r--  uploader/route_utils.py               4
23 files changed, 88 insertions, 71 deletions
diff --git a/mypy.ini b/mypy.ini
index 7bed360..263460d 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -1,5 +1,8 @@
 [mypy]
 
+[mypy-lxml.*]
+ignore_missing_imports = True
+
 [mypy-flask.*]
 ignore_missing_imports = True
 
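Note: lxml ships no bundled type stubs, so without the new [mypy-lxml.*] override mypy rejects the import this commit adds in uploader/publications/pubmed.py. A rough illustration (the mypy message is paraphrased from memory, not copied from a run):

    # Without the override, mypy reports something like:
    #   error: Skipping analyzing "lxml": module is installed, but missing
    #   library stubs or py.typed marker  [import-untyped]
    # ignore_missing_imports = True makes mypy treat lxml as Any instead.
    from lxml import etree

    root = etree.fromstring(b"<Article><Abstract/></Article>")
    print(root.tag)  # runs fine either way; only static analysis is affected
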
diff --git a/r_qtl/r_qtl2.py b/r_qtl/r_qtl2.py
index 0ef487f..ce1dbf8 100644
--- a/r_qtl/r_qtl2.py
+++ b/r_qtl/r_qtl2.py
@@ -584,16 +584,16 @@ def read_csv_file_headers(
         comment_char: str = "#"
 ) -> tuple[str, ...]:
     """Read the 'true' headers of a CSV file."""
-    headers = tuple()
+    headers: tuple[str, ...] = tuple()
     for line in read_text_file(filepath):
         if line.startswith(comment_char):
             continue
 
-        line = tuple(field.strip() for field in line.split(separator))
+        row = tuple(field.strip() for field in line.split(separator))
         if not transposed:
-            return line
+            return row
 
-        headers = headers + (line[0],)
+        headers = headers + (row[0],)
         continue
 
     return headers
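Note: the rename from line to row avoids rebinding a loop variable from str to tuple[str, ...], which mypy flags as an incompatible redefinition, while the annotation on headers keeps the empty tuple() from being inferred too narrowly. The rebinding problem in isolation (a minimal sketch, not the project code):

    lines = ["# comment", "id,name,value"]
    for line in lines:  # line: str
        if line.startswith("#"):
            continue
        # Reassigning to `line` here would change its type to tuple[str, ...],
        # which mypy rejects; binding a fresh name is the fix applied above.
        row = tuple(field.strip() for field in line.split(","))
        print(row)  # ('id', 'name', 'value')
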
diff --git a/scripts/compute_phenotype_means.py b/scripts/compute_phenotype_means.py
index ef2fabc..6d39ace 100644
--- a/scripts/compute_phenotype_means.py
+++ b/scripts/compute_phenotype_means.py
@@ -51,7 +51,7 @@ def run(args) -> int:
 
 
 T = TypeVar("T")
-def comma_separated_list(val: str, itemstype: T = str) -> tuple[T, ...]:
+def comma_separated_list(val: str, itemstype: type = str) -> tuple[T, ...]:
     """Convert val into a list of items of type 'itemstype'."""
     return tuple(itemstype(item.strip()) for item in val.split(","))
 
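Note: a bare TypeVar is not usable as the annotation of a parameter with a concrete default, hence the switch to type. Usage is unchanged; the revised function, as committed:

    from typing import TypeVar

    T = TypeVar("T")

    def comma_separated_list(val: str, itemstype: type = str) -> tuple[T, ...]:
        """Convert val into a list of items of type 'itemstype'."""
        return tuple(itemstype(item.strip()) for item in val.split(","))

    print(comma_separated_list("a, b, c"))       # ('a', 'b', 'c')
    print(comma_separated_list("1, 2, 3", int))  # (1, 2, 3)

A stricter alternative would be itemstype: type[T] = str, which would also let mypy bind T in the tuple[T, ...] return rather than leaving it unsolved.
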
diff --git a/scripts/load_phenotypes_to_db.py b/scripts/load_phenotypes_to_db.py
index e449b82..e303bb3 100644
--- a/scripts/load_phenotypes_to_db.py
+++ b/scripts/load_phenotypes_to_db.py
@@ -6,9 +6,9 @@ import time
 import logging
 import argparse
 import datetime
-from typing import Any
 from pathlib import Path
 from zipfile import ZipFile
+from typing import Any, Iterable
 from urllib.parse import urljoin
 from functools import reduce, partial
 
@@ -55,7 +55,7 @@ def save_phenotypes(
 
     if control_data["phenocovar_transposed"]:
         logger.info("Undoing transposition of the files rows and columns.")
-        phenofiles = (
+        phenofiles = tuple(
             rqtl2.transpose_csv_with_rename(
                 _file,
                 build_line_splitter(control_data),
@@ -86,7 +86,7 @@ def __row_to_dataitems__(
         dataidmap: dict,
         pheno_name2id: dict[str, int],
         samples: dict
-) -> tuple[dict, ...]:
+) -> Iterable[dict]:
     samplename = sample_row["id"]
 
     return ({
@@ -134,7 +134,7 @@ def save_numeric_data(# pylint: disable=[too-many-positional-arguments,too-many-
         conn: mysqldb.Connection,
         dataidmap: dict,
         pheno_name2id: dict[str, int],
-        samples: tuple[dict, ...],
+        samples: dict,
         control_data: dict,
         filesdir: Path,
         filetype: str,
@@ -311,7 +311,9 @@ def update_auth(# pylint: disable=[too-many-locals,too-many-positional-arguments
     ).either(__handle_error__, __handle_success__)
 
 
-def load_data(conn: mysqldb.Connection, job: dict) -> int:#pylint: disable=[too-many-locals]
+def load_data(# pylint: disable=[too-many-locals]
+        conn: mysqldb.Connection, job: dict
+) -> tuple[dict, dict, dict, tuple[int, ...]]:
     """Load the data attached in the given job."""
     _job_metadata = job["metadata"]
     # Steps
@@ -365,9 +367,8 @@ def load_data(conn: mysqldb.Connection, job: dict) -> int:#pylint: disable=[too-
                 "publication_id": row["publication_id"],
                 "data_id": row["data_id"]
             },)))
-    dataidmap, pheno_name2id, _xrefs = reduce(__build_phenos_maps__,
-                                      _phenos,
-                                      ({},{}, tuple()))
+    dataidmap, pheno_name2id, _xrefs = reduce(# type: ignore[var-annotated]
+        __build_phenos_maps__, _phenos, ({},{}, tuple()))
     # 3. a. Fetch the strain names and IDS: create name->ID map
     samples = {
         row["Name"]: row
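Note: the reflowed reduce builds three lookups in one pass over _phenos; mypy cannot infer the heterogeneous accumulator ({}, {}, tuple()), hence the var-annotated ignore. The shape of the fold, with hypothetical field names standing in for the real rows:

    from functools import reduce

    def build_phenos_maps(acc, pheno):
        """Fold one phenotype row into (dataidmap, name2id, xrefs)."""
        dataidmap, name2id, xrefs = acc
        return ({**dataidmap, pheno["id"]: pheno["data_id"]},
                {**name2id, pheno["name"]: pheno["id"]},
                xrefs + ((pheno["id"], pheno["data_id"]),))

    phenos = ({"id": 1, "name": "BW", "data_id": 10},
              {"id": 2, "name": "HW", "data_id": 11})
    dataidmap, name2id, xrefs = reduce(build_phenos_maps, phenos, ({}, {}, tuple()))
    print(name2id)  # {'BW': 1, 'HW': 2}
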
diff --git a/scripts/qc_on_rqtl2_bundle.py b/scripts/qc_on_rqtl2_bundle.py
index 0207938..4e6ef00 100644
--- a/scripts/qc_on_rqtl2_bundle.py
+++ b/scripts/qc_on_rqtl2_bundle.py
@@ -40,7 +40,7 @@ def add_to_errors(rconn: Redis,
     """Add `errors` to a given list of errors"""
     errs = tuple(dict(item) for item in set(
         [dict2tuple(old) for old in
-         json.loads(rconn.hget(fqjobid, key) or "[]")] +
+         json.loads(rconn.hget(fqjobid, key) or "[]")] +# type: ignore[arg-type]
         [dict2tuple({"type": type(error).__name__, **error._asdict()})
          for error in errors]))
     rconn.hset(fqjobid, key, json.dumps(errs))
@@ -83,7 +83,8 @@ def retrieve_errors_with_progress(rconn: Redis,#pylint: disable=[too-many-locals
     count = 0
     checked = 0
     cdata = rqtl2.control_data(zfile)
-    rconn.hset(fqjobid, f"{filetype}-filesize", compute_filesize(zfile, filetype))
+    rconn.hset(
+        fqjobid, f"{filetype}-filesize", str(compute_filesize(zfile, filetype)))
     def __update_processed__(value):
         nonlocal checked
         checked = checked + len(value)
@@ -104,7 +105,7 @@ def retrieve_errors_with_progress(rconn: Redis,#pylint: disable=[too-many-locals
                             yield error
                         __update_processed__(value)
 
-        rconn.hset(fqjobid, f"{filetype}-linecount", count)
+        rconn.hset(fqjobid, f"{filetype}-linecount", count)# type: ignore[arg-type]
     except rqe.MissingFileException:
         fname = cdata.get(filetype)
         yield rqfe.MissingFile(filetype, fname, (
@@ -295,7 +296,7 @@ def run_qc(rconn: Redis,
         return 1
 
     def __fetch_errors__(rkey: str) -> tuple:
-        return tuple(json.loads(rconn.hget(fqjobid, rkey) or "[]"))
+        return tuple(json.loads(rconn.hget(fqjobid, rkey) or "[]")) # type: ignore[arg-type]
 
     return (1 if any((
         bool(__fetch_errors__(key))
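Note: the redis client's stubs give hget a union return type (it can be None, and the same stubs cover the async client), so json.loads(rconn.hget(...) or "[]") fails type-checking despite being correct at runtime; the commit opts for targeted ignores. An explicit cast is an alternative that keeps mypy clean (a sketch, assuming a synchronous client created with decode_responses=True):

    import json
    from typing import cast
    from redis import Redis

    def fetch_errors(rconn: Redis, fqjobid: str, key: str) -> tuple:
        """Narrow hget's union return before handing it to json.loads."""
        raw = cast(str, rconn.hget(fqjobid, key) or "[]")
        return tuple(json.loads(raw))
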
diff --git a/scripts/rqtl2/install_genotypes.py b/scripts/rqtl2/install_genotypes.py
index 8762655..5e6abb0 100644
--- a/scripts/rqtl2/install_genotypes.py
+++ b/scripts/rqtl2/install_genotypes.py
@@ -20,7 +20,7 @@ from scripts.rqtl2.entry import build_main
 from scripts.rqtl2.cli_parser import add_common_arguments
 from scripts.cli_parser import init_cli_parser, add_global_data_arguments
 
-__MODULE__ = "scripts.rqtl2.install_genotypes"
+logger = getLogger(__name__)
 
 def insert_markers(
         dbconn: mdb.Connection,
@@ -191,7 +191,7 @@ def install_genotypes(#pylint: disable=[too-many-locals]
         dbconn: mdb.Connection,
         fullyqualifiedjobid: str,#pylint: disable=[unused-argument]
         args: argparse.Namespace,
-        logger: Logger = getLogger(__name__)
+        logger: Logger = logger # pylint: disable=[redefined-outer-name]
 ) -> int:
     """Load any existing genotypes into the database."""
     (speciesid, populationid, datasetid, rqtl2bundle) = (
@@ -257,5 +257,5 @@ if __name__ == "__main__":
 
         return parser.parse_args()
 
-    main = build_main(cli_args(), install_genotypes, __MODULE__)
+    main = build_main(cli_args(), install_genotypes, logger)
     sys.exit(main())
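Note: the commit drops the __MODULE__ name string in favour of a module-level logger object, which serves both as the default argument and as what gets passed to build_main. The standard shape of that pattern, in miniature:

    import sys
    import logging

    logger = logging.getLogger(__name__)  # one module-level logger

    def install(logger: logging.Logger = logger) -> int:  # pylint: disable=[redefined-outer-name]
        logger.info("installing…")
        return 0

    if __name__ == "__main__":
        logging.basicConfig(level=logging.INFO)
        sys.exit(install())
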
diff --git a/scripts/rqtl2/install_phenos.py b/scripts/rqtl2/install_phenos.py
index 9059cd6..11ac8a4 100644
--- a/scripts/rqtl2/install_phenos.py
+++ b/scripts/rqtl2/install_phenos.py
@@ -19,7 +19,7 @@ from r_qtl import r_qtl2_qc as rqc
 
 from functional_tools import take
 
-__MODULE__ = "scripts.rqtl2.install_phenos"
+logger = getLogger(__name__)
 
 def insert_probesets(dbconn: mdb.Connection,
                      platformid: int,
@@ -101,7 +101,8 @@ def install_pheno_files(#pylint: disable=[too-many-locals]
         dbconn: mdb.Connection,
         fullyqualifiedjobid: str,#pylint: disable=[unused-argument]
         args: argparse.Namespace,
-        logger: Logger = getLogger()) -> int:
+        logger: Logger = logger # pylint: disable=[redefined-outer-name]
+) -> int:
     """Load data in `pheno` files and other related files into the database."""
     (speciesid, platformid, datasetid, rqtl2bundle) = (
         args.speciesid, args.platformid, args.datasetid, args.rqtl2bundle)
@@ -159,5 +160,5 @@ if __name__ == "__main__":
 
         return parser.parse_args()
 
-    main = build_main(cli_args(), install_pheno_files, __MODULE__)
+    main = build_main(cli_args(), install_pheno_files, logger)
     sys.exit(main())
diff --git a/scripts/rqtl2/phenotypes_qc.py b/scripts/rqtl2/phenotypes_qc.py
index 9f11f57..72d6c83 100644
--- a/scripts/rqtl2/phenotypes_qc.py
+++ b/scripts/rqtl2/phenotypes_qc.py
@@ -376,7 +376,8 @@ def run_qc(# pylint: disable=[too-many-locals]
         rconn: Redis,
         dbconn: mdb.Connection,
         fullyqualifiedjobid: str,
-        args: Namespace
+        args: Namespace,
+        logger: Logger = logger # pylint: disable=[redefined-outer-name]
 ) -> int:
     """Run quality control checks on the bundle."""
     print("Beginning the quality assurance checks.")
diff --git a/scripts/run_qtlreaper.py b/scripts/run_qtlreaper.py
index 89cc3ec..7d58402 100644
--- a/scripts/run_qtlreaper.py
+++ b/scripts/run_qtlreaper.py
@@ -6,8 +6,8 @@ import secrets
 import logging
 import subprocess
 from pathlib import Path
-from typing import Union
 from functools import reduce
+from typing import Union, Iterator
 from argparse import Namespace, ArgumentParser
 
 from gn_libs import mysqldb
@@ -56,7 +56,7 @@ def reconcile_samples(
 def generate_qtlreaper_traits_file(
         outdir: Path,
         samples: tuple[str, ...],
-        traits_data: dict[str, Union[int, float]],
+        traits_data: tuple[dict[str, Union[int, float]], ...],
         filename_prefix: str = ""
 ) -> Path:
     """Generate a file for use with qtlreaper that contains the traits' data."""
@@ -65,7 +65,7 @@ def generate_qtlreaper_traits_file(
     _dialect.quoting=0
 
     _traitsfile = outdir.joinpath(
-        f"{filename_prefix}_{secrets.token_urlsafe(15)}.tsv")
+        f"{filename_prefix}_{secrets.token_urlsafe(15)}.tsv")#type: ignore[attr-defined]
     with _traitsfile.open(mode="w", encoding="utf-8") as outptr:
         writer = csv.DictWriter(
             outptr, fieldnames=("Trait",) + samples, dialect=_dialect)
@@ -79,7 +79,7 @@ def generate_qtlreaper_traits_file(
     return _traitsfile
 
 
-def parse_tsv_file(results_file: Path) -> list[dict]:
+def parse_tsv_file(results_file: Path) -> Iterator[dict]:
     """Parse the rust-qtlreaper output into usable python objects."""
     with results_file.open("r", encoding="utf-8") as readptr:
         _dialect = csv.unix_dialect()
@@ -96,7 +96,7 @@ def __qtls_by_trait__(qtls, current):
     }
 
 
-def save_qtl_values_to_db(conn, qtls: dict):
+def save_qtl_values_to_db(conn, qtls: tuple[dict, ...]):
     """Save computed QTLs to the database."""
     with conn.cursor() as cursor:
         cursor.executemany(
@@ -131,11 +131,11 @@ def dispatch(args: Namespace) -> int:
                              ", ".join(_samples_not_in_genofile))
 
             # Fetch traits data: provided list, or all traits in db
-            _traitsdata = phenotypes_vector_data(
+            _traitsdata = tuple(phenotypes_vector_data(
                 conn,
                 args.species_id,
                 args.population_id,
-                xref_ids=tuple(args.xref_ids)).values()
+                xref_ids=tuple(args.xref_ids)).values())
             logger.debug("Successfully got traits data. Generating the QTLReaper's traits file…")
             _traitsfile = generate_qtlreaper_traits_file(
                 args.working_dir,
@@ -145,7 +145,7 @@ def dispatch(args: Namespace) -> int:
             logger.debug("QTLReaper's Traits file: %s", _traitsfile)
 
             _qtlreaper_main_output = args.working_dir.joinpath(
-                f"main-output-{secrets.token_urlsafe(15)}.tsv")
+                f"main-output-{secrets.token_urlsafe(15)}.tsv")#type: ignore[attr-defined]
             logger.debug("Main output filename: %s", _qtlreaper_main_output)
             with subprocess.Popen(
                     ("qtlreaper",
@@ -156,11 +156,12 @@ def dispatch(args: Namespace) -> int:
                 while _qtlreaper.poll() is None:
                     logger.debug("QTLReaper process running…")
                     time.sleep(1)
-                    results = tuple(max(qtls, key=lambda qtl: qtl["LRS"])
-                                    for qtls in
-                                    reduce(__qtls_by_trait__,
-                                           parse_tsv_file(_qtlreaper_main_output),
-                                           {}).values())
+                    results = tuple(#type: ignore[var-annotated]
+                        max(qtls, key=lambda qtl: qtl["LRS"])
+                        for qtls in
+                        reduce(__qtls_by_trait__,
+                               parse_tsv_file(_qtlreaper_main_output),
+                               {}).values())
             save_qtl_values_to_db(conn, results)
             logger.debug("Cleaning up temporary files.")
             _traitsfile.unlink()
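Note: parse_tsv_file's annotation now matches a generator implementation: rows are yielded lazily, and the reduce above consumes them all while the underlying file handle is still open. A self-contained sketch of the generator form (the real parser configures a unix dialect; this one only sets the delimiter):

    import csv
    from pathlib import Path
    from typing import Iterator

    def parse_tsv_file(results_file: Path) -> Iterator[dict]:
        """Yield qtlreaper output rows lazily, one dict per line."""
        with results_file.open("r", encoding="utf-8") as readptr:
            yield from csv.DictReader(readptr, delimiter="\t")
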
diff --git a/uploader/__init__.py b/uploader/__init__.py
index 7425b38..0ba1f81 100644
--- a/uploader/__init__.py
+++ b/uploader/__init__.py
@@ -11,7 +11,7 @@ from cachelib import FileSystemCache
 
 from gn_libs import jobs as gnlibs_jobs
 
-from flask_session import Session
+from flask_session import Session# type: ignore[attr-defined]
 
 
 from uploader.oauth2.client import user_logged_in, authserver_authorise_uri
@@ -103,7 +103,7 @@ def create_app(config: Optional[dict] = None):
     ### END: Application configuration
 
     app.config["SESSION_CACHELIB"] = FileSystemCache(
-        cache_dir=Path(app.config["SESSION_FILESYSTEM_CACHE_PATH"]).absolute(),
+        cache_dir=str(Path(app.config["SESSION_FILESYSTEM_CACHE_PATH"]).absolute()),
         threshold=int(app.config["SESSION_FILESYSTEM_CACHE_THRESHOLD"]),
         default_timeout=int(app.config["SESSION_FILESYSTEM_CACHE_TIMEOUT"]))
 
diff --git a/uploader/background_jobs.py b/uploader/background_jobs.py
index 4aded1d..4e1cd13 100644
--- a/uploader/background_jobs.py
+++ b/uploader/background_jobs.py
@@ -4,9 +4,9 @@ import importlib
 from typing import Callable
 from functools import partial
 
+from werkzeug.wrappers.response import Response
 from flask import (
     redirect,
-    Response,
     Blueprint,
     render_template,
     current_app as app)
@@ -48,7 +48,7 @@ def register_handlers(
     return job_type
 
 
-def register_job_handlers(job: str):
+def register_job_handlers(job: dict):
     """Related to register handlers above."""
     def __load_handler__(absolute_function_path):
         _parts = absolute_function_path.split(".")
@@ -79,8 +79,12 @@ def handler(job: dict, handler_type: str) -> HandlerType:
     ).get(handler_type)
     if bool(_handler):
         return _handler(job)
-    return render_template(sui_template("background-jobs/default-success-page.html"),
-                           job=job)
+
+    def __default_success_handler__(_job):
+        return render_template(
+            sui_template("background-jobs/default-success-page.html"), job=_job)
+
+    return __default_success_handler__
 
 
 error_handler = partial(handler, handler_type="error")
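Note: before the fix, the fallback branch returned an already-rendered page while the signature promises a HandlerType callable; wrapping the default success page in a closure makes the fallback consistent with what the partials below expect. A simplified sketch of that contract (render_template replaced with a plain string, and the registered-handler branch reduced to a direct lookup):

    from functools import partial
    from typing import Callable

    HandlerType = Callable[[dict], str]  # hypothetical: job dict -> response body

    def handler(job: dict, handler_type: str) -> HandlerType:
        _handler = job.get("handlers", {}).get(handler_type)
        if _handler:
            return _handler

        def __default_success_handler__(_job: dict) -> str:
            return f"job {_job.get('id')} completed"  # stands in for render_template

        return __default_success_handler__

    error_handler = partial(handler, handler_type="error")
    success_handler = partial(handler, handler_type="success")
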
diff --git a/uploader/default_settings.py b/uploader/default_settings.py
index bb3a967..52cdad5 100644
--- a/uploader/default_settings.py
+++ b/uploader/default_settings.py
@@ -32,4 +32,4 @@ JWKS_DELETION_AGE_DAYS = 14 # Days (from creation) to keep a JWK around before d
 
 
 ## --- Feature flags ---
-FEATURE_FLAGS_HTTP = []
+FEATURE_FLAGS_HTTP: list[str] = []
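Note: an empty literal gives mypy nothing to infer an element type from, so FEATURE_FLAGS_HTTP = [] triggers a "Need type annotation" error; the explicit annotation resolves it. The same idiom covers the tuple() initializers annotated elsewhere in this commit:

    FEATURE_FLAGS_HTTP: list[str] = []  # element type stated up front
    diff: tuple[dict, ...] = tuple()    # same fix in uploader/phenotypes/misc.py
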
diff --git a/uploader/jobs.py b/uploader/jobs.py
index 5968c03..b2de54b 100644
--- a/uploader/jobs.py
+++ b/uploader/jobs.py
@@ -147,8 +147,8 @@ def job_errors(
     return take(
         (
             json.loads(error)
-            for key in rconn.keys(f"{prefix}:{str(job_id)}:*:errors:*")
-            for error in rconn.lrange(key, 0, -1)),
+            for key in rconn.keys(f"{prefix}:{str(job_id)}:*:errors:*")# type: ignore[union-attr]
+            for error in rconn.lrange(key, 0, -1)),# type: ignore[union-attr]
         count)
 
 
@@ -160,8 +160,8 @@ def job_files_metadata(
     """Get the metadata for specific job file."""
     return {
         key.split(":")[-1]: {
-            **rconn.hgetall(key),
+            **rconn.hgetall(key),# type: ignore[dict-item]
             "filetype": key.split(":")[-3]
         }
-        for key in rconn.keys(f"{prefix}:{str(job_id)}:*:metadata*")
+        for key in rconn.keys(f"{prefix}:{str(job_id)}:*:metadata*")# type: ignore[union-attr]
     }
diff --git a/uploader/oauth2/client.py b/uploader/oauth2/client.py
index b94a044..4e81afd 100644
--- a/uploader/oauth2/client.py
+++ b/uploader/oauth2/client.py
@@ -157,7 +157,10 @@ def fetch_user_details() -> Either:
                 "user_id": uuid.UUID(usrdets["user_id"]),
                 "name": usrdets["name"],
                 "email": usrdets["email"],
-                "token": session.user_token()}))
+                "token": session.user_token(),
+                "logged_in": session.user_token().either(
+                    lambda _e: False, lambda _t: True)
+            }))
         return udets
     return Right(suser)
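Note: session.user_token() returns an Either-style value, and .either(on_left, on_right) folds it into a plain boolean without unwrapping, so logged_in is False exactly when the token lookup failed. A minimal sketch, assuming a pymonad-style Either:

    from pymonad.either import Left, Right  # assumption: pymonad-style Either

    def logged_in(token_result) -> bool:
        """Left (no/invalid token) -> False; Right (token) -> True."""
        return token_result.either(lambda _err: False, lambda _tok: True)

    print(logged_in(Right("token-123")))        # True
    print(logged_in(Left("no session token")))  # False
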
 
diff --git a/uploader/phenotypes/misc.py b/uploader/phenotypes/misc.py
index cbe3b7f..1924c07 100644
--- a/uploader/phenotypes/misc.py
+++ b/uploader/phenotypes/misc.py
@@ -8,7 +8,7 @@ def phenotypes_data_differences(
         filedata: tuple[dict, ...], dbdata: tuple[dict, ...]
 ) -> tuple[dict, ...]:
     """Compute differences between file data and db data"""
-    diff = tuple()
+    diff: tuple[dict, ...] = tuple()
     for filerow, dbrow in zip(
             sorted(filedata, key=lambda item: (item["phenotype_id"], item["xref_id"])),
             sorted(dbdata, key=lambda item: (item["PhenotypeId"], item["xref_id"]))):
diff --git a/uploader/phenotypes/models.py b/uploader/phenotypes/models.py
index e962e62..7c051d7 100644
--- a/uploader/phenotypes/models.py
+++ b/uploader/phenotypes/models.py
@@ -255,9 +255,9 @@ def phenotypes_vector_data(# pylint: disable=[too-many-arguments, too-many-posit
         xref_ids: tuple[int, ...] = tuple(),
         offset: int = 0,
         limit: Optional[int] = None
-) -> dict[tuple[int, int, int]: dict[str, Union[int,float]]]:
+) -> dict[tuple[int, int, int], dict[str, Union[int,float]]]:
     """Retrieve the vector data values for traits in the database."""
-    _params = (species_id, population_id)
+    _params: tuple[int, ...] = (species_id, population_id)
     _query = ("SELECT "
               "Species.Id AS SpeciesId, iset.Id AS InbredSetId, "
               "pxr.Id AS xref_id, pdata.*, Strain.Id AS StrainId, "
@@ -381,7 +381,7 @@ def create_new_phenotypes(# pylint: disable=[too-many-locals]
         phenotypes: Iterable[dict]
 ) -> tuple[dict, ...]:
     """Add entirely new phenotypes to the database. WARNING: Not thread-safe."""
-    _phenos = tuple()
+    _phenos: tuple[dict, ...] = tuple()
     with conn.cursor(cursorclass=DictCursor) as cursor:
         def make_next_id(idcol, table):
             cursor.execute(f"SELECT MAX({idcol}) AS last_id FROM {table}")
@@ -430,9 +430,10 @@ def create_new_phenotypes(# pylint: disable=[too-many-locals]
             if len(batch) == 0:
                 break
 
-            params, abbrevs = reduce(__build_params_and_prepubabbrevs__,
-                                     batch,
-                                     (tuple(), tuple()))
+            params, abbrevs = reduce(#type: ignore[var-annotated]
+                __build_params_and_prepubabbrevs__,
+                batch,
+                (tuple(), tuple()))
             # Check for uniqueness for all "Pre_publication_description" values
             abbrevs_paramsstr = ", ".join(["%s"] * len(abbrevs))
             _query = ("SELECT PublishXRef.PhenotypeId, Phenotype.* "
diff --git a/uploader/phenotypes/views.py b/uploader/phenotypes/views.py
index 9df7d81..42f2e34 100644
--- a/uploader/phenotypes/views.py
+++ b/uploader/phenotypes/views.py
@@ -805,7 +805,7 @@ def update_phenotype_data(conn, data: dict):
             }
         })
 
-    values, serrs, counts = tuple(
+    values, serrs, counts = tuple(# type: ignore[var-annotated]
         tuple({
             "data_id": row[0].split("::")[0],
             "strain_id": row[0].split("::")[1],
diff --git a/uploader/population/views.py b/uploader/population/views.py
index a6e2358..caee55b 100644
--- a/uploader/population/views.py
+++ b/uploader/population/views.py
@@ -157,7 +157,7 @@ def create_population(species_id: int):
             "FullName": population_fullname,
             "InbredSetCode": request.form.get("population_code") or None,
             "Description": request.form.get("population_description") or None,
-            "Family": request.form.get("population_family").strip() or None,
+            "Family": request.form.get("population_family", "").strip() or None,
             "MappingMethodId": request.form.get("population_mapping_method_id"),
             "GeneticType": request.form.get("population_genetic_type") or None
         })
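Note: request.form.get("population_family") is None when the field is absent, so the old .strip() could raise AttributeError; passing a default makes the whole chain safe. The pattern in isolation, with a plain dict standing in for the form:

    def family_or_none(form: dict) -> str | None:
        """Missing or blank form field -> None, otherwise the trimmed value."""
        return form.get("population_family", "").strip() or None

    print(family_or_none({"population_family": "  BXD  "}))  # 'BXD'
    print(family_or_none({"population_family": "   "}))      # None
    print(family_or_none({}))                                # None, not AttributeError
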
diff --git a/uploader/publications/datatables.py b/uploader/publications/datatables.py
index e07fafd..8b3d4a0 100644
--- a/uploader/publications/datatables.py
+++ b/uploader/publications/datatables.py
@@ -13,7 +13,7 @@ def fetch_publications(
         search: Optional[str] = None,
         offset: int = 0,
         limit: int = -1
-) -> tuple[dict, int, int, int]:
+) -> tuple[tuple[dict, ...], int, int, int]:
     """Fetch publications from the database."""
     _query = "SELECT * FROM Publication"
     _count_query = "SELECT COUNT(*) FROM Publication"
diff --git a/uploader/publications/misc.py b/uploader/publications/misc.py
index fca6f71..f0ff9c7 100644
--- a/uploader/publications/misc.py
+++ b/uploader/publications/misc.py
@@ -4,10 +4,10 @@
 def publications_differences(
         filedata: tuple[dict, ...],
         dbdata: tuple[dict, ...],
-        pubmedid2pubidmap: tuple[dict, ...]
+        pubmedid2pubidmap: dict[int, int]
 ) -> tuple[dict, ...]:
     """Compute the differences between file data and db data"""
-    diff = tuple()
+    diff: tuple[dict, ...] = tuple()
     for filerow, dbrow in zip(
             sorted(filedata, key=lambda item: (
                 item["phenotype_id"], item["xref_id"])),
diff --git a/uploader/publications/pubmed.py b/uploader/publications/pubmed.py
index 2531c4a..15bf701 100644
--- a/uploader/publications/pubmed.py
+++ b/uploader/publications/pubmed.py
@@ -1,5 +1,6 @@
 """Module to interact with NCBI's PubMed"""
 import logging
+from typing import Optional
 
 import requests
 from lxml import etree
@@ -40,7 +41,7 @@ def __pages__(pagination: etree.Element) -> str:
     )) if start is not None else ""
 
 
-def __abstract__(article: etree.Element) -> str:
+def __abstract__(article: etree.Element) -> Optional[str]:
     abstract = article.find("Abstract/AbstractText")
     return abstract.text if abstract is not None else None
 
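Note: Element.find returns None when nothing matches (and a matched element's .text can itself be None), so Optional[str] is the honest return type. For example:

    from typing import Optional
    from lxml import etree

    def abstract_text(article) -> Optional[str]:
        """None-safe lookup, mirroring __abstract__ above."""
        abstract = article.find("Abstract/AbstractText")
        return abstract.text if abstract is not None else None

    art = etree.fromstring(
        b"<Article><Abstract><AbstractText>Hi</AbstractText></Abstract></Article>")
    print(abstract_text(art))                              # 'Hi'
    print(abstract_text(etree.fromstring(b"<Article/>")))  # None
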
diff --git a/uploader/publications/views.py b/uploader/publications/views.py
index f0ec95a..11732db 100644
--- a/uploader/publications/views.py
+++ b/uploader/publications/views.py
@@ -140,14 +140,14 @@ def edit_publication(publication_id: int):
         _pub = update_publications(conn, ({
             "publication_id": publication_id,
             "pubmed_id": form.get("pubmed-id") or None,
-            "abstract": form.get("publication-abstract").encode("utf8") or None,
-            "authors": form.get("publication-authors").encode("utf8"),
-            "title":  form.get("publication-title").encode("utf8") or None,
-            "journal": form.get("publication-journal").encode("utf8") or None,
-            "volume": form.get("publication-volume").encode("utf8") or None,
-            "pages": form.get("publication-pages").encode("utf8") or None,
+            "abstract": (form.get("publication-abstract") or "").encode("utf8") or None,
+            "authors": (form.get("publication-authors") or "").encode("utf8"),
+            "title":  (form.get("publication-title") or "").encode("utf8") or None,
+            "journal": (form.get("publication-journal") or "").encode("utf8") or None,
+            "volume": (form.get("publication-volume") or "").encode("utf8") or None,
+            "pages": (form.get("publication-pages") or "").encode("utf8") or None,
             "month": (form.get("publication-month") or "").encode("utf8").capitalize() or None,
-            "year": form.get("publication-year").encode("utf8") or None
+            "year": (form.get("publication-year") or "").encode("utf8") or None
         },))
 
         if not _pub:
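Note: the (form.get(...) or "").encode("utf8") or None chain guards two falsy cases at once: a missing field becomes "" before .encode (so there is no None.encode crash), and a blank submission encodes to b"", which is falsy and therefore stored as None. Traced by hand:

    def norm(value: str | None) -> bytes | None:
        """Missing or empty form value -> None; otherwise utf-8 bytes."""
        return (value or "").encode("utf8") or None

    print(norm("BXD study"))  # b'BXD study'
    print(norm(""))           # None  (b'' is falsy)
    print(norm(None))         # None  (no AttributeError)
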
diff --git a/uploader/route_utils.py b/uploader/route_utils.py
index fa63233..426d7eb 100644
--- a/uploader/route_utils.py
+++ b/uploader/route_utils.py
@@ -56,7 +56,7 @@ def generic_select_population(
 def redirect_to_next(default: dict):
     """Redirect to the next uri if specified, else redirect to default."""
     assert "uri" in default, "You must provide at least the 'uri' value."
-    _next = request.args.get("next")
+    _next = request.args.get("next") or ""
     if bool(_next):
         try:
             next_page = base64_decode_to_dict(_next)
@@ -73,7 +73,7 @@ def redirect_to_next(default: dict):
         **{key:value for key,value in default.items() if key != "uri"}))
 
 
-def build_next_argument(uri: str, **kwargs) -> str:
+def build_next_argument(uri: str, **kwargs) -> bytes:
     """Build the `next` URI argument from provided details."""
     dumps_keywords = (
         "skipkeys", "ensure_ascii", "check_circular", "allow_nan", "cls",