From 7def8a574b0879f53a6fae555dc5c24be541e9dc Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 27 Apr 2024 14:57:26 -0400 Subject: [PATCH 001/127] Refactor code using handle_exceptions decorator An exception handling decorator named handle_exceptions was introduced to simplify the handling of exceptions in most major resources. The decorator eliminates the need to manually try/except blocks in each route, thus reducing repetitive code and enhancing readability. All routes in resources like User.py, ResetTokenValidation.py, RequestResetPassword.py, and more were updated to use this decorator. --- resources/Agencies.py | 35 ++--- resources/ApiKey.py | 39 +++--- resources/Archives.py | 59 ++++---- resources/DataSources.py | 221 +++++++++++++----------------- resources/Login.py | 45 +++--- resources/PsycopgResource.py | 38 +++++ resources/RefreshSession.py | 44 +++--- resources/RequestResetPassword.py | 59 ++++---- resources/ResetPassword.py | 55 ++++---- resources/ResetTokenValidation.py | 31 ++--- resources/SearchTokens.py | 195 +++++++++++++------------- resources/User.py | 51 +++---- 12 files changed, 412 insertions(+), 460 deletions(-) diff --git a/resources/Agencies.py b/resources/Agencies.py index 9e88f86c..76acbcac 100644 --- a/resources/Agencies.py +++ b/resources/Agencies.py @@ -1,5 +1,5 @@ from middleware.security import api_required -from resources.PsycopgResource import PsycopgResource +from resources.PsycopgResource import PsycopgResource, handle_exceptions from utilities.common import convert_dates_to_strings from typing import Dict, Any @@ -36,6 +36,7 @@ class Agencies(PsycopgResource): """Represents a resource for fetching approved agency data from the database.""" + @handle_exceptions @api_required def get(self, page: str) -> Dict[str, Any]: """ @@ -47,26 +48,18 @@ def get(self, page: str) -> Dict[str, Any]: Returns: - dict: A dictionary containing the count of returned agencies and their data. 
""" - try: - cursor = self.psycopg2_connection.cursor() - joined_column_names = ", ".join(approved_columns) - offset = (int(page) - 1) * 1000 - cursor.execute( - f"select {joined_column_names} from agencies where approved = 'TRUE' limit 1000 offset {offset}" - ) - results = cursor.fetchall() - agencies_matches = [ - dict(zip(approved_columns, result)) for result in results - ] + cursor = self.psycopg2_connection.cursor() + joined_column_names = ", ".join(approved_columns) + offset = (int(page) - 1) * 1000 + cursor.execute( + f"select {joined_column_names} from agencies where approved = 'TRUE' limit 1000 offset {offset}" + ) + results = cursor.fetchall() + agencies_matches = [dict(zip(approved_columns, result)) for result in results] - for item in agencies_matches: - convert_dates_to_strings(item) + for item in agencies_matches: + convert_dates_to_strings(item) - agencies = {"count": len(agencies_matches), "data": agencies_matches} + agencies = {"count": len(agencies_matches), "data": agencies_matches} - return agencies - - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - return "There has been an error pulling data!" + return agencies diff --git a/resources/ApiKey.py b/resources/ApiKey.py index 520fe416..bd1a044b 100644 --- a/resources/ApiKey.py +++ b/resources/ApiKey.py @@ -4,12 +4,13 @@ import uuid from typing import Dict, Any, Optional -from resources.PsycopgResource import PsycopgResource +from resources.PsycopgResource import PsycopgResource, handle_exceptions class ApiKey(PsycopgResource): """Represents a resource for generating an API key for authenticated users.""" + @handle_exceptions def get(self) -> Optional[Dict[str, Any]]: """ Authenticates a user based on provided credentials and generates an API key. @@ -20,24 +21,18 @@ def get(self) -> Optional[Dict[str, Any]]: Returns: - dict: A dictionary containing the generated API key, or None if an error occurs. 
""" - try: - data = request.get_json() - email = data.get("email") - password = data.get("password") - cursor = self.psycopg2_connection.cursor() - user_data = login_results(cursor, email) - - if check_password_hash(user_data["password_digest"], password): - api_key = uuid.uuid4().hex - user_id = str(user_data["id"]) - cursor.execute( - "UPDATE users SET api_key = %s WHERE id = %s", (api_key, user_id) - ) - payload = {"api_key": api_key} - self.psycopg2_connection.commit() - return payload - - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - return {"message": str(e)} + data = request.get_json() + email = data.get("email") + password = data.get("password") + cursor = self.psycopg2_connection.cursor() + user_data = login_results(cursor, email) + + if check_password_hash(user_data["password_digest"], password): + api_key = uuid.uuid4().hex + user_id = str(user_data["id"]) + cursor.execute( + "UPDATE users SET api_key = %s WHERE id = %s", (api_key, user_id) + ) + payload = {"api_key": api_key} + self.psycopg2_connection.commit() + return payload diff --git a/resources/Archives.py b/resources/Archives.py index 51201b44..398d9d00 100644 --- a/resources/Archives.py +++ b/resources/Archives.py @@ -5,7 +5,7 @@ import json from typing import Dict, Any -from resources.PsycopgResource import PsycopgResource +from resources.PsycopgResource import PsycopgResource, handle_exceptions class Archives(PsycopgResource): @@ -13,6 +13,7 @@ class Archives(PsycopgResource): A resource for managing archive data, allowing retrieval and update of archived data sources. """ + @handle_exceptions @api_required def get(self) -> Any: """ @@ -23,18 +24,13 @@ def get(self) -> Any: Returns: - Any: The cleaned results of archives combined from the database query, or an error message if an exception occurs. 
""" - try: - archives_combined_results_clean = archives_get_query( - test_query_results=[], conn=self.psycopg2_connection - ) + archives_combined_results_clean = archives_get_query( + test_query_results=[], conn=self.psycopg2_connection + ) - return archives_combined_results_clean - - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - return "There has been an error pulling data!" + return archives_combined_results_clean + @handle_exceptions @api_required def put(self) -> Dict[str, str]: """ @@ -45,27 +41,22 @@ def put(self) -> Dict[str, str]: Returns: - dict: A status message indicating success or an error message if an exception occurs. """ - try: - json_data = request.get_json() - data = json.loads(json_data) - id = data["id"] if "id" in data else None - broken_as_of = ( - data["broken_source_url_as_of"] - if "broken_source_url_as_of" in data - else None - ) - last_cached = data["last_cached"] if "last_cached" in data else None - - archives_put_query( - id=id, - broken_as_of=broken_as_of, - last_cached=last_cached, - conn=self.psycopg2_connection, - ) - - return {"status": "success"} + json_data = request.get_json() + data = json.loads(json_data) + id = data["id"] if "id" in data else None + broken_as_of = ( + data["broken_source_url_as_of"] + if "broken_source_url_as_of" in data + else None + ) + last_cached = data["last_cached"] if "last_cached" in data else None + + archives_put_query( + id=id, + broken_as_of=broken_as_of, + last_cached=last_cached, + conn=self.psycopg2_connection, + ) + + return {"status": "success"} - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - return {"error": str(e)} diff --git a/resources/DataSources.py b/resources/DataSources.py index 50fdb48e..46a05a39 100644 --- a/resources/DataSources.py +++ b/resources/DataSources.py @@ -6,7 +6,7 @@ import uuid from typing import Dict, Any, Tuple -from resources.PsycopgResource import PsycopgResource +from resources.PsycopgResource 
import PsycopgResource, handle_exceptions class DataSourceById(PsycopgResource): @@ -15,6 +15,7 @@ class DataSourceById(PsycopgResource): Provides methods for retrieving and updating data source details. """ + @handle_exceptions @api_required def get(self, data_source_id: str) -> Tuple[Dict[str, Any], int]: """ @@ -26,23 +27,19 @@ def get(self, data_source_id: str) -> Tuple[Dict[str, Any], int]: Returns: - Tuple containing the response message with data source details if found, and the HTTP status code. """ - try: - data_source_details = data_source_by_id_query( - conn=self.psycopg2_connection, data_source_id=data_source_id - ) - if data_source_details: - return { - "message": "Successfully found data source", - "data": data_source_details, - } - - else: - return {"message": "Data source not found."}, 404 - - except Exception as e: - print(str(e)) - return {"message": "There has been an error pulling data!"}, 500 + data_source_details = data_source_by_id_query( + conn=self.psycopg2_connection, data_source_id=data_source_id + ) + if data_source_details: + return { + "message": "Successfully found data source", + "data": data_source_details, + } + + else: + return {"message": "Data source not found."}, 404 + @handle_exceptions @api_required def put(self, data_source_id: str) -> Dict[str, str]: """ @@ -54,43 +51,38 @@ def put(self, data_source_id: str) -> Dict[str, str]: Returns: - A dictionary containing a message about the update operation. 
""" - try: - data = request.get_json() - - restricted_columns = [ - "rejection_note", - "data_source_request", - "approval_status", - "airtable_uid", - "airtable_source_last_modified", - ] + data = request.get_json() - data_to_update = "" + restricted_columns = [ + "rejection_note", + "data_source_request", + "approval_status", + "airtable_uid", + "airtable_source_last_modified", + ] - for key, value in data.items(): - if key not in restricted_columns: - if type(value) == str: - data_to_update += f"{key} = '{value}', " - else: - data_to_update += f"{key} = {value}, " + data_to_update = "" - data_to_update = data_to_update[:-2] + for key, value in data.items(): + if key not in restricted_columns: + if type(value) == str: + data_to_update += f"{key} = '{value}', " + else: + data_to_update += f"{key} = {value}, " - cursor = self.psycopg2_connection.cursor() + data_to_update = data_to_update[:-2] - sql_query = f""" - UPDATE data_sources - SET {data_to_update} - WHERE airtable_uid = '{data_source_id}' - """ + cursor = self.psycopg2_connection.cursor() - cursor.execute(sql_query) - self.psycopg2_connection.commit() - return {"message": "Data source updated successfully."} + sql_query = f""" + UPDATE data_sources + SET {data_to_update} + WHERE airtable_uid = '{data_source_id}' + """ - except Exception as e: - print(str(e)) - return {"message": "There has been an error updating the data source"}, 500 + cursor.execute(sql_query) + self.psycopg2_connection.commit() + return {"message": "Data source updated successfully."} class DataSources(PsycopgResource): @@ -99,6 +91,7 @@ class DataSources(PsycopgResource): Provides methods for retrieving all data sources and adding new ones. """ + @handle_exceptions @api_required def get(self) -> Dict[str, Any]: """ @@ -107,23 +100,19 @@ def get(self) -> Dict[str, Any]: Returns: - A dictionary containing the count of data sources and their details. 
""" - try: - data_source_matches = data_sources_query( - self.psycopg2_connection, [], "approved" - ) - - data_sources = { - "count": len(data_source_matches), - "data": data_source_matches, - } + data_source_matches = data_sources_query( + self.psycopg2_connection, [], "approved" + ) + + data_sources = { + "count": len(data_source_matches), + "data": data_source_matches, + } - return data_sources + return data_sources - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - return {"message": "There has been an error pulling data!"}, 500 + @handle_exceptions @api_required def post(self) -> Dict[str, str]: """ @@ -132,69 +121,59 @@ def post(self) -> Dict[str, str]: Returns: - A dictionary containing a message about the addition operation. """ - try: - data = request.get_json() - cursor = self.psycopg2_connection.cursor() + data = request.get_json() + cursor = self.psycopg2_connection.cursor() - restricted_columns = [ - "rejection_note", - "data_source_request", - "approval_status", - "airtable_uid", - "airtable_source_last_modified", - ] + restricted_columns = [ + "rejection_note", + "data_source_request", + "approval_status", + "airtable_uid", + "airtable_source_last_modified", + ] - column_names = "" - column_values = "" - for key, value in data.items(): - if key not in restricted_columns: - column_names += f"{key}, " - if type(value) == str: - column_values += f"'{value}', " - else: - column_values += f"{value}, " + column_names = "" + column_values = "" + for key, value in data.items(): + if key not in restricted_columns: + column_names += f"{key}, " + if type(value) == str: + column_values += f"'{value}', " + else: + column_values += f"{value}, " - now = datetime.now().strftime("%Y-%m-%d") - airtable_uid = str(uuid.uuid4()) + now = datetime.now().strftime("%Y-%m-%d") + airtable_uid = str(uuid.uuid4()) - column_names += ( - "approval_status, url_status, data_source_created, airtable_uid" - ) - column_values += f"False, '[\"ok\"]', 
'{now}', '{airtable_uid}'" + column_names += ( + "approval_status, url_status, data_source_created, airtable_uid" + ) + column_values += f"False, '[\"ok\"]', '{now}', '{airtable_uid}'" - sql_query = f"INSERT INTO data_sources ({column_names}) VALUES ({column_values}) RETURNING *" + sql_query = f"INSERT INTO data_sources ({column_names}) VALUES ({column_values}) RETURNING *" - cursor.execute(sql_query) - self.psycopg2_connection.commit() + cursor.execute(sql_query) + self.psycopg2_connection.commit() - return {"message": "Data source added successfully."} - - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - return {"message": "There has been an error adding the data source"}, 500 + return {"message": "Data source added successfully."} class DataSourcesNeedsIdentification(PsycopgResource): + @handle_exceptions @api_required def get(self): - try: - data_source_matches = data_sources_query( - self.psycopg2_connection, [], "needs_identification" - ) - - data_sources = { - "count": len(data_source_matches), - "data": data_source_matches, - } + data_source_matches = data_sources_query( + self.psycopg2_connection, [], "needs_identification" + ) + + data_sources = { + "count": len(data_source_matches), + "data": data_source_matches, + } - return data_sources + return data_sources - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - return {"message": "There has been an error pulling data!"}, 500 class DataSourcesMap(PsycopgResource): @@ -203,6 +182,7 @@ class DataSourcesMap(PsycopgResource): Provides a method for retrieving all data sources. """ + @handle_exceptions @api_required def get(self) -> Dict[str, Any]: """ @@ -211,19 +191,14 @@ def get(self) -> Dict[str, Any]: Returns: - A dictionary containing the count of data sources and their details. 
""" - try: - data_source_matches = data_sources_query( - self.psycopg2_connection, [], "approved", True - ) - - data_sources = { - "count": len(data_source_matches), - "data": data_source_matches, - } + data_source_matches = data_sources_query( + self.psycopg2_connection, [], "approved", True + ) + + data_sources = { + "count": len(data_source_matches), + "data": data_source_matches, + } - return data_sources + return data_sources - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - return {"message": "There has been an error pulling data!"}, 500 diff --git a/resources/Login.py b/resources/Login.py index 92e076c8..b5c2e7f9 100644 --- a/resources/Login.py +++ b/resources/Login.py @@ -1,7 +1,7 @@ from werkzeug.security import check_password_hash from flask import request from middleware.login_queries import login_results, create_session_token -from resources.PsycopgResource import PsycopgResource +from resources.PsycopgResource import PsycopgResource, handle_exceptions class Login(PsycopgResource): @@ -9,6 +9,7 @@ class Login(PsycopgResource): A resource for authenticating users. Allows users to log in using their email and password. """ + @handle_exceptions def post(self): """ Processes the login request. Validates user credentials against the stored hashed password and, @@ -17,27 +18,21 @@ def post(self): Returns: - A dictionary containing a message of success or failure, and the session token if successful. 
""" - try: - data = request.get_json() - email = data.get("email") - password = data.get("password") - cursor = self.psycopg2_connection.cursor() - - user_data = login_results(cursor, email) - - if "password_digest" in user_data and check_password_hash( - user_data["password_digest"], password - ): - token = create_session_token(cursor, user_data["id"], email) - self.psycopg2_connection.commit() - return { - "message": "Successfully logged in", - "data": token, - } - - return {"message": "Invalid email or password"}, 401 - - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - return {"message": str(e)}, 500 + data = request.get_json() + email = data.get("email") + password = data.get("password") + cursor = self.psycopg2_connection.cursor() + + user_data = login_results(cursor, email) + + if "password_digest" in user_data and check_password_hash( + user_data["password_digest"], password + ): + token = create_session_token(cursor, user_data["id"], email) + self.psycopg2_connection.commit() + return { + "message": "Successfully logged in", + "data": token, + } + + return {"message": "Invalid email or password"}, 401 diff --git a/resources/PsycopgResource.py b/resources/PsycopgResource.py index c6b84803..d8d8add7 100644 --- a/resources/PsycopgResource.py +++ b/resources/PsycopgResource.py @@ -1,5 +1,43 @@ +import functools +from typing import Callable, Any, Union, Tuple, Dict + from flask_restful import Resource +def handle_exceptions( + func: Callable[..., Any] +) -> Callable[..., Union[Any, Tuple[Dict[str, str], int]]]: + """ + A decorator to handle exceptions raised by a function. + + :param func: The function to be decorated. + :return: The decorated function. + + The decorated function handles any exceptions raised + by the original function. If an exception occurs, the + decorator performs a rollback on the psycopg2 connection, + prints the error message, and returns a dictionary with + the error message and an HTTP status code of 500. 
+ + Example usage: + ``` + @handle_exceptions + def my_function(): + # code goes here + ``` + """ + @functools.wraps(func) + def wrapper( + self, *args: Any, **kwargs: Any + ) -> Union[Any, Tuple[Dict[str, str], int]]: + try: + return func(self, *args, **kwargs) + except Exception as e: + self.psycopg2_connection.rollback() + print(str(e)) + return {"message": str(e)}, 500 + + return wrapper + class PsycopgResource(Resource): def __init__(self, **kwargs): diff --git a/resources/RefreshSession.py b/resources/RefreshSession.py index df03eb06..5bed1727 100644 --- a/resources/RefreshSession.py +++ b/resources/RefreshSession.py @@ -3,7 +3,7 @@ from datetime import datetime as dt from typing import Dict, Any -from resources.PsycopgResource import PsycopgResource +from resources.PsycopgResource import PsycopgResource, handle_exceptions class RefreshSession(PsycopgResource): @@ -12,6 +12,7 @@ class RefreshSession(PsycopgResource): If the provided session token is valid and not expired, it is replaced with a new one. """ + @handle_exceptions def post(self) -> Dict[str, Any]: """ Processes the session token refresh request. If the provided session token is valid, @@ -20,29 +21,24 @@ def post(self) -> Dict[str, Any]: Returns: - A dictionary containing a message of success or failure, and the new session token if successful. 
""" - try: - data = request.get_json() - old_token = data.get("session_token") - cursor = self.psycopg2_connection.cursor() - user_data = token_results(cursor, old_token) - cursor.execute( - f"delete from session_tokens where token = '{old_token}' and expiration_date < '{dt.utcnow()}'" + data = request.get_json() + old_token = data.get("session_token") + cursor = self.psycopg2_connection.cursor() + user_data = token_results(cursor, old_token) + cursor.execute( + f"delete from session_tokens where token = '{old_token}' and expiration_date < '{dt.utcnow()}'" + ) + self.psycopg2_connection.commit() + + if "id" in user_data: + token = create_session_token( + cursor, user_data["id"], user_data["email"] ) self.psycopg2_connection.commit() + return { + "message": "Successfully refreshed session token", + "data": token, + } + + return {"message": "Invalid session token"}, 403 - if "id" in user_data: - token = create_session_token( - cursor, user_data["id"], user_data["email"] - ) - self.psycopg2_connection.commit() - return { - "message": "Successfully refreshed session token", - "data": token, - } - - return {"message": "Invalid session token"}, 403 - - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - return {"message": str(e)}, 500 diff --git a/resources/RequestResetPassword.py b/resources/RequestResetPassword.py index 373b6756..46e6c007 100644 --- a/resources/RequestResetPassword.py +++ b/resources/RequestResetPassword.py @@ -6,7 +6,7 @@ import requests from typing import Dict, Any -from resources.PsycopgResource import PsycopgResource +from resources.PsycopgResource import PsycopgResource, handle_exceptions class RequestResetPassword(PsycopgResource): @@ -15,6 +15,7 @@ class RequestResetPassword(PsycopgResource): and sends an email to the user with instructions on how to reset their password. """ + @handle_exceptions def post(self) -> Dict[str, Any]: """ Processes a password reset request. 
Checks if the user's email exists in the database, @@ -23,34 +24,28 @@ def post(self) -> Dict[str, Any]: Returns: - A dictionary containing a success message and the reset token, or an error message if an exception occurs. """ - try: - data = request.get_json() - email = data.get("email") - cursor = self.psycopg2_connection.cursor() - user_data = user_check_email(cursor, email) - id = user_data["id"] - token = uuid.uuid4().hex - add_reset_token(cursor, email, token) - self.psycopg2_connection.commit() - - body = f"To reset your password, click the following link: {os.getenv('VITE_VUE_APP_BASE_URL')}/reset-password/{token}" - r = requests.post( - "https://api.mailgun.net/v3/mail.pdap.io/messages", - auth=("api", os.getenv("MAILGUN_KEY")), - data={ - "from": "mail@pdap.io", - "to": [email], - "subject": "PDAP Data Sources Reset Password", - "text": body, - }, - ) - - return { - "message": "An email has been sent to your email address with a link to reset your password. It will be valid for 15 minutes.", - "token": token, - } - - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - return {"error": str(e)}, 500 + data = request.get_json() + email = data.get("email") + cursor = self.psycopg2_connection.cursor() + user_data = user_check_email(cursor, email) + id = user_data["id"] + token = uuid.uuid4().hex + add_reset_token(cursor, email, token) + self.psycopg2_connection.commit() + + body = f"To reset your password, click the following link: {os.getenv('VITE_VUE_APP_BASE_URL')}/reset-password/{token}" + r = requests.post( + "https://api.mailgun.net/v3/mail.pdap.io/messages", + auth=("api", os.getenv("MAILGUN_KEY")), + data={ + "from": "mail@pdap.io", + "to": [email], + "subject": "PDAP Data Sources Reset Password", + "text": body, + }, + ) + + return { + "message": "An email has been sent to your email address with a link to reset your password. 
It will be valid for 15 minutes.", + "token": token, + } diff --git a/resources/ResetPassword.py b/resources/ResetPassword.py index b3c64428..efa68f72 100644 --- a/resources/ResetPassword.py +++ b/resources/ResetPassword.py @@ -7,7 +7,7 @@ from datetime import datetime as dt from typing import Dict, Any -from resources.PsycopgResource import PsycopgResource +from resources.PsycopgResource import PsycopgResource, handle_exceptions class ResetPassword(PsycopgResource): @@ -16,6 +16,7 @@ class ResetPassword(PsycopgResource): If the token is valid and not expired, allows the user to set a new password. """ + @handle_exceptions def post(self) -> Dict[str, Any]: """ Processes a password reset request. Validates the provided reset token and, @@ -24,32 +25,26 @@ def post(self) -> Dict[str, Any]: Returns: - A dictionary containing a message indicating whether the password was successfully updated or an error occurred. """ - try: - data = request.get_json() - token = data.get("token") - password = data.get("password") - cursor = self.psycopg2_connection.cursor() - token_data = check_reset_token(cursor, token) - email = token_data.get("email") - if "create_date" not in token_data: - return {"message": "The submitted token is invalid"}, 400 - - token_create_date = token_data["create_date"] - token_expired = (dt.utcnow() - token_create_date).total_seconds() > 900 - delete_reset_token(cursor, token_data["email"], token) - if token_expired: - return {"message": "The submitted token is invalid"}, 400 - - password_digest = generate_password_hash(password) - cursor = self.psycopg2_connection.cursor() - cursor.execute( - f"update users set password_digest = '{password_digest}' where email = '{email}'" - ) - self.psycopg2_connection.commit() - - return {"message": "Successfully updated password"} - - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - return {"message": str(e)}, 500 + data = request.get_json() + token = data.get("token") + password = 
data.get("password") + cursor = self.psycopg2_connection.cursor() + token_data = check_reset_token(cursor, token) + email = token_data.get("email") + if "create_date" not in token_data: + return {"message": "The submitted token is invalid"}, 400 + + token_create_date = token_data["create_date"] + token_expired = (dt.utcnow() - token_create_date).total_seconds() > 900 + delete_reset_token(cursor, token_data["email"], token) + if token_expired: + return {"message": "The submitted token is invalid"}, 400 + + password_digest = generate_password_hash(password) + cursor = self.psycopg2_connection.cursor() + cursor.execute( + f"update users set password_digest = '{password_digest}' where email = '{email}'" + ) + self.psycopg2_connection.commit() + + return {"message": "Successfully updated password"} diff --git a/resources/ResetTokenValidation.py b/resources/ResetTokenValidation.py index b174b7a0..6a537823 100644 --- a/resources/ResetTokenValidation.py +++ b/resources/ResetTokenValidation.py @@ -4,29 +4,24 @@ ) from datetime import datetime as dt -from resources.PsycopgResource import PsycopgResource +from resources.PsycopgResource import PsycopgResource, handle_exceptions class ResetTokenValidation(PsycopgResource): + @handle_exceptions def post(self): - try: - data = request.get_json() - token = data.get("token") - cursor = self.psycopg2_connection.cursor() - token_data = check_reset_token(cursor, token) - if "create_date" not in token_data: - return {"message": "The submitted token is invalid"}, 400 + data = request.get_json() + token = data.get("token") + cursor = self.psycopg2_connection.cursor() + token_data = check_reset_token(cursor, token) + if "create_date" not in token_data: + return {"message": "The submitted token is invalid"}, 400 - token_create_date = token_data["create_date"] - token_expired = (dt.utcnow() - token_create_date).total_seconds() > 900 + token_create_date = token_data["create_date"] + token_expired = (dt.utcnow() - 
token_create_date).total_seconds() > 900 - if token_expired: - return {"message": "The submitted token is invalid"}, 400 + if token_expired: + return {"message": "The submitted token is invalid"}, 400 - return {"message": "Token is valid"} - - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - return {"message": str(e)}, 500 + return {"message": "Token is valid"} diff --git a/resources/SearchTokens.py b/resources/SearchTokens.py index 19090789..38226032 100644 --- a/resources/SearchTokens.py +++ b/resources/SearchTokens.py @@ -12,7 +12,7 @@ import json from typing import Dict, Any -from resources.PsycopgResource import PsycopgResource +from resources.PsycopgResource import PsycopgResource, handle_exceptions sys.path.append("..") @@ -25,6 +25,7 @@ class SearchTokens(PsycopgResource): It supports quick search, data source retrieval by ID, and listing all data sources. """ + @handle_exceptions def get(self) -> Dict[str, Any]: """ Handles GET requests by performing a search operation based on the specified endpoint and arguments. @@ -38,112 +39,106 @@ def get(self) -> Dict[str, Any]: Returns: - A dictionary with the search results or an error message. 
""" - try: - url_params = request.args - endpoint = url_params.get("endpoint") - arg1 = url_params.get("arg1") - arg2 = url_params.get("arg2") - print(endpoint, arg1, arg2) - data_sources = {"count": 0, "data": []} - if type(self.psycopg2_connection) == dict: - return data_sources - - cursor = self.psycopg2_connection.cursor() - token = uuid.uuid4().hex - expiration = datetime.datetime.now() + datetime.timedelta(minutes=5) - cursor.execute( - f"insert into access_tokens (token, expiration_date) values (%s, %s)", - (token, expiration), - ) - self.psycopg2_connection.commit() - - if endpoint == "quick-search": - try: - data = request.get_json() - test = data.get("test_flag") - except: - test = False - try: - data_sources = quick_search_query( - arg1, arg2, [], self.psycopg2_connection, test - ) - - return data_sources + url_params = request.args + endpoint = url_params.get("endpoint") + arg1 = url_params.get("arg1") + arg2 = url_params.get("arg2") + print(endpoint, arg1, arg2) + data_sources = {"count": 0, "data": []} + if type(self.psycopg2_connection) == dict: + return data_sources + + cursor = self.psycopg2_connection.cursor() + token = uuid.uuid4().hex + expiration = datetime.datetime.now() + datetime.timedelta(minutes=5) + cursor.execute( + f"insert into access_tokens (token, expiration_date) values (%s, %s)", + (token, expiration), + ) + self.psycopg2_connection.commit() + + if endpoint == "quick-search": + try: + data = request.get_json() + test = data.get("test_flag") + except: + test = False + try: + data_sources = quick_search_query( + arg1, arg2, [], self.psycopg2_connection, test + ) - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - webhook_url = os.getenv("WEBHOOK_URL") - user_message = "There was an error during the search operation" - message = { - "content": user_message - + ": " - + str(e) - + "\n" - + f"Search term: {arg1}\n" - + f"Location: {arg2}" - } - requests.post( - webhook_url, - data=json.dumps(message), - 
headers={"Content-Type": "application/json"}, - ) + return data_sources - return {"count": 0, "message": user_message}, 500 + except Exception as e: + self.psycopg2_connection.rollback() + print(str(e)) + webhook_url = os.getenv("WEBHOOK_URL") + user_message = "There was an error during the search operation" + message = { + "content": user_message + + ": " + + str(e) + + "\n" + + f"Search term: {arg1}\n" + + f"Location: {arg2}" + } + requests.post( + webhook_url, + data=json.dumps(message), + headers={"Content-Type": "application/json"}, + ) + + return {"count": 0, "message": user_message}, 500 + + elif endpoint == "data-sources": + try: + data_source_matches = data_sources_query(self.psycopg2_connection) + + data_sources = { + "count": len(data_source_matches), + "data": data_source_matches, + } - elif endpoint == "data-sources": - try: - data_source_matches = data_sources_query(self.psycopg2_connection) + return data_sources + except Exception as e: + self.psycopg2_connection.rollback() + print(str(e)) + return {"message": "There has been an error pulling data!"}, 500 + + elif endpoint == "data-sources-by-id": + try: + data_source_details = data_source_by_id_query( + arg1, [], self.psycopg2_connection + ) + if data_source_details: + return data_source_details + + else: + return {"message": "Data source not found."}, 404 + + except Exception as e: + print(str(e)) + return {"message": "There has been an error pulling data!"}, 500 + + elif endpoint == "data-sources-map": + try: + data_source_details = data_sources_query( + self.psycopg2_connection, [], "approved", True + ) + if data_source_details: data_sources = { - "count": len(data_source_matches), - "data": data_source_matches, + "count": len(data_source_details), + "data": data_source_details, } - return data_sources - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - return {"message": "There has been an error pulling data!"}, 500 - - elif endpoint == "data-sources-by-id": - try: - 
data_source_details = data_source_by_id_query( - arg1, [], self.psycopg2_connection - ) - if data_source_details: - return data_source_details - - else: - return {"message": "Data source not found."}, 404 - - except Exception as e: - print(str(e)) - return {"message": "There has been an error pulling data!"}, 500 - - elif endpoint == "data-sources-map": - try: - data_source_details = data_sources_query( - self.psycopg2_connection, [], "approved", True - ) - if data_source_details: - data_sources = { - "count": len(data_source_details), - "data": data_source_details, - } - return data_sources - - else: - return {"message": "There has been an error pulling data!"}, 500 - - except Exception as e: - print(str(e)) + else: return {"message": "There has been an error pulling data!"}, 500 - else: - return {"message": "Unknown endpoint"}, 500 - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - return {"message": e}, 500 + except Exception as e: + print(str(e)) + return {"message": "There has been an error pulling data!"}, 500 + else: + return {"message": "Unknown endpoint"}, 500 diff --git a/resources/User.py b/resources/User.py index 8eb51ee0..ded38d2e 100644 --- a/resources/User.py +++ b/resources/User.py @@ -4,7 +4,7 @@ from middleware.security import api_required from typing import Dict, Any -from resources.PsycopgResource import PsycopgResource +from resources.PsycopgResource import PsycopgResource, handle_exceptions class User(PsycopgResource): @@ -12,6 +12,7 @@ class User(PsycopgResource): A resource for user management, allowing new users to sign up and existing users to update their passwords. """ + @handle_exceptions def post(self) -> Dict[str, Any]: """ Allows a new user to sign up by providing an email and password. @@ -22,22 +23,17 @@ def post(self) -> Dict[str, Any]: Returns: - A dictionary containing a success message or an error message if the operation fails. 
""" - try: - data = request.get_json() - email = data.get("email") - password = data.get("password") - cursor = self.psycopg2_connection.cursor() - user_post_results(cursor, email, password) - self.psycopg2_connection.commit() + data = request.get_json() + email = data.get("email") + password = data.get("password") + cursor = self.psycopg2_connection.cursor() + user_post_results(cursor, email, password) + self.psycopg2_connection.commit() - return {"message": "Successfully added user"} - - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - return {"message": e}, 500 + return {"message": "Successfully added user"} # Endpoint for updating a user's password + @handle_exceptions @api_required def put(self) -> Dict[str, Any]: """ @@ -49,20 +45,13 @@ def put(self) -> Dict[str, Any]: Returns: - A dictionary containing a success message or an error message if the operation fails. """ - try: - data = request.get_json() - email = data.get("email") - password = data.get("password") - password_digest = generate_password_hash(password) - cursor = self.psycopg2_connection.cursor() - cursor.execute( - f"update users set password_digest = '{password_digest}' where email = '{email}'" - ) - self.psycopg2_connection.commit() - return {"message": "Successfully updated password"} - - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - return {"message": e}, 500 - return {"message": e}, 500 + data = request.get_json() + email = data.get("email") + password = data.get("password") + password_digest = generate_password_hash(password) + cursor = self.psycopg2_connection.cursor() + cursor.execute( + f"update users set password_digest = '{password_digest}' where email = '{email}'" + ) + self.psycopg2_connection.commit() + return {"message": "Successfully updated password"} From 5698ab7fae14f3b2f738f86eadf8a9bc189dd84d Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 27 Apr 2024 15:04:17 -0400 Subject: [PATCH 002/127] Black reformatting 
--- resources/Archives.py | 1 - resources/DataSources.py | 7 +------ resources/PsycopgResource.py | 2 ++ resources/RefreshSession.py | 5 +---- 4 files changed, 4 insertions(+), 11 deletions(-) diff --git a/resources/Archives.py b/resources/Archives.py index 398d9d00..30759723 100644 --- a/resources/Archives.py +++ b/resources/Archives.py @@ -59,4 +59,3 @@ def put(self) -> Dict[str, str]: ) return {"status": "success"} - diff --git a/resources/DataSources.py b/resources/DataSources.py index 46a05a39..4953a5b2 100644 --- a/resources/DataSources.py +++ b/resources/DataSources.py @@ -111,7 +111,6 @@ def get(self) -> Dict[str, Any]: return data_sources - @handle_exceptions @api_required def post(self) -> Dict[str, str]: @@ -145,9 +144,7 @@ def post(self) -> Dict[str, str]: now = datetime.now().strftime("%Y-%m-%d") airtable_uid = str(uuid.uuid4()) - column_names += ( - "approval_status, url_status, data_source_created, airtable_uid" - ) + column_names += "approval_status, url_status, data_source_created, airtable_uid" column_values += f"False, '[\"ok\"]', '{now}', '{airtable_uid}'" sql_query = f"INSERT INTO data_sources ({column_names}) VALUES ({column_values}) RETURNING *" @@ -175,7 +172,6 @@ def get(self): return data_sources - class DataSourcesMap(PsycopgResource): """ A resource for managing collections of data sources for mapping. 
@@ -201,4 +197,3 @@ def get(self) -> Dict[str, Any]: } return data_sources - diff --git a/resources/PsycopgResource.py b/resources/PsycopgResource.py index d8d8add7..655186a0 100644 --- a/resources/PsycopgResource.py +++ b/resources/PsycopgResource.py @@ -3,6 +3,7 @@ from flask_restful import Resource + def handle_exceptions( func: Callable[..., Any] ) -> Callable[..., Union[Any, Tuple[Dict[str, str], int]]]: @@ -25,6 +26,7 @@ def my_function(): # code goes here ``` """ + @functools.wraps(func) def wrapper( self, *args: Any, **kwargs: Any diff --git a/resources/RefreshSession.py b/resources/RefreshSession.py index 5bed1727..75e77392 100644 --- a/resources/RefreshSession.py +++ b/resources/RefreshSession.py @@ -31,9 +31,7 @@ def post(self) -> Dict[str, Any]: self.psycopg2_connection.commit() if "id" in user_data: - token = create_session_token( - cursor, user_data["id"], user_data["email"] - ) + token = create_session_token(cursor, user_data["id"], user_data["email"]) self.psycopg2_connection.commit() return { "message": "Successfully refreshed session token", @@ -41,4 +39,3 @@ def post(self) -> Dict[str, Any]: } return {"message": "Invalid session token"}, 403 - From 3b2c241544565a597fde8ae80573664786e0ad58 Mon Sep 17 00:00:00 2001 From: Josh <30379833+josh-chamberlain@users.noreply.github.com> Date: Tue, 30 Apr 2024 13:09:04 -0400 Subject: [PATCH 003/127] update readme --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index cc0837c7..eb52c4d3 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,6 @@ -# data-sources-app +# data-sources-app-v2 + +Development of the next big iteration of the data sources app according to https://github.com/Police-Data-Accessibility-Project/data-sources-app/issues/248 An API and UI for searching, using, and maintaining Data Sources. 
From 68199c9c6eedcba2ba5ff665e8eca9b9e34febbf Mon Sep 17 00:00:00 2001 From: maxachis Date: Thu, 2 May 2024 21:54:46 -0400 Subject: [PATCH 004/127] Add middleware tests for database interactions This commit adds a new python file containing unit tests for middleware functions that interact with the database. These tests include checking for the addition, deletion, retrieval, and update of various data sources, users, and reset tokens to ensure the correct operation and accuracy of these functions. --- tests/test_middleware.py | 288 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 288 insertions(+) create mode 100644 tests/test_middleware.py diff --git a/tests/test_middleware.py b/tests/test_middleware.py new file mode 100644 index 00000000..987f736c --- /dev/null +++ b/tests/test_middleware.py @@ -0,0 +1,288 @@ +""" +This module runs pytests on functions interacting directly with the database -- the middleware +""" + +import os +import uuid +from collections import namedtuple +from unittest.mock import patch + +import psycopg2 +import pytest +from dotenv import load_dotenv + +from middleware.archives_queries import archives_get_results +from middleware.data_source_queries import ( + get_approved_data_sources, + needs_identification_data_sources, + data_source_by_id_results, +) +from middleware.login_queries import ( + login_results, + create_session_token, + token_results, + is_admin, +) +from middleware.quick_search_query import unaltered_search_query +from middleware.reset_token_queries import ( + check_reset_token, + add_reset_token, + delete_reset_token, +) +from middleware.user_queries import user_post_results, user_check_email + + +@pytest.fixture() +def dev_db_connection(): + """ + Sets up connection to development database + and creates a session that is rolled back after the test completes + to undo any operations performed during the test. 
+ :return: + """ + load_dotenv() + dev_db_connection_string = os.getenv("DEV_DB_CONN_STRING") + connection = psycopg2.connect( + dev_db_connection_string, + keepalives=1, + keepalives_idle=30, + keepalives_interval=10, + keepalives_count=5, + ) + connection.autocommit = False + + yield connection + + # Rollback any changes made during the tests + connection.rollback() + + connection.close() + + +@pytest.fixture() +def db_cursor(dev_db_connection): + """ + Create a cursor to execute database operations, with savepoint management. + This is to ensure that changes made during the test can be rolled back. + """ + cur = dev_db_connection.cursor() + + # Start a savepoint + cur.execute("SAVEPOINT test_savepoint") + + yield cur + + # Rollback to the savepoint to ignore commits within the test + cur.execute("ROLLBACK TO SAVEPOINT test_savepoint") + cur.close() + + +def test_unaltered_search_query(db_cursor): + # TODO: Modify + response = unaltered_search_query(db_cursor, "calls", "chicago") + + assert response + + +def test_data_sources(dev_db_connection): + # TODO: Modify + response = get_approved_data_sources(conn=dev_db_connection) + + assert response + + +def test_needs_identification(dev_db_connection): + # TODO: Modify + response = needs_identification_data_sources(conn=dev_db_connection) + + assert response + + +def test_data_sources_approved(dev_db_connection): + # TODO: Adjust this test to insert approved and unapproved data sources prior. + # Ensure the results returned are only approved. 
+ response = get_approved_data_sources(conn=dev_db_connection) + + assert ( + len([d for d in response if "https://joinstatepolice.ny.gov/15-mile-run" in d]) + == 0 + ) + + +def test_data_source_by_id_results(dev_db_connection): + # TODO: Modify; insert data sources with specific id, ensure those are the ONLY data sources returned + # Insert other data sources as well with different id + response = data_source_by_id_results( + data_source_id="rec00T2YLS2jU7Tbn", conn=dev_db_connection + ) + + assert response + + +def test_user_post_query(db_cursor): + user_post_results(db_cursor, "unit_test", "unit_test") + + db_cursor.execute(f"SELECT email FROM users WHERE email = 'unit_test'") + email_check = db_cursor.fetchone()[0] + + assert email_check == "unit_test" + + +TestUser = namedtuple("TestUser", ["id", "email", "password_hash"]) + + +def create_test_user( + cursor, + email="example@example.com", + password_hash="hashed_password_here", + api_key="api_key_here", + role=None, +) -> TestUser: + """ + Creates test user and returns the id of the test user + :param cursor: + :return: user id + """ + cursor.execute( + """ + INSERT INTO users (email, password_digest, api_key, role) + VALUES + (%s, %s, %s, %s) + RETURNING id; + """, + (email, password_hash, api_key, role), + ) + return TestUser( + id=cursor.fetchone()[0], + email=email, + password_hash=password_hash, + ) + + +def test_login_query(db_cursor): + test_user = create_test_user(db_cursor) + + user_data = login_results(db_cursor, "example@example.com") + + assert user_data["password_digest"] == test_user.password_hash + + +def test_create_session_token_results(db_cursor): + test_user = create_test_user(db_cursor) + with patch("os.getenv", return_value="mysecretkey") as mock_getenv: + token = create_session_token(db_cursor, test_user.id, test_user.email) + new_token = token_results(db_cursor, token) + + assert new_token["email"] == test_user.email + + +def test_is_admin(db_cursor): + """ + Creates and inserts two 
users, one an admin and the other not + And then checks to see if the `is_admin` properly + identifies both + :param db_cursor: + """ + regular_user = create_test_user(db_cursor) + admin_user = create_test_user( + cursor=db_cursor, email="admin@admin.com", role="admin" + ) + assert is_admin(db_cursor, admin_user.email) + assert not is_admin(db_cursor, regular_user.email) + + +def test_user_check_email(db_cursor): + user = create_test_user(db_cursor) + user_data = user_check_email(db_cursor, user.email) + assert user_data["id"] == user.id + + +TestTokenInsert = namedtuple("TestTokenInsert", ["id", "email", "token"]) + + +def create_reset_token(cursor) -> TestTokenInsert: + user = create_test_user(cursor) + token = uuid.uuid4().hex + cursor.execute( + """ + INSERT INTO reset_tokens(email, token) + VALUES (%s, %s) + RETURNING id + """, + (user.email, token), + ) + id = cursor.fetchone()[0] + return TestTokenInsert(id=id, email=user.email, token=token) + + +def test_check_reset_token(db_cursor): + """ + Checks if a token existing in the database + is properly returned by check_reset_token + :param db_cursor: + :return: + """ + test_token_insert = create_reset_token(db_cursor) + + user_data = check_reset_token(db_cursor, test_token_insert.token) + assert test_token_insert.id == user_data["id"] + + +def test_add_reset_token(db_cursor): + """ + Checks if add_reset_token properly inserts a token + for the given email in the database + """ + user = create_test_user(db_cursor) + token = uuid.uuid4().hex + add_reset_token(db_cursor, user.email, token) + db_cursor.execute( + """ + SELECT id, token FROM RESET_TOKENS where email = %s + """, + (user.email,), + ) + results = db_cursor.fetchall() + assert len(results) == 1 + assert results[0][1] == token + + +def get_reset_tokens_for_email(db_cursor, reset_token_insert): + db_cursor.execute( + """ + SELECT email from RESET_TOKENS where email = %s + """, + (reset_token_insert.email,), + ) + results = db_cursor.fetchall() + return 
results + + +def test_delete_reset_token(db_cursor): + """ + Checks if token previously inserted is deleted + by the delete_reset_token method + """ + reset_token_insert = create_reset_token(db_cursor) + results = get_reset_tokens_for_email(db_cursor, reset_token_insert) + assert len(results) == 1 + delete_reset_token(db_cursor, reset_token_insert.email, reset_token_insert.token) + results = get_reset_tokens_for_email(db_cursor, reset_token_insert) + assert len(results) == 0 + +def test_archives_get_results(dev_db_connection, db_cursor): + """ + Checks if archives_get_results picks up an added valid datasource + """ + original_results = archives_get_results(dev_db_connection) + db_cursor.execute( + """ + INSERT INTO data_sources(airtable_uid, source_url, name, update_frequency, url_status) + VALUES (%s, %s, %s, %s, %s) + """, + ('fake_uid', 'https://www.fake_source_url.com', 'fake_name', 'Annually', 'unbroken') + ) + new_results = archives_get_results(dev_db_connection) + assert len(new_results) == len(original_results) + 1 + + From 3092634df7eca56f88e722445e235613fcac4a3e Mon Sep 17 00:00:00 2001 From: maxachis Date: Fri, 3 May 2024 12:04:12 -0400 Subject: [PATCH 005/127] Add testing helpers for database interactions This commit introduces a new helper file for testing middleware interaction with the database. It includes test setup methods for generating fake data in the database, such as users, data sources, and reset tokens, which will provide a solid basis for thorough testing of database-related middleware functionality. 
--- tests/helper_test_middleware.py | 100 ++++++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 tests/helper_test_middleware.py diff --git a/tests/helper_test_middleware.py b/tests/helper_test_middleware.py new file mode 100644 index 00000000..a480fca3 --- /dev/null +++ b/tests/helper_test_middleware.py @@ -0,0 +1,100 @@ +import uuid +from collections import namedtuple + + +TestTokenInsert = namedtuple("TestTokenInsert", ["id", "email", "token"]) +TestUser = namedtuple("TestUser", ["id", "email", "password_hash"]) + +def insert_test_agencies_and_sources(cursor): + """ + + :param cursor: + :return: + """ + + cursor.execute( + """ + INSERT INTO + PUBLIC.DATA_SOURCES ( + airtable_uid, + NAME, + DESCRIPTION, + RECORD_TYPE, + SOURCE_URL, + APPROVAL_STATUS, + URL_STATUS + ) + VALUES + ('SOURCE_UID_1','Source 1','Description of src1','Type A','http://src1.com','approved','available'), + ('SOURCE_UID_2','Source 2','Description of src2','Type B','http://src2.com','approved','available'), + ('SOURCE_UID_3','Source 3', 'Description of src3', 'Type C', 'http://src3.com', 'pending', 'available'); + + INSERT INTO public.agencies (airtable_uid, name, municipality, state_iso, county_name, count_data_sources) + VALUES + ('Agency_UID_1', 'Agency A', 'City A', 'CA', 'County X', 3), + ('Agency_UID_2', 'Agency B', 'City B', 'NY', 'County Y', 2), + ('Agency_UID_3', 'Agency C', 'City C', 'TX', 'County Z', 1); + + INSERT INTO public.agency_source_link (airtable_uid, agency_described_linked_uid) + VALUES + ('SOURCE_UID_1', 'Agency_UID_1'), + ('SOURCE_UID_2', 'Agency_UID_2'), + ('SOURCE_UID_3', 'Agency_UID_3'); + """ + ) + + +def get_reset_tokens_for_email(db_cursor, reset_token_insert): + db_cursor.execute( + """ + SELECT email from RESET_TOKENS where email = %s + """, + (reset_token_insert.email,), + ) + results = db_cursor.fetchall() + return results + + + + +def create_reset_token(cursor) -> TestTokenInsert: + user = create_test_user(cursor) + token = 
uuid.uuid4().hex + cursor.execute( + """ + INSERT INTO reset_tokens(email, token) + VALUES (%s, %s) + RETURNING id + """, + (user.email, token), + ) + id = cursor.fetchone()[0] + return TestTokenInsert(id=id, email=user.email, token=token) + + +def create_test_user( + cursor, + email="example@example.com", + password_hash="hashed_password_here", + api_key="api_key_here", + role=None, +) -> TestUser: + """ + Creates test user and returns the id of the test user + :param cursor: + :return: user id + """ + cursor.execute( + """ + INSERT INTO users (email, password_digest, api_key, role) + VALUES + (%s, %s, %s, %s) + RETURNING id; + """, + (email, password_hash, api_key, role), + ) + return TestUser( + id=cursor.fetchone()[0], + email=email, + password_hash=password_hash, + ) From 85ec2d794ce3d7346488aff6a37c5de1542c12f6 Mon Sep 17 00:00:00 2001 From: maxachis Date: Fri, 3 May 2024 12:04:38 -0400 Subject: [PATCH 006/127] Refactor test methods and introduce database testing helpers This commit both minimizes test implementation detail within individual tests and also enhances testing utilities. Specifically, large helper functions for creating test users and reset tokens have been moved from the test file into a separate helper file. Additionally, a new comprehensive test for the 'quick_search_query' function has been introduced to ensure proper functionality. 
--- tests/__init__.py | 0 tests/test_middleware.py | 95 ++++++++++++++-------------------------- 2 files changed, 32 insertions(+), 63 deletions(-) create mode 100644 tests/__init__.py diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/test_middleware.py b/tests/test_middleware.py index 987f736c..e2f866ac 100644 --- a/tests/test_middleware.py +++ b/tests/test_middleware.py @@ -4,7 +4,6 @@ import os import uuid -from collections import namedtuple from unittest.mock import patch import psycopg2 @@ -23,13 +22,19 @@ token_results, is_admin, ) -from middleware.quick_search_query import unaltered_search_query +from middleware.quick_search_query import unaltered_search_query, quick_search_query from middleware.reset_token_queries import ( check_reset_token, add_reset_token, delete_reset_token, ) from middleware.user_queries import user_post_results, user_check_email +from tests.helper_test_middleware import ( + get_reset_tokens_for_email, + create_reset_token, + create_test_user, + insert_test_agencies_and_sources, +) @pytest.fixture() @@ -128,37 +133,6 @@ def test_user_post_query(db_cursor): assert email_check == "unit_test" -TestUser = namedtuple("TestUser", ["id", "email", "password_hash"]) - - -def create_test_user( - cursor, - email="example@example.com", - password_hash="hashed_password_here", - api_key="api_key_here", - role=None, -) -> TestUser: - """ - Creates test user and returns the id of the test user - :param cursor: - :return: user id - """ - cursor.execute( - """ - INSERT INTO users (email, password_digest, api_key, role) - VALUES - (%s, %s, %s, %s) - RETURNING id; - """, - (email, password_hash, api_key, role), - ) - return TestUser( - id=cursor.fetchone()[0], - email=email, - password_hash=password_hash, - ) - - def test_login_query(db_cursor): test_user = create_test_user(db_cursor) @@ -197,24 +171,6 @@ def test_user_check_email(db_cursor): assert user_data["id"] == user.id 
-TestTokenInsert = namedtuple("TestTokenInsert", ["id", "email", "token"]) - - -def create_reset_token(cursor) -> TestTokenInsert: - user = create_test_user(cursor) - token = uuid.uuid4().hex - cursor.execute( - """ - INSERT INTO reset_tokens(email, token) - VALUES (%s, %s) - RETURNING id - """, - (user.email, token), - ) - id = cursor.fetchone()[0] - return TestTokenInsert(id=id, email=user.email, token=token) - - def test_check_reset_token(db_cursor): """ Checks if a token existing in the database @@ -247,17 +203,6 @@ def test_add_reset_token(db_cursor): assert results[0][1] == token -def get_reset_tokens_for_email(db_cursor, reset_token_insert): - db_cursor.execute( - """ - SELECT email from RESET_TOKENS where email = %s - """, - (reset_token_insert.email,), - ) - results = db_cursor.fetchall() - return results - - def test_delete_reset_token(db_cursor): """ Checks if token previously inserted is deleted @@ -270,6 +215,7 @@ def test_delete_reset_token(db_cursor): results = get_reset_tokens_for_email(db_cursor, reset_token_insert) assert len(results) == 0 + def test_archives_get_results(dev_db_connection, db_cursor): """ Checks if archives_get_results picks up an added valid datasource @@ -280,9 +226,32 @@ def test_archives_get_results(dev_db_connection, db_cursor): INSERT INTO data_sources(airtable_uid, source_url, name, update_frequency, url_status) VALUES (%s, %s, %s, %s, %s) """, - ('fake_uid', 'https://www.fake_source_url.com', 'fake_name', 'Annually', 'unbroken') + ( + "fake_uid", + "https://www.fake_source_url.com", + "fake_name", + "Annually", + "unbroken", + ), ) new_results = archives_get_results(dev_db_connection) assert len(new_results) == len(original_results) + 1 +def test_quicksearch_columns(dev_db_connection): + try: + insert_test_agencies_and_sources(dev_db_connection.cursor()) + except psycopg2.errors.UniqueViolation: + dev_db_connection.rollback() + # TODO: Something about the quick_search_query might be mucking up the savepoints. 
Address once you fix quick_search's logic issues + results = quick_search_query( + search="Source 1", location="City A", conn=dev_db_connection + ) + # "Source 3" was listed as pending and shouldn't show up + assert len(results['data']) == 1 + results = quick_search_query( + search="Source 3", location="City C", conn=dev_db_connection + ) + assert len(results['data']) == 0 + + # Test that query inserted into log From 94bf05a2f6def18c74e38e9b18ffc7ce43e6b78f Mon Sep 17 00:00:00 2001 From: maxachis Date: Fri, 3 May 2024 12:04:58 -0400 Subject: [PATCH 007/127] Create (mostly) blank python_tests.yml file --- .github/workflows/python_tests.yml | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .github/workflows/python_tests.yml diff --git a/.github/workflows/python_tests.yml b/.github/workflows/python_tests.yml new file mode 100644 index 00000000..5979688e --- /dev/null +++ b/.github/workflows/python_tests.yml @@ -0,0 +1,7 @@ + + + + + + +# TODO: Include command to `python -m spacy download en_core_web_sm` \ No newline at end of file From 8c7392d11c04bc045419d71eaf32dfb8120b537d Mon Sep 17 00:00:00 2001 From: maxachis Date: Mon, 6 May 2024 16:49:29 -0400 Subject: [PATCH 008/127] Refactor test file and update test functions Renamed 'helper_test_middleware.py' to 'middleware/helper_functions.py', and made modifications to test data inserts. Added new testing helper functions for quick search query logs and boolean dictionary creation. This commit includes renaming a test file along with additions and modifications to the test data insert statements. New test helper functions have been introduced to handle the quick search query logs and create a dictionary of booleans. This refactor aims at improving the test coverage and overall project organization. 
--- tests/middleware/__init__.py | 0 .../helper_functions.py} | 53 ++++++++++++++++--- 2 files changed, 45 insertions(+), 8 deletions(-) create mode 100644 tests/middleware/__init__.py rename tests/{helper_test_middleware.py => middleware/helper_functions.py} (68%) diff --git a/tests/middleware/__init__.py b/tests/middleware/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/helper_test_middleware.py b/tests/middleware/helper_functions.py similarity index 68% rename from tests/helper_test_middleware.py rename to tests/middleware/helper_functions.py index a480fca3..7c5b0feb 100644 --- a/tests/helper_test_middleware.py +++ b/tests/middleware/helper_functions.py @@ -1,10 +1,10 @@ import uuid from collections import namedtuple - TestTokenInsert = namedtuple("TestTokenInsert", ["id", "email", "token"]) TestUser = namedtuple("TestUser", ["id", "email", "password_hash"]) + def insert_test_agencies_and_sources(cursor): """ @@ -26,14 +26,14 @@ def insert_test_agencies_and_sources(cursor): ) VALUES ('SOURCE_UID_1','Source 1','Description of src1','Type A','http://src1.com','approved','available'), - ('SOURCE_UID_2','Source 2','Description of src2','Type B','http://src2.com','approved','available'), + ('SOURCE_UID_2','Source 2','Description of src2','Type B','http://src2.com','needs identification','available'), ('SOURCE_UID_3','Source 3', 'Description of src3', 'Type C', 'http://src3.com', 'pending', 'available'); - INSERT INTO public.agencies (airtable_uid, name, municipality, state_iso, county_name, count_data_sources) + INSERT INTO public.agencies (airtable_uid, name, municipality, state_iso, county_name, count_data_sources, lat, lng) VALUES - ('Agency_UID_1', 'Agency A', 'City A', 'CA', 'County X', 3), - ('Agency_UID_2', 'Agency B', 'City B', 'NY', 'County Y', 2), - ('Agency_UID_3', 'Agency C', 'City C', 'TX', 'County Z', 1); + ('Agency_UID_1', 'Agency A', 'City A', 'CA', 'County X', 3, 30, 20), + ('Agency_UID_2', 'Agency B', 'City B', 'NY', 
'County Y', 2, 40, 50), + ('Agency_UID_3', 'Agency C', 'City C', 'TX', 'County Z', 1, 90, 60); INSERT INTO public.agency_source_link (airtable_uid, agency_described_linked_uid) VALUES @@ -55,8 +55,6 @@ def get_reset_tokens_for_email(db_cursor, reset_token_insert): return results - - def create_reset_token(cursor) -> TestTokenInsert: user = create_test_user(cursor) token = uuid.uuid4().hex @@ -98,3 +96,42 @@ def create_test_user( email=email, password_hash=password_hash, ) + + +QuickSearchQueryLogResult = namedtuple( + "QuickSearchQueryLogResult", ["result_count", "updated_at"] +) + + +def get_most_recent_quick_search_query_log(cursor, search: str, location: str): + cursor.execute( + """ + SELECT RESULT_COUNT, UPDATED_AT FROM QUICK_SEARCH_QUERY_LOGS WHERE + search = %s AND location = %s ORDER BY CREATED_AT DESC LIMIT 1 + """, + (search, location), + ) + result = cursor.fetchone() + return QuickSearchQueryLogResult(result_count=result[0], updated_at=result[1]) + + +def has_expected_keys(result_keys: list, expected_keys: list) -> bool: + """ + Check that given result includes expected keys + :param result: + :param expected_keys: + :return: True if has expected keys, false otherwise + """ + return not set(expected_keys).difference(result_keys) + + +def get_boolean_dictionary(keys: tuple) -> dict: + """ + Creates dictionary of booleans, all set to false + :param keys: + :return: dictionary of booleans + """ + d = {} + for key in keys: + d[key] = False + return d \ No newline at end of file From 0b86c49e5af9f2ad1a73b41b2450a205de76357e Mon Sep 17 00:00:00 2001 From: maxachis Date: Mon, 6 May 2024 16:49:46 -0400 Subject: [PATCH 009/127] Add middleware testing fixtures Added two testing fixtures for middleware tests in a new `fixtures.py` file within tests/middleware directory. The fixtures provide functionality for setting up a connection to the development database and creating a cursor for database operations. 
They also ensure that changes made during tests can be rolled back using PostgreSQL's SAVEPOINT management. This is done to prevent any permanent changes to the database resulting from the testing process. --- tests/middleware/fixtures.py | 50 ++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 tests/middleware/fixtures.py diff --git a/tests/middleware/fixtures.py b/tests/middleware/fixtures.py new file mode 100644 index 00000000..28344e87 --- /dev/null +++ b/tests/middleware/fixtures.py @@ -0,0 +1,50 @@ +import os + +import psycopg2 +import pytest +from dotenv import load_dotenv + + +@pytest.fixture() +def dev_db_connection() -> psycopg2.extensions.cursor: + """ + Sets up connection to development database + and creates a session that is rolled back after the test completes + to undo any operations performed during the test. + :return: + """ + load_dotenv() + dev_db_connection_string = os.getenv("DEV_DB_CONN_STRING") + connection = psycopg2.connect( + dev_db_connection_string, + keepalives=1, + keepalives_idle=30, + keepalives_interval=10, + keepalives_count=5, + ) + connection.autocommit = False + + yield connection + + # Rollback any changes made during the tests + connection.rollback() + + connection.close() + + +@pytest.fixture() +def db_cursor(dev_db_connection: psycopg2.extensions.connection) -> psycopg2.extensions.cursor: + """ + Create a cursor to execute database operations, with savepoint management. + This is to ensure that changes made during the test can be rolled back. 
+ """ + cur = dev_db_connection.cursor() + + # Start a savepoint + cur.execute("SAVEPOINT test_savepoint") + + yield cur + + # Rollback to the savepoint to ignore commits within the test + cur.execute("ROLLBACK TO SAVEPOINT test_savepoint") + cur.close() From ee11b1674dab98dde313cfa69d6ff399fd37d6ba Mon Sep 17 00:00:00 2001 From: maxachis Date: Mon, 6 May 2024 16:50:20 -0400 Subject: [PATCH 010/127] Add tests for data source queries Added several new tests for data source query functions in a new file, `test_data_source_queries.py`. These tests cover different aspects like getting approved data sources, data source by id and data sources for map. Additional setup and teardown functionality were also added through two fixtures, 'inserted_data_sources_found' and 'connection_with_test_data' for necessary test setup and cleanup processes. --- tests/middleware/test_data_source_queries.py | 175 +++++++++++++++++++ 1 file changed, 175 insertions(+) create mode 100644 tests/middleware/test_data_source_queries.py diff --git a/tests/middleware/test_data_source_queries.py b/tests/middleware/test_data_source_queries.py new file mode 100644 index 00000000..c08543bb --- /dev/null +++ b/tests/middleware/test_data_source_queries.py @@ -0,0 +1,175 @@ +import psycopg2 +import pytest + +from app_test_data import DATA_SOURCES_ID_QUERY_RESULTS +from middleware.data_source_queries import ( + get_approved_data_sources, + needs_identification_data_sources, + data_source_by_id_results, + data_sources_query, + DATA_SOURCES_APPROVED_COLUMNS, + data_source_by_id_query, + get_data_sources_for_map, +) +from tests.middleware.helper_functions import ( + insert_test_agencies_and_sources, + has_expected_keys, + get_boolean_dictionary, +) +from tests.middleware.fixtures import dev_db_connection, db_cursor + + +@pytest.fixture +def inserted_data_sources_found(): + """ + A boolean dictionary for identifying if test data sources have been found + :return: boolean dictionary with test data source names 
as keys, + all values initialized to false + """ + return get_boolean_dictionary(("Source 1", "Source 2", "Source 3")) + + +@pytest.fixture +def connection_with_test_data( + dev_db_connection: psycopg2.extensions.connection, +) -> psycopg2.extensions.connection: + """ + Insert test agencies and sources into test data, rolling back in case of error + :param dev_db_connection: + :return: + """ + try: + insert_test_agencies_and_sources(dev_db_connection.cursor()) + except psycopg2.errors.UniqueViolation: + dev_db_connection.rollback() + return dev_db_connection + + +def test_get_approved_data_sources( + connection_with_test_data: psycopg2.extensions.connection, + inserted_data_sources_found: dict[str, bool], +) -> None: + """ + Test that only one data source -- one set to approved -- is returned by 'get_approved_data_sources + :param connection_with_test_data: + :param inserted_data_sources_found: + :return: + """ + results = get_approved_data_sources(conn=connection_with_test_data) + + for result in results: + name = result[0] + if name in inserted_data_sources_found: + inserted_data_sources_found[name] = True + + assert inserted_data_sources_found["Source 1"] + assert not inserted_data_sources_found["Source 2"] + assert not inserted_data_sources_found["Source 3"] + + +def test_needs_identification( + connection_with_test_data: psycopg2.extensions.connection, + inserted_data_sources_found: dict[str, bool], +) -> None: + """ + Test only source marked as 'Needs Identification' is returned by 'needs_identification_data_sources' + :param connection_with_test_data: + :param inserted_data_sources_found: + :return: + """ + results = needs_identification_data_sources(conn=connection_with_test_data) + for result in results: + name = result[0] + if name in inserted_data_sources_found: + inserted_data_sources_found[name] = True + + assert not inserted_data_sources_found["Source 1"] + assert inserted_data_sources_found["Source 2"] + assert not inserted_data_sources_found["Source 
3"] + + +def test_data_source_by_id_results( + connection_with_test_data: psycopg2.extensions.connection, +) -> None: + """ + Test that data_source_by_id properly returns data for an inserted data source + -- and does not return one which was not inserted + :param connection_with_test_data: + :return: + """ + # Insert other data sources as well with different id + result = data_source_by_id_results( + data_source_id="SOURCE_UID_1", conn=connection_with_test_data + ) + assert result + # Check that a data source which was not inserted is not pulled + result = data_source_by_id_results( + data_source_id="SOURCE_UID_4", conn=connection_with_test_data + ) + assert not result + + +def test_data_source_by_id_query( + connection_with_test_data: psycopg2.extensions.connection, +) -> None: + """ + Test that data_source_by_id_query properly returns data for an inserted data source + -- and does not return one which was not inserted + :param connection_with_test_data: + :return: + """ + result = data_source_by_id_query( + data_source_id="SOURCE_UID_1", conn=connection_with_test_data + ) + assert has_expected_keys(result.keys(), DATA_SOURCES_ID_QUERY_RESULTS) + assert result["agency_name"] == "Agency A" + + +def test_data_sources_query( + connection_with_test_data: psycopg2.extensions.connection, + inserted_data_sources_found: dict[str, bool], +) -> None: + """ + Test that data sources query properly returns data for an inserted data source + marked as 'approved', and none others. 
+ :param connection_with_test_data: + :param inserted_data_sources_found: + :return: + """ + results = data_sources_query(connection_with_test_data) + # Check that results include expected keys + assert has_expected_keys(results[0].keys(), DATA_SOURCES_APPROVED_COLUMNS) + for result in results: + name = result["name"] + if name in inserted_data_sources_found: + inserted_data_sources_found[name] = True + + assert inserted_data_sources_found["Source 1"] + assert not inserted_data_sources_found["Source 2"] + assert not inserted_data_sources_found["Source 3"] + + +def test_get_data_sources_for_map( + connection_with_test_data: psycopg2.extensions.connection, + inserted_data_sources_found: dict[str, bool], +) -> None: + """ + Test that get_data_sources_for_map includes only the expected source + with the expected lat/lng coordinates + :param connection_with_test_data: + :param inserted_data_sources_found: + :return: + """ + results = get_data_sources_for_map(conn=connection_with_test_data) + for result in results: + name = result[1] + if name == "Source 1": + lat = result[8] + lng = result[9] + assert lat == 30 and lng == 20 + + if name in inserted_data_sources_found: + inserted_data_sources_found[name] = True + assert inserted_data_sources_found["Source 1"] + assert not inserted_data_sources_found["Source 2"] + assert not inserted_data_sources_found["Source 3"] From 16ebc6507ed81ffbdb6235302e5c872414217644 Mon Sep 17 00:00:00 2001 From: maxachis Date: Mon, 6 May 2024 16:57:55 -0400 Subject: [PATCH 011/127] Refactor pytest fixtures Moved the 'connection_with_test_data' fixture from 'test_data_source_queries.py' to 'fixtures.py' to improve organization and reusability.. 
--- tests/middleware/fixtures.py | 22 ++++++++++++++++++-- tests/middleware/test_data_source_queries.py | 19 +---------------- 2 files changed, 21 insertions(+), 20 deletions(-) diff --git a/tests/middleware/fixtures.py b/tests/middleware/fixtures.py index 28344e87..9402e216 100644 --- a/tests/middleware/fixtures.py +++ b/tests/middleware/fixtures.py @@ -4,8 +4,10 @@ import pytest from dotenv import load_dotenv +from tests.middleware.helper_functions import insert_test_agencies_and_sources -@pytest.fixture() + +@pytest.fixture def dev_db_connection() -> psycopg2.extensions.cursor: """ Sets up connection to development database @@ -32,7 +34,7 @@ def dev_db_connection() -> psycopg2.extensions.cursor: connection.close() -@pytest.fixture() +@pytest.fixture def db_cursor(dev_db_connection: psycopg2.extensions.connection) -> psycopg2.extensions.cursor: """ Create a cursor to execute database operations, with savepoint management. @@ -48,3 +50,19 @@ def db_cursor(dev_db_connection: psycopg2.extensions.connection) -> psycopg2.ext # Rollback to the savepoint to ignore commits within the test cur.execute("ROLLBACK TO SAVEPOINT test_savepoint") cur.close() + + +@pytest.fixture +def connection_with_test_data( + dev_db_connection: psycopg2.extensions.connection, +) -> psycopg2.extensions.connection: + """ + Insert test agencies and sources into test data, rolling back in case of error + :param dev_db_connection: + :return: + """ + try: + insert_test_agencies_and_sources(dev_db_connection.cursor()) + except psycopg2.errors.UniqueViolation: + dev_db_connection.rollback() + return dev_db_connection diff --git a/tests/middleware/test_data_source_queries.py b/tests/middleware/test_data_source_queries.py index c08543bb..4fab8e4b 100644 --- a/tests/middleware/test_data_source_queries.py +++ b/tests/middleware/test_data_source_queries.py @@ -12,11 +12,10 @@ get_data_sources_for_map, ) from tests.middleware.helper_functions import ( - insert_test_agencies_and_sources, has_expected_keys, 
get_boolean_dictionary, ) -from tests.middleware.fixtures import dev_db_connection, db_cursor +from tests.middleware.fixtures import connection_with_test_data, dev_db_connection @pytest.fixture @@ -29,22 +28,6 @@ def inserted_data_sources_found(): return get_boolean_dictionary(("Source 1", "Source 2", "Source 3")) -@pytest.fixture -def connection_with_test_data( - dev_db_connection: psycopg2.extensions.connection, -) -> psycopg2.extensions.connection: - """ - Insert test agencies and sources into test data, rolling back in case of error - :param dev_db_connection: - :return: - """ - try: - insert_test_agencies_and_sources(dev_db_connection.cursor()) - except psycopg2.errors.UniqueViolation: - dev_db_connection.rollback() - return dev_db_connection - - def test_get_approved_data_sources( connection_with_test_data: psycopg2.extensions.connection, inserted_data_sources_found: dict[str, bool], From 197d80d9b96af9e1a650f4f2f1e17051736da279 Mon Sep 17 00:00:00 2001 From: maxachis Date: Mon, 6 May 2024 17:01:31 -0400 Subject: [PATCH 012/127] Added unit tests for archives_queries middleware This commit introduces two new tests for the archives_get_results and archives_get_columns methods in the middleware layer. These tests validate the method returns and interaction with the database, enhancing the overall test coverage. 
--- tests/middleware/test_archives_queries.py | 49 +++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 tests/middleware/test_archives_queries.py diff --git a/tests/middleware/test_archives_queries.py b/tests/middleware/test_archives_queries.py new file mode 100644 index 00000000..07f3965f --- /dev/null +++ b/tests/middleware/test_archives_queries.py @@ -0,0 +1,49 @@ +import psycopg2 + +from middleware.archives_queries import archives_get_results, archives_get_query, ARCHIVES_GET_COLUMNS +from tests.middleware.helper_functions import insert_test_agencies_and_sources, has_expected_keys +from tests.middleware.fixtures import dev_db_connection, db_cursor, connection_with_test_data + +def test_archives_get_results( + dev_db_connection: psycopg2.extensions.connection, + db_cursor: psycopg2.extensions.cursor +) -> None: + """ + :param dev_db_connection: A connection to the development database. + :param db_cursor: A cursor object for executing database queries. + :return: This method does not return anything. + + This method tests the `archives_get_results` method by inserting a + new record into the `data_sources` table in the development database + and verifying that the number of results returned * by `archives_get_results` + increases by 1. 
+ """ + original_results = archives_get_results(dev_db_connection) + db_cursor.execute( + """ + INSERT INTO data_sources(airtable_uid, source_url, name, update_frequency, url_status) + VALUES (%s, %s, %s, %s, %s) + """, + ( + "fake_uid", + "https://www.fake_source_url.com", + "fake_name", + "Annually", + "unbroken", + ), + ) + new_results = archives_get_results(dev_db_connection) + assert len(new_results) == len(original_results) + 1 + + +def test_archives_get_columns(connection_with_test_data: psycopg2.extensions.connection) -> None: + """ + Test the archives_get_columns method, ensuring it properly returns an inserted source + :param connection_with_test_data: A connection object to the database with test data. + :return: None + """ + results = archives_get_query(conn=connection_with_test_data) + assert has_expected_keys(ARCHIVES_GET_COLUMNS, results[0].keys()) + for result in results: + if result["id"] == "SOURCE_UID_1": + return From 4a0efc8b478130f75e5062a72e9d8eddd0d8d029 Mon Sep 17 00:00:00 2001 From: maxachis Date: Mon, 6 May 2024 17:16:18 -0400 Subject: [PATCH 013/127] Added unit tests for login_queries middleware This commit introduces tests for the login_queries middleware, specifically for the login_results, create_session_token, and is_admin methods. These new tests ensure these methods interact properly with the database and improve the test coverage in our middleware layer. 
--- tests/middleware/test_login_queries.py | 53 ++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 tests/middleware/test_login_queries.py diff --git a/tests/middleware/test_login_queries.py b/tests/middleware/test_login_queries.py new file mode 100644 index 00000000..8c90cbd7 --- /dev/null +++ b/tests/middleware/test_login_queries.py @@ -0,0 +1,53 @@ +from unittest.mock import patch + +import psycopg2 + +from middleware.login_queries import login_results, create_session_token, token_results, is_admin +from tests.middleware.helper_functions import create_test_user +from tests.middleware.fixtures import dev_db_connection, db_cursor + +def test_login_query(db_cursor: psycopg2.extensions.cursor) -> None: + """ + Test the login query by comparing the password digest for a user retrieved from the database + with the password hash of a test user. + + :param db_cursor: The database cursor to execute the query. + :return: None + """ + test_user = create_test_user(db_cursor) + + user_data = login_results(db_cursor, "example@example.com") + + assert user_data["password_digest"] == test_user.password_hash + + +def test_create_session_token_results(db_cursor: psycopg2.extensions.cursor) -> None: + """ + Tests the `create_session_token_results` method properly + creates the expected session token in the database, + associated with the proper user. + + :param db_cursor: The psycopg2 database cursor object. 
+ :return: None + """ + test_user = create_test_user(db_cursor) + with patch("os.getenv", return_value="mysecretkey") as mock_getenv: + token = create_session_token(db_cursor, test_user.id, test_user.email) + new_token = token_results(db_cursor, token) + + assert new_token["email"] == test_user.email + + +def test_is_admin(db_cursor: psycopg2.extensions.cursor) -> None: + """ + Creates and inserts two users, one an admin and the other not + And then checks to see if the `is_admin` properly + identifies both + :param db_cursor: + """ + regular_user = create_test_user(db_cursor) + admin_user = create_test_user( + cursor=db_cursor, email="admin@admin.com", role="admin" + ) + assert is_admin(db_cursor, admin_user.email) + assert not is_admin(db_cursor, regular_user.email) From 171b35b4c2123fe5a0e6c1b977b00abc3ccf69bd Mon Sep 17 00:00:00 2001 From: maxachis Date: Mon, 6 May 2024 17:43:43 -0400 Subject: [PATCH 014/127] Refactor and enhance get_most_recent_quick_search_query_log function Added optional return type and function description for get_most_recent_quick_search_query_log in tests/middleware/helper_functions.py. Also, handling no-result situation by checking if the result is None. These enhancements improve function's readability and robustness. 
--- tests/middleware/helper_functions.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/tests/middleware/helper_functions.py b/tests/middleware/helper_functions.py index 7c5b0feb..5da5a75c 100644 --- a/tests/middleware/helper_functions.py +++ b/tests/middleware/helper_functions.py @@ -1,5 +1,8 @@ import uuid from collections import namedtuple +from typing import Optional + +import psycopg2.extensions TestTokenInsert = namedtuple("TestTokenInsert", ["id", "email", "token"]) TestUser = namedtuple("TestUser", ["id", "email", "password_hash"]) @@ -103,7 +106,19 @@ def create_test_user( ) -def get_most_recent_quick_search_query_log(cursor, search: str, location: str): +def get_most_recent_quick_search_query_log( + cursor: psycopg2.extensions.cursor, + search: str, + location: str +) -> Optional[QuickSearchQueryLogResult]: + """ + Retrieve the most recent quick search query log for a specific search and location. + + :param cursor: The Cursor object of the database connection. + :param search: The search query string. + :param location: The location string. + :return: A QuickSearchQueryLogResult object containing the result count and updated timestamp. + """ cursor.execute( """ SELECT RESULT_COUNT, UPDATED_AT FROM QUICK_SEARCH_QUERY_LOGS WHERE @@ -112,6 +127,8 @@ def get_most_recent_quick_search_query_log(cursor, search: str, location: str): (search, location), ) result = cursor.fetchone() + if result is None: + return result return QuickSearchQueryLogResult(result_count=result[0], updated_at=result[1]) From 9a3ecf5e022b0e701e5097bc2a94caebd3026e48 Mon Sep 17 00:00:00 2001 From: maxachis Date: Mon, 6 May 2024 17:44:04 -0400 Subject: [PATCH 015/127] Add unit tests for quick search query middleware Implemented unit tests for the quick search query methods, specifically `unaltered_search_query` and `quick_search_query`. These tests ensure that search query results and logging function properly and return expected outputs. 
This includes checks on the length of responses, expected keys, and datetime accuracy in logs. --- tests/middleware/test_quick_search_query.py | 80 +++++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 tests/middleware/test_quick_search_query.py diff --git a/tests/middleware/test_quick_search_query.py b/tests/middleware/test_quick_search_query.py new file mode 100644 index 00000000..cb2d93ac --- /dev/null +++ b/tests/middleware/test_quick_search_query.py @@ -0,0 +1,80 @@ +from datetime import datetime + +import psycopg2 +import pytz + +from middleware.quick_search_query import ( + unaltered_search_query, + quick_search_query, + QUICK_SEARCH_COLUMNS, +) +from tests.middleware.helper_functions import ( + insert_test_agencies_and_sources, + has_expected_keys, + get_most_recent_quick_search_query_log, +) +from tests.middleware.fixtures import ( + dev_db_connection, + db_cursor, + connection_with_test_data, +) + + +def test_unaltered_search_query(connection_with_test_data: psycopg2.extensions.connection) -> None: + """ + :param connection_with_test_data: A connection object that is connected to the test database containing the test data. + :return: None + Test the unaltered_search_query method properly returns only one result + """ + response = unaltered_search_query(connection_with_test_data.cursor(), search="Source 1", location="City A") + + assert len(response) == 1 + assert response[0][3] == "Type A" # Record Type + + + +def test_quick_search_query_logging(connection_with_test_data: psycopg2.extensions.connection) -> None: + """ + Tests that quick_search_query properly creates a log of the query + + :param connection_with_test_data: psycopg2.extensions.connection object representing the connection to the test database. 
+ :return: None + """ + # Get datetime of test + test_datetime = datetime.now(pytz.timezone("UTC")) + # Round to the nearest minute + test_datetime = test_datetime.replace(second=0, microsecond=0) + + results = quick_search_query( + search="Source 1", location="City A", conn=connection_with_test_data + ) + + cursor = connection_with_test_data.cursor() + # Test that query inserted into log + result = get_most_recent_quick_search_query_log(cursor, "Source 1", "City A") + assert result.result_count == 1 + # Round both datetimes to the nearest minute and compare, to ensure log was created during this test + result_datetime = result.updated_at.replace(second=0, microsecond=0) + assert result_datetime >= test_datetime + + +def test_quick_search_query_results(connection_with_test_data: psycopg2.extensions.connection) -> None: + """ + Test the `quick_search_query` method returns expected test data + + :param connection_with_test_data: The connection to the test data database. + :return: None + """ + # TODO: Something about the quick_search_query might be mucking up the savepoints. Address once you fix quick_search's logic issues + results = quick_search_query( + search="Source 1", location="City A", conn=connection_with_test_data + ) + # Test that results include expected keys + assert has_expected_keys(results["data"][0].keys(), QUICK_SEARCH_COLUMNS) + assert len(results["data"]) == 1 + assert results["data"][0]["record_type"] == "Type A" + # "Source 3" was listed as pending and shouldn't show up + results = quick_search_query( + search="Source 3", location="City C", conn=connection_with_test_data + ) + assert len(results["data"]) == 0 From 23bee8b69d6145c62c88d3d964b85266731f1fd6 Mon Sep 17 00:00:00 2001 From: maxachis Date: Mon, 6 May 2024 17:52:00 -0400 Subject: [PATCH 016/127] Add unit tests for user and reset token query methods Implemented unit tests for user and reset token query methods in the middleware. 
These tests validate the functionality of creating, checking, and deleting users and reset tokens, ensuring the correct interaction with the database. The tests also validate the return values and the changes made to the database state. --- tests/middleware/test_reset_token_queries.py | 51 ++++++++++++++++++++ tests/middleware/test_user_queries.py | 32 ++++++++++++ 2 files changed, 83 insertions(+) create mode 100644 tests/middleware/test_reset_token_queries.py create mode 100644 tests/middleware/test_user_queries.py diff --git a/tests/middleware/test_reset_token_queries.py b/tests/middleware/test_reset_token_queries.py new file mode 100644 index 00000000..d6bb04c0 --- /dev/null +++ b/tests/middleware/test_reset_token_queries.py @@ -0,0 +1,51 @@ +import uuid + +import psycopg2.extensions + +from middleware.reset_token_queries import check_reset_token, add_reset_token, delete_reset_token +from tests.middleware.helper_functions import create_reset_token, create_test_user, get_reset_tokens_for_email +from tests.middleware.fixtures import dev_db_connection, db_cursor + +def test_check_reset_token(db_cursor: psycopg2.extensions.cursor) -> None: + """ + Checks if a token existing in the database + is properly returned by check_reset_token + :param db_cursor: + :return: + """ + test_token_insert = create_reset_token(db_cursor) + + user_data = check_reset_token(db_cursor, test_token_insert.token) + assert test_token_insert.id == user_data["id"] + + +def test_add_reset_token(db_cursor: psycopg2.extensions.cursor) -> None: + """ + Checks if add_reset_token properly inserts a token + for the given email in the database + """ + user = create_test_user(db_cursor) + token = uuid.uuid4().hex + add_reset_token(db_cursor, user.email, token) + db_cursor.execute( + """ + SELECT id, token FROM RESET_TOKENS where email = %s + """, + (user.email,), + ) + results = db_cursor.fetchall() + assert len(results) == 1 + assert results[0][1] == token + + +def test_delete_reset_token(db_cursor: 
psycopg2.extensions.cursor) -> None: + """ + Checks if token previously inserted is deleted + by the delete_reset_token method + """ + reset_token_insert = create_reset_token(db_cursor) + results = get_reset_tokens_for_email(db_cursor, reset_token_insert) + assert len(results) == 1 + delete_reset_token(db_cursor, reset_token_insert.email, reset_token_insert.token) + results = get_reset_tokens_for_email(db_cursor, reset_token_insert) + assert len(results) == 0 diff --git a/tests/middleware/test_user_queries.py b/tests/middleware/test_user_queries.py new file mode 100644 index 00000000..7635a44c --- /dev/null +++ b/tests/middleware/test_user_queries.py @@ -0,0 +1,32 @@ +import psycopg2 + +from middleware.user_queries import user_post_results, user_check_email +from tests.middleware.helper_functions import create_test_user +from tests.middleware.fixtures import dev_db_connection, db_cursor + +def test_user_post_query(db_cursor: psycopg2.extensions.cursor) -> None: + """ + Test the `user_post_query` method, ensuring it properly returns the expected results + + :param db_cursor: The database cursor. + :return: None. + """ + user_post_results(db_cursor, "unit_test", "unit_test") + + db_cursor.execute(f"SELECT email FROM users WHERE email = 'unit_test'") + email_check = db_cursor.fetchone()[0] + + assert email_check == "unit_test" + + +def test_user_check_email(db_cursor: psycopg2.extensions.cursor) -> None: + """ + Verify the functionality of the `user_check_email` method. + + :param db_cursor: A `psycopg2.extensions.cursor` object representing the database cursor. 
+ :return: None + + """ + user = create_test_user(db_cursor) + user_data = user_check_email(db_cursor, user.email) + assert user_data["id"] == user.id From e1fff5ac503ebbca8d47b5cd1c9415a1d672eae5 Mon Sep 17 00:00:00 2001 From: maxachis Date: Mon, 6 May 2024 17:52:18 -0400 Subject: [PATCH 017/127] Remove redundant tests for middleware functions --- tests/test_middleware.py | 257 --------------------------------------- 1 file changed, 257 deletions(-) delete mode 100644 tests/test_middleware.py diff --git a/tests/test_middleware.py b/tests/test_middleware.py deleted file mode 100644 index e2f866ac..00000000 --- a/tests/test_middleware.py +++ /dev/null @@ -1,257 +0,0 @@ -""" -This module runs pytests on functions interacting directly with the database -- the middleware -""" - -import os -import uuid -from unittest.mock import patch - -import psycopg2 -import pytest -from dotenv import load_dotenv - -from middleware.archives_queries import archives_get_results -from middleware.data_source_queries import ( - get_approved_data_sources, - needs_identification_data_sources, - data_source_by_id_results, -) -from middleware.login_queries import ( - login_results, - create_session_token, - token_results, - is_admin, -) -from middleware.quick_search_query import unaltered_search_query, quick_search_query -from middleware.reset_token_queries import ( - check_reset_token, - add_reset_token, - delete_reset_token, -) -from middleware.user_queries import user_post_results, user_check_email -from tests.helper_test_middleware import ( - get_reset_tokens_for_email, - create_reset_token, - create_test_user, - insert_test_agencies_and_sources, -) - - -@pytest.fixture() -def dev_db_connection(): - """ - Sets up connection to development database - and creates a session that is rolled back after the test completes - to undo any operations performed during the test. 
- :return: - """ - load_dotenv() - dev_db_connection_string = os.getenv("DEV_DB_CONN_STRING") - connection = psycopg2.connect( - dev_db_connection_string, - keepalives=1, - keepalives_idle=30, - keepalives_interval=10, - keepalives_count=5, - ) - connection.autocommit = False - - yield connection - - # Rollback any changes made during the tests - connection.rollback() - - connection.close() - - -@pytest.fixture() -def db_cursor(dev_db_connection): - """ - Create a cursor to execute database operations, with savepoint management. - This is to ensure that changes made during the test can be rolled back. - """ - cur = dev_db_connection.cursor() - - # Start a savepoint - cur.execute("SAVEPOINT test_savepoint") - - yield cur - - # Rollback to the savepoint to ignore commits within the test - cur.execute("ROLLBACK TO SAVEPOINT test_savepoint") - cur.close() - - -def test_unaltered_search_query(db_cursor): - # TODO: Modify - response = unaltered_search_query(db_cursor, "calls", "chicago") - - assert response - - -def test_data_sources(dev_db_connection): - # TODO: Modify - response = get_approved_data_sources(conn=dev_db_connection) - - assert response - - -def test_needs_identification(dev_db_connection): - # TODO: Modify - response = needs_identification_data_sources(conn=dev_db_connection) - - assert response - - -def test_data_sources_approved(dev_db_connection): - # TODO: Adjust this test to insert approved and unapproved data sources prior. - # Ensure the results returned are only approved. 
- response = get_approved_data_sources(conn=dev_db_connection) - - assert ( - len([d for d in response if "https://joinstatepolice.ny.gov/15-mile-run" in d]) - == 0 - ) - - -def test_data_source_by_id_results(dev_db_connection): - # TODO: Modify; insert data sources with specific id, ensure those are the ONLY data sources returned - # Insert other data sources as well with different id - response = data_source_by_id_results( - data_source_id="rec00T2YLS2jU7Tbn", conn=dev_db_connection - ) - - assert response - - -def test_user_post_query(db_cursor): - user_post_results(db_cursor, "unit_test", "unit_test") - - db_cursor.execute(f"SELECT email FROM users WHERE email = 'unit_test'") - email_check = db_cursor.fetchone()[0] - - assert email_check == "unit_test" - - -def test_login_query(db_cursor): - test_user = create_test_user(db_cursor) - - user_data = login_results(db_cursor, "example@example.com") - - assert user_data["password_digest"] == test_user.password_hash - - -def test_create_session_token_results(db_cursor): - test_user = create_test_user(db_cursor) - with patch("os.getenv", return_value="mysecretkey") as mock_getenv: - token = create_session_token(db_cursor, test_user.id, test_user.email) - new_token = token_results(db_cursor, token) - - assert new_token["email"] == test_user.email - - -def test_is_admin(db_cursor): - """ - Creates and inserts two users, one an admin and the other not - And then checks to see if the `is_admin` properly - identifies both - :param db_cursor: - """ - regular_user = create_test_user(db_cursor) - admin_user = create_test_user( - cursor=db_cursor, email="admin@admin.com", role="admin" - ) - assert is_admin(db_cursor, admin_user.email) - assert not is_admin(db_cursor, regular_user.email) - - -def test_user_check_email(db_cursor): - user = create_test_user(db_cursor) - user_data = user_check_email(db_cursor, user.email) - assert user_data["id"] == user.id - - -def test_check_reset_token(db_cursor): - """ - Checks if a token 
existing in the database - is properly returned by check_reset_token - :param db_cursor: - :return: - """ - test_token_insert = create_reset_token(db_cursor) - - user_data = check_reset_token(db_cursor, test_token_insert.token) - assert test_token_insert.id == user_data["id"] - - -def test_add_reset_token(db_cursor): - """ - Checks if add_reset_token properly inserts a token - for the given email in the database - """ - user = create_test_user(db_cursor) - token = uuid.uuid4().hex - add_reset_token(db_cursor, user.email, token) - db_cursor.execute( - """ - SELECT id, token FROM RESET_TOKENS where email = %s - """, - (user.email,), - ) - results = db_cursor.fetchall() - assert len(results) == 1 - assert results[0][1] == token - - -def test_delete_reset_token(db_cursor): - """ - Checks if token previously inserted is deleted - by the delete_reset_token method - """ - reset_token_insert = create_reset_token(db_cursor) - results = get_reset_tokens_for_email(db_cursor, reset_token_insert) - assert len(results) == 1 - delete_reset_token(db_cursor, reset_token_insert.email, reset_token_insert.token) - results = get_reset_tokens_for_email(db_cursor, reset_token_insert) - assert len(results) == 0 - - -def test_archives_get_results(dev_db_connection, db_cursor): - """ - Checks if archives_get_results picks up an added valid datasource - """ - original_results = archives_get_results(dev_db_connection) - db_cursor.execute( - """ - INSERT INTO data_sources(airtable_uid, source_url, name, update_frequency, url_status) - VALUES (%s, %s, %s, %s, %s) - """, - ( - "fake_uid", - "https://www.fake_source_url.com", - "fake_name", - "Annually", - "unbroken", - ), - ) - new_results = archives_get_results(dev_db_connection) - assert len(new_results) == len(original_results) + 1 - - -def test_quicksearch_columns(dev_db_connection): - try: - insert_test_agencies_and_sources(dev_db_connection.cursor()) - except psycopg2.errors.UniqueViolation: - dev_db_connection.rollback() - # TODO: 
Something about the quick_search_query might be mucking up the savepoints. Address once you fix quick_search's logic issues - results = quick_search_query( - search="Source 1", location="City A", conn=dev_db_connection - ) - # "Source 3" was listed as pending and shouldn't show up - assert len(results['data']) == 1 - results = quick_search_query( - search="Source 3", location="City C", conn=dev_db_connection - ) - assert len(results['data']) == 0 - - # Test that query inserted into log From 6aa21701f8f5282df91815e33e30dd611c3d21ec Mon Sep 17 00:00:00 2001 From: maxachis Date: Mon, 6 May 2024 17:55:41 -0400 Subject: [PATCH 018/127] Remove now-redundant tests --- app_test.py | 197 ---------------------------------------------------- 1 file changed, 197 deletions(-) diff --git a/app_test.py b/app_test.py index def9aead..13ce4840 100644 --- a/app_test.py +++ b/app_test.py @@ -139,194 +139,7 @@ def session(): connection.close() -# unit tests -def test_unaltered_search_query(session): - response = unaltered_search_query(session.cursor(), "calls", "chicago") - assert response - - -def test_data_sources(session): - response = get_approved_data_sources(conn=session) - - assert response - - -def test_needs_identification(session): - response = needs_identification_data_sources(conn=session) - - assert response - - -def test_data_sources_approved(session): - response = get_approved_data_sources(conn=session) - - assert ( - len([d for d in response if "https://joinstatepolice.ny.gov/15-mile-run" in d]) - == 0 - ) - - -def test_data_source_by_id_results(session): - response = data_source_by_id_results( - data_source_id="rec00T2YLS2jU7Tbn", conn=session - ) - - assert response - - -def test_data_source_by_id_approved(session): - response = data_source_by_id_results( - data_source_id="rec013MFNfBnrTpZj", conn=session - ) - - assert not response - - -def test_data_sources(session): - response = get_data_sources_for_map(conn=session) - - assert response - - -def 
test_user_post_query(session): - curs = session.cursor() - user_post_results(curs, "unit_test", "unit_test") - - email_check = curs.execute( - f"SELECT email FROM users WHERE email = 'unit_test'" - ).fetchone()[0] - - assert email_check == "unit_test" - - -def test_login_query(session): - curs = session.cursor() - user_data = login_results(curs, "test") - - assert user_data["password_digest"] - - -def test_create_session_token_results(session): - curs = session.cursor() - token = create_session_token(curs, 1, "test") - - curs = session.cursor() - new_token = token_results(curs, token) - - assert new_token["email"] - - -def test_is_admin(session): - curs = session.cursor() - admin = is_admin(curs, "mbodenator@gmail.com") - - assert admin - - -def test_not_admin(session): - curs = session.cursor() - admin = is_admin(curs, "test") - - assert not admin - - -def test_user_check_email(session): - curs = session.cursor() - user_data = user_check_email(curs, "test") - print(user_data) - - assert user_data["id"] - - -def test_check_reset_token(session): - curs = session.cursor() - reset_token = check_reset_token(curs, "test") - print(reset_token) - - assert reset_token["id"] - - -def test_add_reset_token(session): - curs = session.cursor() - add_reset_token(curs, "unit_test", "unit_test") - - email_check = curs.execute( - f"SELECT email FROM reset_tokens WHERE email = 'unit_test'" - ).fetchone()[0] - - assert email_check == "unit_test" - - -def test_delete_reset_token(session): - curs = session.cursor() - delete_reset_token(curs, "test", "test") - - email_check = curs.execute( - f"SELECT email FROM reset_tokens WHERE email = 'test'" - ).fetchone() - - assert not email_check - - -def test_archives_get_results(session): - response = archives_get_results(conn=session) - - assert response - - -def test_archives_put_broken_as_of(session): - archives_put_broken_as_of_results( - id="rec00T2YLS2jU7Tbn", - broken_as_of=DATETIME_STRING, - last_cached=DATETIME_STRING, - conn=session, 
- ) - curs = session.cursor() - broken_check, last_check = curs.execute( - f"SELECT broken_source_url_as_of, last_cached FROM data_sources WHERE airtable_uid = 'rec00T2YLS2jU7Tbn'" - ).fetchone() - - assert broken_check == DATETIME_STRING - assert last_check == DATETIME_STRING - - -def test_archives_put_last_cached(session): - archives_put_last_cached_results( - id="recUGIoPQbJ6laBmr", last_cached=DATETIME_STRING, conn=session - ) - curs = session.cursor() - last_check = curs.execute( - f"SELECT last_cached FROM data_sources WHERE airtable_uid = 'recUGIoPQbJ6laBmr'" - ).fetchone()[0] - - assert last_check == DATETIME_STRING - - -# quick-search -def test_quicksearch_columns(): - response = quick_search_query( - search="", location="", test_query_results=QUICK_SEARCH_QUERY_RESULTS - ) - - assert not set(QUICK_SEARCH_COLUMNS).difference(response["data"][0].keys()) - assert type(response["data"][1]["record_format"]) == list - - -# data-sources -def test_data_sources_columns(): - response = data_sources_query(conn={}, test_query_results=DATA_SOURCE_QUERY_RESULTS) - - assert not set(DATA_SOURCES_APPROVED_COLUMNS).difference(response[0].keys()) - - -def test_data_source_by_id_columns(): - response = data_source_by_id_query("", DATA_SOURCES_ID_QUERY_RESULTS, {}) - - assert not set(DATA_SOURCES_APPROVED_COLUMNS).difference(response.keys()) - - -# user # def test_post_user(client): @@ -341,16 +154,6 @@ def test_data_source_by_id_columns(): # assert response.json["data"] == "Successfully added user" - -# archives -def test_archives_get_columns(): - response = archives_get_query( - test_query_results=ARCHIVES_GET_QUERY_RESULTS, conn={} - ) - - assert not set(ARCHIVES_GET_COLUMNS).difference(response[0].keys()) - - # def test_put_archives(client): # current_datetime = datetime.datetime.now() # datetime_string = current_datetime.strftime("%Y-%m-%d %H:%M:%S") From 41deff051dd2eba3cd31e2b97425e286685944e7 Mon Sep 17 00:00:00 2001 From: maxachis Date: Mon, 6 May 2024 17:55:57 -0400 
Subject: [PATCH 019/127] Update README with revised testing instructions The README has been updated to reflect changes in the project's testing structure. The new instructions include details on where unit and integration tests for the API are located, and the need to set up an environment variable for tests requiring database access. The instructions on how to run the tests, using the `pytest` command, and their interaction with CI/CD pipelines, remain unchanged. --- README.md | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index cc0837c7..0a3fe406 100644 --- a/README.md +++ b/README.md @@ -102,9 +102,18 @@ npm run dev ## Testing -All unit tests for the API live in the app_test.py file. It is best practice to add tests for any new feature to ensure it is working as expected and that any future code changes do not affect its functionality. All tests will be automatically run when a PR into dev is opened in order to ensure any changes do not break current app functionality. If a test fails, it is a sign that the new code should be checked or possibly that the test needs to be updated. Tests are currently run with pytest and can be run locally with the `pytest` command. +### Location +All unit and integration tests for the API live in the `tests` folder -Endpoints are structured for simplified testing and debugging. Code for interacting with the database is contained in a function suffixed with "_results" and tested against a local sqlite database instance. Limited rows (stored in the DATA_SOURCES_ROWS and AGENCIES_ROWS variables in app_test_data.py) are inserted into this local instance on setup, you may need to add additional rows to test other functionality fully. +It is best practice to add tests for any new feature to ensure it is working as expected and that any future code changes do not affect its functionality. 
All tests will be automatically run when a PR into dev is opened in order to ensure any changes do not break current app functionality. If a test fails, it is a sign that the new code should be checked or possibly that the test needs to be updated. + + +### How to run tests +Some tests involve interfacing with the development database, which copies the production database's data and schema daily. + +To ensure such tests properly connect to the database, create or amend an `.env` file in the root directory of the project with the environment variable `DEV_DB_CONN_STRING`. Provide as a value a connection string giving you access to the `data_sources_app` user. If you do not have this connection string, DM a database administrator. + +Tests are currently run with pytest and can be run locally with the `pytest` command. Remaining API code is stored in functions suffixed with "_query" tested against static query results stored in app_test_data.py. Tests for hitting the endpoint directly should be included in regular_api_checks.py, makes sure to add the test function name in the list at the bottom so it is included in the Github actions run every 15 minutes. From e3e1e250c09126a396d9ad1d2c9cb358a9589ea4 Mon Sep 17 00:00:00 2001 From: maxachis Date: Mon, 6 May 2024 18:04:09 -0400 Subject: [PATCH 020/127] Rename Python test workflow and add configuration The Python test workflow file has been renamed from python_tests.yml to test_api.yml and fully configured to run tests using GitHub Actions. It's been set up to specifically test the project's API on pull requests with environment variables for secrets and the pytest command to run the tests. It also upgrades pip and downloads dependencies.
--- .github/workflows/python_tests.yml | 7 ------- .github/workflows/test_api.yml | 24 ++++++++++++++++++++++++ 2 files changed, 24 insertions(+), 7 deletions(-) delete mode 100644 .github/workflows/python_tests.yml create mode 100644 .github/workflows/test_api.yml diff --git a/.github/workflows/python_tests.yml b/.github/workflows/python_tests.yml deleted file mode 100644 index 5979688e..00000000 --- a/.github/workflows/python_tests.yml +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - -# TODO: Include command to `python -m spacy download en_core_web_sm` \ No newline at end of file diff --git a/.github/workflows/test_api.yml b/.github/workflows/test_api.yml new file mode 100644 index 00000000..3da6d8d8 --- /dev/null +++ b/.github/workflows/test_api.yml @@ -0,0 +1,24 @@ +name: Test API using Pytest + +on: + pull_request: + +jobs: + test_api: + env: + SECRET_KEY: ${{ secrets.SECRET_KEY }} + DEV_DB_CONN_STRING: ${{secrets.DEV_DB_CONN_STRING}} + name: Test API + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v4 + with: + python-version: '3.11' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + python -m spacy download en_core_web_sm + pip install pytest pytest-cov + pytest tests --doctest-modules --junitxml=junit/test-results.xml --cov=com --cov-report=xml --cov-report=html From b66aab9103c8e17bf1d154e065ad631804539b8b Mon Sep 17 00:00:00 2001 From: maxachis Date: Mon, 6 May 2024 18:19:13 -0400 Subject: [PATCH 021/127] Update test data source imports in middleware tests The test imports in test_data_source_queries.py have been updated. This revision includes modifying the import path for app_test_data and removing a redundant import from middleware.fixtures. 
--- tests/middleware/test_data_source_queries.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/middleware/test_data_source_queries.py b/tests/middleware/test_data_source_queries.py index 4fab8e4b..5d2b0849 100644 --- a/tests/middleware/test_data_source_queries.py +++ b/tests/middleware/test_data_source_queries.py @@ -1,7 +1,7 @@ import psycopg2 import pytest -from app_test_data import DATA_SOURCES_ID_QUERY_RESULTS +from tests.resources.app_test_data import DATA_SOURCES_ID_QUERY_RESULTS from middleware.data_source_queries import ( get_approved_data_sources, needs_identification_data_sources, @@ -15,7 +15,7 @@ has_expected_keys, get_boolean_dictionary, ) -from tests.middleware.fixtures import connection_with_test_data, dev_db_connection +from tests.middleware.fixtures import connection_with_test_data @pytest.fixture From f057548b337b1b6ba1cf36708bd141654f21d445 Mon Sep 17 00:00:00 2001 From: maxachis Date: Mon, 6 May 2024 18:19:24 -0400 Subject: [PATCH 022/127] Refactor test files and clean up imports The test files have been relocated to tests/resources directory and their respective import paths have been updated accordingly. Additionally, unused imports have been removed from the middleware tests, thus enhancing code readability and maintenance. 
--- tests/resources/__init__.py | 0 app_test.py => tests/resources/app_test.py | 50 ++----------------- .../resources/app_test_data.py | 0 .../resources/do_db_ddl_clean.sql | 0 4 files changed, 3 insertions(+), 47 deletions(-) create mode 100644 tests/resources/__init__.py rename app_test.py => tests/resources/app_test.py (82%) rename app_test_data.py => tests/resources/app_test_data.py (100%) rename do_db_ddl_clean.sql => tests/resources/do_db_ddl_clean.sql (100%) diff --git a/tests/resources/__init__.py b/tests/resources/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/app_test.py b/tests/resources/app_test.py similarity index 82% rename from app_test.py rename to tests/resources/app_test.py index 13ce4840..3ff9374e 100644 --- a/app_test.py +++ b/tests/resources/app_test.py @@ -1,58 +1,14 @@ -import pytest + import os from app import create_app -from flask_restful import Api -from middleware.quick_search_query import ( - unaltered_search_query, - quick_search_query, - QUICK_SEARCH_COLUMNS, -) -from middleware.data_source_queries import ( - data_sources_query, - needs_identification_data_sources, - data_source_by_id_query, - data_source_by_id_results, - DATA_SOURCES_APPROVED_COLUMNS, - get_approved_data_sources, - get_data_sources_for_map, -) -from middleware.user_queries import ( - user_post_results, - user_check_email, -) -from middleware.login_queries import ( - login_results, - create_session_token, - token_results, - is_admin, -) -from middleware.archives_queries import ( - archives_get_results, - archives_get_query, - archives_put_broken_as_of_results, - archives_put_last_cached_results, - ARCHIVES_GET_COLUMNS, -) -from middleware.reset_token_queries import ( - check_reset_token, - add_reset_token, - delete_reset_token, -) -from app_test_data import ( +from tests.resources.app_test_data import ( DATA_SOURCES_ROWS, - DATA_SOURCE_QUERY_RESULTS, - QUICK_SEARCH_QUERY_RESULTS, AGENCIES_ROWS, - DATA_SOURCES_ID_QUERY_RESULTS, - 
ARCHIVES_GET_QUERY_RESULTS, + ) import datetime import sqlite3 import pytest -from resources.ApiKey import ( - ApiKey, -) # Adjust the import according to your project structure -from werkzeug.security import check_password_hash from unittest.mock import patch, MagicMock api_key = os.getenv("VUE_APP_PDAP_API_KEY") diff --git a/app_test_data.py b/tests/resources/app_test_data.py similarity index 100% rename from app_test_data.py rename to tests/resources/app_test_data.py diff --git a/do_db_ddl_clean.sql b/tests/resources/do_db_ddl_clean.sql similarity index 100% rename from do_db_ddl_clean.sql rename to tests/resources/do_db_ddl_clean.sql From 9767d9ecbdafa8c6678e9fbc365a7a5ba7c5ea89 Mon Sep 17 00:00:00 2001 From: maxachis Date: Wed, 8 May 2024 12:44:37 -0400 Subject: [PATCH 023/127] Improve code readability and ignore max line exceed warning Optimized code for better readability through improving docstrings and line break formatting. Added flake8 argument to ignore E501 (line too long) to avoid warnings related to line exceeding maximum length. 
--- .github/workflows/python_checks.yml | 1 + tests/middleware/fixtures.py | 17 +++++- tests/middleware/helper_functions.py | 77 +++++++++++++++++++--------- 3 files changed, 70 insertions(+), 25 deletions(-) diff --git a/.github/workflows/python_checks.yml b/.github/workflows/python_checks.yml index 4afec55f..02bd451b 100644 --- a/.github/workflows/python_checks.yml +++ b/.github/workflows/python_checks.yml @@ -18,4 +18,5 @@ jobs: uses: reviewdog/action-flake8@v3 with: github_token: ${{ secrets.GITHUB_TOKEN }} + flake8_args: --ignore E501 # Does not check for max line exceed level: warning \ No newline at end of file diff --git a/tests/middleware/fixtures.py b/tests/middleware/fixtures.py index 9402e216..da2b129a 100644 --- a/tests/middleware/fixtures.py +++ b/tests/middleware/fixtures.py @@ -1,3 +1,7 @@ +""" +This module contains pytest fixtures employed by middleware tests +""" + import os import psycopg2 @@ -10,6 +14,8 @@ @pytest.fixture def dev_db_connection() -> psycopg2.extensions.cursor: """ + Create reversible connection to dev database. + Sets up connection to development database and creates a session that is rolled back after the test completes to undo any operations performed during the test. @@ -35,8 +41,12 @@ def dev_db_connection() -> psycopg2.extensions.cursor: @pytest.fixture -def db_cursor(dev_db_connection: psycopg2.extensions.connection) -> psycopg2.extensions.cursor: +def db_cursor( + dev_db_connection: psycopg2.extensions.connection, +) -> psycopg2.extensions.cursor: """ + Create cursor for reversible database operations. + Create a cursor to execute database operations, with savepoint management. This is to ensure that changes made during the test can be rolled back. """ @@ -57,7 +67,10 @@ def connection_with_test_data( dev_db_connection: psycopg2.extensions.connection, ) -> psycopg2.extensions.connection: """ - Insert test agencies and sources into test data, rolling back in case of error + Insert test agencies and sources into test data. 
+ + Will roll back in case of error. + :param dev_db_connection: :return: """ diff --git a/tests/middleware/helper_functions.py b/tests/middleware/helper_functions.py index 5da5a75c..f3e8c530 100644 --- a/tests/middleware/helper_functions.py +++ b/tests/middleware/helper_functions.py @@ -1,3 +1,7 @@ +""" +This module contains helper functions used by middleware pytests. +""" + import uuid from collections import namedtuple from typing import Optional @@ -8,8 +12,9 @@ TestUser = namedtuple("TestUser", ["id", "email", "password_hash"]) -def insert_test_agencies_and_sources(cursor): +def insert_test_agencies_and_sources(cursor: psycopg2.extensions.cursor) -> None: """ + Insert test agencies and sources into database. :param cursor: :return: @@ -28,17 +33,26 @@ def insert_test_agencies_and_sources(cursor): URL_STATUS ) VALUES - ('SOURCE_UID_1','Source 1','Description of src1','Type A','http://src1.com','approved','available'), - ('SOURCE_UID_2','Source 2','Description of src2','Type B','http://src2.com','needs identification','available'), - ('SOURCE_UID_3','Source 3', 'Description of src3', 'Type C', 'http://src3.com', 'pending', 'available'); - - INSERT INTO public.agencies (airtable_uid, name, municipality, state_iso, county_name, count_data_sources, lat, lng) + ('SOURCE_UID_1','Source 1','Description of src1', + 'Type A','http://src1.com','approved','available'), + ('SOURCE_UID_2','Source 2','Description of src2', + 'Type B','http://src2.com','needs identification','available'), + ('SOURCE_UID_3','Source 3', 'Description of src3', + 'Type C', 'http://src3.com', 'pending', 'available'); + + INSERT INTO public.agencies + (airtable_uid, name, municipality, state_iso, + county_name, count_data_sources, lat, lng) VALUES - ('Agency_UID_1', 'Agency A', 'City A', 'CA', 'County X', 3, 30, 20), - ('Agency_UID_2', 'Agency B', 'City B', 'NY', 'County Y', 2, 40, 50), - ('Agency_UID_3', 'Agency C', 'City C', 'TX', 'County Z', 1, 90, 60); - - INSERT INTO public.agency_source_link 
(airtable_uid, agency_described_linked_uid) + ('Agency_UID_1', 'Agency A', 'City A', + 'CA', 'County X', 3, 30, 20), + ('Agency_UID_2', 'Agency B', 'City B', + 'NY', 'County Y', 2, 40, 50), + ('Agency_UID_3', 'Agency C', 'City C', + 'TX', 'County Z', 1, 90, 60); + + INSERT INTO public.agency_source_link + (airtable_uid, agency_described_linked_uid) VALUES ('SOURCE_UID_1', 'Agency_UID_1'), ('SOURCE_UID_2', 'Agency_UID_2'), @@ -47,7 +61,16 @@ def insert_test_agencies_and_sources(cursor): ) -def get_reset_tokens_for_email(db_cursor, reset_token_insert): +def get_reset_tokens_for_email( + db_cursor: psycopg2.extensions.cursor, reset_token_insert: TestTokenInsert +) -> tuple: + """ + Get all reset tokens associated with an email. + + :param db_cursor: + :param reset_token_insert: + :return: + """ db_cursor.execute( """ SELECT email from RESET_TOKENS where email = %s @@ -58,12 +81,18 @@ def get_reset_tokens_for_email(db_cursor, reset_token_insert): return results -def create_reset_token(cursor) -> TestTokenInsert: +def create_reset_token(cursor: psycopg2.extensions.cursor) -> TestTokenInsert: + """ + Create a test user and associated reset token. + + :param cursor: + :return: + """ user = create_test_user(cursor) token = uuid.uuid4().hex cursor.execute( """ - INSERT INTO reset_tokens(email, token) + INSERT INTO reset_tokens(email, token) VALUES (%s, %s) RETURNING id """, @@ -81,7 +110,8 @@ def create_test_user( role=None, ) -> TestUser: """ - Creates test user and returns the id of the test user + Create test user and return the id of the test user. + :param cursor: :return: user id """ @@ -107,17 +137,16 @@ def create_test_user( def get_most_recent_quick_search_query_log( - cursor: psycopg2.extensions.cursor, - search: str, - location: str + cursor: psycopg2.extensions.cursor, search: str, location: str ) -> Optional[QuickSearchQueryLogResult]: """ - Retrieve the most recent quick search query log for a specific search and location. 
+ Retrieve most recent quick search query log for a search and location. :param cursor: The Cursor object of the database connection. :param search: The search query string. :param location: The location string. - :return: A QuickSearchQueryLogResult object containing the result count and updated timestamp. + :return: A QuickSearchQueryLogResult object + containing the result count and updated timestamp. """ cursor.execute( """ @@ -134,7 +163,8 @@ def get_most_recent_quick_search_query_log( def has_expected_keys(result_keys: list, expected_keys: list) -> bool: """ - Check that given result includes expected keys + Check that given result includes expected keys. + :param result: :param expected_keys: :return: True if has expected keys, false otherwise @@ -144,11 +174,12 @@ def has_expected_keys(result_keys: list, expected_keys: list) -> bool: def get_boolean_dictionary(keys: tuple) -> dict: """ - Creates dictionary of booleans, all set to false + Creates dictionary of booleans, all set to false. + :param keys: :return: dictionary of booleans """ d = {} for key in keys: d[key] = False - return d \ No newline at end of file + return d From 66e13d38b0f532c41f97be047ea0602a1499976a Mon Sep 17 00:00:00 2001 From: maxachis Date: Wed, 8 May 2024 12:52:01 -0400 Subject: [PATCH 024/127] Update flake8 args and refine code formatting Expanded the ignored warnings in flake8 configuration to include trailing whitespaces, increasing flexibility in code style. Simultaneously, improved clarity and readability of test modules by refining docstrings and line alignment. 
--- .github/workflows/python_checks.yml | 2 +- tests/middleware/fixtures.py | 4 +--- tests/middleware/helper_functions.py | 21 +++++++++------------ 3 files changed, 11 insertions(+), 16 deletions(-) diff --git a/.github/workflows/python_checks.yml b/.github/workflows/python_checks.yml index 02bd451b..773a0072 100644 --- a/.github/workflows/python_checks.yml +++ b/.github/workflows/python_checks.yml @@ -18,5 +18,5 @@ jobs: uses: reviewdog/action-flake8@v3 with: github_token: ${{ secrets.GITHUB_TOKEN }} - flake8_args: --ignore E501 # Does not check for max line exceed + flake8_args: --ignore E501, W291 # Does not check for max line exceed or trailing whitespace level: warning \ No newline at end of file diff --git a/tests/middleware/fixtures.py b/tests/middleware/fixtures.py index da2b129a..a9f79fc4 100644 --- a/tests/middleware/fixtures.py +++ b/tests/middleware/fixtures.py @@ -1,6 +1,4 @@ -""" -This module contains pytest fixtures employed by middleware tests -""" +"""This module contains pytest fixtures employed by middleware tests.""" import os diff --git a/tests/middleware/helper_functions.py b/tests/middleware/helper_functions.py index f3e8c530..e1990466 100644 --- a/tests/middleware/helper_functions.py +++ b/tests/middleware/helper_functions.py @@ -1,6 +1,4 @@ -""" -This module contains helper functions used by middleware pytests. 
-""" +"""This module contains helper functions used by middleware pytests.""" import uuid from collections import namedtuple @@ -19,7 +17,6 @@ def insert_test_agencies_and_sources(cursor: psycopg2.extensions.cursor) -> None :param cursor: :return: """ - cursor.execute( """ INSERT INTO @@ -37,23 +34,23 @@ def insert_test_agencies_and_sources(cursor: psycopg2.extensions.cursor) -> None 'Type A','http://src1.com','approved','available'), ('SOURCE_UID_2','Source 2','Description of src2', 'Type B','http://src2.com','needs identification','available'), - ('SOURCE_UID_3','Source 3', 'Description of src3', + ('SOURCE_UID_3','Source 3', 'Description of src3', 'Type C', 'http://src3.com', 'pending', 'available'); - INSERT INTO public.agencies - (airtable_uid, name, municipality, state_iso, + INSERT INTO public.agencies + (airtable_uid, name, municipality, state_iso, county_name, count_data_sources, lat, lng) VALUES - ('Agency_UID_1', 'Agency A', 'City A', + ('Agency_UID_1', 'Agency A', 'City A', 'CA', 'County X', 3, 30, 20), - ('Agency_UID_2', 'Agency B', 'City B', + ('Agency_UID_2', 'Agency B', 'City B', 'NY', 'County Y', 2, 40, 50), - ('Agency_UID_3', 'Agency C', 'City C', + ('Agency_UID_3', 'Agency C', 'City C', 'TX', 'County Z', 1, 90, 60); - INSERT INTO public.agency_source_link + INSERT INTO public.agency_source_link (airtable_uid, agency_described_linked_uid) - VALUES + VALUES ('SOURCE_UID_1', 'Agency_UID_1'), ('SOURCE_UID_2', 'Agency_UID_2'), ('SOURCE_UID_3', 'Agency_UID_3'); From a83bfc3466d2d31b5bb0bc4c152c07aac15593be Mon Sep 17 00:00:00 2001 From: maxachis Date: Wed, 8 May 2024 12:54:28 -0400 Subject: [PATCH 025/127] Update flake8 args and refine code formatting Expanded the ignored warnings in flake8 configuration to include trailing whitespaces, increasing flexibility in code style. Simultaneously, improved clarity and readability of test modules by refining docstrings and line alignment. 
--- .github/workflows/python_checks.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/python_checks.yml b/.github/workflows/python_checks.yml index 773a0072..5062258a 100644 --- a/.github/workflows/python_checks.yml +++ b/.github/workflows/python_checks.yml @@ -18,5 +18,5 @@ jobs: uses: reviewdog/action-flake8@v3 with: github_token: ${{ secrets.GITHUB_TOKEN }} - flake8_args: --ignore E501, W291 # Does not check for max line exceed or trailing whitespace + flake8_args: --ignore E501,W291 # Does not check for max line exceed or trailing whitespace level: warning \ No newline at end of file From 68204124eea3c23b50095b9d6e89bb5419973932 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 11 May 2024 18:57:03 -0400 Subject: [PATCH 026/127] Added custom exception for psycopg2 connection initialization A custom exception, DatabaseInitializationError, has been added for scenarios where psycopg2 connection initialization fails. The exception handling in the `initialize_psycopg2_connection` function and usage in other places has been adapted accordingly. Furthermore, skeleton tests for successful and unsuccessful psycopg2 connection initialization have been added. --- middleware/initialize_psycopg2_connection.py | 19 +++++++++++++------ resources/QuickSearch.py | 7 +++++-- .../test_initialize_psycopg2_connection.py | 17 +++++++++++++++++ 3 files changed, 35 insertions(+), 8 deletions(-) create mode 100644 tests/middleware/test_initialize_psycopg2_connection.py diff --git a/middleware/initialize_psycopg2_connection.py b/middleware/initialize_psycopg2_connection.py index 6b6e1966..f94c7e81 100644 --- a/middleware/initialize_psycopg2_connection.py +++ b/middleware/initialize_psycopg2_connection.py @@ -4,8 +4,18 @@ from typing import Union, Dict, List +class DatabaseInitializationError(Exception): + """ + Custom Exception to be raised when psycopg2 connection initialization fails. 
+ """ + + def __init__(self, message="Failed to initialize psycopg2 connection."): + self.message = message + super().__init__(self.message) + + def initialize_psycopg2_connection() -> ( - Union[PgConnection, Dict[str, Union[int, List]]] + PgConnection ): """ Initializes a connection to a PostgreSQL database using psycopg2 with connection parameters @@ -27,8 +37,5 @@ def initialize_psycopg2_connection() -> ( keepalives_count=5, ) - except: - print("Error while initializing the DigitalOcean client with psycopg2.") - data_sources = {"count": 0, "data": []} - - return data_sources + except psycopg2.OperationalError as e: + raise DatabaseInitializationError(e) from e diff --git a/resources/QuickSearch.py b/resources/QuickSearch.py index ee85f0ac..69e600f6 100644 --- a/resources/QuickSearch.py +++ b/resources/QuickSearch.py @@ -3,7 +3,10 @@ import requests import json import os -from middleware.initialize_psycopg2_connection import initialize_psycopg2_connection +from middleware.initialize_psycopg2_connection import ( + initialize_psycopg2_connection, + DatabaseInitializationError, +) from flask import request from typing import Dict, Any @@ -60,7 +63,7 @@ def get(self, search: str, location: str) -> Dict[str, Any]: "data": data_sources, } - except Exception as e: + except DatabaseInitializationError as e: self.psycopg2_connection.rollback() print(str(e)) webhook_url = os.getenv("WEBHOOK_URL") diff --git a/tests/middleware/test_initialize_psycopg2_connection.py b/tests/middleware/test_initialize_psycopg2_connection.py new file mode 100644 index 00000000..53a0fa95 --- /dev/null +++ b/tests/middleware/test_initialize_psycopg2_connection.py @@ -0,0 +1,17 @@ + +def test_initialize_psycopg2_connection_success(): + """ + Test that function properly initializes psycopg2 connection + and returns valid connection string, + to be tested by executing a simple select query + :return: + """ + pass + +def test_initialize_psycopg2_connection_failure(): + """ + Check that function 
raises DatabaseInitializationError if + psycopg2.OperationalError occurs. :return: """ pass \ No newline at end of file From 12d00c53d14fd2e929e57e9488d52531a05eb221 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 11 May 2024 19:17:03 -0400 Subject: [PATCH 027/127] Handle UserNotFoundError in middleware and implement custom exception Updated the middleware to handle the UserNotFoundError and implemented the custom exception for when a user is not found in the login_queries script. Also refined the return type of is_admin function to only boolean, managing the missing user situation with the new custom exception. --- middleware/login_queries.py | 18 ++++++++++++++---- middleware/security.py | 7 +++++-- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/middleware/login_queries.py b/middleware/login_queries.py index 77c41d57..31f35660 100644 --- a/middleware/login_queries.py +++ b/middleware/login_queries.py @@ -5,6 +5,16 @@ from psycopg2.extensions import cursor as PgCursor +class UserNotFoundError(Exception): + """Exception raised when no user with the given email exists.""" + + def __init__(self, email, message=""): + if message == "": + message = f"User with email {email} not found" + self.email = email + self.message = message.format(email=self.email) + super().__init__(self.message) + def login_results(cursor: PgCursor, email: str) -> Dict[str, Union[int, str]]: """ Retrieves user data by email. @@ -28,7 +38,7 @@ def login_results(cursor: PgCursor, email: str) -> Dict[str, Union[int, str]]: return {"error": "no match"} -def is_admin(cursor: PgCursor, email: str) -> Union[bool, Dict[str, str]]: +def is_admin(cursor: PgCursor, email: str) -> bool: """ Checks if a user has an admin role.
@@ -38,14 +48,14 @@ def is_admin(cursor: PgCursor, email: str) -> Union[bool, Dict[str, str]]:
     """
     cursor.execute(f"select role from users where email = '{email}'")
     results = cursor.fetchall()
-    if len(results) > 0:
+    try:
         role = results[0][0]
 
         if role == "admin":
             return True
         return False
-    else:
-        return {"error": "no match"}
+    except IndexError:
+        raise UserNotFoundError(email)
 
 
 def create_session_token(cursor: PgCursor, user_id: int, email: str) -> str:
diff --git a/middleware/security.py b/middleware/security.py
index 3aa0e430..0cebc73b 100644
--- a/middleware/security.py
+++ b/middleware/security.py
@@ -3,7 +3,7 @@
 from flask import request, jsonify
 from middleware.initialize_psycopg2_connection import initialize_psycopg2_connection
 from datetime import datetime as dt
-from middleware.login_queries import is_admin
+from middleware.login_queries import is_admin, UserNotFoundError
 import os
 from typing import Tuple
 from flask.wrappers import Response
@@ -93,7 +93,10 @@ def decorator(*args, **kwargs):
                 "message": "Please provide an 'Authorization' key in the request header"
             }, 400
         # Check if API key is correct and valid
-        valid, expired = is_valid(api_key, request.endpoint, request.method)
+        try:
+            valid, expired = is_valid(api_key, request.endpoint, request.method)
+        except UserNotFoundError as e:
+            return {"message": str(e)}, 401
         if valid:
             return func(*args, **kwargs)
         else:
From c1268102630a587c2cd4e2bdf4c27c8db9380108 Mon Sep 17 00:00:00 2001
From: maxachis
Date: Sat, 11 May 2024 20:02:58 -0400
Subject: [PATCH 028/127] Refactor query methods for data source handling

The code has been refactored into individual query methods for clearer
data handling and improved readability. The 'data_sources_query' method
in 'DataSources.py', which was previously handling all query types, is
replaced with specific, self-explanatory query methods such as
'get_approved_data_sources', 'needs_identification_data_sources' and
'get_data_sources_for_map'.
This also leads to the introduction of 'convert_data_source_matches' in 'data_source_queries.py' file for better format conversion. --- middleware/data_source_queries.py | 63 ++++++++++--------------------- resources/DataSources.py | 19 +++++----- 2 files changed, 29 insertions(+), 53 deletions(-) diff --git a/middleware/data_source_queries.py b/middleware/data_source_queries.py index 1c411c8f..bfd0327c 100644 --- a/middleware/data_source_queries.py +++ b/middleware/data_source_queries.py @@ -46,6 +46,8 @@ "last_cached", ] +DATA_SOURCES_OUTPUT_COLUMNS = DATA_SOURCES_APPROVED_COLUMNS + ["agency_name"] + AGENCY_APPROVED_COLUMNS = [ "homepage_url", "count_data_sources", @@ -72,6 +74,19 @@ "defunct_year", ] +DATA_SOURCES_MAP_COLUMN = [ + "data_source_id", + "name", + "agency_id", + "agency_name", + "state_iso", + "municipality", + "county_name", + "record_type", + "lat", + "lng", + ] + def data_source_by_id_results( conn: PgConnection, data_source_id: str @@ -189,7 +204,7 @@ def get_approved_data_sources(conn: PgConnection) -> list[tuple[Any, ...]]: results = cursor.fetchall() cursor.close() - return results + return convert_data_source_matches(DATA_SOURCES_OUTPUT_COLUMNS, results) def needs_identification_data_sources(conn) -> list: @@ -213,7 +228,7 @@ def needs_identification_data_sources(conn) -> list: results = cursor.fetchall() cursor.close() - return results + return convert_data_source_matches(DATA_SOURCES_OUTPUT_COLUMNS, results) def get_data_sources_for_map(conn) -> list: @@ -246,55 +261,15 @@ def get_data_sources_for_map(conn) -> list: results = cursor.fetchall() cursor.close() - return results + return convert_data_source_matches(DATA_SOURCES_MAP_COLUMN, results) -def data_sources_query( - conn: Optional[PgConnection] = None, - test_query_results: Optional[List[Dict[str, Any]]] = None, - approval_status: str = "approved", - for_map: bool = False, -) -> List[Dict[str, Any]]: - """ - Processes and formats a list of approved data sources, with an option to use 
test query results. - - :param approval_status: The approval status of the data sources to query. - :param conn: Optional psycopg2 connection object to a PostgreSQL database. - :param test_query_results: Optional list of test query results to use instead of querying the database. - :return: A list of dictionaries, each formatted with details of a data source and its associated agency. - """ - if for_map: - results = get_data_sources_for_map(conn) - elif conn and approval_status == "approved": - results = get_approved_data_sources(conn) - elif conn and not for_map: - results = needs_identification_data_sources(conn) - else: - results = test_query_results - - if not for_map: - data_source_output_columns = DATA_SOURCES_APPROVED_COLUMNS + ["agency_name"] - else: - data_source_output_columns = [ - "data_source_id", - "name", - "agency_id", - "agency_name", - "state_iso", - "municipality", - "county_name", - "record_type", - "lat", - "lng", - ] - +def convert_data_source_matches(data_source_output_columns, results): data_source_matches = [ dict(zip(data_source_output_columns, result)) for result in results ] data_source_matches_converted = [] - for data_source_match in data_source_matches: data_source_match = convert_dates_to_strings(data_source_match) data_source_matches_converted.append(format_arrays(data_source_match)) - return data_source_matches_converted diff --git a/resources/DataSources.py b/resources/DataSources.py index 50fdb48e..d915e39b 100644 --- a/resources/DataSources.py +++ b/resources/DataSources.py @@ -1,6 +1,11 @@ from flask import request from middleware.security import api_required -from middleware.data_source_queries import data_source_by_id_query, data_sources_query +from middleware.data_source_queries import ( + data_source_by_id_query, + get_data_sources_for_map, + get_approved_data_sources, + needs_identification_data_sources, +) from datetime import datetime import uuid @@ -108,9 +113,7 @@ def get(self) -> Dict[str, Any]: - A dictionary 
containing the count of data sources and their details. """ try: - data_source_matches = data_sources_query( - self.psycopg2_connection, [], "approved" - ) + data_source_matches = get_approved_data_sources(self.psycopg2_connection) data_sources = { "count": len(data_source_matches), @@ -180,8 +183,8 @@ class DataSourcesNeedsIdentification(PsycopgResource): @api_required def get(self): try: - data_source_matches = data_sources_query( - self.psycopg2_connection, [], "needs_identification" + data_source_matches = needs_identification_data_sources( + self.psycopg2_connection ) data_sources = { @@ -212,9 +215,7 @@ def get(self) -> Dict[str, Any]: - A dictionary containing the count of data sources and their details. """ try: - data_source_matches = data_sources_query( - self.psycopg2_connection, [], "approved", True - ) + data_source_matches = get_data_sources_for_map(self.psycopg2_connection) data_sources = { "count": len(data_source_matches), From 38070f0ed6b2e264c9f16a1048920919e38ef795 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 11 May 2024 20:15:53 -0400 Subject: [PATCH 029/127] Remove archives_put_query function and refactor related code The archives_put_query method has been removed from the archives_queries.py and its functionality has been split into direct calls to the appropriate methods, archives_put_broken_as_of_results or archives_put_last_cached_results. This change simplifies the code and makes it more legible by clearly specifying the function being used in each situation. The refactoring also involves adjustments in the Archives.py resource to accommodate these changes. 
--- middleware/archives_queries.py | 21 --------------------- resources/Archives.py | 28 ++++++++++++++++------------ 2 files changed, 16 insertions(+), 33 deletions(-) diff --git a/middleware/archives_queries.py b/middleware/archives_queries.py index 22cc6226..cc6314e7 100644 --- a/middleware/archives_queries.py +++ b/middleware/archives_queries.py @@ -92,24 +92,3 @@ def archives_put_last_cached_results( cursor.execute(sql_query.format(last_cached, id)) cursor.close() - -def archives_put_query( - id: str = "", - broken_as_of: str = "", - last_cached: str = "", - conn: Optional[PgConnection] = None, -) -> None: - """ - Updates the data_sources table based on the provided parameters, marking sources as broken or updating the last cached date. - - :param id: The airtable_uid of the data source. - :param broken_as_of: The date when the source was identified as broken, if applicable. - :param last_cached: The last cached date to be updated. - :param conn: A psycopg2 connection object to a PostgreSQL database. 
- """ - if broken_as_of: - archives_put_broken_as_of_results(id, broken_as_of, last_cached, conn) - else: - archives_put_last_cached_results(id, last_cached, conn) - - conn.commit() diff --git a/resources/Archives.py b/resources/Archives.py index 51201b44..be46ca8b 100644 --- a/resources/Archives.py +++ b/resources/Archives.py @@ -1,5 +1,9 @@ from middleware.security import api_required -from middleware.archives_queries import archives_get_query, archives_put_query +from middleware.archives_queries import ( + archives_get_query, + archives_put_broken_as_of_results, + archives_put_last_cached_results, +) from flask_restful import request import json @@ -49,19 +53,19 @@ def put(self) -> Dict[str, str]: json_data = request.get_json() data = json.loads(json_data) id = data["id"] if "id" in data else None - broken_as_of = ( - data["broken_source_url_as_of"] - if "broken_source_url_as_of" in data - else None - ) last_cached = data["last_cached"] if "last_cached" in data else None - archives_put_query( - id=id, - broken_as_of=broken_as_of, - last_cached=last_cached, - conn=self.psycopg2_connection, - ) + if "broken_source_url_as_of" in data: + archives_put_broken_as_of_results( + id=id, + broken_as_of=data["broken_source_url_as_of"], + last_cached=last_cached, + conn=self.psycopg2_connection, + ) + else: + archives_put_last_cached_results(id, last_cached, self.psycopg2_connection) + + self.psycopg2_connection.commit() return {"status": "success"} From 078d0450c4d3e4fb0b1d53ce11e5b970aabd0ac9 Mon Sep 17 00:00:00 2001 From: maxachis Date: Tue, 14 May 2024 16:29:37 -0400 Subject: [PATCH 030/127] Update SQL commands and refactor unit tests In the helper function and tests module, the SQL queries were updated to use `DATETIME_OF_REQUEST` instead of `UPDATED_AT`. In `test_quick_search_query.py`, the code to get the current date was changed to pull directly from the database using 'SELECT NOW()'. 
Removed redundancy in data_source_by_id_query assertions and simplified datetime comparison in quick_search_query test. Made necessary import adjustments in `test_data_source_queries.py`. --- tests/middleware/helper_functions.py | 4 ++-- tests/middleware/test_data_source_queries.py | 3 +-- tests/middleware/test_quick_search_query.py | 14 +++++++------- 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/tests/middleware/helper_functions.py b/tests/middleware/helper_functions.py index e1990466..8ce20233 100644 --- a/tests/middleware/helper_functions.py +++ b/tests/middleware/helper_functions.py @@ -147,8 +147,8 @@ def get_most_recent_quick_search_query_log( """ cursor.execute( """ - SELECT RESULT_COUNT, UPDATED_AT FROM QUICK_SEARCH_QUERY_LOGS WHERE - search = %s AND location = %s ORDER BY CREATED_AT DESC LIMIT 1 + SELECT RESULT_COUNT, DATETIME_OF_REQUEST FROM QUICK_SEARCH_QUERY_LOGS WHERE + search = %s AND location = %s ORDER BY DATETIME_OF_REQUEST DESC LIMIT 1 """, (search, location), ) diff --git a/tests/middleware/test_data_source_queries.py b/tests/middleware/test_data_source_queries.py index 5d2b0849..65317bf7 100644 --- a/tests/middleware/test_data_source_queries.py +++ b/tests/middleware/test_data_source_queries.py @@ -15,7 +15,7 @@ has_expected_keys, get_boolean_dictionary, ) -from tests.middleware.fixtures import connection_with_test_data +from tests.middleware.fixtures import connection_with_test_data, dev_db_connection @pytest.fixture @@ -104,7 +104,6 @@ def test_data_source_by_id_query( result = data_source_by_id_query( data_source_id="SOURCE_UID_1", conn=connection_with_test_data ) - assert has_expected_keys(result.keys(), DATA_SOURCES_ID_QUERY_RESULTS) assert result["agency_name"] == "Agency A" diff --git a/tests/middleware/test_quick_search_query.py b/tests/middleware/test_quick_search_query.py index cb2d93ac..45cbdefd 100644 --- a/tests/middleware/test_quick_search_query.py +++ b/tests/middleware/test_quick_search_query.py @@ -41,11 
+41,13 @@ def test_quick_search_query_logging(connection_with_test_data: psycopg2.extensio :return: None """ # Get datetime of test - test_datetime = datetime.now(pytz.timezone("UTC")) - # Round to the nearest minute - test_datetime = test_datetime.replace(second=0, microsecond=0) + with connection_with_test_data.cursor() as cursor: + cursor.execute("SELECT NOW()") + result = cursor.fetchone() + test_datetime = result[0] - results = quick_search_query( + + quick_search_query( search="Source 1", location="City A", conn=connection_with_test_data ) @@ -53,9 +55,7 @@ def test_quick_search_query_logging(connection_with_test_data: psycopg2.extensio # Test that query inserted into log result = get_most_recent_quick_search_query_log(cursor, "Source 1", "City A") assert result.result_count == 1 - # Round both datetimes to the nearest minute and compare, to ensure log was created during this test - result_datetime = result.updated_at.replace(second=0, microsecond=0) - assert result_datetime >= test_datetime + assert result.updated_at >= test_datetime def test_quick_search_query_results(connection_with_test_data: psycopg2.extensions.connection) -> None: From d752468e1e4cf2d07c126ef4f5ffcf28c68e98d5 Mon Sep 17 00:00:00 2001 From: maxachis Date: Tue, 14 May 2024 16:31:40 -0400 Subject: [PATCH 031/127] Refactor using black --- tests/middleware/test_archives_queries.py | 26 +++++++++++++++----- tests/middleware/test_login_queries.py | 8 +++++- tests/middleware/test_quick_search_query.py | 18 +++++++++----- tests/middleware/test_reset_token_queries.py | 13 ++++++++-- tests/middleware/test_user_queries.py | 1 + 5 files changed, 51 insertions(+), 15 deletions(-) diff --git a/tests/middleware/test_archives_queries.py b/tests/middleware/test_archives_queries.py index 07f3965f..beea63ff 100644 --- a/tests/middleware/test_archives_queries.py +++ b/tests/middleware/test_archives_queries.py @@ -1,12 +1,24 @@ import psycopg2 -from middleware.archives_queries import archives_get_results, 
archives_get_query, ARCHIVES_GET_COLUMNS -from tests.middleware.helper_functions import insert_test_agencies_and_sources, has_expected_keys -from tests.middleware.fixtures import dev_db_connection, db_cursor, connection_with_test_data +from middleware.archives_queries import ( + archives_get_results, + archives_get_query, + ARCHIVES_GET_COLUMNS, +) +from tests.middleware.helper_functions import ( + insert_test_agencies_and_sources, + has_expected_keys, +) +from tests.middleware.fixtures import ( + dev_db_connection, + db_cursor, + connection_with_test_data, +) + def test_archives_get_results( - dev_db_connection: psycopg2.extensions.connection, - db_cursor: psycopg2.extensions.cursor + dev_db_connection: psycopg2.extensions.connection, + db_cursor: psycopg2.extensions.cursor, ) -> None: """ :param dev_db_connection: A connection to the development database. @@ -36,7 +48,9 @@ def test_archives_get_results( assert len(new_results) == len(original_results) + 1 -def test_archives_get_columns(connection_with_test_data: psycopg2.extensions.connection) -> None: +def test_archives_get_columns( + connection_with_test_data: psycopg2.extensions.connection, +) -> None: """ Test the archives_get_columns method, ensuring it properly returns an inserted source :param connection_with_test_data: A connection object to the database with test data. 
diff --git a/tests/middleware/test_login_queries.py b/tests/middleware/test_login_queries.py index 8c90cbd7..21f48ea8 100644 --- a/tests/middleware/test_login_queries.py +++ b/tests/middleware/test_login_queries.py @@ -2,10 +2,16 @@ import psycopg2 -from middleware.login_queries import login_results, create_session_token, token_results, is_admin +from middleware.login_queries import ( + login_results, + create_session_token, + token_results, + is_admin, +) from tests.middleware.helper_functions import create_test_user from tests.middleware.fixtures import dev_db_connection, db_cursor + def test_login_query(db_cursor: psycopg2.extensions.cursor) -> None: """ Test the login query by comparing the password digest for a user retrieved from the database diff --git a/tests/middleware/test_quick_search_query.py b/tests/middleware/test_quick_search_query.py index 45cbdefd..2ffacdb2 100644 --- a/tests/middleware/test_quick_search_query.py +++ b/tests/middleware/test_quick_search_query.py @@ -20,20 +20,25 @@ ) -def test_unaltered_search_query(connection_with_test_data: psycopg2.extensions.connection) -> None: +def test_unaltered_search_query( + connection_with_test_data: psycopg2.extensions.connection, +) -> None: """ :param connection_with_test_data: A connection object that is connected to the test database containing the test data. 
:return: None Test the unaltered_search_query method properly returns only one result """ - response = unaltered_search_query(connection_with_test_data.cursor(), search="Source 1", location="City A") + response = unaltered_search_query( + connection_with_test_data.cursor(), search="Source 1", location="City A" + ) assert len(response) == 1 assert response[0][3] == "Type A" # Record Type - -def test_quick_search_query_logging(connection_with_test_data: psycopg2.extensions.connection) -> None: +def test_quick_search_query_logging( + connection_with_test_data: psycopg2.extensions.connection, +) -> None: """ Tests that quick_search_query properly creates a log of the query @@ -46,7 +51,6 @@ def test_quick_search_query_logging(connection_with_test_data: psycopg2.extensio result = cursor.fetchone() test_datetime = result[0] - quick_search_query( search="Source 1", location="City A", conn=connection_with_test_data ) @@ -58,7 +62,9 @@ def test_quick_search_query_logging(connection_with_test_data: psycopg2.extensio assert result.updated_at >= test_datetime -def test_quick_search_query_results(connection_with_test_data: psycopg2.extensions.connection) -> None: +def test_quick_search_query_results( + connection_with_test_data: psycopg2.extensions.connection, +) -> None: """ Test the `quick_search_query` method returns expected test data diff --git a/tests/middleware/test_reset_token_queries.py b/tests/middleware/test_reset_token_queries.py index d6bb04c0..9c7d11f0 100644 --- a/tests/middleware/test_reset_token_queries.py +++ b/tests/middleware/test_reset_token_queries.py @@ -2,10 +2,19 @@ import psycopg2.extensions -from middleware.reset_token_queries import check_reset_token, add_reset_token, delete_reset_token -from tests.middleware.helper_functions import create_reset_token, create_test_user, get_reset_tokens_for_email +from middleware.reset_token_queries import ( + check_reset_token, + add_reset_token, + delete_reset_token, +) +from tests.middleware.helper_functions 
import ( + create_reset_token, + create_test_user, + get_reset_tokens_for_email, +) from tests.middleware.fixtures import dev_db_connection, db_cursor + def test_check_reset_token(db_cursor: psycopg2.extensions.cursor) -> None: """ Checks if a token existing in the database diff --git a/tests/middleware/test_user_queries.py b/tests/middleware/test_user_queries.py index 7635a44c..3fb67cf3 100644 --- a/tests/middleware/test_user_queries.py +++ b/tests/middleware/test_user_queries.py @@ -4,6 +4,7 @@ from tests.middleware.helper_functions import create_test_user from tests.middleware.fixtures import dev_db_connection, db_cursor + def test_user_post_query(db_cursor: psycopg2.extensions.cursor) -> None: """ Test the `user_post_query` method, ensuring it properly returns the expected results From f7a77f14a984a79eeb3593eabaa10d5b087d7b9a Mon Sep 17 00:00:00 2001 From: maxachis Date: Tue, 14 May 2024 16:31:52 -0400 Subject: [PATCH 032/127] Update pytest command in pull.yaml workflow The pytest command used in the .github/workflows/pull.yaml has been updated to correctly point to the app_test.py file. This adjustment affects the "Test with pytest" segment of the workflow. 
--- .github/workflows/pull.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pull.yaml b/.github/workflows/pull.yaml index 4c76ca61..219d9bff 100644 --- a/.github/workflows/pull.yaml +++ b/.github/workflows/pull.yaml @@ -48,7 +48,7 @@ jobs: - name: Test with pytest run: | pip install pytest pytest-cov - pytest app_test.py --doctest-modules --junitxml=junit/test-results.xml --cov=com --cov-report=xml --cov-report=html + pytest tests/resources/app_test.py --doctest-modules --junitxml=junit/test-results.xml --cov=com --cov-report=xml --cov-report=html setup_client: defaults: From 9bb0d0fc1da1e5f9eec853c14ab140a72f41a881 Mon Sep 17 00:00:00 2001 From: maxachis Date: Tue, 14 May 2024 16:32:53 -0400 Subject: [PATCH 033/127] Refactor using black --- tests/resources/app_test.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tests/resources/app_test.py b/tests/resources/app_test.py index 3ff9374e..e40b0471 100644 --- a/tests/resources/app_test.py +++ b/tests/resources/app_test.py @@ -1,10 +1,8 @@ - import os from app import create_app from tests.resources.app_test_data import ( DATA_SOURCES_ROWS, AGENCIES_ROWS, - ) import datetime import sqlite3 @@ -95,9 +93,6 @@ def session(): connection.close() - - - # def test_post_user(client): # response = client.post( # "/user", headers=HEADERS, json={"email": "test", "password": "test"} From 10dc54eb37fb8f71f960945a042d1d7723850fef Mon Sep 17 00:00:00 2001 From: maxachis Date: Tue, 14 May 2024 16:49:37 -0400 Subject: [PATCH 034/127] Add test skeletons for UserNotLoggedIn exception handling --- tests/middleware/test_login_queries.py | 7 +++++++ tests/middleware/test_security.py | 8 ++++++++ 2 files changed, 15 insertions(+) create mode 100644 tests/middleware/test_login_queries.py create mode 100644 tests/middleware/test_security.py diff --git a/tests/middleware/test_login_queries.py b/tests/middleware/test_login_queries.py new file mode 100644 index 00000000..94cd67b4 --- /dev/null +++ 
b/tests/middleware/test_login_queries.py @@ -0,0 +1,7 @@ +def test_is_admin_raises_user_not_logged_in_error(): + """ + Check that when searching for a user by an email that doesn't exist, + the UserNotFoundError is raised + :return: + """ + raise NotImplementedError diff --git a/tests/middleware/test_security.py b/tests/middleware/test_security.py new file mode 100644 index 00000000..4fe9f4c7 --- /dev/null +++ b/tests/middleware/test_security.py @@ -0,0 +1,8 @@ + +def test_api_required_user_not_found(): + """ + Test that the api_required decorator properly returns + the expected result when a user doesn't exist + :return: + """ + raise NotImplementedError From caaf8337e92cf90f89d6b4874c060d992878b240 Mon Sep 17 00:00:00 2001 From: maxachis Date: Wed, 15 May 2024 16:14:38 -0400 Subject: [PATCH 035/127] Implement test for UserNotFoundError in login queries Implemented the test for UserNotFoundError in is_admin function of the login_queries module. Ensured that the function correctly raises UserNotFoundError when a user email doesn't exist in the system. This helps to improve the robustness of error handling in user authentication. 
---
 tests/middleware/test_login_queries.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/tests/middleware/test_login_queries.py b/tests/middleware/test_login_queries.py
index 1f7f4e62..d56b3f76 100644
--- a/tests/middleware/test_login_queries.py
+++ b/tests/middleware/test_login_queries.py
@@ -1,12 +1,15 @@
+import uuid
 from unittest.mock import patch
 
 import psycopg2
+import pytest
 
 from middleware.login_queries import (
     login_results,
     create_session_token,
     token_results,
     is_admin,
+    UserNotFoundError,
 )
 from tests.middleware.helper_functions import create_test_user
 from tests.middleware.fixtures import dev_db_connection, db_cursor
@@ -58,10 +61,12 @@ def test_is_admin(db_cursor: psycopg2.extensions.cursor) -> None:
     assert is_admin(db_cursor, admin_user.email)
     assert not is_admin(db_cursor, regular_user.email)
 
-def test_is_admin_raises_user_not_logged_in_error():
+
+def test_is_admin_raises_user_not_logged_in_error(db_cursor):
     """
     Check that when searching for a user by an email that doesn't exist,
     the UserNotFoundError is raised
     :return:
     """
-    raise NotImplementedError
+    with pytest.raises(UserNotFoundError):
+        is_admin(cursor=db_cursor, email=str(uuid.uuid4()))

From 9f99705b5a749ea760043806c9b8a5fae80cc595 Mon Sep 17 00:00:00 2001
From: maxachis
Date: Wed, 15 May 2024 16:59:53 -0400
Subject: [PATCH 036/127] Refactor data access in tests and remove unused test

Updated data access in various tests, which replaced indexed access with
direct attribute access. Added a test 'convert_data_source_matches' to
validate the conversion of data source matches to dictionaries. Removed
unused test method 'test_data_sources_query' as it was no longer
relevant. These adjustments enhance readability and maintainability of
test code.
--- tests/middleware/test_data_source_queries.py | 63 ++++++++++---------- 1 file changed, 32 insertions(+), 31 deletions(-) diff --git a/tests/middleware/test_data_source_queries.py b/tests/middleware/test_data_source_queries.py index 65317bf7..9cbc881b 100644 --- a/tests/middleware/test_data_source_queries.py +++ b/tests/middleware/test_data_source_queries.py @@ -1,12 +1,12 @@ import psycopg2 import pytest +from middleware import data_source_queries from tests.resources.app_test_data import DATA_SOURCES_ID_QUERY_RESULTS from middleware.data_source_queries import ( get_approved_data_sources, needs_identification_data_sources, data_source_by_id_results, - data_sources_query, DATA_SOURCES_APPROVED_COLUMNS, data_source_by_id_query, get_data_sources_for_map, @@ -41,7 +41,7 @@ def test_get_approved_data_sources( results = get_approved_data_sources(conn=connection_with_test_data) for result in results: - name = result[0] + name = result["name"] if name in inserted_data_sources_found: inserted_data_sources_found[name] = True @@ -62,7 +62,7 @@ def test_needs_identification( """ results = needs_identification_data_sources(conn=connection_with_test_data) for result in results: - name = result[0] + name = result["name"] if name in inserted_data_sources_found: inserted_data_sources_found[name] = True @@ -107,30 +107,6 @@ def test_data_source_by_id_query( assert result["agency_name"] == "Agency A" -def test_data_sources_query( - connection_with_test_data: psycopg2.extensions.connection, - inserted_data_sources_found: dict[str, bool], -) -> None: - """ - Test that data sources query properly returns data for an inserted data source - marked as 'approved', and none others. 
- :param connection_with_test_data: - :param inserted_data_sources_found: - :return: - """ - results = data_sources_query(connection_with_test_data) - # Check that results include expected keys - assert has_expected_keys(results[0].keys(), DATA_SOURCES_APPROVED_COLUMNS) - for result in results: - name = result["name"] - if name in inserted_data_sources_found: - inserted_data_sources_found[name] = True - - assert inserted_data_sources_found["Source 1"] - assert not inserted_data_sources_found["Source 2"] - assert not inserted_data_sources_found["Source 3"] - - def test_get_data_sources_for_map( connection_with_test_data: psycopg2.extensions.connection, inserted_data_sources_found: dict[str, bool], @@ -144,14 +120,39 @@ def test_get_data_sources_for_map( """ results = get_data_sources_for_map(conn=connection_with_test_data) for result in results: - name = result[1] + name = result["name"] if name == "Source 1": - lat = result[8] - lng = result[9] - assert lat == 30 and lng == 20 + assert result["lat"] == 30 and result["lng"] == 20 if name in inserted_data_sources_found: inserted_data_sources_found[name] = True assert inserted_data_sources_found["Source 1"] assert not inserted_data_sources_found["Source 2"] assert not inserted_data_sources_found["Source 3"] + + +def test_convert_data_source_matches(): + """ + Convert_data_source_matches should output a list of + dictionaries based on the provided list of columns + and the list of tuples + """ + + # Define Test case Input and Output data + testcases = [ + { + "data_source_output_columns": ["name", "age"], + "results": [("Joe", 20), ("Annie", 30)], + "output": [{"name": "Joe", "age": 20}, {"name": "Annie", "age": 30}], + }, + # You can add more tests here as per requirement. 
+ ] + + # Execute the tests + for testcase in testcases: + assert ( + data_source_queries.convert_data_source_matches( + testcase["data_source_output_columns"], testcase["results"] + ) + == testcase["output"] + ) From bc4f9bc466676e886024c5cde72040237c36a9a0 Mon Sep 17 00:00:00 2001 From: maxachis Date: Wed, 15 May 2024 17:00:02 -0400 Subject: [PATCH 037/127] Add type hints and docstring to 'convert_data_source_matches' function The function 'convert_data_source_matches' now has type hints and a descriptive docstring. This change provides clarity on what the function expects as parameters and what it returns. Improved documentation aids in understanding the code and enhances maintainability. --- middleware/data_source_queries.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/middleware/data_source_queries.py b/middleware/data_source_queries.py index bfd0327c..7feb7481 100644 --- a/middleware/data_source_queries.py +++ b/middleware/data_source_queries.py @@ -264,7 +264,15 @@ def get_data_sources_for_map(conn) -> list: return convert_data_source_matches(DATA_SOURCES_MAP_COLUMN, results) -def convert_data_source_matches(data_source_output_columns, results): +def convert_data_source_matches(data_source_output_columns: list[str], results: list[tuple]) -> dict: + """ + Combine a list of output columns with a list of results, + and produce a list of dictionaries where the keys correspond + to the output columns and the values correspond to the results + :param data_source_output_columns: + :param results: + :return: + """ data_source_matches = [ dict(zip(data_source_output_columns, result)) for result in results ] From 2132d0a68bf338a86b775d02f1a6607e2fea6d13 Mon Sep 17 00:00:00 2001 From: Josh <30379833+josh-chamberlain@users.noreply.github.com> Date: Fri, 17 May 2024 11:24:41 -0400 Subject: [PATCH 038/127] Update CODEOWNERS --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS 
b/.github/CODEOWNERS index a03d45d8..a001991c 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,2 +1,2 @@ /client/ @joshuagraber -* @mbodeantor +* @josh-chamberlain From ad5b51b48c9281cdeefb3c65b283d6ad937fee3d Mon Sep 17 00:00:00 2001 From: maxachis Date: Fri, 17 May 2024 16:08:08 -0400 Subject: [PATCH 039/127] Refactor DataRequestsManager, add DBRequestMapper class and tests Refactored DataRequestsManager to use the new DBRequestMapper class for generating database queries. Created new data classes RequestInfo and UpdateableRequestColumns to map request data to the database. Added appropriate tests to verify correct operation. --- .github/workflows/test_api.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test_api.yml b/.github/workflows/test_api.yml index 3da6d8d8..312b3dd9 100644 --- a/.github/workflows/test_api.yml +++ b/.github/workflows/test_api.yml @@ -21,4 +21,5 @@ jobs: pip install -r requirements.txt python -m spacy download en_core_web_sm pip install pytest pytest-cov - pytest tests --doctest-modules --junitxml=junit/test-results.xml --cov=com --cov-report=xml --cov-report=html + - name: Run tests + run: pytest tests --doctest-modules --junitxml=junit/test-results.xml --cov=com --cov-report=xml --cov-report=html From 3c21385b3c2cc84f10086229616758a086bfcd8f Mon Sep 17 00:00:00 2001 From: maxachis Date: Fri, 17 May 2024 16:31:03 -0400 Subject: [PATCH 040/127] Change psycopg2 to psycopg2-binary --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index bd3aca88..5be15865 100644 --- a/requirements.txt +++ b/requirements.txt @@ -40,7 +40,7 @@ pathy==0.10.2 pluggy==1.2.0 postgrest==0.10.8 preshed==3.0.8 -psycopg2==2.9.7 +psycopg2-binary==2.9.7 py==1.11.0 pycparser==2.21 pydantic==2.2.1 From 83835dae489fa44451ee84f3970c61640b473724 Mon Sep 17 00:00:00 2001 From: maxachis Date: Fri, 17 May 2024 16:31:20 -0400 Subject: [PATCH 041/127] 
Temporarily comment out github action to prevent execution --- .github/workflows/test_api.yml | 50 +++++++++++++++++----------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/.github/workflows/test_api.yml b/.github/workflows/test_api.yml index 312b3dd9..db8f60b1 100644 --- a/.github/workflows/test_api.yml +++ b/.github/workflows/test_api.yml @@ -1,25 +1,25 @@ -name: Test API using Pytest - -on: - pull_request: - -jobs: - test_api: - env: - SECRET_KEY: ${{ secrets.SECRET_KEY }} - DEV_DB_CONN_STRING: ${{secrets.DEV_DB_CONN_STRING}} - name: Test API - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 - with: - python-version: '3.11' - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install -r requirements.txt - python -m spacy download en_core_web_sm - pip install pytest pytest-cov - - name: Run tests - run: pytest tests --doctest-modules --junitxml=junit/test-results.xml --cov=com --cov-report=xml --cov-report=html +#name: Test API using Pytest +# +#on: +# pull_request: +# +#jobs: +# test_api: +# env: +# SECRET_KEY: ${{ secrets.SECRET_KEY }} +# DEV_DB_CONN_STRING: ${{secrets.DEV_DB_CONN_STRING}} +# name: Test API +# runs-on: ubuntu-latest +# steps: +# - uses: actions/checkout@v4 +# - uses: actions/setup-python@v4 +# with: +# python-version: '3.11' +# - name: Install dependencies +# run: | +# python -m pip install --upgrade pip +# pip install -r requirements.txt +# python -m spacy download en_core_web_sm +# pip install pytest pytest-cov +# - name: Run tests +# run: pytest tests --doctest-modules --junitxml=junit/test-results.xml --cov=com --cov-report=xml --cov-report=html From 5acc1ff2003f1c3432f2bd6da884945aaa010574 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sun, 19 May 2024 18:29:00 -0400 Subject: [PATCH 042/127] Uncomment test_api.yml --- .github/workflows/test_api.yml | 50 +++++++++++++++++----------------- 1 file changed, 25 insertions(+), 25 deletions(-) 
diff --git a/.github/workflows/test_api.yml b/.github/workflows/test_api.yml index db8f60b1..312b3dd9 100644 --- a/.github/workflows/test_api.yml +++ b/.github/workflows/test_api.yml @@ -1,25 +1,25 @@ -#name: Test API using Pytest -# -#on: -# pull_request: -# -#jobs: -# test_api: -# env: -# SECRET_KEY: ${{ secrets.SECRET_KEY }} -# DEV_DB_CONN_STRING: ${{secrets.DEV_DB_CONN_STRING}} -# name: Test API -# runs-on: ubuntu-latest -# steps: -# - uses: actions/checkout@v4 -# - uses: actions/setup-python@v4 -# with: -# python-version: '3.11' -# - name: Install dependencies -# run: | -# python -m pip install --upgrade pip -# pip install -r requirements.txt -# python -m spacy download en_core_web_sm -# pip install pytest pytest-cov -# - name: Run tests -# run: pytest tests --doctest-modules --junitxml=junit/test-results.xml --cov=com --cov-report=xml --cov-report=html +name: Test API using Pytest + +on: + pull_request: + +jobs: + test_api: + env: + SECRET_KEY: ${{ secrets.SECRET_KEY }} + DEV_DB_CONN_STRING: ${{secrets.DEV_DB_CONN_STRING}} + name: Test API + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v4 + with: + python-version: '3.11' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + python -m spacy download en_core_web_sm + pip install pytest pytest-cov + - name: Run tests + run: pytest tests --doctest-modules --junitxml=junit/test-results.xml --cov=com --cov-report=xml --cov-report=html From a40f55b44a56a5cbe1a9e700aeff8fb508d4dfe9 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sun, 19 May 2024 18:41:51 -0400 Subject: [PATCH 043/127] Comment test_api.yml --- .github/workflows/test_api.yml | 50 +++++++++++++++++----------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/.github/workflows/test_api.yml b/.github/workflows/test_api.yml index 312b3dd9..db8f60b1 100644 --- a/.github/workflows/test_api.yml +++ b/.github/workflows/test_api.yml @@ 
-1,25 +1,25 @@ -name: Test API using Pytest - -on: - pull_request: - -jobs: - test_api: - env: - SECRET_KEY: ${{ secrets.SECRET_KEY }} - DEV_DB_CONN_STRING: ${{secrets.DEV_DB_CONN_STRING}} - name: Test API - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 - with: - python-version: '3.11' - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install -r requirements.txt - python -m spacy download en_core_web_sm - pip install pytest pytest-cov - - name: Run tests - run: pytest tests --doctest-modules --junitxml=junit/test-results.xml --cov=com --cov-report=xml --cov-report=html +#name: Test API using Pytest +# +#on: +# pull_request: +# +#jobs: +# test_api: +# env: +# SECRET_KEY: ${{ secrets.SECRET_KEY }} +# DEV_DB_CONN_STRING: ${{secrets.DEV_DB_CONN_STRING}} +# name: Test API +# runs-on: ubuntu-latest +# steps: +# - uses: actions/checkout@v4 +# - uses: actions/setup-python@v4 +# with: +# python-version: '3.11' +# - name: Install dependencies +# run: | +# python -m pip install --upgrade pip +# pip install -r requirements.txt +# python -m spacy download en_core_web_sm +# pip install pytest pytest-cov +# - name: Run tests +# run: pytest tests --doctest-modules --junitxml=junit/test-results.xml --cov=com --cov-report=xml --cov-report=html From aed0d587b559e77c55c325aa95a8348aff073686 Mon Sep 17 00:00:00 2001 From: maxachis Date: Wed, 22 May 2024 16:21:10 -0400 Subject: [PATCH 044/127] Post-merge clean-up --- resources/DataSources.py | 3 +-- tests/middleware/test_data_source_queries.py | 3 --- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/resources/DataSources.py b/resources/DataSources.py index 16e9f8f0..b474d0cf 100644 --- a/resources/DataSources.py +++ b/resources/DataSources.py @@ -190,8 +190,7 @@ def get(self) -> Dict[str, Any]: Returns: - A dictionary containing the count of data sources and their details. 
""" - try: - data_source_matches = get_data_sources_for_map(self.psycopg2_connection) + data_source_matches = get_data_sources_for_map(self.psycopg2_connection) data_sources = { "count": len(data_source_matches), diff --git a/tests/middleware/test_data_source_queries.py b/tests/middleware/test_data_source_queries.py index 9cbc881b..972e59f5 100644 --- a/tests/middleware/test_data_source_queries.py +++ b/tests/middleware/test_data_source_queries.py @@ -2,17 +2,14 @@ import pytest from middleware import data_source_queries -from tests.resources.app_test_data import DATA_SOURCES_ID_QUERY_RESULTS from middleware.data_source_queries import ( get_approved_data_sources, needs_identification_data_sources, data_source_by_id_results, - DATA_SOURCES_APPROVED_COLUMNS, data_source_by_id_query, get_data_sources_for_map, ) from tests.middleware.helper_functions import ( - has_expected_keys, get_boolean_dictionary, ) from tests.middleware.fixtures import connection_with_test_data, dev_db_connection From d407ada518e6cf53aa22927293066a481f70aa37 Mon Sep 17 00:00:00 2001 From: maxachis Date: Wed, 22 May 2024 16:35:22 -0400 Subject: [PATCH 045/127] Reformat with Black --- middleware/archives_queries.py | 1 - middleware/data_source_queries.py | 26 ++++++++++++++------------ 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/middleware/archives_queries.py b/middleware/archives_queries.py index cc6314e7..b53fa3ec 100644 --- a/middleware/archives_queries.py +++ b/middleware/archives_queries.py @@ -91,4 +91,3 @@ def archives_put_last_cached_results( sql_query = "UPDATE data_sources SET last_cached = '{0}' WHERE airtable_uid = '{1}'" cursor.execute(sql_query.format(last_cached, id)) cursor.close() - diff --git a/middleware/data_source_queries.py b/middleware/data_source_queries.py index 7feb7481..36762125 100644 --- a/middleware/data_source_queries.py +++ b/middleware/data_source_queries.py @@ -75,17 +75,17 @@ ] DATA_SOURCES_MAP_COLUMN = [ - "data_source_id", - "name", - 
"agency_id", - "agency_name", - "state_iso", - "municipality", - "county_name", - "record_type", - "lat", - "lng", - ] + "data_source_id", + "name", + "agency_id", + "agency_name", + "state_iso", + "municipality", + "county_name", + "record_type", + "lat", + "lng", +] def data_source_by_id_results( @@ -264,7 +264,9 @@ def get_data_sources_for_map(conn) -> list: return convert_data_source_matches(DATA_SOURCES_MAP_COLUMN, results) -def convert_data_source_matches(data_source_output_columns: list[str], results: list[tuple]) -> dict: +def convert_data_source_matches( + data_source_output_columns: list[str], results: list[tuple] +) -> dict: """ Combine a list of output columns with a list of results, and produce a list of dictionaries where the keys correspond From 073f60db190a8be8ca8c2ab2e48c7b97adcdedd8 Mon Sep 17 00:00:00 2001 From: maxachis Date: Wed, 22 May 2024 16:38:23 -0400 Subject: [PATCH 046/127] Remove broken import --- resources/SearchTokens.py | 1 - 1 file changed, 1 deletion(-) diff --git a/resources/SearchTokens.py b/resources/SearchTokens.py index 38226032..dc50cdf1 100644 --- a/resources/SearchTokens.py +++ b/resources/SearchTokens.py @@ -1,7 +1,6 @@ from middleware.quick_search_query import quick_search_query from middleware.data_source_queries import ( data_source_by_id_query, - data_sources_query, ) from flask import request import datetime From f3bb63d04cce16354d7519bae566772fa918e2eb Mon Sep 17 00:00:00 2001 From: maxachis Date: Wed, 22 May 2024 16:46:59 -0400 Subject: [PATCH 047/127] Update data source queries in SearchTokens This commit replaces the broken data_sources_query function calls in the SearchTokens module with properly defined functions. This includes using the new get_approved_data_sources function for fetching approved data sources and get_data_sources_for_map for getting data sources specific to maps. 
--- resources/SearchTokens.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/resources/SearchTokens.py b/resources/SearchTokens.py index dc50cdf1..5f9630be 100644 --- a/resources/SearchTokens.py +++ b/resources/SearchTokens.py @@ -1,6 +1,8 @@ from middleware.quick_search_query import quick_search_query from middleware.data_source_queries import ( data_source_by_id_query, + get_data_sources_for_map, + get_approved_data_sources, ) from flask import request import datetime @@ -92,7 +94,9 @@ def get(self) -> Dict[str, Any]: elif endpoint == "data-sources": try: - data_source_matches = data_sources_query(self.psycopg2_connection) + data_source_matches = get_approved_data_sources( + self.psycopg2_connection + ) data_sources = { "count": len(data_source_matches), @@ -123,9 +127,7 @@ def get(self) -> Dict[str, Any]: elif endpoint == "data-sources-map": try: - data_source_details = data_sources_query( - self.psycopg2_connection, [], "approved", True - ) + data_source_details = get_data_sources_for_map(self.psycopg2_connection) if data_source_details: data_sources = { "count": len(data_source_details), From 409d4a8287835de06d3fe4cc9111fb6e66908db6 Mon Sep 17 00:00:00 2001 From: maxachis Date: Wed, 22 May 2024 17:04:24 -0400 Subject: [PATCH 048/127] Add conftest.py for pytest fixtures A new file, conftest.py, has been added to define pytest fixtures. These fixtures provide a test client and session for database interaction, enabling testing within the app context and controlling the database connection lifecycle. 
--- conftest.py | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 conftest.py diff --git a/conftest.py b/conftest.py new file mode 100644 index 00000000..99a0a6c0 --- /dev/null +++ b/conftest.py @@ -0,0 +1,44 @@ +import os + +import dotenv +import pytest +from sqlalchemy.orm import sessionmaker, scoped_session + +from middleware.models import db + +from app import create_app + + +# Load environment variables +dotenv.load_dotenv() + + +@pytest.fixture(scope="module") +def test_client(): + app = create_app() + app.config["SQLALCHEMY_DATABASE_URI"] = os.getenv( + "DEV_DB_CONN_STRING" + ) # Connect to pre-existing test database + app.config["TESTING"] = True + + db.init_app(app) + + with app.test_client() as testing_client: + with app.app_context(): + yield testing_client + + +@pytest.fixture +def session(): + connection = db.engine.connect() + transaction = connection.begin() + session = scoped_session(sessionmaker(bind=connection)) + + # Overwrite the db.session with the scoped session + db.session = session + + yield session + + session.close() + transaction.rollback() + connection.close() From ebd3ef4cacb52954fe616029f9ef6e3de67c04f8 Mon Sep 17 00:00:00 2001 From: maxachis Date: Wed, 22 May 2024 17:06:30 -0400 Subject: [PATCH 049/127] Add User model to middleware A new file named models.py has been added to the middleware directory. It includes the SQLAlchemy model for a 'User' with fields id, created_at, updated_at, email, password_digest, api_key, and role. The creation of this model enables better data handling and interoperability with the database. 
--- middleware/models.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 middleware/models.py diff --git a/middleware/models.py b/middleware/models.py new file mode 100644 index 00000000..25f39633 --- /dev/null +++ b/middleware/models.py @@ -0,0 +1,28 @@ +from flask_sqlalchemy import SQLAlchemy +from sqlalchemy import ( + Column, + BigInteger, + text, + Text, + String, +) +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.dialects.postgresql import TIMESTAMP + +db = SQLAlchemy() + +Base = declarative_base() + + +class User(Base): + __tablename__ = "users" + __table_args__ = {"schema": "public"} + + id = Column(BigInteger, primary_key=True, autoincrement=True) + created_at = Column(TIMESTAMP(timezone=True), server_default=text("now()")) + updated_at = Column(TIMESTAMP(timezone=True), server_default=text("now()")) + email = Column(Text, nullable=False, unique=True) + password_digest = Column(Text) + api_key = Column(String) + role = Column(Text) + From ba4e4a8655c002c74876905914b19d9b6e3eac6f Mon Sep 17 00:00:00 2001 From: maxachis Date: Wed, 22 May 2024 17:16:36 -0400 Subject: [PATCH 050/127] Reformat with black --- middleware/models.py | 1 - 1 file changed, 1 deletion(-) diff --git a/middleware/models.py b/middleware/models.py index 25f39633..c9b2480e 100644 --- a/middleware/models.py +++ b/middleware/models.py @@ -25,4 +25,3 @@ class User(Base): password_digest = Column(Text) api_key = Column(String) role = Column(Text) - From c092c5d9fad41f698f241552f65508e7bcd33b14 Mon Sep 17 00:00:00 2001 From: maxachis Date: Wed, 22 May 2024 17:16:51 -0400 Subject: [PATCH 051/127] Add Flask and Flask-SQLAlchemy to requirements.txt --- requirements.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 5be15865..ec7cc00d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -75,4 +75,6 @@ wasabi==1.1.2 websockets==10.4 Werkzeug==3.0.1 zipp==3.16.2 
-pytest-mock~=3.12.0 \ No newline at end of file +pytest-mock~=3.12.0 +Flask~=3.03 +Flask-SQLAlchemy~=3.11 \ No newline at end of file From 2f30d898bf9204a06a7eb8d6a29313c0841a9fec Mon Sep 17 00:00:00 2001 From: maxachis Date: Wed, 22 May 2024 17:19:01 -0400 Subject: [PATCH 052/127] Correct requirements.txt --- requirements.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index ec7cc00d..41a8d03b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -20,6 +20,7 @@ exceptiongroup==1.1.3 Flask==2.3.2 Flask-Cors==4.0.0 Flask-RESTful==0.3.10 +Flask-SQLAlchemy~=3.11 gotrue==1.0.3 gunicorn==21.2.0 h11==0.14.0 @@ -76,5 +77,3 @@ websockets==10.4 Werkzeug==3.0.1 zipp==3.16.2 pytest-mock~=3.12.0 -Flask~=3.03 -Flask-SQLAlchemy~=3.11 \ No newline at end of file From 85e6fcebf1a88247bb5cff392723443a9355a3a1 Mon Sep 17 00:00:00 2001 From: maxachis Date: Wed, 22 May 2024 17:21:53 -0400 Subject: [PATCH 053/127] Correct requirements.txt --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 41a8d03b..7f29cd45 100644 --- a/requirements.txt +++ b/requirements.txt @@ -20,7 +20,7 @@ exceptiongroup==1.1.3 Flask==2.3.2 Flask-Cors==4.0.0 Flask-RESTful==0.3.10 -Flask-SQLAlchemy~=3.11 +Flask-SQLAlchemy~=3.1.1 gotrue==1.0.3 gunicorn==21.2.0 h11==0.14.0 From ea715115a8dedbff6cee235712edd3d3fad1617d Mon Sep 17 00:00:00 2001 From: maxachis Date: Wed, 22 May 2024 20:25:41 -0400 Subject: [PATCH 054/127] Refactor token management and search endpoints The token generation process has been simplified by extracting code into a standalone module. Additionally, the endpoints in SearchTokens.py have been refactored to use app's test client for queries, significantly reducing error handling and webhook reporting code. This makes the code in the endpoints cleaner and easier to maintain. 
--- middleware/token_management.py | 12 ++++ resources/SearchTokens.py | 127 ++++++++------------------------- 2 files changed, 43 insertions(+), 96 deletions(-) create mode 100644 middleware/token_management.py diff --git a/middleware/token_management.py b/middleware/token_management.py new file mode 100644 index 00000000..1e22cd96 --- /dev/null +++ b/middleware/token_management.py @@ -0,0 +1,12 @@ +import datetime +import uuid + + +def insert_new_access_token(cursor): + token = uuid.uuid4().hex + expiration = datetime.datetime.now() + datetime.timedelta(minutes=5) + cursor.execute( + f"insert into access_tokens (token, expiration_date) values (%s, %s)", + (token, expiration), + ) + return token diff --git a/resources/SearchTokens.py b/resources/SearchTokens.py index 5f9630be..8884726b 100644 --- a/resources/SearchTokens.py +++ b/resources/SearchTokens.py @@ -1,19 +1,12 @@ -from middleware.quick_search_query import quick_search_query -from middleware.data_source_queries import ( - data_source_by_id_query, - get_data_sources_for_map, - get_approved_data_sources, -) from flask import request -import datetime -import uuid import os -import requests import sys import json from typing import Dict, Any +from middleware.token_management import insert_new_access_token from resources.PsycopgResource import PsycopgResource, handle_exceptions +from app import app sys.path.append("..") @@ -24,6 +17,10 @@ class SearchTokens(PsycopgResource): """ A resource that provides various search functionalities based on the specified endpoint. It supports quick search, data source retrieval by ID, and listing all data sources. + + The search tokens endpoint generates an API token valid for 5 minutes and + forwards the search parameters to the Quick Search endpoint. + This endpoint is meant for use by the front end only. 
""" @handle_exceptions @@ -45,101 +42,39 @@ def get(self) -> Dict[str, Any]: arg1 = url_params.get("arg1") arg2 = url_params.get("arg2") print(endpoint, arg1, arg2) - data_sources = {"count": 0, "data": []} - if type(self.psycopg2_connection) == dict: - return data_sources cursor = self.psycopg2_connection.cursor() - token = uuid.uuid4().hex - expiration = datetime.datetime.now() + datetime.timedelta(minutes=5) - cursor.execute( - f"insert into access_tokens (token, expiration_date) values (%s, %s)", - (token, expiration), - ) + token = insert_new_access_token(cursor) self.psycopg2_connection.commit() if endpoint == "quick-search": - try: - data = request.get_json() - test = data.get("test_flag") - except: - test = False - try: - data_sources = quick_search_query( - arg1, arg2, [], self.psycopg2_connection, test + with app.test_client() as client: + response = client.get( + f"/quick-search/{arg1}/{arg2}", + headers={"Authorization": f"Bearer {token}"}, ) + return json.loads(response.data) - return data_sources - - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - webhook_url = os.getenv("WEBHOOK_URL") - user_message = "There was an error during the search operation" - message = { - "content": user_message - + ": " - + str(e) - + "\n" - + f"Search term: {arg1}\n" - + f"Location: {arg2}" - } - requests.post( - webhook_url, - data=json.dumps(message), - headers={"Content-Type": "application/json"}, + if endpoint == "data-sources": + with app.test_client() as client: + response = client.get( + "/data-sources", + headers={"Authorization": f"Bearer {token}"}, ) + return json.loads(response.data) - return {"count": 0, "message": user_message}, 500 - - elif endpoint == "data-sources": - try: - data_source_matches = get_approved_data_sources( - self.psycopg2_connection + if endpoint == "data-sources-by-id": + with app.test_client() as client: + response = client.get( + f"/data-sources-by-id/{arg1}", + headers={"Authorization": f"Bearer 
{token}"}, ) - - data_sources = { - "count": len(data_source_matches), - "data": data_source_matches, - } - - return data_sources - - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - return {"message": "There has been an error pulling data!"}, 500 - - elif endpoint == "data-sources-by-id": - try: - data_source_details = data_source_by_id_query( - arg1, [], self.psycopg2_connection + return json.loads(response.data) + if endpoint == "data-sources-map": + with app.test_client() as client: + response = client.get( + "/data-sources-map", + headers={"Authorization": f"Bearer {token}"}, ) - if data_source_details: - return data_source_details - - else: - return {"message": "Data source not found."}, 404 - - except Exception as e: - print(str(e)) - return {"message": "There has been an error pulling data!"}, 500 - - elif endpoint == "data-sources-map": - try: - data_source_details = get_data_sources_for_map(self.psycopg2_connection) - if data_source_details: - data_sources = { - "count": len(data_source_details), - "data": data_source_details, - } - return data_sources - - else: - return {"message": "There has been an error pulling data!"}, 500 - - except Exception as e: - print(str(e)) - return {"message": "There has been an error pulling data!"}, 500 - else: - return {"message": "Unknown endpoint"}, 500 + return json.loads(response.data) + return {"message": "Unknown endpoint"}, 500 From 7765fe7cb65bfb95a247b58d038d3d76e0da4d7c Mon Sep 17 00:00:00 2001 From: maxachis Date: Wed, 22 May 2024 21:12:55 -0400 Subject: [PATCH 055/127] Refactor search endpoints and token management This commit introduces a major refactor in the SearchTokens.py file. The token generation process now uses a standalone module, and each search endpoint has been encapsulated into its own handler class. This not only simplifies the token generation but also improves the organization and maintainability of the search endpoints. 
--- resources/SearchTokens.py | 96 ++++++++++++++++++++------------------- 1 file changed, 50 insertions(+), 46 deletions(-) diff --git a/resources/SearchTokens.py b/resources/SearchTokens.py index 8884726b..f42462d6 100644 --- a/resources/SearchTokens.py +++ b/resources/SearchTokens.py @@ -2,7 +2,6 @@ import os import sys import json -from typing import Dict, Any from middleware.token_management import insert_new_access_token from resources.PsycopgResource import PsycopgResource, handle_exceptions @@ -13,6 +12,41 @@ BASE_URL = os.getenv("VITE_VUE_API_BASE_URL") +class BaseEndpointHandler: + def __init__(self, app, token): + self.app = app + self.token = token + + def send_request_with_token(self, path, arg1=None, arg2=None): + path = path.format(arg1, arg2) if arg1 or arg2 else path + with self.app.test_client() as client: + response = client.get( + path, + headers={"Authorization": f"Bearer {self.token}"}, + ) + return json.loads(response.data), response.status_code + + +class QuickSearchHandler(BaseEndpointHandler): + def get(self, arg1, arg2): + return self.send_request_with_token("/quick-search/{}/{}/", arg1, arg2) + + +class DataSourcesHandler(BaseEndpointHandler): + def get(self, _1, _2): + return self.send_request_with_token("/data-sources") + + +class DataSourcesByIdHandler(BaseEndpointHandler): + def get(self, arg1, _2): + return self.send_request_with_token("/data-sources-by-id/{}", arg1) + + +class DataSourcesMapHandler(BaseEndpointHandler): + def get(self, _1, _2): + return self.send_request_with_token("/data-sources-map") + + class SearchTokens(PsycopgResource): """ A resource that provides various search functionalities based on the specified endpoint. @@ -23,58 +57,28 @@ class SearchTokens(PsycopgResource): This endpoint is meant for use by the front end only. 
""" + endpoint_handlers = { + "quick-search": QuickSearchHandler, + "data-sources": DataSourcesHandler, + "data-sources-by-id": DataSourcesByIdHandler, + "data-sources-map": DataSourcesMapHandler, + } + @handle_exceptions - def get(self) -> Dict[str, Any]: - """ - Handles GET requests by performing a search operation based on the specified endpoint and arguments. - - The function supports the following endpoints: - - quick-search: Performs a quick search with specified search terms and location. - - data-sources: Retrieves a list of all data sources. - - data-sources-by-id: Retrieves details of a data source by its ID. - - data-sources-map: Retrieves data sources for the map. - - Returns: - - A dictionary with the search results or an error message. - """ + def get(self): url_params = request.args endpoint = url_params.get("endpoint") arg1 = url_params.get("arg1") arg2 = url_params.get("arg2") - print(endpoint, arg1, arg2) cursor = self.psycopg2_connection.cursor() token = insert_new_access_token(cursor) + self.psycopg2_connection.commit() - if endpoint == "quick-search": - with app.test_client() as client: - response = client.get( - f"/quick-search/{arg1}/{arg2}", - headers={"Authorization": f"Bearer {token}"}, - ) - return json.loads(response.data) - - if endpoint == "data-sources": - with app.test_client() as client: - response = client.get( - "/data-sources", - headers={"Authorization": f"Bearer {token}"}, - ) - return json.loads(response.data) - - if endpoint == "data-sources-by-id": - with app.test_client() as client: - response = client.get( - f"/data-sources-by-id/{arg1}", - headers={"Authorization": f"Bearer {token}"}, - ) - return json.loads(response.data) - if endpoint == "data-sources-map": - with app.test_client() as client: - response = client.get( - "/data-sources-map", - headers={"Authorization": f"Bearer {token}"}, - ) - return json.loads(response.data) - return {"message": "Unknown endpoint"}, 500 + handler = self.endpoint_handlers.get(endpoint) + 
+ if handler is None: + return {"message": "Unknown endpoint"}, 500 + resp_handler = handler(app, token) + return resp_handler.get(arg1, arg2) From 5a9bfabe1b67dc2da5042e415e9f3e0140d16838 Mon Sep 17 00:00:00 2001 From: maxachis Date: Wed, 22 May 2024 22:10:55 -0400 Subject: [PATCH 056/127] Refactor search functionalities and improve token handling This commit neatly packages each search functionality into its own function and refactors token management with the creation of the insert_access_token function. This reorganization enhances readability, clarity of function duties and promotes best practices for efficient program structure. --- middleware/data_source_queries.py | 28 ++++++++ middleware/quick_search_query.py | 28 ++++++++ middleware/webhook_logic.py | 14 ++++ resources/DataSources.py | 36 ++-------- resources/QuickSearch.py | 51 +------------- resources/SearchTokens.py | 110 +++++------------------------- 6 files changed, 96 insertions(+), 171 deletions(-) create mode 100644 middleware/webhook_logic.py diff --git a/middleware/data_source_queries.py b/middleware/data_source_queries.py index 36762125..07939359 100644 --- a/middleware/data_source_queries.py +++ b/middleware/data_source_queries.py @@ -1,4 +1,7 @@ from typing import List, Dict, Any, Optional, Tuple, Union + +from sqlalchemy.dialects.postgresql import psycopg2 + from utilities.common import convert_dates_to_strings, format_arrays from psycopg2.extensions import connection as PgConnection @@ -88,6 +91,31 @@ ] +def get_approved_data_sources_wrapper(conn: PgConnection): + data_source_matches = get_approved_data_sources(conn) + + return { + "count": len(data_source_matches), + "data": data_source_matches, + }, 200 + + +def data_source_by_id_wrapper(arg, conn: PgConnection): + data_source_details = data_source_by_id_query(arg, conn=conn) + if data_source_details: + return data_source_details, 200 + + else: + return {"message": "Data source not found."}, 404 + + +def 
get_data_sources_for_map_wrapper(conn: PgConnection): + data_source_details = get_data_sources_for_map(conn) + return { + "count": len(data_source_details), + "data": data_source_details, + }, 200 + def data_source_by_id_results( conn: PgConnection, data_source_id: str ) -> Union[tuple[Any, ...], None]: diff --git a/middleware/quick_search_query.py b/middleware/quick_search_query.py index 4584c097..906cc5be 100644 --- a/middleware/quick_search_query.py +++ b/middleware/quick_search_query.py @@ -1,6 +1,10 @@ import spacy import json import datetime + +from sqlalchemy.dialects.postgresql import psycopg2 + +from middleware.webhook_logic import post_to_webhook from utilities.common import convert_dates_to_strings, format_arrays from typing import List, Dict, Any, Optional from psycopg2.extensions import connection as PgConnection, cursor as PgCursor @@ -175,3 +179,27 @@ def quick_search_query( cursor.close() return data_sources + + +def quick_search_query_wrapper(arg1, arg2, conn: psycopg2.extensions.connection): + try: + data_sources = quick_search_query( + arg1, arg2, conn=conn + ) + + return data_sources, 200 + + except Exception as e: + conn.rollback() + user_message = "There was an error during the search operation" + message = { + "content": user_message + + ": " + + str(e) + + "\n" + + f"Search term: {arg1}\n" + + f"Location: {arg2}" + } + post_to_webhook(json.dumps(message)) + + return {"count": 0, "message": user_message}, 500 diff --git a/middleware/webhook_logic.py b/middleware/webhook_logic.py new file mode 100644 index 00000000..b30a022b --- /dev/null +++ b/middleware/webhook_logic.py @@ -0,0 +1,14 @@ +import json +import os + +import requests + + +def post_to_webhook(data: str): + webhook_url = os.getenv("WEBHOOK_URL") + + requests.post( + webhook_url, + data=data, + headers={"Content-Type": "application/json"}, + ) diff --git a/resources/DataSources.py b/resources/DataSources.py index b474d0cf..230baded 100644 --- a/resources/DataSources.py +++ 
b/resources/DataSources.py @@ -1,10 +1,10 @@ from flask import request from middleware.security import api_required from middleware.data_source_queries import ( - data_source_by_id_query, - get_data_sources_for_map, - get_approved_data_sources, needs_identification_data_sources, + get_approved_data_sources_wrapper, + data_source_by_id_wrapper, + get_data_sources_for_map_wrapper, ) from datetime import datetime @@ -32,17 +32,7 @@ def get(self, data_source_id: str) -> Tuple[Dict[str, Any], int]: Returns: - Tuple containing the response message with data source details if found, and the HTTP status code. """ - data_source_details = data_source_by_id_query( - conn=self.psycopg2_connection, data_source_id=data_source_id - ) - if data_source_details: - return { - "message": "Successfully found data source", - "data": data_source_details, - } - - else: - return {"message": "Data source not found."}, 404 + return data_source_by_id_wrapper(data_source_id, self.psycopg2_connection) @handle_exceptions @api_required @@ -105,14 +95,7 @@ def get(self) -> Dict[str, Any]: Returns: - A dictionary containing the count of data sources and their details. """ - data_source_matches = get_approved_data_sources(self.psycopg2_connection) - - data_sources = { - "count": len(data_source_matches), - "data": data_source_matches, - } - - return data_sources + return get_approved_data_sources_wrapper(self.psycopg2_connection) @handle_exceptions @api_required @@ -190,11 +173,4 @@ def get(self) -> Dict[str, Any]: Returns: - A dictionary containing the count of data sources and their details. 
""" - data_source_matches = get_data_sources_for_map(self.psycopg2_connection) - - data_sources = { - "count": len(data_source_matches), - "data": data_source_matches, - } - - return data_sources + return get_data_sources_for_map_wrapper(self.psycopg2_connection) diff --git a/resources/QuickSearch.py b/resources/QuickSearch.py index ee85f0ac..a4749775 100644 --- a/resources/QuickSearch.py +++ b/resources/QuickSearch.py @@ -1,5 +1,5 @@ from middleware.security import api_required -from middleware.quick_search_query import quick_search_query +from middleware.quick_search_query import quick_search_query, quick_search_query_wrapper import requests import json import os @@ -32,51 +32,4 @@ def get(self, search: str, location: str) -> Dict[str, Any]: Returns: - A dictionary containing a message about the search results and the data found, if any. """ - try: - data = request.get_json() - test = data.get("test_flag") - except: - test = False - - try: - data_sources = quick_search_query( - search, location, [], self.psycopg2_connection, test - ) - - if data_sources["count"] == 0: - self.psycopg2_connection = initialize_psycopg2_connection() - data_sources = quick_search_query( - search, location, [], self.psycopg2_connection - ) - - if data_sources["count"] == 0: - return { - "count": 0, - "message": "No results found. 
Please considering requesting a new data source.", - }, 404 - - return { - "message": "Results for search successfully retrieved", - "data": data_sources, - } - - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - webhook_url = os.getenv("WEBHOOK_URL") - user_message = "There was an error during the search operation" - message = { - "content": user_message - + ": " - + str(e) - + "\n" - + f"Search term: {search}\n" - + f"Location: {location}" - } - requests.post( - webhook_url, - data=json.dumps(message), - headers={"Content-Type": "application/json"}, - ) - - return {"count": 0, "message": user_message}, 500 + return quick_search_query_wrapper(search, location, self.psycopg2_connection) diff --git a/resources/SearchTokens.py b/resources/SearchTokens.py index 5f9630be..e65fac07 100644 --- a/resources/SearchTokens.py +++ b/resources/SearchTokens.py @@ -1,16 +1,17 @@ -from middleware.quick_search_query import quick_search_query +from middleware.quick_search_query import quick_search_query_wrapper from middleware.data_source_queries import ( data_source_by_id_query, get_data_sources_for_map, get_approved_data_sources, + get_approved_data_sources_wrapper, + data_source_by_id_wrapper, + get_data_sources_for_map_wrapper, ) from flask import request import datetime import uuid import os -import requests import sys -import json from typing import Dict, Any from resources.PsycopgResource import PsycopgResource, handle_exceptions @@ -20,6 +21,15 @@ BASE_URL = os.getenv("VITE_VUE_API_BASE_URL") +def insert_access_token(cursor): + token = uuid.uuid4().hex + expiration = datetime.datetime.now() + datetime.timedelta(minutes=5) + cursor.execute( + f"insert into access_tokens (token, expiration_date) values (%s, %s)", + (token, expiration), + ) + + class SearchTokens(PsycopgResource): """ A resource that provides various search functionalities based on the specified endpoint. 
@@ -44,102 +54,18 @@ def get(self) -> Dict[str, Any]: endpoint = url_params.get("endpoint") arg1 = url_params.get("arg1") arg2 = url_params.get("arg2") - print(endpoint, arg1, arg2) - data_sources = {"count": 0, "data": []} - if type(self.psycopg2_connection) == dict: - return data_sources cursor = self.psycopg2_connection.cursor() - token = uuid.uuid4().hex - expiration = datetime.datetime.now() + datetime.timedelta(minutes=5) - cursor.execute( - f"insert into access_tokens (token, expiration_date) values (%s, %s)", - (token, expiration), - ) + insert_access_token(cursor) self.psycopg2_connection.commit() if endpoint == "quick-search": - try: - data = request.get_json() - test = data.get("test_flag") - except: - test = False - try: - data_sources = quick_search_query( - arg1, arg2, [], self.psycopg2_connection, test - ) - - return data_sources - - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - webhook_url = os.getenv("WEBHOOK_URL") - user_message = "There was an error during the search operation" - message = { - "content": user_message - + ": " - + str(e) - + "\n" - + f"Search term: {arg1}\n" - + f"Location: {arg2}" - } - requests.post( - webhook_url, - data=json.dumps(message), - headers={"Content-Type": "application/json"}, - ) - - return {"count": 0, "message": user_message}, 500 - + return quick_search_query_wrapper(arg1, arg2, self.psycopg2_connection) elif endpoint == "data-sources": - try: - data_source_matches = get_approved_data_sources( - self.psycopg2_connection - ) - - data_sources = { - "count": len(data_source_matches), - "data": data_source_matches, - } - - return data_sources - - except Exception as e: - self.psycopg2_connection.rollback() - print(str(e)) - return {"message": "There has been an error pulling data!"}, 500 - + return get_approved_data_sources_wrapper(self.psycopg2_connection) elif endpoint == "data-sources-by-id": - try: - data_source_details = data_source_by_id_query( - arg1, [], 
self.psycopg2_connection - ) - if data_source_details: - return data_source_details - - else: - return {"message": "Data source not found."}, 404 - - except Exception as e: - print(str(e)) - return {"message": "There has been an error pulling data!"}, 500 - + return data_source_by_id_wrapper(arg1, self.psycopg2_connection) elif endpoint == "data-sources-map": - try: - data_source_details = get_data_sources_for_map(self.psycopg2_connection) - if data_source_details: - data_sources = { - "count": len(data_source_details), - "data": data_source_details, - } - return data_sources - - else: - return {"message": "There has been an error pulling data!"}, 500 - - except Exception as e: - print(str(e)) - return {"message": "There has been an error pulling data!"}, 500 + get_data_sources_for_map_wrapper(self.psycopg2_connection) else: return {"message": "Unknown endpoint"}, 500 From 41aa4d550ec8c6022cb3acb20d20668c35368e4c Mon Sep 17 00:00:00 2001 From: maxachis Date: Wed, 22 May 2024 22:11:27 -0400 Subject: [PATCH 057/127] Correct bug in SearchTokens get method --- resources/SearchTokens.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resources/SearchTokens.py b/resources/SearchTokens.py index e65fac07..b81a29b1 100644 --- a/resources/SearchTokens.py +++ b/resources/SearchTokens.py @@ -66,6 +66,6 @@ def get(self) -> Dict[str, Any]: elif endpoint == "data-sources-by-id": return data_source_by_id_wrapper(arg1, self.psycopg2_connection) elif endpoint == "data-sources-map": - get_data_sources_for_map_wrapper(self.psycopg2_connection) + return get_data_sources_for_map_wrapper(self.psycopg2_connection) else: return {"message": "Unknown endpoint"}, 500 From 9c57626b58282c4078d7d54ac500fd95b5d02cca Mon Sep 17 00:00:00 2001 From: maxachis Date: Thu, 23 May 2024 12:00:56 -0400 Subject: [PATCH 058/127] Refactor application startup in app.py Changed app creation method to allow app context for shell usage. 
The app creation call is now outside the '__main__' condition, making it available at launch. This will help in debugging and interactive command-line usage. --- app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app.py b/app.py index f3a8f341..06964adb 100644 --- a/app.py +++ b/app.py @@ -55,7 +55,7 @@ def create_app() -> Flask: return app +app = create_app() if __name__ == "__main__": - app = create_app() app.run(debug=True, host="0.0.0.0") From 3ac41a4ab9fcb3ef39404a64367979f262ebf657 Mon Sep 17 00:00:00 2001 From: maxachis Date: Thu, 23 May 2024 12:01:19 -0400 Subject: [PATCH 059/127] Remove GET and POST methods from PsycopgResource The base implementations of GET and POST methods have been removed from the PsycopgResource module. This implies that these methods should now be strictly defined in the individual subclasses and it avoids raising unnecessary NotImplementedError exceptions. --- resources/PsycopgResource.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/resources/PsycopgResource.py b/resources/PsycopgResource.py index 655186a0..c58feb61 100644 --- a/resources/PsycopgResource.py +++ b/resources/PsycopgResource.py @@ -49,14 +49,3 @@ def __init__(self, **kwargs): """ self.psycopg2_connection = kwargs["psycopg2_connection"] - def get(self): - """ - Base implementation of GET. Override in subclasses as needed. - """ - raise NotImplementedError("This method should be overridden by subclasses") - - def post(self): - """ - Base implementation of POST. Override in subclasses as needed. 
- """ - raise NotImplementedError("This method should be overridden by subclasses") From ee5435bf4d7bf4836693ac817fd2ad98526c7f11 Mon Sep 17 00:00:00 2001 From: maxachis Date: Thu, 23 May 2024 12:01:51 -0400 Subject: [PATCH 060/127] Change connection type in quick_search_query_wrapper function Connection type for the quick_search_query_wrapper function in the middleware quick_search_query module has been changed from psycopg2.extensions.connection to PgConnection, a placeholder for the longer full name. --- middleware/quick_search_query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/middleware/quick_search_query.py b/middleware/quick_search_query.py index 906cc5be..1f78fbec 100644 --- a/middleware/quick_search_query.py +++ b/middleware/quick_search_query.py @@ -181,7 +181,7 @@ def quick_search_query( return data_sources -def quick_search_query_wrapper(arg1, arg2, conn: psycopg2.extensions.connection): +def quick_search_query_wrapper(arg1, arg2, conn: PgConnection): try: data_sources = quick_search_query( arg1, arg2, conn=conn From 7abef15f9ab36724363a863b1a0207a1f8aeac62 Mon Sep 17 00:00:00 2001 From: maxachis Date: Thu, 23 May 2024 12:02:08 -0400 Subject: [PATCH 061/127] Add access token insertion logic A new file called access_token_logic.py was added to the middleware. This file contains a function for generating and inserting a new access token with an expiration time into the access_tokens database. 
--- middleware/access_token_logic.py | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 middleware/access_token_logic.py diff --git a/middleware/access_token_logic.py b/middleware/access_token_logic.py new file mode 100644 index 00000000..ceef6808 --- /dev/null +++ b/middleware/access_token_logic.py @@ -0,0 +1,11 @@ +import datetime +import uuid + + +def insert_access_token(cursor): + token = uuid.uuid4().hex + expiration = datetime.datetime.now() + datetime.timedelta(minutes=5) + cursor.execute( + f"insert into access_tokens (token, expiration_date) values (%s, %s)", + (token, expiration), + ) From 793ef4d259b2925b428c5bd222f2cebbf5bafee5 Mon Sep 17 00:00:00 2001 From: maxachis Date: Thu, 23 May 2024 12:02:20 -0400 Subject: [PATCH 062/127] Refactor access token insertion logic The logic for generating and inserting an access token has been moved from `SearchTokens.py` in the resources directory to `access_token_logic.py` in the middleware directory. This enhances modularity and organization of the code base. 
--- resources/SearchTokens.py | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/resources/SearchTokens.py b/resources/SearchTokens.py index b81a29b1..76cf9107 100644 --- a/resources/SearchTokens.py +++ b/resources/SearchTokens.py @@ -1,15 +1,11 @@ +from middleware.access_token_logic import insert_access_token from middleware.quick_search_query import quick_search_query_wrapper from middleware.data_source_queries import ( - data_source_by_id_query, - get_data_sources_for_map, - get_approved_data_sources, get_approved_data_sources_wrapper, data_source_by_id_wrapper, get_data_sources_for_map_wrapper, ) from flask import request -import datetime -import uuid import os import sys from typing import Dict, Any @@ -21,15 +17,6 @@ BASE_URL = os.getenv("VITE_VUE_API_BASE_URL") -def insert_access_token(cursor): - token = uuid.uuid4().hex - expiration = datetime.datetime.now() + datetime.timedelta(minutes=5) - cursor.execute( - f"insert into access_tokens (token, expiration_date) values (%s, %s)", - (token, expiration), - ) - - class SearchTokens(PsycopgResource): """ A resource that provides various search functionalities based on the specified endpoint. From e4797de13330fed08aec2fb528612f18f74489b1 Mon Sep 17 00:00:00 2001 From: maxachis Date: Thu, 23 May 2024 12:02:42 -0400 Subject: [PATCH 063/127] Add unit tests for endpoint behavior A new test file called 'test_endpoints.py' has been added. This contains tests that confirm the correct response status for GET, PUT, POST, and DELETE requests to the /search-tokens endpoint. Unit tests help to ensure that our endpoints behave as expected. 
--- tests/test_endpoints.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 tests/test_endpoints.py diff --git a/tests/test_endpoints.py b/tests/test_endpoints.py new file mode 100644 index 00000000..548f8f05 --- /dev/null +++ b/tests/test_endpoints.py @@ -0,0 +1,26 @@ +import pytest +from unittest.mock import patch +from app import app, SearchTokens + +@pytest.fixture +def client(): + with app.test_client() as client: + yield client + +def test_search_tokens_get_called(client): + with patch.object(SearchTokens, 'get', return_value="Mocked response") as mock_get: + response = client.get('/search-tokens') + assert response.status_code == 200 + mock_get.assert_called_once() + +def test_search_tokens_put_not_allowed(client): + response = client.put('/search-tokens') + assert response.status_code == 405 # 405 Method Not Allowed + +def test_search_tokens_post_not_allowed(client): + response = client.post('/search-tokens') + assert response.status_code == 405 # 405 Method Not Allowed + +def test_search_tokens_delete_not_allowed(client): + response = client.delete('/search-tokens') + assert response.status_code == 405 # 405 Method Not Allowed \ No newline at end of file From d336701aa03af45c564307ef7567d14839b101f6 Mon Sep 17 00:00:00 2001 From: maxachis Date: Thu, 23 May 2024 12:03:24 -0400 Subject: [PATCH 064/127] Add unit tests for SearchTokens resource Added a new file, 'test_search_tokens.py', containing unit tests for the SearchTokens resource. These tests confirm that each endpoint calls the correct wrapper function with the appropriate parameters. This helps maintain assurance that our resources behave as intended. 
--- tests/resources/test_search_tokens.py | 145 ++++++++++++++++++++++++++ 1 file changed, 145 insertions(+) create mode 100644 tests/resources/test_search_tokens.py diff --git a/tests/resources/test_search_tokens.py b/tests/resources/test_search_tokens.py new file mode 100644 index 00000000..e4f5d616 --- /dev/null +++ b/tests/resources/test_search_tokens.py @@ -0,0 +1,145 @@ +import unittest.mock +from collections import namedtuple + +import pytest +from flask import Flask + +from resources.SearchTokens import SearchTokens + +class MockPsycopgConnection: + def cursor(self): + return MockCursor() + + def commit(self): + pass + + def rollback(self): + pass + + +class MockCursor: + def execute(self, query, params=None): + pass + + def fetchall(self): + pass + + +@pytest.fixture +def app(): + app = Flask(__name__) + app.config.update({"TESTING": True}) + return app + + +@pytest.fixture +def client(app): + return app.test_client() + + +@pytest.fixture +def mock_psycopg_connection(): + return MockPsycopgConnection() + + +@pytest.fixture +def search_tokens(mock_psycopg_connection): + return SearchTokens(psycopg2_connection=mock_psycopg_connection) + + +@pytest.fixture +def mock_dependencies(mocker): + mocks = { + "insert_access_token": mocker.patch( + "resources.SearchTokens.insert_access_token", return_value=None + ), + "quick-search": mocker.patch( + "resources.SearchTokens.quick_search_query_wrapper", + return_value={"result": "quick_search"}, + ), + "data-sources": mocker.patch( + "resources.SearchTokens.get_approved_data_sources_wrapper", + return_value={"result": "data_sources"}, + ), + "data-sources-by-id": mocker.patch( + "resources.SearchTokens.data_source_by_id_wrapper", + return_value={"result": "data_source_by_id"}, + ), + "data-sources-map": mocker.patch( + "resources.SearchTokens.get_data_sources_for_map_wrapper", + return_value={"result": "data_sources_map"}, + ), + } + return mocks + + +def perform_test_search_tokens_endpoint( + search_tokens, + mocker, + 
app, + endpoint, + expected_response, + params=None, + mocked_dependencies: dict[str, unittest.mock.MagicMock] = None, +): + mock_insert_access_token = mocker.patch( + "resources.SearchTokens.insert_access_token" + ) + url = generate_url(endpoint, params) + + with app.test_request_context(url): + response = search_tokens.get() + assert ( + response == expected_response + ), f"{endpoint} endpoint should call {expected_response}, got {response}" + mock_insert_access_token.assert_called_once() + if endpoint in mocked_dependencies: + # Check parameters properly called + mock_dependency = mocked_dependencies[endpoint] + call_args = tuple(params.values()) if params else () + mock_dependency.assert_called_with( + *call_args, search_tokens.psycopg2_connection + ), f"{mock_dependency._mock_name or 'mock'} was not called with the expected parameters" + + +def generate_url(endpoint, params): + url = f"/?endpoint={endpoint}" + if params: + url += "".join([f"&{key}={value}" for key, value in params.items()]) + return url + + +TestCase = namedtuple("TestCase", ["endpoint", "expected_response", "params"]) + +test_cases = [ + TestCase( + "quick-search", {"result": "quick_search"}, {"arg1": "test1", "arg2": "test2"} + ), + TestCase("data-sources", {"result": "data_sources"}, None), + TestCase("data-sources-by-id", {"result": "data_source_by_id"}, {"arg1": "1"}), + TestCase("data-sources-map", {"result": "data_sources_map"}, None), + TestCase("unknown", ({"message": "Unknown endpoint"}, 500), None), +] + + +@pytest.mark.parametrize("test_case", test_cases) +def test_endpoints(search_tokens, mocker, app, test_case, mock_dependencies): + """ + Perform test for endpoints, ensuring each provided endpoint calls + the appropriate wrapper function with the appropriate arguments + + :param search_tokens: The search tokens to be used for the test. + :param mocker: The mocker object. + :param app: The application object. + :param test_case: The test case object. 
+ :return: None + """ + perform_test_search_tokens_endpoint( + search_tokens, + mocker, + app, + test_case.endpoint, + test_case.expected_response, + test_case.params, + mock_dependencies, + ) From f1d1363cccd392c7c8efdea61d77ad47d26a6a3b Mon Sep 17 00:00:00 2001 From: maxachis Date: Thu, 23 May 2024 15:41:18 -0400 Subject: [PATCH 065/127] Refactor return statements in data_source_queries.py Replaced return statements in middleware/data_source_queries.py with Flask's make_response function. This provides a more consistent way of creating response objects and handles application/json content type automatically. It is a recommended way to construct HTTP responses in Flask. --- middleware/data_source_queries.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/middleware/data_source_queries.py b/middleware/data_source_queries.py index 07939359..0cb6fbba 100644 --- a/middleware/data_source_queries.py +++ b/middleware/data_source_queries.py @@ -1,5 +1,6 @@ from typing import List, Dict, Any, Optional, Tuple, Union +from flask import make_response from sqlalchemy.dialects.postgresql import psycopg2 from utilities.common import convert_dates_to_strings, format_arrays @@ -94,27 +95,27 @@ def get_approved_data_sources_wrapper(conn: PgConnection): data_source_matches = get_approved_data_sources(conn) - return { + return make_response({ "count": len(data_source_matches), "data": data_source_matches, - }, 200 + }, 200) def data_source_by_id_wrapper(arg, conn: PgConnection): data_source_details = data_source_by_id_query(arg, conn=conn) if data_source_details: - return data_source_details, 200 + return make_response(data_source_details, 200) else: - return {"message": "Data source not found."}, 404 + return make_response({"message": "Data source not found."}, 404) def get_data_sources_for_map_wrapper(conn: PgConnection): data_source_details = get_data_sources_for_map(conn) - return { + return make_response({ "count": len(data_source_details), 
"data": data_source_details, - }, 200 + }, 200) def data_source_by_id_results( conn: PgConnection, data_source_id: str From e6cd7a1ac58a7cb24a3435f73f5c7f5cfc4220bd Mon Sep 17 00:00:00 2001 From: maxachis Date: Thu, 23 May 2024 16:49:17 -0400 Subject: [PATCH 066/127] Update error responses in PsycopgResource.py Adjusted error responses in PsycopgResource.py to use Flask's make_response function. This alteration ensures a more standardized creation of response objects, while automatically handling the application/json content type, in line with best Flask practices. --- resources/PsycopgResource.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/resources/PsycopgResource.py b/resources/PsycopgResource.py index c58feb61..d617c773 100644 --- a/resources/PsycopgResource.py +++ b/resources/PsycopgResource.py @@ -1,6 +1,7 @@ import functools from typing import Callable, Any, Union, Tuple, Dict +from flask import make_response from flask_restful import Resource @@ -36,7 +37,7 @@ def wrapper( except Exception as e: self.psycopg2_connection.rollback() print(str(e)) - return {"message": str(e)}, 500 + return make_response({"message": str(e)}, 500) return wrapper From 06a029747f239b1d80dd8887c543d511864a44ec Mon Sep 17 00:00:00 2001 From: maxachis Date: Thu, 23 May 2024 16:49:33 -0400 Subject: [PATCH 067/127] Update to use Flask's make_response in quick_search_query.py The responses in the quick_search_query.py have been updated to the Flask's make_response function, instead of directly returning a tuple. This change ensures standardized response object creation and better handling of application/json content type. 
--- middleware/quick_search_query.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/middleware/quick_search_query.py b/middleware/quick_search_query.py index 1f78fbec..9b79a9b3 100644 --- a/middleware/quick_search_query.py +++ b/middleware/quick_search_query.py @@ -2,6 +2,7 @@ import json import datetime +from flask import make_response from sqlalchemy.dialects.postgresql import psycopg2 from middleware.webhook_logic import post_to_webhook @@ -187,7 +188,7 @@ def quick_search_query_wrapper(arg1, arg2, conn: PgConnection): arg1, arg2, conn=conn ) - return data_sources, 200 + return make_response(data_sources, 200) except Exception as e: conn.rollback() From 6a697ef6f8b2e3f79083e03a28b0d4fe073ddfd2 Mon Sep 17 00:00:00 2001 From: maxachis Date: Thu, 23 May 2024 16:50:00 -0400 Subject: [PATCH 068/127] Added custom exception for unknown endpoint in SearchTokens Introduced a new 'UnknownEndpointError' to handle the case of unrecognized endpoints in the 'perform_endpoint_logic' method within the 'SearchTokens' class. This provides a more structured approach to error handling in the application, making it easier to identify and diagnose problems related to unknown endpoints. 
--- resources/SearchTokens.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/resources/SearchTokens.py b/resources/SearchTokens.py index 76cf9107..2ea6aff2 100644 --- a/resources/SearchTokens.py +++ b/resources/SearchTokens.py @@ -5,7 +5,7 @@ data_source_by_id_wrapper, get_data_sources_for_map_wrapper, ) -from flask import request +from flask import request, make_response import os import sys from typing import Dict, Any @@ -16,6 +16,10 @@ BASE_URL = os.getenv("VITE_VUE_API_BASE_URL") +class UnknownEndpointError(Exception): + def __init__(self, endpoint): + self.message = f"Unknown endpoint: {endpoint}" + super().__init__(self.message) class SearchTokens(PsycopgResource): """ @@ -46,13 +50,15 @@ def get(self) -> Dict[str, Any]: insert_access_token(cursor) self.psycopg2_connection.commit() + return self.perform_endpoint_logic(arg1, arg2, endpoint) + + def perform_endpoint_logic(self, arg1, arg2, endpoint): if endpoint == "quick-search": return quick_search_query_wrapper(arg1, arg2, self.psycopg2_connection) - elif endpoint == "data-sources": + if endpoint == "data-sources": return get_approved_data_sources_wrapper(self.psycopg2_connection) - elif endpoint == "data-sources-by-id": + if endpoint == "data-sources-by-id": return data_source_by_id_wrapper(arg1, self.psycopg2_connection) - elif endpoint == "data-sources-map": + if endpoint == "data-sources-map": return get_data_sources_for_map_wrapper(self.psycopg2_connection) - else: - return {"message": "Unknown endpoint"}, 500 + raise UnknownEndpointError(endpoint) From 4aa78d47c43c83349a30c760a1a354d36ffc22c8 Mon Sep 17 00:00:00 2001 From: maxachis Date: Thu, 23 May 2024 16:50:20 -0400 Subject: [PATCH 069/127] Added tests for search tokens service error handling Implemented separate dedicated test cases to validate error responses from the SearchTokens service. 
These tests simulate scenarios such as unknown endpoints and exceptions during execution, ensuring the service can gracefully handle errors and return appropriate responses. --- tests/resources/test_search_tokens.py | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/tests/resources/test_search_tokens.py b/tests/resources/test_search_tokens.py index e4f5d616..22dc5582 100644 --- a/tests/resources/test_search_tokens.py +++ b/tests/resources/test_search_tokens.py @@ -6,6 +6,7 @@ from resources.SearchTokens import SearchTokens + class MockPsycopgConnection: def cursor(self): return MockCursor() @@ -118,7 +119,6 @@ def generate_url(endpoint, params): TestCase("data-sources", {"result": "data_sources"}, None), TestCase("data-sources-by-id", {"result": "data_source_by_id"}, {"arg1": "1"}), TestCase("data-sources-map", {"result": "data_sources_map"}, None), - TestCase("unknown", ({"message": "Unknown endpoint"}, 500), None), ] @@ -143,3 +143,22 @@ def test_endpoints(search_tokens, mocker, app, test_case, mock_dependencies): test_case.params, mock_dependencies, ) + +def test_search_tokens_unknown_endpoint(app, mocker, search_tokens): + url = generate_url("test_endpoint", {"test_param": "test_value"}) + with app.test_request_context(url): + response = search_tokens.get() + assert response.status_code == 500 + assert response.json == {'message': 'Unknown endpoint: test_endpoint'} + +def test_search_tokens_get_exception(app, mocker, search_tokens): + mocker.patch( + "resources.SearchTokens.insert_access_token", + side_effect=Exception("Test exception"), + ) + + url = generate_url("test_endpoint", {"test_param": "test_value"}) + with app.test_request_context(url): + response = search_tokens.get() + assert response.status_code == 500 + assert response.json == {"message": "Test exception"} From 67dd1a56963ae3abbde25d1c5bc572910675e4ad Mon Sep 17 00:00:00 2001 From: maxachis Date: Thu, 23 May 2024 16:52:33 -0400 Subject: [PATCH 070/127] 
Reformat with Black --- app.py | 1 + middleware/data_source_queries.py | 23 +++++++++++++++-------- resources/PsycopgResource.py | 1 - tests/resources/test_search_tokens.py | 4 +++- tests/test_endpoints.py | 17 +++++++++++------ 5 files changed, 30 insertions(+), 16 deletions(-) diff --git a/app.py b/app.py index 06964adb..ac240ddd 100644 --- a/app.py +++ b/app.py @@ -55,6 +55,7 @@ def create_app() -> Flask: return app + app = create_app() if __name__ == "__main__": diff --git a/middleware/data_source_queries.py b/middleware/data_source_queries.py index 0cb6fbba..7d4df08d 100644 --- a/middleware/data_source_queries.py +++ b/middleware/data_source_queries.py @@ -95,10 +95,13 @@ def get_approved_data_sources_wrapper(conn: PgConnection): data_source_matches = get_approved_data_sources(conn) - return make_response({ - "count": len(data_source_matches), - "data": data_source_matches, - }, 200) + return make_response( + { + "count": len(data_source_matches), + "data": data_source_matches, + }, + 200, + ) def data_source_by_id_wrapper(arg, conn: PgConnection): @@ -112,10 +115,14 @@ def data_source_by_id_wrapper(arg, conn: PgConnection): def get_data_sources_for_map_wrapper(conn: PgConnection): data_source_details = get_data_sources_for_map(conn) - return make_response({ - "count": len(data_source_details), - "data": data_source_details, - }, 200) + return make_response( + { + "count": len(data_source_details), + "data": data_source_details, + }, + 200, + ) + def data_source_by_id_results( conn: PgConnection, data_source_id: str diff --git a/resources/PsycopgResource.py b/resources/PsycopgResource.py index d617c773..0282df15 100644 --- a/resources/PsycopgResource.py +++ b/resources/PsycopgResource.py @@ -49,4 +49,3 @@ def __init__(self, **kwargs): - kwargs (dict): Keyword arguments containing 'psycopg2_connection' for database connection. 
""" self.psycopg2_connection = kwargs["psycopg2_connection"] - diff --git a/tests/resources/test_search_tokens.py b/tests/resources/test_search_tokens.py index 22dc5582..cc5eb4bb 100644 --- a/tests/resources/test_search_tokens.py +++ b/tests/resources/test_search_tokens.py @@ -144,12 +144,14 @@ def test_endpoints(search_tokens, mocker, app, test_case, mock_dependencies): mock_dependencies, ) + def test_search_tokens_unknown_endpoint(app, mocker, search_tokens): url = generate_url("test_endpoint", {"test_param": "test_value"}) with app.test_request_context(url): response = search_tokens.get() assert response.status_code == 500 - assert response.json == {'message': 'Unknown endpoint: test_endpoint'} + assert response.json == {"message": "Unknown endpoint: test_endpoint"} + def test_search_tokens_get_exception(app, mocker, search_tokens): mocker.patch( diff --git a/tests/test_endpoints.py b/tests/test_endpoints.py index 548f8f05..7e82f9c4 100644 --- a/tests/test_endpoints.py +++ b/tests/test_endpoints.py @@ -2,25 +2,30 @@ from unittest.mock import patch from app import app, SearchTokens + @pytest.fixture def client(): with app.test_client() as client: yield client + def test_search_tokens_get_called(client): - with patch.object(SearchTokens, 'get', return_value="Mocked response") as mock_get: - response = client.get('/search-tokens') + with patch.object(SearchTokens, "get", return_value="Mocked response") as mock_get: + response = client.get("/search-tokens") assert response.status_code == 200 mock_get.assert_called_once() + def test_search_tokens_put_not_allowed(client): - response = client.put('/search-tokens') + response = client.put("/search-tokens") assert response.status_code == 405 # 405 Method Not Allowed + def test_search_tokens_post_not_allowed(client): - response = client.post('/search-tokens') + response = client.post("/search-tokens") assert response.status_code == 405 # 405 Method Not Allowed + def test_search_tokens_delete_not_allowed(client): - 
response = client.delete('/search-tokens') - assert response.status_code == 405 # 405 Method Not Allowed \ No newline at end of file + response = client.delete("/search-tokens") + assert response.status_code == 405 # 405 Method Not Allowed From b104b2ed5dd945fd1683fef05b6e1dbe711d1dbf Mon Sep 17 00:00:00 2001 From: maxachis Date: Thu, 23 May 2024 16:55:51 -0400 Subject: [PATCH 071/127] Reformat with Black --- resources/SearchTokens.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/resources/SearchTokens.py b/resources/SearchTokens.py index 2ea6aff2..43326ef5 100644 --- a/resources/SearchTokens.py +++ b/resources/SearchTokens.py @@ -16,11 +16,13 @@ BASE_URL = os.getenv("VITE_VUE_API_BASE_URL") + class UnknownEndpointError(Exception): def __init__(self, endpoint): self.message = f"Unknown endpoint: {endpoint}" super().__init__(self.message) + class SearchTokens(PsycopgResource): """ A resource that provides various search functionalities based on the specified endpoint. From 03a6db5011b8bfa6d23ee8abe1da9070d5c1dcd2 Mon Sep 17 00:00:00 2001 From: maxachis Date: Thu, 23 May 2024 16:56:31 -0400 Subject: [PATCH 072/127] Reformat with Black --- middleware/quick_search_query.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/middleware/quick_search_query.py b/middleware/quick_search_query.py index 9b79a9b3..6b86d3df 100644 --- a/middleware/quick_search_query.py +++ b/middleware/quick_search_query.py @@ -184,9 +184,7 @@ def quick_search_query( def quick_search_query_wrapper(arg1, arg2, conn: PgConnection): try: - data_sources = quick_search_query( - arg1, arg2, conn=conn - ) + data_sources = quick_search_query(arg1, arg2, conn=conn) return make_response(data_sources, 200) @@ -195,11 +193,11 @@ def quick_search_query_wrapper(arg1, arg2, conn: PgConnection): user_message = "There was an error during the search operation" message = { "content": user_message - + ": " - + str(e) - + "\n" - + f"Search term: {arg1}\n" - + f"Location: {arg2}" + 
+ ": " + + str(e) + + "\n" + + f"Search term: {arg1}\n" + + f"Location: {arg2}" } post_to_webhook(json.dumps(message)) From 58431f39e12179216d590dc5a5546caaeb254d7a Mon Sep 17 00:00:00 2001 From: maxachis Date: Thu, 23 May 2024 20:43:06 -0400 Subject: [PATCH 073/127] Refactor error handling to use custom exceptions Several functions that previously returned errors as dictionary keys have been refactored to instead raise custom exceptions. A new file 'custom_exceptions.py' has been created to define these exceptions, which are UserNotFoundError and TokenNotFoundError. This change enhances readability, and makes error handling more effective and explicit in the codebase. --- middleware/custom_exceptions.py | 15 +++++++++++ middleware/login_queries.py | 42 +++++++++++-------------------- middleware/reset_token_queries.py | 19 +++++++------- middleware/security.py | 7 ++---- middleware/user_queries.py | 11 ++++---- 5 files changed, 48 insertions(+), 46 deletions(-) create mode 100644 middleware/custom_exceptions.py diff --git a/middleware/custom_exceptions.py b/middleware/custom_exceptions.py new file mode 100644 index 00000000..ec9f86a0 --- /dev/null +++ b/middleware/custom_exceptions.py @@ -0,0 +1,15 @@ +class UserNotFoundError(Exception): + """Exception raised for errors in the input.""" + + def __init__(self, email, message=""): + if message == "": + message = f"User with email {email} not found" + self.email = email + self.message = message.format(email=self.email) + super().__init__(self.message) + + +class TokenNotFoundError(Exception): + """Raised when the token is not found in the database.""" + + pass diff --git a/middleware/login_queries.py b/middleware/login_queries.py index 31f35660..dd77becd 100644 --- a/middleware/login_queries.py +++ b/middleware/login_queries.py @@ -4,16 +4,8 @@ from typing import Union, Dict from psycopg2.extensions import cursor as PgCursor +from middleware.custom_exceptions import UserNotFoundError, TokenNotFoundError -class 
UserNotFoundError(Exception): - """Exception raised for errors in the input.""" - - def __init__(self, email, message=""): - if message == "": - message = f"User with email {email} not found" - self.email = email - self.message = message.format(email=self.email) - super().__init__(self.message) def login_results(cursor: PgCursor, email: str) -> Dict[str, Union[int, str]]: """ @@ -27,15 +19,14 @@ def login_results(cursor: PgCursor, email: str) -> Dict[str, Union[int, str]]: f"select id, password_digest, api_key from users where email = '{email}'" ) results = cursor.fetchall() - if len(results) > 0: - user_data = { - "id": results[0][0], - "password_digest": results[0][1], - "api_key": results[0][2], - } - return user_data - else: - return {"error": "no match"} + if len(results) == 0: + raise UserNotFoundError(email) + return { + "id": results[0][0], + "password_digest": results[0][1], + "api_key": results[0][2], + } + def is_admin(cursor: PgCursor, email: str) -> bool: @@ -53,7 +44,6 @@ def is_admin(cursor: PgCursor, email: str) -> bool: if role == "admin": return True return False - except IndexError: raise UserNotFoundError(email) @@ -91,11 +81,9 @@ def token_results(cursor: PgCursor, token: str) -> Dict[str, Union[int, str]]: """ cursor.execute(f"select id, email from session_tokens where token = '{token}'") results = cursor.fetchall() - if len(results) > 0: - user_data = { - "id": results[0][0], - "email": results[0][1], - } - return user_data - else: - return {"error": "no match"} + if len(results) == 0: + raise TokenNotFoundError("The specified token was not found.") + return { + "id": results[0][0], + "email": results[0][1], + } diff --git a/middleware/reset_token_queries.py b/middleware/reset_token_queries.py index bc0a4762..7e35f695 100644 --- a/middleware/reset_token_queries.py +++ b/middleware/reset_token_queries.py @@ -1,6 +1,8 @@ from psycopg2.extensions import cursor as PgCursor from typing import Dict, Union +from middleware.custom_exceptions import 
TokenNotFoundError + def check_reset_token(cursor: PgCursor, token: str) -> Dict[str, Union[int, str]]: """ @@ -14,15 +16,14 @@ def check_reset_token(cursor: PgCursor, token: str) -> Dict[str, Union[int, str] f"select id, create_date, email from reset_tokens where token = '{token}'" ) results = cursor.fetchall() - if len(results) > 0: - user_data = { - "id": results[0][0], - "create_date": results[0][1], - "email": results[0][2], - } - return user_data - else: - return {"error": "no match"} + if len(results) == 0: + raise TokenNotFoundError("The specified token was not found.") + return { + "id": results[0][0], + "create_date": results[0][1], + "email": results[0][2], + } + def add_reset_token(cursor: PgCursor, email: str, token: str) -> None: diff --git a/middleware/security.py b/middleware/security.py index 0cebc73b..50bd8b77 100644 --- a/middleware/security.py +++ b/middleware/security.py @@ -1,13 +1,10 @@ import functools -from hmac import compare_digest from flask import request, jsonify from middleware.initialize_psycopg2_connection import initialize_psycopg2_connection from datetime import datetime as dt -from middleware.login_queries import is_admin, UserNotFoundError -import os +from middleware.login_queries import is_admin +from middleware.custom_exceptions import UserNotFoundError from typing import Tuple -from flask.wrappers import Response -from psycopg2.extensions import cursor as PgCursor def is_valid(api_key: str, endpoint: str, method: str) -> Tuple[bool, bool]: diff --git a/middleware/user_queries.py b/middleware/user_queries.py index be050fe4..e5c9f99d 100644 --- a/middleware/user_queries.py +++ b/middleware/user_queries.py @@ -2,6 +2,8 @@ from psycopg2.extensions import cursor as PgCursor from typing import Dict +from middleware.custom_exceptions import UserNotFoundError + def user_check_email(cursor: PgCursor, email: str) -> Dict[str, str]: """ @@ -13,11 +15,10 @@ def user_check_email(cursor: PgCursor, email: str) -> Dict[str, str]: """ 
cursor.execute(f"select id from users where email = '{email}'") results = cursor.fetchall() - if len(results) > 0: - user_data = {"id": results[0][0]} - return user_data - else: - return {"error": "no match"} + if len(results) == 0: + raise UserNotFoundError(email) + return {"id": results[0][0]} + def user_post_results(cursor: PgCursor, email: str, password: str) -> None: From 0f3cb7ae4026ca0b15c1182cf03909b5a485cbe0 Mon Sep 17 00:00:00 2001 From: maxachis Date: Thu, 23 May 2024 20:43:17 -0400 Subject: [PATCH 074/127] Add tests for custom exceptions in authentication process Added new test cases to validate and ensure the proper functioning of the recently implemented custom exceptions in the middleware. These tests ensure that UserNotFoundError and TokenNotFoundError are correctly raised during the authentication process when the user or token does not exist, respectively. --- tests/middleware/test_login_queries.py | 14 +++++++++++++- tests/middleware/test_reset_token_queries.py | 9 +++++++++ tests/middleware/test_user_queries.py | 6 ++++++ 3 files changed, 28 insertions(+), 1 deletion(-) diff --git a/tests/middleware/test_login_queries.py b/tests/middleware/test_login_queries.py index d56b3f76..9b86d930 100644 --- a/tests/middleware/test_login_queries.py +++ b/tests/middleware/test_login_queries.py @@ -9,8 +9,8 @@ create_session_token, token_results, is_admin, - UserNotFoundError, ) +from middleware.custom_exceptions import UserNotFoundError, TokenNotFoundError from tests.middleware.helper_functions import create_test_user from tests.middleware.fixtures import dev_db_connection, db_cursor @@ -30,6 +30,12 @@ def test_login_query(db_cursor: psycopg2.extensions.cursor) -> None: assert user_data["password_digest"] == test_user.password_hash +def test_login_results_user_not_found(db_cursor: psycopg2.extensions.cursor) -> None: + """UserNotFoundError should be raised if the user does not exist in the database""" + with pytest.raises(UserNotFoundError): + 
login_results(cursor=db_cursor, email="nonexistent@example.com") + + def test_create_session_token_results(db_cursor: psycopg2.extensions.cursor) -> None: """ Tests the `create_session_token_results` method properly @@ -70,3 +76,9 @@ def test_is_admin_raises_user_not_logged_in_error(db_cursor): """ with pytest.raises(UserNotFoundError): is_admin(cursor=db_cursor, email=str(uuid.uuid4())) + + +def test_token_results_raises_token_not_found_error(db_cursor): + """token_results() should raise TokenNotFoundError for nonexistent token""" + with pytest.raises(TokenNotFoundError): + token_results(cursor=db_cursor, token=str(uuid.uuid4())) diff --git a/tests/middleware/test_reset_token_queries.py b/tests/middleware/test_reset_token_queries.py index 9c7d11f0..bf246d0c 100644 --- a/tests/middleware/test_reset_token_queries.py +++ b/tests/middleware/test_reset_token_queries.py @@ -1,7 +1,9 @@ import uuid import psycopg2.extensions +import pytest +from middleware.custom_exceptions import TokenNotFoundError from middleware.reset_token_queries import ( check_reset_token, add_reset_token, @@ -28,6 +30,13 @@ def test_check_reset_token(db_cursor: psycopg2.extensions.cursor) -> None: assert test_token_insert.id == user_data["id"] +def test_check_reset_token_raises_token_not_found_error( + db_cursor: psycopg2.extensions, +) -> None: + with pytest.raises(TokenNotFoundError): + check_reset_token(db_cursor, token=str(uuid.uuid4())) + + def test_add_reset_token(db_cursor: psycopg2.extensions.cursor) -> None: """ Checks if add_reset_token properly inserts a token diff --git a/tests/middleware/test_user_queries.py b/tests/middleware/test_user_queries.py index 3fb67cf3..a1076e59 100644 --- a/tests/middleware/test_user_queries.py +++ b/tests/middleware/test_user_queries.py @@ -1,5 +1,7 @@ import psycopg2 +import pytest +from middleware.custom_exceptions import UserNotFoundError from middleware.user_queries import user_post_results, user_check_email from tests.middleware.helper_functions 
import create_test_user from tests.middleware.fixtures import dev_db_connection, db_cursor @@ -31,3 +33,7 @@ def test_user_check_email(db_cursor: psycopg2.extensions.cursor) -> None: user = create_test_user(db_cursor) user_data = user_check_email(db_cursor, user.email) assert user_data["id"] == user.id + +def test_user_check_email_raises_user_not_found_error(db_cursor: psycopg2.extensions) -> None: + with pytest.raises(UserNotFoundError): + user_check_email(db_cursor, "nonexistent@example.com") \ No newline at end of file From 11cc622cce911da41325a6e25597c1da9c57e848 Mon Sep 17 00:00:00 2001 From: maxachis Date: Thu, 23 May 2024 20:49:43 -0400 Subject: [PATCH 075/127] Reformat with Black --- middleware/initialize_psycopg2_connection.py | 4 +--- middleware/login_queries.py | 1 - middleware/reset_token_queries.py | 1 - middleware/user_queries.py | 1 - tests/middleware/test_initialize_psycopg2_connection.py | 4 ++-- tests/middleware/test_security.py | 1 - tests/middleware/test_user_queries.py | 7 +++++-- 7 files changed, 8 insertions(+), 11 deletions(-) diff --git a/middleware/initialize_psycopg2_connection.py b/middleware/initialize_psycopg2_connection.py index f94c7e81..7d465091 100644 --- a/middleware/initialize_psycopg2_connection.py +++ b/middleware/initialize_psycopg2_connection.py @@ -14,9 +14,7 @@ def __init__(self, message="Failed to initialize psycopg2 connection."): super().__init__(self.message) -def initialize_psycopg2_connection() -> ( - PgConnection -): +def initialize_psycopg2_connection() -> PgConnection: """ Initializes a connection to a PostgreSQL database using psycopg2 with connection parameters obtained from an environment variable. 
If the connection fails, it returns a default dictionary diff --git a/middleware/login_queries.py b/middleware/login_queries.py index dd77becd..7f85f106 100644 --- a/middleware/login_queries.py +++ b/middleware/login_queries.py @@ -28,7 +28,6 @@ def login_results(cursor: PgCursor, email: str) -> Dict[str, Union[int, str]]: } - def is_admin(cursor: PgCursor, email: str) -> bool: """ Checks if a user has an admin role. diff --git a/middleware/reset_token_queries.py b/middleware/reset_token_queries.py index 7e35f695..f2908f11 100644 --- a/middleware/reset_token_queries.py +++ b/middleware/reset_token_queries.py @@ -25,7 +25,6 @@ def check_reset_token(cursor: PgCursor, token: str) -> Dict[str, Union[int, str] } - def add_reset_token(cursor: PgCursor, email: str, token: str) -> None: """ Inserts a new reset token into the database for a specified email. diff --git a/middleware/user_queries.py b/middleware/user_queries.py index e5c9f99d..7386bf04 100644 --- a/middleware/user_queries.py +++ b/middleware/user_queries.py @@ -20,7 +20,6 @@ def user_check_email(cursor: PgCursor, email: str) -> Dict[str, str]: return {"id": results[0][0]} - def user_post_results(cursor: PgCursor, email: str, password: str) -> None: """ Creates a new user with the provided email and password. diff --git a/tests/middleware/test_initialize_psycopg2_connection.py b/tests/middleware/test_initialize_psycopg2_connection.py index 53a0fa95..9525235b 100644 --- a/tests/middleware/test_initialize_psycopg2_connection.py +++ b/tests/middleware/test_initialize_psycopg2_connection.py @@ -1,4 +1,3 @@ - def test_initialize_psycopg2_connection_success(): """ Test that function properly initializes psycopg2 connection @@ -8,10 +7,11 @@ def test_initialize_psycopg2_connection_success(): """ pass + def test_initialize_psycopg2_connection_failure(): """ Check that function raises DatabaseInitializationError if psycopg2.OperationalError occurs. 
:return: """ - pass \ No newline at end of file + pass diff --git a/tests/middleware/test_security.py b/tests/middleware/test_security.py index 4fe9f4c7..7aadd7d6 100644 --- a/tests/middleware/test_security.py +++ b/tests/middleware/test_security.py @@ -1,4 +1,3 @@ - def test_api_required_user_not_found(): """ Test that the api_required decorator properly returns diff --git a/tests/middleware/test_user_queries.py b/tests/middleware/test_user_queries.py index a1076e59..e273e79f 100644 --- a/tests/middleware/test_user_queries.py +++ b/tests/middleware/test_user_queries.py @@ -34,6 +34,9 @@ def test_user_check_email(db_cursor: psycopg2.extensions.cursor) -> None: user_data = user_check_email(db_cursor, user.email) assert user_data["id"] == user.id -def test_user_check_email_raises_user_not_found_error(db_cursor: psycopg2.extensions) -> None: + +def test_user_check_email_raises_user_not_found_error( + db_cursor: psycopg2.extensions, +) -> None: with pytest.raises(UserNotFoundError): - user_check_email(db_cursor, "nonexistent@example.com") \ No newline at end of file + user_check_email(db_cursor, "nonexistent@example.com") From a8b88e0b53609cded885fd27c1d9826290524328 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 25 May 2024 18:30:19 -0400 Subject: [PATCH 076/127] Refactor create_app function in app.py The refactoring involved moving the initialization of the psycopg2 connection out of the create_app function and into its parameter list. This allows the function to be more flexible and reusable, capable of accepting different database connections. It allows for easier testing and potential use with different database management systems in the future. 
--- app.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/app.py b/app.py index f3a8f341..14b487ee 100644 --- a/app.py +++ b/app.py @@ -25,9 +25,7 @@ def add_resource(api, resource, endpoint, **kwargs): api.add_resource(resource, endpoint, resource_class_kwargs=kwargs) -def create_app() -> Flask: - psycopg2_connection = initialize_psycopg2_connection() - +def create_app(psycopg2_connection) -> Flask: app = Flask(__name__) api = Api(app) CORS(app) @@ -57,5 +55,5 @@ def create_app() -> Flask: if __name__ == "__main__": - app = create_app() + app = create_app(initialize_psycopg2_connection()) app.run(debug=True, host="0.0.0.0") From ddc9d26ec76bb09fd0eee901439307ead4eab747 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 25 May 2024 18:30:44 -0400 Subject: [PATCH 077/127] Remove unused methods from PsycopgResource class. --- resources/PsycopgResource.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/resources/PsycopgResource.py b/resources/PsycopgResource.py index 655186a0..787dc623 100644 --- a/resources/PsycopgResource.py +++ b/resources/PsycopgResource.py @@ -48,15 +48,3 @@ def __init__(self, **kwargs): - kwargs (dict): Keyword arguments containing 'psycopg2_connection' for database connection. """ self.psycopg2_connection = kwargs["psycopg2_connection"] - - def get(self): - """ - Base implementation of GET. Override in subclasses as needed. - """ - raise NotImplementedError("This method should be overridden by subclasses") - - def post(self): - """ - Base implementation of POST. Override in subclasses as needed. 
- """ - raise NotImplementedError("This method should be overridden by subclasses") From 80b55e384970132e35b6492d8a4ab85a440a9f9f Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 25 May 2024 18:31:00 -0400 Subject: [PATCH 078/127] Add thorough tests for all application endpoints MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit includes the creation of a new file, test_endpoints.py, specifically dedicated to thoroughly testing the functionality of all application endpoints. It utilizes Pytest to ensure that each endpoint correctly calls (or doesn’t call) the appropriate methods in their supporting classes, as per the original design. The test checks both allowed and not allowed methods for each endpoint. --- tests/test_endpoints.py | 113 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100644 tests/test_endpoints.py diff --git a/tests/test_endpoints.py b/tests/test_endpoints.py new file mode 100644 index 00000000..7af98dfa --- /dev/null +++ b/tests/test_endpoints.py @@ -0,0 +1,113 @@ +""" +This module tests the functionality of all endpoints, ensuring that, as designed, they call (or don't call) +the appropriate methods in their supporting classes +""" + +from collections import namedtuple +from typing import Type, Any, List + +import pytest +from unittest.mock import patch + +from flask.testing import FlaskClient +from flask_restful import Resource + +from app import create_app +from resources.Agencies import Agencies +from resources.ApiKey import ApiKey +from resources.Archives import Archives +from resources.DataSources import ( + DataSources, + DataSourcesMap, + DataSourcesNeedsIdentification, + DataSourceById, +) +from resources.Login import Login +from resources.QuickSearch import QuickSearch +from resources.RefreshSession import RefreshSession +from resources.RequestResetPassword import RequestResetPassword +from resources.ResetPassword import ResetPassword +from 
resources.ResetTokenValidation import ResetTokenValidation +from resources.SearchTokens import SearchTokens +from resources.User import User + + +# Define constants for HTTP methods +GET = "get" +POST = "post" +PUT = "put" +DELETE = "delete" + + +@pytest.fixture +def client(mocker) -> FlaskClient: + """ + Create a client with a mocked database connection + :param mocker: + :return: + """ + app = create_app(mocker.MagicMock()) + with app.test_client() as client: + yield client + + +def run_endpoint_tests( + client: FlaskClient, endpoint: str, class_type: Resource, allowed_methods: list[str] +): + methods = [GET, POST, PUT, DELETE] + for method in methods: + if method in allowed_methods: + with patch.object( + class_type, method, return_value="Mocked response" + ) as mock_method: + response = getattr(client, method)(endpoint) + assert ( + response.status_code == 200 + ), f"{method.upper()} {endpoint} failed with status code {response.status_code}, expected 200" + mock_method.assert_called_once(), f"{method.upper()} {endpoint} should have called the {method} method on {class_type.__name__}" + else: + response = getattr(client, method)(endpoint) + assert ( + response.status_code == 405 + ), f"{method.upper()} {endpoint} failed with status code {response.status_code}, expected 405" + + +TestParameters = namedtuple("Resource", ["class_type", "endpoint", "allowed_methods"]) +test_parameters = [ + TestParameters(User, "/user", [POST, PUT]), + TestParameters(Login, "/login", [POST]), + TestParameters(RefreshSession, "/refresh-session", [POST]), + TestParameters(ApiKey, "/api_key", [GET]), + TestParameters(RequestResetPassword, "/request-reset-password", [POST]), + TestParameters(ResetPassword, "/reset-password", [POST]), + TestParameters(ResetTokenValidation, "/reset-token-validation", [POST]), + TestParameters(QuickSearch, "/quick-search//", [GET]), + TestParameters(Archives, "/archives", [GET, PUT]), + TestParameters(DataSources, "/data-sources", [GET, POST]), + 
TestParameters(DataSourcesMap, "/data-sources-map", [GET]), + TestParameters( + DataSourcesNeedsIdentification, "/data-sources-needs-identification", [GET] + ), + TestParameters(DataSourceById, "/data-sources-by-id/", [GET, PUT]), + TestParameters(Agencies, "/agencies/", [GET]), + TestParameters(SearchTokens, "/search-tokens", [GET]), +] + + +@pytest.mark.parametrize("test_parameter", test_parameters) +def test_endpoints(client: FlaskClient, test_parameter) -> None: + """ + Using the test_parameters list, this tests all endpoints to ensure that + only the appropriate methods can be called from the endpoints + :param client: the client fixture + :param class_type: + :param endpoint: + :param allowed_methods: + :return: + """ + run_endpoint_tests( + client, + test_parameter.endpoint, + test_parameter.class_type, + test_parameter.allowed_methods, + ) From 06db561cfd91d1d712648b0bd268a23eb1ddc11f Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 25 May 2024 18:48:06 -0400 Subject: [PATCH 079/127] Refactor test_app_with_mock fixture in app_test.py The test_app_with_mock fixture in app_test.py has been simplified. The mocker from pytest-mock has been utilized to replace the previously used patch and MagicMock. Node assertions for database interaction have been removed from the endpoint test function. 
--- tests/resources/app_test.py | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/tests/resources/app_test.py b/tests/resources/app_test.py index e40b0471..73e24e95 100644 --- a/tests/resources/app_test.py +++ b/tests/resources/app_test.py @@ -32,20 +32,9 @@ def runner(test_app): @pytest.fixture() -def test_app_with_mock(): +def test_app_with_mock(mocker): # Patch the initialize_psycopg2_connection function so it returns a MagicMock - with patch("app.initialize_psycopg2_connection") as mock_init: - mock_connection = MagicMock() - mock_init.return_value = mock_connection - - app = create_app() - # If your app stores the connection in a global or app context, - # you can also directly assign the mock_connection there - - # Provide access to the mock within the app for assertions in tests - app.mock_connection = mock_connection - - yield app + yield create_app(mocker.MagicMock()) @pytest.fixture() @@ -171,8 +160,6 @@ def test_get_api_key(client_with_mock, mocker, test_app_with_mock): json_data = response.get_json() assert "api_key" in json_data assert response.status_code == 200 - test_app_with_mock.mock_connection.cursor().execute.assert_called_once() - test_app_with_mock.mock_connection.commit.assert_called_once() # endregion From 2b2f3a5ed189d9e5247374ef09d4be0583304fe7 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 25 May 2024 19:28:28 -0400 Subject: [PATCH 080/127] Replace tuple return type with namedtuple in security middleware The function 'is_valid' in the security middleware has been refactored. Instead of returning a Tuple, it now returns an instance of APIKeyStatus, which is a namedtuple. This namedtuple structure improves readability and understanding of the code by labeling the boolean values it returns. 
--- middleware/security.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/middleware/security.py b/middleware/security.py index 50bd8b77..ab4118b4 100644 --- a/middleware/security.py +++ b/middleware/security.py @@ -1,4 +1,6 @@ import functools +from collections import namedtuple + from flask import request, jsonify from middleware.initialize_psycopg2_connection import initialize_psycopg2_connection from datetime import datetime as dt @@ -6,8 +8,10 @@ from middleware.custom_exceptions import UserNotFoundError from typing import Tuple +APIKeyStatus = namedtuple("APIKeyStatus", ["is_valid", "is_expired"]) + -def is_valid(api_key: str, endpoint: str, method: str) -> Tuple[bool, bool]: +def is_valid(api_key: str, endpoint: str, method: str) -> APIKeyStatus: """ Validates the API key and checks if the user has the required role to access a specific endpoint. @@ -17,7 +21,7 @@ def is_valid(api_key: str, endpoint: str, method: str) -> Tuple[bool, bool]: :return: A tuple (isValid, isExpired) indicating whether the API key is valid and not expired. 
""" if not api_key: - return False, False + return APIKeyStatus(is_valid=False, is_expired=False) psycopg2_connection = initialize_psycopg2_connection() cursor = psycopg2_connection.cursor() @@ -37,7 +41,7 @@ def is_valid(api_key: str, endpoint: str, method: str) -> Tuple[bool, bool]: print(expiration_date, dt.utcnow()) if expiration_date < dt.utcnow(): - return False, True + return APIKeyStatus(False, is_expired=True) if is_admin(cursor, email): role = "admin" @@ -52,16 +56,16 @@ def is_valid(api_key: str, endpoint: str, method: str) -> Tuple[bool, bool]: role = "user" if not results: - return False, False + return APIKeyStatus(is_valid=False, is_expired=False) if endpoint in ("datasources", "datasourcebyid") and method in ("PUT", "POST"): if role != "admin": - return False, False + return APIKeyStatus(is_valid=False, is_expired=False) # Compare the API key in the user table to the API in the request header and proceed # through the protected route if it's valid. Otherwise, compare_digest will return False # and api_required will send an error message to provide a valid API key - return True, False + return APIKeyStatus(is_valid=True, is_expired=False) def api_required(func): @@ -91,13 +95,13 @@ def decorator(*args, **kwargs): }, 400 # Check if API key is correct and valid try: - valid, expired = is_valid(api_key, request.endpoint, request.method) + api_key_status = is_valid(api_key, request.endpoint, request.method) except UserNotFoundError as e: return {"message": str(e)}, 401 - if valid: + if api_key_status.is_valid: return func(*args, **kwargs) else: - if expired: + if api_key_status.is_expired: return {"message": "The provided API key has expired"}, 401 return {"message": "The provided API key is not valid"}, 403 From f53d8f8b7586afde4d72afc8df000042d4919bfd Mon Sep 17 00:00:00 2001 From: maxachis Date: Sun, 26 May 2024 07:59:28 -0400 Subject: [PATCH 081/127] Add Bandit Security Linting to GitHub Actions A new GitHub Actions workflow 'bandit.yaml' has been 
added to run Bandit, a Python security linter, on push and pull requests. This will increase the security standards of the codebase by ensuring that potential security vulnerabilities are addressed promptly. The Bandit results will be uploaded as an artifact for further analysis. --- .github/workflows/bandit.yaml | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 .github/workflows/bandit.yaml diff --git a/.github/workflows/bandit.yaml b/.github/workflows/bandit.yaml new file mode 100644 index 00000000..79ec902e --- /dev/null +++ b/.github/workflows/bandit.yaml @@ -0,0 +1,32 @@ +name: Bandit Security Linting + +on: [push, pull_request] + +jobs: + bandit: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install bandit + + - name: Run Bandit + run: | + bandit -r middleware resources app.py -f txt -o bandit_output.txt || exit 1 + cat bandit_output.txt + + - name: Upload Bandit results + uses: actions/upload-artifact@v2 + with: + name: bandit-report + path: bandit_output.txt \ No newline at end of file From fae254fc315a4ca04022668962f177c7fc756c26 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sun, 26 May 2024 08:03:43 -0400 Subject: [PATCH 082/127] Update Bandit execution command in workflow Removed the explicit exit command from the Bandit execution command in the GitHub Actions workflow. 
--- .github/workflows/bandit.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bandit.yaml b/.github/workflows/bandit.yaml index 79ec902e..970f5f2e 100644 --- a/.github/workflows/bandit.yaml +++ b/.github/workflows/bandit.yaml @@ -22,7 +22,7 @@ jobs: - name: Run Bandit run: | - bandit -r middleware resources app.py -f txt -o bandit_output.txt || exit 1 + bandit -r middleware resources app.py -f txt -o bandit_output.txt cat bandit_output.txt - name: Upload Bandit results From 006cec2150e3fe9378a246d621fd377c593ead76 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sun, 26 May 2024 08:05:25 -0400 Subject: [PATCH 083/127] Removed cat command from Bandit run in GitHub Actions The command to print out Bandit's output directly to the console in the GitHub Actions workflow was removed. Now, the results are only being uploaded as an artifact for review. --- .github/workflows/bandit.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/bandit.yaml b/.github/workflows/bandit.yaml index 970f5f2e..640c1017 100644 --- a/.github/workflows/bandit.yaml +++ b/.github/workflows/bandit.yaml @@ -23,10 +23,10 @@ jobs: - name: Run Bandit run: | bandit -r middleware resources app.py -f txt -o bandit_output.txt - cat bandit_output.txt - name: Upload Bandit results uses: actions/upload-artifact@v2 with: name: bandit-report - path: bandit_output.txt \ No newline at end of file + path: bandit_output.txt + From b931fc1d444f36f7810a967efe50c502985bd2cd Mon Sep 17 00:00:00 2001 From: maxachis Date: Sun, 26 May 2024 08:06:54 -0400 Subject: [PATCH 084/127] Adjustment --- .github/workflows/bandit.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bandit.yaml b/.github/workflows/bandit.yaml index 640c1017..a39d3d12 100644 --- a/.github/workflows/bandit.yaml +++ b/.github/workflows/bandit.yaml @@ -22,7 +22,7 @@ jobs: - name: Run Bandit run: | - bandit -r middleware resources app.py -f txt 
-o bandit_output.txt + bandit -r middleware -f txt -o bandit_output.txt - name: Upload Bandit results uses: actions/upload-artifact@v2 From 12a9ba955d5c4ac7731ae203470c47763ad5ee90 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sun, 26 May 2024 08:07:45 -0400 Subject: [PATCH 085/127] Adjustment --- .github/workflows/bandit.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bandit.yaml b/.github/workflows/bandit.yaml index a39d3d12..8945acb2 100644 --- a/.github/workflows/bandit.yaml +++ b/.github/workflows/bandit.yaml @@ -22,7 +22,7 @@ jobs: - name: Run Bandit run: | - bandit -r middleware -f txt -o bandit_output.txt + bandit -r middleware - name: Upload Bandit results uses: actions/upload-artifact@v2 From e05fbc1cd7d5da76e5499f52ebce5e9ada8b44be Mon Sep 17 00:00:00 2001 From: maxachis Date: Sun, 26 May 2024 08:08:46 -0400 Subject: [PATCH 086/127] Update Bandit Security Linting trigger event Changed the triggering event of Bandit Security Linting workflow from happening on both push and pull_request events to only on pull_request events. --- .github/workflows/bandit.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bandit.yaml b/.github/workflows/bandit.yaml index 8945acb2..d22208f4 100644 --- a/.github/workflows/bandit.yaml +++ b/.github/workflows/bandit.yaml @@ -1,6 +1,6 @@ name: Bandit Security Linting -on: [push, pull_request] +on: [pull_request] jobs: bandit: From 2f3a61d034afd792c1255941c06ba61cec4ee15d Mon Sep 17 00:00:00 2001 From: maxachis Date: Sun, 26 May 2024 08:09:47 -0400 Subject: [PATCH 087/127] Expand targets for Bandit Security Linting Updated the Bandit Security Linting code to also include 'resources' and 'app.py', expanding the range of files which are scanned for potential security vulnerabilities. Previously, linting was only applied to 'middleware'. 
--- .github/workflows/bandit.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bandit.yaml b/.github/workflows/bandit.yaml index d22208f4..ed64327c 100644 --- a/.github/workflows/bandit.yaml +++ b/.github/workflows/bandit.yaml @@ -22,7 +22,7 @@ jobs: - name: Run Bandit run: | - bandit -r middleware + bandit -r middleware resources app.py - name: Upload Bandit results uses: actions/upload-artifact@v2 From f9e48525dddb5f4912054f61ed64329ef41d6955 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sun, 26 May 2024 19:30:22 -0400 Subject: [PATCH 088/127] Refactor SQL queries for enhanced security Modified SQL queries across multiple sources to use parameterized inputs instead of string formatting. This approach enhances security by preventing potential SQL injection attacks. The updates were applied to queries in files such as `login_queries.py`, `user_queries.py`, `reset_token_queries.py`, and others. Additionally, relevant tests were added to assure correct functioning. --- middleware/archives_queries.py | 15 +++- middleware/data_source_queries.py | 6 +- middleware/login_queries.py | 9 ++- middleware/reset_token_queries.py | 6 +- middleware/user_queries.py | 5 +- tests/middleware/test_archives_queries.py | 96 +++++++++++++++++++++++ 6 files changed, 121 insertions(+), 16 deletions(-) diff --git a/middleware/archives_queries.py b/middleware/archives_queries.py index b53fa3ec..2367be37 100644 --- a/middleware/archives_queries.py +++ b/middleware/archives_queries.py @@ -72,8 +72,15 @@ def archives_put_broken_as_of_results( :param conn: A psycopg2 connection object to a PostgreSQL database. 
""" cursor = conn.cursor() - sql_query = "UPDATE data_sources SET url_status = 'broken', broken_source_url_as_of = '{0}', last_cached = '{1}' WHERE airtable_uid = '{2}'" - cursor.execute(sql_query.format(broken_as_of, last_cached, id)) + sql_query = """ + UPDATE data_sources + SET + url_status = 'broken', + broken_source_url_as_of = %s, + last_cached = %s + WHERE airtable_uid = %s + """ + cursor.execute(sql_query, (broken_as_of, last_cached, id)) cursor.close() @@ -88,6 +95,6 @@ def archives_put_last_cached_results( :param conn: A psycopg2 connection object to a PostgreSQL database. """ cursor = conn.cursor() - sql_query = "UPDATE data_sources SET last_cached = '{0}' WHERE airtable_uid = '{1}'" - cursor.execute(sql_query.format(last_cached, id)) + sql_query = "UPDATE data_sources SET last_cached = %s WHERE airtable_uid = %s" + cursor.execute(sql_query, (last_cached, id)) cursor.close() diff --git a/middleware/data_source_queries.py b/middleware/data_source_queries.py index 36762125..678aa057 100644 --- a/middleware/data_source_queries.py +++ b/middleware/data_source_queries.py @@ -123,12 +123,12 @@ def data_source_by_id_results( INNER JOIN agencies ON agency_source_link.agency_described_linked_uid = agencies.airtable_uid WHERE - data_sources.approval_status = 'approved' AND data_sources.airtable_uid = '{1}' + data_sources.approval_status = 'approved' AND data_sources.airtable_uid = %s """.format( - joined_column_names, data_source_id + joined_column_names ) - cursor.execute(sql_query) + cursor.execute(sql_query, (data_source_id,)) result = cursor.fetchone() cursor.close() diff --git a/middleware/login_queries.py b/middleware/login_queries.py index 7f85f106..ae4d4725 100644 --- a/middleware/login_queries.py +++ b/middleware/login_queries.py @@ -16,7 +16,7 @@ def login_results(cursor: PgCursor, email: str) -> Dict[str, Union[int, str]]: :return: A dictionary containing user data or an error message. 
""" cursor.execute( - f"select id, password_digest, api_key from users where email = '{email}'" + f"select id, password_digest, api_key from users where email = %s", (email,) ) results = cursor.fetchall() if len(results) == 0: @@ -36,7 +36,7 @@ def is_admin(cursor: PgCursor, email: str) -> bool: :param email: User's email. :return: True if user is an admin, False if not, or an error message. """ - cursor.execute(f"select role from users where email = '{email}'") + cursor.execute(f"select role from users where email = %s", (email,)) results = cursor.fetchall() try: role = results[0][0] @@ -64,7 +64,8 @@ def create_session_token(cursor: PgCursor, user_id: int, email: str) -> str: } session_token = jwt.encode(payload, os.getenv("SECRET_KEY"), algorithm="HS256") cursor.execute( - f"insert into session_tokens (token, email, expiration_date) values ('{session_token}', '{email}', '{expiration}')" + f"insert into session_tokens (token, email, expiration_date) values (%s, %s, %s)", + (session_token, email, expiration), ) return session_token @@ -78,7 +79,7 @@ def token_results(cursor: PgCursor, token: str) -> Dict[str, Union[int, str]]: :param token: The session token. :return: A dictionary containing session token data or an error message. """ - cursor.execute(f"select id, email from session_tokens where token = '{token}'") + cursor.execute(f"select id, email from session_tokens where token = %s", (token,)) results = cursor.fetchall() if len(results) == 0: raise TokenNotFoundError("The specified token was not found.") diff --git a/middleware/reset_token_queries.py b/middleware/reset_token_queries.py index f2908f11..573ec5af 100644 --- a/middleware/reset_token_queries.py +++ b/middleware/reset_token_queries.py @@ -13,7 +13,7 @@ def check_reset_token(cursor: PgCursor, token: str) -> Dict[str, Union[int, str] :return: A dictionary containing the user's ID, token creation date, and email if the token exists; otherwise, an error message. 
""" cursor.execute( - f"select id, create_date, email from reset_tokens where token = '{token}'" + f"select id, create_date, email from reset_tokens where token = %s", (token,) ) results = cursor.fetchall() if len(results) == 0: @@ -34,7 +34,7 @@ def add_reset_token(cursor: PgCursor, email: str, token: str) -> None: :param token: The reset token to add. """ cursor.execute( - f"insert into reset_tokens (email, token) values ('{email}', '{token}')" + f"insert into reset_tokens (email, token) values (%s, %s)", (email, token) ) return @@ -49,7 +49,7 @@ def delete_reset_token(cursor: PgCursor, email: str, token: str) -> None: :param token: The reset token to delete. """ cursor.execute( - f"delete from reset_tokens where email = '{email}' and token = '{token}'" + f"delete from reset_tokens where email = %s and token = %s", (email, token) ) return diff --git a/middleware/user_queries.py b/middleware/user_queries.py index 7386bf04..388d962a 100644 --- a/middleware/user_queries.py +++ b/middleware/user_queries.py @@ -13,7 +13,7 @@ def user_check_email(cursor: PgCursor, email: str) -> Dict[str, str]: :param email: The email address to check against the users in the database. :return: A dictionary with the user's ID if found, otherwise an error message. 
""" - cursor.execute(f"select id from users where email = '{email}'") + cursor.execute(f"select id from users where email = %s", (email,)) results = cursor.fetchall() if len(results) == 0: raise UserNotFoundError(email) @@ -30,7 +30,8 @@ def user_post_results(cursor: PgCursor, email: str, password: str) -> None: """ password_digest = generate_password_hash(password) cursor.execute( - f"insert into users (email, password_digest) values ('{email}', '{password_digest}')" + f"insert into users (email, password_digest) values (%s, %s)", + (email, password_digest), ) return diff --git a/tests/middleware/test_archives_queries.py b/tests/middleware/test_archives_queries.py index beea63ff..8018624b 100644 --- a/tests/middleware/test_archives_queries.py +++ b/tests/middleware/test_archives_queries.py @@ -1,9 +1,14 @@ +import datetime +import uuid + import psycopg2 from middleware.archives_queries import ( archives_get_results, archives_get_query, ARCHIVES_GET_COLUMNS, + archives_put_broken_as_of_results, + archives_put_last_cached_results, ) from tests.middleware.helper_functions import ( insert_test_agencies_and_sources, @@ -61,3 +66,94 @@ def test_archives_get_columns( for result in results: if result["id"] == "SOURCE_UID_1": return + + +def insert_test_data_source(cursor: psycopg2.extensions.cursor) -> str: + """ + Insert test data source and return id + :param cursor: + :return: randomly generated uuid + """ + test_uid = str(uuid.uuid4()) + cursor.execute( + """ + INSERT INTO + PUBLIC.DATA_SOURCES ( + airtable_uid, + NAME, + DESCRIPTION, + RECORD_TYPE, + SOURCE_URL, + APPROVAL_STATUS, + URL_STATUS + ) + VALUES + (%s,'Example Data Source', 'Example Description', + 'Type A','http://src1.com','approved','available') + """, + (test_uid,), + ) + return test_uid + + +def get_data_sources_archives_info(cursor, test_uid): + cursor.execute( + """ + SELECT URL_STATUS, BROKEN_SOURCE_URL_AS_OF, LAST_CACHED + FROM PUBLIC.DATA_SOURCES + WHERE AIRTABLE_UID = %s + """, + (test_uid,), + 
) + row = cursor.fetchone() + return row + + +def test_archives_put_broken_as_of_results( + dev_db_connection: psycopg2.extensions.connection, +) -> None: + cursor = dev_db_connection.cursor() + test_uid = insert_test_data_source(cursor) + + # Check data properly inserted + row = get_data_sources_archives_info(cursor, test_uid) + assert row[0] == "available" + assert row[1] is None + assert row[2] is None + + broken_as_of_date = datetime.datetime.now().strftime("%Y-%m-%d") + last_cached = datetime.datetime.now().strftime("%Y-%m-%d") + + archives_put_broken_as_of_results( + id=test_uid, + broken_as_of=broken_as_of_date, + last_cached=last_cached, + conn=dev_db_connection, + ) + + row = get_data_sources_archives_info(cursor, test_uid) + assert row[0] == "broken" + assert str(row[1]) == broken_as_of_date + assert str(row[2]) == last_cached + + +def test_archives_put_last_cached_results( + dev_db_connection: psycopg2.extensions.connection, +): + cursor = dev_db_connection.cursor() + test_uid = insert_test_data_source(cursor) + + # Check data properly inserted + row = get_data_sources_archives_info(cursor, test_uid) + assert row[0] == "available" + assert row[1] is None + assert row[2] is None + + last_cached = datetime.datetime(year=1999, month=5, day=30).strftime("%Y-%m-%d") + archives_put_last_cached_results( + id=test_uid, last_cached=last_cached, conn=dev_db_connection + ) + row = get_data_sources_archives_info(cursor, test_uid) + assert row[0] == "available" + assert row[1] is None + assert str(row[2]) == last_cached From daf16085eb7d9cc0dce56f9c03cf5c55923398c8 Mon Sep 17 00:00:00 2001 From: maxachis Date: Mon, 27 May 2024 08:52:22 -0400 Subject: [PATCH 089/127] Remove test parameters from query functions The query methods in the middleware and resource files are updated to remove the test parameters, leading to simplified code. 
The code accessibility is improved by not allowing externally supplied test results to be processed, reducing the possibility of incorrect outputs. All functions now rely only on database queries for data, resulting in a more robust system. --- middleware/archives_queries.py | 8 ++----- middleware/data_source_queries.py | 33 +++++++++++---------------- middleware/quick_search_query.py | 37 ++++++++++--------------------- resources/Archives.py | 2 +- resources/QuickSearch.py | 10 ++------- resources/SearchTokens.py | 11 ++------- 6 files changed, 32 insertions(+), 69 deletions(-) diff --git a/middleware/archives_queries.py b/middleware/archives_queries.py index 2367be37..f350468a 100644 --- a/middleware/archives_queries.py +++ b/middleware/archives_queries.py @@ -37,19 +37,15 @@ def archives_get_results(conn: PgConnection) -> list[tuple[Any, ...]]: def archives_get_query( - test_query_results: Optional[List[Dict[str, Any]]] = None, conn: Optional[PgConnection] = None, ) -> List[Dict[str, Any]]: """ - Processes the archives get results, either from the database or a provided set of test results, and converts dates to strings. + Processes the archives get results, either from the database and converts dates to strings. - :param test_query_results: A list of dictionaries representing test query results, if any. :param conn: A psycopg2 connection object to a PostgreSQL database. :return: A list of dictionaries with the query results after processing and date conversion. 
""" - results = ( - archives_get_results(conn) if not test_query_results else test_query_results - ) + results = archives_get_results(conn) archives_combined_results = [ dict(zip(ARCHIVES_GET_COLUMNS, result)) for result in results ] diff --git a/middleware/data_source_queries.py b/middleware/data_source_queries.py index 678aa057..ff6dfd9c 100644 --- a/middleware/data_source_queries.py +++ b/middleware/data_source_queries.py @@ -137,35 +137,28 @@ def data_source_by_id_results( def data_source_by_id_query( data_source_id: str = "", - test_query_results: Optional[List[Dict[str, Any]]] = None, conn: Optional[PgConnection] = None, ) -> Dict[str, Any]: """ - Processes a request to fetch data source details by ID, either from the database or provided test results. + Processes a request to fetch data source details by ID from the database :param data_source_id: The unique identifier for the data source. - :param test_query_results: A list of dictionaries representing test query results, if provided. :param conn: A psycopg2 connection object to a PostgreSQL database. :return: A dictionary with the data source details after processing. 
""" - if conn: - result = data_source_by_id_results(conn, data_source_id) - else: - result = test_query_results + result = data_source_by_id_results(conn, data_source_id) + if not result: + return [] - if result: - data_source_and_agency_columns = ( - DATA_SOURCES_APPROVED_COLUMNS + AGENCY_APPROVED_COLUMNS - ) - data_source_and_agency_columns.append("data_source_id") - data_source_and_agency_columns.append("agency_id") - data_source_and_agency_columns.append("agency_name") - data_source_details = dict(zip(data_source_and_agency_columns, result)) - data_source_details = convert_dates_to_strings(data_source_details) - data_source_details = format_arrays(data_source_details) - - else: - data_source_details = [] + data_source_and_agency_columns = ( + DATA_SOURCES_APPROVED_COLUMNS + AGENCY_APPROVED_COLUMNS + ) + data_source_and_agency_columns.append("data_source_id") + data_source_and_agency_columns.append("agency_id") + data_source_and_agency_columns.append("agency_name") + data_source_details = dict(zip(data_source_and_agency_columns, result)) + data_source_details = convert_dates_to_strings(data_source_details) + data_source_details = format_arrays(data_source_details) return data_source_details diff --git a/middleware/quick_search_query.py b/middleware/quick_search_query.py index 4584c097..10681244 100644 --- a/middleware/quick_search_query.py +++ b/middleware/quick_search_query.py @@ -105,18 +105,14 @@ def spacy_search_query( def quick_search_query( search: str = "", location: str = "", - test_query_results: Optional[List[Dict[str, Any]]] = None, conn: Optional[PgConnection] = None, - test: bool = False, ) -> Dict[str, Any]: """ Performs a quick search using both unaltered and lemmatized search terms, returning the more fruitful result set. :param search: The search term. :param location: The location term. - :param test_query_results: Predefined results for testing purposes. :param conn: A psycopg2 connection to the database. 
- :param test: Flag indicating whether the function is being called in a test context. :return: A dictionary with the count of results and the data itself. """ data_sources = {"count": 0, "data": []} @@ -129,16 +125,8 @@ def quick_search_query( if conn: cursor = conn.cursor() - unaltered_results = ( - unaltered_search_query(cursor, search, location) - if not test_query_results - else test_query_results - ) - spacy_results = ( - spacy_search_query(cursor, search, location) - if not test_query_results - else test_query_results - ) + unaltered_results = unaltered_search_query(cursor, search, location) + spacy_results = spacy_search_query(cursor, search, location) # Compare altered search term results with unaltered search term results, return the longer list results = ( @@ -160,18 +148,17 @@ def quick_search_query( "data": data_source_matches_converted, } - if not test_query_results and not test: - current_datetime = datetime.datetime.now() - datetime_string = current_datetime.strftime("%Y-%m-%d %H:%M:%S") + current_datetime = datetime.datetime.now() + datetime_string = current_datetime.strftime("%Y-%m-%d %H:%M:%S") - query_results = json.dumps(data_sources["data"]).replace("'", "") + query_results = json.dumps(data_sources["data"]).replace("'", "") - cursor.execute( - INSERT_LOG_QUERY.format( - search, location, query_results, data_sources["count"], datetime_string - ), - ) - conn.commit() - cursor.close() + cursor.execute( + INSERT_LOG_QUERY.format( + search, location, query_results, data_sources["count"], datetime_string + ), + ) + conn.commit() + cursor.close() return data_sources diff --git a/resources/Archives.py b/resources/Archives.py index 3e82b35d..7e11d39d 100644 --- a/resources/Archives.py +++ b/resources/Archives.py @@ -29,7 +29,7 @@ def get(self) -> Any: - Any: The cleaned results of archives combined from the database query, or an error message if an exception occurs. 
""" archives_combined_results_clean = archives_get_query( - test_query_results=[], conn=self.psycopg2_connection + conn=self.psycopg2_connection ) return archives_combined_results_clean diff --git a/resources/QuickSearch.py b/resources/QuickSearch.py index 69e600f6..7b1c70b1 100644 --- a/resources/QuickSearch.py +++ b/resources/QuickSearch.py @@ -35,21 +35,15 @@ def get(self, search: str, location: str) -> Dict[str, Any]: Returns: - A dictionary containing a message about the search results and the data found, if any. """ - try: - data = request.get_json() - test = data.get("test_flag") - except: - test = False - try: data_sources = quick_search_query( - search, location, [], self.psycopg2_connection, test + search, location, self.psycopg2_connection ) if data_sources["count"] == 0: self.psycopg2_connection = initialize_psycopg2_connection() data_sources = quick_search_query( - search, location, [], self.psycopg2_connection + search, location, self.psycopg2_connection ) if data_sources["count"] == 0: diff --git a/resources/SearchTokens.py b/resources/SearchTokens.py index 5f9630be..062ee980 100644 --- a/resources/SearchTokens.py +++ b/resources/SearchTokens.py @@ -60,14 +60,7 @@ def get(self) -> Dict[str, Any]: if endpoint == "quick-search": try: - data = request.get_json() - test = data.get("test_flag") - except: - test = False - try: - data_sources = quick_search_query( - arg1, arg2, [], self.psycopg2_connection, test - ) + data_sources = quick_search_query(arg1, arg2, self.psycopg2_connection) return data_sources @@ -113,7 +106,7 @@ def get(self) -> Dict[str, Any]: elif endpoint == "data-sources-by-id": try: data_source_details = data_source_by_id_query( - arg1, [], self.psycopg2_connection + arg1, self.psycopg2_connection ) if data_source_details: return data_source_details From 6cb12c99ae7a1dc631b1f10fa46b8103694cf6ce Mon Sep 17 00:00:00 2001 From: maxachis Date: Mon, 27 May 2024 12:49:11 -0400 Subject: [PATCH 090/127] Refactor fixture import paths in test 
files This commit mainly addresses the change in import paths for database connection objects and cursors across multiple test files. `tests.middleware.fixtures` has been moved to `tests.fixtures`, resulting in changes to import statements in the affected test files. This provides a cleaner organization and management of test cases, making code more maintainable. --- tests/{middleware => }/fixtures.py | 0 tests/middleware/test_archives_queries.py | 3 +-- tests/middleware/test_data_source_queries.py | 2 +- tests/middleware/test_login_queries.py | 2 +- tests/middleware/test_quick_search_query.py | 9 ++------- tests/middleware/test_reset_token_queries.py | 2 +- tests/middleware/test_user_queries.py | 2 +- 7 files changed, 7 insertions(+), 13 deletions(-) rename tests/{middleware => }/fixtures.py (100%) diff --git a/tests/middleware/fixtures.py b/tests/fixtures.py similarity index 100% rename from tests/middleware/fixtures.py rename to tests/fixtures.py diff --git a/tests/middleware/test_archives_queries.py b/tests/middleware/test_archives_queries.py index 8018624b..2f701e92 100644 --- a/tests/middleware/test_archives_queries.py +++ b/tests/middleware/test_archives_queries.py @@ -11,10 +11,9 @@ archives_put_last_cached_results, ) from tests.middleware.helper_functions import ( - insert_test_agencies_and_sources, has_expected_keys, ) -from tests.middleware.fixtures import ( +from tests.fixtures import ( dev_db_connection, db_cursor, connection_with_test_data, diff --git a/tests/middleware/test_data_source_queries.py b/tests/middleware/test_data_source_queries.py index 972e59f5..27c28b8c 100644 --- a/tests/middleware/test_data_source_queries.py +++ b/tests/middleware/test_data_source_queries.py @@ -12,7 +12,7 @@ from tests.middleware.helper_functions import ( get_boolean_dictionary, ) -from tests.middleware.fixtures import connection_with_test_data, dev_db_connection +from tests.fixtures import connection_with_test_data, dev_db_connection @pytest.fixture diff --git 
a/tests/middleware/test_login_queries.py b/tests/middleware/test_login_queries.py index 9b86d930..fb67d493 100644 --- a/tests/middleware/test_login_queries.py +++ b/tests/middleware/test_login_queries.py @@ -12,7 +12,7 @@ ) from middleware.custom_exceptions import UserNotFoundError, TokenNotFoundError from tests.middleware.helper_functions import create_test_user -from tests.middleware.fixtures import dev_db_connection, db_cursor +from tests.fixtures import db_cursor, dev_db_connection def test_login_query(db_cursor: psycopg2.extensions.cursor) -> None: diff --git a/tests/middleware/test_quick_search_query.py b/tests/middleware/test_quick_search_query.py index 2ffacdb2..56de8baa 100644 --- a/tests/middleware/test_quick_search_query.py +++ b/tests/middleware/test_quick_search_query.py @@ -1,7 +1,4 @@ -from datetime import datetime - import psycopg2 -import pytz from middleware.quick_search_query import ( unaltered_search_query, @@ -9,14 +6,12 @@ QUICK_SEARCH_COLUMNS, ) from tests.middleware.helper_functions import ( - insert_test_agencies_and_sources, has_expected_keys, get_most_recent_quick_search_query_log, ) -from tests.middleware.fixtures import ( - dev_db_connection, - db_cursor, +from tests.fixtures import ( connection_with_test_data, + dev_db_connection ) diff --git a/tests/middleware/test_reset_token_queries.py b/tests/middleware/test_reset_token_queries.py index bf246d0c..25b53d83 100644 --- a/tests/middleware/test_reset_token_queries.py +++ b/tests/middleware/test_reset_token_queries.py @@ -14,7 +14,7 @@ create_test_user, get_reset_tokens_for_email, ) -from tests.middleware.fixtures import dev_db_connection, db_cursor +from tests.fixtures import db_cursor, dev_db_connection def test_check_reset_token(db_cursor: psycopg2.extensions.cursor) -> None: diff --git a/tests/middleware/test_user_queries.py b/tests/middleware/test_user_queries.py index e273e79f..9ad4cfc3 100644 --- a/tests/middleware/test_user_queries.py +++ b/tests/middleware/test_user_queries.py 
@@ -4,7 +4,7 @@ from middleware.custom_exceptions import UserNotFoundError from middleware.user_queries import user_post_results, user_check_email from tests.middleware.helper_functions import create_test_user -from tests.middleware.fixtures import dev_db_connection, db_cursor +from tests.fixtures import db_cursor, dev_db_connection def test_user_post_query(db_cursor: psycopg2.extensions.cursor) -> None: From 926798c5a71c56f3a17c5cc02e246f10998d75af Mon Sep 17 00:00:00 2001 From: maxachis Date: Mon, 27 May 2024 12:57:35 -0400 Subject: [PATCH 091/127] Move helper functions and refactor tests This commit relocates the helper functions from 'tests/middleware' to 'tests', adjusting import paths in various test files accordingly. Additionally, the 'client' fixture in 'test_endpoints' is replaced with a new 'client_with_mock_db' fixture, and a new 'client_with_db' fixture is introduced in 'fixtures.py'. This enhances the organization and maintainability of the test infrastructure. --- tests/fixtures.py | 27 +++++++++++++++++++- tests/{middleware => }/helper_functions.py | 0 tests/middleware/test_archives_queries.py | 2 +- tests/middleware/test_data_source_queries.py | 2 +- tests/middleware/test_login_queries.py | 2 +- tests/middleware/test_quick_search_query.py | 5 ++-- tests/middleware/test_reset_token_queries.py | 2 +- tests/middleware/test_user_queries.py | 2 +- tests/test_endpoints.py | 20 +++------------ 9 files changed, 36 insertions(+), 26 deletions(-) rename tests/{middleware => }/helper_functions.py (100%) diff --git a/tests/fixtures.py b/tests/fixtures.py index a9f79fc4..6bd560e0 100644 --- a/tests/fixtures.py +++ b/tests/fixtures.py @@ -5,8 +5,10 @@ import psycopg2 import pytest from dotenv import load_dotenv +from flask.testing import FlaskClient -from tests.middleware.helper_functions import insert_test_agencies_and_sources +from app import create_app +from tests.helper_functions import insert_test_agencies_and_sources @pytest.fixture @@ -77,3 +79,26 @@ def 
connection_with_test_data( except psycopg2.errors.UniqueViolation: dev_db_connection.rollback() return dev_db_connection + + +@pytest.fixture +def client_with_mock_db(mocker) -> FlaskClient: + """ + Create a client with a mocked database connection + :param mocker: + :return: + """ + app = create_app(mocker.MagicMock()) + with app.test_client() as client: + yield client + +@pytest.fixture +def client_with_db(dev_db_connection: psycopg2.extensions.connection): + """ + Creates a client with database connection + :param dev_db_connection: + :return: + """ + app = create_app(dev_db_connection) + with app.test_client() as client: + yield client \ No newline at end of file diff --git a/tests/middleware/helper_functions.py b/tests/helper_functions.py similarity index 100% rename from tests/middleware/helper_functions.py rename to tests/helper_functions.py diff --git a/tests/middleware/test_archives_queries.py b/tests/middleware/test_archives_queries.py index 2f701e92..ee1b0168 100644 --- a/tests/middleware/test_archives_queries.py +++ b/tests/middleware/test_archives_queries.py @@ -10,7 +10,7 @@ archives_put_broken_as_of_results, archives_put_last_cached_results, ) -from tests.middleware.helper_functions import ( +from tests.helper_functions import ( has_expected_keys, ) from tests.fixtures import ( diff --git a/tests/middleware/test_data_source_queries.py b/tests/middleware/test_data_source_queries.py index 27c28b8c..fb20c658 100644 --- a/tests/middleware/test_data_source_queries.py +++ b/tests/middleware/test_data_source_queries.py @@ -9,7 +9,7 @@ data_source_by_id_query, get_data_sources_for_map, ) -from tests.middleware.helper_functions import ( +from tests.helper_functions import ( get_boolean_dictionary, ) from tests.fixtures import connection_with_test_data, dev_db_connection diff --git a/tests/middleware/test_login_queries.py b/tests/middleware/test_login_queries.py index fb67d493..aacc548e 100644 --- a/tests/middleware/test_login_queries.py +++ 
b/tests/middleware/test_login_queries.py @@ -11,7 +11,7 @@ is_admin, ) from middleware.custom_exceptions import UserNotFoundError, TokenNotFoundError -from tests.middleware.helper_functions import create_test_user +from tests.helper_functions import create_test_user from tests.fixtures import db_cursor, dev_db_connection diff --git a/tests/middleware/test_quick_search_query.py b/tests/middleware/test_quick_search_query.py index 56de8baa..985da803 100644 --- a/tests/middleware/test_quick_search_query.py +++ b/tests/middleware/test_quick_search_query.py @@ -5,13 +5,12 @@ quick_search_query, QUICK_SEARCH_COLUMNS, ) -from tests.middleware.helper_functions import ( +from tests.helper_functions import ( has_expected_keys, get_most_recent_quick_search_query_log, ) from tests.fixtures import ( - connection_with_test_data, - dev_db_connection + connection_with_test_data, dev_db_connection ) diff --git a/tests/middleware/test_reset_token_queries.py b/tests/middleware/test_reset_token_queries.py index 25b53d83..ee22a689 100644 --- a/tests/middleware/test_reset_token_queries.py +++ b/tests/middleware/test_reset_token_queries.py @@ -9,7 +9,7 @@ add_reset_token, delete_reset_token, ) -from tests.middleware.helper_functions import ( +from tests.helper_functions import ( create_reset_token, create_test_user, get_reset_tokens_for_email, diff --git a/tests/middleware/test_user_queries.py b/tests/middleware/test_user_queries.py index 9ad4cfc3..4e51540d 100644 --- a/tests/middleware/test_user_queries.py +++ b/tests/middleware/test_user_queries.py @@ -3,7 +3,7 @@ from middleware.custom_exceptions import UserNotFoundError from middleware.user_queries import user_post_results, user_check_email -from tests.middleware.helper_functions import create_test_user +from tests.helper_functions import create_test_user from tests.fixtures import db_cursor, dev_db_connection diff --git a/tests/test_endpoints.py b/tests/test_endpoints.py index 7af98dfa..8c631615 100644 --- a/tests/test_endpoints.py 
+++ b/tests/test_endpoints.py @@ -4,7 +4,6 @@ """ from collections import namedtuple -from typing import Type, Any, List import pytest from unittest.mock import patch @@ -12,7 +11,6 @@ from flask.testing import FlaskClient from flask_restful import Resource -from app import create_app from resources.Agencies import Agencies from resources.ApiKey import ApiKey from resources.Archives import Archives @@ -30,7 +28,7 @@ from resources.ResetTokenValidation import ResetTokenValidation from resources.SearchTokens import SearchTokens from resources.User import User - +from tests.fixtures import client_with_mock_db # Define constants for HTTP methods GET = "get" @@ -39,18 +37,6 @@ DELETE = "delete" -@pytest.fixture -def client(mocker) -> FlaskClient: - """ - Create a client with a mocked database connection - :param mocker: - :return: - """ - app = create_app(mocker.MagicMock()) - with app.test_client() as client: - yield client - - def run_endpoint_tests( client: FlaskClient, endpoint: str, class_type: Resource, allowed_methods: list[str] ): @@ -95,7 +81,7 @@ def run_endpoint_tests( @pytest.mark.parametrize("test_parameter", test_parameters) -def test_endpoints(client: FlaskClient, test_parameter) -> None: +def test_endpoints(client_with_mock_db: FlaskClient, test_parameter) -> None: """ Using the test_parameters list, this tests all endpoints to ensure that only the appropriate methods can be called from the endpoints @@ -106,7 +92,7 @@ def test_endpoints(client: FlaskClient, test_parameter) -> None: :return: """ run_endpoint_tests( - client, + client_with_mock_db, test_parameter.endpoint, test_parameter.class_type, test_parameter.allowed_methods, From 86e6cd39e05b59472ced2251784e5c573f9e0796 Mon Sep 17 00:00:00 2001 From: maxachis Date: Wed, 29 May 2024 20:46:25 -0400 Subject: [PATCH 092/127] Refactor test suite by relocating helper functions and updating client fixtures This commit moves several helper functions from the `test_archives_queries.py` file to the 
`helper_functions.py` file which makes the test suite more maintainable and organized. The update also includes the introduction of new `client_with_db` fixture in 'helper_functions.py' for better testing setup. --- tests/helper_functions.py | 100 ++++++++++++++++++++++ tests/middleware/test_archives_queries.py | 29 +------ 2 files changed, 101 insertions(+), 28 deletions(-) diff --git a/tests/helper_functions.py b/tests/helper_functions.py index 8ce20233..4122d5fb 100644 --- a/tests/helper_functions.py +++ b/tests/helper_functions.py @@ -2,9 +2,11 @@ import uuid from collections import namedtuple +from datetime import datetime, timedelta from typing import Optional import psycopg2.extensions +from flask.testing import FlaskClient TestTokenInsert = namedtuple("TestTokenInsert", ["id", "email", "token"]) TestUser = namedtuple("TestUser", ["id", "email", "password_hash"]) @@ -180,3 +182,101 @@ def get_boolean_dictionary(keys: tuple) -> dict: for key in keys: d[key] = False return d + + +UserInfo = namedtuple("UserInfo", ["email", "password"]) + + +def create_test_user_api(client: FlaskClient) -> UserInfo: + email = str(uuid.uuid4()) + password = str(uuid.uuid4()) + response = client.post( + "/user", + json={"email": email, "password": password}, + ) + assert response.status_code == 200, "User creation not successful" + return UserInfo(email=email, password=password) + + +def login_and_return_session_token( + client_with_db: FlaskClient, user_info: UserInfo +) -> str: + response = client_with_db.post( + "/login", + json={"email": user_info.email, "password": user_info.password}, + ) + assert response.status_code == 200, "User login unsuccessful" + session_token = response.json.get("data") + return session_token + + +def get_user_password_digest(cursor: psycopg2.extensions.cursor, user_info): + cursor.execute( + """ + SELECT password_digest from users where email = %s + """, + (user_info.email,), + ) + return cursor.fetchone()[0] + + +def 
request_reset_password_api(client_with_db, mocker, user_info): + mocker.patch("resources.RequestResetPassword.requests.post") + response = client_with_db.post( + "/request-reset-password", json={"email": user_info.email} + ) + token = response.json.get("token") + return token + + +def create_api_key(client_with_db, user_info): + response = client_with_db.get( + "/api_key", json={"email": user_info.email, "password": user_info.password} + ) + assert response.status_code == 200, "API key creation not successful" + api_key = response.json.get("api_key") + return api_key + + +def insert_test_data_source(cursor: psycopg2.extensions.cursor) -> str: + """ + Insert test data source and return id + :param cursor: + :return: randomly generated uuid + """ + test_uid = str(uuid.uuid4()) + cursor.execute( + """ + INSERT INTO + PUBLIC.DATA_SOURCES ( + airtable_uid, + NAME, + DESCRIPTION, + RECORD_TYPE, + SOURCE_URL, + APPROVAL_STATUS, + URL_STATUS + ) + VALUES + (%s,'Example Data Source', 'Example Description', + 'Type A','http://src1.com','approved','available') + """, + (test_uid,), + ) + return test_uid + + +def give_user_admin_role( + connection: psycopg2.extensions.connection, user_info: UserInfo +): + cursor = connection.cursor() + + # User requires admin privileges + cursor.execute( + """ + UPDATE users + SET role = 'admin' + WHERE email = %s + """, + (user_info.email,), + ) diff --git a/tests/middleware/test_archives_queries.py b/tests/middleware/test_archives_queries.py index ee1b0168..d406963c 100644 --- a/tests/middleware/test_archives_queries.py +++ b/tests/middleware/test_archives_queries.py @@ -12,6 +12,7 @@ ) from tests.helper_functions import ( has_expected_keys, + insert_test_data_source, ) from tests.fixtures import ( dev_db_connection, @@ -67,34 +68,6 @@ def test_archives_get_columns( return -def insert_test_data_source(cursor: psycopg2.extensions.cursor) -> str: - """ - Insert test data source and return id - :param cursor: - :return: randomly generated uuid 
- """ - test_uid = str(uuid.uuid4()) - cursor.execute( - """ - INSERT INTO - PUBLIC.DATA_SOURCES ( - airtable_uid, - NAME, - DESCRIPTION, - RECORD_TYPE, - SOURCE_URL, - APPROVAL_STATUS, - URL_STATUS - ) - VALUES - (%s,'Example Data Source', 'Example Description', - 'Type A','http://src1.com','approved','available') - """, - (test_uid,), - ) - return test_uid - - def get_data_sources_archives_info(cursor, test_uid): cursor.execute( """ From 087e6a30b49329448d37f4b381eb2068285edb6d Mon Sep 17 00:00:00 2001 From: maxachis Date: Wed, 29 May 2024 20:46:45 -0400 Subject: [PATCH 093/127] Add integration tests for various endpoints This commit adds integration tests for various API endpoints such as search tokens, data sources map, data sources by id, and user interactions among others. These tests ensure that the API endpoints work as expected and return the correct data. They also help to identify any failing or incorrectly functioning endpoints promptly. --- tests/integration/__init__.py | 0 tests/integration/test_agencies.py | 18 ++++++ tests/integration/test_api_key.py | 28 +++++++++ tests/integration/test_archives.py | 55 +++++++++++++++++ tests/integration/test_data_sources.py | 61 +++++++++++++++++++ tests/integration/test_data_sources_by_id.py | 50 +++++++++++++++ tests/integration/test_data_sources_map.py | 26 ++++++++ .../test_data_sources_needs_identification.py | 32 ++++++++++ tests/integration/test_login.py | 28 +++++++++ tests/integration/test_quick_search.py | 31 ++++++++++ tests/integration/test_refresh_session.py | 31 ++++++++++ .../test_request_reset_password.py | 41 +++++++++++++ tests/integration/test_reset_password.py | 32 ++++++++++ .../test_reset_token_validation.py | 18 ++++++ tests/integration/test_search_tokens.py | 27 ++++++++ tests/integration/test_user.py | 53 ++++++++++++++++ 16 files changed, 531 insertions(+) create mode 100644 tests/integration/__init__.py create mode 100644 tests/integration/test_agencies.py create mode 100644 
tests/integration/test_api_key.py create mode 100644 tests/integration/test_archives.py create mode 100644 tests/integration/test_data_sources.py create mode 100644 tests/integration/test_data_sources_by_id.py create mode 100644 tests/integration/test_data_sources_map.py create mode 100644 tests/integration/test_data_sources_needs_identification.py create mode 100644 tests/integration/test_login.py create mode 100644 tests/integration/test_quick_search.py create mode 100644 tests/integration/test_refresh_session.py create mode 100644 tests/integration/test_request_reset_password.py create mode 100644 tests/integration/test_reset_password.py create mode 100644 tests/integration/test_reset_token_validation.py create mode 100644 tests/integration/test_search_tokens.py create mode 100644 tests/integration/test_user.py diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/integration/test_agencies.py b/tests/integration/test_agencies.py new file mode 100644 index 00000000..abc4890f --- /dev/null +++ b/tests/integration/test_agencies.py @@ -0,0 +1,18 @@ +import psycopg2 +import pytest +from tests.fixtures import connection_with_test_data, dev_db_connection, client_with_db +from tests.helper_functions import create_test_user_api, create_api_key + + +def test_agencies_get( + client_with_db, dev_db_connection: psycopg2.extensions.connection +): + user_info = create_test_user_api(client_with_db) + api_key = create_api_key(client_with_db, user_info) + response = client_with_db.get( + "/agencies/2", + headers={"Authorization": f"Bearer {api_key}"}, + ) + assert response.status_code == 200 + assert len(response.json["data"]) > 0 + diff --git a/tests/integration/test_api_key.py b/tests/integration/test_api_key.py new file mode 100644 index 00000000..ae8c9494 --- /dev/null +++ b/tests/integration/test_api_key.py @@ -0,0 +1,28 @@ +import uuid + +import psycopg2.extensions + +from tests.fixtures 
import dev_db_connection, client_with_db +from tests.helper_functions import create_test_user_api + + +def test_api_key_get(client_with_db, dev_db_connection: psycopg2.extensions.connection): + user_info = create_test_user_api(client_with_db) + + response = client_with_db.get( + "/api_key", + json={"email": user_info.email, "password": user_info.password}, + ) + assert response.status_code == 200, "API key creation not successful" + + # Check that API key aligned with user + cursor = dev_db_connection.cursor() + cursor.execute( + """ + SELECT api_key from users where email = %s + """, + (user_info.email, ) + ) + db_api_key = cursor.fetchone()[0] + assert db_api_key == response.json.get("api_key"), "API key returned not aligned with user API key in database" + diff --git a/tests/integration/test_archives.py b/tests/integration/test_archives.py new file mode 100644 index 00000000..3d3fb4b4 --- /dev/null +++ b/tests/integration/test_archives.py @@ -0,0 +1,55 @@ +import datetime +import json + +import psycopg2 + +from tests.fixtures import dev_db_connection, client_with_db +from tests.helper_functions import ( + create_test_user_api, + login_and_return_session_token, + get_user_password_digest, + request_reset_password_api, + create_api_key, + insert_test_data_source, +) + + +def test_archives_get( + client_with_db, dev_db_connection: psycopg2.extensions.connection +): + user_info = create_test_user_api(client_with_db) + api_key = create_api_key(client_with_db, user_info) + response = client_with_db.get( + "/archives", + headers={"Authorization": f"Bearer {api_key}"}, + ) + assert response.status_code == 200, "Archives endpoint returned non-200" + assert len(response.json) > 0, "Endpoint should return more than 0 results" + + +def test_archives_put( + client_with_db, dev_db_connection: psycopg2.extensions.connection +): + user_info = create_test_user_api(client_with_db) + api_key = create_api_key(client_with_db, user_info) + data_source_id = 
insert_test_data_source(dev_db_connection.cursor()) + last_cached = datetime.date(year=2020, month=3, day=4) + broken_as_of = datetime.date(year=1993, month=11, day=13) + response = client_with_db.put( + "/archives", + headers={"Authorization": f"Bearer {api_key}"}, + data=json.dumps({ + "id": data_source_id, + "last_cached": str(last_cached), + "broken_source_url_as_of": str(broken_as_of) + }) + ) + assert response.status_code == 200, "Endpoint returned non-200" + + cursor = dev_db_connection.cursor() + cursor.execute(""" + SELECT last_cached, broken_source_url_as_of FROM data_sources where airtable_uid = %s + """, (data_source_id, )) + row = cursor.fetchone() + assert row[0] == last_cached + assert row[1] == broken_as_of diff --git a/tests/integration/test_data_sources.py b/tests/integration/test_data_sources.py new file mode 100644 index 00000000..0d645029 --- /dev/null +++ b/tests/integration/test_data_sources.py @@ -0,0 +1,61 @@ +import uuid + +import psycopg2 +import pytest +from tests.fixtures import ( + connection_with_test_data, + dev_db_connection, + connection_with_test_data, + client_with_db, +) +from tests.helper_functions import ( + get_boolean_dictionary, + create_test_user_api, + create_api_key, + give_user_admin_role, +) + + +def test_data_sources_get( + client_with_db, connection_with_test_data: psycopg2.extensions.connection +): + inserted_data_sources_found = get_boolean_dictionary( + ("Source 1", "Source 2", "Source 3") + ) + user_info = create_test_user_api(client_with_db) + api_key = create_api_key(client_with_db, user_info) + response = client_with_db.get( + "/data-sources", + headers={"Authorization": f"Bearer {api_key}"}, + ) + assert response.status_code == 200 + data = response.get_json()["data"] + for result in data: + name = result["name"] + if name in inserted_data_sources_found: + inserted_data_sources_found[name] = True + assert inserted_data_sources_found["Source 1"] + assert not inserted_data_sources_found["Source 2"] + assert not 
inserted_data_sources_found["Source 3"] + + +def test_data_sources_post( + client_with_db, dev_db_connection: psycopg2.extensions.connection +): + user_info = create_test_user_api(client_with_db) + give_user_admin_role(dev_db_connection, user_info) + api_key = create_api_key(client_with_db, user_info) + + name = str(uuid.uuid4()) + response = client_with_db.post( + "/data-sources", + json={"name": name}, + headers={"Authorization": f"Bearer {api_key}"}, + ) + assert response.status_code == 200 + cursor = dev_db_connection.cursor() + cursor.execute(""" + SELECT * from data_sources WHERE name=%s + """, (name,)) + rows = cursor.fetchall() + assert(len(rows)) == 1 diff --git a/tests/integration/test_data_sources_by_id.py b/tests/integration/test_data_sources_by_id.py new file mode 100644 index 00000000..c01bf2ff --- /dev/null +++ b/tests/integration/test_data_sources_by_id.py @@ -0,0 +1,50 @@ +import json +import uuid +from urllib.parse import quote + +import psycopg2 +import pytest +from tests.fixtures import connection_with_test_data, dev_db_connection, client_with_db +from tests.helper_functions import ( + create_test_user_api, + create_api_key, + give_user_admin_role, +) + + +def test_data_sources_by_id_get( + client_with_db, connection_with_test_data: psycopg2.extensions.connection +): + user_info = create_test_user_api(client_with_db) + api_key = create_api_key(client_with_db, user_info) + response = client_with_db.get( + "/data-sources-by-id/SOURCE_UID_1", + headers={"Authorization": f"Bearer {api_key}"}, + ) + assert response.status_code == 200 + assert response.json["data"]["homepage_url"] == "http://src1.com" + + +def test_data_sources_by_id_put( + client_with_db, connection_with_test_data: psycopg2.extensions.connection +): + user_info = create_test_user_api(client_with_db) + give_user_admin_role(connection_with_test_data, user_info) + api_key = create_api_key(client_with_db, user_info) + desc = str(uuid.uuid4()) + response = client_with_db.put( + 
f"/data-sources-by-id/SOURCE_UID_1", + headers={"Authorization": f"Bearer {api_key}"}, + json={"description": desc}, + ) + assert response.status_code == 200 + cursor = connection_with_test_data.cursor() + cursor.execute( + """ + SELECT description + FROM data_sources + WHERE airtable_uid = 'SOURCE_UID_1' + """ + ) + result = cursor.fetchone() + assert result[0] == desc diff --git a/tests/integration/test_data_sources_map.py b/tests/integration/test_data_sources_map.py new file mode 100644 index 00000000..dea0245e --- /dev/null +++ b/tests/integration/test_data_sources_map.py @@ -0,0 +1,26 @@ +import psycopg2 +import pytest +from tests.fixtures import connection_with_test_data, dev_db_connection, client_with_db +from tests.helper_functions import create_test_user_api, create_api_key + + +def test_data_sources_map_get( + client_with_db, connection_with_test_data: psycopg2.extensions.connection +): + user_info = create_test_user_api(client_with_db) + api_key = create_api_key(client_with_db, user_info) + response = client_with_db.get( + "/data-sources-map", + headers={"Authorization": f"Bearer {api_key}"}, + ) + assert response.status_code == 200 + data = response.json["data"] + found_source = False + for result in data: + name = result["name"] + if name != "Source 1": + continue + found_source = True + assert result["lat"] == 30 + assert result["lng"] == 20 + assert found_source \ No newline at end of file diff --git a/tests/integration/test_data_sources_needs_identification.py b/tests/integration/test_data_sources_needs_identification.py new file mode 100644 index 00000000..e768fff0 --- /dev/null +++ b/tests/integration/test_data_sources_needs_identification.py @@ -0,0 +1,32 @@ +import psycopg2 +import pytest +from tests.fixtures import connection_with_test_data, dev_db_connection, client_with_db +from tests.helper_functions import ( + get_boolean_dictionary, + create_test_user_api, + create_api_key, +) + + +def test_data_sources_needs_identification( + 
client_with_db, connection_with_test_data: psycopg2.extensions.connection +): + inserted_data_sources_found = get_boolean_dictionary( + ("Source 1", "Source 2", "Source 3") + ) + user_info = create_test_user_api(client_with_db) + api_key = create_api_key(client_with_db, user_info) + response = client_with_db.get( + "/data-sources-needs-identification", + headers={"Authorization": f"Bearer {api_key}"}, + ) + assert response.status_code == 200 + + for result in response.json["data"]: + name = result["name"] + if name in inserted_data_sources_found: + inserted_data_sources_found[name] = True + + assert not inserted_data_sources_found["Source 1"] + assert inserted_data_sources_found["Source 2"] + assert not inserted_data_sources_found["Source 3"] diff --git a/tests/integration/test_login.py b/tests/integration/test_login.py new file mode 100644 index 00000000..e1a3c2a9 --- /dev/null +++ b/tests/integration/test_login.py @@ -0,0 +1,28 @@ +import uuid + +import psycopg2.extensions + +from tests.fixtures import dev_db_connection, client_with_db +from tests.helper_functions import create_test_user_api, login_and_return_session_token + + +def test_login_post(client_with_db, dev_db_connection: psycopg2.extensions.connection): + + # Create user + user_info = create_test_user_api(client_with_db) + session_token = login_and_return_session_token(client_with_db, user_info) + + cursor = dev_db_connection.cursor() + cursor.execute( + """ + SELECT email from session_tokens WHERE token = %s + """, + (session_token,), + ) + rows = cursor.fetchall() + assert len(rows) == 1, "Session token should only exist once in database" + + row = rows[0] + assert ( + row[0] == user_info.email + ), "Email in session_tokens table does not match user email" diff --git a/tests/integration/test_quick_search.py b/tests/integration/test_quick_search.py new file mode 100644 index 00000000..2c2693fc --- /dev/null +++ b/tests/integration/test_quick_search.py @@ -0,0 +1,31 @@ +from urllib.parse import quote + 
+from tests.fixtures import dev_db_connection, client_with_db, connection_with_test_data +from tests.helper_functions import ( + create_test_user_api, + create_api_key, +) + + +def test_quick_search_get(client_with_db, connection_with_test_data): + user_info = create_test_user_api(client_with_db) + api_key = create_api_key(client_with_db, user_info) + + search_term = "Source 1" + location = "City A" + + # URL encode the search term and location + encoded_search_term = quote(search_term) + encoded_location = quote(location) + + response = client_with_db.get( + f"/quick-search/{encoded_search_term}/{encoded_location}", + headers={"Authorization": f"Bearer {api_key}"}, + ) + assert response.status_code == 200, "Quick Search endpoint call was not successful" + data = response.json.get("data") + assert data["count"] == 1, "Quick Search endpoint response should return only one entry" + entry = data["data"][0] + assert entry["agency_name"] == "Agency A" + assert entry["airtable_uid"] == "SOURCE_UID_1" + diff --git a/tests/integration/test_refresh_session.py b/tests/integration/test_refresh_session.py new file mode 100644 index 00000000..91a12499 --- /dev/null +++ b/tests/integration/test_refresh_session.py @@ -0,0 +1,31 @@ +import psycopg2.extensions + +from tests.fixtures import dev_db_connection, client_with_db +from tests.helper_functions import create_test_user_api, login_and_return_session_token + + +def test_refresh_session_post(client_with_db, dev_db_connection: psycopg2.extensions.connection): + test_user = create_test_user_api(client_with_db) + old_session_token = login_and_return_session_token(client_with_db, test_user) + response = client_with_db.post( + "/refresh-session", + json={"session_token": old_session_token} + ) + assert response.status_code == 200 + new_session_token = response.json.get("data") + + assert old_session_token != new_session_token, "New and old tokens should be different" + + # Check that old_session_token is not in session tokens, and 
new_session token does + cursor = dev_db_connection.cursor() + cursor.execute(""" + SELECT * FROM session_tokens where token = %s; + """, (new_session_token,)) + rows = cursor.fetchall() + assert len(rows) == 1, "Only one row should exist for the session token in the session_tokens table" + + cursor.execute(""" + SELECT * FROM session_tokens where token = %s; + """, (old_session_token,)) + rows = cursor.fetchall() + assert len(rows) == 0, "No row should exist for the old session token in the session_tokens table" diff --git a/tests/integration/test_request_reset_password.py b/tests/integration/test_request_reset_password.py new file mode 100644 index 00000000..08b2556f --- /dev/null +++ b/tests/integration/test_request_reset_password.py @@ -0,0 +1,41 @@ +import psycopg2 + +from tests.fixtures import dev_db_connection, client_with_db +from tests.helper_functions import create_test_user_api + + +def test_request_reset_password_post( + client_with_db, dev_db_connection: psycopg2.extensions.connection, mocker +): + + user_info = create_test_user_api(client_with_db) + + mock_post = mocker.patch("resources.RequestResetPassword.requests.post") + response = client_with_db.post( + "/request-reset-password", json={"email": user_info.email} + ) + reset_token = response.json.get("token") + assert ( + response.status_code == 200 + ), "Request to Reset Password request was not returned successfully" + assert mock_post.call_count == 1, "request.post should be called only once" + assert ( + mock_post.call_args[0][0] == "https://api.mailgun.net/v3/mail.pdap.io/messages" + ) + + + cursor = dev_db_connection.cursor() + cursor.execute( + """ + SELECT email FROM reset_tokens where token = %s + """, + (reset_token,), + ) + rows = cursor.fetchall() + assert ( + len(rows) == 1 + ), "Only one row should have a reset token associated with this email" + email = rows[0][0] + assert ( + email == user_info.email + ), "Email associated with reset token should match the user's email" diff --git 
a/tests/integration/test_reset_password.py b/tests/integration/test_reset_password.py new file mode 100644 index 00000000..c47b0b65 --- /dev/null +++ b/tests/integration/test_reset_password.py @@ -0,0 +1,32 @@ +import uuid + +import psycopg2 +from pytest_mock import mocker + +from tests.fixtures import dev_db_connection, client_with_db +from tests.helper_functions import ( + create_test_user_api, + login_and_return_session_token, + get_user_password_digest, + request_reset_password_api, +) + + +def test_reset_password_post( + client_with_db, dev_db_connection: psycopg2.extensions.connection, mocker +): + user_info = create_test_user_api(client_with_db) + cursor = dev_db_connection.cursor() + old_password_digest = get_user_password_digest(cursor, user_info) + + token = request_reset_password_api(client_with_db, mocker, user_info) + new_password = str(uuid.uuid4()) + response = client_with_db.post( + "/reset-password", + json={"email": user_info.email, "token": token, "password": new_password}, + ) + assert response.status_code == 200 + new_password_digest = get_user_password_digest(cursor, user_info) + assert ( + new_password_digest != old_password_digest + ), "Old and new password digests should be distinct" diff --git a/tests/integration/test_reset_token_validation.py b/tests/integration/test_reset_token_validation.py new file mode 100644 index 00000000..c93a9d29 --- /dev/null +++ b/tests/integration/test_reset_token_validation.py @@ -0,0 +1,18 @@ +from pytest_mock import mocker + +from tests.helper_functions import ( + create_test_user_api, + request_reset_password_api, +) +from tests.fixtures import dev_db_connection, client_with_db + + +def test_reset_token_validation(client_with_db, dev_db_connection, mocker): + user_info = create_test_user_api(client_with_db) + token = request_reset_password_api(client_with_db, mocker, user_info) + response = client_with_db.post( + "/reset-token-validation", + json={"token": token} + ) + assert response.status_code == 200, 
"reset-token-validation endpoint call unsuccessful" + assert response.json.get("message") == "Token is valid", "Message does not return 'Token is valid'" diff --git a/tests/integration/test_search_tokens.py b/tests/integration/test_search_tokens.py new file mode 100644 index 00000000..d5f7bae5 --- /dev/null +++ b/tests/integration/test_search_tokens.py @@ -0,0 +1,27 @@ +import psycopg2 +import pytest +from tests.fixtures import connection_with_test_data, dev_db_connection, client_with_db +from tests.helper_functions import create_test_user_api, create_api_key + + +def test_search_tokens_get( + client_with_db, connection_with_test_data: psycopg2.extensions.connection +): + user_info = create_test_user_api(client_with_db) + api_key = create_api_key(client_with_db, user_info) + response = client_with_db.get( + "/search-tokens", + headers={"Authorization": f"Bearer {api_key}"}, + query_string={ + "endpoint": "quick-search", + "arg1": "Source 1", + "arg2": "City A" + } + ) + assert response.status_code == 200 + data = response.json.get("data") + assert data["count"] == 1, "Quick Search endpoint response should return only one entry" + entry = data["data"][0] + assert entry["agency_name"] == "Agency A" + assert entry["airtable_uid"] == "SOURCE_UID_1" + diff --git a/tests/integration/test_user.py b/tests/integration/test_user.py new file mode 100644 index 00000000..81ca3cde --- /dev/null +++ b/tests/integration/test_user.py @@ -0,0 +1,53 @@ +import uuid + +import psycopg2 + +from tests.fixtures import dev_db_connection, client_with_db +from tests.helper_functions import ( + create_test_user_api, + get_user_password_digest, + create_api_key, +) + + +def test_user_post(client_with_db, dev_db_connection: psycopg2.extensions.connection): + user_info = create_test_user_api(client_with_db) + cursor = dev_db_connection.cursor() + cursor.execute( + f"SELECT email, password_digest FROM users WHERE email = %s", (user_info.email,) + ) + rows = cursor.fetchall() + + assert len(rows) 
== 1, "One row should be returned by user query" + email = rows[0][0] + password_digest = rows[0][1] + assert user_info.email == email, "DB user email and original email do not match" + assert ( + user_info.password != password_digest + ), "DB user password digest should not match password" + + +def test_user_put(client_with_db, dev_db_connection: psycopg2.extensions.connection): + user_info = create_test_user_api(client_with_db) + cursor = dev_db_connection.cursor() + + old_password_hash = get_user_password_digest(cursor, user_info) + + api_key = create_api_key(client_with_db, user_info) + new_password = str(uuid.uuid4()) + + response = client_with_db.put( + "/user", + headers={"Authorization": f"Bearer {api_key}"}, + json={"email": user_info.email, "password": new_password}, + ) + assert response.status_code == 200, "User password update not successful" + + new_password_hash = get_user_password_digest(cursor, user_info) + + assert ( + new_password != new_password_hash + ), "Password and password hash should be distinct after password update" + assert ( + new_password_hash != old_password_hash + ), "Password hashes should be different on update" From 3a264db1c5439ac32900558ea85d07f70a3fbf4d Mon Sep 17 00:00:00 2001 From: maxachis Date: Thu, 30 May 2024 07:37:54 -0400 Subject: [PATCH 094/127] Improve readability of code via tweaks and docstrings This update introduces improved readability of an extensive code base by adding docstrings to different functions across multiple pages. It also features some optimizations to several test functions for various endpoints, ensuring code clarity and facilitating easier debugging for future development work. 
--- tests/helper_functions.py | 39 ++++++++++++++++++- tests/integration/test_agencies.py | 5 +++ tests/integration/test_api_key.py | 13 +++++-- tests/integration/test_archives.py | 27 +++++++++---- tests/integration/test_data_sources.py | 18 +++++++-- tests/integration/test_data_sources_by_id.py | 13 +++++-- tests/integration/test_data_sources_map.py | 8 +++- .../test_data_sources_needs_identification.py | 6 ++- tests/integration/test_login.py | 6 ++- tests/integration/test_quick_search.py | 11 +++++- tests/integration/test_refresh_session.py | 39 ++++++++++++++----- .../test_request_reset_password.py | 6 ++- tests/integration/test_reset_password.py | 6 +++ .../test_reset_token_validation.py | 18 ++++++--- tests/integration/test_search_tokens.py | 16 ++++---- tests/integration/test_user.py | 10 +++++ 16 files changed, 192 insertions(+), 49 deletions(-) diff --git a/tests/helper_functions.py b/tests/helper_functions.py index 4122d5fb..cf1ff9eb 100644 --- a/tests/helper_functions.py +++ b/tests/helper_functions.py @@ -188,6 +188,11 @@ def get_boolean_dictionary(keys: tuple) -> dict: def create_test_user_api(client: FlaskClient) -> UserInfo: + """ + Create a test user through calling the /user endpoint via the Flask API + :param client: + :return: + """ email = str(uuid.uuid4()) password = str(uuid.uuid4()) response = client.post( @@ -201,6 +206,13 @@ def create_test_user_api(client: FlaskClient) -> UserInfo: def login_and_return_session_token( client_with_db: FlaskClient, user_info: UserInfo ) -> str: + """ + Login as a given user and return the associated session token, + using the /login endpoint of the Flask API + :param client_with_db: + :param user_info: + :return: + """ response = client_with_db.post( "/login", json={"email": user_info.email, "password": user_info.password}, @@ -211,6 +223,12 @@ def login_and_return_session_token( def get_user_password_digest(cursor: psycopg2.extensions.cursor, user_info): + """ + Get the associated password digest of a user 
(given their email) from the database + :param cursor: + :param user_info: + :return: + """ cursor.execute( """ SELECT password_digest from users where email = %s @@ -221,6 +239,14 @@ def get_user_password_digest(cursor: psycopg2.extensions.cursor, user_info): def request_reset_password_api(client_with_db, mocker, user_info): + """ + Send a request to reset password via a Flask call to the /request-reset-password endpoint + and return the reset token + :param client_with_db: + :param mocker: + :param user_info: + :return: + """ mocker.patch("resources.RequestResetPassword.requests.post") response = client_with_db.post( "/request-reset-password", json={"email": user_info.email} @@ -230,6 +256,12 @@ def request_reset_password_api(client_with_db, mocker, user_info): def create_api_key(client_with_db, user_info): + """ + Obtain an api key for the given user, via a Flask call to the /api-key endpoint + :param client_with_db: + :param user_info: + :return: + """ response = client_with_db.get( "/api_key", json={"email": user_info.email, "password": user_info.password} ) @@ -269,9 +301,14 @@ def insert_test_data_source(cursor: psycopg2.extensions.cursor) -> str: def give_user_admin_role( connection: psycopg2.extensions.connection, user_info: UserInfo ): + """ + Give the given user an admin role. 
+ :param connection: + :param user_info: + :return: + """ cursor = connection.cursor() - # User requires admin privileges cursor.execute( """ UPDATE users diff --git a/tests/integration/test_agencies.py b/tests/integration/test_agencies.py index abc4890f..01fb5797 100644 --- a/tests/integration/test_agencies.py +++ b/tests/integration/test_agencies.py @@ -1,3 +1,4 @@ +"""Integration tests for /agencies endpoint""" import psycopg2 import pytest from tests.fixtures import connection_with_test_data, dev_db_connection, client_with_db @@ -7,6 +8,10 @@ def test_agencies_get( client_with_db, dev_db_connection: psycopg2.extensions.connection ): + """ + Test that GET call to /agencies endpoint properly retrieves a nonzero amount of data + """ + user_info = create_test_user_api(client_with_db) api_key = create_api_key(client_with_db, user_info) response = client_with_db.get( diff --git a/tests/integration/test_api_key.py b/tests/integration/test_api_key.py index ae8c9494..58585f4d 100644 --- a/tests/integration/test_api_key.py +++ b/tests/integration/test_api_key.py @@ -1,3 +1,5 @@ +"""Integration tests for /api_key endpoint""" + import uuid import psycopg2.extensions @@ -7,6 +9,10 @@ def test_api_key_get(client_with_db, dev_db_connection: psycopg2.extensions.connection): + """ + Test that GET call to /api_key endpoint successfully creates an API key and aligns it with the user's API key in the database + """ + user_info = create_test_user_api(client_with_db) response = client_with_db.get( @@ -21,8 +27,9 @@ def test_api_key_get(client_with_db, dev_db_connection: psycopg2.extensions.conn """ SELECT api_key from users where email = %s """, - (user_info.email, ) + (user_info.email,), ) db_api_key = cursor.fetchone()[0] - assert db_api_key == response.json.get("api_key"), "API key returned not aligned with user API key in database" - + assert db_api_key == response.json.get( + "api_key" + ), "API key returned not aligned with user API key in database" diff --git 
a/tests/integration/test_archives.py b/tests/integration/test_archives.py index 3d3fb4b4..afff86da 100644 --- a/tests/integration/test_archives.py +++ b/tests/integration/test_archives.py @@ -1,3 +1,5 @@ +"""Integration tests for /archives endpoint""" + import datetime import json @@ -17,6 +19,9 @@ def test_archives_get( client_with_db, dev_db_connection: psycopg2.extensions.connection ): + """ + Test that GET call to /archives endpoint successfully retrieves a non-zero amount of data + """ user_info = create_test_user_api(client_with_db) api_key = create_api_key(client_with_db, user_info) response = client_with_db.get( @@ -30,6 +35,9 @@ def test_archives_get( def test_archives_put( client_with_db, dev_db_connection: psycopg2.extensions.connection ): + """ + Test that PUT call to /archives endpoint successfully updates the data source with last_cached and broken_source_url_as_of fields + """ user_info = create_test_user_api(client_with_db) api_key = create_api_key(client_with_db, user_info) data_source_id = insert_test_data_source(dev_db_connection.cursor()) @@ -38,18 +46,23 @@ def test_archives_put( response = client_with_db.put( "/archives", headers={"Authorization": f"Bearer {api_key}"}, - data=json.dumps({ - "id": data_source_id, - "last_cached": str(last_cached), - "broken_source_url_as_of": str(broken_as_of) - }) + data=json.dumps( + { + "id": data_source_id, + "last_cached": str(last_cached), + "broken_source_url_as_of": str(broken_as_of), + } + ), ) assert response.status_code == 200, "Endpoint returned non-200" cursor = dev_db_connection.cursor() - cursor.execute(""" + cursor.execute( + """ SELECT last_cached, broken_source_url_as_of FROM data_sources where airtable_uid = %s - """, (data_source_id, )) + """, + (data_source_id,), + ) row = cursor.fetchone() assert row[0] == last_cached assert row[1] == broken_as_of diff --git a/tests/integration/test_data_sources.py b/tests/integration/test_data_sources.py index 0d645029..1ab5964e 100644 --- 
a/tests/integration/test_data_sources.py +++ b/tests/integration/test_data_sources.py @@ -1,3 +1,5 @@ +"""Integration tests for /data-sources endpoint""" + import uuid import psycopg2 @@ -19,6 +21,9 @@ def test_data_sources_get( client_with_db, connection_with_test_data: psycopg2.extensions.connection ): + """ + Test that GET call to /data-sources endpoint retrieves data sources and correctly identifies specific sources by name + """ inserted_data_sources_found = get_boolean_dictionary( ("Source 1", "Source 2", "Source 3") ) @@ -42,6 +47,10 @@ def test_data_sources_get( def test_data_sources_post( client_with_db, dev_db_connection: psycopg2.extensions.connection ): + """ + Test that POST call to /data-sources endpoint successfully creates a new data source with a unique name and verifies its existence in the database + """ + user_info = create_test_user_api(client_with_db) give_user_admin_role(dev_db_connection, user_info) api_key = create_api_key(client_with_db, user_info) @@ -54,8 +63,11 @@ def test_data_sources_post( ) assert response.status_code == 200 cursor = dev_db_connection.cursor() - cursor.execute(""" + cursor.execute( + """ SELECT * from data_sources WHERE name=%s - """, (name,)) + """, + (name,), + ) rows = cursor.fetchall() - assert(len(rows)) == 1 + assert (len(rows)) == 1 diff --git a/tests/integration/test_data_sources_by_id.py b/tests/integration/test_data_sources_by_id.py index c01bf2ff..ad4f2620 100644 --- a/tests/integration/test_data_sources_by_id.py +++ b/tests/integration/test_data_sources_by_id.py @@ -1,9 +1,7 @@ -import json -import uuid -from urllib.parse import quote +"""Integration tests for /data-sources-by-id endpoint""" +import uuid import psycopg2 -import pytest from tests.fixtures import connection_with_test_data, dev_db_connection, client_with_db from tests.helper_functions import ( create_test_user_api, @@ -15,6 +13,10 @@ def test_data_sources_by_id_get( client_with_db, connection_with_test_data: psycopg2.extensions.connection ): 
+ """ + Test that GET call to /data-sources-by-id/ endpoint retrieves the data source with the correct homepage URL + """ + user_info = create_test_user_api(client_with_db) api_key = create_api_key(client_with_db, user_info) response = client_with_db.get( @@ -28,6 +30,9 @@ def test_data_sources_by_id_get( def test_data_sources_by_id_put( client_with_db, connection_with_test_data: psycopg2.extensions.connection ): + """ + Test that PUT call to /data-sources-by-id/ endpoint successfully updates the description of the data source and verifies the change in the database + """ user_info = create_test_user_api(client_with_db) give_user_admin_role(connection_with_test_data, user_info) api_key = create_api_key(client_with_db, user_info) diff --git a/tests/integration/test_data_sources_map.py b/tests/integration/test_data_sources_map.py index dea0245e..5d1232cf 100644 --- a/tests/integration/test_data_sources_map.py +++ b/tests/integration/test_data_sources_map.py @@ -1,5 +1,6 @@ +"""Integration tests for /data-sources-map endpoint""" + import psycopg2 -import pytest from tests.fixtures import connection_with_test_data, dev_db_connection, client_with_db from tests.helper_functions import create_test_user_api, create_api_key @@ -7,6 +8,9 @@ def test_data_sources_map_get( client_with_db, connection_with_test_data: psycopg2.extensions.connection ): + """ + Test that GET call to /data-sources-map endpoint retrieves data sources and verifies the location (latitude and longitude) of a specific source by name + """ user_info = create_test_user_api(client_with_db) api_key = create_api_key(client_with_db, user_info) response = client_with_db.get( @@ -23,4 +27,4 @@ def test_data_sources_map_get( found_source = True assert result["lat"] == 30 assert result["lng"] == 20 - assert found_source \ No newline at end of file + assert found_source diff --git a/tests/integration/test_data_sources_needs_identification.py b/tests/integration/test_data_sources_needs_identification.py index 
e768fff0..d0e97369 100644 --- a/tests/integration/test_data_sources_needs_identification.py +++ b/tests/integration/test_data_sources_needs_identification.py @@ -1,5 +1,6 @@ +"""Integration tests for /data-sources-needs-identification endpoint""" + import psycopg2 -import pytest from tests.fixtures import connection_with_test_data, dev_db_connection, client_with_db from tests.helper_functions import ( get_boolean_dictionary, @@ -11,6 +12,9 @@ def test_data_sources_needs_identification( client_with_db, connection_with_test_data: psycopg2.extensions.connection ): + """ + Test that GET call to /data-sources-needs-identification endpoint retrieves data sources that need identification and correctly identifies specific sources by name + """ inserted_data_sources_found = get_boolean_dictionary( ("Source 1", "Source 2", "Source 3") ) diff --git a/tests/integration/test_login.py b/tests/integration/test_login.py index e1a3c2a9..f6fb2ec3 100644 --- a/tests/integration/test_login.py +++ b/tests/integration/test_login.py @@ -1,4 +1,4 @@ -import uuid +"""Integration tests for /login endpoint""" import psycopg2.extensions @@ -7,7 +7,9 @@ def test_login_post(client_with_db, dev_db_connection: psycopg2.extensions.connection): - + """ + Test that POST call to /login endpoint successfully logs in a user, creates a session token, and verifies the session token exists only once in the database with the correct email + """ # Create user user_info = create_test_user_api(client_with_db) session_token = login_and_return_session_token(client_with_db, user_info) diff --git a/tests/integration/test_quick_search.py b/tests/integration/test_quick_search.py index 2c2693fc..4ee32c1f 100644 --- a/tests/integration/test_quick_search.py +++ b/tests/integration/test_quick_search.py @@ -1,3 +1,5 @@ +"""Integration tests for /quick-search//" endpoint""" + from urllib.parse import quote from tests.fixtures import dev_db_connection, client_with_db, connection_with_test_data @@ -8,6 +10,10 @@ def 
test_quick_search_get(client_with_db, connection_with_test_data): + """ + Test that GET call to /quick-search// endpoint successfully retrieves a single entry with the correct agency name and airtable UID + """ + user_info = create_test_user_api(client_with_db) api_key = create_api_key(client_with_db, user_info) @@ -24,8 +30,9 @@ def test_quick_search_get(client_with_db, connection_with_test_data): ) assert response.status_code == 200, "Quick Search endpoint call was not successful" data = response.json.get("data") - assert data["count"] == 1, "Quick Search endpoint response should return only one entry" + assert ( + data["count"] == 1 + ), "Quick Search endpoint response should return only one entry" entry = data["data"][0] assert entry["agency_name"] == "Agency A" assert entry["airtable_uid"] == "SOURCE_UID_1" - diff --git a/tests/integration/test_refresh_session.py b/tests/integration/test_refresh_session.py index 91a12499..8d04e329 100644 --- a/tests/integration/test_refresh_session.py +++ b/tests/integration/test_refresh_session.py @@ -1,31 +1,50 @@ +"""Integration tests for /refresh-session endpoint.""" + import psycopg2.extensions from tests.fixtures import dev_db_connection, client_with_db from tests.helper_functions import create_test_user_api, login_and_return_session_token -def test_refresh_session_post(client_with_db, dev_db_connection: psycopg2.extensions.connection): +def test_refresh_session_post( + client_with_db, dev_db_connection: psycopg2.extensions.connection +): + """ + Test that POST call to /refresh-session endpoint successfully generates a new session token, ensures the new token is different from the old one, and verifies the old token is removed while the new token exists in the session tokens table + """ + test_user = create_test_user_api(client_with_db) old_session_token = login_and_return_session_token(client_with_db, test_user) response = client_with_db.post( - "/refresh-session", - json={"session_token": old_session_token} + 
"/refresh-session", json={"session_token": old_session_token} ) assert response.status_code == 200 new_session_token = response.json.get("data") - assert old_session_token != new_session_token, "New and old tokens should be different" + assert ( + old_session_token != new_session_token + ), "New and old tokens should be different" # Check that old_session_token is not in session tokens, and new_session token does cursor = dev_db_connection.cursor() - cursor.execute(""" + cursor.execute( + """ SELECT * FROM session_tokens where token = %s; - """, (new_session_token,)) + """, + (new_session_token,), + ) rows = cursor.fetchall() - assert len(rows) == 1, "Only one row should exist for the session token in the session_tokens table" + assert ( + len(rows) == 1 + ), "Only one row should exist for the session token in the session_tokens table" - cursor.execute(""" + cursor.execute( + """ SELECT * FROM session_tokens where token = %s; - """, (old_session_token,)) + """, + (old_session_token,), + ) rows = cursor.fetchall() - assert len(rows) == 0, "No row should exist for the old session token in the session_tokens table" + assert ( + len(rows) == 0 + ), "No row should exist for the old session token in the session_tokens table" diff --git a/tests/integration/test_request_reset_password.py b/tests/integration/test_request_reset_password.py index 08b2556f..f713701f 100644 --- a/tests/integration/test_request_reset_password.py +++ b/tests/integration/test_request_reset_password.py @@ -1,3 +1,5 @@ +"""Integration tests for /request-reset-password endpoint.""" + import psycopg2 from tests.fixtures import dev_db_connection, client_with_db @@ -7,6 +9,9 @@ def test_request_reset_password_post( client_with_db, dev_db_connection: psycopg2.extensions.connection, mocker ): + """ + Test that POST call to /request-reset-password endpoint successfully initiates a password reset request, sends a single email via Mailgun, and verifies the reset token is correctly associated with the user's 
email in the database + """ user_info = create_test_user_api(client_with_db) @@ -23,7 +28,6 @@ def test_request_reset_password_post( mock_post.call_args[0][0] == "https://api.mailgun.net/v3/mail.pdap.io/messages" ) - cursor = dev_db_connection.cursor() cursor.execute( """ diff --git a/tests/integration/test_reset_password.py b/tests/integration/test_reset_password.py index c47b0b65..e67e5c21 100644 --- a/tests/integration/test_reset_password.py +++ b/tests/integration/test_reset_password.py @@ -1,3 +1,5 @@ +"""Integration tests for /reset-password endpoint.""" + import uuid import psycopg2 @@ -15,6 +17,10 @@ def test_reset_password_post( client_with_db, dev_db_connection: psycopg2.extensions.connection, mocker ): + """ + Test that POST call to /reset-password endpoint successfully resets the user's password, and verifies the new password digest is distinct from the old one in the database + """ + user_info = create_test_user_api(client_with_db) cursor = dev_db_connection.cursor() old_password_digest = get_user_password_digest(cursor, user_info) diff --git a/tests/integration/test_reset_token_validation.py b/tests/integration/test_reset_token_validation.py index c93a9d29..c9fd6261 100644 --- a/tests/integration/test_reset_token_validation.py +++ b/tests/integration/test_reset_token_validation.py @@ -1,3 +1,5 @@ +"""Integration tests for /reset-token-validation endpoint.""" + from pytest_mock import mocker from tests.helper_functions import ( @@ -8,11 +10,15 @@ def test_reset_token_validation(client_with_db, dev_db_connection, mocker): + """ + Test that POST call to /reset-token-validation endpoint successfully validates the reset token and returns the correct message indicating token validity + """ user_info = create_test_user_api(client_with_db) token = request_reset_password_api(client_with_db, mocker, user_info) - response = client_with_db.post( - "/reset-token-validation", - json={"token": token} - ) - assert response.status_code == 200, "reset-token-validation 
endpoint call unsuccessful" - assert response.json.get("message") == "Token is valid", "Message does not return 'Token is valid'" + response = client_with_db.post("/reset-token-validation", json={"token": token}) + assert ( + response.status_code == 200 + ), "reset-token-validation endpoint call unsuccessful" + assert ( + response.json.get("message") == "Token is valid" + ), "Message does not return 'Token is valid'" diff --git a/tests/integration/test_search_tokens.py b/tests/integration/test_search_tokens.py index d5f7bae5..45d0b39b 100644 --- a/tests/integration/test_search_tokens.py +++ b/tests/integration/test_search_tokens.py @@ -1,3 +1,5 @@ +"""Integration tests for /search-tokens endpoint.""" + import psycopg2 import pytest from tests.fixtures import connection_with_test_data, dev_db_connection, client_with_db @@ -7,21 +9,21 @@ def test_search_tokens_get( client_with_db, connection_with_test_data: psycopg2.extensions.connection ): + """ + Test that GET call to /search-tokens endpoint with specified query parameters successfully retrieves search tokens and verifies the correct entry with agency name and airtable UID + """ user_info = create_test_user_api(client_with_db) api_key = create_api_key(client_with_db, user_info) response = client_with_db.get( "/search-tokens", headers={"Authorization": f"Bearer {api_key}"}, - query_string={ - "endpoint": "quick-search", - "arg1": "Source 1", - "arg2": "City A" - } + query_string={"endpoint": "quick-search", "arg1": "Source 1", "arg2": "City A"}, ) assert response.status_code == 200 data = response.json.get("data") - assert data["count"] == 1, "Quick Search endpoint response should return only one entry" + assert ( + data["count"] == 1 + ), "Quick Search endpoint response should return only one entry" entry = data["data"][0] assert entry["agency_name"] == "Agency A" assert entry["airtable_uid"] == "SOURCE_UID_1" - diff --git a/tests/integration/test_user.py b/tests/integration/test_user.py index 81ca3cde..2d810d18 
100644 --- a/tests/integration/test_user.py +++ b/tests/integration/test_user.py @@ -1,3 +1,5 @@ +"""Integration tests for /user endpoint.""" + import uuid import psycopg2 @@ -11,6 +13,10 @@ def test_user_post(client_with_db, dev_db_connection: psycopg2.extensions.connection): + """ + Test that POST call to /user endpoint successfully creates a new user and verifies the user's email and password digest in the database + """ + user_info = create_test_user_api(client_with_db) cursor = dev_db_connection.cursor() cursor.execute( @@ -28,6 +34,10 @@ def test_user_post(client_with_db, dev_db_connection: psycopg2.extensions.connec def test_user_put(client_with_db, dev_db_connection: psycopg2.extensions.connection): + """ + Test that PUT call to /user endpoint successfully updates the user's password and verifies the new password hash is distinct from both the plain new password and the old password hash in the database + """ + user_info = create_test_user_api(client_with_db) cursor = dev_db_connection.cursor() From 34cbec2654a399602e677b0c071cd522bd5f6738 Mon Sep 17 00:00:00 2001 From: maxachis Date: Thu, 30 May 2024 07:59:12 -0400 Subject: [PATCH 095/127] Update headers in integration tests Refactored the "Authorization" and "Content-Type" headers for request in the integration test_archives.py to enhance readability. This adjustment will make the tests easier to understand and maintain in the future. 
--- tests/integration/test_archives.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_archives.py b/tests/integration/test_archives.py index afff86da..01a407f1 100644 --- a/tests/integration/test_archives.py +++ b/tests/integration/test_archives.py @@ -45,8 +45,11 @@ def test_archives_put( broken_as_of = datetime.date(year=1993, month=11, day=13) response = client_with_db.put( "/archives", - headers={"Authorization": f"Bearer {api_key}"}, - data=json.dumps( + headers={ + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + }, + json=json.dumps( { "id": data_source_id, "last_cached": str(last_cached), From b3ac9e7b4af9ee4f517cc0ddef80738c79e1a020 Mon Sep 17 00:00:00 2001 From: maxachis Date: Fri, 31 May 2024 08:55:27 -0400 Subject: [PATCH 096/127] Fixed response status code for data source not found to return 200 instead of 404. --- resources/DataSources.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resources/DataSources.py b/resources/DataSources.py index b474d0cf..2e0db010 100644 --- a/resources/DataSources.py +++ b/resources/DataSources.py @@ -42,7 +42,7 @@ def get(self, data_source_id: str) -> Tuple[Dict[str, Any], int]: } else: - return {"message": "Data source not found."}, 404 + return {"message": "Data source not found."}, 200 @handle_exceptions @api_required From 2cee3cd9318215d02d21aea1e1fdb5157f5e57d0 Mon Sep 17 00:00:00 2001 From: maxachis Date: Fri, 31 May 2024 08:55:39 -0400 Subject: [PATCH 097/127] Fix typo in assertion for data source URL in integration test. 
--- tests/integration/test_data_sources_by_id.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_data_sources_by_id.py b/tests/integration/test_data_sources_by_id.py index ad4f2620..dbe1ea00 100644 --- a/tests/integration/test_data_sources_by_id.py +++ b/tests/integration/test_data_sources_by_id.py @@ -24,7 +24,7 @@ def test_data_sources_by_id_get( headers={"Authorization": f"Bearer {api_key}"}, ) assert response.status_code == 200 - assert response.json["data"]["homepage_url"] == "http://src1.com" + assert response.json["data"]["source_url"] == "http://src1.com" def test_data_sources_by_id_put( From 42b4f0dd4561d73f5215af043da8a1b595718fbc Mon Sep 17 00:00:00 2001 From: maxachis Date: Fri, 31 May 2024 08:56:08 -0400 Subject: [PATCH 098/127] Add unit tests for getting and updating data sources by ID in test_DataSources.py Explanation: This commit adds unit tests for the functions that get and update data sources by ID in the test_DataSources.py file. The tests cover scenarios where the data source is found and not found, as well as updating the data source. The tests ensure that the expected responses and status codes are returned. --- tests/resources/test_DataSources.py | 40 +++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 tests/resources/test_DataSources.py diff --git a/tests/resources/test_DataSources.py b/tests/resources/test_DataSources.py new file mode 100644 index 00000000..d3b6e885 --- /dev/null +++ b/tests/resources/test_DataSources.py @@ -0,0 +1,40 @@ +# The below line is required to bypass the api_required decorator, +# and must be positioned prior to other imports in order to work. 
+from unittest.mock import patch, MagicMock +patch("middleware.security.api_required", lambda x: x).start() +from tests.fixtures import client_with_mock_db + + +@patch("resources.DataSources.data_source_by_id_query") +def test_get_data_source_by_id_found( + mock_data_source_by_id_query, + client_with_mock_db, +): + mock_data_source_by_id_query.return_value = {"name": "Test Data Source"} + response = client_with_mock_db.get("/data-sources-by-id/test_id") + assert response.json == { + "message": "Successfully found data source", + "data": {"name": "Test Data Source"}, + } + assert response.status_code == 200 + + +@patch("resources.DataSources.data_source_by_id_query") +def test_get_data_source_by_id_not_found( + mock_data_source_by_id_query, + client_with_mock_db, +): + mock_data_source_by_id_query.return_value = None + response = client_with_mock_db.get("/data-sources-by-id/test_id") + assert response.json == {"message": "Data source not found."} + assert response.status_code == 200 + +def test_put_data_source_by_id( + client_with_mock_db, monkeypatch +): + + monkeypatch.setattr("resources.DataSources.request", MagicMock()) + # mock_request.get_json.return_value = {"name": "Updated Data Source"} + response = client_with_mock_db.put("/data-sources-by-id/test_id") + assert response.status_code == 200 + assert response.json == {"message": "Data source updated successfully."} From 4008157577d4d81e3b0d792b3f5ea7108f11a4f7 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 1 Jun 2024 14:03:23 -0400 Subject: [PATCH 099/127] Refactor helper_functions.py to generate a random email for test users if not provided. Add function to create API key in the database. 
--- tests/helper_functions.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tests/helper_functions.py b/tests/helper_functions.py index cf1ff9eb..81411af8 100644 --- a/tests/helper_functions.py +++ b/tests/helper_functions.py @@ -103,7 +103,7 @@ def create_reset_token(cursor: psycopg2.extensions.cursor) -> TestTokenInsert: def create_test_user( cursor, - email="example@example.com", + email="", password_hash="hashed_password_here", api_key="api_key_here", role=None, @@ -114,6 +114,8 @@ def create_test_user( :param cursor: :return: user id """ + if email == "": + email = f"testuser{uuid.uuid4().hex}@example.com" cursor.execute( """ INSERT INTO users (email, password_digest, api_key, role) @@ -269,6 +271,13 @@ def create_api_key(client_with_db, user_info): api_key = response.json.get("api_key") return api_key +def create_api_key_db(cursor, user_id: str): + api_key = uuid.uuid4().hex + cursor.execute( + "UPDATE users SET api_key = %s WHERE id = %s", (api_key, user_id) + ) + return api_key + def insert_test_data_source(cursor: psycopg2.extensions.cursor) -> str: """ From 988a3b473da7da3f9345e62a9801450be41257c1 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 1 Jun 2024 14:03:34 -0400 Subject: [PATCH 100/127] Implemented various test cases to check the validity and expiration status of API keys, session tokens, and access tokens in the security middleware module. 
--- tests/middleware/test_security.py | 129 ++++++++++++++++++++++++++++-- 1 file changed, 122 insertions(+), 7 deletions(-) diff --git a/tests/middleware/test_security.py b/tests/middleware/test_security.py index 7aadd7d6..9ebe03a6 100644 --- a/tests/middleware/test_security.py +++ b/tests/middleware/test_security.py @@ -1,7 +1,122 @@ -def test_api_required_user_not_found(): - """ - Test that the api_required decorator properly returns - the expected result when a user doesn't exist - :return: - """ - raise NotImplementedError +import datetime +import uuid + +import pytest +from unittest.mock import patch + +from middleware.login_queries import create_session_token +from middleware.security import is_valid, APIKeyStatus +from tests.helper_functions import ( + create_test_user, + UserInfo, + give_user_admin_role, + create_api_key_db, +) +from tests.fixtures import dev_db_connection + + +def test_no_api_key_provided(): + result = is_valid(api_key="", endpoint="", method="") + assert result == APIKeyStatus(is_valid=False, is_expired=False) + + +def test_api_key_exists_in_users_table_with_admin_role(dev_db_connection): + cursor = dev_db_connection.cursor() + test_user = create_test_user(cursor) + give_user_admin_role(dev_db_connection, UserInfo(test_user.email, "")) + api_key = create_api_key_db(cursor, test_user.id) + dev_db_connection.commit() + result = is_valid(api_key, "", "") + assert result == APIKeyStatus(is_valid=True, is_expired=False) + + +def test_api_key_exists_in_users_table_with_non_admin_role(dev_db_connection): + cursor = dev_db_connection.cursor() + test_user = create_test_user(cursor) + api_key = create_api_key_db(cursor, test_user.id) + dev_db_connection.commit() + result = is_valid(api_key, "", "") + assert result == APIKeyStatus(is_valid=True, is_expired=False) + + +def test_api_key_not_in_users_table_but_in_session_tokens_table(dev_db_connection): + cursor = dev_db_connection.cursor() + test_user = create_test_user(cursor) + token = 
create_session_token(cursor, test_user.id, test_user.email) + dev_db_connection.commit() + result = is_valid(token, "", "") + assert result == APIKeyStatus(is_valid=True, is_expired=False) + + +def test_expired_session_token(dev_db_connection): + cursor = dev_db_connection.cursor() + test_user = create_test_user(cursor) + token = create_session_token(cursor, test_user.id, test_user.email) + cursor.execute( + f"UPDATE session_tokens SET expiration_date = '{datetime.date(year=2020, month=3, day=4)}' WHERE token = '{token}'" + ) + dev_db_connection.commit() + result = is_valid(token, "", "") + assert result == APIKeyStatus(is_valid=False, is_expired=True) + + +def test_session_token_with_admin_role(dev_db_connection): + cursor = dev_db_connection.cursor() + test_user = create_test_user(cursor) + give_user_admin_role(dev_db_connection, UserInfo(test_user.email, "")) + token = create_session_token(cursor, test_user.id, test_user.email) + dev_db_connection.commit() + result = is_valid(token, "", "") + assert result == APIKeyStatus(is_valid=True, is_expired=False) + + +def test_api_key_exists_in_access_tokens_table(dev_db_connection): + cursor = dev_db_connection.cursor() + token = uuid.uuid4().hex + expiration = datetime.datetime(year=2030, month=1, day=1) + cursor.execute( + f"insert into access_tokens (token, expiration_date) values (%s, %s)", + (token, expiration), + ) + dev_db_connection.commit() + result = is_valid(token, "", "") + assert result == APIKeyStatus(is_valid=True, is_expired=False) + + +def test_api_key_not_exist_in_any_table(dev_db_connection): + token = uuid.uuid4().hex + result = is_valid(token, "", "") + assert result == APIKeyStatus(is_valid=False, is_expired=False) + + +def test_expired_access_token_in_access_tokens_table(dev_db_connection): + cursor = dev_db_connection.cursor() + token = uuid.uuid4().hex + expiration = datetime.datetime(year=1999, month=1, day=1) + cursor.execute( + f"insert into access_tokens (token, expiration_date) values (%s, 
%s)", + (token, expiration), + ) + dev_db_connection.commit() + result = is_valid(token, "", "") + assert result == APIKeyStatus(is_valid=False, is_expired=False) + + +def test_admin_only_action_with_non_admin_role(dev_db_connection): + cursor = dev_db_connection.cursor() + test_user = create_test_user(cursor) + api_key = create_api_key_db(cursor, test_user.id) + dev_db_connection.commit() + result = is_valid(api_key, "datasources", "PUT") + assert result == APIKeyStatus(is_valid=False, is_expired=False) + + +def test_admin_only_action_with_admin_role(dev_db_connection): + cursor = dev_db_connection.cursor() + test_user = create_test_user(cursor) + give_user_admin_role(dev_db_connection, UserInfo(test_user.email, "")) + api_key = create_api_key_db(cursor, test_user.id) + dev_db_connection.commit() + result = is_valid(api_key, "datasources", "PUT") + assert result == APIKeyStatus(is_valid=True, is_expired=False) + From e05622b54ddff5499d19a525f70dc12d62bcd12b Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 1 Jun 2024 14:04:13 -0400 Subject: [PATCH 101/127] Refactor security middleware to improve readability and efficiency in handling API key validation and user roles. 
--- middleware/security.py | 73 +++++++++++++++++++++++++++++------------- 1 file changed, 50 insertions(+), 23 deletions(-) diff --git a/middleware/security.py b/middleware/security.py index ab4118b4..b2dab8cf 100644 --- a/middleware/security.py +++ b/middleware/security.py @@ -23,21 +23,17 @@ def is_valid(api_key: str, endpoint: str, method: str) -> APIKeyStatus: if not api_key: return APIKeyStatus(is_valid=False, is_expired=False) + session_token_results = None psycopg2_connection = initialize_psycopg2_connection() cursor = psycopg2_connection.cursor() - cursor.execute(f"select id, api_key, role from users where api_key = '{api_key}'") - results = cursor.fetchall() - if len(results) > 0: - role = results[0][2] - - if not results: - cursor.execute( - f"select email, expiration_date from session_tokens where token = '{api_key}'" + role = get_role(api_key, cursor) + if role is None: + session_token_results = get_session_token( + api_key, cursor, session_token_results ) - results = cursor.fetchall() - if len(results) > 0: - email = results[0][0] - expiration_date = results[0][1] + if len(session_token_results) > 0: + email = session_token_results[0][0] + expiration_date = session_token_results[0][1] print(expiration_date, dt.utcnow()) if expiration_date < dt.utcnow(): @@ -46,21 +42,16 @@ def is_valid(api_key: str, endpoint: str, method: str) -> APIKeyStatus: if is_admin(cursor, email): role = "admin" - if not results: - cursor.execute(f"select id, token from access_tokens where token = '{api_key}'") - results = cursor.fetchall() - cursor.execute( - f"delete from access_tokens where expiration_date < '{dt.utcnow()}'" - ) - psycopg2_connection.commit() + if not session_token_results and role is None: + delete_expired_access_tokens(cursor, psycopg2_connection) + access_token = get_access_token(api_key, cursor) role = "user" - if not results: + if not access_token: return APIKeyStatus(is_valid=False, is_expired=False) - if endpoint in ("datasources", "datasourcebyid") 
and method in ("PUT", "POST"): - if role != "admin": - return APIKeyStatus(is_valid=False, is_expired=False) + if is_admin_only_action(endpoint, method) and role != "admin": + return APIKeyStatus(is_valid=False, is_expired=False) # Compare the API key in the user table to the API in the request header and proceed # through the protected route if it's valid. Otherwise, compare_digest will return False @@ -68,6 +59,42 @@ def is_valid(api_key: str, endpoint: str, method: str) -> APIKeyStatus: return APIKeyStatus(is_valid=True, is_expired=False) +def get_role(api_key, cursor): + cursor.execute(f"select id, api_key, role from users where api_key = '{api_key}'") + user_results = cursor.fetchall() + if len(user_results) > 0: + role = user_results[0][2] + if role is None: + return "user" + return role + return None + + +def get_session_token(api_key, cursor, session_token_results): + cursor.execute( + f"select email, expiration_date from session_tokens where token = '{api_key}'" + ) + session_token_results = cursor.fetchall() + return session_token_results + + +def get_access_token(api_key, cursor): + cursor.execute(f"select id, token from access_tokens where token = '{api_key}'") + results = cursor.fetchone() + if results: + return results[1] + return None + + +def delete_expired_access_tokens(cursor, psycopg2_connection): + cursor.execute(f"delete from access_tokens where expiration_date < '{dt.utcnow()}'") + psycopg2_connection.commit() + + +def is_admin_only_action(endpoint, method): + return endpoint in ("datasources", "datasourcebyid") and method in ("PUT", "POST") + + def api_required(func): """ The api_required decorator can be added to protect a route so that only authenticated users can access the information From 3e610c544850c76c4204d85e9ab7c6b0c5e1311e Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 1 Jun 2024 14:15:44 -0400 Subject: [PATCH 102/127] Add function to delete session token from database Added delete_session_token function to remove session token 
from the database table session_tokens. This function is called with the cursor and the old token to be deleted as parameters. --- middleware/login_queries.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/middleware/login_queries.py b/middleware/login_queries.py index ae4d4725..f24fedd5 100644 --- a/middleware/login_queries.py +++ b/middleware/login_queries.py @@ -1,3 +1,5 @@ +from datetime import datetime as dt + import jwt import os import datetime @@ -87,3 +89,9 @@ def token_results(cursor: PgCursor, token: str) -> Dict[str, Union[int, str]]: "id": results[0][0], "email": results[0][1], } + + +def delete_session_token(cursor, old_token): + cursor.execute( + f"delete from session_tokens where token = '{old_token}'" + ) From a4b6ef9860e9d1e48ce3a6a7e299cd7e4a2933a5 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 1 Jun 2024 14:15:53 -0400 Subject: [PATCH 103/127] Refactored RefreshSession.py to include delete_session_token function and improve session token refreshing logic --- resources/RefreshSession.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/resources/RefreshSession.py b/resources/RefreshSession.py index 75e77392..05f9e226 100644 --- a/resources/RefreshSession.py +++ b/resources/RefreshSession.py @@ -1,6 +1,5 @@ from flask import request -from middleware.login_queries import token_results, create_session_token -from datetime import datetime as dt +from middleware.login_queries import token_results, create_session_token, delete_session_token from typing import Dict, Any from resources.PsycopgResource import PsycopgResource, handle_exceptions @@ -25,17 +24,15 @@ def post(self) -> Dict[str, Any]: old_token = data.get("session_token") cursor = self.psycopg2_connection.cursor() user_data = token_results(cursor, old_token) - cursor.execute( - f"delete from session_tokens where token = '{old_token}' and expiration_date < '{dt.utcnow()}'" - ) + delete_session_token(cursor, old_token) self.psycopg2_connection.commit() if 
"id" in user_data: token = create_session_token(cursor, user_data["id"], user_data["email"]) - self.psycopg2_connection.commit() return { "message": "Successfully refreshed session token", "data": token, } + self.psycopg2_connection.commit() return {"message": "Invalid session token"}, 403 From 192088f7932e664d8cc9fc86ab64de55369bc321 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 1 Jun 2024 14:17:44 -0400 Subject: [PATCH 104/127] Fixed create_test_user so that email is set to a random email if not provided. --- tests/helper_functions.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/helper_functions.py b/tests/helper_functions.py index cf1ff9eb..8bed6949 100644 --- a/tests/helper_functions.py +++ b/tests/helper_functions.py @@ -103,7 +103,7 @@ def create_reset_token(cursor: psycopg2.extensions.cursor) -> TestTokenInsert: def create_test_user( cursor, - email="example@example.com", + email="", password_hash="hashed_password_here", api_key="api_key_here", role=None, @@ -114,6 +114,8 @@ def create_test_user( :param cursor: :return: user id """ + if email == "": + email = uuid.uuid4().hex + "@test.com" cursor.execute( """ INSERT INTO users (email, password_digest, api_key, role) From 07f5969d6330852303d6ccdd0c4a4e83ad8db3cd Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 1 Jun 2024 21:12:17 -0400 Subject: [PATCH 105/127] =?UTF-8?q?=E2=9C=A8=20Add=20ClientWithMockDB=20na?= =?UTF-8?q?medtuple=20in=20fixtures.py=20and=20update=20test=5FDataSources?= =?UTF-8?q?.py=20-=20Add=20ClientWithMockDB=20namedtuple=20in=20fixtures.p?= =?UTF-8?q?y=20to=20enhance=20client=20mocking=20capabilities=20and=20upda?= =?UTF-8?q?te=20test=5FDataSources.py=20to=20utilize=20the=20new=20namedtu?= =?UTF-8?q?ple=20for=20client=20operations.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- tests/fixtures.py | 10 ++++++---- tests/resources/test_DataSources.py | 8 ++++---- 2 files changed, 10 insertions(+), 8 
deletions(-) diff --git a/tests/fixtures.py b/tests/fixtures.py index 6bd560e0..77e2c157 100644 --- a/tests/fixtures.py +++ b/tests/fixtures.py @@ -1,6 +1,7 @@ """This module contains pytest fixtures employed by middleware tests.""" import os +from collections import namedtuple import psycopg2 import pytest @@ -80,17 +81,18 @@ def connection_with_test_data( dev_db_connection.rollback() return dev_db_connection - +ClientWithMockDB = namedtuple("ClientWithMockDB", ["client", "mock_db"]) @pytest.fixture -def client_with_mock_db(mocker) -> FlaskClient: +def client_with_mock_db(mocker) -> ClientWithMockDB: """ Create a client with a mocked database connection :param mocker: :return: """ - app = create_app(mocker.MagicMock()) + mock_db = mocker.MagicMock() + app = create_app(mock_db) with app.test_client() as client: - yield client + yield ClientWithMockDB(client, mock_db) @pytest.fixture def client_with_db(dev_db_connection: psycopg2.extensions.connection): diff --git a/tests/resources/test_DataSources.py b/tests/resources/test_DataSources.py index d3b6e885..f08ef098 100644 --- a/tests/resources/test_DataSources.py +++ b/tests/resources/test_DataSources.py @@ -4,14 +4,13 @@ patch("middleware.security.api_required", lambda x: x).start() from tests.fixtures import client_with_mock_db - @patch("resources.DataSources.data_source_by_id_query") def test_get_data_source_by_id_found( mock_data_source_by_id_query, client_with_mock_db, ): mock_data_source_by_id_query.return_value = {"name": "Test Data Source"} - response = client_with_mock_db.get("/data-sources-by-id/test_id") + response = client_with_mock_db.client.get("/data-sources-by-id/test_id") assert response.json == { "message": "Successfully found data source", "data": {"name": "Test Data Source"}, @@ -19,13 +18,14 @@ def test_get_data_source_by_id_found( assert response.status_code == 200 + @patch("resources.DataSources.data_source_by_id_query") def test_get_data_source_by_id_not_found( mock_data_source_by_id_query, 
client_with_mock_db, ): mock_data_source_by_id_query.return_value = None - response = client_with_mock_db.get("/data-sources-by-id/test_id") + response = client_with_mock_db.client.get("/data-sources-by-id/test_id") assert response.json == {"message": "Data source not found."} assert response.status_code == 200 @@ -35,6 +35,6 @@ def test_put_data_source_by_id( monkeypatch.setattr("resources.DataSources.request", MagicMock()) # mock_request.get_json.return_value = {"name": "Updated Data Source"} - response = client_with_mock_db.put("/data-sources-by-id/test_id") + response = client_with_mock_db.client.put("/data-sources-by-id/test_id") assert response.status_code == 200 assert response.json == {"message": "Data source updated successfully."} From 583e855af8acb2852abb3947a436e9ccec30d163 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 1 Jun 2024 21:12:39 -0400 Subject: [PATCH 106/127] =?UTF-8?q?=E2=9C=A8=20Add=20bypass=20for=20api=5F?= =?UTF-8?q?required=20decorator=20in=20tests/resources/=5F=5Finit=5F=5F.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- tests/resources/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/resources/__init__.py b/tests/resources/__init__.py index e69de29b..b8dfa95d 100644 --- a/tests/resources/__init__.py +++ b/tests/resources/__init__.py @@ -0,0 +1,4 @@ +# The below line is required to bypass the api_required decorator, +# and must be positioned prior to other imports in order to work. 
+from unittest.mock import patch, MagicMock +patch("middleware.security.api_required", lambda x: x).start() \ No newline at end of file From cf8be37ad50dc3accb826aaa09f670ec7bc8f2b7 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 1 Jun 2024 21:13:09 -0400 Subject: [PATCH 107/127] =?UTF-8?q?=E2=9C=85=20Add=20check=5Fresponse=5Fst?= =?UTF-8?q?atus=20function=20in=20tests/helper=5Ffunctions.py=20to=20verif?= =?UTF-8?q?y=20response=20status=20code.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- tests/helper_functions.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/helper_functions.py b/tests/helper_functions.py index 8bed6949..70cd249c 100644 --- a/tests/helper_functions.py +++ b/tests/helper_functions.py @@ -319,3 +319,6 @@ def give_user_admin_role( """, (user_info.email,), ) + +def check_response_status(response, status_code): + assert response.status_code == status_code, f"Expected status code {status_code}, got {response.status_code}: {response.text}" \ No newline at end of file From b5cee849309e8c720fe6fcf0f97a92b5e096fb62 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 1 Jun 2024 21:13:34 -0400 Subject: [PATCH 108/127] =?UTF-8?q?=F0=9F=9B=A0=EF=B8=8F=20refactor:=20Ref?= =?UTF-8?q?actor=20token=20retrieval=20and=20deletion=20functions=20in=20l?= =?UTF-8?q?ogin=5Fqueries.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Refactor the token_results function to return a named tuple SessionTokenUserData instead of a dictionary for improved readability and consistency. Also, refactor the delete_session_token function for better code formatting and clarity. 
--- middleware/login_queries.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/middleware/login_queries.py b/middleware/login_queries.py index f24fedd5..679525c9 100644 --- a/middleware/login_queries.py +++ b/middleware/login_queries.py @@ -1,3 +1,4 @@ +from collections import namedtuple from datetime import datetime as dt import jwt @@ -73,25 +74,23 @@ def create_session_token(cursor: PgCursor, user_id: int, email: str) -> str: return session_token -def token_results(cursor: PgCursor, token: str) -> Dict[str, Union[int, str]]: +SessionTokenUserData = namedtuple("SessionTokenUserData", ["id", "email"]) + + +def get_session_token_user_data(cursor: PgCursor, token: str) -> SessionTokenUserData: """ Retrieves session token data. :param cursor: A cursor object from a psycopg2 connection. :param token: The session token. - :return: A dictionary containing session token data or an error message. + :return: Session token data or an error message. """ cursor.execute(f"select id, email from session_tokens where token = %s", (token,)) results = cursor.fetchall() if len(results) == 0: raise TokenNotFoundError("The specified token was not found.") - return { - "id": results[0][0], - "email": results[0][1], - } + return SessionTokenUserData(id=results[0][0], email=results[0][1]) def delete_session_token(cursor, old_token): - cursor.execute( - f"delete from session_tokens where token = '{old_token}'" - ) + cursor.execute(f"delete from session_tokens where token = '{old_token}'") From a92d61c83c352d243793a67b5d5129e1d0a46381 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 1 Jun 2024 21:14:12 -0400 Subject: [PATCH 109/127] Refactor RefreshSession.py to handle custom exceptions and improve code readability" --- resources/RefreshSession.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/resources/RefreshSession.py b/resources/RefreshSession.py index 05f9e226..a8ea5958 100644 --- 
a/resources/RefreshSession.py +++ b/resources/RefreshSession.py @@ -1,5 +1,7 @@ from flask import request -from middleware.login_queries import token_results, create_session_token, delete_session_token + +from middleware.custom_exceptions import TokenNotFoundError +from middleware.login_queries import get_session_token_user_data, create_session_token, delete_session_token from typing import Dict, Any from resources.PsycopgResource import PsycopgResource, handle_exceptions @@ -23,16 +25,14 @@ def post(self) -> Dict[str, Any]: data = request.get_json() old_token = data.get("session_token") cursor = self.psycopg2_connection.cursor() - user_data = token_results(cursor, old_token) + try: + user_data = get_session_token_user_data(cursor, old_token) + except TokenNotFoundError: + return {"message": "Invalid session token"}, 403 delete_session_token(cursor, old_token) + token = create_session_token(cursor, user_data.id, user_data.email) self.psycopg2_connection.commit() - - if "id" in user_data: - token = create_session_token(cursor, user_data["id"], user_data["email"]) - return { - "message": "Successfully refreshed session token", - "data": token, - } - self.psycopg2_connection.commit() - - return {"message": "Invalid session token"}, 403 + return { + "message": "Successfully refreshed session token", + "data": token, + } From 2af56347155c581dd5178e1a2f2fe76ff82f2043 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 1 Jun 2024 21:14:26 -0400 Subject: [PATCH 110/127] Refactor endpoint test fixture import in test_endpoints.py" --- tests/test_endpoints.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_endpoints.py b/tests/test_endpoints.py index 8c631615..d6ab52e4 100644 --- a/tests/test_endpoints.py +++ b/tests/test_endpoints.py @@ -28,7 +28,7 @@ from resources.ResetTokenValidation import ResetTokenValidation from resources.SearchTokens import SearchTokens from resources.User import User -from tests.fixtures import client_with_mock_db 
+from tests.fixtures import client_with_mock_db, ClientWithMockDB # Define constants for HTTP methods GET = "get" @@ -81,7 +81,7 @@ def run_endpoint_tests( @pytest.mark.parametrize("test_parameter", test_parameters) -def test_endpoints(client_with_mock_db: FlaskClient, test_parameter) -> None: +def test_endpoints(client_with_mock_db: ClientWithMockDB, test_parameter) -> None: """ Using the test_parameters list, this tests all endpoints to ensure that only the appropriate methods can be called from the endpoints @@ -92,7 +92,7 @@ def test_endpoints(client_with_mock_db: FlaskClient, test_parameter) -> None: :return: """ run_endpoint_tests( - client_with_mock_db, + client_with_mock_db.client, test_parameter.endpoint, test_parameter.class_type, test_parameter.allowed_methods, From d84cbabf3fd6f10b5269d45d7670a49c011434c2 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 1 Jun 2024 21:14:44 -0400 Subject: [PATCH 111/127] refactor token_results to get_session_token_user_data & update tests. --- tests/middleware/test_login_queries.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/middleware/test_login_queries.py b/tests/middleware/test_login_queries.py index aacc548e..8c4da62f 100644 --- a/tests/middleware/test_login_queries.py +++ b/tests/middleware/test_login_queries.py @@ -7,7 +7,7 @@ from middleware.login_queries import ( login_results, create_session_token, - token_results, + get_session_token_user_data, is_admin, ) from middleware.custom_exceptions import UserNotFoundError, TokenNotFoundError @@ -25,7 +25,7 @@ def test_login_query(db_cursor: psycopg2.extensions.cursor) -> None: """ test_user = create_test_user(db_cursor) - user_data = login_results(db_cursor, "example@example.com") + user_data = login_results(db_cursor, test_user.email) assert user_data["password_digest"] == test_user.password_hash @@ -48,9 +48,9 @@ def test_create_session_token_results(db_cursor: psycopg2.extensions.cursor) -> test_user = 
create_test_user(db_cursor) with patch("os.getenv", return_value="mysecretkey") as mock_getenv: token = create_session_token(db_cursor, test_user.id, test_user.email) - new_token = token_results(db_cursor, token) + new_token = get_session_token_user_data(db_cursor, token) - assert new_token["email"] == test_user.email + assert new_token.email == test_user.email def test_is_admin(db_cursor: psycopg2.extensions.cursor) -> None: @@ -81,4 +81,4 @@ def test_is_admin_raises_user_not_logged_in_error(db_cursor): def test_token_results_raises_token_not_found_error(db_cursor): """token_results() should raise TokenNotFoundError for nonexistent token""" with pytest.raises(TokenNotFoundError): - token_results(cursor=db_cursor, token=str(uuid.uuid4())) + get_session_token_user_data(cursor=db_cursor, token=str(uuid.uuid4())) From 5f00c5a7748aa34bad4a3b1c51709cc421d126fa Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 1 Jun 2024 21:14:58 -0400 Subject: [PATCH 112/127] =?UTF-8?q?=E2=9C=A8=20Add=20test=5FRefreshSession?= =?UTF-8?q?.py=20with=20refresh=20session=20test=20cases?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ℹ️ Added test_RefreshSession.py file with test cases for refreshing session functionality, including happy path, token not found, and unexpected error scenarios. 
--- tests/resources/test_RefreshSession.py | 134 +++++++++++++++++++++++++ 1 file changed, 134 insertions(+) create mode 100644 tests/resources/test_RefreshSession.py diff --git a/tests/resources/test_RefreshSession.py b/tests/resources/test_RefreshSession.py new file mode 100644 index 00000000..839d9972 --- /dev/null +++ b/tests/resources/test_RefreshSession.py @@ -0,0 +1,134 @@ +from unittest.mock import MagicMock + +import pytest + +from middleware.custom_exceptions import TokenNotFoundError +from middleware.login_queries import SessionTokenUserData +from tests.fixtures import client_with_mock_db +from tests.helper_functions import check_response_status + + +@pytest.fixture +def mock_cursor(client_with_mock_db): + return client_with_mock_db.mock_db.cursor.return_value + + +@pytest.fixture +def mock_get_session_token_user_data(monkeypatch): + mock = MagicMock() + monkeypatch.setattr("resources.RefreshSession.get_session_token_user_data", mock) + return mock + + +@pytest.fixture +def mock_delete_session_token(monkeypatch): + mock = MagicMock() + monkeypatch.setattr("resources.RefreshSession.delete_session_token", mock) + return mock + + +@pytest.fixture +def mock_create_session_token(monkeypatch): + mock = MagicMock() + monkeypatch.setattr("resources.RefreshSession.create_session_token", mock) + return mock + + +def test_post_refresh_session_happy_path( + client_with_mock_db, + mock_cursor, + mock_get_session_token_user_data, + mock_delete_session_token, + mock_create_session_token, +): + test_session_token_user_data = SessionTokenUserData( + id="test_id", email="test_email" + ) + mock_get_session_token_user_data.return_value = test_session_token_user_data + mock_create_session_token.return_value = "new_test_session_token" + + response = client_with_mock_db.client.post( + "/refresh-session", + json={ + "session_token": "old_test_session_token", + }, + ) + check_response_status(response, 200) + assert response.json == { + "message": "Successfully refreshed session 
token", + "data": "new_test_session_token", + } + mock_get_session_token_user_data.assert_called_once_with( + mock_cursor, "old_test_session_token" + ) + mock_delete_session_token.assert_called_once_with( + mock_cursor, "old_test_session_token" + ) + mock_create_session_token.assert_called_once_with( + mock_cursor, test_session_token_user_data.id, test_session_token_user_data.email + ) + client_with_mock_db.mock_db.commit.assert_called_once() + + +def test_post_refresh_session_token_not_found( + client_with_mock_db, + mock_cursor, + mock_get_session_token_user_data, + mock_delete_session_token, + mock_create_session_token, +): + """ + Test that RefreshSessionPost behaves as expected when the session token is not found + :param client_with_mock_db: + :return: + """ + mock_get_session_token_user_data.side_effect = TokenNotFoundError + response = client_with_mock_db.client.post( + "/refresh-session", + json={ + "session_token": "old_test_session_token", + }, + ) + + check_response_status(response, 403) + assert response.json == { + "message": "Invalid session token", + } + mock_get_session_token_user_data.assert_called_once_with( + mock_cursor, "old_test_session_token" + ) + mock_delete_session_token.assert_not_called() + mock_create_session_token.assert_not_called() + client_with_mock_db.mock_db.commit.assert_not_called() + + +def test_post_refresh_session_unexpected_error( + client_with_mock_db, + mock_cursor, + mock_get_session_token_user_data, + mock_delete_session_token, + mock_create_session_token, +): + """ + Test that RefreshSessionPost behaves as expected when there is an unexpected error + :param client_with_mock_db: + :return: + """ + mock_get_session_token_user_data.side_effect = Exception("An unexpected error occurred") + response = client_with_mock_db.client.post( + "/refresh-session", + json={ + "session_token": "old_test_session_token", + }, + ) + + check_response_status(response, 500) + assert response.json == { + "message": "An unexpected error 
occurred", + } + mock_get_session_token_user_data.assert_called_once_with( + mock_cursor, "old_test_session_token" + ) + mock_delete_session_token.assert_not_called() + mock_create_session_token.assert_not_called() + client_with_mock_db.mock_db.commit.assert_not_called() From 04de0449db7ede2d71d3254e368fb19f186c91c9 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 1 Jun 2024 21:57:31 -0400 Subject: [PATCH 113/127] =?UTF-8?q?=F0=9F=90=9B=20fix(middleware):=20Updat?= =?UTF-8?q?e=20INSERT=5FLOG=5FQUERY=20to=20remove=20redundant=20datetime?= =?UTF-8?q?=20field?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Removed the redundant 'datetime_of_request' field from the INSERT_LOG_QUERY in the quick_search_query.py file in the middleware directory - This fixes an issue where unnecessary data was being inserted into the quick_search_query_logs table - The query now only includes essential fields for logging search queries and results --- middleware/quick_search_query.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/middleware/quick_search_query.py b/middleware/quick_search_query.py index 10681244..8213a65b 100644 --- a/middleware/quick_search_query.py +++ b/middleware/quick_search_query.py @@ -52,7 +52,7 @@ """ -INSERT_LOG_QUERY = "INSERT INTO quick_search_query_logs (search, location, results, result_count, created_at, datetime_of_request) VALUES ('{0}', '{1}', '{2}', '{3}', '{4}', '{4}')" +INSERT_LOG_QUERY = "INSERT INTO quick_search_query_logs (search, location, results, result_count) VALUES ('{0}', '{1}', '{2}', '{3}')" def unaltered_search_query( @@ -148,14 +148,11 @@ def quick_search_query( "data": data_source_matches_converted, } - current_datetime = datetime.datetime.now() - datetime_string = current_datetime.strftime("%Y-%m-%d %H:%M:%S") - query_results = json.dumps(data_sources["data"]).replace("'", "") cursor.execute( INSERT_LOG_QUERY.format( - search, location, query_results, 
data_sources["count"], datetime_string + search, location, query_results, data_sources["count"] ), ) conn.commit() From 6a8a2cf3077365463dc824133e49f7bdb9ff294b Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 1 Jun 2024 21:57:56 -0400 Subject: [PATCH 114/127] Update timestamp field name in query --- tests/helper_functions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/helper_functions.py b/tests/helper_functions.py index 70cd249c..f0a4c94b 100644 --- a/tests/helper_functions.py +++ b/tests/helper_functions.py @@ -151,8 +151,8 @@ def get_most_recent_quick_search_query_log( """ cursor.execute( """ - SELECT RESULT_COUNT, DATETIME_OF_REQUEST FROM QUICK_SEARCH_QUERY_LOGS WHERE - search = %s AND location = %s ORDER BY DATETIME_OF_REQUEST DESC LIMIT 1 + SELECT RESULT_COUNT, CREATED_AT FROM QUICK_SEARCH_QUERY_LOGS WHERE + search = %s AND location = %s ORDER BY CREATED_AT DESC LIMIT 1 """, (search, location), ) From 1c0a8a0cb8847818acee12502a7930acc4e4fe28 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 1 Jun 2024 21:58:10 -0400 Subject: [PATCH 115/127] Refactor code in resources/QuickSearch.py - Removed redundant psycopg2_connection initialization and query duplication to improve code readability and efficiency. 
--- resources/QuickSearch.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/resources/QuickSearch.py b/resources/QuickSearch.py index 7b1c70b1..c6ef448b 100644 --- a/resources/QuickSearch.py +++ b/resources/QuickSearch.py @@ -40,12 +40,6 @@ def get(self, search: str, location: str) -> Dict[str, Any]: search, location, self.psycopg2_connection ) - if data_sources["count"] == 0: - self.psycopg2_connection = initialize_psycopg2_connection() - data_sources = quick_search_query( - search, location, self.psycopg2_connection - ) - if data_sources["count"] == 0: return { "count": 0, From 24a9a57aa7cd5be49bb56b5419fcab27f7157d4b Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 1 Jun 2024 21:58:47 -0400 Subject: [PATCH 116/127] Fix bug in test_search_tokens.py --- tests/integration/test_search_tokens.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_search_tokens.py b/tests/integration/test_search_tokens.py index 45d0b39b..bf346ede 100644 --- a/tests/integration/test_search_tokens.py +++ b/tests/integration/test_search_tokens.py @@ -22,8 +22,8 @@ def test_search_tokens_get( assert response.status_code == 200 data = response.json.get("data") assert ( - data["count"] == 1 + len(data) == 1 ), "Quick Search endpoint response should return only one entry" - entry = data["data"][0] + entry = data[0] assert entry["agency_name"] == "Agency A" assert entry["airtable_uid"] == "SOURCE_UID_1" From dcacd66192a795262712386d326174492375df5f Mon Sep 17 00:00:00 2001 From: maxachis Date: Sat, 1 Jun 2024 21:58:56 -0400 Subject: [PATCH 117/127] Add tests for quick search functionality in test_QuickSearch.py" --- tests/resources/test_QuickSearch.py | 38 +++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 tests/resources/test_QuickSearch.py diff --git a/tests/resources/test_QuickSearch.py b/tests/resources/test_QuickSearch.py new file mode 100644 index 00000000..586b3cdb --- /dev/null +++ 
b/tests/resources/test_QuickSearch.py @@ -0,0 +1,38 @@ +from unittest.mock import patch, MagicMock + +import pytest + +from tests.helper_functions import check_response_status + +patch("middleware.security.api_required", lambda x: x).start() +from tests.fixtures import client_with_mock_db + +@pytest.fixture +def mock_quick_search_query(monkeypatch): + mock = MagicMock() + monkeypatch.setattr("resources.QuickSearch.quick_search_query", mock) + return mock + + +def test_get_quick_search_results_found(client_with_mock_db, mock_quick_search_query): + mock_quick_search_query.return_value = { + "count": "1", + "data": [{"id": "test_id", "name": "test_name"}], + } + response = client_with_mock_db.client.get("/quick-search/test_search/test_location") + check_response_status(response, 200) + response_json = response.json + assert response_json["data"]["data"] == [{'id': 'test_id', 'name': 'test_name'}] + assert response_json["data"]["count"] == '1' + assert response_json["message"] == "Results for search successfully retrieved" + +def test_get_quick_search_results_not_found(client_with_mock_db, mock_quick_search_query): + mock_quick_search_query.return_value = { + "count": 0, + "data": [], + } + response = client_with_mock_db.client.get("/quick-search/test_search/test_location") + check_response_status(response, 404) + response_json = response.json + assert response_json["count"] == 0 + assert response_json["message"] == "No results found. Please considering requesting a new data source." 
From 8f2aa2dce70dfbb2a85fa6c377d4fff67efe6c17 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sun, 2 Jun 2024 00:41:27 -0400 Subject: [PATCH 118/127] =?UTF-8?q?=E2=9C=A8=F0=9F=94=8D=20Refactor=20test?= =?UTF-8?q?=20functions=20to=20use=20helper=20function=20for=20checking=20?= =?UTF-8?q?response=20status=20code=20and=20add=20missing=20source=5Furl?= =?UTF-8?q?=20key=20check=20in=20test=5Fdata=5Fsources=5Fby=5Fid.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- tests/integration/test_data_sources_by_id.py | 2 +- tests/integration/test_quick_search.py | 3 ++- tests/integration/test_search_tokens.py | 8 ++++++-- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/tests/integration/test_data_sources_by_id.py b/tests/integration/test_data_sources_by_id.py index dbe1ea00..eccc81b3 100644 --- a/tests/integration/test_data_sources_by_id.py +++ b/tests/integration/test_data_sources_by_id.py @@ -24,7 +24,7 @@ def test_data_sources_by_id_get( headers={"Authorization": f"Bearer {api_key}"}, ) assert response.status_code == 200 - assert response.json["data"]["source_url"] == "http://src1.com" + assert response.json["source_url"] == "http://src1.com" def test_data_sources_by_id_put( diff --git a/tests/integration/test_quick_search.py b/tests/integration/test_quick_search.py index 4ee32c1f..40849fec 100644 --- a/tests/integration/test_quick_search.py +++ b/tests/integration/test_quick_search.py @@ -6,6 +6,7 @@ from tests.helper_functions import ( create_test_user_api, create_api_key, + check_response_status, ) @@ -28,7 +29,7 @@ def test_quick_search_get(client_with_db, connection_with_test_data): f"/quick-search/{encoded_search_term}/{encoded_location}", headers={"Authorization": f"Bearer {api_key}"}, ) - assert response.status_code == 200, "Quick Search endpoint call was not successful" + check_response_status(response, 200) data = response.json.get("data") assert ( data["count"] == 1 diff --git 
a/tests/integration/test_search_tokens.py b/tests/integration/test_search_tokens.py index 45d0b39b..c5759d86 100644 --- a/tests/integration/test_search_tokens.py +++ b/tests/integration/test_search_tokens.py @@ -3,7 +3,11 @@ import psycopg2 import pytest from tests.fixtures import connection_with_test_data, dev_db_connection, client_with_db -from tests.helper_functions import create_test_user_api, create_api_key +from tests.helper_functions import ( + create_test_user_api, + create_api_key, + check_response_status, +) def test_search_tokens_get( @@ -19,7 +23,7 @@ def test_search_tokens_get( headers={"Authorization": f"Bearer {api_key}"}, query_string={"endpoint": "quick-search", "arg1": "Source 1", "arg2": "City A"}, ) - assert response.status_code == 200 + check_response_status(response, 200) data = response.json.get("data") assert ( data["count"] == 1 From 745093590adadd95dbee4a138eb97abfaad954d1 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sun, 2 Jun 2024 12:28:00 -0400 Subject: [PATCH 119/127] refactor(api): Update quick_search_query.py function signatures Refactored the function signatures in quick_search_query.py to improve readability and maintainability. Updated the function quick_search_query_wrapper to include type hints for arguments and return value. Made adjustments to the INSERT_LOG_QUERY format function call for consistency. 
--- middleware/quick_search_query.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/middleware/quick_search_query.py b/middleware/quick_search_query.py index bd107d69..dc09d385 100644 --- a/middleware/quick_search_query.py +++ b/middleware/quick_search_query.py @@ -2,7 +2,7 @@ import json import datetime -from flask import make_response +from flask import make_response, Response from sqlalchemy.dialects.postgresql import psycopg2 from middleware.webhook_logic import post_to_webhook @@ -156,9 +156,7 @@ def quick_search_query( query_results = json.dumps(data_sources["data"]).replace("'", "") cursor.execute( - INSERT_LOG_QUERY.format( - search, location, query_results, data_sources["count"] - ), + INSERT_LOG_QUERY.format(search, location, query_results, data_sources["count"]), ) conn.commit() cursor.close() @@ -166,9 +164,9 @@ def quick_search_query( return data_sources -def quick_search_query_wrapper(arg1, arg2, conn: PgConnection): +def quick_search_query_wrapper(arg1, arg2, conn: PgConnection) -> Response: try: - data_sources = quick_search_query(arg1, arg2, conn=conn) + data_sources = quick_search_query(search=arg1, location=arg2, conn=conn) return make_response(data_sources, 200) @@ -185,4 +183,4 @@ def quick_search_query_wrapper(arg1, arg2, conn: PgConnection): } post_to_webhook(json.dumps(message)) - return {"count": 0, "message": user_message}, 500 + return make_response({"count": 0, "message": user_message}, 500) From be682c7da5347fe51e6d79a717b924d0b37fcb16 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sun, 2 Jun 2024 12:29:07 -0400 Subject: [PATCH 120/127] =?UTF-8?q?=E2=9C=A8=20Add=20Response=20return=20t?= =?UTF-8?q?ype=20to=20data=5Fsource=5Fby=5Fid=5Fwrapper=20function?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ℹ️ Enhance data_source_by_id_wrapper function to explicitly return a Response object for clarity and consistency. 
--- middleware/data_source_queries.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/middleware/data_source_queries.py b/middleware/data_source_queries.py index e3c6bed2..f4c70e3b 100644 --- a/middleware/data_source_queries.py +++ b/middleware/data_source_queries.py @@ -1,6 +1,6 @@ from typing import List, Dict, Any, Optional, Tuple, Union -from flask import make_response +from flask import make_response, Response from sqlalchemy.dialects.postgresql import psycopg2 from utilities.common import convert_dates_to_strings, format_arrays @@ -104,8 +104,8 @@ def get_approved_data_sources_wrapper(conn: PgConnection): ) -def data_source_by_id_wrapper(arg, conn: PgConnection): - data_source_details = data_source_by_id_query(arg, conn=conn) +def data_source_by_id_wrapper(arg, conn: PgConnection) -> Response: + data_source_details = data_source_by_id_query(data_source_id=arg, conn=conn) if data_source_details: return make_response(data_source_details, 200) From ba6c4ca0ce945ea8fe8b649a21ea85904d8782e5 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sun, 2 Jun 2024 12:29:26 -0400 Subject: [PATCH 121/127] =?UTF-8?q?=E2=9C=A8=20Add=20test=20for=20data=5Fs?= =?UTF-8?q?ource=5Fby=5Fid=5Fwrapper=20function?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ℹ️ Added tests for the data_source_by_id_wrapper function to cover scenarios when data is found and when data is not found. 
--- tests/middleware/test_data_source_queries.py | 36 ++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/tests/middleware/test_data_source_queries.py b/tests/middleware/test_data_source_queries.py index fb20c658..98276de8 100644 --- a/tests/middleware/test_data_source_queries.py +++ b/tests/middleware/test_data_source_queries.py @@ -1,3 +1,5 @@ +from unittest.mock import MagicMock + import psycopg2 import pytest @@ -8,6 +10,7 @@ data_source_by_id_results, data_source_by_id_query, get_data_sources_for_map, + data_source_by_id_wrapper, ) from tests.helper_functions import ( get_boolean_dictionary, @@ -153,3 +156,36 @@ def test_convert_data_source_matches(): ) == testcase["output"] ) + + +@pytest.fixture +def mock_make_response(monkeypatch): + mock = MagicMock() + monkeypatch.setattr("middleware.data_source_queries.make_response", mock) + return mock + + +@pytest.fixture +def mock_data_source_by_id_query(monkeypatch): + mock = MagicMock() + monkeypatch.setattr("middleware.data_source_queries.data_source_by_id_query", mock) + return mock + + +def test_data_source_by_id_wrapper_data_found(mock_data_source_by_id_query, mock_make_response): + mock_data_source_by_id_query.return_value = {"agency_name": "Agency A"} + mock_conn = MagicMock() + data_source_by_id_wrapper(arg="SOURCE_UID_1", conn=mock_conn) + mock_data_source_by_id_query.assert_called_with( + data_source_id="SOURCE_UID_1", conn=mock_conn + ) + mock_make_response.assert_called_with({"agency_name": "Agency A"}, 200) + +def test_data_source_by_id_wrapper_data_not_found(mock_data_source_by_id_query, mock_make_response): + mock_data_source_by_id_query.return_value = None + mock_conn = MagicMock() + data_source_by_id_wrapper(arg="SOURCE_UID_1", conn=mock_conn) + mock_data_source_by_id_query.assert_called_with( + data_source_id="SOURCE_UID_1", conn=mock_conn + ) + mock_make_response.assert_called_with({"message": "Data source not found."}, 404) \ No newline at end of file From 
43d80ace9993f97f5b66bf988dd3dd961e163329 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sun, 2 Jun 2024 12:29:59 -0400 Subject: [PATCH 122/127] =?UTF-8?q?=E2=9A=A1=EF=B8=8F=20refactor:=20refact?= =?UTF-8?q?or=20quick=20search=20endpoint=20response=20handling=20in=20int?= =?UTF-8?q?egration=20test?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- tests/integration/test_quick_search.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/integration/test_quick_search.py b/tests/integration/test_quick_search.py index 40849fec..42311075 100644 --- a/tests/integration/test_quick_search.py +++ b/tests/integration/test_quick_search.py @@ -31,9 +31,7 @@ def test_quick_search_get(client_with_db, connection_with_test_data): ) check_response_status(response, 200) data = response.json.get("data") - assert ( - data["count"] == 1 - ), "Quick Search endpoint response should return only one entry" - entry = data["data"][0] + assert len(data) == 1, "Quick Search endpoint response should return only one entry" + entry = data[0] assert entry["agency_name"] == "Agency A" assert entry["airtable_uid"] == "SOURCE_UID_1" From f88860688ebb74230356c71d0ba17c82a653a7a7 Mon Sep 17 00:00:00 2001 From: maxachis Date: Sun, 2 Jun 2024 12:30:39 -0400 Subject: [PATCH 123/127] =?UTF-8?q?=F0=9F=92=BB=20=20Add=20test=20cases=20?= =?UTF-8?q?for=20quick=20search=20query=20middleware=20and=20eliminate=20n?= =?UTF-8?q?ow-redundant=20tests=20of=20associated=20Resource=20Class?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- tests/middleware/test_quick_search_query.py | 80 ++++++++++++++++++++- tests/resources/test_QuickSearch.py | 38 ---------- 2 files changed, 77 insertions(+), 41 deletions(-) delete mode 100644 tests/resources/test_QuickSearch.py diff --git a/tests/middleware/test_quick_search_query.py b/tests/middleware/test_quick_search_query.py index 985da803..71ead947 100644 --- 
a/tests/middleware/test_quick_search_query.py +++ b/tests/middleware/test_quick_search_query.py @@ -1,17 +1,20 @@ +import json +from unittest.mock import MagicMock + import psycopg2 +import pytest from middleware.quick_search_query import ( unaltered_search_query, quick_search_query, QUICK_SEARCH_COLUMNS, + quick_search_query_wrapper, ) from tests.helper_functions import ( has_expected_keys, get_most_recent_quick_search_query_log, ) -from tests.fixtures import ( - connection_with_test_data, dev_db_connection -) +from tests.fixtures import connection_with_test_data, dev_db_connection def test_unaltered_search_query( @@ -78,3 +81,74 @@ def test_quick_search_query_results( search="Source 3", location="City C", conn=connection_with_test_data ) assert len(results["data"]) == 0 + + +def test_quick_search_query_no_results( + connection_with_test_data: psycopg2.extensions.connection, +) -> None: + """ + Test the `quick_search_query` method returns no results when there are no matches + + :param connection_with_test_data: The connection to the test data database. 
+ :return: None + """ + results = quick_search_query( + search="Nonexistent Source", + location="Nonexistent Location", + conn=connection_with_test_data, + ) + assert len(results["data"]) == 0 + + +@pytest.fixture +def mock_quick_search_query(monkeypatch): + mock = MagicMock() + monkeypatch.setattr("middleware.quick_search_query.quick_search_query", mock) + return mock + + +@pytest.fixture +def mock_make_response(monkeypatch): + mock = MagicMock() + monkeypatch.setattr("middleware.quick_search_query.make_response", mock) + return mock + + +@pytest.fixture +def mock_post_to_webhook(monkeypatch): + mock = MagicMock() + monkeypatch.setattr("middleware.quick_search_query.post_to_webhook", mock) + return mock + + +def test_quick_search_query_wrapper_happy_path( + mock_quick_search_query, mock_make_response +): + mock_quick_search_query.return_value = [{"record_type": "Type A"}] + mock_conn = MagicMock() + quick_search_query_wrapper(arg1="Source 1", arg2="City A", conn=mock_conn) + mock_quick_search_query.assert_called_with( + search="Source 1", location="City A", conn=mock_conn + ) + mock_make_response.assert_called_with([{"record_type": "Type A"}], 200) + + +def test_quick_search_query_wrapper_exception( + mock_quick_search_query, mock_make_response, mock_post_to_webhook +): + mock_quick_search_query.side_effect = Exception("Test Exception") + arg1 = "Source 1" + arg2 = "City A" + mock_conn = MagicMock() + quick_search_query_wrapper(arg1=arg1, arg2=arg2, conn=mock_conn) + mock_quick_search_query.assert_called_with( + search=arg1, location=arg2, conn=mock_conn + ) + mock_conn.rollback.assert_called_once() + user_message = "There was an error during the search operation" + mock_post_to_webhook.assert_called_with( + json.dumps({'content': 'There was an error during the search operation: Test Exception\nSearch term: Source 1\nLocation: City A'}) + ) + mock_make_response.assert_called_with( + {"count": 0, "message": user_message}, 500 + ) diff --git 
a/tests/resources/test_QuickSearch.py b/tests/resources/test_QuickSearch.py deleted file mode 100644 index 586b3cdb..00000000 --- a/tests/resources/test_QuickSearch.py +++ /dev/null @@ -1,38 +0,0 @@ -from unittest.mock import patch, MagicMock - -import pytest - -from tests.helper_functions import check_response_status - -patch("middleware.security.api_required", lambda x: x).start() -from tests.fixtures import client_with_mock_db - -@pytest.fixture -def mock_quick_search_query(monkeypatch): - mock = MagicMock() - monkeypatch.setattr("resources.QuickSearch.quick_search_query", mock) - return mock - - -def test_get_quick_search_results_found(client_with_mock_db, mock_quick_search_query): - mock_quick_search_query.return_value = { - "count": "1", - "data": [{"id": "test_id", "name": "test_name"}], - } - response = client_with_mock_db.client.get("/quick-search/test_search/test_location") - check_response_status(response, 200) - response_json = response.json - assert response_json["data"]["data"] == [{'id': 'test_id', 'name': 'test_name'}] - assert response_json["data"]["count"] == '1' - assert response_json["message"] == "Results for search successfully retrieved" - -def test_get_quick_search_results_not_found(client_with_mock_db, mock_quick_search_query): - mock_quick_search_query.return_value = { - "count": 0, - "data": [], - } - response = client_with_mock_db.client.get("/quick-search/test_search/test_location") - check_response_status(response, 404) - response_json = response.json - assert response_json["count"] == 0 - assert response_json["message"] == "No results found. Please considering requesting a new data source." 
From d4b365d590e6929794e1b8787189a53a678a3b9c Mon Sep 17 00:00:00 2001 From: maxachis Date: Sun, 2 Jun 2024 12:31:13 -0400 Subject: [PATCH 124/127] =?UTF-8?q?=E2=9A=99=EF=B8=8F:=20remove=20now-redu?= =?UTF-8?q?ndant=20Resource=20tests=20in=20test=5FDataSources.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- tests/resources/test_DataSources.py | 25 ------------------------- 1 file changed, 25 deletions(-) diff --git a/tests/resources/test_DataSources.py b/tests/resources/test_DataSources.py index f08ef098..bfdc1628 100644 --- a/tests/resources/test_DataSources.py +++ b/tests/resources/test_DataSources.py @@ -4,31 +4,6 @@ patch("middleware.security.api_required", lambda x: x).start() from tests.fixtures import client_with_mock_db -@patch("resources.DataSources.data_source_by_id_query") -def test_get_data_source_by_id_found( - mock_data_source_by_id_query, - client_with_mock_db, -): - mock_data_source_by_id_query.return_value = {"name": "Test Data Source"} - response = client_with_mock_db.client.get("/data-sources-by-id/test_id") - assert response.json == { - "message": "Successfully found data source", - "data": {"name": "Test Data Source"}, - } - assert response.status_code == 200 - - - -@patch("resources.DataSources.data_source_by_id_query") -def test_get_data_source_by_id_not_found( - mock_data_source_by_id_query, - client_with_mock_db, -): - mock_data_source_by_id_query.return_value = None - response = client_with_mock_db.client.get("/data-sources-by-id/test_id") - assert response.json == {"message": "Data source not found."} - assert response.status_code == 200 - def test_put_data_source_by_id( client_with_mock_db, monkeypatch ): From 4e85a2b15d40a12fb7d3165ee318425fa33ac3cb Mon Sep 17 00:00:00 2001 From: maxachis Date: Sun, 2 Jun 2024 12:45:12 -0400 Subject: [PATCH 125/127] =?UTF-8?q?=E2=9A=99=EF=B8=8F=20Adjust=20status=20?= =?UTF-8?q?code=20for=20data=20source=20not=20found=20response?= MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ℹ️ Fix the status code returned when a data source is not found to be 200 instead of 404 in middleware/data_source_queries.py and tests/middleware/test_data_source_queries.py. --- middleware/data_source_queries.py | 2 +- tests/middleware/test_data_source_queries.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/middleware/data_source_queries.py b/middleware/data_source_queries.py index f4c70e3b..ff823bce 100644 --- a/middleware/data_source_queries.py +++ b/middleware/data_source_queries.py @@ -110,7 +110,7 @@ def data_source_by_id_wrapper(arg, conn: PgConnection) -> Response: return make_response(data_source_details, 200) else: - return make_response({"message": "Data source not found."}, 404) + return make_response({"message": "Data source not found."}, 200) def get_data_sources_for_map_wrapper(conn: PgConnection): diff --git a/tests/middleware/test_data_source_queries.py b/tests/middleware/test_data_source_queries.py index 98276de8..201ca518 100644 --- a/tests/middleware/test_data_source_queries.py +++ b/tests/middleware/test_data_source_queries.py @@ -188,4 +188,4 @@ def test_data_source_by_id_wrapper_data_not_found(mock_data_source_by_id_query, mock_data_source_by_id_query.assert_called_with( data_source_id="SOURCE_UID_1", conn=mock_conn ) - mock_make_response.assert_called_with({"message": "Data source not found."}, 404) \ No newline at end of file + mock_make_response.assert_called_with({"message": "Data source not found."}, 200) \ No newline at end of file From 6ca9cf84177e69ada472185e739aa98d5615f03d Mon Sep 17 00:00:00 2001 From: maxachis Date: Mon, 3 Jun 2024 12:23:00 -0400 Subject: [PATCH 126/127] =?UTF-8?q?=E2=9A=A1=EF=B8=8F=20Refactor=20and=20u?= =?UTF-8?q?pdate=20security=20middleware=20and=20tests=20for=20API=20key?= =?UTF-8?q?=20validation=20and=20role=20checking.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 
--- middleware/security.py | 162 ++++++++++++++++--------- tests/middleware/test_security.py | 188 ++++++++++++++++++++++++++---- 2 files changed, 272 insertions(+), 78 deletions(-) diff --git a/middleware/security.py b/middleware/security.py index b2dab8cf..84c96782 100644 --- a/middleware/security.py +++ b/middleware/security.py @@ -1,17 +1,33 @@ import functools from collections import namedtuple -from flask import request, jsonify +from http import HTTPStatus +from flask import request from middleware.initialize_psycopg2_connection import initialize_psycopg2_connection from datetime import datetime as dt from middleware.login_queries import is_admin -from middleware.custom_exceptions import UserNotFoundError -from typing import Tuple +from typing import Tuple, Optional APIKeyStatus = namedtuple("APIKeyStatus", ["is_valid", "is_expired"]) -def is_valid(api_key: str, endpoint: str, method: str) -> APIKeyStatus: +class NoAPIKeyError(Exception): + pass + + +class ExpiredAPIKeyError(Exception): + pass + + +class InvalidAPIKeyError(Exception): + pass + + +class InvalidRoleError(Exception): + pass + + +def validate_api_key(api_key: str, endpoint: str, method: str): """ Validates the API key and checks if the user has the required role to access a specific endpoint. @@ -21,42 +37,43 @@ def is_valid(api_key: str, endpoint: str, method: str) -> APIKeyStatus: :return: A tuple (isValid, isExpired) indicating whether the API key is valid and not expired. 
""" if not api_key: - return APIKeyStatus(is_valid=False, is_expired=False) + raise NoAPIKeyError("API key not provided") - session_token_results = None psycopg2_connection = initialize_psycopg2_connection() cursor = psycopg2_connection.cursor() role = get_role(api_key, cursor) - if role is None: - session_token_results = get_session_token( - api_key, cursor, session_token_results - ) - if len(session_token_results) > 0: - email = session_token_results[0][0] - expiration_date = session_token_results[0][1] - print(expiration_date, dt.utcnow()) + if role: + validate_role(role, endpoint, method) + return + + session_token_results = get_session_token(api_key, cursor) + if session_token_results: - if expiration_date < dt.utcnow(): - return APIKeyStatus(False, is_expired=True) + if session_token_results.expiration_date < dt.utcnow(): + raise ExpiredAPIKeyError("Session token expired") - if is_admin(cursor, email): - role = "admin" + if is_admin(cursor, session_token_results.email): + validate_role(role="admin", endpoint=endpoint, method=method) + return - if not session_token_results and role is None: + if not session_token_results: delete_expired_access_tokens(cursor, psycopg2_connection) access_token = get_access_token(api_key, cursor) role = "user" if not access_token: - return APIKeyStatus(is_valid=False, is_expired=False) + raise InvalidAPIKeyError("API Key not found") + + validate_role(role, endpoint, method) - if is_admin_only_action(endpoint, method) and role != "admin": - return APIKeyStatus(is_valid=False, is_expired=False) +def validate_role(role: str, endpoint: str, method: str): # Compare the API key in the user table to the API in the request header and proceed # through the protected route if it's valid. 
Otherwise, compare_digest will return False # and api_required will send an error message to provide a valid API key - return APIKeyStatus(is_valid=True, is_expired=False) + if is_admin_only_action(endpoint, method) and role != "admin": + raise InvalidRoleError("You do not have permission to access this endpoint") + def get_role(api_key, cursor): @@ -70,16 +87,25 @@ def get_role(api_key, cursor): return None -def get_session_token(api_key, cursor, session_token_results): +SessionTokenResults = namedtuple("SessionTokenResults", ["email", "expiration_date"]) + + +def get_session_token(api_key, cursor) -> Optional[SessionTokenResults]: cursor.execute( - f"select email, expiration_date from session_tokens where token = '{api_key}'" + f"select email, expiration_date from session_tokens where token = %s", + (api_key,), ) session_token_results = cursor.fetchall() - return session_token_results + if len(session_token_results) > 0: + return SessionTokenResults( + email=session_token_results[0][0], + expiration_date=session_token_results[0][1], + ) + return None def get_access_token(api_key, cursor): - cursor.execute(f"select id, token from access_tokens where token = '{api_key}'") + cursor.execute(f"select id, token from access_tokens where token = %s", (api_key,)) results = cursor.fetchone() if results: return results[1] @@ -87,7 +113,7 @@ def get_access_token(api_key, cursor): def delete_expired_access_tokens(cursor, psycopg2_connection): - cursor.execute(f"delete from access_tokens where expiration_date < '{dt.utcnow()}'") + cursor.execute(f"delete from access_tokens where expiration_date < NOW()") psycopg2_connection.commit() @@ -95,6 +121,56 @@ def is_admin_only_action(endpoint, method): return endpoint in ("datasources", "datasourcebyid") and method in ("PUT", "POST") +class InvalidHeader(Exception): + + def __init__(self, message: str): + super().__init__(message) + + +def validate_header() -> str: + """ + Validates the API key and checks if the user has the required 
role to access a specific endpoint. + :return: + """ + if not request.headers or "Authorization" not in request.headers: + raise InvalidHeader( + "Please provide an 'Authorization' key in the request header" + ) + + authorization_header = request.headers["Authorization"].split(" ") + if len(authorization_header) < 2 or authorization_header[0] != "Bearer": + raise InvalidHeader( + "Please provide a properly formatted bearer token and API key" + ) + + api_key = authorization_header[1] + if api_key == "undefined": + raise InvalidHeader("Please provide an API key") + return api_key + + +def validate_token() -> Optional[Tuple[dict, int]]: + """ + Validates the API key and checks if the user has the required role to access a specific endpoint. + :return: + """ + try: + api_key = validate_header() + except InvalidHeader as e: + return {"message": str(e)}, HTTPStatus.BAD_REQUEST.value + # Check if API key is correct and valid + try: + validate_api_key(api_key, request.endpoint, request.method) + except NoAPIKeyError as e: + return {"message": str(e)}, HTTPStatus.BAD_REQUEST.value + except ExpiredAPIKeyError as e: + return {"message": str(e)}, HTTPStatus.UNAUTHORIZED.value + except InvalidRoleError as e: + return {"message": str(e)}, HTTPStatus.FORBIDDEN.value + + return None + + def api_required(func): """ The api_required decorator can be added to protect a route so that only authenticated users can access the information @@ -105,31 +181,9 @@ def api_required(func): @functools.wraps(func) def decorator(*args, **kwargs): - api_key = None - if request.headers and "Authorization" in request.headers: - authorization_header = request.headers["Authorization"].split(" ") - if len(authorization_header) >= 2 and authorization_header[0] == "Bearer": - api_key = request.headers["Authorization"].split(" ")[1] - if api_key == "undefined": - return {"message": "Please provide an API key"}, 400 - else: - return { - "message": "Please provide a properly formatted bearer token and API 
key" - }, 400 - else: - return { - "message": "Please provide an 'Authorization' key in the request header" - }, 400 - # Check if API key is correct and valid - try: - api_key_status = is_valid(api_key, request.endpoint, request.method) - except UserNotFoundError as e: - return {"message": str(e)}, 401 - if api_key_status.is_valid: - return func(*args, **kwargs) - else: - if api_key_status.is_expired: - return {"message": "The provided API key has expired"}, 401 - return {"message": "The provided API key is not valid"}, 403 + validation_error = validate_token() + if validation_error: + return validation_error + return func(*args, **kwargs) return decorator diff --git a/tests/middleware/test_security.py b/tests/middleware/test_security.py index 9ebe03a6..65edd0fb 100644 --- a/tests/middleware/test_security.py +++ b/tests/middleware/test_security.py @@ -1,11 +1,27 @@ import datetime import uuid +from http import HTTPStatus +from typing import Callable +import flask import pytest -from unittest.mock import patch +from unittest.mock import patch, MagicMock +import requests +from flask import Flask + +from middleware import security +from middleware.custom_exceptions import UserNotFoundError from middleware.login_queries import create_session_token -from middleware.security import is_valid, APIKeyStatus +from middleware.security import ( + validate_api_key, + APIKeyStatus, + api_required, + NoAPIKeyError, + ExpiredAPIKeyError, + InvalidAPIKeyError, + InvalidRoleError, +) from tests.helper_functions import ( create_test_user, UserInfo, @@ -16,8 +32,8 @@ def test_no_api_key_provided(): - result = is_valid(api_key="", endpoint="", method="") - assert result == APIKeyStatus(is_valid=False, is_expired=False) + with pytest.raises(NoAPIKeyError) as e: + result = validate_api_key(api_key="", endpoint="", method="") def test_api_key_exists_in_users_table_with_admin_role(dev_db_connection): @@ -26,8 +42,8 @@ def 
test_api_key_exists_in_users_table_with_admin_role(dev_db_connection): give_user_admin_role(dev_db_connection, UserInfo(test_user.email, "")) api_key = create_api_key_db(cursor, test_user.id) dev_db_connection.commit() - result = is_valid(api_key, "", "") - assert result == APIKeyStatus(is_valid=True, is_expired=False) + result = validate_api_key(api_key, "", "") + assert result is None def test_api_key_exists_in_users_table_with_non_admin_role(dev_db_connection): @@ -35,8 +51,8 @@ def test_api_key_exists_in_users_table_with_non_admin_role(dev_db_connection): test_user = create_test_user(cursor) api_key = create_api_key_db(cursor, test_user.id) dev_db_connection.commit() - result = is_valid(api_key, "", "") - assert result == APIKeyStatus(is_valid=True, is_expired=False) + result = validate_api_key(api_key, "", "") + assert result is None def test_api_key_not_in_users_table_but_in_session_tokens_table(dev_db_connection): @@ -44,8 +60,8 @@ def test_api_key_not_in_users_table_but_in_session_tokens_table(dev_db_connectio test_user = create_test_user(cursor) token = create_session_token(cursor, test_user.id, test_user.email) dev_db_connection.commit() - result = is_valid(token, "", "") - assert result == APIKeyStatus(is_valid=True, is_expired=False) + result = validate_api_key(token, "", "") + assert result is None def test_expired_session_token(dev_db_connection): @@ -56,8 +72,8 @@ def test_expired_session_token(dev_db_connection): f"UPDATE session_tokens SET expiration_date = '{datetime.date(year=2020, month=3, day=4)}' WHERE token = '{token}'" ) dev_db_connection.commit() - result = is_valid(token, "", "") - assert result == APIKeyStatus(is_valid=False, is_expired=True) + with pytest.raises(ExpiredAPIKeyError): + result = validate_api_key(token, "", "") def test_session_token_with_admin_role(dev_db_connection): @@ -66,8 +82,8 @@ def test_session_token_with_admin_role(dev_db_connection): give_user_admin_role(dev_db_connection, UserInfo(test_user.email, "")) token = 
create_session_token(cursor, test_user.id, test_user.email) dev_db_connection.commit() - result = is_valid(token, "", "") - assert result == APIKeyStatus(is_valid=True, is_expired=False) + result = validate_api_key(token, "", "") + assert result is None def test_api_key_exists_in_access_tokens_table(dev_db_connection): @@ -79,14 +95,15 @@ def test_api_key_exists_in_access_tokens_table(dev_db_connection): (token, expiration), ) dev_db_connection.commit() - result = is_valid(token, "", "") - assert result == APIKeyStatus(is_valid=True, is_expired=False) + result = validate_api_key(token, "", "") + assert result is None def test_api_key_not_exist_in_any_table(dev_db_connection): token = uuid.uuid4().hex - result = is_valid(token, "", "") - assert result == APIKeyStatus(is_valid=False, is_expired=False) + with pytest.raises(InvalidAPIKeyError) as e: + result = validate_api_key(token, "", "") + assert "API Key not found" in str(e.value) def test_expired_access_token_in_access_tokens_table(dev_db_connection): @@ -98,8 +115,9 @@ def test_expired_access_token_in_access_tokens_table(dev_db_connection): (token, expiration), ) dev_db_connection.commit() - result = is_valid(token, "", "") - assert result == APIKeyStatus(is_valid=False, is_expired=False) + with pytest.raises(InvalidAPIKeyError) as e: + result = validate_api_key(token, "", "") + assert "API Key not found" in str(e.value) def test_admin_only_action_with_non_admin_role(dev_db_connection): @@ -107,8 +125,9 @@ def test_admin_only_action_with_non_admin_role(dev_db_connection): test_user = create_test_user(cursor) api_key = create_api_key_db(cursor, test_user.id) dev_db_connection.commit() - result = is_valid(api_key, "datasources", "PUT") - assert result == APIKeyStatus(is_valid=False, is_expired=False) + with pytest.raises(InvalidRoleError) as e: + result = validate_api_key(api_key, "datasources", "PUT") + assert "You do not have permission to access this endpoint" in str(e.value) def 
test_admin_only_action_with_admin_role(dev_db_connection): @@ -117,6 +136,127 @@ def test_admin_only_action_with_admin_role(dev_db_connection): give_user_admin_role(dev_db_connection, UserInfo(test_user.email, "")) api_key = create_api_key_db(cursor, test_user.id) dev_db_connection.commit() - result = is_valid(api_key, "datasources", "PUT") - assert result == APIKeyStatus(is_valid=True, is_expired=False) + result = validate_api_key(api_key, "datasources", "PUT") + assert result is None + + +@pytest.fixture +def app() -> Flask: + app = Flask(__name__) + return app + + +@pytest.fixture +def client(app: Flask): + return app.test_client() + + +@pytest.fixture +def mock_request_headers(monkeypatch): + mock = MagicMock() + monkeypatch.setattr(flask, "request", mock) + return mock + + +@pytest.fixture +def mock_validate_api_key(monkeypatch): + mock = MagicMock() + monkeypatch.setattr(security, "validate_api_key", mock) + return mock + + +@pytest.fixture +def dummy_route(): + @api_required + def _dummy_route(): + return "This is a protected route", HTTPStatus.OK.value + + return _dummy_route + + +def test_api_required_happy_path( + app, client, mock_request_headers, mock_validate_api_key, dummy_route: Callable +): + mock_validate_api_key.return_value = None + with app.test_request_context(headers={"Authorization": "Bearer valid_api_key"}): + response = dummy_route() + assert response == ("This is a protected route", HTTPStatus.OK.value) + + +def test_api_required_api_key_expired( + app, client, mock_request_headers, mock_validate_api_key, dummy_route: Callable +): + mock_validate_api_key.side_effect = ExpiredAPIKeyError("The provided API key has expired") + with app.test_request_context(headers={"Authorization": "Bearer valid_api_key"}): + response = dummy_route() + assert response == ({"message": "The provided API key has expired"}, HTTPStatus.UNAUTHORIZED.value) + + +def test_api_required_expired_api_key( + app, client, mock_request_headers, mock_validate_api_key, 
dummy_route: Callable +): + mock_validate_api_key.side_effect = ExpiredAPIKeyError("The provided API key has expired") + with app.test_request_context(headers={"Authorization": "Bearer expired_api_key"}): + response = dummy_route() + assert response == ( + {"message": "The provided API key has expired"}, + HTTPStatus.UNAUTHORIZED.value, + ) + + +def test_api_required_no_api_key_in_request_header( + app, client, mock_request_headers, mock_validate_api_key, dummy_route: Callable +): + with app.test_request_context(headers={"Authorization": "Bearer"}): + response = dummy_route() + assert response == ( + {"message": "Please provide a properly formatted bearer token and API key"}, + HTTPStatus.BAD_REQUEST.value, + ) + + +def test_api_required_invalid_role( + app, client, mock_request_headers, mock_validate_api_key, dummy_route: Callable +): + mock_validate_api_key.side_effect = InvalidRoleError( + "You do not have permission to access this endpoint" + ) + with app.test_request_context(headers={"Authorization": "Bearer valid_api_key"}): + response = dummy_route() + assert response == ( + {"message": "You do not have permission to access this endpoint"}, + HTTPStatus.FORBIDDEN.value, + ) + + +def test_api_required_not_authorization_key_in_request_header( + app, client, mock_request_headers, mock_validate_api_key, dummy_route: Callable +): + with app.test_request_context(headers={}): + response = dummy_route() + assert response == ( + {"message": "Please provide an 'Authorization' key in the request header"}, + HTTPStatus.BAD_REQUEST.value, + ) + + +def test_api_required_improperly_formatted_authorization_key( + app, client, mock_request_headers, mock_validate_api_key, dummy_route: Callable +): + with app.test_request_context(headers={"Authorization": "Bearer"}): + response = dummy_route() + assert response == ( + {"message": "Please provide a properly formatted bearer token and API key"}, + HTTPStatus.BAD_REQUEST.value, + ) + +def test_api_required_undefined_api_key( + 
app, client, mock_request_headers, mock_validate_api_key, dummy_route: Callable +): + with app.test_request_context(headers={"Authorization": "Bearer undefined"}): + response = dummy_route() + assert response == ( + {"message": "Please provide an API key"}, + HTTPStatus.BAD_REQUEST.value, + ) From 12fbb307bf45f9c83f1d05df9c39c296da2a9cf9 Mon Sep 17 00:00:00 2001 From: maxachis Date: Mon, 3 Jun 2024 12:38:52 -0400 Subject: [PATCH 127/127] Remove redundant NoAPIKeyError handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Removed redundant NoAPIKeyError handling in security middleware to improve code readability and maintainability.🛠️ --- middleware/security.py | 4 ---- tests/middleware/test_security.py | 6 ------ 2 files changed, 10 deletions(-) diff --git a/middleware/security.py b/middleware/security.py index 84c96782..c57731a9 100644 --- a/middleware/security.py +++ b/middleware/security.py @@ -36,8 +36,6 @@ def validate_api_key(api_key: str, endpoint: str, method: str): :param method: The HTTP method of the request. :return: A tuple (isValid, isExpired) indicating whether the API key is valid and not expired. 
""" - if not api_key: - raise NoAPIKeyError("API key not provided") psycopg2_connection = initialize_psycopg2_connection() cursor = psycopg2_connection.cursor() @@ -161,8 +159,6 @@ def validate_token() -> Optional[Tuple[dict, int]]: # Check if API key is correct and valid try: validate_api_key(api_key, request.endpoint, request.method) - except NoAPIKeyError as e: - return {"message": str(e)}, HTTPStatus.BAD_REQUEST.value except ExpiredAPIKeyError as e: return {"message": str(e)}, HTTPStatus.UNAUTHORIZED.value except InvalidRoleError as e: diff --git a/tests/middleware/test_security.py b/tests/middleware/test_security.py index 65edd0fb..16b0c86f 100644 --- a/tests/middleware/test_security.py +++ b/tests/middleware/test_security.py @@ -31,11 +31,6 @@ from tests.fixtures import dev_db_connection -def test_no_api_key_provided(): - with pytest.raises(NoAPIKeyError) as e: - result = validate_api_key(api_key="", endpoint="", method="") - - def test_api_key_exists_in_users_table_with_admin_role(dev_db_connection): cursor = dev_db_connection.cursor() test_user = create_test_user(cursor) @@ -139,7 +134,6 @@ def test_admin_only_action_with_admin_role(dev_db_connection): result = validate_api_key(api_key, "datasources", "PUT") assert result is None - @pytest.fixture def app() -> Flask: app = Flask(__name__)