From 3e255e2194dafbb2a76e304293bf38548a675297 Mon Sep 17 00:00:00 2001 From: TASMAYU Date: Sat, 11 Oct 2025 01:53:44 +0530 Subject: [PATCH 1/3] Add RMSE and Log-Cosh loss functions to loss_functions.py --- machine_learning/loss_functions.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py index 0bd9aa8b5401..7fe2864c90db 100644 --- a/machine_learning/loss_functions.py +++ b/machine_learning/loss_functions.py @@ -667,3 +667,27 @@ def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float import doctest doctest.testmod() + + +def root_mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: + """ + Calculate the Root Mean Squared Error (RMSE) between ground truth and predicted values. + # ... docstring continues ... + """ + # LINE 1: Check if input arrays have same length + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + + # LINE 2: Calculate squared differences between true and predicted values + # (y_true - y_pred) gives errors, then we square each error + squared_errors = (y_true - y_pred) ** 2 + + # LINE 3: Calculate mean of all squared errors + # This gives Mean Squared Error (MSE) + mean_squared_error = np.mean(squared_errors) + + # LINE 4: Take square root of MSE to get RMSE + # This brings units back to original scale + return np.sqrt(mean_squared_error) + + From 4c95fa7b3784cf01c4eab6eeb390975d0e14c0a0 Mon Sep 17 00:00:00 2001 From: TASMAYU Date: Sat, 11 Oct 2025 20:36:49 +0530 Subject: [PATCH 2/3] Rewrite some comments to explain each step --- machine_learning/loss_functions.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py index 7fe2864c90db..c21c5ca150cd 100644 --- a/machine_learning/loss_functions.py +++ b/machine_learning/loss_functions.py @@ -672,22 +672,20 @@ def 
kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float def root_mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: """ Calculate the Root Mean Squared Error (RMSE) between ground truth and predicted values. - # ... docstring continues ... """ - # LINE 1: Check if input arrays have same length + # Check that the input arrays have the same length if len(y_true) != len(y_pred): raise ValueError("Input arrays must have the same length.") - # LINE 2: Calculate squared differences between true and predicted values + # Calculate the squared differences between true and predicted values # (y_true - y_pred) gives errors, then we square each error squared_errors = (y_true - y_pred) ** 2 - # LINE 3: Calculate mean of all squared errors - # This gives Mean Squared Error (MSE) + # Calculate mean of all squared errors + # This gives the mean squared error (MSE) mean_squared_error = np.mean(squared_errors) - # LINE 4: Take square root of MSE to get RMSE - # This brings units back to original scale + # Take the square root of MSE to get RMSE return np.sqrt(mean_squared_error) From 876a0e36a653ce28539a04d8a65741c7b4facbaf Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 11 Oct 2025 15:19:31 +0000 Subject: [PATCH 3/3] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- machine_learning/loss_functions.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py index c21c5ca150cd..be812401a068 100644 --- a/machine_learning/loss_functions.py +++ b/machine_learning/loss_functions.py @@ -687,5 +687,3 @@ def root_mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: # Take the square root of MSE to get RMSE return np.sqrt(mean_squared_error) - -