def root_mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """
    Calculate the Root Mean Squared Error (RMSE) between two arrays.

    RMSE is a standard metric for evaluating regression models. It measures
    the average magnitude of the prediction errors, giving higher weight to
    larger errors due to squaring. RMSE is always non-negative, and a lower
    value indicates better model performance.

    RMSE = sqrt((1/n) * sum((y_true - y_pred) ** 2))

    Reference: https://en.wikipedia.org/wiki/Root_mean_square_deviation

    Parameters:
    - y_true: actual (ground-truth) values (array-like)
    - y_pred: predicted values (array-like, same length as y_true)

    Returns:
    - float: the RMSE between y_true and y_pred

    Raises:
    - ValueError: if the inputs have different lengths

    >>> true_values = np.array([100, 200, 300])
    >>> predictions = np.array([110, 190, 310])
    >>> root_mean_squared_error(true_values, predictions)
    10.0

    >>> true_values = [2, 4, 6, 8]
    >>> predictions = [3, 5, 7, 10]
    >>> round(root_mean_squared_error(true_values, predictions), 4)
    1.3229

    >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    >>> predictions = np.array([0.3, 0.8, 0.9, 0.2])
    >>> root_mean_squared_error(true_values, predictions)
    Traceback (most recent call last):
        ...
    ValueError: Input arrays must have the same length.
    """
    # Validate before converting so mismatched plain lists fail fast too.
    if len(y_true) != len(y_pred):
        raise ValueError("Input arrays must have the same length.")
    # asarray avoids copying when the inputs are already ndarrays.
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    # Cast to a plain float so the return repr is stable across NumPy 1.x/2.x.
    return float(np.sqrt(np.mean((y_pred - y_true) ** 2)))