@@ -10,7 +10,7 @@ from typeguard import typechecked

tf.compat.v1.set_random_seed(1234)

-class Rmse(tf.keras.losses.Loss):
+class MSE(tf.keras.losses.Loss):
    """
    Custom loss function template.
    Features: name configuration, parameter saving, and separation of tensor operations.
@@ -36,6 +36,39 @@ class Rmse(tf.keras.losses.Loss):
        return loss


+class RMSE(Loss):
+    """Root Mean Squared Error loss (works for univariate and multivariate regression)."""
+
+    def __init__(self,
+                 name,
+                 reduction="sum_over_batch_size",  # defaults to 'sum_over_batch_size' (equivalent to mean)
+                 **kwargs):
+        super().__init__(name=name, reduction=reduction)
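+        # NOTE: extra **kwargs are accepted (e.g. for config round-trips) but are
+        # not forwarded; the base Loss only takes `name` and `reduction`.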
+
+    def call(self, y_true, y_pred):
+        """
+        Computation:
+        1. Compute the squared difference between predictions and targets
+        2. Average along the last (feature) dimension
+        3. Take the square root of each sample's loss
+        """
+        # Align dtypes: convert y_pred to a tensor and cast y_true to match
+        y_pred = tf.convert_to_tensor(y_pred)
+        y_true = tf.cast(y_true, y_pred.dtype)
+
+        # Mean squared error per sample (averaged along the last axis)
+        mse_per_sample = tf.reduce_mean(tf.square(y_pred - y_true), axis=-1)
+
+        # Square root of each sample's MSE gives its RMSE
+        rmse_per_sample = tf.sqrt(mse_per_sample)
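+        # Caveat: tf.sqrt has an unbounded gradient at 0; if that matters, a common
+        # safeguard (not applied here) is tf.sqrt(mse_per_sample + epsilon).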
+
+        return rmse_per_sample
+
+    def get_config(self):
+        """Serialization support (for model saving/loading)."""
+        base_config = super().get_config()
+        return base_config
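+
+    # Usage sketch (illustrative, not part of this diff):
+    #   model.compile(optimizer='adam', loss=RMSE(name='rmse'))
+    #   model.save('model.keras')
+    #   tf.keras.models.load_model('model.keras', custom_objects={'RMSE': RMSE})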
+
+

class SouthLoss(Loss):
    """
    Loss function for the new Southern Grid rules (supports full serialization).
@@ -46,8 +79,8 @@ class SouthLoss(Loss):
        reduction (str): loss reduction mode; defaults to 'sum_over_batch_size'

    Example:
-        >>> loss = SouthLoss(cap=0.5)
-        >>> model.compile(loss=loss, optimizer='adam')
+        # >>> loss = SouthLoss(cap=0.5)
+        # >>> model.compile(loss=loss, optimizer='adam')
"""
|
|
"""
|
|
|
|
|
|
@typechecked
|
|
@typechecked
|
|
@@ -137,7 +170,7 @@ class SouthLoss(Loss):
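
+# Dispatch table: maps a region key to a factory that builds its loss.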
region_loss_d = {
-    'northeast': lambda region: Rmse(region),
+    'northeast': lambda region: RMSE(region),
    'south': lambda cap, region: SouthLoss(cap, region)
}

@@ -153,4 +186,37 @@ def region_loss(opt):
        else:
            return handler(opt.region)
    else:
-        raise TypeError("Invalid loss function")
+        raise TypeError("Invalid loss function")
+
+
+if __name__ == '__main__':
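+    # region_loss usage sketch (illustrative; the `opt` attributes are assumptions):
+    #   opt.region = 'south'; opt.cap = 0.5
+    #   loss_fn = region_loss(opt)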
+    # Test data
+    y_true = tf.constant([[1.0], [2.0], [3.0]], dtype=tf.float32)
+    y_pred = tf.constant([[1.5], [2.5], [3.5]], dtype=tf.float32)
+
+    # Reference value from the built-in Keras MSE
+    mse = tf.keras.losses.MeanSquaredError()(y_true, y_pred).numpy()
+
+    # Custom loss (equivalent to MSE when all sample weights are 1)
+    custom_mse = MSE(name='test')(y_true, y_pred).numpy()
+
+    print("Keras MSE:", mse)          # prints: 0.25
+    print("Custom MSE:", custom_mse)  # expected: 0.25
+    assert abs(mse - custom_mse) < 1e-6
+
+    # A trainable variable and an optimizer for the gradient check
+    y_pred_var = tf.Variable([[1.5], [2.5], [3.5]], dtype=tf.float32)
+    optimizer = tf.keras.optimizers.Adam()
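+    # (The Adam optimizer is created here but unused by the checks below.)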
+
+    with tf.GradientTape() as tape:
+        loss = MSE(name='test')(y_true, y_pred_var)
+    grads = tape.gradient(loss, y_pred_var)
+
+    # Analytic gradient: 2*(y_pred - y_true)/N with N=3 samples
+    expected_grads = 2 * (y_pred_var - y_true) / 3.0
+    print("Actual gradients:\n", grads.numpy())
+    print("Expected gradients:\n", expected_grads.numpy())
+
+    with tf.GradientTape() as tape:
+        loss = tf.keras.losses.MeanSquaredError()(y_true, y_pred_var)
+    grads = tape.gradient(loss, y_pred_var)
+    print("Keras MSE gradients:\n", grads.numpy())
|