|
@@ -5,7 +5,8 @@
|
|
|
# author: David
|
|
|
# company: shenyang JY
|
|
|
import tensorflow as tf
|
|
|
-from tensorflow.keras import backend as K
|
|
|
+from tensorflow.keras.losses import Loss
|
|
|
+from typeguard import typechecked
|
|
|
tf.compat.v1.set_random_seed(1234)
|
|
|
|
|
|
|
|
@@ -35,43 +36,76 @@ class Rmse(tf.keras.losses.Loss):
|
|
|
return loss
|
|
|
|
|
|
|
|
|
class SouthLoss(Loss):
    """South-grid rule loss function (fully serializable).

    The constructor keeps the caller-supplied capacity for serialization
    and pre-computes the scaled constant used by the loss math.

    Args:
        cap (float): normalized installed capacity, expected in [0, 1].
        name (str): loss name, defaults to ``"south_loss"``.
        reduction (str): loss reduction mode, defaults to
            ``"sum_over_batch_size"``.

    Example:
        >>> loss = SouthLoss(cap=0.5)
        >>> model.compile(loss=loss, optimizer='adam')
    """

    @typechecked
    def __init__(self, cap: float, name: str = "south_loss",
                 reduction: str = "sum_over_batch_size"):
        # Validate before touching the base class so a bad capacity
        # never produces a half-constructed loss object.
        if not 0 <= cap <= 1:
            raise ValueError("cap 必须为归一化后的值且位于 [0,1] 区间")

        super().__init__(name=name, reduction=reduction)

        # _raw_cap round-trips through get_config()/from_config();
        # the computation itself uses the 0.2-scaled tensor constant.
        # NOTE(review): the 0.2 factor is inherited from the original
        # convention of passing "0.2 * cap" — confirm against callers.
        self._raw_cap = cap
        self.cap = tf.constant(cap * 0.2, dtype=tf.float32)
|
|
def get_config(self):
|
|
|
- """返回所有需要序列化的参数"""
|
|
|
+ """获取序列化配置(保存原始 cap 值)"""
|
|
|
config = super().get_config()
|
|
|
config.update({
|
|
|
- 'cap': float(self.cap.numpy()) # 将 Tensor 转换为 Python float
|
|
|
+ "cap": self._raw_cap, # 保存未缩放前的原始值
|
|
|
+ "name": self.name,
|
|
|
+ "reduction": self.reduction
|
|
|
})
|
|
|
return config
|
|
|
|
|
|
@classmethod
|
|
|
def from_config(cls, config):
|
|
|
- """显式处理反序列化逻辑"""
|
|
|
- # 提取 cap 并重建实例
|
|
|
- cap = config.pop('cap') # 从配置中获取原始值
|
|
|
- return cls(cap=cap, **config)
|
|
|
-
|
|
|
- def call(self, y_true, y_predict):
|
|
|
- """
|
|
|
- 自动调用
|
|
|
- :param y_true: 标签
|
|
|
- :param y_predict: 预测
|
|
|
- :return: 损失值
|
|
|
- """
|
|
|
- # 计算实际和预测的差值
|
|
|
- diff = y_true - y_predict
|
|
|
- logistic_values = tf.sigmoid(10000 * (y_true - self.cap))
|
|
|
- base = logistic_values * y_true + (1-logistic_values)*self.cap
|
|
|
- loss = K.square(diff/base)
|
|
|
- # loss = K.mean(loss, axis=-1)
|
|
|
+ """反序列化时重建实例"""
|
|
|
+ return cls(
|
|
|
+ cap=config["cap"],
|
|
|
+ name=config["name"],
|
|
|
+ reduction=config["reduction"]
|
|
|
+ )
|
|
|
+
|
|
|
+ def call(self, y_true, y_pred):
|
|
|
+ """计算损失值(带数值稳定化)"""
|
|
|
+ # 确保数据类型一致
|
|
|
+ y_true = tf.cast(y_true, tf.float32)
|
|
|
+ y_pred = tf.cast(y_pred, tf.float32)
|
|
|
+
|
|
|
+ # 数值稳定化处理
|
|
|
+ diff = y_true - y_pred
|
|
|
+ delta = y_true - self.cap
|
|
|
+
|
|
|
+ # 使用稳定化的 sigmoid 计算
|
|
|
+ logistic_input = tf.clip_by_value(10000.0 * delta, -50.0, 50.0) # 防止梯度爆炸
|
|
|
+ logistic_values = tf.sigmoid(logistic_input)
|
|
|
+
|
|
|
+ # 计算基值
|
|
|
+ base = logistic_values * y_true + (1 - logistic_values) * self.cap
|
|
|
+
|
|
|
+ # 避免除零错误
|
|
|
+ safe_base = tf.where(tf.equal(base, 0.0), 1e-7, base)
|
|
|
+
|
|
|
+ # 计算损失
|
|
|
+ loss = tf.reduce_mean(tf.square(diff / safe_base), axis=-1)
|
|
|
return loss
|
|
|
|
|
|
def call2(self, y_true, y_predict):
|