# losses.py
  1. #!/usr/bin/env python
  2. # -*- coding: utf-8 -*-
  3. # time: 2023/5/8 13:15
  4. # file: loss.py.py
  5. # author: David
  6. # company: shenyang JY
  7. import tensorflow as tf
  8. from tensorflow.keras.losses import Loss
  9. from typeguard import typechecked
  10. tf.compat.v1.set_random_seed(1234)
  11. class Rmse(tf.keras.losses.Loss):
  12. """
  13. 自定义损失函数模板
  14. 功能:实现名称设置、参数保存、张量运算分离
  15. """
  16. def __init__(self,
  17. name, # 设置损失名称
  18. reduction='mean',
  19. **kwargs):
  20. super().__init__(name=name, reduction=reduction)
  21. # 可添加自定义参数(自动序列化)
  22. self.param = kwargs.get('param', 1.0)
  23. def call(self, y_true, y_pred):
  24. """核心计算逻辑(分离张量运算和非张量运算)"""
  25. # 非张量运算(预处理)
  26. coefficient = tf.constant(self.param, dtype=y_pred.dtype)
  27. # 张量运算(保持计算图兼容性)
  28. squared_difference = tf.square(y_pred - y_true)
  29. loss = tf.reduce_mean(squared_difference, axis=-1) * coefficient
  30. return loss
  31. class SouthLoss(Loss):
  32. """
  33. 南网新规则损失函数(支持完整序列化)
  34. 参数:
  35. cap (float): 归一化后的装机容量(需在 [0,1] 范围内)
  36. name (str): 损失函数名称,默认为 'south_loss'
  37. reduction (str): 损失归约方式,默认为 'sum_over_batch_size'
  38. 示例:
  39. >>> loss = SouthLoss(cap=0.5)
  40. >>> model.compile(loss=loss, optimizer='adam')
  41. """
  42. @typechecked
  43. def __init__(self,
  44. cap: float,
  45. name: str = "south_loss",
  46. reduction: str = "sum_over_batch_size"):
  47. # 参数校验
  48. if not 0 <= cap <= 1:
  49. raise ValueError("cap 必须为归一化后的值且位于 [0,1] 区间")
  50. super().__init__(name=name, reduction=reduction)
  51. # 内部处理缩放逻辑(保持原始 cap 的序列化)
  52. self._raw_cap = cap # 保存原始值用于序列化
  53. self.cap = tf.constant(cap * 0.2, dtype=tf.float32) # 实际计算值
  54. def get_config(self):
  55. """获取序列化配置(保存原始 cap 值)"""
  56. config = super().get_config()
  57. config.update({
  58. "cap": self._raw_cap, # 保存未缩放前的原始值
  59. "name": self.name,
  60. "reduction": self.reduction
  61. })
  62. return config
  63. @classmethod
  64. def from_config(cls, config):
  65. """反序列化时重建实例"""
  66. return cls(
  67. cap=config["cap"],
  68. name=config["name"],
  69. reduction=config["reduction"]
  70. )
  71. def call(self, y_true, y_pred):
  72. """计算损失值(带数值稳定化)"""
  73. # 确保数据类型一致
  74. y_true = tf.cast(y_true, tf.float32)
  75. y_pred = tf.cast(y_pred, tf.float32)
  76. # 数值稳定化处理
  77. diff = y_true - y_pred
  78. delta = y_true - self.cap
  79. # 使用稳定化的 sigmoid 计算
  80. logistic_input = tf.clip_by_value(10000.0 * delta, -50.0, 50.0) # 防止梯度爆炸
  81. logistic_values = tf.sigmoid(logistic_input)
  82. # 计算基值
  83. base = logistic_values * y_true + (1 - logistic_values) * self.cap
  84. # 避免除零错误
  85. safe_base = tf.where(tf.equal(base, 0.0), 1e-7, base)
  86. # 计算损失
  87. loss = tf.reduce_mean(tf.square(diff / safe_base), axis=-1)
  88. return loss
  89. def call2(self, y_true, y_predict):
  90. y_true = y_true * self.opt.std['C_REAL_VALUE'] + self.opt.mean['C_REAL_VALUE']
  91. y_predict = y_predict * self.opt.std['C_REAL_VALUE'] + self.opt.mean['C_REAL_VALUE']
  92. y_true = y_true[:, 15]
  93. y_predict = y_predict[:, 15]
  94. diff = y_true - y_predict
  95. logistic_values = tf.sigmoid(10000 * (y_true - self.cap))
  96. base = logistic_values * y_true + (1 - logistic_values) * self.cap
  97. loss = K.square(diff / base)
  98. mask_logical = tf.logical_and(tf.greater(y_true, self.cap01), tf.greater(y_predict, self.cap01))
  99. count = tf.reduce_sum(tf.cast(mask_logical, tf.float32), axis=-1)
  100. safe_count = tf.maximum(count, 1)
  101. # reduce_sum_loss = tf.reduce_sum(loss, axis=-1)
  102. mean_loss = loss / safe_count
  103. return mean_loss
  104. def call1(self, y_true, y_predict):
  105. y_true = y_true * self.opt.std['C_REAL_VALUE'] + self.opt.mean['C_REAL_VALUE']
  106. y_predict = y_predict * self.opt.std['C_REAL_VALUE'] + self.opt.mean['C_REAL_VALUE']
  107. base = tf.where(y_true > self.cap, y_true, tf.ones_like(y_true)*self.cap)
  108. loss = (y_true - y_predict) / base
  109. squared_loss = tf.square(loss)
  110. mean_squared_loss = tf.reduce_mean(squared_loss, axis=[1])
  111. return mean_squared_loss
  112. region_loss_d = {
  113. 'northeast': lambda region: Rmse(region),
  114. 'south': lambda cap, region: SouthLoss(cap, region)
  115. }
  116. # 根据地区调用对应逻辑
  117. def region_loss(opt):
  118. handler = region_loss_d.get(opt.region, opt.region)
  119. # 判断处理类型并执行
  120. if callable(handler):
  121. # 如果是lambda或函数,直接调用
  122. if opt.region == "south": # 需要额外参数的地区
  123. return handler(opt.cap, opt.region)
  124. else:
  125. return handler(opt.region)
  126. else:
  127. raise TypeError("无效的损失函数")