David 1 month ago
parent
commit
29e195c9fe

+ 63 - 29
models_processing/model_koi/losses.py

@@ -5,7 +5,8 @@
 # author: David
 # company: shenyang JY
 import tensorflow as tf
-from tensorflow.keras import backend as K
+from tensorflow.keras.losses import Loss
+from typeguard import typechecked
 tf.compat.v1.set_random_seed(1234)
 
 
@@ -35,43 +36,76 @@ class Rmse(tf.keras.losses.Loss):
         return loss
 
 
-class SouthLoss(tf.keras.losses.Loss):
-    def __init__(self, cap, name, reduction='sum_over_batch_size'):
-        """
-        南网新规则损失函数
-        :param cap:装机容量
-        """
-        super(SouthLoss, self).__init__(name=name, reduction=reduction)
-        self.cap = tf.cast(cap, tf.float32)     # 传入的是0.2cap
+class SouthLoss(Loss):
+    """
+    南网新规则损失函数(支持完整序列化)
+
+    参数:
+        cap (float): 归一化后的装机容量(需在 [0,1] 范围内)
+        name (str): 损失函数名称,默认为 'south_loss'
+        reduction (str): 损失归约方式,默认为 'sum_over_batch_size'
+
+    示例:
+        >>> loss = SouthLoss(cap=0.5)
+        >>> model.compile(loss=loss, optimizer='adam')
+    """
+
+    @typechecked
+    def __init__(self,
+                 cap: float,
+                 name: str = "south_loss",
+                 reduction: str = "sum_over_batch_size"):
+        # 参数校验
+        if not 0 <= cap <= 1:
+            raise ValueError("cap 必须为归一化后的值且位于 [0,1] 区间")
+
+        super().__init__(name=name, reduction=reduction)
+
+        # 内部处理缩放逻辑(保持原始 cap 的序列化)
+        self._raw_cap = cap  # 保存原始值用于序列化
+        self.cap = tf.constant(cap * 0.2, dtype=tf.float32)  # 实际计算值
 
     def get_config(self):
-        """返回所有需要序列化的参数"""
+        """获取序列化配置(保存原始 cap 值)"""
         config = super().get_config()
         config.update({
-            'cap': float(self.cap.numpy())  # 将 Tensor 转换为 Python float
+            "cap": self._raw_cap,  # 保存未缩放前的原始值
+            "name": self.name,
+            "reduction": self.reduction
         })
         return config
 
     @classmethod
     def from_config(cls, config):
-        """显式处理反序列化逻辑"""
-        # 提取 cap 并重建实例
-        cap = config.pop('cap')  # 从配置中获取原始值
-        return cls(cap=cap, **config)
-
-    def call(self, y_true, y_predict):
-        """
-        自动调用
-        :param y_true: 标签
-        :param y_predict: 预测
-        :return: 损失值
-        """
-        # 计算实际和预测的差值
-        diff = y_true - y_predict
-        logistic_values = tf.sigmoid(10000 * (y_true - self.cap))
-        base = logistic_values * y_true + (1-logistic_values)*self.cap
-        loss = K.square(diff/base)
-        # loss = K.mean(loss, axis=-1)
+        """反序列化时重建实例"""
+        return cls(
+            cap=config["cap"],
+            name=config["name"],
+            reduction=config["reduction"]
+        )
+
+    def call(self, y_true, y_pred):
+        """计算损失值(带数值稳定化)"""
+        # 确保数据类型一致
+        y_true = tf.cast(y_true, tf.float32)
+        y_pred = tf.cast(y_pred, tf.float32)
+
+        # 数值稳定化处理
+        diff = y_true - y_pred
+        delta = y_true - self.cap
+
+        # 使用稳定化的 sigmoid 计算
+        logistic_input = tf.clip_by_value(10000.0 * delta, -50.0, 50.0)  # 限制 sigmoid 输入幅值,避免极端值(|x|>50 时 sigmoid 已饱和)
+        logistic_values = tf.sigmoid(logistic_input)
+
+        # 计算基值
+        base = logistic_values * y_true + (1 - logistic_values) * self.cap
+
+        # 避免除零错误
+        safe_base = tf.where(tf.equal(base, 0.0), 1e-7, base)
+
+        # 计算损失
+        loss = tf.reduce_mean(tf.square(diff / safe_base), axis=-1)
         return loss
 
     def call2(self, y_true, y_predict):

+ 8 - 6
models_processing/model_koi/tf_bp.py

@@ -9,7 +9,7 @@ from tensorflow.keras.layers import Input, Dense, LSTM, concatenate, Conv1D, Con
 from tensorflow.keras.models import Model, load_model
 from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard, ReduceLROnPlateau
 from tensorflow.keras import optimizers, regularizers
-from models_processing.losses.loss_cdq import rmse
+from models_processing.model_koi.losses import region_loss
 from models_processing.model_koi.settings import set_deterministic
 import numpy as np
 from common.database_dml_koi import *
@@ -30,13 +30,14 @@ class BPHandler(object):
         """
         try:
             with model_lock:
-                # NPHandler.model = NPHandler.get_keras_model(opt)
-                self.model = get_h5_model_from_mongo(args, {'rmse': rmse})
+                loss = region_loss(self.opt)
+                self.model = get_h5_model_from_mongo(args, {type(loss).__name__: loss})
         except Exception as e:
             self.logger.info("加载模型权重失败:{}".format(e.args))
 
     @staticmethod
     def get_keras_model(opt):
+        loss = region_loss(opt)
         model = Sequential([
             Dense(64, input_dim=opt.Model['input_size'], activation='relu'),  # 输入层和隐藏层,10个神经元
             Dense(32, activation='relu'),  # 隐藏层,8个神经元
@@ -44,21 +45,22 @@ class BPHandler(object):
             Dense(1, activation='linear')  # 输出层,1个神经元(用于回归任务)
         ])
         adam = optimizers.Adam(learning_rate=opt.Model['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=True)
-        model.compile(loss=rmse, optimizer=adam)
+        model.compile(loss=loss, optimizer=adam)
         return model
 
     def train_init(self):
         try:
             if self.opt.Model['add_train']:
                 # 进行加强训练,支持修模
-                base_train_model = get_h5_model_from_mongo(vars(self.opt), {'rmse': rmse})
+                loss = region_loss(self.opt)
+                base_train_model = get_h5_model_from_mongo(vars(self.opt), {type(loss).__name__: loss})
                 base_train_model.summary()
                 self.logger.info("已加载加强训练基础模型")
             else:
                 base_train_model = self.get_keras_model(self.opt)
             return base_train_model
         except Exception as e:
-            self.logger.info("加强训练加载模型权重失败:{}".format(e.args))
+            self.logger.info("加载模型权重失败:{}".format(e.args))
 
     def training(self, train_and_valid_data):
         model = self.train_init()

+ 1 - 0
models_processing/model_koi/tf_bp_pre.py

@@ -54,6 +54,7 @@ def model_prediction_bp():
         pre_data = get_data_from_mongo(args)
         feature_scaler, target_scaler = get_scaler_model_from_mongo(args)
         scaled_pre_x = dh.pre_data_handler(pre_data, feature_scaler, bp_data=True)
+        bp.opt.cap = round(target_scaler.transform(np.array([[args['cap']]]))[0, 0], 2)
         # ------------ 获取模型,预测结果------------
         bp.get_model(args)
         res = list(chain.from_iterable(target_scaler.inverse_transform([bp.predict(scaled_pre_x).flatten()])))

+ 2 - 1
models_processing/model_koi/tf_bp_train.py

@@ -50,9 +50,10 @@ def model_training_bp():
     try:
         # ------------ 获取数据,预处理训练数据 ------------
         train_data = get_data_from_mongo(args)
-        train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes = dh.train_data_handler(train_data, bp_data=True)
+        train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data, bp_data=True)
         # ------------ 训练模型 ------------
         bp.opt.Model['input_size'] = train_x.shape[1]
+        bp.opt.cap = round(scaled_cap, 2)
         bp_model = bp.training([train_x, train_y, valid_x, valid_y])
         # ------------ 保存模型 ------------
         args['params'] = json.dumps(args)

+ 9 - 8
models_processing/model_koi/tf_cnn.py

@@ -10,7 +10,7 @@ from tensorflow.keras.layers import Input, Dense, LSTM, concatenate, Conv1D, Con
 from tensorflow.keras.models import Model, load_model
 from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard, ReduceLROnPlateau
 from tensorflow.keras import optimizers, regularizers
-from models_processing.losses.loss_cdq import rmse
+from models_processing.model_koi.losses import region_loss
 from models_processing.model_koi.settings import set_deterministic
 import numpy as np
 from common.database_dml_koi import *
@@ -31,15 +31,14 @@ class CNNHandler(object):
         """
         try:
             with model_lock:
-                # NPHandler.model = NPHandler.get_keras_model(opt)
-                self.model = get_h5_model_from_mongo(args, {'rmse': rmse})
+                loss = region_loss(self.opt)
+                self.model = get_h5_model_from_mongo(args, {type(loss).__name__: loss})
         except Exception as e:
             self.logger.info("加载模型权重失败:{}".format(e.args))
 
     @staticmethod
     def get_keras_model(opt):
-        # db_loss = NorthEastLoss(opt)
-        # south_loss = SouthLoss(opt)
+        loss = region_loss(opt)
         l1_reg = regularizers.l1(opt.Model['lambda_value_1'])
         l2_reg = regularizers.l2(opt.Model['lambda_value_2'])
         nwp_input = Input(shape=(opt.Model['time_step'], opt.Model['input_size']), name='nwp')
@@ -53,21 +52,23 @@ class CNNHandler(object):
         model = Model(inputs=nwp_input, outputs=output_f)
         adam = optimizers.Adam(learning_rate=opt.Model['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=True)
         reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.01, patience=5, verbose=1)
-        model.compile(loss=rmse, optimizer=adam)
+
+        model.compile(loss=loss, optimizer=adam)
         return model
 
     def train_init(self):
         try:
             if self.opt.Model['add_train']:
                 # 进行加强训练,支持修模
-                base_train_model = get_h5_model_from_mongo(vars(self.opt), {'rmse': rmse})
+                loss = region_loss(self.opt)
+                base_train_model = get_h5_model_from_mongo(vars(self.opt), {type(loss).__name__: loss})
                 base_train_model.summary()
                 self.logger.info("已加载加强训练基础模型")
             else:
                 base_train_model = self.get_keras_model(self.opt)
             return base_train_model
         except Exception as e:
-            self.logger.info("加强训练加载模型权重失败:{}".format(e.args))
+            self.logger.info("加载模型权重失败:{}".format(e.args))
 
     def training(self, train_and_valid_data):
         model = self.train_init()

+ 2 - 0
models_processing/model_koi/tf_cnn_pre.py

@@ -53,6 +53,8 @@ def model_prediction_bp():
         pre_data = get_data_from_mongo(args)
         feature_scaler, target_scaler = get_scaler_model_from_mongo(args)
         scaled_pre_x = dh.pre_data_handler(pre_data, feature_scaler)
+        cnn.opt.cap = round(target_scaler.transform(np.array([[args['cap']]]))[0,0], 2)
+        logger.info("---------cap归一化:{}".format(cnn.opt.cap))
         cnn.get_model(args)
         # result = bp.predict(scaled_pre_x, args)
         res = list(chain.from_iterable(target_scaler.inverse_transform([cnn.predict(scaled_pre_x).flatten()])))

+ 4 - 1
models_processing/model_koi/tf_cnn_train.py

@@ -50,9 +50,12 @@ def model_training_bp():
     try:
         # ------------ 获取数据,预处理训练数据 ------------
         train_data = get_data_from_mongo(args)
-        train_x, valid_x, train_y, valid_y, scaled_train_bytes, scaled_target_bytes = dh.train_data_handler(train_data)
+        train_x, valid_x, train_y, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data)
         # ------------ 训练模型,保存模型 ------------
         cnn.opt.Model['input_size'] = train_x.shape[2]
+        cnn.opt.cap = round(scaled_cap, 2)
+        logger.info("---------cap归一化:{}".format(cnn.opt.cap))
+
         bp_model = cnn.training([train_x, valid_x, train_y, valid_y])
 
         args['params'] = json.dumps(args)

+ 8 - 7
models_processing/model_koi/tf_lstm.py

@@ -9,7 +9,7 @@ from tensorflow.keras.layers import Input, Dense, LSTM, concatenate, Conv1D, Con
 from tensorflow.keras.models import Model, load_model
 from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard, ReduceLROnPlateau
 from tensorflow.keras import optimizers, regularizers
-from models_processing.losses.loss_cdq import rmse
+from models_processing.model_koi.losses import region_loss
 import numpy as np
 from common.database_dml_koi import *
 from models_processing.model_koi.settings import set_deterministic
@@ -30,14 +30,14 @@ class TSHandler(object):
         """
         try:
             with model_lock:
-                self.model = get_h5_model_from_mongo(args, {'rmse': rmse})
+                loss = region_loss(self.opt)
+                self.model = get_h5_model_from_mongo(args, {type(loss).__name__: loss})
         except Exception as e:
             self.logger.info("加载模型权重失败:{}".format(e.args))
 
     @staticmethod
     def get_keras_model(opt):
-        # db_loss = NorthEastLoss(opt)
-        # south_loss = SouthLoss(opt)
+        loss = region_loss(opt)
         l1_reg = regularizers.l1(opt.Model['lambda_value_1'])
         l2_reg = regularizers.l2(opt.Model['lambda_value_2'])
         nwp_input = Input(shape=(opt.Model['time_step'], opt.Model['input_size']), name='nwp')
@@ -50,21 +50,22 @@ class TSHandler(object):
 
         model = Model(nwp_input, output)
         adam = optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=True)
-        model.compile(loss=rmse, optimizer=adam)
+        model.compile(loss=loss, optimizer=adam)
         return model
 
     def train_init(self):
         try:
             if self.opt.Model['add_train']:
                 # 进行加强训练,支持修模
-                base_train_model = get_h5_model_from_mongo(vars(self.opt), {'rmse': rmse})
+                loss = region_loss(self.opt)
+                base_train_model = get_h5_model_from_mongo(vars(self.opt), {type(loss).__name__: loss})
                 base_train_model.summary()
                 self.logger.info("已加载加强训练基础模型")
             else:
                 base_train_model = self.get_keras_model(self.opt)
             return base_train_model
         except Exception as e:
-            self.logger.info("加强训练加载模型权重失败:{}".format(e.args))
+            self.logger.info("加载模型权重失败:{}".format(e.args))
 
     def training(self, train_and_valid_data):
         model = self.train_init()

+ 1 - 0
models_processing/model_koi/tf_lstm_pre.py

@@ -53,6 +53,7 @@ def model_prediction_bp():
         pre_data = get_data_from_mongo(args)
         feature_scaler, target_scaler = get_scaler_model_from_mongo(args)
         scaled_pre_x = dh.pre_data_handler(pre_data, feature_scaler)
+        ts.opt.cap = round(target_scaler.transform(np.array([[args['cap']]]))[0, 0], 2)
         ts.get_model(args)
         # result = bp.predict(scaled_pre_x, args)
         res = list(chain.from_iterable(target_scaler.inverse_transform([ts.predict(scaled_pre_x).flatten()])))

+ 2 - 1
models_processing/model_koi/tf_lstm_train.py

@@ -48,9 +48,10 @@ def model_training_bp():
     try:
         # ------------ 获取数据,预处理训练数据 ------------
         train_data = get_data_from_mongo(args)
-        train_x, valid_x, train_y, valid_y, scaled_train_bytes, scaled_target_bytes = dh.train_data_handler(train_data)
+        train_x, valid_x, train_y, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data)
         # ------------ 训练模型,保存模型 ------------
         ts.opt.Model['input_size'] = train_x.shape[2]
+        ts.opt.cap = round(scaled_cap, 2)
         ts_model = ts.training([train_x, valid_x, train_y, valid_y])
 
         args['params'] = json.dumps(args)