#125 06181031

已合并
liudawei 1 周之前 将 4 次代码提交从 anweiguo/dev_david合并至 anweiguo/dev_awg

+ 68 - 36
models_processing/model_tf/tf_transformer.py

@@ -4,8 +4,8 @@
 # @Time      :2025/5/08 14:03
 # @Author    :David
 # @Company: shenyang JY
-
-from tensorflow.keras.layers import Input, Dense, LSTM, concatenate, Conv1D, Conv2D, MaxPooling1D, Reshape, Flatten, LayerNormalization, Dropout
+from tensorflow.keras.initializers import glorot_uniform, orthogonal
+from tensorflow.keras.layers import Input, Dense, LSTM, concatenate, Conv1D, Conv2D, MaxPooling1D, Reshape, Flatten, LayerNormalization, Dropout, Layer, Add, MultiHeadAttention
 from tensorflow.keras.models import Model, load_model
 from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard, ReduceLROnPlateau
 from tensorflow.keras import optimizers, regularizers
@@ -18,6 +18,49 @@ import argparse
 model_lock = Lock()
 set_deterministic(42)
 
+
+class PositionalEncoding(tf.keras.layers.Layer):
+    """Custom sinusoidal positional-encoding layer (serializable via get_config)."""
+    def __init__(self, max_len, d_model, **kwargs):
+        # max_len: maximum sequence length covered by the encoding table
+        # d_model: feature dimension; must equal the last dim of the inputs
+        super().__init__(**kwargs)
+        self.max_len = max_len  # kept as attributes so get_config can serialize them
+        self.d_model = d_model
+        # The (1, max_len, d_model) table is built once at construction time
+        self.position_embedding = self.positional_encoding(max_len, d_model)
+
+    def get_angles(self, pos, i, d_model):
+        # Per-dimension angle rates: pos / 10000^(2*(i//2)/d_model)
+        angles = 1 / tf.pow(10000., (2 * (i // 2)) / tf.cast(d_model, tf.float32))
+        return pos * angles
+
+    def positional_encoding(self, max_len, d_model):
+        # Build the positional-encoding matrix
+        angle_rads = self.get_angles(
+            pos=tf.range(max_len, dtype=tf.float32)[:, tf.newaxis],
+            i=tf.range(d_model, dtype=tf.float32)[tf.newaxis, :],
+            d_model=d_model
+        )
+        # Sin of even columns concatenated with cos of odd columns (the TF
+        # tutorial "concat" variant, not the interleaved form of the paper)
+        sines = tf.math.sin(angle_rads[:, 0::2])
+        cosines = tf.math.cos(angle_rads[:, 1::2])
+        pos_encoding = tf.concat([sines, cosines], axis=-1)
+        return pos_encoding[tf.newaxis, ...]  # add batch dimension
+
+    def call(self, inputs):
+        # Slice the table down to this batch's actual sequence length.
+        # NOTE(review): if seq_len > max_len the slice is silently SHORTER than
+        # the input and the add below fails with a shape mismatch — see the
+        # PositionalEncoding(opt.Model['time_step'], ...) call in
+        # get_transformer_model, which ignores time_series; confirm max_len
+        # always covers the full input length.
+        seq_len = tf.shape(inputs)[1]
+        return inputs + self.position_embedding[:, :seq_len, :]
+
+    def get_config(self):
+        # Serialize constructor args so load_model can rebuild the layer
+        config = super().get_config()
+        config.update({
+            'max_len': self.max_len,
+            'd_model': self.d_model,
+        })
+        return config
+
+
 class TransformerHandler(object):
     def __init__(self, logger, args):
         self.logger = logger
@@ -32,65 +75,54 @@ class TransformerHandler(object):
         try:
             with model_lock:
                 loss = region_loss(self.opt)
-                self.model, self.model_params = get_keras_model_from_mongo(args, {type(loss).__name__: loss})
+                self.model, self.model_params = get_keras_model_from_mongo(args, {type(loss).__name__: loss, 'PositionalEncoding': PositionalEncoding})
         except Exception as e:
             self.logger.info("加载模型权重失败:{}".format(e.args))
 
     @staticmethod
     def get_transformer_model(opt, time_series=1):
+        # Builds a Conv1D-embedded Transformer encoder over the NWP sequence
+        # and returns a compiled Keras Model (one scalar output per time step,
+        # flattened). opt.Model supplies hidden_size / num_heads / ff_dim /
+        # num_layers / time_step / input_size.
-        time_steps = 48
-        input_features = 21
-        output_steps = 16
         hidden_size = opt.Model.get('hidden_size', 64)
         num_heads = opt.Model.get('num_heads', 4)
         ff_dim = opt.Model.get('ff_dim', 128)
-        l2_reg = regularizers.l2(opt.Model.get('lambda_value_2', 0.0))
+        l2_reg = regularizers.l2(opt.Model.get('lambda_value_2', 0.01))
+        # NOTE(review): default L2 changed 0.0 -> 0.01; configs that omit
+        # 'lambda_value_2' now silently get regularization — confirm intended.
 
-        nwp_input = Input(shape=(opt.Model['time_step'] * time_series, opt.Model['input_size']), name='nwp')
+        nwp_input = Input(shape=(opt.Model['time_step'] * time_series, opt.Model['input_size']))
+        # NOTE(review): the name='nwp' on this Input was dropped — confirm no
+        # caller feeds inputs as a dict keyed by 'nwp'.
 
-        # 输入嵌入
-        x = Conv1D(hidden_size, 1, kernel_regularizer=l2_reg)(nwp_input)
+        # Embedding layer + positional encoding
+        x = Conv1D(hidden_size, kernel_size=3, padding='same', kernel_regularizer=l2_reg)(nwp_input)
+        x = PositionalEncoding(opt.Model['time_step'], hidden_size)(x)
+        # NOTE(review): max_len here is time_step only, but the sequence is
+        # time_step * time_series long; with time_series > 1 the positional
+        # table is too short and the add inside PositionalEncoding.call will
+        # raise a shape mismatch. Presumably this should be
+        # opt.Model['time_step'] * time_series — verify.
 
-        # Transformer编码
+        # Transformer encoder layers (with residual connections)
         for _ in range(opt.Model.get('num_layers', 2)):
-            # 多头自注意力
-            x = tf.keras.layers.MultiHeadAttention(
-                num_heads=num_heads, key_dim=hidden_size,
-                kernel_regularizer=l2_reg
-            )(x, x)
+            # Self-attention
+            residual = x
+            x = MultiHeadAttention(num_heads=num_heads, key_dim=hidden_size)(x, x)
+            # NOTE(review): kernel_regularizer was dropped here (and from the
+            # Dense layers below); l2_reg now only covers the Conv1D stem.
+            x = Dropout(0.1)(x)
+            x = Add()([residual, x])
             x = LayerNormalization()(x)
-            x = tf.keras.layers.Dropout(0.1)(x)
 
             # Feed-forward network
-            x = tf.keras.layers.Dense(ff_dim, activation='relu', kernel_regularizer=l2_reg)(x)
-            x = tf.keras.layers.Dense(hidden_size, kernel_regularizer=l2_reg)(x)
+            residual = x
+            x = Dense(ff_dim, activation='relu')(x)
+            x = Dense(hidden_size)(x)
+            x = Dropout(0.1)(x)
+            x = Add()([residual, x])
             x = LayerNormalization()(x)
-            x = tf.keras.layers.Dropout(0.1)(x)
 
-        # 提取中间时间步
-        start_idx = (time_steps - output_steps) // 2
-        x = x[:, start_idx:start_idx + output_steps, :]
-
-        # 输出层
-        output = Dense(output_steps, name='cdq_output')(x[:, -1, :])  # 或者使用所有时间步
+        # Output layer (one prediction per time step)
+        output = Dense(1, activation='linear')(x)
+        # output = tf.keras.layers.Lambda(lambda x: tf.squeeze(x, axis=-1))(output)
+        output = Flatten(name='Flatten')(output)
 
         model = Model(nwp_input, output)
-
-        # 编译模型
-        adam = optimizers.Adam(
-            learning_rate=opt.Model.get('learning_rate', 0.001),
-            beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=True
-        )
-        loss = region_loss(opt)
-        model.compile(loss=loss, optimizer=adam)
-
+        model.compile(loss='mse', optimizer=optimizers.Adam(learning_rate=1e-4))
+        # NOTE(review): loss/optimizer are now hard-coded ('mse', lr=1e-4),
+        # ignoring region_loss(opt) and opt's learning_rate, while get_model /
+        # train_init still register region_loss as a custom object for
+        # deserialization — confirm this divergence is intentional.
         return model
 
     def train_init(self):
         try:
             # 进行加强训练,支持修模
             loss = region_loss(self.opt)
-            base_train_model, self.model_params = get_keras_model_from_mongo(vars(self.opt), {type(loss).__name__: loss})
+            base_train_model, self.model_params = get_keras_model_from_mongo(vars(self.opt), {type(loss).__name__: loss, 'PositionalEncoding': PositionalEncoding})
             base_train_model.summary()
             self.logger.info("已加载加强训练基础模型")
             return base_train_model

+ 5 - 5
models_processing/model_tf/tf_transformer_pre.py

@@ -26,7 +26,7 @@ np.random.seed(42)  # NumPy随机种子
 app = Flask('tf_lstm_pre——service')
 
 current_dir = os.path.dirname(os.path.abspath(__file__))
-with open(os.path.join(current_dir, 'lstm.yaml'), 'r', encoding='utf-8') as f:
+with open(os.path.join(current_dir, 'transformer.yaml'), 'r', encoding='utf-8') as f:
     global_config = yaml.safe_load(f)  # 只读的全局配置
 
 @app.before_request
@@ -45,8 +45,8 @@ def update_config():
     g.dh = DataHandler(logger, current_config)  # 每个请求独立实例
     g.trans = TransformerHandler(logger, current_config)
 
-@app.route('/tf_lstm_predict', methods=['POST'])
-def model_prediction_lstm():
+@app.route('/tf_transformer_predict', methods=['POST'])
+def model_prediction_transformer():
     # 获取程序开始时间
     start_time = time.time()
     result = {}
@@ -64,7 +64,7 @@ def model_prediction_lstm():
         trans.opt.cap = round(target_scaler.transform(np.array([[float(args['cap'])]]))[0, 0], 2)
         trans.get_model(args)
         dh.opt.features = json.loads(trans.model_params)['Model']['features'].split(',')
-        scaled_pre_x, pre_data = dh.pre_data_handler(pre_data, feature_scaler, time_series=args['time_series'], lstm_type=1)
+        scaled_pre_x, pre_data = dh.pre_data_handler(pre_data, feature_scaler, time_series=args['time_series'])
         res = list(chain.from_iterable(target_scaler.inverse_transform(trans.predict(scaled_pre_x))))
         pre_data['farm_id'] = args.get('farm_id', 'null')
         if int(args.get('algorithm_test', 0)):
@@ -105,7 +105,7 @@ def model_prediction_lstm():
 if __name__ == "__main__":
     print("Program starts execution!")
     from waitress import serve
-    serve(app, host="0.0.0.0", port=10114,
+    serve(app, host="0.0.0.0", port=10132,
           threads=8,  # 指定线程数(默认4,根据硬件调整)
           channel_timeout=600  # 连接超时时间(秒)
           )

+ 7 - 7
models_processing/model_tf/tf_transformer_train.py

@@ -21,7 +21,7 @@ np.random.seed(42)  # NumPy随机种子
 app = Flask('tf_lstm_train——service')
 
 current_dir = os.path.dirname(os.path.abspath(__file__))
-with open(os.path.join(current_dir, 'lstm.yaml'), 'r', encoding='utf-8') as f:
+with open(os.path.join(current_dir, 'transformer.yaml'), 'r', encoding='utf-8') as f:
     global_config = yaml.safe_load(f)  # 只读的全局配置
 
 @app.before_request
@@ -41,8 +41,8 @@ def update_config():
     g.trans = TransformerHandler(logger, current_config)
 
 
-@app.route('/tf_lstm_training', methods=['POST'])
-def model_training_lstm():
+@app.route('/tf_transformer_training', methods=['POST'])
+def model_training_transformer():
     # 获取程序开始时间
     start_time = time.time()
     result = {}
@@ -60,7 +60,7 @@ def model_training_lstm():
         # ------------ 训练模型,保存模型 ------------
         # 1. 如果是加强训练模式,先加载预训练模型特征参数,再预处理训练数据
         # 2. 如果是普通模式,先预处理训练数据,再根据训练数据特征加载模型
-        model = trans.train_init() if trans.opt.Model['add_train'] else trans.get_transformer_model(trans.opt, time_series=args['time_series'], lstm_type=1)
+        model = trans.train_init() if trans.opt.Model['add_train'] else trans.get_transformer_model(trans.opt, time_series=args['time_series'])
         if trans.opt.Model['add_train']:
             if model:
                 feas = json.loads(trans.model_params)['features']
@@ -68,10 +68,10 @@ def model_training_lstm():
                     dh.opt.features = list(feas)
                     train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data, time_series=args['time_series'])
                 else:
-                    model = trans.get_transformer_model(trans.opt, time_series=args['time_series'], lstm_type=1)
+                    model = trans.get_transformer_model(trans.opt, time_series=args['time_series'])
                     logger.info("训练数据特征,不满足,加强训练模型特征")
             else:
-                model = trans.get_transformer_model(trans.opt, time_series=args['time_series'], lstm_type=1)
+                model = trans.get_transformer_model(trans.opt, time_series=args['time_series'])
         ts_model = trans.training(model, [train_x, train_y, valid_x, valid_y])
         args['Model']['features'] = ','.join(dh.opt.features)
         args['params'] = json.dumps(args)
@@ -97,7 +97,7 @@ def model_training_lstm():
 if __name__ == "__main__":
     print("Program starts execution!")
     from waitress import serve
-    serve(app, host="0.0.0.0", port=10115,
+    serve(app, host="0.0.0.0", port=10131,
           threads=8,  # 指定线程数(默认4,根据硬件调整)
           channel_timeout=600  # 连接超时时间(秒)
           )

+ 93 - 0
models_processing/model_tf/transformer.yaml

@@ -0,0 +1,93 @@
+Model:
+  # NOTE(review): get_transformer_model also reads num_layers, num_heads and
+  # ff_dim; they are absent here, so the code defaults (2 / 4 / 128) apply.
+  add_train: false
+  batch_size: 64
+  dropout_rate: 0.2
+  # NOTE(review): the transformer code hard-codes Dropout(0.1); this
+  # dropout_rate key appears unused by it.
+  epoch: 500
+  fusion: true
+  hidden_size: 64
+  his_points: 16
+  how_long_fill: 10
+  input_size: 24
+  lambda_value_1: 0.02
+  lambda_value_2: 0.01
+  learning_rate: 0.001
+  # NOTE(review): compile() in get_transformer_model hard-codes
+  # learning_rate=1e-4, ignoring this value.
+  lstm_layers: 1
+  # NOTE(review): presumably a leftover from the LSTM yaml this file was
+  # copied from — verify whether anything still reads lstm_layers.
+  output_size: 16
+  patience: 10
+  predict_data_fill: true
+  shuffle_train_data: false
+  test_data_fill: false
+  time_step: 16
+  train_data_fill: true
+  use_cuda: false
+  valid_data_rate: 0.15
+use_bidirectional: false
+region: south
+calculate: []
+cap: 153.0
+dataloc: ./data
+env_columns:
+- C_TIME
+- C_CELLT
+- C_DIFFUSER
+- C_GLOBALR
+- C_RH
+- C_REAL_VALUE
+full_field: true
+history_hours: 1
+new_field: true
+features:
+- time
+- temperature10
+- temperature190
+- direction160
+- direction40
+- temperature110
+- direction80
+- speed60
+- mcc
+- temperature150
+- speed20
+- speed110
+- direction120
+- speed190
+- solarZenith
+- temperature90
+- direction200
+- speed150
+- temperature50
+- direction30
+- temperature160
+- direction170
+- temperature20
+- direction70
+- direction130
+- temperature200
+- speed70
+- temperature120
+- speed30
+- speed100
+- speed80
+- speed180
+- dniCalcd
+- speed140
+- temperature60
+- dateTime
+- temperature30
+- temperature170
+- direction20
+- humidity2
+- direction180
+- realPowerAvg
+- direction60
+- direction140
+- speed40
+- hcc
+target: realPower
+repair_days: 81
+repair_model_cycle: 5
+spot_trading: []
+update_add_train_days: 60
+update_coe_days: 3
+version: solar-3.1.0.south
+

+ 2 - 0
run_all.py

@@ -32,6 +32,8 @@ services = [
     ("models_processing/model_tf/tf_lstm3_train.py", 10121),
     ("models_processing/model_tf/tf_lstm_zone_pre.py", 10125),
     ("models_processing/model_tf/tf_lstm_zone_train.py", 10124),
+    ("models_processing/model_tf/tf_transformer_pre.py", 10132),
+    ("models_processing/model_tf/tf_transformer_train.py", 10131),
 
     ("post_processing/post_processing.py", 10098),
     ("evaluation_processing/analysis.py", 10099),