Commit: 03251246
David, 3 months ago · parent 5a858ed0cb

+ 94 - 0
app/common/tf_cnn.py

@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+# -*- coding:utf-8 -*-
+# @FileName  :tf_cnn.py
+# @Time      :2025/2/12 14:03
+# @Author    :David
+# @Company: shenyang JY
+
+from tensorflow.keras.layers import Input, Dense, LSTM, Conv1D, MaxPooling1D, Flatten
+from tensorflow.keras.models import Model
+from tensorflow.keras.callbacks import EarlyStopping
+from tensorflow.keras import optimizers, regularizers
+from app.model.losses import region_loss
+import numpy as np
+from app.common.dbmg import MongoUtils
+# from app.model.losses import rmse
+from threading import Lock
+import argparse
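+# Module-level lock shared by all handler instances; serializes Keras model loading across threads (see get_model below)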
+model_lock = Lock()
+
+
+class CNNHandler(object):
+    def __init__(self, logger, args):
+        self.logger = logger
+        self.opt = argparse.Namespace(**args)
+        self.model = None
+        self.model_params = None
+        self.mongoUtils = MongoUtils(logger)
+
+    def get_model(self, args):
+        """
+        单例模式+线程锁,防止在异步加载时引发线程安全
+        """
+        try:
+            with model_lock:
+                loss = region_loss(self.opt)
+                self.model, self.model_params = self.mongoUtils.get_keras_model_from_mongo(args, {type(loss).__name__: loss})
+        except Exception as e:
+            self.logger.info("加载模型权重失败:{}".format(e.args))
+
+    @staticmethod
+    def get_keras_model(opt):
+        loss = region_loss(opt)
+        l1_reg = regularizers.l1(opt.Model['lambda_value_1'])
+        l2_reg = regularizers.l2(opt.Model['lambda_value_2'])
+        nwp_input = Input(shape=(opt.Model['time_step'], opt.Model['input_size']), name='nwp')
+
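+        # kernel_size=1 => pointwise convolution: mixes input features at each time step without looking across time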
+        con1 = Conv1D(filters=64, kernel_size=1, strides=1, padding='valid', activation='relu', kernel_regularizer=l2_reg)(nwp_input)
+        d1 = Dense(32, activation='relu', name='d1', kernel_regularizer=l1_reg)(con1)
+        nwp = Dense(8, activation='relu', name='d2', kernel_regularizer=l1_reg)(d1)
+
+        output = Dense(1, name='d5')(nwp)
+        output_f = Flatten()(output)
+        model = Model(inputs=nwp_input, outputs=output_f)
+        adam = optimizers.Adam(learning_rate=opt.Model['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=True)
+        # reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.01, patience=5, verbose=1)
+
+        model.compile(loss=loss, optimizer=adam)
+        return model
+
+
+    def train_init(self):
+        try:
+            # Incremental (reinforced) training, supports model refinement
+            loss = region_loss(self.opt)
+            base_train_model, self.model_params = self.mongoUtils.get_keras_model_from_mongo(vars(self.opt), {type(loss).__name__: loss})
+            base_train_model.summary()
+            self.logger.info("已加载加强训练基础模型")
+            return base_train_model
+        except Exception as e:
+            self.logger.info("加强训练加载模型权重失败:{}".format(e.args))
+            return False
+
+    def training(self, model, train_and_valid_data):
+        model.summary()
+        train_x, train_y, valid_x, valid_y = train_and_valid_data
+        early_stop = EarlyStopping(monitor='val_loss', patience=self.opt.Model['patience'], mode='auto')
+        history = model.fit(train_x, train_y, batch_size=self.opt.Model['batch_size'], epochs=self.opt.Model['epoch'],
+                            verbose=2, validation_data=(valid_x, valid_y), callbacks=[early_stop], shuffle=False)
+        loss = np.round(history.history['loss'], decimals=5)
+        val_loss = np.round(history.history['val_loss'], decimals=5)
+        self.logger.info("-----模型训练经过{}轮迭代-----".format(len(loss)))
+        self.logger.info("训练集损失函数为:{}".format(loss))
+        self.logger.info("验证集损失函数为:{}".format(val_loss))
+        return model
+
+    def predict(self, test_x, batch_size=1):
+        result = self.model.predict(test_x, batch_size=batch_size)
+        self.logger.info("执行预测方法")
+        return result
+
+
+
+if __name__ == "__main__":
+    run_code = 0
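
For reference, a minimal usage sketch of CNNHandler (mirroring app/model/tf_cnn_train.py below; logger and args come from app.common.logs, and the four arrays are assumed to be produced by DataHandler.train_data_handler):

    from app.common.logs import logger, args
    from app.common.tf_cnn import CNNHandler

    cnn = CNNHandler(logger, args)
    model = CNNHandler.get_keras_model(cnn.opt)   # build and compile the Conv1D + Dense model
    model = cnn.training(model, [train_x, train_y, valid_x, valid_y])  # order follows training()'s unpack
    cnn.model = model                             # predict() reads self.model
    preds = cnn.predict(test_x, batch_size=1)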

+ 82 - 19
app/common/tf_fmi.py

@@ -5,10 +5,11 @@
 # @Author    :David
 # @Company: shenyang JY
 
-from tensorflow.keras.layers import Input, Dense, LSTM, Conv1D, MaxPooling1D
-from tensorflow.keras.models import Model
-from tensorflow.keras.callbacks import EarlyStopping
+from tensorflow.keras.layers import Input, Dense, LSTM, concatenate, Conv1D, Conv2D, MaxPooling1D, Reshape, Flatten
+from tensorflow.keras.models import Model, load_model
+from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard, ReduceLROnPlateau
 from tensorflow.keras import optimizers, regularizers
+from tensorflow.keras.layers import BatchNormalization, GlobalAveragePooling1D, Dropout, Add, Concatenate, Multiply
 from app.model.losses import region_loss
 import numpy as np
 from app.common.dbmg import MongoUtils
@@ -39,22 +40,84 @@ class FMIHandler(object):
 
     @staticmethod
     def get_keras_model(opt):
-        # db_loss = NorthEastLoss(opt)
-        # south_loss = SouthLoss(opt)
-        loss = region_loss(opt)
-        l1_reg = regularizers.l1(opt.Model['lambda_value_1'])
-        l2_reg = regularizers.l2(opt.Model['lambda_value_2'])
-        nwp_input = Input(shape=(opt.Model['time_step'], opt.Model['input_size']), name='nwp')
-
-        con1 = Conv1D(filters=64, kernel_size=5, strides=1, padding='valid', activation='relu', kernel_regularizer=l2_reg)(nwp_input)
-        con1_p = MaxPooling1D(pool_size=5, strides=1, padding='valid', data_format='channels_last')(con1)
-        nwp_lstm = LSTM(units=opt.Model['hidden_size'], return_sequences=False, kernel_regularizer=l2_reg)(con1_p)
-
-        output = Dense(opt.Model['output_size'], name='cdq_output')(nwp_lstm)
-
-        model = Model(nwp_input, output)
-        adam = optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=True)
-        model.compile(loss=loss, optimizer=adam)
+        """优化后的新能源功率预测模型
+        主要改进点:
+        1. 多尺度特征提取
+        2. 注意力机制
+        3. 残差连接
+        4. 弹性正则化
+        5. 自适应学习率调整
+        """
+        # Regularization configuration
+        l1_l2_reg = regularizers.l1_l2(
+            l1=opt.Model['lambda_value_1'],
+            l2=opt.Model['lambda_value_2']
+        )
+
+        # Input layer
+        nwp_input = Input(shape=(opt.Model['time_step'], opt.Model['input_size']), name='nwp_input')
+
+        # %% Multi-scale feature-extraction block
+        def multi_scale_block(input_layer):
+            # parallel convolution paths
+            conv3 = Conv1D(64, 3, padding='causal', activation='relu')(input_layer)
+            conv5 = Conv1D(64, 5, padding='causal', activation='relu')(input_layer)
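+            # concatenating the paths yields 64 + 64 = 128 channels spanning two receptive-field widths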
+            return Concatenate()([conv3, conv5])
+
+        # Feature backbone
+        x = multi_scale_block(nwp_input)
+
+        # %% Residual attention block
+        def residual_attention_block(input_layer, filters):
+            # main path
+            y = Conv1D(filters, 3, padding='same', activation='relu')(input_layer)
+            y = BatchNormalization()(y)
+
+            # attention gating
+            attention = Dense(filters, activation='sigmoid')(y)
+            y = Multiply()([y, attention])
+
+            # residual connection
+            shortcut = Conv1D(filters, 1, padding='same')(input_layer)
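+            # the 1x1 projection matches the input's channel count to 'filters' so the Add() is shape-compatible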
+            return Add()([y, shortcut])
+
+        x = residual_attention_block(x, 128)
+        x = Dropout(0.3)(x)
+
+        # %% Feature aggregation
+        x = GlobalAveragePooling1D()(x)  # used instead of Flatten to aggregate the temporal features
+
+        # %% Fully connected layers (depth is tunable)
+        x = Dense(256, activation='swish', kernel_regularizer=l1_l2_reg)(x)
+        x = BatchNormalization()(x)
+        x = Dropout(0.5)(x)
+
+        # %% Output layer (extensible to probabilistic forecasting)
+        output = Dense(16, activation='linear', name='main_output')(x)
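+        # NOTE: output width 16 is hardcoded here, where the removed code used opt.Model['output_size']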
+
+        # Optional probabilistic-forecast extension (see the sketch after this file):
+        # variance = Dense(1, activation='softplus')(x)  # predicted variance
+        # output = Concatenate()([output, variance])
+
+        # %% Compile the model
+        model = Model(inputs=nwp_input, outputs=output)
+
+        # Adaptive optimizer configuration
+        adam = optimizers.Adam(
+            learning_rate=opt.Model['learning_rate'],
+            beta_1=0.92,  # adjusted momentum term
+            beta_2=0.999,
+            epsilon=1e-07,
+            amsgrad=True
+        )
+
+        # Compile settings (region_loss is provided by app.model.losses)
+        model.compile(
+            loss=region_loss(opt),  # custom regional loss
+            optimizer=adam,
+            metrics=['mae', 'mse']  # monitored metrics
+        )
+
         return model
 
     def train_init(self):

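On the optional probabilistic extension noted in the comments above: a hedged illustration (not part of this commit) of a Gaussian negative log-likelihood loss that would pair with the commented-out mean+variance head; gaussian_nll is a hypothetical name, and the 16-column mean matches the Dense(16) output:

    import tensorflow as tf

    def gaussian_nll(y_true, y_pred):
        # y_pred packs [mean (16 cols), variance (1 col)], as the commented-out
        # Concatenate([output, variance]) would produce; softplus keeps variance > 0
        mean, var = y_pred[:, :16], y_pred[:, 16:]
        return tf.reduce_mean(0.5 * tf.math.log(var) + 0.5 * tf.square(y_true - mean) / var)

    # model.compile(loss=gaussian_nll, optimizer=adam)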
+ 0 - 2
app/common/tf_lstm.py

@@ -39,8 +39,6 @@ class TSHandler(object):
 
     @staticmethod
     def get_keras_model(opt):
-        # db_loss = NorthEastLoss(opt)
-        # south_loss = SouthLoss(opt)
         loss = region_loss(opt)
         l1_reg = regularizers.l1(opt.Model['lambda_value_1'])
         l2_reg = regularizers.l2(opt.Model['lambda_value_2'])

+ 8 - 3
app/model/main.py

@@ -11,7 +11,6 @@
 import argparse
 import pandas as pd
 from pathlib import Path
-from tf_lstm_train import model_training
 from app.common.logs import args, logger
 
 """"
@@ -43,14 +42,14 @@ def material(input_file, isDq=True):
         if args['switch_nwp_owner']:
             nwp_v, nwp_v_h = nwp_own, nwp_own_h
         # wind power
-        if plant_type == 0:
+        if plant_type < 2:
             station_info = pd.read_csv(input_file / station_info_w, sep='\s+', header=0)
             station_info_d = pd.read_csv(input_file / station_info_d_w, sep='\s+', header=0)
             nwp = pd.read_csv(input_file / nwp_w, sep='\s+', header=0)
             nwp_h = pd.read_csv(input_file / nwp_w_h, sep='\s+', header=0)
             return station_info, station_info_d, nwp, nwp_h, power, nwp_v, nwp_v_h
         # solar PV
-        elif plant_type == 1:
+        elif plant_type == 2:
             station_info = pd.read_csv(input_file / station_info_s, sep='\s+', header=0)
             station_info_d = pd.read_csv(input_file / station_info_d_s, sep='\s+', header=0)
             nwp = pd.read_csv(input_file / nwp_s, sep='\s+', header=0)
@@ -69,6 +68,12 @@ def input_file_handler(input_file: str):
         # path contains 'model': run training
         if 'model' in input_file.lower():
             train_data = pd.merge(nwp_v_h, power, on='Datetime')
+            if args['model_name'] == 'fmi':
+                from tf_fmi_train import model_training
+            elif args['model_name'] == 'cnn':
+                from tf_cnn_train import model_training
+            else:
+                from tf_lstm_train import model_training
             model_training(train_data, input_file, cap)
         # path contains 'predict': run prediction
         else:

+ 115 - 0
app/model/tf_cnn_train.py

@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+# -*- coding:utf-8 -*-
+# @FileName  :tf_cnn_train.py
+# @Time      :2025/2/13 10:52
+# @Author    :David
+# @Company: shenyang JY
+import json, os
+import numpy as np
+import traceback
+import logging
+
+from app.common.logs import args
+from app.common.data_handler import DataHandler, write_number_to_file
+import time
+from app.common.tf_cnn import CNNHandler
+from app.common.dbmg import MongoUtils
+from app.common.logs import logger
+np.random.seed(42)  # NumPy random seed
+# tf.set_random_seed(42)  # TensorFlow random seed
+
+dh = DataHandler(logger, args)
+cnn = CNNHandler(logger, args)
+mgUtils = MongoUtils(logger)
+
+def model_training(train_data, input_file, cap):
+    # Record the program start time
+    start_time = time.time()
+    result = {}
+    success = 0
+    logger.info("Program starts execution!")
+    farm_id = input_file.split('/')[-2]
+    output_file = input_file.replace('IN', 'OUT')
+    status_file = 'STATUS.TXT'
+    try:
+        # ------------ Fetch data and preprocess the training data ------------
+        dh.opt.cap = cap
+        train_x, valid_x, train_y, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data)
+        cnn.opt.cap = round(scaled_cap, 2)
+        cnn.opt.Model['input_size'] = train_x.shape[2]
+        # ------------ Train and save the model ------------
+        # 1. Incremental-training mode: load the pretrained model's feature list first, then preprocess the training data
+        # 2. Normal mode: preprocess the training data first, then build the model from the data's features
+        model = cnn.train_init() if cnn.opt.Model['add_train'] else cnn.get_keras_model(cnn.opt)
+        if cnn.opt.Model['add_train']:
+            if model:
+                feas = json.loads(cnn.model_params).get('features', dh.opt.features)
+                if set(feas).issubset(set(dh.opt.features)):
+                    dh.opt.features = list(feas)
+                    train_x, valid_x, train_y, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data)  # same unpack order as the first call above
+                else:
+                    model = cnn.get_keras_model(cnn.opt)
+                    logger.info("训练数据特征,不满足,加强训练模型特征")
+            else:
+                model = cnn.get_keras_model(cnn.opt)
+
+        ts_model = cnn.training(model, [train_x, valid_x, train_y, valid_y])
+        success = 1
+        # Update algorithm status: 1. started successfully
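+        # STATUS.TXT convention in this file: (1, 1) = started OK, (2, 2) = finished normally, (2, 3) = finished abnormally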
+        write_number_to_file(os.path.join(output_file, status_file), 1, 1, 'rewrite')
+        # ------------ Assemble model metadata ------------
+        args['Model']['features'] = ','.join(dh.opt.features)
+        args.update({
+            'params': json.dumps(args),
+            'descr': f'南网竞赛-{farm_id}',
+            'gen_time': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
+            'model_table': args['model_table'] + farm_id,
+            'scaler_table': args['scaler_table'] + farm_id
+        })
+        mgUtils.insert_trained_model_into_mongo(ts_model, args)
+        mgUtils.insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, args)
+        # Update algorithm status: finished normally
+        write_number_to_file(os.path.join(output_file, status_file), 2, 2)
+    except Exception as e:
+        # If the algorithm never reached the started state, do not update the status
+        if success:
+            write_number_to_file(os.path.join(output_file, status_file), 2, 3)
+        my_exception = traceback.format_exc()
+        my_exception = my_exception.replace("\n", "\t")  # str.replace returns a new string; assign it back
+        result['msg'] = my_exception
+    end_time = time.time()
+
+    result['success'] = success
+    result['args'] = args
+    result['start_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))
+    result['end_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))
+    print("Program execution ends!")
+    return result
+
+
+if __name__ == "__main__":
+    print("Program starts execution!")
+    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    logger = logging.getLogger("model_training_bp log")
+    # Leftover from a web-service template: `app` is never defined in this module,
+    # so the waitress call would raise a NameError; kept commented out.
+    # from waitress import serve
+    # serve(app, host="0.0.0.0", port=10103, threads=4)
+    print("server start!")
+    # args_dict = {"mongodb_database": 'realtimeDq', 'scaler_table': 'j00600_scaler', 'model_name': 'lstm1',
+    # 'model_table': 'j00600_model', 'mongodb_read_table': 'j00600', 'col_time': 'dateTime',
+    # 'features': 'speed10,direction10,speed30,direction30,speed50,direction50,speed70,direction70,speed90,direction90,speed110,direction110,speed150,direction150,speed170,direction170'}
+    # args_dict['features'] = args_dict['features'].split(',')
+    # args.update(args_dict)
+    # dh = DataHandler(logger, args)
+    # ts = TSHandler(logger, args)
+    # opt = argparse.Namespace(**args)
+    # opt.Model['input_size'] = len(opt.features)
+    # train_data = get_data_from_mongo(args_dict)
+    # train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes = dh.train_data_handler(train_data)
+    # ts_model = ts.training([train_x, train_y, valid_x, valid_y])
+    #
+    # args_dict['gen_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
+    # args_dict['params'] = args
+    # args_dict['descr'] = '测试'
+    # insert_trained_model_into_mongo(ts_model, args_dict)
+    # insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, args_dict)

+ 9 - 9
app/model/tf_fmi_train.py

@@ -19,7 +19,7 @@ np.random.seed(42)  # NumPy random seed
 # tf.set_random_seed(42)  # TensorFlow random seed
 
 dh = DataHandler(logger, args)
-ts = FMIHandler(logger, args)
+fmi = FMIHandler(logger, args)
 mgUtils = MongoUtils(logger)
 
 def model_training(train_data, input_file, cap):
@@ -35,25 +35,25 @@ def model_training(train_data, input_file, cap):
         # ------------ Fetch data and preprocess the training data ------------
         dh.opt.cap = cap
         train_x, valid_x, train_y, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data)
-        ts.opt.cap = round(scaled_cap, 2)
-        ts.opt.Model['input_size'] = train_x.shape[2]
+        fmi.opt.cap = round(scaled_cap, 2)
+        fmi.opt.Model['input_size'] = train_x.shape[2]
         # ------------ Train and save the model ------------
         # 1. Incremental-training mode: load the pretrained model's feature list first, then preprocess the training data
         # 2. Normal mode: preprocess the training data first, then build the model from the data's features
-        model = ts.train_init() if ts.opt.Model['add_train'] else ts.get_keras_model(ts.opt)
-        if ts.opt.Model['add_train']:
+        model = fmi.train_init() if fmi.opt.Model['add_train'] else fmi.get_keras_model(fmi.opt)
+        if fmi.opt.Model['add_train']:
             if model:
-                feas = json.loads(ts.model_params).get('features', dh.opt.features)
+                feas = json.loads(fmi.model_params).get('features', dh.opt.features)
                 if set(feas).issubset(set(dh.opt.features)):
                     dh.opt.features = list(feas)
                     train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data)
                 else:
-                    model = ts.get_keras_model(ts.opt)
+                    model = fmi.get_keras_model(fmi.opt)
                     logger.info("训练数据特征,不满足,加强训练模型特征")
             else:
-                model = ts.get_keras_model(ts.opt)
+                model = fmi.get_keras_model(fmi.opt)
 
-        ts_model = ts.training(model, [train_x, valid_x, train_y, valid_y])
+        ts_model = fmi.training(model, [train_x, valid_x, train_y, valid_y])
         success = 1
         # 更新算法状态:1. 启动成功
         write_number_to_file(os.path.join(output_file, status_file), 1, 1, 'rewrite')

+ 119 - 0
app/predict/tf_cnn_pre.py

@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+# -*- coding:utf-8 -*-
+# @FileName  :tf_cnn_pre.py
+# @Time      :2025/2/13 10:52
+# @Author    :David
+# @Company: shenyang JY
+import os.path
+
+import numpy as np
+import logging, argparse, traceback
+from app.common.data_handler import DataHandler, write_number_to_file
+from threading import Lock
+import time, json
+
+model_lock = Lock()
+from itertools import chain
+from app.common.logs import logger, args
+from app.common.tf_cnn import CNNHandler
+from app.common.dbmg import MongoUtils
+
+np.random.seed(42)  # NumPy random seed
+
+
+dh = DataHandler(logger, args)
+cnn = CNNHandler(logger, args)
+mgUtils = MongoUtils(logger)
+
+
+def model_prediction(pre_data, input_file, cap):
+    # Record the program start time
+    start_time = time.time()
+    result = {}
+    success = 0
+    print("Program starts execution!")
+    farm_id = input_file.split('/')[-2]
+    output_file = input_file.replace('IN', 'OUT')
+    file = 'DQYC_OUT_PREDICT_POWER.txt'
+    status_file = 'STATUS.TXT'
+    try:
+        args['model_table'] += farm_id
+        args['scaler_table'] += farm_id
+        feature_scaler, target_scaler = mgUtils.get_scaler_model_from_mongo(args)
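+        # express the installed capacity (cap) in the target scaler's normalized space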
+        cnn.opt.cap = round(target_scaler.transform(np.array([[float(cap)]]))[0, 0], 2)
+        cnn.get_model(args)
+        dh.opt.features = json.loads(cnn.model_params).get('Model').get('features', ','.join(cnn.opt.features)).split(',')
+        scaled_pre_x, pre_data = dh.pre_data_handler(pre_data, feature_scaler)
+
+        success = 1
+        # Update algorithm status: 1. started successfully
+        write_number_to_file(os.path.join(output_file, status_file), 1, 1, 'rewrite')
+        logger.info("算法启动成功")
+        res = list(chain.from_iterable(target_scaler.inverse_transform([cnn.predict(scaled_pre_x).flatten()])))
+        pre_data['Power'] = res[:len(pre_data)]
+        pre_data['PlantID'] = farm_id
+        pre_data = pre_data[['PlantID', args['col_time'], 'Power']]
+
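+        # Post-process: round the predicted power and clip it to [0, installed capacity]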
+        pre_data.loc[:, 'Power'] = pre_data['Power'].round(2)
+        pre_data.loc[pre_data['Power'] > args['cap'], 'Power'] = args['cap']
+        pre_data.loc[pre_data['Power'] < 0, 'Power'] = 0
+        pre_data.to_csv(os.path.join(output_file, file), sep=' ', index=False)
+        # Update algorithm status: finished normally
+        write_number_to_file(os.path.join(output_file, status_file), 2, 2)
+        logger.info("算法正常结束")
+    except Exception as e:
+        # If the algorithm never reached the started state, do not update the status
+        if success:
+            write_number_to_file(os.path.join(output_file, status_file), 2, 3)
+        my_exception = traceback.format_exc()
+        my_exception = my_exception.replace("\n", "\t")  # str.replace returns a new string; assign it back
+        result['msg'] = my_exception
+        logger.info("Abnormal algorithm status: {}".format(my_exception))
+    end_time = time.time()
+
+    result['success'] = success
+    result['args'] = args
+    result['start_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))
+    result['end_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))
+    print("Program execution ends!")
+    return result
+
+
+if __name__ == "__main__":
+    print("Program starts execution!")
+    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    logger = logging.getLogger("model_training_bp log")
+
+    # serve(app, host="0.0.0.0", port=1010x, threads=4)
+    print("server start!")
+
+    # ------------------------ Test code ------------------------
+    args_dict = {"mongodb_database": 'david_test', 'scaler_table': 'j00083_scaler', 'model_name': 'bp1.0.test',
+                 'model_table': 'j00083_model', 'mongodb_read_table': 'j00083_test', 'col_time': 'date_time', 'mongodb_write_table': 'j00083_rs',
+                 'features': 'speed10,direction10,speed30,direction30,speed50,direction50,speed70,direction70,speed90,direction90,speed110,direction110,speed150,direction150,speed170,direction170'}
+    args_dict['features'] = args_dict['features'].split(',')
+    args.update(args_dict)
+    dh = DataHandler(logger, args)
+    cnn = CNNHandler(logger, args)
+    opt = argparse.Namespace(**args)
+
+    opt.Model['input_size'] = len(opt.features)
+    pre_data = get_data_from_mongo(args_dict)  # assumed project helper; no import is provided in this test block
+    feature_scaler, target_scaler = mgUtils.get_scaler_model_from_mongo(args)
+    scaled_pre_x, pre_data = dh.pre_data_handler(pre_data, feature_scaler)
+    cnn.get_model(args)
+    result = cnn.predict(scaled_pre_x)
+    result1 = list(chain.from_iterable(target_scaler.inverse_transform([result.flatten()])))
+    pre_data['power_forecast'] = result1[:len(pre_data)]
+    pre_data['farm_id'] = 'J00083'
+    pre_data['cdq'] = 1
+    pre_data['dq'] = 1
+    pre_data['zq'] = 1
+    pre_data.rename(columns={args['col_time']: 'date_time'}, inplace=True)
+    pre_data = pre_data[['date_time', 'power_forecast', 'farm_id', 'cdq', 'dq', 'zq']]
+
+    pre_data['power_forecast'] = pre_data['power_forecast'].round(2)
+    pre_data.loc[pre_data['power_forecast'] > opt.cap, 'power_forecast'] = opt.cap
+    pre_data.loc[pre_data['power_forecast'] < 0, 'power_forecast'] = 0
+
+    insert_data_into_mongo(pre_data, args)  # assumed project helper; no import is provided in this test block

+ 5 - 5
app/predict/tf_fmi_pre.py

@@ -22,7 +22,7 @@ np.random.seed(42)  # NumPy random seed
 
 
 dh = DataHandler(logger, args)
-ts = FMIHandler(logger, args)
+fmi = FMIHandler(logger, args)
 mgUtils = MongoUtils(logger)
 
 
@@ -40,16 +40,16 @@ def model_prediction(pre_data, input_file, cap):
         args['model_table'] += farm_id
         args['scaler_table'] += farm_id
         feature_scaler, target_scaler = mgUtils.get_scaler_model_from_mongo(args)
-        ts.opt.cap = round(target_scaler.transform(np.array([[float(cap)]]))[0, 0], 2)
-        ts.get_model(args)
-        dh.opt.features = json.loads(ts.model_params).get('Model').get('features', ','.join(ts.opt.features)).split(',')
+        fmi.opt.cap = round(target_scaler.transform(np.array([[float(cap)]]))[0, 0], 2)
+        fmi.get_model(args)
+        dh.opt.features = json.loads(fmi.model_params).get('Model').get('features', ','.join(fmi.opt.features)).split(',')
         scaled_pre_x, pre_data = dh.pre_data_handler(pre_data, feature_scaler)
 
         success = 1
         # Update algorithm status: 1. started successfully
         write_number_to_file(os.path.join(output_file, status_file), 1, 1, 'rewrite')
         logger.info("Algorithm started successfully")
-        res = list(chain.from_iterable(target_scaler.inverse_transform([ts.predict(scaled_pre_x).flatten()])))
+        res = list(chain.from_iterable(target_scaler.inverse_transform([fmi.predict(scaled_pre_x).flatten()])))
         pre_data['Power'] = res[:len(pre_data)]
         pre_data['PlantID'] = farm_id
         pre_data = pre_data[['PlantID', args['col_time'], 'Power']]