
Merge branch 'dev_david' of anweiguo/algorithm_platform into dev_awg

liudawei, 4 months ago
parent commit 7608607e8b

+ 1 - 1
common/data_cleaning.py

@@ -74,7 +74,7 @@ def rm_duplicated(data, logger):
     """
     # 按照时间去重
     rows_pre = len(data)
-    data = data.drop_duplicates(subset='C_TIME')
+    data = data.drop_duplicates(subset='dateTime')
     rows_late = len(data)
     if rows_pre - rows_late > 0:
         logger.info("时间去重的行数有:{}".format(rows_pre - rows_late))

+ 9 - 7
common/database_dml.py

@@ -175,7 +175,7 @@ def insert_h5_model_into_mongo(model,feature_scaler_bytes,target_scaler_bytes ,a
     })
     print("模型成功保存到 MongoDB!")
 
-def insert_trained_model_into_mongo(model ,args):
+def insert_trained_model_into_mongo(model, args):
     mongodb_connection,mongodb_database,model_table,model_name = ("mongodb://root:sdhjfREWFWEF23e@192.168.1.43:30000/",
                                 args['mongodb_database'],args['model_table'],args['model_name'])
 
@@ -204,7 +204,7 @@ def insert_trained_model_into_mongo(model ,args):
     })
     print("模型成功保存到 MongoDB!")
 
-def insert_scaler_model_into_mongo(feature_scaler_bytes, args):
+def insert_scaler_model_into_mongo(feature_scaler_bytes, scaled_target_bytes, args):
     mongodb_connection,mongodb_database,scaler_table,model_table,model_name = ("mongodb://root:sdhjfREWFWEF23e@192.168.1.43:30000/",
                                 args['mongodb_database'],args['scaler_table'],args['model_table'],args['model_name'])
     client = MongoClient(mongodb_connection)
@@ -216,11 +216,12 @@ def insert_scaler_model_into_mongo(feature_scaler_bytes, args):
     # Save the scalers in MongoDB as binary data
     collection.insert_one({
         "feature_scaler": feature_scaler_bytes.read(),
+        "target_scaler": scaled_target_bytes.read()
     })
     print("scaler_model inserted successfully!")
 
 
-def get_h5_model_from_mongo(args):
+def get_h5_model_from_mongo(args, custom=None):
     mongodb_connection,mongodb_database,model_table,model_name = "mongodb://root:sdhjfREWFWEF23e@192.168.1.43:30000/",args['mongodb_database'],args['model_table'],args['model_name']
     client = MongoClient(mongodb_connection)
     # 选择数据库(如果数据库不存在,MongoDB 会自动创建)
@@ -236,7 +237,7 @@ def get_h5_model_from_mongo(args):
         # 从缓冲区加载模型
          # 使用 h5py 和 BytesIO 从内存中加载模型
         with h5py.File(model_buffer, 'r') as f:
-            model = tf.keras.models.load_model(f)
+            model = tf.keras.models.load_model(f, custom_objects=custom)
         print(f"{model_name}模型成功从 MongoDB 加载!")
         client.close()
         return model
@@ -246,9 +247,8 @@ def get_h5_model_from_mongo(args):
         return None
 
 
-def get_scaler_model_from_mongo(args):
-    mongodb_connection, mongodb_database, scaler_table, = ("mongodb://root:sdhjfREWFWEF23e@192.168.1.43:30000/",
-                                                           args['mongodb_database'], args['scaler_table'])
+def get_scaler_model_from_mongo(args, only_feature_scaler=False):
+    mongodb_connection, mongodb_database, scaler_table, = ("mongodb://root:sdhjfREWFWEF23e@192.168.1.43:30000/", args['mongodb_database'], args['scaler_table'])
     client = MongoClient(mongodb_connection)
     # 选择数据库(如果数据库不存在,MongoDB 会自动创建)
     db = client[mongodb_database]
@@ -259,6 +259,8 @@ def get_scaler_model_from_mongo(args):
 
     feature_scaler_bytes = BytesIO(scaler_doc["feature_scaler"])
     feature_scaler = joblib.load(feature_scaler_bytes)
+    if only_feature_scaler:
+        return feature_scaler
     target_scaler_bytes = BytesIO(scaler_doc["target_scaler"])
     target_scaler = joblib.load(target_scaler_bytes)
     return feature_scaler,target_scaler
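insert_scaler_model_into_mongo now persists the target scaler alongside the feature scaler, and get_scaler_model_from_mongo can return either both scalers or only the feature one. The stored fields are plain joblib dumps read from BytesIO buffers, so the round trip looks roughly like the sketch below; MongoDB is replaced by a dict to keep it self-contained, and only the field names (feature_scaler, target_scaler) come from the diff:

```python
from io import BytesIO

import joblib
import numpy as np
from sklearn.preprocessing import MinMaxScaler

# Fit the two scalers the same way train_data_handler does.
features = np.random.rand(100, 3)
target = np.random.rand(100, 1)
feature_scaler = MinMaxScaler(feature_range=(0, 1)).fit(features)
target_scaler = MinMaxScaler(feature_range=(0, 1)).fit(target)

# Serialize to in-memory byte streams (what gets inserted as binary fields).
feature_bytes, target_bytes = BytesIO(), BytesIO()
joblib.dump(feature_scaler, feature_bytes)
joblib.dump(target_scaler, target_bytes)
feature_bytes.seek(0)
target_bytes.seek(0)

# Stand-in for collection.insert_one({...}); a real run would use pymongo.
scaler_doc = {
    "feature_scaler": feature_bytes.read(),
    "target_scaler": target_bytes.read(),
}

# Reading back mirrors get_scaler_model_from_mongo.
restored_feature = joblib.load(BytesIO(scaler_doc["feature_scaler"]))
restored_target = joblib.load(BytesIO(scaler_doc["target_scaler"]))
print(restored_feature.transform(features[:2]))
print(restored_target.inverse_transform(restored_target.transform(target[:2])))
```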

+ 142 - 28
data_processing/data_operation/data_handler.py

@@ -4,8 +4,12 @@
 # @Time      :2025/1/8 14:56
 # @Author    :David
 # @Company: shenyang JY
-import argparse
+import argparse, numbers, joblib
 import pandas as pd
+from io import BytesIO
+from bson.decimal128 import Decimal128
+from sklearn.preprocessing import MinMaxScaler
+from common.processing_data_common import missing_features, str_to_list
 from common.data_cleaning import *
 
 class DataHandler(object):
@@ -13,58 +17,92 @@ class DataHandler(object):
         self.logger = logger
         self.opt = argparse.Namespace(**args)
 
-    def get_train_data(self, df):
+    def get_train_data(self, dfs, col_time, features, target):
         train_x, valid_x, train_y, valid_y = [], [], [], []
-        if len(df) < self.opt.Model["time_step"]:
-            self.logger.info("特征处理-训练数据-不满足time_step")
-        datax, datay = self.get_timestep_features(df, is_train=True)
-        if len(datax) < 10:
-            self.logger.info("特征处理-训练数据-无法进行最小分割")
-        tx, vx, ty, vy = self.train_valid_split(datax, datay, valid_rate=self.opt.Model["valid_data_rate"], shuffle=self.opt.Model['shuffle_train_data'])
-        train_x.extend(tx)
-        valid_x.extend(vx)
-        train_y.extend(ty)
-        valid_y.extend(vy)
+        for i, df in enumerate(dfs, start=1):
+            if len(df) < self.opt.Model["time_step"]:
+                self.logger.info("特征处理-训练数据-不满足time_step")
+            datax, datay = self.get_timestep_features(df, col_time, features, target, is_train=True)
+            if len(datax) < 10:
+                self.logger.info("特征处理-训练数据-无法进行最小分割")
+                continue
+            tx, vx, ty, vy = self.train_valid_split(datax, datay, valid_rate=self.opt.Model["valid_data_rate"], shuffle=self.opt.Model['shuffle_train_data'])
+            train_x.extend(tx)
+            valid_x.extend(vx)
+            train_y.extend(ty)
+            valid_y.extend(vy)
 
         train_y = np.concatenate([[y.iloc[:, 1].values for y in train_y]], axis=0)
         valid_y = np.concatenate([[y.iloc[:, 1].values for y in valid_y]], axis=0)
 
-        train_x = [np.array([x[0].values for x in train_x]), np.array([x[1].values for x in train_x])]
-        valid_x = [np.array([x[0].values for x in valid_x]), np.array([x[1].values for x in valid_x])]
+        train_x = np.array([x.values for x in train_x])
+        valid_x = np.array([x.values for x in valid_x])
 
         return train_x, valid_x, train_y, valid_y
 
-    def get_timestep_features(self, norm_data, is_train):   # 这段代码基于pandas方法的优化
+    def get_predict_data(self, dfs, features):
+        test_x = []
+        for i, df in enumerate(dfs, start=1):
+            if len(df) < self.opt.Model["time_step"]:
+                self.logger.info("特征处理-预测数据-不满足time_step")
+                continue
+            datax = self.get_predict_features(df, features)
+            test_x.extend(datax)
+        test_x = np.array(test_x)
+        return test_x
+
+    def get_predict_features(self, norm_data, features):
+        """
+        均分数据,获取预测数据集
+        """
+        time_step = self.opt.Model["time_step"]
+        feature_data = norm_data.reset_index(drop=True)
+        time_step_loc = time_step - 1
+        iters = int(len(feature_data)) // self.opt.Model['time_step']
+        end = int(len(feature_data)) % self.opt.Model['time_step']
+        features_x = np.array([feature_data.loc[i*time_step:i*time_step + time_step_loc, features].reset_index(drop=True) for i in range(iters)])
+        if end > 0:
+            df = feature_data.tail(end)
+            df_repeated = pd.concat([df] + [pd.DataFrame([df.iloc[0]]* (time_step-end))]).reset_index(drop=True)
+            features_x = np.concatenate((features_x, np.expand_dims(df_repeated, 0)), axis=0)
+        return features_x
+
+    def get_timestep_features(self, norm_data, col_time, features, target, is_train):
+        """
+        步长分割数据,获取时序训练集
+        """
         time_step = self.opt.Model["time_step"]
         feature_data = norm_data.reset_index(drop=True)
         time_step_loc = time_step - 1
         train_num = int(len(feature_data))
-        label_features = ['C_TIME', 'C_REAL_VALUE'] if is_train is True else ['C_TIME', 'C_REAL_VALUE']
-        nwp_cs = self.opt.features
+        label_features = [col_time, target] if is_train is True else [col_time, target]
+        nwp_cs = features
         nwp = [feature_data.loc[i:i + time_step_loc, nwp_cs].reset_index(drop=True) for i in range(train_num - time_step + 1)]  # 数据库字段 'C_T': 'C_WS170'
         labels = [feature_data.loc[i:i + time_step_loc, label_features].reset_index(drop=True) for i in range(train_num - time_step + 1)]
         features_x, features_y = [], []
-        self.logger.info("匹配环境前,{}组 -> ".format(len(nwp)))
         for i, row in enumerate(zip(nwp, labels)):
             features_x.append(row[0])
             features_y.append(row[1])
-        self.logger.info("匹配环境后,{}组".format(len(features_x)))
         return features_x, features_y
 
-    def fill_train_data(self, unite):
-        unite['C_TIME'] = pd.to_datetime(unite['C_TIME'])
-        unite['time_diff'] = unite['C_TIME'].diff()
+    def fill_train_data(self, unite, col_time):
+        """
+        补值
+        """
+        unite[col_time] = pd.to_datetime(unite[col_time])
+        unite['time_diff'] = unite[col_time].diff()
         dt_short = pd.Timedelta(minutes=15)
         dt_long = pd.Timedelta(minutes=15 * self.opt.Model['how_long_fill'])
-        data_train = self.missing_time_splite(unite, dt_short, dt_long)
+        data_train = self.missing_time_splite(unite, dt_short, dt_long, col_time)
         miss_points = unite[(unite['time_diff'] > dt_short) & (unite['time_diff'] < dt_long)]
         miss_number = miss_points['time_diff'].dt.total_seconds().sum(axis=0) / (15 * 60) - len(miss_points)
         self.logger.info("再次测算,需要插值的总点数为:{}".format(miss_number))
         if miss_number > 0 and self.opt.Model["train_data_fill"]:
-            data_train = self.data_fill(data_train)
+            data_train = self.data_fill(data_train, col_time)
         return data_train
 
-    def missing_time_splite(self, df, dt_short, dt_long):
+    def missing_time_splite(self, df, dt_short, dt_long, col_time):
+        df.reset_index(drop=True, inplace=True)
         n_long, n_short, n_points = 0, 0, 0
         start_index = 0
         dfs = []
@@ -75,7 +113,7 @@ class DataHandler(object):
                 start_index = i
                 n_long += 1
             if df['time_diff'][i] > dt_short:
-                self.logger.info(f"{df['C_TIME'][i-1]} ~ {df['C_TIME'][i]}")
+                self.logger.info(f"{df[col_time][i-1]} ~ {df[col_time][i]}")
                 points = df['time_diff'].dt.total_seconds()[i]/(60*15)-1
                 self.logger.info("缺失点数:{}".format(points))
                 if df['time_diff'][i] < dt_long:
@@ -87,11 +125,11 @@ class DataHandler(object):
         self.logger.info("需要补值的总点数:{}".format(n_points))
         return dfs
 
-    def data_fill(self, dfs, test=False):
+    def data_fill(self, dfs, col_time, test=False):
         dfs_fill, inserts = [], 0
         for i, df in enumerate(dfs):
             df = rm_duplicated(df, self.logger)
-            df1 = df.set_index('C_TIME', inplace=False)
+            df1 = df.set_index(col_time, inplace=False)
             dff = df1.resample('15T').interpolate(method='linear')  # 采用线性补值,其他补值方法需要进一步对比
             dff.reset_index(inplace=True)
             points = len(dff) - len(df1)
@@ -117,3 +155,79 @@ class DataHandler(object):
                 vx.append(data[0])
                 vy.append(data[1])
         return tx, vx, ty, vy
+
+    def train_data_handler(self, data, opt, bp_data=False):
+        """
+        训练数据预处理:
+        清洗+补值+归一化
+        Args:
+            data: 从mongo中加载的数据
+            opt:参数命名空间
+        return:
+            x_train
+            x_valid
+            y_train
+            y_valid
+        """
+        col_time, features, target = opt.col_time, opt.features, opt.target
+        # 清洗处理好的限电记录
+        if 'is_limit' in data.columns:
+            data = data[data['is_limit'] == False]
+        # 筛选特征,数值化
+        train_data = data[[col_time] + features + [target]]
+        # 清洗特征平均缺失率大于20%的天
+        # train_data = missing_features(train_data, features, col_time)
+        train_data = train_data.sort_values(by=col_time)
+        # train_data = train_data.sort_values(by=col_time).fillna(method='ffill').fillna(method='bfill')
+        # 对清洗完限电的数据进行特征预处理:1.空值异常值清洗 2.缺值补值
+        train_data_cleaned = key_field_row_cleaning(train_data, features + [target], self.logger)
+        train_data_cleaned = train_data_cleaned.applymap(
+            lambda x: float(x.to_decimal()) if isinstance(x, Decimal128) else float(x) if isinstance(x, numbers.Number) else x)
+        # 创建特征和目标的标准化器
+        train_scaler = MinMaxScaler(feature_range=(0, 1))
+        target_scaler = MinMaxScaler(feature_range=(0, 1))
+        # 标准化特征和目标
+        scaled_train_data = train_scaler.fit_transform(train_data_cleaned[features])
+        scaled_target = target_scaler.fit_transform(train_data_cleaned[[target]])
+        train_data_cleaned[features] = scaled_train_data
+        train_data_cleaned[[target]] = scaled_target
+
+        train_datas = self.fill_train_data(train_data_cleaned, col_time)
+        # 保存两个scaler
+        scaled_train_bytes = BytesIO()
+        scaled_target_bytes = BytesIO()
+
+        joblib.dump(train_scaler, scaled_train_bytes)
+        joblib.dump(target_scaler, scaled_target_bytes)
+        scaled_train_bytes.seek(0)  # Reset pointer to the beginning of the byte stream
+        scaled_target_bytes.seek(0)
+
+        if bp_data:
+            train_data = pd.concat(train_datas, axis=0)
+            train_x, valid_x, train_y, valid_y = self.train_valid_split(train_data, scaled_target, valid_rate=self.opt.Model["valid_data_rate"], shuffle=self.opt.Model['shuffle_train_data'])
+        else:
+            train_x, valid_x, train_y, valid_y = self.get_train_data(train_datas, col_time, features, target)
+        return train_x, valid_x, train_y, valid_y, scaled_train_bytes, scaled_target_bytes
+
+    def pre_data_handler(self, data, feature_scaler, opt, bp_data=False):
+        """
+        预测数据简单处理
+        Args:
+            data: 从mongo中加载的数据
+            opt:参数命名空间
+        return:
+            scaled_features: 反归一化的特征
+        """
+        if 'is_limit' in data.columns:
+            data = data[data['is_limit'] == False]
+        # features, time_steps, col_time, model_name, col_reserve = str_to_list(args['features']), int(
+        #     args['time_steps']), args['col_time'], args['model_name'], str_to_list(args['col_reserve'])
+        col_time, features = opt.col_time, opt.features
+        pre_data = data.sort_values(by=col_time)[features]
+        scaled_features = feature_scaler.transform(pre_data[features])
+        pre_data[features] = scaled_features
+        if bp_data:
+            pre_x = self.get_predict_data([pre_data], features)
+        else:
+            pre_x = pre_data.values
+        return pre_x
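The trickiest of the new DataHandler pieces is get_predict_features: it splits the prediction frame into equal time_step windows and, when the tail is shorter than a full window, pads it by repeating the tail's first row. A standalone sketch of that windowing with a toy frame (feature names and sizes are illustrative):

```python
import numpy as np
import pandas as pd

time_step = 4
features = ["speed10", "direction10"]

# 10 rows: two full windows of 4 plus a 2-row tail.
norm_data = pd.DataFrame({
    "speed10": np.arange(10, dtype=float),
    "direction10": np.arange(10, dtype=float) * 10,
})

feature_data = norm_data.reset_index(drop=True)
time_step_loc = time_step - 1
iters = len(feature_data) // time_step
end = len(feature_data) % time_step

# Full windows, one per block of time_step rows (loc slices are label-inclusive).
features_x = np.array([
    feature_data.loc[i * time_step:i * time_step + time_step_loc, features].reset_index(drop=True)
    for i in range(iters)
])
if end > 0:
    # Pad the short tail by repeating its first row up to time_step rows.
    df = feature_data.tail(end)
    df_repeated = pd.concat([df] + [pd.DataFrame([df.iloc[0]] * (time_step - end))]).reset_index(drop=True)
    features_x = np.concatenate((features_x, np.expand_dims(df_repeated, 0)), axis=0)

print(features_x.shape)  # (3, 4, 2): two full windows plus the padded tail
```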

+ 2 - 1
evaluation_processing/analysis_cdq.py

@@ -93,6 +93,7 @@ def put_analysis_report_to_html(args, df_predict, df_accuracy):
     label_pre = args['label_pre']
     farmId = args['farmId']
     points = args['points'].split(',')
+    cdq_title = '超短期分析报告' + args.get('title', '')
     acc_flag = df_accuracy.shape[0]
     # 获取所有的模型
     models = df_predict['model'].unique()
@@ -231,7 +232,7 @@ def put_analysis_report_to_html(args, df_predict, df_accuracy):
     </head>
     <body>
         <div class="container">
-            <h1>分析报告</h1>
+            <h1>{ cdq_title }</h1>
             <!-- 曲线对比 -->
             <h2>1. 预测功率与实际功率曲线对比</h2>
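The report heading is no longer hard-coded: cdq_title prepends '超短期分析报告' to an optional title passed in args and is interpolated into the HTML template. A tiny sketch, assuming args is the request dict used elsewhere in the function and that the template is an f-string:

```python
args = {"label_pre": "cdq", "farmId": "J00083", "title": "-J00083"}

# Falls back to the bare report name when no title is supplied.
cdq_title = '超短期分析报告' + args.get('title', '')

html = f"""
<div class="container">
    <h1>{cdq_title}</h1>
    <h2>1. 预测功率与实际功率曲线对比</h2>
</div>
"""
print(html)
```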
    

+ 3 - 0
models_processing/losses/loss_cdq.py

@@ -9,6 +9,9 @@ import tensorflow as tf
 tf.compat.v1.set_random_seed(1234)
 
 
+def rmse(y_true, y_pred):
+    return K.sqrt(K.mean(K.square(y_pred - y_true)))
+
 class SouthLoss(tf.keras.losses.Loss):
     def __init__(self, opt, name='south_loss'):
         """

+ 3 - 4
models_processing/model_koi/bp.yaml

@@ -7,8 +7,7 @@ Model:
   hidden_size: 64
   his_points: 16
   how_long_fill: 10
-  input_size_env: 5
-  input_size_nwp: 24
+  input_size: 24
   lambda_value_1: 0.02
   lambda_value_2: 0.01
   learning_rate: 0.001
@@ -86,8 +85,8 @@ features:
 - direction60
 - direction140
 - speed40
-- hcc', 'clearskyGhi', 'temperature130', 'lcc', 'updater', 'speed90', 'temperature2', 'tcc', 'direction100', 'speed170', 'temperature70', 'speed130', 'direction190', 'openCapacity', 'temperature40', 'creator', 'direction10', 'temperature180', 'direction150', 'direction50', 'speed50', 'updateTime', 'direction90', 'farmId', 'temperature100', 'speed10', 'temperature140', 'speed120', 'deleted', 'speed200', 'realPower', 'createTime', 'radiation', 'surfacePressure', 'tpr', 'direction110', 'time', 'speed160', 'temperature80']
-target: C_REAL_VALUE
+- hcc
+target: realPower
 repair_days: 81
 repair_model_cycle: 5
 spot_trading: []
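bp.yaml (and the new cnn.yaml/lstm.yaml) are consumed by the training and prediction services: the YAML is loaded once at startup, request parameters are merged on top, and the result is wrapped in an argparse.Namespace with Model['input_size'] rewritten to the length of the feature list. A sketch of that flow, assuming the relative path used in tf_bp_train.py; the override values are illustrative:

```python
import argparse
import yaml

with open('../model_koi/bp.yaml', 'r', encoding='utf-8') as f:
    arguments = yaml.safe_load(f)

# Merge request-style overrides on top of the YAML defaults.
args_dict = {
    'col_time': 'dateTime',
    'features': 'speed10,direction10,speed30,direction30'.split(','),
}
arguments.update(args_dict)

opt = argparse.Namespace(**arguments)
opt.Model['input_size'] = len(opt.features)  # keep the network input width in sync
print(opt.Model['input_size'], opt.target)
```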

+ 107 - 0
models_processing/model_koi/cnn.yaml

@@ -0,0 +1,107 @@
+Model:
+  add_train: false
+  batch_size: 64
+  dropout_rate: 0.2
+  epoch: 100
+  fusion: true
+  hidden_size: 64
+  his_points: 16
+  how_long_fill: 10
+  input_size: 24
+  lambda_value_1: 0.02
+  lambda_value_2: 0.01
+  learning_rate: 0.001
+  lstm_layers: 1
+  output_size: 16
+  patience: 10
+  predict_data_fill: true
+  region: south129
+  shuffle_train_data: false
+  test_data_fill: false
+  time_step: 16
+  train_data_fill: false
+  use_cuda: false
+  valid_data_rate: 0.15
+authentication:
+  date: '2025-01-08'
+  full_cap: '2024-04-30'
+  repair: '2025-01-08'
+calculate: []
+cap: 50.0
+dataloc: ./data
+env_columns:
+- C_TIME
+- C_CELLT
+- C_DIFFUSER
+- C_GLOBALR
+- C_RH
+- C_REAL_VALUE
+full_field: true
+history_hours: 1
+new_field: true
+features:
+- time
+- temperature10
+- temperature190
+- direction160
+- direction40
+- temperature110
+- direction80
+- speed60
+- mcc
+- temperature150
+- speed20
+- speed110
+- direction120
+- speed190
+- solarZenith
+- temperature90
+- direction200
+- speed150
+- temperature50
+- direction30
+- temperature160
+- direction170
+- temperature20
+- direction70
+- direction130
+- temperature200
+- speed70
+- temperature120
+- speed30
+- speed100
+- speed80
+- speed180
+- dniCalcd
+- speed140
+- temperature60
+- dateTime
+- temperature30
+- temperature170
+- direction20
+- humidity2
+- direction180
+- realPowerAvg
+- direction60
+- direction140
+- speed40
+- hcc
+target: realPower
+repair_days: 81
+repair_model_cycle: 5
+spot_trading: []
+update_add_train_days: 60
+update_coe_days: 3
+usable_power:
+  api_able_power: true
+  bias: 2.524
+  clean_power_which: 1
+  coe: 4
+  down_fractile: 30
+  env: C_GLOBALR
+  k: 0.04079
+  outliers_threshold: 1.5
+  up_fractile: 70
+version: solar-3.1.0.south
+weatherloc:
+- 1

+ 107 - 0
models_processing/model_koi/lstm.yaml

@@ -0,0 +1,107 @@
+Model:
+  add_train: false
+  batch_size: 64
+  dropout_rate: 0.2
+  epoch: 100
+  fusion: true
+  hidden_size: 64
+  his_points: 16
+  how_long_fill: 10
+  input_size: 24
+  lambda_value_1: 0.02
+  lambda_value_2: 0.01
+  learning_rate: 0.001
+  lstm_layers: 1
+  output_size: 16
+  patience: 10
+  predict_data_fill: true
+  region: south129
+  shuffle_train_data: false
+  test_data_fill: false
+  time_step: 16
+  train_data_fill: false
+  use_cuda: false
+  valid_data_rate: 0.15
+authentication:
+  date: '2025-01-08'
+  full_cap: '2024-04-30'
+  repair: '2025-01-08'
+calculate: []
+cap: 50.0
+dataloc: ./data
+env_columns:
+- C_TIME
+- C_CELLT
+- C_DIFFUSER
+- C_GLOBALR
+- C_RH
+- C_REAL_VALUE
+full_field: true
+history_hours: 1
+new_field: true
+features:
+- time
+- temperature10
+- temperature190
+- direction160
+- direction40
+- temperature110
+- direction80
+- speed60
+- mcc
+- temperature150
+- speed20
+- speed110
+- direction120
+- speed190
+- solarZenith
+- temperature90
+- direction200
+- speed150
+- temperature50
+- direction30
+- temperature160
+- direction170
+- temperature20
+- direction70
+- direction130
+- temperature200
+- speed70
+- temperature120
+- speed30
+- speed100
+- speed80
+- speed180
+- dniCalcd
+- speed140
+- temperature60
+- dateTime
+- temperature30
+- temperature170
+- direction20
+- humidity2
+- direction180
+- realPowerAvg
+- direction60
+- direction140
+- speed40
+- hcc
+target: realPower
+repair_days: 81
+repair_model_cycle: 5
+spot_trading: []
+update_add_train_days: 60
+update_coe_days: 3
+usable_power:
+  api_able_power: true
+  bias: 2.524
+  clean_power_which: 1
+  coe: 4
+  down_fractile: 30
+  env: C_GLOBALR
+  k: 0.04079
+  outliers_threshold: 1.5
+  up_fractile: 70
+version: solar-3.1.0.south
+weatherloc:
+- 1

+ 0 - 237
models_processing/model_koi/nn_bp.py

@@ -1,237 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# time: 2024/5/6 13:25
-# file: time_series.py
-# author: David
-# company: shenyang JY
-import json, copy
-import numpy as np
-from flask import Flask, request
-import time
-import traceback
-import logging, argparse
-from sklearn.preprocessing import MinMaxScaler
-from io import BytesIO
-import joblib
-from tensorflow.keras.layers import Input, Dense, LSTM, concatenate, Conv1D, Conv2D, MaxPooling1D, Reshape, Flatten
-from tensorflow.keras.models import Model, load_model
-from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard, ReduceLROnPlateau
-from tensorflow.keras import optimizers, regularizers
-import tensorflow.keras.backend as K
-import tensorflow as tf
-from common.data_cleaning import cleaning
-from common.database_dml import *
-from common.processing_data_common import missing_features, str_to_list
-from data_processing.data_operation.data_handler import DataHandler
-from threading import Lock
-import time, yaml
-import random
-import matplotlib.pyplot as plt
-model_lock = Lock()
-from common.logs import Log
-logger = logging.getLogger()
-# logger = Log('models-processing').logger
-np.random.seed(42)  # NumPy随机种子
-tf.random.set_random_seed(42)  # TensorFlow随机种子
-app = Flask('nn_bp——service')
-
-with app.app_context():
-    with open('../model_koi/bp.yaml', 'r', encoding='utf-8') as f:
-        arguments = yaml.safe_load(f)
-
-dh = DataHandler(logger, arguments)
-def train_data_handler(data, opt):
-    col_time, features, target = opt.col_time, opt.features, opt.target
-    if 'is_limit' in data.columns:
-        data = data[data['is_limit'] == False]
-    # 清洗特征平均缺失率大于20%的天
-    data = missing_features(data, features, col_time)
-    train_data = data.sort_values(by=col_time).fillna(method='ffill').fillna(method='bfill')
-
-    train_data = train_data.sort_values(by=col_time)
-    # 对清洗完限电的数据进行特征预处理:1.空值异常值清洗 2.缺值补值
-    train_data_cleaned = cleaning(train_data, 'nn_bp:features', logger, features)
-    train_data = dh.fill_train_data(train_data_cleaned)
-    # 创建特征和目标的标准化器
-    train_scaler = MinMaxScaler(feature_range=(0, 1))
-    # 标准化特征和目标
-    scaled_train_data = train_scaler.fit_transform(train_data[features+[target]])
-    # 保存两个scaler
-    scaled_train_bytes = BytesIO()
-    joblib.dump(scaled_train_data, scaled_train_bytes)
-    scaled_train_bytes.seek(0)  # Reset pointer to the beginning of the byte stream
-    x_train, x_valid, y_train, y_valid = dh.get_train_data(scaled_train_data)
-    return x_train, x_valid, y_train, y_valid, scaled_train_bytes
-
-def pre_data_handler(data, args):
-    if 'is_limit' in data.columns:
-        data = data[data['is_limit'] == False]
-    features, time_steps, col_time, model_name,col_reserve =  str_to_list(args['features']), int(args['time_steps']),args['col_time'],args['model_name'],str_to_list(args['col_reserve'])
-    feature_scaler,target_scaler = get_scaler_model_from_mongo(args)
-    pre_data = data.sort_values(by=col_time)
-    scaled_features = feature_scaler.transform(pre_data[features])
-    return scaled_features
-
-class BPHandler(object):
-    def __init__(self, logger):
-        self.logger = logger
-        self.model = None
-
-    def get_model(self, args):
-        """
-        单例模式+线程锁,防止在异步加载时引发线程安全
-        """
-        try:
-            with model_lock:
-                # NPHandler.model = NPHandler.get_keras_model(opt)
-                self.model = get_h5_model_from_mongo(args)
-        except Exception as e:
-            self.logger.info("加载模型权重失败:{}".format(e.args))
-
-    @staticmethod
-    def get_keras_model(opt):
-        # db_loss = NorthEastLoss(opt)
-        # south_loss = SouthLoss(opt)
-        l1_reg = regularizers.l1(opt.Model['lambda_value_1'])
-        l2_reg = regularizers.l2(opt.Model['lambda_value_2'])
-        nwp_input = Input(shape=(opt.Model['time_step'], opt.Model['input_size_nwp']), name='nwp')
-        env_input = Input(shape=(opt.Model['his_points'], opt.Model['input_size_env']), name='env')
-
-        con1 = Conv1D(filters=64, kernel_size=1, strides=1, padding='valid', activation='relu',
-                      kernel_regularizer=l2_reg)(nwp_input)
-        d1 = Dense(32, activation='relu', name='d1', kernel_regularizer=l1_reg)(con1)
-        nwp = Dense(8, activation='relu', name='d2', kernel_regularizer=l1_reg)(d1)
-
-        output = Dense(opt.Model['output_size'], name='d5')(nwp)
-        model = Model([env_input, nwp_input], output)
-        adam = optimizers.Adam(learning_rate=opt.Model['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-7,
-                               amsgrad=True)
-        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.01, patience=5, verbose=1)
-        model.compile(loss='rmse', optimizer=adam)
-        return model
-
-    def train_init(self, opt):
-        try:
-            if opt.Model['add_train']:
-                # 进行加强训练,支持修模
-                base_train_model = get_h5_model_from_mongo(vars(opt))
-                base_train_model.summary()
-                self.logger.info("已加载加强训练基础模型")
-            else:
-                base_train_model = self.get_keras_model(opt)
-            return base_train_model
-        except Exception as e:
-            self.logger.info("加强训练加载模型权重失败:{}".format(e.args))
-
-    def training(self, opt, train_and_valid_data):
-        model = self.train_init(opt)
-        tf.reset_default_graph() # 清除默认图
-        train_x, train_y, valid_x, valid_y = train_and_valid_data
-        print("----------", np.array(train_x[0]).shape)
-        print("++++++++++", np.array(train_x[1]).shape)
-
-        check_point = ModelCheckpoint(filepath='./var/' + 'fmi.h5', monitor='val_loss',
-                                      save_best_only=True, mode='auto')
-        early_stop = EarlyStopping(monitor='val_loss', patience=opt.Model['patience'], mode='auto')
-        history = model.fit(train_x, train_y, batch_size=opt.Model['batch_size'], epochs=opt.Model['epoch'], verbose=2,
-                            validation_data=(valid_x, valid_y), callbacks=[check_point, early_stop], shuffle=False)
-        loss = np.round(history.history['loss'], decimals=5)
-        val_loss = np.round(history.history['val_loss'], decimals=5)
-        self.logger.info("-----模型训练经过{}轮迭代-----".format(len(loss)))
-        self.logger.info("训练集损失函数为:{}".format(loss))
-        self.logger.info("验证集损失函数为:{}".format(val_loss))
-        return model
-
-    def predict(self, test_X, batch_size=1):
-        result = self.model.predict(test_X, batch_size=batch_size)
-        self.logger.info("执行预测方法")
-        return result
-
-@app.route('/model_training_bp', methods=['POST'])
-def model_training_bp():
-    # 获取程序开始时间
-    start_time = time.time()
-    result = {}
-    success = 0
-    bp = BPHandler(logger)
-    print("Program starts execution!")
-    try:
-        args_dict = request.values.to_dict()
-        args = arguments.deepcopy()
-        opt = argparse.Namespace(**args)
-        logger.info(args_dict)
-        train_data = get_data_from_mongo(args_dict)
-        train_x, valid_x, train_y, valid_y, scaled_train_bytes = train_data_handler(train_data, opt)
-        bp_model = bp.training(opt, [train_x, valid_x, train_y, valid_y])
-        args_dict['params'] = json.dumps(args)
-        args_dict['gen_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
-        insert_trained_model_into_mongo(bp_model, args_dict)
-        insert_scaler_model_into_mongo(scaled_train_bytes, args_dict)
-        success = 1
-    except Exception as e:
-        my_exception = traceback.format_exc()
-        my_exception.replace("\n", "\t")
-        result['msg'] = my_exception
-    end_time = time.time()
-
-    result['success'] = success
-    result['args'] = args
-    result['start_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))
-    result['end_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))
-    print("Program execution ends!")
-    return result
-
-
-@app.route('/model_prediction_bp', methods=['POST'])
-def model_prediction_bp():
-    # 获取程序开始时间
-    start_time = time.time()
-    result = {}
-    success = 0
-    bp = BPHandler(logger)
-    print("Program starts execution!")
-    try:
-        params_dict = request.values.to_dict()
-        args = arguments.deepcopy()
-        args.update(params_dict)
-        opt = argparse.Namespace(**args)
-        print('args', args)
-        logger.info(args)
-        predict_data = get_data_from_mongo(args)
-        scaled_features = pre_data_handler(predict_data, args)
-        result = bp.predict(scaled_features, args)
-        insert_data_into_mongo(result, args)
-        success = 1
-    except Exception as e:
-        my_exception = traceback.format_exc()
-        my_exception.replace("\n", "\t")
-        result['msg'] = my_exception
-    end_time = time.time()
-
-    result['success'] = success
-    result['args'] = args
-    result['start_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))
-    result['end_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))
-    print("Program execution ends!")
-    return result
-
-if __name__ == "__main__":
-    print("Program starts execution!")
-    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-    logger = logging.getLogger("model_training_bp log")
-    from waitress import serve
-
-    # serve(app, host="0.0.0.0", port=10103, threads=4)
-    print("server start!")
-
-    bp = BPHandler(logger)
-    args = copy.deepcopy(bp)
-    opt = argparse.Namespace(**arguments)
-    logger.info(args)
-    args_dict = {"mongodb_database": 'david_test', 'scaler_table': 'j00083_scaler', 'model_name': 'bp1.0.test',
-            'model_table': 'j00083_model', 'mongodb_read_table': 'j00083'}
-    train_data = get_data_from_mongo(args_dict)
-    train_x, valid_x, train_y, valid_y, scaled_train_bytes = train_data_handler(train_data, opt)
-    bp_model = bp.training(opt, [train_x, valid_x, train_y, valid_y])
-    insert_trained_model_into_mongo(bp_model, args_dict)
-    insert_scaler_model_into_mongo(scaled_train_bytes, args)

+ 91 - 0
models_processing/model_koi/tf_bp.py

@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+# -*- coding:utf-8 -*-
+# @FileName  :tf_bp.py
+# @Time      :2025/2/13 13:34
+# @Author    :David
+# @Company: shenyang JY
+
+from tensorflow.keras.layers import Input, Dense, LSTM, concatenate, Conv1D, Conv2D, MaxPooling1D, Reshape, Flatten
+from tensorflow.keras.models import Model, load_model
+from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard, ReduceLROnPlateau
+from tensorflow.keras import optimizers, regularizers
+from models_processing.losses.loss_cdq import rmse
+import numpy as np
+from common.database_dml import *
+from threading import Lock
+model_lock = Lock()
+
+class BPHandler(object):
+    def __init__(self, logger):
+        self.logger = logger
+        self.model = None
+
+    def get_model(self, args):
+        """
+        单例模式+线程锁,防止在异步加载时引发线程安全
+        """
+        try:
+            with model_lock:
+                # NPHandler.model = NPHandler.get_keras_model(opt)
+                self.model = get_h5_model_from_mongo(args, {'rmse': rmse})
+        except Exception as e:
+            self.logger.info("加载模型权重失败:{}".format(e.args))
+
+    @staticmethod
+    def get_keras_model(opt):
+        # db_loss = NorthEastLoss(opt)
+        # south_loss = SouthLoss(opt)
+        l1_reg = regularizers.l1(opt.Model['lambda_value_1'])
+        l2_reg = regularizers.l2(opt.Model['lambda_value_2'])
+        nwp_input = Input(shape=(opt.Model['time_step'], opt.Model['input_size']), name='nwp')
+
+        con1 = Conv1D(filters=64, kernel_size=1, strides=1, padding='valid', activation='relu', kernel_regularizer=l2_reg)(nwp_input)
+        d1 = Dense(32, activation='relu', name='d1', kernel_regularizer=l1_reg)(con1)
+        nwp = Dense(8, activation='relu', name='d2', kernel_regularizer=l1_reg)(d1)
+
+        output = Dense(1, name='d5')(nwp)
+        output_f = Flatten()(output)
+        model = Model(nwp_input, output_f)
+        adam = optimizers.Adam(learning_rate=opt.Model['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=True)
+        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.01, patience=5, verbose=1)
+        model.compile(loss=rmse, optimizer=adam)
+        return model
+
+    def train_init(self, opt):
+        try:
+            if opt.Model['add_train']:
+                # 进行加强训练,支持修模
+                base_train_model = get_h5_model_from_mongo(vars(opt), {'rmse': rmse})
+                base_train_model.summary()
+                self.logger.info("已加载加强训练基础模型")
+            else:
+                base_train_model = self.get_keras_model(opt)
+            return base_train_model
+        except Exception as e:
+            self.logger.info("加强训练加载模型权重失败:{}".format(e.args))
+
+    def training(self, opt, train_and_valid_data):
+        model = self.train_init(opt)
+        # tf.reset_default_graph() # 清除默认图
+        train_x, train_y, valid_x, valid_y = train_and_valid_data
+        print("----------", np.array(train_x[0]).shape)
+        print("++++++++++", np.array(train_x[1]).shape)
+        model.summary()
+        early_stop = EarlyStopping(monitor='val_loss', patience=opt.Model['patience'], mode='auto')
+        history = model.fit(train_x, train_y, batch_size=opt.Model['batch_size'], epochs=opt.Model['epoch'], verbose=2,  validation_data=(valid_x, valid_y), callbacks=[early_stop], shuffle=False)
+        loss = np.round(history.history['loss'], decimals=5)
+        val_loss = np.round(history.history['val_loss'], decimals=5)
+        self.logger.info("-----模型训练经过{}轮迭代-----".format(len(loss)))
+        self.logger.info("训练集损失函数为:{}".format(loss))
+        self.logger.info("验证集损失函数为:{}".format(val_loss))
+        return model
+
+    def predict(self, test_x, batch_size=1):
+        result = self.model.predict(test_x, batch_size=batch_size)
+        self.logger.info("执行预测方法")
+        return result
+
+
+if __name__ == "__main__":
+    run_code = 0
+
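Compared with the deleted nn_bp.py, get_keras_model in tf_bp.py drops the separate env input and builds a single NWP branch: a 1x1 Conv1D, two regularized Dense layers, and a per-step Dense(1) head flattened into a time_step-long vector. A quick shape check of that topology, assuming the bp.yaml defaults (time_step=16, input_size=24):

```python
import numpy as np
import tensorflow.keras.backend as K
from tensorflow.keras import optimizers, regularizers
from tensorflow.keras.layers import Input, Dense, Conv1D, Flatten
from tensorflow.keras.models import Model

def rmse(y_true, y_pred):
    return K.sqrt(K.mean(K.square(y_pred - y_true)))

time_step, input_size = 16, 24
l1_reg = regularizers.l1(0.02)
l2_reg = regularizers.l2(0.01)

nwp_input = Input(shape=(time_step, input_size), name='nwp')
con1 = Conv1D(filters=64, kernel_size=1, strides=1, padding='valid',
              activation='relu', kernel_regularizer=l2_reg)(nwp_input)
d1 = Dense(32, activation='relu', name='d1', kernel_regularizer=l1_reg)(con1)
nwp = Dense(8, activation='relu', name='d2', kernel_regularizer=l1_reg)(d1)
output = Flatten()(Dense(1, name='d5')(nwp))  # one value per time step

model = Model(nwp_input, output)
model.compile(loss=rmse, optimizer=optimizers.Adam(learning_rate=0.001))
print(model.predict(np.random.rand(2, time_step, input_size), verbose=0).shape)  # (2, 16)
```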

+ 119 - 0
models_processing/model_koi/tf_bp_pre.py

@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+# -*- coding:utf-8 -*-
+# @FileName  :tf_bp_pre.py
+# @Time      :2025/2/13 13:35
+# @Author    :David
+# @Company: shenyang JY
+import json, copy
+import numpy as np
+from flask import Flask, request
+import logging, argparse, traceback
+from common.database_dml import *
+from common.processing_data_common import missing_features, str_to_list
+from data_processing.data_operation.data_handler import DataHandler
+from threading import Lock
+import time, yaml
+model_lock = Lock()
+from itertools import chain
+from common.logs import Log
+from tf_bp import BPHandler
+# logger = Log('tf_bp').logger()
+logger = Log('tf_bp').logger
+np.random.seed(42)  # NumPy随机种子
+# tf.set_random_seed(42)  # TensorFlow随机种子
+app = Flask('tf_bp_pre——service')
+
+with app.app_context():
+    with open('../model_koi/bp.yaml', 'r', encoding='utf-8') as f:
+        arguments = yaml.safe_load(f)
+
+    dh = DataHandler(logger, arguments)
+    bp = BPHandler(logger)
+
+
+@app.route('/nn_bp_predict', methods=['POST'])
+def model_prediction_bp():
+    # 获取程序开始时间
+    start_time = time.time()
+    result = {}
+    success = 0
+    print("Program starts execution!")
+    params_dict = request.values.to_dict()
+    args = arguments.deepcopy()
+    args.update(params_dict)
+    try:
+        print('args', args)
+        logger.info(args)
+        pre_data = get_data_from_mongo(args)
+        feature_scaler, target_scaler = get_scaler_model_from_mongo(args)
+        scaled_pre_x = dh.pre_data_handler(pre_data, feature_scaler, args, bp_data=True)
+        bp.get_model(args)
+        # result = bp.predict(scaled_pre_x, args)
+        result = list(chain.from_iterable(target_scaler.inverse_transform([bp.predict(scaled_pre_x).flatten()])))
+        pre_data['power_forecast'] = result[:len(pre_data)]
+        pre_data['farm_id'] = 'J00083'
+        pre_data['cdq'] = 1
+        pre_data['dq'] = 1
+        pre_data['zq'] = 1
+        pre_data.rename(columns={arguments['col_time']: 'date_time'}, inplace=True)
+        pre_data = pre_data[['date_time', 'power_forecast', 'farm_id', 'cdq', 'dq', 'zq']]
+
+        pre_data['power_forecast'] = pre_data['power_forecast'].round(2)
+        pre_data.loc[pre_data['power_forecast'] > opt.cap, 'power_forecast'] = opt.cap
+        pre_data.loc[pre_data['power_forecast'] < 0, 'power_forecast'] = 0
+
+        insert_data_into_mongo(pre_data, arguments)
+        success = 1
+    except Exception as e:
+        my_exception = traceback.format_exc()
+        my_exception.replace("\n", "\t")
+        result['msg'] = my_exception
+    end_time = time.time()
+
+    result['success'] = success
+    result['args'] = args
+    result['start_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))
+    result['end_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))
+    print("Program execution ends!")
+    return result
+
+
+if __name__ == "__main__":
+    print("Program starts execution!")
+    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    logger = logging.getLogger("model_training_bp log")
+    from waitress import serve
+
+    # serve(app, host="0.0.0.0", port=1010x, threads=4)
+    print("server start!")
+
+    # ------------------------测试代码------------------------
+    args_dict = {"mongodb_database": 'david_test', 'scaler_table': 'j00083_scaler', 'model_name': 'bp1.0.test',
+                 'model_table': 'j00083_model', 'mongodb_read_table': 'j00083_test', 'col_time': 'date_time', 'mongodb_write_table': 'j00083_rs',
+                 'features': 'speed10,direction10,speed30,direction30,speed50,direction50,speed70,direction70,speed90,direction90,speed110,direction110,speed150,direction150,speed170,direction170'}
+    args_dict['features'] = args_dict['features'].split(',')
+    arguments.update(args_dict)
+    dh = DataHandler(logger, arguments)
+    bp = BPHandler(logger)
+    opt = argparse.Namespace(**arguments)
+
+    opt.Model['input_size'] = len(opt.features)
+    pre_data = get_data_from_mongo(args_dict)
+    feature_scaler, target_scaler = get_scaler_model_from_mongo(arguments)
+    pre_x = dh.pre_data_handler(pre_data, feature_scaler, opt, bp_data=True)
+    bp.get_model(arguments)
+    result = bp.predict(pre_x)
+    result1 = list(chain.from_iterable(target_scaler.inverse_transform([result.flatten()])))
+    pre_data['power_forecast'] = result1[:len(pre_data)]
+    pre_data['farm_id'] = 'J00083'
+    pre_data['cdq'] = 1
+    pre_data['dq'] = 1
+    pre_data['zq'] = 1
+    pre_data.rename(columns={arguments['col_time']: 'date_time'}, inplace=True)
+    pre_data = pre_data[['date_time', 'power_forecast', 'farm_id', 'cdq', 'dq', 'zq']]
+
+    pre_data['power_forecast'] = pre_data['power_forecast'].round(2)
+    pre_data.loc[pre_data['power_forecast'] > opt.cap, 'power_forecast'] = opt.cap
+    pre_data.loc[pre_data['power_forecast'] < 0, 'power_forecast'] = 0
+
+    insert_data_into_mongo(pre_data, arguments)
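All the *_pre.py services post-process the raw network output the same way: flatten it, push it back through the target scaler, trim to the frame length, then round and clip to [0, cap]. A condensed sketch of that tail end, with a fake prediction array standing in for bp.predict; names mirror the script, the data is illustrative:

```python
from itertools import chain

import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

cap = 50.0
pre_data = pd.DataFrame({'dateTime': pd.date_range('2025-01-01', periods=4, freq='15min')})

# Target scaler fitted on power values, as in train_data_handler.
target_scaler = MinMaxScaler(feature_range=(0, 1)).fit(np.array([[0.0], [cap]]))

raw = np.array([[0.1, 0.5, 0.9, 1.2]])  # stand-in for bp.predict(scaled_pre_x)
result = list(chain.from_iterable(target_scaler.inverse_transform([raw.flatten()])))

pre_data['power_forecast'] = result[:len(pre_data)]
pre_data['power_forecast'] = pre_data['power_forecast'].round(2)
pre_data.loc[pre_data['power_forecast'] > cap, 'power_forecast'] = cap  # clip to capacity
pre_data.loc[pre_data['power_forecast'] < 0, 'power_forecast'] = 0
print(pre_data)
```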

+ 92 - 0
models_processing/model_koi/tf_bp_train.py

@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+# -*- coding:utf-8 -*-
+# @FileName  :tf_bp_train.py
+# @Time      :2025/2/13 13:35
+# @Author    :David
+# @Company: shenyang JY
+
+import json, copy
+import numpy as np
+from flask import Flask, request
+import traceback
+import logging, argparse
+from data_processing.data_operation.data_handler import DataHandler
+import time, yaml
+from models_processing.model_koi.tf_bp import BPHandler
+from common.database_dml import *
+import matplotlib.pyplot as plt
+from common.logs import Log
+logger = logging.getLogger()
+# logger = Log('models-processing').logger
+np.random.seed(42)  # NumPy随机种子
+# tf.set_random_seed(42)  # TensorFlow随机种子
+app = Flask('tf_bp_train——service')
+
+with app.app_context():
+    with open('../model_koi/bp.yaml', 'r', encoding='utf-8') as f:
+        arguments = yaml.safe_load(f)
+
+    dh = DataHandler(logger, arguments)
+    bp = BPHandler(logger)
+
+@app.route('/nn_bp_training', methods=['POST'])
+def model_training_bp():
+    # 获取程序开始时间
+    start_time = time.time()
+    result = {}
+    success = 0
+    print("Program starts execution!")
+    args_dict = request.values.to_dict()
+    args = arguments.deepcopy()
+    args.update(args_dict)
+    try:
+        opt = argparse.Namespace(**args)
+        logger.info(args_dict)
+        train_data = get_data_from_mongo(args_dict)
+        train_x, valid_x, train_y, valid_y, scaled_train_bytes, scaled_target_bytes = dh.train_data_handler(train_data, opt, bp_data=True)
+        bp_model = bp.training(opt, [train_x, valid_x, train_y, valid_y])
+        args_dict['params'] = json.dumps(args)
+        args_dict['gen_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
+        insert_trained_model_into_mongo(bp_model, args_dict)
+        insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, args)
+        success = 1
+    except Exception as e:
+        my_exception = traceback.format_exc()
+        my_exception.replace("\n", "\t")
+        result['msg'] = my_exception
+    end_time = time.time()
+
+    result['success'] = success
+    result['args'] = args
+    result['start_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))
+    result['end_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))
+    print("Program execution ends!")
+    return result
+
+
+if __name__ == "__main__":
+    print("Program starts execution!")
+    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    logger = logging.getLogger("model_training_bp log")
+    from waitress import serve
+
+    # serve(app, host="0.0.0.0", port=10103, threads=4)
+    print("server start!")
+    args_dict = {"mongodb_database": 'david_test', 'scaler_table': 'j00083_scaler', 'model_name': 'bp1.0.test',
+    'model_table': 'j00083_model', 'mongodb_read_table': 'j00083', 'col_time': 'dateTime',
+    'features': 'speed10,direction10,speed30,direction30,speed50,direction50,speed70,direction70,speed90,direction90,speed110,direction110,speed150,direction150,speed170,direction170'}
+    args_dict['features'] = args_dict['features'].split(',')
+    arguments.update(args_dict)
+    dh = DataHandler(logger, arguments)
+    bp = BPHandler(logger)
+    opt = argparse.Namespace(**arguments)
+    opt.Model['input_size'] = len(opt.features)
+    train_data = get_data_from_mongo(args_dict)
+    train_x, valid_x, train_y, valid_y, scaled_train_bytes, scaled_target_bytes = dh.train_data_handler(train_data, opt, bp_data=True)
+    bp_model = bp.training(opt, [train_x, train_y, valid_x, valid_y])
+
+    args_dict['gen_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
+    args_dict['params'] = arguments
+    args_dict['descr'] = '测试'
+    insert_trained_model_into_mongo(bp_model, args_dict)
+    insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, args_dict)
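The training service itself is a thin Flask wrapper around DataHandler.train_data_handler and BPHandler.training. A client-side sketch of calling the /nn_bp_training route; the host and port are assumptions (the serve() call in the script is commented out and mentions port 10103), and the payload keys are the ones the __main__ test block uses:

```python
import requests

payload = {
    'mongodb_database': 'david_test',
    'mongodb_read_table': 'j00083',
    'model_table': 'j00083_model',
    'model_name': 'bp1.0.test',
    'scaler_table': 'j00083_scaler',
    'col_time': 'dateTime',
    # features omitted here, so the defaults from bp.yaml apply
}
resp = requests.post('http://127.0.0.1:10103/nn_bp_training', data=payload)
print(resp.json())  # contains success, args, start_time, end_time
```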

+ 91 - 0
models_processing/model_koi/tf_cnn.py

@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+# -*- coding:utf-8 -*-
+# @FileName  :nn_bp.py
+# @Time      :2025/2/12 10:41
+# @Author    :David
+# @Company: shenyang JY
+
+
+from tensorflow.keras.layers import Input, Dense, LSTM, concatenate, Conv1D, Conv2D, MaxPooling1D, Reshape, Flatten
+from tensorflow.keras.models import Model, load_model
+from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard, ReduceLROnPlateau
+from tensorflow.keras import optimizers, regularizers
+from models_processing.losses.loss_cdq import rmse
+import numpy as np
+from common.database_dml import *
+from threading import Lock
+model_lock = Lock()
+
+class CNNHandler(object):
+    def __init__(self, logger):
+        self.logger = logger
+        self.model = None
+
+    def get_model(self, args):
+        """
+        单例模式+线程锁,防止在异步加载时引发线程安全
+        """
+        try:
+            with model_lock:
+                # NPHandler.model = NPHandler.get_keras_model(opt)
+                self.model = get_h5_model_from_mongo(args, {'rmse': rmse})
+        except Exception as e:
+            self.logger.info("加载模型权重失败:{}".format(e.args))
+
+    @staticmethod
+    def get_keras_model(opt):
+        # db_loss = NorthEastLoss(opt)
+        # south_loss = SouthLoss(opt)
+        l1_reg = regularizers.l1(opt.Model['lambda_value_1'])
+        l2_reg = regularizers.l2(opt.Model['lambda_value_2'])
+        nwp_input = Input(shape=(opt.Model['time_step'], opt.Model['input_size']), name='nwp')
+
+        con1 = Conv1D(filters=64, kernel_size=1, strides=1, padding='valid', activation='relu', kernel_regularizer=l2_reg)(nwp_input)
+        d1 = Dense(32, activation='relu', name='d1', kernel_regularizer=l1_reg)(con1)
+        nwp = Dense(8, activation='relu', name='d2', kernel_regularizer=l1_reg)(d1)
+
+        output = Dense(1, name='d5')(nwp)
+        output_f = Flatten()(output)
+        model = Model(nwp_input, output_f)
+        adam = optimizers.Adam(learning_rate=opt.Model['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=True)
+        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.01, patience=5, verbose=1)
+        model.compile(loss=rmse, optimizer=adam)
+        return model
+
+    def train_init(self, opt):
+        try:
+            if opt.Model['add_train']:
+                # 进行加强训练,支持修模
+                base_train_model = get_h5_model_from_mongo(vars(opt), {'rmse': rmse})
+                base_train_model.summary()
+                self.logger.info("已加载加强训练基础模型")
+            else:
+                base_train_model = self.get_keras_model(opt)
+            return base_train_model
+        except Exception as e:
+            self.logger.info("加强训练加载模型权重失败:{}".format(e.args))
+
+    def training(self, opt, train_and_valid_data):
+        model = self.train_init(opt)
+        # tf.reset_default_graph() # 清除默认图
+        train_x, train_y, valid_x, valid_y = train_and_valid_data
+        print("----------", np.array(train_x[0]).shape)
+        print("++++++++++", np.array(train_x[1]).shape)
+        model.summary()
+        early_stop = EarlyStopping(monitor='val_loss', patience=opt.Model['patience'], mode='auto')
+        history = model.fit(train_x, train_y, batch_size=opt.Model['batch_size'], epochs=opt.Model['epoch'], verbose=2,  validation_data=(valid_x, valid_y), callbacks=[early_stop], shuffle=False)
+        loss = np.round(history.history['loss'], decimals=5)
+        val_loss = np.round(history.history['val_loss'], decimals=5)
+        self.logger.info("-----模型训练经过{}轮迭代-----".format(len(loss)))
+        self.logger.info("训练集损失函数为:{}".format(loss))
+        self.logger.info("验证集损失函数为:{}".format(val_loss))
+        return model
+
+    def predict(self, test_x, batch_size=1):
+        result = self.model.predict(test_x, batch_size=batch_size)
+        self.logger.info("执行预测方法")
+        return result
+
+
+if __name__ == "__main__":
+    run_code = 0

+ 119 - 0
models_processing/model_koi/tf_cnn_pre.py

@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+# -*- coding:utf-8 -*-
+# @FileName  :nn_bp_pre.py
+# @Time      :2025/2/12 10:39
+# @Author    :David
+# @Company: shenyang JY
+import json, copy
+import numpy as np
+from flask import Flask, request
+import logging, argparse, traceback
+from common.database_dml import *
+from common.processing_data_common import missing_features, str_to_list
+from data_processing.data_operation.data_handler import DataHandler
+from threading import Lock
+import time, yaml
+model_lock = Lock()
+from itertools import chain
+from common.logs import Log
+from tf_cnn import CNNHandler
+# logger = Log('tf_bp').logger()
+logger = Log('tf_bp').logger
+np.random.seed(42)  # NumPy随机种子
+# tf.set_random_seed(42)  # TensorFlow随机种子
+app = Flask('tf_cnn_pre——service')
+
+with app.app_context():
+    with open('../model_koi/bp.yaml', 'r', encoding='utf-8') as f:
+        arguments = yaml.safe_load(f)
+
+    dh = DataHandler(logger, arguments)
+    cnn = CNNHandler(logger)
+
+
+@app.route('/nn_bp_predict', methods=['POST'])
+def model_prediction_bp():
+    # 获取程序开始时间
+    start_time = time.time()
+    result = {}
+    success = 0
+    print("Program starts execution!")
+    params_dict = request.values.to_dict()
+    args = arguments.deepcopy()
+    args.update(params_dict)
+    try:
+        print('args', args)
+        logger.info(args)
+        pre_data = get_data_from_mongo(args)
+        feature_scaler, target_scaler = get_scaler_model_from_mongo(args)
+        scaled_pre_x = dh.pre_data_handler(pre_data, feature_scaler, args)
+        cnn.get_model(args)
+        # result = bp.predict(scaled_pre_x, args)
+        result = list(chain.from_iterable(target_scaler.inverse_transform([cnn.predict(scaled_pre_x).flatten()])))
+        pre_data['power_forecast'] = result[:len(pre_data)]
+        pre_data['farm_id'] = 'J00083'
+        pre_data['cdq'] = 1
+        pre_data['dq'] = 1
+        pre_data['zq'] = 1
+        pre_data.rename(columns={arguments['col_time']: 'date_time'}, inplace=True)
+        pre_data = pre_data[['date_time', 'power_forecast', 'farm_id', 'cdq', 'dq', 'zq']]
+
+        pre_data['power_forecast'] = pre_data['power_forecast'].round(2)
+        pre_data.loc[pre_data['power_forecast'] > opt.cap, 'power_forecast'] = opt.cap
+        pre_data.loc[pre_data['power_forecast'] < 0, 'power_forecast'] = 0
+
+        insert_data_into_mongo(pre_data, arguments)
+        success = 1
+    except Exception as e:
+        my_exception = traceback.format_exc()
+        my_exception.replace("\n", "\t")
+        result['msg'] = my_exception
+    end_time = time.time()
+
+    result['success'] = success
+    result['args'] = args
+    result['start_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))
+    result['end_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))
+    print("Program execution ends!")
+    return result
+
+
+if __name__ == "__main__":
+    print("Program starts execution!")
+    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    logger = logging.getLogger("model_training_bp log")
+    from waitress import serve
+
+    # serve(app, host="0.0.0.0", port=1010x, threads=4)
+    print("server start!")
+
+    # ------------------------测试代码------------------------
+    args_dict = {"mongodb_database": 'david_test', 'scaler_table': 'j00083_scaler', 'model_name': 'bp1.0.test',
+                 'model_table': 'j00083_model', 'mongodb_read_table': 'j00083_test', 'col_time': 'date_time', 'mongodb_write_table': 'j00083_rs',
+                 'features': 'speed10,direction10,speed30,direction30,speed50,direction50,speed70,direction70,speed90,direction90,speed110,direction110,speed150,direction150,speed170,direction170'}
+    args_dict['features'] = args_dict['features'].split(',')
+    arguments.update(args_dict)
+    dh = DataHandler(logger, arguments)
+    cnn = CNNHandler(logger)
+    opt = argparse.Namespace(**arguments)
+
+    opt.Model['input_size'] = len(opt.features)
+    pre_data = get_data_from_mongo(args_dict)
+    feature_scaler, target_scaler = get_scaler_model_from_mongo(arguments)
+    pre_x = dh.pre_data_handler(pre_data, feature_scaler, opt)
+    cnn.get_model(arguments)
+    result = cnn.predict(pre_x)
+    result1 = list(chain.from_iterable(target_scaler.inverse_transform([result.flatten()])))
+    pre_data['power_forecast'] = result1[:len(pre_data)]
+    pre_data['farm_id'] = 'J00083'
+    pre_data['cdq'] = 1
+    pre_data['dq'] = 1
+    pre_data['zq'] = 1
+    pre_data.rename(columns={arguments['col_time']: 'date_time'}, inplace=True)
+    pre_data = pre_data[['date_time', 'power_forecast', 'farm_id', 'cdq', 'dq', 'zq']]
+
+    pre_data['power_forecast'] = pre_data['power_forecast'].round(2)
+    pre_data.loc[pre_data['power_forecast'] > opt.cap, 'power_forecast'] = opt.cap
+    pre_data.loc[pre_data['power_forecast'] < 0, 'power_forecast'] = 0
+
+    insert_data_into_mongo(pre_data, arguments)

+ 91 - 0
models_processing/model_koi/tf_cnn_train.py

@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# time: 2024/5/6 13:25
+# file: time_series.py
+# author: David
+# company: shenyang JY
+import json, copy
+import numpy as np
+from flask import Flask, request
+import traceback
+import logging, argparse
+from data_processing.data_operation.data_handler import DataHandler
+import time, yaml
+from models_processing.model_koi.tf_cnn import CNNHandler
+from common.database_dml import *
+import matplotlib.pyplot as plt
+from common.logs import Log
+logger = logging.getLogger()
+# logger = Log('models-processing').logger
+np.random.seed(42)  # NumPy随机种子
+# tf.set_random_seed(42)  # TensorFlow随机种子
+app = Flask('tf_cnn_train——service')
+
+with app.app_context():
+    with open('../model_koi/cnn.yaml', 'r', encoding='utf-8') as f:
+        arguments = yaml.safe_load(f)
+
+    dh = DataHandler(logger, arguments)
+    cnn = CNNHandler(logger)
+
+@app.route('/nn_bp_training', methods=['POST'])
+def model_training_bp():
+    # 获取程序开始时间
+    start_time = time.time()
+    result = {}
+    success = 0
+    print("Program starts execution!")
+    args_dict = request.values.to_dict()
+    args = arguments.deepcopy()
+    args.update(args_dict)
+    try:
+        opt = argparse.Namespace(**args)
+        logger.info(args_dict)
+        train_data = get_data_from_mongo(args_dict)
+        train_x, valid_x, train_y, valid_y, scaled_train_bytes, scaled_target_bytes = dh.train_data_handler(train_data, opt)
+        bp_model = cnn.training(opt, [train_x, valid_x, train_y, valid_y])
+        args_dict['params'] = json.dumps(args)
+        args_dict['gen_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
+        insert_trained_model_into_mongo(bp_model, args_dict)
+        insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, args)
+        success = 1
+    except Exception as e:
+        my_exception = traceback.format_exc()
+        my_exception.replace("\n", "\t")
+        result['msg'] = my_exception
+    end_time = time.time()
+
+    result['success'] = success
+    result['args'] = args
+    result['start_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))
+    result['end_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))
+    print("Program execution ends!")
+    return result
+
+
+if __name__ == "__main__":
+    print("Program starts execution!")
+    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    logger = logging.getLogger("model_training_bp log")
+    from waitress import serve
+
+    # serve(app, host="0.0.0.0", port=10103, threads=4)
+    print("server start!")
+    args_dict = {"mongodb_database": 'david_test', 'scaler_table': 'j00083_scaler', 'model_name': 'bp1.0.test',
+    'model_table': 'j00083_model', 'mongodb_read_table': 'j00083', 'col_time': 'dateTime',
+    'features': 'speed10,direction10,speed30,direction30,speed50,direction50,speed70,direction70,speed90,direction90,speed110,direction110,speed150,direction150,speed170,direction170'}
+    args_dict['features'] = args_dict['features'].split(',')
+    arguments.update(args_dict)
+    dh = DataHandler(logger, arguments)
+    cnn = CNNHandler(logger)
+    opt = argparse.Namespace(**arguments)
+    opt.Model['input_size'] = len(opt.features)
+    train_data = get_data_from_mongo(args_dict)
+    train_x, valid_x, train_y, valid_y, scaled_train_bytes, scaled_target_bytes = dh.train_data_handler(train_data, opt)
+    cnn_model = cnn.training(opt, [train_x, train_y, valid_x, valid_y])
+
+    args_dict['gen_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
+    args_dict['params'] = arguments
+    args_dict['descr'] = '测试'
+    insert_trained_model_into_mongo(cnn_model, args_dict)
+    insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, args_dict)

+ 102 - 0
models_processing/model_koi/tf_lstm.py

@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+# -*- coding:utf-8 -*-
+# @FileName  :tf_lstm.py
+# @Time      :2025/2/12 14:03
+# @Author    :David
+# @Company: shenyang JY
+
+import os.path
+from keras.layers import Input, Dense, LSTM, concatenate, Conv1D, Conv2D, MaxPooling1D, BatchNormalization, Flatten, Dropout
+from keras.models import Model, load_model
+from keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard
+from keras import optimizers, regularizers
+import keras.backend as K
+from common.database_dml import *
+import numpy as np
+np.random.seed(42)
+from models_processing.losses.loss_cdq import SouthLoss, NorthEastLoss
+import tensorflow as tf
+tf.compat.v1.set_random_seed(1234)
+from threading import Lock
+model_lock = Lock()
+
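+# Custom RMSE metric; it must be supplied through custom_objects when the
+# serialized model is re-loaded from MongoDB (see get_model / train_init).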
+def rmse(y_true, y_pred):
+    return K.sqrt(K.mean(K.square(y_pred - y_true)))
+
+
+var_dir = os.path.dirname(os.path.dirname(__file__))
+
+
+class TSHandler(object):
+    model = None
+    train = False
+
+    def __init__(self, logger):
+        self.logger = logger
+        self.model = None
+
+    def get_model(self, args):
+        """
+        Singleton-style access guarded by a thread lock, so that asynchronous model loading stays thread-safe.
+        """
+        try:
+            with model_lock:
+                self.model = get_h5_model_from_mongo(args, {'rmse': rmse})
+        except Exception as e:
+            self.logger.info("加载模型权重失败:{}".format(e.args))
+
+    @staticmethod
+    def get_keras_model(opt):
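+        # Network sketch: Conv1D feature extraction over the NWP time window,
+        # max pooling, a single LSTM layer, and a Dense head sized to the
+        # forecast horizon. All hyperparameters come from opt.Model (YAML config).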
+        # db_loss = NorthEastLoss(opt)
+        south_loss = SouthLoss(opt)
+        l1_reg = regularizers.l1(opt.Model['lambda_value_1'])
+        l2_reg = regularizers.l2(opt.Model['lambda_value_2'])
+        nwp_input = Input(shape=(opt.Model['time_step'], opt.Model['input_size_nwp']), name='nwp')
+
+        con1 = Conv1D(filters=64, kernel_size=5, strides=1, padding='valid', activation='relu', kernel_regularizer=l2_reg)(nwp_input)
+        nwp = MaxPooling1D(pool_size=5, strides=1, padding='valid', data_format='channels_last')(con1)
+        nwp_lstm = LSTM(units=opt.Model['hidden_size'], return_sequences=False, kernel_regularizer=l2_reg)(nwp)
+
+        output = Dense(opt.Model['output_size'], name='cdq_output')(nwp_lstm)
+
+        model = Model(nwp_input, output)
+        adam = optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=True)
+        model.compile(loss=south_loss, optimizer=adam)
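+        # Caution: with a custom loss compiled in, re-loading the saved h5 may
+        # also require SouthLoss in custom_objects (or loading with compile=False).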
+        return model
+
+    def train_init(self, opt):
+        try:
+            if opt.Model['add_train']:
+                # Incremental training: start from the previously saved model to support model revision
+                base_train_model = get_h5_model_from_mongo(vars(opt), {'rmse': rmse})
+                base_train_model.summary()
+                self.logger.info("已加载加强训练基础模型")
+            else:
+                base_train_model = self.get_keras_model(opt)
+            return base_train_model
+        except Exception as e:
+            self.logger.info("加强训练加载模型权重失败:{}".format(e.args))
+
+    def training(self, opt, train_and_valid_data):
+        model = self.train_init(opt)
+        model.summary()
+        train_x, train_y, valid_x, valid_y = train_and_valid_data
+        early_stop = EarlyStopping(monitor='val_loss', patience=opt.Model['patience'], mode='auto')
+        history = model.fit(train_x, train_y, batch_size=opt.Model['batch_size'], epochs=opt.Model['epoch'], verbose=2, validation_data=(valid_x, valid_y), callbacks=[early_stop])
+        loss = np.round(history.history['loss'], decimals=5)
+        val_loss = np.round(history.history['val_loss'], decimals=5)
+        self.logger.info("-----模型训练经过{}轮迭代-----".format(len(loss)))
+        self.logger.info("训练集损失函数为:{}".format(loss))
+        self.logger.info("验证集损失函数为:{}".format(val_loss))
+        return model
+
+    def predict(self, test_X, batch_size=1):
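+        # Assumes get_model() has already populated self.model.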
+        result = self.model.predict(test_X, batch_size=batch_size)
+        self.logger.info("执行预测方法")
+        return result
+
+
+
+if __name__ == "__main__":
+    run_code = 0

+ 119 - 0
models_processing/model_koi/tf_lstm_pre.py

@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+# -*- coding:utf-8 -*-
+# @FileName  :tf_lstm_pre.py
+# @Time      :2025/2/13 10:52
+# @Author    :David
+# @Company: shenyang JY
+import json, copy
+import numpy as np
+from flask import Flask, request
+import logging, argparse, traceback
+from common.database_dml import *
+from common.processing_data_common import missing_features, str_to_list
+from data_processing.data_operation.data_handler import DataHandler
+from threading import Lock
+import time, yaml
+model_lock = Lock()
+from itertools import chain
+from common.logs import Log
+from models_processing.model_koi.tf_lstm import TSHandler
+# logger = Log('tf_bp').logger()
+logger = Log('tf_bp').logger
+np.random.seed(42)  # NumPy随机种子
+# tf.set_random_seed(42)  # TensorFlow随机种子
+app = Flask('tf_lstm_pre——service')
+
+with app.app_context():
+    with open('../model_koi/lstm.yaml', 'r', encoding='utf-8') as f:
+        arguments = yaml.safe_load(f)
+
+    dh = DataHandler(logger, arguments)
+    ts = TSHandler(logger)
+
+
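+# Hypothetical example request (form fields), assuming the app is served via
+# waitress as in the __main__ block below:
+#   POST /nn_bp_predict
+#   mongodb_database=david_test&mongodb_read_table=j00083_test&mongodb_write_table=j00083_rs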
+@app.route('/nn_bp_predict', methods=['POST'])
+def model_prediction_bp():
+    # Record the program start time
+    start_time = time.time()
+    result = {}
+    success = 0
+    print("Program starts execution!")
+    params_dict = request.values.to_dict()
+    args = copy.deepcopy(arguments)
+    args.update(params_dict)
+    try:
+        opt = argparse.Namespace(**args)
+        print('args', args)
+        logger.info(args)
+        pre_data = get_data_from_mongo(args)
+        feature_scaler, target_scaler = get_scaler_model_from_mongo(args)
+        scaled_pre_x = dh.pre_data_handler(pre_data, feature_scaler, args)
+        ts.get_model(args)
+        # result = bp.predict(scaled_pre_x, args)
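+        # Invert the target scaling and flatten the scaled predictions back
+        # into a single power series.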
+        result = list(chain.from_iterable(target_scaler.inverse_transform([ts.predict(scaled_pre_x).flatten()])))
+        pre_data['power_forecast'] = result[:len(pre_data)]
+        pre_data['farm_id'] = 'J00083'
+        pre_data['cdq'] = 1
+        pre_data['dq'] = 1
+        pre_data['zq'] = 1
+        pre_data.rename(columns={args['col_time']: 'date_time'}, inplace=True)
+        pre_data = pre_data[['date_time', 'power_forecast', 'farm_id', 'cdq', 'dq', 'zq']]
+
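+        # Round to 2 decimals and clip the forecast into [0, opt.cap].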
+        pre_data['power_forecast'] = pre_data['power_forecast'].round(2)
+        pre_data.loc[pre_data['power_forecast'] > opt.cap, 'power_forecast'] = opt.cap
+        pre_data.loc[pre_data['power_forecast'] < 0, 'power_forecast'] = 0
+
+        insert_data_into_mongo(pre_data, args)
+        success = 1
+    except Exception as e:
+        my_exception = traceback.format_exc()
+        my_exception = my_exception.replace("\n", "\t")
+        result['msg'] = my_exception
+    end_time = time.time()
+
+    result['success'] = success
+    result['args'] = args
+    result['start_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))
+    result['end_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))
+    print("Program execution ends!")
+    return result
+
+
+if __name__ == "__main__":
+    print("Program starts execution!")
+    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    logger = logging.getLogger("model_training_bp log")
+    from waitress import serve
+
+    # serve(app, host="0.0.0.0", port=1010x, threads=4)
+    print("server start!")
+
+    # ------------------------ Test code ------------------------
+    args_dict = {"mongodb_database": 'david_test', 'scaler_table': 'j00083_scaler', 'model_name': 'bp1.0.test',
+                 'model_table': 'j00083_model', 'mongodb_read_table': 'j00083_test', 'col_time': 'date_time', 'mongodb_write_table': 'j00083_rs',
+                 'features': 'speed10,direction10,speed30,direction30,speed50,direction50,speed70,direction70,speed90,direction90,speed110,direction110,speed150,direction150,speed170,direction170'}
+    args_dict['features'] = args_dict['features'].split(',')
+    arguments.update(args_dict)
+    dh = DataHandler(logger, arguments)
+    ts = TSHandler(logger)
+    opt = argparse.Namespace(**arguments)
+
+    opt.Model['input_size'] = len(opt.features)
+    pre_data = get_data_from_mongo(args_dict)
+    feature_scaler, target_scaler = get_scaler_model_from_mongo(arguments)
+    pre_x = dh.pre_data_handler(pre_data, feature_scaler, opt)
+    ts.get_model(arguments)
+    result = ts.predict(pre_x)
+    result1 = list(chain.from_iterable(target_scaler.inverse_transform([result.flatten()])))
+    pre_data['power_forecast'] = result1[:len(pre_data)]
+    pre_data['farm_id'] = 'J00083'
+    pre_data['cdq'] = 1
+    pre_data['dq'] = 1
+    pre_data['zq'] = 1
+    pre_data.rename(columns={arguments['col_time']: 'date_time'}, inplace=True)
+    pre_data = pre_data[['date_time', 'power_forecast', 'farm_id', 'cdq', 'dq', 'zq']]
+
+    pre_data['power_forecast'] = pre_data['power_forecast'].round(2)
+    pre_data.loc[pre_data['power_forecast'] > opt.cap, 'power_forecast'] = opt.cap
+    pre_data.loc[pre_data['power_forecast'] < 0, 'power_forecast'] = 0
+
+    insert_data_into_mongo(pre_data, arguments)

+ 91 - 0
models_processing/model_koi/tf_lstm_train.py

@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+# -*- coding:utf-8 -*-
+# @FileName  :tf_lstm_train.py
+# @Time      :2025/2/13 10:52
+# @Author    :David
+# @Company: shenyang JY
+import json, copy
+import numpy as np
+from flask import Flask, request
+import traceback
+import logging, argparse
+from data_processing.data_operation.data_handler import DataHandler
+import time, yaml
+from models_processing.model_koi.tf_lstm import TSHandler
+from common.database_dml import *
+import matplotlib.pyplot as plt
+from common.logs import Log
+logger = logging.getLogger()
+# logger = Log('models-processing').logger
+np.random.seed(42)  # NumPy随机种子
+# tf.set_random_seed(42)  # TensorFlow随机种子
+app = Flask('tf_lstm_train——service')
+
+with app.app_context():
+    with open('../model_koi/lstm.yaml', 'r', encoding='utf-8') as f:
+        arguments = yaml.safe_load(f)
+
+    dh = DataHandler(logger, arguments)
+    ts = TSHandler(logger)
+
+@app.route('/nn_bp_training', methods=['POST'])
+def model_training_bp():
+    # Record the program start time
+    start_time = time.time()
+    result = {}
+    success = 0
+    print("Program starts execution!")
+    args_dict = request.values.to_dict()
+    args = copy.deepcopy(arguments)
+    args.update(args_dict)
+    try:
+        opt = argparse.Namespace(**args)
+        logger.info(args_dict)
+        train_data = get_data_from_mongo(args_dict)
+        train_x, valid_x, train_y, valid_y, scaled_train_bytes, scaled_target_bytes = dh.train_data_handler(train_data, opt)
+        ts_model = ts.training(opt, [train_x, train_y, valid_x, valid_y])
+        args_dict['params'] = json.dumps(args)
+        args_dict['gen_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
+        insert_trained_model_into_mongo(ts_model, args_dict)
+        insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, args)
+        success = 1
+    except Exception as e:
+        my_exception = traceback.format_exc()
+        my_exception = my_exception.replace("\n", "\t")
+        result['msg'] = my_exception
+    end_time = time.time()
+
+    result['success'] = success
+    result['args'] = args
+    result['start_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))
+    result['end_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))
+    print("Program execution ends!")
+    return result
+
+
+if __name__ == "__main__":
+    print("Program starts execution!")
+    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    logger = logging.getLogger("model_training_bp log")
+    from waitress import serve
+
+    # serve(app, host="0.0.0.0", port=10103, threads=4)
+    print("server start!")
+    args_dict = {"mongodb_database": 'david_test', 'scaler_table': 'j00083_scaler', 'model_name': 'bp1.0.test',
+                 'model_table': 'j00083_model', 'mongodb_read_table': 'j00083', 'col_time': 'dateTime',
+                 'features': 'speed10,direction10,speed30,direction30,speed50,direction50,speed70,direction70,speed90,direction90,speed110,direction110,speed150,direction150,speed170,direction170'}
+    args_dict['features'] = args_dict['features'].split(',')
+    arguments.update(args_dict)
+    dh = DataHandler(logger, arguments)
+    ts = TSHandler(logger)
+    opt = argparse.Namespace(**arguments)
+    opt.Model['input_size'] = len(opt.features)
+    train_data = get_data_from_mongo(args_dict)
+    train_x, valid_x, train_y, valid_y, scaled_train_bytes, scaled_target_bytes = dh.train_data_handler(train_data, opt)
+    ts_model = ts.training(opt, [train_x, train_y, valid_x, valid_y])
+
+    args_dict['gen_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
+    args_dict['params'] = arguments
+    args_dict['descr'] = '测试'
+    insert_trained_model_into_mongo(ts_model, args_dict)
+    insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, args_dict)