Prechádzať zdrojové kódy

Merge branch 'dev_david' into dev_awg
加入超短期报告

David 2 mesiacov pred
rodič
commit
6e146a21ef

+ 12 - 0
.idea/algorithm_platform.iml

@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="jdk" jdkName="py37tf115" jdkType="Python SDK" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+  <component name="PyDocumentationSettings">
+    <option name="format" value="PLAIN" />
+    <option name="myDocStringFormat" value="Plain" />
+  </component>
+</module>

+ 6 - 0
.idea/inspectionProfiles/profiles_settings.xml

@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>

+ 7 - 0
.idea/misc.xml

@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="Black">
+    <option name="sdkName" value="D:\anaconda3" />
+  </component>
+  <component name="ProjectRootManager" version="2" project-jdk-name="py37tf115" project-jdk-type="Python SDK" />
+</project>

+ 8 - 0
.idea/modules.xml

@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/algorithm_platform.iml" filepath="$PROJECT_DIR$/.idea/algorithm_platform.iml" />
+    </modules>
+  </component>
+</project>

+ 6 - 0
.idea/vcs.xml

@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="" vcs="Git" />
+  </component>
+</project>

+ 341 - 0
.idea/workspace.xml

@@ -0,0 +1,341 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="AutoImportSettings">
+    <option name="autoReloadType" value="SELECTIVE" />
+  </component>
+  <component name="ChangeListManager">
+    <list default="true" id="03b32494-2264-4bfc-811a-22f732290233" name="更改" comment="">
+      <change beforePath="$PROJECT_DIR$/common/database_dml.py" beforeDir="false" afterPath="$PROJECT_DIR$/common/database_dml.py" afterDir="false" />
+      <change beforePath="$PROJECT_DIR$/data_processing/data_operation/data_handler.py" beforeDir="false" afterPath="$PROJECT_DIR$/data_processing/data_operation/data_handler.py" afterDir="false" />
+      <change beforePath="$PROJECT_DIR$/models_processing/model_koi/bp.yaml" beforeDir="false" afterPath="$PROJECT_DIR$/models_processing/model_koi/bp.yaml" afterDir="false" />
+      <change beforePath="$PROJECT_DIR$/models_processing/model_koi/nn_bp.py" beforeDir="false" afterPath="$PROJECT_DIR$/models_processing/model_koi/nn_bp.py" afterDir="false" />
+    </list>
+    <option name="SHOW_DIALOG" value="false" />
+    <option name="HIGHLIGHT_CONFLICTS" value="true" />
+    <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
+    <option name="LAST_RESOLUTION" value="IGNORE" />
+  </component>
+  <component name="FileTemplateManagerImpl">
+    <option name="RECENT_TEMPLATES">
+      <list>
+        <option value="Python Script" />
+      </list>
+    </option>
+  </component>
+  <component name="Git.Settings">
+    <option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" />
+  </component>
+  <component name="ProjectColorInfo">{
+  &quot;associatedIndex&quot;: 2
+}</component>
+  <component name="ProjectId" id="2phKrzXJWWjlWuJkM2S9yOlofMP" />
+  <component name="ProjectViewState">
+    <option name="hideEmptyMiddlePackages" value="true" />
+    <option name="showLibraryContents" value="true" />
+  </component>
+  <component name="PropertiesComponent"><![CDATA[{
+  "keyToString": {
+    "Python.data_nwp_ftp.executor": "Run",
+    "Python.logs.executor": "Debug",
+    "Python.nn_bp.executor": "Debug",
+    "Python.pre_data_ftp.executor": "Debug",
+    "Python.run_all.executor": "Run",
+    "Python.test.executor": "Debug",
+    "RunOnceActivity.ShowReadmeOnStart": "true",
+    "git-widget-placeholder": "dev__david",
+    "last_opened_file_path": "E:/treasure/ipfcst-forecast-wind3-tf1",
+    "settings.editor.selected.configurable": "com.jetbrains.python.configuration.PyActiveSdkModuleConfigurable"
+  }
+}]]></component>
+  <component name="RecentsManager">
+    <key name="CopyFile.RECENT_KEYS">
+      <recent name="E:\algorithm_platform\models_processing\model_koi" />
+      <recent name="E:\algorithm_platform\common" />
+      <recent name="E:\algorithm_platform\models_processing\losses" />
+      <recent name="E:\algorithm_platform\pre_evaluation" />
+      <recent name="E:\algorithm_platform\evaluation" />
+    </key>
+    <key name="MoveFile.RECENT_KEYS">
+      <recent name="E:\algorithm_platform\data_processing\data_operation" />
+      <recent name="E:\algorithm_platform\data_processing" />
+      <recent name="E:\algorithm_platform\models_sklearn" />
+      <recent name="E:\algorithm_platform\evaluation_processing" />
+      <recent name="E:\algorithm_platform\data_processing\processing_limit_power" />
+    </key>
+  </component>
+  <component name="RunManager" selected="Python.nn_bp">
+    <configuration name="data_nwp_ftp" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
+      <module name="algorithm_platform" />
+      <option name="ENV_FILES" value="" />
+      <option name="INTERPRETER_OPTIONS" value="" />
+      <option name="PARENT_ENVS" value="true" />
+      <envs>
+        <env name="PYTHONUNBUFFERED" value="1" />
+      </envs>
+      <option name="SDK_HOME" value="" />
+      <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/data_processing/data_operation" />
+      <option name="IS_MODULE_SDK" value="true" />
+      <option name="ADD_CONTENT_ROOTS" value="true" />
+      <option name="ADD_SOURCE_ROOTS" value="true" />
+      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/data_processing/data_operation/data_nwp_ftp.py" />
+      <option name="PARAMETERS" value="" />
+      <option name="SHOW_COMMAND_LINE" value="false" />
+      <option name="EMULATE_TERMINAL" value="false" />
+      <option name="MODULE_MODE" value="false" />
+      <option name="REDIRECT_INPUT" value="false" />
+      <option name="INPUT_FILE" value="" />
+      <method v="2" />
+    </configuration>
+    <configuration name="nn_bp" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
+      <module name="algorithm_platform" />
+      <option name="ENV_FILES" value="" />
+      <option name="INTERPRETER_OPTIONS" value="" />
+      <option name="PARENT_ENVS" value="true" />
+      <envs>
+        <env name="PYTHONUNBUFFERED" value="1" />
+      </envs>
+      <option name="SDK_HOME" value="" />
+      <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/models_processing/model_koi" />
+      <option name="IS_MODULE_SDK" value="true" />
+      <option name="ADD_CONTENT_ROOTS" value="true" />
+      <option name="ADD_SOURCE_ROOTS" value="true" />
+      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/models_processing/model_koi/nn_bp.py" />
+      <option name="PARAMETERS" value="" />
+      <option name="SHOW_COMMAND_LINE" value="false" />
+      <option name="EMULATE_TERMINAL" value="false" />
+      <option name="MODULE_MODE" value="false" />
+      <option name="REDIRECT_INPUT" value="false" />
+      <option name="INPUT_FILE" value="" />
+      <method v="2" />
+    </configuration>
+    <configuration name="pre_data_ftp" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
+      <module name="algorithm_platform" />
+      <option name="ENV_FILES" value="" />
+      <option name="INTERPRETER_OPTIONS" value="" />
+      <option name="PARENT_ENVS" value="true" />
+      <envs>
+        <env name="PYTHONUNBUFFERED" value="1" />
+      </envs>
+      <option name="SDK_HOME" value="" />
+      <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/data_processing/data_operation" />
+      <option name="IS_MODULE_SDK" value="true" />
+      <option name="ADD_CONTENT_ROOTS" value="true" />
+      <option name="ADD_SOURCE_ROOTS" value="true" />
+      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/data_processing/data_operation/pre_data_ftp.py" />
+      <option name="PARAMETERS" value="" />
+      <option name="SHOW_COMMAND_LINE" value="false" />
+      <option name="EMULATE_TERMINAL" value="false" />
+      <option name="MODULE_MODE" value="false" />
+      <option name="REDIRECT_INPUT" value="false" />
+      <option name="INPUT_FILE" value="" />
+      <method v="2" />
+    </configuration>
+    <configuration name="run_all" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
+      <module name="algorithm_platform" />
+      <option name="ENV_FILES" value="" />
+      <option name="INTERPRETER_OPTIONS" value="" />
+      <option name="PARENT_ENVS" value="true" />
+      <envs>
+        <env name="PYTHONUNBUFFERED" value="1" />
+      </envs>
+      <option name="SDK_HOME" value="" />
+      <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
+      <option name="IS_MODULE_SDK" value="true" />
+      <option name="ADD_CONTENT_ROOTS" value="true" />
+      <option name="ADD_SOURCE_ROOTS" value="true" />
+      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/run_all.py" />
+      <option name="PARAMETERS" value="" />
+      <option name="SHOW_COMMAND_LINE" value="false" />
+      <option name="EMULATE_TERMINAL" value="false" />
+      <option name="MODULE_MODE" value="false" />
+      <option name="REDIRECT_INPUT" value="false" />
+      <option name="INPUT_FILE" value="" />
+      <method v="2" />
+    </configuration>
+    <configuration name="test" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
+      <module name="algorithm_platform" />
+      <option name="ENV_FILES" value="" />
+      <option name="INTERPRETER_OPTIONS" value="" />
+      <option name="PARENT_ENVS" value="true" />
+      <envs>
+        <env name="PYTHONUNBUFFERED" value="1" />
+      </envs>
+      <option name="SDK_HOME" value="" />
+      <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/models_processing/model_koi" />
+      <option name="IS_MODULE_SDK" value="true" />
+      <option name="ADD_CONTENT_ROOTS" value="true" />
+      <option name="ADD_SOURCE_ROOTS" value="true" />
+      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/models_processing/model_koi/test.py" />
+      <option name="PARAMETERS" value="" />
+      <option name="SHOW_COMMAND_LINE" value="false" />
+      <option name="EMULATE_TERMINAL" value="false" />
+      <option name="MODULE_MODE" value="false" />
+      <option name="REDIRECT_INPUT" value="false" />
+      <option name="INPUT_FILE" value="" />
+      <method v="2" />
+    </configuration>
+    <recent_temporary>
+      <list>
+        <item itemvalue="Python.nn_bp" />
+        <item itemvalue="Python.test" />
+        <item itemvalue="Python.pre_data_ftp" />
+        <item itemvalue="Python.data_nwp_ftp" />
+        <item itemvalue="Python.run_all" />
+      </list>
+    </recent_temporary>
+  </component>
+  <component name="SharedIndexes">
+    <attachedChunks>
+      <set>
+        <option value="bundled-python-sdk-d7ad00fb9fc3-c546a90a8094-com.jetbrains.pycharm.community.sharedIndexes.bundled-PC-242.23726.102" />
+      </set>
+    </attachedChunks>
+  </component>
+  <component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="应用程序级" UseSingleDictionary="true" transferred="true" />
+  <component name="TaskManager">
+    <task active="true" id="Default" summary="默认任务">
+      <changelist id="03b32494-2264-4bfc-811a-22f732290233" name="更改" comment="" />
+      <created>1733213069535</created>
+      <option name="number" value="Default" />
+      <option name="presentableId" value="Default" />
+      <updated>1733213069535</updated>
+    </task>
+    <servers />
+  </component>
+  <component name="XDebuggerManager">
+    <breakpoint-manager>
+      <breakpoints>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/data_processing/data_operation/data_nwp_ftp.py</url>
+          <line>117</line>
+          <option name="timeStamp" value="105" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/data_processing/data_operation/data_nwp_ftp.py</url>
+          <line>131</line>
+          <option name="timeStamp" value="106" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/data_processing/data_operation/data_nwp_ftp.py</url>
+          <line>134</line>
+          <option name="timeStamp" value="108" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/data_processing/data_operation/data_nwp_ftp.py</url>
+          <line>137</line>
+          <option name="timeStamp" value="109" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/data_processing/data_operation/data_nwp_ftp.py</url>
+          <line>135</line>
+          <option name="timeStamp" value="111" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/data_processing/data_operation/data_nwp_ftp.py</url>
+          <line>144</line>
+          <option name="timeStamp" value="112" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/data_processing/data_operation/data_nwp_ftp.py</url>
+          <line>145</line>
+          <option name="timeStamp" value="113" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/data_processing/data_operation/data_nwp_ftp.py</url>
+          <line>146</line>
+          <option name="timeStamp" value="114" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/data_processing/data_operation/data_nwp_ftp.py</url>
+          <line>171</line>
+          <option name="timeStamp" value="115" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/data_processing/data_operation/data_nwp_ftp.py</url>
+          <line>164</line>
+          <option name="timeStamp" value="116" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/data_processing/data_operation/data_nwp_ftp.py</url>
+          <line>167</line>
+          <option name="timeStamp" value="117" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/data_processing/data_operation/data_nwp_ftp.py</url>
+          <line>159</line>
+          <option name="timeStamp" value="119" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/data_processing/data_operation/data_nwp_ftp.py</url>
+          <line>161</line>
+          <option name="timeStamp" value="120" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/data_processing/data_operation/data_nwp_ftp.py</url>
+          <line>162</line>
+          <option name="timeStamp" value="121" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/data_processing/data_operation/data_nwp_ftp.py</url>
+          <line>166</line>
+          <option name="timeStamp" value="122" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/data_processing/data_operation/data_nwp_ftp.py</url>
+          <line>181</line>
+          <option name="timeStamp" value="123" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/models_processing/model_koi/nn_bp.py</url>
+          <line>226</line>
+          <option name="timeStamp" value="126" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/models_processing/model_koi/nn_bp.py</url>
+          <line>227</line>
+          <option name="timeStamp" value="127" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/models_processing/model_koi/nn_bp.py</url>
+          <line>228</line>
+          <option name="timeStamp" value="128" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/models_processing/model_koi/nn_bp.py</url>
+          <line>230</line>
+          <option name="timeStamp" value="129" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/models_processing/model_koi/nn_bp.py</url>
+          <line>232</line>
+          <option name="timeStamp" value="130" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/models_processing/model_koi/nn_bp.py</url>
+          <line>233</line>
+          <option name="timeStamp" value="131" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/models_processing/model_koi/nn_bp.py</url>
+          <line>234</line>
+          <option name="timeStamp" value="132" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/models_processing/model_koi/nn_bp.py</url>
+          <line>235</line>
+          <option name="timeStamp" value="133" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/models_processing/model_koi/nn_bp.py</url>
+          <line>236</line>
+          <option name="timeStamp" value="134" />
+        </line-breakpoint>
+        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
+          <url>file://$PROJECT_DIR$/models_processing/model_koi/nn_bp.py</url>
+          <line>43</line>
+          <option name="timeStamp" value="136" />
+        </line-breakpoint>
+      </breakpoints>
+    </breakpoint-manager>
+  </component>
+</project>

BIN
common/__pycache__/data_cleaning.cpython-37.pyc


BIN
common/__pycache__/database_dml.cpython-312.pyc


BIN
common/__pycache__/database_dml.cpython-37.pyc


BIN
common/__pycache__/logs.cpython-37.pyc


BIN
common/__pycache__/processing_data_common.cpython-37.pyc


+ 45 - 0
common/database_dml.py

@@ -1,5 +1,6 @@
 from pymongo import MongoClient, UpdateOne
 import pandas as pd
+from scripts.regsetup import description
 from sqlalchemy import create_engine
 import pickle
 from io import BytesIO
@@ -175,6 +176,50 @@ def insert_h5_model_into_mongo(model,feature_scaler_bytes,target_scaler_bytes ,a
     })
     print("模型成功保存到 MongoDB!")
 
+def insert_trained_model_into_mongo(model ,args):
+    mongodb_connection,mongodb_database,model_table,model_name = ("mongodb://root:sdhjfREWFWEF23e@192.168.1.43:30000/",
+                                args['mongodb_database'],args['model_table'],args['model_name'])
+
+    gen_time, params_json, descr = args['gen_time'], args['params'], args['descr']
+    client = MongoClient(mongodb_connection)
+    db = client[mongodb_database]
+    if model_table in db.list_collection_names():
+        db[model_table].drop()
+        print(f"Collection '{model_table} already exist, deleted successfully!")
+    model_table = db[model_table]
+    # 创建 BytesIO 缓冲区
+    model_buffer = BytesIO()
+    # 将模型保存为 HDF5 格式到内存 (BytesIO)
+    model.save(model_buffer, save_format='h5')
+    # 将指针移到缓冲区的起始位置
+    model_buffer.seek(0)
+    # 获取模型的二进制数据
+    model_data = model_buffer.read()
+    # 将模型保存到 MongoDB
+    model_table.insert_one({
+        "model_name": model_name,
+        "model_data": model_data,
+        "gen_time": gen_time,
+        "params": params_json,
+        "descr": descr
+    })
+    print("模型成功保存到 MongoDB!")
+
+def insert_scaler_model_into_mongo(feature_scaler_bytes, args):
+    mongodb_connection,mongodb_database,scaler_table,model_table,model_name = ("mongodb://root:sdhjfREWFWEF23e@192.168.1.43:30000/",
+                                args['mongodb_database'],args['scaler_table'],args['model_table'],args['model_name'])
+    client = MongoClient(mongodb_connection)
+    db = client[mongodb_database]
+    if scaler_table in db.list_collection_names():
+        db[scaler_table].drop()
+        print(f"Collection '{scaler_table} already exist, deleted successfully!")
+    collection = db[scaler_table]  # 集合名称
+    # Save the scalers in MongoDB as binary data
+    collection.insert_one({
+        "feature_scaler": feature_scaler_bytes.read(),
+    })
+    print("scaler_model inserted successfully!")
+
 
 def get_h5_model_from_mongo(args):
     mongodb_connection,mongodb_database,model_table,model_name = "mongodb://root:sdhjfREWFWEF23e@192.168.1.43:30000/",args['mongodb_database'],args['model_table'],args['model_name']

BIN
data_processing/data_operation/__pycache__/data_handler.cpython-37.pyc


BIN
data_processing/data_operation/__pycache__/pre_data_ftp.cpython-312.pyc


+ 3 - 5
data_processing/data_operation/data_handler.py

@@ -4,14 +4,14 @@
 # @Time      :2025/1/8 14:56
 # @Author    :David
 # @Company: shenyang JY
-import numpy as np
+import argparse
 import pandas as pd
 from common.data_cleaning import *
 
 class DataHandler(object):
     def __init__(self, logger, args):
         self.logger = logger
-        self.opt = args.parse_args_and_yaml()
+        self.opt = argparse.Namespace(**args)
 
     def get_train_data(self, df):
         train_x, valid_x, train_y, valid_y = [], [], [], []
@@ -40,9 +40,7 @@ class DataHandler(object):
         time_step_loc = time_step - 1
         train_num = int(len(feature_data))
         label_features = ['C_TIME', 'C_REAL_VALUE'] if is_train is True else ['C_TIME', 'C_REAL_VALUE']
-        nwp_cs = self.opt.nwp_columns.copy()
-        if 'C_TIME' in nwp_cs:
-            nwp_cs.pop(nwp_cs.index('C_TIME'))
+        nwp_cs = self.opt.features
         nwp = [feature_data.loc[i:i + time_step_loc, nwp_cs].reset_index(drop=True) for i in range(train_num - time_step + 1)]  # 数据库字段 'C_T': 'C_WS170'
         labels = [feature_data.loc[i:i + time_step_loc, label_features].reset_index(drop=True) for i in range(train_num - time_step + 1)]
         features_x, features_y = [], []

+ 307 - 0
evaluation_processing/analysis_cdq.py

@@ -0,0 +1,307 @@
+# -*- coding: utf-8 -*-
+import numpy as np
+import pandas as pd
+from flask import Flask, request
+import time
+import random
+import logging
+import traceback
+import os
+from matplotlib.pyplot import title
+from common.database_dml import get_df_list_from_mongo, insert_data_into_mongo
+import plotly.graph_objects as go
+from plotly.subplots import make_subplots
+import plotly.io as pio
+from bson.decimal128 import Decimal128
+import numbers
+
+app = Flask('analysis_report——service')
+
+def create_fig(df_predict, col_time, label, label_pre):
+    # 创建一个图表对象
+    fig = make_subplots(rows=1, cols=18, subplot_titles=['超短期-第{}点'.format(p) for p in range(1, 17)]+['超短期-平均值'])
+    for point in range(1, 18):
+        point_data = df_predict[df_predict['howLongAgo']==point]
+        # 获取所有的模型
+        models = df_predict['model'].unique()
+        # 添加实际功率曲线
+        fig.add_trace(go.Scatter(
+            x=df_predict[col_time],
+            y=df_predict[label],
+            mode='lines+markers',
+            name='实际功率',  # 实际功率
+            line=dict(width=1),  # 虚线
+            marker=dict(symbol='circle'),
+        ), row=1, col=point)
+        # 为每个模型添加预测值和实际功率的曲线
+        for model in models:
+            # 筛选该模型的数据
+            model_data = point_data[point_data['model'] == model]
+
+            # 添加预测值曲线
+            fig.add_trace(go.Scatter(
+                x=model_data[col_time],
+                y=model_data[label_pre],
+                mode='lines+markers',
+                name=f'{model} 预测值',  # 预测值
+                marker=dict(symbol='circle'),
+                line=dict(width=2)
+            ), row=1, col=point)
+
+    # 设置图表的标题和标签
+    fig.update_layout(
+        template='seaborn',  # 使用 seaborn 模板
+        title=dict(
+            # text=f"{label_pre} 与 {label} 对比",  # 标题
+            x=0.5, font=dict(size=20, color='darkblue')  # 标题居中并设置字体大小和颜色
+        ),
+        plot_bgcolor='rgba(255, 255, 255, 0.8)',  # 背景色
+        xaxis=dict(
+            showgrid=True,
+            gridcolor='rgba(200, 200, 200, 0.5)',  # 网格线颜色
+            title='时间',  # 时间轴标题
+            rangeslider=dict(visible=True),  # 显示滚动条
+            rangeselector=dict(visible=True)  # 显示预设的时间范围选择器
+        ),
+        yaxis=dict(
+            showgrid=True,
+            gridcolor='rgba(200, 200, 200, 0.5)',
+            title='功率'  # y轴标题
+        ),
+        legend=dict(
+            x=0.01,
+            y=0.99,
+            bgcolor='rgba(255, 255, 255, 0.7)',  # 背景透明
+            bordercolor='black',
+            borderwidth=1,
+            font=dict(size=12)  # 字体大小
+        ),
+        hovermode='x unified',  # 鼠标悬停时显示统一的提示框
+        hoverlabel=dict(
+            bgcolor='white',
+            font_size=14,
+            font_family="Rockwell",  # 设置字体样式
+            bordercolor='black'
+        ),
+        margin=dict(l=50, r=50, t=50, b=50)  # 调整边距,避免标题或标签被遮挡
+    )
+    return fig
+
+
+def put_analysis_report_to_html(args, df_predict, df_accuracy):
+    col_time = args['col_time']
+    label = args['label']
+    label_pre = args['label_pre']
+    farmId = args['farmId']
+    acc_flag = df_accuracy.shape[0]
+    df_predict = df_predict.applymap(lambda x: float(x.to_decimal()) if isinstance(x, Decimal128) else float(x) if isinstance(x,  numbers.Number) else x).sort_values(by=col_time)
+    if acc_flag > 0:
+        df_accuracy = df_accuracy.applymap(lambda x: float(x.to_decimal()) if isinstance(x, Decimal128) else float(x) if isinstance(x, numbers.Number) else x).sort_values(by=col_time)
+    # 获取所有的模型
+    models = df_predict['model'].unique()
+    aves = []
+    # 添加超短期16个点平均值
+    for model in models:
+        # 筛选该模型的数据
+        model_data = df_predict[df_predict['model'] == model]
+        # 添加超短期16个点平均值
+        ave = model_data.groupby(col_time).agg({
+            label: 'first',
+            'model': 'first',
+            label_pre: 'mean',
+            'farm_id': 'first'
+        }).reset_index()
+        ave['howLongAgo'] = 17
+        ave = ave.reindex(columns=df_predict.columns.tolist())
+        aves.append(ave)
+
+    df_predict = pd.concat([df_predict]+aves)
+    fig = create_fig(df_predict, col_time, label, label_pre)
+    # 将折线图保存为 HTML 片段
+    power_htmls = pio.to_html(fig, full_html=False)
+    # -------------------- 准确率表展示--------------------
+    acc_html = ''
+    if acc_flag > 0:
+        acc_html = df_accuracy.sort_values(by=col_time).to_html(classes='table table-bordered table-striped',
+                                                                index=False)
+    # -------------------- 准确率汇总展示--------------------
+    summary_html = ''
+    if acc_flag > 0:
+        # 指定需要转换的列
+        cols_to_convert = ['MAE', 'accuracy', 'RMSE', 'deviationElectricity', 'deviationAssessment']
+        for col in cols_to_convert:
+            if col in df_accuracy.columns:
+                df_accuracy[col] = df_accuracy[col].apply(
+                    lambda x: float(x.to_decimal()) if isinstance(x, Decimal128) else float(x) if isinstance(x,
+                                                                                                             numbers.Number) else np.nan)
+
+        # 确定存在的列
+        agg_dict = {}
+        rename_cols = ['model']
+        if 'MAE' in df_accuracy.columns:
+            agg_dict['MAE'] = np.nanmean
+            rename_cols.append('MAE平均值')
+        if 'accuracy' in df_accuracy.columns:
+            agg_dict['accuracy'] = np.nanmean
+            rename_cols.append('准确率平均值')
+        if 'RMSE' in df_accuracy.columns:
+            agg_dict['RMSE'] = np.nanmean
+            rename_cols.append('RMSE平均值')
+        if 'deviationElectricity' in df_accuracy.columns:
+            agg_dict['deviationElectricity'] = [np.nanmean, np.nansum]
+            rename_cols.append('考核电量平均值')
+            rename_cols.append('考核总电量')
+        if 'deviationAssessment' in df_accuracy.columns:
+            agg_dict['deviationAssessment'] = [np.nanmean, np.nansum]
+            rename_cols.append('考核分数平均值')
+            rename_cols.append('考核总分数')
+        # 进行分组聚合,如果有需要聚合的列
+        summary_df = df_accuracy.groupby('model').agg(agg_dict).reset_index()
+        summary_df.columns = rename_cols
+        summary_html = summary_df.to_html(classes='table table-bordered table-striped', index=False)
+    # -------------------- 生成完整 HTML 页面 --------------------
+
+    html_content = f"""
+    <!DOCTYPE html>
+    <html lang="en">
+    <head>
+        <meta charset="UTF-8">
+        <meta name="viewport" content="width=device-width, initial-scale=1.0">
+        <title>Data Analysis Report</title>
+        <!-- 引入 Bootstrap CSS -->
+        <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0/dist/css/bootstrap.min.css" rel="stylesheet">
+        <style>
+         justify-between;{{
+                display: flex;
+                justify-content: space-between;
+           }}
+            body {{
+                background-color: #f4f4f9;
+                font-family: Arial, sans-serif;
+                padding: 20px;
+            }}
+            .container {{
+                background-color: #fff;
+                padding: 20px;
+                border-radius: 10px;
+                box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
+                margin-bottom: 30px;
+            }}
+           h1 {{
+                text-align: center;
+                color: #333;
+                margin-bottom: 20px;
+            }}
+            .plot-container {{
+                margin: 20px 0;
+                max-height: 500px;  /* 限制高度 */
+                overflow-y: auto;   /* 显示垂直滚动条 */
+            }}
+            .table-container {{
+                margin-top: 30px;
+                overflow-x: auto;   /* 水平滚动条 */
+                max-width: 100%;     /* 限制宽度 */
+                white-space: nowrap; /* 防止内容换行 */
+                max-height: 500px;  /* 限制高度 */
+                overflow-y: auto;   /* 显示垂直滚动条 */
+            }}
+             .fixed-table thead tr > th:first-child,
+             .fixed-table tbody tr > td:first-child {{
+             position: sticky;
+             left: 0;
+             z-index: 1;
+
+             }}
+            .fixed-table-header thead tr > th {{
+                position: sticky;
+                top: 0;
+                z-index: 2;
+            }}
+            table {{
+                width: 100%;
+                font-size: 12px;  /* 设置字体大小为12px */
+            }}
+            th, td {{
+                text-align: center;  /* 表头和单元格文字居中 */
+            }}
+        }}
+
+        </style>
+    </head>
+    <body>
+        <div class="container">
+            <h1>分析报告</h1>
+            <!-- 曲线对比 -->
+            <div class="plot-container">
+                <h2>1. 预测功率与实际功率曲线对比</h2>
+                {power_htmls}
+            </div>
+            <!-- Pandas DataFrame 表格 -->
+            <div style="display:flex; justify-content: space-between;">
+                <h2>2. 准确率对比</h2>
+                <span>
+                    <a href="/formula.xlsx">公式</a>
+                </span>
+            </div>
+            <div class="table-container fixed-table-header"> 
+                {acc_html}
+            </div>
+            <!-- Pandas DataFrame 表格 -->
+            <div class="table-container">
+                <h2>3. 准确率汇总对比</h2>
+                {summary_html}
+            </div>
+        </div>
+    </body>
+    </html>
+    """
+    filename = f"{farmId}_{int(time.time() * 1000)}_{random.randint(1000, 9999)}.html"
+    # 保存为 HTML
+    directory = '/usr/share/nginx/html'
+    if not os.path.exists(directory):
+        os.makedirs(directory)
+    file_path = os.path.join(directory, filename)
+    path = f"http://ds3:10010/{filename}"
+    # 将 HTML 内容写入文件
+    with open(file_path, "w", encoding="utf-8") as f:
+        f.write(html_content)
+    print("HTML report generated successfully!")
+    return path
+
+@app.route('/analysis_report_small', methods=['POST'])
+def analysis_report():
+    start_time = time.time()
+    result = {}
+    success = 0
+    path = ""
+    print("Program starts execution!")
+    try:
+        args = request.values.to_dict()
+        print('args', args)
+        logger.info(args)
+        # 获取数据
+        df_predict, df_accuracy = get_df_list_from_mongo(args)[0], get_df_list_from_mongo(args)[1]
+        path = put_analysis_report_to_html(args, df_predict, df_accuracy)
+        success = 1
+    except Exception as e:
+        my_exception = traceback.format_exc()
+        my_exception.replace("\n", "\t")
+        result['msg'] = my_exception
+    end_time = time.time()
+    result['success'] = success
+    result['args'] = args
+    result['start_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))
+    result['end_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))
+    result['file_path'] = path
+    print("Program execution ends!")
+    return result
+
+
+if __name__ == "__main__":
+    print("Program starts execution!")
+    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    logger = logging.getLogger("analysis_report log")
+    from waitress import serve
+
+    serve(app, host="0.0.0.0", port=10099)
+    print("server start!")

+ 108 - 0
models_processing/model_koi/bp.yaml

@@ -0,0 +1,108 @@
+Model:
+  add_train: false
+  batch_size: 64
+  dropout_rate: 0.2
+  epoch: 100
+  fusion: true
+  hidden_size: 64
+  his_points: 16
+  how_long_fill: 10
+  input_size_env: 5
+  input_size_nwp: 24
+  lambda_value_1: 0.02
+  lambda_value_2: 0.01
+  learning_rate: 0.001
+  lstm_layers: 1
+  output_size: 16
+  patience: 10
+  predict_data_fill: true
+  region: south129
+  shuffle_train_data: false
+  test_data_fill: false
+  time_step: 16
+  train_data_fill: false
+  use_cuda: false
+  valid_data_rate: 0.15
+authentication:
+  date: '2025-01-08'
+  full_cap: '2024-04-30'
+  repair: '2025-01-08'
+calculate: []
+cap: 50.0
+dataloc: ./data
+env_columns:
+- C_TIME
+- C_CELLT
+- C_DIFFUSER
+- C_GLOBALR
+- C_RH
+- C_REAL_VALUE
+full_field: true
+history_hours: 1
+new_field: true
+features:
+- time
+- temperature10
+- temperature190
+- direction160
+- direction40
+- temperature110
+- direction80
+- speed60
+- mcc
+- temperature150
+- speed20
+- speed110
+- direction120
+- speed190
+- solarZenith
+- temperature90
+- direction200
+- speed150
+- temperature50
+- direction30
+- temperature160
+- direction170
+- temperature20
+- direction70
+- direction130
+- temperature200
+- speed70
+- temperature120
+- speed30
+- speed100
+- speed80
+- speed180
+- dniCalcd
+- speed140
+- temperature60
+- dateTime
+- temperature30
+- temperature170
+- direction20
+- humidity2
+- direction180
+- realPowerAvg
+- direction60
+- direction140
+- speed40
+- hcc
+- clearskyGhi
+- temperature130
+- lcc
+- updater
+- speed90
+- temperature2
+- tcc
+- direction100
+- speed170
+- temperature70
+- speed130
+- direction190
+- openCapacity
+- temperature40
+- creator
+- direction10
+- temperature180
+- direction150
+- direction50
+- speed50
+- updateTime
+- direction90
+- farmId
+- temperature100
+- speed10
+- temperature140
+- speed120
+- deleted
+- speed200
+- realPower
+- createTime
+- radiation
+- surfacePressure
+- tpr
+- direction110
+- speed160
+- temperature80
+target: C_REAL_VALUE
+repair_days: 81
+repair_model_cycle: 5
+spot_trading: []
+update_add_train_days: 60
+update_coe_days: 3
+usable_power:
+  api_able_power: true
+  bias: 2.524
+  clean_power_which: 1
+  coe: 4
+  down_fractile: 30
+  env: C_GLOBALR
+  k: 0.04079
+  outliers_threshold: 1.5
+  up_fractile: 70
+version: solar-3.1.0.south
+weatherloc:
+- 1

+ 70 - 108
models_processing/model_koi/nn_bp.py

@@ -4,13 +4,12 @@
 # file: time_series.py
 # author: David
 # company: shenyang JY
-
+import json, copy
 import numpy as np
-from sklearn.model_selection import train_test_split
 from flask import Flask, request
 import time
 import traceback
-import logging
+import logging, argparse
 from sklearn.preprocessing import MinMaxScaler
 from io import BytesIO
 import joblib
@@ -20,51 +19,43 @@ from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoa
 from tensorflow.keras import optimizers, regularizers
 import tensorflow.keras.backend as K
 import tensorflow as tf
-
 from common.data_cleaning import cleaning
 from common.database_dml import *
 from common.processing_data_common import missing_features, str_to_list
 from data_processing.data_operation.data_handler import DataHandler
 from threading import Lock
-import time
+import time, yaml
 import random
 import matplotlib.pyplot as plt
 model_lock = Lock()
-
-
-app = Flask('model_training_bp——service')
-
-def draw_loss(history):
-    # 绘制训练集和验证集损失
-    plt.figure(figsize=(20, 8))
-    plt.plot(history.history['loss'], label='Training Loss')
-    plt.plot(history.history['val_loss'], label='Validation Loss')
-    plt.title('Loss Curve')
-    plt.xlabel('Epochs')
-    plt.ylabel('Loss')
-    plt.legend()
-    plt.show()
-
-dh = DataHandler()
-def train_data_handler(data, args):
-    sleep_time = random.uniform(1, 20)  # 生成 5 到 20 之间的随机浮动秒数
-    time.sleep(sleep_time)
-    tf.keras.backend.clear_session()  # 清除当前的图和会话
-    # 设置随机种子
-    np.random.seed(42)  # NumPy随机种子
-    tf.random.set_seed(42)  # TensorFlow随机种子
-    col_time, features, target = args['col_time'], str_to_list(args['features']), args['target']
+from common.logs import Log
+logger = logging.getLogger()
+# logger = Log('models-processing').logger
+np.random.seed(42)  # NumPy随机种子
+tf.random.set_random_seed(42)  # TensorFlow随机种子
+app = Flask('nn_bp——service')
+
+with app.app_context():
+    with open('../model_koi/bp.yaml', 'r', encoding='utf-8') as f:
+        arguments = yaml.safe_load(f)
+
+dh = DataHandler(logger, arguments)
+def train_data_handler(data, opt):
+    col_time, features, target = opt.col_time, opt.features, opt.target
     if 'is_limit' in data.columns:
         data = data[data['is_limit'] == False]
     # 清洗特征平均缺失率大于20%的天
-    train_data = data.sort_values(by=col_time)
+    data = missing_features(data, features, col_time)
+    train_data = data.sort_values(by=col_time).fillna(method='ffill').fillna(method='bfill')
+
+    train_data = train_data.sort_values(by=col_time)
     # 对清洗完限电的数据进行特征预处理:1.空值异常值清洗 2.缺值补值
-    train_data_cleaned = cleaning(train_data, '', logger, train_data.columns.tolist())
+    train_data_cleaned = cleaning(train_data, 'nn_bp:features', logger, features)
     train_data = dh.fill_train_data(train_data_cleaned)
     # 创建特征和目标的标准化器
     train_scaler = MinMaxScaler(feature_range=(0, 1))
     # 标准化特征和目标
-    scaled_train_data = train_scaler.fit_transform(train_data)
+    scaled_train_data = train_scaler.fit_transform(train_data[features+[target]])
     # 保存两个scaler
     scaled_train_bytes = BytesIO()
     joblib.dump(scaled_train_data, scaled_train_bytes)
@@ -78,20 +69,12 @@ def pre_data_handler(data, args):
     features, time_steps, col_time, model_name,col_reserve =  str_to_list(args['features']), int(args['time_steps']),args['col_time'],args['model_name'],str_to_list(args['col_reserve'])
     feature_scaler,target_scaler = get_scaler_model_from_mongo(args)
     pre_data = data.sort_values(by=col_time)
-    # 对预测数据进行特征预处理:1.空值异常值清洗 2.缺值补值
-    pre_data_cleaned = cleaning(pre_data, '', logger, pre_data.columns.tolist())
-    pre_data = dh.fill_train_data(pre_data_cleaned)
     scaled_features = feature_scaler.transform(pre_data[features])
     return scaled_features
 
-class NPHandler(object):
-    train = False
-
-    def __init__(self, log, args, graph, sess):
-        self.logger = log
-        self.graph = graph
-        self.sess = sess
-        opt = args.parse_args_and_yaml()
+class BPHandler(object):
+    def __init__(self, logger):
+        self.logger = logger
         self.model = None
 
     def get_model(self, args):
@@ -103,7 +86,7 @@ class NPHandler(object):
                 # NPHandler.model = NPHandler.get_keras_model(opt)
                 self.model = get_h5_model_from_mongo(args)
         except Exception as e:
-            print("加载模型权重失败:{}".format(e.args))
+            self.logger.info("加载模型权重失败:{}".format(e.args))
 
     @staticmethod
     def get_keras_model(opt):
@@ -124,14 +107,14 @@ class NPHandler(object):
         adam = optimizers.Adam(learning_rate=opt.Model['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-7,
                                amsgrad=True)
         reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.01, patience=5, verbose=1)
-        model.compile(loss=rmse, optimizer=adam)
+        model.compile(loss='rmse', optimizer=adam)
         return model
 
-    def train_init(self, opt, args):
+    def train_init(self, opt):
         try:
             if opt.Model['add_train']:
                 # 进行加强训练,支持修模
-                base_train_model = get_h5_model_from_mongo(args)
+                base_train_model = get_h5_model_from_mongo(vars(opt))
                 base_train_model.summary()
                 self.logger.info("已加载加强训练基础模型")
             else:
@@ -142,22 +125,16 @@ class NPHandler(object):
 
     def training(self, opt, train_and_valid_data):
         model = self.train_init(opt)
-        train_X, train_Y, valid_X, valid_Y = train_and_valid_data
-        print("----------", np.array(train_X[0]).shape)
-        print("++++++++++", np.array(train_X[1]).shape)
-        # weight_lstm_1, bias_lstm_1 = model.get_layer('d1').get_weights()
-        # print("weight_lstm_1 = ", weight_lstm_1)
-        # print("bias_lstm_1 = ", bias_lstm_1)
+        tf.reset_default_graph() # 清除默认图
+        train_x, train_y, valid_x, valid_y = train_and_valid_data
+        print("----------", np.array(train_x[0]).shape)
+        print("++++++++++", np.array(train_x[1]).shape)
 
         check_point = ModelCheckpoint(filepath='./var/' + 'fmi.h5', monitor='val_loss',
                                       save_best_only=True, mode='auto')
         early_stop = EarlyStopping(monitor='val_loss', patience=opt.Model['patience'], mode='auto')
-        # tbCallBack = TensorBoard(log_dir='../figure',
-        #                          histogram_freq=0,
-        #                          write_graph=True,
-        #                          write_images=True)
-        history = model.fit(train_X, train_Y, batch_size=opt.Model['batch_size'], epochs=opt.Model['epoch'], verbose=2,
-                            validation_data=(valid_X, valid_Y), callbacks=[check_point, early_stop], shuffle=False)
+        history = model.fit(train_x, train_y, batch_size=opt.Model['batch_size'], epochs=opt.Model['epoch'], verbose=2,
+                            validation_data=(valid_x, valid_y), callbacks=[check_point, early_stop], shuffle=False)
         loss = np.round(history.history['loss'], decimals=5)
         val_loss = np.round(history.history['val_loss'], decimals=5)
         self.logger.info("-----模型训练经过{}轮迭代-----".format(len(loss)))
@@ -170,56 +147,26 @@ class NPHandler(object):
         self.logger.info("执行预测方法")
         return result
 
-def build_model(data, args):
-    # 划分训练集和测试集
-    X_train, X_test, y_train, y_test = train_test_split(scaled_features, scaled_target, test_size=0.2, random_state=43)
-
-    # 构建 LSTM 模型
-    model = Sequential([
-        Dense(64, input_dim=X_train.shape[1], activation='relu'),  # 输入层和隐藏层,10个神经元
-        Dropout(0.2),
-        Dense(32, activation='relu'),  # 隐藏层,8个神经元
-        Dropout(0.3),  # Dropout层,30%的神经元输出会被随机丢弃
-        Dense(1, activation='linear')  # 输出层,1个神经元(用于回归任务)
-    ])
-
-    # 编译模型
-    model.compile(optimizer='adam', loss='mean_squared_error')
-    # 定义 EarlyStopping 和 ReduceLROnPlateau 回调
-    early_stopping = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True, verbose=1)
-    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1)
-    # 训练模型
-    # 使用GPU进行训练
-    with tf.device('/GPU:1'):
-        history = model.fit(X_train, y_train,
-                            epochs=100,
-                            batch_size=32,
-                            validation_data=(X_test, y_test),
-                            verbose=2,
-                            shuffle=False,
-                            callbacks=[early_stopping, reduce_lr])
-    draw_loss(history)
-    return model, feature_scaler_bytes, target_scaler_bytes
-
-
 @app.route('/model_training_bp', methods=['POST'])
 def model_training_bp():
     # 获取程序开始时间
     start_time = time.time()
     result = {}
     success = 0
-    nh = NPHandler()
+    bp = BPHandler(logger)
     print("Program starts execution!")
     try:
-        args = request.values.to_dict()
-        print('args', args)
-        logger.info(args)
-        power_df = get_data_from_mongo(args)
-        train_x, valid_x, train_y, valid_y, train_data_handler = dh.get_train_data(power_df)
-        np_model = nh.training(opt, [train_x, valid_x, train_y, valid_y])
-        model, feature_scaler_bytes, target_scaler_bytes = build_model(power_df, args)
-
-        insert_h5_model_into_mongo(np_model, train_data_handler, args)
+        args_dict = request.values.to_dict()
+        args = arguments.deepcopy()
+        opt = argparse.Namespace(**args)
+        logger.info(args_dict)
+        train_data = get_data_from_mongo(args_dict)
+        train_x, valid_x, train_y, valid_y, scaled_train_bytes = train_data_handler(train_data, opt)
+        bp_model = bp.training(opt, [train_x, valid_x, train_y, valid_y])
+        args_dict['params'] = json.dumps(args)
+        args_dict['gen_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
+        insert_trained_model_into_mongo(bp_model, args_dict)
+        insert_scaler_model_into_mongo(scaled_train_bytes, args_dict)
         success = 1
     except Exception as e:
         my_exception = traceback.format_exc()
@@ -241,15 +188,18 @@ def model_prediction_bp():
     start_time = time.time()
     result = {}
     success = 0
-    nh = NPHandler()
+    bp = BPHandler(logger)
     print("Program starts execution!")
     try:
-        args = request.values.to_dict()
+        params_dict = request.values.to_dict()
+        args = arguments.deepcopy()
+        args.update(params_dict)
+        opt = argparse.Namespace(**args)
         print('args', args)
         logger.info(args)
-        power_df = get_data_from_mongo(args)
-        scaled_features = pre_data_handler(power_df, args)
-        result = nh.predict(power_df, args)
+        predict_data = get_data_from_mongo(args)
+        scaled_features = pre_data_handler(predict_data, args)
+        result = bp.predict(scaled_features, args)
         insert_data_into_mongo(result, args)
         success = 1
     except Exception as e:
@@ -271,5 +221,17 @@ if __name__ == "__main__":
     logger = logging.getLogger("model_training_bp log")
     from waitress import serve
 
-    serve(app, host="0.0.0.0", port=10103, threads=4)
-    print("server start!")
+    # serve(app, host="0.0.0.0", port=10103, threads=4)
+    print("server start!")
+
+    bp = BPHandler(logger)
+    args = copy.deepcopy(bp)
+    opt = argparse.Namespace(**arguments)
+    logger.info(args)
+    args_dict = {"mongodb_database": 'david_test', 'scaler_table': 'j00083_scaler', 'model_name': 'bp1.0.test',
+            'model_table': 'j00083_model', 'mongodb_read_table': 'j00083'}
+    train_data = get_data_from_mongo(args_dict)
+    train_x, valid_x, train_y, valid_y, scaled_train_bytes = train_data_handler(train_data, opt)
+    bp_model = bp.training(opt, [train_x, valid_x, train_y, valid_y])
+    insert_trained_model_into_mongo(bp_model, args_dict)
+    insert_scaler_model_into_mongo(scaled_train_bytes, args)