David il y a 6 jours
Parent
commit
f48de14efe

+ 7 - 0
app/common/config.py

@@ -48,6 +48,13 @@ class myargparse(argparse.ArgumentParser):
             type=str,
             metavar='FILE',
             help='模型选择')
+        self.add_argument(
+            '-o',
+            '--train_mode',
+            # NOTE(review): the original used `default=False, type=bool`,
+            # which is an argparse pitfall -- bool("False") is True, so ANY
+            # supplied value (even "False") enabled the flag, and it could
+            # never be switched off from the command line.
+            # `action='store_true'` keeps the default of False and makes the
+            # option a proper presence flag (`-o` => True, absent => False).
+            # metavar dropped: a store_true flag consumes no value.
+            action='store_true',
+            help='训练')
 
     def _parse_args_and_yaml(self):
         base_parser = argparse.ArgumentParser(add_help=False)

+ 375 - 0
app/logs/2025-05-26/south-forecast.2025-05-26.0.log

@@ -0,0 +1,375 @@
+2025-05-26 09:10:54,470 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:12:03,996 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:12:46,560 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:13:35,837 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:15:14,753 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:15:43,984 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:18:39,622 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:19:09,611 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:19:09,724 - main.py - INFO - 输入文件目录: E:/compete/app/model/data/DQYC/qy/62/1002/2025-04-21/IN - main
+2025-05-26 09:20:01,910 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:20:24,979 - main.py - INFO - 输入文件目录: E:/compete/app/model/data/DQYC/qy/62/1002/2025-04-21/IN - main
+2025-05-26 09:20:38,240 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:20:38,353 - main.py - INFO - 输入文件目录: E:/compete/app/model/data/DQYC/qy/62/1002/2025-04-21/IN - main
+2025-05-26 09:20:40,863 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:20:43,657 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:20:43,660 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:20:43,772 - task_worker.py - ERROR - Station 1086 failed: 'NoneType' object has no attribute 'nwp' - station_task
+2025-05-26 09:20:43,772 - task_worker.py - ERROR - Station 2361 failed: 'NoneType' object has no attribute 'nwp' - station_task
+2025-05-26 09:20:44,370 - task_worker.py - ERROR - Area -99 failed: 'NoneType' object has no attribute 'area_id' - region_task
+2025-05-26 09:26:46,082 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:27:10,262 - main.py - INFO - 输入文件目录: E:/compete/app/model/data/DQYC/qy/62/1002/2025-04-21/IN - main
+2025-05-26 09:27:34,736 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:28:17,447 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:28:17,451 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:28:17,831 - task_worker.py - ERROR - Station 1086 failed: 'NoneType' object has no attribute 'nwp' - station_task
+2025-05-26 09:28:17,832 - task_worker.py - ERROR - Station 2361 failed: 'NoneType' object has no attribute 'nwp' - station_task
+2025-05-26 09:35:28,332 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:35:34,654 - main.py - INFO - 输入文件目录: 62/1002/2025-04-21/IN - main
+2025-05-26 09:36:04,560 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:36:12,678 - main.py - INFO - 输入文件目录: 62/1002/2025-04-21/IN - main
+2025-05-26 09:41:05,002 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:41:11,193 - main.py - INFO - 输入文件目录: 62/1002/2025-04-21/IN - main
+2025-05-26 09:41:44,865 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:41:51,449 - main.py - INFO - 输入文件目录: 62/1002/2025-04-21/IN - main
+2025-05-26 09:46:40,350 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:47:08,760 - main.py - INFO - 输入文件目录: 62/1002/2025-04-21/IN - main
+2025-05-26 09:47:48,682 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:47:48,799 - main.py - INFO - 输入文件目录: 62/1002/2025-04-21/IN - main
+2025-05-26 09:47:48,799 - main.py - INFO - 执行脚本路径: E:\compete\app\model\main.py - main
+2025-05-26 09:47:51,328 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:47:54,140 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:47:54,143 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 09:47:54,265 - task_worker.py - ERROR - Station 1086 failed: 'NoneType' object has no attribute 'nwp' - station_task
+2025-05-26 09:47:54,265 - task_worker.py - ERROR - Station 2361 failed: 'NoneType' object has no attribute 'nwp' - station_task
+2025-05-26 09:47:54,895 - task_worker.py - ERROR - Area -99 failed: 'NoneType' object has no attribute 'area_id' - region_task
+2025-05-26 10:33:44,175 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 10:38:28,394 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 10:38:40,146 - main.py - INFO - 输入文件目录: 62/1002/2025-04-21/IN - main
+2025-05-26 10:40:16,560 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 10:40:55,718 - main.py - INFO - 输入文件目录: 62/1002/2025-04-21/IN - main
+2025-05-26 10:41:24,424 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 10:43:53,243 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 10:43:53,532 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 10:43:53,622 - task_worker.py - ERROR - Station 1086 failed: 'NoneType' object has no attribute 'nwp' - station_task
+2025-05-26 10:43:53,622 - task_worker.py - ERROR - Station 2361 failed: 'NoneType' object has no attribute 'nwp' - station_task
+2025-05-26 10:45:38,015 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 10:45:38,143 - main.py - INFO - 输入文件目录: 62/1002/2025-04-21/IN - main
+2025-05-26 10:45:40,780 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 10:45:43,622 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 10:45:43,622 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 10:45:43,733 - task_worker.py - ERROR - Station 1086 failed: 'NoneType' object has no attribute 'nwp' - station_task
+2025-05-26 10:45:43,733 - task_worker.py - ERROR - Station 2361 failed: 'NoneType' object has no attribute 'nwp' - station_task
+2025-05-26 10:45:44,346 - task_worker.py - ERROR - Area -99 failed: 'NoneType' object has no attribute 'area_id' - region_task
+2025-05-26 10:48:52,987 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 10:48:53,102 - main.py - INFO - 输入文件目录: 62/1002/2025-04-21/IN - main
+2025-05-26 10:48:55,608 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 10:48:58,429 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 10:48:58,433 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 10:48:58,709 - tf_model_train.py - INFO - GPU 1 allocated - _setup_resources
+2025-05-26 10:48:58,733 - data_cleaning.py - INFO - 开始清洗:训练集…… - cleaning
+2025-05-26 10:48:58,734 - data_cleaning.py - INFO - 开始清洗:训练集…… - cleaning
+2025-05-26 10:48:58,741 - data_cleaning.py - INFO - 行清洗:清洗的行数有:68,缺失的列有: - key_field_row_cleaning
+2025-05-26 10:48:58,742 - data_cleaning.py - INFO - 行清洗:清洗的行数有:69,缺失的列有: - key_field_row_cleaning
+2025-05-26 10:48:58,779 - data_handler.py - INFO - 数据总数:2908, 时序缺失的间隔:0, 其中,较长的时间间隔:0 - missing_time_splite
+2025-05-26 10:48:58,779 - data_handler.py - INFO - 需要补值的总点数:0 - missing_time_splite
+2025-05-26 10:48:58,779 - data_handler.py - INFO - 数据总数:2907, 时序缺失的间隔:0, 其中,较长的时间间隔:0 - missing_time_splite
+2025-05-26 10:48:58,779 - data_handler.py - INFO - 需要补值的总点数:0 - missing_time_splite
+2025-05-26 10:48:58,780 - data_handler.py - INFO - 再次测算,需要插值的总点数为:0.0 - fill_train_data
+2025-05-26 10:48:58,780 - data_handler.py - INFO - 再次测算,需要插值的总点数为:0.0 - fill_train_data
+2025-05-26 10:48:59,746 - dbmg.py - INFO - ⚠️ 未找到模型 'lstm' 的有效记录 - get_keras_model_from_mongo
+2025-05-26 10:48:59,746 - dbmg.py - INFO - ⚠️ 未找到模型 'lstm' 的有效记录 - get_keras_model_from_mongo
+2025-05-26 10:48:59,747 - tf_lstm.py - INFO - 加强训练加载模型权重失败:('cannot unpack non-iterable NoneType object',) - train_init
+2025-05-26 10:48:59,747 - tf_lstm.py - INFO - 加强训练加载模型权重失败:('cannot unpack non-iterable NoneType object',) - train_init
+2025-05-26 10:49:12,523 - tf_lstm.py - INFO - -----模型训练经过100轮迭代----- - training
+2025-05-26 10:49:12,524 - tf_lstm.py - INFO - 训练集损失函数为:[8.9447e-01 3.1291e-01 9.6900e-02 2.6540e-02 6.4300e-03 1.4200e-03
+ 3.2000e-04 1.0000e-04 6.0000e-05 6.0000e-05 6.0000e-05 6.0000e-05
+ 6.0000e-05 6.0000e-05 6.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05] - training
+2025-05-26 10:49:12,524 - tf_lstm.py - INFO - 训练集损失函数为:[9.0170e-01 3.1713e-01 9.9050e-02 2.7600e-02 7.0700e-03 1.9400e-03
+ 8.1000e-04 5.9000e-04 5.5000e-04 5.4000e-04 5.4000e-04 5.4000e-04
+ 5.4000e-04 5.4000e-04 5.4000e-04 5.3000e-04 5.3000e-04 5.3000e-04
+ 5.3000e-04 5.3000e-04 5.3000e-04 5.3000e-04 5.3000e-04 5.3000e-04
+ 5.3000e-04 5.3000e-04 5.3000e-04 5.3000e-04 5.3000e-04 5.3000e-04
+ 5.3000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04
+ 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04
+ 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04
+ 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04
+ 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04
+ 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04
+ 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04
+ 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04
+ 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04
+ 5.1000e-04 5.1000e-04 5.1000e-04 5.1000e-04 5.1000e-04 5.1000e-04
+ 5.1000e-04 5.1000e-04 5.1000e-04 5.1000e-04 5.1000e-04 5.1000e-04
+ 5.1000e-04 5.1000e-04 5.1000e-04 5.1000e-04] - training
+2025-05-26 10:49:12,524 - tf_lstm.py - INFO - 验证集损失函数为:[5.0590e-01 1.6401e-01 4.7160e-02 1.1950e-02 2.7200e-03 6.1000e-04
+ 1.7000e-04 1.0000e-04 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05
+ 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05
+ 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05
+ 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05
+ 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05
+ 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05
+ 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05
+ 8.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05
+ 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05
+ 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05
+ 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05
+ 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05
+ 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05
+ 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05
+ 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05
+ 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05] - training
+2025-05-26 10:49:12,524 - tf_lstm.py - INFO - 验证集损失函数为:[0.51183 0.16731 0.04894 0.01308 0.00363 0.00145 0.001   0.00092 0.0009
+ 0.00089 0.00089 0.00088 0.00088 0.00088 0.00087 0.00087 0.00087 0.00087
+ 0.00087 0.00086 0.00086 0.00086 0.00086 0.00086 0.00086 0.00085 0.00085
+ 0.00085 0.00085 0.00085 0.00085 0.00085 0.00085 0.00085 0.00085 0.00084
+ 0.00084 0.00084 0.00084 0.00084 0.00084 0.00084 0.00084 0.00084 0.00084
+ 0.00084 0.00084 0.00084 0.00083 0.00083 0.00083 0.00083 0.00083 0.00083
+ 0.00083 0.00083 0.00083 0.00083 0.00083 0.00083 0.00083 0.00083 0.00083
+ 0.00083 0.00083 0.00083 0.00083 0.00083 0.00083 0.00083 0.00083 0.00082
+ 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082
+ 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082
+ 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082
+ 0.00082] - training
+2025-05-26 10:49:12,642 - dbmg.py - INFO - ✅ 模型 lstm 保存成功 | 文档ID: 6833d6a876c6370eb176b800 - insert_trained_model_into_mongo
+2025-05-26 10:49:12,645 - dbmg.py - INFO - ✅ 模型 lstm 保存成功 | 文档ID: 6833d6a88a9ccd464b193ac6 - insert_trained_model_into_mongo
+2025-05-26 10:49:12,664 - dbmg.py - INFO - ✅ 缩放器 lstm 保存成功 | 文档ID: 6833d6a88a9ccd464b193ac8 - insert_scaler_model_into_mongo
+2025-05-26 10:49:12,673 - dbmg.py - INFO - ✅ 缩放器 lstm 保存成功 | 文档ID: 6833d6a876c6370eb176b802 - insert_scaler_model_into_mongo
+2025-05-26 10:49:13,994 - tf_model_train.py - INFO - GPU 1 allocated - _setup_resources
+2025-05-26 10:49:14,012 - data_cleaning.py - INFO - 开始清洗:训练集…… - cleaning
+2025-05-26 10:49:14,019 - data_cleaning.py - INFO - 行清洗:清洗的行数有:68,缺失的列有: - key_field_row_cleaning
+2025-05-26 10:49:14,046 - data_handler.py - INFO - 数据总数:2908, 时序缺失的间隔:0, 其中,较长的时间间隔:0 - missing_time_splite
+2025-05-26 10:49:14,046 - data_handler.py - INFO - 需要补值的总点数:0 - missing_time_splite
+2025-05-26 10:49:14,046 - data_handler.py - INFO - 再次测算,需要插值的总点数为:0.0 - fill_train_data
+2025-05-26 10:49:14,961 - dbmg.py - INFO - ⚠️ 未找到模型 'lstm' 的有效记录 - get_keras_model_from_mongo
+2025-05-26 10:49:14,962 - tf_lstm.py - INFO - 加强训练加载模型权重失败:('cannot unpack non-iterable NoneType object',) - train_init
+2025-05-26 10:49:26,513 - tf_lstm.py - INFO - -----模型训练经过100轮迭代----- - training
+2025-05-26 10:49:26,514 - tf_lstm.py - INFO - 训练集损失函数为:[9.0123e-01 3.1717e-01 9.8830e-02 2.7190e-02 6.5700e-03 1.4100e-03
+ 2.7000e-04 5.0000e-05 1.0000e-05 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00] - training
+2025-05-26 10:49:26,514 - tf_lstm.py - INFO - 验证集损失函数为:[5.1155e-01 1.6690e-01 4.8240e-02 1.2230e-02 2.7300e-03 5.4000e-04
+ 1.0000e-04 2.0000e-05 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00] - training
+2025-05-26 10:49:26,589 - dbmg.py - INFO - ✅ 模型 lstm 保存成功 | 文档ID: 6833d6b6407675b5bca4c083 - insert_trained_model_into_mongo
+2025-05-26 10:49:26,626 - dbmg.py - INFO - ✅ 缩放器 lstm 保存成功 | 文档ID: 6833d6b6407675b5bca4c085 - insert_scaler_model_into_mongo
+2025-05-26 10:55:40,499 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 10:55:40,612 - main.py - INFO - 输入文件目录: 62/1002/2025-04-21/IN - main
+2025-05-26 10:55:43,103 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 10:55:45,883 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 10:55:45,885 - module_wrapper.py - WARNING - From E:\compete\app\model\losses.py:10: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.
+ - _tfmw_add_deprecation_warning
+2025-05-26 10:55:46,085 - tf_model_train.py - INFO - GPU 2 allocated - _setup_resources
+2025-05-26 10:55:46,086 - tf_model_train.py - INFO - GPU 1 allocated - _setup_resources
+2025-05-26 10:55:46,104 - data_cleaning.py - INFO - 开始清洗:训练集…… - cleaning
+2025-05-26 10:55:46,105 - data_cleaning.py - INFO - 开始清洗:训练集…… - cleaning
+2025-05-26 10:55:46,111 - data_cleaning.py - INFO - 行清洗:清洗的行数有:69,缺失的列有: - key_field_row_cleaning
+2025-05-26 10:55:46,112 - data_cleaning.py - INFO - 行清洗:清洗的行数有:68,缺失的列有: - key_field_row_cleaning
+2025-05-26 10:55:46,139 - data_handler.py - INFO - 数据总数:2907, 时序缺失的间隔:0, 其中,较长的时间间隔:0 - missing_time_splite
+2025-05-26 10:55:46,139 - data_handler.py - INFO - 需要补值的总点数:0 - missing_time_splite
+2025-05-26 10:55:46,139 - data_handler.py - INFO - 再次测算,需要插值的总点数为:0.0 - fill_train_data
+2025-05-26 10:55:46,140 - data_handler.py - INFO - 数据总数:2908, 时序缺失的间隔:0, 其中,较长的时间间隔:0 - missing_time_splite
+2025-05-26 10:55:46,140 - data_handler.py - INFO - 需要补值的总点数:0 - missing_time_splite
+2025-05-26 10:55:46,141 - data_handler.py - INFO - 再次测算,需要插值的总点数为:0.0 - fill_train_data
+2025-05-26 10:55:47,068 - dbmg.py - INFO - ⚠️ 未找到模型 'lstm' 的有效记录 - get_keras_model_from_mongo
+2025-05-26 10:55:47,068 - dbmg.py - INFO - ⚠️ 未找到模型 'lstm' 的有效记录 - get_keras_model_from_mongo
+2025-05-26 10:55:47,068 - tf_lstm.py - INFO - 加强训练加载模型权重失败:('cannot unpack non-iterable NoneType object',) - train_init
+2025-05-26 10:55:47,068 - tf_lstm.py - INFO - 加强训练加载模型权重失败:('cannot unpack non-iterable NoneType object',) - train_init
+2025-05-26 10:55:59,678 - tf_lstm.py - INFO - -----模型训练经过100轮迭代----- - training
+2025-05-26 10:55:59,678 - tf_lstm.py - INFO - -----模型训练经过100轮迭代----- - training
+2025-05-26 10:55:59,678 - tf_lstm.py - INFO - 训练集损失函数为:[9.0739e-01 3.1934e-01 9.9630e-02 2.7470e-02 6.6900e-03 1.4800e-03
+ 3.3000e-04 1.0000e-04 6.0000e-05 6.0000e-05 6.0000e-05 6.0000e-05
+ 6.0000e-05 6.0000e-05 6.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05
+ 5.0000e-05 5.0000e-05 5.0000e-05 5.0000e-05] - training
+2025-05-26 10:55:59,678 - tf_lstm.py - INFO - 训练集损失函数为:[9.0427e-01 3.1717e-01 9.8750e-02 2.7500e-02 7.0700e-03 1.9500e-03
+ 8.2000e-04 5.9000e-04 5.5000e-04 5.4000e-04 5.4000e-04 5.4000e-04
+ 5.4000e-04 5.4000e-04 5.4000e-04 5.3000e-04 5.3000e-04 5.3000e-04
+ 5.3000e-04 5.3000e-04 5.3000e-04 5.3000e-04 5.3000e-04 5.3000e-04
+ 5.3000e-04 5.3000e-04 5.3000e-04 5.3000e-04 5.3000e-04 5.3000e-04
+ 5.3000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04
+ 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04
+ 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04
+ 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04
+ 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04
+ 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04
+ 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04
+ 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04
+ 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04 5.2000e-04
+ 5.2000e-04 5.2000e-04 5.1000e-04 5.1000e-04 5.1000e-04 5.1000e-04
+ 5.1000e-04 5.1000e-04 5.1000e-04 5.1000e-04 5.1000e-04 5.1000e-04
+ 5.1000e-04 5.1000e-04 5.1000e-04 5.1000e-04] - training
+2025-05-26 10:55:59,679 - tf_lstm.py - INFO - 验证集损失函数为:[5.1485e-01 1.6816e-01 4.8690e-02 1.2410e-02 2.8400e-03 6.3000e-04
+ 1.8000e-04 1.0000e-04 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05
+ 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05
+ 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05
+ 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05
+ 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05
+ 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05
+ 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05 8.0000e-05
+ 8.0000e-05 8.0000e-05 8.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05
+ 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05
+ 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05
+ 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05
+ 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05
+ 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05
+ 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05
+ 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05
+ 7.0000e-05 7.0000e-05 7.0000e-05 7.0000e-05] - training
+2025-05-26 10:55:59,679 - tf_lstm.py - INFO - 验证集损失函数为:[0.51265 0.16698 0.04873 0.01305 0.00364 0.00146 0.001   0.00092 0.0009
+ 0.00089 0.00089 0.00088 0.00088 0.00088 0.00087 0.00087 0.00087 0.00087
+ 0.00086 0.00086 0.00086 0.00086 0.00086 0.00086 0.00086 0.00085 0.00085
+ 0.00085 0.00085 0.00085 0.00085 0.00085 0.00085 0.00085 0.00085 0.00084
+ 0.00084 0.00084 0.00084 0.00084 0.00084 0.00084 0.00084 0.00084 0.00084
+ 0.00084 0.00084 0.00084 0.00084 0.00083 0.00083 0.00083 0.00083 0.00083
+ 0.00083 0.00083 0.00083 0.00083 0.00083 0.00083 0.00083 0.00083 0.00083
+ 0.00083 0.00083 0.00083 0.00083 0.00083 0.00083 0.00083 0.00083 0.00083
+ 0.00083 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082
+ 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082
+ 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082 0.00082
+ 0.00082] - training
+2025-05-26 10:55:59,716 - dbmg.py - INFO - ✅ 模型 lstm 保存成功 | 文档ID: 6833d83f6162e6249af02c4c - insert_trained_model_into_mongo
+2025-05-26 10:55:59,716 - dbmg.py - INFO - ✅ 模型 lstm 保存成功 | 文档ID: 6833d83f7a77586f9dc561a2 - insert_trained_model_into_mongo
+2025-05-26 10:55:59,723 - dbmg.py - INFO - ✅ 缩放器 lstm 保存成功 | 文档ID: 6833d83f7a77586f9dc561a4 - insert_scaler_model_into_mongo
+2025-05-26 10:55:59,759 - dbmg.py - INFO - ✅ 缩放器 lstm 保存成功 | 文档ID: 6833d83f6162e6249af02c4e - insert_scaler_model_into_mongo
+2025-05-26 10:56:01,061 - tf_model_train.py - INFO - GPU 1 allocated - _setup_resources
+2025-05-26 10:56:01,078 - data_cleaning.py - INFO - 开始清洗:训练集…… - cleaning
+2025-05-26 10:56:01,087 - data_cleaning.py - INFO - 行清洗:清洗的行数有:68,缺失的列有: - key_field_row_cleaning
+2025-05-26 10:56:01,112 - data_handler.py - INFO - 数据总数:2908, 时序缺失的间隔:0, 其中,较长的时间间隔:0 - missing_time_splite
+2025-05-26 10:56:01,112 - data_handler.py - INFO - 需要补值的总点数:0 - missing_time_splite
+2025-05-26 10:56:01,113 - data_handler.py - INFO - 再次测算,需要插值的总点数为:0.0 - fill_train_data
+2025-05-26 10:56:02,019 - dbmg.py - INFO - ⚠️ 未找到模型 'lstm' 的有效记录 - get_keras_model_from_mongo
+2025-05-26 10:56:02,019 - tf_lstm.py - INFO - 加强训练加载模型权重失败:('cannot unpack non-iterable NoneType object',) - train_init
+2025-05-26 10:56:13,741 - tf_lstm.py - INFO - -----模型训练经过100轮迭代----- - training
+2025-05-26 10:56:13,742 - tf_lstm.py - INFO - 训练集损失函数为:[8.9571e-01 3.1434e-01 9.7760e-02 2.6850e-02 6.4800e-03 1.3800e-03
+ 2.6000e-04 5.0000e-05 1.0000e-05 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00] - training
+2025-05-26 10:56:13,742 - tf_lstm.py - INFO - 验证集损失函数为:[5.0752e-01 1.6519e-01 4.7670e-02 1.2060e-02 2.6900e-03 5.3000e-04
+ 9.0000e-05 2.0000e-05 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00
+ 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00] - training
+2025-05-26 10:56:13,779 - dbmg.py - INFO - ✅ 模型 lstm 保存成功 | 文档ID: 6833d84d8a59ed23c934ddf3 - insert_trained_model_into_mongo
+2025-05-26 10:56:13,800 - dbmg.py - INFO - ✅ 缩放器 lstm 保存成功 | 文档ID: 6833d84d8a59ed23c934ddf5 - insert_scaler_model_into_mongo

+ 87 - 14
app/model/main.py

@@ -9,11 +9,8 @@
 模型调参及系统功能配置
 """
 import concurrent.futures
+import os.path
 import types
-
-from pyexpat import features
-
-from tensorflow import add_n
 from tqdm import tqdm
 import pandas as pd
 from pathlib import Path
@@ -21,7 +18,7 @@ from copy import deepcopy
 from concurrent.futures import ProcessPoolExecutor
 from app.common.config import parser, logger
 from app.model.resource_manager import ResourceController
-from app.model.task_worker import Task
+from app.model.task_worker import TaskTrain, TaskPre
 from app.model.material import MaterialLoader
 from multiprocessing import Manager, Lock
 
@@ -57,9 +54,10 @@ def dq_train(opt):
     )
 
     # 生成任务列表
-    all_stations = [str(child.parts[-1]) for child in Path(opt.input_file).iterdir() if child.is_dir()]
-    loader = MaterialLoader(opt.input_file)
-    task = Task(loader)
+    target_dir = os.path.join(opt.dqyc_base_path, opt.input_file)
+    all_stations = [str(child.parts[-1]) for child in Path(str(target_dir)).iterdir() if child.is_dir()]
+    loader = MaterialLoader(target_dir)
+    task = TaskTrain(loader)
     # ---------------------------- 监控任务,进度跟踪 ----------------------------
     # 场站级功率预测训练
     completed = 0
@@ -121,7 +119,82 @@ def dq_train(opt):
     task.region_task(task_config, data_nwps)
 
 def dq_predict(opt):
-    pass
+    # ---------------------------- 配置计算资源和任务 ----------------------------
+    config = opt.__dict__
+    # 初始化资源管理器
+    rc = ResourceController(
+        max_workers=opt.system['max_workers'],
+        gpu_list=opt.system['gpu_devices']
+    )
+
+    # 生成任务列表
+    target_dir = os.path.join(opt.dqyc_base_path, opt.input_file)
+    all_stations = [str(child.parts[-1]) for child in Path(str(target_dir)).iterdir() if child.is_dir()]
+    loader = MaterialLoader(target_dir)
+    task = TaskPre(loader)
+    # ---------------------------- 监控任务,进度跟踪 ----------------------------
+    # 场站级功率预测训练
+    completed = 0
+    with tqdm(total=len(all_stations)) as pbar:
+        with ProcessPoolExecutor(max_workers=rc.cpu_cores) as executor:
+            futures = []
+            for sid in all_stations:
+                # 动态分配GPU
+                task_config = deepcopy(config)
+                gpu_id = rc.get_gpu()
+                task_config['gpu_assignment'] = gpu_id
+                task_config['station_id'] = sid
+                # 提交任务
+                future = executor.submit(task.station_task, task_config)
+                future.add_done_callback(
+                    lambda _: rc.release_gpu(task_config['gpu_assignment']))
+                futures.append(future)
+
+            total_cap = 0
+            weighted_nwp = pd.DataFrame()
+            weighted_nwp_h = pd.DataFrame()
+            weighted_nwp_v = pd.DataFrame()
+            weighted_nwp_v_h = pd.DataFrame()
+
+            # 处理完成情况
+            for future in concurrent.futures.as_completed(futures):
+                try:
+                    result = future.result()
+                    if result['status'] == 'success':
+                        # 分治-汇总策略得到加权后的nwp
+                        completed += 1
+                        local = result['weights']
+                        total_cap += local['cap']
+                        weighted_nwp = add_nwp(weighted_nwp, local['nwp'])
+                        weighted_nwp_h = add_nwp(weighted_nwp_h, local['nwp_h'])
+                        weighted_nwp_v = add_nwp(weighted_nwp_v, local['nwp_v'])
+                        weighted_nwp_v_h = add_nwp(weighted_nwp_v_h, local['nwp_v_h'])
+                    pbar.update(1)
+                    pbar.set_postfix_str(f"Completed: {completed}/{len(all_stations)}")
+                except Exception as e:
+                    print(f"Task failed: {e}")
+    # 归一化处理
+    use_cols = [col for col in weighted_nwp.columns if
+                col not in ['PlantID', 'PlantName', 'PlantType', 'Qbsj', 'Datetime']]
+    use_cols_v = [col for col in weighted_nwp_v.columns if
+                  col not in ['PlantID', 'PlantName', 'PlantType', 'Qbsj', 'Datetime']]
+    weighted_nwp[use_cols] /= total_cap
+    weighted_nwp_h[use_cols] /= total_cap
+    weighted_nwp[use_cols] = weighted_nwp[use_cols].round(2)
+    weighted_nwp_h[use_cols] = weighted_nwp_h[use_cols].round(2)
+    weighted_nwp_v[use_cols_v] /= total_cap
+    weighted_nwp_v_h[use_cols_v] /= total_cap
+    weighted_nwp_v[use_cols_v] = weighted_nwp_v[use_cols_v].round(2)
+    weighted_nwp_v_h[use_cols_v] = weighted_nwp_v_h[use_cols_v].round(2)
+    data_nwps = types.SimpleNamespace(
+        **{'nwp': weighted_nwp, 'nwp_h': weighted_nwp_h, 'nwp_v': weighted_nwp_v, 'nwp_v_h': weighted_nwp_v_h,
+           'total_cap': total_cap})
+    print(f"Final result: {completed} stations trained successfully")
+    # 区域级功率预测训练
+    task_config = deepcopy(config)
+    gpu_id = rc.get_gpu()
+    task_config['gpu_assignment'] = gpu_id
+    task.region_task(task_config, data_nwps)
 
 def cdq_train(opt):
     pass
@@ -133,16 +206,16 @@ def main():
     # ---------------------------- 解析参数 ----------------------------
     # 解析参数,将固定参数和任务参数合并
     opt = parser.parse_args_and_yaml()
-    config = opt.__dict__
     # 打印参数
     logger.info(f"输入文件目录: {opt.input_file}")
-    if 'dqyc' in opt.input_file.lower():
-        if 'model' in opt.input_file.lower():
-            dq_predict(opt)
+    is_dq = opt.input_file.split('/')
+    if len(is_dq) == 4:  # 根据input_file第一个位置参数判断训练还是预测
+        if opt.train_mode:
+            dq_train(opt)
         else:
             dq_predict(opt)
     else:
-        if 'model' in opt.input_file.lower():
+        if opt.train_mode:
             cdq_train(opt)
         else:
             cdq_predict(opt)

+ 6 - 7
app/model/material.py

@@ -14,18 +14,17 @@ from functools import partial
 
 
 class MaterialLoader:
-    def __init__(self, input_file, lazy_load=True):
+    def __init__(self, target_dir, lazy_load=True):
         self.lazy_load = lazy_load
         self._data_cache = {}
         self.opt = parser.parse_args_and_yaml()
-        self.base_path = Path(self.opt.dqyc_base_path)
-        self.base_path_cdq = Path(self.opt.cdqyc_base_path)
+        self.target_dir = target_dir
 
     def wrapper_path(self, station_id, spec):
-        return f"{self.base_path/station_id/spec}.txt"
+        return f"{Path(self.target_dir)/station_id/spec}.txt"
 
     def wrapper_path_cdq(self, area_id, spec):
-        return f"{self.base_path_cdq/area_id/spec}.txt"
+        return f"{self.target_dir/area_id/spec}.txt"
 
     def _load_material(self, station_id):
         """核心数据加载方法"""
@@ -174,8 +173,8 @@ class MaterialLoader:
 
     def get_material_region(self):
         try:
-            basic = pd.read_csv(os.path.join(self.base_path, self.opt.doc_mapping['basic_area']+'.txt'), sep=r'\s+', header=0)
-            power = pd.read_csv(os.path.join(self.base_path, self.opt.doc_mapping['power_area']+'.txt'), sep=r'\s+', header=0)
+            basic = pd.read_csv(os.path.join(str(self.target_dir), self.opt.doc_mapping['basic_area']+'.txt'), sep=r'\s+', header=0)
+            power = pd.read_csv(os.path.join(str(self.target_dir), self.opt.doc_mapping['power_area']+'.txt'), sep=r'\s+', header=0)
             plant_type = int(basic.loc[basic['PropertyID'].tolist().index('PlantType'), 'Value'])
             area_id = int(basic.loc[basic['PropertyID'].tolist().index('AreaId'), 'Value'])
             assert plant_type == 0 or plant_type == 1

+ 59 - 1
app/model/task_worker.py

@@ -8,11 +8,12 @@
 import logging
 import pandas as pd
 from app.model.tf_model_train import ModelTrainer
+from app.predict.tf_model_pre import ModelPre
 from app.model.tf_region_train import RegionTrainer
 from app.model.material import MaterialLoader
 
 
-class Task(object):
+class TaskTrain(object):
     def __init__(self, loader):
         self.loader = loader
 
@@ -67,4 +68,61 @@ class Task(object):
             return {'status': 'success', 'area_id': area_id}
         except Exception as e:
             logging.error(f"Area {area_id} failed: {str(e)}")
+            return {'status': 'failed', 'area_id': area_id}
+
+class TaskPre(object):
+    def __init__(self, loader):
+        self.loader = loader
+
+    def station_task(self, config):
+        """场站级训练任务"""
+        station_id = -99
+        try:
+            print("111")
+            station_id = config['station_id']
+            # 动态生成场站数据路径
+            print("222")
+            # 加载数据
+            data_objects = self.loader.get_material(station_id)
+            local_weights = self.loader.add_weights(data_objects)
+            print("333")
+            # 数据合并
+            pre_data = data_objects.nwp_v
+            print("444")
+            # 模型训练
+            # model = ModelTrainer(station_id, train_data, capacity=data_objects.cap, gpu_id=config.get('gpu_assignment'))
+            model = ModelPre(pre_data, capacity=data_objects.cap, config=config)
+            model.train()
+            print("555")
+            return {'status': 'success', 'station_id': station_id, 'weights': local_weights}
+        except Exception as e:
+            logging.error(f"Station {station_id} failed: {str(e)}")
+            return {'status': 'failed', 'station_id': station_id}
+
+
+    def region_task(self, config, data_nwps):
+        """区域级训练任务"""
+        area_id = -99
+        try:
+            print("111")
+            # 动态生成场站数据路径
+            print("222")
+            # 加载数据
+            data_objects = self.loader.get_material_region()
+            config['area_id'] = data_objects.area_id
+            area_id = data_objects.area_id
+            print("333")
+            # 数据合并
+            print(data_nwps.nwp)
+            print(data_nwps.nwp_v)
+            print("累加的区域装机量{},实际区域装机量{}".format(data_nwps.total_cap, data_objects.area_cap))
+            train_data = pd.merge(data_nwps.nwp_v_h, data_objects.power, on=config['col_time'])
+            print("444")
+            # 模型训练
+            model = ModelTrainer(train_data, capacity=data_objects.area_cap, config=config)
+            model.train(pre_area=True)
+            print("555")
+            return {'status': 'success', 'area_id': area_id}
+        except Exception as e:
+            logging.error(f"Area {area_id} failed: {str(e)}")
             return {'status': 'failed', 'area_id': area_id}

+ 0 - 109
app/model/tf_cnn_train.py

@@ -1,109 +0,0 @@
-#!/usr/bin/env python
-# -*- coding:utf-8 -*-
-# @FileName  :tf_lstm_train.py
-# @Time      :2025/2/13 10:52
-# @Author    :David
-# @Company: shenyang JY
-import json, os
-import numpy as np
-import traceback
-import logging
-from app.common.logs import params
-from app.common.data_handler import DataHandler, write_number_to_file
-import time
-from app.common.tf_cnn import CNNHandler
-from app.common.dbmg import MongoUtils
-from app.common.logs import logger
-from copy import deepcopy
-np.random.seed(42)  # NumPy随机种子
-# tf.set_random_seed(42)  # TensorFlow随机种子
-
-
-mgUtils = MongoUtils(logger)
-
-def model_training(train_data, input_file, cap):
-    # 获取程序开始时间
-    start_time = time.time()
-    success = 0
-    logger.info("Program starts execution!")
-    farm_id = input_file.split('/')[-2]
-    output_file = input_file.replace('IN', 'OUT')
-    status_file = 'STATUS.TXT'
-    local_params = deepcopy(params)
-    dh = DataHandler(logger, local_params)
-    cnn = CNNHandler(logger, local_params)
-    try:
-        # ------------ 获取数据,预处理训练数据 ------------
-        dh.opt.cap = cap
-        train_x, valid_x, train_y, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data)
-        cnn.opt.cap = round(scaled_cap, 2)
-        cnn.opt.Model['input_size'] = train_x.shape[2]
-        # ------------ 训练模型,保存模型 ------------
-        # 1. 如果是加强训练模式,先加载预训练模型特征参数,再预处理训练数据
-        # 2. 如果是普通模式,先预处理训练数据,再根据训练数据特征加载模型
-        model = cnn.train_init() if cnn.opt.Model['add_train'] else cnn.get_keras_model(cnn.opt)
-        if cnn.opt.Model['add_train']:
-            if model:
-                feas = json.loads(cnn.model_params).get('features', dh.opt.features)
-                if set(feas).issubset(set(dh.opt.features)):
-                    dh.opt.features = list(feas)
-                    train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data)
-                else:
-                    model = cnn.get_keras_model(cnn.opt)
-                    logger.info("训练数据特征,不满足,加强训练模型特征")
-            else:
-                model = cnn.get_keras_model(cnn.opt)
-
-        ts_model = cnn.training(model, [train_x, valid_x, train_y, valid_y])
-        success = 1
-        # 更新算法状态:1. 启动成功
-        write_number_to_file(os.path.join(output_file, status_file), 1, 1, 'rewrite')
-        # ------------ 组装模型数据 ------------
-        local_params['Model']['features'] = ','.join(dh.opt.features)
-        local_params.update({
-            'params': json.dumps(local_params),
-            'descr': f'南网竞赛-{farm_id}',
-            'gen_time': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
-            'model_table': local_params['model_table'] + farm_id,
-            'scaler_table': local_params['scaler_table'] + farm_id
-        })
-        mgUtils.insert_trained_model_into_mongo(ts_model, local_params)
-        mgUtils.insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, local_params)
-        # 更新算法状态:正常结束
-        write_number_to_file(os.path.join(output_file, status_file), 2, 2)
-    except Exception as e:
-        # 如果算法状态没启动,不更新
-        if success:
-            write_number_to_file(os.path.join(output_file, status_file), 2, 3)
-        my_exception = traceback.format_exc()
-        my_exception.replace("\n", "\t")
-    end_time = time.time()
-    logger.info("cnn训练任务:用了 %s 秒 " % (end_time-start_time))
-
-
-if __name__ == "__main__":
-    print("Program starts execution!")
-    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-    logger = logging.getLogger("model_training_bp log")
-    from waitress import serve
-
-    serve(app, host="0.0.0.0", port=10103, threads=4)
-    print("server start!")
-    # args_dict = {"mongodb_database": 'realtimeDq', 'scaler_table': 'j00600_scaler', 'model_name': 'lstm1',
-    # 'model_table': 'j00600_model', 'mongodb_read_table': 'j00600', 'col_time': 'dateTime',
-    # 'features': 'speed10,direction10,speed30,direction30,speed50,direction50,speed70,direction70,speed90,direction90,speed110,direction110,speed150,direction150,speed170,direction170'}
-    # args_dict['features'] = args_dict['features'].split(',')
-    # args.update(args_dict)
-    # dh = DataHandler(logger, args)
-    # ts = TSHandler(logger, args)
-    # opt = argparse.Namespace(**args)
-    # opt.Model['input_size'] = len(opt.features)
-    # train_data = get_data_from_mongo(args_dict)
-    # train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes = dh.train_data_handler(train_data)
-    # ts_model = ts.training([train_x, train_y, valid_x, valid_y])
-    #
-    # args_dict['gen_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
-    # args_dict['params'] = args
-    # args_dict['descr'] = '测试'
-    # insert_trained_model_into_mongo(ts_model, args_dict)
-    # insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, args_dict)

+ 0 - 109
app/model/tf_fmi_train.py

@@ -1,109 +0,0 @@
-#!/usr/bin/env python
-# -*- coding:utf-8 -*-
-# @FileName  :tf_lstm_train.py
-# @Time      :2025/2/13 10:52
-# @Author    :David
-# @Company: shenyang JY
-import json, os
-import numpy as np
-import traceback
-import logging
-from app.common.logs import params
-from app.common.data_handler import DataHandler, write_number_to_file
-import time
-from app.common.tf_fmi import FMIHandler
-from app.common.dbmg import MongoUtils
-from app.common.logs import logger
-from copy import deepcopy
-np.random.seed(42)  # NumPy随机种子
-# tf.set_random_seed(42)  # TensorFlow随机种子
-
-mgUtils = MongoUtils(logger)
-
-def model_training(train_data, input_file, cap):
-    # 获取程序开始时间
-    start_time = time.time()
-    success = 0
-    logger.info("Program starts execution!")
-    farm_id = input_file.split('/')[-2]
-    output_file = input_file.replace('IN', 'OUT')
-    status_file = 'STATUS.TXT'
-    # 创建线程独立的实例
-    local_params = deepcopy(params)
-    dh = DataHandler(logger, local_params)
-    fmi = FMIHandler(logger, local_params)
-    try:
-        # ------------ 获取数据,预处理训练数据 ------------
-        dh.opt.cap = cap
-        train_x, valid_x, train_y, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data)
-        fmi.opt.cap = round(scaled_cap, 2)
-        fmi.opt.Model['input_size'] = train_x.shape[2]
-        # ------------ 训练模型,保存模型 ------------
-        # 1. 如果是加强训练模式,先加载预训练模型特征参数,再预处理训练数据
-        # 2. 如果是普通模式,先预处理训练数据,再根据训练数据特征加载模型
-        model = fmi.train_init() if fmi.opt.Model['add_train'] else fmi.get_keras_model(fmi.opt)
-        if fmi.opt.Model['add_train']:
-            if model:
-                feas = json.loads(fmi.model_params).get('features', dh.opt.features)
-                if set(feas).issubset(set(dh.opt.features)):
-                    dh.opt.features = list(feas)
-                    train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data)
-                else:
-                    model = fmi.get_keras_model(fmi.opt)
-                    logger.info("训练数据特征,不满足,加强训练模型特征")
-            else:
-                model = fmi.get_keras_model(fmi.opt)
-
-        ts_model = fmi.training(model, [train_x, valid_x, train_y, valid_y])
-        success = 1
-        # 更新算法状态:1. 启动成功
-        write_number_to_file(os.path.join(output_file, status_file), 1, 1, 'rewrite')
-        # ------------ 组装模型数据 ------------
-        local_params['Model']['features'] = ','.join(dh.opt.features)
-        local_params.update({
-            'params': json.dumps(local_params),
-            'descr': f'南网竞赛-{farm_id}',
-            'gen_time': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
-            'model_table': local_params['model_table'] + farm_id,
-            'scaler_table': local_params['scaler_table'] + farm_id
-        })
-        mgUtils.insert_trained_model_into_mongo(ts_model, local_params)
-        mgUtils.insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, local_params)
-        # 更新算法状态:正常结束
-        write_number_to_file(os.path.join(output_file, status_file), 2, 2)
-    except Exception as e:
-        # 如果算法状态没启动,不更新
-        if success:
-            write_number_to_file(os.path.join(output_file, status_file), 2, 3)
-        my_exception = traceback.format_exc()
-        my_exception.replace("\n", "\t")
-    end_time = time.time()
-    logger.info("fmi训练任务:用了 %s 秒 " % (end_time-start_time))
-
-
-if __name__ == "__main__":
-    print("Program starts execution!")
-    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-    logger = logging.getLogger("model_training_bp log")
-    from waitress import serve
-
-    serve(app, host="0.0.0.0", port=10103, threads=4)
-    print("server start!")
-    # args_dict = {"mongodb_database": 'realtimeDq', 'scaler_table': 'j00600_scaler', 'model_name': 'lstm1',
-    # 'model_table': 'j00600_model', 'mongodb_read_table': 'j00600', 'col_time': 'dateTime',
-    # 'features': 'speed10,direction10,speed30,direction30,speed50,direction50,speed70,direction70,speed90,direction90,speed110,direction110,speed150,direction150,speed170,direction170'}
-    # args_dict['features'] = args_dict['features'].split(',')
-    # args.update(args_dict)
-    # dh = DataHandler(logger, args)
-    # ts = TSHandler(logger, args)
-    # opt = argparse.Namespace(**args)
-    # opt.Model['input_size'] = len(opt.features)
-    # train_data = get_data_from_mongo(args_dict)
-    # train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes = dh.train_data_handler(train_data)
-    # ts_model = ts.training([train_x, train_y, valid_x, valid_y])
-    #
-    # args_dict['gen_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
-    # args_dict['params'] = args
-    # args_dict['descr'] = '测试'
-    # insert_trained_model_into_mongo(ts_model, args_dict)
-    # insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, args_dict)

+ 0 - 109
app/model/tf_lstm_train.py

@@ -1,109 +0,0 @@
-#!/usr/bin/env python
-# -*- coding:utf-8 -*-
-# @FileName  :tf_lstm_train.py
-# @Time      :2025/2/13 10:52
-# @Author    :David
-# @Company: shenyang JY
-import json, os
-import numpy as np
-import traceback
-import logging
-from app.common.logs import params
-from app.common.data_handler import DataHandler, write_number_to_file
-import time
-from app.common.tf_lstm import TSHandler
-from app.common.dbmg import MongoUtils
-from app.common.logs import logger
-from copy import deepcopy
-np.random.seed(42)  # NumPy随机种子
-# tf.set_random_seed(42)  # TensorFlow随机种子
-
-mgUtils = MongoUtils(logger)
-
-def model_training(train_data, input_file, cap):
-    # 获取程序开始时间
-    start_time = time.time()
-    success = 0
-    logger.info("Program starts execution!")
-    farm_id = input_file.split('/')[-2]
-    output_file = input_file.replace('IN', 'OUT')
-    status_file = 'STATUS.TXT'
-    # 创建线程独立的实例
-    local_params = deepcopy(params)
-    dh = DataHandler(logger, local_params)
-    ts = TSHandler(logger, local_params)
-    try:
-        # ------------ 获取数据,预处理训练数据 ------------
-        dh.opt.cap = cap
-        train_x, valid_x, train_y, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data)
-        ts.opt.cap = round(scaled_cap, 2)
-        ts.opt.Model['input_size'] = train_x.shape[2]
-        # ------------ 训练模型,保存模型 ------------
-        # 1. 如果是加强训练模式,先加载预训练模型特征参数,再预处理训练数据
-        # 2. 如果是普通模式,先预处理训练数据,再根据训练数据特征加载模型
-        model = ts.train_init() if ts.opt.Model['add_train'] else ts.get_keras_model(ts.opt)
-        if ts.opt.Model['add_train']:
-            if model:
-                feas = json.loads(ts.model_params).get('features', dh.opt.features)
-                if set(feas).issubset(set(dh.opt.features)):
-                    dh.opt.features = list(feas)
-                    train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data)
-                else:
-                    model = ts.get_keras_model(ts.opt)
-                    logger.info("训练数据特征,不满足,加强训练模型特征")
-            else:
-                model = ts.get_keras_model(ts.opt)
-
-        ts_model = ts.training(model, [train_x, valid_x, train_y, valid_y])
-        success = 1
-        # 更新算法状态:1. 启动成功
-        write_number_to_file(os.path.join(output_file, status_file), 1, 1, 'rewrite')
-        # ------------ 组装模型数据 ------------
-        local_params['Model']['features'] = ','.join(dh.opt.features)
-        local_params.update({
-            'params': json.dumps(local_params),
-            'descr': f'南网竞赛-{farm_id}',
-            'gen_time': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
-            'model_table': local_params['model_table'] + farm_id,
-            'scaler_table': local_params['scaler_table'] + farm_id
-        })
-        mgUtils.insert_trained_model_into_mongo(ts_model, local_params)
-        mgUtils.insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, local_params)
-        # 更新算法状态:正常结束
-        write_number_to_file(os.path.join(output_file, status_file), 2, 2)
-    except Exception as e:
-        # 如果算法状态没启动,不更新
-        if success:
-            write_number_to_file(os.path.join(output_file, status_file), 2, 3)
-        my_exception = traceback.format_exc()
-        my_exception.replace("\n", "\t")
-    end_time = time.time()
-    logger.info("lstm训练任务:用了 %s 秒 " % (end_time-start_time))
-
-
-if __name__ == "__main__":
-    print("Program starts execution!")
-    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-    logger = logging.getLogger("model_training_bp log")
-    from waitress import serve
-
-    serve(app, host="0.0.0.0", port=10103, threads=4)
-    print("server start!")
-    # args_dict = {"mongodb_database": 'realtimeDq', 'scaler_table': 'j00600_scaler', 'model_name': 'lstm1',
-    # 'model_table': 'j00600_model', 'mongodb_read_table': 'j00600', 'col_time': 'dateTime',
-    # 'features': 'speed10,direction10,speed30,direction30,speed50,direction50,speed70,direction70,speed90,direction90,speed110,direction110,speed150,direction150,speed170,direction170'}
-    # args_dict['features'] = args_dict['features'].split(',')
-    # args.update(args_dict)
-    # dh = DataHandler(logger, args)
-    # ts = TSHandler(logger, args)
-    # opt = argparse.Namespace(**args)
-    # opt.Model['input_size'] = len(opt.features)
-    # train_data = get_data_from_mongo(args_dict)
-    # train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes = dh.train_data_handler(train_data)
-    # ts_model = ts.training([train_x, train_y, valid_x, valid_y])
-    #
-    # args_dict['gen_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
-    # args_dict['params'] = args
-    # args_dict['descr'] = '测试'
-    # insert_trained_model_into_mongo(ts_model, args_dict)
-    # insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, args_dict)

+ 4 - 15
app/model/tf_model_train.py

@@ -54,7 +54,8 @@ class ModelTrainer:
         # 预测编号:场站级,场站id,区域级,区域id
         pre_id = self.config['area_id'] if pre_area else self.config['station_id']
         pre_type = 'a' if pre_area else 's'
-        output_file = self.input_file.replace('IN', 'OUT')
+        output_file = os.path.join(self.opt.dqyc_base_path, self.input_file)
+        output_file = output_file.replace('IN', 'OUT')
         status_file = 'STATUS.TXT'
         try:
             # ------------ 获取数据,预处理训练数据 ------------
@@ -84,7 +85,7 @@ class ModelTrainer:
             success = 1
             print('ddd')
             # 更新算法状态:1. 启动成功
-            write_number_to_file(os.path.join(output_file, status_file), 1, 1, 'rewrite')
+            write_number_to_file(os.path.join(str(output_file), status_file), 1, 1, 'rewrite')
             # ------------ 组装模型数据 ------------
             self.opt.Model['features'] = ','.join(self.dh.opt.features)
             self.config.update({
@@ -98,24 +99,12 @@ class ModelTrainer:
             self.mgUtils.insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, self.config)
             # 更新算法状态:正常结束
             print("eee")
-            write_number_to_file(os.path.join(output_file, status_file), 2, 2)
+            write_number_to_file(os.path.join(str(output_file), status_file), 2, 2)
             return True
         except Exception as e:
             self._handle_error(e)
             return False
 
-    def _initialize_model(self):
-        """模型初始化策略"""
-        if self.ts.opt.Model['add_train']:
-            pretrained = self.ts.train_init()
-            return pretrained if self._check_feature_compatibility(pretrained) else self.ts.get_keras_model()
-        return self.ts.get_keras_model()
-
-    def _check_feature_compatibility(self, model) -> bool:
-        """检查特征兼容性"""
-        # 原始逻辑中的特征校验实现
-        pass
-
 
     def _handle_error(self, error: Exception):
         """统一错误处理"""

+ 0 - 111
app/predict/tf_cnn_pre.py

@@ -1,111 +0,0 @@
-#!/usr/bin/env python
-# -*- coding:utf-8 -*-
-# @FileName  :tf_lstm_pre.py
-# @Time      :2025/2/13 10:52
-# @Author    :David
-# @Company: shenyang JY
-import os.path
-
-import numpy as np
-import logging, argparse, traceback
-from app.common.data_handler import DataHandler, write_number_to_file
-from threading import Lock
-import time, json
-
-model_lock = Lock()
-from itertools import chain
-from app.common.logs import logger, params
-from app.common.tf_cnn import CNNHandler
-from app.common.dbmg import MongoUtils
-from copy import deepcopy
-np.random.seed(42)  # NumPy随机种子
-
-mgUtils = MongoUtils(logger)
-
-
-def model_prediction(pre_data, input_file, cap):
-    # 获取程序开始时间
-    start_time = time.time()
-    success = 0
-    print("Program starts execution!")
-    farm_id = input_file.split('/')[-2]
-    output_file = input_file.replace('IN', 'OUT')
-    file = 'DQYC_OUT_PREDICT_POWER.txt'
-    status_file = 'STATUS.TXT'
-    local_params = deepcopy(params)
-    dh = DataHandler(logger, local_params)
-    cnn = CNNHandler(logger, local_params)
-    try:
-        local_params['model_table'] += farm_id
-        local_params['scaler_table'] += farm_id
-        feature_scaler, target_scaler = mgUtils.get_scaler_model_from_mongo(local_params)
-        cnn.opt.cap = round(target_scaler.transform(np.array([[cap]]))[0, 0], 2)
-        cnn.get_model(local_params)
-        dh.opt.features = json.loads(cnn.model_params).get('Model').get('features', ','.join(cnn.opt.features)).split(',')
-        scaled_pre_x, pre_data = dh.pre_data_handler(pre_data, feature_scaler)
-
-        success = 1
-        # 更新算法状态:1. 启动成功
-        write_number_to_file(os.path.join(output_file, status_file), 1, 1, 'rewrite')
-        logger.info("算法启动成功")
-        res = list(chain.from_iterable(target_scaler.inverse_transform([cnn.predict(scaled_pre_x).flatten()])))
-        pre_data['Power'] = res[:len(pre_data)]
-        pre_data['PlantID'] = farm_id
-        pre_data = pre_data[['PlantID', local_params['col_time'], 'Power']]
-
-        pre_data.loc[:, 'Power'] = pre_data['Power'].round(2)
-        pre_data.loc[pre_data['Power'] > cap, 'Power'] = cap
-        pre_data.loc[pre_data['Power'] < 0, 'Power'] = 0
-        pre_data.to_csv(os.path.join(output_file, file), sep=' ', index=False)
-        # 更新算法状态:正常结束
-        write_number_to_file(os.path.join(output_file, status_file), 2, 2)
-        logger.info("算法正常结束")
-    except Exception as e:
-        # 如果算法状态没启动,不更新
-        if success:
-            write_number_to_file(os.path.join(output_file, status_file), 2, 3)
-        my_exception = traceback.format_exc()
-        my_exception.replace("\n", "\t")
-        logger.info("算法状态异常:{}".format(my_exception))
-    end_time = time.time()
-    logger.info("cnn预测任务:用了 %s 秒 " % (end_time - start_time))
-
-
-if __name__ == "__main__":
-    print("Program starts execution!")
-    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-    logger = logging.getLogger("model_training_bp log")
-
-    # serve(app, host="0.0.0.0", port=1010x, threads=4)
-    print("server start!")
-
-    # ------------------------测试代码------------------------
-    args_dict = {"mongodb_database": 'david_test', 'scaler_table': 'j00083_scaler', 'model_name': 'bp1.0.test',
-                 'model_table': 'j00083_model', 'mongodb_read_table': 'j00083_test', 'col_time': 'date_time', 'mongodb_write_table': 'j00083_rs',
-                 'features': 'speed10,direction10,speed30,direction30,speed50,direction50,speed70,direction70,speed90,direction90,speed110,direction110,speed150,direction150,speed170,direction170'}
-    args_dict['features'] = args_dict['features'].split(',')
-    arguments.update(args_dict)
-    dh = DataHandler(logger, arguments)
-    ts = TSHandler(logger)
-    opt = argparse.Namespace(**arguments)
-
-    opt.Model['input_size'] = len(opt.features)
-    pre_data = get_data_from_mongo(args_dict)
-    feature_scaler, target_scaler = get_scaler_model_from_mongo(arguments)
-    pre_x = dh.pre_data_handler(pre_data, feature_scaler, opt)
-    ts.get_model(arguments)
-    result = ts.predict(pre_x)
-    result1 = list(chain.from_iterable(target_scaler.inverse_transform([result.flatten()])))
-    pre_data['power_forecast'] = result1[:len(pre_data)]
-    pre_data['farm_id'] = 'J00083'
-    pre_data['cdq'] = 1
-    pre_data['dq'] = 1
-    pre_data['zq'] = 1
-    pre_data.rename(columns={arguments['col_time']: 'date_time'}, inplace=True)
-    pre_data = pre_data[['date_time', 'power_forecast', 'farm_id', 'cdq', 'dq', 'zq']]
-
-    pre_data['power_forecast'] = pre_data['power_forecast'].round(2)
-    pre_data.loc[pre_data['power_forecast'] > opt.cap, 'power_forecast'] = opt.cap
-    pre_data.loc[pre_data['power_forecast'] < 0, 'power_forecast'] = 0
-
-    insert_data_into_mongo(pre_data, arguments)

+ 0 - 111
app/predict/tf_fmi_pre.py

@@ -1,111 +0,0 @@
-#!/usr/bin/env python
-# -*- coding:utf-8 -*-
-# @FileName  :tf_lstm_pre.py
-# @Time      :2025/2/13 10:52
-# @Author    :David
-# @Company: shenyang JY
-import os.path
-
-import numpy as np
-import logging, argparse, traceback
-from app.common.data_handler import DataHandler, write_number_to_file
-from threading import Lock
-import time, json
-
-model_lock = Lock()
-from itertools import chain
-from app.common.logs import logger, params
-from app.common.tf_fmi import FMIHandler
-from app.common.dbmg import MongoUtils
-from copy import deepcopy
-np.random.seed(42)  # NumPy随机种子
-
-mgUtils = MongoUtils(logger)
-
-
-def model_prediction(pre_data, input_file, cap):
-    # 获取程序开始时间
-    start_time = time.time()
-    success = 0
-    print("Program starts execution!")
-    farm_id = input_file.split('/')[-2]
-    output_file = input_file.replace('IN', 'OUT')
-    file = 'DQYC_OUT_PREDICT_POWER.txt'
-    status_file = 'STATUS.TXT'
-    local_params = deepcopy(params)
-    dh = DataHandler(logger, local_params)
-    fmi = FMIHandler(logger, local_params)
-    try:
-        local_params['model_table'] += farm_id
-        local_params['scaler_table'] += farm_id
-        feature_scaler, target_scaler = mgUtils.get_scaler_model_from_mongo(local_params)
-        fmi.opt.cap = round(target_scaler.transform(np.array([[cap]]))[0, 0], 2)
-        fmi.get_model(local_params)
-        dh.opt.features = json.loads(fmi.model_params).get('Model').get('features', ','.join(fmi.opt.features)).split(',')
-        scaled_pre_x, pre_data = dh.pre_data_handler(pre_data, feature_scaler)
-
-        success = 1
-        # 更新算法状态:1. 启动成功
-        write_number_to_file(os.path.join(output_file, status_file), 1, 1, 'rewrite')
-        logger.info("算法启动成功")
-        res = list(chain.from_iterable(target_scaler.inverse_transform([fmi.predict(scaled_pre_x).flatten()])))
-        pre_data['Power'] = res[:len(pre_data)]
-        pre_data['PlantID'] = farm_id
-        pre_data = pre_data[['PlantID', local_params['col_time'], 'Power']]
-
-        pre_data.loc[:, 'Power'] = pre_data['Power'].round(2)
-        pre_data.loc[pre_data['Power'] > cap, 'Power'] = cap
-        pre_data.loc[pre_data['Power'] < 0, 'Power'] = 0
-        pre_data.to_csv(os.path.join(output_file, file), sep=' ', index=False)
-        # 更新算法状态:正常结束
-        write_number_to_file(os.path.join(output_file, status_file), 2, 2)
-        logger.info("算法正常结束")
-    except Exception as e:
-        # 如果算法状态没启动,不更新
-        if success:
-            write_number_to_file(os.path.join(output_file, status_file), 2, 3)
-        my_exception = traceback.format_exc()
-        my_exception.replace("\n", "\t")
-        logger.info("算法状态异常:{}".format(my_exception))
-    end_time = time.time()
-    logger.info("fmi预测任务:用了 %s 秒 " % (end_time - start_time))
-
-
-if __name__ == "__main__":
-    print("Program starts execution!")
-    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-    logger = logging.getLogger("model_training_bp log")
-
-    # serve(app, host="0.0.0.0", port=1010x, threads=4)
-    print("server start!")
-
-    # ------------------------测试代码------------------------
-    args_dict = {"mongodb_database": 'david_test', 'scaler_table': 'j00083_scaler', 'model_name': 'bp1.0.test',
-                 'model_table': 'j00083_model', 'mongodb_read_table': 'j00083_test', 'col_time': 'date_time', 'mongodb_write_table': 'j00083_rs',
-                 'features': 'speed10,direction10,speed30,direction30,speed50,direction50,speed70,direction70,speed90,direction90,speed110,direction110,speed150,direction150,speed170,direction170'}
-    args_dict['features'] = args_dict['features'].split(',')
-    arguments.update(args_dict)
-    dh = DataHandler(logger, arguments)
-    ts = TSHandler(logger)
-    opt = argparse.Namespace(**arguments)
-
-    opt.Model['input_size'] = len(opt.features)
-    pre_data = get_data_from_mongo(args_dict)
-    feature_scaler, target_scaler = get_scaler_model_from_mongo(arguments)
-    pre_x = dh.pre_data_handler(pre_data, feature_scaler, opt)
-    ts.get_model(arguments)
-    result = ts.predict(pre_x)
-    result1 = list(chain.from_iterable(target_scaler.inverse_transform([result.flatten()])))
-    pre_data['power_forecast'] = result1[:len(pre_data)]
-    pre_data['farm_id'] = 'J00083'
-    pre_data['cdq'] = 1
-    pre_data['dq'] = 1
-    pre_data['zq'] = 1
-    pre_data.rename(columns={arguments['col_time']: 'date_time'}, inplace=True)
-    pre_data = pre_data[['date_time', 'power_forecast', 'farm_id', 'cdq', 'dq', 'zq']]
-
-    pre_data['power_forecast'] = pre_data['power_forecast'].round(2)
-    pre_data.loc[pre_data['power_forecast'] > opt.cap, 'power_forecast'] = opt.cap
-    pre_data.loc[pre_data['power_forecast'] < 0, 'power_forecast'] = 0
-
-    insert_data_into_mongo(pre_data, arguments)

+ 0 - 112
app/predict/tf_lstm_pre.py

@@ -1,112 +0,0 @@
-#!/usr/bin/env python
-# -*- coding:utf-8 -*-
-# @FileName  :tf_lstm_pre.py
-# @Time      :2025/2/13 10:52
-# @Author    :David
-# @Company: shenyang JY
-import os.path
-
-import numpy as np
-import logging, argparse, traceback
-from app.common.data_handler import DataHandler, write_number_to_file
-from threading import Lock
-import time, json
-
-model_lock = Lock()
-from itertools import chain
-from app.common.logs import logger, params
-from app.common.tf_lstm import TSHandler
-from app.common.dbmg import MongoUtils
-from copy import deepcopy
-
-np.random.seed(42)  # NumPy随机种子
-
-mgUtils = MongoUtils(logger)
-
-
-def model_prediction(pre_data, input_file, cap):
-    # 获取程序开始时间
-    start_time = time.time()
-    success = 0
-    print("Program starts execution!")
-    farm_id = input_file.split('/')[-2]
-    output_file = input_file.replace('IN', 'OUT')
-    file = 'DQYC_OUT_PREDICT_POWER.txt'
-    status_file = 'STATUS.TXT'
-    local_params = deepcopy(params)
-    ts = TSHandler(logger, local_params)
-    dh = DataHandler(logger, local_params)
-    try:
-        local_params['model_table'] += farm_id
-        local_params['scaler_table'] += farm_id
-        feature_scaler, target_scaler = mgUtils.get_scaler_model_from_mongo(local_params)
-        ts.opt.cap = round(target_scaler.transform(np.array([[cap]]))[0, 0], 2)
-        ts.get_model(local_params)
-        dh.opt.features = json.loads(ts.model_params).get('Model').get('features', ','.join(ts.opt.features)).split(',')
-        scaled_pre_x, pre_data = dh.pre_data_handler(pre_data, feature_scaler)
-
-        success = 1
-        # 更新算法状态:1. 启动成功
-        write_number_to_file(os.path.join(output_file, status_file), 1, 1, 'rewrite')
-        logger.info("算法启动成功")
-        res = list(chain.from_iterable(target_scaler.inverse_transform([ts.predict(scaled_pre_x).flatten()])))
-        pre_data['Power'] = res[:len(pre_data)]
-        pre_data['PlantID'] = farm_id
-        pre_data = pre_data[['PlantID', local_params['col_time'], 'Power']]
-
-        pre_data.loc[:, 'Power'] = pre_data['Power'].round(2)
-        pre_data.loc[pre_data['Power'] > cap, 'Power'] = cap
-        pre_data.loc[pre_data['Power'] < 0, 'Power'] = 0
-        pre_data.to_csv(os.path.join(output_file, file), sep=' ', index=False)
-        # 更新算法状态:正常结束
-        write_number_to_file(os.path.join(output_file, status_file), 2, 2)
-        logger.info("算法正常结束")
-    except Exception as e:
-        # 如果算法状态没启动,不更新
-        if success:
-            write_number_to_file(os.path.join(output_file, status_file), 2, 3)
-        my_exception = traceback.format_exc()
-        my_exception.replace("\n", "\t")
-        logger.info("算法状态异常:{}".format(my_exception))
-    end_time = time.time()
-    logger.info("lstm预测任务:用了 %s 秒 " % (end_time - start_time))
-
-
-if __name__ == "__main__":
-    print("Program starts execution!")
-    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-    logger = logging.getLogger("model_training_bp log")
-
-    # serve(app, host="0.0.0.0", port=1010x, threads=4)
-    print("server start!")
-
-    # ------------------------测试代码------------------------
-    args_dict = {"mongodb_database": 'david_test', 'scaler_table': 'j00083_scaler', 'model_name': 'bp1.0.test',
-                 'model_table': 'j00083_model', 'mongodb_read_table': 'j00083_test', 'col_time': 'date_time', 'mongodb_write_table': 'j00083_rs',
-                 'features': 'speed10,direction10,speed30,direction30,speed50,direction50,speed70,direction70,speed90,direction90,speed110,direction110,speed150,direction150,speed170,direction170'}
-    args_dict['features'] = args_dict['features'].split(',')
-    arguments.update(args_dict)
-    dh = DataHandler(logger, arguments)
-    ts = TSHandler(logger)
-    opt = argparse.Namespace(**arguments)
-
-    opt.Model['input_size'] = len(opt.features)
-    pre_data = get_data_from_mongo(args_dict)
-    feature_scaler, target_scaler = get_scaler_model_from_mongo(arguments)
-    pre_x = dh.pre_data_handler(pre_data, feature_scaler, opt)
-    ts.get_model(arguments)
-    result = ts.predict(pre_x)
-    result1 = list(chain.from_iterable(target_scaler.inverse_transform([result.flatten()])))
-    pre_data['power_forecast'] = result1[:len(pre_data)]
-    pre_data['farm_id'] = 'J00083'
-    pre_data['cdq'] = 1
-    pre_data['dq'] = 1
-    pre_data['zq'] = 1
-    pre_data.rename(columns={arguments['col_time']: 'date_time'}, inplace=True)
-    pre_data = pre_data[['date_time', 'power_forecast', 'farm_id', 'cdq', 'dq', 'zq']]
-
-    pre_data['power_forecast'] = pre_data['power_forecast'].round(2)
-    pre_data.loc[pre_data['power_forecast'] > opt.cap, 'power_forecast'] = opt.cap
-    pre_data.loc[pre_data['power_forecast'] < 0, 'power_forecast'] = 0
-
-    insert_data_into_mongo(pre_data, arguments)

+ 141 - 0
app/predict/tf_model_pre.py

@@ -0,0 +1,141 @@
+#!/usr/bin/env python
+# -*- coding:utf-8 -*-
+# @FileName  :tf_lstm_pre.py
+# @Time      :2025/2/13 10:52
+# @Author    :David
+# @Company: shenyang JY
+import os.path
+import pandas as pd
+import numpy as np
+import logging, argparse, traceback
+from app.common.data_handler import DataHandler, write_number_to_file
+from threading import Lock
+import time, json
+
+model_lock = Lock()
+from itertools import chain
+from typing import Dict, Any
+from app.common.logs import logger, params
+from app.common.tf_lstm import TSHandler
+from app.common.dbmg import MongoUtils
+from copy import deepcopy
+
+np.random.seed(42)  # NumPy随机种子
+
+mgUtils = MongoUtils(logger)
+class ModelPre:
+    """模型训练器封装类"""
+
+    def __init__(self,
+                 pre_data: pd.DataFrame,
+                 capacity: float,
+                 config: Dict[str, Any] = None,
+                 ):
+        self.config = config
+        self.logger = logger
+        self.train_data = pre_data
+        self.capacity = capacity
+        self.gpu_id = config.get('gpu_assignment')
+        self._setup_resources()
+
+        # 初始化组件
+        self.input_file = config.get("input_file")
+        self.opt = argparse.Namespace(**config)
+        self.dh = DataHandler(logger, self.opt)
+        self.ts = TSHandler(logger, self.opt)
+        self.mgUtils = MongoUtils(logger)
+
+    def _setup_resources(self):
+        """GPU资源分配"""
+        if self.gpu_id is not None:
+            os.environ["CUDA_VISIBLE_DEVICES"] = str(self.gpu_id)
+            self.logger.info(f"GPU {self.gpu_id} allocated")
+
+    def model_prediction(self, pre_data):
+        # 获取程序开始时间
+        start_time = time.time()
+        success = 0
+        print("Program starts execution!")
+        pre_id = self.config['area_id'] if pre_area else self.config['station_id']
+        pre_type = 'a' if pre_area else 's'
+        output_file = os.path.join(self.opt.dqyc_base_path, self.input_file)
+        output_file = output_file.replace('IN', 'OUT')
+        file = 'DQYC_OUT_PREDICT_POWER.txt'
+        status_file = 'STATUS.TXT'
+        local_params = deepcopy(params)
+        ts = TSHandler(logger, local_params)
+        dh = DataHandler(logger, local_params)
+        try:
+            local_params['model_table'] = local_params['model_table'] + f'_{pre_type}_'+pre_id
+            local_params['scaler_table'] = local_params['scaler_table'] + f'_{pre_type}_'+ str(pre_id)
+            feature_scaler, target_scaler = mgUtils.get_scaler_model_from_mongo(local_params)
+            ts.opt.cap = round(target_scaler.transform(np.array([[self.capacity]]))[0, 0], 2)
+            ts.get_model(local_params)
+            dh.opt.features = json.loads(ts.model_params).get('Model').get('features', ','.join(ts.opt.features)).split(',')
+            scaled_pre_x, pre_data = dh.pre_data_handler(pre_data, feature_scaler)
+
+            success = 1
+            # 更新算法状态:1. 启动成功
+            write_number_to_file(os.path.join(str(output_file), status_file), 1, 1, 'rewrite')
+            logger.info("算法启动成功")
+            res = list(chain.from_iterable(target_scaler.inverse_transform([ts.predict(scaled_pre_x).flatten()])))
+            pre_data['Power'] = res[:len(pre_data)]
+            pre_data['PlantID'] = pre_id
+            pre_data = pre_data[['PlantID', local_params['col_time'], 'Power']]
+
+            pre_data.loc[:, 'Power'] = pre_data['Power'].round(2)
+            pre_data.loc[pre_data['Power'] > self.capacity, 'Power'] = self.capacity
+            pre_data.loc[pre_data['Power'] < 0, 'Power'] = 0
+            pre_data.to_csv(os.path.join(str(output_file), file), sep=' ', index=False)
+            # 更新算法状态:正常结束
+            write_number_to_file(os.path.join(str(output_file), status_file), 2, 2)
+            logger.info("算法正常结束")
+        except Exception as e:
+            # 如果算法状态没启动,不更新
+            if success:
+                write_number_to_file(os.path.join(str(output_file), status_file), 2, 3)
+            my_exception = traceback.format_exc()
+            my_exception.replace("\n", "\t")
+            logger.info("算法状态异常:{}".format(my_exception))
+        end_time = time.time()
+        logger.info("lstm预测任务:用了 %s 秒 " % (end_time - start_time))
+
+
+if __name__ == "__main__":
+    print("Program starts execution!")
+    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    logger = logging.getLogger("model_training_bp log")
+
+    # serve(app, host="0.0.0.0", port=1010x, threads=4)
+    print("server start!")
+
+    # ------------------------测试代码------------------------
+    args_dict = {"mongodb_database": 'david_test', 'scaler_table': 'j00083_scaler', 'model_name': 'bp1.0.test',
+                 'model_table': 'j00083_model', 'mongodb_read_table': 'j00083_test', 'col_time': 'date_time', 'mongodb_write_table': 'j00083_rs',
+                 'features': 'speed10,direction10,speed30,direction30,speed50,direction50,speed70,direction70,speed90,direction90,speed110,direction110,speed150,direction150,speed170,direction170'}
+    args_dict['features'] = args_dict['features'].split(',')
+    arguments.update(args_dict)
+    dh = DataHandler(logger, arguments)
+    ts = TSHandler(logger)
+    opt = argparse.Namespace(**arguments)
+
+    opt.Model['input_size'] = len(opt.features)
+    pre_data = get_data_from_mongo(args_dict)
+    feature_scaler, target_scaler = get_scaler_model_from_mongo(arguments)
+    pre_x = dh.pre_data_handler(pre_data, feature_scaler, opt)
+    ts.get_model(arguments)
+    result = ts.predict(pre_x)
+    result1 = list(chain.from_iterable(target_scaler.inverse_transform([result.flatten()])))
+    pre_data['power_forecast'] = result1[:len(pre_data)]
+    pre_data['farm_id'] = 'J00083'
+    pre_data['cdq'] = 1
+    pre_data['dq'] = 1
+    pre_data['zq'] = 1
+    pre_data.rename(columns={arguments['col_time']: 'date_time'}, inplace=True)
+    pre_data = pre_data[['date_time', 'power_forecast', 'farm_id', 'cdq', 'dq', 'zq']]
+
+    pre_data['power_forecast'] = pre_data['power_forecast'].round(2)
+    pre_data.loc[pre_data['power_forecast'] > opt.cap, 'power_forecast'] = opt.cap
+    pre_data.loc[pre_data['power_forecast'] < 0, 'power_forecast'] = 0
+
+    insert_data_into_mongo(pre_data, arguments)