tf_model_pre.py

#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName :tf_lstm_pre.py
# @Time :2025/2/13 10:52
# @Author :David
# @Company: shenyang JY
import os.path
import time
import json
import logging
import argparse
import traceback
from copy import deepcopy
from itertools import chain
from threading import Lock
from typing import Dict, Any

import numpy as np
import pandas as pd

from app.common.data_handler import DataHandler, write_number_to_file
from app.common.dbmg import MongoUtils
from app.common.logs import logger, params
from app.common.tf_lstm import TSHandler

model_lock = Lock()  # module-level lock, reserved for serializing model access (not used in this file)
np.random.seed(42)  # fix the NumPy random seed for reproducibility
mgUtils = MongoUtils(logger)


class ModelPre:
    """Wrapper class for model prediction."""

    def __init__(self,
                 pre_data: pd.DataFrame,
                 capacity: float,
                 config: Dict[str, Any] = None,
                 ):
        self.config = config
        self.logger = logger
        self.train_data = pre_data
        self.capacity = capacity
        self.gpu_id = config.get('gpu_assignment')
        self._setup_resources()
        # initialize components
        self.input_file = config.get("input_file")
        self.opt = argparse.Namespace(**config)
        self.dh = DataHandler(logger, self.opt)
        self.ts = TSHandler(logger, self.opt)
        self.mgUtils = MongoUtils(logger)

    def _setup_resources(self):
        """GPU resource allocation."""
        if self.gpu_id is not None:
            os.environ["CUDA_VISIBLE_DEVICES"] = str(self.gpu_id)
            self.logger.info(f"GPU {self.gpu_id} allocated")
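
    # Expected config keys, inferred from the attributes read in this class:
    # 'gpu_assignment' and 'input_file' above, plus the prediction options wrapped
    # into argparse.Namespace, e.g. 'dqyc_base_path' and 'area_id'/'station_id' used
    # in model_prediction below. Any key beyond those is an assumption of the caller.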

    def model_prediction(self, pre_data, pre_area=False):
        """Run the LSTM prediction for one station or one area.

        pre_area is assumed to select area-level (True) vs. station-level (False)
        forecasting; it controls which id and table suffix are used.
        """
        # record the program start time
        start_time = time.time()
        success = 0
        print("Program starts execution!")
        pre_id = self.config['area_id'] if pre_area else self.config['station_id']
        pre_type = 'a' if pre_area else 's'
        output_file = os.path.join(self.opt.dqyc_base_path, self.input_file)
        output_file = output_file.replace('IN', 'OUT')
        file = 'DQYC_OUT_PREDICT_POWER.txt'
        status_file = 'STATUS.TXT'
        local_params = deepcopy(params)
        ts = TSHandler(logger, local_params)
        dh = DataHandler(logger, local_params)
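        # Prediction pipeline (as implemented below):
        #   1. load the feature/target scalers and the trained model from MongoDB;
        #   2. scale the capacity and align the feature list with the stored model;
        #   3. scale the inputs, predict, and inverse-transform back to power;
        #   4. round, clip to [0, capacity], and write the result file;
        #   5. report the algorithm status (start / normal end / abnormal end).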
        try:
            local_params['model_table'] = local_params['model_table'] + f'_{pre_type}_' + str(pre_id)
            local_params['scaler_table'] = local_params['scaler_table'] + f'_{pre_type}_' + str(pre_id)
            feature_scaler, target_scaler = mgUtils.get_scaler_model_from_mongo(local_params)
            ts.opt.cap = round(target_scaler.transform(np.array([[self.capacity]]))[0, 0], 2)
            ts.get_model(local_params)
            model_cfg = json.loads(ts.model_params).get('Model', {})
            dh.opt.features = model_cfg.get('features', ','.join(ts.opt.features)).split(',')
            scaled_pre_x, pre_data = dh.pre_data_handler(pre_data, feature_scaler)
            success = 1
            # update the algorithm status: 1 - started successfully
            write_number_to_file(os.path.join(str(output_file), status_file), 1, 1, 'rewrite')
            logger.info("Algorithm started successfully")
            res = list(chain.from_iterable(target_scaler.inverse_transform([ts.predict(scaled_pre_x).flatten()])))
            pre_data['Power'] = res[:len(pre_data)]
            pre_data['PlantID'] = pre_id
            pre_data = pre_data[['PlantID', local_params['col_time'], 'Power']]
            pre_data.loc[:, 'Power'] = pre_data['Power'].round(2)
            pre_data.loc[pre_data['Power'] > self.capacity, 'Power'] = self.capacity
            pre_data.loc[pre_data['Power'] < 0, 'Power'] = 0
            pre_data.to_csv(os.path.join(str(output_file), file), sep=' ', index=False)
            # update the algorithm status: finished normally
            write_number_to_file(os.path.join(str(output_file), status_file), 2, 2)
            logger.info("Algorithm finished normally")
        except Exception:
            # if the algorithm never reported a successful start, do not update the status
            if success:
                write_number_to_file(os.path.join(str(output_file), status_file), 2, 3)
            my_exception = traceback.format_exc()
            my_exception = my_exception.replace("\n", "\t")
            logger.info("Algorithm status abnormal: {}".format(my_exception))
        end_time = time.time()
        logger.info("LSTM prediction task took %s seconds" % (end_time - start_time))
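

# Illustrative usage sketch (assumption, not executed by this module). The config keys
# shown are among the ones ModelPre actually reads; the values are placeholders.
#
#   predictor = ModelPre(pre_data=df, capacity=49.5,
#                        config={'gpu_assignment': 0, 'input_file': 'IN/...',
#                                'dqyc_base_path': '/data', 'station_id': 'J00083'})
#   predictor.model_prediction(df, pre_area=False)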


if __name__ == "__main__":
    print("Program starts execution!")
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logger = logging.getLogger("model_training_bp log")
    # serve(app, host="0.0.0.0", port=1010x, threads=4)
    print("server start!")
    # ------------------------ test code ------------------------
    # NOTE: get_data_from_mongo and insert_data_into_mongo are not imported in this file;
    # they are assumed to be provided by the project's database utilities.
    args_dict = {"mongodb_database": 'david_test', 'scaler_table': 'j00083_scaler', 'model_name': 'bp1.0.test',
                 'model_table': 'j00083_model', 'mongodb_read_table': 'j00083_test', 'col_time': 'date_time',
                 'mongodb_write_table': 'j00083_rs',
                 'features': 'speed10,direction10,speed30,direction30,speed50,direction50,speed70,direction70,'
                             'speed90,direction90,speed110,direction110,speed150,direction150,speed170,direction170'}
    args_dict['features'] = args_dict['features'].split(',')
    arguments = deepcopy(params)  # assumed: the base arguments come from the shared params dict
    arguments.update(args_dict)
    dh = DataHandler(logger, arguments)
    ts = TSHandler(logger, arguments)
    opt = argparse.Namespace(**arguments)
    opt.Model['input_size'] = len(opt.features)
    pre_data = get_data_from_mongo(args_dict)
    feature_scaler, target_scaler = mgUtils.get_scaler_model_from_mongo(arguments)
    pre_x, pre_data = dh.pre_data_handler(pre_data, feature_scaler)
    ts.get_model(arguments)
    result = ts.predict(pre_x)
    result1 = list(chain.from_iterable(target_scaler.inverse_transform([result.flatten()])))
    pre_data['power_forecast'] = result1[:len(pre_data)]
    pre_data['farm_id'] = 'J00083'
    pre_data['cdq'] = 1
    pre_data['dq'] = 1
    pre_data['zq'] = 1
    pre_data.rename(columns={arguments['col_time']: 'date_time'}, inplace=True)
    pre_data = pre_data[['date_time', 'power_forecast', 'farm_id', 'cdq', 'dq', 'zq']]
    pre_data['power_forecast'] = pre_data['power_forecast'].round(2)
    # opt.cap (installed capacity) is assumed to be supplied via params/arguments
    pre_data.loc[pre_data['power_forecast'] > opt.cap, 'power_forecast'] = opt.cap
    pre_data.loc[pre_data['power_forecast'] < 0, 'power_forecast'] = 0
    insert_data_into_mongo(pre_data, arguments)