#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName :tf_model_pre.py
# @Time :2025/2/13 10:52
# @Author :David
# @Company: shenyang JY
import os
import time
import json
import logging
import argparse
import traceback
from threading import Lock
from itertools import chain
from typing import Dict, Any

import numpy as np
import pandas as pd

from app.common.data_handler import DataHandler, write_number_to_file
from app.common.config import logger, parser
from app.common.tf_lstm import TSHandler
from app.common.dbmg import MongoUtils

model_lock = Lock()
np.random.seed(42)  # fix the NumPy random seed for reproducibility
mgUtils = MongoUtils(logger)
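
# Overview (descriptive note, added for readability): this module wraps LSTM-based power
# prediction. It loads the feature/target scalers and the trained model from MongoDB via
# MongoUtils, scales the incoming feature data, predicts, inverse-transforms the result,
# clamps it to [0, capacity], and writes DQYC_OUT_PREDICT_POWER.txt together with a
# STATUS.TXT progress file to the output directory.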


class ModelPre(object):
    """Wrapper class for model prediction."""

    def __init__(self,
                 pre_data: pd.DataFrame,
                 capacity: float,
                 config: Dict[str, Any] = None,
                 ):
        self.config = config
        self.logger = logger
        self.pre_data = pre_data
        self.capacity = capacity
        self.gpu_id = config.get('gpu_assignment')
        self._setup_resources()
        # Initialize components
        self.input_file = config.get("input_file")
        self.opt = argparse.Namespace(**config)
        self.dh = DataHandler(logger, self.opt)
        self.ts = TSHandler(logger, self.opt)
        self.mgUtils = MongoUtils(logger)

    def _setup_resources(self):
        """Assign GPU resources."""
        if self.gpu_id is not None:
            os.environ["CUDA_VISIBLE_DEVICES"] = str(self.gpu_id)
            self.logger.info(f"GPU {self.gpu_id} allocated")
    def predict(self, pre_area=False):
        # Record the start time of the task
        start_time = time.time()
        success = 0
        self.logger.info("Program starts execution!")
        pre_id = self.config['area_id'] if pre_area else self.config['station_id']
        pre_type = 'a' if pre_area else 's'
        output_file = os.path.join(self.opt.dqyc_base_path, self.input_file)
        output_file = output_file.replace('IN', 'OUT') if pre_area else os.path.join(str(output_file), pre_id)
        file = 'DQYC_OUT_PREDICT_POWER.txt'
        status_file = 'STATUS.TXT'
        try:
            self.config['model_table'] = self.config['model_table'] + f'_{pre_type}_' + str(pre_id)
            self.config['scaler_table'] = self.config['scaler_table'] + f'_{pre_type}_' + str(pre_id)
            feature_scaler, target_scaler = mgUtils.get_scaler_model_from_mongo(self.config)
            # Scale the capacity into the target space so the model works with a normalized cap
            self.ts.opt.cap = round(target_scaler.transform(np.array([[self.capacity]]))[0, 0], 2)
            self.ts.get_model(self.config)
            self.logger.info("model_params: {}".format(self.ts.model_params))
            # Use the feature list stored with the model, falling back to the configured features
            self.dh.opt.features = json.loads(self.ts.model_params).get('Model').get('features', ','.join(self.ts.opt.features)).split(',')
            scaled_pre_x, pre_data = self.dh.pre_data_handler(self.pre_data, feature_scaler)
            success = 1
            # Update algorithm status: 1. started successfully
            write_number_to_file(os.path.join(str(output_file), status_file), 1, 1, 'rewrite')
            self.logger.info("Algorithm started successfully")
            res = list(chain.from_iterable(target_scaler.inverse_transform([self.ts.predict(scaled_pre_x).flatten()])))
            pre_data['Power'] = res[:len(pre_data)]
            pre_data['PlantID'] = pre_id
            pre_data = pre_data[['PlantID', self.config['col_time'], 'Power']]
            # Round and clamp the forecast to the physically valid range [0, capacity]
            pre_data.loc[:, 'Power'] = pre_data['Power'].round(2)
            pre_data.loc[pre_data['Power'] > self.capacity, 'Power'] = self.capacity
            pre_data.loc[pre_data['Power'] < 0, 'Power'] = 0
            pre_data.to_csv(os.path.join(str(output_file), file), sep=' ', index=False)
            # Update algorithm status: finished normally
            write_number_to_file(os.path.join(str(output_file), status_file), 2, 2)
            self.logger.info("Algorithm finished normally")
        except Exception as e:
            # If the algorithm never reported a successful start, do not overwrite the status
            if success:
                write_number_to_file(os.path.join(str(output_file), status_file), 2, 3)
            my_exception = traceback.format_exc()
            my_exception = my_exception.replace("\n", "\t")
            self.logger.info("Algorithm raised an exception: {}".format(my_exception))
        end_time = time.time()
        self.logger.info("LSTM prediction task took %s seconds" % (end_time - start_time))
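
# Minimal usage sketch (illustrative, not part of the original module). It assumes the
# caller has built a `config` dict containing the keys read above ('gpu_assignment',
# 'input_file', 'station_id'/'area_id', 'model_table', 'scaler_table', 'dqyc_base_path',
# 'col_time') and has already loaded the feature frame `pre_data`; the file name and
# capacity value below are hypothetical.
#
#     pre_data = pd.read_csv('dqyc_in_nwp.txt', sep=' ')            # hypothetical input file
#     model_pre = ModelPre(pre_data, capacity=49.5, config=config)  # `config` built by the caller
#     model_pre.predict(pre_area=False)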


if __name__ == "__main__":
    print("Program starts execution!")
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logger = logging.getLogger("model_training_bp log")
    # serve(app, host="0.0.0.0", port=1010x, threads=4)
    print("server start!")
    # ------------------------ Test code ------------------------
    args_dict = {"mongodb_database": 'david_test', 'scaler_table': 'j00083_scaler', 'model_name': 'bp1.0.test',
                 'model_table': 'j00083_model', 'mongodb_read_table': 'j00083_test', 'col_time': 'date_time',
                 'mongodb_write_table': 'j00083_rs',
                 'features': 'speed10,direction10,speed30,direction30,speed50,direction50,speed70,direction70,speed90,direction90,speed110,direction110,speed150,direction150,speed170,direction170'}
    args_dict['features'] = args_dict['features'].split(',')
    # `arguments` is used without being defined in the original snippet; it is assumed here
    # to be the default option dict produced by the parser imported from app.common.config.
    arguments = vars(parser.parse_args())
    arguments.update(args_dict)
    opt = argparse.Namespace(**arguments)
    opt.Model['input_size'] = len(opt.features)
    dh = DataHandler(logger, opt)
    ts = TSHandler(logger, opt)
    # get_data_from_mongo / insert_data_into_mongo are the project's Mongo helpers; the
    # original snippet calls them without showing their import. The scaler lookup is routed
    # through the module-level MongoUtils instance, matching the usage in ModelPre.predict.
    pre_data = get_data_from_mongo(args_dict)
    feature_scaler, target_scaler = mgUtils.get_scaler_model_from_mongo(arguments)
    scaled_pre_x, pre_data = dh.pre_data_handler(pre_data, feature_scaler)
    ts.get_model(arguments)
    result = ts.predict(scaled_pre_x)
    result1 = list(chain.from_iterable(target_scaler.inverse_transform([result.flatten()])))
    pre_data['power_forecast'] = result1[:len(pre_data)]
    pre_data['farm_id'] = 'J00083'
    pre_data['cdq'] = 1
    pre_data['dq'] = 1
    pre_data['zq'] = 1
    pre_data.rename(columns={arguments['col_time']: 'date_time'}, inplace=True)
    pre_data = pre_data[['date_time', 'power_forecast', 'farm_id', 'cdq', 'dq', 'zq']]
    pre_data['power_forecast'] = pre_data['power_forecast'].round(2)
    # opt.cap is expected to come from the parser defaults
    pre_data.loc[pre_data['power_forecast'] > opt.cap, 'power_forecast'] = opt.cap
    pre_data.loc[pre_data['power_forecast'] < 0, 'power_forecast'] = 0
    insert_data_into_mongo(pre_data, arguments)