# cdq_coe_gen.py
  1. #!/usr/bin/env python
  2. # -*- coding:utf-8 -*-
  3. # @FileName :cdq_coe_gen.py
  4. # @Time :2025/3/28 16:20
  5. # @Author :David
  6. # @Company: shenyang JY
  7. import os, requests, json, time, traceback
  8. import pandas as pd
  9. import numpy as np
  10. from bayes_opt import BayesianOptimization
  11. from common.database_dml_koi import get_data_from_mongo
  12. from flask import Flask, request, g
  13. from datetime import datetime
  14. from common.logs import Log
  15. logger = Log('post-processing').logger
  16. current_path = os.path.dirname(__file__)
  17. API_URL = "http://ds2:18080/accuracyAndBiasByJSON"
  18. app = Flask('cdq_coe_gen——service')
  19. @app.before_request
  20. def update_config():
  21. # ------------ 整理参数,整合请求参数 ------------
  22. g.coe = {'T'+str(x):{} for x in range(1, 17)}
  23. def iterate_coe_simple(pre_data, point, config, coe):
  24. """
  25. 更新16个点系数
  26. """
  27. T = 'T' + str(point + 1)
  28. col_pre = config['col_pre']
  29. best_acc, best_score1, best_coe_m, best_coe_n = 0, 0, 0, 0
  30. best_score, best_acc1, best_score_m, best_score_n = 999, 0, 999, 0
  31. pre_data = history_error(pre_data, config['col_power'], config['col_pre'], int(coe[T]['hour']//0.25))
  32. pre_data = curve_limited(pre_data, config, 'his_fix')
  33. req_his_fix = prepare_request_body(pre_data, config, 'his_fix')
  34. req_dq = prepare_request_body(pre_data, config, col_pre)
  35. his_fix_acc, his_fix_score = calculate_acc(API_URL, req_his_fix)
  36. dq_acc, dq_score = calculate_acc(API_URL, req_dq)
  37. for i in range(5, 210):
  38. for j in range(5, 210):
  39. pre_data["new"] = round(i / 170 * pre_data[col_pre] + j / 170 * pre_data['his_fix'], 3)
  40. req_new = prepare_request_body(pre_data, config, 'new')
  41. acc, acc_score = calculate_acc(API_URL, req_new)
  42. if acc > best_acc:
  43. best_acc = acc
  44. best_score1 = acc_score
  45. best_coe_m = i / 170
  46. best_coe_n = j / 170
  47. if acc_score < best_score:
  48. best_score = acc_score
  49. best_acc1 = acc
  50. best_score_m = i / 170
  51. best_score_n = j / 170
  52. pre_data["coe-acc"] = round(best_coe_m * pre_data[col_pre] + best_coe_n * pre_data['his_fix'], 3)
  53. pre_data["coe-ass"] = round(best_score_m * pre_data[col_pre] + best_score_n * pre_data['his_fix'], 3)
  54. logger.info("1.过去{} - {}的短期的准确率:{:.4f},自动确认系数后,{} 超短期的准确率:{:.4f},历史功率:{:.4f}".format(pre_data['C_TIME'][0], pre_data['C_TIME'].iloc[-1], dq_acc, T, best_acc, his_fix_acc))
  55. logger.info("2.过去{} - {}的短期的考核分:{:.4f},自动确认系数后,{} 超短期的考核分:{:.4f},历史功率:{:.4f}".format(pre_data['C_TIME'][0], pre_data['C_TIME'].iloc[-1], dq_score, T, best_score1, his_fix_score))
  56. logger.info("3.过去{} - {}的短期的准确率:{:.4f},自动确认系数后,{} 超短期的准确率:{:.4f},历史功率:{:.4f}".format(pre_data['C_TIME'][0], pre_data['C_TIME'].iloc[-1], dq_acc, T, best_acc1, his_fix_acc))
  57. logger.info("4.过去{} - {}的短期的考核分:{:.4f},自动确认系数后,{} 超短期的考核分:{:.4f},历史功率:{:.4f}".format(pre_data['C_TIME'][0], pre_data['C_TIME'].iloc[-1], dq_score, T, best_score, his_fix_score))
  58. coe[T]['score_m'] = round(best_score_m, 3)
  59. coe[T]['score_n'] = round(best_score_n, 3)
  60. coe[T]['acc_m'] = round(best_coe_m, 3)
  61. coe[T]['acc_n'] = round(best_coe_n, 3)
  62. logger.info("系数轮询后,最终调整的系数为:{}".format(coe))
def iterate_coe(pre_data, point, config, coe):
    """
    Tune the blending coefficients (m, n) for forecast point T{point+1} with
    Bayesian optimization instead of the exhaustive grid of
    iterate_coe_simple.

    new = m * short-term forecast + n * history-error-corrected curve.
    Updates coe[T] in place with 'acc_m' / 'acc_n' (accuracy-maximizing
    coefficients).  Each objective evaluation costs one remote accuracy-API
    call, so total cost is init_points + n_iter calls.
    """
    T = 'T' + str(point + 1)
    col_pre = config['col_pre']
    col_time = config['col_time']
    # Historical-error correction (same preprocessing as iterate_coe_simple);
    # window length in 15-min points comes from coe[T]['hour'].
    pre_data = history_error(pre_data, config['col_power'], config['col_pre'], int(coe[T]['hour'] // 0.25))
    pre_data = curve_limited(pre_data, config, 'his_fix')
    req_his_fix = prepare_request_body(pre_data, config, 'his_fix')
    req_dq = prepare_request_body(pre_data, config, col_pre)
    # Baseline scores for the corrected curve and the raw short-term forecast.
    his_fix_acc, his_fix_score = calculate_acc(API_URL, req_his_fix)
    dq_acc, dq_score = calculate_acc(API_URL, req_dq)
    # Objective for the Bayesian optimizer.
    def evaluate_coefficients(m, n):
        """Return (accuracy, assessment score) for the blend m*dq + n*his_fix."""
        local_data = pre_data.copy()
        local_data["new"] = round(m * local_data[col_pre] + n * local_data['his_fix'], 3)
        local_data = curve_limited(local_data, config, 'new')
        req_new = prepare_request_body(local_data, config, 'new')
        acc, score = calculate_acc(API_URL, req_new)
        return acc, score
    # Maximize accuracy.
    def acc_optimizer(m, n):
        acc, _ = evaluate_coefficients(m, n)
        return acc
    # Minimize the assessment score (negated because the optimizer maximizes).
    def score_optimizer(m, n):
        _, score = evaluate_coefficients(m, n)
        return -score
    # Search space: same bounds as the original grid (5/170 .. 210/170,
    # i.e. roughly [0.0294, 1.235] for each coefficient).
    pbounds = {
        'm': (5 / 170, 210 / 170),
        'n': (5 / 170, 210 / 170)
    }
    # Accuracy optimization; fixed random_state keeps runs reproducible.
    acc_bo = BayesianOptimization(f=acc_optimizer, pbounds=pbounds, random_state=42)
    # Large init_points/n_iter to capture a possibly multi-modal surface.
    acc_bo.maximize(init_points=70, n_iter=400)
    best_acc_params = acc_bo.max['params']
    best_coe_m, best_coe_n = best_acc_params['m'], best_acc_params['n']
    best_acc = acc_bo.max['target']
    # Assessment-score optimization (currently disabled).
    # score_bo = BayesianOptimization(f=score_optimizer, pbounds=pbounds, random_state=42)
    # score_bo.maximize(init_points=10, n_iter=20)
    # best_score_params = score_bo.max['params']
    # best_score_m, best_score_n = best_score_params['m'], best_score_params['n']
    # best_score = -score_bo.max['target']
    # Apply the best coefficients; NOTE(review): unlike the objective above,
    # the final 'coe-acc' column is not passed through curve_limited.
    pre_data["coe-acc"] = round(best_coe_m * pre_data[col_pre] + best_coe_n * pre_data['his_fix'], 3)
    # pre_data["coe-ass"] = round(best_score_m * pre_data[col_pre] + best_score_n * pre_data['his_fix'], 3)
    logger.info("过去{} - {}的短期的准确率:{:.4f},历史功率:{:.4f},自动确认系数后,{} 超短期的准确率:{:.4f}".format(pre_data[col_time][0], pre_data[col_time].iloc[-1], dq_acc, his_fix_acc, T, best_acc))
    # Persist the tuned coefficients for this point.
    coe[T].update({
        # 'score_m': round(best_score_m, 3),
        # 'score_n': round(best_score_n, 3),
        'acc_m': round(best_coe_m, 3),
        'acc_n': round(best_coe_n, 3)
    })
    logger.info("贝叶斯优化后,最终调整的系数为:{}".format(coe))
  123. def iterate_his_coe(pre_data, point, config, coe):
  124. """
  125. 更新临近时长Δ
  126. """
  127. T = 'T' + str(point + 1)
  128. best_acc, best_hour = 0, 1
  129. for hour in np.arange(0.25, 4.25, 0.25):
  130. data = pre_data.copy()
  131. his_window = int(hour // 0.25)
  132. pre_data_f = history_error(data, config['col_power'], config['col_pre'], his_window)
  133. pre_data_f = curve_limited(pre_data_f, config, 'his_fix')
  134. req_his_fix = prepare_request_body(pre_data_f, config, 'his_fix')
  135. his_fix_acc, his_fix_score = calculate_acc(API_URL, req_his_fix)
  136. if his_fix_acc > best_acc:
  137. best_acc = his_fix_acc
  138. best_hour = float(round(hour, 2))
  139. coe[T]['hour'] = best_hour
  140. logger.info(f"{T} 点的最优临近时长:{best_hour}")
  141. def prepare_request_body(df, config, predict):
  142. """
  143. 准备请求体,动态保留MongoDB中的所有字段
  144. """
  145. data = df.copy()
  146. # 转换时间格式为字符串
  147. if config['col_time'] in data.columns and isinstance(data[config['col_time']].iloc[0], datetime):
  148. data[config['col_time'] ] = data[config['col_time'] ].dt.strftime('%Y-%m-%d %H:%M:%S')
  149. data['model'] = predict
  150. # 保留必要的字段
  151. data = data[[config['col_time'], config['col_power'], predict, 'model']].to_dict('records')
  152. # 构造请求体(固定部分+动态数据部分)
  153. request_body = {
  154. "stationCode": config['stationCode'],
  155. "realPowerColumn": config['col_power'],
  156. "ablePowerColumn": config['col_power'],
  157. "predictPowerColumn": predict,
  158. "inStalledCapacityName": config['inStalledCapacityName'],
  159. "computTypeEnum": "E2",
  160. "computMeasEnum": config.get('computMeasEnum', 'E2'),
  161. "openCapacityName": config['openCapacityName'],
  162. "onGridEnergy": config.get('onGridEnergy', 1),
  163. "price": config.get('price', 1),
  164. "fault": config.get('fault', -99),
  165. "colTime": config['col_time'], #时间列名(可选,要与上面'dateTime一致')
  166. # "computPowersEnum": "E4" # 计算功率类型(可选)
  167. "data": data # MongoDB数据
  168. }
  169. return request_body
  170. def calculate_acc(api_url, request_body):
  171. """
  172. 调用API接口
  173. """
  174. headers = {
  175. 'Content-Type': 'application/json',
  176. 'Accept': 'application/json'
  177. }
  178. try:
  179. response = requests.post(
  180. api_url,
  181. data=json.dumps(request_body, ensure_ascii=False),
  182. headers=headers
  183. )
  184. result = response.json()
  185. if response.status_code == 200:
  186. acc = np.average([res['accuracy'] for res in result])
  187. # ass = np.average([res['accuracyAssessment'] for res in result])
  188. return acc, 0
  189. else:
  190. logger.info(f"{response.status_code}失败:{result['status']},{result['error']}")
  191. except requests.exceptions.RequestException as e:
  192. logger.info(f"准确率接口调用失败: {e}")
  193. return None
  194. def history_error(data, col_power, col_pre, his_window):
  195. data['error'] = data[col_power] - data[col_pre]
  196. data['error'] = data['error'].round(2)
  197. data.reset_index(drop=True, inplace=True)
  198. # 用前面5个点的平均error,和象心力相加
  199. numbers = len(data) - his_window
  200. datas = [data.iloc[x: x+his_window, :].reset_index(drop=True) for x in range(0, numbers)]
  201. data_error = [np.mean(d.iloc[0:his_window, -1]) for d in datas]
  202. pad_data_error = np.pad(data_error, (his_window, 0), mode='constant', constant_values=0)
  203. data['his_fix'] = data[col_pre] + pad_data_error
  204. data = data.iloc[his_window:, :].reset_index(drop=True)
  205. return data
  206. def curve_limited(pre_data, config, predict):
  207. """
  208. plant_type: 0 风 1 光
  209. """
  210. data = pre_data.copy()
  211. col_time, cap = config['col_time'], float(config['openCapacityName'])
  212. data[col_time] = pd.to_datetime(data[col_time])
  213. data.loc[data[predict] < 0, [predict]] = 0
  214. data.loc[data[predict] > cap, [predict]] = cap
  215. return data
  216. @app.route('/cdq_coe_gen', methods=['POST'])
  217. def get_station_cdq_coe():
  218. # 获取程序开始时间
  219. start_time = time.time()
  220. result = {}
  221. success = 0
  222. args = {}
  223. coe = g.coe
  224. try:
  225. args = request.values.to_dict()
  226. logger.info(args)
  227. data = get_data_from_mongo(args).sort_values(by=args['col_time'], ascending=True)
  228. for point in range(0, 16, 1):
  229. iterate_his_coe(data, point, args, coe)
  230. iterate_coe(data, point, args, coe)
  231. success = 1
  232. except Exception as e:
  233. my_exception = traceback.format_exc()
  234. my_exception.replace("\n", "\t")
  235. result['msg'] = my_exception
  236. logger.info("调系数出错:{}".format(my_exception))
  237. end_time = time.time()
  238. result['success'] = success
  239. result['args'] = args
  240. result['coe'] = coe
  241. result['start_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))
  242. result['end_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))
  243. return result
  244. if __name__ == "__main__":
  245. # args = {
  246. # 'mongodb_database': 'ldw_ftp',
  247. # 'mongodb_read_table': 'j00600',
  248. # # 'timeBegin': '2025-01-01 00:00:00',
  249. # # 'timeEnd': '2025-01-03 23:45:00'
  250. # }
  251. # data = get_data_from_mongo(args).sort_values(by='dateTime', ascending=True)
  252. # pre_data = history_error(data, col_power='realPower', col_pre='dq')
  253. # for point in range(0, 16, 1):
  254. # iterate_coe(pre_data, point, 'realPower', 'dq')
  255. # run_code = 0
  256. print("Program starts execution!")
  257. from waitress import serve
  258. serve(app, host="0.0.0.0", port=10123,
  259. threads=8, # 指定线程数(默认4,根据硬件调整)
  260. channel_timeout=600 # 连接超时时间(秒)
  261. )
  262. print("server start!")