cdq_coe_gen.py 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304
  1. #!/usr/bin/env python
  2. # -*- coding:utf-8 -*-
  3. # @FileName :cdq_coe_gen.py
  4. # @Time :2025/3/28 16:20
  5. # @Author :David
  6. # @Company: shenyang JY
  7. import os, requests, json, time, traceback
  8. import pandas as pd
  9. import numpy as np
  10. from bayes_opt import BayesianOptimization
  11. from common.database_dml_koi import get_data_from_mongo
  12. from flask import Flask, request, g
  13. from datetime import datetime
  14. from common.logs import Log
# Module-level setup: logger, working dir, accuracy-service endpoint, Flask app.
logger = Log('post-processing').logger
current_path = os.path.dirname(__file__)
# Endpoint of the internal accuracy/assessment scoring service (host "ds2").
API_URL = "http://ds2:18080/accuracyAndBiasByJSON"
app = Flask('cdq_coe_gen——service')
  19. @app.before_request
  20. def update_config():
  21. # ------------ 整理参数,整合请求参数 ------------
  22. g.coe = {'T'+str(x):{} for x in range(1, 17)}
  23. def iterate_coe_simple(pre_data, point, config, coe):
  24. """
  25. 更新16个点系数
  26. """
  27. T = 'T' + str(point + 1)
  28. col_pre = config['col_pre']
  29. best_acc, best_score1, best_coe_m, best_coe_n = 0, 0, 0, 0
  30. best_score, best_acc1, best_score_m, best_score_n = 999, 0, 999, 0
  31. pre_data = history_error(pre_data, config['col_power'], config['col_pre'], int(coe[T]['hour']//0.25))
  32. req_his_fix = prepare_request_body(pre_data, config, 'his_fix')
  33. req_dq = prepare_request_body(pre_data, config, col_pre)
  34. his_fix_acc, his_fix_score = calculate_acc(API_URL, req_his_fix)
  35. dq_acc, dq_score = calculate_acc(API_URL, req_dq)
  36. for i in range(5, 210):
  37. for j in range(5, 210):
  38. pre_data["new"] = round(i / 170 * pre_data[col_pre] + j / 170 * pre_data['his_fix'], 3)
  39. req_new = prepare_request_body(pre_data, config, 'new')
  40. acc, acc_score = calculate_acc(API_URL, req_new)
  41. if acc > best_acc:
  42. best_acc = acc
  43. best_score1 = acc_score
  44. best_coe_m = i / 170
  45. best_coe_n = j / 170
  46. if acc_score < best_score:
  47. best_score = acc_score
  48. best_acc1 = acc
  49. best_score_m = i / 170
  50. best_score_n = j / 170
  51. pre_data["coe-acc"] = round(best_coe_m * pre_data[col_pre] + best_coe_n * pre_data['his_fix'], 3)
  52. pre_data["coe-ass"] = round(best_score_m * pre_data[col_pre] + best_score_n * pre_data['his_fix'], 3)
  53. logger.info("1.过去{} - {}的短期的准确率:{:.4f},自动确认系数后,{} 超短期的准确率:{:.4f},历史功率:{:.4f}".format(pre_data['C_TIME'][0], pre_data['C_TIME'].iloc[-1], dq_acc, T, best_acc, his_fix_acc))
  54. logger.info("2.过去{} - {}的短期的考核分:{:.4f},自动确认系数后,{} 超短期的考核分:{:.4f},历史功率:{:.4f}".format(pre_data['C_TIME'][0], pre_data['C_TIME'].iloc[-1], dq_score, T, best_score1, his_fix_score))
  55. logger.info("3.过去{} - {}的短期的准确率:{:.4f},自动确认系数后,{} 超短期的准确率:{:.4f},历史功率:{:.4f}".format(pre_data['C_TIME'][0], pre_data['C_TIME'].iloc[-1], dq_acc, T, best_acc1, his_fix_acc))
  56. logger.info("4.过去{} - {}的短期的考核分:{:.4f},自动确认系数后,{} 超短期的考核分:{:.4f},历史功率:{:.4f}".format(pre_data['C_TIME'][0], pre_data['C_TIME'].iloc[-1], dq_score, T, best_score, his_fix_score))
  57. coe[T]['score_m'] = round(best_score_m, 3)
  58. coe[T]['score_n'] = round(best_score_n, 3)
  59. coe[T]['acc_m'] = round(best_coe_m, 3)
  60. coe[T]['acc_n'] = round(best_coe_n, 3)
  61. logger.info("系数轮询后,最终调整的系数为:{}".format(coe))
  62. def iterate_coe(pre_data, point, config, coe):
  63. """使用贝叶斯优化进行系数寻优"""
  64. T = 'T' + str(point + 1)
  65. col_pre = config['col_pre']
  66. # 初始化最优解存储变量
  67. best_acc, best_score = 0, 999
  68. best_coe_m, best_coe_n = 0, 0
  69. best_score_m, best_score_n = 0, 0
  70. # 历史数据处理(保持原逻辑)
  71. pre_data = history_error(pre_data, config['col_power'], config['col_pre'], int(coe[T]['hour'] // 0.25))
  72. req_his_fix = prepare_request_body(pre_data, config, 'his_fix')
  73. req_dq = prepare_request_body(pre_data, config, col_pre)
  74. # 获取基准值(保持原逻辑)
  75. his_fix_acc, his_fix_score = calculate_acc(API_URL, req_his_fix)
  76. dq_acc, dq_score = calculate_acc(API_URL, req_dq)
  77. # 定义贝叶斯优化目标函数
  78. def evaluate_coefficients(m, n):
  79. """评估函数返回准确率和考核分的元组"""
  80. local_data = pre_data.copy()
  81. local_data["new"] = round(m * local_data[col_pre] + n * local_data['his_fix'], 3)
  82. req_new = prepare_request_body(local_data, config, 'new')
  83. acc, score = calculate_acc(API_URL, req_new)
  84. return acc, score
  85. # 优化准确率
  86. def acc_optimizer(m, n):
  87. acc, _ = evaluate_coefficients(m, n)
  88. return acc
  89. # 优化考核分
  90. def score_optimizer(m, n):
  91. _, score = evaluate_coefficients(m, n)
  92. return -score # 取负数因为要最大化负分即最小化原分数
  93. # 参数空间(保持原参数范围)
  94. pbounds = {
  95. 'm': (5 / 170, 210 / 170), # 原始范围映射到[0.0294, 1.235]
  96. 'n': (5 / 170, 210 / 170)
  97. }
  98. # 执行准确率优化
  99. acc_bo = BayesianOptimization(f=acc_optimizer, pbounds=pbounds, random_state=42)
  100. acc_bo.maximize(init_points=10, n_iter=20)
  101. best_acc_params = acc_bo.max['params']
  102. best_coe_m, best_coe_n = best_acc_params['m'], best_acc_params['n']
  103. best_acc = acc_bo.max['target']
  104. # 执行考核分优化
  105. # score_bo = BayesianOptimization(f=score_optimizer, pbounds=pbounds, random_state=42)
  106. # score_bo.maximize(init_points=10, n_iter=20)
  107. # best_score_params = score_bo.max['params']
  108. # best_score_m, best_score_n = best_score_params['m'], best_score_params['n']
  109. # best_score = -score_bo.max['target'] # 恢复原始分数
  110. # 应用最优系数(保持原处理逻辑)
  111. pre_data["coe-acc"] = round(best_coe_m * pre_data[col_pre] + best_coe_n * pre_data['his_fix'], 3)
  112. pre_data["coe-ass"] = round(best_score_m * pre_data[col_pre] + best_score_n * pre_data['his_fix'], 3)
  113. # 记录日志(保持原格式)
  114. logger.info("1.过去{} - {}的短期的准确率:{:.4f},自动确认系数后,{} 超短期的准确率:{:.4f},历史功率:{:.4f}".format(
  115. pre_data['C_TIME'][0], pre_data['C_TIME'].iloc[-1], dq_acc, T, best_acc, his_fix_acc))
  116. logger.info("2.过去{} - {}的短期的考核分:{:.4f},自动确认系数后,{} 超短期的考核分:{:.4f},历史功率:{:.4f}".format(
  117. pre_data['C_TIME'][0], pre_data['C_TIME'].iloc[-1], dq_score, T, best_score, his_fix_score))
  118. # 更新系数表(保持原逻辑)
  119. coe[T].update({
  120. 'score_m': round(best_score_m, 3),
  121. 'score_n': round(best_score_n, 3),
  122. 'acc_m': round(best_coe_m, 3),
  123. 'acc_n': round(best_coe_n, 3)
  124. })
  125. logger.info("贝叶斯优化后,最终调整的系数为:{}".format(coe))
  126. def iterate_his_coe(pre_data, point, config, coe):
  127. """
  128. 更新临近时长Δ
  129. """
  130. T = 'T' + str(point + 1)
  131. best_acc, best_hour = 0, 1
  132. for hour in np.arange(0.25, 4.25, 0.25):
  133. data = pre_data.copy()
  134. his_window = int(hour // 0.25)
  135. pre_data_f = history_error(data, config['col_power'], config['col_pre'], his_window)
  136. req_his_fix = prepare_request_body(pre_data_f, config, 'his_fix')
  137. his_fix_acc, his_fix_score = calculate_acc(API_URL, req_his_fix)
  138. if his_fix_acc > best_acc:
  139. best_acc = his_fix_acc
  140. best_hour = float(round(hour, 2))
  141. coe[T]['hour'] = best_hour
  142. logger.info(f"{T} 点的最优临近时长:{best_hour}")
  143. def prepare_request_body(df, config, predict):
  144. """
  145. 准备请求体,动态保留MongoDB中的所有字段
  146. """
  147. data = df.copy()
  148. # 转换时间格式为字符串
  149. if 'dateTime' in data.columns and isinstance(data['dateTime'].iloc[0], datetime):
  150. data['dateTime'] = data['dateTime'].dt.strftime('%Y-%m-%d %H:%M:%S')
  151. data['model'] = predict
  152. # 排除不需要的字段(如果有)
  153. exclude_fields = ['_id'] # 通常排除MongoDB的默认_id字段
  154. # 获取所有字段名(排除不需要的字段)
  155. available_fields = [col for col in data.columns if col not in exclude_fields]
  156. # 转换为记录列表(保留所有字段)
  157. data = data[available_fields].to_dict('records')
  158. # 构造请求体(固定部分+动态数据部分)
  159. request_body = {
  160. "stationCode": config['stationCode'],
  161. "realPowerColumn": config['col_power'],
  162. "ablePowerColumn": config['col_power'],
  163. "predictPowerColumn": predict,
  164. "inStalledCapacityName": config['inStalledCapacityName'],
  165. "computTypeEnum": "E2",
  166. "computMeasEnum": config.get('computMeasEnum', 'E2'),
  167. "openCapacityName": config['openCapacityName'],
  168. "onGridEnergy": config.get('onGridEnergy', 1),
  169. "price": config.get('price', 1),
  170. "fault": config.get('fault', -99),
  171. "colTime": config['col_time'], #时间列名(可选,要与上面'dateTime一致')
  172. # "computPowersEnum": "E4" # 计算功率类型(可选)
  173. "data": data # MongoDB数据
  174. }
  175. return request_body
  176. def calculate_acc(api_url, request_body):
  177. """
  178. 调用API接口
  179. """
  180. headers = {
  181. 'Content-Type': 'application/json',
  182. 'Accept': 'application/json'
  183. }
  184. try:
  185. response = requests.post(
  186. api_url,
  187. data=json.dumps(request_body, ensure_ascii=False),
  188. headers=headers
  189. )
  190. result = response.json()
  191. if response.status_code == 200:
  192. acc = np.average([res['accuracy'] for res in result])
  193. # ass = np.average([res['accuracyAssessment'] for res in result])
  194. return acc, 0
  195. else:
  196. logger.info(f"失败:{result['status']},{result['error']}")
  197. print(f"失败:{result['status']},{result['error']}")
  198. print("22222222")
  199. except requests.exceptions.RequestException as e:
  200. print(f"API调用失败: {e}")
  201. print("333333333")
  202. return None
  203. def history_error(data, col_power, col_pre, his_window):
  204. data['error'] = data[col_power] - data[col_pre]
  205. data['error'] = data['error'].round(2)
  206. data.reset_index(drop=True, inplace=True)
  207. # 用前面5个点的平均error,和象心力相加
  208. numbers = len(data) - his_window
  209. datas = [data.iloc[x: x+his_window, :].reset_index(drop=True) for x in range(0, numbers)]
  210. data_error = [np.mean(d.iloc[0:his_window, -1]) for d in datas]
  211. pad_data_error = np.pad(data_error, (his_window, 0), mode='constant', constant_values=0)
  212. print("!!!", his_window, numbers, len(pad_data_error))
  213. print("???", len(data), len(pad_data_error))
  214. data['his_fix'] = data[col_pre] + pad_data_error
  215. data = data.iloc[his_window:, :].reset_index(drop=True)
  216. data.loc[data[col_pre] <= 0, ['his_fix']] = 0
  217. data['dateTime'] = pd.to_datetime(data['dateTime'])
  218. data = data.loc[:, ['dateTime', col_power, col_pre, 'his_fix']]
  219. # data.to_csv('J01080原始数据.csv', index=False)
  220. return data
  221. @app.route('/cdq_coe_gen', methods=['POST'])
  222. def get_station_cdq_coe():
  223. # 获取程序开始时间
  224. start_time = time.time()
  225. result = {}
  226. success = 0
  227. args = {}
  228. coe = g.coe
  229. try:
  230. args = request.values.to_dict()
  231. logger.info(args)
  232. data = get_data_from_mongo(args).sort_values(by=args['col_time'], ascending=True)
  233. for point in range(0, 16, 1):
  234. iterate_his_coe(data, point, args, coe)
  235. iterate_coe(data, point, args, coe)
  236. success = 1
  237. except Exception as e:
  238. my_exception = traceback.format_exc()
  239. my_exception.replace("\n", "\t")
  240. result['msg'] = my_exception
  241. logger.info("调系数出错:{}".format(my_exception))
  242. end_time = time.time()
  243. result['success'] = success
  244. result['args'] = args
  245. result['coe'] = coe
  246. result['start_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))
  247. result['end_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))
  248. return result
  249. if __name__ == "__main__":
  250. # args = {
  251. # 'mongodb_database': 'ldw_ftp',
  252. # 'mongodb_read_table': 'j00600',
  253. # # 'timeBegin': '2025-01-01 00:00:00',
  254. # # 'timeEnd': '2025-01-03 23:45:00'
  255. # }
  256. # data = get_data_from_mongo(args).sort_values(by='dateTime', ascending=True)
  257. # pre_data = history_error(data, col_power='realPower', col_pre='dq')
  258. # for point in range(0, 16, 1):
  259. # iterate_coe(pre_data, point, 'realPower', 'dq')
  260. # run_code = 0
  261. print("Program starts execution!")
  262. from waitress import serve
  263. serve(app, host="0.0.0.0", port=10123,
  264. threads=8, # 指定线程数(默认4,根据硬件调整)
  265. channel_timeout=600 # 连接超时时间(秒)
  266. )
  267. print("server start!")