# model_prediction_lstm.py
import logging
import time
import traceback
from itertools import chain

import numpy as np
from flask import Flask, request

from common.database_dml import (
    get_data_from_mongo,
    get_h5_model_from_mongo,
    get_scaler_model_from_mongo,
    insert_data_into_mongo,
)
from common.processing_data_common import str_to_list
  9. app = Flask('model_prediction_lstm——service')
  10. # 创建时间序列数据
  11. def create_sequences(data_features,data_target,time_steps):
  12. X, y = [], []
  13. if len(data_features)<time_steps:
  14. print("数据长度不能比时间步长小!")
  15. return np.array(X), np.array(y)
  16. else:
  17. for i in range(len(data_features) - time_steps+1):
  18. X.append(data_features[i:(i + time_steps)])
  19. if len(data_target)>0:
  20. y.append(data_target[i + time_steps -1])
  21. return np.array(X), np.array(y)
  22. def model_prediction(df,args):
  23. if 'is_limit' in df.columns:
  24. df = df[df['is_limit'] == False]
  25. features, time_steps, col_time, model_name,col_reserve = str_to_list(args['features']), int(args['time_steps']),args['col_time'],args['model_name'],str_to_list(args['col_reserve'])
  26. feature_scaler,target_scaler = get_scaler_model_from_mongo(args)
  27. df = df.sort_values(by=col_time).fillna(method='ffill').fillna(method='bfill')
  28. scaled_features = feature_scaler.transform(df[features])
  29. X_predict, _ = create_sequences(scaled_features, [], time_steps)
  30. # 加载模型时传入自定义损失函数
  31. # model = load_model(f'{farmId}_model.h5', custom_objects={'rmse': rmse})
  32. model = get_h5_model_from_mongo(args)
  33. y_predict = list(chain.from_iterable(target_scaler.inverse_transform([model.predict(X_predict).flatten()])))
  34. result = df[-len(y_predict):]
  35. result['predict'] = y_predict
  36. result.loc[result['predict'] < 0, 'predict'] = 0
  37. result['model'] = model_name
  38. features_reserve = col_reserve + ['model', 'predict']
  39. return result[set(features_reserve)]
  40. @app.route('/model_prediction_lstm', methods=['POST'])
  41. def model_prediction_lstm():
  42. # 获取程序开始时间
  43. start_time = time.time()
  44. result = {}
  45. success = 0
  46. print("Program starts execution!")
  47. try:
  48. args = request.values.to_dict()
  49. print('args',args)
  50. logger.info(args)
  51. power_df = get_data_from_mongo(args)
  52. model = model_prediction(power_df,args)
  53. insert_data_into_mongo(model,args)
  54. success = 1
  55. except Exception as e:
  56. my_exception = traceback.format_exc()
  57. my_exception.replace("\n","\t")
  58. result['msg'] = my_exception
  59. end_time = time.time()
  60. result['success'] = success
  61. result['args'] = args
  62. result['start_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))
  63. result['end_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))
  64. print("Program execution ends!")
  65. return result
  66. if __name__=="__main__":
  67. print("Program starts execution!")
  68. logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
  69. logger = logging.getLogger("model_prediction_lstm log")
  70. from waitress import serve
  71. serve(app, host="0.0.0.0", port=10097)
  72. print("server start!")