tf_fmi_train.py 5.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115
  1. #!/usr/bin/env python
  2. # -*- coding:utf-8 -*-
  3. # @FileName :tf_lstm_train.py
  4. # @Time :2025/2/13 10:52
  5. # @Author :David
  6. # @Company: shenyang JY
  7. import json, os
  8. import numpy as np
  9. import traceback
  10. import logging
  11. from app.common.logs import args
  12. from app.common.data_handler import DataHandler, write_number_to_file
  13. import time
  14. from app.common.tf_fmi import FMIHandler
  15. from app.common.dbmg import MongoUtils
  16. from app.common.logs import logger
np.random.seed(42)  # NumPy random seed (reproducibility)
# tf.set_random_seed(42)  # TensorFlow random seed (currently disabled)

# Module-level singletons shared by model_training():
# dh      -- data pre-processing (train_data_handler)
# ts      -- FMI/Keras model lifecycle (train_init / get_keras_model / training)
# mgUtils -- MongoDB persistence for trained model + scalers
dh = DataHandler(logger, args)
ts = FMIHandler(logger, args)
mgUtils = MongoUtils(logger)
  22. def model_training(train_data, input_file, cap):
  23. # 获取程序开始时间
  24. start_time = time.time()
  25. result = {}
  26. success = 0
  27. logger.info("Program starts execution!")
  28. farm_id = input_file.split('/')[-2]
  29. output_file = input_file.replace('IN', 'OUT')
  30. status_file = 'STATUS.TXT'
  31. try:
  32. # ------------ 获取数据,预处理训练数据 ------------
  33. dh.opt.cap = cap
  34. train_x, valid_x, train_y, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data)
  35. ts.opt.cap = round(scaled_cap, 2)
  36. ts.opt.Model['input_size'] = train_x.shape[2]
  37. # ------------ 训练模型,保存模型 ------------
  38. # 1. 如果是加强训练模式,先加载预训练模型特征参数,再预处理训练数据
  39. # 2. 如果是普通模式,先预处理训练数据,再根据训练数据特征加载模型
  40. model = ts.train_init() if ts.opt.Model['add_train'] else ts.get_keras_model(ts.opt)
  41. if ts.opt.Model['add_train']:
  42. if model:
  43. feas = json.loads(ts.model_params).get('features', dh.opt.features)
  44. if set(feas).issubset(set(dh.opt.features)):
  45. dh.opt.features = list(feas)
  46. train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data)
  47. else:
  48. model = ts.get_keras_model(ts.opt)
  49. logger.info("训练数据特征,不满足,加强训练模型特征")
  50. else:
  51. model = ts.get_keras_model(ts.opt)
  52. ts_model = ts.training(model, [train_x, valid_x, train_y, valid_y])
  53. success = 1
  54. # 更新算法状态:1. 启动成功
  55. write_number_to_file(os.path.join(output_file, status_file), 1, 1, 'rewrite')
  56. # ------------ 组装模型数据 ------------
  57. args['Model']['features'] = ','.join(dh.opt.features)
  58. args.update({
  59. 'params': json.dumps(args),
  60. 'descr': f'南网竞赛-{farm_id}',
  61. 'gen_time': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
  62. 'model_table': args['model_table'] + farm_id,
  63. 'scaler_table': args['scaler_table'] + farm_id
  64. })
  65. mgUtils.insert_trained_model_into_mongo(ts_model, args)
  66. mgUtils.insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, args)
  67. # 更新算法状态:正常结束
  68. write_number_to_file(os.path.join(output_file, status_file), 2, 2)
  69. except Exception as e:
  70. # 如果算法状态没启动,不更新
  71. if success:
  72. write_number_to_file(os.path.join(output_file, status_file), 2, 3)
  73. my_exception = traceback.format_exc()
  74. my_exception.replace("\n", "\t")
  75. result['msg'] = my_exception
  76. end_time = time.time()
  77. result['success'] = success
  78. result['args'] = args
  79. result['start_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))
  80. result['end_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))
  81. print("Program execution ends!")
  82. return result
if __name__ == "__main__":
    print("Program starts execution!")
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logger = logging.getLogger("model_training_bp log")
    from waitress import serve
    # NOTE(review): `app` is not defined or imported anywhere in this file, so
    # this call raises NameError as written -- confirm which WSGI app (e.g. a
    # Flask instance from another module) is supposed to be served here.
    # NOTE(review): waitress.serve() blocks until the server shuts down, so the
    # "server start!" print below only executes after the server stops.
    serve(app, host="0.0.0.0", port=10103, threads=4)
    print("server start!")
  90. # args_dict = {"mongodb_database": 'realtimeDq', 'scaler_table': 'j00600_scaler', 'model_name': 'lstm1',
  91. # 'model_table': 'j00600_model', 'mongodb_read_table': 'j00600', 'col_time': 'dateTime',
  92. # 'features': 'speed10,direction10,speed30,direction30,speed50,direction50,speed70,direction70,speed90,direction90,speed110,direction110,speed150,direction150,speed170,direction170'}
  93. # args_dict['features'] = args_dict['features'].split(',')
  94. # args.update(args_dict)
  95. # dh = DataHandler(logger, args)
  96. # ts = TSHandler(logger, args)
  97. # opt = argparse.Namespace(**args)
  98. # opt.Model['input_size'] = len(opt.features)
  99. # train_data = get_data_from_mongo(args_dict)
  100. # train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes = dh.train_data_handler(train_data)
  101. # ts_model = ts.training([train_x, train_y, valid_x, valid_y])
  102. #
  103. # args_dict['gen_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
  104. # args_dict['params'] = args
  105. # args_dict['descr'] = '测试'
  106. # insert_trained_model_into_mongo(ts_model, args_dict)
  107. # insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, args_dict)