#!/usr/bin/env python
# -*- coding: utf-8 -*-
# time: 2024/5/6 13:25
# file: time_series.py
# author: David
# company: shenyang JY
"""Flask service exposing a training endpoint for the TensorFlow CNN model.

A read-only global configuration is loaded once from ``cnn.yaml``; every
request gets its own deep copy merged with the request parameters, plus
fresh DataHandler / CNNHandler instances stored on ``flask.g``.
"""
import os  # explicit import: previously only available via the wildcard import below
import json, copy
import numpy as np
from flask import Flask, request, g
import traceback
import logging, argparse
from data_processing.data_operation.data_handler import DataHandler
import time, yaml
from copy import deepcopy
from models_processing.model_tf.tf_cnn import CNNHandler
from common.database_dml_koi import *
import matplotlib.pyplot as plt
from common.logs import Log

# logger = logging.getLogger()
logger = Log('tf_cnn').logger
np.random.seed(42)  # fix the NumPy seed for reproducible runs

app = Flask('tf_cnn_train——service')

# Load the read-only global configuration once at import time; per-request
# copies are made in update_config() so requests never mutate this dict.
current_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(current_dir, 'cnn.yaml'), 'r', encoding='utf-8') as f:
    global_config = yaml.safe_load(f)


@app.before_request
def update_config():
    """Merge request parameters into a per-request copy of the global config.

    Stores the merged options and fresh handler instances on ``flask.g`` so
    each request is fully isolated from the others.
    """
    # ------------ Assemble options: deep-copy global config + merge request args ------------
    current_config = deepcopy(global_config)
    request_args = request.values.to_dict()
    # 'features' rule: 1. if supplied, parse the comma-separated list and override;
    # 2. if absent, keep the configured default untouched.
    request_args['features'] = request_args['features'].split(',') if 'features' in request_args else current_config['features']
    current_config.update(request_args)
    # Stash everything on the request context.
    g.opt = argparse.Namespace(**current_config)
    g.dh = DataHandler(logger, current_config)  # independent instance per request
    g.cnn = CNNHandler(logger, current_config)


@app.route('/tf_cnn_training', methods=['POST'])
def model_training_bp():
    """Train the CNN model on data pulled from MongoDB and persist it.

    Returns:
        dict: JSON-serializable result with ``success`` flag, the effective
        ``args``, start/end timestamps and (on failure) the traceback text
        in ``msg``.
    """
    start_time = time.time()
    result = {}
    success = 0
    dh = g.dh
    cnn = g.cnn
    args = deepcopy(g.opt.__dict__)
    logger.info("Program starts execution!")
    try:
        # ------------ Fetch and preprocess the training data ------------
        train_data = get_data_from_mongo(args)
        train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data)
        cnn.opt.Model['input_size'] = len(dh.opt.features)
        cnn.opt.cap = round(scaled_cap, 2)
        # ------------ Train the model, then save it ------------
        # 1. In incremental ("add_train") mode, load the pre-trained model's
        #    feature parameters first, then re-preprocess the training data.
        # 2. In normal mode, preprocess first and build a fresh model from
        #    the training-data features.
        logger.info("---------cap归一化:{}".format(cnn.opt.cap))
        model = cnn.train_init() if cnn.opt.Model['add_train'] else cnn.get_keras_model(cnn.opt)
        if cnn.opt.Model['add_train']:
            if model:
                feas = json.loads(cnn.model_params)['features']
                if set(feas).issubset(set(dh.opt.features)):
                    # Restrict preprocessing to the pre-trained model's
                    # feature set so input shapes match the loaded model.
                    dh.opt.features = list(feas)
                    train_x, train_y, valid_x, valid_y, scaled_train_bytes, scaled_target_bytes, scaled_cap = dh.train_data_handler(train_data)
                else:
                    # Training data lacks the pre-trained model's features;
                    # fall back to a freshly built model.
                    model = cnn.get_keras_model(cnn.opt)
                    logger.info("训练数据特征,不满足,加强训练模型特征")
            else:
                model = cnn.get_keras_model(cnn.opt)
        bp_model = cnn.training(model, [train_x, train_y, valid_x, valid_y])
        args['Model']['features'] = ','.join(dh.opt.features)
        args['params'] = json.dumps(args)
        args['descr'] = '测试'
        args['gen_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        insert_trained_model_into_mongo(bp_model, args)
        insert_scaler_model_into_mongo(scaled_train_bytes, scaled_target_bytes, args)
        success = 1
    except Exception:
        # BUG FIX: str.replace returns a new string; the original discarded
        # the result, so raw newlines leaked into the response message.
        my_exception = traceback.format_exc()
        result['msg'] = my_exception.replace("\n", "\t")
    end_time = time.time()
    result['success'] = success
    result['args'] = args
    result['start_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))
    result['end_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))
    print("Program execution ends!")
    return result


if __name__ == "__main__":
    print("Program starts execution!")
    from waitress import serve
    serve(app, host="0.0.0.0", port=10113,
          threads=8,            # worker thread count (default 4; tune to hardware)
          channel_timeout=600   # connection timeout in seconds
          )