#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName :tf_bp.py
# @Time :2025/2/13 13:34
# @Author :David
# @Company: shenyang JY
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import optimizers
from models_processing.losses.loss_cdq import rmse
import numpy as np
from common.database_dml import get_h5_model_from_mongo
from threading import Lock
import argparse

model_lock = Lock()


class BPHandler(object):
    def __init__(self, logger, args):
        self.logger = logger
        self.opt = argparse.Namespace(**args)
        self.model = None

    def get_model(self, args):
        """
        Singleton-style loading guarded by a thread lock, to avoid
        thread-safety issues when the model is loaded asynchronously.
        """
        try:
            with model_lock:
                # NPHandler.model = NPHandler.get_keras_model(opt)
                self.model = get_h5_model_from_mongo(args, {'rmse': rmse})
        except Exception as e:
            self.logger.info("Failed to load model weights: {}".format(e.args))

    @staticmethod
    def get_keras_model(opt):
        model = Sequential([
            Dense(64, input_dim=opt.Model['input_size'], activation='relu'),  # input/hidden layer, 64 units
            Dense(32, activation='relu'),  # hidden layer, 32 units
            Dense(1, activation='linear')  # output layer, 1 unit (regression)
        ])
        adam = optimizers.Adam(learning_rate=opt.Model['learning_rate'], beta_1=0.9, beta_2=0.999,
                               epsilon=1e-7, amsgrad=True)
        model.compile(loss=rmse, optimizer=adam)
        return model

    def train_init(self):
        try:
            if self.opt.Model['add_train']:
                # Incremental training: continue from the stored base model (supports model refinement)
                base_train_model = get_h5_model_from_mongo(vars(self.opt), {'rmse': rmse})
                base_train_model.summary()
                self.logger.info("Loaded base model for incremental training")
            else:
                base_train_model = self.get_keras_model(self.opt)
            return base_train_model
        except Exception as e:
            self.logger.info("Failed to load model weights for incremental training: {}".format(e.args))

    def training(self, train_and_valid_data):
        model = self.train_init()
        # tf.reset_default_graph()  # clear the default graph
        train_x, train_y, valid_x, valid_y = train_and_valid_data
        # debug output: shapes of the first two training samples
        print("----------", np.array(train_x[0]).shape)
        print("++++++++++", np.array(train_x[1]).shape)
        model.summary()
        early_stop = EarlyStopping(monitor='val_loss', patience=self.opt.Model['patience'], mode='auto')
        history = model.fit(train_x, train_y, batch_size=self.opt.Model['batch_size'], epochs=self.opt.Model['epoch'],
                            verbose=2, validation_data=(valid_x, valid_y), callbacks=[early_stop], shuffle=False)
        loss = np.round(history.history['loss'], decimals=5)
        val_loss = np.round(history.history['val_loss'], decimals=5)
        self.logger.info("----- Training finished after {} epochs -----".format(len(loss)))
        self.logger.info("Training loss: {}".format(loss))
        self.logger.info("Validation loss: {}".format(val_loss))
        return model

    def predict(self, test_x, batch_size=1):
        result = self.model.predict(test_x, batch_size=batch_size)
        self.logger.info("Running prediction")
        return result


if __name__ == "__main__":
    run_code = 0
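    # ------------------------------------------------------------------
    # Minimal local sketch (illustrative only): the config keys below mirror
    # the ones referenced above (input_size, learning_rate, add_train,
    # patience, batch_size, epoch), but the values and the random training
    # data are made up. It only shows how BPHandler is wired together; it is
    # not the project's real entry point.
    # ------------------------------------------------------------------
    import logging
    logging.basicConfig(level=logging.INFO)
    demo_args = {
        'Model': {
            'input_size': 10,      # assumed feature dimension
            'learning_rate': 0.001,
            'add_train': False,    # build a fresh model instead of loading from MongoDB
            'patience': 5,
            'batch_size': 32,
            'epoch': 3,
        }
    }
    bp = BPHandler(logging.getLogger("tf_bp_demo"), demo_args)
    rng = np.random.default_rng(0)
    train_x = rng.normal(size=(256, 10)).astype('float32')
    train_y = rng.normal(size=(256, 1)).astype('float32')
    valid_x = rng.normal(size=(64, 10)).astype('float32')
    valid_y = rng.normal(size=(64, 1)).astype('float32')
    bp.model = bp.training([train_x, train_y, valid_x, valid_y])
    print(bp.predict(valid_x, batch_size=32)[:5])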