
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName :tf_lstm.py
# @Time :2025/2/12 14:03
# @Author :David
# @Company: shenyang JY
# Fix the Python hash seed and the NumPy random seed for reproducibility
import os
os.environ['PYTHONHASHSEED'] = '42'
import numpy as np
np.random.seed(42)
# Fix the TensorFlow random seed
import tensorflow as tf
tf.random.set_seed(42)
from tensorflow.keras.initializers import glorot_uniform, orthogonal
from tensorflow.keras.layers import Input, Dense, LSTM, Conv1D, MaxPooling1D
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import optimizers, regularizers
from models_processing.model_tf.losses import region_loss
from common.database_dml import get_keras_model_from_mongo
from threading import Lock
import argparse

model_lock = Lock()


class TSHandler(object):
    def __init__(self, logger, args):
        self.logger = logger
        self.opt = argparse.Namespace(**args)
        self.model = None
        self.model_params = None

    def get_model(self, args):
        """
        Singleton-style access guarded by a thread lock, so that asynchronous
        loading cannot trigger thread-safety issues.
        """
        try:
            with model_lock:
                loss = region_loss(self.opt)
                self.model, self.model_params = get_keras_model_from_mongo(args, {type(loss).__name__: loss})
        except Exception as e:
            self.logger.info("Failed to load model weights: {}".format(e.args))
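
    # Usage note (an assumption about the surrounding service, not stated in this
    # file): callers are expected to invoke get_model() once before serving, so the
    # handler behaves like a singleton; model_lock serializes the MongoDB load, and
    # the loss is registered under its class name, presumably forwarded to Keras as
    # custom_objects so a stored model compiled with region_loss can be restored.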

    @staticmethod
    def get_keras_model_20250514(opt, time_series=1, lstm_type=1):
        # Dated (2025-05-14) variant: Conv1D and MaxPooling1D with window size 5
        loss = region_loss(opt)
        l1_reg = regularizers.l1(opt.Model['lambda_value_1'])  # built but not applied to any layer
        l2_reg = regularizers.l2(opt.Model['lambda_value_2'])
        nwp_input = Input(shape=(opt.Model['time_step'] * time_series, opt.Model['input_size']), name='nwp')
        con1 = Conv1D(filters=64, kernel_size=5, strides=1, padding='valid', activation='relu',
                      kernel_regularizer=l2_reg, kernel_initializer=glorot_uniform(seed=42))(nwp_input)
        con1_p = MaxPooling1D(pool_size=5, strides=1, padding='valid', data_format='channels_last')(con1)
        nwp_lstm = LSTM(units=opt.Model['hidden_size'], return_sequences=False, kernel_regularizer=l2_reg,
                        kernel_initializer=glorot_uniform(seed=43),
                        recurrent_initializer=orthogonal(seed=44),  # LSTM-specific initialization
                        bias_initializer='zeros')(con1_p)
        if lstm_type == 2:
            output = Dense(opt.Model['time_step'], name='cdq_output', kernel_initializer=glorot_uniform(seed=45),
                           bias_initializer='zeros')(nwp_lstm)
        else:
            output = Dense(opt.Model['time_step'] * time_series, name='cdq_output', kernel_initializer=glorot_uniform(seed=45),
                           bias_initializer='zeros')(nwp_lstm)
        model = Model(nwp_input, output)
        adam = optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=True)
        model.compile(loss=loss, optimizer=adam)
        return model

    @staticmethod
    def get_keras_model(opt, time_series=1, lstm_type=1):
        # Current variant: pointwise Conv1D (kernel_size=1); the pool_size=1
        # MaxPooling1D is effectively a pass-through that keeps the sequence length
        loss = region_loss(opt)
        l1_reg = regularizers.l1(opt.Model['lambda_value_1'])  # built but not applied to any layer
        l2_reg = regularizers.l2(opt.Model['lambda_value_2'])
        nwp_input = Input(shape=(opt.Model['time_step'] * time_series, opt.Model['input_size']), name='nwp')
        con1 = Conv1D(filters=64, kernel_size=1, strides=1, padding='valid', activation='relu',
                      kernel_regularizer=l2_reg, kernel_initializer=glorot_uniform(seed=42))(nwp_input)
        con1_p = MaxPooling1D(pool_size=1, strides=1, padding='valid', data_format='channels_last')(con1)
        nwp_lstm = LSTM(units=opt.Model['hidden_size'], return_sequences=False, kernel_regularizer=l2_reg,
                        kernel_initializer=glorot_uniform(seed=43),
                        recurrent_initializer=orthogonal(seed=44),  # LSTM-specific initialization
                        bias_initializer='zeros')(con1_p)
        if lstm_type == 2:
            output = Dense(opt.Model['time_step'], name='cdq_output', kernel_initializer=glorot_uniform(seed=45),
                           bias_initializer='zeros')(nwp_lstm)
        else:
            output = Dense(opt.Model['time_step'] * time_series, name='cdq_output', kernel_initializer=glorot_uniform(seed=45),
                           bias_initializer='zeros')(nwp_lstm)
        model = Model(nwp_input, output)
        adam = optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=True)
        model.compile(loss=loss, optimizer=adam)
        return model

    def train_init(self):
        try:
            # Strengthened training: continue from the stored base model (supports model revision)
            loss = region_loss(self.opt)
            base_train_model, self.model_params = get_keras_model_from_mongo(vars(self.opt), {type(loss).__name__: loss})
            base_train_model.summary()
            self.logger.info("Loaded the base model for strengthened training")
            return base_train_model
        except Exception as e:
            self.logger.info("Failed to load weights for strengthened training: {}".format(e.args))
            return False

    def training(self, model, train_and_valid_data):
        model.summary()
        train_x, train_y, valid_x, valid_y = train_and_valid_data
        early_stop = EarlyStopping(monitor='val_loss', patience=self.opt.Model['patience'], mode='auto')
        history = model.fit(train_x, train_y, batch_size=self.opt.Model['batch_size'], epochs=self.opt.Model['epoch'],
                            verbose=2, validation_data=(valid_x, valid_y), callbacks=[early_stop], shuffle=False)
        loss = np.round(history.history['loss'], decimals=5)
        val_loss = np.round(history.history['val_loss'], decimals=5)
        self.logger.info("----- Training ran for {} epochs -----".format(len(loss)))
        self.logger.info("Training loss: {}".format(loss))
        self.logger.info("Validation loss: {}".format(val_loss))
        return model
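
    # Shape note (illustrative, inferred from the layers above): train_x/valid_x are
    # expected as (samples, time_step * time_series, input_size) arrays matching the
    # 'nwp' Input, and train_y/valid_y as (samples, time_step * time_series)
    # (or (samples, time_step) when lstm_type == 2) matching 'cdq_output'.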

    def predict(self, test_x, batch_size=1):
        result = self.model.predict(test_x, batch_size=batch_size)
        self.logger.info("Running the predict method")
        return result


if __name__ == "__main__":
    run_code = 0
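
    # Minimal smoke test (illustrative sketch, not part of the original flow).
    # The hyperparameter values below are assumptions for demonstration only, and
    # region_loss may require further project-specific fields on opt; adjust as needed.
    demo_opt = argparse.Namespace(Model={
        'time_step': 16,         # assumed forecast horizon
        'input_size': 24,        # assumed number of NWP features
        'hidden_size': 64,       # assumed LSTM width
        'lambda_value_1': 0.0,   # L1 weight (currently unused by the graphs)
        'lambda_value_2': 1e-4,  # L2 weight
    })
    demo_model = TSHandler.get_keras_model(demo_opt, time_series=1, lstm_type=1)
    demo_x = np.random.rand(2, 16, 24).astype('float32')  # (batch, time_step, input_size)
    print(demo_model.predict(demo_x, batch_size=2).shape)  # expected: (2, 16)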