#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName :tf_transformer.py
# @Time :2025/5/08 14:03
# @Author :David
# @Company: shenyang JY
import argparse
from threading import Lock

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, LSTM, Conv1D, MaxPooling1D, LayerNormalization, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import optimizers, regularizers

from common.database_dml_koi import *  # provides get_keras_model_from_mongo
from models_processing.model_tf.losses import region_loss
from models_processing.model_tf.settings import set_deterministic

model_lock = Lock()
set_deterministic(42)

class TransformerHandler(object):
    def __init__(self, logger, args):
        self.logger = logger
        self.opt = argparse.Namespace(**args)
        self.model = None
        self.model_params = None

    def get_model(self, args):
        """
        Singleton-style loading guarded by a thread lock, so concurrent
        asynchronous loads stay thread-safe.
        """
        try:
            with model_lock:
                loss = region_loss(self.opt)
                self.model, self.model_params = get_keras_model_from_mongo(args, {type(loss).__name__: loss})
        except Exception as e:
            self.logger.info("Failed to load model weights: {}".format(e.args))
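
    # Assumption about the external helper (imported via the wildcard above):
    # get_keras_model_from_mongo is expected to return a (model, params) pair
    # and to resolve the custom loss by class name through the
    # {type(loss).__name__: loss} custom-objects mapping passed in.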

    @staticmethod
    def get_keras_model(opt, time_series=1, lstm_type=1):
        """CNN + LSTM baseline: Conv1D feature extraction followed by an LSTM encoder."""
        loss = region_loss(opt)
        l1_reg = regularizers.l1(opt.Model['lambda_value_1'])  # reserved; not applied below
        l2_reg = regularizers.l2(opt.Model['lambda_value_2'])
        nwp_input = Input(shape=(opt.Model['time_step'] * time_series, opt.Model['input_size']), name='nwp')

        con1 = Conv1D(filters=64, kernel_size=5, strides=1, padding='valid', activation='relu', kernel_regularizer=l2_reg)(nwp_input)
        con1_p = MaxPooling1D(pool_size=5, strides=1, padding='valid', data_format='channels_last')(con1)
        nwp_lstm = LSTM(units=opt.Model['hidden_size'], return_sequences=False, kernel_regularizer=l2_reg)(con1_p)

        if lstm_type == 2:
            output = Dense(opt.Model['time_step'], name='cdq_output')(nwp_lstm)
        else:
            output = Dense(opt.Model['time_step'] * time_series, name='cdq_output')(nwp_lstm)

        model = Model(nwp_input, output)
        adam = optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=True)
        model.compile(loss=loss, optimizer=adam)
        return model
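
    # Usage sketch (hypothetical key values; the real config comes from the
    # project's settings, and region_loss may require further keys):
    #
    #   opt = argparse.Namespace(Model={'time_step': 16, 'input_size': 24,
    #                                   'hidden_size': 64,
    #                                   'lambda_value_1': 0.0,
    #                                   'lambda_value_2': 0.0})
    #   model = TransformerHandler.get_keras_model(opt, time_series=3)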

    @staticmethod
    def get_transformer_model(opt, time_series=1):
        """Transformer encoder over the NWP sequence, cropped to the central output window."""
        # Derive the sequence length from the config so the center-crop below
        # always matches the actual input shape
        time_steps = opt.Model['time_step'] * time_series
        output_steps = 16
        hidden_size = opt.Model.get('hidden_size', 64)
        num_heads = opt.Model.get('num_heads', 4)
        ff_dim = opt.Model.get('ff_dim', 128)
        l2_reg = regularizers.l2(opt.Model.get('lambda_value_2', 0.0))

        nwp_input = Input(shape=(time_steps, opt.Model['input_size']), name='nwp')

        # Input embedding: a pointwise Conv1D projects the features to hidden_size
        x = Conv1D(hidden_size, 1, kernel_regularizer=l2_reg)(nwp_input)

        # Transformer encoder layers (post-norm, with residual connections)
        for _ in range(opt.Model.get('num_layers', 2)):
            # Multi-head self-attention sub-layer
            attn = tf.keras.layers.MultiHeadAttention(
                num_heads=num_heads, key_dim=hidden_size,
                kernel_regularizer=l2_reg
            )(x, x)
            x = LayerNormalization()(x + attn)
            x = Dropout(0.1)(x)
            # Position-wise feed-forward sub-layer
            ffn = Dense(ff_dim, activation='relu', kernel_regularizer=l2_reg)(x)
            ffn = Dense(hidden_size, kernel_regularizer=l2_reg)(ffn)
            x = LayerNormalization()(x + ffn)
            x = Dropout(0.1)(x)

        # Crop the central output_steps time steps
        start_idx = (time_steps - output_steps) // 2
        x = x[:, start_idx:start_idx + output_steps, :]

        # Output layer: project from the last cropped step (alternatively, use all steps)
        output = Dense(output_steps, name='cdq_output')(x[:, -1, :])

        model = Model(nwp_input, output)
        adam = optimizers.Adam(
            learning_rate=opt.Model.get('learning_rate', 0.001),
            beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=True
        )
        loss = region_loss(opt)
        model.compile(loss=loss, optimizer=adam)
        return model
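
    # Note: the encoder above has no explicit positional encoding, so the
    # attention layers see the time axis as an unordered set. A minimal sketch
    # of adding a fixed sinusoidal encoding after the input embedding
    # (an assumption, not part of this project's pipeline):
    #
    #   pos = np.arange(time_steps)[:, None] / np.power(
    #       10000.0, 2 * (np.arange(hidden_size)[None, :] // 2) / hidden_size)
    #   pe = np.where(np.arange(hidden_size) % 2 == 0, np.sin(pos), np.cos(pos))
    #   x = x + tf.constant(pe[None, ...], dtype=tf.float32)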

    def train_init(self):
        try:
            # Continued (reinforcement) training: load a saved base model so it can be revised/fine-tuned
            loss = region_loss(self.opt)
            base_train_model, self.model_params = get_keras_model_from_mongo(vars(self.opt), {type(loss).__name__: loss})
            base_train_model.summary()
            self.logger.info("Loaded base model for continued training")
            return base_train_model
        except Exception as e:
            self.logger.info("Failed to load continued-training model weights: {}".format(e.args))
            return False

    def training(self, model, train_and_valid_data):
        model.summary()
        train_x, train_y, valid_x, valid_y = train_and_valid_data
        # Stop when val_loss stalls; by default Keras keeps the final-epoch
        # weights (restore_best_weights defaults to False)
        early_stop = EarlyStopping(monitor='val_loss', patience=self.opt.Model['patience'], mode='auto')
        # shuffle=False keeps batch order fixed for reproducibility (see set_deterministic above)
        history = model.fit(train_x, train_y, batch_size=self.opt.Model['batch_size'], epochs=self.opt.Model['epoch'],
                            verbose=2, validation_data=(valid_x, valid_y), callbacks=[early_stop], shuffle=False)
        loss = np.round(history.history['loss'], decimals=5)
        val_loss = np.round(history.history['val_loss'], decimals=5)
        self.logger.info("-----Training ran for {} epochs-----".format(len(loss)))
        self.logger.info("Training loss: {}".format(loss))
        self.logger.info("Validation loss: {}".format(val_loss))
        return model

    def predict(self, test_x, batch_size=1):
        # Assumes get_model() has already populated self.model
        result = self.model.predict(test_x, batch_size=batch_size)
        self.logger.info("Running predict")
        return result


if __name__ == "__main__":
    run_code = 0
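    # Smoke-test sketch (illustrative key names/values only; region_loss may
    # require additional config entries beyond those shown):
    #
    #   import logging
    #   args = {'Model': {'time_step': 16, 'input_size': 24, 'hidden_size': 64,
    #                     'num_heads': 4, 'ff_dim': 128, 'num_layers': 2,
    #                     'lambda_value_1': 0.0, 'lambda_value_2': 0.0,
    #                     'learning_rate': 0.001, 'batch_size': 32,
    #                     'epoch': 2, 'patience': 5}}
    #   handler = TransformerHandler(logging.getLogger(__name__), args)
    #   model = handler.get_transformer_model(handler.opt, time_series=3)
    #   model.summary()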