nn_zone.py 7.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156
  1. #!/usr/bin/env python
  2. # -*- coding: utf-8 -*-
  3. # time: 2024/5/6 13:25
  4. # file: time_series.py
  5. # author: David
  6. # company: shenyang JY
  7. import os.path
  8. from keras.layers import Input, Dense, LSTM, concatenate, Conv1D, Conv2D, MaxPooling1D, Reshape, Flatten
  9. from keras.models import Model, load_model
  10. from keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard
  11. from keras import optimizers, regularizers
  12. import keras.backend as K
  13. import numpy as np
  14. from cache.sloss import NorthEastLoss, SouthLoss
  15. np.random.seed(42)
  16. import tensorflow as tf
  17. tf.compat.v1.set_random_seed(1234)
  18. from threading import Lock
  19. model_lock = Lock()
  20. def rmse(y_true, y_pred):
  21. offset = tf.square(y_pred - y_true)
  22. mean = tf.reduce_mean(offset)
  23. sqrt = tf.sqrt(mean)
  24. return sqrt
# Project root: two directory levels above this file. Model weights are
# read from / written to <var_dir>/var/fmi.h5.
var_dir = os.path.dirname(os.path.dirname(__file__))
  26. class FMI(object):
  27. model = None
  28. train = False
  29. def __init__(self, log, args, graph, sess):
  30. self.logger = log
  31. self.graph = graph
  32. self.sess = sess
  33. opt = args.parse_args_and_yaml()
  34. with self.graph.as_default():
  35. tf.compat.v1.keras.backend.set_session(self.sess)
  36. FMI.get_model(opt)
  37. @staticmethod
  38. def get_model(opt):
  39. """
  40. 单例模式+线程锁,防止在异步加载时引发线程安全
  41. """
  42. try:
  43. if FMI.model is None or FMI.train is True:
  44. with model_lock:
  45. FMI.model = FMI.get_keras_model(opt)
  46. FMI.model.load_weights(os.path.join(var_dir, 'var', 'fmi.h5'))
  47. except Exception as e:
  48. print("加载模型权重失败:{}".format(e.args))
  49. @staticmethod
  50. def get_keras_model(opt):
  51. db_loss = NorthEastLoss(opt)
  52. south_loss = SouthLoss(opt)
  53. l1_reg = regularizers.l1(opt.Model['lambda_value_1'])
  54. l2_reg = regularizers.l2(opt.Model['lambda_value_2'])
  55. nwp_input = Input(shape=(opt.Model['time_step'], opt.Model['input_size_nwp']), name='nwp')
  56. env_input = Input(shape=(opt.Model['his_points'], opt.Model['input_size_env']), name='env')
  57. con1 = Conv1D(filters=64, kernel_size=1, strides=1, padding='valid', activation='relu',
  58. kernel_regularizer=l2_reg)(nwp_input)
  59. nwp = MaxPooling1D(pool_size=1, strides=1, padding='valid', data_format='channels_last')(con1)
  60. nwp = LSTM(units=opt.Model['hidden_size'], return_sequences=True, name='env_lstm', kernel_regularizer=l2_reg)(nwp)
  61. zone = Dense(len(opt.zone.keys()), name='zone')(nwp)
  62. zonef = Flatten()(zone)
  63. # nwp = Dense(1, activation='relu', name='d2', kernel_regularizer=l1_reg)(nwp_lstm)
  64. output = Dense(opt.Model['output_size'], name='cdq_output')(zonef)
  65. model = Model([env_input, nwp_input], [zone, output])
  66. adam = optimizers.Adam(learning_rate=opt.Model['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=True)
  67. model.compile(loss={"zone": rmse, "cdq_output": rmse}, loss_weights={"zone": 0.7, "cdq_output": 0.3}, optimizer=adam)
  68. return model
  69. @staticmethod
  70. def get_keras_model1(opt):
  71. db_loss = NorthEastLoss(opt)
  72. south_loss = SouthLoss(opt)
  73. l1_reg = regularizers.l1(opt.Model['lambda_value_1'])
  74. l2_reg = regularizers.l2(opt.Model['lambda_value_2'])
  75. nwp_input = Input(shape=(opt.Model['time_step'], opt.Model['input_size_nwp']), name='nwp')
  76. env_input = Input(shape=(opt.Model['his_points'], opt.Model['input_size_env']), name='env')
  77. con1 = Conv1D(filters=64, kernel_size=1, strides=1, padding='valid', activation='relu',
  78. kernel_regularizer=l2_reg)(nwp_input)
  79. nwp = MaxPooling1D(pool_size=1, strides=1, padding='valid', data_format='channels_last')(con1)
  80. nwp = LSTM(units=opt.Model['hidden_size'], return_sequences=True, name='env_lstm', kernel_regularizer=l2_reg)(
  81. nwp)
  82. zone = Dense(len(opt.zone.keys()), name='zone')(nwp)
  83. zonef = Flatten()(zone)
  84. # nwp = Dense(1, activation='relu', name='d2', kernel_regularizer=l1_reg)(nwp_lstm)
  85. output = Dense(opt.Model['output_size'], name='cdq_output')(zonef)
  86. model = Model([env_input, nwp_input], [zone, output])
  87. adam = optimizers.Adam(learning_rate=opt.Model['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-7,
  88. amsgrad=True)
  89. model.compile(loss={"cdq_output": south_loss}, optimizer=adam)
  90. return model
  91. def train_init(self, opt):
  92. tf.compat.v1.keras.backend.set_session(self.sess)
  93. model = FMI.get_keras_model(opt)
  94. try:
  95. if opt.Model['add_train'] and opt.authentication['repair'] != "null":
  96. # 进行加强训练,支持修模
  97. model.load_weights(os.path.join(var_dir, 'var', 'fmi.h5'))
  98. self.logger.info("已加载加强训练基础模型")
  99. except Exception as e:
  100. self.logger.info("加强训练加载模型权重失败:{}".format(e.args))
  101. model.summary()
  102. return model
  103. def training(self, opt, train_and_valid_data):
  104. model = self.train_init(opt)
  105. train_X, train_Y, valid_X, valid_Y = train_and_valid_data
  106. # print("----------", np.array(train_X[0]).shape)
  107. # print("++++++++++", np.array(train_X[1]).shape)
  108. # weight_lstm_1, bias_lstm_1 = model.get_layer('d1').get_weights()
  109. # print("weight_lstm_1 = ", weight_lstm_1)
  110. # print("bias_lstm_1 = ", bias_lstm_1)
  111. check_point = ModelCheckpoint(filepath='./var/' + 'fmi.h5', monitor='val_loss',
  112. save_best_only=True, mode='auto')
  113. early_stop = EarlyStopping(monitor='val_loss', patience=opt.Model['patience'], mode='auto')
  114. # tbCallBack = TensorBoard(log_dir='../figure',
  115. # histogram_freq=0,
  116. # write_graph=True,
  117. # write_images=True)
  118. history = model.fit(train_X, train_Y, batch_size=opt.Model['batch_size'], epochs=opt.Model['epoch'], verbose=2,
  119. validation_data=(valid_X, valid_Y), callbacks=[check_point, early_stop])
  120. loss = np.round(history.history['loss'], decimals=2)
  121. val_loss = np.round(history.history['val_loss'], decimals=2)
  122. self.logger.info("-----模型训练经过{}轮迭代-----".format(len(loss)))
  123. self.logger.info("训练集损失函数为:{}".format(loss))
  124. self.logger.info("验证集损失函数为:{}".format(val_loss))
  125. self.logger.info("训练结束,原模型地址:{}".format(id(FMI.model)))
  126. with self.graph.as_default():
  127. tf.compat.v1.keras.backend.set_session(self.sess)
  128. FMI.train = True
  129. FMI.get_model(opt)
  130. FMI.train = False
  131. self.logger.info("保护线程,加载模型,地址:{}".format(id(FMI.model)))
  132. def predict(self, test_X, batch_size=1):
  133. with self.graph.as_default():
  134. with self.sess.as_default():
  135. result = FMI.model.predict(test_X, batch_size=batch_size)
  136. self.logger.info("执行预测方法")
  137. return result