# model_keras.py
  1. # -*- coding: UTF-8 -*-
  2. from keras.layers import Input, Dense, LSTM, Lambda, concatenate, Conv1D, Conv2D, MaxPooling1D, Reshape, Flatten
  3. from keras.models import Model
  4. from keras.callbacks import ModelCheckpoint, EarlyStopping
  5. def get_keras_model(opt):
  6. lstm_input = Input(shape=(opt.predict_points, opt.input_size_lstm))
  7. lstm = lstm_input
  8. for i in range(opt.Model['lstm_layers']):
  9. rs = True
  10. # if i == opt.Model['lstm_layers']-1:
  11. # rs = False
  12. lstm = LSTM(units=opt.Model['hidden_size'], dropout=opt.Model['dropout_rate'], return_sequences=rs)(lstm)
  13. output = Dense(1)(lstm)
  14. output = Flatten(data_format='channels_last')(output)
  15. lstm1 = lstm_input
  16. for i in range(opt.Model['lstm_layers']):
  17. rs = True
  18. # if i == opt.Model['lstm_layers']-1:
  19. # rs = False
  20. lstm1 = LSTM(units=opt.Model['hidden_size'], dropout=opt.Model['dropout_rate'], return_sequences=rs)(lstm1)
  21. output1 = Dense(1)(lstm1)
  22. output1 = Flatten(data_format='channels_last')(output1)
  23. outputs = Lambda(sum)([output, output1])
  24. # outputs = Dense(opt.output_size)(outputs)
  25. model = Model(lstm_input, [output, output1])
  26. # model = Model(lstm_input, outputs)
  27. model.compile(loss='mse', optimizer='adam') # metrics=["mae"]
  28. return model
  29. def gpu_train_init():
  30. import tensorflow as tf
  31. from keras.backend.tensorflow_backend import set_session
  32. sess_config = tf.ConfigProto(log_device_placement=True, allow_soft_placement=True)
  33. sess_config.gpu_options.per_process_gpu_memory_fraction = 0.7 # 最多使用70%GPU内存
  34. sess_config.gpu_options.allow_growth=True # 初始化时不全部占满GPU显存, 按需分配
  35. sess = tf.Session(config=sess_config)
  36. set_session(sess)
  37. def train(opt, train_and_valid_data):
  38. if opt.use_cuda: gpu_train_init()
  39. train_X, train_Y, valid_X, valid_Y = train_and_valid_data
  40. import numpy as np
  41. print("----------", np.array(train_X[0]).shape)
  42. print("++++++++++", np.array(train_X[1]).shape)
  43. model = get_keras_model(opt)
  44. model.summary()
  45. if opt.add_train:
  46. model.load_weights(opt.model_save_path + opt.model_name)
  47. check_point = ModelCheckpoint(filepath=opt.model_save_path + opt.model_name, monitor='val_loss',
  48. save_best_only=True, mode='auto')
  49. early_stop = EarlyStopping(monitor='val_loss', patience=opt.Model['patience'], mode='auto')
  50. model.fit(train_X, train_Y, batch_size=opt.Model['batch_size'], epochs=opt.Model['epoch'], verbose=2,
  51. validation_data=(valid_X, valid_Y), callbacks=[check_point, early_stop])
  52. def predict(config, test_X):
  53. model = get_keras_model(config)
  54. model.load_weights(config.model_save_path + config.model_name)
  55. result = model.predict(test_X, batch_size=1)
  56. # result = result.reshape((-1, config.output_size))
  57. return result