# -*- coding: UTF-8 -*-
from keras.layers import Input, Dense, LSTM, Lambda, concatenate, Conv1D, Conv2D, MaxPooling1D, Reshape, Flatten
from keras.models import Model
from keras.callbacks import ModelCheckpoint, EarlyStopping

def get_keras_model(opt):
    # Shared input: (time steps, features per time step).
    lstm_input = Input(shape=(opt.predict_points, opt.input_size_lstm))

    # First branch: stacked LSTM layers, then a per-step Dense(1) flattened to (batch, time steps).
    lstm = lstm_input
    for i in range(opt.Model['lstm_layers']):
        rs = True
        # if i == opt.Model['lstm_layers'] - 1:
        #     rs = False
        lstm = LSTM(units=opt.Model['hidden_size'], dropout=opt.Model['dropout_rate'], return_sequences=rs)(lstm)
    output = Dense(1)(lstm)
    output = Flatten(data_format='channels_last')(output)

    # Second branch with the same structure, sharing the same input.
    lstm1 = lstm_input
    for i in range(opt.Model['lstm_layers']):
        rs = True
        # if i == opt.Model['lstm_layers'] - 1:
        #     rs = False
        lstm1 = LSTM(units=opt.Model['hidden_size'], dropout=opt.Model['dropout_rate'], return_sequences=rs)(lstm1)
    output1 = Dense(1)(lstm1)
    output1 = Flatten(data_format='channels_last')(output1)

    # Element-wise sum of the two branch outputs (unused while the model returns both branches separately).
    outputs = Lambda(sum)([output, output1])
    # outputs = Dense(opt.output_size)(outputs)
    model = Model(lstm_input, [output, output1])
    # model = Model(lstm_input, outputs)
    model.compile(loss='mse', optimizer='adam')  # metrics=["mae"]
    return model

def gpu_train_init():
    import tensorflow as tf
    from keras.backend.tensorflow_backend import set_session
    sess_config = tf.ConfigProto(log_device_placement=True, allow_soft_placement=True)
    sess_config.gpu_options.per_process_gpu_memory_fraction = 0.7  # use at most 70% of GPU memory
    sess_config.gpu_options.allow_growth = True  # do not grab all GPU memory up front; allocate on demand
    sess = tf.Session(config=sess_config)
    set_session(sess)

def train(opt, train_and_valid_data):
    if opt.use_cuda:
        gpu_train_init()
    train_X, train_Y, valid_X, valid_Y = train_and_valid_data
    import numpy as np
    print("----------", np.array(train_X[0]).shape)
    print("++++++++++", np.array(train_X[1]).shape)

    model = get_keras_model(opt)
    model.summary()
    if opt.add_train:
        # Continue training from previously saved weights.
        model.load_weights(opt.model_save_path + opt.model_name)

    # Save the best weights by validation loss and stop once it stops improving.
    check_point = ModelCheckpoint(filepath=opt.model_save_path + opt.model_name, monitor='val_loss',
                                  save_best_only=True, mode='auto')
    early_stop = EarlyStopping(monitor='val_loss', patience=opt.Model['patience'], mode='auto')

    # The model has two outputs, so train_Y / valid_Y are expected to provide two target arrays.
    model.fit(train_X, train_Y, batch_size=opt.Model['batch_size'], epochs=opt.Model['epoch'], verbose=2,
              validation_data=(valid_X, valid_Y), callbacks=[check_point, early_stop])

def predict(config, test_X):
    # Rebuild the architecture and load the trained weights before predicting.
    model = get_keras_model(config)
    model.load_weights(config.model_save_path + config.model_name)
    result = model.predict(test_X, batch_size=1)
    # result = result.reshape((-1, config.output_size))
    return result
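

# Usage sketch (illustrative only, not part of the original code): the real
# `opt`/`config` object is defined elsewhere in the project. The minimal stand-in
# below is hypothetical; its attribute names mirror the ones accessed above
# (predict_points, input_size_lstm, model_save_path, model_name, Model, ...)
# and its values are made up, to show the expected shapes and call sequence.
if __name__ == "__main__":
    import os
    import numpy as np

    class Opt:
        predict_points = 30        # time steps per sample
        input_size_lstm = 8        # features per time step
        use_cuda = False
        add_train = False
        model_save_path = "./checkpoints/"
        model_name = "two_branch_lstm.h5"
        Model = {'lstm_layers': 2, 'hidden_size': 64, 'dropout_rate': 0.2,
                 'batch_size': 32, 'epoch': 5, 'patience': 3}

    opt = Opt()
    if not os.path.exists(opt.model_save_path):
        os.makedirs(opt.model_save_path)

    # X: (samples, time steps, features); each of the two outputs takes a
    # (samples, time steps) target, so the same target array is passed twice here.
    X = np.random.rand(100, opt.predict_points, opt.input_size_lstm)
    Y = np.random.rand(100, opt.predict_points)
    train(opt, (X[:80], [Y[:80], Y[:80]], X[80:], [Y[80:], Y[80:]]))

    preds = predict(opt, X[80:])   # list with one prediction array per branch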