# -*- coding: UTF-8 -*-
from keras.layers import Input, Dense, LSTM, add
from keras.models import Model
from keras.callbacks import ModelCheckpoint, EarlyStopping
import matplotlib.pyplot as plt
import numpy as np
def get_keras_model(opt):
    """Build a two-branch LSTM model with two parallel 16-unit output heads."""
    lstm_input = Input(shape=(opt.Model['time_step'], opt.input_size_lstm))

    # Branch 1: stacked LSTMs; every layer returns sequences except the last.
    lstm = lstm_input
    for i in range(opt.Model['lstm_layers']):
        rs = i < opt.Model['lstm_layers'] - 1
        lstm = LSTM(units=opt.Model['hidden_size'], dropout=opt.Model['dropout_rate'],
                    return_sequences=rs)(lstm)
    output = Dense(16, name='dense_1')(lstm)
    # output = Flatten(data_format='channels_last')(output)

    # Branch 2: an identical, independently weighted LSTM stack on the same input.
    lstm1 = lstm_input
    for i in range(opt.Model['lstm_layers']):
        rs = i < opt.Model['lstm_layers'] - 1
        lstm1 = LSTM(units=opt.Model['hidden_size'], dropout=opt.Model['dropout_rate'],
                     return_sequences=rs)(lstm1)
    output1 = Dense(16, name='dense_2')(lstm1)
    # output1 = Flatten(data_format='channels_last')(output1)

    # Element-wise sum of the two heads; only needed by the commented-out
    # single-output variant below.
    outputs = add([output, output1])
    # outputs = Dense(16, name='dense_3')(outputs)

    model = Model(lstm_input, [output, output1])
    # Alternative: a single summed output with a third weighted head:
    # model = Model(lstm_input, outputs)
    # model.compile(loss={'dense_1': 'mse', 'dense_2': 'mse', 'dense_3': 'mse'},
    #               loss_weights={'dense_1': 500, 'dense_2': 500, 'dense_3': 0.04},
    #               metrics={'dense_1': ['accuracy', 'mse'], 'dense_2': ['accuracy', 'mse'], 'dense_3': ['accuracy', 'mse']},
    #               optimizer='adam')
    model.compile(loss={'dense_1': 'mse', 'dense_2': 'mse'},
                  metrics={'dense_1': ['accuracy', 'mse'], 'dense_2': ['accuracy', 'mse']},
                  optimizer='adam')  # metrics=["mae"]
    return model
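
# --- Hedged usage sketch (illustrative only; `SimpleOpt` and its values are
# assumptions, not part of the original project). get_keras_model() only
# reads opt.input_size_lstm and the Model keys shown here.
#
# class SimpleOpt:
#     Model = {'time_step': 30, 'lstm_layers': 2, 'hidden_size': 64,
#              'dropout_rate': 0.2}
#     input_size_lstm = 8
#
# m = get_keras_model(SimpleOpt())
# m.summary()  # two (None, 16) output heads: 'dense_1' and 'dense_2'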
def train_init(use_cuda=False):
    """Configure the TensorFlow session (TF 1.x / standalone-Keras APIs)."""
    import tensorflow as tf
    from keras.backend.tensorflow_backend import set_session
    if use_cuda:
        # GPU init
        sess_config = tf.ConfigProto(log_device_placement=True, allow_soft_placement=True)
        sess_config.gpu_options.per_process_gpu_memory_fraction = 0.7  # use at most 70% of GPU memory
        sess_config.gpu_options.allow_growth = True  # don't grab all GPU memory up front; allocate on demand
        sess = tf.Session(config=sess_config)
        set_session(sess)
    else:
        # CPU: single-threaded ops and a fixed seed for reproducibility
        session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
        tf.set_random_seed(1234)
        sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
        set_session(sess)
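
# Note: train_init() above relies on TensorFlow 1.x / standalone-Keras APIs
# (tf.ConfigProto, tf.Session, set_session). Under TensorFlow 2.x the rough
# equivalent of the GPU branch would be (for reference only):
#
# import tensorflow as tf
# for gpu in tf.config.experimental.list_physical_devices('GPU'):
#     tf.config.experimental.set_memory_growth(gpu, True)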
def train(opt, train_and_valid_data):
    train_init(opt.use_cuda)
    train_X, train_Y, valid_X, valid_Y = train_and_valid_data
    print("----------", np.array(train_X[0]).shape)
    print("++++++++++", np.array(train_X[1]).shape)
    model = get_keras_model(opt)
    model.summary()

    # Inspect the initial (untrained) weights of the first output head.
    weight_lstm_1, bias_lstm_1 = model.get_layer('dense_1').get_weights()
    print("weight_lstm_1 = ", weight_lstm_1)
    print("bias_lstm_1 = ", bias_lstm_1)

    if opt.add_train:
        # Resume from previously saved weights for incremental training.
        model.load_weights(opt.model_save_path + 'model_keras.h5')
    check_point = ModelCheckpoint(filepath=opt.model_save_path + opt.save_name, monitor='val_loss',
                                  save_best_only=True, mode='auto')
    early_stop = EarlyStopping(monitor='val_loss', patience=opt.Model['patience'], mode='auto')
    history = model.fit(train_X, train_Y, batch_size=opt.Model['batch_size'], epochs=opt.Model['epoch'],
                        verbose=2, validation_data=(valid_X, valid_Y),
                        callbacks=[check_point, early_stop])

    # Plot the training loss curve.
    loss = history.history['loss']
    epochs = range(1, len(loss) + 1)
    plt.title('Loss')
    # plt.plot(epochs, acc, 'red', label='Training acc')
    plt.plot(epochs, loss, 'blue', label='Training loss')
    plt.legend()
    # plt.show()
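
# Because get_keras_model() returns a model with two output heads ('dense_1'
# and 'dense_2'), the train_Y / valid_Y passed to train() must each be a list
# of two (n_samples, 16) arrays (or a dict keyed by layer name), one target
# per head.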
def predict(config, test_X):
    model = get_keras_model(config)
    model.load_weights(config.model_save_path + 'model_' + config.save_frame + '.h5')
    result = model.predict(test_X, batch_size=1)
    # result = result.reshape((-1, config.output_size))
    return result
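
if __name__ == '__main__':
    # Hedged, self-contained smoke test. It is NOT part of the original
    # project: every attribute on `Opt` mirrors a field this script already
    # reads, but the concrete values are illustrative assumptions, and a
    # TensorFlow 1.x / standalone-Keras environment is assumed.
    class Opt:
        Model = {'time_step': 10, 'lstm_layers': 2, 'hidden_size': 32,
                 'dropout_rate': 0.2, 'batch_size': 16, 'epoch': 2,
                 'patience': 3}
        input_size_lstm = 4
        use_cuda = False
        add_train = False
        model_save_path = './'
        save_name = 'model_keras.h5'
        save_frame = 'keras'

    rng = np.random.RandomState(0)
    X = rng.rand(64, Opt.Model['time_step'], Opt.input_size_lstm)
    Y1, Y2 = rng.rand(64, 16), rng.rand(64, 16)  # one target per output head
    train(Opt(), (X[:48], [Y1[:48], Y2[:48]], X[48:], [Y1[48:], Y2[48:]]))
    preds = predict(Opt(), X[48:52])
    print([p.shape for p in preds])  # expected: two (4, 16) arrays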