123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125 |
- # -*- coding: UTF-8 -*-
- from keras.layers import Input, Dense, LSTM, concatenate, Conv1D, Conv2D, MaxPooling1D, Reshape, Flatten
- from keras.models import Model, load_model
- from keras.callbacks import ModelCheckpoint, EarlyStopping
- from keras import optimizers
- from keras.callbacks import TensorBoard
- import matplotlib.pyplot as plt
- import numpy as np
- from keras.callbacks import TensorBoard, EarlyStopping
- # model_history = model.fit_generator(train_generator, epochs=30, validation_data=evaluate_generator,
- # callbacks=[early_stopping, tbCallBack])
def get_keras_model(opt):
    """Build and compile the stacked-LSTM regression model.

    Args:
        opt: config object; reads opt.Model['time_step'], opt.Model['lstm_layers'],
             opt.Model['hidden_size'] and opt.input_size_lstm.
             (Assumes opt.Model is a dict — TODO confirm against caller.)

    Returns:
        A compiled keras.models.Model mapping
        (time_step, input_size_lstm) -> Dense(16) output.
    """
    lstm_input = Input(shape=(opt.Model['time_step'], opt.input_size_lstm))
    lstm = lstm_input
    for i in range(opt.Model['lstm_layers']):
        # Only the last LSTM layer collapses the time axis
        # (return_sequences=False); earlier layers feed full sequences onward.
        rs = i != opt.Model['lstm_layers'] - 1
        lstm = LSTM(units=opt.Model['hidden_size'], return_sequences=rs)(lstm)
    lstm = Dense(16)(lstm)
    model = Model(lstm_input, lstm)
    # BUG FIX: the configured optimizer was previously discarded — compile()
    # received the string 'adam' (default lr=0.001, amsgrad=False) instead of
    # this instance, so lr=0.01 and amsgrad=True never took effect.
    adam = optimizers.adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=True)
    model.compile(loss='mse', optimizer=adam)
    return model
def train_init(use_cuda=False):
    """Install a configured TF1 session as the Keras backend session.

    Args:
        use_cuda: when True, set up a GPU session (70% memory cap, grow on
                  demand); otherwise a single-threaded, seeded CPU session.
    """
    import tensorflow as tf
    from keras.backend.tensorflow_backend import set_session

    if use_cuda:
        # GPU: cap usage at 70% of device memory and allocate incrementally
        # instead of grabbing everything up front.
        gpu_conf = tf.ConfigProto(log_device_placement=True, allow_soft_placement=True)
        gpu_conf.gpu_options.per_process_gpu_memory_fraction = 0.7
        gpu_conf.gpu_options.allow_growth = True
        set_session(tf.Session(config=gpu_conf))
    else:
        # CPU: single-threaded ops plus a fixed graph seed for reproducibility.
        cpu_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
                                  inter_op_parallelism_threads=1)
        tf.set_random_seed(1234)
        set_session(tf.Session(graph=tf.get_default_graph(), config=cpu_conf))
def train(opt, train_and_valid_data):
    """Train the model with checkpointing and early stopping, then plot loss.

    Args:
        opt: config object (Model dict, use_cuda, add_train, model_save_path,
             save_name).
        train_and_valid_data: 4-tuple (train_X, train_Y, valid_X, valid_Y).

    Side effects: writes the best checkpoint to
    opt.model_save_path + opt.save_name and draws the training-loss curve.
    """
    train_init(opt.use_cuda)
    train_X, train_Y, valid_X, valid_Y = train_and_valid_data
    print("----------", np.array(train_X[0]).shape)
    print("++++++++++", np.array(train_X[1]).shape)
    model = get_keras_model(opt)
    model.summary()
    # Debug: dump the initial weights of the Dense head (Keras auto-names
    # it 'dense_1' — presumably the Dense(16) layer; verify if layers change).
    weight_lstm_1, bias_lstm_1 = model.get_layer('dense_1').get_weights()
    print("weight_lstm_1 = ", weight_lstm_1)
    print("bias_lstm_1 = ", bias_lstm_1)
    if opt.add_train:
        # NOTE(review): 'model_kerass.h5' (double s) looks like a typo versus
        # the 'model_keras*.h5' names used elsewhere — confirm the on-disk name
        # before changing it.
        model.load_weights(opt.model_save_path + 'model_kerass.h5')
    check_point = ModelCheckpoint(filepath=opt.model_save_path + opt.save_name, monitor='val_loss',
                                  save_best_only=True, mode='auto')
    early_stop = EarlyStopping(monitor='val_loss', patience=opt.Model['patience'], mode='auto')
    history = model.fit(train_X, train_Y, batch_size=opt.Model['batch_size'], epochs=opt.Model['epoch'], verbose=2,
                        validation_data=(valid_X, valid_Y), callbacks=[check_point, early_stop])
    loss = history.history['loss']
    epochs = range(1, len(loss) + 1)
    plt.title('Loss')
    # BUG FIX: this curve is the *training* loss (history.history['loss']);
    # it was previously mislabeled 'Validation loss'.
    plt.plot(epochs, loss, 'blue', label='Training loss')
    plt.legend()
    # plt.show()
def predict(config, test_X):
    """Rebuild the model, restore saved weights, and run inference.

    Args:
        config: config object providing model_save_path and save_name.
        test_X: model inputs in the layout expected by get_keras_model.

    Returns:
        Raw prediction array from Keras (batch_size=1).
    """
    net = get_keras_model(config)
    weights_path = config.model_save_path + config.save_name
    net.load_weights(weights_path)
    return net.predict(test_X, batch_size=1)
def predict_cls(config, test_X, df_Y):
    """Predict each sample with the checkpoint matching its class label.

    For every sample i, df_Y[i]['label'][0] selects one of four per-class
    weight files ('model_keras1.h5' .. 'model_keras4.h5'); samples with any
    other label are skipped, exactly as before.

    Args:
        config: config object passed through to get_keras_model.
        test_X: pair of parallel input sequences (test_X[0], test_X[1]).
        df_Y: per-sample frames; df_Y[i]['label'][0] is the class id
              (presumably pandas DataFrames — TODO confirm with caller).

    Returns:
        9-tuple: (all results, results for labels 1-4 as np arrays,
        then the df_Y entries grouped by labels 1-4).
    """
    results = []
    results_by_label = {1: [], 2: [], 3: [], 4: []}
    dfy_by_label = {1: [], 2: [], 3: [], 4: []}
    model = get_keras_model(config)
    loaded_label = None  # last label whose weights are in `model`
    for i, sample in enumerate(zip(test_X[0], test_X[1])):
        X = [np.array([sample[0]]), np.array([sample[1]])]
        label = df_Y[i]['label'][0]
        print("label=", label)
        if label not in results_by_label:
            continue  # unknown label: skipped, matching the old if/elif chain
        # PERF FIX: previously every branch reloaded its weight file from disk
        # on every sample; now we reload only when the label changes.
        if label != loaded_label:
            model.load_weights('./checkpoint/model_keras.h5/' + 'model_keras%d.h5' % label)
            loaded_label = label
        result = model.predict(X, batch_size=1)
        results.append(result[0])
        results_by_label[label].append(result[0])
        dfy_by_label[label].append(df_Y[i])
    return (np.array(results),
            np.array(results_by_label[1]), np.array(results_by_label[2]),
            np.array(results_by_label[3]), np.array(results_by_label[4]),
            dfy_by_label[1], dfy_by_label[2], dfy_by_label[3], dfy_by_label[4])
|