@@ -5,8 +5,9 @@
 # @Author :David
 # @Company: shenyang JY
 from tensorflow.keras.models import Sequential
-from tensorflow.keras.layers import Input, Dense, LSTM, concatenate, Conv1D, Conv2D, MaxPooling1D, Reshape, Flatten
+from tensorflow.keras.layers import Input, Dense, LSTM, concatenate, Conv1D, Conv2D, MaxPooling1D, Reshape, Flatten, Dropout, BatchNormalization, LeakyReLU
 from tensorflow.keras.models import Model, load_model
+from tensorflow.keras.regularizers import l2
 from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard, ReduceLROnPlateau
 from tensorflow.keras import optimizers, regularizers
 from models_processing.model_tf.losses import region_loss
@@ -35,16 +36,51 @@ class BPHandler(object):
         except Exception as e:
             self.logger.info("Failed to load model weights: {}".format(e.args))

+    # @staticmethod
+    # def get_keras_model(opt):
+    #     loss = region_loss(opt)
+    #     model = Sequential([
+    #         Dense(64, input_dim=opt.Model['input_size'], activation='relu'),  # input and first hidden layer, 64 neurons
+    #         Dropout(0.2),
+    #         Dense(32, activation='relu'),  # hidden layer, 32 neurons
+    #         Dropout(0.3),
+    #         Dense(16, activation='relu'),  # hidden layer, 16 neurons
+    #         Dense(1, activation='linear')  # output layer, 1 neuron (for the regression task)
+    #     ])
+    #     adam = optimizers.Adam(learning_rate=opt.Model['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=True)
+    #     model.compile(loss=loss, optimizer=adam)
+    #     return model
+
     @staticmethod
     def get_keras_model(opt):
+        # Custom loss function (its correctness still needs to be verified)
         loss = region_loss(opt)
+
+        # Network structure
         model = Sequential([
-            Dense(64, input_dim=opt.Model['input_size'], activation='relu'),  # input and first hidden layer, 64 neurons
-            Dense(32, activation='relu'),  # hidden layer, 32 neurons
-            Dense(16, activation='relu'),  # hidden layer, 16 neurons
-            Dense(1, activation='linear')  # output layer, 1 neuron (for the regression task)
+            Dense(128, input_dim=opt.Model['input_size'], kernel_regularizer=l2(0.001)),
+            LeakyReLU(alpha=0.1),
+            BatchNormalization(),
+            Dropout(0.3),
+
+            Dense(64, kernel_regularizer=l2(0.001)),
+            LeakyReLU(alpha=0.1),
+            BatchNormalization(),
+            Dropout(0.4),
+
+            Dense(32, kernel_regularizer=l2(0.001)),
+            LeakyReLU(alpha=0.1),
+            Dense(1, activation='linear')
+        ])
-        adam = optimizers.Adam(learning_rate=opt.Model['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=True)
+
+        # Optimizer configuration (learning rate is now fixed at 0.001 rather than read from opt.Model['learning_rate'])
+        adam = optimizers.Adam(
+            learning_rate=0.001,
+            beta_1=0.9,
+            beta_2=0.999,
+            epsilon=1e-6
+        )
+
         model.compile(loss=loss, optimizer=adam)
         return model
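
A minimal standalone sketch of how the rebuilt network might be exercised, assuming TensorFlow 2.x. Here 'mse' stands in for the project's region_loss, and input_size=10 plus the random data are arbitrary placeholders used only to smoke-test shapes:

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, BatchNormalization, LeakyReLU
from tensorflow.keras.regularizers import l2
from tensorflow.keras import optimizers

input_size = 10  # placeholder for opt.Model['input_size']

# Same layer stack as the patched get_keras_model
model = Sequential([
    Dense(128, input_dim=input_size, kernel_regularizer=l2(0.001)),
    LeakyReLU(alpha=0.1),
    BatchNormalization(),
    Dropout(0.3),

    Dense(64, kernel_regularizer=l2(0.001)),
    LeakyReLU(alpha=0.1),
    BatchNormalization(),
    Dropout(0.4),

    Dense(32, kernel_regularizer=l2(0.001)),
    LeakyReLU(alpha=0.1),
    Dense(1, activation='linear')
])

adam = optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-6)
model.compile(loss='mse', optimizer=adam)  # 'mse' stands in for region_loss(opt)

# Smoke test on random data: 256 samples with input_size features each
x = np.random.rand(256, input_size).astype('float32')
y = np.random.rand(256, 1).astype('float32')
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
print(model.predict(x[:4]).shape)  # -> (4, 1)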
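
The patch keeps the project-specific region_loss(opt) from models_processing.model_tf.losses, whose definition is not shown here. For illustration only, a hypothetical region-style loss with the factory shape that model.compile accepts (a callable returning a (y_true, y_pred) -> tensor function); the bounds and penalty weight are invented placeholders, not the project's actual formula:

import tensorflow as tf

def example_region_loss(opt):
    """Hypothetical stand-in with the same call shape as region_loss(opt)."""
    lower, upper = 0.0, 1.0  # placeholder region bounds; the real ones would come from opt
    penalty = 2.0            # placeholder extra weight for out-of-region predictions

    def loss(y_true, y_pred):
        err = tf.square(y_true - y_pred)                       # base squared error
        outside = tf.logical_or(y_pred < lower, y_pred > upper)
        weight = tf.where(outside, penalty, 1.0)               # weight out-of-region errors harder
        return tf.reduce_mean(weight * err)

    return loss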