|
@@ -5,7 +5,7 @@
|
|
# @Author :David
|
|
# @Author :David
|
|
# @Company: shenyang JY
|
|
# @Company: shenyang JY
|
|
|
|
|
|
-from tensorflow.keras.layers import Input, Dense, LSTM, concatenate, Conv1D, Conv2D, MaxPooling1D, Reshape, Flatten
|
|
|
|
|
|
+from tensorflow.keras.layers import Input, Dense, LSTM, concatenate, Conv1D, Conv2D, MaxPooling1D, Reshape, Flatten, LayerNormalization, Dropout
|
|
from tensorflow.keras.models import Model, load_model
|
|
from tensorflow.keras.models import Model, load_model
|
|
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard, ReduceLROnPlateau
|
|
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard, ReduceLROnPlateau
|
|
from tensorflow.keras import optimizers, regularizers
|
|
from tensorflow.keras import optimizers, regularizers
|
|
@@ -18,7 +18,7 @@ import argparse
|
|
# Serializes access to shared Keras model state (build/load/save) across threads.
model_lock = Lock()

# Fixes global random seeds for reproducible runs.
# NOTE(review): set_deterministic is defined elsewhere in this project; assumed
# to seed python/numpy/tf RNGs — confirm against its definition.
set_deterministic(42)
|
|
|
|
|
|
-class TSHandler(object):
|
|
|
|
|
|
+class TCNHandler(object):
|
|
def __init__(self, logger, args):
|
|
def __init__(self, logger, args):
|
|
self.logger = logger
|
|
self.logger = logger
|
|
self.opt = argparse.Namespace(**args)
|
|
self.opt = argparse.Namespace(**args)
|
|
@@ -37,21 +37,55 @@ class TSHandler(object):
|
|
self.logger.info("加载模型权重失败:{}".format(e.args))
|
|
self.logger.info("加载模型权重失败:{}".format(e.args))
|
|
|
|
|
|
@staticmethod
def get_keras_model(opt, time_series=1):
    """Build and compile a TCN-style forecasting model.

    Args:
        opt: namespace whose ``Model`` dict supplies ``time_step``,
            ``input_size``, ``lambda_value_2`` and (optionally)
            ``hidden_size`` / ``dropout_rate``.
        time_series: number of concatenated windows in the input; the
            input length is ``time_step * time_series`` while the output
            covers a single ``time_step`` window.

    Returns:
        A compiled ``tf.keras.Model`` mapping ``(batch, time_steps,
        input_size)`` to ``(batch, output_steps)``.
    """
    # Loss and hyper-parameters.
    loss = region_loss(opt)
    time_steps = opt.Model['time_step'] * time_series   # input length (e.g. 16*3)
    output_steps = opt.Model['time_step']               # predicted window length
    hidden_size = opt.Model.get('hidden_size', 64)
    l2_reg = regularizers.l2(opt.Model['lambda_value_2'])
    dropout_rate = opt.Model.get('dropout_rate', 0.2)

    # Input layer.
    nwp_input = Input(shape=(time_steps, opt.Model['input_size']), name='nwp')

    # Initial causal convolution: lifts the channel count to hidden_size so
    # every TCN block below can use an identity residual (channels match).
    x = Conv1D(filters=hidden_size, kernel_size=3, strides=1,
               padding='causal', activation='relu',
               kernel_regularizer=l2_reg)(nwp_input)

    # Stacked TCN blocks with exponentially growing dilation.
    for dilation in (1, 2, 4, 8):
        # BUGFIX: the residual must be the *input* of the block. The previous
        # version built `skip` from the conv output, so the "residual" added a
        # 1x1-conv transform of the output to itself instead of bypassing the
        # block — no identity path, defeating the point of the residual.
        block_in = x
        x = Conv1D(filters=hidden_size, kernel_size=3, strides=1,
                   padding='causal', activation='relu',
                   dilation_rate=dilation,
                   kernel_regularizer=l2_reg)(x)
        x = LayerNormalization()(x)
        x = tf.keras.activations.relu(x)
        x = Dropout(dropout_rate)(x)
        x = x + block_in  # identity residual; channel counts already match

    # Slice the central output_steps time steps of the sequence.
    # NOTE(review): with causal padding each position only sees the past, so
    # the centre slice is an approximation of "the middle window" — confirm
    # this matches how targets are aligned upstream.
    start_idx = (time_steps - output_steps) // 2
    x = x[:, start_idx:start_idx + output_steps, :]

    # Output head.
    # BUGFIX: Dense(output_steps) applied directly to the rank-3 tensor
    # (batch, output_steps, hidden_size) yields (batch, output_steps,
    # output_steps). The pre-TCN model emitted rank-2 (batch, time_step),
    # which is what the loss/targets expect — flatten first.
    x = Flatten()(x)  # (batch, output_steps * hidden_size)
    output = Dense(output_steps, activation=None, name='cdq_output')(x)

    # Assemble and compile.
    model = Model(nwp_input, output)
    adam = optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999,
                           epsilon=1e-7, amsgrad=True)
    model.compile(loss=loss, optimizer=adam)
    return model
|