import math

import torch
from torch import nn

from utils.Arg import Arg

arg = Arg()
class PositionalEncoding(nn.Module):
    """Standard sinusoidal positional encoding; expects inputs of shape (seq_len, batch, d_model)."""

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        # Precompute the sinusoidal table once and keep it as a (max_len, 1, d_model) buffer.
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)  # even feature indices: sine
        pe[:, 1::2] = torch.cos(position * div_term)  # odd feature indices: cosine
        pe = pe.unsqueeze(0).transpose(0, 1)
        self.register_buffer('pe', pe)

    def forward(self, x):
        # Add the encodings for the first seq_len positions, then apply dropout.
        x = x + self.pe[:x.size(0), :]
        return self.dropout(x)
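
# Minimal shape check for PositionalEncoding. The sizes below are illustrative
# assumptions, not values taken from the project's configuration.
def _positional_encoding_demo():
    d_model, seq_len, batch = 16, 10, 4
    pos_enc = PositionalEncoding(d_model, dropout=0.0)
    x = torch.zeros(seq_len, batch, d_model)  # (seq_len, batch, d_model) layout
    out = pos_enc(x)
    assert out.shape == (seq_len, batch, d_model)
    return out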
# Earlier LSTM + Transformer variant, kept here for reference:
# class TimeSeriesTransformer(nn.Module):
#     def __init__(self, input_dim, output_dim, d_model, nhead, num_layers, dropout=0.1):
#         super().__init__()
#         self.input_dim = input_dim
#         self.output_dim = output_dim
#         self.pos_enc = PositionalEncoding(d_model, dropout)
#         self.lstm = nn.LSTM(input_dim, d_model, batch_first=True)  # add an LSTM layer
#         self.transformer = nn.Transformer(d_model, nhead, num_layers, dropout=dropout)
#         self.act = nn.GELU()  # try GELU as the activation function
#         self.hidden_dim = 128  # dimension of the hidden layer
#         self.linear = nn.Linear(d_model, self.hidden_dim)
#         self.output_proj = nn.Linear(self.hidden_dim, output_dim)
#         self.layer_norm = nn.LayerNorm(d_model)
#
#     def forward(self, src, tgt):
#         src = self.pos_enc(src)  # positionally encode the inputs before the LSTM and Transformer
#         src, _ = self.lstm(src)
#         tgt = self.pos_enc(tgt)  # positionally encode the targets before the LSTM and Transformer
#         tgt, _ = self.lstm(tgt)
#
#         output = self.transformer(src, tgt)
#         output = self.layer_norm(output)
#         output = self.act(self.linear(output))
#         output = self.output_proj(output)
#         output = output.squeeze(0)
#         return output
class TimeSeriesTransformer(nn.Module):
    """Encoder-decoder Transformer for time-series forecasting.

    Expects src/tgt of shape (seq_len, batch, d_model); input_dim is kept for
    interface compatibility, but the incoming features must already be d_model wide.
    """

    def __init__(self, input_dim, output_dim, d_model, nhead, num_layers, dropout=0.1):
        super().__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.pos_enc = PositionalEncoding(d_model, dropout)
        # Use num_layers for both the encoder and the decoder stacks.
        self.transformer = nn.Transformer(
            d_model, nhead,
            num_encoder_layers=num_layers, num_decoder_layers=num_layers,
            dropout=dropout,
        )
        self.act = nn.GELU()  # try GELU as the activation function
        self.hidden_dim = 64  # reduced hidden-layer dimension
        self.linear = nn.Linear(d_model, self.hidden_dim)
        self.output_proj = nn.Linear(self.hidden_dim, output_dim)
        self.layer_norm = nn.LayerNorm(d_model)
        self.tanh = nn.Tanh()  # squash the outputs to (-1, 1)

    def forward(self, src, tgt):
        src = self.pos_enc(src)  # positionally encode the inputs before the Transformer
        tgt = self.pos_enc(tgt)  # positionally encode the targets before the Transformer
        output = self.transformer(src, tgt)
        output = self.layer_norm(output)
        output = self.act(self.linear(output))
        output = self.output_proj(output)
        output = self.tanh(output.squeeze(0))
        return output
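
# Minimal smoke test for the active model. The hyperparameters below are
# illustrative assumptions, not the project's configuration; run as a script.
if __name__ == "__main__":
    d_model, nhead, num_layers = 32, 4, 2
    model = TimeSeriesTransformer(input_dim=d_model, output_dim=1,
                                  d_model=d_model, nhead=nhead, num_layers=num_layers)
    # src/tgt follow the (seq_len, batch, d_model) layout expected by nn.Transformer.
    src = torch.randn(24, 8, d_model)
    tgt = torch.randn(12, 8, d_model)
    out = model(src, tgt)
    print(out.shape)  # expected: torch.Size([12, 8, 1])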