Building a Transformer from Scratch with PyTorch: Principles, Code, Training, and Prediction
This article implements a Transformer model from scratch in PyTorch, covering the core components: positional encoding, multi-head attention, and the feed-forward network. By building a complete Encoder-Decoder architecture, it walks through dataset generation, model definition, the training loop, and prediction. The code is annotated in detail to help you understand both how the Transformer works and the engineering details of putting it into practice.
First, make sure PyTorch and the other required libraries are installed.
pip install torch torchvision matplotlib numpy pandas
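As a quick sanity check (a minimal sketch, not part of the original scripts), you can verify the installation and see whether a GPU is available:

import torch

print(torch.__version__)          # installed PyTorch version
print(torch.cuda.is_available())  # True if a CUDA GPU can be used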
We will create a simple toy dataset for demonstration purposes: a minimal language-modeling task in which the model predicts the next digit of a number sequence.
generate_data.py

import numpy as np

def generate_sequence(length, vocab_size):
    return np.random.randint(vocab_size, size=length)

def generate_dataset(num_samples, seq_length, vocab_size):
    X = []
    y = []
    for _ in range(num_samples):
        sequence = generate_sequence(seq_length + 1, vocab_size)
        X.append(sequence[:-1])
        y.append(sequence[-1])
    return np.array(X), np.array(y)

# Parameters
num_samples = 1000
seq_length = 10
vocab_size = 10

# Generate dataset
X_train, y_train = generate_dataset(num_samples, seq_length, vocab_size)
X_test, y_test = generate_dataset(num_samples // 10, seq_length, vocab_size)

# Save dataset
np.save('X_train.npy', X_train)
np.save('y_train.npy', y_train)
np.save('X_test.npy', X_test)
np.save('y_test.npy', y_test)
Run the script above to generate the dataset:
python generate_data.py
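To confirm the generated data looks as expected (a small sketch using the same file names as above), load the saved arrays and inspect their shapes:

import numpy as np

X_train = np.load('X_train.npy')
y_train = np.load('y_train.npy')
print(X_train.shape, y_train.shape)   # expected: (1000, 10) (1000,)
print(X_train[0], '->', y_train[0])   # one input sequence and its target digit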
We will implement the basic building blocks from scratch: Positional Encoding, Multi-Head Attention, and the Feed-Forward Network.
components.py

import torch
import torch.nn as nn
import math

class PositionalEncoding(nn.Module):
    def __init__(self, d_model, max_len=5000):
        super(PositionalEncoding, self).__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)  # (1, max_len, d_model), batch-first to match the rest of the model
        self.register_buffer('pe', pe)

    def forward(self, x):
        # x: (batch_size, seq_length, d_model)
        x = x + self.pe[:, :x.size(1), :]
        return x
class MultiHeadAttention(nn.Module):
    def __init__(self, embed_size, heads):
        super(MultiHeadAttention, self).__init__()
        self.embed_size = embed_size
        self.heads = heads
        self.head_dim = embed_size // heads

        assert self.head_dim * heads == embed_size, "embed_size must be divisible by heads"

        self.values = nn.Linear(self.head_dim, self.head_dim, bias=False)
        self.keys = nn.Linear(self.head_dim, self.head_dim, bias=False)
        self.queries = nn.Linear(self.head_dim, self.head_dim, bias=False)
        self.fc_out = nn.Linear(heads * self.head_dim, embed_size)

    def forward(self, values, keys, query, mask):
        N = query.shape[0]
        value_len, key_len, query_len = values.shape[1], keys.shape[1], query.shape[1]

        # Split the embedding into self.heads pieces of size head_dim
        values = values.reshape(N, value_len, self.heads, self.head_dim)
        keys = keys.reshape(N, key_len, self.heads, self.head_dim)
        queries = query.reshape(N, query_len, self.heads, self.head_dim)

        values = self.values(values)
        keys = self.keys(keys)
        queries = self.queries(queries)

        # energy: (N, heads, query_len, key_len) -- attention scores before softmax
        energy = torch.einsum("nqhd,nkhd->nhqk", [queries, keys])
        if mask is not None:
            energy = energy.masked_fill(mask == 0, float("-1e20"))

        attention = torch.softmax(energy / (self.embed_size ** (1 / 2)), dim=3)

        # Weighted sum of values, then concatenate the heads back together
        out = torch.einsum("nhql,nlhd->nqhd", [attention, values]).reshape(
            N, query_len, self.heads * self.head_dim
        )
        out = self.fc_out(out)
        return out

class FeedForward(nn.Module):
    def __init__(self, embed_size, expansion_factor):
        super(FeedForward, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(embed_size, embed_size * expansion_factor),
            nn.ReLU(),
            nn.Linear(embed_size * expansion_factor, embed_size),
        )

    def forward(self, x):
        return self.model(x)
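A quick way to check these components (a minimal sketch, not part of the original files) is to push random tensors through them and verify that the output shapes match the input shapes:

import torch
from components import PositionalEncoding, MultiHeadAttention, FeedForward

x = torch.randn(2, 10, 256)  # (batch_size, seq_length, embed_size)
print(PositionalEncoding(256)(x).shape)                  # torch.Size([2, 10, 256])
print(MultiHeadAttention(256, 8)(x, x, x, None).shape)   # torch.Size([2, 10, 256])
print(FeedForward(256, 4)(x).shape)                      # torch.Size([2, 10, 256])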
Next, we build the Encoder and Decoder layers on top of these basic components.
encoder_decoder.py

import torch
import torch.nn as nn
from components import PositionalEncoding, MultiHeadAttention, FeedForward

class TransformerBlock(nn.Module):
    def __init__(self, embed_size, heads, dropout, forward_expansion):
        super(TransformerBlock, self).__init__()
        self.attention = MultiHeadAttention(embed_size, heads)
        self.norm1 = nn.LayerNorm(embed_size)
        self.norm2 = nn.LayerNorm(embed_size)
        self.feed_forward = FeedForward(embed_size, forward_expansion)
        self.dropout = nn.Dropout(dropout)

    def forward(self, value, key, query, mask):
        attention = self.attention(value, key, query, mask)
        # Add skip connection, run through normalization and finally dropout
        x = self.dropout(self.norm1(attention + query))
        forward = self.feed_forward(x)
        out = self.dropout(self.norm2(forward + x))
        return out
class Encoder(nn.Module):
    def __init__(
        self,
        src_vocab_size,
        embed_size,
        num_layers,
        heads,
        device,
        forward_expansion,
        dropout,
        max_length,
    ):
        super(Encoder, self).__init__()
        self.embed_size = embed_size
        self.device = device
        self.word_embedding = nn.Embedding(src_vocab_size, embed_size)
        self.position_embedding = PositionalEncoding(embed_size, max_length)
        self.layers = nn.ModuleList(
            [
                TransformerBlock(
                    embed_size,
                    heads,
                    dropout=dropout,
                    forward_expansion=forward_expansion,
                )
                for _ in range(num_layers)
            ]
        )
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask):
        # x: (batch_size, seq_length) of token indices
        out = self.dropout(self.position_embedding(self.word_embedding(x)))
        for layer in self.layers:
            out = layer(out, out, out, mask)
        return out
class DecoderBlock(nn.Module):
    def __init__(self, embed_size, heads, forward_expansion, dropout, device):
        super(DecoderBlock, self).__init__()
        self.attention = MultiHeadAttention(embed_size, heads)
        self.norm = nn.LayerNorm(embed_size)
        self.transformer_block = TransformerBlock(
            embed_size, heads, dropout, forward_expansion
        )
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, value, key, src_mask, trg_mask):
        # Masked self-attention over the target sequence
        attention = self.attention(x, x, x, trg_mask)
        query = self.dropout(self.norm(attention + x))
        # Cross-attention: queries from the decoder, keys/values from the encoder
        out = self.transformer_block(value, key, query, src_mask)
        return out

class Decoder(nn.Module):
    def __init__(
        self,
        trg_vocab_size,
        embed_size,
        num_layers,
        heads,
        forward_expansion,
        dropout,
        device,
        max_length,
    ):
        super(Decoder, self).__init__()
        self.device = device
        self.word_embedding = nn.Embedding(trg_vocab_size, embed_size)
        self.position_embedding = PositionalEncoding(embed_size, max_length)
        self.layers = nn.ModuleList(
            [
                DecoderBlock(embed_size, heads, forward_expansion, dropout, device)
                for _ in range(num_layers)
            ]
        )
        self.fc_out = nn.Linear(embed_size, trg_vocab_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, enc_out, src_mask, trg_mask):
        # x: (batch_size, trg_seq_length) of token indices
        x = self.dropout(self.position_embedding(self.word_embedding(x)))
        for layer in self.layers:
            x = layer(x, enc_out, enc_out, src_mask, trg_mask)
        out = self.fc_out(x)
        return out
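To see how the two stacks fit together (a sketch with arbitrary hyperparameters and no masks, not part of the original files), you can run random token IDs through an Encoder and a Decoder and check the shapes:

import torch
from encoder_decoder import Encoder, Decoder

device = "cpu"
enc = Encoder(10, 256, 2, 8, device, 4, 0.1, 100)
dec = Decoder(10, 256, 2, 8, 4, 0.1, device, 100)

src = torch.randint(0, 10, (2, 10))  # (batch_size, src_seq_length)
trg = torch.randint(0, 10, (2, 1))   # (batch_size, trg_seq_length)
enc_out = enc(src, None)             # (2, 10, 256): contextualized source representations
out = dec(trg, enc_out, None, None)  # (2, 1, 10): logits over the target vocabulary
print(enc_out.shape, out.shape)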
We now combine the Encoder and Decoder layers into the complete Transformer model.
transformer_model.py

import torch
import torch.nn as nn
from encoder_decoder import Encoder, Decoder
class Transformer(nn.Module):
    def __init__(
        self,
        src_vocab_size,
        trg_vocab_size,
        src_pad_idx,
        trg_pad_idx,
        embed_size=256,
        num_layers=6,
        forward_expansion=4,
        heads=8,
        dropout=0,
        device="cpu",
        max_length=100,
    ):
        super(Transformer, self).__init__()
        self.encoder = Encoder(
            src_vocab_size,
            embed_size,
            num_layers,
            heads,
            device,
            forward_expansion,
            dropout,
            max_length,
        )
        self.decoder = Decoder(
            trg_vocab_size,
            embed_size,
            num_layers,
            heads,
            forward_expansion,
            dropout,
            device,
            max_length,
        )
        self.src_pad_idx = src_pad_idx
        self.trg_pad_idx = trg_pad_idx
        self.device = device

    def make_src_mask(self, src):
        # (N, 1, 1, src_len): 1 where the token is not padding
        src_mask = (src != self.src_pad_idx).unsqueeze(1).unsqueeze(2)
        return src_mask.to(self.device)

    def make_trg_mask(self, trg):
        # (N, 1, trg_len, trg_len): lower-triangular causal mask
        N, trg_len = trg.shape
        trg_mask = torch.tril(torch.ones((trg_len, trg_len))).expand(
            N, 1, trg_len, trg_len
        )
        return trg_mask.to(self.device)

    def forward(self, src, trg):
        src_mask = self.make_src_mask(src)
        trg_mask = self.make_trg_mask(trg)
        enc_src = self.encoder(src, src_mask)
        out = self.decoder(trg, enc_src, src_mask, trg_mask)
        return out
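Before training, it helps to run one forward pass with random token IDs (a minimal smoke-test sketch with made-up shapes, not part of the original scripts) to confirm that all the pieces fit together:

import torch
from transformer_model import Transformer

device = torch.device("cpu")
model = Transformer(src_vocab_size=10, trg_vocab_size=10, src_pad_idx=0, trg_pad_idx=0,
                    num_layers=2, device=device).to(device)

src = torch.randint(0, 10, (16, 10)).to(device)  # (batch_size, src_seq_length)
trg = torch.randint(0, 10, (16, 1)).to(device)   # (batch_size, trg_seq_length)
out = model(src, trg)
print(out.shape)  # torch.Size([16, 1, 10]): one distribution over next digits per sample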
Next, we write the training code for the Transformer model.
train_transformer.py

import torch
import torch.nn as nn
import torch.optim as optim
from transformer_model import Transformer
from components import PositionalEncoding, MultiHeadAttention, FeedForward
from encoder_decoder import TransformerBlock, Encoder, Decoder
import numpy as np
import math
# Load data
X_train = np.load('X_train.npy')
y_train = np.load('y_train.npy')
X_test = np.load('X_test.npy')
y_test = np.load('y_test.npy')
# Convert to PyTorch tensors
X_train_tensor = torch.from_numpy(X_train).long()
y_train_tensor = torch.from_numpy(y_train).long()
X_test_tensor = torch.from_numpy(X_test).long()
y_test_tensor = torch.from_numpy(y_test).long()
# Hyperparameters
src_vocab_size = 10 # Size of vocabulary
trg_vocab_size = 10 # Size of vocabulary
embed_size = 256 # Embedding dimension
num_layers = 2 # Number of layers in the Transformer
heads = 8 # Number of heads in the multiheadattention models
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
forward_expansion = 4
dropout = 0.1 # Dropout rate
max_length = 100 # Maximum length of sequences
lr = 0.0001 # Learning rate
batch_size = 16
epochs = 10  # number of training epochs (value assumed; tune as needed)

model = Transformer(
    src_vocab_size,
    trg_vocab_size,
    src_pad_idx=0,
    trg_pad_idx=0,
    embed_size=embed_size,
    num_layers=num_layers,
    forward_expansion=forward_expansion,
    heads=heads,
    dropout=dropout,
    device=device,
    max_length=max_length,
).to(device)

# Every digit 0-9 is a real token in this toy task, so no index is ignored in the loss
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=lr)

for epoch in range(epochs):
    model.train()
    total_loss = 0
    num_batches = len(X_train_tensor) // batch_size
    for i in range(num_batches):
        start_idx = i * batch_size
        end_idx = (i + 1) * batch_size
        src = X_train_tensor[start_idx:end_idx].to(device)               # (batch_size, seq_length)
        trg = y_train_tensor[start_idx:end_idx].unsqueeze(1).to(device)  # (batch_size, 1)
        optimizer.zero_grad()
        output = model(src, trg)      # (batch_size, 1, trg_vocab_size)
        output = output.squeeze(1)    # (batch_size, trg_vocab_size)
        loss = criterion(output, trg.squeeze(1))
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    avg_loss = total_loss / num_batches
    print(f"Epoch {epoch + 1}/{epochs}, Loss: {avg_loss:.4f}")

torch.save(model.state_dict(), 'model.pth')
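After training, one simple way to gauge the model (a sketch that reuses the test tensors already loaded above and mirrors the prediction flow in the next script, where the source sequence is also fed to the decoder) is next-token accuracy on the held-out set:

model.eval()
with torch.no_grad():
    src = X_test_tensor.to(device)          # (num_test, seq_length)
    logits = model(src, src)                # (num_test, seq_length, trg_vocab_size)
    pred = logits[:, -1, :].argmax(dim=-1)  # prediction at the last position
    accuracy = (pred == y_test_tensor.to(device)).float().mean().item()
print(f"Test accuracy: {accuracy:.3f}")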
Finally, we write prediction code to test the trained Transformer model.
predict_transformer.py

import torch
from transformer_model import Transformer
import numpy as np
# Load trained model
src_vocab_size = 10 # Size of vocabulary
trg_vocab_size = 10 # Size of vocabulary
embed_size = 256 # Embedding dimension
num_layers = 2 # Number of layers in the Transformer
heads = 8 # Number of heads in the multiheadattention models
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
forward_expansion = 4
dropout = 0.1 # Dropout rate
max_length = 100 # Maximum length of sequences
model = Transformer(
    src_vocab_size,
    trg_vocab_size,
    src_pad_idx=0,
    trg_pad_idx=0,
    embed_size=embed_size,
    num_layers=num_layers,
    forward_expansion=forward_expansion,
    heads=heads,
    dropout=dropout,
    device=device,
    max_length=max_length,
).to(device)
model.load_state_dict(torch.load('model.pth'))
model.eval()
# Sample input
sample_input = np.array([[3, 7, 2, 5, 8, 9, 1, 4, 6]])  # Shape: (batch_size, seq_length) = (1, 9)
sample_input_tensor = torch.from_numpy(sample_input).long().to(device)
# Predict next token
with torch.no_grad():
    src_mask = model.make_src_mask(sample_input_tensor)
    trg_mask = model.make_trg_mask(sample_input_tensor)
    output = model(sample_input_tensor, sample_input_tensor)  # masks are rebuilt inside forward()
    _, predicted = torch.max(output, dim=-1)   # (1, seq_length)
    next_token = predicted[0, -1].item()       # prediction at the last position

print(f"Predicted next token: {next_token}")
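If you want to generate several digits instead of a single next token, a simple greedy loop can append each prediction to the input and repeat. The helper below (greedy_generate is a hypothetical name, not part of the original scripts) sketches the idea:

def greedy_generate(model, seed, steps, device):
    # Repeatedly predict the next digit and append it to the sequence (greedy decoding)
    tokens = list(seed)
    model.eval()
    with torch.no_grad():
        for _ in range(steps):
            inp = torch.tensor([tokens], dtype=torch.long, device=device)  # (1, current_length)
            logits = model(inp, inp)                     # (1, current_length, vocab_size)
            next_token = logits[0, -1].argmax().item()   # greedy choice at the last position
            tokens.append(next_token)
    return tokens

print(greedy_generate(model, [3, 7, 2, 5, 8], steps=5, device=device))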
We have built a simple Transformer model from scratch and trained and tested it on a toy digit-sequence dataset. The relevant code files are: generate_data.py, components.py, encoder_decoder.py, transformer_model.py, train_transformer.py, and predict_transformer.py. Working through these steps gives you a concrete understanding of the Transformer's internals: how self-attention works, how positional encoding injects order information into the sequence, and how the Encoder-Decoder architecture processes input and output sequences together.
