from typing import Optional
import torch
import torch.nn as nn
from .FeedForwardNetwork import FeedForwardNetwork
from .TorchMultiHeadAttention import TorchMultiHeadAttention as MultiHeadAttention
from ..Utils.attention_mask import get_causal_attention_mask


# Shape convention: B = batch size, L (a.k.a. T) = target sequence length, E_D = embedding dimension
class Decoder(nn.Module):
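    """One Transformer decoder block (post-layer-norm variant).

    Applies masked self-attention, optional encoder-decoder cross-attention
    (skipped when decoder_only is True), and a position-wise feed-forward
    network, each followed by a residual connection and a LayerNorm.
    forward() takes and returns a single 6-tuple so that blocks can be
    chained with nn.Sequential.
    """
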
def __init__(
self,
embedding_dimension: int,
feed_forward_hidden_layer_dimension: int,
number_of_attention_heads: int,
) -> None:
super().__init__()
self.__masked_attention = MultiHeadAttention(
embedding_dimension, number_of_attention_heads, dropout=0.1
)
self.__layer_norm_1 = nn.LayerNorm(embedding_dimension)
self.__cross_attention = MultiHeadAttention(
embedding_dimension, number_of_attention_heads, dropout=0.1
)
self.__layer_norm_2 = nn.LayerNorm(embedding_dimension)
self.__dropout = nn.Dropout(0.1)
self.__feed_forward_network = FeedForwardNetwork(
embedding_dimension, feed_forward_hidden_layer_dimension
)
self.__layer_norm_3 = nn.LayerNorm(embedding_dimension)
def forward(
self,
args: tuple[
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
Optional[bool]
]
    ):  # returns the same 6-tuple it receives; k_x = v_x (encoder output), while the query input is x
        # WARNING: forward takes a single tuple argument so that blocks can be chained with nn.Sequential
if len(args) < 6:
            args = args + (False,)  # default decoder_only to False; note the comma: (False,) is a one-element tuple, (False) is just a bool
x, k_x, v_x, src_padding_mask, tgt_padding_mask, decoder_only = args
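        # x: (B, T, E) target embeddings; k_x = v_x: encoder output used as
        # keys and values; the two padding masks mark padded positions; when
        # decoder_only is True the cross-attention sub-layer is skipped.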
        # Build the causal attention mask
# TODO: create a prefix causal mask if needed
attention_mask = get_causal_attention_mask(x.size(1))
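        # Assumed contract of get_causal_attention_mask (defined in Utils, not
        # shown here): an (L, L) mask that lets position i attend only to
        # positions j <= i, in whatever format MultiHeadAttention expects
        # (e.g. boolean, or additive with -inf above the diagonal).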
# 1) Masked Attention
MASKED_ATTENTION = self.__masked_attention(
x, x, x, key_padding_mask=tgt_padding_mask, attention_mask=attention_mask
)
# 2) Dropout
# DROPPED_MASKED_ATTENTION = self.__dropout(MASKED_ATTENTION)
# del MASKED_ATTENTION
# 3) Residual Connection
x = x + MASKED_ATTENTION
del MASKED_ATTENTION
# 4) Layer Normalization
x = self.__layer_norm_1(x)
if not decoder_only:
# 5) Encoder–decoder (cross) attention
CROSS_ATTENTION = self.__cross_attention(
x, k_x, v_x, key_padding_mask=src_padding_mask
)
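            # No causal mask here: the keys/values come from the encoder, so
            # only the source padding mask applies.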
# 6) Dropout
# DROPPED_CROSS_ATTENTION = self.__dropout(CROSS_ATTENTION)
# del CROSS_ATTENTION
# 7) Residual Connection
x = x + CROSS_ATTENTION
del CROSS_ATTENTION
# 8) Layer Normalization
x = self.__layer_norm_2(x)
# 9) Position-wise feed-forward
FEED_FORWARD = self.__feed_forward_network(x)
# 10) Dropout
# DROPPED_FEED_FORWARD = self.__dropout(FEED_FORWARD)
# del FEED_FORWARD
# 11) Residual Connection
x = x + FEED_FORWARD
del FEED_FORWARD
# 12) Layer Normalization
x = self.__layer_norm_3(x)
return (x, k_x, v_x, src_padding_mask, tgt_padding_mask, decoder_only)


# NOTE: call model.eval() at inference time to disable dropout, etc.
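#
# Usage sketch (illustrative only: the dimensions, the bool-mask convention,
# and the stacking pattern are assumptions, not taken from this file):
#
#   layers = nn.Sequential(
#       *[
#           Decoder(
#               embedding_dimension=512,
#               feed_forward_hidden_layer_dimension=2048,
#               number_of_attention_heads=8,
#           )
#           for _ in range(6)
#       ]
#   )
#   layers.eval()  # disables dropout for inference
#
#   x = torch.randn(2, 10, 512)       # (B, T, E) target embeddings
#   memory = torch.randn(2, 12, 512)  # (B, S, E) encoder output (k_x = v_x)
#   src_pad = torch.zeros(2, 12, dtype=torch.bool)  # True marks PAD positions
#   tgt_pad = torch.zeros(2, 10, dtype=torch.bool)
#
#   out, *_ = layers((x, memory, memory, src_pad, tgt_pad, False))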