Pipeline fix; added a util to decode

Christian Risi
2025-10-09 13:24:48 +02:00
parent f3b83eda3d
commit aac7675b30
7 changed files with 78 additions and 29 deletions


@@ -41,18 +41,19 @@ class Decoder(nn.Module):
             torch.Tensor,
             torch.Tensor,
             torch.Tensor,
+            torch.Tensor,
             torch.Tensor
         ]
     ):  # -> tuple[torch.Tensor, ...]  # k_x = v_x, while x_q = x
         # WARNING: the args tuple is required so the layer can run inside nn.Sequential
-        x, k_x, v_x, padding_mask = args
+        x, k_x, v_x, src_padding_mask, tgt_padding_mask = args
         # build the causal attention mask
         attention_mask = get_causal_attention_mask(x.size(1))
         # 1) Masked self-attention
         MASKED_ATTENTION = self.__masked_attention(
-            x, x, x, key_padding_mask=padding_mask, attention_mask=attention_mask
+            x, x, x, key_padding_mask=tgt_padding_mask, attention_mask=attention_mask
         )
         # 2) Dropout
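
The hunk above calls get_causal_attention_mask, which is not part of this diff. A minimal sketch of what it plausibly returns, assuming the boolean-mask convention of torch.nn.MultiheadAttention (True marks positions a query must not attend to):

    import torch

    def get_causal_attention_mask(seq_len: int) -> torch.Tensor:
        # (seq_len, seq_len) boolean mask, True strictly above the diagonal,
        # so position i can attend only to positions j <= i.
        return torch.triu(torch.ones(seq_len, seq_len, dtype=torch.bool), diagonal=1)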
@@ -68,7 +69,7 @@ class Decoder(nn.Module):
         # 5) Encoder-decoder (cross) attention
         CROSS_ATTENTION = self.__cross_attention(
-            x, k_x, v_x, key_padding_mask=padding_mask
+            x, k_x, v_x, key_padding_mask=src_padding_mask
         )
         # 6) Dropout
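
Splitting the single padding_mask in two is the substance of the fix: in self-attention the keys come from the target sequence, so tgt_padding_mask applies, while in cross-attention the keys and values (k_x, v_x) come from the encoder output, so src_padding_mask applies. A sketch of how such masks are commonly built (PAD_ID and the example batches are hypothetical):

    import torch

    PAD_ID = 0  # hypothetical padding token id; the real value depends on the tokenizer

    def make_padding_mask(token_ids: torch.Tensor) -> torch.Tensor:
        # (batch, seq_len) boolean mask, True where the token is padding,
        # matching the key_padding_mask convention of nn.MultiheadAttention.
        return token_ids == PAD_ID

    src_ids = torch.tensor([[5, 7, 9, 0, 0]])  # padded source batch
    tgt_ids = torch.tensor([[5, 7, 0, 0, 0]])  # padded target batch
    src_padding_mask = make_padding_mask(src_ids)  # for cross-attention keys/values
    tgt_padding_mask = make_padding_mask(tgt_ids)  # for masked self-attention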
@@ -96,7 +97,7 @@ class Decoder(nn.Module):
         # 12) Layer normalization
         x = self.__layer_norm_3(x)
-        return (x, k_x, v_x, padding_mask)
+        return (x, k_x, v_x, src_padding_mask, tgt_padding_mask)

 # use eval() to disable dropout, etc.
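
The WARNING comment and the widened return tuple go together: every layer consumes and returns the same 5-tuple, so a stack of them can be driven by nn.Sequential without a hand-written loop. A runnable sketch of the pattern, with a hypothetical pass-through stand-in for the Decoder layer:

    import torch
    import torch.nn as nn

    class PassThroughLayer(nn.Module):
        # Toy stand-in for the Decoder layer in this commit: it accepts and
        # returns the same 5-tuple, which is what makes nn.Sequential chaining work.
        def forward(self, args):
            x, k_x, v_x, src_padding_mask, tgt_padding_mask = args
            return (x, k_x, v_x, src_padding_mask, tgt_padding_mask)

    stack = nn.Sequential(*[PassThroughLayer() for _ in range(6)])

    x = torch.randn(2, 5, 16)          # (batch, tgt_len, d_model)
    k_x = v_x = torch.randn(2, 7, 16)  # encoder output, (batch, src_len, d_model)
    src_pad = torch.zeros(2, 7, dtype=torch.bool)
    tgt_pad = torch.zeros(2, 5, dtype=torch.bool)

    x, *_ = stack((x, k_x, v_x, src_pad, tgt_pad))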