added padding_mask entry to decoder and encoder
@@ -36,11 +36,11 @@ class Decoder(nn.Module):
-    def forward(self, x, k_x, v_x, attention_mask) -> torch.Tensor:  # k_x = v_x . While x_q = x
+    def forward(self, x, k_x, v_x, padding_mask=None) -> torch.Tensor:  # k_x = v_x . While x_q = x
        # 1) Masked Attention
        MASKED_ATTENTION = self.__masked_attention(
-            x, x, x, attention_mask=attention_mask
+            x, x, x, key_padding_mask=padding_mask
        )

        # 2) Dropout
@@ -57,7 +57,7 @@ class Decoder(nn.Module):
        x = self.__layer_norm_1(x)

        # 5) Encoder–decoder (cross) attention
-        CROSS_ATTENTION = self.__cross_attention(x, k_x, v_x)
+        CROSS_ATTENTION = self.__cross_attention(x, k_x, v_x, key_padding_mask=padding_mask)

        # 6) Dropout
        DROPPED_CROSS_ATTENTION = self.__dropout(CROSS_ATTENTION)
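
For context, a minimal usage sketch of the new padding_mask argument. It assumes the private attention sub-layers wrap torch.nn.MultiheadAttention with batch_first=True, whose key_padding_mask is a bool tensor of shape (batch, seq_len) with True at padded positions; PAD_ID, the tensor shapes, and the commented-out Decoder call are illustrative assumptions, not part of this commit.

import torch

PAD_ID = 0                                              # hypothetical padding token id
token_ids = torch.tensor([[5, 9, 2, PAD_ID, PAD_ID]])   # (batch=1, seq_len=5), last two padded

# key_padding_mask semantics: True marks positions attention should ignore
padding_mask = token_ids.eq(PAD_ID)                     # bool tensor, shape (1, 5)

x = torch.randn(1, 5, 512)                              # decoder input (batch, seq_len, d_model)
enc_out = torch.randn(1, 5, 512)                        # encoder output used as k_x and v_x

# Same call shape as the cross-attention above, shown with a bare MultiheadAttention
mha = torch.nn.MultiheadAttention(embed_dim=512, num_heads=8, batch_first=True)
attn_out, _ = mha(x, enc_out, enc_out, key_padding_mask=padding_mask)

# With this commit, the decoder itself would be called along these lines:
# decoder = Decoder(...)                                # constructor args not shown in this diff
# out = decoder(x, enc_out, enc_out, padding_mask=padding_mask)

Note that in the cross-attention call the mask length must match the key (encoder-side) sequence length, which is why a single (batch, seq_len) mask works in this sketch where both sequences are length 5.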