From 0158db2dce6ca4b7f2d991657dd8023a144f2260 Mon Sep 17 00:00:00 2001
From: Christian Risi <75698846+CnF-Gris@users.noreply.github.com>
Date: Thu, 9 Oct 2025 11:37:21 +0200
Subject: [PATCH] Fixed a bug where I took encoder embeddings rather than encoder output

---
 Project_Model/Libs/Transformer/Models/TrainingModel.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Project_Model/Libs/Transformer/Models/TrainingModel.py b/Project_Model/Libs/Transformer/Models/TrainingModel.py
index 2a72717..c88ba6c 100644
--- a/Project_Model/Libs/Transformer/Models/TrainingModel.py
+++ b/Project_Model/Libs/Transformer/Models/TrainingModel.py
@@ -38,7 +38,7 @@ class TrainingModel(torch.nn.Module):
         self.__detokener = DeToken(latent_space, vocabulary_size)
 
     def forward(self, args: tuple[torch.Tensor, torch.Tensor, torch.Tensor]):
-
+
         encoder_embedder_input, padding_tensor, decoder_embedder_input = args
 
         encoder_tensor = self.__encoder_embedder(encoder_embedder_input)
@@ -47,7 +47,7 @@ class TrainingModel(torch.nn.Module):
         encoder_output, _ = self.__encoder((encoder_tensor, padding_tensor))
 
         decoder_output, _, _, _ = self.__decoder(
-            (decoder_tensor, encoder_tensor, encoder_tensor, None)
+            (decoder_tensor, encoder_output, encoder_output, None)
         )
 
         logits: torch.Tensor = self.__detokener(decoder_output)
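
Note (not part of the patch): below is a minimal, self-contained sketch of the same fix using stock torch.nn Transformer modules rather than the repo's custom Encoder/Decoder/DeToken classes, which are not shown here. The class and attribute names (TinySeq2Seq, src_embed, tgt_embed, to_logits) are illustrative assumptions. The point it demonstrates is the one the commit relies on: the decoder's cross-attention must consume the encoder output (the "memory"), not the raw token embeddings that were fed into the encoder; otherwise the entire encoder stack is silently bypassed.

# Minimal sketch under the assumptions above; not the repo's actual implementation.
import torch
import torch.nn as nn


class TinySeq2Seq(nn.Module):
    def __init__(self, vocab_size: int = 100, d_model: int = 32):
        super().__init__()
        self.src_embed = nn.Embedding(vocab_size, d_model)
        self.tgt_embed = nn.Embedding(vocab_size, d_model)
        enc_layer = nn.TransformerEncoderLayer(d_model, nhead=4, batch_first=True)
        dec_layer = nn.TransformerDecoderLayer(d_model, nhead=4, batch_first=True)
        self.encoder = nn.TransformerEncoder(enc_layer, num_layers=2)
        self.decoder = nn.TransformerDecoder(dec_layer, num_layers=2)
        self.to_logits = nn.Linear(d_model, vocab_size)

    def forward(self, src: torch.Tensor, tgt: torch.Tensor) -> torch.Tensor:
        src_emb = self.src_embed(src)   # encoder *embeddings* (pre-encoder)
        tgt_emb = self.tgt_embed(tgt)
        memory = self.encoder(src_emb)  # encoder *output* (post-encoder)
        # The bug fixed by the patch: cross-attention keys/values must come from
        # `memory`, not `src_emb`. Passing the embeddings would make the decoder
        # attend to unprocessed tokens and ignore everything the encoder computed.
        # (A causal target mask is omitted here for brevity.)
        dec_out = self.decoder(tgt_emb, memory)
        return self.to_logits(dec_out)


if __name__ == "__main__":
    model = TinySeq2Seq()
    src = torch.randint(0, 100, (2, 10))
    tgt = torch.randint(0, 100, (2, 7))
    print(model(src, tgt).shape)  # torch.Size([2, 7, 100])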