From 9b0c57c2381a816ddc739f571673ccfb3bd64dff Mon Sep 17 00:00:00 2001 From: GassiGiuseppe Date: Wed, 8 Oct 2025 11:26:47 +0200 Subject: [PATCH] Batcher finished; note that it returns lists of token IDs, which later need to be embedded --- Project_Model/Libs/Batch/Classes/Batcher.py | 25 ++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/Project_Model/Libs/Batch/Classes/Batcher.py b/Project_Model/Libs/Batch/Classes/Batcher.py index 2f04405..6cf68d4 100644 --- a/Project_Model/Libs/Batch/Classes/Batcher.py +++ b/Project_Model/Libs/Batch/Classes/Batcher.py @@ -4,6 +4,8 @@ import Project_Model.Libs.BPE as BPE #from BPE import TokeNanoCore as Tokenizer from Scripts.Libs.CleaningPipeline.special_token import SpecialToken from Project_Model.Libs.Transformer.Classes.SpannedMasker import SpannedMasker +from TokenCompletation import TokenCompletationTransformer +from Project_Model.Libs.BPE.Enums.SpecialToken import SpecialToken import random class Batcher: @@ -20,8 +22,12 @@ class Batcher: self._tokenizer = tokenizer self._masker = masker + sotl = self._tokenizer.encode(SpecialToken.START_TRIPLE_LIST.value) + eos = self._tokenizer.encode(SpecialToken.END_OF_SEQUENCE.value) + self._token_completation = TokenCompletationTransformer(sotl,eos) + def get_batch(self): - for batch in pd.read_csv(self._dataset_path, chunksize= int(self._batch_size/3)): #now we support 3 task + for batch in pd.read_csv(self._dataset_path, chunksize= int(self._batch_size/4)): # now we support 4 tasks # each batch get 4 transformation for the 4 tasks and then shuffled # now a batch is ["Abstract"], ["Triples"] # tokenize the strings: @@ -35,8 +41,9 @@ class Batcher: rdf2txt_batch = self.__rdf2txt_transformation(tokenized_batch) txt2rdf_batch = self.__txt2rdf_transformation(tokenized_batch) mask_batch = self.__masking_trasformation(tokenized_batch) + completation = self.__token_completation_task(tokenized_batch) - output = 
pd.concat([rdf2txt_batch,txt2rdf_batch,mask_batch],ignore_index=True) + output = pd.concat([rdf2txt_batch,txt2rdf_batch,mask_batch,completation],ignore_index=True) output.sample(frac=1).reset_index(drop=True) yield output @@ -72,9 +79,16 @@ class Batcher: output[["X", "Y"]] = pd.DataFrame(xy_tuples.tolist(), index=batch.index) return output[["X", "Y"]] - + + def __token_completation_task(self, batch: pd.DataFrame): + xy_tuples = batch["RDFs"].apply(self._token_completation.get_completation_tuple) + output = batch.copy() + output[["X", "Y"]] = pd.DataFrame(xy_tuples.tolist(), index=batch.index) + return output[["X", "Y"]] + +""" DATASET_PATH = "Assets/Dataset/Tmp/rdf_text.csv" VOCABULARY_path = "Assets/Dataset/Tmp/trimmed.json" @@ -87,6 +101,7 @@ MASKER = SpannedMasker(TOKENANO.vocabulary_size,SPECIAL_TOKENS) prova = "Cactus Flower is a 1969 American screwball comedy film directed by Gene Saks, and starring Walter Matthau, Ingrid Bergman and Goldie Hawn, who won an Academy Award for her performance.The screenplay was adapted by I. A. L. Diamond from the 1965 Broadway play of the same title written by Abe Burrows, which, in turn, is based on the French play Fleur de cactus by Pierre Barillet and Jean-Pierre Gredy. Cactus Flower was the ninth highest-grossing film of 1969." print(TOKENANO.encode(prova)) -batcher = Batcher(DATASET_PATH,9,TOKENANO,MASKER) +batcher = Batcher(DATASET_PATH,8,TOKENANO,MASKER) for batch in batcher.get_batch(): - print(batch) \ No newline at end of file + print(batch) +""" \ No newline at end of file