import random
import sys
from pathlib import Path
from typing import Any, Generator

import pandas as pd

import Project_Model.Libs.BPE as BPE
from Project_Model.Libs.Batch.Enums.TaskType import TaskType
from Project_Model.Libs.BPE import SpecialToken
from Project_Model.Libs.Transformer import SpannedMasker, truncate_rdf_list, normalize_sequence
from TokenCompletation import TokenCompletationTransformer

MAX_LENGTH = 128

class Batcher:

    def __init__(self, dataset_path: Path, tokenizer: BPE.TokeNanoCore, masker: SpannedMasker, seed: int = 0) -> None:
        # Each dataset row holds an ABSTRACT and its RDF TRIPLE list.
        # Tasks built from a row:
        #   rdf2text:   X: TRIPLE,   Y: ABSTRACT
        #   text2rdf:   X: ABSTRACT, Y: TRIPLE
        #   masking (via the masker): X: masked triples, Y: original triples
        #   completion: X: TRIPLE subset, Y: the related TRIPLE subset
        # All sequences are truncated/padded to MAX_LENGTH during
        # normalization; the span masker and the truncator are injected here.
        self._dataset_path = dataset_path
        self._tokenizer = tokenizer
        self._masker = masker
        self._seed = seed
        # self._token_completation = TokenCompletationTransformer(sotl, eos)
        self._completation_task_token_truncator = truncate_rdf_list

    def batch(self, batch_size: int) -> Generator[tuple[list[list[int]], list[list[int]], list[list[int]], list[list[int]], TaskType], Any, Any]:
        """
        Yields (X, Y, padding_X, padding_Y, task_type), once per task type,
        for every chunk of batch_size rows.
        """
        rng = random.Random(self._seed)
        self._masker.reseed(self._seed)
        for batch in pd.read_csv(self._dataset_path, chunksize=batch_size):
            # Encode both columns to token ids.
            tokenized_batch = pd.DataFrame()
            tokenized_batch[["Abstract", "RDFs"]] = (
                batch[["Abstract", "RDFs"]]
                .map(lambda t: self._tokenizer.encode(t))
            )
            X, Y, padding_X, padding_Y = self.__rdf2txt_transformation(tokenized_batch)
            yield X, Y, padding_X, padding_Y, TaskType.RDF2TXT
            X, Y, padding_X, padding_Y = self.__txt2rdf_transformation(tokenized_batch)
            yield X, Y, padding_X, padding_Y, TaskType.TEXT2RDF
            X, Y, padding_X, padding_Y = self.__masking_transformation(tokenized_batch)
            yield X, Y, padding_X, padding_Y, TaskType.MASKING
            X, Y, padding_X, padding_Y = self.__token_completation_task(tokenized_batch, rng.randint(0, sys.maxsize))
            yield X, Y, padding_X, padding_Y, TaskType.COMPLETATION
            # output = pd.concat([rdf2txt_batch, txt2rdf_batch, completation_batch], ignore_index=True)
            # output = output.sample(frac=1).reset_index(drop=True)
            # self.decode_debug(output)
            # yield output

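    # A minimal consumption sketch (illustrative, not part of the class; it
    # assumes normalize_sequence pads/truncates every row to MAX_LENGTH):
    #
    #   for X, Y, padding_X, padding_Y, task in batcher.batch(8):
    #       assert len(X) == len(Y) == len(padding_X) == len(padding_Y)
    #       assert all(len(x) == MAX_LENGTH for x in X)
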
    def __random_subset_rdfs(self, batch: pd.DataFrame, seed=0):
        # WIP: split the raw RDF string into one entry per triple.
        # Note: str.split only works on the raw text, so this must run
        # before tokenization.
        rng = random.Random(seed)

        def to_list(x):
            return x.split(SpecialToken.START_TRIPLE.value)[1:]

        batch["RDFs"] = batch["RDFs"].map(to_list)

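    # A possible completion of the WIP above (a sketch only, assuming the
    # intent is to keep a random half of the triples per row):
    #
    #   def subset(triples: list[str]) -> list[str]:
    #       k = max(1, len(triples) // 2)
    #       return rng.sample(triples, k)
    #
    #   batch["RDFs"] = batch["RDFs"].map(subset)
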
    def decode_debug(self, batch: pd.DataFrame):
        decoded = pd.DataFrame()
        decoded[["X", "Y"]] = (
            batch[["X", "Y"]]
            .map(lambda t: self._tokenizer.decode(t))
        )
        print(decoded)

    def __normalization(self, X: list[list[int]], Y: list[list[int]]) -> tuple[list[list[int]], list[list[int]], list[list[int]], list[list[int]]]:
        pad_token = self._tokenizer.encode(SpecialToken.PAD.value)[0]
        end_token = self._tokenizer.encode(SpecialToken.END_OF_SEQUENCE.value)[0]
        out_X = []
        padding_X = []
        out_Y = []
        padding_Y = []
        for x in X:
            out_x, padding_x = normalize_sequence(x, MAX_LENGTH, pad_token, end_token, True)
            out_X.append(out_x)
            padding_X.append(padding_x)
        for y in Y:
            out_y, padding_y = normalize_sequence(y, MAX_LENGTH, pad_token, end_token, True)
            out_Y.append(out_y)
            padding_Y.append(padding_y)
        return out_X, out_Y, padding_X, padding_Y

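    # Assumed contract of normalize_sequence (a sketch; the authoritative
    # definition lives in Project_Model.Libs.Transformer): truncate to the
    # given length, terminate with end_token, pad with pad_token, and return
    # the padded sequence plus its padding mask, e.g. something like
    #
    #   normalize_sequence([5, 6], 4, pad_token, end_token, True)
    #   -> ([5, 6, end_token, pad_token], mask)  # exact mask layout assumed
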
    def __rdf2txt_transformation(self, batch: pd.DataFrame):
        # Prefix each RDF input with the RDF_TO_TEXT task token.
        task_token = self._tokenizer.encode(SpecialToken.RDF_TO_TEXT.value)
        out = batch.rename(columns={"RDFs": "X", "Abstract": "Y"})[["X", "Y"]]
        out["X"] = [task_token + x for x in out["X"]]
        return self.__normalization(out["X"].to_list(), out["Y"].to_list())

    def __txt2rdf_transformation(self, batch: pd.DataFrame):
        # Prefix each abstract with the TEXT_TO_RDF task token.
        task_token = self._tokenizer.encode(SpecialToken.TEXT_TO_RDF.value)
        out = batch.rename(columns={"Abstract": "X", "RDFs": "Y"})[["X", "Y"]]
        out["X"] = [task_token + x for x in out["X"]]
        return self.__normalization(out["X"].to_list(), out["Y"].to_list())

    def __masking_transformation(self, batch: pd.DataFrame):
        X = []
        Y = []
        for rdf in batch["RDFs"]:
            x, y = self._masker.mask_sequence(rdf)
            X.append(x)
            Y.append(y)
        return self.__normalization(X, Y)

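    # Assumption about SpannedMasker.mask_sequence: it corrupts random spans
    # of the tokenized triples and returns (masked_input, reconstruction
    # target), in the spirit of T5-style span corruption; reseed() in batch()
    # keeps the corruption reproducible per run.
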
    def __token_completation_task(self, batch: pd.DataFrame, minibatch_seed: int):
        continue_triple_token = self._tokenizer.encode(SpecialToken.CONTINUE_RDF.value)[0]
        eot = self._tokenizer.encode(SpecialToken.END_TRIPLE.value)[0]
        X = []
        Y = []
        for rdf in batch["RDFs"]:
            x, y = self._completation_task_token_truncator(rdf, 0.5, continue_triple_token, eot, minibatch_seed)
            X.append(x)
            Y.append(y)
        return self.__normalization(X, Y)

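# Assumption about truncate_rdf_list (a sketch of the expected behaviour, not
# the authoritative contract): given the tokenized triples, a 0.5 split ratio,
# the CONTINUE_RDF id and the END_TRIPLE id, it cuts the token list on a
# triple boundary and returns (prefix ending in CONTINUE_RDF, remaining
# triples); minibatch_seed keeps the cut point reproducible.
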
if __name__ == "__main__":
    DATASET_PATH = Path("Assets/Dataset/Tmp/rdf_text.csv")
    VOCABULARY_PATH = Path("Assets/Dataset/Tmp/trimmed.json")
    VOCABULARY = BPE.load_nanos_vocabulary(VOCABULARY_PATH)
    SPECIAL_LIST = BPE.default_special_tokens()
    TOKENANO = BPE.TokeNanoCore(VOCABULARY, SPECIAL_LIST)
    SPECIAL_TOKENS: set[int] = set(TOKENANO.encode(" ".join(SPECIAL_LIST)))
    MASKER = SpannedMasker(TOKENANO.vocabulary_size, SPECIAL_TOKENS)
    sample_text = "<ABS>Cactus Flower is a 1969 American screwball comedy film directed by Gene Saks, and starring Walter Matthau, Ingrid Bergman and Goldie Hawn, who won an Academy Award for her performance. The screenplay was adapted by I. A. L. Diamond from the 1965 Broadway play of the same title written by Abe Burrows, which, in turn, is based on the French play Fleur de cactus by Pierre Barillet and Jean-Pierre Gredy. Cactus Flower was the ninth highest-grossing film of 1969."
    print(TOKENANO.encode(sample_text))
    batcher = Batcher(DATASET_PATH, TOKENANO, MASKER)
    for batch in batcher.batch(8):
        print(batch)