WIP training Batching

This commit is contained in:
GassiGiuseppe 2025-10-07 17:41:53 +02:00
parent 490edcfd53
commit b4ee8362a2
2 changed files with 60 additions and 10 deletions

View File

@@ -6,6 +6,7 @@ from ..Classes import NanoSocratesSpecial
from ..Utils import special_regex_maker
from ..Enums import TokenType
from ..Enums import SpecialToken
class TokeNanoCore:
@@ -44,6 +45,27 @@ class TokeNanoCore:
output.extend(self.__bpe_encoder.encode(piece))
return output
def encode_incomplete_string(self, corpus: str) -> list[int]:
    """Encode a string that does not end with a special token.

    A CORPUS_END sentinel is appended so the splitter sees a complete,
    terminated piece of text; the sentinel's token id is then stripped
    from the tail of the result before returning.
    """
    # NOTE(review): assumes CORPUS_END encodes to exactly one token id —
    # the trailing [:-1] slice removes a single element; confirm.
    padded = corpus + SpecialToken.CORPUS_END.value
    token_ids: list[int] = []
    for piece, piece_kind in self.__splitter.split_text(padded):
        if piece_kind == TokenType.SPECIAL:
            token_ids.extend(self.__special_encoder.encode(piece))
        elif piece_kind == TokenType.BPE:
            token_ids.extend(self.__bpe_encoder.encode(piece))
    # Drop the sentinel token appended above.
    return token_ids[:-1]
def decode(self, corpus: list[int]) -> str:
output_str = ""

View File

@@ -1,10 +1,12 @@
import pandas as pd
from BPE import TokeNanoCore as Tokenizer
from pathlib import Path
import Project_Model.Libs.BPE as BPE
#from BPE import TokeNanoCore as Tokenizer
from Scripts.Libs.CleaningPipeline.special_token import SpecialToken
class Batcher:
def __init__(self, dataset_path: str, batch_size:int, tokenizer: Tokenizer) -> None:
def __init__(self, dataset_path: str, batch_size:int, tokenizer: BPE.TokeNanoCore) -> None:
# ABSTRACT, TRIPLE
# tasks:
# rdf2text: X: TRIPLE, Y: ABSTRACT
@@ -21,24 +23,50 @@ class Batcher:
# each batch get 4 transformation for the 4 tasks and then shuffled
# now a batch is ["Abstract"], ["Triples"]
# tokenize the strings:
tokenized_batch
tokenized_batch[["Abstract","Triples"]] = batch[["Abstract","Triples"]].map(lambda t: self._tokenizer.encode(t))
# batch = batch.drop(columns=['MovieID'])
tokenized_batch = pd.DataFrame()
# bho = batch.map(lambda x: self._tokenizer.encode(x))
tokenized_batch[["Abstract","RDFs"]] = batch[["Abstract","RDFs"]].map(
lambda t: self._tokenizer.encode_incomplete_string(t))
# ??? i hope this works, later will be tested
rdf2_txt_batch = self.__rdf2txt_transformation(tokenized_batch)
rdf2txt_batch = self.__rdf2txt_transformation(tokenized_batch)
txt2rdf_batch = self.__txt2rdf_transformation(tokenized_batch)
output = pd.concat([rdf2txt_batch,txt2rdf_batch],ignore_index=True)
output.sample(frac=1).reset_index(drop=True)
yield output
def __random_subset_rdfs(self, batch: pd.DataFrame):
    """Split each row's RDF string into a list of triple fragments.

    NOTE(review): despite the name, nothing random happens here — no
    sampling or subset selection is performed; confirm whether that is
    still to be implemented.
    """
    # Splitting on START_TRIPLE and keeping [1:] discards any text
    # before the first marker; each remaining element is one triple's
    # text without its leading START_TRIPLE token.
    # Mutates `batch` in place and returns None.
    batch["RDFs"] = batch["RDFs"].map(
        lambda x: x.split(SpecialToken.START_TRIPLE.value)[1:]
    )
def __rdf2txt_transformation(self, batch: pd.DataFrame):
    """Build the rdf2text task view of a tokenized batch.

    Renames "RDFs" -> "X" (model input) and "Abstract" -> "Y" (target)
    and returns only those two columns. The input frame is not modified
    (DataFrame.rename returns a new frame by default).
    """
    # Dropped the stale duplicate rename of the old "Triples" column —
    # diff residue from before the column was renamed to "RDFs".
    batch = batch.rename(columns={"RDFs": "X", "Abstract": "Y"})
    # Shuffling is left to the caller, hence no sample()/reset_index here.
    return batch[["X", "Y"]]
def __txt2rdf_transformation(self, batch: pd.DataFrame):
    """Build the text2rdf task view of a tokenized batch.

    Renames "Abstract" -> "X" (model input) and "RDFs" -> "Y" (target)
    and returns only those two columns; the input frame is left intact.
    """
    # Dropped the stale duplicate rename of the old "Triples" column
    # (diff residue); only the current column names are mapped.
    batch = batch.rename(columns={"Abstract": "X", "RDFs": "Y"})
    # Shuffling is left to the caller, hence no sample()/reset_index here.
    return batch[["X", "Y"]]

# def __masking()  # TODO(review): masking task transformation not implemented yet
# --- manual smoke test -------------------------------------------------
DATASET_PATH = "Assets/Dataset/Tmp/rdf_text.csv"
VOCABULARY_path = "Assets/Dataset/Tmp/trimmed.json"

# Guarded so importing this module no longer triggers vocabulary loading,
# dataset reads and printing (previously these ran at import time).
if __name__ == "__main__":
    # Load the trimmed vocabulary and build the tokenizer over it plus
    # every declared special token.
    VOCABULARY = BPE.load_nanos_vocabulary(Path(VOCABULARY_path))
    SPECIAL_TOKEN_LIST = [token.value for token in SpecialToken]
    TOKENANO = BPE.TokeNanoCore(VOCABULARY, SPECIAL_TOKEN_LIST)

    # Sanity-check encoding on a known abstract string.
    prova = "<ABS>Cactus Flower is a 1969 American screwball comedy film directed by Gene Saks, and starring Walter Matthau, Ingrid Bergman and Goldie Hawn, who won an Academy Award for her performance.The screenplay was adapted by I. A. L. Diamond from the 1965 Broadway play of the same title written by Abe Burrows, which, in turn, is based on the French play Fleur de cactus by Pierre Barillet and Jean-Pierre Gredy. Cactus Flower was the ninth highest-grossing film of 1969."
    print(TOKENANO.encode(prova))

    # Iterate the dataset in batches of 3 and dump each batch.
    batcher = Batcher(DATASET_PATH, 3, TOKENANO)
    for batch in batcher.get_batch():
        print(batch)