WIP Batcher added class to fourth task
This commit is contained in:
parent
7027414342
commit
159266a603
@@ -3,10 +3,11 @@ from pathlib import Path
 import Project_Model.Libs.BPE as BPE
 #from BPE import TokeNanoCore as Tokenizer
 from Scripts.Libs.CleaningPipeline.special_token import SpecialToken
+from Project_Model.Libs.Transformer.Classes.SpannedMasker import SpannedMasker
 import random

 class Batcher:

-    def __init__(self, dataset_path: str, batch_size: int, tokenizer: BPE.TokeNanoCore) -> None:
+    def __init__(self, dataset_path: str, batch_size: int, tokenizer: BPE.TokeNanoCore, masker: SpannedMasker) -> None:
         # ABSTRACT, TRIPLE
         # tasks:
         # rdf2text: X: TRIPLE, Y: ABSTRACT
@@ -17,6 +18,7 @@ class Batcher:
         self._dataset_path = dataset_path
         self._batch_size = batch_size
         self._tokenizer = tokenizer
+        self._masker = masker

     def get_batch(self):
         for batch in pd.read_csv(self._dataset_path, chunksize=int(self._batch_size / 3)):  # now we support 3 tasks
@@ -27,13 +29,14 @@ class Batcher:
             tokenized_batch = pd.DataFrame()
             # bho = batch.map(lambda x: self._tokenizer.encode(x))
             tokenized_batch[["Abstract", "RDFs"]] = batch[["Abstract", "RDFs"]].map(
-                lambda t: self._tokenizer.encode_incomplete_string(t))
+                lambda t: self._tokenizer.encode(t))

             # ??? I hope this works; to be tested later
             rdf2txt_batch = self.__rdf2txt_transformation(tokenized_batch)
             txt2rdf_batch = self.__txt2rdf_transformation(tokenized_batch)
+            mask_batch = self.__masking_transformation(tokenized_batch)

-            output = pd.concat([rdf2txt_batch, txt2rdf_batch], ignore_index=True)
+            output = pd.concat([rdf2txt_batch, txt2rdf_batch, mask_batch], ignore_index=True)
             output = output.sample(frac=1).reset_index(drop=True)
             yield output
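A note on the chunk arithmetic in get_batch: each CSV chunk holds batch_size/3 rows, and every row is expanded into one example per task (rdf2txt, txt2rdf, masking), so the concatenated output is again roughly batch_size rows. A minimal sketch of that arithmetic, assuming batch_size is divisible by 3:

    batch_size = 9                          # the value used at the bottom of this file
    chunksize = batch_size // 3             # 3 CSV rows are read per chunk
    tasks = ["rdf2txt", "txt2rdf", "mask"]  # the three task views produced per row
    rows_per_batch = chunksize * len(tasks)
    assert rows_per_batch == batch_size     # 3 rows x 3 task views = 9 examples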
@@ -60,18 +63,30 @@ class Batcher:
         batch = batch.rename(columns={"Abstract": "X", "RDFs": "Y"})
         return batch[["X", "Y"]]  # .sample(frac=1).reset_index(drop=True)

-    # def __masking()
+    def __masking_transformation(self, batch: pd.DataFrame):
+        # mask_sequence: List[int] -> Tuple[List[int], List[int]]
+        xy_tuples = batch["RDFs"].apply(self._masker.mask_sequence)  # Series of (X, Y) tuples
+
+        output = batch.copy()
+        # Expand the tuples into two columns, preserving the original index
+        output[["X", "Y"]] = pd.DataFrame(xy_tuples.tolist(), index=batch.index)
+        return output[["X", "Y"]]


 DATASET_PATH = "Assets/Dataset/Tmp/rdf_text.csv"
 VOCABULARY_path = "Assets/Dataset/Tmp/trimmed.json"

 VOCABULARY = BPE.load_nanos_vocabulary(Path(VOCABULARY_path))
-SPECIAL_TOKEN_LIST = [token.value for token in SpecialToken]
-TOKENANO = BPE.TokeNanoCore(VOCABULARY, SPECIAL_TOKEN_LIST)
+SPECIAL_LIST = BPE.default_special_tokens()
+TOKENANO = BPE.TokeNanoCore(VOCABULARY, SPECIAL_LIST)
+SPECIAL_TOKENS: set[int] = set(TOKENANO.encode("".join(SPECIAL_LIST)))
+
+MASKER = SpannedMasker(TOKENANO.vocabulary_size, SPECIAL_TOKENS)

 prova = "<ABS>Cactus Flower is a 1969 American screwball comedy film directed by Gene Saks, and starring Walter Matthau, Ingrid Bergman and Goldie Hawn, who won an Academy Award for her performance. The screenplay was adapted by I. A. L. Diamond from the 1965 Broadway play of the same title written by Abe Burrows, which, in turn, is based on the French play Fleur de cactus by Pierre Barillet and Jean-Pierre Gredy. Cactus Flower was the ninth highest-grossing film of 1969."
 print(TOKENANO.encode(prova))
-batcher = Batcher(DATASET_PATH, 3, TOKENANO)
+batcher = Batcher(DATASET_PATH, 9, TOKENANO, MASKER)
 for batch in batcher.get_batch():
     print(batch)
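The masking transformation above only relies on SpannedMasker exposing mask_sequence with the signature noted in its comment, List[int] -> Tuple[List[int], List[int]]. The real implementation is not part of this commit, so the stand-in below is just a sketch of that contract; the 15% masking rate and the mask_id sentinel are assumptions, not the project's actual behaviour:

    import random

    class _ToySpanMasker:
        # Hypothetical stand-in; constructor kept parallel to
        # SpannedMasker(vocabulary_size, special_tokens) as used above.
        def __init__(self, vocab_size: int, special_tokens: set[int], mask_id: int = 0) -> None:
            self._special = special_tokens
            self._mask_id = mask_id  # assumed sentinel id for masked positions

        def mask_sequence(self, tokens: list[int]) -> tuple[list[int], list[int]]:
            # X: input with ~15% of non-special tokens replaced by the sentinel;
            # Y: the untouched original sequence, i.e. the reconstruction target.
            x = [self._mask_id if (t not in self._special and random.random() < 0.15) else t
                 for t in tokens]
            return x, list(tokens)

    x, y = _ToySpanMasker(1000, {1, 2}).mask_sequence([1, 17, 23, 42, 2])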
33  Project_Model/Libs/Batch/Classes/TokenCompletation.py  Normal file
@@ -0,0 +1,33 @@
+class TokenCompletationTransformer:
+
+    def __init__(self, SOTL_token, EOS_token, input_percent: float = 0.5) -> None:
+        self.__SOTL_token = SOTL_token
+        self.__EOS_token = EOS_token
+        self.__input_percent = input_percent
+
+    def get_completation_tuple(
+        self,
+        token_sequence: list[int],
+    ) -> tuple[list[int], list[int]]:
+        # Split the sequence at the encoded <SOTL> markers: the first
+        # input_percent of the markers go to X, the remainder to Y.
+        sotl_count = int(token_sequence.count(self.__SOTL_token) * self.__input_percent)
+
+        # Advance to just past the sotl_count-th <SOTL> marker
+        sotl_index = 0
+        percent_index = 0
+        while sotl_index < sotl_count:
+            token = token_sequence[percent_index]
+            if token == self.__SOTL_token:
+                sotl_index += 1
+            percent_index += 1
+
+        # Step back so the split lands on the marker itself
+        percent_index = percent_index - 1
+        x_list = token_sequence[:percent_index]
+        x_list.append(self.__EOS_token)
+        y_list = token_sequence[percent_index:]
+        return (x_list, y_list)
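For reference, a worked pass through get_completation_tuple with illustrative token ids (the real ids would come from the tokenizer vocabulary): with two <SOTL> markers and the default input_percent of 0.5, sotl_count is 1, so X is everything before the first marker plus <EOS>, and Y runs from that marker to the end. A sequence containing fewer than two markers yields sotl_count == 0 and leaves percent_index at -1, an edge case this WIP code does not yet handle.

    SOTL, EOS = 99, 100  # illustrative ids, not taken from the real vocabulary
    t = TokenCompletationTransformer(SOTL_token=SOTL, EOS_token=EOS, input_percent=0.5)
    x, y = t.get_completation_tuple([1, SOTL, 2, 3, SOTL, 4, 5])
    print(x)  # [1, 100]              -> prefix before the first <SOTL>, plus <EOS>
    print(y)  # [99, 2, 3, 99, 4, 5]  -> from the first <SOTL> to the end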