24 Commits

Author           SHA1         Date                         Message
GassiGiuseppe    3446870291   2025-10-10 22:27:01 +02:00   typo
GassiGiuseppe    e76dbeb9a7   2025-10-10 22:26:06 +02:00   typo
GassiGiuseppe    96610612fe   2025-10-10 20:10:08 +02:00   Batcher added
Christian Risi   bed9718f27   2025-10-10 11:40:39 +02:00   Added BPE small vocabulary
GassiGiuseppe    93865bee8a   2025-10-09 22:26:17 +02:00   typo
Christian Risi   1c0ddb8753   2025-10-09 22:23:36 +02:00   Merge branch 'dev.embedder' of https://repositories.communitynotfound.work/PoliBa-DeepLearning/NanoSocrates into dev.embedder
Christian Risi   51399f9dc9   2025-10-09 22:22:42 +02:00   commit of toy dataset with whole batch
GassiGiuseppe    d1ba4ae026   2025-10-09 21:57:05 +02:00   last update for collab ( we are gonna run it on a 100 yey)
Christian Risi   db0090981c   2025-10-09 21:53:45 +02:00   Merge branch 'dev.embedder' of https://repositories.communitynotfound.work/PoliBa-DeepLearning/NanoSocrates into dev.embedder
Christian Risi   e1c5649d67   2025-10-09 21:53:42 +02:00   updated to overfit over toy dataset
GassiGiuseppe    0bca241662   2025-10-09 20:53:45 +02:00   update environment yaml
GassiGiuseppe    005d7af6a0   2025-10-09 20:30:06 +02:00   lil update of requirements
GassiGiuseppe    9068db550e   2025-10-09 19:44:46 +02:00   Merge branch 'dev.embedder' of https://repositories.communitynotfound.work/PoliBa-DeepLearning/NanoSocrates into dev.embedder
GassiGiuseppe    d8f81e1a47   2025-10-09 19:43:50 +02:00   that god can have mercy upon us
Christian Risi   a67df9724e   2025-10-09 18:14:33 +02:00   Merge branch 'dev.embedder' of https://repositories.communitynotfound.work/PoliBa-DeepLearning/NanoSocrates into dev.embedder
Christian Risi   c5fd57d854   2025-10-09 18:14:29 +02:00   Updated train playground
GassiGiuseppe    ee253c39f4   2025-10-09 13:31:37 +02:00   Merge branch 'dev.embedder' of https://repositories.communitynotfound.work/PoliBa-DeepLearning/NanoSocrates into dev.embedder
GassiGiuseppe    2036b4015f   2025-10-09 13:31:16 +02:00   added logistic collector
Christian Risi   aac7675b30   2025-10-09 13:24:48 +02:00   Pipeline fix and added a util to decode
GassiGiuseppe    d2fdeb18a2   2025-10-09 12:41:47 +02:00   bla bla doctor
Christian Risi   f3b83eda3d   2025-10-09 11:37:46 +02:00   Rework
Christian Risi   0158db2dce   2025-10-09 11:37:21 +02:00   Fixed a bug where I took encoder embeddings rather than encoder output
Christian Risi   ba592c3480   2025-10-09 11:36:56 +02:00   Disabled Softmax
Christian Risi   1f9c30b531   2025-10-09 11:36:40 +02:00   Added Custom Learning Rate
21 changed files with 8859 additions and 148 deletions

BIN
Assets/Model/small/bpe-small.json LFS Normal file

Binary file not shown.

193
Playgrounds/doctor.ipynb Normal file

@@ -0,0 +1,193 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "ddfb4457",
"metadata": {},
"outputs": [
{
"ename": "AssertionError",
"evalue": "target id 3872 >= V (256). Fix TOKEN_SPACE_SIZE.",
"output_type": "error",
"traceback": [
"\u001b[31m---------------------------------------------------------------------------\u001b[39m",
"\u001b[31mAssertionError\u001b[39m Traceback (most recent call last)",
"\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[1]\u001b[39m\u001b[32m, line 126\u001b[39m\n\u001b[32m 124\u001b[39m \u001b[38;5;66;03m# sanity guard (helps debug vocab mismatches fast)\u001b[39;00m\n\u001b[32m 125\u001b[39m max_seen = tgt[:, :Tp].max().item()\n\u001b[32m--> \u001b[39m\u001b[32m126\u001b[39m \u001b[38;5;28;01massert\u001b[39;00m max_seen < V \u001b[38;5;129;01mor\u001b[39;00m (tgt[:, :Tp] == PAD_TOKEN).all(), \\\n\u001b[32m 127\u001b[39m \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mtarget id \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mmax_seen\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m >= V (\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mV\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m). Fix TOKEN_SPACE_SIZE.\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 129\u001b[39m \u001b[38;5;66;03m# CE over all tokens produced so far (0..t). PAD is ignored by ignore_index\u001b[39;00m\n\u001b[32m 130\u001b[39m loss_t = cross_entropy(\n\u001b[32m 131\u001b[39m logits_btV.reshape(-\u001b[32m1\u001b[39m, V), \u001b[38;5;66;03m# [B*(t+1), V]\u001b[39;00m\n\u001b[32m 132\u001b[39m tgt[:, :Tp].reshape(-\u001b[32m1\u001b[39m) \u001b[38;5;66;03m# [B*(t+1)]\u001b[39;00m\n\u001b[32m 133\u001b[39m )\n",
"\u001b[31mAssertionError\u001b[39m: target id 3872 >= V (256). Fix TOKEN_SPACE_SIZE."
]
}
],
"source": [
"import random\n",
"import torch\n",
"import pandas as pd\n",
"from pathlib import Path\n",
"import Project_Model.Libs.Embedder as Embedder\n",
"import Project_Model.Libs.BPE as BPE\n",
"import Project_Model.Libs.Transformer as Transformer\n",
"import Project_Model.Libs.TorchShims as torch_shims\n",
"from Project_Model.Libs.Training.learning_rade_shedulers import CustomLR\n",
"from Project_Model.Libs.Training.logistic_collector import LogitsCollector # external collector\n",
"\n",
"# set a fixed seed\n",
"torch.manual_seed(0)\n",
"random.seed(0)\n",
"DEVICE = torch_shims.get_default_device()\n",
"torch.set_default_device(DEVICE)\n",
"\n",
"# BPE Init\n",
"VOCABULARY_PATH = Path(\"Assets/Model/toy_10/toy_dictionary.json\")\n",
"SPECIAL_VOC = BPE.default_special_tokens()\n",
"\n",
"VOCABULARY = BPE.load_nanos_vocabulary(VOCABULARY_PATH)\n",
"TOKENANO = BPE.TokeNanoCore(VOCABULARY, SPECIAL_VOC)\n",
"\n",
"# Constants (TEMP size; will be corrected after dataset scan below)\n",
"TOKEN_SPACE_SIZE = TOKENANO.vocabulary_size + 1\n",
"EMBEDDED_SIZE = 256\n",
"FEED_FORWARD_MULTIPLIER = 4\n",
"ATTENTION_HEADS = 4\n",
"SENTENCE_LENGTH = 256\n",
"NUMBER_OF_BLOCKS = 2\n",
"MAX_EPOCHS = int(1e4)\n",
"\n",
"PAD_TOKEN = TOKENANO.encode(\"<PAD>\")[0]\n",
"END_TOKEN = TOKENANO.encode(\"<END>\")[0]\n",
"\n",
"# Load CSV\n",
"TOY_DATASET_PATH = Path(\"Assets/Dataset/1-hop/toy/rdf_text.csv\")\n",
"TOY_DATASET = pd.read_csv(TOY_DATASET_PATH)\n",
"\n",
"TOY_BATCH_INPUT_LIST: list[list[int]] = []\n",
"TOY_BATCH_PADDING_LIST: list[list[bool]] = []\n",
"TOY_BATCH_TARGET_LIST: list[list[int]] = []\n",
"TOY_BATCH_DECODER_DEFAULT: list[list[int]] = []\n",
"\n",
"for index, row in TOY_DATASET.iterrows():\n",
" RDFs: str = row[\"RDFs\"]\n",
" Abstract: str = row[\"Abstract\"]\n",
"\n",
" input_tokens = TOKENANO.encode(RDFs) # encoder input ids\n",
" output_tokens = TOKENANO.encode(Abstract)[1:] # decoder target ids (shifted left)\n",
" decoder_default_tokens = TOKENANO.encode(\"<SOS>\") # decoder input starts with <SOS>\n",
"\n",
" input_tokens, padding = Transformer.normalize_sequence(\n",
" input_tokens, SENTENCE_LENGTH, PAD_TOKEN, END_TOKEN\n",
" ) # pad/trim + end token\n",
" output_tokens, _ = Transformer.normalize_sequence(\n",
" output_tokens, SENTENCE_LENGTH, PAD_TOKEN, END_TOKEN\n",
" ) # pad/trim + end token\n",
" decoder_default_tokens = Transformer.pad_sequence(\n",
" decoder_default_tokens, SENTENCE_LENGTH, PAD_TOKEN\n",
" ) # pad with PAD up to SENTENCE_LENGTH\n",
"\n",
" TOY_BATCH_INPUT_LIST.append(input_tokens)\n",
" TOY_BATCH_PADDING_LIST.append(padding)\n",
" TOY_BATCH_TARGET_LIST.append(output_tokens)\n",
" TOY_BATCH_DECODER_DEFAULT.append(decoder_default_tokens)\n",
"\n",
"# fix V to cover ALL ids (including specials) # <- important\n",
"max_enc_id = max(max(row) for row in TOY_BATCH_INPUT_LIST) if TOY_BATCH_INPUT_LIST else 0\n",
"max_tgt_id = max(max(row) for row in TOY_BATCH_TARGET_LIST) if TOY_BATCH_TARGET_LIST else 0\n",
"TOKEN_SPACE_SIZE = max(TOKEN_SPACE_SIZE, max(PAD_TOKEN, END_TOKEN, max_enc_id, max_tgt_id) + 1)\n",
"\n",
"# Training loop\n",
"LOSS_HISTORY = []\n",
"NANOSOCRATES = Transformer.TrainingModel(\n",
" TOKEN_SPACE_SIZE,\n",
" EMBEDDED_SIZE,\n",
" FEED_FORWARD_MULTIPLIER,\n",
" ATTENTION_HEADS,\n",
" NUMBER_OF_BLOCKS,\n",
")\n",
"\n",
"collector = LogitsCollector(PAD_TOKEN, END_TOKEN, TOKENANO) # collects logits and decodes\n",
"\n",
"NANOSOCRATES.train()\n",
"cross_entropy = torch.nn.CrossEntropyLoss(ignore_index=PAD_TOKEN)\n",
"optimizer = torch.optim.AdamW(NANOSOCRATES.parameters(), lr=1.0) # base lr works as factor\n",
"scheduler = CustomLR(optimizer, EMBEDDED_SIZE, warmup_steps=4000, factor=1.0) # step each optimizer step\n",
"\n",
"current_epoch = 0\n",
"BATCH_SIZE = min(32, len(TOY_BATCH_INPUT_LIST)) # small batch to stabilize\n",
"\n",
"while current_epoch < MAX_EPOCHS:\n",
" # simple fixed mini-batch from the top; later you can shuffle/slice\n",
" enc = torch.tensor(TOY_BATCH_INPUT_LIST[:BATCH_SIZE], dtype=torch.long) # [B,T] encoder token ids\n",
" pad = torch.tensor(TOY_BATCH_PADDING_LIST[:BATCH_SIZE], dtype=torch.bool) # [B,T] True where encoder PAD is present\n",
" tgt = torch.tensor(TOY_BATCH_TARGET_LIST[:BATCH_SIZE], dtype=torch.long) # [B,T] decoder targets (ground-truth)\n",
"\n",
" # decoder prefix buffer: <SOS> at pos 0, PAD elsewhere (no shift here) # we will fill it step by step\n",
" dec = torch.tensor(TOY_BATCH_DECODER_DEFAULT[:BATCH_SIZE], dtype=torch.long) # [B,T]\n",
"\n",
" total_loss = 0.0\n",
" collector.reset() # start fresh for this epoch\n",
"\n",
" T = tgt.size(1) # sequence length\n",
" for t in range(T):\n",
" # skip all-PAD steps to avoid CE divide-by-zero late in the sequence\n",
" if (tgt[:, t] == PAD_TOKEN).all(): # all PAD at this timestep\n",
" break\n",
"\n",
" optimizer.zero_grad(set_to_none=True) # clear grads for this token step\n",
"\n",
" prefix = dec[:, : t + 1] # [B, t+1] current decoder prefix\n",
" dec_pad_mask = prefix.eq(PAD_TOKEN) # [B, t+1] True where PAD inside prefix\n",
"\n",
" # now decoder returns all steps up to t -> [B, t+1, V]\n",
" logits_btV: torch.Tensor = NANOSOCRATES((enc, pad, prefix, dec_pad_mask)) # full logits for learning\n",
" collector.add(logits_btV) # collector will take the last step\n",
"\n",
" Tp = logits_btV.size(1) # t+1\n",
" V = logits_btV.size(-1) # vocab size\n",
"\n",
" # sanity guard (helps debug vocab mismatches fast)\n",
" max_seen = tgt[:, :Tp].max().item()\n",
" assert max_seen < V or (tgt[:, :Tp] == PAD_TOKEN).all(), \\\n",
" f\"target id {max_seen} >= V ({V}). Fix TOKEN_SPACE_SIZE.\"\n",
"\n",
" # CE over all tokens produced so far (0..t). PAD is ignored by ignore_index\n",
" loss_t = cross_entropy(\n",
" logits_btV.reshape(-1, V), # [B*(t+1), V]\n",
" tgt[:, :Tp].reshape(-1) # [B*(t+1)]\n",
" )\n",
"\n",
" loss_t.backward() # backprop for this step\n",
" optimizer.step() # update params\n",
" scheduler.step() # Noam/warmup: step per optimizer step\n",
"\n",
" total_loss = float(loss_t.detach()) # keep last step loss for logging\n",
"\n",
" # teacher forcing: reveal the correct token for next position\n",
" if t < T - 1:\n",
" dec[:, t + 1] = tgt[:, t] # write ground-truth into next slot\n",
"\n",
" current_epoch += 1\n",
" print(f\"EPOCH {current_epoch}\\n\\tLoss: {total_loss:.6f}\") # simple log\n",
" collector.print_decoded() # print decoded predictions for the batch\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "deep_learning",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
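Note: the loss in the cell above is a single cross-entropy over every decoder position produced so far, with PAD excluded through ignore_index. A minimal, self-contained sketch of that pattern, using toy shapes and placeholder ids rather than the playground's real vocabulary:

import torch

# Toy sizes and ids for illustration only; the real V and PAD come from TokeNanoCore.
V, PAD = 8, 0
logits = torch.randn(2, 3, V)            # [B, t+1, V], the decoder output so far
targets = torch.tensor([[5, 2, PAD],
                        [1, PAD, PAD]])  # [B, t+1]; PAD positions add nothing to the loss
ce = torch.nn.CrossEntropyLoss(ignore_index=PAD)
loss = ce(logits.reshape(-1, V), targets.reshape(-1))
print(float(loss))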


File diff suppressed because one or more lines are too long

170
Playgrounds/prova.py Normal file

@@ -0,0 +1,170 @@
import random
import torch
import pandas as pd
from pathlib import Path
import Project_Model.Libs.Embedder as Embedder
import Project_Model.Libs.BPE as BPE
import Project_Model.Libs.Transformer as Transformer
import Project_Model.Libs.TorchShims as torch_shims
# set a fixed seed
torch.manual_seed(0)
random.seed(0)
DEVICE = torch_shims.get_default_device()
torch.set_default_device(DEVICE)
# set a default device
# BPE Init
VOCABULARY_PATH = Path("Assets/Model/toy_10/toy_dictionary.json")
SPECIAL_VOC = BPE.default_special_tokens()
VOCABULARY = BPE.load_nanos_vocabulary(VOCABULARY_PATH)
TOKENANO = BPE.TokeNanoCore(VOCABULARY, SPECIAL_VOC)
# Constants
TOKEN_SPACE_SIZE = TOKENANO.vocabulary_size + 1
EMBEDDED_SIZE = 256
FEED_FORWARD_MULTIPLIER = 4
ATTENTION_HEADS = 8
SENTENCE_LENGTH = 256
NUMBER_OF_BLOCKS = 4
MAX_EPOCHS = int(1e3)
PAD_TOKEN = TOKENANO.encode("<PAD>")[0]
END_TOKEN = TOKENANO.encode("<END>")[0]
# Load CSV
TOY_DATASET_PATH = Path("Assets/Dataset/1-hop/toy/rdf_text.csv")
TOY_DATASET = pd.read_csv(TOY_DATASET_PATH)
TOY_BATCH_INPUT_LIST: list[list[int]] = []
TOY_BATCH_PADDING_LIST: list[list[bool]] = []
TOY_BATCH_TARGET_LIST: list[list[int]] = []
TOY_BATCH_DECODER_DEFAULT: list[list[int]]= []
for index, row in TOY_DATASET.iterrows():
    RDFs: str = row["RDFs"]
    Abstract: str = row["Abstract"]

    input_tokens = TOKENANO.encode(RDFs)
    output_tokens = TOKENANO.encode(Abstract)[1:]
    decoder_default_tokens = TOKENANO.encode("<SOS>")

    input_tokens, padding = Transformer.normalize_sequence(
        input_tokens, SENTENCE_LENGTH, PAD_TOKEN, END_TOKEN
    )
    output_tokens, _ = Transformer.normalize_sequence(
        output_tokens, SENTENCE_LENGTH, PAD_TOKEN, END_TOKEN
    )
    decoder_default_tokens, _ = Transformer.normalize_sequence(
        decoder_default_tokens, SENTENCE_LENGTH, PAD_TOKEN, END_TOKEN, False
    )

    TOY_BATCH_INPUT_LIST.append(input_tokens)
    TOY_BATCH_PADDING_LIST.append(padding)
    TOY_BATCH_TARGET_LIST.append(output_tokens)
    TOY_BATCH_DECODER_DEFAULT.append(decoder_default_tokens)

    output_tokens = TOKENANO.encode(RDFs)
    input_tokens = TOKENANO.encode(Abstract)[1:]
    decoder_default_tokens = TOKENANO.encode("<SOS>")

    input_tokens, padding = Transformer.normalize_sequence(
        input_tokens, SENTENCE_LENGTH, PAD_TOKEN, END_TOKEN
    )
    output_tokens, _ = Transformer.normalize_sequence(
        output_tokens, SENTENCE_LENGTH, PAD_TOKEN, END_TOKEN
    )
    decoder_default_tokens, _ = Transformer.normalize_sequence(
        decoder_default_tokens, SENTENCE_LENGTH, PAD_TOKEN, END_TOKEN, False
    )

    TOY_BATCH_INPUT_LIST.append(input_tokens)
    TOY_BATCH_PADDING_LIST.append(padding)
    TOY_BATCH_TARGET_LIST.append(output_tokens)
    TOY_BATCH_DECODER_DEFAULT.append(decoder_default_tokens)
# Training loop
LOSS_HISTORY = []
NANOSOCRATES = Transformer.TrainingModel(
    TOKEN_SPACE_SIZE,
    EMBEDDED_SIZE,
    FEED_FORWARD_MULTIPLIER,
    ATTENTION_HEADS,
    NUMBER_OF_BLOCKS
)
cross_entropy = torch.nn.CrossEntropyLoss(ignore_index=PAD_TOKEN)
optimizer = torch.optim.AdamW(NANOSOCRATES.parameters())
scheduler = Transformer.WarmupLR(optimizer, 4000, EMBEDDED_SIZE)
last_loss = 0
current_epoch = 0
while current_epoch < MAX_EPOCHS:
    optimizer.zero_grad()

    encoder_list = torch.tensor(TOY_BATCH_INPUT_LIST[:])
    decoder_list = torch.tensor(TOY_BATCH_DECODER_DEFAULT[:])
    src_padding = torch.tensor(TOY_BATCH_PADDING_LIST[:], dtype=torch.bool)

    # Transform target into logits
    target_logits = torch.tensor(TOY_BATCH_TARGET_LIST[:])

    last_loss = 0
    last_prediction: torch.Tensor

    for i in range(0, SENTENCE_LENGTH):
        optimizer.zero_grad()

        tgt_padding = decoder_list.eq(PAD_TOKEN)
        logits: torch.Tensor = NANOSOCRATES((encoder_list, src_padding, decoder_list, tgt_padding))

        prob = torch.softmax(logits, 2)
        most_probable_tokens = torch.argmax(prob, 2)
        last_prediction = most_probable_tokens

        logits = logits[:, :i, :]
        logits = logits.permute(0, 2, 1)
        loss: torch.Tensor = cross_entropy(logits, target_logits[:, 0:i])
        # loss: torch.Tensor = cross_entropy(logits, target_logits)
        last_loss = loss

        loss.backward()
        optimizer.step()
        scheduler.step()

        if i < SENTENCE_LENGTH - 1:
            decoder_list[:, i + 1] = target_logits[:, i]

    current_epoch += 1

    if current_epoch % 1 == 0:
        print(f"EPOCH {current_epoch}\n\tLoss: {last_loss}")

        for encoded_sentence, expected_sentence in zip(
            Transformer.tensor2token(last_prediction[:, :], END_TOKEN),  # type: ignore
            Transformer.tensor2token(target_logits[:, :], END_TOKEN)
        ):
            decoded_sentence = TOKENANO.decode(encoded_sentence)
            decoded_target = TOKENANO.decode(expected_sentence)
            print(f"\tACTUAL:\n\t\t{decoded_sentence}\n\tEXPECTED:\n\t\t{decoded_target}\n")


@@ -189,7 +189,7 @@ class NanoSocratesBPE(Encoder):
             token_stack.appendleft(right_token)
             token_stack.appendleft(left_token)

-        return UTF_8_STRING_ARR.decode("utf-8")
+        return UTF_8_STRING_ARR.decode("utf-8", errors="ignore")

     def __token_decode(self, token_id: int) -> tuple[int, int]:


@@ -31,7 +31,7 @@ class TokeNanoCore:
     def vocabulary_size(self):
         BPE_VOC_SIZE = self.__bpe_encoder.vocabulary_size
         SPECIAL_VOC_SIZE = self.__special_encoder.vocabulary_size

-        return BPE_VOC_SIZE + SPECIAL_VOC_SIZE
+        return BPE_VOC_SIZE + SPECIAL_VOC_SIZE + 1

     def encode(self, corpus: str) -> list[int]:
         output: list[int] = []


@@ -1,49 +1,68 @@
import random
from typing import Generator
import sys
from typing import Any, Generator
import pandas as pd
from pathlib import Path
from Project_Model.Libs.Batch.Enums.TaskType import TaskType
import Project_Model.Libs.BPE as BPE
from Scripts.Libs.CleaningPipeline.special_token import SpecialToken
from Project_Model.Libs.Transformer.Classes.SpannedMasker import SpannedMasker
# from Scripts.Libs.CleaningPipeline.special_token import SpecialToken
from Project_Model.Libs.Transformer import SpannedMasker, truncate_rdf_list, normalize_sequence
from TokenCompletation import TokenCompletationTransformer
from Project_Model.Libs.BPE.Enums.SpecialToken import SpecialToken
from Project_Model.Libs.BPE import SpecialToken
MAX_LENGHT = 128
class Batcher:
def __init__(self, dataset_path: str, batch_size:int, tokenizer: BPE.TokeNanoCore, masker: SpannedMasker) -> None:
def __init__(self, dataset_path: Path, tokenizer: BPE.TokeNanoCore, masker: SpannedMasker, seed:int = 0) -> None:
# ABSTRACT, TRIPLE
# tasks:
# rdf2text: X: TRIPLE, Y: ABSTRACT
# text2rdf: X: ABSTRACT, Y: TRIPLE
# masking (call masker): X: incomplete_triple, Y: complete_triple (as exam)
# completation: X: TRIPLE SUBSET, Y: related TRIPLE SUBSET
# it will truncate
# it will instantiate the span masker and truncator
self._dataset_path = dataset_path
self._batch_size = batch_size
self._tokenizer = tokenizer
self._masker = masker
sotl = self._tokenizer.encode(SpecialToken.START_TRIPLE_LIST.value)
eos = self._tokenizer.encode(SpecialToken.END_OF_SEQUENCE.value)
self._token_completation = TokenCompletationTransformer(sotl,eos)
self._seed = seed
# self._token_completation = TokenCompletationTransformer(sotl,eos)
self._completation_task_token_truncator = truncate_rdf_list
def get_batch(self)-> Generator[pd.DataFrame]:
for batch in pd.read_csv(self._dataset_path, chunksize= int(self._batch_size/4)): #now we support 3 task
def batch(self, batch_size)-> Generator[tuple[list[list[int]], list[list[int]], list[list[int]],list[list[int]], TaskType],Any,Any]:
"""
Yields: X,Y,padding_X
"""
RNG = random.Random(self._seed)
self._masker.reseed(self._seed)
for batch in pd.read_csv(self._dataset_path, chunksize= batch_size):
tokenized_batch = pd.DataFrame()
# encode
tokenized_batch[["Abstract","RDFs"]] = (
batch[["Abstract","RDFs"]]
.map(lambda t: self._tokenizer.encode(t))
)
rdf2txt_batch = self.__rdf2txt_transformation(tokenized_batch)
txt2rdf_batch = self.__txt2rdf_transformation(tokenized_batch)
mask_batch = self.__masking_trasformation(tokenized_batch)
completation_batch = self.__token_completation_task(tokenized_batch)
output = pd.concat([rdf2txt_batch,txt2rdf_batch,mask_batch,completation_batch],ignore_index=True)
output = output.sample(frac=1).reset_index(drop=True)
yield output
X,Y, padding_X, padding_Y = self.__rdf2txt_transformation(tokenized_batch)
yield X,Y, padding_X, padding_Y, TaskType.RDF2TXT
X,Y, padding_X, padding_Y, = self.__txt2rdf_transformation(tokenized_batch)
yield X,Y, padding_X, padding_Y, TaskType.TEXT2RDF
X,Y, padding_X, padding_Y, = self.__masking_trasformation(tokenized_batch)
yield X,Y, padding_X, padding_Y, TaskType.MASKING
X,Y, padding_X, padding_Y, = self.__token_completation_task(tokenized_batch, RNG.randint(0,sys.maxsize))
yield X,Y, padding_X, padding_Y, TaskType.COMPLETATION
# output = pd.concat([rdf2txt_batch,txt2rdf_batch,completation_batch],ignore_index=True)
# output = output.sample(frac=1).reset_index(drop=True)
# self.decode_debug(output)
# yield output
def __random_subset_rdfs(self, batch: pd.DataFrame, seed = 0):
@@ -57,48 +76,89 @@ class Batcher:
to_list
)
def decode_debug(self, batch: pd.DataFrame):
decoded = pd.DataFrame()
decoded[["X","Y"]] = (
batch[["X","Y"]]
.map(lambda t: self._tokenizer.decode(t))
)
print(decoded)
def __normalization(self, X:list[list[int]], Y: list[list[int]])-> tuple[list[list[int]], list[list[int]], list[list[int]], list[list[int]]]:
pad_token = self._tokenizer.encode(SpecialToken.PAD.value)[0]
end_token = self._tokenizer.encode(SpecialToken.END_OF_SEQUENCE.value)[0]
out_X = []
padding_X = []
out_Y = []
padding_Y = []
for x in X:
out_x, padding_x = normalize_sequence(x,MAX_LENGHT,pad_token,end_token,True)
out_X.append(out_x)
padding_X.append(padding_x)
for y in Y:
out_y, padding_y = normalize_sequence(y,MAX_LENGHT,pad_token,end_token,True)
out_Y.append(out_y)
padding_Y.append(padding_y)
return out_X,out_Y,padding_X,padding_Y
def __rdf2txt_transformation(self, batch: pd.DataFrame):
batch = batch.rename(columns={"RDFs": "X", "Abstract": "Y"})
return batch[["X", "Y"]]
task_token = self._tokenizer.encode(SpecialToken.RDF_TO_TEXT.value)
out = batch.rename(columns={"RDFs":"X","Abstract":"Y"})[["X","Y"]]
out["X"] = [task_token + x for x in out["X"]]
return self.__normalization(out["X"].to_list(),out["Y"].to_list())
def __txt2rdf_transformation(self, batch: pd.DataFrame):
batch = batch.rename(columns={ "Abstract": "X","RDFs": "Y"})
return batch[["X", "Y"]]
task_token = self._tokenizer.encode(SpecialToken.TEXT_TO_RDF.value)
out = batch.rename(columns={"Abstract":"X","RDFs":"Y"})[["X","Y"]]
out["X"] = [task_token + x for x in out["X"]]
return self.__normalization(out["X"].to_list(),out["Y"].to_list())
def __masking_trasformation(self, batch: pd.DataFrame):
# mask_sequence: List[int] -> Tuple[List[int], List[int]]
xy_tuples = batch["RDFs"].apply(self._masker.mask_sequence) # Series of (X, Y)
output = batch.copy()
# Expand into two columns preserving the original index
output[["X", "Y"]] = pd.DataFrame(xy_tuples.tolist(), index=batch.index)
return output[["X", "Y"]]
X = []
Y = []
for rdf in batch["RDFs"]:
x,y = self._masker.mask_sequence(rdf)
X.append(x)
Y.append(y)
return self.__normalization(X,Y)
def __token_completation_task(self, batch: pd.DataFrame):
xy_tuples = batch["RDFs"].apply(self._token_completation.get_completation_tuple)
output = batch.copy()
output[["X", "Y"]] = pd.DataFrame(xy_tuples.tolist(), index=batch.index)
return output[["X", "Y"]]
def __token_completation_task(self, batch: pd.DataFrame, minibatch_seed: int):
continue_triple_token = self._tokenizer.encode(SpecialToken.CONTINUE_RDF.value)[0]
eot = self._tokenizer.encode(SpecialToken.END_TRIPLE.value)[0]
X = []
Y = []
for rdf in batch["RDFs"]:
x,y = self._completation_task_token_truncator(rdf, 0.5, continue_triple_token, eot, minibatch_seed)
X.append(x)
Y.append(y)
return self.__normalization(X,Y)
"""
DATASET_PATH = "Assets/Dataset/Tmp/rdf_text.csv"
VOCABULARY_path = "Assets/Dataset/Tmp/trimmed.json"
from pathlib import Path
VOCABULARY = BPE.load_nanos_vocabulary(Path(VOCABULARY_path))
SPECIAL_LIST = BPE.default_special_tokens()
TOKENANO = BPE.TokeNanoCore(VOCABULARY, SPECIAL_LIST)
SPECIAL_TOKENS: set[int] = set(TOKENANO.encode("".join(SPECIAL_LIST)))
if __name__ == "__main__":
MASKER = SpannedMasker(TOKENANO.vocabulary_size,SPECIAL_TOKENS)
DATASET_PATH = Path("Assets/Dataset/Tmp/rdf_text.csv")
VOCABULARY_path = "Assets/Dataset/Tmp/trimmed.json"
prova = "<ABS>Cactus Flower is a 1969 American screwball comedy film directed by Gene Saks, and starring Walter Matthau, Ingrid Bergman and Goldie Hawn, who won an Academy Award for her performance.The screenplay was adapted by I. A. L. Diamond from the 1965 Broadway play of the same title written by Abe Burrows, which, in turn, is based on the French play Fleur de cactus by Pierre Barillet and Jean-Pierre Gredy. Cactus Flower was the ninth highest-grossing film of 1969."
print(TOKENANO.encode(prova))
batcher = Batcher(DATASET_PATH,8,TOKENANO,MASKER)
for batch in batcher.get_batch():
print(batch)
"""
from pathlib import Path
VOCABULARY = BPE.load_nanos_vocabulary(Path(VOCABULARY_path))
SPECIAL_LIST = BPE.default_special_tokens()
TOKENANO = BPE.TokeNanoCore(VOCABULARY, SPECIAL_LIST)
SPECIAL_TOKENS: set[int] = set(TOKENANO.encode("".join(SPECIAL_LIST)))
MASKER = SpannedMasker(TOKENANO.vocabulary_size,SPECIAL_TOKENS)
prova = "<ABS>Cactus Flower is a 1969 American screwball comedy film directed by Gene Saks, and starring Walter Matthau, Ingrid Bergman and Goldie Hawn, who won an Academy Award for her performance.The screenplay was adapted by I. A. L. Diamond from the 1965 Broadway play of the same title written by Abe Burrows, which, in turn, is based on the French play Fleur de cactus by Pierre Barillet and Jean-Pierre Gredy. Cactus Flower was the ninth highest-grossing film of 1969."
print(TOKENANO.encode(prova))
batcher = Batcher(DATASET_PATH,TOKENANO,MASKER)
for batch in batcher.batch(8):
print(batch)
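Note: the reworked batch generator streams the CSV in chunks through pandas and, for each chunk, yields one (X, Y, padding_X, padding_Y, task) tuple per task. A self-contained sketch of the underlying chunked-read pattern, using a throwaway in-memory CSV whose columns mirror the project's:

import pandas as pd
from io import StringIO

# Stand-in for Assets/Dataset/Tmp/rdf_text.csv.
csv = StringIO("Abstract,RDFs\nhello,a b c\nworld,d e f\nagain,g h i\n")
for chunk in pd.read_csv(csv, chunksize=2):  # one DataFrame per chunk
    print(len(chunk), list(chunk.columns))   # 2 ['Abstract', 'RDFs'], then 1 ['Abstract', 'RDFs']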


@@ -1,41 +0,0 @@
import numpy as np

# custom LR from attention is all you need
class Custom_lr():
    def __init__(self, d_model: int, warmup_step:int) -> None:
        self.__d_model = d_model
        self.__warmup_step = warmup_step
        self.__epoch = 0

    def step(self) -> int:
        self.__epoch += 1
        return (self.__d_model ** -0.5) * min(self.__epoch ** -0.5,
                                              self.__epoch * (self.__warmup_step ** -1.5))

# OTHER LR
# Learning rate schedules (matching visualization parameters)
def step_lr(epoch, lr):
    # StepLR: step_size=20, gamma=0.5 (from visualization)
    return lr * 0.5 if epoch % 20 == 0 and epoch > 0 else lr

def exp_lr(epoch, lr):
    # ExponentialLR: gamma=0.95 (from visualization)
    return lr * 0.95

def cosine_lr(epoch, lr):
    # CosineAnnealingLR: lr_min=0.001, lr_max=0.1, max_epochs=100 (from visualization)
    lr_min, lr_max = 0.001, 0.1
    max_epochs = 100
    return lr_min + 0.5 * (lr_max - lr_min) * (1 + np.cos(epoch * np.pi / max_epochs))

def cyclical_lr(epoch, lr):
    # CyclicalLR: base_lr=0.001, max_lr=0.1, step_size=20 (from visualization)
    base_lr = 0.001
    max_lr = 0.1
    step_size = 20
    cycle = np.floor(1 + epoch / (2 * step_size))
    x = np.abs(epoch / step_size - 2 * cycle + 1)
    return base_lr + (max_lr - base_lr) * max(0, (1 - x))


@@ -0,0 +1,43 @@
import torch


class LogitsCollector:
    def __init__(self, pad_token: int, end_token: int, tokenizer) -> None:
        self.__pad_token = pad_token               # used to skip PAD
        self.__end_token = end_token               # used to stop at END
        self.__tokenizer = tokenizer               # exposes .decode(list[int]) -> str
        self.__steps: list[torch.Tensor] = []      # list of per-step logits [B,V]

    def reset(self) -> None:
        self.__steps.clear()                       # clear history

    def add(self, logits_step: torch.Tensor) -> None:
        if logits_step.dim() == 3:                 # handle [B,1,V]
            logits_step = logits_step[:, -1, :]    # -> [B,V]
        self.__steps.append(logits_step.detach())  # store raw logits (detached)

    def tokens(self) -> list[list[int]]:
        if not self.__steps:
            return []
        stack = torch.stack(self.__steps, dim=0)    # [T,B,V]
        probs = torch.softmax(stack, dim=-1)        # softmax over vocab -> [T,B,V]
        ids = probs.argmax(dim=-1).transpose(0, 1)  # greedy ids -> [B,T]
        out: list[list[int]] = []
        for row in ids.tolist():
            seq: list[int] = []
            for tok in row:
                # if tok == self.__end_token:       # stop on END
                #     break
                if tok == self.__pad_token:         # skip PAD
                    continue
                seq.append(tok)
            out.append(seq)
        return out

    def print_decoded(self) -> None:
        for i, seq in enumerate(self.tokens()):
            try:
                # text = text + self.__end_token
                text = self.__tokenizer.decode(seq)  # decode tokens to string
            except Exception:
                text = str(seq)                      # fallback to ids
            print(f"[{i}] {text}")                   # simple print


@@ -14,6 +14,6 @@ class DeToken(torch.nn.Module):
         x = self.__linear(x)

         # 2) Go to logits
-        x = torch.softmax(x, 2)
+        # x = torch.softmax(x, 2)

         return x


@@ -41,18 +41,19 @@ class Decoder(nn.Module):
             torch.Tensor,
             torch.Tensor,
             torch.Tensor,
+            torch.Tensor,
             torch.Tensor
         ]
     ):  # -> list[torch.Tensor]: # k_x = v_x . While x_q = x
         # WARNING: args is needed to have sequential
-        x, k_x, v_x, padding_mask = args
+        x, k_x, v_x, src_padding_mask, tgt_padding_mask = args

         # build of attention mask
         attention_mask = get_causal_attention_mask(x.size(1))

         # 1) Masked Attention
         MASKED_ATTENTION = self.__masked_attention(
-            x, x, x, key_padding_mask=padding_mask, attention_mask=attention_mask
+            x, x, x, key_padding_mask=tgt_padding_mask, attention_mask=attention_mask
         )

         # 2) Dropout
@@ -68,7 +69,7 @@
         # 5) Encoder-decoder (cross) attention
         CROSS_ATTENTION = self.__cross_attention(
-            x, k_x, v_x, key_padding_mask=padding_mask
+            x, k_x, v_x, key_padding_mask=src_padding_mask
         )

         # 6) Dropout
@@ -96,7 +97,7 @@
         # 12) Layer Normalization
         x = self.__layer_norm_3(x)

-        return (x, k_x, v_x, padding_mask)
+        return (x, k_x, v_x, src_padding_mask, tgt_padding_mask)

     # use eval to disable dropout etc.
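Note: the decoder now carries two padding masks, the source mask for cross-attention and the target mask for self-attention, on top of the causal mask built by get_causal_attention_mask. A self-contained sketch of a causal mask of that shape; the True-means-blocked boolean convention is an assumption, since the project's helper is not shown in this diff:

import torch

# Upper-triangular boolean mask: position t may attend only to positions <= t.
T = 4
causal = torch.triu(torch.ones(T, T, dtype=torch.bool), diagonal=1)
print(causal)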


@@ -25,6 +25,11 @@ class SpannedMasker:
         self.__forbidden_tokens = forbidden_tokens

+    def reseed(self, seed:int):
+        self.__rng = random.Random(seed)
+
     def mask_sequence(
         self,
         token_sequence: list[int],


@@ -0,0 +1,47 @@
from typing import override

import torch


# custom LR from attention is all you need
class WarmupLR(torch.optim.lr_scheduler.LRScheduler):
    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        warmup_steps: int,
        embedding_size: int,
        warming_multiplier: float = -1.5,
        decaying_multiplier: float = -0.5,
        multiplicative_factor: float = 1.0,
        last_epoch: int = -1,
    ) -> None:
        self.__warmup_steps = warmup_steps
        self.__embedding_size = embedding_size
        self.__warming_multiplier = warming_multiplier
        self.__decaying_multiplier = decaying_multiplier
        self.__multiplicative_factor = multiplicative_factor
        super().__init__(optimizer, last_epoch)

    def __scale_at(self, step: int) -> float:
        step = max(step, 1)
        return (
            self.__multiplicative_factor
            * (self.__embedding_size**self.__decaying_multiplier)
            * min(
                step**self.__decaying_multiplier,
                step * (self.__warmup_steps**self.__warming_multiplier),
            )
        )

    @override
    def get_lr(self) -> list[float]:
        torch.optim.lr_scheduler._warn_get_lr_called_within_step(self)
        step = max(self.last_epoch, 1)
        scale = self.__scale_at(step)
        return [base_lr * scale for base_lr in self.base_lrs]

    def _get_closed_form_lr(self):
        step = max(self.last_epoch, 1)
        scale = self.__scale_at(step)
        return [base_lr * scale for base_lr in self.base_lrs]
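Note: with multiplicative_factor left at 1.0, the scale WarmupLR applies is the schedule from "Attention Is All You Need": embedding_size ** -0.5 * min(step ** -0.5, step * warmup_steps ** -1.5), so the learning rate grows roughly linearly during warmup and then decays as the inverse square root of the step. A quick pure-Python check with the playground settings (embedding size 256, 4000 warmup steps):

d_model, warmup_steps = 256, 4000

def scale(step: int) -> float:
    step = max(step, 1)
    return (d_model ** -0.5) * min(step ** -0.5, step * warmup_steps ** -1.5)

print(scale(1), scale(4000), scale(16000))  # rises until step 4000, then decays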


@@ -5,6 +5,7 @@ from .FeedForwardNetwork import FeedForwardNetwork
 from .TorchMultiHeadAttention import TorchMultiHeadAttention
 from .SpannedMasker import SpannedMasker
 from .DeToken import DeToken
+from .WarmupLR import WarmupLR

 __all__ = [
     "Decoder",
@@ -12,5 +13,6 @@ __all__ = [
     "FeedForwardNetwork",
     "TorchMultiHeadAttention",
     "SpannedMasker",
-    "DeToken"
+    "DeToken",
+    "WarmupLR"
 ]


@@ -37,17 +37,17 @@ class TrainingModel(torch.nn.Module):
         self.__detokener = DeToken(latent_space, vocabulary_size)

-    def forward(self, args: tuple[torch.Tensor, torch.Tensor, torch.Tensor]):
-        encoder_embedder_input, padding_tensor, decoder_embedder_input = args
+    def forward(self, args: tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]):
+        encoder_embedder_input, src_padding, decoder_embedder_input, tgt_padding = args

         encoder_tensor = self.__encoder_embedder(encoder_embedder_input)
         decoder_tensor = self.__decoder_embedder(decoder_embedder_input)

-        encoder_output, _ = self.__encoder((encoder_tensor, padding_tensor))
+        encoder_output, _ = self.__encoder((encoder_tensor, src_padding))

-        decoder_output, _, _, _ = self.__decoder(
-            (decoder_tensor, encoder_tensor, encoder_tensor, None)
+        decoder_output, _, _, _, _ = self.__decoder(
+            (decoder_tensor, encoder_output, encoder_output, src_padding, tgt_padding)
         )

         logits: torch.Tensor = self.__detokener(decoder_output)


@@ -3,6 +3,7 @@ from .task_type import TaskType
 from .post_tokenization import truncate_sequence, pad_sequence, normalize_sequence, create_padding_mask
 from .inference_masking import inference_masking
 from .truncate_rdf_list import truncate_rdf_list
+from .decode_out import tensor2token

 __all__ = [
     "TaskType",
@@ -13,5 +14,6 @@ __all__ = [
     "create_padding_mask",
     "normalize_sequence",
     "inference_masking",
-    "truncate_rdf_list"
+    "truncate_rdf_list",
+    "tensor2token"
 ]


@@ -0,0 +1,27 @@
from typing import Generator

import torch


def tensor2token(tensor: torch.Tensor, end_token: int) -> Generator[list[int]]:
    if len(tensor.shape) < 1 or len(tensor.shape) > 2:
        raise ValueError("Shape is not correct")

    if len(tensor.shape) == 1:
        token_list: list[int] = tensor.tolist()
        token_list.append(end_token)
        yield token_list
        return

    batch_len: int
    batch_len, _ = tensor.shape

    for i in range(batch_len):
        smaller_tensor = tensor[i, :]
        token_list: list[int] = smaller_tensor.tolist()
        token_list.append(end_token)
        yield token_list
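Note: a brief usage sketch of tensor2token as exported above; it assumes the script is run from the repository root so the Project_Model package resolves, and the END id is a placeholder:

import torch
from Project_Model.Libs.Transformer import tensor2token  # assumption: repo root on sys.path

END = 2                                        # placeholder END id
batch = torch.tensor([[4, 5, 6], [7, 8, 9]])   # [B, T] token ids
for seq in tensor2token(batch, END):
    print(seq)                                 # [4, 5, 6, 2] then [7, 8, 9, 2]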


@@ -1,17 +1,20 @@
 def truncate_sequence(
-    sequence: list[int], truncate_at: int, end_token: int
+    sequence: list[int], truncate_at: int, end_token: int, add_ending: bool
 ) -> list[int]:

     if len(sequence) < truncate_at - 1:
-        sequence.append(end_token)
+        if add_ending:
+            sequence.append(end_token)
         return sequence

     if len(sequence) < truncate_at:
-        sequence[-1] = end_token
+        if add_ending:
+            sequence[-1] = end_token
         return sequence

     TRUNCATED_SEQUENCE = sequence[:truncate_at]
-    TRUNCATED_SEQUENCE[-1] = end_token
+    if add_ending:
+        TRUNCATED_SEQUENCE[-1] = end_token

     return TRUNCATED_SEQUENCE
@@ -48,8 +51,9 @@ def normalize_sequence(
     max_length: int,
     pad_token: int,
     end_token: int,
+    add_ending: bool = True
 ) -> tuple[list[int], list[bool]]:

-    new_sequence = truncate_sequence(sequence, max_length, end_token)
+    new_sequence = truncate_sequence(sequence, max_length, end_token, add_ending)
     new_sequence = pad_sequence(new_sequence, max_length, pad_token)

     PADDING_MASK = create_padding_mask(new_sequence, pad_token)
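Note: a brief usage sketch of normalize_sequence with the new add_ending flag left at its default; ids are placeholders and the script is assumed to run from the repository root:

from Project_Model.Libs.Transformer import normalize_sequence  # assumption: repo root on sys.path

PAD, END = 0, 2                                   # placeholder ids
seq, mask = normalize_sequence([7, 8, 9], 6, PAD, END)
print(seq)   # [7, 8, 9, 2, 0, 0]: END appended, then padded to length 6
print(mask)  # flags the PAD positions, as the playgrounds use it for key_padding_mask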

Binary file not shown.


@@ -16,3 +16,4 @@ urllib3==2.5.0
 wheel==0.45.1
 Wikipedia-API==0.8.1
 SQLAlchemy
+torch