24 Commits

Author SHA1 Message Date
GassiGiuseppe
3446870291 typo 2025-10-10 22:27:01 +02:00
GassiGiuseppe
e76dbeb9a7 typo 2025-10-10 22:26:06 +02:00
GassiGiuseppe
96610612fe Batcher added 2025-10-10 20:10:08 +02:00
Christian Risi
bed9718f27 Added BPE small vocabulary 2025-10-10 11:40:39 +02:00
GassiGiuseppe
93865bee8a typo 2025-10-09 22:26:17 +02:00
Christian Risi
1c0ddb8753 Merge branch 'dev.embedder' of https://repositories.communitynotfound.work/PoliBa-DeepLearning/NanoSocrates into dev.embedder 2025-10-09 22:23:36 +02:00
Christian Risi
51399f9dc9 commit of toy dataset with whole batch 2025-10-09 22:22:42 +02:00
GassiGiuseppe
d1ba4ae026 last update for collab
( we are gonna run it on a 100 yey)
2025-10-09 21:57:05 +02:00
Christian Risi
db0090981c Merge branch 'dev.embedder' of https://repositories.communitynotfound.work/PoliBa-DeepLearning/NanoSocrates into dev.embedder 2025-10-09 21:53:45 +02:00
Christian Risi
e1c5649d67 updated to overfit over toy dataset 2025-10-09 21:53:42 +02:00
GassiGiuseppe
0bca241662 update environment yaml 2025-10-09 20:53:45 +02:00
GassiGiuseppe
005d7af6a0 lil update of requirements 2025-10-09 20:30:06 +02:00
GassiGiuseppe
9068db550e Merge branch 'dev.embedder' of https://repositories.communitynotfound.work/PoliBa-DeepLearning/NanoSocrates into dev.embedder 2025-10-09 19:44:46 +02:00
GassiGiuseppe
d8f81e1a47 that god can have mercy upon us 2025-10-09 19:43:50 +02:00
Christian Risi
a67df9724e Merge branch 'dev.embedder' of https://repositories.communitynotfound.work/PoliBa-DeepLearning/NanoSocrates into dev.embedder 2025-10-09 18:14:33 +02:00
Christian Risi
c5fd57d854 Updated train playground 2025-10-09 18:14:29 +02:00
GassiGiuseppe
ee253c39f4 Merge branch 'dev.embedder' of https://repositories.communitynotfound.work/PoliBa-DeepLearning/NanoSocrates into dev.embedder 2025-10-09 13:31:37 +02:00
GassiGiuseppe
2036b4015f added logistic collector 2025-10-09 13:31:16 +02:00
Christian Risi
aac7675b30 Pipeline fix and added a util to decode 2025-10-09 13:24:48 +02:00
GassiGiuseppe
d2fdeb18a2 bla bla doctor 2025-10-09 12:41:47 +02:00
Christian Risi
f3b83eda3d Rework 2025-10-09 11:37:46 +02:00
Christian Risi
0158db2dce Fixed a bug where I took encoder embeddings rather than encoder output 2025-10-09 11:37:21 +02:00
Christian Risi
ba592c3480 Disabled Softmax 2025-10-09 11:36:56 +02:00
Christian Risi
1f9c30b531 Added Custom Learning Rate 2025-10-09 11:36:40 +02:00
27 changed files with 8728 additions and 856 deletions

BIN
Assets/Model/small/bpe-small.json LFS Normal file

Binary file not shown.

File diff suppressed because one or more lines are too long

View File

@@ -1,125 +0,0 @@
import random
import torch
import pandas as pd
from pathlib import Path
import Project_Model.Libs.Embedder as Embedder
import Project_Model.Libs.BPE as BPE
import Project_Model.Libs.Transformer as Transformer
import Project_Model.Libs.TorchShims as torch_shims
from Project_Model.Libs.Training.learning_rade_shedulers import Custom_lr
from Project_Model.Libs.Training.logistic_collector import LogitsCollector # import the external collector
# set a fixed seed
torch.manual_seed(0)
random.seed(0)
DEVICE = torch_shims.get_default_device()
torch.set_default_device(DEVICE)
# BPE Init
VOCABULARY_PATH = Path("Assets/Model/toy_10/toy_dictionary.json")
SPECIAL_VOC = BPE.default_special_tokens()
VOCABULARY = BPE.load_nanos_vocabulary(VOCABULARY_PATH)
TOKENANO = BPE.TokeNanoCore(VOCABULARY, SPECIAL_VOC)
# Constants
TOKEN_SPACE_SIZE = TOKENANO.vocabulary_size + 1
EMBEDDED_SIZE = 256
FEED_FORWARD_MULTIPLIER = 4
ATTENTION_HEADS = 4
SENTENCE_LENGTH = 256
NUMBER_OF_BLOCKS = 2
MAX_EPOCHS = int(1e3)
PAD_TOKEN = TOKENANO.encode("<PAD>")[0]
END_TOKEN = TOKENANO.encode("<END>")[0]
# Load CSV
TOY_DATASET_PATH = Path("Assets/Dataset/1-hop/toy/rdf_text.csv")
TOY_DATASET = pd.read_csv(TOY_DATASET_PATH)
TOY_BATCH_INPUT_LIST: list[list[int]] = []
TOY_BATCH_PADDING_LIST: list[list[bool]] = []
TOY_BATCH_TARGET_LIST: list[list[int]] = []
TOY_BATCH_DECODER_DEFAULT: list[list[int]] = []
for index, row in TOY_DATASET.iterrows():
    RDFs: str = row["RDFs"]
    Abstract: str = row["Abstract"]
    input_tokens = TOKENANO.encode(RDFs) # encoder input ids
    output_tokens = TOKENANO.encode(Abstract)[1:] # decoder target ids (shifted left)
    decoder_default_tokens = TOKENANO.encode("<SOS>") # decoder input starts with <SOS>
    input_tokens, padding = Transformer.normalize_sequence(
        input_tokens, SENTENCE_LENGTH, PAD_TOKEN, END_TOKEN
    ) # pad/trim + end token
    output_tokens, _ = Transformer.normalize_sequence(
        output_tokens, SENTENCE_LENGTH, PAD_TOKEN, END_TOKEN
    ) # pad/trim + end token
    decoder_default_tokens = Transformer.pad_sequence(
        decoder_default_tokens, SENTENCE_LENGTH, PAD_TOKEN
    ) # pad with PAD up to SENTENCE_LENGTH
    TOY_BATCH_INPUT_LIST.append(input_tokens)
    TOY_BATCH_PADDING_LIST.append(padding)
    TOY_BATCH_TARGET_LIST.append(output_tokens)
    TOY_BATCH_DECODER_DEFAULT.append(decoder_default_tokens)
# Training loop
LOSS_HISTORY = []
NANOSOCRATES = Transformer.TrainingModel(
TOKEN_SPACE_SIZE,
EMBEDDED_SIZE,
FEED_FORWARD_MULTIPLIER,
ATTENTION_HEADS,
NUMBER_OF_BLOCKS,
)
collector = LogitsCollector(PAD_TOKEN, END_TOKEN, TOKENANO) # collects logits and decodes
NANOSOCRATES.train()
cross_entropy = torch.nn.CrossEntropyLoss(ignore_index=PAD_TOKEN)
optimizer = torch.optim.AdamW(NANOSOCRATES.parameters())
scheduler = Custom_lr(EMBEDDED_SIZE, 4000) # step each optimizer step
current_epoch = 0
BATCH_SIZE = min(32, len(TOY_BATCH_INPUT_LIST)) # small batch to stabilize
while current_epoch < MAX_EPOCHS:
    # simple fixed mini-batch from the top; later you can shuffle/slice
    enc = torch.tensor(TOY_BATCH_INPUT_LIST[:BATCH_SIZE], dtype=torch.long) # [B,T] encoder token ids
    pad = torch.tensor(TOY_BATCH_PADDING_LIST[:BATCH_SIZE], dtype=torch.bool) # [B,T] True where encoder PAD is present
    tgt = torch.tensor(TOY_BATCH_TARGET_LIST[:BATCH_SIZE], dtype=torch.long) # [B,T] decoder targets (ground-truth)
    # decoder prefix buffer: <SOS> at pos 0, PAD elsewhere (no shift here) # we will fill it step by step
    dec = torch.tensor(TOY_BATCH_DECODER_DEFAULT[:BATCH_SIZE], dtype=torch.long) # [B,T]
    total_loss = 0.0
    collector.reset() # start fresh for this epoch
    T = tgt.size(1) # sequence length
    for t in range(T):
        optimizer.zero_grad(set_to_none=True) # clear grads for this token step
        prefix = dec[:, : t + 1] # [B, t+1] current decoder prefix
        dec_pad_mask = prefix.eq(PAD_TOKEN) # [B, t+1] True where PAD inside prefix
        # one-step logits given prefix (trainer model expects 4 args now)
        logits_t: torch.Tensor = NANOSOCRATES((enc, pad, prefix, dec_pad_mask)) # [B,V] logits for step t
        collector.add(logits_t) # store logits for decoding later
        loss_t = cross_entropy(logits_t, tgt[:, t]) # CE expects raw logits; PAD ignored
        loss_t.backward() # backprop for this step
        optimizer.step() # update params
        scheduler.step() # Noam/warmup: step per optimizer step
        total_loss = float(loss_t.detach()) # keep last step loss for logging
        # teacher forcing: reveal the correct token for next position
        if t < T - 1:
            dec[:, t + 1] = tgt[:, t] # write ground-truth into next slot
    current_epoch += 1
    print(f"EPOCH {current_epoch}\n\tLoss: {total_loss:.6f}") # simple log
    collector.print_decoded() # print decoded predictions for the batch

View File

@@ -1,221 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "c8741a8f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"EPOCH 1\n",
"\tLoss: 7.424792\n",
"[0] \n",
"[1] \n",
"[2] \n",
"[3] \n",
"[4] \n",
"[5] \n",
"[6] \n",
"[7] \n",
"[8] \n",
"[9] \n"
]
}
],
"source": [
"import random\n",
"import torch\n",
"import pandas as pd\n",
"from pathlib import Path\n",
"import Project_Model.Libs.Embedder as Embedder\n",
"import Project_Model.Libs.BPE as BPE\n",
"import Project_Model.Libs.Transformer as Transformer\n",
"import Project_Model.Libs.TorchShims as torch_shims\n",
"from Project_Model.Libs.Training.learning_rade_shedulers import Custom_lr\n",
"\n",
"import torch\n",
"\n",
"class LogitsCollector:\n",
" def __init__(self, pad_token: int, end_token: int, tokenizer) -> None:\n",
" self.__pad_token = pad_token # used to skip PAD\n",
" self.__end_token = end_token # used to stop at END\n",
" self.__tokenizer = tokenizer # exposes .decode(list[int]) -> str\n",
" self.__steps: list[torch.Tensor] = [] # list of per-step logits [B,V]\n",
"\n",
" def reset(self) -> None:\n",
" self.__steps.clear() # clear history\n",
"\n",
" def add(self, logits_step: torch.Tensor) -> None:\n",
" if logits_step.dim() == 3: # handle [B,1,V]\n",
" logits_step = logits_step[:, -1, :] # -> [B,V]\n",
" self.__steps.append(logits_step.detach()) # store raw logits (detached)\n",
"\n",
" def tokens(self) -> list[list[int]]:\n",
" if not self.__steps:\n",
" return []\n",
" stack = torch.stack(self.__steps, dim=0) # [T,B,V]\n",
" probs = torch.softmax(stack, dim=-1) # softmax over vocab -> [T,B,V]\n",
" ids = probs.argmax(dim=-1).transpose(0, 1) # greedy ids -> [B,T]\n",
" out: list[list[int]] = []\n",
" for row in ids.tolist():\n",
" seq: list[int] = []\n",
" for tok in row:\n",
" if tok == self.__end_token: # stop on END\n",
" break\n",
" if tok == self.__pad_token: # skip PAD\n",
" continue\n",
" seq.append(tok)\n",
" out.append(seq)\n",
" return out\n",
"\n",
" def print_decoded(self) -> None:\n",
" for i, seq in enumerate(self.tokens()):\n",
" try:\n",
" text = self.__tokenizer.decode(seq) # decode tokens to string\n",
" except Exception:\n",
" text = str(seq) # fallback to ids\n",
" print(f\"[{i}] {text}\") # simple print\n",
"\n",
"\n",
"# set a fixed seed\n",
"torch.manual_seed(0)\n",
"random.seed(0)\n",
"DEVICE = torch_shims.get_default_device()\n",
"torch.set_default_device(DEVICE)\n",
"\n",
"# BPE Init\n",
"VOCABULARY_PATH = Path(\"Assets/Model/toy_10/toy_dictionary.json\")\n",
"SPECIAL_VOC = BPE.default_special_tokens()\n",
"\n",
"VOCABULARY = BPE.load_nanos_vocabulary(VOCABULARY_PATH)\n",
"TOKENANO = BPE.TokeNanoCore(VOCABULARY, SPECIAL_VOC)\n",
"\n",
"# Constants\n",
"TOKEN_SPACE_SIZE = TOKENANO.vocabulary_size + 1\n",
"EMBEDDED_SIZE = 256\n",
"FEED_FORWARD_MULTIPLIER = 4\n",
"ATTENTION_HEADS = 4\n",
"SENTENCE_LENGTH = 256\n",
"NUMBER_OF_BLOCKS = 2\n",
"MAX_EPOCHS = int(1e3)\n",
"\n",
"PAD_TOKEN = TOKENANO.encode(\"<PAD>\")[0]\n",
"END_TOKEN = TOKENANO.encode(\"<END>\")[0]\n",
"\n",
"# Load CSV\n",
"TOY_DATASET_PATH = Path(\"Assets/Dataset/1-hop/toy/rdf_text.csv\")\n",
"TOY_DATASET = pd.read_csv(TOY_DATASET_PATH)\n",
"\n",
"TOY_BATCH_INPUT_LIST: list[list[int]] = []\n",
"TOY_BATCH_PADDING_LIST: list[list[bool]] = []\n",
"TOY_BATCH_TARGET_LIST: list[list[int]] = []\n",
"TOY_BATCH_DECODER_DEFAULT: list[list[int]] = []\n",
"\n",
"for index, row in TOY_DATASET.iterrows():\n",
" RDFs: str = row[\"RDFs\"]\n",
" Abstract: str = row[\"Abstract\"]\n",
"\n",
" input_tokens = TOKENANO.encode(RDFs) # encoder input ids\n",
" output_tokens = TOKENANO.encode(Abstract)[1:] # decoder target ids (shifted left)\n",
" decoder_default_tokens = TOKENANO.encode(\"<SOS>\") # decoder input starts with <SOS>\n",
"\n",
" input_tokens, padding = Transformer.normalize_sequence(\n",
" input_tokens, SENTENCE_LENGTH, PAD_TOKEN, END_TOKEN\n",
" ) # pad/trim + end token\n",
" output_tokens, _ = Transformer.normalize_sequence(\n",
" output_tokens, SENTENCE_LENGTH, PAD_TOKEN, END_TOKEN\n",
" ) # pad/trim + end token\n",
" decoder_default_tokens = Transformer.pad_sequence(\n",
" decoder_default_tokens, SENTENCE_LENGTH, PAD_TOKEN\n",
" ) # pad with PAD up to SENTENCE_LENGTH\n",
"\n",
" TOY_BATCH_INPUT_LIST.append(input_tokens)\n",
" TOY_BATCH_PADDING_LIST.append(padding)\n",
" TOY_BATCH_TARGET_LIST.append(output_tokens)\n",
" TOY_BATCH_DECODER_DEFAULT.append(decoder_default_tokens)\n",
"\n",
"# Training loop\n",
"LOSS_HISTORY = []\n",
"NANOSOCRATES = Transformer.TrainingModel(\n",
" TOKEN_SPACE_SIZE,\n",
" EMBEDDED_SIZE,\n",
" FEED_FORWARD_MULTIPLIER,\n",
" ATTENTION_HEADS,\n",
" NUMBER_OF_BLOCKS,\n",
")\n",
"\n",
"collector = LogitsCollector(PAD_TOKEN, END_TOKEN, TOKENANO) # collects logits and decodes\n",
"\n",
"NANOSOCRATES.train()\n",
"cross_entropy = torch.nn.CrossEntropyLoss(ignore_index=PAD_TOKEN)\n",
"optimizer = torch.optim.AdamW(NANOSOCRATES.parameters())\n",
"scheduler = Custom_lr(EMBEDDED_SIZE, 4000) # step each optimizer step\n",
"\n",
"current_epoch = 0\n",
"BATCH_SIZE = min(32, len(TOY_BATCH_INPUT_LIST)) # small batch to stabilize\n",
"\n",
"while current_epoch < MAX_EPOCHS:\n",
" # simple fixed mini-batch from the top; later you can shuffle/slice\n",
" enc = torch.tensor(TOY_BATCH_INPUT_LIST[:BATCH_SIZE], dtype=torch.long) # [B,T] encoder token ids\n",
" pad = torch.tensor(TOY_BATCH_PADDING_LIST[:BATCH_SIZE], dtype=torch.bool) # [B,T] True where encoder PAD is present\n",
" tgt = torch.tensor(TOY_BATCH_TARGET_LIST[:BATCH_SIZE], dtype=torch.long) # [B,T] decoder targets (ground-truth)\n",
"\n",
" # decoder prefix buffer: <SOS> at pos 0, PAD elsewhere (no shift here) # we will fill it step by step\n",
" dec = torch.tensor(TOY_BATCH_DECODER_DEFAULT[:BATCH_SIZE], dtype=torch.long) # [B,T]\n",
"\n",
" total_loss = 0.0\n",
" collector.reset() # start fresh for this epoch\n",
"\n",
" T = tgt.size(1) # sequence length\n",
" for t in range(T):\n",
" optimizer.zero_grad(set_to_none=True) # clear grads for this token step\n",
"\n",
" prefix = dec[:, : t + 1] # [B, t+1] current decoder prefix\n",
" dec_pad_mask = prefix.eq(PAD_TOKEN) # [B, t+1] True where PAD inside prefix\n",
"\n",
" # one-step logits given prefix (trainer model expects 4 args now)\n",
" logits_t: torch.Tensor = NANOSOCRATES((enc, pad, prefix, dec_pad_mask)) # [B,V] logits for step t\n",
" collector.add(logits_t) # store logits for decoding later\n",
"\n",
" loss_t = cross_entropy(logits_t, tgt[:, t]) # CE expects raw logits; PAD ignored\n",
" loss_t.backward() # backprop for this step\n",
" optimizer.step() # update params\n",
" scheduler.step() # Noam/warmup: step per optimizer step\n",
"\n",
" total_loss = float(loss_t.detach()) # keep last step loss for logging\n",
"\n",
" # teacher forcing: reveal the correct token for next position\n",
" if t < T - 1:\n",
" dec[:, t + 1] = tgt[:, t] # write ground-truth into next slot\n",
"\n",
" current_epoch += 1\n",
" print(f\"EPOCH {current_epoch}\\n\\tLoss: {total_loss:.6f}\") # simple log\n",
" collector.print_decoded() # print decoded predictions for the batch\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "deep_learning",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -1,205 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "0afbf498",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"EPOCH 1\n",
"\tLoss: 9.174470901489258\n",
"EPOCH 2\n",
"\tLoss: 9.20919132232666\n",
"EPOCH 3\n",
"\tLoss: 9.227106094360352\n",
"EPOCH 4\n",
"\tLoss: 9.172086715698242\n",
"EPOCH 5\n",
"\tLoss: 9.180150985717773\n"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[31m---------------------------------------------------------------------------\u001b[39m",
"\u001b[31mKeyboardInterrupt\u001b[39m Traceback (most recent call last)",
"\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[1]\u001b[39m\u001b[32m, line 116\u001b[39m\n\u001b[32m 113\u001b[39m step_target = target_logits[:, i] \u001b[38;5;66;03m# [B]\u001b[39;00m\n\u001b[32m 115\u001b[39m loss = cross_entropy(step_logits,step_target) \u001b[38;5;66;03m# now loss is without softmax\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m116\u001b[39m \u001b[43mloss\u001b[49m\u001b[43m.\u001b[49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;66;03m# DAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMN\u001b[39;00m\n\u001b[32m 117\u001b[39m last_loss = loss\n\u001b[32m 118\u001b[39m optimizer.step()\n",
"\u001b[36mFile \u001b[39m\u001b[32m~/miniconda3/envs/deep_learning/lib/python3.13/site-packages/torch/_tensor.py:638\u001b[39m, in \u001b[36mTensor.backward\u001b[39m\u001b[34m(self, gradient, retain_graph, create_graph, inputs)\u001b[39m\n\u001b[32m 595\u001b[39m \u001b[38;5;250m\u001b[39m\u001b[33mr\u001b[39m\u001b[33;03m\"\"\"Computes the gradient of current tensor wrt graph leaves.\u001b[39;00m\n\u001b[32m 596\u001b[39m \n\u001b[32m 597\u001b[39m \u001b[33;03mThe graph is differentiated using the chain rule. If the tensor is\u001b[39;00m\n\u001b[32m (...)\u001b[39m\u001b[32m 635\u001b[39m \u001b[33;03m used to compute the :attr:`tensors`. Defaults to ``None``.\u001b[39;00m\n\u001b[32m 636\u001b[39m \u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 637\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m has_torch_function_unary(\u001b[38;5;28mself\u001b[39m):\n\u001b[32m--> \u001b[39m\u001b[32m638\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mhandle_torch_function\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 639\u001b[39m \u001b[43m \u001b[49m\u001b[43mTensor\u001b[49m\u001b[43m.\u001b[49m\u001b[43mbackward\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 640\u001b[39m \u001b[43m \u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 641\u001b[39m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[32m 642\u001b[39m \u001b[43m \u001b[49m\u001b[43mgradient\u001b[49m\u001b[43m=\u001b[49m\u001b[43mgradient\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 643\u001b[39m \u001b[43m \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m=\u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 644\u001b[39m \u001b[43m \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m=\u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 645\u001b[39m \u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m=\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 646\u001b[39m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 647\u001b[39m torch.autograd.backward(\n\u001b[32m 648\u001b[39m \u001b[38;5;28mself\u001b[39m, gradient, retain_graph, create_graph, inputs=inputs\n\u001b[32m 649\u001b[39m )\n",
"\u001b[36mFile \u001b[39m\u001b[32m~/miniconda3/envs/deep_learning/lib/python3.13/site-packages/torch/overrides.py:1725\u001b[39m, in \u001b[36mhandle_torch_function\u001b[39m\u001b[34m(public_api, relevant_args, *args, **kwargs)\u001b[39m\n\u001b[32m 1721\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m _is_torch_function_mode_enabled():\n\u001b[32m 1722\u001b[39m \u001b[38;5;66;03m# if we're here, the mode must be set to a TorchFunctionStackMode\u001b[39;00m\n\u001b[32m 1723\u001b[39m \u001b[38;5;66;03m# this unsets it and calls directly into TorchFunctionStackMode's torch function\u001b[39;00m\n\u001b[32m 1724\u001b[39m \u001b[38;5;28;01mwith\u001b[39;00m _pop_mode_temporarily() \u001b[38;5;28;01mas\u001b[39;00m mode:\n\u001b[32m-> \u001b[39m\u001b[32m1725\u001b[39m result = \u001b[43mmode\u001b[49m\u001b[43m.\u001b[49m\u001b[43m__torch_function__\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpublic_api\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtypes\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 1726\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m result \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mNotImplemented\u001b[39m:\n\u001b[32m 1727\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m result\n",
"\u001b[36mFile \u001b[39m\u001b[32m~/miniconda3/envs/deep_learning/lib/python3.13/site-packages/torch/utils/_device.py:103\u001b[39m, in \u001b[36mDeviceContext.__torch_function__\u001b[39m\u001b[34m(self, func, types, args, kwargs)\u001b[39m\n\u001b[32m 101\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m func \u001b[38;5;129;01min\u001b[39;00m _device_constructors() \u001b[38;5;129;01mand\u001b[39;00m kwargs.get(\u001b[33m\"\u001b[39m\u001b[33mdevice\u001b[39m\u001b[33m\"\u001b[39m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[32m 102\u001b[39m kwargs[\u001b[33m\"\u001b[39m\u001b[33mdevice\u001b[39m\u001b[33m\"\u001b[39m] = \u001b[38;5;28mself\u001b[39m.device\n\u001b[32m--> \u001b[39m\u001b[32m103\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"\u001b[36mFile \u001b[39m\u001b[32m~/miniconda3/envs/deep_learning/lib/python3.13/site-packages/torch/_tensor.py:647\u001b[39m, in \u001b[36mTensor.backward\u001b[39m\u001b[34m(self, gradient, retain_graph, create_graph, inputs)\u001b[39m\n\u001b[32m 637\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m has_torch_function_unary(\u001b[38;5;28mself\u001b[39m):\n\u001b[32m 638\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m handle_torch_function(\n\u001b[32m 639\u001b[39m Tensor.backward,\n\u001b[32m 640\u001b[39m (\u001b[38;5;28mself\u001b[39m,),\n\u001b[32m (...)\u001b[39m\u001b[32m 645\u001b[39m inputs=inputs,\n\u001b[32m 646\u001b[39m )\n\u001b[32m--> \u001b[39m\u001b[32m647\u001b[39m \u001b[43mtorch\u001b[49m\u001b[43m.\u001b[49m\u001b[43mautograd\u001b[49m\u001b[43m.\u001b[49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 648\u001b[39m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mgradient\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m=\u001b[49m\u001b[43minputs\u001b[49m\n\u001b[32m 649\u001b[39m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
"\u001b[36mFile \u001b[39m\u001b[32m~/miniconda3/envs/deep_learning/lib/python3.13/site-packages/torch/autograd/__init__.py:354\u001b[39m, in \u001b[36mbackward\u001b[39m\u001b[34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\u001b[39m\n\u001b[32m 349\u001b[39m retain_graph = create_graph\n\u001b[32m 351\u001b[39m \u001b[38;5;66;03m# The reason we repeat the same comment below is that\u001b[39;00m\n\u001b[32m 352\u001b[39m \u001b[38;5;66;03m# some Python versions print out the first line of a multi-line function\u001b[39;00m\n\u001b[32m 353\u001b[39m \u001b[38;5;66;03m# calls in the traceback and some print out the last line\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m354\u001b[39m \u001b[43m_engine_run_backward\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 355\u001b[39m \u001b[43m \u001b[49m\u001b[43mtensors\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 356\u001b[39m \u001b[43m \u001b[49m\u001b[43mgrad_tensors_\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 357\u001b[39m \u001b[43m \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 358\u001b[39m \u001b[43m \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 359\u001b[39m \u001b[43m \u001b[49m\u001b[43minputs_tuple\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 360\u001b[39m \u001b[43m \u001b[49m\u001b[43mallow_unreachable\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[32m 361\u001b[39m \u001b[43m \u001b[49m\u001b[43maccumulate_grad\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[32m 362\u001b[39m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
"\u001b[36mFile \u001b[39m\u001b[32m~/miniconda3/envs/deep_learning/lib/python3.13/site-packages/torch/autograd/graph.py:829\u001b[39m, in \u001b[36m_engine_run_backward\u001b[39m\u001b[34m(t_outputs, *args, **kwargs)\u001b[39m\n\u001b[32m 827\u001b[39m unregister_hooks = _register_logging_hooks_on_whole_graph(t_outputs)\n\u001b[32m 828\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m829\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mVariable\u001b[49m\u001b[43m.\u001b[49m\u001b[43m_execution_engine\u001b[49m\u001b[43m.\u001b[49m\u001b[43mrun_backward\u001b[49m\u001b[43m(\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# Calls into the C++ engine to run the backward pass\u001b[39;49;00m\n\u001b[32m 830\u001b[39m \u001b[43m \u001b[49m\u001b[43mt_outputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\n\u001b[32m 831\u001b[39m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;66;03m# Calls into the C++ engine to run the backward pass\u001b[39;00m\n\u001b[32m 832\u001b[39m \u001b[38;5;28;01mfinally\u001b[39;00m:\n\u001b[32m 833\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m attach_logging_hooks:\n",
"\u001b[31mKeyboardInterrupt\u001b[39m: "
]
}
],
"source": [
"import random\n",
"import torch\n",
"import pandas as pd\n",
"from pathlib import Path\n",
"import Project_Model.Libs.Embedder as Embedder\n",
"import Project_Model.Libs.BPE as BPE\n",
"import Project_Model.Libs.Transformer as Transformer\n",
"import Project_Model.Libs.TorchShims as torch_shims\n",
"\n",
"# set a fixed seed\n",
"torch.manual_seed(0)\n",
"random.seed(0)\n",
"DEVICE = torch_shims.get_default_device()\n",
"torch.set_default_device(DEVICE)\n",
"\n",
"# set a default device\n",
"\n",
"# BPE Init\n",
"VOCABULARY_PATH = Path(\"Assets/Model/toy_10/toy_dictionary.json\")\n",
"SPECIAL_VOC = BPE.default_special_tokens()\n",
"\n",
"VOCABULARY = BPE.load_nanos_vocabulary(VOCABULARY_PATH)\n",
"TOKENANO = BPE.TokeNanoCore(VOCABULARY, SPECIAL_VOC)\n",
"\n",
"\n",
"# Constants\n",
"TOKEN_SPACE_SIZE = TOKENANO.vocabulary_size + 1\n",
"EMBEDDED_SIZE = 256\n",
"FEED_FORWARD_MULTIPLIER = 4\n",
"ATTENTION_HEADS = 4\n",
"SENTENCE_LENGTH = 256\n",
"NUMBER_OF_BLOCKS = 2\n",
"MAX_EPOCHS = int(1e3)\n",
"\n",
"\n",
"PAD_TOKEN = TOKENANO.encode(\"<PAD>\")[0]\n",
"END_TOKEN = TOKENANO.encode(\"<END>\")[0]\n",
"\n",
"\n",
"# Load CSV\n",
"TOY_DATASET_PATH = Path(\"Assets/Dataset/1-hop/toy/rdf_text.csv\")\n",
"\n",
"TOY_DATASET = pd.read_csv(TOY_DATASET_PATH)\n",
"\n",
"TOY_BATCH_INPUT_LIST: list[list[int]] = []\n",
"TOY_BATCH_PADDING_LIST: list[list[bool]] = []\n",
"TOY_BATCH_TARGET_LIST: list[list[int]] = []\n",
"TOY_BATCH_DECODER_DEFAULT: list[list[int]]= []\n",
"\n",
"\n",
"for index, row in TOY_DATASET.iterrows():\n",
"\n",
" RDFs: str = row[\"RDFs\"]\n",
" Abstract: str = row[\"Abstract\"]\n",
"\n",
" input_tokens = TOKENANO.encode(RDFs)\n",
" output_tokens = TOKENANO.encode(Abstract)[1:]\n",
" decoder_default_tokens = TOKENANO.encode(\"<SOS>\")\n",
"\n",
" input_tokens, padding = Transformer.normalize_sequence(\n",
" input_tokens, SENTENCE_LENGTH, PAD_TOKEN, END_TOKEN\n",
" )\n",
" output_tokens, _ = Transformer.normalize_sequence(\n",
" output_tokens, SENTENCE_LENGTH, PAD_TOKEN, END_TOKEN\n",
" )\n",
" decoder_default_tokens, _ = Transformer.normalize_sequence(\n",
" decoder_default_tokens, SENTENCE_LENGTH, PAD_TOKEN, END_TOKEN\n",
" )\n",
"\n",
" TOY_BATCH_INPUT_LIST.append(input_tokens)\n",
" TOY_BATCH_PADDING_LIST.append(padding)\n",
" TOY_BATCH_TARGET_LIST.append(output_tokens)\n",
" TOY_BATCH_DECODER_DEFAULT.append(decoder_default_tokens)\n",
"\n",
"# Training loop\n",
"LOSS_HISTORY = []\n",
"NANOSOCRATES = Transformer.TrainingModel(\n",
" TOKEN_SPACE_SIZE,\n",
" EMBEDDED_SIZE,\n",
" FEED_FORWARD_MULTIPLIER,\n",
" ATTENTION_HEADS,\n",
" NUMBER_OF_BLOCKS\n",
")\n",
"\n",
"NANOSOCRATES.train() # nothing important, activates dropout etc \n",
"cross_entropy = torch.nn.CrossEntropyLoss(ignore_index=PAD_TOKEN)\n",
"optimizer = torch.optim.AdamW(NANOSOCRATES.parameters())\n",
"scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 4)\n",
"\n",
"last_loss = 0\n",
"\n",
"current_epoch = 0\n",
"while current_epoch < MAX_EPOCHS:\n",
"\n",
" encoder_list = torch.tensor([TOY_BATCH_INPUT_LIST[0]])\n",
" decoder_list = torch.tensor([TOY_BATCH_DECODER_DEFAULT[0]])\n",
" padding_list = torch.tensor([TOY_BATCH_PADDING_LIST[0]], dtype=torch.bool)\n",
" target_logits = torch.tensor([TOY_BATCH_TARGET_LIST[0]]) # Transform target into logits\n",
"\n",
" optimizer.zero_grad() # to clear gradient\n",
"\n",
" last_loss = 0.0\n",
"\n",
" for i in range(0, SENTENCE_LENGTH):\n",
"\n",
" # optimizer.zero_grad()\n",
" # forward \n",
" logits: torch.Tensor = NANOSOCRATES((encoder_list, padding_list, decoder_list))\n",
" # probabilities = torch.softmax(logits,2)\n",
" \n",
"\n",
" step_logits = logits[:, i, :] # [B, V]\n",
" step_target = target_logits[:, i] # [B]\n",
"\n",
" loss = cross_entropy(step_logits,step_target) # now loss is without softmax\n",
" loss.backward() # DAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMN\n",
" last_loss = loss\n",
" optimizer.step()\n",
" optimizer.zero_grad()\n",
" scheduler.step()\n",
" \n",
" probabilities = torch.softmax(logits,2)\n",
" most_probable_tokens = torch.argmax(probabilities, 2) \n",
" if i < SENTENCE_LENGTH - 1:\n",
" decoder_list[:,i+1] = most_probable_tokens[:,i]\n",
"\n",
"\n",
" current_epoch += 1\n",
"\n",
" if current_epoch % 1 == 0:\n",
" print(f\"EPOCH {current_epoch}\\n\\tLoss: {last_loss}\")\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "deep_learning",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

File diff suppressed because one or more lines are too long

170
Playgrounds/prova.py Normal file
View File

@@ -0,0 +1,170 @@
import random
import torch
import pandas as pd
from pathlib import Path
import Project_Model.Libs.Embedder as Embedder
import Project_Model.Libs.BPE as BPE
import Project_Model.Libs.Transformer as Transformer
import Project_Model.Libs.TorchShims as torch_shims
# set a fixed seed
torch.manual_seed(0)
random.seed(0)
DEVICE = torch_shims.get_default_device()
torch.set_default_device(DEVICE)
# set a default device
# BPE Init
VOCABULARY_PATH = Path("Assets/Model/toy_10/toy_dictionary.json")
SPECIAL_VOC = BPE.default_special_tokens()
VOCABULARY = BPE.load_nanos_vocabulary(VOCABULARY_PATH)
TOKENANO = BPE.TokeNanoCore(VOCABULARY, SPECIAL_VOC)
# Constants
TOKEN_SPACE_SIZE = TOKENANO.vocabulary_size + 1
EMBEDDED_SIZE = 256
FEED_FORWARD_MULTIPLIER = 4
ATTENTION_HEADS = 8
SENTENCE_LENGTH = 256
NUMBER_OF_BLOCKS = 4
MAX_EPOCHS = int(1e3)
PAD_TOKEN = TOKENANO.encode("<PAD>")[0]
END_TOKEN = TOKENANO.encode("<END>")[0]
# Load CSV
TOY_DATASET_PATH = Path("Assets/Dataset/1-hop/toy/rdf_text.csv")
TOY_DATASET = pd.read_csv(TOY_DATASET_PATH)
TOY_BATCH_INPUT_LIST: list[list[int]] = []
TOY_BATCH_PADDING_LIST: list[list[bool]] = []
TOY_BATCH_TARGET_LIST: list[list[int]] = []
TOY_BATCH_DECODER_DEFAULT: list[list[int]]= []
for index, row in TOY_DATASET.iterrows():
    RDFs: str = row["RDFs"]
    Abstract: str = row["Abstract"]
    input_tokens = TOKENANO.encode(RDFs)
    output_tokens = TOKENANO.encode(Abstract)[1:]
    decoder_default_tokens = TOKENANO.encode("<SOS>")
    input_tokens, padding = Transformer.normalize_sequence(
        input_tokens, SENTENCE_LENGTH, PAD_TOKEN, END_TOKEN
    )
    output_tokens, _ = Transformer.normalize_sequence(
        output_tokens, SENTENCE_LENGTH, PAD_TOKEN, END_TOKEN
    )
    decoder_default_tokens, _ = Transformer.normalize_sequence(
        decoder_default_tokens, SENTENCE_LENGTH, PAD_TOKEN, END_TOKEN, False
    )
    TOY_BATCH_INPUT_LIST.append(input_tokens)
    TOY_BATCH_PADDING_LIST.append(padding)
    TOY_BATCH_TARGET_LIST.append(output_tokens)
    TOY_BATCH_DECODER_DEFAULT.append(decoder_default_tokens)
    output_tokens = TOKENANO.encode(RDFs)
    input_tokens = TOKENANO.encode(Abstract)[1:]
    decoder_default_tokens = TOKENANO.encode("<SOS>")
    input_tokens, padding = Transformer.normalize_sequence(
        input_tokens, SENTENCE_LENGTH, PAD_TOKEN, END_TOKEN
    )
    output_tokens, _ = Transformer.normalize_sequence(
        output_tokens, SENTENCE_LENGTH, PAD_TOKEN, END_TOKEN
    )
    decoder_default_tokens, _ = Transformer.normalize_sequence(
        decoder_default_tokens, SENTENCE_LENGTH, PAD_TOKEN, END_TOKEN, False
    )
    TOY_BATCH_INPUT_LIST.append(input_tokens)
    TOY_BATCH_PADDING_LIST.append(padding)
    TOY_BATCH_TARGET_LIST.append(output_tokens)
    TOY_BATCH_DECODER_DEFAULT.append(decoder_default_tokens)
# Training loop
LOSS_HISTORY = []
NANOSOCRATES = Transformer.TrainingModel(
TOKEN_SPACE_SIZE,
EMBEDDED_SIZE,
FEED_FORWARD_MULTIPLIER,
ATTENTION_HEADS,
NUMBER_OF_BLOCKS
)
cross_entropy = torch.nn.CrossEntropyLoss(ignore_index=PAD_TOKEN)
optimizer = torch.optim.AdamW(NANOSOCRATES.parameters())
scheduler = Transformer.WarmupLR(optimizer, 4000, EMBEDDED_SIZE)
last_loss = 0
current_epoch = 0
while current_epoch < MAX_EPOCHS:
    optimizer.zero_grad()
    encoder_list = torch.tensor(TOY_BATCH_INPUT_LIST[:])
    decoder_list = torch.tensor(TOY_BATCH_DECODER_DEFAULT[:])
    src_padding = torch.tensor(TOY_BATCH_PADDING_LIST[:], dtype=torch.bool)
    # Transform target into logits
    target_logits = torch.tensor(TOY_BATCH_TARGET_LIST[:])
    last_loss = 0
    last_prediction: torch.Tensor
    for i in range(0, SENTENCE_LENGTH):
        optimizer.zero_grad()
        tgt_padding = decoder_list.eq(PAD_TOKEN)
        logits: torch.Tensor = NANOSOCRATES((encoder_list, src_padding, decoder_list, tgt_padding))
        prob = torch.softmax(logits, 2)
        most_probable_tokens = torch.argmax(prob, 2)
        last_prediction = most_probable_tokens
        logits = logits[:,:i,:]
        logits = logits.permute(0, 2, 1)
        loss : torch.Tensor = cross_entropy(logits, target_logits[:, 0:i])
        # loss : torch.Tensor = cross_entropy(logits, target_logits)
        last_loss = loss
        loss.backward()
        optimizer.step()
        scheduler.step()
        if i < SENTENCE_LENGTH - 1:
            decoder_list[:,i+1] = target_logits[:,i]
    current_epoch += 1
    if current_epoch % 1 == 0:
        print(f"EPOCH {current_epoch}\n\tLoss: {last_loss}")
        for encoded_sentence, expected_sentence in zip(
            Transformer.tensor2token(last_prediction[:,:], END_TOKEN), # type: ignore
            Transformer.tensor2token(target_logits[:,:], END_TOKEN)
        ):
            decoded_sentence = TOKENANO.decode(encoded_sentence)
            decoded_target = TOKENANO.decode(expected_sentence)
            print(f"\tACTUAL:\n\t\t{decoded_sentence}\n\tEXPECTED:\n\t\t{decoded_target}\n")

View File

@@ -189,7 +189,7 @@ class NanoSocratesBPE(Encoder):
            token_stack.appendleft(right_token)
            token_stack.appendleft(left_token)

-        return UTF_8_STRING_ARR.decode("utf-8")
+        return UTF_8_STRING_ARR.decode("utf-8", errors="ignore")

    def __token_decode(self, token_id: int) -> tuple[int, int]:

View File

@@ -31,7 +31,7 @@ class TokeNanoCore:
    def vocabulary_size(self):
        BPE_VOC_SIZE = self.__bpe_encoder.vocabulary_size
        SPECIAL_VOC_SIZE = self.__special_encoder.vocabulary_size
-        return BPE_VOC_SIZE + SPECIAL_VOC_SIZE
+        return BPE_VOC_SIZE + SPECIAL_VOC_SIZE + 1

    def encode(self, corpus: str) -> list[int]:
        output: list[int] = []

View File

@@ -1,11 +0,0 @@
from ....Libs.Embedder.Classes.NanoSocratesEmbedder import NanoSocratesEmbedder
import torch
class BatchEmbedder(torch.nn.Module):

    def __init__(self, vocabulary_size: int, embedding_size: int) -> None:
        super().__init__()
        self.__embedder = NanoSocratesEmbedder(vocabulary_size,embedding_size)

    def forward(self, )

View File

@@ -1,49 +1,68 @@
 import random
-from typing import Generator
+import sys
+from typing import Any, Generator
 import pandas as pd
+from pathlib import Path
+from Project_Model.Libs.Batch.Enums.TaskType import TaskType
 import Project_Model.Libs.BPE as BPE
-from Scripts.Libs.CleaningPipeline.special_token import SpecialToken
-from Project_Model.Libs.Transformer.Classes.SpannedMasker import SpannedMasker
+# from Scripts.Libs.CleaningPipeline.special_token import SpecialToken
+from Project_Model.Libs.Transformer import SpannedMasker, truncate_rdf_list, normalize_sequence
 from TokenCompletation import TokenCompletationTransformer
-from Project_Model.Libs.BPE.Enums.SpecialToken import SpecialToken
+from Project_Model.Libs.BPE import SpecialToken
+
+MAX_LENGHT = 128

 class Batcher:

-    def __init__(self, dataset_path: str, batch_size:int, tokenizer: BPE.TokeNanoCore, masker: SpannedMasker) -> None:
+    def __init__(self, dataset_path: Path, tokenizer: BPE.TokeNanoCore, masker: SpannedMasker, seed:int = 0) -> None:
         # ABSTRACT, TRIPLE
         # tasks:
         # rdf2text: X: TRIPLE, Y: ABSTRACT
         # text2rdf: X: ABSTRACT, X:TRIPLE
         # masking ( call masker): X: incomplete_triple Y: complete_triple (as exam)
         # completation: X: TRIPLE SUBSET, Y: related TRIPLE SUBSET
+        # it will truncate
+        # it will instantiate spanmaskter and truncator
         self._dataset_path = dataset_path
-        self._batch_size = batch_size
         self._tokenizer = tokenizer
         self._masker = masker
-        sotl = self._tokenizer.encode(SpecialToken.START_TRIPLE_LIST.value)
-        eos = self._tokenizer.encode(SpecialToken.END_OF_SEQUENCE.value)
-        self._token_completation = TokenCompletationTransformer(sotl,eos)
+        self._seed = seed
+        # self._token_completation = TokenCompletationTransformer(sotl,eos)
+        self._completation_task_token_truncator = truncate_rdf_list

-    def get_batch(self)-> Generator[pd.DataFrame]:
-        for batch in pd.read_csv(self._dataset_path, chunksize= int(self._batch_size/4)):
+    #now we support 3 task
+    def batch(self, batch_size)-> Generator[tuple[list[list[int]], list[list[int]], list[list[int]],list[list[int]], TaskType],Any,Any]:
+        """
+        Yields: X,Y,padding_X
+        """
+        RNG = random.Random(self._seed)
+        self._masker.reseed(self._seed)
+        for batch in pd.read_csv(self._dataset_path, chunksize= batch_size):
             tokenized_batch = pd.DataFrame()
+            # encode
             tokenized_batch[["Abstract","RDFs"]] = (
                 batch[["Abstract","RDFs"]]
                 .map(lambda t: self._tokenizer.encode(t))
             )
-            rdf2txt_batch = self.__rdf2txt_transformation(tokenized_batch)
-            txt2rdf_batch = self.__txt2rdf_transformation(tokenized_batch)
-            mask_batch = self.__masking_trasformation(tokenized_batch)
-            completation_batch = self.__token_completation_task(tokenized_batch)
-
-            output = pd.concat([rdf2txt_batch,txt2rdf_batch,mask_batch,completation_batch],ignore_index=True)
-            output = output.sample(frac=1).reset_index(drop=True)
-            yield output
+            X,Y, padding_X, padding_Y = self.__rdf2txt_transformation(tokenized_batch)
+            yield X,Y, padding_X, padding_Y, TaskType.RDF2TXT
+            X,Y, padding_X, padding_Y, = self.__txt2rdf_transformation(tokenized_batch)
+            yield X,Y, padding_X, padding_Y, TaskType.TEXT2RDF
+            X,Y, padding_X, padding_Y, = self.__masking_trasformation(tokenized_batch)
+            yield X,Y, padding_X, padding_Y, TaskType.MASKING
+            X,Y, padding_X, padding_Y, = self.__token_completation_task(tokenized_batch, RNG.randint(0,sys.maxsize))
+            yield X,Y, padding_X, padding_Y, TaskType.COMPLETATION
+
+            # output = pd.concat([rdf2txt_batch,txt2rdf_batch,completation_batch],ignore_index=True)
+            # output = output.sample(frac=1).reset_index(drop=True)
+            # self.decode_debug(output)
+            # yield output

     def __random_subset_rdfs(self, batch: pd.DataFrame, seed = 0):
@@ -57,48 +76,89 @@ class Batcher:
             to_list
         )

+    def decode_debug(self, batch: pd.DataFrame):
+        decoded = pd.DataFrame()
+        decoded[["X","Y"]] = (
+            batch[["X","Y"]]
+            .map(lambda t: self._tokenizer.decode(t))
+        )
+        print(decoded)
+
+    def __normalization(self, X:list[list[int]], Y: list[list[int]])-> tuple[list[list[int]], list[list[int]], list[list[int]], list[list[int]]]:
+        pad_token = self._tokenizer.encode(SpecialToken.PAD.value)[0]
+        end_token = self._tokenizer.encode(SpecialToken.END_OF_SEQUENCE.value)[0]
+        out_X = []
+        padding_X = []
+        out_Y = []
+        padding_Y = []
+        for x in X:
+            out_x, padding_x = normalize_sequence(x,MAX_LENGHT,pad_token,end_token,True)
+            out_X.append(out_x)
+            padding_X.append(padding_x)
+        for y in Y:
+            out_y, padding_y = normalize_sequence(y,MAX_LENGHT,pad_token,end_token,True)
+            out_Y.append(out_y)
+            padding_Y.append(padding_y)
+        return out_X,out_Y,padding_X,padding_Y
+
     def __rdf2txt_transformation(self, batch: pd.DataFrame):
-        batch = batch.rename(columns={"RDFs": "X", "Abstract": "Y"})
-        return batch[["X", "Y"]]
+        task_token = self._tokenizer.encode(SpecialToken.RDF_TO_TEXT.value)
+        out = batch.rename(columns={"RDFs":"X","Abstract":"Y"})[["X","Y"]]
+        out["X"] = [task_token + x for x in out["X"]]
+        return self.__normalization(out["X"].to_list(),out["Y"].to_list())

     def __txt2rdf_transformation(self, batch: pd.DataFrame):
-        batch = batch.rename(columns={ "Abstract": "X","RDFs": "Y"})
-        return batch[["X", "Y"]]
+        task_token = self._tokenizer.encode(SpecialToken.TEXT_TO_RDF.value)
+        out = batch.rename(columns={"Abstract":"X","RDFs":"Y"})[["X","Y"]]
+        out["X"] = [task_token + x for x in out["X"]]
+        return self.__normalization(out["X"].to_list(),out["Y"].to_list())

     def __masking_trasformation(self, batch: pd.DataFrame):
-        # mask_sequence: List[int] -> Tuple[List[int], List[int]]
-        xy_tuples = batch["RDFs"].apply(self._masker.mask_sequence) # Series of (X, Y)
-        output = batch.copy()
-        # Expand into two columns preserving the original index
-        output[["X", "Y"]] = pd.DataFrame(xy_tuples.tolist(), index=batch.index)
-        return output[["X", "Y"]]
+        X = []
+        Y = []
+        for rdf in batch["RDFs"]:
+            x,y = self._masker.mask_sequence(rdf)
+            X.append(x)
+            Y.append(y)
+        return self.__normalization(X,Y)

-    def __token_completation_task(self, batch: pd.DataFrame):
-        xy_tuples = batch["RDFs"].apply(self._token_completation.get_completation_tuple)
-        output = batch.copy()
-        output[["X", "Y"]] = pd.DataFrame(xy_tuples.tolist(), index=batch.index)
-        return output[["X", "Y"]]
+    def __token_completation_task(self, batch: pd.DataFrame, minibatch_seed: int):
+        continue_triple_token = self._tokenizer.encode(SpecialToken.CONTINUE_RDF.value)[0]
+        eot = self._tokenizer.encode(SpecialToken.END_TRIPLE.value)[0]
+        X = []
+        Y = []
+        for rdf in batch["RDFs"]:
+            x,y = self._completation_task_token_truncator(rdf, 0.5, continue_triple_token, eot, minibatch_seed)
+            X.append(x)
+            Y.append(y)
+        return self.__normalization(X,Y)

-"""
-DATASET_PATH = "Assets/Dataset/Tmp/rdf_text.csv"
-VOCABULARY_path = "Assets/Dataset/Tmp/trimmed.json"
-from pathlib import Path
-VOCABULARY = BPE.load_nanos_vocabulary(Path(VOCABULARY_path))
-SPECIAL_LIST = BPE.default_special_tokens()
-TOKENANO = BPE.TokeNanoCore(VOCABULARY, SPECIAL_LIST)
-SPECIAL_TOKENS: set[int] = set(TOKENANO.encode("".join(SPECIAL_LIST)))
-MASKER = SpannedMasker(TOKENANO.vocabulary_size,SPECIAL_TOKENS)
-prova = "<ABS>Cactus Flower is a 1969 American screwball comedy film directed by Gene Saks, and starring Walter Matthau, Ingrid Bergman and Goldie Hawn, who won an Academy Award for her performance.The screenplay was adapted by I. A. L. Diamond from the 1965 Broadway play of the same title written by Abe Burrows, which, in turn, is based on the French play Fleur de cactus by Pierre Barillet and Jean-Pierre Gredy. Cactus Flower was the ninth highest-grossing film of 1969."
-print(TOKENANO.encode(prova))
-batcher = Batcher(DATASET_PATH,8,TOKENANO,MASKER)
-for batch in batcher.get_batch():
-    print(batch)
-"""
+if __name__ == "__main__":
+
+    DATASET_PATH = Path("Assets/Dataset/Tmp/rdf_text.csv")
+    VOCABULARY_path = "Assets/Dataset/Tmp/trimmed.json"
+    from pathlib import Path
+    VOCABULARY = BPE.load_nanos_vocabulary(Path(VOCABULARY_path))
+    SPECIAL_LIST = BPE.default_special_tokens()
+    TOKENANO = BPE.TokeNanoCore(VOCABULARY, SPECIAL_LIST)
+    SPECIAL_TOKENS: set[int] = set(TOKENANO.encode("".join(SPECIAL_LIST)))
+    MASKER = SpannedMasker(TOKENANO.vocabulary_size,SPECIAL_TOKENS)
+    prova = "<ABS>Cactus Flower is a 1969 American screwball comedy film directed by Gene Saks, and starring Walter Matthau, Ingrid Bergman and Goldie Hawn, who won an Academy Award for her performance.The screenplay was adapted by I. A. L. Diamond from the 1965 Broadway play of the same title written by Abe Burrows, which, in turn, is based on the French play Fleur de cactus by Pierre Barillet and Jean-Pierre Gredy. Cactus Flower was the ninth highest-grossing film of 1969."
+    print(TOKENANO.encode(prova))
+    batcher = Batcher(DATASET_PATH,TOKENANO,MASKER)
+    for batch in batcher.batch(8):
+        print(batch)

View File

@@ -1,41 +0,0 @@
import numpy as np
# custom LR from attention is all you need
class Custom_lr():

    def __init__(self, d_model: int, warmup_step:int) -> None:
        self.__d_model = d_model
        self.__warmup_step = warmup_step
        self.__epoch = 0

    def step(self) -> int:
        self.__epoch += 1
        return (self.__d_model ** -0.5) * min(self.__epoch ** -0.5,
                                              self.__epoch * (self.__warmup_step ** -1.5))

# OTHER LR
# Learning rate schedules (matching visualization parameters)
def step_lr(epoch, lr):
    # StepLR: step_size=20, gamma=0.5 (from visualization)
    return lr * 0.5 if epoch % 20 == 0 and epoch > 0 else lr

def exp_lr(epoch, lr):
    # ExponentialLR: gamma=0.95 (from visualization)
    return lr * 0.95

def cosine_lr(epoch, lr):
    # CosineAnnealingLR: lr_min=0.001, lr_max=0.1, max_epochs=100 (from visualization)
    lr_min, lr_max = 0.001, 0.1
    max_epochs = 100
    return lr_min + 0.5 * (lr_max - lr_min) * (1 + np.cos(epoch * np.pi / max_epochs))

def cyclical_lr(epoch, lr):
    # CyclicalLR: base_lr=0.001, max_lr=0.1, step_size=20 (from visualization)
    base_lr = 0.001
    max_lr = 0.1
    step_size = 20
    cycle = np.floor(1 + epoch / (2 * step_size))
    x = np.abs(epoch / step_size - 2 * cycle + 1)
    return base_lr + (max_lr - base_lr) * max(0, (1 - x))

View File

@@ -25,8 +25,8 @@ class LogitsCollector:
         for row in ids.tolist():
             seq: list[int] = []
             for tok in row:
-                if tok == self.__end_token: # stop on END
-                    break
+                # if tok == self.__end_token: # stop on END
+                #     break
                 if tok == self.__pad_token: # skip PAD
                     continue
                 seq.append(tok)
@@ -36,6 +36,7 @@ class LogitsCollector:
     def print_decoded(self) -> None:
         for i, seq in enumerate(self.tokens()):
             try:
+                # text = text + self.__end_token
                 text = self.__tokenizer.decode(seq) # decode tokens to string
             except Exception:
                 text = str(seq) # fallback to ids

View File

@@ -46,14 +46,14 @@ class Decoder(nn.Module):
         ]
     ):  # -> list[torch.Tensor]: # k_x = v_x . While x_q = x
         # WARNING: args is needed to have sequential
-        x, k_x, v_x, padding_mask,encoder_padding_mask = args
+        x, k_x, v_x, src_padding_mask, tgt_padding_mask = args

         # build of attention mask
         attention_mask = get_causal_attention_mask(x.size(1))

         # 1) Masked Attention
         MASKED_ATTENTION = self.__masked_attention(
-            x, x, x, key_padding_mask=padding_mask, attention_mask=attention_mask
+            x, x, x, key_padding_mask=tgt_padding_mask, attention_mask=attention_mask
         )

         # 2) Dropout
@@ -69,7 +69,7 @@ class Decoder(nn.Module):

         # 5) Encoder-decoder (cross) attention
         CROSS_ATTENTION = self.__cross_attention(
-            x, k_x, v_x, key_padding_mask=encoder_padding_mask
+            x, k_x, v_x, key_padding_mask=src_padding_mask
         )

         # 6) Dropout
@@ -97,7 +97,7 @@ class Decoder(nn.Module):

         # 12) Layer Normalization
         x = self.__layer_norm_3(x)

-        return (x, k_x, v_x, padding_mask, encoder_padding_mask)
+        return (x, k_x, v_x, src_padding_mask, tgt_padding_mask)

     # use eval to disable dropout ecc

View File

@@ -1,23 +0,0 @@
import torch
from NanoSocratesCore import NanoSocratesCore
class NanoSocrates(torch.nn.Module):

    def __init__(self,
                 embedded_size: int,
                 feed_forward_dim: int,
                 encoder_layers: int,
                 decoder_layers:int,
                 attention_heads: int,
                 vocab_size: int) -> None:
        super().__init__()

        self._model = NanoSocratesCore(
            embedded_size,
            feed_forward_dim,
            encoder_layers,
            decoder_layers,
            attention_heads,
            vocab_size)

View File

@@ -16,11 +16,8 @@ class NanoSocratesCore(torch.nn.Module):
         num_encoder_layers: int = 2,
         num_decoder_layers: int = 2,
         num_attention_heads: int = 4,
-        pad_token: int = 0,
     ) -> None:
-        super().__init__()
-        self.__pad_token = pad_token

         feed_forward_dim = embedding_size * feed_forward_multiplier
         self.__sentence_length = sentence_length
@@ -46,64 +43,69 @@ class NanoSocratesCore(torch.nn.Module):
         self.__input_embeder = NanoSocratesEmbedder(vocab_size, embedding_size)
         self.__output_embedder = NanoSocratesEmbedder(vocab_size, embedding_size)

+    @torch.no_grad() # inference only
     def forward(
         self,
         encoder_input: list[list[int]],
-        decoder_input: list[list[int]], # must start with <SOS> and PAD elsewhere
-        encoder_padding_mask: list[list[bool]], # True where encoder is PAD
+        decoder_input: list[list[int]],
+        encoder_padding_mask: list[list[int]],
     ):
+        if len(encoder_padding_mask) != len(encoder_input):
+            raise Exception("Mismatch in received_dimensions")
+
+        # TODO: check for tensor in input to embedder
         # 1) Embed User-Input for encoders
-        ENCODER_INPUT = self.__input_embeder(encoder_input) # [B,S,E]
+        ENCODER_INPUT = self.__input_embeder(encoder_input)

         # 2) Encode User-Input
-        ENCODER_OUTPUT, encoder_padding_mask = self.__encoder_sequence(
-            (ENCODER_INPUT, encoder_padding_mask) # as tuple
-        ) # [B,S,E], [B,S]
+        ENCODER_OUTPUT, _ = self.__encoder_sequence(ENCODER_INPUT, encoder_padding_mask)

         del ENCODER_INPUT

-        # 3) Autoregressive Output (greedy)
-        LOGITS_HISTORY: list[torch.Tensor] = [] # keep per-step distributions
-        decoder_token_list = [row[:] for row in decoder_input] # copy tokens
-        decoder_phase = 0
         exit_loop = False
+        decoder_token_list = decoder_input[:]
+        decoder_phase = 0
+        LOGITS_HISTORY: list[torch.Tensor] = []

+        # 3) Autoregressive Output
         while not exit_loop:

-            decoder_phase += 1 # move to next position
-            # 3.1) Build decoder key padding mask from current tokens (True where PAD)
-            DECODER_KEY_PADDING_MASK: list[list[bool]] = [
-                [tok == self.__pad_token for tok in row] for row in decoder_token_list
-            ] # [B,T]
-            # 3.2) Embed Decoder Input (full sequence; decoder builds causal mask inside)
-            DECODER_INPUT = self.__output_embedder(decoder_token_list) # [B,T,E]
-            # 3.3) Decode (self-attn uses causal mask internally; we provide PAD masks)
+            # 3.0) Increment Counter
+            decoder_phase += 1
+
+            # 3.1) Embed Decoder Input
+            decoder_input = self.__output_embedder(decoder_token_list)
+
+            # 3.2) Decode Decoder Input
             DECODER_OUTPUT, _, _, _ = self.__decoder_sequence(
-                (DECODER_INPUT, ENCODER_OUTPUT, ENCODER_OUTPUT,
-                DECODER_KEY_PADDING_MASK, encoder_padding_mask)
-            ) # [B,T,E]
-            del DECODER_INPUT
+                decoder_input, ENCODER_OUTPUT, ENCODER_OUTPUT
+            )

-            # 3.4) Project to token space
-            LOGITS = self.__linear(DECODER_OUTPUT) # [B,T,V]
+            # 3.3) Go back to Token space
+            # TODO: change name
+            LOGITS = self.__linear(DECODER_OUTPUT)
             del DECODER_OUTPUT

-            # 3.5) Probabilities and greedy pick at current step
-            TOKEN_PROBABILITIES = torch.softmax(LOGITS, dim=-1) # [B,T,V]
-            LOGITS_HISTORY.append(TOKEN_PROBABILITIES) # store for this step
-            step_idx = decoder_phase - 1 # 0-based
-            TOKEN_IDS = TOKEN_PROBABILITIES[:, step_idx, :].argmax(dim=-1).tolist() # [B] -> list[int]
-
-            # 3.6) Write prediction into next slot (the slot is PAD)
-            if step_idx + 1 < self.__sentence_length:
-                for b, tok in enumerate(TOKEN_IDS):
-                    decoder_token_list[b][step_idx + 1] = tok # feed next position
-
-            # 3.7) Stop when we filled the sequence
+            # 3.4) Transform in probabilities
+            # TODO: change name
+            TOKEN_PROBABILITIES = torch.softmax(LOGITS, dim=-1)
+            del LOGITS
+
+            LOGITS_HISTORY.append(TOKEN_PROBABILITIES)
+
+            # 3.5) Take most probable tokens
+            TOKEN_IDS = torch.argmax(TOKEN_PROBABILITIES, -1)
+
+            # TODO: check for dimensions and for efficiency
+            DECODER_TOKEN_TENSOR = torch.tensor(decoder_token_list)
+            DECODER_TOKEN_TENSOR[:, decoder_phase] = TOKEN_IDS
+            decoder_token_list = DECODER_TOKEN_TENSOR.tolist()
+            del TOKEN_IDS
+            del DECODER_TOKEN_TENSOR
+
+            # 3.6) Check if we generated all tokens
             if decoder_phase == self.__sentence_length - 1:
                 exit_loop = True

-        return LOGITS_HISTORY # list of [B,T,V] (per step)
+        return LOGITS_HISTORY

View File

@@ -25,6 +25,11 @@ class SpannedMasker:
         self.__forbidden_tokens = forbidden_tokens

+    def reseed(self, seed:int):
+        self.__rng = random.Random(seed)
+
     def mask_sequence(
         self,
         token_sequence: list[int],

View File

@@ -0,0 +1,47 @@
from typing import override
import torch
# custom LR from attention is all you need
class WarmupLR(torch.optim.lr_scheduler.LRScheduler):

    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        warmup_steps: int,
        embedding_size: int,
        warming_multiplier: float = -1.5,
        decaying_multiplier: float = -0.5,
        multiplicative_factor: float = 1.0,
        last_epoch: int = -1,
    ) -> None:
        self.__warmup_steps = warmup_steps
        self.__embedding_size = embedding_size
        self.__warming_multiplier = warming_multiplier
        self.__decaying_multiplier = decaying_multiplier
        self.__multiplicative_factor = multiplicative_factor
        super().__init__(optimizer, last_epoch)

    def __scale_at(self, step: int) -> float:
        step = max(step, 1)
        return (
            self.__multiplicative_factor
            * (self.__embedding_size**self.__decaying_multiplier)
            * min(
                step**self.__decaying_multiplier,
                step * (self.__warmup_steps**self.__warming_multiplier),
            )
        )

    @override
    def get_lr(self) -> list[float]:
        torch.optim.lr_scheduler._warn_get_lr_called_within_step(self)
        step = max(self.last_epoch, 1)
        scale = self.__scale_at(step)
        return [base_lr * scale for base_lr in self.base_lrs]

    def _get_closed_form_lr(self):
        step = max(self.last_epoch, 1)
        scale = self.__scale_at(step)
        return [base_lr * scale for base_lr in self.base_lrs]
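
For reference, with the default multipliers above the scale that WarmupLR applies to every base learning rate is the warmup rule from "Attention Is All You Need". A standalone sketch, not part of the diff; 256 and 4000 are example values only:

def noam_scale(step: int, embedding_size: int = 256, warmup_steps: int = 4000) -> float:
    # scale = embedding_size^-0.5 * min(step^-0.5, step * warmup_steps^-1.5)
    step = max(step, 1)
    return (embedding_size ** -0.5) * min(step ** -0.5, step * (warmup_steps ** -1.5))

# The scale grows roughly linearly during warmup, peaks near step == warmup_steps,
# then decays proportionally to step^-0.5.
for s in (1, 1000, 4000, 16000):
    print(s, noam_scale(s))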

View File

@@ -5,6 +5,7 @@ from .FeedForwardNetwork import FeedForwardNetwork
 from .TorchMultiHeadAttention import TorchMultiHeadAttention
 from .SpannedMasker import SpannedMasker
 from .DeToken import DeToken
+from .WarmupLR import WarmupLR

 __all__ = [
     "Decoder",
@@ -12,5 +13,6 @@ __all__ = [
     "FeedForwardNetwork",
     "TorchMultiHeadAttention",
     "SpannedMasker",
-    "DeToken"
+    "DeToken",
+    "WarmupLR"
 ]

View File

@@ -24,49 +24,32 @@ class TrainingModel(torch.nn.Module):
             vocabulary_size, latent_space
         )

-        # do NOT share layer weights
-        enc_layers = [
-            Encoder(latent_space, feed_forward_latent_space, attention_heads)
-            for _ in range(layer_number)
-        ]
-        dec_layers = [
-            Decoder(latent_space, feed_forward_latent_space, attention_heads)
-            for _ in range(layer_number)
-        ]
-
-        self.__encoder = torch.nn.Sequential(*enc_layers)
-        self.__decoder = torch.nn.Sequential(*dec_layers)
+        TMP_ENCODERS = [
+            Encoder(latent_space, feed_forward_latent_space, attention_heads)
+        ] * layer_number
+
+        TMP_DECODERS = [
+            Decoder(latent_space, feed_forward_latent_space, attention_heads)
+        ] * layer_number
+
+        self.__encoder = torch.nn.Sequential(*TMP_ENCODERS)
+        self.__decoder = torch.nn.Sequential(*TMP_DECODERS)

         self.__detokener = DeToken(latent_space, vocabulary_size)

-    def forward(
-        self,
-        args: tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]
-    ):
-        # returns logits for the LAST decoder position only -> [B, V]
-        (
-            encoder_embedder_input,   # [B,S] encoder tokens
-            encoder_padding_mask,     # [B,S] True where encoder is PAD
-            decoder_embedder_prefix,  # [B,Tp] decoder prefix (e.g., <SOS> + tokens so far)
-            decoder_padding_mask,     # [B,Tp] True where decoder prefix has PAD
-        ) = args
-
-        # 1) embeddings
-        encoder_tensor = self.__encoder_embedder(encoder_embedder_input) # [B,S,E]
-        decoder_tensor = self.__decoder_embedder(decoder_embedder_prefix) # [B,Tp,E]
-
-        # 2) encode
-        encoder_output, _ = self.__encoder((encoder_tensor, encoder_padding_mask)) # [B,S,E], [B,S]
-
-        # 3) decode (causal mask is built inside the decoder)
+    def forward(self, args: tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]):
+
+        encoder_embedder_input, src_padding, decoder_embedder_input, tgt_padding = args
+
+        encoder_tensor = self.__encoder_embedder(encoder_embedder_input)
+        decoder_tensor = self.__decoder_embedder(decoder_embedder_input)
+
+        encoder_output, _ = self.__encoder((encoder_tensor, src_padding))
+
         decoder_output, _, _, _, _ = self.__decoder(
-            (decoder_tensor, encoder_output, encoder_output,
-             decoder_padding_mask, encoder_padding_mask)
-        ) # [B,Tp,E], ...
-
-        # 4) project only the last time step
-        last_hidden = decoder_output[:, -1:, :] # [B,1,E]
-        step_logits = self.__detokener(last_hidden) # [B,1,V]
-        step_logits = step_logits[:, -1, :] # [B,V]
-
-        return step_logits # logits for one token
+            (decoder_tensor, encoder_output, encoder_output, src_padding, tgt_padding)
+        )
+
+        logits: torch.Tensor = self.__detokener(decoder_output)
+
+        return logits

View File

@@ -3,6 +3,7 @@ from .task_type import TaskType
 from .post_tokenization import truncate_sequence, pad_sequence, normalize_sequence, create_padding_mask
 from .inference_masking import inference_masking
 from .truncate_rdf_list import truncate_rdf_list
+from .decode_out import tensor2token

 __all__ = [
     "TaskType",
@@ -13,5 +14,6 @@ __all__ = [
     "create_padding_mask",
     "normalize_sequence",
     "inference_masking",
-    "truncate_rdf_list"
+    "truncate_rdf_list",
+    "tensor2token"
 ]

View File

@@ -0,0 +1,27 @@
from typing import Generator
import torch
def tensor2token(tensor: torch.Tensor, end_token: int) -> Generator[list[int]]:

    if len(tensor.shape) < 1 or len(tensor.shape) > 2:
        raise ValueError("Shape is not correct")

    if len(tensor.shape) == 1:
        token_list: list[int] = tensor.tolist()
        token_list.append(end_token)
        yield token_list
        return

    batch_len: int
    batch_len, _ = tensor.shape

    for i in range(batch_len):
        smaller_tensor = tensor[i, :]
        token_list: list[int] = smaller_tensor.tolist()
        token_list.append(end_token)
        yield token_list
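
A small usage sketch, not part of the diff; the import path relies on the tensor2token export added to the package __init__ above, and the token ids are made up:

import torch
from Project_Model.Libs.Transformer import tensor2token

greedy_ids = torch.tensor([[4, 9, 2], [7, 7, 1]])  # hypothetical [B, T] argmax output
for tokens in tensor2token(greedy_ids, end_token=3):
    print(tokens)  # [4, 9, 2, 3] then [7, 7, 1, 3]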

View File

@@ -1,16 +1,19 @@
 def truncate_sequence(
-    sequence: list[int], truncate_at: int, end_token: int
+    sequence: list[int], truncate_at: int, end_token: int, add_ending: bool
 ) -> list[int]:

     if len(sequence) < truncate_at - 1:
-        sequence.append(end_token)
+        if add_ending:
+            sequence.append(end_token)
         return sequence

     if len(sequence) < truncate_at:
-        sequence[-1] = end_token
+        if add_ending:
+            sequence[-1] = end_token
         return sequence

     TRUNCATED_SEQUENCE = sequence[:truncate_at]
-    TRUNCATED_SEQUENCE[-1] = end_token
+    if add_ending:
+        TRUNCATED_SEQUENCE[-1] = end_token
     return TRUNCATED_SEQUENCE

@@ -48,8 +51,9 @@ def normalize_sequence(
     max_length: int,
     pad_token: int,
     end_token: int,
+    add_ending: bool = True
 ) -> tuple[list[int], list[bool]]:

-    new_sequence = truncate_sequence(sequence, max_length, end_token)
+    new_sequence = truncate_sequence(sequence, max_length, end_token, add_ending)
     new_sequence = pad_sequence(new_sequence, max_length, pad_token)

     PADDING_MASK = create_padding_mask(new_sequence, pad_token)
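
Taken together, normalize_sequence truncates, optionally writes the end token, pads, and returns the PAD mask. A rough worked example, not part of the diff (illustrative token ids only; it assumes create_padding_mask marks True where the PAD token sits, as the playgrounds above use it for key padding masks):

# e.g. with max_length=6, pad_token=0, end_token=1 and a short input:
# normalize_sequence([5, 6, 7], 6, 0, 1)
#   truncate_sequence   -> [5, 6, 7, 1]              (room left, so <END> is appended)
#   pad_sequence        -> [5, 6, 7, 1, 0, 0]        (padded up to max_length)
#   create_padding_mask -> [False, False, False, False, True, True]
# With add_ending=False the <END> write is skipped and only padding/masking happens.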

Binary file not shown.

View File

@@ -16,3 +16,4 @@ urllib3==2.5.0
 wheel==0.45.1
 Wikipedia-API==0.8.1
 SQLAlchemy
+torch