Christian Risi 2025-10-09 11:37:46 +02:00
parent 0158db2dce
commit f3b83eda3d


@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "id": "adbd9598",
    "metadata": {},
    "outputs": [
@@ -11,30 +11,17 @@
      "output_type": "stream",
      "text": [
       "c:\\Users\\Chris\\miniconda3\\envs\\deep_learning\\Lib\\site-packages\\torch\\utils\\_device.py:103: UserWarning: Aten Op fallback from XPU to CPU happends. This may have performance implications. If need debug the fallback ops please set environment variable `PYTORCH_DEBUG_XPU_FALLBACK=1` (Triggered internally at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\build\\xpu\\ATen\\RegisterXPU_0.cpp:54528.)\n",
-      "  return func(*args, **kwargs)\n",
-      "252.87s - name 'tensor' is not defined\n",
-      "Traceback (most recent call last):\n",
-      "  File \"c:\\Users\\Chris\\miniconda3\\envs\\deep_learning\\Lib\\site-packages\\debugpy\\_vendored\\pydevd\\_pydevd_bundle\\pydevd_vars.py\", line 636, in change_attr_expression\n",
-      "    value = eval(expression, frame.f_globals, frame.f_locals)\n",
-      "  File \"<string>\", line 1, in <module>\n",
-      "NameError: name 'tensor' is not defined\n"
+      "  return func(*args, **kwargs)\n"
      ]
     },
     {
-     "ename": "",
-     "evalue": "",
-     "output_type": "error",
-     "traceback": [
-      "\u001b[1;31mCannot execute code, session has been disposed. Please try restarting the Kernel."
-     ]
-    },
-    {
-     "ename": "",
-     "evalue": "",
-     "output_type": "error",
-     "traceback": [
-      "\u001b[1;31mCannot execute code, session has been disposed. Please try restarting the Kernel. \n",
-      "\u001b[1;31mView Jupyter <a href='command:jupyter.viewOutput'>log</a> for further details."
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "EPOCH 1\n",
+      "\tLoss: 9.161508560180664\n",
+      "EPOCH 2\n",
+      "\tLoss: 9.131484031677246\n"
      ]
     }
    ],
@@ -124,7 +111,7 @@
     ")\n",
     "cross_entropy = torch.nn.CrossEntropyLoss(ignore_index=PAD_TOKEN)\n",
     "optimizer = torch.optim.AdamW(NANOSOCRATES.parameters())\n",
-    "scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 4)\n",
+    "scheduler = Transformer.WarmupLR(optimizer, 4000, EMBEDDED_SIZE)\n",
     "last_loss = 0\n",
     "current_epoch = 0\n",
     "\n",
@@ -146,18 +133,23 @@
     "    optimizer.zero_grad()\n",
     "\n",
     "    logits: torch.Tensor = NANOSOCRATES((encoder_list, padding_list, decoder_list))\n",
+    "    prob = torch.softmax(logits, 2)\n",
     "\n",
-    "    most_probable_tokens = torch.argmax(logits, 2)\n",
+    "    most_probable_tokens = torch.argmax(prob, 2)\n",
     "\n",
-    "    logits = logits[:,i,:]\n",
+    "    logits = logits[:,0:i,:]\n",
+    "    logits = logits.permute(0, 2, 1)\n",
+    "\n",
+    "    loss : torch.Tensor = cross_entropy(logits, target_logits[:, 0:i])\n",
+    "    # loss : torch.Tensor = cross_entropy(logits, target_logits)\n",
     "\n",
-    "    loss = cross_entropy(logits, target_logits[:,i])\n",
     "    last_loss = loss\n",
+    "    loss.backward()\n",
     "    optimizer.step()\n",
     "    scheduler.step()\n",
     "\n",
     "    if i < SENTENCE_LENGTH - 1:\n",
-    "        decoder_list[:,i+1] = most_probable_tokens[:,i]\n",
+    "        decoder_list[:,i+1] = target_logits[:,i]\n",
     "\n",
     "\n",
     "    current_epoch += 1\n",