Fixes for evaluation
This commit is contained in:
@@ -1,8 +1,14 @@
|
||||
from .model_utils import decompose_nano_socrates, create_standalone_model
|
||||
from .model_utils import decompose_nano_socrates, create_standalone_model, train2inference
|
||||
from .ModelType import ModelType
|
||||
from .decode_batch import decode_batch
|
||||
from .metrics import precision, recall, accuracy, f1, meteor, bleu, rouge, average, rdf2txt, txt2rdf, rdf_completion_1, rdf_completion_2, remove_padding, balance_paddings
|
||||
|
||||
# Public API of the TransformerUtils package.
# Fix: the old entry `"create_standalone_model"` (without a trailing comma)
# survived next to its replacement in the merge residue — adjacent string
# literals concatenate silently in Python, corrupting the list. Each name
# now appears exactly once, with explicit commas.
__all__ = [
    "ModelType",
    "decompose_nano_socrates",
    "create_standalone_model",
    "decode_batch",
    "train2inference",
    "precision", "recall", "accuracy", "f1", "meteor", "bleu", "rouge", "average",
    "rdf2txt", "txt2rdf", "rdf_completion_1", "rdf_completion_2", "remove_padding", "balance_paddings",
]
|
||||
16
Project_Model/Libs/TransformerUtils/decode_batch.py
Normal file
16
Project_Model/Libs/TransformerUtils/decode_batch.py
Normal file
@@ -0,0 +1,16 @@
|
||||
import torch
|
||||
import Project_Model.Libs.BPE as BPE
|
||||
|
||||
def decode_batch(batch: torch.Tensor, tokenizer: "BPE.TokeNanoCore", uknonw_token: int) -> list[str]:
    """Decode a (BATCH, SEQ) tensor of token ids into one string per row.

    Any id strictly greater than ``tokenizer.vocabulary_size`` is replaced
    with ``uknonw_token`` before decoding, so out-of-vocabulary ids cannot
    reach the tokenizer.  (The parameter name ``uknonw_token`` is a typo
    for "unknown_token" but is kept for keyword-argument compatibility.)

    Fix: the original ran ``batch.tolist()[i]`` inside the loop, converting
    the WHOLE batch to a Python list once per row (quadratic work); the
    conversion is now hoisted out of the loop.
    """
    # NOTE(review): the guard uses ``>`` — an id equal to vocabulary_size
    # passes through unchanged; confirm whether ``>=`` was intended.
    strings: list[str] = []
    for tokens in batch.tolist():  # one conversion for the whole batch
        safe_tokens = [uknonw_token if t > tokenizer.vocabulary_size else t for t in tokens]
        strings.append(tokenizer.decode(safe_tokens))
    return strings
|
||||
99
Project_Model/Libs/TransformerUtils/metrics.py
Normal file
99
Project_Model/Libs/TransformerUtils/metrics.py
Normal file
@@ -0,0 +1,99 @@
|
||||
import evaluate as eval
|
||||
|
||||
# Module-level metric handles, loaded once at import time so that the
# bleu()/rouge()/meteor() helpers below do not re-download/reload the
# metric on every call.
# NOTE(review): `eval` here is the `evaluate` package aliased at the top
# of the file — it shadows the `eval` builtin within this module.
BLEU = eval.load("bleu")
ROUGE = eval.load("rouge")
METEOR = eval.load("meteor")
|
||||
|
||||
def precision(ref: list[int], pred: list[int]):
    """Weighted precision of *pred* against *ref* (zero_division treated as 0)."""
    return eval.load("precision").compute(
        predictions=pred,
        references=ref,
        average="weighted",
        zero_division=0,
    )
|
||||
|
||||
|
||||
def recall(ref: list[int], pred: list[int]):
    """Weighted recall of *pred* against *ref* (zero_division treated as 0)."""
    return eval.load("recall").compute(
        predictions=pred,
        references=ref,
        average="weighted",
        zero_division=0,
    )
|
||||
|
||||
|
||||
def accuracy(ref: list[int], pred: list[int]):
    """Token-level accuracy of *pred* against *ref*."""
    return eval.load("accuracy").compute(predictions=pred, references=ref)
|
||||
|
||||
|
||||
def meteor(ref: list[str], pred: list[str]):
    """METEOR score of *pred* against *ref*, using the cached module-level metric."""
    return METEOR.compute(predictions=pred, references=ref)
|
||||
|
||||
|
||||
def bleu(ref: list[str], pred: list[str]):
    """BLEU score of *pred* against *ref*, using the cached module-level metric."""
    return BLEU.compute(predictions=pred, references=ref)
|
||||
|
||||
|
||||
def rouge(ref: list[str], pred: list[str]):
    """ROUGE scores of *pred* against *ref*, using the cached module-level metric."""
    return ROUGE.compute(predictions=pred, references=ref)
|
||||
|
||||
|
||||
def f1(precision: float, recall: float):
    """Harmonic mean (F1) of *precision* and *recall*.

    Fix: returns 0.0 when both inputs are 0 — the standard F1 convention —
    instead of raising ZeroDivisionError.

    NOTE(review): the parameter names shadow the module-level precision()/
    recall() functions; kept unchanged for keyword-argument callers.
    """
    denominator = precision + recall
    if denominator == 0:
        return 0.0
    return (2 * recall * precision) / denominator
|
||||
|
||||
|
||||
def average(array: list[float]):
    """Arithmetic mean of *array*.

    Fix: an empty list returns 0.0 instead of raising ZeroDivisionError
    (metric aggregation over zero samples is treated as zero).
    """
    if not array:
        return 0.0
    return sum(array) / len(array)
|
||||
|
||||
|
||||
def rdf2txt(ref: list[str], pred: list[str]):
    """Score the RDF-to-text direction: (bleu, rouge, meteor) results."""
    return (bleu(ref, pred), rouge(ref, pred), meteor(ref, pred))
|
||||
|
||||
def txt2rdf(ref: list[int], pred: list[int]):
    """Score the text-to-RDF direction: (precision, recall) results."""
    return (precision(ref, pred), recall(ref, pred))
|
||||
|
||||
def rdf_completion_1(ref: list[int], pred: list[int]):
    """Score RDF completion (variant 1): token accuracy only."""
    return accuracy(ref, pred)
|
||||
|
||||
|
||||
def rdf_completion_2(ref: list[int], pred: list[int]):
    """Score RDF completion (variant 2): (precision, recall) results."""
    return (precision(ref, pred), recall(ref, pred))
|
||||
|
||||
|
||||
def remove_padding(seq: list[int], pad_token: int, end_token: int):
    """Strip every *pad_token* from *seq* and guarantee it ends with *end_token*.

    Behaviour kept from the original: padding is removed EVERYWHERE in the
    sequence, not only at the tail.

    Fix: an empty or all-padding input no longer raises IndexError on
    ``clean_seq[-1]`` — it now yields ``[end_token]``.
    """
    clean_seq = [token for token in seq if token != pad_token]
    # Append the terminator unless the (non-empty) sequence already ends with it.
    if not clean_seq or clean_seq[-1] != end_token:
        clean_seq.append(end_token)
    return clean_seq
|
||||
|
||||
|
||||
def balance_paddings(seq_1: list[int], seq_2: list[int], pad_token: int):
    """Force *seq_2* to the length of *seq_1* and return ``(seq_1, seq_2)``.

    A shorter *seq_2* is padded IN PLACE with *pad_token* (the caller's
    list is mutated); a longer *seq_2* is truncated via a fresh slice (the
    caller's list is left untouched).  *seq_1* is never modified.
    """
    target_len = len(seq_1)
    deficit = target_len - len(seq_2)
    if deficit > 0:
        seq_2.extend([pad_token] * deficit)
    elif deficit < 0:
        seq_2 = seq_2[:target_len]
    return (seq_1, seq_2)
|
||||
@@ -1,13 +1,13 @@
|
||||
import torch
|
||||
from Project_Model.Libs.Embedder import NanoSocratesEmbedder
|
||||
from Project_Model.Libs.Transformer import TrainingModel, NanoSocraDecoder, NanoSocratEncoder, DeToken, Encoder, Decoder
|
||||
from Project_Model.Libs.Transformer import TrainingModel,NanoSocratesCore, NanoSocraDecoder, NanoSocratEncoder, DeToken, Encoder, Decoder
|
||||
from .ModelType import ModelType
|
||||
|
||||
|
||||
|
||||
def decompose_nano_socrates(
|
||||
model: TrainingModel, vocabulary_size: int, embedding_size: int
|
||||
) -> tuple[TrainingModel, NanoSocratEncoder, NanoSocraDecoder]:
|
||||
model: TrainingModel | NanoSocratesCore , vocabulary_size: int, embedding_size: int
|
||||
) -> tuple[TrainingModel | NanoSocratesCore, NanoSocratEncoder, NanoSocraDecoder]:
|
||||
|
||||
encoder_pieces, decoder_pieces = model.take_pieces()
|
||||
encoder_embedder, encoder, encoder_detokener = encoder_pieces
|
||||
@@ -19,6 +19,26 @@ def decompose_nano_socrates(
|
||||
NanoSocraDecoder(decoder_embedder, decoder, decoder_detokener),
|
||||
)
|
||||
|
||||
def train2inference(
    train_model: TrainingModel,
    inference_model: NanoSocratesCore
) -> NanoSocratesCore:
    """Transfer the trained pieces of *train_model* into *inference_model*.

    The training model is decomposed via ``take_pieces()`` into its
    encoder and decoder triples (embedder, core, detokener), those pieces
    are loaded into *inference_model* in place, and the (mutated)
    inference model is returned.
    """
    (enc_emb, encoder, enc_det), (dec_emb, decoder, dec_det) = train_model.take_pieces()

    inference_model.load_pieces(
        enc_emb,
        dec_emb,
        encoder,
        decoder,
        enc_det,
        dec_det,
    )
    return inference_model
|
||||
|
||||
|
||||
|
||||
def create_standalone_model(
|
||||
model_type: ModelType,
|
||||
|
||||
Reference in New Issue
Block a user