diff --git a/.vscode/settings.json b/.vscode/settings.json index 1d34b01..cae6d86 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,16 +1,33 @@ { - // For linux - "terminal.integrated.env.linux": { - "PYTHONPATH": "${workspaceFolder}" - }, - // For OSX - "terminal.integrated.env.osx": { - "PYTHONPATH": "${workspaceFolder}" - }, - // For Windows - "terminal.integrated.env.windows": { - "PYTHONPATH": "${workspaceFolder}" - } + // Always treat the project root as the working dir for Jupyter + "jupyter.notebookFileRoot": "${workspaceFolder}", + + // When you click "Run Python File in Terminal", DON'T cd into the file's folder + "python.terminal.executeInFileDir": false, + + // Start new integrated terminals at the project root + "terminal.integrated.cwd": "${workspaceFolder}", + + // Make pytest run from the root without needing a pytest.ini + "python.testing.pytestEnabled": true, + "python.testing.cwd": "${workspaceFolder}", + "python.testing.pytestArgs": ["src/test"], + + // Help Pylance resolve imports like `from src...` without red squiggles + "python.analysis.extraPaths": ["${workspaceFolder}"], + + // For linux + "terminal.integrated.env.linux": { + "PYTHONPATH": "${workspaceFolder}" + }, + // For OSX + "terminal.integrated.env.osx": { + "PYTHONPATH": "${workspaceFolder}" + }, + // For Windows + "terminal.integrated.env.windows": { + "PYTHONPATH": "${workspaceFolder}" + } } // { diff --git a/Scripts/DataCleaning/data_output_models/bpe_corpus.py b/Scripts/DataCleaning/data_output_models/bpe_corpus.py new file mode 100644 index 0000000..a0348b6 --- /dev/null +++ b/Scripts/DataCleaning/data_output_models/bpe_corpus.py @@ -0,0 +1,21 @@ +from Scripts.Libs.Utils.dataframe_interaction import get_raw_from_dataframe +from Scripts.Libs.CleaningPipeline.special_token import SpecialToken +import pandas as pd + +class BPE_corpus(): + + def __init__(self, output_path :str): + self.output_handler = open(output_path, "w") + + def close(self): + # add corpus 
end before closing + self.output_handler.write(SpecialToken.CORPUS_END.value) + self.output_handler.close() + + def write_from_str(self, output: str): + if output == '': + return + self.output_handler.write(output) + + def write_from_df(self, df: pd.DataFrame): + self.write_from_str(get_raw_from_dataframe(df)) \ No newline at end of file diff --git a/Scripts/DataCleaning/data_output_models/rdf_completation_task.py b/Scripts/DataCleaning/data_output_models/rdf_completation_task.py new file mode 100644 index 0000000..111b2b9 --- /dev/null +++ b/Scripts/DataCleaning/data_output_models/rdf_completation_task.py @@ -0,0 +1,26 @@ +import pandas as pd + +class RDF_completation_task_dataset(): + """ + Write the CSV for the fourth task, which is "Predicting subsequent triples based on a given context". + Each RDF is saved as a string + CSV Composition: ["MovieID","RDF"] + """ + def __init__(self, output_path:str): + + + self.output = open(output_path, "w") + # then the first row as header + header = ["MovieID","RDF"] + self.output.write(",".join(header) + "\n") + + def close(self): + self.output.close() + + def write(self, RDF: pd.DataFrame): + """ + Args: + RDF (pd.DataFrame): ["MovieID","RDF"] + """ + + RDF.to_csv(self.output, index=False, header=False) \ No newline at end of file diff --git a/Scripts/DataCleaning/data_output_models/rdf_mask_task.py b/Scripts/DataCleaning/data_output_models/rdf_mask_task.py new file mode 100644 index 0000000..01b943d --- /dev/null +++ b/Scripts/DataCleaning/data_output_models/rdf_mask_task.py @@ -0,0 +1,58 @@ +import pandas as pd + +# do not worry about circular dependencies, this class will never call something else +from Scripts.DataCleaning.filter import PipelineApplier + +class RDF_mask_task_dataset(): + """ + Write the CSV for the third task, which is "Predicting a masked component within an RDF triple". + The CSV is like: for each RDF there will be 3 rows, where each time one of the components is missing. 
+ CSV Composition: ["MovieID","IncompleteRDF","Missing","RDF"] + """ + def __init__(self, output_path:str): + + # this methods will only be used by this class, but they belong in a lower level + self._build_triple = PipelineApplier.build_triple + self._build_incomplete_triple = PipelineApplier.build_incomplete_triple + + self.output = open(output_path, "w") + # then the first row as header + header = ["MovieID","IncompleteRDF","Missing","RDF"] + self.output.write(",".join(header) + "\n") + + def close(self): + self.output.close() + + def write(self, RDF: pd.DataFrame): + rdf_complete = self._build_triple(RDF) + + rdf_without_subject = self._build_incomplete_triple(RDF.drop(columns=["SubjectURI"])) + rdf_without_relationship = self._build_incomplete_triple(RDF.drop(columns=["RelationshipURI"])) + rdf_without_object = self._build_incomplete_triple(RDF.drop(columns=["ObjectURI"])) + #### + df_subject = pd.DataFrame({ + "MovieID": RDF["MovieID"], + "IncompleteRDF": rdf_without_subject, + "Missing": RDF["SubjectURI"], + "RDF": rdf_complete, + }) + + df_relationship = pd.DataFrame({ + "MovieID": RDF["MovieID"], + "IncompleteRDF": rdf_without_relationship, + "Missing": RDF["RelationshipURI"], + "RDF": rdf_complete, + }) + + df_object = pd.DataFrame({ + "MovieID": RDF["MovieID"], + "IncompleteRDF": rdf_without_object, + "Missing": RDF["ObjectURI"], + "RDF": rdf_complete, + }) + + + output_df = pd.concat([df_subject, df_relationship, df_object], ignore_index=True) + output_df.to_csv(self.output, index=False, header=False) + + diff --git a/Scripts/DataCleaning/data_output_models/rdf_text_tasks.py b/Scripts/DataCleaning/data_output_models/rdf_text_tasks.py new file mode 100644 index 0000000..918e600 --- /dev/null +++ b/Scripts/DataCleaning/data_output_models/rdf_text_tasks.py @@ -0,0 +1,26 @@ +import pandas as pd + +class RDF_text_task_dataset(): + """ + Write the CSV for the firsts two tasks, which are "Generating structured RDF triples from natural language text" and 
reverse. + In the CSV the RDFs will be saved together as a string. + CSV Composition: ["MovieID","RDFs","Abstract"] + """ + def __init__(self, output_path:str): + + + self.output = open(output_path, "w") + # then the first row as header + header = ["MovieID","RDFs","Abstract"] + self.output.write(",".join(header) + "\n") + + def close(self): + self.output.close() + + def write(self, RDF: pd.DataFrame): + """ + Args: + RDF (pd.DataFrame): ["MovieID","Triple","Abstract"] + """ + + RDF.to_csv(self.output, index=False, header=False) \ No newline at end of file diff --git a/Scripts/DataCleaning/filter.py b/Scripts/DataCleaning/filter.py new file mode 100644 index 0000000..50d6ead --- /dev/null +++ b/Scripts/DataCleaning/filter.py @@ -0,0 +1,184 @@ +# This file deletes the unwanted relationships in the pipeline according to different rules +import pandas as pd +import sqlite3 +import numpy as np + +from Scripts.Libs.CleaningPipeline.special_token import SpecialToken +from Scripts.Libs.CleaningPipeline.sql_endpoint import SqlEndpoint + + +class PipelineApplier(): + + def __init__(self): + + self.MOVIE_FILTER = pd.DataFrame() + self.REL_FILTER = pd.DataFrame() + + + def delete_relationship_by_str(self, RDF: pd.DataFrame, uri: str) -> pd.DataFrame: + return RDF[RDF["RelationshipURI"]!= uri] + + def generate_list_relationship_filter(self, filter_list: list[str]) -> None: + """Store RelationshipURI filters as a set """ + self.relationship_filter_list: set[str] = set(filter_list) + + def delete_relationship_by_list_filter(self, RDF: pd.DataFrame) -> pd.DataFrame: + """Remove rows whose RelationshipURI is in the stored filter. 
Generate it first by calling generate_list_relationship_filter""" + return RDF[~RDF["RelationshipURI"].isin(self.relationship_filter_list)] + + + def generate_frequency_movie_filter(self, MOVIE_COUNT: pd.DataFrame ,min_treshold: int, max_treshold: int): + """ + You MUST call this before filtering the dataset by movie frequency [filter_by_frequency_movie_id()], + since this method creates that filter + Args: + MOVIE_COUNT (pd.DataFrame): ["MovieID","Count"] + min_treshold (int): + max_treshold (int): + """ + MOVIE_COUNT = MOVIE_COUNT[MOVIE_COUNT["Count"] >= min_treshold] + MOVIE_COUNT = MOVIE_COUNT[MOVIE_COUNT["Count"] < max_treshold] + self.MOVIE_FILTER = MOVIE_COUNT #["MovieID"] + + def generate_frequency_relationship_filter(self, REL_COUNT: pd.DataFrame ,min_treshold: int, max_treshold: int): + REL_COUNT = REL_COUNT[REL_COUNT["Count"] >= min_treshold] + REL_COUNT = REL_COUNT[REL_COUNT["Count"] < max_treshold] + self.REL_FILTER = REL_COUNT #["RelationshipURI"] + + def filter_by_frequency_movie_id(self, RDF: pd.DataFrame) -> pd.DataFrame: + RDF = RDF[RDF["MovieID"].isin(self.MOVIE_FILTER["MovieID"])] + return RDF + + def filter_by_frequency_relationship(self, RDF: pd.DataFrame) -> pd.DataFrame: + RDF = RDF[RDF["RelationshipURI"].isin(self.REL_FILTER["RelationshipURI"])] + return RDF + + def rdf_add_special_token(self, RDF: pd.DataFrame): + """ + Adds RDF special token to each element of the tuple. i.e: SUBJ to SubjectURI, OBJ to ObjectURI, REL to RelationshipURI. + Check Scripts/Libs/CleaningPipeline/special_token.py for the up-to-date special token. + It only adds the special token of the three elements of the RDF, no other special token. 
+ Args: + RDF (pd.DataFrame): + Returns: + pd.DataFrame: ["MovieURI","SubjectURI","RelationshipURI","ObjectURI","Abstract"] + """ + # if the filter runned before sliced the RDF and created a View, here the problem is resolved + # for more context: SettingWithCopyWarning + RDF = RDF.copy() + # at the beginning of SubjectURI RelationshipURI ObjectURI, add their special token + RDF["SubjectURI"] = SpecialToken.SUBJECT.value + RDF["SubjectURI"] + RDF["ObjectURI"] = SpecialToken.OBJECT.value + RDF["ObjectURI"] + RDF["RelationshipURI"] = SpecialToken.RELATIONSHIP.value + RDF["RelationshipURI"] + return RDF + + + def drop_na_from_dataset(self, RDF: pd.DataFrame) -> pd.DataFrame: + # dataset has SubjectURI RelationshipURI ObjectURI + # want to drop the '' in them + # Replace empty strings with NaN + RDF = RDF.replace('', np.nan) + # Drop rows where any of the key columns are NaN + RDF = RDF.dropna(subset=["SubjectURI", "RelationshipURI", "ObjectURI"]) + return RDF + + def rebuild_by_movie(self, RDF: pd.DataFrame) -> pd.DataFrame: + """_summary_ + + Args: + RDF (pd.DataFrame): ["MovieID","SubjectURI","RelationshipURI","ObjectURI","Abstract"] + + Returns: + pd.DataFrame: ["MovieID","Triple","Abstract"] + """ + # to execute this method you have to have itereted by movie_id + # because as design we want at the end one row for each movie + # MovieID and abstract can be given as input for a more generic method + # movie_id = RDF["MovieID"].iloc(0) + # abstract = RDF["Abstract"].iloc(0) + # first let's combine each row creating column triple as join of rdf + RDF["Triple"] = RDF["SubjectURI"] + RDF["RelationshipURI"] + RDF["ObjectURI"] + # special token + RDF["Triple"] = SpecialToken.START_TRIPLE.value + RDF["Triple"] + SpecialToken.END_TRIPLE.value + # combine rows into one + # MovieID and Abstract are unique for each other 1 <-> 1 + RDF = RDF.groupby(["MovieID", "Abstract"])["Triple"].apply("".join).reset_index() + # add special token for: start of triple, end of triple and 
start of abstract + RDF["Triple"] = SpecialToken.START_TRIPLE_LIST.value + RDF["Triple"] + RDF["Abstract"] = SpecialToken.ABSTRACT.value + RDF["Abstract"] + return RDF[["MovieID","Triple","Abstract"]] + + def group_by_movie_from_triple(self, RDF: pd.DataFrame) -> pd.DataFrame: + """ + Args: + RDF (pd.DataFrame): ["MovieID","Triple","Abstract"] + + Returns: + pd.DataFrame: ["MovieID","Triple","Abstract"] + """ + # combine rows into one + # MovieID and Abstract are unique for each other 1 <-> 1 + RDF = RDF.groupby(["MovieID", "Abstract"])["Triple"].apply("".join).reset_index() + # add special token for: start of triple, end of triple and start of abstract + RDF["Triple"] = SpecialToken.START_TRIPLE_LIST.value + RDF["Triple"] + RDF["Abstract"] = SpecialToken.ABSTRACT.value + RDF["Abstract"] + return RDF[["MovieID","Triple","Abstract"]] + + + @staticmethod + def build_triple(RDF: pd.DataFrame): + """ + Obtains joined RDF triple in one element, togheter with START and END special token + Args: + RDF (pd.DataFrame): at least ["SubjectURI", "RelationshipURI", "ObjectURI"] + Returns: + pd.DataFrame: RDF["Triple"] (just this column) + """ + # let's combine each row creating column triple as join of rdf + RDF["Triple"] = RDF["SubjectURI"] + RDF["RelationshipURI"] + RDF["ObjectURI"] + # special token + RDF["Triple"] = SpecialToken.START_TRIPLE.value + RDF["Triple"] + SpecialToken.END_TRIPLE.value + return RDF["Triple"] + + @staticmethod + def build_incomplete_triple(RDF: pd.DataFrame): + """ + Method helper used for the third task: "Predicting a masked component within an RDF triple". + Obtains joined RDF triple in one element, togheter with START and END special token. 
+ The MISSING element will be replaced by the special token + Args: + RDF (pd.DataFrame): 2 of the following ["SubjectURI", "RelationshipURI", "ObjectURI"] + Returns: + RDF["Triple"]: pd.Series (just this column, NOT A DATAFRAME) + """ + # let's create a new column "Triple" with the joined RDF + + # the following creates a column of MASK token of the lenght of the dataframe, + # it is not needed since we expect to have a dataframe of just one column, but its more robust (AND SLOW) + MISSING = pd.Series([SpecialToken.MASK.value] * len(RDF), index=RDF.index) + + RDF["Triple"] = ( + RDF.get("SubjectURI", MISSING) + + RDF.get("RelationshipURI", MISSING) + + RDF.get("ObjectURI", MISSING)) + # special token + RDF["Triple"] = SpecialToken.START_TRIPLE.value + RDF["Triple"] + SpecialToken.END_TRIPLE.value + return RDF["Triple"] + + @staticmethod + def build_for_mask_task(RDF_incomplete: pd.DataFrame, MISSING: pd.DataFrame) -> pd.DataFrame: + # currently not used + """ + Method helper used for the third task: "Predicting a masked component within an RDF triple". 
Given two DataFrames, the first containing the incomplete RDF and the other only the missing component, + this method applies the special token + Args: + RDF (pd.DataFrame): _description_ + + Returns: + pd.DataFrame: _description_ + """ + # take an example dataframe as ["SubjectURI",""] + # as input two dataframes, one with 2 columns + return None + diff --git a/Scripts/DataCleaning/path_splitter_tree.py b/Scripts/DataCleaning/path_splitter_tree.py index e7f6f9e..9c0914a 100644 --- a/Scripts/DataCleaning/path_splitter_tree.py +++ b/Scripts/DataCleaning/path_splitter_tree.py @@ -101,7 +101,6 @@ def tree_like(file: str, csv_uri_header:str, out: str): FILE = open(file, "r", encoding="utf-8") - # TODO: Change here so it takes single URI from a CSV file # It is needed the header-name for row in csv.DictReader(FILE): diff --git a/Scripts/DataCleaning/pipeline.py b/Scripts/DataCleaning/pipeline.py new file mode 100644 index 0000000..eb5b2f7 --- /dev/null +++ b/Scripts/DataCleaning/pipeline.py @@ -0,0 +1,131 @@ +import re +from Scripts.Libs.CleaningPipeline.sql_endpoint import SqlEndpoint +from Scripts.DataCleaning.filter import PipelineApplier +# tasks dataset builder +from Scripts.DataCleaning.data_output_models.rdf_mask_task import RDF_mask_task_dataset +from Scripts.DataCleaning.data_output_models.bpe_corpus import BPE_corpus +from Scripts.DataCleaning.data_output_models.rdf_text_tasks import RDF_text_task_dataset +from Scripts.DataCleaning.data_output_models.rdf_completation_task import RDF_completation_task_dataset + +import pandas as pd + +class Pipeline(): + def __init__(self): + self.sql_endpoint = SqlEndpoint() + # classes to manage tasks' datasets + self.task_rdf_mask = RDF_mask_task_dataset("./Assets/Dataset/Tmp/rdf_mask.csv") + self.task_bpe_corpus = BPE_corpus("./Assets/Dataset/Tmp/corpus.txt") + self.task_rdf_text = RDF_text_task_dataset("./Assets/Dataset/Tmp/rdf_text.csv") + self.task_rdf_completation = 
RDF_completation_task_dataset("./Assets/Dataset/Tmp/rdf_completation.csv") + + # prepare the filter + # the filter applier needs to know the frequence of Movies and Relationship among all the Dataset + self.filter_applier = PipelineApplier() + MOVIE_COUNT = self.sql_endpoint.get_movies_id_count() + REL_COUNT = self.sql_endpoint.get_relationship_count() + self.filter_applier.generate_frequency_movie_filter(MOVIE_COUNT,50,3000) + self.filter_applier.generate_frequency_relationship_filter(REL_COUNT, 50, 2395627) + # prepare the filter on the relationshipURI you want to delete: + relationship_uri_banned_list = [ + "dbp-dbp:wikiPageUsesTemplate","w3:2000/01/rdf-schema#label","dbp-dbo:abstract", + "dbp-dbo:wikiPageID","dbp-dbo:wikiPageRevisionID", "dbp-dbo:wikiPageDisambiguates", + "w3:2002/07/owl#sameAs","dbp-dbp:image","dbp-dbo:wikiPageLength", "w3:2000/01/rdf-schema#comment", + "dbp-dbo:thumbnail", "foaf:depiction", "w3:1999/02/22-rdf-syntax-ns#type"] + self.filter_applier.generate_list_relationship_filter(relationship_uri_banned_list) + + + def execute_task_bpe_corpus(self): + for RDF in self._get_cleaned_movie_rows(): + RDF = self.filter_applier.rebuild_by_movie(RDF) + RDF = RDF[["Triple","Abstract"]] + self.task_bpe_corpus.write_from_df(RDF) + self._end_file_handler() + + + def execute_task_rdf_mask(self): + for RDF in self._get_cleaned_movie_rows(): + self.task_rdf_mask.write(RDF) + self._end_file_handler() + + + def execute_tasks_rdf_text(self): + for RDF in self._get_cleaned_movie_rows(): + RDF = self.filter_applier.rebuild_by_movie(RDF) + self.task_rdf_text.write(RDF) + self._end_file_handler() + + + def execute_task_rdf_completation(self): + for RDF in self._get_cleaned_movie_rows(): + RDF["Triple"] = self.filter_applier.build_triple(RDF) + self.task_rdf_completation.write(RDF[["MovieID","Triple"]]) + self._end_file_handler() + + + def execute_all_task(self): + for RDF in self._get_cleaned_movie_rows(): + self.task_rdf_mask.write(RDF) + + RDF["Triple"] = 
self.filter_applier.build_triple(RDF) + self.task_rdf_completation.write(RDF[["MovieID","Triple"]]) + + RDF = self.filter_applier.group_by_movie_from_triple(RDF[["MovieID","Triple","Abstract"]]) + + self.task_rdf_text.write(RDF) + self.task_bpe_corpus.write_from_df(RDF[["Triple","Abstract"]]) + + self._end_file_handler() + + + def _end_file_handler(self): + self.task_bpe_corpus.close() + self.task_rdf_mask.close() + self.task_rdf_text.close() + self.task_rdf_completation.close() + + + def _get_cleaned_movie_rows(self): + for RDF in self.sql_endpoint.get_abbreviated_dataset_by_movie_id(): + RDF = self.filter_applier.drop_na_from_dataset(RDF) + RDF = self.filter_applier.filter_by_frequency_movie_id(RDF) + RDF = self.filter_applier.filter_by_frequency_relationship(RDF) + # other filter + # + RDF = self.filter_applier.delete_relationship_by_list_filter(RDF) + if RDF.empty: + continue + RDF = self.filter_applier.rdf_add_special_token(RDF) # WARNING, THIS MUST BE DONE AFTER FILTER BY FREQUENCE + yield RDF + + + def use_toy_dataset(self): + # CHOOSEN MOVIE: + # The Dark Knight : 117248 + # Inception : 147074 + # The Avengers : 113621 + # Cast Away : 1123 + # The Departed : 117586 + # American Psycho : 90177 + # Avatar : 71587 + # Django Unchained : 138952 + # Spirited Away : 144137 + # Knives Out : 148025 + movie_list = [117248, 147074, 113621, 1123, 117586, 90177, 71587, 138952, 144137, 148025] + self.sql_endpoint.movie_ids = movie_list + + + +# there are a lot of settings to manage +# you only need to change settings: +# in the init for file paths, frequency filter limit, banned reletionshipURI +# in the use_toy_dataset , to change the toy dataset +# in _get_cleaned_movie_rows: to change how the pipeline behave + +pipeline = Pipeline() + +# pipeline.use_toy_dataset() +# pipeline.execute_task_bpe_corpus() +# pipeline.execute_task_rdf_mask() +# pipeline.execute_tasks_rdf_text() +# pipeline.execute_task_rdf_completation() +pipeline.execute_all_task() \ No newline at end of 
file diff --git a/Scripts/Libs/CleaningPipeline/special_token.py b/Scripts/Libs/CleaningPipeline/special_token.py new file mode 100644 index 0000000..644ad71 --- /dev/null +++ b/Scripts/Libs/CleaningPipeline/special_token.py @@ -0,0 +1,21 @@ +from enum import Enum + +class SpecialToken(str, Enum): + # (Enum, str) -> throws an error + START_TRIPLE_LIST = "" + START_TRIPLE = "" + END_TRIPLE = "" + SUBJECT = "" + RELATIONSHIP = "" + OBJECT = "" + ABSTRACT = "" + CORPUS_END = "" + + ## Tasks' Token + RDF_TO_TEXT = "" + TEXT_TO_RDF = "" + CONTINUE_RDF = "" + MASK = "" + + #BPE Training: + \ No newline at end of file diff --git a/Scripts/Libs/CleaningPipeline/sql_endpoint.py b/Scripts/Libs/CleaningPipeline/sql_endpoint.py new file mode 100644 index 0000000..66ba1ea --- /dev/null +++ b/Scripts/Libs/CleaningPipeline/sql_endpoint.py @@ -0,0 +1,144 @@ +####################################################### +# This file stand as endpoint to interact with DB # +####################################################### + +# import sqlite3 +import pandas as pd +from sqlalchemy import create_engine +from Scripts.Libs.CleaningPipeline.special_token import SpecialToken + + +class SqlEndpoint(): + + def __init__(self, DB_PATH = "./Assets/Dataset/DatawareHouse/dataset.db", chunk_size_row = 500): + # self.CONN = sqlite3.connect(DB_PATH) # DEPRECATED + self.sql_engine = create_engine(f"sqlite:///{DB_PATH}") + # /// 3 slash -> relative path + # //// 4 slash -> absolute + # self.conn = self.sql_engine.connect().execution_options(stream_results=True) + # it seems that sqlite doenst support streamer cursor + # PRAGMA exeutes better in writing not reading + self.chunk_size_row = chunk_size_row # not used now, since each chunk is a movie + self.movie_ids = movie_ids = pd.read_sql_query("SELECT MovieID FROM Movies;", self.sql_engine)["MovieID"] + + def get_RDF(self) -> pd.DataFrame : + + QUERY = """ + SELECT MovieID, SubjectURI, RelationshipURI, ObjectURI + FROM RDFs + INNER JOIN Subjects 
USING (SubjectID) + INNER JOIN Relationships USING (RelationshipID) + INNER JOIN Objects USING (ObjectID); + """ + + return pd.read_sql_query(QUERY, self.CONN) + + def get_chunked_abbreviated_dataset(self) -> pd.DataFrame : + """ + Returns: + pd.DataFrame: MovieID, SubjectURI, RelationshipURI, ObjectURI, Abstract + """ + + QUERY = """ + SELECT MovieID, SubjectURI, RelationshipURI, ObjectURI, Abstract + FROM RDFs + INNER JOIN ParsedSubjects USING (SubjectID) + INNER JOIN ParsedRelationships USING (RelationshipID) + INNER JOIN ParsedObjects USING (ObjectID) + INNER JOIN WikipediaAbstracts USING (MovieID); + """ + + # return pd.read_sql_query(QUERY, self.CONN, chunksize=500) + # sqlite3 + return pd.read_sql_query(QUERY, self.sql_engine, chunksize=self.chunk_size_row) + + + def get_chunked_abbreviated_dataset_with_start_token(self)-> pd.DataFrame: + # DEPRECATED ! + start_token = SpecialToken() + QUERY = """ + SELECT + MovieID, + ? || SubjectURI AS SubjectURI, + ? || RelationshipURI AS RelationshipURI, + ? || ObjectURI AS ObjectURI, + Abstract + FROM RDFs + INNER JOIN ParsedSubjects USING (SubjectID) + INNER JOIN ParsedRelationships USING (RelationshipID) + INNER JOIN ParsedObjects USING (ObjectID) + INNER JOIN WikipediaAbstracts USING (MovieID); + """ + return pd.read_sql_query(QUERY, self.sql_engine, chunksize=self.chunk_size_row) + + def get_abbreviated_dataset_by_movie_id(self):# -> iter[pd.DataFrame]: + """ + Gets each time a DataFrame per movie ( with all its rows in the dataset). 
+ The retrieved RDFs are already abbrevieted by the sql parser + Yields: + Pandas.DataFrame: [MovieID, SubjectURI, RelationshipURI, ObjectURI, Abstract] + """ + # chunk by movieId, abstract is the same and some intersting logic are appliable + # movie_ids = pd.read_sql_query("SELECT MovieID FROM Movies;", self.sql_engine)["MovieID"] + # CHOOSEN MOVIE: + # The Dark Knight : 117248 + # Inception : 147074 + # The Avengers : 113621 + # Cast Away : 1123 + # The Departed : 117586 + # American Psycho : 90177 + # Avatar : 71587 + # Django Unchained : 138952 + # Spirited Away : 144137 + # Knives Out : 148025 + # movie_list = [117248, 147074, 113621, 1123, 117586, 90177, 71587, 138952, 144137, 148025] + # movie_ids = movie_list + + QUERY = """ + SELECT MovieID, SubjectURI, RelationshipURI, ObjectURI, Abstract + FROM RDFs + INNER JOIN ParsedSubjects USING (SubjectID) + INNER JOIN ParsedRelationships USING (RelationshipID) + INNER JOIN ParsedObjects USING (ObjectID) + INNER JOIN WikipediaAbstracts USING (MovieID) + WHERE MovieID = (?); + """ + + for movie_id in self.movie_ids: + yield pd.read_sql_query(QUERY, self.sql_engine, params=(movie_id,)) + + def get_movies_id_count(self) -> pd.DataFrame: + """ + Gets the count of each Movie in the Dataset + Returns: + Pandas.DataFrame: [MovieID, Count] + """ + QUERY = """ + SELECT MovieID, COUNT(*) AS Count + FROM RDFs + GROUP BY MovieID; + """ + return pd.read_sql_query(QUERY, self.sql_engine) + + def get_relationship_count(self) -> pd.DataFrame: + """ + Gets the count of each Relationship in the Dataset + Returns: + Pandas.DataFrame: [RelationshipURI, Count] + """ + QUERY = """ + SELECT RelationshipURI, COUNT(*) AS Count + FROM RDFs + INNER JOIN ParsedRelationships USING (RelationshipID) + GROUP BY RelationshipURI; + """ + return pd.read_sql_query(QUERY, self.sql_engine) + + + +if __name__ == "__main__" : + sql_endpoint = SqlEndpoint() + for pandas_row in sql_endpoint.get_abbreviated_dataset_by_movie_id(): + print(pandas_row) + # 
sql_endpoint.get_RDF() + print("done") \ No newline at end of file diff --git a/Scripts/Libs/Utils/dataframe_interaction.py b/Scripts/Libs/Utils/dataframe_interaction.py new file mode 100644 index 0000000..c4df33a --- /dev/null +++ b/Scripts/Libs/Utils/dataframe_interaction.py @@ -0,0 +1,9 @@ +import pandas as pd + + + +def get_raw_from_dataframe(DF: pd.DataFrame) -> str: + output = '' + for row in DF.itertuples(index=False, name=None): + output += "".join(map(str, row)) + return output diff --git a/Scripts/UML/CleaningPipeline/cleaning-pipeline.excalidraw.json b/Scripts/UML/CleaningPipeline/cleaning-pipeline.excalidraw.json new file mode 100644 index 0000000..c7019f5 --- /dev/null +++ b/Scripts/UML/CleaningPipeline/cleaning-pipeline.excalidraw.json @@ -0,0 +1,634 @@ +{ + "type": "excalidraw", + "version": 2, + "source": "https://marketplace.visualstudio.com/items?itemName=pomdtr.excalidraw-editor", + "elements": [ + { + "id": "JNB9z-PeqZ4s8KDfWaoXe", + "type": "rectangle", + "x": 106, + "y": 27, + "width": 653, + "height": 263, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": null, + "index": "a2", + "roundness": { + "type": 3 + }, + "seed": 710740889, + "version": 326, + "versionNonce": 1107631703, + "isDeleted": false, + "boundElements": null, + "updated": 1759156408059, + "link": null, + "locked": false + }, + { + "id": "e13wNTgUpn2flMpmMttqx", + "type": "text", + "x": 200.5943407656526, + "y": 44.07937975075269, + "width": 307.2781467269385, + "height": 23.3097531902191, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": null, + "index": "a3", + "roundness": null, + "seed": 1012740663, + "version": 444, + "versionNonce": 589551257, + 
"isDeleted": false, + "boundElements": null, + "updated": 1759156408059, + "link": null, + "locked": false, + "text": "Libs/CleaningPipeline/sql_endpoint", + "fontSize": 18.64780255217528, + "fontFamily": 5, + "textAlign": "left", + "verticalAlign": "top", + "containerId": null, + "originalText": "Libs/CleaningPipeline/sql_endpoint", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "id": "CgxCElJkKBtIHv-5WQrbo", + "type": "text", + "x": 195, + "y": 80.44259472749451, + "width": 403.64997665852184, + "height": 186.4780255217528, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": null, + "index": "a4", + "roundness": null, + "seed": 1261951799, + "version": 507, + "versionNonce": 1922906999, + "isDeleted": false, + "boundElements": null, + "updated": 1759156408059, + "link": null, + "locked": false, + "text": "Class SqlEndpoint:\n - sql_engine\n + movie_ids: list[int]\n\n #\n + get_abbreviated_dataset_by_movie_id\n\n", + "fontSize": 18.64780255217528, + "fontFamily": 5, + "textAlign": "left", + "verticalAlign": "top", + "containerId": null, + "originalText": "Class SqlEndpoint:\n - sql_engine\n + movie_ids: list[int]\n\n #\n + get_abbreviated_dataset_by_movie_id\n\n", + "autoResize": true, + "lineHeight": 1.25 + }, + { + "type": "line", + "version": 4979, + "versionNonce": 1473849177, + "isDeleted": false, + "id": "sYReMTdYblr-oJtYYJALU", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": -67.14432426259049, + "y": 87.19293561900287, + "strokeColor": "#000000", + "backgroundColor": "#a5d8ff", + "width": 77.09201683999922, + "height": 99.49948667804088, + "seed": 1263944119, + "groupIds": [ + "9YkNe1yqnfZy9Z1JX2xr4", + "BDBCTrrhjbJynRAyuf3xJ" + ], + "strokeSharpness": "round", + "boundElementIds": [], + "startBinding": null, + 
"endBinding": null, + "lastCommittedPoint": null, + "startArrowhead": null, + "endArrowhead": null, + "points": [ + [ + 0, + 0 + ], + [ + 0.2542098813493443, + 75.20117273657175 + ], + [ + 0.011896425679918422, + 83.76249969444815 + ], + [ + 3.970409367559332, + 87.46174320643391 + ], + [ + 17.75573317066317, + 90.59250103325854 + ], + [ + 41.05683533152865, + 91.56737225214069 + ], + [ + 63.319497586673116, + 90.01084754868091 + ], + [ + 75.14781395923075, + 86.28844687220405 + ], + [ + 76.81603792670788, + 83.15042405259751 + ], + [ + 77.05033394391478, + 76.25776215104557 + ], + [ + 76.86643881413028, + 6.3089586511537865 + ], + [ + 76.45188016352971, + -0.2999144698665015 + ], + [ + 71.50179495549581, + -3.9936571317850627 + ], + [ + 61.077971898861186, + -6.132877429442784 + ], + [ + 37.32348754161154, + -7.932114425900202 + ], + [ + 18.278415656797975, + -6.859225353587373 + ], + [ + 3.2995959613238286, + -3.2201165291205287 + ], + [ + -0.04168289608444441, + -0.045185660461322996 + ], + [ + 0, + 0 + ] + ], + "index": "a6", + "frameId": null, + "roundness": { + "type": 2 + }, + "boundElements": [], + "updated": 1759158252997, + "link": null, + "locked": false + }, + { + "type": "line", + "version": 2684, + "versionNonce": 952947769, + "isDeleted": false, + "id": "0S6dEWQVqKUVkP6Z5IX1l", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": -66.6203948243155, + "y": 144.31921927673278, + "strokeColor": "#000000", + "backgroundColor": "#a5d8ff", + "width": 77.17198221193564, + "height": 8.562348957853036, + "seed": 817033943, + "groupIds": [ + "9YkNe1yqnfZy9Z1JX2xr4", + "BDBCTrrhjbJynRAyuf3xJ" + ], + "strokeSharpness": "round", + "boundElementIds": [], + "startBinding": null, + "endBinding": null, + "lastCommittedPoint": null, + "startArrowhead": null, + "endArrowhead": null, + "points": [ + [ + 0, + 0 + ], + [ + 2.033150371639873, + 3.413095389435587 + ], + [ + 10.801287372573954, + 
6.276651055277943 + ], + [ + 22.468666942209353, + 8.010803051612635 + ], + [ + 40.747074201802775, + 8.168828515515864 + ], + [ + 62.077348233027564, + 7.0647721921469495 + ], + [ + 74.53446931782398, + 3.04824021069218 + ], + [ + 77.17198221193564, + -0.3935204423371723 + ] + ], + "index": "a7", + "frameId": null, + "roundness": { + "type": 2 + }, + "boundElements": [], + "updated": 1759158252997, + "link": null, + "locked": false + }, + { + "type": "line", + "version": 2770, + "versionNonce": 477619481, + "isDeleted": false, + "id": "szGLND7J0nVOvRkNXX9AS", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": -67.65225214681931, + "y": 115.35516394150972, + "strokeColor": "#000000", + "backgroundColor": "#a5d8ff", + "width": 77.17198221193564, + "height": 8.562348957853036, + "seed": 1704755191, + "groupIds": [ + "9YkNe1yqnfZy9Z1JX2xr4", + "BDBCTrrhjbJynRAyuf3xJ" + ], + "strokeSharpness": "round", + "boundElementIds": [], + "startBinding": null, + "endBinding": null, + "lastCommittedPoint": null, + "startArrowhead": null, + "endArrowhead": null, + "points": [ + [ + 0, + 0 + ], + [ + 2.033150371639873, + 3.413095389435587 + ], + [ + 10.801287372573954, + 6.276651055277943 + ], + [ + 22.468666942209353, + 8.010803051612635 + ], + [ + 40.747074201802775, + 8.168828515515864 + ], + [ + 62.077348233027564, + 7.0647721921469495 + ], + [ + 74.53446931782398, + 3.04824021069218 + ], + [ + 77.17198221193564, + -0.3935204423371723 + ] + ], + "index": "a8", + "frameId": null, + "roundness": { + "type": 2 + }, + "boundElements": [], + "updated": 1759158252997, + "link": null, + "locked": false + }, + { + "type": "ellipse", + "version": 5767, + "versionNonce": 2119031289, + "isDeleted": false, + "id": "O3t2uGktJlDd1_OX_bpV4", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": -68.71020112890136, + "y": 80.06066699332126, + 
"strokeColor": "#000000", + "backgroundColor": "#a5d8ff", + "width": 76.59753601865496, + "height": 15.49127539284798, + "seed": 471296279, + "groupIds": [ + "9YkNe1yqnfZy9Z1JX2xr4", + "BDBCTrrhjbJynRAyuf3xJ" + ], + "strokeSharpness": "sharp", + "boundElementIds": [ + "bxuMGTzXLn7H-uBCptINx" + ], + "index": "a9", + "frameId": null, + "roundness": null, + "boundElements": [], + "updated": 1759158252997, + "link": null, + "locked": false + }, + { + "type": "ellipse", + "version": 1177, + "versionNonce": 525480665, + "isDeleted": false, + "id": "_SzKlOBOvJgBg7FX0JTTM", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": -32.218214023678854, + "y": 104.53733467322485, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "width": 11.226103154161754, + "height": 12.183758484455605, + "seed": 1368927799, + "groupIds": [ + "9YkNe1yqnfZy9Z1JX2xr4", + "BDBCTrrhjbJynRAyuf3xJ" + ], + "strokeSharpness": "sharp", + "boundElementIds": [], + "index": "aA", + "frameId": null, + "roundness": null, + "boundElements": [], + "updated": 1759158252997, + "link": null, + "locked": false + }, + { + "type": "ellipse", + "version": 1465, + "versionNonce": 1410887609, + "isDeleted": false, + "id": "oJMl2Kxa3SPaiAY0kxo7A", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": -31.867072239745255, + "y": 130.75394896028996, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "width": 11.226103154161754, + "height": 12.183758484455605, + "seed": 1627606871, + "groupIds": [ + "9YkNe1yqnfZy9Z1JX2xr4", + "BDBCTrrhjbJynRAyuf3xJ" + ], + "strokeSharpness": "sharp", + "boundElementIds": [], + "index": "aB", + "frameId": null, + "roundness": null, + "boundElements": [], + "updated": 1759158252997, + "link": null, + "locked": false + }, + { + "type": "ellipse", + "version": 1348, + "versionNonce": 314839193, + "isDeleted": false, + "id": 
"fB6pJBSMA-pRHrpgYKaLL", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 6.239590202363168, + "x": -31.218214023678854, + "y": 159.52267553159635, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "width": 11.226103154161754, + "height": 12.183758484455605, + "seed": 1420643447, + "groupIds": [ + "9YkNe1yqnfZy9Z1JX2xr4", + "BDBCTrrhjbJynRAyuf3xJ" + ], + "strokeSharpness": "sharp", + "boundElementIds": [], + "index": "aC", + "frameId": null, + "roundness": null, + "boundElements": [], + "updated": 1759158252997, + "link": null, + "locked": false + }, + { + "type": "text", + "version": 846, + "versionNonce": 1091081593, + "isDeleted": false, + "id": "9gZ3Yy1MeP9kEOTLODqLG", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": -76.81018163712321, + "y": 181.11281713043917, + "strokeColor": "#000000", + "backgroundColor": "#a5d8ff", + "width": 95.63072204589844, + "height": 23.595161071904883, + "seed": 2019206551, + "groupIds": [ + "BDBCTrrhjbJynRAyuf3xJ" + ], + "strokeSharpness": "sharp", + "boundElementIds": [], + "fontSize": 17.4778970902999, + "fontFamily": 1, + "text": "dataset.db", + "baseline": 16.595161071904883, + "textAlign": "center", + "verticalAlign": "top", + "index": "aD", + "frameId": null, + "roundness": null, + "boundElements": [], + "updated": 1759158252997, + "link": null, + "locked": false, + "containerId": null, + "originalText": "dataset.db", + "autoResize": true, + "lineHeight": 1.350000000000001 + }, + { + "id": "3eOw20xMhpB5jf_RMG24P", + "type": "text", + "x": 1131.3333333333335, + "y": 31.333333333333428, + "width": 508.3333333333333, + "height": 550, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": null, + "index": "aE", + 
"roundness": null, + "seed": 1535658041, + "version": 821, + "versionNonce": 1630266809, + "isDeleted": false, + "boundElements": null, + "updated": 1759157181677, + "link": null, + "locked": false, + "text": "Class PipelineApplier\n - movie_frequence_filter : pd.DataFrame()\n - rel_Frequence_Filter : pd.DataFrame()\n - rel_banned_list: list[str]\n\n + generate_movie_frequency_filter()\n + generate_rel_frequency_filter()\n + generate_list_relationship_filter()\n \n + filter_by_movie_frequency()\n + filter_by_relationship_frequency()\n + delete_relationship_by_list_filter()\n + delete_relationship_by_str()\n\n + drop_na() \n\n + rdf_add_special_token()\n + group_triple_by_movie()\n + build_by_movie()\n # static\n + build_triple()\n + build_incomplete_triple()", + "fontSize": 20, + "fontFamily": 5, + "textAlign": "left", + "verticalAlign": "top", + "containerId": null, + "originalText": "Class PipelineApplier\n - movie_frequence_filter : pd.DataFrame()\n - rel_Frequence_Filter : pd.DataFrame()\n - rel_banned_list: list[str]\n\n + generate_movie_frequency_filter()\n + generate_rel_frequency_filter()\n + generate_list_relationship_filter()\n \n + filter_by_movie_frequency()\n + filter_by_relationship_frequency()\n + delete_relationship_by_list_filter()\n + delete_relationship_by_str()\n\n + drop_na() \n\n + rdf_add_special_token()\n + group_triple_by_movie()\n + build_by_movie()\n # static\n + build_triple()\n + build_incomplete_triple()", + "autoResize": false, + "lineHeight": 1.25 + }, + { + "id": "Fbl1gpb5r7QrdRauGUWm2", + "type": "text", + "x": 158.23809523809535, + "y": 502.52380952380935, + "width": 484.2857142857143, + "height": 500, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": null, + "index": "aF", + "roundness": null, + "seed": 2066618807, + "version": 552, + "versionNonce": 1269344823, + 
"isDeleted": false, + "boundElements": null, + "updated": 1759158199532, + "link": null, + "locked": false, + "text": "Class Pipeline\n - sql_endpoint: SqlEndpoint()\n\n - task_rdf_mask_file_handler:\n - task_bpe_corpus_file_handler:\n - task_rdf_text_file_handler:\n - task_rdf_completation_file_handler:\n\n - Filter_applier : PipelineApplier()\n\n #\n - get_cleaned_movie_rows()\n \n + execute_task_bpe_corpus()\n + execute_task_rdf_mask()\n + execute_task_rdf_text()\n + execute_task_rdf_completation()\n + execute_all_task()\n\n + use_toy_dataset()", + "fontSize": 20, + "fontFamily": 5, + "textAlign": "left", + "verticalAlign": "top", + "containerId": null, + "originalText": "Class Pipeline\n - sql_endpoint: SqlEndpoint()\n\n - task_rdf_mask_file_handler:\n - task_bpe_corpus_file_handler:\n - task_rdf_text_file_handler:\n - task_rdf_completation_file_handler:\n\n - Filter_applier : PipelineApplier()\n\n #\n - get_cleaned_movie_rows()\n \n + execute_task_bpe_corpus()\n + execute_task_rdf_mask()\n + execute_task_rdf_text()\n + execute_task_rdf_completation()\n + execute_all_task()\n\n + use_toy_dataset()", + "autoResize": false, + "lineHeight": 1.25 + } + ], + "appState": { + "gridSize": 20, + "gridStep": 5, + "gridModeEnabled": false, + "viewBackgroundColor": "#ffffff" + }, + "files": {} +} \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index e87882c..70a3169 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,3 +15,4 @@ tzdata==2025.2 urllib3==2.5.0 wheel==0.45.1 Wikipedia-API==0.8.1 +SQLAlchemy