diff --git a/Scripts/DataCleaning/data_output_models/debug_csv.py b/Scripts/DataCleaning/data_output_models/debug_csv.py
new file mode 100644
index 0000000..c120765
--- /dev/null
+++ b/Scripts/DataCleaning/data_output_models/debug_csv.py
@@ -0,0 +1,19 @@
+import pandas as pd
+
+class Debug_csv():
+    def __init__(self, output_path:str):
+        # newline="" prevents blank rows on Windows when pandas writes to this handle
+        self.output = open(output_path, "w", newline="")
+        # write the header as the first row
+        header = ["MovieURI","SubjectURI","RelationshipURI","ObjectURI","Abstract"]
+        self.output.write(",".join(header) + "\n")
+
+    def close(self):
+        self.output.close()
+
+    def write(self, RDF: pd.DataFrame):
+        """
+        Args:
+            RDF (pd.DataFrame): ["MovieURI","SubjectURI","RelationshipURI","ObjectURI","Abstract"]
+        """
+        RDF.to_csv(self.output, index=False, header=False)
\ No newline at end of file
diff --git a/Scripts/DataCleaning/filter.py b/Scripts/DataCleaning/filter.py
index 317ea6b..592d628 100644
--- a/Scripts/DataCleaning/filter.py
+++ b/Scripts/DataCleaning/filter.py
@@ -186,3 +186,9 @@ class PipelineApplier():
         # as input two dataframe, one with 2 column
         return None
 
+    def regex_on_objects(self, RDF: pd.DataFrame) -> pd.DataFrame:
+        RDF["ObjectURI"] = (RDF["ObjectURI"].astype("string")
+                            .str.replace(r"\r?\n+", ", ", regex=True)  # newlines -> ", "
+                            .str.replace(r"\*", "", regex=True))       # delete all asterisks
+
+        return RDF
\ No newline at end of file
diff --git a/Scripts/DataCleaning/pipeline.py b/Scripts/DataCleaning/pipeline.py
index 153f127..f0a2169 100644
--- a/Scripts/DataCleaning/pipeline.py
+++ b/Scripts/DataCleaning/pipeline.py
@@ -6,17 +6,18 @@
 from Scripts.DataCleaning.data_output_models.rdf_mask_task import RDF_mask_task_dataset
 from Scripts.DataCleaning.data_output_models.bpe_corpus import BPE_corpus
 from Scripts.DataCleaning.data_output_models.rdf_text_tasks import RDF_text_task_dataset
 from Scripts.DataCleaning.data_output_models.rdf_completation_task import RDF_completation_task_dataset
+from Scripts.DataCleaning.data_output_models.debug_csv import Debug_csv
 import pandas as pd
 
 class Pipeline():
-    def __init__(self,
-                mask_task_dataset_path:str = "./Assets/Dataset/Tmp/rdf_mask.csv",
-                bpe_corpus_path:str = "./Assets/Dataset/Tmp/corpus.txt",
-                text_to_rdf_task_dataset_path:str = "./Assets/Dataset/Tmp/rdf_text.csv",
-                completation_rdf_task_dataset_path:str = "./Assets/Dataset/Tmp/rdf_completation.csv",
-
-                ):
+    def __init__(self):
+        # the old constructor parameters became hardcoded locals; the names
+        # must stay, since the assignments below still reference them
+        mask_task_dataset_path = "./Assets/Dataset/Tmp/rdf_mask.csv"
+        bpe_corpus_path = "./Assets/Dataset/Tmp/corpus.txt"
+        text_to_rdf_task_dataset_path = "./Assets/Dataset/Tmp/rdf_text.csv"
+        completation_rdf_task_dataset_path = "./Assets/Dataset/Tmp/rdf_completation.csv"
         self.sql_endpoint = SqlEndpoint()
         # classes to manage taskes' datasets
         self.task_rdf_mask = RDF_mask_task_dataset(mask_task_dataset_path)
@@ -98,6 +99,8 @@ class Pipeline():
         # other filter
         # RDF = self.filter_applier.delete_relationship_by_list_filter(RDF)
 
+        # regex cleanup on ObjectURI (flatten newlines, strip asterisks)
+        RDF = self.filter_applier.regex_on_objects(RDF)
         if RDF.empty:
             continue
         RDF = self.filter_applier.rdf_add_special_token(RDF) # WARNING, THIS MUST BE DONE AFTER FILTER BY FREQUENCE
@@ -119,20 +122,25 @@ class Pipeline():
         movie_list = [117248, 147074, 113621, 1123, 117586, 90177, 71587, 138952, 144137, 148025]
         self.sql_endpoint.movie_ids = movie_list
 
-    def reduce_movie_list(self, starting_offset:int , ending_offset:int):
-        self.filter_applier.reduce_movie_list(starting_offset,ending_offset)
+    def generate_csv_debug_file(self, debug_path:str):
+        debug_csv = Debug_csv(debug_path)
+        for RDF in self._get_cleaned_movie_rows():
+            debug_csv.write(RDF)
+
+        debug_csv.close()
 
 
 # there are a lot of settings to manage
 # you only need to change settings:
-# in the init for file paths, frequency filter limit, banned reletionshipURI
-# in the use_toy_dataset , to change the toy dataset
-# in _get_cleaned_movie_rows: to change how the pipeline behave
+# in the init for file paths, frequency filter limit, banned RelationshipURI
+# in use_toy_dataset, to change the toy dataset
+# in _get_cleaned_movie_rows, to change how the pipeline behaves
 
-#pipeline = Pipeline()
-# pipeline.use_toy_dataset()
+pipeline = Pipeline()
+pipeline.use_toy_dataset()
 # pipeline.execute_task_bpe_corpus()
 # pipeline.execute_task_rdf_mask()
 # pipeline.execute_tasks_rdf_text()
 # pipeline.execute_task_rdf_completation()
-# pipeline.execute_all_task()
+# pipeline.execute_all_task()
+pipeline.generate_csv_debug_file("Assets/Dataset/Tmp/debug.csv")
\ No newline at end of file
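
A minimal usage sketch for reviewers (not part of the patch), exercising the new Debug_csv writer on its own. The debug_sample.csv path and the fake row are invented for illustration; only the five-column schema comes from the class docstring.

import pandas as pd
from Scripts.DataCleaning.data_output_models.debug_csv import Debug_csv

# one fake row in the expected schema; the embedded newline and asterisks
# are the kind of noise regex_on_objects strips earlier in the pipeline
rows = pd.DataFrame(
    [["m/1", "s/1", "r/starring", "Actor A\n*uncredited*", "An abstract."]],
    columns=["MovieURI", "SubjectURI", "RelationshipURI", "ObjectURI", "Abstract"],
)

writer = Debug_csv("debug_sample.csv")  # opens the file and writes the header row
writer.write(rows)                      # appends rows without repeating the header
writer.close()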