diff --git a/SupervisedGym/SupervisedGym/__main__.py b/SupervisedGym/SupervisedGym/__main__.py
index cd061ce1498351f1605b63ba09d1cf905f14f8e4..e03c5da36bff155fa83eb74d0278943478901845 100644
--- a/SupervisedGym/SupervisedGym/__main__.py
+++ b/SupervisedGym/SupervisedGym/__main__.py
@@ -6,11 +6,11 @@ import torch
 import pandas as pd
 import xml.etree.ElementTree as ET
 from SupervisedGym.model_assembler import ModelAssembler
-from SupervisedGym.models import FeedForwardTunable
+from SupervisedGym.models import FeedForwardTunable, StackedRNNTunable, TransformerTunable
 
-SAVE_PATH = ""
-TIMEOUT = 60 * 60 * 10  # 10 hours minutes
+# SAVE_PATH = ""
+# TIMEOUT = 60 * 60 * 10  # 10 hours
 
 DATA_CONF = DataConf(
     group_id="experimentId",
     pred_horizon=10,
@@ -18,25 +18,44 @@ DATA_CONF = DataConf(
     shuffle=False,
 )
 
+DIRECTORY = r"C:\Users\wojci\OneDrive\Documents\Studia\ZPP\last1\genom"
+
 if __name__ == "__main__":
+
     assebler = ModelAssembler(
         raw_data=pd.read_csv(
-            "/home/szysad/mimuw/3rok/ZPP/training-data/FCR-data/set5/SD-01-29-2021.csv"
+            # "/home/szysad/mimuw/3rok/ZPP/training-data/FCR-data/set5/SD-01-29-2021.csv"
+            r"C:\Users\wojci\OneDrive\Documents\Studia\ZPP\trening\alpha-star-solver\FCRtraining\Genom\genom-newest-at-05-28.csv"
         ),
         cp_problem=ET.parse(
-            "/home/szysad/mimuw/3rok/ZPP/cp_solver/FCR-config/FCRclear-CP.xmi"
+            # "/home/szysad/mimuw/3rok/ZPP/cp_solver/FCR-config/FCRclear-CP.xmi"
+            r"C:\Users\wojci\OneDrive\Documents\Studia\ZPP\trening\alpha-star-solver\FCRtraining\Genom\GenomZPPSolverSmall-CP.xmi"
         ),
         val_loss=torch.nn.L1Loss(),
         data_conf=DATA_CONF,
     )
 
-    model = assebler.assemble_model(
-        timeout=TIMEOUT,
-        n_trials=5,
-        ensemble_size=5,
-        avaible_model_types=[FeedForwardTunable],
-        fast_dev_run=True,
-    )
+    for time in [60 * 60, 5 * 60 * 60]:
+        for networks, name in [
+            ([FeedForwardTunable], "FF"),
+            ([StackedRNNTunable], "RNN"),
+            ([TransformerTunable], "Trans"),
+            ([FeedForwardTunable, StackedRNNTunable, TransformerTunable], "All")
+        ]:
+            for ensembling in [1, 5]:
+
+                time_per_model = time // len(networks)
+                models = {}
+                for model_type in networks:
+                    models[model_type] = {"timeout": time_per_model, "n_trials": None}
+
+                model = assebler.assemble_model(
+                    ensemble_size=ensembling,
+                    models=models
+                )
+
+                file_name = "Time=" + str(time) + " Model=" + name + " Ensembling=" + str(ensembling)
+                file_path = DIRECTORY + "\\" + file_name
 
-    with open(SAVE_PATH, "wb") as f:
-        f.write(model.parse_model())
+                with open(file_path, "wb") as f:
+                    f.write(model.parse_model())
diff --git a/SupervisedGym/SupervisedGym/modeltuner/objective.py b/SupervisedGym/SupervisedGym/modeltuner/objective.py
index 22abe26eb70d4f66db5bdb67a5297a35169389d4..bec89ed9d62d1aa226db504c24f0c60cf7e5f586 100644
--- a/SupervisedGym/SupervisedGym/modeltuner/objective.py
+++ b/SupervisedGym/SupervisedGym/modeltuner/objective.py
@@ -101,7 +101,7 @@ class Objective:
 
     def __call__(self, trial: optuna.trial.Trial) -> float:
         # generate batch size pow
-        batch_size_pow = trial.suggest_int("batch_size_pow", 5, 12)
+        batch_size_pow = trial.suggest_int("batch_size_pow", 5, 8)
         batch_size = 2 ** batch_size_pow
 
         input_scaler_name = trial.suggest_categorical(
diff --git a/SupervisedGym/SupervisedGym/modeltuner/utils.py b/SupervisedGym/SupervisedGym/modeltuner/utils.py
index 8b9fe625341a468e9760523be6f1ee58b5314995..ea7666265da7e152e535e41a8a1cc2ea4b7e38f4 100644
--- a/SupervisedGym/SupervisedGym/modeltuner/utils.py
+++ b/SupervisedGym/SupervisedGym/modeltuner/utils.py
@@ -51,7 +51,7 @@ def generate_objective_hparams(
     )
     min_epochs = max(1, int(log(train_items, 100)))
     return ObjectiveHParams(
-        val_check_interval=1,
+        val_check_interval=1.0,
         max_epochs=min_epochs * 50,
         min_epochs=min_epochs,
         es_divergence_threshold=None,
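
Note on the new `__main__.py` loop: it sweeps a 2 x 4 x 2 grid (two wall-clock budgets, four model sets, two ensemble sizes), i.e. 16 assembled models, splitting each budget evenly across the model types in a set. Below is a minimal standalone sketch of that grid logic; it is illustrative only (model classes are stood in by their names, print() replaces the actual assemble_model call and file write, and the loop variable is renamed from `time` to `budget` because `time` shadows the standard-library module):

# Illustrative sketch of the experiment grid swept by the new __main__.py.
TIME_BUDGETS = [60 * 60, 5 * 60 * 60]  # 1 h and 5 h, as in the diff
MODEL_SETS = {
    "FF": ["FeedForwardTunable"],
    "RNN": ["StackedRNNTunable"],
    "Trans": ["TransformerTunable"],
    "All": ["FeedForwardTunable", "StackedRNNTunable", "TransformerTunable"],
}
ENSEMBLE_SIZES = [1, 5]

for budget in TIME_BUDGETS:
    for name, networks in MODEL_SETS.items():
        time_per_model = budget // len(networks)  # equal share per model type
        for ensembling in ENSEMBLE_SIZES:
            file_name = f"Time={budget} Model={name} Ensembling={ensembling}"
            print(f"{file_name}  ({time_per_model} s per model type)")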
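
Note on the `objective.py` change: narrowing `batch_size_pow` from [5, 12] to [5, 8] restricts the searched batch sizes from 2**5 = 32 .. 2**12 = 4096 down to 32 .. 2**8 = 256.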
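
Note on the `utils.py` change: assuming these hparams feed a PyTorch Lightning Trainer (suggested by names like `val_check_interval` and `fast_dev_run`), the argument is type-sensitive, so `1` and `1.0` behave very differently. A short sketch of the distinction:

import pytorch_lightning as pl

# An int means "run validation every N training batches";
# a float means "run validation after this fraction of each training epoch".
validate_every_batch = pl.Trainer(val_check_interval=1)       # after every batch
validate_once_per_epoch = pl.Trainer(val_check_interval=1.0)  # once per epoch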