Merged
UvA DL Tutorials: Updating progress bar API to PL 1.8
phlippe committed Jan 4, 2023
commit a5811f5a0dd629f8797570d69e4faa61958a5279
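For context: as of PyTorch Lightning 1.8, the Trainer no longer accepts the integer `progress_bar_refresh_rate` argument; bar visibility is controlled by the boolean `enable_progress_bar` flag, and a non-default refresh rate is set on the progress-bar callback instead. A minimal sketch of the mapping this commit applies (the Trainer calls below are illustrative, not taken from the tutorials):

import pytorch_lightning as pl

# Old API: one integer controlled both visibility and update frequency
# pl.Trainer(progress_bar_refresh_rate=1)   # show the bar, refresh every batch
# pl.Trainer(progress_bar_refresh_rate=0)   # hide the bar entirely

# New API (PL 1.8): a boolean controls visibility only
trainer_visible = pl.Trainer(enable_progress_bar=True)   # replaces refresh_rate >= 1
trainer_hidden = pl.Trainer(enable_progress_bar=False)   # replaces refresh_rate == 0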
@@ -357,7 +357,7 @@ def train_model(model_name, save_name=None, **kwargs):
), # Save the best checkpoint based on the maximum val_acc recorded. Saves only weights and not optimizer
LearningRateMonitor("epoch"),
], # Log learning rate every epoch
- progress_bar_refresh_rate=1,
+ enable_progress_bar=True,
) # In case your notebook crashes due to the progress bar, consider increasing the refresh rate
trainer.logger._log_graph = True # If True, we plot the computation graph in tensorboard
trainer.logger._default_hp_metric = None # Optional logging argument that we don't need
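The context comment above still suggests increasing the refresh rate if the notebook struggles with the progress bar; with the new API that would be done through the progress-bar callback rather than a Trainer argument. A small sketch, assuming the default TQDM-based bar (the refresh_rate value is arbitrary):

import pytorch_lightning as pl
from pytorch_lightning.callbacks import TQDMProgressBar

# Keep the bar enabled but only redraw it every 20 batches to reduce notebook output
trainer = pl.Trainer(enable_progress_bar=True, callbacks=[TQDMProgressBar(refresh_rate=20)])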
@@ -982,7 +982,7 @@ def train_reverse(**kwargs):
gpus=1 if str(device).startswith("cuda") else 0,
max_epochs=10,
gradient_clip_val=5,
- progress_bar_refresh_rate=1,
+ enable_progress_bar=True,
)
trainer.logger._default_hp_metric = None # Optional logging argument that we don't need

@@ -1442,7 +1442,7 @@ def train_anomaly(**kwargs):
gpus=1 if str(device).startswith("cuda") else 0,
max_epochs=100,
gradient_clip_val=2,
- progress_bar_refresh_rate=1,
+ enable_progress_bar=True,
)
trainer.logger._default_hp_metric = None # Optional logging argument that we don't need

6 changes: 3 additions & 3 deletions course_UvA-DL/06-graph-neural-networks/GNN_overview.py
@@ -634,7 +634,7 @@ def test_step(self, batch, batch_idx):
# Additionally to the Lightning module, we define a training function below.
# As we have a single graph, we use a batch size of 1 for the data loader and share the same data loader for the train,
# validation, and test set (the mask is picked inside the Lightning module).
- # Besides, we set the argument `progress_bar_refresh_rate` to zero as it usually shows the progress per epoch,
+ # Besides, we set the argument `enable_progress_bar` to False as it usually shows the progress per epoch,
# but an epoch only consists of a single step.
# If you have downloaded the pre-trained models in the beginning of the tutorial, we load those instead of training from scratch.
# Finally, we test the model and return the results.
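As a side illustration of the single-step-epoch point made in the comment above: with one full-batch graph and a batch size of 1, the data loader yields exactly one batch, so a progress bar would only tick once per epoch. A rough sketch, assuming the Cora node-classification dataset from PyTorch Geometric (names are illustrative):

from torch_geometric.datasets import Planetoid
from torch_geometric.loader import DataLoader

cora = Planetoid(root="data/Planetoid", name="Cora")  # a single full-batch graph
loader = DataLoader(cora, batch_size=1)               # shared by train/val/test
print(len(loader))  # 1 -> each epoch is a single optimization step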
@@ -653,7 +653,7 @@ def train_node_classifier(model_name, dataset, **model_kwargs):
callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
gpus=AVAIL_GPUS,
max_epochs=200,
- progress_bar_refresh_rate=0,
+ enable_progress_bar=False,
) # 0 because epoch size is 1
trainer.logger._default_hp_metric = None # Optional logging argument that we don't need

@@ -934,7 +934,7 @@ def train_graph_classifier(model_name, **model_kwargs):
callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
gpus=AVAIL_GPUS,
max_epochs=500,
- progress_bar_refresh_rate=0,
+ enable_progress_bar=False,
)
trainer.logger._default_hp_metric = None

@@ -650,7 +650,7 @@ def train_model(**kwargs):
OutlierCallback(),
LearningRateMonitor("epoch"),
],
- progress_bar_refresh_rate=1,
+ enable_progress_bar=True,
)
# Check whether pretrained model exists. If yes, load it and skip training
pretrained_filename = os.path.join(CHECKPOINT_PATH, "MNIST.ckpt")
@@ -384,7 +384,7 @@ def train_model(**kwargs):
ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc"),
LearningRateMonitor("epoch"),
],
- progress_bar_refresh_rate=1,
+ enable_progress_bar=True,
)
trainer.logger._log_graph = True # If True, we plot the computation graph in tensorboard
trainer.logger._default_hp_metric = None # Optional logging argument that we don't need
2 changes: 1 addition & 1 deletion course_UvA-DL/12-meta-learning/Meta_Learning.py
@@ -561,7 +561,7 @@ def train_model(model_class, train_loader, val_loader, **kwargs):
ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc"),
LearningRateMonitor("epoch"),
],
- progress_bar_refresh_rate=0,
+ enable_progress_bar=False,
)
trainer.logger._default_hp_metric = None

6 changes: 3 additions & 3 deletions course_UvA-DL/13-contrastive-learning/SimCLR.py
@@ -363,7 +363,7 @@ def train_simclr(batch_size, max_epochs=500, **kwargs):
ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc_top5"),
LearningRateMonitor("epoch"),
],
- progress_bar_refresh_rate=1,
+ enable_progress_bar=True,
)
trainer.logger._default_hp_metric = None # Optional logging argument that we don't need

@@ -546,7 +546,7 @@ def train_logreg(batch_size, train_feats_data, test_feats_data, model_suffix, ma
ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc"),
LearningRateMonitor("epoch"),
],
- progress_bar_refresh_rate=0,
+ enable_progress_bar=False,
check_val_every_n_epoch=10,
)
trainer.logger._default_hp_metric = None
@@ -737,7 +737,7 @@ def train_resnet(batch_size, max_epochs=100, **kwargs):
ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc"),
LearningRateMonitor("epoch"),
],
- progress_bar_refresh_rate=1,
+ enable_progress_bar=True,
check_val_every_n_epoch=2,
)
trainer.logger._default_hp_metric = None