This repository was archived by the owner on Aug 28, 2025. It is now read-only.

Commit f20a78f

phlippe, Borda, pre-commit-ci[bot], and awaelchli authored
UvA DL Tutorials: Updating PL API to >1.8 (#222)
Co-authored-by: Jirka Borovec <[email protected]>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Adrian Wälchli <[email protected]>
Co-authored-by: Jirka <[email protected]>
1 parent fbed9e5 commit f20a78f

22 files changed, +80 -58 lines changed


course_UvA-DL/01-introduction-to-pytorch/.meta.yml

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 title: "Tutorial 1: Introduction to PyTorch"
 author: Phillip Lippe
 created: 2021-08-27
-updated: 2021-11-29
+updated: 2023-01-04
 license: CC BY-SA
 description: |
   This tutorial will give a short introduction to PyTorch basics, and get you setup for writing your own neural networks.

course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py

Lines changed: 1 addition & 1 deletion
@@ -455,7 +455,7 @@
 
 # Additionally, some operations on a GPU are implemented stochastic for efficiency
 # We want to ensure that all operations are deterministic on GPU (if used) for reproducibility
-torch.backends.cudnn.determinstic = True
+torch.backends.cudnn.deterministic = True
 torch.backends.cudnn.benchmark = False
 
 # %% [markdown]
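The typo fixed here (and repeated in the diffs below) is easy to miss: assigning to `torch.backends.cudnn.determinstic` raises no error, it just creates a new, unused attribute on the module, so cuDNN was never actually forced into deterministic mode. A minimal sketch of the corrected reproducibility setup as the tutorials use it:

import pytorch_lightning as pl
import torch

pl.seed_everything(42)  # seeds Python, NumPy, and PyTorch RNGs in one call

# The misspelled attribute was silently accepted and had no effect;
# the correctly spelled flag forces cuDNN to select deterministic kernels.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False  # autotuning trades reproducibility for speed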

course_UvA-DL/02-activation-functions/.meta.yml

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 title: "Tutorial 2: Activation Functions"
 author: Phillip Lippe
 created: 2021-08-27
-updated: 2021-08-27
+updated: 2023-01-04
 license: CC BY-SA
 description: |
   In this tutorial, we will take a closer look at (popular) activation functions and investigate their effect on optimization properties in neural networks.

course_UvA-DL/02-activation-functions/Activation_Functions.py

Lines changed: 1 addition & 1 deletion
@@ -64,7 +64,7 @@ def set_seed(seed):
 
 # Additionally, some operations on a GPU are implemented stochastic for efficiency
 # We want to ensure that all operations are deterministic on GPU (if used) for reproducibility
-torch.backends.cudnn.determinstic = True
+torch.backends.cudnn.deterministic = True
 torch.backends.cudnn.benchmark = False
 
 # Fetching the device that will be used throughout this notebook

course_UvA-DL/03-initialization-and-optimization/.meta.yml

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 title: "Tutorial 3: Initialization and Optimization"
 author: Phillip Lippe
 created: 2021-08-27
-updated: 2021-11-29
+updated: 2023-01-04
 license: CC BY-SA
 tags:
   - Image

course_UvA-DL/03-initialization-and-optimization/Initialization_and_Optimization.py

Lines changed: 3 additions & 3 deletions
@@ -47,7 +47,7 @@
 pl.seed_everything(42)
 
 # Ensure that all operations are deterministic on GPU (if used) for reproducibility
-torch.backends.cudnn.determinstic = True
+torch.backends.cudnn.deterministic = True
 torch.backends.cudnn.benchmark = False
 
 # Fetching the device that will be used throughout this notebook
@@ -937,8 +937,8 @@ def pathological_curve_loss(w1, w2):
 def plot_curve(
     curve_fn, x_range=(-5, 5), y_range=(-5, 5), plot_3d=False, cmap=cm.viridis, title="Pathological curvature"
 ):
-    fig = plt.figure()
-    ax = fig.gca(projection="3d") if plot_3d else fig.gca()
+    _ = plt.figure()
+    ax = plt.axes(projection="3d") if plot_3d else plt.axes()
 
     x = torch.arange(x_range[0], x_range[1], (x_range[1] - x_range[0]) / 100.0)
     y = torch.arange(y_range[0], y_range[1], (y_range[1] - y_range[0]) / 100.0)
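The plotting change in the second hunk is a Matplotlib compatibility fix rather than a Lightning one: passing keyword arguments such as `projection` to `Figure.gca()` was deprecated in Matplotlib 3.4 and later removed, so the code now creates the axes explicitly. A minimal standalone sketch of the new pattern; `plot_3d` here is a stand-in for the tutorial's `plot_curve()` argument:

import matplotlib.pyplot as plt

plot_3d = True  # stand-in for the tutorial's plot_3d flag

_ = plt.figure()
# plt.axes() creates the axes on the current figure; with Matplotlib >= 3.2
# the "3d" projection is registered automatically, no mplot3d import needed.
ax = plt.axes(projection="3d") if plot_3d else plt.axes()
ax.set_title("Pathological curvature")
plt.show()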

course_UvA-DL/04-inception-resnet-densenet/.meta.yaml

Lines changed: 2 additions & 1 deletion
@@ -1,7 +1,7 @@
 title: "Tutorial 4: Inception, ResNet and DenseNet"
 author: Phillip Lippe
 created: 2021-08-27
-updated: 2021-11-29
+updated: 2023-01-04
 license: CC BY-SA
 tags:
   - Image
@@ -18,5 +18,6 @@ requirements:
   - matplotlib
   - seaborn
   - tabulate
+  - pytorch-lightning>=1.8
 accelerator:
   - GPU

course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py

Lines changed: 4 additions & 3 deletions
@@ -49,7 +49,7 @@
 pl.seed_everything(42)
 
 # Ensure that all operations are deterministic on GPU (if used) for reproducibility
-torch.backends.cudnn.determinstic = True
+torch.backends.cudnn.deterministic = True
 torch.backends.cudnn.benchmark = False
 
 device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
@@ -348,7 +348,8 @@ def train_model(model_name, save_name=None, **kwargs):
     trainer = pl.Trainer(
         default_root_dir=os.path.join(CHECKPOINT_PATH, save_name),  # Where to save models
         # We run on a single GPU (if possible)
-        gpus=1 if str(device) == "cuda:0" else 0,
+        accelerator="gpu" if str(device).startswith("cuda") else "cpu",
+        devices=1,
         # How many epochs to train for if no patience is set
         max_epochs=180,
         callbacks=[
@@ -357,7 +358,7 @@ def train_model(model_name, save_name=None, **kwargs):
             ),  # Save the best checkpoint based on the maximum val_acc recorded. Saves only weights and not optimizer
             LearningRateMonitor("epoch"),
         ],  # Log learning rate every epoch
-        progress_bar_refresh_rate=1,
+        enable_progress_bar=True,
     )  # In case your notebook crashes due to the progress bar, consider increasing the refresh rate
     trainer.logger._log_graph = True  # If True, we plot the computation graph in tensorboard
     trainer.logger._default_hp_metric = None  # Optional logging argument that we don't need
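The Trainer changes above follow the same two-step migration applied throughout this commit: the removed `gpus=N` argument becomes the `accelerator=`/`devices=` pair, and the removed `progress_bar_refresh_rate=` flag becomes `enable_progress_bar=`. Note the device check is also broadened from `== "cuda:0"` to `.startswith("cuda")`, which covers non-zero GPU indices. A minimal standalone sketch of the pattern, assuming pytorch-lightning >= 1.8 (values taken from this tutorial):

import pytorch_lightning as pl
import torch

device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")

trainer = pl.Trainer(
    # Before 1.8: gpus=1 if str(device) == "cuda:0" else 0
    accelerator="gpu" if str(device).startswith("cuda") else "cpu",
    devices=1,  # one device of whichever accelerator was selected
    max_epochs=180,
    # Before 1.8: progress_bar_refresh_rate=1 (argument removed entirely)
    enable_progress_bar=True,
)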

course_UvA-DL/05-transformers-and-MH-attention/.meta.yml

Lines changed: 2 additions & 1 deletion
@@ -1,7 +1,7 @@
 title: "Tutorial 5: Transformers and Multi-Head Attention"
 author: Phillip Lippe
 created: 2021-06-30
-updated: 2021-11-29
+updated: 2023-01-04
 license: CC BY-SA
 build: 0
 tags:
@@ -19,5 +19,6 @@ requirements:
   - torchvision
   - matplotlib
   - seaborn
+  - pytorch-lightning>=1.8
 accelerator:
   - GPU

course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py

Lines changed: 7 additions & 5 deletions
@@ -61,7 +61,7 @@
 pl.seed_everything(42)
 
 # Ensure that all operations are deterministic on GPU (if used) for reproducibility
-torch.backends.cudnn.determinstic = True
+torch.backends.cudnn.deterministic = True
 torch.backends.cudnn.benchmark = False
 
 device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
@@ -979,10 +979,11 @@ def train_reverse(**kwargs):
     trainer = pl.Trainer(
         default_root_dir=root_dir,
         callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
-        gpus=1 if str(device).startswith("cuda") else 0,
+        accelerator="gpu" if str(device).startswith("cuda") else "cpu",
+        devices=1,
         max_epochs=10,
         gradient_clip_val=5,
-        progress_bar_refresh_rate=1,
+        enable_progress_bar=True,
     )
     trainer.logger._default_hp_metric = None  # Optional logging argument that we don't need
 
@@ -1439,10 +1440,11 @@ def train_anomaly(**kwargs):
     trainer = pl.Trainer(
         default_root_dir=root_dir,
         callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
-        gpus=1 if str(device).startswith("cuda") else 0,
+        accelerator="gpu" if str(device).startswith("cuda") else "cpu",
+        devices=1,
         max_epochs=100,
         gradient_clip_val=2,
-        progress_bar_refresh_rate=1,
+        enable_progress_bar=True,
     )
     trainer.logger._default_hp_metric = None  # Optional logging argument that we don't need
 
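One detail the Tutorial 4 diff's comment ("consider increasing the refresh rate") no longer has a matching Trainer argument for: since 1.8 the refresh rate lives on the progress-bar callback rather than the Trainer. A minimal sketch of throttling the bar under that assumption, using the TQDMProgressBar callback from pytorch_lightning.callbacks:

import pytorch_lightning as pl
from pytorch_lightning.callbacks import TQDMProgressBar

# enable_progress_bar only toggles the bar; to update it every 20 batches
# instead of every batch, configure the callback directly:
trainer = pl.Trainer(
    max_epochs=10,
    enable_progress_bar=True,
    callbacks=[TQDMProgressBar(refresh_rate=20)],
)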
