From c1c81683ba2a13aa36b4d02eb8e1b7044a95b9f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Mon, 13 Mar 2023 18:14:52 -0400 Subject: [PATCH 01/25] update optimization --- .../Initialization_and_Optimization.py | 22 +++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/course_UvA-DL/03-initialization-and-optimization/Initialization_and_Optimization.py b/course_UvA-DL/03-initialization-and-optimization/Initialization_and_Optimization.py index fdf27edbc..9bb0e8e71 100644 --- a/course_UvA-DL/03-initialization-and-optimization/Initialization_and_Optimization.py +++ b/course_UvA-DL/03-initialization-and-optimization/Initialization_and_Optimization.py @@ -1,3 +1,15 @@ +# --- +# jupyter: +# jupytext: +# cell_metadata_filter: -all +# formats: ipynb,py:percent +# text_representation: +# extension: .py +# format_name: percent +# format_version: '1.3' +# jupytext_version: 1.14.5 +# --- + # %% [markdown] #
# In the first half of the notebook, we will review different initialization techniques, and go step by step from the simplest initialization to methods that are nowadays used in very deep networks. @@ -15,7 +27,7 @@ import matplotlib.pyplot as plt import numpy as np -import pytorch_lightning as pl +import lightning as L import seaborn as sns import torch import torch.nn as nn @@ -33,7 +45,7 @@ sns.set() # %% [markdown] -# Instead of the `set_seed` function as in Tutorial 3, we can use PyTorch Lightning's build-in function `pl.seed_everything`. +# Instead of the `set_seed` function as in Tutorial 3, we can use Lightning's build-in function `L.seed_everything`. # We will reuse the path variables `DATASET_PATH` and `CHECKPOINT_PATH` as in Tutorial 3. # Adjust the paths if necessary. @@ -44,7 +56,7 @@ CHECKPOINT_PATH = os.environ.get("PATH_CHECKPOINT", "saved_models/InitOptim/") # Seed everything -pl.seed_everything(42) +L.seed_everything(42) # Ensure that all operations are deterministic on GPU (if used) for reproducibility torch.backends.cudnn.determinstic = True @@ -938,7 +950,9 @@ def plot_curve( curve_fn, x_range=(-5, 5), y_range=(-5, 5), plot_3d=False, cmap=cm.viridis, title="Pathological curvature" ): fig = plt.figure() - ax = fig.gca(projection="3d") if plot_3d else fig.gca() + ax = fig.gca() + if plot_3d: + ax = fig.add_subplot(projection='3d') x = torch.arange(x_range[0], x_range[1], (x_range[1] - x_range[0]) / 100.0) y = torch.arange(y_range[0], y_range[1], (y_range[1] - y_range[0]) / 100.0) From 0504638a0e56cbd26cf8861f2f7538e03d686421 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Mon, 13 Mar 2023 18:26:59 -0400 Subject: [PATCH 02/25] update inception --- .../Inception_ResNet_DenseNet.py | 36 ++++++++++--------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py b/course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py index 5420a9dbe..b5ac20a6c 100644 --- a/course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py +++ b/course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py @@ -11,7 +11,7 @@ import matplotlib import matplotlib.pyplot as plt import numpy as np -import pytorch_lightning as pl +import lightning as L import seaborn as sns import tabulate import torch @@ -21,13 +21,15 @@ import torchvision # %matplotlib inline -from IPython.display import HTML, display, set_matplotlib_formats +from IPython.display import HTML, display +import matplotlib_inline.backend_inline + from PIL import Image -from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint +from lightning.pytorch.callbacks import LearningRateMonitor, ModelCheckpoint from torchvision import transforms from torchvision.datasets import CIFAR10 -set_matplotlib_formats("svg", "pdf") # For export +matplotlib_inline.backend_inline.set_matplotlib_formats("svg", "pdf") # For export matplotlib.rcParams["lines.linewidth"] = 2.0 sns.reset_orig() @@ -46,7 +48,7 @@ # Function for setting the seed -pl.seed_everything(42) +L.seed_everything(42) # Ensure that all operations are deterministic on GPU (if used) for reproducibility torch.backends.cudnn.determinstic = True @@ -136,9 +138,9 @@ # We need to do a little trick because the validation set should not use the augmentation. 
train_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=train_transform, download=True) val_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=test_transform, download=True) -pl.seed_everything(42) +L.seed_everything(42) train_set, _ = torch.utils.data.random_split(train_dataset, [45000, 5000]) -pl.seed_everything(42) +L.seed_everything(42) _, val_set = torch.utils.data.random_split(val_dataset, [45000, 5000]) # Loading the test set @@ -180,7 +182,7 @@ # %% [markdown] # ## PyTorch Lightning # -# In this notebook and in many following ones, we will make use of the library [PyTorch Lightning](https://www.pytorchlightning.ai/). +# In this notebook and in many following ones, we will make use of the library [PyTorch Lightning](https://www.lightning.ai/docs/pytorch/stable). # PyTorch Lightning is a framework that simplifies your code needed to train, evaluate, and test a model in PyTorch. # It also handles logging into [TensorBoard](https://pytorch.org/tutorials/intermediate/tensorboard_tutorial.html), a visualization toolkit for ML experiments, and saving model checkpoints automatically with minimal code overhead from our side. # This is extremely helpful for us as we want to focus on implementing different model architectures and spend little time on other code overhead. @@ -192,12 +194,12 @@ # %% # Setting the seed -pl.seed_everything(42) +L.seed_everything(42) # %% [markdown] # Thus, in the future, we don't have to define our own `set_seed` function anymore. # -# In PyTorch Lightning, we define `pl.LightningModule`'s (inheriting from `Module`) that organize our code into 5 main sections: +# In PyTorch Lightning, we define `L.LightningModule`'s (inheriting from `Module`) that organize our code into 5 main sections: # # 1. Initialization (`__init__`), where we create all necessary parameters/models # 2. Optimizers (`configure_optimizers`) where we create the optimizers, learning rate scheduler, etc. @@ -208,13 +210,13 @@ # 5. Test loop (`test_step`) which is the same as validation, only on a test set. # # Therefore, we don't abstract the PyTorch code, but rather organize it and define some default operations that are commonly used. -# If you need to change something else in your training/validation/test loop, there are many possible functions you can overwrite (see the [docs](https://pytorch-lightning.readthedocs.io/en/stable/common/lightning_module.html) for details). +# If you need to change something else in your training/validation/test loop, there are many possible functions you can overwrite (see the [docs](https://lightning.ai/docs/pytorch/stable/common/lightning_module.html) for details). # # Now we can look at an example of how a Lightning Module for training a CNN looks like: # %% -class CIFARModule(pl.LightningModule): +class CIFARModule(L.LightningModule): def __init__(self, model_name, model_hparams, optimizer_name, optimizer_hparams): """ Inputs: @@ -322,7 +324,7 @@ def create_model(model_name, model_hparams): # Besides the Lightning module, the second most important module in PyTorch Lightning is the `Trainer`. # The trainer is responsible to execute the training steps defined in the Lightning module and completes the framework. # Similar to the Lightning module, you can override any key part that you don't want to be automated, but the default settings are often the best practice to do. -# For a full overview, see the [documentation](https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html). 
+# For a full overview, see the [documentation](https://lightning.ai/docs/pytorch/stable/common/trainer.html). # The most important functions we use below are: # # * `trainer.fit`: Takes as input a lightning module, a training dataset, and an (optional) validation dataset. @@ -345,10 +347,11 @@ def train_model(model_name, save_name=None, **kwargs): save_name = model_name # Create a PyTorch Lightning trainer with the generation callback - trainer = pl.Trainer( + trainer = L.Trainer( default_root_dir=os.path.join(CHECKPOINT_PATH, save_name), # Where to save models # We run on a single GPU (if possible) - gpus=1 if str(device) == "cuda:0" else 0, + accelerator=("cuda" if str(device) == "cuda:0" else "cpu"), + devices=1, # How many epochs to train for if no patience is set max_epochs=180, callbacks=[ @@ -357,7 +360,6 @@ def train_model(model_name, save_name=None, **kwargs): ), # Save the best checkpoint based on the maximum val_acc recorded. Saves only weights and not optimizer LearningRateMonitor("epoch"), ], # Log learning rate every epoch - progress_bar_refresh_rate=1, ) # In case your notebook crashes due to the progress bar, consider increasing the refresh rate trainer.logger._log_graph = True # If True, we plot the computation graph in tensorboard trainer.logger._default_hp_metric = None # Optional logging argument that we don't need @@ -369,7 +371,7 @@ def train_model(model_name, save_name=None, **kwargs): # Automatically loads the model with the saved hyperparameters model = CIFARModule.load_from_checkpoint(pretrained_filename) else: - pl.seed_everything(42) # To be reproducable + L.seed_everything(42) # To be reproducable model = CIFARModule(model_name=model_name, **kwargs) trainer.fit(model, train_loader, val_loader) model = CIFARModule.load_from_checkpoint( From da590dbbb083e79a79fcbe4347666eac20a4c254 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Mon, 13 Mar 2023 18:38:50 -0400 Subject: [PATCH 03/25] update attention --- .../Introduction_to_PyTorch.py | 16 ++++++++++-- .../Activation_Functions.py | 4 +-- .../Initialization_and_Optimization.py | 4 +-- .../Inception_ResNet_DenseNet.py | 12 +++++++++ .../Transformers_MHAttention.py | 26 +++++++++---------- 5 files changed, 43 insertions(+), 19 deletions(-) diff --git a/course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py b/course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py index 74454e209..a146f64a4 100644 --- a/course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py +++ b/course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py @@ -1,3 +1,15 @@ +# --- +# jupyter: +# jupytext: +# cell_metadata_filter: -all +# formats: ipynb,py:percent +# text_representation: +# extension: .py +# format_name: percent +# format_version: '1.3' +# jupytext_version: 1.14.5 +# --- + # %% [markdown] #
# Welcome to our PyTorch tutorial for the Deep Learning course 2020 at the University of Amsterdam! @@ -31,12 +43,12 @@ import torch.utils.data as data # %matplotlib inline -from IPython.display import set_matplotlib_formats +import matplotlib_inline.backend_inline from matplotlib.colors import to_rgba from torch import Tensor from tqdm.notebook import tqdm # Progress bar -set_matplotlib_formats("svg", "pdf") +matplotlib_inline.backend_inline.set_matplotlib_formats("svg", "pdf") # For export # %% [markdown] # ## The Basics of PyTorch diff --git a/course_UvA-DL/02-activation-functions/Activation_Functions.py b/course_UvA-DL/02-activation-functions/Activation_Functions.py index 9645ff57f..7b95d4e3d 100644 --- a/course_UvA-DL/02-activation-functions/Activation_Functions.py +++ b/course_UvA-DL/02-activation-functions/Activation_Functions.py @@ -21,12 +21,12 @@ import torchvision # %matplotlib inline -from IPython.display import set_matplotlib_formats +import matplotlib_inline.backend_inline from torchvision import transforms from torchvision.datasets import FashionMNIST from tqdm.notebook import tqdm -set_matplotlib_formats("svg", "pdf") # For export +matplotlib_inline.backend_inline.set_matplotlib_formats("svg", "pdf") # For export sns.set() # %% [markdown] diff --git a/course_UvA-DL/03-initialization-and-optimization/Initialization_and_Optimization.py b/course_UvA-DL/03-initialization-and-optimization/Initialization_and_Optimization.py index 9bb0e8e71..3978de76a 100644 --- a/course_UvA-DL/03-initialization-and-optimization/Initialization_and_Optimization.py +++ b/course_UvA-DL/03-initialization-and-optimization/Initialization_and_Optimization.py @@ -35,13 +35,13 @@ import torch.utils.data as data # %matplotlib inline -from IPython.display import set_matplotlib_formats +import matplotlib_inline.backend_inline from matplotlib import cm from torchvision import transforms from torchvision.datasets import FashionMNIST from tqdm.notebook import tqdm -set_matplotlib_formats("svg", "pdf") # For export +matplotlib_inline.backend_inline.set_matplotlib_formats("svg", "pdf") # For export sns.set() # %% [markdown] diff --git a/course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py b/course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py index b5ac20a6c..bc7a2e3b5 100644 --- a/course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py +++ b/course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py @@ -1,3 +1,15 @@ +# --- +# jupyter: +# jupytext: +# cell_metadata_filter: -all +# formats: ipynb,py:percent +# text_representation: +# extension: .py +# format_name: percent +# format_version: '1.3' +# jupytext_version: 1.14.5 +# --- + # %% [markdown] #
# Let's start with importing our standard libraries here. diff --git a/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py b/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py index cca56f2ee..ea5c9ac49 100644 --- a/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py +++ b/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py @@ -28,7 +28,7 @@ import numpy as np # PyTorch Lightning -import pytorch_lightning as pl +import lightning as L import seaborn as sns # PyTorch @@ -40,15 +40,15 @@ # Torchvision import torchvision -from IPython.display import set_matplotlib_formats -from pytorch_lightning.callbacks import ModelCheckpoint +import matplotlib_inline.backend_inline +from lightning.pytorch.callbacks import ModelCheckpoint from torchvision import transforms from torchvision.datasets import CIFAR100 from tqdm.notebook import tqdm plt.set_cmap("cividis") # %matplotlib inline -set_matplotlib_formats("svg", "pdf") # For export +matplotlib_inline.backend_inline.set_matplotlib_formats("svg", "pdf") # For export matplotlib.rcParams["lines.linewidth"] = 2.0 sns.reset_orig() @@ -58,7 +58,7 @@ CHECKPOINT_PATH = os.environ.get("PATH_CHECKPOINT", "saved_models/Transformers/") # Setting the seed -pl.seed_everything(42) +L.seed_everything(42) # Ensure that all operations are deterministic on GPU (if used) for reproducibility torch.backends.cudnn.determinstic = True @@ -246,7 +246,7 @@ def scaled_dot_product(q, k, v, mask=None): # %% seq_len, d_k = 3, 2 -pl.seed_everything(42) +L.seed_everything(42) q = torch.randn(seq_len, d_k) k = torch.randn(seq_len, d_k) v = torch.randn(seq_len, d_k) @@ -744,7 +744,7 @@ def get_lr_factor(self, epoch): # %% -class TransformerPredictor(pl.LightningModule): +class TransformerPredictor(L.LightningModule): def __init__( self, input_dim, @@ -976,13 +976,13 @@ def train_reverse(**kwargs): # Create a PyTorch Lightning trainer with the generation callback root_dir = os.path.join(CHECKPOINT_PATH, "ReverseTask") os.makedirs(root_dir, exist_ok=True) - trainer = pl.Trainer( + trainer = L.Trainer( default_root_dir=root_dir, callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")], - gpus=1 if str(device).startswith("cuda") else 0, + accelerator=("cuda" if str(device).startswith("cuda") else "cpu"), + devices=1, max_epochs=10, gradient_clip_val=5, - progress_bar_refresh_rate=1, ) trainer.logger._default_hp_metric = None # Optional logging argument that we don't need @@ -1436,13 +1436,13 @@ def train_anomaly(**kwargs): # Create a PyTorch Lightning trainer with the generation callback root_dir = os.path.join(CHECKPOINT_PATH, "SetAnomalyTask") os.makedirs(root_dir, exist_ok=True) - trainer = pl.Trainer( + trainer = L.Trainer( default_root_dir=root_dir, callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")], - gpus=1 if str(device).startswith("cuda") else 0, + accelerator=("cuda" if str(device).startswith("cuda") else "cpu"), + devices=1, max_epochs=100, gradient_clip_val=2, - progress_bar_refresh_rate=1, ) trainer.logger._default_hp_metric = None # Optional logging argument that we don't need From ec3d50a98dbdaaabe89746a2ada173bd5a01da1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Mon, 13 Mar 2023 18:51:49 -0400 Subject: [PATCH 04/25] incomplete GNN --- .../06-graph-neural-networks/GNN_overview.py | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git 
a/course_UvA-DL/06-graph-neural-networks/GNN_overview.py b/course_UvA-DL/06-graph-neural-networks/GNN_overview.py index 2f0d7c487..65d422f05 100644 --- a/course_UvA-DL/06-graph-neural-networks/GNN_overview.py +++ b/course_UvA-DL/06-graph-neural-networks/GNN_overview.py @@ -11,7 +11,7 @@ from urllib.error import HTTPError # PyTorch Lightning -import pytorch_lightning as pl +import lightning as L # PyTorch import torch @@ -25,7 +25,7 @@ import torch_geometric.nn as geom_nn # PL callbacks -from pytorch_lightning.callbacks import ModelCheckpoint +from lightning.pytorch.callbacks import ModelCheckpoint from torch import Tensor AVAIL_GPUS = min(1, torch.cuda.device_count()) @@ -36,7 +36,7 @@ CHECKPOINT_PATH = os.environ.get("PATH_CHECKPOINT", "saved_models/GNNs/") # Setting the seed -pl.seed_everything(42) +L.seed_everything(42) # Ensure that all operations are deterministic on GPU (if used) for reproducibility torch.backends.cudnn.determinstic = True @@ -580,7 +580,7 @@ def forward(self, x, *args, **kwargs): # %% -class NodeLevelGNN(pl.LightningModule): +class NodeLevelGNN(L.LightningModule): def __init__(self, model_name, **model_kwargs): super().__init__() # Saving hyperparameters @@ -875,7 +875,7 @@ def forward(self, x, edge_index, batch_idx): # %% -class GraphLevelGNN(pl.LightningModule): +class GraphLevelGNN(L.LightningModule): def __init__(self, **model_kwargs): super().__init__() # Saving hyperparameters @@ -924,17 +924,18 @@ def test_step(self, batch, batch_idx): # %% def train_graph_classifier(model_name, **model_kwargs): - pl.seed_everything(42) + L.seed_everything(42) # Create a PyTorch Lightning trainer with the generation callback root_dir = os.path.join(CHECKPOINT_PATH, "GraphLevel" + model_name) os.makedirs(root_dir, exist_ok=True) - trainer = pl.Trainer( + trainer = L.Trainer( default_root_dir=root_dir, callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")], - gpus=AVAIL_GPUS, + accelerator="cuda", + devices=AVAIL_GPUS, max_epochs=500, - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) trainer.logger._default_hp_metric = None @@ -944,7 +945,7 @@ def train_graph_classifier(model_name, **model_kwargs): print("Found pretrained model, loading...") model = GraphLevelGNN.load_from_checkpoint(pretrained_filename) else: - pl.seed_everything(42) + L.seed_everything(42) model = GraphLevelGNN( c_in=tu_dataset.num_node_features, c_out=1 if tu_dataset.num_classes == 2 else tu_dataset.num_classes, From 0b92f1d034384850b1428ba8a39bac6b477c291c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Mon, 13 Mar 2023 18:56:24 -0400 Subject: [PATCH 05/25] update energy models --- .../Deep_Energy_Models.py | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/course_UvA-DL/07-deep-energy-based-generative-models/Deep_Energy_Models.py b/course_UvA-DL/07-deep-energy-based-generative-models/Deep_Energy_Models.py index 2d29e6dfb..06f3f621a 100644 --- a/course_UvA-DL/07-deep-energy-based-generative-models/Deep_Energy_Models.py +++ b/course_UvA-DL/07-deep-energy-based-generative-models/Deep_Energy_Models.py @@ -15,7 +15,7 @@ import numpy as np # PyTorch Lightning -import pytorch_lightning as pl +import lightning as L # PyTorch import torch @@ -27,12 +27,12 @@ import torchvision # %matplotlib inline -from IPython.display import set_matplotlib_formats -from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint +import matplotlib_inline.backend_inline +from lightning.pytorch.callbacks import Callback, 
LearningRateMonitor, ModelCheckpoint from torchvision import transforms from torchvision.datasets import MNIST -set_matplotlib_formats("svg", "pdf") # For export +matplotlib_inline.backend_inline.set_matplotlib_formats("svg", "pdf") # For export matplotlib.rcParams["lines.linewidth"] = 2.0 # Path to the folder where the datasets are/should be downloaded (e.g. CIFAR10) @@ -41,7 +41,7 @@ CHECKPOINT_PATH = os.environ.get("PATH_CHECKPOINT", "saved_models/tutorial8") # Setting the seed -pl.seed_everything(42) +L.seed_everything(42) # Ensure that all operations are deterministic on GPU (if used) for reproducibility torch.backends.cudnn.determinstic = True @@ -463,7 +463,7 @@ def generate_samples(model, inp_imgs, steps=60, step_size=10, return_img_per_ste # %% -class DeepEnergyModel(pl.LightningModule): +class DeepEnergyModel(L.LightningModule): def __init__(self, img_shape, batch_size, alpha=0.1, lr=1e-4, beta1=0.0, **CNN_args): super().__init__() self.save_hyperparameters() @@ -547,7 +547,7 @@ def validation_step(self, batch, batch_idx): # %% -class GenerateCallback(pl.Callback): +class GenerateCallback(Callback): def __init__(self, batch_size=8, vis_steps=8, num_steps=256, every_n_epochs=5): super().__init__() self.batch_size = batch_size # Number of images to generate @@ -588,7 +588,7 @@ def generate_imgs(self, pl_module): # %% -class SamplerCallback(pl.Callback): +class SamplerCallback(Callback): def __init__(self, num_imgs=32, every_n_epochs=5): super().__init__() self.num_imgs = num_imgs # Number of images to plot @@ -610,7 +610,7 @@ def on_epoch_end(self, trainer, pl_module): # %% -class OutlierCallback(pl.Callback): +class OutlierCallback(Callback): def __init__(self, batch_size=1024): super().__init__() self.batch_size = batch_size @@ -638,9 +638,10 @@ def on_epoch_end(self, trainer, pl_module): # %% def train_model(**kwargs): # Create a PyTorch Lightning trainer with the generation callback - trainer = pl.Trainer( + trainer = L.Trainer( default_root_dir=os.path.join(CHECKPOINT_PATH, "MNIST"), - gpus=1 if str(device).startswith("cuda") else 0, + accelerator=("cuda" if str(device).startswith("cuda") else "cpu"), + devices=1, max_epochs=60, gradient_clip_val=0.1, callbacks=[ @@ -650,7 +651,6 @@ def train_model(**kwargs): OutlierCallback(), LearningRateMonitor("epoch"), ], - progress_bar_refresh_rate=1, ) # Check whether pretrained model exists. 
If yes, load it and skip training pretrained_filename = os.path.join(CHECKPOINT_PATH, "MNIST.ckpt") @@ -658,7 +658,7 @@ def train_model(**kwargs): print("Found pretrained model, loading...") model = DeepEnergyModel.load_from_checkpoint(pretrained_filename) else: - pl.seed_everything(42) + L.seed_everything(42) model = DeepEnergyModel(**kwargs) trainer.fit(model, train_loader, test_loader) model = DeepEnergyModel.load_from_checkpoint(trainer.checkpoint_callback.best_model_path) @@ -707,7 +707,7 @@ def train_model(**kwargs): # %% model.to(device) -pl.seed_everything(43) +L.seed_everything(43) callback = GenerateCallback(batch_size=4, vis_steps=8, num_steps=256) imgs_per_step = callback.generate_imgs(model) imgs_per_step = imgs_per_step.cpu() From a5466e475025603aa9011aad5cd24254de18c59d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Mon, 13 Mar 2023 19:06:15 -0400 Subject: [PATCH 06/25] update deep autoencoders --- .../08-deep-autoencoders/Deep_Autoencoders.py | 33 +++++++++++++------ 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/course_UvA-DL/08-deep-autoencoders/Deep_Autoencoders.py b/course_UvA-DL/08-deep-autoencoders/Deep_Autoencoders.py index 8a1bf840b..d7a6b70d9 100644 --- a/course_UvA-DL/08-deep-autoencoders/Deep_Autoencoders.py +++ b/course_UvA-DL/08-deep-autoencoders/Deep_Autoencoders.py @@ -1,3 +1,15 @@ +# --- +# jupyter: +# jupytext: +# cell_metadata_filter: -all +# formats: ipynb,py:percent +# text_representation: +# extension: .py +# format_name: percent +# format_version: '1.3' +# jupytext_version: 1.14.5 +# --- + # %% [markdown] #
@@ -8,7 +20,7 @@ import matplotlib import matplotlib.pyplot as plt -import pytorch_lightning as pl +import lightning as L import seaborn as sns import torch import torch.nn as nn @@ -16,15 +28,15 @@ import torch.optim as optim import torch.utils.data as data import torchvision -from IPython.display import set_matplotlib_formats -from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint +import matplotlib_inline.backend_inline +from lightning.pytorch.callbacks import Callback, LearningRateMonitor, ModelCheckpoint from torch.utils.tensorboard import SummaryWriter from torchvision import transforms from torchvision.datasets import CIFAR10 from tqdm.notebook import tqdm # %matplotlib inline -set_matplotlib_formats("svg", "pdf") # For export +matplotlib_inline.backend_inline.set_matplotlib_formats("svg", "pdf") # For export matplotlib.rcParams["lines.linewidth"] = 2.0 sns.reset_orig() sns.set() @@ -38,7 +50,7 @@ CHECKPOINT_PATH = os.environ.get("PATH_CHECKPOINT", "saved_models/tutorial9") # Setting the seed -pl.seed_everything(42) +L.seed_everything(42) # Ensure that all operations are deterministic on GPU (if used) for reproducibility torch.backends.cudnn.determinstic = True @@ -94,7 +106,7 @@ # Loading the training dataset. We need to split it into a training and validation part train_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=transform, download=True) -pl.seed_everything(42) +L.seed_everything(42) train_set, val_set = torch.utils.data.random_split(train_dataset, [45000, 5000]) # Loading the test set @@ -236,7 +248,7 @@ def forward(self, x): # %% -class Autoencoder(pl.LightningModule): +class Autoencoder(L.LightningModule): def __init__( self, base_channel_size: int, @@ -352,7 +364,7 @@ def compare_imgs(img1, img2, title_prefix=""): # %% -class GenerateCallback(pl.Callback): +class GenerateCallback(Callback): def __init__(self, input_imgs, every_n_epochs=1): super().__init__() self.input_imgs = input_imgs # Images to reconstruct during training @@ -383,9 +395,10 @@ def on_train_epoch_end(self, trainer, pl_module): # %% def train_cifar(latent_dim): # Create a PyTorch Lightning trainer with the generation callback - trainer = pl.Trainer( + trainer = L.Trainer( default_root_dir=os.path.join(CHECKPOINT_PATH, "cifar10_%i" % latent_dim), - gpus=1 if str(device).startswith("cuda") else 0, + accelerator=("cuda" if str(device).startswith("cuda") else "cpu"), + devices=1, max_epochs=500, callbacks=[ ModelCheckpoint(save_weights_only=True), From 88722ac7a5ed3f5a96e17f45b9deddb3b091f26f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Mon, 13 Mar 2023 19:15:19 -0400 Subject: [PATCH 07/25] normalizing flows incomplete --- .../09-normalizing-flows/NF_image_modeling.py | 32 ++++++++++--------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/course_UvA-DL/09-normalizing-flows/NF_image_modeling.py b/course_UvA-DL/09-normalizing-flows/NF_image_modeling.py index 253149d1b..ff5f0dee6 100644 --- a/course_UvA-DL/09-normalizing-flows/NF_image_modeling.py +++ b/course_UvA-DL/09-normalizing-flows/NF_image_modeling.py @@ -13,7 +13,7 @@ import matplotlib import matplotlib.pyplot as plt import numpy as np -import pytorch_lightning as pl +import lightning as L import seaborn as sns import tabulate import torch @@ -22,16 +22,17 @@ import torch.optim as optim import torch.utils.data as data import torchvision -from IPython.display import HTML, display, set_matplotlib_formats +from IPython.display import HTML, display +import 
matplotlib_inline.backend_inline from matplotlib.colors import to_rgb -from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint +from lightning.pytorch.callbacks import LearningRateMonitor, ModelCheckpoint from torch import Tensor from torchvision import transforms from torchvision.datasets import MNIST from tqdm.notebook import tqdm # %matplotlib inline -set_matplotlib_formats("svg", "pdf") # For export +matplotlib_inline.backend_inline.set_matplotlib_formats("svg", "pdf") # For export matplotlib.rcParams["lines.linewidth"] = 2.0 sns.reset_orig() @@ -41,7 +42,7 @@ CHECKPOINT_PATH = os.environ.get("PATH_CHECKPOINT", "saved_models/tutorial11") # Setting the seed -pl.seed_everything(42) +L.seed_everything(42) # Ensure that all operations are deterministic on GPU (if used) for reproducibility torch.backends.cudnn.determinstic = True @@ -97,7 +98,7 @@ def discretize(sample): # Loading the training dataset. We need to split it into a training and validation part train_dataset = MNIST(root=DATASET_PATH, train=True, transform=transform, download=True) -pl.seed_everything(42) +L.seed_everything(42) train_set, val_set = torch.utils.data.random_split(train_dataset, [50000, 10000]) # Loading the test set @@ -258,7 +259,7 @@ def show_imgs(imgs, title=None, row_size=4): # %% -class ImageFlow(pl.LightningModule): +class ImageFlow(L.LightningModule): def __init__(self, flows, import_samples=8): """ Args: @@ -449,7 +450,7 @@ def dequant(self, z, ldj): # %% # Testing invertibility of dequantization layer -pl.seed_everything(42) +L.seed_everything(42) orig_img = train_set[0][0].unsqueeze(dim=0) ldj = torch.zeros( 1, @@ -916,9 +917,10 @@ def create_simple_flow(use_vardeq=True): # %% def train_flow(flow, model_name="MNISTFlow"): # Create a PyTorch Lightning trainer - trainer = pl.Trainer( + trainer = L.Trainer( default_root_dir=os.path.join(CHECKPOINT_PATH, model_name), - gpus=1 if torch.cuda.is_available() else 0, + accelerator="auto", + devices=1, max_epochs=200, gradient_clip_val=1.0, callbacks=[ @@ -1216,12 +1218,12 @@ def print_num_params(model): # The seeds are set to obtain reproducable generations and are not cherry picked. 
# %% -pl.seed_everything(44) +L.seed_everything(44) samples = flow_dict["vardeq"]["model"].sample(img_shape=[16, 1, 28, 28]) show_imgs(samples.cpu()) # %% -pl.seed_everything(44) +L.seed_everything(44) samples = flow_dict["multiscale"]["model"].sample(img_shape=[16, 8, 7, 7]) show_imgs(samples.cpu()) @@ -1262,12 +1264,12 @@ def interpolate(model, img1, img2, num_steps=8): exmp_imgs, _ = next(iter(train_loader)) # %% -pl.seed_everything(42) +L.seed_everything(42) for i in range(2): interpolate(flow_dict["vardeq"]["model"], exmp_imgs[2 * i], exmp_imgs[2 * i + 1]) # %% -pl.seed_everything(42) +L.seed_everything(42) for i in range(2): interpolate(flow_dict["multiscale"]["model"], exmp_imgs[2 * i], exmp_imgs[2 * i + 1]) @@ -1290,7 +1292,7 @@ def interpolate(model, img1, img2, num_steps=8): # Below we visualize three examples of this: # %% -pl.seed_everything(44) +L.seed_everything(44) for _ in range(3): z_init = flow_dict["multiscale"]["model"].prior.sample(sample_shape=[1, 8, 7, 7]) z_init = z_init.expand(8, -1, -1, -1) From 47f1cfe326a80910a3974408cc35c90c907b02fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Mon, 13 Mar 2023 19:21:20 -0400 Subject: [PATCH 08/25] autoregressive --- .../Autoregressive_Image_Modeling.py | 25 ++++++++++--------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/course_UvA-DL/10-autoregressive-image-modeling/Autoregressive_Image_Modeling.py b/course_UvA-DL/10-autoregressive-image-modeling/Autoregressive_Image_Modeling.py index 1458be665..ca8838579 100644 --- a/course_UvA-DL/10-autoregressive-image-modeling/Autoregressive_Image_Modeling.py +++ b/course_UvA-DL/10-autoregressive-image-modeling/Autoregressive_Image_Modeling.py @@ -39,7 +39,7 @@ # Imports for plotting import matplotlib.pyplot as plt import numpy as np -import pytorch_lightning as pl +import lightning as L import seaborn as sns import torch import torch.nn as nn @@ -47,9 +47,9 @@ import torch.optim as optim import torch.utils.data as data import torchvision -from IPython.display import set_matplotlib_formats +import matplotlib_inline.backend_inline from matplotlib.colors import to_rgb -from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint +from lightning.pytorch.callbacks import LearningRateMonitor, ModelCheckpoint from torch import Tensor from torchvision import transforms from torchvision.datasets import MNIST @@ -57,7 +57,7 @@ plt.set_cmap("cividis") # %matplotlib inline -set_matplotlib_formats("svg", "pdf") # For export +matplotlib_inline.backend_inline.set_matplotlib_formats("svg", "pdf") # For export # Path to the folder where the datasets are/should be downloaded (e.g. MNIST) DATASET_PATH = os.environ.get("PATH_DATASETS", "data") @@ -65,7 +65,7 @@ CHECKPOINT_PATH = os.environ.get("PATH_CHECKPOINT", "saved_models/tutorial12") # Setting the seed -pl.seed_everything(42) +L.seed_everything(42) # Ensure that all operations are deterministic on GPU (if used) for reproducibility torch.backends.cudnn.determinstic = True @@ -117,7 +117,7 @@ def discretize(sample): # Loading the training dataset. 
We need to split it into a training and validation part train_dataset = MNIST(root=DATASET_PATH, train=True, transform=transform, download=True) -pl.seed_everything(42) +L.seed_everything(42) train_set, val_set = torch.utils.data.random_split(train_dataset, [50000, 10000]) # Loading the test set @@ -529,7 +529,7 @@ def forward(self, v_stack, h_stack): # %% -class PixelCNN(pl.LightningModule): +class PixelCNN(L.LightningModule): def __init__(self, c_in, c_hidden): super().__init__() self.save_hyperparameters() @@ -675,9 +675,10 @@ def test_step(self, batch, batch_idx): # %% def train_model(**kwargs): # Create a PyTorch Lightning trainer with the generation callback - trainer = pl.Trainer( + trainer = L.Trainer( default_root_dir=os.path.join(CHECKPOINT_PATH, "PixelCNN"), - gpus=1 if str(device).startswith("cuda") else 0, + accelerator="auto", + devices=1, max_epochs=150, callbacks=[ ModelCheckpoint(save_weights_only=True, mode="min", monitor="val_bpd"), @@ -749,7 +750,7 @@ def train_model(**kwargs): # Let's therefore use our sampling function to generate a few digits: # %% -pl.seed_everything(1) +L.seed_everything(1) samples = model.sample(img_shape=(16, 1, 28, 28)) show_imgs(samples.cpu()) @@ -772,7 +773,7 @@ def train_model(**kwargs): # $64\times64$ instead of $28\times28$: # %% -pl.seed_everything(1) +L.seed_everything(1) samples = model.sample(img_shape=(8, 1, 64, 64)) show_imgs(samples.cpu()) @@ -810,7 +811,7 @@ def autocomplete_image(img): show_imgs([img, img_init]) # Generate 12 example completions img_init = img_init.unsqueeze(dim=0).expand(12, -1, -1, -1).to(device) - pl.seed_everything(1) + L.seed_everything(1) img_generated = model.sample(img_init.shape, img_init) print("Autocompletion samples:") show_imgs(img_generated) From cfab2ee1ca58eb7fc2e12532696dc46426a13aef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Mon, 13 Mar 2023 19:24:03 -0400 Subject: [PATCH 09/25] vit --- .../Vision_Transformer.py | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/course_UvA-DL/11-vision-transformer/Vision_Transformer.py b/course_UvA-DL/11-vision-transformer/Vision_Transformer.py index 169cd9cc8..ecc7a9009 100644 --- a/course_UvA-DL/11-vision-transformer/Vision_Transformer.py +++ b/course_UvA-DL/11-vision-transformer/Vision_Transformer.py @@ -9,7 +9,7 @@ import matplotlib import matplotlib.pyplot as plt -import pytorch_lightning as pl +import lightning as L import seaborn as sns import torch import torch.nn as nn @@ -17,14 +17,14 @@ import torch.optim as optim import torch.utils.data as data import torchvision -from IPython.display import set_matplotlib_formats -from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint +import matplotlib_inline.backend_inline +from lightning.pytorch.callbacks import LearningRateMonitor, ModelCheckpoint from torchvision import transforms from torchvision.datasets import CIFAR10 plt.set_cmap("cividis") # %matplotlib inline -set_matplotlib_formats("svg", "pdf") # For export +matplotlib_inline.backend_inline.set_matplotlib_formats("svg", "pdf") # For export matplotlib.rcParams["lines.linewidth"] = 2.0 sns.reset_orig() @@ -36,7 +36,7 @@ CHECKPOINT_PATH = os.environ.get("PATH_CHECKPOINT", "saved_models/VisionTransformers/") # Setting the seed -pl.seed_everything(42) +L.seed_everything(42) # Ensure that all operations are deterministic on GPU (if used) for reproducibility torch.backends.cudnn.determinstic = True @@ -105,9 +105,9 @@ # We need to do a little trick because the validation set 
should not use the augmentation. train_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=train_transform, download=True) val_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=test_transform, download=True) -pl.seed_everything(42) +L.seed_everything(42) train_set, _ = torch.utils.data.random_split(train_dataset, [45000, 5000]) -pl.seed_everything(42) +L.seed_everything(42) _, val_set = torch.utils.data.random_split(val_dataset, [45000, 5000]) # Loading the test set @@ -328,7 +328,7 @@ def forward(self, x): # %% -class ViT(pl.LightningModule): +class ViT(L.LightningModule): def __init__(self, model_kwargs, lr): super().__init__() self.save_hyperparameters() @@ -376,15 +376,15 @@ def test_step(self, batch, batch_idx): # %% def train_model(**kwargs): - trainer = pl.Trainer( + trainer = L.Trainer( default_root_dir=os.path.join(CHECKPOINT_PATH, "ViT"), - gpus=1 if str(device) == "cuda:0" else 0, + accelerator="auto", + devices=1, max_epochs=180, callbacks=[ ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc"), LearningRateMonitor("epoch"), ], - progress_bar_refresh_rate=1, ) trainer.logger._log_graph = True # If True, we plot the computation graph in tensorboard trainer.logger._default_hp_metric = None # Optional logging argument that we don't need @@ -396,7 +396,7 @@ def train_model(**kwargs): # Automatically loads the model with the saved hyperparameters model = ViT.load_from_checkpoint(pretrained_filename) else: - pl.seed_everything(42) # To be reproducable + L.seed_everything(42) # To be reproducable model = ViT(**kwargs) trainer.fit(model, train_loader, val_loader) # Load best checkpoint after training From b4d7be3c584adb27c0ad971ae4189b00b891aba8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Mon, 13 Mar 2023 19:28:16 -0400 Subject: [PATCH 10/25] meta learning --- .../12-meta-learning/Meta_Learning.py | 27 ++++++++++--------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/course_UvA-DL/12-meta-learning/Meta_Learning.py b/course_UvA-DL/12-meta-learning/Meta_Learning.py index 1ec243525..a25b66282 100644 --- a/course_UvA-DL/12-meta-learning/Meta_Learning.py +++ b/course_UvA-DL/12-meta-learning/Meta_Learning.py @@ -28,23 +28,23 @@ import matplotlib import matplotlib.pyplot as plt import numpy as np -import pytorch_lightning as pl +import lightning as L import seaborn as sns import torch import torch.nn.functional as F import torch.optim as optim import torch.utils.data as data import torchvision -from IPython.display import set_matplotlib_formats +import matplotlib_inline.backend_inline from PIL import Image -from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint +from lightning.pytorch.callbacks import LearningRateMonitor, ModelCheckpoint from torchvision import transforms from torchvision.datasets import CIFAR100, SVHN from tqdm.auto import tqdm plt.set_cmap("cividis") # %matplotlib inline -set_matplotlib_formats("svg", "pdf") # For export +matplotlib_inline.backend_inline.set_matplotlib_formats("svg", "pdf") # For export matplotlib.rcParams["lines.linewidth"] = 2.0 sns.reset_orig() @@ -57,7 +57,7 @@ CHECKPOINT_PATH = os.environ.get("PATH_CHECKPOINT", "saved_models/MetaLearning/") # Setting the seed -pl.seed_everything(42) +L.seed_everything(42) # Ensure that all operations are deterministic on GPU (if used) for reproducibility torch.backends.cudnn.determinstic = True @@ -183,7 +183,7 @@ def __len__(self): # We will assign the classes randomly to training, validation and test, and use a 
80%-10%-10% split. # %% -pl.seed_everything(0) # Set seed for reproducibility +L.seed_everything(0) # Set seed for reproducibility classes = torch.randperm(100) # Returns random permutation of numbers 0 to 99 train_classes, val_classes, test_classes = classes[:80], classes[80:90], classes[90:] @@ -475,7 +475,7 @@ def get_convnet(output_size): # %% -class ProtoNet(pl.LightningModule): +class ProtoNet(L.LightningModule): def __init__(self, proto_dim, lr): """Inputs. @@ -553,15 +553,16 @@ def validation_step(self, batch, batch_idx): # %% def train_model(model_class, train_loader, val_loader, **kwargs): - trainer = pl.Trainer( + trainer = L.Trainer( default_root_dir=os.path.join(CHECKPOINT_PATH, model_class.__name__), - gpus=1 if str(device) == "cuda:0" else 0, + accelerator="auto", + devices=1, max_epochs=200, callbacks=[ ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc"), LearningRateMonitor("epoch"), ], - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) trainer.logger._default_hp_metric = None @@ -572,7 +573,7 @@ def train_model(model_class, train_loader, val_loader, **kwargs): # Automatically loads the model with the saved hyperparameters model = model_class.load_from_checkpoint(pretrained_filename) else: - pl.seed_everything(42) # To be reproducable + L.seed_everything(42) # To be reproducable model = model_class(**kwargs) trainer.fit(model, train_loader, val_loader) model = model_class.load_from_checkpoint( @@ -844,7 +845,7 @@ def plot_few_shot(acc_dict, name, color=None, ax=None): # %% -class ProtoMAML(pl.LightningModule): +class ProtoMAML(L.LightningModule): def __init__(self, proto_dim, lr, lr_inner, lr_output, num_inner_steps): """Inputs. @@ -1091,7 +1092,7 @@ def collate_fn(item_list): # %% def test_protomaml(model, dataset, k_shot=4): - pl.seed_everything(42) + L.seed_everything(42) model = model.to(device) num_classes = dataset.targets.unique().shape[0] From 8ab731c6d987989aa3f903ab4546a01cd0738da0 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 13 Mar 2023 23:35:18 +0000 Subject: [PATCH 11/25] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../Introduction_to_PyTorch.py | 6 +++--- .../02-activation-functions/Activation_Functions.py | 6 +++--- .../Initialization_and_Optimization.py | 10 +++++----- .../Inception_ResNet_DenseNet.py | 7 +++---- .../Transformers_MHAttention.py | 8 ++++---- .../Deep_Energy_Models.py | 12 ++++++------ .../08-deep-autoencoders/Deep_Autoencoders.py | 4 ++-- .../09-normalizing-flows/NF_image_modeling.py | 6 +++--- .../Autoregressive_Image_Modeling.py | 7 ++++--- .../11-vision-transformer/Vision_Transformer.py | 4 ++-- course_UvA-DL/12-meta-learning/Meta_Learning.py | 6 +++--- 11 files changed, 38 insertions(+), 38 deletions(-) diff --git a/course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py b/course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py index a146f64a4..d67d1adff 100644 --- a/course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py +++ b/course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py @@ -37,13 +37,13 @@ import time import matplotlib.pyplot as plt + +# %matplotlib inline +import matplotlib_inline.backend_inline import numpy as np import torch import torch.nn as nn import torch.utils.data as data - -# %matplotlib inline -import matplotlib_inline.backend_inline from matplotlib.colors import to_rgba from torch import 
Tensor from tqdm.notebook import tqdm # Progress bar diff --git a/course_UvA-DL/02-activation-functions/Activation_Functions.py b/course_UvA-DL/02-activation-functions/Activation_Functions.py index 7b95d4e3d..c25bf327e 100644 --- a/course_UvA-DL/02-activation-functions/Activation_Functions.py +++ b/course_UvA-DL/02-activation-functions/Activation_Functions.py @@ -11,6 +11,9 @@ from urllib.error import HTTPError import matplotlib.pyplot as plt + +# %matplotlib inline +import matplotlib_inline.backend_inline import numpy as np import seaborn as sns import torch @@ -19,9 +22,6 @@ import torch.optim as optim import torch.utils.data as data import torchvision - -# %matplotlib inline -import matplotlib_inline.backend_inline from torchvision import transforms from torchvision.datasets import FashionMNIST from tqdm.notebook import tqdm diff --git a/course_UvA-DL/03-initialization-and-optimization/Initialization_and_Optimization.py b/course_UvA-DL/03-initialization-and-optimization/Initialization_and_Optimization.py index 3978de76a..b9405e561 100644 --- a/course_UvA-DL/03-initialization-and-optimization/Initialization_and_Optimization.py +++ b/course_UvA-DL/03-initialization-and-optimization/Initialization_and_Optimization.py @@ -25,17 +25,17 @@ import urllib.request from urllib.error import HTTPError +import lightning as L import matplotlib.pyplot as plt + +# %matplotlib inline +import matplotlib_inline.backend_inline import numpy as np -import lightning as L import seaborn as sns import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data as data - -# %matplotlib inline -import matplotlib_inline.backend_inline from matplotlib import cm from torchvision import transforms from torchvision.datasets import FashionMNIST @@ -952,7 +952,7 @@ def plot_curve( fig = plt.figure() ax = fig.gca() if plot_3d: - ax = fig.add_subplot(projection='3d') + ax = fig.add_subplot(projection="3d") x = torch.arange(x_range[0], x_range[1], (x_range[1] - x_range[0]) / 100.0) y = torch.arange(y_range[0], y_range[1], (y_range[1] - y_range[0]) / 100.0) diff --git a/course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py b/course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py index bc7a2e3b5..0e0ef4ccd 100644 --- a/course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py +++ b/course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py @@ -20,10 +20,11 @@ from types import SimpleNamespace from urllib.error import HTTPError +import lightning as L import matplotlib import matplotlib.pyplot as plt +import matplotlib_inline.backend_inline import numpy as np -import lightning as L import seaborn as sns import tabulate import torch @@ -34,10 +35,8 @@ # %matplotlib inline from IPython.display import HTML, display -import matplotlib_inline.backend_inline - -from PIL import Image from lightning.pytorch.callbacks import LearningRateMonitor, ModelCheckpoint +from PIL import Image from torchvision import transforms from torchvision.datasets import CIFAR10 diff --git a/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py b/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py index ea5c9ac49..d53f7956f 100644 --- a/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py +++ b/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py @@ -22,13 +22,14 @@ from functools import partial from urllib.error import HTTPError +# PyTorch Lightning +import lightning as L + # Plotting 
import matplotlib import matplotlib.pyplot as plt +import matplotlib_inline.backend_inline import numpy as np - -# PyTorch Lightning -import lightning as L import seaborn as sns # PyTorch @@ -40,7 +41,6 @@ # Torchvision import torchvision -import matplotlib_inline.backend_inline from lightning.pytorch.callbacks import ModelCheckpoint from torchvision import transforms from torchvision.datasets import CIFAR100 diff --git a/course_UvA-DL/07-deep-energy-based-generative-models/Deep_Energy_Models.py b/course_UvA-DL/07-deep-energy-based-generative-models/Deep_Energy_Models.py index 06f3f621a..8fa87bb92 100644 --- a/course_UvA-DL/07-deep-energy-based-generative-models/Deep_Energy_Models.py +++ b/course_UvA-DL/07-deep-energy-based-generative-models/Deep_Energy_Models.py @@ -9,13 +9,16 @@ import urllib.request from urllib.error import HTTPError +# PyTorch Lightning +import lightning as L + # Plotting import matplotlib import matplotlib.pyplot as plt -import numpy as np -# PyTorch Lightning -import lightning as L +# %matplotlib inline +import matplotlib_inline.backend_inline +import numpy as np # PyTorch import torch @@ -25,9 +28,6 @@ # Torchvision import torchvision - -# %matplotlib inline -import matplotlib_inline.backend_inline from lightning.pytorch.callbacks import Callback, LearningRateMonitor, ModelCheckpoint from torchvision import transforms from torchvision.datasets import MNIST diff --git a/course_UvA-DL/08-deep-autoencoders/Deep_Autoencoders.py b/course_UvA-DL/08-deep-autoencoders/Deep_Autoencoders.py index d7a6b70d9..def55b115 100644 --- a/course_UvA-DL/08-deep-autoencoders/Deep_Autoencoders.py +++ b/course_UvA-DL/08-deep-autoencoders/Deep_Autoencoders.py @@ -18,9 +18,10 @@ import urllib.request from urllib.error import HTTPError +import lightning as L import matplotlib import matplotlib.pyplot as plt -import lightning as L +import matplotlib_inline.backend_inline import seaborn as sns import torch import torch.nn as nn @@ -28,7 +29,6 @@ import torch.optim as optim import torch.utils.data as data import torchvision -import matplotlib_inline.backend_inline from lightning.pytorch.callbacks import Callback, LearningRateMonitor, ModelCheckpoint from torch.utils.tensorboard import SummaryWriter from torchvision import transforms diff --git a/course_UvA-DL/09-normalizing-flows/NF_image_modeling.py b/course_UvA-DL/09-normalizing-flows/NF_image_modeling.py index ff5f0dee6..636563ef4 100644 --- a/course_UvA-DL/09-normalizing-flows/NF_image_modeling.py +++ b/course_UvA-DL/09-normalizing-flows/NF_image_modeling.py @@ -10,10 +10,11 @@ import urllib.request from urllib.error import HTTPError +import lightning as L import matplotlib import matplotlib.pyplot as plt +import matplotlib_inline.backend_inline import numpy as np -import lightning as L import seaborn as sns import tabulate import torch @@ -23,9 +24,8 @@ import torch.utils.data as data import torchvision from IPython.display import HTML, display -import matplotlib_inline.backend_inline -from matplotlib.colors import to_rgb from lightning.pytorch.callbacks import LearningRateMonitor, ModelCheckpoint +from matplotlib.colors import to_rgb from torch import Tensor from torchvision import transforms from torchvision.datasets import MNIST diff --git a/course_UvA-DL/10-autoregressive-image-modeling/Autoregressive_Image_Modeling.py b/course_UvA-DL/10-autoregressive-image-modeling/Autoregressive_Image_Modeling.py index ca8838579..24026e644 100644 --- a/course_UvA-DL/10-autoregressive-image-modeling/Autoregressive_Image_Modeling.py +++ 
b/course_UvA-DL/10-autoregressive-image-modeling/Autoregressive_Image_Modeling.py @@ -36,10 +36,12 @@ import urllib.request from urllib.error import HTTPError +import lightning as L + # Imports for plotting import matplotlib.pyplot as plt +import matplotlib_inline.backend_inline import numpy as np -import lightning as L import seaborn as sns import torch import torch.nn as nn @@ -47,9 +49,8 @@ import torch.optim as optim import torch.utils.data as data import torchvision -import matplotlib_inline.backend_inline -from matplotlib.colors import to_rgb from lightning.pytorch.callbacks import LearningRateMonitor, ModelCheckpoint +from matplotlib.colors import to_rgb from torch import Tensor from torchvision import transforms from torchvision.datasets import MNIST diff --git a/course_UvA-DL/11-vision-transformer/Vision_Transformer.py b/course_UvA-DL/11-vision-transformer/Vision_Transformer.py index ecc7a9009..6ab4e0167 100644 --- a/course_UvA-DL/11-vision-transformer/Vision_Transformer.py +++ b/course_UvA-DL/11-vision-transformer/Vision_Transformer.py @@ -7,9 +7,10 @@ import urllib.request from urllib.error import HTTPError +import lightning as L import matplotlib import matplotlib.pyplot as plt -import lightning as L +import matplotlib_inline.backend_inline import seaborn as sns import torch import torch.nn as nn @@ -17,7 +18,6 @@ import torch.optim as optim import torch.utils.data as data import torchvision -import matplotlib_inline.backend_inline from lightning.pytorch.callbacks import LearningRateMonitor, ModelCheckpoint from torchvision import transforms from torchvision.datasets import CIFAR10 diff --git a/course_UvA-DL/12-meta-learning/Meta_Learning.py b/course_UvA-DL/12-meta-learning/Meta_Learning.py index a25b66282..3e92dcfc8 100644 --- a/course_UvA-DL/12-meta-learning/Meta_Learning.py +++ b/course_UvA-DL/12-meta-learning/Meta_Learning.py @@ -25,19 +25,19 @@ from statistics import mean, stdev from urllib.error import HTTPError +import lightning as L import matplotlib import matplotlib.pyplot as plt +import matplotlib_inline.backend_inline import numpy as np -import lightning as L import seaborn as sns import torch import torch.nn.functional as F import torch.optim as optim import torch.utils.data as data import torchvision -import matplotlib_inline.backend_inline -from PIL import Image from lightning.pytorch.callbacks import LearningRateMonitor, ModelCheckpoint +from PIL import Image from torchvision import transforms from torchvision.datasets import CIFAR100, SVHN from tqdm.auto import tqdm From 18c24300761149891cb73866c30799cf7e534c7f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Mon, 13 Mar 2023 19:39:55 -0400 Subject: [PATCH 12/25] Apply suggestions from code review --- .../Introduction_to_PyTorch.py | 12 ------------ .../Initialization_and_Optimization.py | 12 ------------ .../Inception_ResNet_DenseNet.py | 12 ------------ .../08-deep-autoencoders/Deep_Autoencoders.py | 12 ------------ 4 files changed, 48 deletions(-) diff --git a/course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py b/course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py index d67d1adff..206530be1 100644 --- a/course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py +++ b/course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py @@ -1,15 +1,3 @@ -# --- -# jupyter: -# jupytext: -# cell_metadata_filter: -all -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# 
jupytext_version: 1.14.5 -# --- - # %% [markdown] #
# Welcome to our PyTorch tutorial for the Deep Learning course 2020 at the University of Amsterdam! diff --git a/course_UvA-DL/03-initialization-and-optimization/Initialization_and_Optimization.py b/course_UvA-DL/03-initialization-and-optimization/Initialization_and_Optimization.py index b9405e561..fc949adf3 100644 --- a/course_UvA-DL/03-initialization-and-optimization/Initialization_and_Optimization.py +++ b/course_UvA-DL/03-initialization-and-optimization/Initialization_and_Optimization.py @@ -1,15 +1,3 @@ -# --- -# jupyter: -# jupytext: -# cell_metadata_filter: -all -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.5 -# --- - # %% [markdown] #
# In the first half of the notebook, we will review different initialization techniques, and go step by step from the simplest initialization to methods that are nowadays used in very deep networks. diff --git a/course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py b/course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py index 0e0ef4ccd..dfcd04472 100644 --- a/course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py +++ b/course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py @@ -1,15 +1,3 @@ -# --- -# jupyter: -# jupytext: -# cell_metadata_filter: -all -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.5 -# --- - # %% [markdown] #
# Let's start with importing our standard libraries here. diff --git a/course_UvA-DL/08-deep-autoencoders/Deep_Autoencoders.py b/course_UvA-DL/08-deep-autoencoders/Deep_Autoencoders.py index def55b115..250923dfe 100644 --- a/course_UvA-DL/08-deep-autoencoders/Deep_Autoencoders.py +++ b/course_UvA-DL/08-deep-autoencoders/Deep_Autoencoders.py @@ -1,15 +1,3 @@ -# --- -# jupyter: -# jupytext: -# cell_metadata_filter: -all -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.5 -# --- - # %% [markdown] #
From 000c803673a498082209ffc0e962c88809fd6c3d Mon Sep 17 00:00:00 2001 From: awaelchli Date: Tue, 14 Mar 2023 00:43:57 +0100 Subject: [PATCH 13/25] update gnn --- .../06-graph-neural-networks/GNN_overview.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/course_UvA-DL/06-graph-neural-networks/GNN_overview.py b/course_UvA-DL/06-graph-neural-networks/GNN_overview.py index 65d422f05..177d1210e 100644 --- a/course_UvA-DL/06-graph-neural-networks/GNN_overview.py +++ b/course_UvA-DL/06-graph-neural-networks/GNN_overview.py @@ -634,7 +634,7 @@ def test_step(self, batch, batch_idx): # Additionally to the Lightning module, we define a training function below. # As we have a single graph, we use a batch size of 1 for the data loader and share the same data loader for the train, # validation, and test set (the mask is picked inside the Lightning module). -# Besides, we set the argument `progress_bar_refresh_rate` to zero as it usually shows the progress per epoch, +# Besides, we set the argument `enable_progress_bar` to False as it usually shows the progress per epoch, # but an epoch only consists of a single step. # If you have downloaded the pre-trained models in the beginning of the tutorial, we load those instead of training from scratch. # Finally, we test the model and return the results. @@ -642,18 +642,19 @@ def test_step(self, batch, batch_idx): # %% def train_node_classifier(model_name, dataset, **model_kwargs): - pl.seed_everything(42) + L.seed_everything(42) node_data_loader = geom_data.DataLoader(dataset, batch_size=1) # Create a PyTorch Lightning trainer root_dir = os.path.join(CHECKPOINT_PATH, "NodeLevel" + model_name) os.makedirs(root_dir, exist_ok=True) - trainer = pl.Trainer( + trainer = L.Trainer( default_root_dir=root_dir, callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")], - gpus=AVAIL_GPUS, + accelerator="auto", + devices=AVAIL_GPUS, max_epochs=200, - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) # 0 because epoch size is 1 trainer.logger._default_hp_metric = None # Optional logging argument that we don't need @@ -663,7 +664,7 @@ def train_node_classifier(model_name, dataset, **model_kwargs): print("Found pretrained model, loading...") model = NodeLevelGNN.load_from_checkpoint(pretrained_filename) else: - pl.seed_everything() + L.seed_everything() model = NodeLevelGNN( model_name=model_name, c_in=dataset.num_node_features, c_out=dataset.num_classes, **model_kwargs ) From 3824605b80f9a674678dac775dd436d30e8f0c7f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Mon, 13 Mar 2023 19:48:01 -0400 Subject: [PATCH 14/25] simclr --- .../13-contrastive-learning/SimCLR.py | 41 ++++++++++--------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/course_UvA-DL/13-contrastive-learning/SimCLR.py b/course_UvA-DL/13-contrastive-learning/SimCLR.py index bc7ea4264..b185b08bb 100644 --- a/course_UvA-DL/13-contrastive-learning/SimCLR.py +++ b/course_UvA-DL/13-contrastive-learning/SimCLR.py @@ -35,7 +35,7 @@ import matplotlib import matplotlib.pyplot as plt -import pytorch_lightning as pl +import lightning as L import seaborn as sns import torch import torch.nn as nn @@ -43,15 +43,15 @@ import torch.optim as optim import torch.utils.data as data import torchvision -from IPython.display import set_matplotlib_formats -from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint +import matplotlib_inline.backend_inline +from lightning.pytorch.callbacks import 
LearningRateMonitor, ModelCheckpoint from torchvision import transforms from torchvision.datasets import STL10 from tqdm.notebook import tqdm plt.set_cmap("cividis") # %matplotlib inline -set_matplotlib_formats("svg", "pdf") # For export +matplotlib_inline.backend_inline.set_matplotlib_formats("svg", "pdf") # For export matplotlib.rcParams["lines.linewidth"] = 2.0 sns.set() @@ -67,7 +67,7 @@ NUM_WORKERS = os.cpu_count() # Setting the seed -pl.seed_everything(42) +L.seed_everything(42) # Ensure that all operations are deterministic on GPU (if used) for reproducibility torch.backends.cudnn.determinstic = True @@ -215,7 +215,7 @@ def __call__(self, x): # %% # Visualize some examples -pl.seed_everything(42) +L.seed_everything(42) NUM_IMAGES = 6 imgs = torch.stack([img for idx in range(NUM_IMAGES) for img in unlabeled_data[idx][0]], dim=0) img_grid = torchvision.utils.make_grid(imgs, nrow=6, normalize=True, pad_value=0.9) @@ -275,7 +275,7 @@ def __call__(self, x): # %% -class SimCLR(pl.LightningModule): +class SimCLR(L.LightningModule): def __init__(self, hidden_dim, lr, temperature, weight_decay, max_epochs=500): super().__init__() self.save_hyperparameters() @@ -355,15 +355,15 @@ def validation_step(self, batch, batch_idx): # %% def train_simclr(batch_size, max_epochs=500, **kwargs): - trainer = pl.Trainer( + trainer = L.Trainer( default_root_dir=os.path.join(CHECKPOINT_PATH, "SimCLR"), - gpus=1 if str(device) == "cuda:0" else 0, + accelerator="auto", + devices=1, max_epochs=max_epochs, callbacks=[ ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc_top5"), LearningRateMonitor("epoch"), ], - progress_bar_refresh_rate=1, ) trainer.logger._default_hp_metric = None # Optional logging argument that we don't need @@ -390,7 +390,7 @@ def train_simclr(batch_size, max_epochs=500, **kwargs): pin_memory=True, num_workers=NUM_WORKERS, ) - pl.seed_everything(42) # To be reproducable + L.seed_everything(42) # To be reproducable model = SimCLR(max_epochs=max_epochs, **kwargs) trainer.fit(model, train_loader, val_loader) # Load best checkpoint after training @@ -442,7 +442,7 @@ def train_simclr(batch_size, max_epochs=500, **kwargs): # %% -class LogisticRegression(pl.LightningModule): +class LogisticRegression(L.LightningModule): def __init__(self, feature_dim, num_classes, lr, weight_decay, max_epochs=100): super().__init__() self.save_hyperparameters() @@ -538,15 +538,16 @@ def prepare_data_features(model, dataset): # %% def train_logreg(batch_size, train_feats_data, test_feats_data, model_suffix, max_epochs=100, **kwargs): - trainer = pl.Trainer( + trainer = L.Trainer( default_root_dir=os.path.join(CHECKPOINT_PATH, "LogisticRegression"), - gpus=1 if str(device) == "cuda:0" else 0, + accelerator="auto", + devices=1, max_epochs=max_epochs, callbacks=[ ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc"), LearningRateMonitor("epoch"), ], - progress_bar_refresh_rate=0, + enable_progress_bar=False, check_val_every_n_epoch=10, ) trainer.logger._default_hp_metric = None @@ -663,7 +664,7 @@ def get_smaller_dataset(original_dataset, num_imgs_per_label): # %% -class ResNet(pl.LightningModule): +class ResNet(L.LightningModule): def __init__(self, num_classes, lr, weight_decay, max_epochs=100): super().__init__() self.save_hyperparameters() @@ -729,15 +730,15 @@ def test_step(self, batch, batch_idx): # %% def train_resnet(batch_size, max_epochs=100, **kwargs): - trainer = pl.Trainer( + trainer = L.Trainer( default_root_dir=os.path.join(CHECKPOINT_PATH, "ResNet"), - gpus=1 if 
str(device) == "cuda:0" else 0, + accelerator="auto", + devices=1, max_epochs=max_epochs, callbacks=[ ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc"), LearningRateMonitor("epoch"), ], - progress_bar_refresh_rate=1, check_val_every_n_epoch=2, ) trainer.logger._default_hp_metric = None @@ -761,7 +762,7 @@ def train_resnet(batch_size, max_epochs=100, **kwargs): print("Found pretrained model at %s, loading..." % pretrained_filename) model = ResNet.load_from_checkpoint(pretrained_filename) else: - pl.seed_everything(42) # To be reproducable + L.seed_everything(42) # To be reproducable model = ResNet(**kwargs) trainer.fit(model, train_loader, test_loader) model = ResNet.load_from_checkpoint(trainer.checkpoint_callback.best_model_path) From d1a93e7404746f99e359447a63a96843d96d8d1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Mon, 13 Mar 2023 19:48:45 -0400 Subject: [PATCH 15/25] update --- .../Transformers_MHAttention.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py b/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py index ea5c9ac49..bb85865c5 100644 --- a/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py +++ b/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py @@ -1,3 +1,15 @@ +# --- +# jupyter: +# jupytext: +# cell_metadata_filter: -all +# formats: ipynb,py:percent +# text_representation: +# extension: .py +# format_name: percent +# format_version: '1.3' +# jupytext_version: 1.14.5 +# --- + # %% [markdown] #
# Despite the huge success of Transformers in NLP, we will _not_ include the NLP domain in our notebook here. From 9377298b7374fcbeed17998dfedf653336bb2faa Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 13 Mar 2023 23:49:20 +0000 Subject: [PATCH 16/25] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- course_UvA-DL/13-contrastive-learning/SimCLR.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/course_UvA-DL/13-contrastive-learning/SimCLR.py b/course_UvA-DL/13-contrastive-learning/SimCLR.py index b185b08bb..4fd32c6c8 100644 --- a/course_UvA-DL/13-contrastive-learning/SimCLR.py +++ b/course_UvA-DL/13-contrastive-learning/SimCLR.py @@ -33,9 +33,10 @@ from copy import deepcopy from urllib.error import HTTPError +import lightning as L import matplotlib import matplotlib.pyplot as plt -import lightning as L +import matplotlib_inline.backend_inline import seaborn as sns import torch import torch.nn as nn @@ -43,7 +44,6 @@ import torch.optim as optim import torch.utils.data as data import torchvision -import matplotlib_inline.backend_inline from lightning.pytorch.callbacks import LearningRateMonitor, ModelCheckpoint from torchvision import transforms from torchvision.datasets import STL10 From b24b1224aa747aa6b151b8e865514cdb6b94ac53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Mon, 13 Mar 2023 19:49:58 -0400 Subject: [PATCH 17/25] Update course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py --- .../Transformers_MHAttention.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py b/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py index 4287b7ea3..d53f7956f 100644 --- a/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py +++ b/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py @@ -1,15 +1,3 @@ -# --- -# jupyter: -# jupytext: -# cell_metadata_filter: -all -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.5 -# --- - # %% [markdown] #
# Despite the huge success of Transformers in NLP, we will _not_ include the NLP domain in our notebook here. From 099c50aedf2f866b815b5da3f026f0685aacafe4 Mon Sep 17 00:00:00 2001 From: awaelchli Date: Tue, 14 Mar 2023 00:51:28 +0100 Subject: [PATCH 18/25] update --- .../Transformers_MHAttention.py | 2 +- course_UvA-DL/13-contrastive-learning/SimCLR.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py b/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py index d53f7956f..ffe69a424 100644 --- a/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py +++ b/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py @@ -958,7 +958,7 @@ def test_step(self, batch, batch_idx): # %% [markdown] # Finally, we can create a training function similar to the one we have seen in Tutorial 5 for PyTorch Lightning. -# We create a `pl.Trainer` object, running for $N$ epochs, logging in TensorBoard, and saving our best model based on the validation. +# We create a `L.Trainer` object, running for $N$ epochs, logging in TensorBoard, and saving our best model based on the validation. # Afterward, we test our models on the test set. # An additional parameter we pass to the trainer here is `gradient_clip_val`. # This clips the norm of the gradients for all parameters before taking an optimizer step and prevents the model diff --git a/course_UvA-DL/13-contrastive-learning/SimCLR.py b/course_UvA-DL/13-contrastive-learning/SimCLR.py index 4fd32c6c8..1bc97bb79 100644 --- a/course_UvA-DL/13-contrastive-learning/SimCLR.py +++ b/course_UvA-DL/13-contrastive-learning/SimCLR.py @@ -566,7 +566,7 @@ def train_logreg(batch_size, train_feats_data, test_feats_data, model_suffix, ma print(f"Found pretrained model at {pretrained_filename}, loading...") model = LogisticRegression.load_from_checkpoint(pretrained_filename) else: - pl.seed_everything(42) # To be reproducable + L.seed_everything(42) # To be reproducable model = LogisticRegression(**kwargs) trainer.fit(model, train_loader, test_loader) model = LogisticRegression.load_from_checkpoint(trainer.checkpoint_callback.best_model_path) From 82f2be637d555db8d2867507788131bf7ae3348b Mon Sep 17 00:00:00 2001 From: awaelchli Date: Tue, 14 Mar 2023 00:52:25 +0100 Subject: [PATCH 19/25] links --- course_UvA-DL/09-normalizing-flows/NF_image_modeling.py | 2 +- .../Autoregressive_Image_Modeling.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/course_UvA-DL/09-normalizing-flows/NF_image_modeling.py b/course_UvA-DL/09-normalizing-flows/NF_image_modeling.py index 636563ef4..f7033382e 100644 --- a/course_UvA-DL/09-normalizing-flows/NF_image_modeling.py +++ b/course_UvA-DL/09-normalizing-flows/NF_image_modeling.py @@ -1,6 +1,6 @@ # %% [markdown] #
-# Throughout this notebook, we make use of [PyTorch Lightning](https://pytorch-lightning.readthedocs.io/en/stable/). +# Throughout this notebook, we make use of [PyTorch Lightning](https://lightning.ai/docs/pytorch/stable/). # The first cell imports our usual libraries. # %% diff --git a/course_UvA-DL/10-autoregressive-image-modeling/Autoregressive_Image_Modeling.py b/course_UvA-DL/10-autoregressive-image-modeling/Autoregressive_Image_Modeling.py index 24026e644..5adb92868 100644 --- a/course_UvA-DL/10-autoregressive-image-modeling/Autoregressive_Image_Modeling.py +++ b/course_UvA-DL/10-autoregressive-image-modeling/Autoregressive_Image_Modeling.py @@ -26,7 +26,7 @@ # # First of all, we need to import our standard libraries. Similarly as in # the last couple of tutorials, we will use [PyTorch -# Lightning](https://pytorch-lightning.readthedocs.io/en/stable/) here as +# Lightning](https://lightning.ai/docs/pytorch/stable/) here as # well. # %% From 28accf921dafbfb16427c5f66a560cb96b45a372 Mon Sep 17 00:00:00 2001 From: Jirka Date: Tue, 14 Mar 2023 02:50:30 +0100 Subject: [PATCH 20/25] 2.0.0rc0 --- course_UvA-DL/01-introduction-to-pytorch/.meta.yml | 2 +- course_UvA-DL/02-activation-functions/.meta.yml | 2 +- course_UvA-DL/03-initialization-and-optimization/.meta.yml | 2 +- course_UvA-DL/04-inception-resnet-densenet/.meta.yaml | 4 ++-- course_UvA-DL/05-transformers-and-MH-attention/.meta.yml | 4 ++-- course_UvA-DL/06-graph-neural-networks/.meta.yml | 4 ++-- .../07-deep-energy-based-generative-models/.meta.yml | 4 ++-- course_UvA-DL/08-deep-autoencoders/.meta.yml | 4 ++-- course_UvA-DL/09-normalizing-flows/.meta.yml | 4 ++-- course_UvA-DL/10-autoregressive-image-modeling/.meta.yml | 3 ++- course_UvA-DL/11-vision-transformer/.meta.yml | 4 ++-- course_UvA-DL/12-meta-learning/.meta.yml | 4 ++-- course_UvA-DL/13-contrastive-learning/.meta.yml | 3 ++- 13 files changed, 23 insertions(+), 21 deletions(-) diff --git a/course_UvA-DL/01-introduction-to-pytorch/.meta.yml b/course_UvA-DL/01-introduction-to-pytorch/.meta.yml index ce7c76634..7af544f9c 100644 --- a/course_UvA-DL/01-introduction-to-pytorch/.meta.yml +++ b/course_UvA-DL/01-introduction-to-pytorch/.meta.yml @@ -1,7 +1,7 @@ title: "Tutorial 1: Introduction to PyTorch" author: Phillip Lippe created: 2021-08-27 -updated: 2023-01-04 +updated: 2023-03-14 license: CC BY-SA description: | This tutorial will give a short introduction to PyTorch basics, and get you setup for writing your own neural networks. diff --git a/course_UvA-DL/02-activation-functions/.meta.yml b/course_UvA-DL/02-activation-functions/.meta.yml index 9b435ff02..febc3faa3 100644 --- a/course_UvA-DL/02-activation-functions/.meta.yml +++ b/course_UvA-DL/02-activation-functions/.meta.yml @@ -1,7 +1,7 @@ title: "Tutorial 2: Activation Functions" author: Phillip Lippe created: 2021-08-27 -updated: 2023-01-04 +updated: 2023-03-14 license: CC BY-SA description: | In this tutorial, we will take a closer look at (popular) activation functions and investigate their effect on optimization properties in neural networks. 
diff --git a/course_UvA-DL/03-initialization-and-optimization/.meta.yml b/course_UvA-DL/03-initialization-and-optimization/.meta.yml index 82961aea1..5f448da1c 100644 --- a/course_UvA-DL/03-initialization-and-optimization/.meta.yml +++ b/course_UvA-DL/03-initialization-and-optimization/.meta.yml @@ -1,7 +1,7 @@ title: "Tutorial 3: Initialization and Optimization" author: Phillip Lippe created: 2021-08-27 -updated: 2023-01-04 +updated: 2023-03-14 license: CC BY-SA tags: - Image diff --git a/course_UvA-DL/04-inception-resnet-densenet/.meta.yaml b/course_UvA-DL/04-inception-resnet-densenet/.meta.yaml index 06c537aa1..9632a803a 100644 --- a/course_UvA-DL/04-inception-resnet-densenet/.meta.yaml +++ b/course_UvA-DL/04-inception-resnet-densenet/.meta.yaml @@ -1,7 +1,7 @@ title: "Tutorial 4: Inception, ResNet and DenseNet" author: Phillip Lippe created: 2021-08-27 -updated: 2023-01-04 +updated: 2023-03-14 license: CC BY-SA tags: - Image @@ -18,6 +18,6 @@ requirements: - matplotlib - seaborn - tabulate - - pytorch-lightning>=1.8 + - pytorch-lightning>=2.0.0rc0 accelerator: - GPU diff --git a/course_UvA-DL/05-transformers-and-MH-attention/.meta.yml b/course_UvA-DL/05-transformers-and-MH-attention/.meta.yml index 80b75e295..c6e99e263 100644 --- a/course_UvA-DL/05-transformers-and-MH-attention/.meta.yml +++ b/course_UvA-DL/05-transformers-and-MH-attention/.meta.yml @@ -1,7 +1,7 @@ title: "Tutorial 5: Transformers and Multi-Head Attention" author: Phillip Lippe created: 2021-06-30 -updated: 2023-01-04 +updated: 2023-03-14 license: CC BY-SA build: 0 tags: @@ -19,6 +19,6 @@ requirements: - torchvision - matplotlib - seaborn - - pytorch-lightning>=1.8 + - pytorch-lightning>=2.0.0rc0 accelerator: - GPU diff --git a/course_UvA-DL/06-graph-neural-networks/.meta.yml b/course_UvA-DL/06-graph-neural-networks/.meta.yml index fc1dc3c66..1f47043a9 100644 --- a/course_UvA-DL/06-graph-neural-networks/.meta.yml +++ b/course_UvA-DL/06-graph-neural-networks/.meta.yml @@ -1,7 +1,7 @@ title: "Tutorial 6: Basics of Graph Neural Networks" author: Phillip Lippe created: 2021-06-07 -updated: 2023-01-04 +updated: 2023-03-14 license: CC BY-SA build: 0 tags: @@ -23,7 +23,7 @@ requirements: - torch-cluster - torch-spline-conv - torch-geometric - - pytorch-lightning>=1.8 + - pytorch-lightning>=2.0.0rc0 pip__find-link: # - https://pytorch-geometric.com/whl/torch-1.8.0+cu101.html - https://pytorch-geometric.com/whl/torch-%(TORCH_MAJOR_DOT_MINOR)s.0+%(DEVICE)s.html diff --git a/course_UvA-DL/07-deep-energy-based-generative-models/.meta.yml b/course_UvA-DL/07-deep-energy-based-generative-models/.meta.yml index ab105f312..532b72b0a 100644 --- a/course_UvA-DL/07-deep-energy-based-generative-models/.meta.yml +++ b/course_UvA-DL/07-deep-energy-based-generative-models/.meta.yml @@ -1,7 +1,7 @@ title: "Tutorial 7: Deep Energy-Based Generative Models" author: Phillip Lippe created: 2021-07-12 -updated: 2023-01-04 +updated: 2023-03-14 license: CC BY-SA build: 0 tags: @@ -22,7 +22,7 @@ requirements: - torchvision - matplotlib - tensorboard - - pytorch-lightning>=1.8 + - pytorch-lightning>=2.0.0rc0 accelerator: - CPU - GPU diff --git a/course_UvA-DL/08-deep-autoencoders/.meta.yml b/course_UvA-DL/08-deep-autoencoders/.meta.yml index ca54ef5a0..7dff34bbf 100644 --- a/course_UvA-DL/08-deep-autoencoders/.meta.yml +++ b/course_UvA-DL/08-deep-autoencoders/.meta.yml @@ -1,7 +1,7 @@ title: "Tutorial 8: Deep Autoencoders" author: Phillip Lippe created: 2021-07-12 -updated: 2023-01-04 +updated: 2023-03-14 license: CC BY-SA build: 0 tags: @@ -22,7 +22,7 
@@ requirements: - torchvision - matplotlib - seaborn - - pytorch-lightning>=1.8 + - pytorch-lightning>=2.0.0rc0 accelerator: - CPU - GPU diff --git a/course_UvA-DL/09-normalizing-flows/.meta.yml b/course_UvA-DL/09-normalizing-flows/.meta.yml index cf386b0e6..9abb9b089 100644 --- a/course_UvA-DL/09-normalizing-flows/.meta.yml +++ b/course_UvA-DL/09-normalizing-flows/.meta.yml @@ -1,7 +1,7 @@ title: "Tutorial 9: Normalizing Flows for Image Modeling" author: Phillip Lippe created: 2021-06-07 -updated: 2023-01-04 +updated: 2023-03-14 license: CC BY-SA build: 0 tags: @@ -25,7 +25,7 @@ requirements: - matplotlib - seaborn - tabulate - - pytorch-lightning>=1.8 + - pytorch-lightning>=2.0.0rc0 accelerator: - CPU - GPU diff --git a/course_UvA-DL/10-autoregressive-image-modeling/.meta.yml b/course_UvA-DL/10-autoregressive-image-modeling/.meta.yml index ae4008f01..f1fbeb654 100644 --- a/course_UvA-DL/10-autoregressive-image-modeling/.meta.yml +++ b/course_UvA-DL/10-autoregressive-image-modeling/.meta.yml @@ -1,7 +1,7 @@ title: "Tutorial 10: Autoregressive Image Modeling" author: Phillip Lippe created: 2021-07-12 -updated: 2021-07-12 +updated: 2023-03-14 license: CC BY-SA build: 0 tags: @@ -18,5 +18,6 @@ requirements: - torchvision - matplotlib - seaborn + - pytorch-lightning>=2.0.0rc0 accelerator: - GPU diff --git a/course_UvA-DL/11-vision-transformer/.meta.yml b/course_UvA-DL/11-vision-transformer/.meta.yml index 9be56a10e..e55be0d22 100644 --- a/course_UvA-DL/11-vision-transformer/.meta.yml +++ b/course_UvA-DL/11-vision-transformer/.meta.yml @@ -1,7 +1,7 @@ title: "Tutorial 11: Vision Transformers" author: Phillip Lippe created: 2021-08-21 -updated: 2023-01-04 +updated: 2023-03-14 license: CC BY-SA description: | In this tutorial, we will take a closer look at a recent new trend: Transformers for Computer Vision. 
@@ -17,7 +17,7 @@ requirements: - torchvision - matplotlib - seaborn - - pytorch-lightning>=1.8 + - pytorch-lightning>=2.0.0rc0 accelerator: - CPU - GPU diff --git a/course_UvA-DL/12-meta-learning/.meta.yml b/course_UvA-DL/12-meta-learning/.meta.yml index 7b640478f..9eb358eef 100644 --- a/course_UvA-DL/12-meta-learning/.meta.yml +++ b/course_UvA-DL/12-meta-learning/.meta.yml @@ -1,7 +1,7 @@ title: "Tutorial 12: Meta-Learning - Learning to Learn" author: Phillip Lippe created: 2021-08-21 -updated: 2023-01-04 +updated: 2023-03-14 license: CC BY-SA tags: - Few-shot-learning @@ -23,7 +23,7 @@ requirements: - torchvision - matplotlib - seaborn - - pytorch-lightning>=1.8 + - pytorch-lightning>=2.0.0rc0 accelerator: - CPU - GPU diff --git a/course_UvA-DL/13-contrastive-learning/.meta.yml b/course_UvA-DL/13-contrastive-learning/.meta.yml index 2c03a2388..4739e9c96 100644 --- a/course_UvA-DL/13-contrastive-learning/.meta.yml +++ b/course_UvA-DL/13-contrastive-learning/.meta.yml @@ -1,7 +1,7 @@ title: "Tutorial 13: Self-Supervised Contrastive Learning with SimCLR" author: Phillip Lippe created: 2021-08-30 -updated: 2021-08-30 +updated: 2023-03-14 license: CC BY-SA tags: - Image @@ -20,6 +20,7 @@ requirements: - torchvision - matplotlib - seaborn + - pytorch-lightning>=2.0.0rc0 accelerator: - CPU - GPU From 10615e9a76b14e011e3610fe40663317fe7b9998 Mon Sep 17 00:00:00 2001 From: Jirka Date: Tue, 14 Mar 2023 02:54:06 +0100 Subject: [PATCH 21/25] lightning --- course_UvA-DL/04-inception-resnet-densenet/.meta.yaml | 2 +- course_UvA-DL/05-transformers-and-MH-attention/.meta.yml | 2 +- course_UvA-DL/06-graph-neural-networks/.meta.yml | 2 +- course_UvA-DL/07-deep-energy-based-generative-models/.meta.yml | 2 +- course_UvA-DL/08-deep-autoencoders/.meta.yml | 2 +- course_UvA-DL/09-normalizing-flows/.meta.yml | 2 +- course_UvA-DL/10-autoregressive-image-modeling/.meta.yml | 2 +- course_UvA-DL/11-vision-transformer/.meta.yml | 2 +- course_UvA-DL/12-meta-learning/.meta.yml | 2 +- course_UvA-DL/13-contrastive-learning/.meta.yml | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/course_UvA-DL/04-inception-resnet-densenet/.meta.yaml b/course_UvA-DL/04-inception-resnet-densenet/.meta.yaml index 9632a803a..dc7b7b08c 100644 --- a/course_UvA-DL/04-inception-resnet-densenet/.meta.yaml +++ b/course_UvA-DL/04-inception-resnet-densenet/.meta.yaml @@ -18,6 +18,6 @@ requirements: - matplotlib - seaborn - tabulate - - pytorch-lightning>=2.0.0rc0 + - lightning>=2.0.0rc0 accelerator: - GPU diff --git a/course_UvA-DL/05-transformers-and-MH-attention/.meta.yml b/course_UvA-DL/05-transformers-and-MH-attention/.meta.yml index c6e99e263..0c8a0ee8a 100644 --- a/course_UvA-DL/05-transformers-and-MH-attention/.meta.yml +++ b/course_UvA-DL/05-transformers-and-MH-attention/.meta.yml @@ -19,6 +19,6 @@ requirements: - torchvision - matplotlib - seaborn - - pytorch-lightning>=2.0.0rc0 + - lightning>=2.0.0rc0 accelerator: - GPU diff --git a/course_UvA-DL/06-graph-neural-networks/.meta.yml b/course_UvA-DL/06-graph-neural-networks/.meta.yml index 1f47043a9..cfd63af21 100644 --- a/course_UvA-DL/06-graph-neural-networks/.meta.yml +++ b/course_UvA-DL/06-graph-neural-networks/.meta.yml @@ -23,7 +23,7 @@ requirements: - torch-cluster - torch-spline-conv - torch-geometric - - pytorch-lightning>=2.0.0rc0 + - lightning>=2.0.0rc0 pip__find-link: # - https://pytorch-geometric.com/whl/torch-1.8.0+cu101.html - https://pytorch-geometric.com/whl/torch-%(TORCH_MAJOR_DOT_MINOR)s.0+%(DEVICE)s.html diff --git 
a/course_UvA-DL/07-deep-energy-based-generative-models/.meta.yml b/course_UvA-DL/07-deep-energy-based-generative-models/.meta.yml index 532b72b0a..a5d7c01fc 100644 --- a/course_UvA-DL/07-deep-energy-based-generative-models/.meta.yml +++ b/course_UvA-DL/07-deep-energy-based-generative-models/.meta.yml @@ -22,7 +22,7 @@ requirements: - torchvision - matplotlib - tensorboard - - pytorch-lightning>=2.0.0rc0 + - lightning>=2.0.0rc0 accelerator: - CPU - GPU diff --git a/course_UvA-DL/08-deep-autoencoders/.meta.yml b/course_UvA-DL/08-deep-autoencoders/.meta.yml index 7dff34bbf..5c4df67e6 100644 --- a/course_UvA-DL/08-deep-autoencoders/.meta.yml +++ b/course_UvA-DL/08-deep-autoencoders/.meta.yml @@ -22,7 +22,7 @@ requirements: - torchvision - matplotlib - seaborn - - pytorch-lightning>=2.0.0rc0 + - lightning>=2.0.0rc0 accelerator: - CPU - GPU diff --git a/course_UvA-DL/09-normalizing-flows/.meta.yml b/course_UvA-DL/09-normalizing-flows/.meta.yml index 9abb9b089..d366a9934 100644 --- a/course_UvA-DL/09-normalizing-flows/.meta.yml +++ b/course_UvA-DL/09-normalizing-flows/.meta.yml @@ -25,7 +25,7 @@ requirements: - matplotlib - seaborn - tabulate - - pytorch-lightning>=2.0.0rc0 + - lightning>=2.0.0rc0 accelerator: - CPU - GPU diff --git a/course_UvA-DL/10-autoregressive-image-modeling/.meta.yml b/course_UvA-DL/10-autoregressive-image-modeling/.meta.yml index f1fbeb654..ac181f2ac 100644 --- a/course_UvA-DL/10-autoregressive-image-modeling/.meta.yml +++ b/course_UvA-DL/10-autoregressive-image-modeling/.meta.yml @@ -18,6 +18,6 @@ requirements: - torchvision - matplotlib - seaborn - - pytorch-lightning>=2.0.0rc0 + - lightning>=2.0.0rc0 accelerator: - GPU diff --git a/course_UvA-DL/11-vision-transformer/.meta.yml b/course_UvA-DL/11-vision-transformer/.meta.yml index e55be0d22..171d8774e 100644 --- a/course_UvA-DL/11-vision-transformer/.meta.yml +++ b/course_UvA-DL/11-vision-transformer/.meta.yml @@ -17,7 +17,7 @@ requirements: - torchvision - matplotlib - seaborn - - pytorch-lightning>=2.0.0rc0 + - lightning>=2.0.0rc0 accelerator: - CPU - GPU diff --git a/course_UvA-DL/12-meta-learning/.meta.yml b/course_UvA-DL/12-meta-learning/.meta.yml index 9eb358eef..18f459dcd 100644 --- a/course_UvA-DL/12-meta-learning/.meta.yml +++ b/course_UvA-DL/12-meta-learning/.meta.yml @@ -23,7 +23,7 @@ requirements: - torchvision - matplotlib - seaborn - - pytorch-lightning>=2.0.0rc0 + - lightning>=2.0.0rc0 accelerator: - CPU - GPU diff --git a/course_UvA-DL/13-contrastive-learning/.meta.yml b/course_UvA-DL/13-contrastive-learning/.meta.yml index 4739e9c96..6f9832fe7 100644 --- a/course_UvA-DL/13-contrastive-learning/.meta.yml +++ b/course_UvA-DL/13-contrastive-learning/.meta.yml @@ -20,7 +20,7 @@ requirements: - torchvision - matplotlib - seaborn - - pytorch-lightning>=2.0.0rc0 + - lightning>=2.0.0rc0 accelerator: - CPU - GPU From 217be55277f4c3e8bb867c9eb259461bead9102f Mon Sep 17 00:00:00 2001 From: Jirka Date: Tue, 14 Mar 2023 02:57:27 +0100 Subject: [PATCH 22/25] 2.0 --- _requirements/default.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_requirements/default.txt b/_requirements/default.txt index 7906aa4fb..42e73331b 100644 --- a/_requirements/default.txt +++ b/_requirements/default.txt @@ -1,5 +1,5 @@ setuptools==67.4.0 ipython[notebook]>=8.0.0, <8.12.0 torch>=1.8.1, <1.14.0 -pytorch-lightning>=1.4, <1.9 +pytorch-lightning>=1.4, <2.0.0 torchmetrics>=0.7, <0.12 From 1dc811a6aef4e077df96ed92a1c2f730a0abc7af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Tue, 14 Mar 2023 
06:12:12 -0400 Subject: [PATCH 23/25] Apply suggestions from code review --- .../04-inception-resnet-densenet/Inception_ResNet_DenseNet.py | 2 +- .../Transformers_MHAttention.py | 4 ++-- .../Deep_Energy_Models.py | 2 +- course_UvA-DL/08-deep-autoencoders/Deep_Autoencoders.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py b/course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py index 45a3ccb18..ffee8ff48 100644 --- a/course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py +++ b/course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py @@ -349,7 +349,7 @@ def train_model(model_name, save_name=None, **kwargs): trainer = L.Trainer( default_root_dir=os.path.join(CHECKPOINT_PATH, save_name), # Where to save models # We run on a single GPU (if possible) - accelerator=("cuda" if str(device) == "cuda:0" else "cpu"), + accelerator="auto", devices=1, # How many epochs to train for if no patience is set max_epochs=180, diff --git a/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py b/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py index c27eec3bf..f74ed35b1 100644 --- a/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py +++ b/course_UvA-DL/05-transformers-and-MH-attention/Transformers_MHAttention.py @@ -979,7 +979,7 @@ def train_reverse(**kwargs): trainer = L.Trainer( default_root_dir=root_dir, callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")], - accelerator=("cuda" if str(device).startswith("cuda") else "cpu"), + accelerator="auto", devices=1, max_epochs=10, gradient_clip_val=5, @@ -1439,7 +1439,7 @@ def train_anomaly(**kwargs): trainer = L.Trainer( default_root_dir=root_dir, callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")], - accelerator=("cuda" if str(device).startswith("cuda") else "cpu"), + accelerator="auto", devices=1, max_epochs=100, gradient_clip_val=2, diff --git a/course_UvA-DL/07-deep-energy-based-generative-models/Deep_Energy_Models.py b/course_UvA-DL/07-deep-energy-based-generative-models/Deep_Energy_Models.py index fc09ab4b6..6cd07a40a 100644 --- a/course_UvA-DL/07-deep-energy-based-generative-models/Deep_Energy_Models.py +++ b/course_UvA-DL/07-deep-energy-based-generative-models/Deep_Energy_Models.py @@ -640,7 +640,7 @@ def train_model(**kwargs): # Create a PyTorch Lightning trainer with the generation callback trainer = L.Trainer( default_root_dir=os.path.join(CHECKPOINT_PATH, "MNIST"), - accelerator=("cuda" if str(device).startswith("cuda") else "cpu"), + accelerator="auto", devices=1, max_epochs=60, gradient_clip_val=0.1, diff --git a/course_UvA-DL/08-deep-autoencoders/Deep_Autoencoders.py b/course_UvA-DL/08-deep-autoencoders/Deep_Autoencoders.py index a38598d19..da289b9e6 100644 --- a/course_UvA-DL/08-deep-autoencoders/Deep_Autoencoders.py +++ b/course_UvA-DL/08-deep-autoencoders/Deep_Autoencoders.py @@ -385,7 +385,7 @@ def train_cifar(latent_dim): # Create a PyTorch Lightning trainer with the generation callback trainer = L.Trainer( default_root_dir=os.path.join(CHECKPOINT_PATH, "cifar10_%i" % latent_dim), - accelerator=("cuda" if str(device).startswith("cuda") else "cpu"), + accelerator="auto", devices=1, max_epochs=500, callbacks=[ From bb1e40a1c2c5c97cd225232fb4ac8164f1a7e92d Mon Sep 17 00:00:00 2001 From: awaelchli Date: Tue, 14 Mar 2023 11:25:12 +0100 Subject: [PATCH 24/25] docs build fix --- 
.../01-introduction-to-pytorch/Introduction_to_PyTorch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py b/course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py index b8f5f16fc..d9f4ad5e6 100644 --- a/course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py +++ b/course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py @@ -185,7 +185,7 @@ print("X2 (after)", x2) # %% [markdown] -# In-place operations are usually marked with a underscore postfix (e.g. "add_" instead of "add"). +# In-place operations are usually marked with a underscore postfix (e.g. "torch.add_" instead of "torch.add"). # # Another common operation aims at changing the shape of a tensor. # A tensor of size (2,3) can be re-organized to any other shape with the same number of elements (e.g. a tensor of size (6), or (3,2), ...). From 835a6287d6bdfaa22f9a6363bfa6787ef0ef1b1e Mon Sep 17 00:00:00 2001 From: awaelchli Date: Tue, 14 Mar 2023 12:03:09 +0100 Subject: [PATCH 25/25] stupid sphinx --- .../01-introduction-to-pytorch/Introduction_to_PyTorch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py b/course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py index d9f4ad5e6..c692d1402 100644 --- a/course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py +++ b/course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py @@ -185,7 +185,7 @@ print("X2 (after)", x2) # %% [markdown] -# In-place operations are usually marked with a underscore postfix (e.g. "torch.add_" instead of "torch.add"). +# In-place operations are usually marked with a underscore postfix (for example `torch.add_` instead of `torch.add`). # # Another common operation aims at changing the shape of a tensor. # A tensor of size (2,3) can be re-organized to any other shape with the same number of elements (e.g. a tensor of size (6), or (3,2), ...).
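The two `Introduction_to_PyTorch.py` fixes above only reword the tutorial's sentence about in-place operations. To spell the convention out: PyTorch exposes in-place variants as tensor methods with a trailing underscore (for example `Tensor.add_`), while the plain versions return a new tensor and leave their inputs untouched. A minimal sketch with made-up values:

import torch

x1 = torch.ones(3)          # tensor([1., 1., 1.])
x2 = torch.full((3,), 2.0)  # tensor([2., 2., 2.])

y = x1.add(x2)   # out-of-place: returns a new tensor, x1 is unchanged
x1.add_(x2)      # in-place: overwrites x1 (note the trailing underscore)

print(y)   # tensor([3., 3., 3.])
print(x1)  # tensor([3., 3., 3.])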