This repository was archived by the owner on Aug 28, 2025. It is now read-only.
Merged
fixing
Borda committed Mar 24, 2023
commit b5440f9d6201a695483523b723ac92d26c7f1aa0
.pre-commit-config.yaml (5 changes: 1 addition & 4 deletions)
@@ -64,7 +64,4 @@ repos:
     rev: v0.0.259
     hooks:
       - id: ruff
-        args:
-          - "--fix"
-          # Respect `exclude` and `extend-exclude` settings.
-          - "--force-exclude"
+        args: ["--fix"]
@@ -217,12 +217,13 @@
 # %%
 class CIFARModule(L.LightningModule):
     def __init__(self, model_name, model_hparams, optimizer_name, optimizer_hparams):
-        """
-        Inputs:
-            model_name - Name of the model/CNN to run. Used for creating the model (see function below)
-            model_hparams - Hyperparameters for the model, as dictionary.
-            optimizer_name - Name of the optimizer to use. Currently supported: Adam, SGD
-            optimizer_hparams - Hyperparameters for the optimizer, as dictionary. This includes learning rate, weight decay, etc.
+        """CIFARModule.
+
+        Args:
+            model_name: Name of the model/CNN to run. Used for creating the model (see function below)
+            model_hparams: Hyperparameters for the model, as dictionary.
+            optimizer_name: Name of the optimizer to use. Currently supported: Adam, SGD
+            optimizer_hparams: Hyperparameters for the optimizer, as dictionary. This includes learning rate, weight decay, etc.
         """
         super().__init__()
         # Exports the hyperparameters to a YAML file, and create "self.hparams" namespace
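For orientation, a minimal usage sketch of the constructor documented above (editorial, not part of the diff); every hyperparameter value here is an illustrative assumption:

    module = CIFARModule(
        model_name="GoogleNet",  # assumed key in the tutorial's model registry
        model_hparams={"num_classes": 10, "act_fn_name": "relu"},  # illustrative keys
        optimizer_name="Adam",  # the docstring lists Adam and SGD as supported
        optimizer_hparams={"lr": 1e-3, "weight_decay": 1e-4},
    )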
@@ -337,10 +338,11 @@ def create_model(model_name, model_hparams):
 
 # %%
 def train_model(model_name, save_name=None, **kwargs):
-    """
-    Inputs:
-        model_name - Name of the model you want to run. Is used to look up the class in "model_dict"
-        save_name (optional) - If specified, this name will be used for creating the checkpoint and logging directory.
+    """Train model.
+
+    Args:
+        model_name: Name of the model you want to run. Is used to look up the class in "model_dict"
+        save_name (optional): If specified, this name will be used for creating the checkpoint and logging directory.
     """
     if save_name is None:
         save_name = model_name
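A hedged call sketch for train_model; that the extra keyword arguments are forwarded to the LightningModule is an assumption based on the **kwargs signature, and all names and values are illustrative:

    model, results = train_model(  # the (model, results) return pair is an assumption
        model_name="GoogleNet",
        model_hparams={"num_classes": 10, "act_fn_name": "relu"},
        optimizer_name="Adam",
        optimizer_hparams={"lr": 1e-3, "weight_decay": 1e-4},
    )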
@@ -420,10 +422,10 @@ def __init__(self, c_in, c_red: dict, c_out: dict, act_fn):
         """InceptionBlock.
 
         Args:
-            c_in - Number of input feature maps from the previous layers
-            c_red - Dictionary with keys "3x3" and "5x5" specifying the output of the dimensionality reducing 1x1 convolutions
-            c_out - Dictionary with keys "1x1", "3x3", "5x5", and "max"
-            act_fn - Activation class constructor (e.g. nn.ReLU)
+            c_in: Number of input feature maps from the previous layers
+            c_red: Dictionary with keys "3x3" and "5x5" specifying the output of the dimensionality reducing 1x1 convolutions
+            c_out: Dictionary with keys "1x1", "3x3", "5x5", and "max"
+            act_fn: Activation class constructor (e.g. nn.ReLU)
         """
         super().__init__()
 
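A construction sketch matching the dictionary arguments the docstring describes; the channel counts are illustrative assumptions:

    import torch.nn as nn

    block = InceptionBlock(
        c_in=64,
        c_red={"3x3": 32, "5x5": 16},  # channels of the 1x1 reduction convs
        c_out={"1x1": 16, "3x3": 32, "5x5": 8, "max": 8},  # per-branch output channels
        act_fn=nn.ReLU,  # class constructor, not an instance, per the docstring
    )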
@@ -669,9 +671,9 @@ class ResNetBlock(nn.Module):
     def __init__(self, c_in, act_fn, subsample=False, c_out=-1):
         """ResNetBlock.
 
-        Inputs:
-            c_in - Number of input features
-            act_fn - Activation class constructor (e.g. nn.ReLU)
+        Args:
+            c_in: Number of input features
+            act_fn: Activation class constructor (e.g. nn.ReLU)
             subsample - If True, we want to apply a stride inside the block and reduce the output shape by 2 in height and width
             c_out - Number of output features. Note that this is only relevant if subsample is True, as otherwise, c_out = c_in
         """
@@ -715,7 +717,7 @@ class PreActResNetBlock(nn.Module):
     def __init__(self, c_in, act_fn, subsample=False, c_out=-1):
         """PreAct ResNet Block.
 
-        Inputs:
+        Args:
             c_in - Number of input features
             act_fn - Activation class constructor (e.g. nn.ReLU)
             subsample - If True, we want to apply a stride inside the block and reduce the output shape by 2 in height and width
@@ -785,7 +787,7 @@ def __init__(
     ):
         """ResNet.
 
-        Inputs:
+        Args:
             num_classes - Number of classification outputs (10 for CIFAR10)
             num_blocks - List with the number of ResNet blocks to use. The first block of each group uses downsampling, except the first.
             c_hidden - List with the hidden dimensionalities in the different blocks. Usually multiplied by 2 the deeper we go.
@@ -955,7 +957,7 @@ class DenseLayer(nn.Module):
     def __init__(self, c_in, bn_size, growth_rate, act_fn):
         """DenseLayer.
 
-        Inputs:
+        Args:
             c_in - Number of input channels
             bn_size - Bottleneck size (factor of growth rate) for the output of the 1x1 convolution. Typically between 2 and 4.
             growth_rate - Number of output channels of the 3x3 convolution
@@ -987,7 +989,7 @@ class DenseBlock(nn.Module):
     def __init__(self, c_in, num_layers, bn_size, growth_rate, act_fn):
         """Dense Block.
 
-        Inputs:
+        Args:
             c_in - Number of input channels
             num_layers - Number of dense layers to apply in the block
             bn_size - Bottleneck size to use in the dense layers
course_UvA-DL/11-vision-transformer/Vision_Transformer.py (38 changes: 19 additions & 19 deletions)
@@ -154,10 +154,10 @@
 # %%
 def img_to_patch(x, patch_size, flatten_channels=True):
     """
-    Inputs:
-        x - Tensor representing the image of shape [B, C, H, W]
-        patch_size - Number of pixels per dimension of the patches (integer)
-        flatten_channels - If True, the patches will be returned in a flattened format
+    Args:
+        x: Tensor representing the image of shape [B, C, H, W]
+        patch_size: Number of pixels per dimension of the patches (integer)
+        flatten_channels: If True, the patches will be returned in a flattened format
             as a feature vector instead of a image grid.
     """
     B, C, H, W = x.shape
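The function body is truncated in this view; below is a hedged sketch of a reshape/permute sequence consistent with the docstring (an assumption, not recovered from the diff):

    x = x.reshape(B, C, H // patch_size, patch_size, W // patch_size, patch_size)
    x = x.permute(0, 2, 4, 1, 3, 5)  # [B, H', W', C, p_H, p_W]
    x = x.flatten(1, 2)  # [B, H'*W', C, p_H, p_W]
    if flatten_channels:
        x = x.flatten(2, 4)  # [B, H'*W', C*p_H*p_W]
    return x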
@@ -211,12 +211,12 @@ class AttentionBlock(nn.Module):
     def __init__(self, embed_dim, hidden_dim, num_heads, dropout=0.0):
         """Attention Block.
 
-        Inputs:
-            embed_dim - Dimensionality of input and attention feature vectors
-            hidden_dim - Dimensionality of hidden layer in feed-forward network
+        Args:
+            embed_dim: Dimensionality of input and attention feature vectors
+            hidden_dim: Dimensionality of hidden layer in feed-forward network
                 (usually 2-4x larger than embed_dim)
-            num_heads - Number of heads to use in the Multi-Head Attention block
-            dropout - Amount of dropout to apply in the feed-forward network
+            num_heads: Number of heads to use in the Multi-Head Attention block
+            dropout: Amount of dropout to apply in the feed-forward network
         """
         super().__init__()
 
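A hedged sketch of the submodules a pre-LayerNorm attention block like this typically registers right after super().__init__(); the attribute names and layer choices are assumptions, not diff content:

    self.layer_norm_1 = nn.LayerNorm(embed_dim)
    self.attn = nn.MultiheadAttention(embed_dim, num_heads)
    self.layer_norm_2 = nn.LayerNorm(embed_dim)
    self.linear = nn.Sequential(
        nn.Linear(embed_dim, hidden_dim),
        nn.GELU(),
        nn.Dropout(dropout),
        nn.Linear(hidden_dim, embed_dim),
        nn.Dropout(dropout),
    )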
@@ -271,17 +271,17 @@ def __init__(
     ):
         """Vision Transformer.
 
-        Inputs:
-            embed_dim - Dimensionality of the input feature vectors to the Transformer
-            hidden_dim - Dimensionality of the hidden layer in the feed-forward networks
+        Args:
+            embed_dim: Dimensionality of the input feature vectors to the Transformer
+            hidden_dim: Dimensionality of the hidden layer in the feed-forward networks
                 within the Transformer
-            num_channels - Number of channels of the input (3 for RGB)
-            num_heads - Number of heads to use in the Multi-Head Attention block
-            num_layers - Number of layers to use in the Transformer
-            num_classes - Number of classes to predict
-            patch_size - Number of pixels that the patches have per dimension
-            num_patches - Maximum number of patches an image can have
-            dropout - Amount of dropout to apply in the feed-forward network and
+            num_channels: Number of channels of the input (3 for RGB)
+            num_heads: Number of heads to use in the Multi-Head Attention block
+            num_layers: Number of layers to use in the Transformer
+            num_classes: Number of classes to predict
+            patch_size: Number of pixels that the patches have per dimension
+            num_patches: Maximum number of patches an image can have
+            dropout: Amount of dropout to apply in the feed-forward network and
                 on the input encoding
         """
         super().__init__()
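An instantiation sketch for CIFAR10-sized inputs: 32x32 images with patch_size=4 yield at most (32/4)**2 = 64 patches. All values are illustrative assumptions:

    vit = VisionTransformer(
        embed_dim=256, hidden_dim=512, num_channels=3, num_heads=8,
        num_layers=6, num_classes=10, patch_size=4, num_patches=64, dropout=0.2,
    )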
course_UvA-DL/12-meta-learning/Meta_Learning.py (77 changes: 40 additions & 37 deletions)
@@ -153,10 +153,10 @@
 class ImageDataset(data.Dataset):
     def __init__(self, imgs, targets, img_transform=None):
         """
-        Inputs:
-            imgs - Numpy array of shape [N,32,32,3] containing all images.
-            targets - PyTorch array of shape [N] containing all labels.
-            img_transform - A torchvision transformation that should be applied
+        Args:
+            imgs: Numpy array of shape [N,32,32,3] containing all images.
+            targets: PyTorch array of shape [N] containing all labels.
+            img_transform: A torchvision transformation that should be applied
                 to the images before returning. If none, no transformation
                 is applied.
         """
@@ -259,17 +259,17 @@ class FewShotBatchSampler:
     def __init__(self, dataset_targets, N_way, K_shot, include_query=False, shuffle=True, shuffle_once=False):
         """FewShot Batch Sampler.
 
-        Inputs:
-            dataset_targets - PyTorch tensor of the labels of the data elements.
-            N_way - Number of classes to sample per batch.
-            K_shot - Number of examples to sample per class in the batch.
-            include_query - If True, returns batch of size N_way*K_shot*2, which
+        Args:
+            dataset_targets: PyTorch tensor of the labels of the data elements.
+            N_way: Number of classes to sample per batch.
+            K_shot: Number of examples to sample per class in the batch.
+            include_query: If True, returns batch of size N_way*K_shot*2, which
                 can be split into support and query set. Simplifies
                 the implementation of sampling the same classes but
                 distinct examples for support and query set.
-            shuffle - If True, examples and classes are newly shuffled in each
+            shuffle: If True, examples and classes are newly shuffled in each
                 iteration (for training)
-            shuffle_once - If True, examples and classes are shuffled once in
+            shuffle_once: If True, examples and classes are shuffled once in
                 the beginning, but kept constant across iterations
                 (for validation)
         """
@@ -478,10 +478,11 @@ def get_convnet(output_size):
 # %%
 class ProtoNet(L.LightningModule):
     def __init__(self, proto_dim, lr):
-        """Inputs.
+        """ProtoNet.
 
-        proto_dim - Dimensionality of prototype feature space
-        lr - Learning rate of Adam optimizer
+        Args:
+            proto_dim: Dimensionality of prototype feature space
+            lr: Learning rate of Adam optimizer
         """
         super().__init__()
         self.save_hyperparameters()
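To make proto_dim concrete, a hedged illustration of the prototype computation a ProtoNet performs: each class prototype is the mean embedding of its support examples. Variable names and shapes are assumptions:

    # features: [N_way * K_shot, proto_dim], targets: [N_way * K_shot]
    classes, _ = torch.unique(targets).sort()
    prototypes = torch.stack([features[targets == c].mean(dim=0) for c in classes])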
@@ -629,15 +630,16 @@ def train_model(model_class, train_loader, val_loader, **kwargs):
 # %%
 @torch.no_grad()
 def test_proto_net(model, dataset, data_feats=None, k_shot=4):
-    """Inputs.
-
-    model - Pretrained ProtoNet model
-    dataset - The dataset on which the test should be performed.
-        Should be instance of ImageDataset
-    data_feats - The encoded features of all images in the dataset.
-        If None, they will be newly calculated, and returned
-        for later usage.
-    k_shot - Number of examples per class in the support set.
+    """Test proto net.
+
+    Args:
+        model: Pretrained ProtoNet model
+        dataset: The dataset on which the test should be performed.
+            Should be instance of ImageDataset
+        data_feats: The encoded features of all images in the dataset.
+            If None, they will be newly calculated, and returned
+            for later usage.
+        k_shot: Number of examples per class in the support set.
     """
     model = model.to(device)
     model.eval()
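A hedged call sketch; the (accuracies, features) return pair is inferred from the data_feats description above and is an assumption, as are the variable names:

    accuracies, data_feats = test_proto_net(protonet_model, test_set, data_feats=None, k_shot=4)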
@@ -848,13 +850,14 @@ def plot_few_shot(acc_dict, name, color=None, ax=None):
 # %%
 class ProtoMAML(L.LightningModule):
     def __init__(self, proto_dim, lr, lr_inner, lr_output, num_inner_steps):
-        """Inputs.
-
-        proto_dim - Dimensionality of prototype feature space
-        lr - Learning rate of the outer loop Adam optimizer
-        lr_inner - Learning rate of the inner loop SGD optimizer
-        lr_output - Learning rate for the output layer in the inner loop
-        num_inner_steps - Number of inner loop updates to perform
+        """ProtoMAML.
+
+        Args:
+            proto_dim: Dimensionality of prototype feature space
+            lr: Learning rate of the outer loop Adam optimizer
+            lr_inner: Learning rate of the inner loop SGD optimizer
+            lr_output: Learning rate for the output layer in the inner loop
+            num_inner_steps: Number of inner loop updates to perform
         """
         super().__init__()
         self.save_hyperparameters()
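A hedged instantiation sketch; the learning rates and inner-step count are illustrative assumptions:

    protomaml = ProtoMAML(proto_dim=64, lr=1e-3, lr_inner=0.1, lr_output=0.1, num_inner_steps=1)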
@@ -970,16 +973,16 @@ class TaskBatchSampler:
     def __init__(self, dataset_targets, batch_size, N_way, K_shot, include_query=False, shuffle=True):
         """Task Batch Sampler.
 
-        Inputs:
-            dataset_targets - PyTorch tensor of the labels of the data elements.
-            batch_size - Number of tasks to aggregate in a batch
-            N_way - Number of classes to sample per batch.
-            K_shot - Number of examples to sample per class in the batch.
-            include_query - If True, returns batch of size N_way*K_shot*2, which
+        Args:
+            dataset_targets: PyTorch tensor of the labels of the data elements.
+            batch_size: Number of tasks to aggregate in a batch
+            N_way: Number of classes to sample per batch.
+            K_shot: Number of examples to sample per class in the batch.
+            include_query: If True, returns batch of size N_way*K_shot*2, which
                 can be split into support and query set. Simplifies
                 the implementation of sampling the same classes but
                 distinct examples for support and query set.
-            shuffle - If True, examples and classes are newly shuffled in each
+            shuffle: If True, examples and classes are newly shuffled in each
                 iteration (for training)
         """
         super().__init__()
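A usage sketch mirroring the FewShotBatchSampler example above, with several tasks aggregated per batch; the variable names and the intent of per-task batching are assumptions:

    task_sampler = TaskBatchSampler(train_set.targets, batch_size=16, N_way=5, K_shot=4, include_query=True)
    train_loader = data.DataLoader(train_set, batch_sampler=task_sampler, num_workers=2)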