This repository was archived by the owner on Aug 28, 2025. It is now read-only.
Merged
[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
pre-commit-ci[bot] committed Mar 24, 2023
commit de3d954a61b436600e05a52b1fae19c10fc391e2
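The changes below are mechanical docstring fixes applied across the tutorial notebooks: the hook appends a period to each docstring's one-line summary, and in dqn.py it removes a stray blank line between each class statement and its first method. A minimal before/after sketch of the docstring fix (attributing it to a pydocstyle-style D400 rule is an assumption; the commit message only says "auto fixes from pre-commit.com hooks", and the toy class below is hypothetical):

from torch import nn


class ToyNetwork(nn.Module):
    def __init__(self):
        """Base Network

        Before the fix: the summary line above lacks a trailing period.
        """
        super().__init__()


class ToyNetworkFixed(nn.Module):
    def __init__(self):
        """Base Network.

        After the fix: only the period is appended; the rest of the
        docstring is left untouched.
        """
        super().__init__()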
@@ -278,7 +278,7 @@ def vis_act_fn(act_fn, ax, x):
# %%
class BaseNetwork(nn.Module):
def __init__(self, act_fn, input_size=784, num_classes=10, hidden_sizes=[512, 256, 256, 128]):
"""Base Network
"""Base Network.

Args:
act_fn: Object of the activation function that should be used as non-linearity in the network.
@@ -431,7 +431,7 @@ def save_model(model, model_path, model_name):

# %%
def visualize_gradients(net, color="C0"):
"""Visualize gradients
"""Visualize gradients.

Args:
net: Object of class BaseNetwork
@@ -151,7 +151,7 @@
# %%
class BaseNetwork(nn.Module):
def __init__(self, act_fn, input_size=784, num_classes=10, hidden_sizes=[512, 256, 256, 128]):
"""Base Network
"""Base Network.

Args:
act_fn: Object of the activation function that should be used as non-linearity in the network.
@@ -417,7 +417,7 @@ def train_model(model_name, save_name=None, **kwargs):
# %%
class InceptionBlock(nn.Module):
def __init__(self, c_in, c_red: dict, c_out: dict, act_fn):
"""InceptionBlock
"""InceptionBlock.

Args:
c_in - Number of input feature maps from the previous layers
@@ -667,7 +667,7 @@ def forward(self, x):

class ResNetBlock(nn.Module):
def __init__(self, c_in, act_fn, subsample=False, c_out=-1):
"""ResNetBlock
"""ResNetBlock.

Inputs:
c_in - Number of input features
@@ -713,7 +713,7 @@ def forward(self, x):
# %%
class PreActResNetBlock(nn.Module):
def __init__(self, c_in, act_fn, subsample=False, c_out=-1):
"""PreAct ResNet Block
"""PreAct ResNet Block.

Inputs:
c_in - Number of input features
@@ -783,7 +783,7 @@ def __init__(
block_name="ResNetBlock",
**kwargs,
):
"""ResNet
"""ResNet.

Inputs:
num_classes - Number of classification outputs (10 for CIFAR10)
@@ -953,7 +953,7 @@ def forward(self, x):
# %%
class DenseLayer(nn.Module):
def __init__(self, c_in, bn_size, growth_rate, act_fn):
"""DenseLayer
"""DenseLayer.

Inputs:
c_in - Number of input channels
@@ -985,7 +985,7 @@ def forward(self, x):
# %%
class DenseBlock(nn.Module):
def __init__(self, c_in, num_layers, bn_size, growth_rate, act_fn):
"""Dense Block
"""Dense Block.

Inputs:
c_in - Number of input channels
@@ -463,7 +463,7 @@ def forward(self, x, mask=None, return_attention=False):
# %%
class EncoderBlock(nn.Module):
def __init__(self, input_dim, num_heads, dim_feedforward, dropout=0.0):
"""EncoderBlock
"""EncoderBlock.

Args:
input_dim: Dimensionality of the input
@@ -573,7 +573,7 @@ def get_attention_maps(self, x, mask=None):
# %%
class PositionalEncoding(nn.Module):
def __init__(self, d_model, max_len=5000):
"""Positional Encoding
"""Positional Encoding.

Args:
d_model: Hidden dimensionality of the input.
@@ -760,7 +760,7 @@ def __init__(
dropout=0.0,
input_dropout=0.0,
):
"""TransformerPredictor
"""TransformerPredictor.

Args:
input_dim: Hidden dimensionality of the input
16 changes: 8 additions & 8 deletions course_UvA-DL/06-graph-neural-networks/GNN_overview.py
@@ -162,7 +162,7 @@ def __init__(self, c_in, c_out):
self.projection = nn.Linear(c_in, c_out)

def forward(self, node_feats, adj_matrix):
"""forward
"""Forward.

Args:
node_feats: Tensor with node features of shape [batch_size, num_nodes, c_in]
@@ -318,7 +318,7 @@ def __init__(self, c_in, c_out, num_heads=1, concat_heads=True, alpha=0.2):
nn.init.xavier_uniform_(self.a.data, gain=1.414)

def forward(self, node_feats, adj_matrix, print_attn_probs=False):
"""forward
"""Forward.

Args:
node_feats: Input features of the node. Shape: [batch_size, c_in]
@@ -499,7 +499,7 @@ def __init__(
dp_rate=0.1,
**kwargs,
):
"""GNNModel
"""GNNModel.

Args:
c_in: Dimension of input features
@@ -526,7 +526,7 @@ def __init__(
self.layers = nn.ModuleList(layers)

def forward(self, x, edge_index):
"""forward
"""Forward.

Args:
x: Input features per node
@@ -553,7 +553,7 @@ def forward(self, x, edge_index):
# %%
class MLPModel(nn.Module):
def __init__(self, c_in, c_hidden, c_out, num_layers=2, dp_rate=0.1):
"""MLPModel
"""MLPModel.

Args:
c_in: Dimension of input features
@@ -572,7 +572,7 @@ def __init__(self, c_in, c_hidden, c_out, num_layers=2, dp_rate=0.1):
self.layers = nn.Sequential(*layers)

def forward(self, x, *args, **kwargs):
"""forward
"""Forward.

Args:
x: Input features per node
@@ -850,7 +850,7 @@ def print_results(result_dict):
# %%
class GraphGNNModel(nn.Module):
def __init__(self, c_in, c_hidden, c_out, dp_rate_linear=0.5, **kwargs):
"""GraphGNNModel
"""GraphGNNModel.

Args:
c_in: Dimension of input features
@@ -864,7 +864,7 @@ def __init__(self, c_in, c_hidden, c_out, dp_rate_linear=0.5, **kwargs):
self.head = nn.Sequential(nn.Dropout(dp_rate_linear), nn.Linear(c_hidden, c_out))

def forward(self, x, edge_index, batch_idx):
"""forward
"""Forward.

Args:
x: Input features per node
@@ -335,7 +335,7 @@ def forward(self, x):
# %%
class Sampler:
def __init__(self, model, img_shape, sample_size, max_len=8192):
"""Sampler
"""Sampler.

Args:
model: Neural network to use for modeling E_theta
4 changes: 2 additions & 2 deletions course_UvA-DL/08-deep-autoencoders/Deep_Autoencoders.py
@@ -131,7 +131,7 @@ def get_train_images(num):
# %%
class Encoder(nn.Module):
def __init__(self, num_input_channels: int, base_channel_size: int, latent_dim: int, act_fn: object = nn.GELU):
"""Encoder
"""Encoder.

Args:
num_input_channels : Number of input channels of the image. For CIFAR, this parameter is 3
@@ -191,7 +191,7 @@ def forward(self, x):
# %%
class Decoder(nn.Module):
def __init__(self, num_input_channels: int, base_channel_size: int, latent_dim: int, act_fn: object = nn.GELU):
"""Decoder
"""Decoder.

Args:
num_input_channels : Number of channels of the image to reconstruct. For CIFAR, this parameter is 3
16 changes: 7 additions & 9 deletions course_UvA-DL/09-normalizing-flows/NF_image_modeling.py
@@ -261,7 +261,7 @@ def show_imgs(imgs, title=None, row_size=4):
# %%
class ImageFlow(L.LightningModule):
def __init__(self, flows, import_samples=8):
"""ImageFlow
"""ImageFlow.

Args:
flows: A list of flows (each a nn.Module) that should be applied on the images.
@@ -402,7 +402,7 @@ def test_step(self, batch, batch_idx):
# %%
class Dequantization(nn.Module):
def __init__(self, alpha=1e-5, quants=256):
"""Dequantization
"""Dequantization.

Args:
alpha: small constant that is used to scale the original input.
@@ -590,7 +590,7 @@ def visualize_dequantization(quants, prior=None):
# %%
class VariationalDequantization(Dequantization):
def __init__(self, var_flows, alpha=1e-5):
"""Variational Dequantization
"""Variational Dequantization.

Args:
var_flows: A list of flow transformations to use for modeling q(u|x)
@@ -676,7 +676,7 @@ def __init__(self, network, mask, c_in):
self.register_buffer("mask", mask)

def forward(self, z, ldj, reverse=False, orig_img=None):
"""forward
"""Forward.

Args:
z: Latent input to the flow
@@ -804,8 +804,7 @@ def forward(self, x):

class LayerNormChannels(nn.Module):
def __init__(self, c_in, eps=1e-5):
"""
This module applies layer norm across channels in an image.
"""This module applies layer norm across channels in an image.

Args:
c_in: Number of channels of the input
@@ -826,8 +825,7 @@ def forward(self, x):

class GatedConv(nn.Module):
def __init__(self, c_in, c_hidden):
"""
This module applies a two-layer convolutional ResNet block with input gate
"""This module applies a two-layer convolutional ResNet block with input gate.

Args:
c_in: Number of channels of the input
@@ -1332,7 +1330,7 @@ def interpolate(model, img1, img2, num_steps=8):

# %%
def visualize_dequant_distribution(model: ImageFlow, imgs: Tensor, title: str = None):
"""Visualize dequant distribution
"""Visualize dequant distribution.

Args:
model: The flow of which we want to visualize the dequantization distribution
4 changes: 2 additions & 2 deletions course_UvA-DL/11-vision-transformer/Vision_Transformer.py
@@ -209,7 +209,7 @@ def img_to_patch(x, patch_size, flatten_channels=True):
# %%
class AttentionBlock(nn.Module):
def __init__(self, embed_dim, hidden_dim, num_heads, dropout=0.0):
"""Attention Block
"""Attention Block.

Inputs:
embed_dim - Dimensionality of input and attention feature vectors
@@ -269,7 +269,7 @@ def __init__(
num_patches,
dropout=0.0,
):
"""Vision Transformer
"""Vision Transformer.

Inputs:
embed_dim - Dimensionality of the input feature vectors to the Transformer
4 changes: 2 additions & 2 deletions course_UvA-DL/12-meta-learning/Meta_Learning.py
@@ -257,7 +257,7 @@ def dataset_from_labels(imgs, targets, class_set, **kwargs):
# %%
class FewShotBatchSampler:
def __init__(self, dataset_targets, N_way, K_shot, include_query=False, shuffle=True, shuffle_once=False):
"""FewShot Batch Sampler
"""FewShot Batch Sampler.

Inputs:
dataset_targets - PyTorch tensor of the labels of the data elements.
@@ -968,7 +968,7 @@ def validation_step(self, batch, batch_idx):
# %%
class TaskBatchSampler:
def __init__(self, dataset_targets, batch_size, N_way, K_shot, include_query=False, shuffle=True):
"""Task Batch Sampler
"""Task Batch Sampler.

Inputs:
dataset_targets - PyTorch tensor of the labels of the data elements.
3 changes: 0 additions & 3 deletions lightning_examples/reinforce-learning-DQN/dqn.py
@@ -21,7 +21,6 @@

# %%
class DQN(nn.Module):
-
def __init__(self, obs_size: int, n_actions: int, hidden_size: int = 128):
"""Simple MLP network.

@@ -113,7 +112,6 @@ def __iter__(self) -> Iterator[Tuple]:

# %%
class Agent:
-
def __init__(self, env: gym.Env, replay_buffer: ReplayBuffer) -> None:
"""Base Agent class handeling the interaction with the environment.

@@ -193,7 +191,6 @@ def play_step(

# %%
class DQNLightning(LightningModule):
-
def __init__(
self,
batch_size: int = 16,
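The dqn.py hunks above carry the second kind of auto-fix in this commit: three deletions and no additions, each removing the blank line between a class statement and its first method, matching the file summary. A minimal sketch of that transformation (attributing it to a specific formatter hook would be an assumption, so none is named; the stub body below is hypothetical):

from torch import nn


# Before the fix: a blank line separates the class statement from __init__.
class DQN(nn.Module):

    def __init__(self, obs_size: int, n_actions: int, hidden_size: int = 128):
        super().__init__()


# After the fix: the blank line is gone; nothing else changes. The second
# definition shadows the first here only so both variants sit in one
# runnable snippet.
class DQN(nn.Module):
    def __init__(self, obs_size: int, n_actions: int, hidden_size: int = 128):
        super().__init__()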