-
Notifications
You must be signed in to change notification settings - Fork 11
Expand file tree
/
Copy path nn_classifier.py
More file actions
96 lines (78 loc) · 3.18 KB
/
nn_classifier.py
File metadata and controls
96 lines (78 loc) · 3.18 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import TensorDataset, DataLoader
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm
class NeuralNet(nn.Module):
    """Three-layer MLP classifier.

    Architecture: input -> fc1 -> ReLU -> Dropout(0.5) -> fc2 -> ReLU -> fc3.
    Returns raw logits (no softmax); pair with a criterion such as
    CrossEntropyLoss that applies log-softmax internally.

    Args:
        input_size: dimensionality of each input vector.
        hidden_size_list: sizes of the two hidden layers, e.g. [512, 256].
        num_classes: number of output logits.
    """

    def __init__(self, input_size, hidden_size_list, num_classes):
        super(NeuralNet, self).__init__()
        # NOTE: submodule creation order (dropout2, fc1, fc2, fc3) is kept
        # identical to the original so seeded parameter init and state_dict
        # keys match exactly.
        self.dropout2 = nn.Dropout(0.5)
        self.fc1 = nn.Linear(input_size, hidden_size_list[0])
        self.fc2 = nn.Linear(hidden_size_list[0], hidden_size_list[1])
        self.fc3 = nn.Linear(hidden_size_list[1], num_classes)

    def forward(self, x):
        """Map a batch of flat feature vectors to class logits."""
        hidden = self.dropout2(F.relu(self.fc1(x)))
        return self.fc3(F.relu(self.fc2(hidden)))
def create_torch_dataloader(feature_bank, label_bank, batch_size, shuffle=False, num_workers=2, pin_memory=True):
    """Wrap in-memory feature/label arrays in a batched DataLoader.

    Both inputs are converted with torch.Tensor, i.e. copied and cast to
    float32 regardless of the source dtype.

    Args:
        feature_bank: array-like of shape [N, D] with the input features.
        label_bank: array-like of shape [N] with the labels.
        batch_size: samples per batch.
        shuffle: whether to reshuffle each epoch.
        num_workers: worker processes for loading.
        pin_memory: pin host memory for faster GPU transfer.

    Returns:
        A DataLoader yielding (features, labels) tensor pairs.
    """
    dataset = TensorDataset(torch.Tensor(feature_bank), torch.Tensor(label_bank))
    return DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )
def net_train(net, train_loader, optimizer, epoch, criterion):
    """Run one training epoch over train_loader and print the epoch loss.

    Fix: batches are moved to the device the model lives on instead of the
    hard-coded .cuda() calls, so the function no longer crashes on CPU-only
    machines; behavior on CUDA hosts is unchanged.

    Args:
        net: the model to train (its parameters' device decides placement).
        train_loader: DataLoader yielding (data, label) batches.
        optimizer: optimizer stepping net's parameters.
        epoch: epoch index, used only for the log line.
        criterion: loss taking (logits, long labels).
    """
    net.train()
    # Derive the target device from the model; fall back to CPU for a
    # parameterless model.
    params = list(net.parameters())
    device = params[0].device if params else torch.device("cpu")
    overall_loss = 0.0
    for batch_idx, (data, label) in enumerate(train_loader):
        data = data.to(device, non_blocking=True)
        label = label.to(device, non_blocking=True)
        optimizer.zero_grad()
        output = net(data)
        # labels arrive as float tensors from TensorDataset; criterion needs long
        loss = criterion(output, label.long())
        loss.backward()
        optimizer.step()
        overall_loss += loss.item()
    # Approximate mean loss per sample (exact when all batches are full-size).
    print('Train Epoch: {} \tLoss: {:.6f}'.format(epoch, overall_loss*train_loader.batch_size/len(train_loader.dataset)))
def net_test(net, test_loader, epoch, criterion, keyword='Accuracy'):
    """Evaluate net on test_loader; print and return accuracy in percent.

    Fixes: batches are moved to the model's own device instead of hard-coded
    .cuda() (works on CPU-only machines, unchanged on CUDA), and the log line
    reuses the already-computed test_acc instead of recomputing it.

    Args:
        net: the model to evaluate.
        test_loader: DataLoader yielding (data, target) batches.
        epoch: epoch index, used only for the log line.
        criterion: loss taking (logits, long targets); accumulated per batch.
        keyword: metric name embedded in the printed JSON-style log line.

    Returns:
        Accuracy over the whole dataset, as a percentage (0.0-100.0).
    """
    net.eval()
    params = list(net.parameters())
    device = params[0].device if params else torch.device("cpu")
    test_loss = 0.0
    correct = 0.0
    with torch.no_grad():
        for data, target in test_loader:
            data = data.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)
            output = net(data)
            test_loss += criterion(output, target.long()).item()
            # predicted class = index of the max logit
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_acc = 100. * correct / len(test_loader.dataset)
    # NOTE: per-batch mean losses summed then divided by dataset size — kept
    # as-is to preserve the original reported value.
    test_loss /= len(test_loader.dataset)
    print('{{"metric": "Eval - {}", "value": {}, "epoch": {}}}'.format(
        keyword, test_acc, epoch))
    return test_acc
def predict_feature(net, data_loader):
    """Extract L2-normalized model outputs for every sample in data_loader.

    Fixes: batches are moved to the model's own device instead of hard-coded
    .cuda() (works on CPU-only machines, unchanged on CUDA), and the stale
    shape comment is corrected — concatenation along dim 0 yields [N, D].

    Args:
        net: model whose forward output is used as the feature vector.
        data_loader: DataLoader yielding (data, target) batches.

    Returns:
        Tuple of numpy arrays: (features of shape [N, D], row-normalized to
        unit L2 norm; targets of shape [N]).
    """
    net.eval()
    params = list(net.parameters())
    device = params[0].device if params else torch.device("cpu")
    feature_bank, target_bank = [], []
    with torch.no_grad():
        # generate feature bank, one batch at a time
        for data, target in tqdm(data_loader, desc='Feature extracting'):
            feature = net(data.to(device, non_blocking=True))
            # normalize each row to unit L2 norm
            feature = F.normalize(feature, dim=1)
            feature_bank.append(feature)
            target_bank.append(target)
    # [N, D]: batches stacked along the sample dimension
    feature_bank = torch.cat(feature_bank, dim=0).contiguous()
    target_bank = torch.cat(target_bank, dim=0).contiguous()
    return feature_bank.cpu().detach().numpy(), target_bank.detach().numpy()