import torch
import torch.nn as nn
import torch.nn.functional as F


def target_transform(labels):
    # Binarize labels: any positive value becomes 1.0, everything else 0.0.
    return (labels > 0) + 0.


def twoargmax(a):
    # Return the indices of the two largest values in `a`.
    l = list(zip(a, range(len(a))))
    l.sort()
    return [x[1] for x in l[-2:]]


def loss_function(output, target):
    # Multi-label loss: binary cross-entropy between sigmoid outputs and 0/1 targets.
    return F.binary_cross_entropy(output, target)


def count_correct(output, target):
    # A sample counts as correct only if both of its top-2 outputs
    # correspond to positive (== 1) entries in the target.
    correct = 0
    for i in range(len(output)):
        selected = twoargmax(output[i])
        both_correct = True
        for selection in selected:
            if target[i][selection] != 1:
                both_correct = False
        if both_correct:
            correct += 1
    return correct


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Three 3x3 conv layers; padding=1 preserves spatial size before pooling.
        self.conv1 = nn.Conv2d(1, 6, 3, padding=1)
        self.conv2 = nn.Conv2d(6, 16, 3, padding=1)
        self.conv3 = nn.Conv2d(16, 32, 3, padding=1)
        # Fully connected head; 288 = 32 channels * 3 * 3 after three 2x2 poolings.
        self.fc1 = nn.Linear(288, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 6)

    def forward(self, x):
        # Input arrives as (batch, H, W); add the single channel dimension.
        x = x.unsqueeze(1)
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = F.max_pool2d(F.relu(self.conv3(x)), 2)
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        # Sigmoid (not softmax): each of the 6 outputs is an independent probability.
        return torch.sigmoid(x)

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
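

# Minimal smoke test (a sketch, not part of the original training code).
# The 24x24 single-channel input size is an assumption inferred from fc1
# expecting 288 = 32 * 3 * 3 features after three rounds of 2x2 pooling;
# the two-hot targets below are likewise made up for illustration.
if __name__ == "__main__":
    net = Net()
    inputs = torch.rand(4, 24, 24)      # batch of 4 grayscale 24x24 images
    targets = torch.zeros(4, 6)
    targets[:, :2] = 1.                 # two positive labels per sample
    outputs = net(inputs)               # (4, 6) sigmoid probabilities
    loss = loss_function(outputs, targets)
    print("loss:", loss.item(), "correct:", count_correct(outputs, targets))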