path: root/src/net.py
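# Net: a configurable convolutional network (Conv2d blocks followed by fully
# connected blocks), assembled from layer-spec objects that provide
# conv_args() / linear_args().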
import torch.nn as nn

class Net(nn.Module):
    def __init__(self, convolutions, linears, outputs, finalizer, batch_norm=False, dropout=False):
        # convolutions, linears: sequences of layer-spec objects exposing
        # conv_args() / linear_args() (and a max_pool flag for convolutions).
        # outputs: width of the final linear layer. finalizer: a callable
        # applied to the final output. dropout: dropout probability, or False
        # to disable dropout layers.
        super(Net, self).__init__()

        self.finalizer = finalizer

        self.convolutions = self.make_convolutions(convolutions, batch_norm,
                dropout)
        self.linears = self.make_linears(linears, batch_norm, dropout)
        self.final_linear = nn.Linear(
                linears[-1].linear_args()['out_features'], outputs)

    def forward(self, x):
        x = x.unsqueeze(1)  # add a singleton channel dimension: (N, H, W) -> (N, 1, H, W)
        x = self.convolutions(x)
        x = x.view(-1, self.num_flat_features(x))  # flatten for the linear layers
        x = self.linears(x)
        x = self.final_linear(x)
        x = self.finalizer(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features

    def make_convolutions(self, convolutions, batch_norm, dropout):
        layers = []
        # each spec yields: Conv2d -> (BatchNorm2d) -> ReLU -> (Dropout2d) -> (MaxPool2d)
        for convolution in convolutions:
            conv_args = convolution.conv_args()
            layers.append(nn.Conv2d(**conv_args))
            if batch_norm:
                layers.append(nn.BatchNorm2d(conv_args['out_channels']))
            layers.append(nn.ReLU())
            if dropout:
                layers.append(nn.Dropout2d(dropout))
            if convolution.max_pool:
                layers.append(nn.MaxPool2d(2))

        return nn.Sequential(*layers)

    def make_linears(self, linears, batch_norm, dropout):
        layers = []
        # each spec yields: Linear -> (BatchNorm1d) -> ReLU -> (Dropout)
        for linear in linears:
            linear_args = linear.linear_args()
            layers.append(nn.Linear(**linear_args))
            if batch_norm:
                layers.append(nn.BatchNorm1d(linear_args['out_features']))
            layers.append(nn.ReLU())
            if dropout:
                layers.append(nn.Dropout(dropout))

        return nn.Sequential(*layers)
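
# A minimal usage sketch, added for illustration; it is not part of the
# original module. ConvSpec and LinearSpec are hypothetical stand-ins for
# whatever spec objects this repository pairs with Net; they only need to
# expose conv_args() / linear_args() and, for convolutions, a max_pool flag.
if __name__ == '__main__':
    import torch
    import torch.nn.functional as F

    class ConvSpec:
        def __init__(self, in_channels, out_channels, kernel_size, max_pool=False):
            self._args = {'in_channels': in_channels,
                          'out_channels': out_channels,
                          'kernel_size': kernel_size}
            self.max_pool = max_pool

        def conv_args(self):
            return self._args

    class LinearSpec:
        def __init__(self, in_features, out_features):
            self._args = {'in_features': in_features,
                          'out_features': out_features}

        def linear_args(self):
            return self._args

    net = Net(
        # 1 input channel -> 8 feature maps with a 3x3 kernel, then 2x2 max pool
        convolutions=[ConvSpec(1, 8, 3, max_pool=True)],
        # 28x28 input -> 26x26 after the conv -> 13x13 after pooling
        linears=[LinearSpec(8 * 13 * 13, 32)],
        outputs=10,
        finalizer=lambda x: F.log_softmax(x, dim=1),
        batch_norm=True,
        dropout=0.25,
    )
    out = net(torch.randn(4, 28, 28))  # batch of 4 single-channel 28x28 inputs
    print(out.shape)  # torch.Size([4, 10])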