author    Marcin Chrzanowski <m@m-chrzan.xyz>  2021-05-27 19:54:24 +0200
committer Marcin Chrzanowski <m@m-chrzan.xyz>  2021-05-27 19:54:24 +0200
commit    bb6a6004bfb5398b5cdf8f8a973b47e5659aec79 (patch)
tree      f770a9cdb856d25e731289c86583ac3c71937f3a /train
parent    22cd84cf78a114d75a93f75f66f6ea61c02c94ef (diff)
Fix indents
Diffstat (limited to 'train')
-rw-r--r--  train/train.py  97
1 file changed, 55 insertions(+), 42 deletions(-)
diff --git a/train/train.py b/train/train.py
index a88129a..1d8f26e 100644
--- a/train/train.py
+++ b/train/train.py
@@ -7,45 +7,58 @@ from torch import optim
from data.generate import get_single_example
from data.testset import get_testset
-def train_model(model, lr, num_steps, batch_size, device='cpu'):
- model.to(device)
-
- start_time = time()
- accs = []
-
- loss_function = nn.CrossEntropyLoss()
- optimizer = optim.Adam(model.parameters(), lr=lr)
-
- test_X, test_Y = get_testset()
-
- for step in range(num_steps):
- batch_examples = [get_single_example() for i in range(batch_size)]
-
- batch_X = torch.tensor([x[0] for x in batch_examples],
- device=device
- ).transpose(0, 1)
- batch_Y = torch.tensor([x[1] for x in batch_examples],
- device=device).transpose(0, 1)
-
- model.train()
- model.zero_grad()
- logits = model(batch_X)
- loss = loss_function(logits.reshape(-1, 10), batch_Y.reshape(-1))
- loss.backward()
- optimizer.step()
-
- if step % (num_steps//100) == 0 or step == num_steps - 1:
- # Printing a summary of the current state of training every 1% of steps.
- model.eval()
- predicted_logits = model.forward(test_X).reshape(-1, 10)
- test_acc = (
- torch.sum(torch.argmax(predicted_logits, dim=-1) == test_Y.reshape(-1))
- / test_Y.reshape(-1).shape[0])
- print('step', step, 'out of', num_steps)
- print('loss train', float(loss))
- print('accuracy test', float(test_acc))
- print()
- accs.append(test_acc)
- print('\nTRAINING TIME:', time()-start_time)
- model.eval()
- return accs
+def do_verbose_test(model, n_tokens, seqlen, max_count):
+ print('verbose test:')
+ x, y = get_single_example(n_tokens, seqlen, max_count)
+ x = torch.tensor([x]).transpose(0, 1)
+ print('in :', x.squeeze())
+ print('expected out:', torch.tensor(y))
+ print('model out :', torch.argmax(model(x), dim=2).squeeze())
+
+def train_model(model, lr, num_steps, batch_size, n_tokens, seqlen, max_count, device='cpu'):
+ torch.autograd.set_detect_anomaly(True)
+ model.to(device)
+
+ start_time = time()
+ accs = []
+
+ loss_function = nn.CrossEntropyLoss(
+ # weight=torch.log(2 + torch.tensor(range(max_count+1), dtype=torch.float))
+ )
+ optimizer = optim.Adam(model.parameters(), lr=lr)
+
+ test_X, test_Y = get_testset(n_tokens, seqlen, max_count)
+ print('test size', test_X.shape)
+
+ for step in range(num_steps):
+ batch_examples = [get_single_example(n_tokens, seqlen, max_count) for i in range(batch_size)]
+
+ batch_X = torch.tensor([x[0] for x in batch_examples],
+ device=device
+ ).transpose(0, 1)
+ batch_Y = torch.tensor([x[1] for x in batch_examples],
+ device=device).transpose(0, 1)
+
+ model.train()
+ model.zero_grad()
+ logits = model(batch_X)
+ loss = loss_function(logits.reshape(-1, max_count + 1), batch_Y.reshape(-1))
+ loss.backward()
+ optimizer.step()
+
+ if step % (num_steps//100) == 0 or step == num_steps - 1:
+ # Printing a summary of the current state of training every 1% of steps.
+ model.eval()
+ predicted_logits = model.forward(test_X).reshape(-1, max_count + 1)
+ test_acc = (
+ torch.sum(torch.argmax(predicted_logits, dim=-1) == test_Y.reshape(-1))
+ / test_Y.reshape(-1).shape[0])
+ print('step', step, 'out of', num_steps)
+ print('loss train', float(loss))
+ print('accuracy test', float(test_acc))
+ do_verbose_test(model, n_tokens, seqlen, max_count)
+ print()
+ accs.append(test_acc)
+ print('\nTRAINING TIME:', time()-start_time)
+ model.eval()
+ return accs
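For context, a minimal sketch of how train_model might be invoked after this commit. The ToyCounter model and all hyperparameter values below are illustrative assumptions, not code from this repository; only the train_model signature and the tensor shapes its loop expects (sequence-first batches of token ids in, logits over max_count + 1 classes out) are taken from the diff above.

# Usage sketch (assumed, not part of the commit). ToyCounter and the
# hyperparameter values are hypothetical stand-ins for the repo's models.
import torch
from torch import nn

from train.train import train_model  # assumed import path, given train/train.py

class ToyCounter(nn.Module):
    """Maps a (seqlen, batch) tensor of token ids to
    (seqlen, batch, max_count + 1) logits, the shapes train_model expects."""
    def __init__(self, n_tokens, max_count, hidden=64):
        super().__init__()
        self.embed = nn.Embedding(n_tokens, hidden)
        # nn.LSTM is sequence-first by default, matching the
        # transpose(0, 1) applied to each batch in train_model.
        self.rnn = nn.LSTM(hidden, hidden)
        self.head = nn.Linear(hidden, max_count + 1)

    def forward(self, x):
        out, _ = self.rnn(self.embed(x))
        return self.head(out)

if __name__ == '__main__':
    # max_count=9 gives the 10 classes that were hardcoded before this change.
    n_tokens, seqlen, max_count = 16, 32, 9
    model = ToyCounter(n_tokens, max_count)
    # num_steps >= 100 keeps the `step % (num_steps//100)` divisor nonzero.
    accs = train_model(model, lr=1e-3, num_steps=1000, batch_size=32,
                       n_tokens=n_tokens, seqlen=seqlen, max_count=max_count)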