-import torch as t
-from torch.autograd import Variable as v
+import torch
+from torch.autograd import Variable
 
 # simple gradient
-a = v(t.FloatTensor([2, 3]), requires_grad=True)
+a = Variable(torch.FloatTensor([2, 3]), requires_grad=True)
 b = a + 3
 c = b * b * 3
 out = c.mean()
 out.backward()
-print('*'*10)
+print('*' * 10)
 print('=====simple gradient======')
 print('input')
 print(a.data)
...
 print(a.grad.data)
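+# out = mean(3 * (a + 3)^2), so d(out)/da_i = 3 * (a_i + 3);
+# for a = [2, 3] the expected gradient is [15, 18]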
 
 # backward on non-scalar output
-m = v(t.FloatTensor([[2, 3]]), requires_grad=True)
-n = v(t.zeros(1, 2))
-n[0, 0] = m[0, 0] ** 2
-n[0, 1] = m[0, 1] ** 3
-n.backward(t.FloatTensor([[1, 1]]))
-print('*'*10)
+m = Variable(torch.FloatTensor([[2, 3]]), requires_grad=True)
+n = Variable(torch.zeros(1, 2))
+n[0, 0] = m[0, 0]**2
+n[0, 1] = m[0, 1]**3
+n.backward(torch.FloatTensor([[1, 1]]))
+print('*' * 10)
 print('=====non scalar output======')
 print('input')
 print(m.data)
 print('input gradients are')
 print(m.grad.data)
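+# n = [m1^2, m2^3] and each output depends on one input, so the
+# weighted backward gives m.grad = [2 * m1, 3 * m2^2] = [4, 27]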
 
 # jacobian
-j = t.zeros(2 ,2)
-k = v(t.zeros(1, 2))
+j = torch.zeros(2, 2)
+k = Variable(torch.zeros(1, 2))
 m.grad.data.zero_()
-k[0, 0] = m[0, 0] ** 2 + 3 * m[0 ,1]
-k[0, 1] = m[0, 1] ** 2 + 2 * m[0, 0]
-k.backward(t.FloatTensor([[1, 0]]), retain_variables=True)
+k[0, 0] = m[0, 0]**2 + 3 * m[0, 1]
+k[0, 1] = m[0, 1]**2 + 2 * m[0, 0]
+k.backward(torch.FloatTensor([[1, 0]]), retain_graph=True)  # keep graph for second backward
 j[:, 0] = m.grad.data
 m.grad.data.zero_()
-k.backward(t.FloatTensor([[0, 1]]))
+k.backward(torch.FloatTensor([[0, 1]]))
 j[:, 1] = m.grad.data
 print('jacobian matrix is')
 print(j)
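+# k = [m1^2 + 3 * m2, m2^2 + 2 * m1], so for m = [2, 3] the
+# jacobian is [[4, 2], [3, 6]] (column j holds dk_j/dm)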
 
 # compute jacobian matrix
-x = t.FloatTensor([2, 1]).view(1, 2)
-x = v(x, requires_grad=True)
-y = v(t.FloatTensor([[1, 2], [3, 4]]))
+x = torch.FloatTensor([2, 1]).view(1, 2)
+x = Variable(x, requires_grad=True)
+y = Variable(torch.FloatTensor([[1, 2], [3, 4]]))
 
-z = t.mm(x, y)
-jacobian = t.zeros((2, 2))
-z.backward(t.FloatTensor([[1, 0]]), retain_variables=True)  # dz1/dx1, dz2/dx1
+z = torch.mm(x, y)
+jacobian = torch.zeros((2, 2))
+z.backward(torch.FloatTensor([[1, 0]]), retain_graph=True)  # dz1/dx1, dz2/dx1
 jacobian[:, 0] = x.grad.data
 x.grad.data.zero_()
-z.backward(t.FloatTensor([[0, 1]]))  # dz1/dx2, dz2/dx2
+z.backward(torch.FloatTensor([[0, 1]]))  # dz1/dx2, dz2/dx2
 jacobian[:, 1] = x.grad.data
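+# z = x.mm(y) is linear in x with dz_j/dx_i = y[i][j], so the
+# jacobian equals y itself: [[1, 2], [3, 4]]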
 print('=========jacobian========')
 print('x')