import torch.nn.functional as F

import matplotlib.pyplot as plt
-%matplotlib
+# %matplotlib


# custom weights initialization called on netG and netD
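Note: the body of this initializer lies outside the hunk. As a sketch, assuming the usual DCGAN-style pattern (an assumption about this file, not code shown in the diff), it would look roughly like:

    def weights_init(m):
        # Re-initialize Linear layers with a small-variance Gaussian
        # and zero biases; other module types are left untouched.
        classname = m.__class__.__name__
        if classname.find('Linear') != -1:
            m.weight.data.normal_(0.0, 0.02)
            m.bias.data.fill_(0)

    # netG.apply(weights_init) walks every submodule and applies the function.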
@@ -34,7 +34,7 @@ def update_learning_rate(optimizer, epoch, init_lr, decay_rate, lr_decay_epochs)
    lr = init_lr * (decay_rate ** (epoch // lr_decay_epochs))

    if epoch % lr_decay_epochs == 0:
-        print 'LR set to {}'.format(lr)
+        print('LR set to {}'.format(lr))

    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
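The schedule above is a plain step decay: the learning rate is multiplied by decay_rate once every lr_decay_epochs epochs. A minimal sketch using the decay_rate=0.95 and lr_decay_epochs=150 values that appear later in this diff (init_lr=0.03 is an illustrative assumption):

    init_lr, decay_rate, lr_decay_epochs = 0.03, 0.95, 150
    for epoch in (0, 150, 300):
        # epoch // lr_decay_epochs counts completed decay periods
        print(init_lr * (decay_rate ** (epoch // lr_decay_epochs)))
    # -> 0.03, then ~0.0285, then ~0.027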
@@ -113,13 +113,14 @@ def sample(self, N):
        return np.reshape(samples, (-1, 1))


-def GeneratorDistribution(object):
+class GeneratorDistribution(object):
    def __init__(self, range):
        self.range = range

    def sample(self, N):
        samples = np.linspace(-self.range, self.range, N) + \
                  np.random.random(N) * 0.01
+        return samples


class Generator(torch.nn.Module):
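Two fixes land in this hunk: def becomes class, and sample() now actually returns its array (it previously fell off the end and returned None). A quick check, assuming the fixed class above is in scope (range=8 is an assumed value):

    import numpy as np

    gen_dist = GeneratorDistribution(range=8)
    z = gen_dist.sample(5)
    print(z.shape)  # (5,): an evenly spaced grid over [-8, 8] plus 0.01-scale jitter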
@@ -208,7 +209,7 @@ def forward(self, x):
    D_x = output.data.mean()

    # train with fake
-    z = torch.FloatTensor(gen_dist.sample(N))
+    z = torch.FloatTensor(gen_dist.sample(N))[..., None]  # (N_sample, N_channel)
    if use_cuda:
        z = z.cuda()
    zv = Variable(z)
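The added [..., None] gives the noise batch an explicit channel axis, since nn.Linear layers expect (batch, in_features) input rather than a bare 1-D vector. A shape sketch (N=4 is an arbitrary assumption):

    import torch

    z = torch.FloatTensor([0.1, 0.2, 0.3, 0.4])
    print(z.shape)             # torch.Size([4])
    print(z[..., None].shape)  # torch.Size([4, 1]) -- (N_sample, N_channel)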
@@ -242,8 +243,8 @@ def forward(self, x):
        decay_rate=0.95,
        lr_decay_epochs=150)

-    print '[%d/%d] Loss_D: %.4f Loss_G %.4f D(x): %.4f D(G(z)): %.4f / %.4f' \
-        % (epoch, epochs, errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2)
+    print('[%d/%d] Loss_D: %.4f Loss_G %.4f D(x): %.4f D(G(z)): %.4f / %.4f' \
+        % (epoch, epochs, errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))

    if epoch % plot_every_epochs == 0:
        # Plot distribution