 parser.add_argument('-log-interval', type=int, default=1, help='how many steps to wait before logging training status [default: 1]')
 parser.add_argument('-test-interval', type=int, default=100, help='how many steps to wait before testing [default: 100]')
 parser.add_argument('-save-interval', type=int, default=500, help='how many steps to wait before saving [default:500]')
-parser.add_argument('-save-dir', type=str, default='snapshot', help='where to save the checkpoint')
+parser.add_argument('-save-dir', type=str, default='snapshot', help='where to save the snapshot')
 # data
 parser.add_argument('-shuffle', action='store_true', default=False, help='shuffle the data every epoch' )
 # model
@@ -68,16 +68,16 @@ def mr(text_field, label_field, **kargs):
 print("\nLoading data...")
 text_field = data.Field(lower=True)
 label_field = data.Field(sequential=False)
-#train_iter, dev_iter = mr(text_field, label_field, device=-1, repeat=False)
-train_iter, dev_iter, test_iter = sst(text_field, label_field, device=-1, repeat=False)
+train_iter, dev_iter = mr(text_field, label_field, device=-1, repeat=False)
+#train_iter, dev_iter, test_iter = sst(text_field, label_field, device=-1, repeat=False)
 
 
 # update args and print
 args.embed_num = len(text_field.vocab)
 args.class_num = len(label_field.vocab) - 1
 args.cuda = (not args.no_cuda) and torch.cuda.is_available(); del args.no_cuda
 args.kernel_sizes = [int(k) for k in args.kernel_sizes.split(',')]
-args.save_dir = os.path.join(args.save_dir, datetime.datetime.now().strftime('%Y-%m-%d-%H-%M'))
+args.save_dir = os.path.join(args.save_dir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
 
 print("\nParameters:")
 for attr, value in sorted(args.__dict__.items()):
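For context on the `strftime` change above, here is a minimal sketch (ordinary Python, not part of the diff) of how the snapshot directory name differs between the old and new patterns. The timestamps in the comments are illustrative only, and `'snapshot'` is simply the `-save-dir` default from the argparse block above.

```python
import datetime
import os

save_dir = 'snapshot'  # the -save-dir default
now = datetime.datetime.now()

# Old pattern: minute resolution, dashes throughout, e.g. 'snapshot/2017-05-04-13-07'.
old_dir = os.path.join(save_dir, now.strftime('%Y-%m-%d-%H-%M'))

# New pattern: underscore between date and time plus seconds,
# e.g. 'snapshot/2017-05-04_13-07-42', so two runs launched within the
# same minute no longer collide in one snapshot directory.
new_dir = os.path.join(save_dir, now.strftime('%Y-%m-%d_%H-%M-%S'))

print(old_dir)
print(new_dir)
```

The other change simply switches the data loader from `sst` to `mr`; as the commented-out line shows, `mr` returns only a train/dev pair rather than train/dev/test.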