diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..cc75c920
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+*.py[cod]
+*.so
+*.egg
+*.egg-info
+*.DS_Store
diff --git a/Code/1_data_prepare/1_1_cifar10_to_png.py b/Code/1_data_prepare/1_1_cifar10_to_png.py
index 9df14994..047944e5 100644
--- a/Code/1_data_prepare/1_1_cifar10_to_png.py
+++ b/Code/1_data_prepare/1_1_cifar10_to_png.py
@@ -3,22 +3,23 @@
将cifar10的data_batch_12345 转换成 png格式的图片
每个类别单独存放在一个文件夹,文件夹名称为0-9
"""
-from scipy.misc import imsave
+from imageio import imwrite
import numpy as np
import os
import pickle
-data_dir = '../../Data/cifar-10-batches-py/'
-train_o_dir = '../../Data/cifar-10-png/raw_train/'
-test_o_dir = '../../Data/cifar-10-png/raw_test/'
+
+base_dir = "D:/python 11/新建文件夹/practise/pytorch" #修改为当前Data 目录所在的绝对路径
+data_dir = os.path.join(base_dir, "Data", "cifar-10-batches-py")
+train_o_dir = os.path.join( base_dir, "Data", "cifar-10-png", "raw_train")
+test_o_dir = os.path.join( base_dir, "Data", "cifar-10-png", "raw_test")
Train = False # 不解压训练集,仅解压测试集
# 解压缩,返回解压后的字典
def unpickle(file):
- fo = open(file, 'rb')
- dict_ = pickle.load(fo, encoding='bytes')
- fo.close()
+ with open(file, 'rb') as fo:
+ dict_ = pickle.load(fo, encoding='bytes')
return dict_
def my_mkdir(my_dir):
@@ -30,7 +31,7 @@ def my_mkdir(my_dir):
if __name__ == '__main__':
if Train:
for j in range(1, 6):
- data_path = data_dir + "data_batch_" + str(j) # data_batch_12345
+ data_path = os.path.join(data_dir, "data_batch_" + str(j)) # data_batch_12345
train_data = unpickle(data_path)
print(data_path + " is loading...")
@@ -44,13 +45,13 @@ def my_mkdir(my_dir):
img_name = label_num + '_' + str(i + (j - 1)*10000) + '.png'
img_path = os.path.join(o_dir, img_name)
- imsave(img_path, img)
+ imwrite(img_path, img)
print(data_path + " loaded.")
print("test_batch is loading...")
# 生成测试集图片
- test_data_path = data_dir + "test_batch"
+ test_data_path = os.path.join(data_dir, "test_batch")
test_data = unpickle(test_data_path)
for i in range(0, 10000):
img = np.reshape(test_data[b'data'][i], (3, 32, 32))
@@ -62,6 +63,6 @@ def my_mkdir(my_dir):
img_name = label_num + '_' + str(i) + '.png'
img_path = os.path.join(o_dir, img_name)
- imsave(img_path, img)
+ imwrite(img_path, img)
print("test_batch loaded.")
diff --git a/Code/1_data_prepare/1_2_split_dataset.py b/Code/1_data_prepare/1_2_split_dataset.py
index 21db34e7..1ca426bd 100644
--- a/Code/1_data_prepare/1_2_split_dataset.py
+++ b/Code/1_data_prepare/1_2_split_dataset.py
@@ -8,10 +8,10 @@
import random
import shutil
-dataset_dir = '../../Data/cifar-10-png/raw_test/'
-train_dir = '../../Data/train/'
-valid_dir = '../../Data/valid/'
-test_dir = '../../Data/test/'
+dataset_dir = os.path.join("..", "..", "Data", "cifar-10-png", "raw_test")
+train_dir = os.path.join("..", "..", "Data", "train")
+valid_dir = os.path.join("..", "..", "Data", "valid")
+test_dir = os.path.join("..", "..", "Data", "test")
train_per = 0.8
valid_per = 0.1
@@ -27,7 +27,7 @@ def makedir(new_dir):
for root, dirs, files in os.walk(dataset_dir):
for sDir in dirs:
- imgs_list = glob.glob(os.path.join(root, sDir)+'/*.png')
+ imgs_list = glob.glob(os.path.join(root, sDir, '*.png'))
random.seed(666)
random.shuffle(imgs_list)
imgs_num = len(imgs_list)
@@ -37,14 +37,14 @@ def makedir(new_dir):
for i in range(imgs_num):
if i < train_point:
- out_dir = train_dir + sDir + '/'
+ out_dir = os.path.join(train_dir, sDir)
elif i < valid_point:
- out_dir = valid_dir + sDir + '/'
+ out_dir = os.path.join(valid_dir, sDir)
else:
- out_dir = test_dir + sDir + '/'
+ out_dir = os.path.join(test_dir, sDir)
makedir(out_dir)
- out_path = out_dir + os.path.split(imgs_list[i])[-1]
+ out_path = os.path.join(out_dir, os.path.split(imgs_list[i])[-1])
shutil.copy(imgs_list[i], out_path)
print('Class:{}, train:{}, valid:{}, test:{}'.format(sDir, train_point, valid_point-train_point, imgs_num-valid_point))
diff --git a/Code/1_data_prepare/1_3_generate_txt.py b/Code/1_data_prepare/1_3_generate_txt.py
index c588b72e..057f0d6e 100644
--- a/Code/1_data_prepare/1_3_generate_txt.py
+++ b/Code/1_data_prepare/1_3_generate_txt.py
@@ -4,11 +4,11 @@
为数据集生成对应的txt文件
'''
-train_txt_path = '../../Data/train.txt'
-train_dir = '../../Data/train/'
+train_txt_path = os.path.join("..", "..", "Data", "train.txt")
+train_dir = os.path.join("..", "..", "Data", "train")
-valid_txt_path = '../../Data/valid.txt'
-valid_dir = '../../Data/valid/'
+valid_txt_path = os.path.join("..", "..", "Data", "valid.txt")
+valid_dir = os.path.join("..", "..", "Data", "valid")
def gen_txt(txt_path, img_dir):
@@ -30,4 +30,5 @@ def gen_txt(txt_path, img_dir):
if __name__ == '__main__':
gen_txt(train_txt_path, train_dir)
- gen_txt(valid_txt_path, valid_dir)
\ No newline at end of file
+ gen_txt(valid_txt_path, valid_dir)
+
diff --git a/Code/1_data_prepare/1_5_compute_mean.py b/Code/1_data_prepare/1_5_compute_mean.py
index 28cb90e9..721a7d95 100644
--- a/Code/1_data_prepare/1_5_compute_mean.py
+++ b/Code/1_data_prepare/1_5_compute_mean.py
@@ -3,6 +3,7 @@
import numpy as np
import cv2
import random
+import os
"""
随机挑选CNum张图片,进行按通道计算均值mean和标准差std
@@ -10,7 +11,7 @@
"""
-train_txt_path = '../../Data/train.txt'
+train_txt_path = os.path.join("..", "..", "Data/train.txt")
CNum = 2000 # 挑选多少图片进行计算
diff --git a/Code/2_model/2_finetune.py b/Code/2_model/2_finetune.py
index 90407e8f..c6307c4c 100644
--- a/Code/2_model/2_finetune.py
+++ b/Code/2_model/2_finetune.py
@@ -14,8 +14,8 @@
from utils.utils import MyDataset, validate, show_confMat
from datetime import datetime
-train_txt_path = '../../Data/train.txt'
-valid_txt_path = '../../Data/valid.txt'
+train_txt_path = os.path.join("..", "..", "Data", "train.txt")
+valid_txt_path = os.path.join("..", "..", "Data", "valid.txt")
classes_name = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
@@ -25,7 +25,7 @@
max_epoch = 1
# log
-result_dir = '../../Result/'
+result_dir = os.path.join("..", "..", "Result")
now_time = datetime.now()
time_str = datetime.strftime(now_time, '%m-%d_%H-%M-%S')
diff --git a/Code/4_viewer/1_tensorboardX_demo.py b/Code/4_viewer/1_tensorboardX_demo.py
index 9da8523b..acb60ba4 100644
--- a/Code/4_viewer/1_tensorboardX_demo.py
+++ b/Code/4_viewer/1_tensorboardX_demo.py
@@ -1,4 +1,5 @@
# coding: utf-8
+import os
import torch
import torchvision.utils as vutils
import numpy as np
@@ -7,7 +8,7 @@
from tensorboardX import SummaryWriter
resnet18 = models.resnet18(False)
-writer = SummaryWriter('../../Result/runs')
+writer = SummaryWriter(os.path.join("..", "..", "Result", "runs"))
sample_rate = 44100
freqs = [262, 294, 330, 349, 392, 440, 440, 440, 440, 440, 440]
@@ -23,10 +24,10 @@
s1 = torch.rand(1) # value to keep
s2 = torch.rand(1)
# data grouping by `slash`
- writer.add_scalar('data/scalar_systemtime', s1[0], n_iter)
+ writer.add_scalar(os.path.join("data", "scalar_systemtime"), s1[0], n_iter)
# data grouping by `slash`
- writer.add_scalar('data/scalar_customtime', s1[0], n_iter, walltime=n_iter)
- writer.add_scalars('data/scalar_group', {"xsinx": n_iter * np.sin(n_iter),
+ writer.add_scalar(os.path.join("data", "scalar_customtime"), s1[0], n_iter, walltime=n_iter)
+ writer.add_scalars(os.path.join("data", "scalar_group"), {"xsinx": n_iter * np.sin(n_iter),
"xcosx": n_iter * np.cos(n_iter),
"arctanx": np.arctan(n_iter)}, n_iter)
x = torch.rand(32, 3, 64, 64) # output from network
@@ -56,15 +57,15 @@
precision,
recall, n_iter)
# export scalar data to JSON for external processing
-writer.export_scalars_to_json("../../Result/all_scalars.json")
+writer.export_scalars_to_json(os.path.join("..", "..", "Result", "all_scalars.json"))
-dataset = datasets.MNIST('../../Data/mnist', train=False, download=True)
+dataset = datasets.MNIST(os.path.join("..", "..", "Data", "mnist"), train=False, download=True)
images = dataset.test_data[:100].float()
label = dataset.test_labels[:100]
features = images.view(100, 784)
writer.add_embedding(features, metadata=label, label_img=images.unsqueeze(1))
writer.add_embedding(features, global_step=1, tag='noMetadata')
-dataset = datasets.MNIST('../../Data/mnist', train=True, download=True)
+dataset = datasets.MNIST(os.path.join("..", "..", "Data", "mnist"), train=True, download=True)
images_train = dataset.train_data[:100].float()
labels_train = dataset.train_labels[:100]
features_train = images_train.view(100, 784)
diff --git a/Code/4_viewer/2_visual_weights.py b/Code/4_viewer/2_visual_weights.py
index cc1e340d..c11d9ea0 100644
--- a/Code/4_viewer/2_visual_weights.py
+++ b/Code/4_viewer/2_visual_weights.py
@@ -1,4 +1,5 @@
# coding: utf-8
+import os
import torch
import torchvision.utils as vutils
from tensorboardX import SummaryWriter
@@ -42,10 +43,10 @@ def initialize_weights(self):
net = Net() # 创建一个网络
-pretrained_dict = torch.load('../2_model/net_params.pkl')
+pretrained_dict = torch.load(os.path.join("..", "2_model", "net_params.pkl"))
net.load_state_dict(pretrained_dict)
-writer = SummaryWriter(log_dir='../../Result/visual_weights')
+writer = SummaryWriter(log_dir=os.path.join("..", "..", "Result", "visual_weights"))
params = net.state_dict()
for k, v in params.items():
if 'conv' in k and 'weight' in k:
diff --git a/Code/4_viewer/3_visual_featuremaps.py b/Code/4_viewer/3_visual_featuremaps.py
index 07ef0aaf..23b175e9 100644
--- a/Code/4_viewer/3_visual_featuremaps.py
+++ b/Code/4_viewer/3_visual_featuremaps.py
@@ -1,4 +1,5 @@
# coding: utf-8
+import os
import torch
import torchvision.utils as vutils
import numpy as np
@@ -12,9 +13,9 @@
vis_layer = 'conv1'
-log_dir = '../../Result/visual_featuremaps'
-txt_path = '../../Data/visual.txt'
-pretrained_path = '../../Data/net_params_72p.pkl'
+log_dir = os.path.join("..", "..", "Result", "visual_featuremaps")
+txt_path = os.path.join("..", "..", "Data", "visual.txt")
+pretrained_path = os.path.join("..", "..", "Data", "net_params_72p.pkl")
net = Net()
pretrained_dict = torch.load(pretrained_path)
diff --git a/Code/4_viewer/4_hist_grad_weight.py b/Code/4_viewer/4_hist_grad_weight.py
index 677df784..79b00d8e 100644
--- a/Code/4_viewer/4_hist_grad_weight.py
+++ b/Code/4_viewer/4_hist_grad_weight.py
@@ -9,13 +9,14 @@
import torch.nn as nn
import torch.optim as optim
import sys
+import os
sys.path.append("..")
from utils.utils import MyDataset, validate, show_confMat, Net
from tensorboardX import SummaryWriter
from datetime import datetime
-train_txt_path = '../../Data/train.txt'
-valid_txt_path = '../../Data/valid.txt'
+train_txt_path = os.path.join("..", "..", "Data", "train.txt")
+valid_txt_path = os.path.join("..", "..", "Data", "valid.txt")
classes_name = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
@@ -25,7 +26,7 @@
max_epoch = 1
# log
-log_dir = '../../Result/hist_grad_weight'
+log_dir = os.path.join("..", "..", "Result", "hist_grad_weight")
writer = SummaryWriter(log_dir=log_dir)
diff --git a/Code/4_viewer/5_Show_ConfMat.py b/Code/4_viewer/5_Show_ConfMat.py
index f86f2a23..a3d23b52 100644
--- a/Code/4_viewer/5_Show_ConfMat.py
+++ b/Code/4_viewer/5_Show_ConfMat.py
@@ -42,5 +42,5 @@ def show_confMat(confusion_mat, classes_name, set_name, out_dir):
if __name__ == '__main__':
- print('QQ group: {}, password: {}'.format(671103375, 2018))
+ print('QQ group: {} or {} or {} or {}, password: {}'.format(671103375, 773031536, 514974779, 854620826, 2018))
diff --git a/Code/4_viewer/6_hook_for_grad_cam.py b/Code/4_viewer/6_hook_for_grad_cam.py
new file mode 100644
index 00000000..faf4b880
--- /dev/null
+++ b/Code/4_viewer/6_hook_for_grad_cam.py
@@ -0,0 +1,184 @@
+# coding: utf-8
+"""
+通过实现Grad-CAM学习module中的forward_hook和backward_hook函数
+"""
+import cv2
+import os
+import numpy as np
+from PIL import Image
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torchvision.transforms as transforms
+
+
+class Net(nn.Module):
+ def __init__(self):
+ super(Net, self).__init__()
+ self.conv1 = nn.Conv2d(3, 6, 5)
+ self.pool1 = nn.MaxPool2d(2, 2)
+ self.conv2 = nn.Conv2d(6, 16, 5)
+ self.pool2 = nn.MaxPool2d(2, 2)
+ self.fc1 = nn.Linear(16 * 5 * 5, 120)
+ self.fc2 = nn.Linear(120, 84)
+ self.fc3 = nn.Linear(84, 10)
+
+ def forward(self, x):
+ x = self.pool1(F.relu(self.conv1(x)))
+ x = self.pool1(F.relu(self.conv2(x)))
+ x = x.view(-1, 16 * 5 * 5)
+ x = F.relu(self.fc1(x))
+ x = F.relu(self.fc2(x))
+ x = self.fc3(x)
+ return x
+
+
+def img_transform(img_in, transform):
+ """
+ 将img进行预处理,并转换成模型输入所需的形式—— B*C*H*W
+    :param img_in: np.array, the source image
+    :return: torch.Tensor of shape B*C*H*W
+ """
+ img = img_in.copy()
+ img = Image.fromarray(np.uint8(img))
+ img = transform(img)
+ img = img.unsqueeze(0) # C*H*W --> B*C*H*W
+ return img
+
+
+def img_preprocess(img_in):
+ """
+ 读取图片,转为模型可读的形式
+ :param img_in: ndarray, [H, W, C]
+    :return: torch.Tensor, B*C*H*W (normalized model input)
+ """
+ img = img_in.copy()
+ img = cv2.resize(img,(32, 32))
+ img = img[:, :, ::-1] # BGR --> RGB
+ transform = transforms.Compose([
+ transforms.ToTensor(),
+ transforms.Normalize([0.4948052, 0.48568845, 0.44682974], [0.24580306, 0.24236229, 0.2603115])
+ ])
+ img_input = img_transform(img, transform)
+ return img_input
+
+
+def backward_hook(module, grad_in, grad_out):
+ grad_block.append(grad_out[0].detach())
+
+
+def farward_hook(module, input, output):
+ fmap_block.append(output)
+
+
+def show_cam_on_image(img, mask, out_dir):
+ heatmap = cv2.applyColorMap(np.uint8(255*mask), cv2.COLORMAP_JET)
+ heatmap = np.float32(heatmap) / 255
+ cam = heatmap + np.float32(img)
+ cam = cam / np.max(cam)
+
+ path_cam_img = os.path.join(out_dir, "cam.jpg")
+ path_raw_img = os.path.join(out_dir, "raw.jpg")
+ if not os.path.exists(out_dir):
+ os.makedirs(out_dir)
+ cv2.imwrite(path_cam_img, np.uint8(255 * cam))
+ cv2.imwrite(path_raw_img, np.uint8(255 * img))
+
+
+def comp_class_vec(ouput_vec, index=None):
+ """
+ 计算类向量
+ :param ouput_vec: tensor
+ :param index: int,指定类别
+ :return: tensor
+ """
+    if index is None:
+ index = np.argmax(ouput_vec.cpu().data.numpy())
+ else:
+ index = np.array(index)
+ index = index[np.newaxis, np.newaxis]
+ index = torch.from_numpy(index)
+ one_hot = torch.zeros(1, 10).scatter_(1, index, 1)
+ one_hot.requires_grad = True
+    class_vec = torch.sum(one_hot * ouput_vec)  # scalar score of the selected class
+
+ return class_vec
+
+
+def gen_cam(feature_map, grads):
+ """
+ 依据梯度和特征图,生成cam
+ :param feature_map: np.array, in [C, H, W]
+ :param grads: np.array, in [C, H, W]
+ :return: np.array, [H, W]
+ """
+ cam = np.zeros(feature_map.shape[1:], dtype=np.float32) # cam shape (H, W)
+
+ weights = np.mean(grads, axis=(1, 2)) #
+
+ for i, w in enumerate(weights):
+ cam += w * feature_map[i, :, :]
+
+ cam = np.maximum(cam, 0)
+ cam = cv2.resize(cam, (32, 32))
+ cam -= np.min(cam)
+ cam /= np.max(cam)
+
+ return cam
+
+
+if __name__ == '__main__':
+
+ BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ path_img = os.path.join(BASE_DIR, "..", "..", "Data", "cam_img", "test_img_8.png")
+ path_net = os.path.join(BASE_DIR, "..", "..", "Data", "net_params_72p.pkl")
+ output_dir = os.path.join(BASE_DIR, "..", "..", "Result", "backward_hook_cam")
+
+ classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
+ fmap_block = list()
+ grad_block = list()
+
+ # 图片读取;网络加载
+ img = cv2.imread(path_img, 1) # H*W*C
+ img_input = img_preprocess(img)
+ net = Net()
+ net.load_state_dict(torch.load(path_net))
+
+ # 注册hook
+ net.conv2.register_forward_hook(farward_hook)
+ net.conv2.register_backward_hook(backward_hook)
+
+ # forward
+ output = net(img_input)
+ idx = np.argmax(output.cpu().data.numpy())
+ print("predict: {}".format(classes[idx]))
+
+ # backward
+ net.zero_grad()
+ class_loss = comp_class_vec(output)
+ class_loss.backward()
+
+ # 生成cam
+ grads_val = grad_block[0].cpu().data.numpy().squeeze()
+ fmap = fmap_block[0].cpu().data.numpy().squeeze()
+ cam = gen_cam(fmap, grads_val)
+
+ # 保存cam图片
+ img_show = np.float32(cv2.resize(img, (32, 32))) / 255
+ show_cam_on_image(img_show, cam, output_dir)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/Code/main_training/main.py b/Code/main_training/main.py
index 13f3cf72..8fd0c526 100644
--- a/Code/main_training/main.py
+++ b/Code/main_training/main.py
@@ -15,8 +15,8 @@
from tensorboardX import SummaryWriter
from datetime import datetime
-train_txt_path = '../../Data/train.txt'
-valid_txt_path = '../../Data/valid.txt'
+train_txt_path = os.path.join("..", "..", "Data", "train.txt")
+valid_txt_path = os.path.join("..", "..", "Data", "valid.txt")
classes_name = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
@@ -26,7 +26,7 @@
max_epoch = 1
# log
-result_dir = '../../Result/'
+result_dir = os.path.join("..", "..", "Result")
now_time = datetime.now()
time_str = datetime.strftime(now_time, '%m-%d_%H-%M-%S')
diff --git a/Data/alipay.jpg b/Data/alipay.jpg
new file mode 100644
index 00000000..4ecdf146
Binary files /dev/null and b/Data/alipay.jpg differ
diff --git a/Data/cam_img/test_img_1.png b/Data/cam_img/test_img_1.png
new file mode 100644
index 00000000..bbdb6579
Binary files /dev/null and b/Data/cam_img/test_img_1.png differ
diff --git a/Data/cam_img/test_img_2.png b/Data/cam_img/test_img_2.png
new file mode 100644
index 00000000..8da5599e
Binary files /dev/null and b/Data/cam_img/test_img_2.png differ
diff --git a/Data/cam_img/test_img_3.png b/Data/cam_img/test_img_3.png
new file mode 100644
index 00000000..bb054813
Binary files /dev/null and b/Data/cam_img/test_img_3.png differ
diff --git a/Data/cam_img/test_img_4.png b/Data/cam_img/test_img_4.png
new file mode 100644
index 00000000..a3762d04
Binary files /dev/null and b/Data/cam_img/test_img_4.png differ
diff --git a/Data/cam_img/test_img_5.png b/Data/cam_img/test_img_5.png
new file mode 100644
index 00000000..1af7f3ec
Binary files /dev/null and b/Data/cam_img/test_img_5.png differ
diff --git a/Data/cam_img/test_img_6.png b/Data/cam_img/test_img_6.png
new file mode 100644
index 00000000..9c6e7f22
Binary files /dev/null and b/Data/cam_img/test_img_6.png differ
diff --git a/Data/cam_img/test_img_7.png b/Data/cam_img/test_img_7.png
new file mode 100644
index 00000000..5a50b63e
Binary files /dev/null and b/Data/cam_img/test_img_7.png differ
diff --git a/Data/cam_img/test_img_8.png b/Data/cam_img/test_img_8.png
new file mode 100644
index 00000000..776e12e9
Binary files /dev/null and b/Data/cam_img/test_img_8.png differ
diff --git a/Data/wechat.jpg b/Data/wechat.jpg
new file mode 100644
index 00000000..79e43103
Binary files /dev/null and b/Data/wechat.jpg differ
diff --git a/readme.md b/readme.md
index b16b1fbd..50db95df 100644
--- a/readme.md
+++ b/readme.md
@@ -1,11 +1,23 @@
# Pytorch模型训练实用教程
-
+
+
+---
+
+📢:《PyTorch实用教程》(第二版)已开源,欢迎阅读:https://tingsongyu.github.io/PyTorch-Tutorial-2nd/
+
+📢:《PyTorch实用教程》(第二版)已开源,欢迎阅读:https://tingsongyu.github.io/PyTorch-Tutorial-2nd/
+
+📢:《PyTorch实用教程》(第二版)已开源,欢迎阅读:https://tingsongyu.github.io/PyTorch-Tutorial-2nd/
+
+第二版新增丰富的**深度学习应用案例**和**推理部署框架**,包括CV、NLP和LLM的十多个实战项目,以及ONNX和TensorRT的教程。
# 1.简介
+
本代码为教程——《Pytorch模型训练实用教程》中配套代码;
《Pytorch模型训练实用教程》可通过如下方式获取:
+
1. https://github.com/tensor-yu/PyTorch_Tutorial/tree/master/Data
-2. QQ群:671103375
+2. QQ群: 五群:1021300804
# 2.环境配置
@@ -24,7 +36,17 @@ https://pytorch.org/get-started/locally/
# 3.问题反馈
若发现任何问题和改进意见,请您随时联系我。
联系方式:yts3221@126.com
-读者qq群:671103375
+读者qq群:
+
+ 一群:671103375 (已满)
+
+ 二群:773031536 (已满)
+
+ 三群:514974779 (已满)
+
+ 四群:854620826(已满)
+
+ 五群:1021300804
# 4.修改记录
0.0.5:
@@ -32,3 +54,20 @@ https://pytorch.org/get-started/locally/
2. 2.3小节删除注释;
3. 修改权值初始化杂谈中的理解错误;
4. 全文代码缩进。
+
+---
+
+如果本教程对你有帮助😀😀,请作者喝杯茶吧🍵🍵🥂🥂
+
+WeChat:
Alipay:
+
+
+
+
+
+---
+
+
+## Stargazers over time
+
+[](https://starchart.cc/TingsongYu/PyTorch_Tutorial)