# -*- coding: utf-8 -*-
"""
Created on Thu May 17 21:01:53 2018

@author: Winham

=============== ECG classification based on a 1-D CNN ===============

* Requires the third-party packages numpy, h5py and scikit-learn
* Built on the TensorFlow deep-learning framework
* Usage of the functions involved can be found in the package documentation
* Introductory blog series on ECG algorithms: https://blog.csdn.net/qq_15746879
* Open-source GitHub repository: https://github.com/Aiwiscal

* The data and label files required by this script are extracted with MATLAB
* Details: https://blog.csdn.net/qq_15746879/article/details/80340958
======================================================================
"""
# Load the required packages
import time
import numpy as np
import h5py as hp
import tensorflow as tf
from sklearn.metrics import confusion_matrix

# An InteractiveSession registers itself as the default session, so .run()/.eval()
# below can be called without passing the session explicitly
sess = tf.InteractiveSession()

# Load a variable from a .mat file: decode it with h5py and convert it to a numpy array
def load_mat(path_data, name_data, dtype='float32'):
    data = hp.File(path_data, 'r')
    arrays_d = {}
    for k, v in data.items():
        arrays_d[k] = np.array(v)
    dataArr = np.array(arrays_d[name_data], dtype=dtype)
    return dataArr
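
# Note: .mat files saved in the v7.3 format are HDF5 containers, which is why h5py can read
# them. MATLAB stores arrays column-major, so a 2-D matrix read through h5py appears
# transposed relative to its MATLAB shape; this is why Data.T is applied further below.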

# Build the CNN with TensorFlow layers; the hyper-parameters differ slightly from the tutorial
def CNNnet(inputs, n_class):
    conv1 = tf.layers.conv1d(inputs=inputs, filters=4, kernel_size=31, strides=1,
                             padding='same', activation=tf.nn.relu)
    avg_pool_1 = tf.layers.average_pooling1d(inputs=conv1, pool_size=5, strides=5,
                                             padding='same')
    conv2 = tf.layers.conv1d(inputs=avg_pool_1, filters=8, kernel_size=6, strides=1,
                             padding='same', activation=tf.nn.relu)
    avg_pool_2 = tf.layers.average_pooling1d(inputs=conv2, pool_size=5, strides=5,
                                             padding='same')

    flat = tf.reshape(avg_pool_2, (-1, int(250 / 5 / 5 * 8)))

    # Return the raw (unscaled) logits: tf.nn.softmax_cross_entropy_with_logits below applies
    # the softmax itself, so applying it here as well would compute the softmax twice
    logits = tf.layers.dense(inputs=flat, units=n_class, activation=None)
    return logits

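# Shape trace through CNNnet for a single 250-sample heartbeat (batch dimension omitted):
# input (250, 1) -> conv1 (250, 4) -> avg_pool_1 (50, 4) -> conv2 (50, 8) -> avg_pool_2 (10, 8)
# -> flattened to 10 * 8 = 80 features -> dense layer with n_class outputs
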
# Randomly draw one batch of samples (indices without replacement within a batch) for training
def get_batch(train_x, train_y, batch_size):
    indices = np.random.choice(train_x.shape[0], batch_size, False)
    batch_x = train_x[indices]
    batch_y = train_y[indices]
    return batch_x, batch_y

# Set the path and file names and load the files; the heartbeats were segmented in MATLAB
# Details: https://blog.csdn.net/qq_15746879/article/details/80340671
Path = 'F:/Python files/ECGPrimer/'  # Adjust this path to your own setup
DataFile = 'Data_CNN.mat'
LabelFile = 'Label_OneHot.mat'

print("Loading data and labels...")
tic = time.time()
Data = load_mat(Path + DataFile, 'Data')
Label = load_mat(Path + LabelFile, 'Label')
Data = Data.T
Indices = np.arange(Data.shape[0])  # Shuffle the indices and split into training and testing sets
np.random.shuffle(Indices)

print("Divide training and testing set...")
train_x = Data[Indices[:10000]]
train_y = Label[Indices[:10000]]
test_x = Data[Indices[10000:]]
test_y = Label[Indices[10000:]]
toc = time.time()
print("Elapsed time is %f sec." % (toc - tic))
print("======================================")
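
# Optional sanity check (uncomment to inspect the shapes before building the graph);
# after the transpose, Data is expected to be (n_beats, 250) and Label (n_beats, 4)
# print("train_x:", train_x.shape, "train_y:", train_y.shape)
# print("test_x:", test_x.shape, "test_y:", test_y.shape)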

print("1D-CNN setup and initialize...")
tic = time.time()
x = tf.placeholder(tf.float32, [None, 250])  # Placeholders are the data entry points of the graph
x_ = tf.reshape(x, [-1, 250, 1])
y_ = tf.placeholder(tf.float32, [None, 4])

logits = CNNnet(x_, 4)

learning_rate = 0.01
batch_size = 16
maxiters = 15000

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_))
# Adam, an adaptive-learning-rate optimizer, is used for training; it can be viewed as one of
# the more sophisticated descendants of plain SGD
train_step = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
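# For comparison, a plain-SGD sketch would only swap the optimizer (hypothetical alternative,
# not used here; the fixed learning rate would likely need retuning):
# train_step = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)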
tf.global_variables_initializer().run()
toc = time.time()
print("Elapsed time is %f sec." % (toc - tic))
print("======================================")

print("1D-CNN training and testing...")
tic = time.time()
for i in range(maxiters):
    batch_x, batch_y = get_batch(train_x, train_y, batch_size)
    train_step.run(feed_dict={x: batch_x, y_: batch_y})
    if i % 500 == 0:
        loss = cost.eval(feed_dict={x: train_x, y_: train_y})
        print("Iteration %d/%d: loss %f" % (i, maxiters, loss))

# The class with the largest raw logit is also the class with the largest softmax probability,
# so argmax over the logits is sufficient for prediction
y_pred = logits.eval(feed_dict={x: test_x, y_: test_y})
y_pred = np.argmax(y_pred, axis=1)
y_true = np.argmax(test_y, axis=1)
toc = time.time()
print("Elapsed time is %f sec." % (toc - tic))

Acc = np.mean(y_pred == y_true)
Conf_Mat = confusion_matrix(y_true, y_pred)  # Confusion matrix via the dedicated sklearn function
Acc_N = Conf_Mat[0][0] / np.sum(Conf_Mat[0])  # Per-class accuracy (recall) for each beat type
Acc_V = Conf_Mat[1][1] / np.sum(Conf_Mat[1])
Acc_R = Conf_Mat[2][2] / np.sum(Conf_Mat[2])
Acc_L = Conf_Mat[3][3] / np.sum(Conf_Mat[3])
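
# Equivalent vectorised form of the four per-class accuracies above (sketch, not used below):
# per_class_acc = np.diag(Conf_Mat) / Conf_Mat.sum(axis=1)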

print('\nAccuracy=%.2f%%' % (Acc * 100))
print('Accuracy_N=%.2f%%' % (Acc_N * 100))
print('Accuracy_V=%.2f%%' % (Acc_V * 100))
print('Accuracy_R=%.2f%%' % (Acc_R * 100))
print('Accuracy_L=%.2f%%' % (Acc_L * 100))
print('\nConfusion Matrix:\n')
print(Conf_Mat)
print("======================================")