From 8bb76a9d7c288a0d3d1ab509e143978fb500d39b Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Tue, 7 Jul 2020 01:05:52 +0800 Subject: [PATCH 01/72] Update RNN.py --- Note/create/RNN.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/Note/create/RNN.py b/Note/create/RNN.py index 41b5f1348..56bcb5903 100644 --- a/Note/create/RNN.py +++ b/Note/create/RNN.py @@ -114,7 +114,7 @@ def lstm_weight_x(shape,mean=0,stddev=0.07,dtype=tf.float32,name=None): ig_weight_x=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[1]) og_weight_x=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[2]) cltm_weight_x=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[3]) - return fg_weight_x,ig_weight_x,og_weight_x,cltm_weight_x + return [fg_weight_x,ig_weight_x,og_weight_x,cltm_weight_x] def lstm_weight_h(shape,mean=0,stddev=0.07,dtype=tf.float32,name=None): @@ -128,7 +128,7 @@ def lstm_weight_h(shape,mean=0,stddev=0.07,dtype=tf.float32,name=None): ig_weight_h=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[1]) og_weight_h=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[2]) cltm_weight_h=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[3]) - return fg_weight_h,ig_weight_h,og_weight_h,cltm_weight_h + return [fg_weight_h,ig_weight_h,og_weight_h,cltm_weight_h] def lstm_bias(shape,mean=0,stddev=0.07,dtype=tf.float32,name=None): @@ -142,7 +142,7 @@ def lstm_bias(shape,mean=0,stddev=0.07,dtype=tf.float32,name=None): ig_bias=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[1]) og_bias=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[2]) cltm_bias=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[3]) - return fg_bias,ig_bias,og_bias,cltm_bias + return [fg_bias,ig_bias,og_bias,cltm_bias] def gru_weight_x(shape,mean=0,stddev=0.07,dtype=tf.float32,name=None): @@ -154,7 +154,7 @@ def gru_weight_x(shape,mean=0,stddev=0.07,dtype=tf.float32,name=None): ug_weight_x=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[0]) rg_weight_x=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[1]) cltm_weight_x=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[3]) - return ug_weight_x,rg_weight_x,cltm_weight_x + return [ug_weight_x,rg_weight_x,cltm_weight_x] def gru_weight_h(shape,mean=0,stddev=0.07,dtype=tf.float32,name=None): @@ -166,7 +166,7 @@ def gru_weight_h(shape,mean=0,stddev=0.07,dtype=tf.float32,name=None): ug_weight_h=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[0]) rg_weight_h=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[1]) cltm_weight_h=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[3]) - return ug_weight_h,rg_weight_h,cltm_weight_h + return [ug_weight_h,rg_weight_h,cltm_weight_h] def gru_bias(shape,mean=0,stddev=0.07,dtype=tf.float32,name=None): @@ -178,7 +178,7 @@ def gru_bias(shape,mean=0,stddev=0.07,dtype=tf.float32,name=None): ug_bias=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[0]) 
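# A minimal sketch of what PATCH 01's list returns enable (TF 1.x; the shape and
# the gate names below are hypothetical, not taken from the repo): wrapping the
# gate parameters in a list keeps the group mutable and extensible as a single
# object, where the old tuple return only supported positional unpacking.
params=lstm_weight_x(shape=[128,512],name=['fg_x','ig_x','og_x','cltm_x'])
params[0]=tf.Variable(tf.zeros([128,512]),name='fg_x')  # item assignment works on a list, raises TypeError on a tuple
params.append(tf.Variable(tf.zeros([128,512])))         # so does extending the group in place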
rg_bias=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[1]) cltm_bias=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[3]) - return ug_bias,rg_bias,cltm_bias + return [ug_bias,rg_bias,cltm_bias] def m_relugru_weight_x(shape,mean=0,stddev=0.07,dtype=tf.float32,name=None): @@ -190,7 +190,7 @@ def m_relugru_weight_x(shape,mean=0,stddev=0.07,dtype=tf.float32,name=None): ug_weight_x=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[0]) rg_weight_x=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[1]) cltm_weight_x=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[3]) - return ug_weight_x,rg_weight_x,cltm_weight_x + return [ug_weight_x,rg_weight_x,cltm_weight_x] def m_relugru_weight_h(shape,mean=0,stddev=0.07,dtype=tf.float32,name=None): @@ -202,7 +202,7 @@ def m_relugru_weight_h(shape,mean=0,stddev=0.07,dtype=tf.float32,name=None): ug_weight_h=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[0]) rg_weight_h=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[1]) cltm_weight_h=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[3]) - return ug_weight_h,rg_weight_h,cltm_weight_h + return [ug_weight_h,rg_weight_h,cltm_weight_h] def m_relugru_bias(shape,mean=0,stddev=0.07,dtype=tf.float32,name=None): @@ -214,7 +214,7 @@ def m_relugru_bias(shape,mean=0,stddev=0.07,dtype=tf.float32,name=None): ug_bias=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[0]) rg_bias=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[1]) cltm_bias=tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=dtype),name=name[3]) - return ug_bias,rg_bias,cltm_bias + return [ug_bias,rg_bias,cltm_bias] def word_emb(data,cword_weight): From ccefefb414ac90179b6b7d36a8be5e31debbc599 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 9 Jul 2020 18:50:26 +0800 Subject: [PATCH 02/72] Update FNN.py --- Note/nn/FNN/FNN.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Note/nn/FNN/FNN.py b/Note/nn/FNN/FNN.py index 3f8ec818f..e33dc2b09 100644 --- a/Note/nn/FNN/FNN.py +++ b/Note/nn/FNN/FNN.py @@ -49,9 +49,9 @@ def __init__(self,train_data=None,train_labels=None): self.shape0=train_data.shape[0] self.data_shape=train_data.shape self.labels_shape=train_labels.shape - self.data=tf.placeholder(dtype=train_data.dtype,shape=[None,self.data_shape[1]],name='data') + self.data=tf.placeholder(dtype=train_data.dtype,shape=[None,None],name='data') if len(self.labels_shape)==2: - self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,self.labels_shape[1]],name='labels') + self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,None],name='labels') else: self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None],name='labels') self.data_dtype=train_data.dtype @@ -761,9 +761,9 @@ def restore(self,model_path): self.labels_shape=pickle.load(input_file) self.graph=tf.Graph() with self.graph.as_default(): - self.data=tf.placeholder(dtype=self.data_dtype,shape=[None,self.data_shape[1]],name='data') + self.data=tf.placeholder(dtype=self.data_dtype,shape=[None,None],name='data') if len(self.labels_shape)==2: - 
self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,self.labels_shape[1]],name='labels') + self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,None],name='labels') else: self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None],name='labels') self.hidden_layers=pickle.load(input_file) From a69fc2ede48a9e93e44aafdf5eaea81231feaa5c Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 9 Jul 2020 18:52:56 +0800 Subject: [PATCH 03/72] Update CNN.py --- Note/nn/CNN/CNN.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Note/nn/CNN/CNN.py b/Note/nn/CNN/CNN.py index de4095c07..f220d7c39 100644 --- a/Note/nn/CNN/CNN.py +++ b/Note/nn/CNN/CNN.py @@ -50,8 +50,8 @@ def __init__(self,train_data=None,train_labels=None): self.shape0=train_data.shape[0] self.data_shape=train_data.shape self.labels_shape=train_labels.shape - self.data=tf.placeholder(dtype=train_data.dtype,shape=[None,self.data_shape[1],self.data_shape[2],self.data_shape[3]],name='data') - self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,self.labels_shape[1]],name='labels') + self.data=tf.placeholder(dtype=train_data.dtype,shape=[None,None,None,None],name='data') + self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,None],name='labels') self.data_dtype=train_data.dtype self.labels_dtype=train_labels.dtype self.conv=[] @@ -941,8 +941,8 @@ def restore(self,model_path): self.labels_shape=pickle.load(input_file) self.graph=tf.Graph() with self.graph.as_default(): - self.data=tf.placeholder(dtype=self.data_dtype,shape=[None,self.data_shape[1],self.data_shape[2],self.data_shape[3]],name='data') - self.labels=tf.placeholder(dtype=self.labels_dtype.dtype,shape=[None,self.labels_shape[1]],name='labels') + self.data=tf.placeholder(dtype=self.data_dtype,shape=[None,None,None,None],name='data') + self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,None],name='labels') self.conv=pickle.load(input_file) self.max_pool=pickle.load(input_file) self.avg_pool=pickle.load(input_file) From 76de6856238432f8bf007b4d497b0493cbaa54da Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 9 Jul 2020 18:53:48 +0800 Subject: [PATCH 04/72] Update GRU.py --- Note/nn/RNN/GRU.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Note/nn/RNN/GRU.py b/Note/nn/RNN/GRU.py index a131e20f3..3766152f8 100644 --- a/Note/nn/RNN/GRU.py +++ b/Note/nn/RNN/GRU.py @@ -49,11 +49,11 @@ def __init__(self,train_data=None,train_labels=None): self.shape0=train_data.shape[0] self.data_shape=train_data.shape self.labels_shape=train_labels.shape - self.data=tf.placeholder(dtype=train_data.dtype,shape=[None,self.data_shape[1],self.data_shape[2]],name='data') + self.data=tf.placeholder(dtype=train_data.dtype,shape=[None,None,None],name='data') if len(self.labels_shape)==3: - self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,None,self.labels_shape[2]],name='labels') + self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,None,None],name='labels') elif len(self.labels_shape)==2: - self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,self.labels_shape[1]],name='labels') + self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,None],name='labels') self.train_data_dtype=train_data.dtype self.train_labels_dtype=np.int32 self.hidden=None @@ -1087,11 +1087,11 @@ def restore(self,model_path):
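# A self-contained sketch (TF 1.x semantics via tf.compat.v1; the sizes are
# invented) of what the [None,...] placeholder shapes in PATCHES 02-07 buy:
# one restored graph now accepts inputs of any batch/step/feature size, at the
# cost of TensorFlow's static shape checking.
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
data=tf.placeholder(dtype=tf.float32,shape=[None,None,None],name='data')
with tf.Session() as sess:
    print(sess.run(tf.shape(data),feed_dict={data:np.zeros([2,5,8])}))  # [2 5 8]
    print(sess.run(tf.shape(data),feed_dict={data:np.zeros([3,9,4])}))  # [3 9 4]; shape=[None,5,8] would reject this feed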
self.labels_shape=pickle.load(input_file) self.graph=tf.Graph() with self.graph.as_default(): - self.data=tf.placeholder(dtype=self.data_dtype,shape=[None,self.data_shape[1],self.data_shape[2]],name='data') + self.data=tf.placeholder(dtype=self.data_dtype,shape=[None,None,None],name='data') if len(self.labels_shape)==3: - self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,None,self.labels_shape[2]],name='labels') + self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,None,None],name='labels') elif len(self.labels_shape)==2: - self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,self.labels_shape[1]],name='labels') + self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,None],name='labels') self.hidden=pickle.load(input_file) self.pattern=pickle.load(input_file) self.predicate=pickle.load(input_file) From 76559a8174049672bbccdfa826072b0c768b6d0e Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 9 Jul 2020 18:54:34 +0800 Subject: [PATCH 05/72] Update LSTM.py --- Note/nn/RNN/LSTM.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Note/nn/RNN/LSTM.py b/Note/nn/RNN/LSTM.py index 141112a22..9b647b696 100644 --- a/Note/nn/RNN/LSTM.py +++ b/Note/nn/RNN/LSTM.py @@ -49,11 +49,11 @@ def __init__(self,train_data=None,train_labels=None): self.shape0=train_data.shape[0] self.data_shape=train_data.shape self.labels_shape=train_labels.shape - self.data=tf.placeholder(dtype=train_data.dtype,shape=[None,self.data_shape[1],self.data_shape[2]],name='data') + self.data=tf.placeholder(dtype=train_data.dtype,shape=[None,None,None],name='data') if len(self.labels_shape)==3: - self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,None,self.labels_shape[2]],name='labels') + self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,None,None],name='labels') elif len(self.labels_shape)==2: - self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,self.labels_shape[1]],name='labels') + self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,None],name='labels') self.train_data_dtype=train_data.dtype self.train_labels_dtype=np.int32 self.hidden=None @@ -1211,11 +1211,11 @@ def restore(self,model_path): self.labels_shape=pickle.load(input_file) self.graph=tf.Graph() with self.graph.as_default(): - self.data=tf.placeholder(dtype=self.data_dtype,shape=[None,self.data_shape[1],self.data_shape[2]],name='data') + self.data=tf.placeholder(dtype=self.data_dtype,shape=[None,None,None],name='data') if len(self.labels_shape)==3: - self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,None,self.labels_shape[2]],name='labels') + self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,None,None],name='labels') elif len(self.labels_shape)==2: - self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,self.labels_shape[1]],name='labels') + self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,None],name='labels') self.hidden=pickle.load(input_file) self.pattern=pickle.load(input_file) self.predicate=pickle.load(input_file) From ae6997a6a004d7ab43f6a7393c99c1e564eac1ae Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 9 Jul 2020 18:55:15 +0800 Subject: [PATCH 06/72] Update M_reluGRU.py --- Note/nn/RNN/M_reluGRU.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Note/nn/RNN/M_reluGRU.py b/Note/nn/RNN/M_reluGRU.py index f4731aeda..2e17960d2 
100644 --- a/Note/nn/RNN/M_reluGRU.py +++ b/Note/nn/RNN/M_reluGRU.py @@ -49,11 +49,11 @@ def __init__(self,train_data=None,train_labels=None): self.shape0=train_data.shape[0] self.data_shape=train_data.shape self.labels_shape=train_labels.shape - self.data=tf.placeholder(dtype=train_data.dtype,shape=[None,self.data_shape[1],self.data_shape[2]],name='data') + self.data=tf.placeholder(dtype=train_data.dtype,shape=[None,None,None],name='data') if len(self.labels_shape)==3: - self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,None,self.labels_shape[2]],name='labels') + self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,None,None],name='labels') elif len(self.labels_shape)==2: - self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,self.labels_shape[1]],name='labels') + self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,None],name='labels') self.train_data_dtype=train_data.dtype self.train_labels_dtype=np.int32 self.hidden=None @@ -1020,11 +1020,11 @@ def restore(self,model_path): self.labels_shape=pickle.load(input_file) self.graph=tf.Graph() with self.graph.as_default(): - self.data=tf.placeholder(dtype=self.data_dtype,shape=[None,self.data_shape[1],self.data_shape[2]],name='data') + self.data=tf.placeholder(dtype=self.data_dtype,shape=[None,None,None],name='data') if len(self.labels_shape)==3: - self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,None,self.labels_shape[2]],name='labels') + self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,None,None],name='labels') elif len(self.labels_shape)==2: - self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,self.labels_shape[1]],name='labels') + self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,None],name='labels') self.hidden=pickle.load(input_file) self.pattern=pickle.load(input_file) self.predicate=pickle.load(input_file) From 23fdb42341a2630b09dd179c6eccb59e4eb446b3 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 9 Jul 2020 18:56:03 +0800 Subject: [PATCH 07/72] Update RNN.py --- Note/nn/RNN/RNN.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Note/nn/RNN/RNN.py b/Note/nn/RNN/RNN.py index 57f1ca299..15419dafe 100644 --- a/Note/nn/RNN/RNN.py +++ b/Note/nn/RNN/RNN.py @@ -49,11 +49,11 @@ def __init__(self,train_data=None,train_labels=None): self.shape0=train_data.shape[0] self.data_shape=train_data.shape self.labels_shape=train_labels.shape - self.data=tf.placeholder(dtype=train_data.dtype,shape=[None,self.data_shape[1],self.data_shape[2]],name='data') + self.data=tf.placeholder(dtype=train_data.dtype,shape=[None,None,None],name='data') if len(self.labels_shape)==3: - self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,None,self.labels_shape[2]],name='labels') + self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,None,None],name='labels') elif len(self.labels_shape)==2: - self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,self.labels_shape[1]],name='labels') + self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,None],name='labels') self.train_data_dtype=train_data.dtype self.train_labels_dtype=np.int32 self.hidden=None @@ -679,11 +679,11 @@ def restore(self,model_path): self.labels_shape=pickle.load(input_file) self.graph=tf.Graph() with self.graph.as_default(): - self.data=tf.placeholder(dtype=self.data_dtype,shape=[None,self.data_shape[1],self.data_shape[2]],name='data') + 
self.data=tf.placeholder(dtype=self.data_dtype,shape=[None,None,None],name='data') if len(self.labels_shape)==3: - self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,None,self.labels_shape[2]],name='labels) + self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,None,None],name='labels) elif len(self.labels_shape)==2: - self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,self.labels_shape[1]],name='labels) + self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,None],name='labels) self.hidden=pickle.load(input_file) self.batch=pickle.load(input_file) self.epoch=pickle.load(input_file) From e6d2727df0ee5890e1065e8d44177d7d335cfbf9 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 15 Jul 2020 01:32:30 +0800 Subject: [PATCH 08/72] Update Note Architecture.py --- Note/create/Note Architecture.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Note/create/Note Architecture.py b/Note/create/Note Architecture.py index d0f043fc2..7e412acd8 100644 --- a/Note/create/Note Architecture.py +++ b/Note/create/Note Architecture.py @@ -66,11 +66,11 @@ def __init__(): self.use_cpu_gpu='/gpu:0' - def weight_init(self,shape,mean,stddev,name): + def weight_init(self,shape,mean,stddev,name=None): return tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=self.dtype),name=name) - def bias_init(self,shape,mean,stddev,name): + def bias_init(self,shape,mean,stddev,name=None): return tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=self.dtype),name=name) From e86f60dae7a706bd29ae19952d1b223045410d46 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 15 Jul 2020 21:13:25 +0800 Subject: [PATCH 09/72] Update Note Architecture.py --- Note/create/Note Architecture.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/Note/create/Note Architecture.py b/Note/create/Note Architecture.py index 7e412acd8..f1b43cb4e 100644 --- a/Note/create/Note Architecture.py +++ b/Note/create/Note Architecture.py @@ -196,9 +196,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,acc=True,train_su total_acc+=batch_acc loss=total_loss/batches train_acc=total_acc/batches - self.train_loss_list.append(float(loss)) + self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float16) + self.train_loss=self.train_loss.astype(np.float32) if acc==True: self.train_accuracy_list.append(float(train_acc)) self.train_accuracy=train_acc @@ -212,9 +212,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,acc=True,train_su loss=sess.run(train_loss,feed_dict=feed_dict) else: loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) - self.train_loss_list.append(float(loss)) + self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float16) + self.train_loss=self.train_loss.astype(np.float32) if acc==True: accuracy=sess.run(train_accuracy,feed_dict=feed_dict) self.train_accuracy_list.append(float(accuracy)) @@ -243,12 +243,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,acc=True,train_su train_summary=sess.run(train_merging,feed_dict=feed_dict) train_writer.add_summary(train_summary,i) print() - print('last loss:{0}'.format(self.train_loss)) + print('last loss:{0:.6f}'.format(self.train_loss)) if acc==True: - if 
len(self.labels_shape)==2: - print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - else: - print('accuracy:{0:.3f}'.format(self.train_accuracy)) + print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + if train_summary_path!=None: train_writer.close() if continue_train==True: From 5ce702d42477fa22370e15a2efeb9417efa737c0 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 15 Jul 2020 21:13:52 +0800 Subject: [PATCH 10/72] Update FNN.py --- Note/nn/FNN/FNN.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/Note/nn/FNN/FNN.py b/Note/nn/FNN/FNN.py index e33dc2b09..7c8cf884e 100644 --- a/Note/nn/FNN/FNN.py +++ b/Note/nn/FNN/FNN.py @@ -400,9 +400,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N total_acc+=batch_acc loss=total_loss/batches train_acc=total_acc/batches - self.train_loss_list.append(float(loss)) + self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float16) + self.train_loss=self.train_loss.astype(np.float32) if acc==True: self.train_accuracy_list.append(float(train_acc)) self.train_accuracy=train_acc @@ -417,9 +417,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N loss=sess.run(train_loss,feed_dict=feed_dict) else: loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) - self.train_loss_list.append(float(loss)) + self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float16) + self.train_loss=self.train_loss.astype(np.float32) if acc==True: accuracy=sess.run(train_accuracy,feed_dict=feed_dict) self.train_accuracy_list.append(float(accuracy)) @@ -448,7 +448,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N train_summary=sess.run(train_merging,feed_dict=feed_dict) train_writer.add_summary(train_summary,i) print() - print('last loss:{0}'.format(self.train_loss)) + print('last loss:{0:.6f}'.format(self.train_loss)) if acc==True: if len(self.labels_shape)==2: print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) else: print('accuracy:{0:.3f}'.format(self.train_accuracy)) @@ -720,11 +720,11 @@ def save(self,model_path,i=None,one=True): output_file=open(model_path+'-{0}.dat'.format(i+1),'wb') pickle.dump(self.last_weight,output_file) pickle.dump(self.last_bias,output_file) - pickle.dump(self.data_dtype,output_file) - pickle.dump(self.labels_dtype,output_file) pickle.dump(self.shape0,output_file) pickle.dump(self.data_shape,output_file) pickle.dump(self.labels_shape,output_file) + pickle.dump(self.data_dtype,output_file) + pickle.dump(self.labels_dtype,output_file) pickle.dump(self.hidden_layers,output_file) pickle.dump(self.function,output_file) pickle.dump(self.batch,output_file) @@ -754,11 +754,11 @@ def restore(self,model_path): input_file=open(model_path,'rb') self.last_weight=pickle.load(input_file) self.last_bias=pickle.load(input_file) - self.data_dtype=pickle.load(input_file) - self.labels_dtype=pickle.load(input_file) self.shape0=pickle.load(input_file) self.data_shape=pickle.load(input_file) self.labels_shape=pickle.load(input_file) + self.data_dtype=pickle.load(input_file) + self.labels_dtype=pickle.load(input_file) self.graph=tf.Graph() with self.graph.as_default(): self.data=tf.placeholder(dtype=self.data_dtype,shape=[None,None],name='data') From b86fc907f2fa4140b21e143154dc606531b768b6 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 15 Jul 2020 21:14:14 +0800 Subject:
[PATCH 11/72] Update CNN.py --- Note/nn/CNN/CNN.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Note/nn/CNN/CNN.py b/Note/nn/CNN/CNN.py index f220d7c39..7be45f0d6 100644 --- a/Note/nn/CNN/CNN.py +++ b/Note/nn/CNN/CNN.py @@ -514,9 +514,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N total_acc+=batch_acc loss=total_loss/batches train_acc=total_acc/batches - self.train_loss_list.append(loss) + self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float16) + self.train_loss=self.train_loss.astype(np.float32) if acc==True: self.train_accuracy_list.append(train_acc) self.train_accuracy=train_acc @@ -531,9 +531,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N loss=sess.run(train_loss,feed_dict=feed_dict) else: loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) - self.train_loss_list.append(loss) + self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float16) + self.train_loss=self.train_loss.astype(np.float32) if acc==True: accuracy=sess.run(train_accuracy,feed_dict=feed_dict) self.train_accuracy_list.append(accuracy) @@ -562,7 +562,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N train_summary=sess.run(train_merging,feed_dict=feed_dict) train_writer.add_summary(train_summary,i) print() - print('last loss:{0}'.format(self.train_loss)) + print('last loss:{0:.6f}'.format(self.train_loss)) if acc==True: print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) if train_summary_path!=None: From a5ece8058bc7a82ddbdd9088d2cbd8f4925fa7ff Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 15 Jul 2020 21:14:36 +0800 Subject: [PATCH 12/72] Update GRU.py --- Note/nn/RNN/GRU.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Note/nn/RNN/GRU.py b/Note/nn/RNN/GRU.py index 3766152f8..6aa4505e1 100644 --- a/Note/nn/RNN/GRU.py +++ b/Note/nn/RNN/GRU.py @@ -647,9 +647,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, total_acc+=batch_acc loss=total_loss/batches train_acc=total_acc/batches - self.train_loss_list.append(float(loss)) + self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float16) + self.train_loss=self.train_loss.astype(np.float32) if acc==True: self.train_accuracy_list.append(float(train_acc)) self.train_accuracy=train_acc @@ -664,9 +664,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, loss=sess.run(train_loss,feed_dict=feed_dict) else: loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) - self.train_loss_list.append(float(loss)) + self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float16) + self.train_loss=self.train_loss.astype(np.float32) if acc==True: accuracy=sess.run(train_accuracy,feed_dict=feed_dict) self.train_accuracy_list.append(float(accuracy)) @@ -695,7 +695,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, train_summary=sess.run(train_merging,feed_dict=feed_dict) train_writer.add_summary(train_summary,i) print() - print('last loss:{0}'.format(self.train_loss)) + print('last loss:{0:.6f}'.format(self.train_loss)) if acc==True: 
print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) if train_summary_path!=None: From 3c9995dd8256b9b2d4ef510fa8dc484631514d69 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 15 Jul 2020 21:14:56 +0800 Subject: [PATCH 13/72] Update LSTM.py --- Note/nn/RNN/LSTM.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Note/nn/RNN/LSTM.py b/Note/nn/RNN/LSTM.py index 9b647b696..3a1af65e0 100644 --- a/Note/nn/RNN/LSTM.py +++ b/Note/nn/RNN/LSTM.py @@ -736,9 +736,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, total_acc+=batch_acc loss=total_loss/batches train_acc=total_acc/batches - self.train_loss_list.append(float(loss)) + self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float16) + self.train_loss=self.train_loss.astype(np.float32) if acc==True: self.train_accuracy_list.append(float(train_acc)) self.train_accuracy=train_acc @@ -753,9 +753,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, loss=sess.run(train_loss,feed_dict=feed_dict) else: loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) - self.train_loss_list.append(float(loss)) + self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float16) + self.train_loss=self.train_loss.astype(np.float32) if acc==True: accuracy=sess.run(train_accuracy,feed_dict=feed_dict) self.train_accuracy_list.append(float(accuracy)) @@ -784,7 +784,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, train_summary=sess.run(train_merging,feed_dict=feed_dict) train_writer.add_summary(train_summary,i) print() - print('last loss:{0}'.format(self.train_loss)) + print('last loss:{0:.6f}'.format(self.train_loss)) if acc==True: print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) if train_summary_path!=None: From e091182d43a5a632e512a985635bd75c5651fbcd Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 15 Jul 2020 21:15:22 +0800 Subject: [PATCH 14/72] Update M_reluGRU.py --- Note/nn/RNN/M_reluGRU.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Note/nn/RNN/M_reluGRU.py b/Note/nn/RNN/M_reluGRU.py index 2e17960d2..5a1252d14 100644 --- a/Note/nn/RNN/M_reluGRU.py +++ b/Note/nn/RNN/M_reluGRU.py @@ -615,9 +615,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, total_acc+=batch_acc loss=total_loss/batches train_acc=total_acc/batches - self.train_loss_list.append(float(loss)) + self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float16) + self.train_loss=self.train_loss.astype(np.float32) if acc==True: self.train_accuracy_list.append(float(train_acc)) self.train_accuracy=train_acc @@ -632,9 +632,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, loss=sess.run(train_loss,feed_dict=feed_dict) else: loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) - self.train_loss_list.append(float(loss)) + self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float16) + self.train_loss=self.train_loss.astype(np.float32) if acc==True: accuracy=sess.run(train_accuracy,feed_dict={self.data:self.train_data,self.labels:self.train_labels}) 
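# A short NumPy illustration (loss value invented) of why PATCHES 09-15 switch
# the stored losses from float16 to float32 and print them with '{0:.6f}':
# float16 resolves only ~3 significant decimal digits, so nearby losses collapse.
import numpy as np
loss=0.1234567
print(np.float16(loss))                              # ~0.1235 -- the extra digits are already gone
print(np.float32(loss))                              # ~0.1234567 -- float32 keeps ~7 digits
print('last loss:{0:.6f}'.format(np.float32(loss)))  # last loss:0.123457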
self.train_accuracy_list.append(float(accuracy)) @@ -663,7 +663,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, train_summary=sess.run(train_merging,feed_dict=feed_dict) train_writer.add_summary(train_summary,i) print() - print('last loss:{0}'.format(self.train_loss)) + print('last loss:{0:.6f}'.format(self.train_loss)) if acc==True: print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) if train_summary_path!=None: From e4869090df4fba52cbf5fd2e0563d60b43498d93 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 15 Jul 2020 21:15:39 +0800 Subject: [PATCH 15/72] Update RNN.py --- Note/nn/RNN/RNN.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/Note/nn/RNN/RNN.py b/Note/nn/RNN/RNN.py index 15419dafe..f1181ed8a 100644 --- a/Note/nn/RNN/RNN.py +++ b/Note/nn/RNN/RNN.py @@ -339,9 +339,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, total_acc+=batch_acc loss=total_loss/batches train_acc=total_acc/batches - self.train_loss_list.append(float(loss)) + self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float16) + self.train_loss=self.train_loss.astype(np.float32) if acc==True: self.train_accuracy_list.append(float(train_acc)) self.train_accuracy=train_acc @@ -356,9 +356,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, loss=sess.run(train_loss,feed_dict=feed_dict) else: loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) - self.train_loss_list.append(float(loss)) + self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float16) + self.train_loss=self.train_loss.astype(np.float32) if acc==True: accuracy=sess.run(train_accuracy,feed_dict=feed_dict) self.train_accuracy_list.append(float(accuracy)) @@ -387,7 +387,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, train_summary=sess.run(train_merging,feed_dict=feed_dict) train_writer.add_summary(train_summary,i) print() - print('last loss:{0}'.format(self.train_loss)) + print('last loss:{0:.6f}'.format(self.train_loss)) if acc==True: print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) if train_summary_path!=None: @@ -681,9 +681,9 @@ def restore(self,model_path): with self.graph.as_default(): self.data=tf.placeholder(dtype=self.data_dtype,shape=[None,None,None],name='data') if len(self.labels_shape)==3: - self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,None,None],name='labels) + self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,None,None],name='labels') elif len(self.labels_shape)==2: - self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,None],name='labels) + self.labels=tf.placeholder(dtype=self.labels_dtype,shape=[None,None],name='labels') self.hidden=pickle.load(input_file) self.batch=pickle.load(input_file) self.epoch=pickle.load(input_file) From 2e3de049061150c730498c8203e257dd6dbd433f Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 15 Jul 2020 21:19:37 +0800 Subject: [PATCH 16/72] Update Note Architecture.py --- Note/create/Note Architecture.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Note/create/Note Architecture.py b/Note/create/Note Architecture.py index f1b43cb4e..f0bfc84da 100644 --- a/Note/create/Note Architecture.py +++ 
b/Note/create/Note Architecture.py @@ -318,14 +318,14 @@ def test(self,test_data,test_labels,batch=None): test_acc=total_test_acc/test_batches self.test_loss=test_loss self.test_accuracy=test_acc - self.test_loss=self.test_loss.astype(np.float16) + self.test_loss=self.test_loss.astype(np.float32) self.test_accuracy=self.test_accuracy.astype(np.float16) else: self.test_loss=sess.run(test_loss,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) self.test_accuracy=sess.run(test_accuracy,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) - self.test_loss=self.test_loss.astype(np.float16) + self.test_loss=self.test_loss.astype(np.float32) self.test_accuracy=self.test_accuracy.astype(np.float16) - print('test loss:{0}'.format(self.test_loss)) + print('test loss:{0:.6f}'.format(self.test_loss)) print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) sess.close() return From e719d81a7ade6cade72c791f0de5f08bd52db3f2 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 15 Jul 2020 21:24:32 +0800 Subject: [PATCH 17/72] Update FNN.py --- Note/nn/FNN/FNN.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Note/nn/FNN/FNN.py b/Note/nn/FNN/FNN.py index 7c8cf884e..c30967331 100644 --- a/Note/nn/FNN/FNN.py +++ b/Note/nn/FNN/FNN.py @@ -552,14 +552,14 @@ def test(self,test_data,test_labels,batch=None): test_acc=total_test_acc/test_batches self.test_loss=test_loss self.test_accuracy=test_acc - self.test_loss=self.test_loss.astype(np.float16) + self.test_loss=self.test_loss.astype(np.float32) self.test_accuracy=self.test_accuracy.astype(np.float16) else: self.test_loss=sess.run(test_loss,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) self.test_accuracy=sess.run(test_accuracy,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) - self.test_loss=self.test_loss.astype(np.float16) + self.test_loss=self.test_loss.astype(np.float32) self.test_accuracy=self.test_accuracy.astype(np.float16) - print('test loss:{0}'.format(self.test_loss)) + print('test loss:{0:.6f}'.format(self.test_loss)) print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) sess.close() return From 6c4d2dfc38932a6f4e4e6f16babf84cd872365ab Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 15 Jul 2020 21:25:18 +0800 Subject: [PATCH 18/72] Update CNN.py --- Note/nn/CNN/CNN.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Note/nn/CNN/CNN.py b/Note/nn/CNN/CNN.py index 7be45f0d6..cf0bc13a4 100644 --- a/Note/nn/CNN/CNN.py +++ b/Note/nn/CNN/CNN.py @@ -665,14 +665,14 @@ def test(self,test_data,test_labels,batch=None): test_acc=total_test_acc/test_batches self.test_loss=test_loss self.test_accuracy=test_acc - self.test_loss=self.test_loss.astype(np.float16) + self.test_loss=self.test_loss.astype(np.float32) self.test_accuracy=self.test_accuracy.astype(np.float16) else: self.test_loss=sess.run(test_loss,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) self.test_accuracy=sess.run(test_accuracy,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) - self.test_loss=self.test_loss.astype(np.float16) + self.test_loss=self.test_loss.astype(np.float32) self.test_accuracy=self.test_accuracy.astype(np.float16) - print('test loss:{0}'.format(self.test_loss)) + print('test 
loss:{0:.6f}'.format(self.test_loss)) print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) sess.close() return From 77b73ee580f2533dd5f7eb97da6b0e43d5d917b6 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 15 Jul 2020 21:26:00 +0800 Subject: [PATCH 19/72] Update GRU.py --- Note/nn/RNN/GRU.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Note/nn/RNN/GRU.py b/Note/nn/RNN/GRU.py index 6aa4505e1..cc3721388 100644 --- a/Note/nn/RNN/GRU.py +++ b/Note/nn/RNN/GRU.py @@ -872,14 +872,14 @@ def test(self,test_data,test_labels,batch=None): test_acc=total_test_acc/test_batches self.test_loss=test_loss self.test_accuracy=test_acc - self.test_loss=self.test_loss.astype(np.float16) + self.test_loss=self.test_loss.astype(np.float32) self.test_accuracy=self.test_accuracy.astype(np.float16) else: self.test_loss=sess.run(test_loss,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) self.test_accuracy=sess.run(test_accuracy,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) - self.test_loss=self.test_loss.astype(np.float16) + self.test_loss=self.test_loss.astype(np.float32) self.test_accuracy=self.test_accuracy.astype(np.float16) - print('test loss:{0}'.format(self.test_loss)) + print('test loss:{0:.6f}'.format(self.test_loss)) print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) sess.close() return From 56edbdcb874d224384e69e7e81e6646b9ac98893 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 15 Jul 2020 21:26:17 +0800 Subject: [PATCH 20/72] Update LSTM.py --- Note/nn/RNN/LSTM.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Note/nn/RNN/LSTM.py b/Note/nn/RNN/LSTM.py index 3a1af65e0..9ebe4029b 100644 --- a/Note/nn/RNN/LSTM.py +++ b/Note/nn/RNN/LSTM.py @@ -982,14 +982,14 @@ def test(self,test_data,test_labels,batch=None): test_acc=total_test_acc/test_batches self.test_loss=test_loss self.test_accuracy=test_acc - self.test_loss=self.test_loss.astype(np.float16) + self.test_loss=self.test_loss.astype(np.float32) self.test_accuracy=self.test_accuracy.astype(np.float16) else: self.test_loss=sess.run(test_loss,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) self.test_accuracy=sess.run(test_accuracy,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) - self.test_loss=self.test_loss.astype(np.float16) + self.test_loss=self.test_loss.astype(np.float32) self.test_accuracy=self.test_accuracy.astype(np.float16) - print('test loss:{0}'.format(self.test_loss)) + print('test loss:{0:.6f}'.format(self.test_loss)) print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) sess.close() return From eb1b3b722817d0ec430f746f9ebc1663b67252f8 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 15 Jul 2020 21:27:03 +0800 Subject: [PATCH 21/72] Update M_reluGRU.py --- Note/nn/RNN/M_reluGRU.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Note/nn/RNN/M_reluGRU.py b/Note/nn/RNN/M_reluGRU.py index 5a1252d14..d5490726a 100644 --- a/Note/nn/RNN/M_reluGRU.py +++ b/Note/nn/RNN/M_reluGRU.py @@ -819,14 +819,14 @@ def test(self,test_data,test_labels,batch=None,use_nn=True): test_acc=total_test_acc/test_batches self.test_loss=test_loss self.test_accuracy=test_acc - self.test_loss=self.test_loss.astype(np.float16) + 
self.test_loss=self.test_loss.astype(np.float32) self.test_accuracy=self.test_accuracy.astype(np.float16) else: self.test_loss=sess.run(test_loss,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) self.test_accuracy=sess.run(test_accuracy,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) - self.test_loss=self.test_loss.astype(np.float16) + self.test_loss=self.test_loss.astype(np.float32) self.test_accuracy=self.test_accuracy.astype(np.float16) - print('test loss:{0}'.format(self.test_loss)) + print('test loss:{0:.6f}'.format(self.test_loss)) print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) sess.close() return From c15b5b003df9eb438f9fac3b5ac4d1d1fad90f57 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 15 Jul 2020 21:27:17 +0800 Subject: [PATCH 22/72] Update RNN.py --- Note/nn/RNN/RNN.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Note/nn/RNN/RNN.py b/Note/nn/RNN/RNN.py index f1181ed8a..98f8c1d6b 100644 --- a/Note/nn/RNN/RNN.py +++ b/Note/nn/RNN/RNN.py @@ -518,14 +518,14 @@ def test(self,test_data,test_labels,batch=None): test_acc=total_test_acc/test_batches self.test_loss=test_loss self.test_accuracy=test_acc - self.test_loss=self.test_loss.astype(np.float16) + self.test_loss=self.test_loss.astype(np.float32) self.test_accuracy=self.test_accuracy.astype(np.float16) else: self.test_loss=sess.run(test_loss,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) self.test_accuracy=sess.run(test_accuracy,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) - self.test_loss=self.test_loss.astype(np.float16) + self.test_loss=self.test_loss.astype(np.float32) self.test_accuracy=self.test_accuracy.astype(np.float16) - print('test loss:{0}'.format(self.test_loss)) + print('test loss:{0:.6f}'.format(self.test_loss)) print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) sess.close() return From a6e424659a81c3b799318bed0fa911dede7d89f9 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 16 Jul 2020 15:05:47 +0800 Subject: [PATCH 23/72] Update FNN.py --- Note/nn/FNN/FNN.py | 995 ++++++++++++++++++++++++++------------------- 1 file changed, 577 insertions(+), 418 deletions(-) diff --git a/Note/nn/FNN/FNN.py b/Note/nn/FNN/FNN.py index c30967331..fc4831838 100644 --- a/Note/nn/FNN/FNN.py +++ b/Note/nn/FNN/FNN.py @@ -3,6 +3,7 @@ import pandas as pd import matplotlib.pyplot as plt import pickle +from keras.preprocessing.image import ImageDataGenerator import time @@ -39,7 +40,7 @@ def read_data_csv(path,dtype=None,header=None): return np.array(data,dtype=dtype) -class fnn: +class cnn: def __init__(self,train_data=None,train_labels=None): self.graph=tf.Graph() self.train_data=train_data @@ -49,22 +50,26 @@ def __init__(self,train_data=None,train_labels=None): self.shape0=train_data.shape[0] self.data_shape=train_data.shape self.labels_shape=train_labels.shape - self.data=tf.placeholder(dtype=train_data.dtype,shape=[None,None],name='data') - if len(self.labels_shape)==2: - self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,None],name='labels') - else: - self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None],name='labels') + self.data=tf.placeholder(dtype=train_data.dtype,shape=[None,None,None,None],name='data') + 
self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,None],name='labels') self.data_dtype=train_data.dtype self.labels_dtype=train_labels.dtype - self.hidden=[] - self.hidden_layers=None - self.layers=None + self.conv=[] + self.max_pool=None + self.avg_pool=None + self.fc=[] self.function=[] - self.weight=[] - self.bias=[] - self.last_weight=[] - self.last_bias=[] + self.weight_conv=[] + self.weight_fc=[] + self.bias_conv=[] + self.bias_fc=[] + self.last_weight_conv=[] + self.last_weight_fc=[] + self.last_bias_conv=[] + self.last_bias_fc=[] self.activation=[] + self.activation_fc=[] + self.flattened_len=None self.batch=None self.epoch=None self.l2=None @@ -77,7 +82,8 @@ def __init__(self,train_data=None,train_labels=None): self.train_accuracy_list=[] self.test_loss=None self.test_accuracy=None - self.continue_train=False + self.continue_train=None + self.continue_flag=None self.flag=None self.end_flag=False self.test_flag=None @@ -86,15 +92,37 @@ def __init__(self,train_data=None,train_labels=None): self.use_cpu_gpu='/gpu:0' + def data_enhance(self,rotation_range=40,width_shift_range=0.2,height_shift_range=0.2, + shear_range=0.2,zoom_range=0.2,horizontal_flip=True,fill_mode='nearest'): + datagen=ImageDataGenerator(rotation_range=rotation_range,width_shift_range=width_shift_range,height_shift_range=height_shift_range, + shear_range=shear_range,zoom_range=zoom_range,horizontal_flip=horizontal_flip,fill_mode=fill_mode) + for data in datagen.flow(self.train_data,batch_size=self.train_data.shape[0]): + self.train_data=data + break + return + + def weight_init(self,shape,mean,stddev,name): return tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=self.dtype),name=name) def bias_init(self,shape,mean,stddev,name): return tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=self.dtype),name=name) - + + + def conv_f(self,data,weight,i): + return tf.nn.conv2d(data,weight,strides=[1,self.conv[i][2],self.conv[i][2],1],padding=self.conv[i][3]) + + + def max_pool_f(self,data,i): + return tf.nn.max_pool(data,ksize=[1,self.max_pool[i][0],self.max_pool[i][0],1],strides=[1,self.max_pool[i][1],self.max_pool[i][1],1],padding=self.max_pool[i][2]) - def structure(self,hidden,function,layers=None,mean=0,stddev=0.07,dtype=np.float32): + + def avg_pool_f(self,data,i): + return tf.nn.avg_pool(data,ksize=[1,self.avg_pool[i][0],self.avg_pool[i][0],1],strides=[1,self.avg_pool[i][1],self.avg_pool[i][1],1],padding=self.avg_pool[i][2]) + + + def structure(self,conv=None,max_pool=None,avg_pool=None,fc=None,function=None,mean=0,stddev=0.07,dtype=tf.float32): with self.graph.as_default(): self.continue_train=False self.total_epoch=0 @@ -103,159 +131,264 @@ def structure(self,hidden,function,layers=None,mean=0,stddev=0.07,dtype=np.float self.test_flag=False self.train_loss_list.clear() self.train_accuracy_list.clear() - self.weight.clear() - self.bias.clear() - self.last_weight=[] - self.last_bias=[] - self.hidden=hidden + self.weight_conv.clear() + self.bias_conv.clear() + self.weight_fc.clear() + self.bias_fc.clear() + self.conv=conv + self.max_pool=max_pool + self.avg_pool=avg_pool + self.fc=fc self.function=function - self.layers=layers + self.mean=mean + self.stddev=stddev self.dtype=dtype self.time=None with tf.name_scope('parameter_initialization'): - if self.layers!=None: - self.hidden_layers=self.layers-2 - for i in range(self.hidden_layers+1): - if i==0: - 
self.weight.append(self.weight_init([self.data_shape[1],self.hidden],mean=mean,stddev=stddev,name='weight_{0}'.format(i+1))) - self.bias.append(self.bias_init([self.hidden],mean=mean,stddev=stddev,name='bias_{0}'.format(i+1))) - if i==self.hidden_layers: - if len(self.labels_shape)==2: - self.weight.append(self.weight_init([self.hidden,self.labels_shape[1]],mean=mean,stddev=stddev,name='weight_output')) - self.bias.append(self.bias_init([self.labels_shape[1]],mean=mean,stddev=stddev,name='bias_output')) - else: - self.weight.append(self.weight_init([self.hidden,1],mean=mean,stddev=stddev,name='weight_output')) - self.bias.append(self.bias_init([1],mean=mean,stddev=stddev,name='bias_output')) - elif i>0 and i0 and i<=len(self.hidden)-1: - self.weight.append(self.weight_init([self.hidden[i-1],self.hidden[i]],mean=mean,stddev=stddev,name='weight_{0}'.format(i+1))) - self.bias.append(self.bias_init([self.hidden[i]],mean=mean,stddev=stddev,name='bias_{0}'.format(i+1))) + for i in range(len(self.conv)): + if i==0: + self.weight_conv.append(self.weight_init([self.conv[i][0],self.conv[i][0],self.data_shape[3],self.conv[i][1]],mean=mean,stddev=stddev,name='conv_{0}_weight'.format(i+1))) + self.bias_conv.append(self.bias_init([self.conv[i][1]],mean=mean,stddev=stddev,name='conv_{0}_bias'.format(i+1))) + else: + self.weight_conv.append(self.weight_init([self.conv[i][0],self.conv[i][0],self.conv[i-1][1],self.conv[i][1]],mean=mean,stddev=stddev,name='conv_{0}_weight'.format(i+1))) + self.bias_conv.append(self.bias_init([self.conv[i][1]],mean=mean,stddev=stddev,name='conv_{0}_bias'.format(i+1))) return - - - def forward_propagation(self,data,dropout=None,use_nn=False): + + + def forward_propagation_fc(self,data,dropout,shape,use_nn): with self.graph.as_default(): - forward_cpu_gpu=[] - for i in range(self.hidden_layers): + for i in range(len(self.fc)): if type(self.cpu_gpu)==str: - forward_cpu_gpu.append(self.cpu_gpu) - elif len(self.cpu_gpu)!=self.hidden_layers: - forward_cpu_gpu.append(self.cpu_gpu[0]) + self.forward_cpu_gpu[1].append(self.cpu_gpu) + elif len(self.cpu_gpu)!=len(self.fc): + self.forward_cpu_gpu[1].append(self.cpu_gpu[1][0]) else: - forward_cpu_gpu.append(self.cpu_gpu[i]) + self.forward_cpu_gpu[1].append(self.cpu_gpu[1][i]) if use_nn==True: - for i in range(self.hidden_layers): + for i in range(len(self.fc)): if type(self.use_cpu_gpu)==str: - forward_cpu_gpu.append(self.use_cpu_gpu) + self.forward_cpu_gpu[1].append(self.use_cpu_gpu) else: - forward_cpu_gpu.append(self.use_cpu_gpu[i]) + self.forward_cpu_gpu[1].append(self.use_cpu_gpu[1][i]) if use_nn==False: - weight=self.weight - bias=self.bias + weight_fc=self.weight_fc + bias_fc=self.bias_fc else: - weight=[] - bias=[] - for i in range(len(self.last_weight)): - weight.append(tf.constant(self.last_weight[i])) - bias.append(tf.constant(self.last_bias[i])) - self.activation=[x for x in range(self.hidden_layers)] + weight_fc=[] + bias_fc=[] + for i in range(len(self.last_weight_fc)): + weight_fc.append(tf.constant(self.last_weight_fc[i])) + bias_fc.append(tf.constant(self.last_bias_fc[i])) + if self.continue_train==True and self.flag==1: + self.weight_fc=[x for x in range(len(self.fc)+1)] + self.bias_fc=[x for x in range(len(self.fc)+1)] + for i in range(len(self.fc)+1): + if i==len(self.fc): + self.weight_fc[i]=tf.Variable(self.last_weight_fc[i],name='out_weight') + self.bias_fc[i]=tf.Variable(self.last_bias_fc[i],name='out_bias') + else: + self.weight_fc[i]=tf.Variable(self.last_weight_fc[i],name='fc_{0}_weight'.format(i+1)) + 
self.bias_fc[i]=tf.Variable(self.last_bias_fc[i],name='fc_{0}_bias'.format(i+1)) + self.flag=0 + self.activation_fc=[x for x in range(len(self.fc))] + if use_nn!=True and len(self.weight_fc)!=len(self.fc)+1: + for i in range(len(self.fc)+1): + if i==0: + self.weight_fc.append(self.weight_init([shape,self.fc[i][0]],mean=self.mean,stddev=self.stddev,name='fc_{0}_weight'.format(i+1))) + self.bias_fc.append(self.bias_init([self.fc[i][0]],mean=self.mean,stddev=self.stddev,name='fc_{0}_bias'.format(i+1))) + elif i==len(self.fc): + self.weight_fc.append(self.weight_init([self.fc[i-1][0],self.train_labels.shape[1]],mean=self.mean,stddev=self.stddev,name='output_weight')) + self.bias_fc.append(self.bias_init([self.train_labels.shape[1]],mean=self.mean,stddev=self.stddev,name='output_bias')) + else: + self.weight_fc.append(self.weight_init([self.fc[i-1][0],self.fc[i][0]],mean=self.mean,stddev=self.stddev,name='fc_{0}_weight'.format(i+1))) + self.bias_fc.append(self.bias_init([self.fc[i][0]],mean=self.mean,stddev=self.stddev,name='fc_{0}_bias'.format(i+1))) if type(dropout)==list: data=tf.nn.dropout(data,dropout[0]) + for i in range(len(self.fc)): + with tf.device(self.forward_cpu_gpu[1][i]): + if self.fc[i][1]=='sigmoid': + if i==0: + self.activation_fc[i]=tf.nn.sigmoid(tf.matmul(data,weight_fc[i])+bias_fc[i]) + if type(dropout)==list: + self.activation_fc[i]=tf.nn.dropout(self.activation_fc[i],dropout[i+1]) + else: + self.activation_fc[i]=tf.nn.sigmoid(tf.matmul(self.activation_fc[i-1],weight_fc[i])+bias_fc[i]) + if type(dropout)==list: + self.activation_fc[i]=tf.nn.dropout(self.activation_fc[i],dropout[i+1]) + if self.fc[i][1]=='tanh': + if i==0: + self.activation_fc[i]=tf.nn.tanh(tf.matmul(data,weight_fc[i])+bias_fc[i]) + if type(dropout)==list: + self.activation_fc[i]=tf.nn.dropout(self.activation_fc[i],dropout[i+1]) + else: + self.activation_fc[i]=tf.nn.tanh(tf.matmul(self.activation_fc[i-1],weight_fc[i])+bias_fc[i]) + if type(dropout)==list: + self.activation_fc[i]=tf.nn.dropout(self.activation_fc[i],dropout[i+1]) + if self.fc[i][1]=='relu': + if i==0: + self.activation_fc[i]=tf.nn.relu(tf.matmul(data,weight_fc[i])+bias_fc[i]) + if type(dropout)==list: + self.activation_fc[i]=tf.nn.dropout(self.activation_fc[i],dropout[i+1]) + else: + self.activation_fc[i]=tf.nn.relu(tf.matmul(self.activation_fc[i-1],weight_fc[i])+bias_fc[i]) + if type(dropout)==list: + self.activation_fc[i]=tf.nn.dropout(self.activation_fc[i],dropout[i+1]) + if self.fc[i][1]=='elu': + if i==0: + self.activation_fc[i]=tf.nn.elu(tf.matmul(data,weight_fc[i])+bias_fc[i]) + if type(dropout)==list: + self.activation_fc[i]=tf.nn.dropout(self.activation_fc[i],dropout[i+1]) + else: + self.activation_fc[i]=tf.nn.elu(tf.matmul(self.activation_fc[i-1],weight_fc[i])+bias_fc[i]) + if type(dropout)==list: + self.activation_fc[i]=tf.nn.dropout(self.activation_fc[i],dropout[i+1]) + if dropout!=None and type(dropout)!=list: + self.activation_fc[-1]=tf.nn.dropout(self.activation_fc[-1],dropout) + return weight_fc,bias_fc + + + def forward_propagation(self,data,dropout=None,use_nn=False): + with self.graph.as_default(): + self.forward_cpu_gpu=[[],[]] + for i in range(len(self.conv)): + if type(self.cpu_gpu)==str: + self.forward_cpu_gpu[0].append(self.cpu_gpu) + elif len(self.cpu_gpu[0][0])!=len(self.conv): + self.forward_cpu_gpu[0].append(self.cpu_gpu[0][0]) + else: + self.forward_cpu_gpu[0].append(self.cpu_gpu[0][i]) + if use_nn==True: + for i in range(len(self.conv)): + if type(self.use_cpu_gpu)==str: + 
self.forward_cpu_gpu.append(self.use_cpu_gpu) + else: + self.forward_cpu_gpu.append(self.use_cpu_gpu[0][i]) + if use_nn==False: + weight_conv=self.weight_conv + bias_conv=self.bias_conv + else: + weight_conv=[] + bias_conv=[] + for i in range(len(self.last_weight_conv)): + weight_conv.append(tf.constant(self.last_weight_conv[i])) + bias_conv.append(tf.constant(self.last_bias_conv[i])) + self.activation=[x for x in range(len(self.conv))] with tf.name_scope('forward_propagation'): - for i in range(self.hidden_layers): - with tf.device(forward_cpu_gpu[i]): + for i in range(len(self.conv)): + with tf.device(self.forward_cpu_gpu[0][i]): if type(self.function)==list: if self.function[i]=='sigmoid': if i==0: - self.activation[i]=tf.nn.sigmoid(tf.matmul(data,weight[i])+bias[i]) - if type(dropout)==list: - self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) + self.activation[i]=tf.nn.sigmoid(self.conv_f(data,weight_conv[i],i)+bias_conv[i]) + if type(self.max_pool)==list and self.max_pool[i][0]!=0: + self.activation[i]=self.max_pool_f(self.activation[i],i) + if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: + self.activation[i]=self.avg_pool_f(self.activation[i],i) else: - self.activation[i]=tf.nn.sigmoid(tf.matmul(self.activation[i-1],weight[i])+bias[i]) - if type(dropout)==list: - self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) + self.activation[i]=tf.nn.sigmoid(self.conv_f(self.activation[i-1],weight_conv[i],i)+bias_conv[i]) + if type(self.max_pool)==list and self.max_pool[i][0]!=0: + self.activation[i]=self.max_pool_f(self.activation[i],i) + if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: + self.activation[i]=self.avg_pool_f(self.activation[i],i) if self.function[i]=='tanh': if i==0: - self.activation[i]=tf.nn.tanh(tf.matmul(data,weight[i])+bias[i]) - if type(dropout)==list: - self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) + self.activation[i]=tf.nn.tanh(self.conv_f(data,weight_conv[i],i)+bias_conv[i]) + if type(self.max_pool)==list and self.max_pool[i][0]!=0: + self.activation[i]=self.max_pool_f(self.activation[i],i) + if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: + self.activation[i]=self.avg_pool_f(self.activation[i],i) else: - self.activation[i]=tf.nn.tanh(tf.matmul(self.activation[i-1],weight[i])+bias[i]) - if type(dropout)==list: - self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) + self.activation[i]=tf.nn.tanh(self.conv_f(self.activation[i-1],weight_conv[i],i)+bias_conv[i]) + if type(self.max_pool)==list and self.max_pool[i][0]!=0: + self.activation[i]=self.max_pool_f(self.activation[i],i) + if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: + self.activation[i]=self.avg_pool_f(self.activation[i],i) if self.function[i]=='relu': if i==0: - self.activation[i]=tf.nn.relu(tf.matmul(data,weight[i])+bias[i]) - if type(dropout)==list: - self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) + self.activation[i]=tf.nn.relu(self.conv_f(data,weight_conv[i],i)+bias_conv[i]) + if type(self.max_pool)==list and self.max_pool[i][0]!=0: + self.activation[i]=self.max_pool_f(self.activation[i],i) + if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: + self.activation[i]=self.avg_pool_f(self.activation[i],i) else: - self.activation[i]=tf.nn.relu(tf.matmul(self.activation[i-1],weight[i])+bias[i]) - if type(dropout)==list: - self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) + self.activation[i]=tf.nn.relu(self.conv_f(self.activation[i-1],weight_conv[i],i)+bias_conv[i]) + if 
type(self.max_pool)==list and self.max_pool[i][0]!=0: + self.activation[i]=self.max_pool_f(self.activation[i],i) + if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: + self.activation[i]=self.avg_pool_f(self.activation[i],i) if self.function[i]=='elu': if i==0: - self.activation[i]=tf.nn.elu(tf.matmul(data,weight[i])+bias[i]) - if type(dropout)==list: - self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) + self.activation[i]=tf.nn.elu(self.conv_f(data,weight_conv[i],i)+bias_conv[i]) + if type(self.max_pool)==list and self.max_pool[i][0]!=0: + self.activation[i]=self.max_pool_f(self.activation[i],i) + if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: + self.activation[i]=self.avg_pool_f(self.activation[i],i) else: - self.activation[i]=tf.nn.elu(tf.matmul(self.activation[i-1],weight[i])+bias[i]) - if type(dropout)==list: - self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) + self.activation[i]=tf.nn.elu(self.conv_f(self.activation[i-1],weight_conv[i],i)+bias_conv[i]) + if type(self.max_pool)==list and self.max_pool[i][0]!=0: + self.activation[i]=self.max_pool_f(self.activation[i],i) + if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: + self.activation[i]=self.avg_pool_f(self.activation[i],i) elif type(self.function)==str: if self.function=='sigmoid': if i==0: - self.activation[i]=tf.nn.sigmoid(tf.matmul(data,weight[i])+bias[i]) - if type(dropout)==list: - self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) + self.activation[i]=tf.nn.sigmoid(self.conv_f(data,weight_conv[i],i)+bias_conv[i]) + if type(self.max_pool)==list and self.max_pool[i][0]!=0: + self.activation[i]=self.max_pool_f(self.activation[i],i) + if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: + self.activation[i]=self.avg_pool_f(self.activation[i],i) else: - self.activation[i]=tf.nn.sigmoid(tf.matmul(self.activation[i-1],weight[i])+bias[i]) - if type(dropout)==list: - self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) + self.activation[i]=tf.nn.sigmoid(self.conv_f(self.activation[i-1],weight_conv[i],i)+bias_conv[i]) + if type(self.max_pool)==list and self.max_pool[i][0]!=0: + self.activation[i]=self.max_pool_f(self.activation[i],i) + if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: + self.activation[i]=self.avg_pool_f(self.activation[i],i) if self.function=='tanh': if i==0: - self.activation[i]=tf.nn.tanh(tf.matmul(data,weight[i])+bias[i]) - if type(dropout)==list: - self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) + self.activation[i]=tf.nn.tanh(self.conv_f(data,weight_conv[i],i)+bias_conv[i]) + if type(self.max_pool)==list and self.max_pool[i][0]!=0: + self.activation[i]=self.max_pool_f(self.activation[i],i) + if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: + self.activation[i]=self.avg_pool_f(self.activation[i],i) else: - self.activation[i]=tf.nn.tanh(tf.matmul(self.activation[i-1],weight[i])+bias[i]) - if type(dropout)==list: - self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) + self.activation[i]=tf.nn.tanh(self.conv_f(self.activation[i-1],weight_conv[i],i)+bias_conv[i]) + if type(self.max_pool)==list and self.max_pool[i][0]!=0: + self.activation[i]=self.max_pool_f(self.activation[i],i) + if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: + self.activation[i]=self.avg_pool_f(self.activation[i],i) if self.function=='relu': if i==0: - self.activation[i]=tf.nn.relu(tf.matmul(data,weight[i])+bias[i]) - if type(dropout)==list: - 
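
Each activation branch in this hunk repeats one unit: convolve, add the bias, apply the nonlinearity, then optionally max- or average-pool. A minimal sketch of a single unit, assuming conv_f and max_pool_f wrap the standard TF 1.x primitives (their definitions live elsewhere in the file and are not shown in this patch):

    import tensorflow as tf

    def conv_unit(x,kernel,bias,stride,padding,pool=False):
        # x: [batch,h,w,c]; kernel: [f,f,c_in,c_out].
        y=tf.nn.relu(tf.nn.conv2d(x,kernel,strides=[1,stride,stride,1],
                                  padding=padding)+bias)
        if pool:
            # the patch pools only when max_pool[i][0]!=0
            y=tf.nn.max_pool(y,ksize=[1,2,2,1],strides=[1,2,2,1],
                             padding='VALID')
        return y
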
self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) + self.activation[i]=tf.nn.relu(self.conv_f(data,weight_conv[i],i)+bias_conv[i]) + if type(self.max_pool)==list and self.max_pool[i][0]!=0: + self.activation[i]=self.max_pool_f(self.activation[i],i) + if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: + self.activation[i]=self.avg_pool_f(self.activation[i],i) else: - self.activation[i]=tf.nn.relu(tf.matmul(self.activation[i-1],weight[i])+bias[i]) - if type(dropout)==list: - self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) + self.activation[i]=tf.nn.relu(self.conv_f(self.activation[i-1],weight_conv[i],i)+bias_conv[i]) + if type(self.max_pool)==list and self.max_pool[i][0]!=0: + self.activation[i]=self.max_pool_f(self.activation[i],i) + if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: + self.activation[i]=self.avg_pool_f(self.activation[i],i) if self.function=='elu': if i==0: - self.activation[i]=tf.nn.elu(tf.matmul(data,weight[i])+bias[i]) - if type(dropout)==list: - self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) + self.activation[i]=tf.nn.elu(self.conv_f(data,weight_conv[i],i)+bias_conv[i]) + if type(self.max_pool)==list and self.max_pool[i][0]!=0: + self.activation[i]=self.max_pool_f(self.activation[i],i) + if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: + self.activation[i]=self.avg_pool_f(self.activation[i],i) else: - self.activation[i]=tf.nn.elu(tf.matmul(self.activation[i-1],weight[i])+bias[i]) - if type(dropout)==list: - self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) - if dropout!=None and type(dropout)!=list: - self.activation[-1]=tf.nn.dropout(self.activation[-1],dropout) - output=tf.matmul(self.activation[-1],weight[-1])+bias[-1] + self.activation[i]=tf.nn.elu(self.conv_f(self.activation[i-1],weight_conv[i],i)+bias_conv[i]) + if type(self.max_pool)==list and self.max_pool[i][0]!=0: + self.activation[i]=self.max_pool_f(self.activation[i],i) + if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: + self.activation[i]=self.avg_pool_f(self.activation[i],i) + flattened_layer=tf.reshape(self.activation[-1],[-1,self.activation[-1].shape[1]*self.activation[-1].shape[2]*self.activation[-1].shape[3]]) + shape=flattened_layer.shape + shape=np.array(shape[1]) + shape=shape.astype(np.int) + self.flattened_len=shape + weight_fc,bias_fc=self.forward_propagation_fc(flattened_layer,dropout,shape,use_nn) + output=tf.matmul(self.activation_fc[-1],weight_fc[-1])+bias_fc[-1] return output @@ -267,6 +400,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.dropout=dropout self.optimizer=optimizer self.lr=lr + self.acc=acc if continue_train!=True: if self.continue_train==True: continue_train=True @@ -281,57 +415,41 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.continue_train=True if cpu_gpu!=None: self.cpu_gpu=cpu_gpu - if type(self.cpu_gpu)==list and (len(self.cpu_gpu)!=self.hidden_layers+1 or len(self.cpu_gpu)==1): - self.cpu_gpu.append('/gpu:0') + if type(self.cpu_gpu)==list and len(self.cpu_gpu)!=3: + train_cpu_gpu='/gpu:0' if type(self.cpu_gpu)==str: train_cpu_gpu=self.cpu_gpu - else: - train_cpu_gpu=self.cpu_gpu[-1] with tf.device(train_cpu_gpu): if continue_train==True and self.end_flag==True: self.end_flag=False - self.weight=[x for x in range(self.hidden_layers+1)] - self.bias=[x for x in range(self.hidden_layers+1)] - for i in range(self.hidden_layers+1): - if i==self.hidden_layers: - 
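
The handoff from the convolutional stack to the fully connected stack above is a reshape to [batch, h*w*c] plus a record of the flattened width, which sizes the first fc weight. In outline (a sketch under the same static-shape assumption the patch makes):

    import tensorflow as tf

    def flatten(x):
        # x: [batch,h,w,c] with statically known spatial dims.
        h,w,c=(int(d) for d in x.shape[1:])
        flat=tf.reshape(x,[-1,h*w*c])
        return flat,h*w*c  # the width feeds forward_propagation_fc
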
self.weight[i]=tf.Variable(self.last_weight[i],name='weight_output') - self.bias[i]=tf.Variable(self.last_bias[i],name='bias_output') - else: - self.weight[i]=tf.Variable(self.last_weight[i],name='weight_{0}'.format(i+1)) - self.bias[i]=tf.Variable(self.last_bias[i],name='bias_{0}'.format(i+1)) + self.weight_conv=[x for x in range(len(self.conv))] + self.bias_conv=[x for x in range(len(self.conv))] + for i in range(len(self.conv)): + self.weight_conv[i]=tf.Variable(self.last_weight_conv[i],name='conv_{0}_weight'.format(i+1)) + self.bias_conv[i]=tf.Variable(self.last_bias_conv[i],name='conv_{0}_bias'.format(i+1)) if continue_train==True and self.flag==1: - self.weight=[x for x in range(self.hidden_layers+1)] - self.bias=[x for x in range(self.hidden_layers+1)] - for i in range(self.hidden_layers+1): - if i==self.hidden_layers: - self.weight[i]=tf.Variable(self.last_weight[i],name='weight_output') - self.bias[i]=tf.Variable(self.last_bias[i],name='bias_output') - else: - self.weight[i]=tf.Variable(self.last_weight[i],name='weight_{0}'.format(i+1)) - self.bias[i]=tf.Variable(self.last_bias[i],name='bias_{0}'.format(i+1)) + self.weight_conv=[x for x in range(len(self.conv))] + self.bias_conv=[x for x in range(len(self.conv))] + for i in range(len(self.conv)): + self.weight_conv[i]=tf.Variable(self.last_weight_conv[i],name='conv_{0}_weight'.format(i+1)) + self.bias_conv[i]=tf.Variable(self.last_bias_conv[i],name='conv_{0}_bias'.format(i+1)) self.flag=0 # ---------------forward propagation--------------- train_output=self.forward_propagation(self.data,self.dropout) # ---------------------------------------- with tf.name_scope('train_loss'): - if len(self.labels_shape)==1: - if l2==None: - train_loss=tf.reduce_mean(tf.square(train_output-tf.expand_dims(self.labels,axis=1))) - else: - train_loss=tf.square(train_output-tf.expand_dims(self.labels,axis=1)) - train_loss=tf.reduce_mean(train_loss+l2/2*sum([tf.reduce_sum(x**2) for x in self.weight])) - elif self.labels_shape[1]==1: + if self.labels_shape[1]==1: if l2==None: train_loss=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=train_output,labels=self.labels)) else: train_loss=tf.nn.sigmoid_cross_entropy_with_logits(logits=train_output,labels=self.labels) - train_loss=tf.reduce_mean(train_loss+l2/2*sum([tf.reduce_sum(x**2) for x in self.weight])) + train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.weight_conv])+sum([tf.reduce_sum(x**2) for x in self.weight_fc]))) else: if l2==None: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=train_output,labels=self.labels)) else: - train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=train_output,labels=self.labels)) - train_loss=train_loss+l2*sum([tf.reduce_sum(x**2) for x in self.weight]) + train_loss=tf.nn.softmax_cross_entropy_with_logits_v2(logits=train_output,labels=self.labels) + train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.weight_conv])+sum([tf.reduce_sum(x**2) for x in self.weight_fc]))) if self.optimizer=='Gradient': opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) if self.optimizer=='RMSprop': @@ -341,159 +459,164 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N if self.optimizer=='Adam': opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) train_loss_scalar=tf.summary.scalar('train_loss',train_loss) - if acc==True: - with tf.name_scope('train_accuracy'): - if 
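
The loss hunk above changes the L2 term to cover the convolutional and fully connected weights together, scaled by l2/2 and averaged inside the reduce_mean. Equivalently (a sketch; in the patch, weights would be self.weight_conv plus self.weight_fc):

    import tensorflow as tf

    def l2_cross_entropy(logits,labels,weights,l2=None):
        loss=tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,
                                                        labels=labels)
        if l2!=None:
            # l2/2 * sum of squared entries over every weight tensor
            loss=loss+l2/2*sum(tf.reduce_sum(w**2) for w in weights)
        return tf.reduce_mean(loss)
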
len(self.labels_shape)==1: - train_accuracy=tf.reduce_mean(tf.abs(train_output-self.labels)) + if acc==True: + with tf.name_scope('train_accuracy'): + equal=tf.equal(tf.argmax(train_output,1),tf.argmax(self.labels,1)) + train_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) + train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) + if train_summary_path!=None: + train_merging=tf.summary.merge([train_loss_scalar,train_accuracy_scalar]) + train_writer=tf.summary.FileWriter(train_summary_path) + config=tf.ConfigProto() + config.gpu_options.allow_growth=True + config.allow_soft_placement=True + sess=tf.Session(config=config) + sess.run(tf.global_variables_initializer()) + self.sess=sess + if self.total_epoch==0: + epoch=epoch+1 + for i in range(epoch): + if self.batch!=None: + batches=int((self.shape0-self.shape0%self.batch)/self.batch) + total_loss=0 + total_acc=0 + random=np.arange(self.shape0) + np.random.shuffle(random) + train_data=self.train_data[random] + train_labels=self.train_labels[random] + for j in range(batches): + index1=j*self.batch + index2=(j+1)*self.batch + train_data_batch=train_data[index1:index2] + train_labels_batch=train_labels[index1:index2] + feed_dict={self.data:train_data_batch,self.labels:train_labels_batch} + if i==0 and self.total_epoch==0: + batch_loss=sess.run(train_loss,feed_dict=feed_dict) else: - equal=tf.equal(tf.argmax(train_output,1),tf.argmax(self.labels,1)) - train_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) - train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) - if train_summary_path!=None: - train_merging=tf.summary.merge([train_loss_scalar,train_accuracy_scalar]) - train_writer=tf.summary.FileWriter(train_summary_path) - config=tf.ConfigProto() - config.gpu_options.allow_growth=True - config.allow_soft_placement=True - sess=tf.Session(config=config) - sess.run(tf.global_variables_initializer()) - self.sess=sess - if self.total_epoch==0: - epoch=epoch+1 - for i in range(epoch): - if self.batch!=None: - batches=int((self.shape0-self.shape0%self.batch)/self.batch) - total_loss=0 - total_acc=0 - random=np.arange(self.shape0) - np.random.shuffle(random) - train_data=self.train_data[random] - train_labels=self.train_labels[random] - for j in range(batches): - index1=j*self.batch - index2=(j+1)*self.batch - train_data_batch=train_data[index1:index2] - train_labels_batch=train_labels[index1:index2] - feed_dict={self.data:train_data_batch,self.labels:train_labels_batch} - if i==0 and self.total_epoch==0: - batch_loss=sess.run(train_loss,feed_dict=feed_dict) - else: - batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) - total_loss+=batch_loss - if acc==True: - batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) - total_acc+=batch_acc - if self.shape0%self.batch!=0: - batches+=1 - index1=batches*self.batch - index2=self.batch-(self.shape0-batches*self.batch) - train_data_batch=np.concatenate([train_data[index1:],train_data[:index2]]) - train_labels_batch=np.concatenate([train_labels[index1:],train_labels[:index2]]) - feed_dict={self.data:train_data_batch,self.labels:train_labels_batch} - if i==0 and self.total_epoch==0: - batch_loss=sess.run(train_loss,feed_dict=feed_dict) - else: - batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) - total_loss+=batch_loss - if acc==True: - batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) - total_acc+=batch_acc - loss=total_loss/batches - train_acc=total_acc/batches - self.train_loss_list.append(loss.astype(np.float32)) - self.train_loss=loss - 
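
The accuracy graph added above is the standard argmax comparison for one-hot labels: a prediction is counted correct when its argmax index matches the label's, and the mean of the cast equalities is the accuracy. As a sketch:

    import tensorflow as tf

    def accuracy_op(logits,labels):
        # labels are one-hot along axis 1
        equal=tf.equal(tf.argmax(logits,1),tf.argmax(labels,1))
        return tf.reduce_mean(tf.cast(equal,tf.float32))
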
self.train_loss=self.train_loss.astype(np.float32) + batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) + total_loss+=batch_loss if acc==True: - self.train_accuracy_list.append(float(train_acc)) - self.train_accuracy=train_acc - self.train_accuracy=self.train_accuracy.astype(np.float16) - else: - random=np.arange(self.shape0) - np.random.shuffle(random) - train_data=self.train_data[random] - train_labels=self.train_labels[random] - feed_dict={self.data:train_data,self.labels:train_labels} + batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) + total_acc+=batch_acc + if self.shape0%self.batch!=0: + batches+=1 + index1=batches*self.batch + index2=self.batch-(self.shape0-batches*self.batch) + train_data_batch=np.concatenate([train_data[index1:],train_data[:index2]]) + train_labels_batch=np.concatenate([train_labels[index1:],train_labels[:index2]]) + feed_dict={self.data:train_data_batch,self.labels:train_labels_batch} if i==0 and self.total_epoch==0: - loss=sess.run(train_loss,feed_dict=feed_dict) + batch_loss=sess.run(train_loss,feed_dict=feed_dict) else: - loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) - self.train_loss_list.append(loss.astype(np.float32)) - self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float32) + batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) + total_loss+=batch_loss if acc==True: - accuracy=sess.run(train_accuracy,feed_dict=feed_dict) - self.train_accuracy_list.append(float(accuracy)) - self.train_accuracy=accuracy - self.train_accuracy=self.train_accuracy.astype(np.float16) - if epoch%10!=0: - temp_epoch=epoch-epoch%10 - temp_epoch=int(temp_epoch/10) + batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) + total_acc+=batch_acc + loss=total_loss/batches + train_acc=total_acc/batches + self.train_loss_list.append(loss.astype(np.float32)) + self.train_loss=loss + self.train_loss=self.train_loss.astype(np.float32) + if acc==True: + self.train_accuracy_list.append(train_acc.astype(np.float32)) + self.train_accuracy=train_acc + self.train_accuracy=self.train_accuracy.astype(np.float32) + else: + random=np.arange(self.shape0) + np.random.shuffle(random) + train_data=self.train_data[random] + train_labels=self.train_labels[random] + feed_dict={self.data:train_data,self.labels:train_labels} + if i==0 and self.total_epoch==0: + loss=sess.run(train_loss,feed_dict=feed_dict) else: - temp_epoch=epoch/10 - if temp_epoch==0: - temp_epoch=1 - if i%temp_epoch==0: - if continue_train==True: - if self.epoch!=None: - self.total_epoch=self.epoch+i+1 - else: - self.total_epoch=i - if continue_train==True: - print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch,self.train_loss)) + loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) + self.train_loss_list.append(loss.astype(np.float32)) + self.train_loss=loss + self.train_loss=self.train_loss.astype(np.float32) + if acc==True: + accuracy=sess.run(train_accuracy,feed_dict=feed_dict) + self.train_accuracy_list.append(accuracy.astype(np.float32)) + self.train_accuracy=accuracy + self.train_accuracy=self.train_accuracy.astype(np.float32) + if epoch%10!=0: + temp_epoch=epoch-epoch%10 + temp_epoch=int(temp_epoch/10) + else: + temp_epoch=epoch/10 + if temp_epoch==0: + temp_epoch=1 + if i%temp_epoch==0: + if continue_train==True: + if self.epoch!=None: + self.total_epoch=self.epoch+i+1 else: - print('epoch:{0} loss:{1:.6f}'.format(i,self.train_loss)) - if model_path!=None and i%epoch*2==0: - self.save(model_path,i,one) - if train_summary_path!=None: - 
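
When shape0 is not divisible by batch, the loop above runs one extra batch padded by wrapping around to the start of the shuffled epoch, so every example is seen at least once per epoch. The index arithmetic, isolated and written in terms of the number of full batches (which keeps the padded batch exactly batch examples long):

    import numpy as np

    def wraparound_batch(data,labels,batch):
        shape0=data.shape[0]
        batches=shape0//batch          # count of full batches
        index1=batches*batch           # first leftover example
        index2=batch-(shape0-index1)   # how many to borrow from the front
        d=np.concatenate([data[index1:],data[:index2]])
        l=np.concatenate([labels[index1:],labels[:index2]])
        return d,l

    # e.g. shape0=10, batch=3: three full batches, then one batch made
    # of data[9:] plus data[:2].
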
train_summary=sess.run(train_merging,feed_dict=feed_dict) - train_writer.add_summary(train_summary,i) - print() - print('last loss:{0:.6f}'.format(self.train_loss)) - if acc==True: - if len(self.labels_shape)==2: - print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + self.total_epoch=i + if continue_train==True: + print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch,self.train_loss)) else: - print('accuracy:{0:.3f}'.format(self.train_accuracy)) - if train_summary_path!=None: - train_writer.close() - if continue_train==True: - self.last_weight=sess.run(self.weight) - self.last_bias=sess.run(self.bias) - for i in range(self.hidden_layers+1): - if i==self.hidden_layers: - self.weight[i]=tf.Variable(self.last_weight[i],name='weight_output') - self.bias[i]=tf.Variable(self.last_bias[i],name='bias_output') - else: - self.weight[i]=tf.Variable(self.last_weight[i],name='weight_{0}'.format(i+1)) - self.bias[i]=tf.Variable(self.last_bias[i],name='bias_{0}'.format(i+1)) - self.last_weight.clear() - self.last_bias.clear() - sess.run(tf.global_variables_initializer()) - if continue_train==True: - if self.epoch!=None: - self.total_epoch=self.epoch+epoch + print('epoch:{0} loss:{1:.6f}'.format(i,self.train_loss)) + if model_path!=None and i%epoch*2==0: + self.save(model_path,i,one) + if train_summary_path!=None: + train_summary=sess.run(train_merging,feed_dict=feed_dict) + train_writer.add_summary(train_summary,i) + print() + print('last loss:{0:.6f}'.format(self.train_loss)) + if acc==True: + print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + if train_summary_path!=None: + train_writer.close() + if continue_train==True: + self.last_weight_conv=sess.run(self.weight_conv) + self.last_bias_conv=sess.run(self.bias_conv) + self.last_weight_fc=sess.run(self.weight_fc) + self.last_bias_fc=sess.run(self.bias_fc) + for i in range(len(self.conv)): + self.weight_conv[i]=tf.Variable(self.last_weight_conv[i],name='conv_{0}_weight'.format(i+1)) + self.bias_conv[i]=tf.Variable(self.last_bias_conv[i],name='conv_{0}_bias'.format(i+1)) + for i in range(len(self.fc)+1): + if i==len(self.fc): + self.weight_fc[i]=tf.Variable(self.last_weight_fc[i],name='output_weight') + self.bias_fc[i]=tf.Variable(self.last_bias_fc[i],name='output_bias') else: - self.total_epoch=epoch-1 - self.epoch=self.total_epoch - if continue_train!=True: - self.epoch=epoch-1 - t2=time.time() - _time=t2-t1 - if continue_train!=True or self.time==None: - self.time=_time + self.weight_fc[i]=tf.Variable(self.last_weight_fc[i],name='fc_{0}_weight'.format(i+1)) + self.bias_fc[i]=tf.Variable(self.last_bias_fc[i],name='fc_{0}_bias'.format(i+1)) + self.last_weight_conv.clear() + self.last_bias_conv.clear() + self.last_weight_fc.clear() + self.last_bias_fc.clear() + sess.run(tf.global_variables_initializer()) + if continue_train==True: + if self.epoch!=None: + self.total_epoch=self.epoch+epoch else: - self.time+=_time - print('time:{0:.3f}s'.format(self.time)) - return + self.total_epoch=epoch-1 + self.epoch=self.total_epoch + if continue_train!=True: + self.epoch=epoch-1 + t2=time.time() + _time=t2-t1 + if continue_train!=True or self.time==None: + self.time=_time + else: + self.time+=_time + print('time:{0:.3f}s'.format(self.time)) + return def end(self): with self.graph.as_default(): self.end_flag=True - self.last_weight=self.sess.run(self.weight) - self.last_bias=self.sess.run(self.bias) - self.weight.clear() - self.bias.clear() + self.last_weight_conv=self.sess.run(self.weight_conv) + self.last_bias_conv=self.sess.run(self.bias_conv)
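
The continue-train handoff above has one job: pull the trained values out of the session as numpy arrays, then reseed fresh Variables with them so the next train() call resumes from the same parameters. A sketch of the convolutional half (the fully connected half is identical in shape):

    import tensorflow as tf

    def snapshot_and_rebuild(sess,weight_conv,bias_conv):
        last_w=sess.run(weight_conv)   # list of numpy arrays
        last_b=sess.run(bias_conv)
        new_w=[tf.Variable(w,name='conv_{0}_weight'.format(i+1))
               for i,w in enumerate(last_w)]
        new_b=[tf.Variable(b,name='conv_{0}_bias'.format(i+1))
               for i,b in enumerate(last_b)]
        return new_w,new_b
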
+ self.last_weight_fc=self.sess.run(self.weight_fc) + self.last_bias_fc=self.sess.run(self.bias_fc) + self.weight_conv.clear() + self.bias_conv.clear() + self.weight_fc.clear() + self.bias_fc.clear() self.total_epoch=self.epoch self.sess.close() return @@ -507,28 +630,19 @@ def test(self,test_data,test_labels,batch=None): use_nn=True self.test_flag=True shape=test_labels.shape - test_data_placeholder=tf.placeholder(dtype=test_data.dtype,shape=[None,test_data.shape[1]]) - if len(shape)==2: - test_labels_placeholder=tf.placeholder(dtype=test_labels.dtype,shape=[None,shape[1]]) - else: - test_labels_placeholder=tf.placeholder(dtype=test_labels.dtype,shape=[None]) + test_data_placeholder=tf.placeholder(dtype=test_data.dtype,shape=[None,test_data.shape[1],test_data.shape[2],test_data.shape[3]]) + test_labels_placeholder=tf.placeholder(dtype=test_labels.dtype,shape=[None,shape[1]]) test_output=self.forward_propagation(test_data_placeholder,use_nn=use_nn) - if len(shape)==1: - test_loss=tf.reduce_mean(tf.square(test_output-test_labels_placeholder)) - elif shape[1]==1: + if shape[1]==1: test_loss=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=test_output,labels=test_labels_placeholder)) else: test_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=test_output,labels=test_labels_placeholder)) - if len(shape)==1: - test_accuracy=tf.reduce_mean(tf.abs(test_output-test_labels_placeholder)) - else: - equal=tf.equal(tf.argmax(test_output,1),tf.argmax(test_labels_placeholder,1)) - test_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) + equal=tf.equal(tf.argmax(test_output,1),tf.argmax(test_labels_placeholder,1)) + test_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) config=tf.ConfigProto() config.gpu_options.allow_growth=True config.allow_soft_placement=True sess=tf.Session(config=config) - sess.run(tf.global_variables_initializer()) if batch!=None: total_test_loss=0 total_test_acc=0 @@ -553,18 +667,18 @@ def test(self,test_data,test_labels,batch=None): self.test_loss=test_loss self.test_accuracy=test_acc self.test_loss=self.test_loss.astype(np.float32) - self.test_accuracy=self.test_accuracy.astype(np.float16) + self.test_accuracy=self.test_accuracy.astype(np.float32) else: self.test_loss=sess.run(test_loss,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) self.test_accuracy=sess.run(test_accuracy,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) self.test_loss=self.test_loss.astype(np.float32) - self.test_accuracy=self.test_accuracy.astype(np.float16) + self.test_accuracy=self.test_accuracy.astype(np.float32) print('test loss:{0:.6f}'.format(self.test_loss)) print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) sess.close() return - + def train_info(self): print() print('batch:{0}'.format(self.batch)) @@ -583,15 +697,16 @@ def train_info(self): print() print('-------------------------------------') print() - print('train loss:{0}'.format(self.train_loss)) - print() - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + print('train loss:{0:.6f}'.format(self.train_loss)) + if self.acc==True: + print() + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) return def test_info(self): print() - print('test loss:{0}'.format(self.test_loss)) + print('test loss:{0:.6f}'.format(self.test_loss)) print() print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) return @@ -618,21 +733,22 @@ def train_visual(self): plt.title('train accuracy') plt.xlabel('epoch') 
plt.ylabel('accuracy') - print('train loss:{0}'.format(self.train_loss)) - print() - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + print('train loss:{0:.6f}'.format(self.train_loss)) + if self.acc==True: + print() + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) return - + def comparison(self): print() - print('train loss:{0}'.format(self.train_loss)) + print('train loss:{0:.6f}'.format(self.train_loss)) print() print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) print() print('-------------------------------------') print() - print('test loss:{0}'.format(self.test_loss)) + print('test loss:{0:.6f}'.format(self.test_loss)) print() print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) return @@ -641,91 +757,156 @@ def comparison(self): def network(self): print() total_params=0 - if type(self.hidden)==list: - for i in range(len(self.hidden)+2): - if i==0: - print('input layer\t{0}'.format(self.data_shape[1])) - print() - if i==len(self.hidden)+1: - if self.labels_shape[1]==1: - print('output layer\t{0}\t{1}\t{2}'.format(self.labels_shape[1],self.labels_shape[1]*self.hidden[i-2]+1,'sigmoid')) - total_params+=self.labels_shape[1]*self.hidden[i-2]+1 + for i in range(len(self.conv)+1): + if i==0: + print('input layer\t{0}\t{1}'.format(self.data_shape[1],self.data_shape[3])) + print() + if type(self.function)==list: + if i==1: + if self.conv[i-1][3]=='SAME': + print('conv_layer_{0}\t{1}\t{2}\t{3}\t{4}\t{5}'.format(i,self.conv[i-1][0],self.data_shape[1],self.conv[i-1][1],np.prod(self.weight_conv[i-1].shape)+self.weight_conv[i-1].shape[3],self.function[i-1])) + total_params+=np.prod(self.weight_conv[i-1].shape)+self.weight_conv[i-1].shape[3] print() else: - print('output layer\t{0}\t{1}\t{2}'.format(self.labels_shape[1],self.labels_shape[1]*self.hidden[i-2]+self.labels_shape[1],'softmax')) - total_params+=self.labels_shape[1]*self.hidden[i-2]+self.labels_shape[1] + conv_output_shape=int((self.data_shape[1]-self.conv[i-1][0])/self.conv[i-1][2]+1) + print('conv_layer_{0}\t{1}\t{2}\t{3}\t{4}\t{5}'.format(i,self.conv[i-1][0],conv_output_shape,self.conv[i-1][1],np.prod(self.weight_conv[i-1].shape)+self.weight_conv[i-1].shape[3],self.function[i-1])) + total_params+=np.prod(self.weight_conv[i-1].shape)+self.weight_conv[i-1].shape[3] print() - if i>0 and i0 and i0 and i0 and i Date: Thu, 16 Jul 2020 15:06:12 +0800 Subject: [PATCH 24/72] Update CNN.py --- Note/nn/CNN/CNN.py | 313 +++++++++++++++++++++++---------------------- 1 file changed, 159 insertions(+), 154 deletions(-) diff --git a/Note/nn/CNN/CNN.py b/Note/nn/CNN/CNN.py index cf0bc13a4..fc4831838 100644 --- a/Note/nn/CNN/CNN.py +++ b/Note/nn/CNN/CNN.py @@ -400,6 +400,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.dropout=dropout self.optimizer=optimizer self.lr=lr + self.acc=acc if continue_train!=True: if self.continue_train==True: continue_train=True @@ -458,151 +459,151 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N if self.optimizer=='Adam': opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) train_loss_scalar=tf.summary.scalar('train_loss',train_loss) - if acc==True: - with tf.name_scope('train_accuracy'): - equal=tf.equal(tf.argmax(train_output,1),tf.argmax(self.labels,1)) - train_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) - train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) - if train_summary_path!=None: - 
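
The layer summary printed by network() above rests on two small formulas: for 'SAME' padding the spatial width is unchanged, for 'VALID' it is int((n-f)/s+1), and a conv layer's parameter count is the product of the kernel shape plus one bias per output channel. Isolated as a sketch:

    import numpy as np

    def conv_layer_summary(n,kernel_shape,stride,padding):
        # Output width for a square input of width n, and the
        # weight+bias parameter count, as printed by network().
        f=kernel_shape[0]
        out=n if padding=='SAME' else int((n-f)/stride+1)
        params=int(np.prod(kernel_shape))+kernel_shape[3]
        return out,params

    # e.g. a 3x3 conv taking 3 channels to 16, stride 1, VALID padding,
    # on a 28-pixel input: conv_layer_summary(28,[3,3,3,16],1,'VALID')
    # returns (26, 448).
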
train_merging=tf.summary.merge([train_loss_scalar,train_accuracy_scalar]) - train_writer=tf.summary.FileWriter(train_summary_path) - config=tf.ConfigProto() - config.gpu_options.allow_growth=True - config.allow_soft_placement=True - sess=tf.Session(config=config) - sess.run(tf.global_variables_initializer()) - self.sess=sess - if self.total_epoch==0: - epoch=epoch+1 - for i in range(epoch): - if self.batch!=None: - batches=int((self.shape0-self.shape0%self.batch)/self.batch) - total_loss=0 - total_acc=0 - random=np.arange(self.shape0) - np.random.shuffle(random) - train_data=self.train_data[random] - train_labels=self.train_labels[random] - for j in range(batches): - index1=j*self.batch - index2=(j+1)*self.batch - train_data_batch=train_data[index1:index2] - train_labels_batch=train_labels[index1:index2] - feed_dict={self.data:train_data_batch,self.labels:train_labels_batch} - if i==0 and self.total_epoch==0: - batch_loss=sess.run(train_loss,feed_dict=feed_dict) - else: - batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) - total_loss+=batch_loss - if acc==True: - batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) - total_acc+=batch_acc - if self.shape0%self.batch!=0: - batches+=1 - index1=batches*self.batch - index2=self.batch-(self.shape0-batches*self.batch) - train_data_batch=np.concatenate([train_data[index1:],train_data[:index2]]) - train_labels_batch=np.concatenate([train_labels[index1:],train_labels[:index2]]) - feed_dict={self.data:train_data_batch,self.labels:train_labels_batch} - if i==0 and self.total_epoch==0: - batch_loss=sess.run(train_loss,feed_dict=feed_dict) - else: - batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) - total_loss+=batch_loss - if acc==True: - batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) - total_acc+=batch_acc - loss=total_loss/batches - train_acc=total_acc/batches - self.train_loss_list.append(loss.astype(np.float32)) - self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float32) + if acc==True: + with tf.name_scope('train_accuracy'): + equal=tf.equal(tf.argmax(train_output,1),tf.argmax(self.labels,1)) + train_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) + train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) + if train_summary_path!=None: + train_merging=tf.summary.merge([train_loss_scalar,train_accuracy_scalar]) + train_writer=tf.summary.FileWriter(train_summary_path) + config=tf.ConfigProto() + config.gpu_options.allow_growth=True + config.allow_soft_placement=True + sess=tf.Session(config=config) + sess.run(tf.global_variables_initializer()) + self.sess=sess + if self.total_epoch==0: + epoch=epoch+1 + for i in range(epoch): + if self.batch!=None: + batches=int((self.shape0-self.shape0%self.batch)/self.batch) + total_loss=0 + total_acc=0 + random=np.arange(self.shape0) + np.random.shuffle(random) + train_data=self.train_data[random] + train_labels=self.train_labels[random] + for j in range(batches): + index1=j*self.batch + index2=(j+1)*self.batch + train_data_batch=train_data[index1:index2] + train_labels_batch=train_labels[index1:index2] + feed_dict={self.data:train_data_batch,self.labels:train_labels_batch} + if i==0 and self.total_epoch==0: + batch_loss=sess.run(train_loss,feed_dict=feed_dict) + else: + batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) + total_loss+=batch_loss if acc==True: - self.train_accuracy_list.append(train_acc) - self.train_accuracy=train_acc - self.train_accuracy=self.train_accuracy.astype(np.float16) - else: - random=np.arange(self.shape0) - 
np.random.shuffle(random) - train_data=self.train_data[random] - train_labels=self.train_labels[random] - feed_dict={self.data:train_data,self.labels:train_labels} + batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) + total_acc+=batch_acc + if self.shape0%self.batch!=0: + batches+=1 + index1=batches*self.batch + index2=self.batch-(self.shape0-batches*self.batch) + train_data_batch=np.concatenate([train_data[index1:],train_data[:index2]]) + train_labels_batch=np.concatenate([train_labels[index1:],train_labels[:index2]]) + feed_dict={self.data:train_data_batch,self.labels:train_labels_batch} if i==0 and self.total_epoch==0: - loss=sess.run(train_loss,feed_dict=feed_dict) + batch_loss=sess.run(train_loss,feed_dict=feed_dict) else: - loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) - self.train_loss_list.append(loss.astype(np.float32)) - self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float32) + batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) + total_loss+=batch_loss if acc==True: - accuracy=sess.run(train_accuracy,feed_dict=feed_dict) - self.train_accuracy_list.append(accuracy) - self.train_accuracy=accuracy - self.train_accuracy=self.train_accuracy.astype(np.float16) - if epoch%10!=0: - temp_epoch=epoch-epoch%10 - temp_epoch=int(temp_epoch/10) + batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) + total_acc+=batch_acc + loss=total_loss/batches + train_acc=total_acc/batches + self.train_loss_list.append(loss.astype(np.float32)) + self.train_loss=loss + self.train_loss=self.train_loss.astype(np.float32) + if acc==True: + self.train_accuracy_list.append(train_acc.astype(np.float32)) + self.train_accuracy=train_acc + self.train_accuracy=self.train_accuracy.astype(np.float32) + else: + random=np.arange(self.shape0) + np.random.shuffle(random) + train_data=self.train_data[random] + train_labels=self.train_labels[random] + feed_dict={self.data:train_data,self.labels:train_labels} + if i==0 and self.total_epoch==0: + loss=sess.run(train_loss,feed_dict=feed_dict) else: - temp_epoch=epoch/10 - if temp_epoch==0: - temp_epoch=1 - if i%temp_epoch==0: - if continue_train==True: - if self.epoch!=None: - self.total_epoch=self.epoch+i+1 - else: - self.total_epoch=i - if continue_train==True: - print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch,self.train_loss)) - else: - print('epoch:{0} loss:{1:.6f}'.format(i,self.train_loss)) - if model_path!=None and i%epoch*2==0: - self.save(model_path,i,one) - if train_summary_path!=None: - train_summary=sess.run(train_merging,feed_dict=feed_dict) - train_writer.add_summary(train_summary,i) - print() - print('last loss:{0:.6f}'.format(self.train_loss)) - if acc==True: - print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - if train_summary_path!=None: - train_writer.close() - if continue_train==True: - self.last_weight_conv=sess.run(self.weight_conv) - self.last_bias_conv=sess.run(self.bias_conv) - self.last_weight_fc=sess.run(self.weight_fc) - self.last_bias_fc=sess.run(self.bias_fc) - for i in range(len(self.conv)): - self.weight_conv[i]=tf.Variable(self.last_weight_conv[i],name='conv_{0}_weight'.format(i+1)) - self.bias_conv[i]=tf.Variable(self.last_bias_conv[i],name='conv_{0}_bias'.format(i+1)) - for i in range(len(self.fc)+1): - if i==len(self.fc): - self.weight_fc[i]=tf.Variable(self.last_weight_fc[i],name='output_weight') - self.bias_fc[i]=tf.Variable(self.last_bias_fc[i],name='output_bias') + loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) + self.train_loss_list.append(loss.astype(np.float32)) + 
self.train_loss=loss + self.train_loss=self.train_loss.astype(np.float32) + if acc==True: + accuracy=sess.run(train_accuracy,feed_dict=feed_dict) + self.train_accuracy_list.append(accuracy.astype(np.float32)) + self.train_accuracy=accuracy + self.train_accuracy=self.train_accuracy.astype(np.float32) + if epoch%10!=0: + temp_epoch=epoch-epoch%10 + temp_epoch=int(temp_epoch/10) + else: + temp_epoch=epoch/10 + if temp_epoch==0: + temp_epoch=1 + if i%temp_epoch==0: + if continue_train==True: + if self.epoch!=None: + self.total_epoch=self.epoch+i+1 else: - self.weight_fc[i]=tf.Variable(self.last_weight_fc[i],name='fc_{0}_weight'.format(i+1)) - self.bias_fc[i]=tf.Variable(self.last_bias_fc[i],name='fc_{0}_weight'.format(i+1)) - self.last_weight_conv.clear() - self.last_bias_conv.clear() - self.last_weight_fc.clear() - self.last_bias_fc.clear() - sess.run(tf.global_variables_initializer()) - if continue_train==True: - if self.epoch!=None: - self.total_epoch=self.epoch+epoch + self.total_epoch=i + if continue_train==True: + print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch,self.train_loss)) else: - self.total_epoch=epoch-1 - self.epoch=self.total_epoch - if continue_train!=True: - self.epoch=epoch-1 - t2=time.time() - _time=t2-t1 - if continue_train!=True or self.time==None: - self.time=_time + print('epoch:{0} loss:{1:.6f}'.format(i,self.train_loss)) + if model_path!=None and i%epoch*2==0: + self.save(model_path,i,one) + if train_summary_path!=None: + train_summary=sess.run(train_merging,feed_dict=feed_dict) + train_writer.add_summary(train_summary,i) + print() + print('last loss:{0:.6f}'.format(self.train_loss)) + if acc==True: + print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + if train_summary_path!=None: + train_writer.close() + if continue_train==True: + self.last_weight_conv=sess.run(self.weight_conv) + self.last_bias_conv=sess.run(self.bias_conv) + self.last_weight_fc=sess.run(self.weight_fc) + self.last_bias_fc=sess.run(self.bias_fc) + for i in range(len(self.conv)): + self.weight_conv[i]=tf.Variable(self.last_weight_conv[i],name='conv_{0}_weight'.format(i+1)) + self.bias_conv[i]=tf.Variable(self.last_bias_conv[i],name='conv_{0}_bias'.format(i+1)) + for i in range(len(self.fc)+1): + if i==len(self.fc): + self.weight_fc[i]=tf.Variable(self.last_weight_fc[i],name='output_weight') + self.bias_fc[i]=tf.Variable(self.last_bias_fc[i],name='output_bias') + else: + self.weight_fc[i]=tf.Variable(self.last_weight_fc[i],name='fc_{0}_weight'.format(i+1)) + self.bias_fc[i]=tf.Variable(self.last_bias_fc[i],name='fc_{0}_weight'.format(i+1)) + self.last_weight_conv.clear() + self.last_bias_conv.clear() + self.last_weight_fc.clear() + self.last_bias_fc.clear() + sess.run(tf.global_variables_initializer()) + if continue_train==True: + if self.epoch!=None: + self.total_epoch=self.epoch+epoch else: - self.time+=_time - print('time:{0:.3f}s'.format(self.time)) - return + self.total_epoch=epoch-1 + self.epoch=self.total_epoch + if continue_train!=True: + self.epoch=epoch-1 + t2=time.time() + _time=t2-t1 + if continue_train!=True or self.time==None: + self.time=_time + else: + self.time+=_time + print('time:{0:.3f}s'.format(self.time)) + return def end(self): @@ -666,12 +667,12 @@ def test(self,test_data,test_labels,batch=None): self.test_loss=test_loss self.test_accuracy=test_acc self.test_loss=self.test_loss.astype(np.float32) - self.test_accuracy=self.test_accuracy.astype(np.float16) + self.test_accuracy=self.test_accuracy.astype(np.float32) else: 
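
The temp_epoch arithmetic above throttles progress printing to roughly ten lines per run. For positive integer epoch it reduces to a floor division with a floor of one:

    def print_interval(epoch):
        # equivalent to the if/else chain in the patch above
        return max(epoch//10,1)

    # a progress line is printed whenever i%print_interval(epoch)==0
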
self.test_loss=sess.run(test_loss,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) self.test_accuracy=sess.run(test_accuracy,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) self.test_loss=self.test_loss.astype(np.float32) - self.test_accuracy=self.test_accuracy.astype(np.float16) + self.test_accuracy=self.test_accuracy.astype(np.float32) print('test loss:{0:.6f}'.format(self.test_loss)) print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) sess.close() @@ -696,15 +697,16 @@ def train_info(self): print() print('-------------------------------------') print() - print('train loss:{0}'.format(self.train_loss)) - print() - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + print('train loss:{0:.6f}'.format(self.train_loss)) + if self.acc==True: + print() + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) return def test_info(self): print() - print('test loss:{0}'.format(self.test_loss)) + print('test loss:{0:.6f}'.format(self.test_loss)) print() print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) return @@ -731,21 +733,22 @@ def train_visual(self): plt.title('train accuracy') plt.xlabel('epoch') plt.ylabel('accuracy') - print('train loss:{0}'.format(self.train_loss)) - print() - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + print('train loss:{0:.6f}'.format(self.train_loss)) + if self.acc==True: + print() + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) return def comparison(self): print() - print('train loss:{0}'.format(self.train_loss)) + print('train loss:{0:.6f}'.format(self.train_loss)) print() print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) print() print('-------------------------------------') print() - print('test loss:{0}'.format(self.test_loss)) + print('test loss:{0:.6f}'.format(self.test_loss)) print() print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) return @@ -761,12 +764,12 @@ def network(self): if type(self.function)==list: if i==1: if self.conv[i-1][3]=='SAME': - print('conv_layer_{0}\t{1}\t{3}\t{4}\t{5}\t{6}'.format(i,self.conv[i-1][0],self.data_shape[1],self.conv[i-1][1],np.prod(self.weight_conv[i-1].shape)+self.weight_conv[i-1].shape[3],self.function[i-1])) + print('conv_layer_{0}\t{1}\t{2}\t{3}\t{4}\t{5}'.format(i,self.conv[i-1][0],self.data_shape[1],self.conv[i-1][1],np.prod(self.weight_conv[i-1].shape)+self.weight_conv[i-1].shape[3],self.function[i-1])) total_params+=np.prod(self.weight_conv[i-1].shape)+self.weight_conv[i-1].shape[3] print() else: conv_output_shape=int((self.data_shape[1]-self.conv[i-1][0])/self.conv[i-1][2]+1) - print('conv_layer_{0}\t{1}\t{3}\t{4}\t{5}\t{6}'.format(i,self.conv[i-1][0],conv_output_shape,self.conv[i-1][1],np.prod(self.weight_conv[i-1].shape)+self.weight_conv[i-1].shape[3],self.function[i-1])) + print('conv_layer_{0}\t{1}\t{2}\t{3}\t{4}\t{5}'.format(i,self.conv[i-1][0],conv_output_shape,self.conv[i-1][1],np.prod(self.weight_conv[i-1].shape)+self.weight_conv[i-1].shape[3],self.function[i-1])) total_params+=np.prod(self.weight_conv[i-1].shape)+self.weight_conv[i-1].shape[3] print() if type(self.max_pool)==list and self.max_pool[i-1][0]!=0: @@ -787,12 +790,12 @@ def network(self): print() elif i>0 and i0 and i Date: Thu, 16 Jul 2020 15:06:55 +0800 Subject: [PATCH 25/72] Update GRU.py --- Note/nn/RNN/GRU.py | 81 ++++++++++++++++++++++++++++++---------------- 1 file changed, 53 insertions(+), 28 deletions(-) diff --git a/Note/nn/RNN/GRU.py b/Note/nn/RNN/GRU.py 
index cc3721388..8dd217d56 100644 --- a/Note/nn/RNN/GRU.py +++ b/Note/nn/RNN/GRU.py @@ -436,6 +436,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.l2=l2 self.optimizer=optimizer self.lr=lr + self.acc=acc if continue_train!=True: if self.continue_train==True: continue_train=True @@ -651,9 +652,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) if acc==True: - self.train_accuracy_list.append(float(train_acc)) + self.train_accuracy_list.append(train_acc.astype(np.float32)) self.train_accuracy=train_acc - self.train_accuracy=self.train_accuracy.astype(np.float16) + self.train_accuracy=self.train_accuracy.astype(np.float32) else: random=np.arange(self.shape0) np.random.shuffle(random) @@ -669,9 +670,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.train_loss=self.train_loss.astype(np.float32) if acc==True: accuracy=sess.run(train_accuracy,feed_dict=feed_dict) - self.train_accuracy_list.append(float(accuracy)) + self.train_accuracy_list.append(accuracy.astype(np.float32)) self.train_accuracy=accuracy - self.train_accuracy=self.train_accuracy.astype(np.float16) + self.train_accuracy=self.train_accuracy.astype(np.float32) if epoch%10!=0: temp_epoch=epoch-epoch%10 temp_epoch=int(temp_epoch/10) @@ -697,7 +698,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, print() print('last loss:{0:.6f}'.format(self.train_loss)) if acc==True: - print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + if self.predicate==False: + print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('accuracy:{0:.6f}'.format(self.train_accuracy)) if train_summary_path!=None: train_writer.close() if continue_train==True: @@ -873,14 +877,17 @@ def test(self,test_data,test_labels,batch=None): self.test_loss=test_loss self.test_accuracy=test_acc self.test_loss=self.test_loss.astype(np.float32) - self.test_accuracy=self.test_accuracy.astype(np.float16) + self.test_accuracy=self.test_accuracy.astype(np.float32) else: self.test_loss=sess.run(test_loss,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) self.test_accuracy=sess.run(test_accuracy,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) self.test_loss=self.test_loss.astype(np.float32) - self.test_accuracy=self.test_accuracy.astype(np.float16) + self.test_accuracy=self.test_accuracy.astype(np.float32) print('test loss:{0:.6f}'.format(self.test_loss)) - print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + if self.predicate==False: + print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + else: + print('test accuracy:{0:.6f}'.format(self.test_accuracy)) sess.close() return @@ -901,17 +908,24 @@ def train_info(self): print() print('-------------------------------------') print() - print('train loss:{0}'.format(self.train_loss)) - print() - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + print('train loss:{0:.6f}'.format(self.train_loss)) + if self.acc==True: + print() + if self.predicate==False: + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('train accuracy:{0:.6f}'.format(self.train_accuracy)) return def test_info(self): print() - print('test loss:{0}'.format(self.test_loss)) + print('test loss:{0:.6f}'.format(self.test_loss)) print() - print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + 
if self.predicate==False: + print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + else: + print('test accuracy:{0:.6f}'.format(self.test_accuracy)) return @@ -931,28 +945,37 @@ def train_visual(self): plt.title('train loss') plt.xlabel('epoch') plt.ylabel('loss') - plt.figure(2) - plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) - plt.title('train accuracy') - plt.xlabel('epoch') - plt.ylabel('accuracy') - print('train loss:{0}'.format(self.train_loss)) - print() - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + if self.acc==True: + plt.figure(2) + plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) + plt.title('train accuracy') + plt.xlabel('epoch') + plt.ylabel('accuracy') + print('train loss:{0:.6f}'.format(self.train_loss)) + if self.acc==True: + print() + if self.predicate==False: + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('train accuracy:{0:.6f}'.format(self.train_accuracy)) return def comparison(self): print() - print('train loss:{0}'.format(self.train_loss)) + print('train loss:{0:.6f}'.format(self.train_loss)) print() print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - print() - print('-------------------------------------') - print() - print('test loss:{0}'.format(self.test_loss)) - print() - print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + if self.test_flag: + print() + print('-------------------------------------') + print() + print('test loss:{0:.6f}'.format(self.test_loss)) + print() + if self.predicate==False: + print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + else: + print('test accuracy:{0:.6f}'.format(self.test_accuracy)) return @@ -1035,6 +1058,7 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.l2,output_file) pickle.dump(self.optimizer,output_file) pickle.dump(self.lr,output_file) + pickle.dump(self.acc,output_file) pickle.dump(self.train_loss,output_file) pickle.dump(self.train_accuracy,output_file) pickle.dump(self.test_flag,output_file) @@ -1100,6 +1124,7 @@ def restore(self,model_path): self.l2=pickle.load(input_file) self.optimizer=pickle.load(input_file) self.lr=pickle.load(input_file) + self.acc=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_accuracy=pickle.load(input_file) self.test_flag=pickle.load(input_file) From 3a51358758894d3ac29383bafefc808b1ad1077f Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 16 Jul 2020 15:07:13 +0800 Subject: [PATCH 26/72] Update LSTM.py --- Note/nn/RNN/LSTM.py | 78 +++++++++++++++++++++++++++++---------------- 1 file changed, 51 insertions(+), 27 deletions(-) diff --git a/Note/nn/RNN/LSTM.py b/Note/nn/RNN/LSTM.py index 9ebe4029b..12daa7209 100644 --- a/Note/nn/RNN/LSTM.py +++ b/Note/nn/RNN/LSTM.py @@ -740,9 +740,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) if acc==True: - self.train_accuracy_list.append(float(train_acc)) + self.train_accuracy_list.append(train_acc.astype(np.float32)) self.train_accuracy=train_acc - self.train_accuracy=self.train_accuracy.astype(np.float16) + self.train_accuracy=self.train_accuracy.astype(np.float32) else: random=np.arange(self.shape0) np.random.shuffle(random) @@ -758,9 +758,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.train_loss=self.train_loss.astype(np.float32) if acc==True: 
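
These RNN patches branch every accuracy report on self.predicate: classification runs report a percentage, while predictive (regression-style) runs report the raw metric value. The repeated branches could be reduced to one helper (a sketch, not code from the repo):

    def format_accuracy(value,predicate):
        # predicate is the repo's own flag distinguishing prediction
        # tasks from classification tasks
        if predicate==False:
            return 'accuracy:{0:.3f}%'.format(value*100)
        return 'accuracy:{0:.6f}'.format(value)
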
accuracy=sess.run(train_accuracy,feed_dict=feed_dict) - self.train_accuracy_list.append(float(accuracy)) + self.train_accuracy_list.append(accuracy.astype(np.float32)) self.train_accuracy=accuracy - self.train_accuracy=self.train_accuracy.astype(np.float16) + self.train_accuracy=self.train_accuracy.astype(np.float32) if epoch%10!=0: temp_epoch=epoch-epoch%10 temp_epoch=int(temp_epoch/10) @@ -786,7 +786,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, print() print('last loss:{0:.6f}'.format(self.train_loss)) if acc==True: - print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + if self.predicate==False: + print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('accuracy:{0:.6f}'.format(self.train_accuracy)) if train_summary_path!=None: train_writer.close() if continue_train==True: @@ -983,14 +986,17 @@ def test(self,test_data,test_labels,batch=None): self.test_loss=test_loss self.test_accuracy=test_acc self.test_loss=self.test_loss.astype(np.float32) - self.test_accuracy=self.test_accuracy.astype(np.float16) + self.test_accuracy=self.test_accuracy.astype(np.float32) else: self.test_loss=sess.run(test_loss,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) self.test_accuracy=sess.run(test_accuracy,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) self.test_loss=self.test_loss.astype(np.float32) - self.test_accuracy=self.test_accuracy.astype(np.float16) + self.test_accuracy=self.test_accuracy.astype(np.float32) print('test loss:{0:.6f}'.format(self.test_loss)) - print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + if self.predicate==False: + print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + else: + print('test accuracy:{0:.6f}'.format(self.test_accuracy)) sess.close() return @@ -1011,17 +1017,24 @@ def train_info(self): print() print('-------------------------------------') print() - print('train loss:{0}'.format(self.train_loss)) - print() - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + print('train loss:{0:.6f}'.format(self.train_loss)) + if self.acc==True: + print() + if self.predicate==False: + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('train accuracy:{0:.6f}'.format(self.train_accuracy)) return def test_info(self): print() - print('test loss:{0}'.format(self.test_loss)) + print('test loss:{0:.6f}'.format(self.test_loss)) print() - print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + if self.predicate==False: + print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + else: + print('test accuracy:{0:.6f}'.format(self.test_accuracy)) return @@ -1041,14 +1054,19 @@ def train_visual(self): plt.title('train loss') plt.xlabel('epoch') plt.ylabel('loss') - plt.figure(2) - plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) - plt.title('train accuracy') - plt.xlabel('epoch') - plt.ylabel('accuracy') - print('train loss:{0}'.format(self.train_loss)) - print() - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + if self.acc==True: + plt.figure(2) + plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) + plt.title('train accuracy') + plt.xlabel('epoch') + plt.ylabel('accuracy') + print('train loss:{0:.6f}'.format(self.train_loss)) + if self.acc==True: + print() + if self.predicate==False: + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('train accuracy:{0:.6f}'.format(self.train_accuracy)) return @@ -1057,12 
+1075,16 @@ def comparison(self): print('train loss:{0}'.format(self.train_loss)) print() print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - print() - print('-------------------------------------') - print() - print('test loss:{0}'.format(self.test_loss)) - print() - print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + if self.test_flag: + print() + print('-------------------------------------') + print() + print('test loss:{0:.6f}'.format(self.test_loss)) + print() + if self.predicate==False: + print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + else: + print('test accuracy:{0:.6f}'.format(self.test_accuracy)) return @@ -1153,6 +1175,7 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.l2,output_file) pickle.dump(self.optimizer,output_file) pickle.dump(self.lr,output_file) + pickle.dump(self.acc,output_file) pickle.dump(self.train_loss,output_file) pickle.dump(self.train_accuracy,output_file) pickle.dump(self.test_flag,output_file) @@ -1224,6 +1247,7 @@ def restore(self,model_path): self.l2=pickle.load(input_file) self.optimizer=pickle.load(input_file) self.lr=pickle.load(input_file) + self.acc=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_accuracy=pickle.load(input_file) self.test_flag=pickle.load(input_file) From 60b244f17bf4233525292858428cf6c5df111904 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 16 Jul 2020 15:07:52 +0800 Subject: [PATCH 27/72] Update RNN.py --- Note/nn/RNN/RNN.py | 63 ++++++++++++++++++++++++++++++---------------- 1 file changed, 42 insertions(+), 21 deletions(-) diff --git a/Note/nn/RNN/RNN.py b/Note/nn/RNN/RNN.py index 98f8c1d6b..7cad8356e 100644 --- a/Note/nn/RNN/RNN.py +++ b/Note/nn/RNN/RNN.py @@ -343,9 +343,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) if acc==True: - self.train_accuracy_list.append(float(train_acc)) + self.train_accuracy_list.append(train_acc.astype(np.float32)) self.train_accuracy=train_acc - self.train_accuracy=self.train_accuracy.astype(np.float16) + self.train_accuracy=self.train_accuracy.astype(np.float32) else: random=np.arange(self.shape0) np.random.shuffle(random) @@ -361,9 +361,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.train_loss=self.train_loss.astype(np.float32) if acc==True: accuracy=sess.run(train_accuracy,feed_dict=feed_dict) - self.train_accuracy_list.append(float(accuracy)) + self.train_accuracy_list.append(accuracy.astype(np.float32)) self.train_accuracy=accuracy - self.train_accuracy=self.train_accuracy.astype(np.float16) + self.train_accuracy=self.train_accuracy.astype(np.float32) if epoch%10!=0: temp_epoch=epoch-epoch%10 temp_epoch=int(temp_epoch/10) @@ -526,7 +526,10 @@ def test(self,test_data,test_labels,batch=None): self.test_loss=self.test_loss.astype(np.float32) self.test_accuracy=self.test_accuracy.astype(np.float16) print('test loss:{0:.6f}'.format(self.test_loss)) - print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + if self.predicate==False: + print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + else: + print('test accuracy:{0:.6f}'.format(self.test_accuracy)) sess.close() return @@ -548,8 +551,12 @@ def train_info(self): print('-------------------------------------') print() print('train loss:{0}'.format(self.train_loss)) - print() - print('train 
accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + if self.acc==True: + print() + if self.predicate==False: + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('train accuracy:{0:.6f}'.format(self.train_accuracy)) return @@ -557,7 +564,10 @@ def test_info(self): print() print('test loss:{0}'.format(self.test_loss)) print() - print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + if self.predicate==False: + print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + else: + print('test accuracy:{0:.6f}'.format(self.test_accuracy)) return @@ -577,14 +587,19 @@ def train_visual(self): plt.title('train loss') plt.xlabel('epoch') plt.ylabel('loss') - plt.figure(2) - plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) - plt.title('train accuracy') - plt.xlabel('epoch') - plt.ylabel('accuracy') + if self.acc==True: + plt.figure(2) + plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) + plt.title('train accuracy') + plt.xlabel('epoch') + plt.ylabel('accuracy') print('train loss:{0}'.format(self.train_loss)) - print() - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + if self.acc==True: + print() + if self.predicate==False: + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('train accuracy:{0:.6f}'.format(self.train_accuracy)) return @@ -593,12 +608,16 @@ def comparison(self): print('train loss:{0}'.format(self.train_loss)) print() print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - print() - print('-------------------------------------') - print() - print('test loss:{0}'.format(self.test_loss)) - print() - print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + if self.test_flag: + print() + print('-------------------------------------') + print() + print('test loss:{0:.6f}'.format(self.test_loss)) + print() + if self.predicate==False: + print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + else: + print('test accuracy:{0:.6f}'.format(self.test_accuracy)) return @@ -645,6 +664,7 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.l2,output_file) pickle.dump(self.optimizer,output_file) pickle.dump(self.lr,output_file) + pickle.dump(self.acc,output_file) pickle.dump(float(self.train_loss),output_file) pickle.dump(float(self.train_accuracy*100),output_file) pickle.dump(self.test_flag,output_file) @@ -690,6 +710,7 @@ def restore(self,model_path): self.l2=pickle.load(input_file) self.optimizer=pickle.load(input_file) self.lr=pickle.load(input_file) + self.acc=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_accuracy=pickle.load(input_file) self.test_flag=pickle.load(input_file) From 606d1fd1e6ef37c0c820a3888bddd834664a9d38 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 16 Jul 2020 15:08:08 +0800 Subject: [PATCH 28/72] Update M_reluGRU.py --- Note/nn/RNN/M_reluGRU.py | 68 ++++++++++++++++++++++++++-------------- 1 file changed, 44 insertions(+), 24 deletions(-) diff --git a/Note/nn/RNN/M_reluGRU.py b/Note/nn/RNN/M_reluGRU.py index d5490726a..bd9c360ab 100644 --- a/Note/nn/RNN/M_reluGRU.py +++ b/Note/nn/RNN/M_reluGRU.py @@ -619,9 +619,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) if acc==True: - self.train_accuracy_list.append(float(train_acc)) + self.train_accuracy_list.append(train_acc.astype(np.float32)) self.train_accuracy=train_acc - 
self.train_accuracy=self.train_accuracy.astype(np.float16) + self.train_accuracy=self.train_accuracy.astype(np.float32) else: random=np.arange(self.shape0) np.random.shuffle(random) @@ -637,9 +637,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.train_loss=self.train_loss.astype(np.float32) if acc==True: accuracy=sess.run(train_accuracy,feed_dict={self.data:self.train_data,self.labels:self.train_labels}) - self.train_accuracy_list.append(float(accuracy)) + self.train_accuracy_list.append(accuracy.astype(np.float32)) self.train_accuracy=accuracy - self.train_accuracy=self.train_accuracy.astype(np.float16) + self.train_accuracy=self.train_accuracy.astype(np.float32) if epoch%10!=0: temp_epoch=epoch-epoch%10 temp_epoch=int(temp_epoch/10) @@ -820,14 +820,16 @@ def test(self,test_data,test_labels,batch=None,use_nn=True): self.test_loss=test_loss self.test_accuracy=test_acc self.test_loss=self.test_loss.astype(np.float32) - self.test_accuracy=self.test_accuracy.astype(np.float16) + self.test_accuracy=self.test_accuracy.astype(np.float32) else: self.test_loss=sess.run(test_loss,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) self.test_accuracy=sess.run(test_accuracy,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) self.test_loss=self.test_loss.astype(np.float32) - self.test_accuracy=self.test_accuracy.astype(np.float16) - print('test loss:{0:.6f}'.format(self.test_loss)) - print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + self.test_accuracy=self.test_accuracy.astype(np.float32) + if self.predicate==False: + print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + else: + print('test accuracy:{0:.6f}'.format(self.test_accuracy)) sess.close() return @@ -849,8 +851,12 @@ def train_info(self): print('-------------------------------------') print() print('train loss:{0}'.format(self.train_loss)) - print() - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + if self.acc==True: + print() + if self.predicate==False: + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('train accuracy:{0:.6f}'.format(self.train_accuracy)) return @@ -858,7 +864,10 @@ def test_info(self): print() print('test loss:{0}'.format(self.test_loss)) print() - print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + if self.predicate==False: + print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + else: + print('test accuracy:{0:.6f}'.format(self.test_accuracy)) return @@ -878,14 +887,19 @@ def train_visual(self): plt.title('train loss') plt.xlabel('epoch') plt.ylabel('loss') - plt.figure(2) - plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) - plt.title('train accuracy') - plt.xlabel('epoch') - plt.ylabel('accuracy') + if self.acc==True: + plt.figure(2) + plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) + plt.title('train accuracy') + plt.xlabel('epoch') + plt.ylabel('accuracy') print('train loss:{0}'.format(self.train_loss)) - print() - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + if self.acc==True: + print() + if self.predicate==False: + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('train accuracy:{0:.6f}'.format(self.train_accuracy)) return @@ -894,12 +908,16 @@ def comparison(self): print('train loss:{0}'.format(self.train_loss)) print() print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - print() - print('-------------------------------------') - 
print() - print('test loss:{0}'.format(self.test_loss)) - print() - print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + if self.test_flag: + print() + print('-------------------------------------') + print() + print('test loss:{0:.6f}'.format(self.test_loss)) + print() + if self.predicate==False: + print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + else: + print('test accuracy:{0:.6f}'.format(self.test_accuracy)) return @@ -974,6 +992,7 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.l2,output_file) pickle.dump(self.optimizer,output_file) pickle.dump(self.lr,output_file) + pickle.dump(self.acc,output_file) pickle.dump(self.train_loss,output_file) pickle.dump(self.train_accuracy,output_file) pickle.dump(self.test_flag,output_file) @@ -1033,6 +1052,7 @@ def restore(self,model_path): self.l2=pickle.load(input_file) self.optimizer=pickle.load(input_file) self.lr=pickle.load(input_file) + self.acc=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_accuracy=pickle.load(input_file) self.test_flag=pickle.load(input_file) From 7ab2908f5af1b6e7abb7d6c9332c4a827b122090 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 16 Jul 2020 15:16:42 +0800 Subject: [PATCH 29/72] Update Note Architecture.py --- Note/create/Note Architecture.py | 55 +++++++++++++++++++------------- 1 file changed, 33 insertions(+), 22 deletions(-) diff --git a/Note/create/Note Architecture.py b/Note/create/Note Architecture.py index f0bfc84da..dfaecadb2 100644 --- a/Note/create/Note Architecture.py +++ b/Note/create/Note Architecture.py @@ -101,6 +101,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,acc=True,train_su self.batch=batch self.optimizer=optimizer self.lr=lr + self.acc=acc if continue_train!=True: if self.continue_train==True: continue_train=True @@ -200,9 +201,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,acc=True,train_su self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) if acc==True: - self.train_accuracy_list.append(float(train_acc)) + self.train_accuracy_list.append(train_acc.astype(np.float32)) self.train_accuracy=train_acc - self.train_accuracy=self.train_accuracy.astype(np.float16) + self.train_accuracy=self.train_accuracy.astype(np.float32) else: random=np.arange(self.shape0) np.random.shuffle(random) @@ -217,9 +218,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,acc=True,train_su self.train_loss=self.train_loss.astype(np.float32) if acc==True: accuracy=sess.run(train_accuracy,feed_dict=feed_dict) - self.train_accuracy_list.append(float(accuracy)) + self.train_accuracy_list.append(accuracy.astype(np.float32)) self.train_accuracy=accuracy - self.train_accuracy=self.train_accuracy.astype(np.float16) + self.train_accuracy=self.train_accuracy.astype(np.float32) if epoch%10!=0: temp_epoch=epoch-epoch%10 temp_epoch=int(temp_epoch/10) @@ -245,8 +246,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,acc=True,train_su print() print('last loss:{0:.6f}'.format(self.train_loss)) if acc==True: - - + if len(self.labels_shape)==2: + print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('accuracy:{0:.3f}'.format(self.train_accuracy)) if train_summary_path!=None: train_writer.close() if continue_train==True: @@ -319,14 +322,15 @@ def test(self,test_data,test_labels,batch=None): self.test_loss=test_loss self.test_accuracy=test_acc 
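# Side note (a sketch, not part of the patch): these hunks swap the earlier
# astype(np.float16) casts for np.float32 because float16 keeps only about
# three significant decimal digits, so nearby accuracy values collapse to the
# same printed number. Illustration with plain numpy:
import numpy as np
acc = 0.97641
print(np.float16(acc))  # ~0.9766 -- rounded to the nearest float16
print(np.float32(acc))  # 0.97641 -- enough precision for the logged metrics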
self.test_loss=self.test_loss.astype(np.float32) - self.test_accuracy=self.test_accuracy.astype(np.float16) + self.test_accuracy=self.test_accuracy.astype(np.float32) else: self.test_loss=sess.run(test_loss,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) self.test_accuracy=sess.run(test_accuracy,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) self.test_loss=self.test_loss.astype(np.float32) - self.test_accuracy=self.test_accuracy.astype(np.float16) + self.test_accuracy=self.test_accuracy.astype(np.float32) print('test loss:{0:.6f}'.format(self.test_loss)) - print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + + sess.close() return @@ -346,16 +350,16 @@ def train_info(self): print('-------------------------------------') print() print('train loss:{0}'.format(self.train_loss)) - print() - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + + return def test_info(self): print() print('test loss:{0}'.format(self.test_loss)) - print() - print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + + return @@ -375,14 +379,15 @@ def train_visual(self): plt.title('train loss') plt.xlabel('epoch') plt.ylabel('loss') - plt.figure(2) - plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) - plt.title('train accuracy') - plt.xlabel('epoch') - plt.ylabel('accuracy') + if self.acc==True: + plt.figure(2) + plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) + plt.title('train accuracy') + plt.xlabel('epoch') + plt.ylabel('accuracy') print('train loss:{0}'.format(self.train_loss)) - print() - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + + return @@ -395,8 +400,8 @@ def comparison(self): print('-------------------------------------') print() print('test loss:{0}'.format(self.test_loss)) - print() - print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + + return @@ -411,6 +416,9 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.epoch,output_file) pickle.dump(self.optimizer,output_file) pickle.dump(self.lr,output_file) + + + pickle.dump(self.acc,output_file) pickle.dump(self.train_loss,output_file) pickle.dump(self.train_accuracy,output_file) pickle.dump(self.test_flag,output_file) @@ -440,6 +448,9 @@ def restore(self,model_path): self.epoch=pickle.load(input_file) self.optimizer=pickle.load(input_file) self.lr=pickle.load(input_file) + + + self.acc=pickle.load(input_file) self.total_time=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_accuracy=pickle.load(input_file) From e0134de71b77122f3a3c01d1828754158aefbbca Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 16 Jul 2020 15:24:23 +0800 Subject: [PATCH 30/72] Update FNN.py --- Note/nn/FNN/FNN.py | 796 +++++++++++++++++++-------------------------- 1 file changed, 330 insertions(+), 466 deletions(-) diff --git a/Note/nn/FNN/FNN.py b/Note/nn/FNN/FNN.py index fc4831838..cbc10b5a5 100644 --- a/Note/nn/FNN/FNN.py +++ b/Note/nn/FNN/FNN.py @@ -3,7 +3,6 @@ import pandas as pd import matplotlib.pyplot as plt import pickle -from keras.preprocessing.image import ImageDataGenerator import time @@ -40,7 +39,7 @@ def read_data_csv(path,dtype=None,header=None): return np.array(data,dtype=dtype) -class cnn: +class fnn: def __init__(self,train_data=None,train_labels=None): self.graph=tf.Graph() self.train_data=train_data @@ -50,26 +49,22 @@ def __init__(self,train_data=None,train_labels=None): self.shape0=train_data.shape[0] 
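# Side note (a minimal sketch assuming the TensorFlow 1.x API used throughout
# these files; not part of the patch): the [None, None] placeholders introduced
# here leave both batch size and feature width unspecified, so a single graph
# accepts any 2-D input at feed time.
import numpy as np
import tensorflow as tf  # 1.x
demo = tf.placeholder(dtype=tf.float32, shape=[None, None], name='demo_data')
doubled = demo * 2
with tf.Session() as sess:
    print(sess.run(doubled, feed_dict={demo: np.ones((3, 5), np.float32)}).shape)  # (3, 5)
    print(sess.run(doubled, feed_dict={demo: np.ones((8, 2), np.float32)}).shape)  # (8, 2)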
self.data_shape=train_data.shape self.labels_shape=train_labels.shape - self.data=tf.placeholder(dtype=train_data.dtype,shape=[None,None,None,None],name='data') - self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,None],name='labels') + self.data=tf.placeholder(dtype=train_data.dtype,shape=[None,None],name='data') + if len(self.labels_shape)==2: + self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None,None],name='labels') + else: + self.labels=tf.placeholder(dtype=train_labels.dtype,shape=[None],name='labels') self.data_dtype=train_data.dtype self.labels_dtype=train_labels.dtype - self.conv=[] - self.max_pool=None - self.avg_pool=None - self.fc=[] + self.hidden=[] + self.hidden_layers=None + self.layers=None self.function=[] - self.weight_conv=[] - self.weight_fc=[] - self.bias_conv=[] - self.bias_fc=[] - self.last_weight_conv=[] - self.last_weight_fc=[] - self.last_bias_conv=[] - self.last_bias_fc=[] + self.weight=[] + self.bias=[] + self.last_weight=[] + self.last_bias=[] self.activation=[] - self.activation_fc=[] - self.flattened_len=None self.batch=None self.epoch=None self.l2=None @@ -82,8 +77,7 @@ def __init__(self,train_data=None,train_labels=None): self.train_accuracy_list=[] self.test_loss=None self.test_accuracy=None - self.continue_train=None - self.continue_flag=None + self.continue_train=False self.flag=None self.end_flag=False self.test_flag=None @@ -92,37 +86,15 @@ def __init__(self,train_data=None,train_labels=None): self.use_cpu_gpu='/gpu:0' - def data_enhance(self,rotation_range=40,width_shift_range=0.2,height_shift_range=0.2, - shear_range=0.2,zoom_range=0.2,horizontal_flip=True,fill_mode='nearest'): - datagen=ImageDataGenerator(rotation_range=rotation_range,width_shift_range=width_shift_range,height_shift_range=height_shift_range, - shear_range=shear_range,zoom_range=zoom_range,horizontal_flip=horizontal_flip,fill_mode=fill_mode) - for data in datagen.flow(self.train_data,batch_size=self.train_data.shape[0]): - self.train_data=data - break - return - - def weight_init(self,shape,mean,stddev,name): return tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=self.dtype),name=name) def bias_init(self,shape,mean,stddev,name): return tf.Variable(tf.random.normal(shape=shape,mean=mean,stddev=stddev,dtype=self.dtype),name=name) - - - def conv_f(self,data,weight,i): - return tf.nn.conv2d(data,weight,strides=[1,self.conv[i][2],self.conv[i][2],1],padding=self.conv[i][3]) - - - def max_pool_f(self,data,i): - return tf.nn.max_pool(data,ksize=[1,self.max_pool[i][0],self.max_pool[i][0],1],strides=[1,self.max_pool[i][1],self.max_pool[i][1],1],padding=self.max_pool[i][2]) - - - def avg_pool_f(self,data,i): - return tf.nn.avg_pool(data,ksize=[1,self.avg_pool[i][0],self.avg_pool[i][0],1],strides=[1,self.avg_pool[i][1],self.avg_pool[i][1],1],padding=self.avg_pool[i][2]) - + - def structure(self,conv=None,max_pool=None,avg_pool=None,fc=None,function=None,mean=0,stddev=0.07,dtype=tf.float32): + def structure(self,hidden,function,layers=None,mean=0,stddev=0.07,dtype=np.float32): with self.graph.as_default(): self.continue_train=False self.total_epoch=0 @@ -131,264 +103,159 @@ def structure(self,conv=None,max_pool=None,avg_pool=None,fc=None,function=None,m self.test_flag=False self.train_loss_list.clear() self.train_accuracy_list.clear() - self.weight_conv.clear() - self.bias_conv.clear() - self.weight_fc.clear() - self.bias_fc.clear() - self.conv=conv - self.max_pool=max_pool - self.avg_pool=avg_pool - self.fc=fc + self.weight.clear() + 
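# Side note (plain-Python sketch, not part of the patch) of the shape
# bookkeeping the rewritten structure() performs: a scalar `hidden` repeats one
# width for layers-2 hidden layers, while a list gives each layer its own
# width; consecutive dimensions are chained into weight shapes.
def layer_shapes(n_in, hidden, n_out, layers=None):
    widths = [hidden] * (layers - 2) if isinstance(hidden, int) else list(hidden)
    dims = [n_in] + widths + [n_out]
    return [(dims[i], dims[i + 1]) for i in range(len(dims) - 1)]

print(layer_shapes(784, 128, 10, layers=4))  # [(784, 128), (128, 128), (128, 10)]
print(layer_shapes(784, [256, 64], 10))      # [(784, 256), (256, 64), (64, 10)]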
self.bias.clear() + self.last_weight=[] + self.last_bias=[] + self.hidden=hidden self.function=function - self.mean=mean - self.stddev=stddev + self.layers=layers self.dtype=dtype self.time=None with tf.name_scope('parameter_initialization'): - for i in range(len(self.conv)): - if i==0: - self.weight_conv.append(self.weight_init([self.conv[i][0],self.conv[i][0],self.data_shape[3],self.conv[i][1]],mean=mean,stddev=stddev,name='conv_{0}_weight'.format(i+1))) - self.bias_conv.append(self.bias_init([self.conv[i][1]],mean=mean,stddev=stddev,name='conv_{0}_bias'.format(i+1))) - else: - self.weight_conv.append(self.weight_init([self.conv[i][0],self.conv[i][0],self.conv[i-1][1],self.conv[i][1]],mean=mean,stddev=stddev,name='conv_{0}_weight'.format(i+1))) - self.bias_conv.append(self.bias_init([self.conv[i][1]],mean=mean,stddev=stddev,name='conv_{0}_bias'.format(i+1))) - return - - - def forward_propagation_fc(self,data,dropout,shape,use_nn): - with self.graph.as_default(): - for i in range(len(self.fc)): - if type(self.cpu_gpu)==str: - self.forward_cpu_gpu[1].append(self.cpu_gpu) - elif len(self.cpu_gpu)!=len(self.fc): - self.forward_cpu_gpu[1].append(self.cpu_gpu[1][0]) - else: - self.forward_cpu_gpu[1].append(self.cpu_gpu[1][i]) - if use_nn==True: - for i in range(len(self.fc)): - if type(self.use_cpu_gpu)==str: - self.forward_cpu_gpu[1].append(self.use_cpu_gpu) - else: - self.forward_cpu_gpu[1].append(self.use_cpu_gpu[1][i]) - if use_nn==False: - weight_fc=self.weight_fc - bias_fc=self.bias_fc - else: - weight_fc=[] - bias_fc=[] - for i in range(len(self.last_weight_fc)): - weight_fc.append(tf.constant(self.last_weight_fc[i])) - bias_fc.append(tf.constant(self.last_bias_fc[i])) - if self.continue_train==True and self.flag==1: - self.weight_fc=[x for x in range(len(self.fc)+1)] - self.bias_fc=[x for x in range(len(self.fc)+1)] - for i in range(len(self.fc)+1): - if i==len(self.fc): - self.weight_fc[i]=tf.Variable(self.last_weight_fc[i],name='out_weight') - self.bias_fc[i]=tf.Variable(self.last_bias_fc[i],name='out_bias') - else: - self.weight_fc[i]=tf.Variable(self.last_weight_fc[i],name='fc_{0}_weight'.format(i+1)) - self.bias_fc[i]=tf.Variable(self.last_bias_fc[i],name='fc_{0}_bias'.format(i+1)) - self.flag=0 - self.activation_fc=[x for x in range(len(self.fc))] - if use_nn!=True and len(self.weight_fc)!=len(self.fc)+1: - for i in range(len(self.fc)+1): - if i==0: - self.weight_fc.append(self.weight_init([shape,self.fc[i][0]],mean=self.mean,stddev=self.stddev,name='fc_{0}_weight'.format(i+1))) - self.bias_fc.append(self.bias_init([self.fc[i][0]],mean=self.mean,stddev=self.stddev,name='fc_{0}_bias'.format(i+1))) - elif i==len(self.fc): - self.weight_fc.append(self.weight_init([self.fc[i-1][0],self.train_labels.shape[1]],mean=self.mean,stddev=self.stddev,name='output_weight')) - self.bias_fc.append(self.bias_init([self.train_labels.shape[1]],mean=self.mean,stddev=self.stddev,name='output_bias')) - else: - self.weight_fc.append(self.weight_init([self.fc[i-1][0],self.fc[i][0]],mean=self.mean,stddev=self.stddev,name='fc_{0}_weight'.format(i+1))) - self.bias_fc.append(self.bias_init([self.fc[i][0]],mean=self.mean,stddev=self.stddev,name='fc_{0}_bias'.format(i+1))) - if type(dropout)==list: - data=tf.nn.dropout(data,dropout[0]) - for i in range(len(self.fc)): - with tf.device(self.forward_cpu_gpu[1][i]): - if self.fc[i][1]=='sigmoid': + if self.layers!=None: + self.hidden_layers=self.layers-2 + for i in range(self.hidden_layers+1): if i==0: - 
self.activation_fc[i]=tf.nn.sigmoid(tf.matmul(data,weight_fc[i])+bias_fc[i]) - if type(dropout)==list: - self.activation_fc[i]=tf.nn.dropout(self.activation_fc[i],dropout[i+1]) - else: - self.activation_fc[i]=tf.nn.sigmoid(tf.matmul(self.activation_fc[i-1],weight_fc[i])+bias_fc[i]) - if type(dropout)==list: - self.activation_fc[i]=tf.nn.dropout(self.activation_fc[i],dropout[i+1]) - if self.fc[i][1]=='tanh': - if i==0: - self.activation_fc[i]=tf.nn.tanh(tf.matmul(data,weight_fc[i])+bias_fc[i]) - if type(dropout)==list: - self.activation_fc[i]=tf.nn.dropout(self.activation_fc[i],dropout[i+1]) - else: - self.activation_fc[i]=tf.nn.tanh(tf.matmul(self.activation_fc[i-1],weight_fc[i])+bias_fc[i]) - if type(dropout)==list: - self.activation_fc[i]=tf.nn.dropout(self.activation_fc[i],dropout[i+1]) - if self.fc[i][1]=='relu': - if i==0: - self.activation_fc[i]=tf.nn.relu(tf.matmul(data,weight_fc[i])+bias_fc[i]) - if type(dropout)==list: - self.activation_fc[i]=tf.nn.dropout(self.activation_fc[i],dropout[i+1]) - else: - self.activation_fc[i]=tf.nn.relu(tf.matmul(self.activation_fc[i-1],weight_fc[i])+bias_fc[i]) - if type(dropout)==list: - self.activation_fc[i]=tf.nn.dropout(self.activation_fc[i],dropout[i+1]) - if self.fc[i][1]=='elu': + self.weight.append(self.weight_init([self.data_shape[1],self.hidden],mean=mean,stddev=stddev,name='weight_{0}'.format(i+1))) + self.bias.append(self.bias_init([self.hidden],mean=mean,stddev=stddev,name='bias_{0}'.format(i+1))) + if i==self.hidden_layers: + if len(self.labels_shape)==2: + self.weight.append(self.weight_init([self.hidden,self.labels_shape[1]],mean=mean,stddev=stddev,name='weight_output')) + self.bias.append(self.bias_init([self.labels_shape[1]],mean=mean,stddev=stddev,name='bias_output')) + else: + self.weight.append(self.weight_init([self.hidden,1],mean=mean,stddev=stddev,name='weight_output')) + self.bias.append(self.bias_init([1],mean=mean,stddev=stddev,name='bias_output')) + elif i>0 and i0 and i<=len(self.hidden)-1: + self.weight.append(self.weight_init([self.hidden[i-1],self.hidden[i]],mean=mean,stddev=stddev,name='weight_{0}'.format(i+1))) + self.bias.append(self.bias_init([self.hidden[i]],mean=mean,stddev=stddev,name='bias_{0}'.format(i+1))) + return + + def forward_propagation(self,data,dropout=None,use_nn=False): with self.graph.as_default(): - self.forward_cpu_gpu=[[],[]] - for i in range(len(self.conv)): + forward_cpu_gpu=[] + for i in range(self.hidden_layers): if type(self.cpu_gpu)==str: - self.forward_cpu_gpu[0].append(self.cpu_gpu) - elif len(self.cpu_gpu[0][0])!=len(self.conv): - self.forward_cpu_gpu[0].append(self.cpu_gpu[0][0]) + forward_cpu_gpu.append(self.cpu_gpu) + elif len(self.cpu_gpu)!=self.hidden_layers: + forward_cpu_gpu.append(self.cpu_gpu[0]) else: - self.forward_cpu_gpu[0].append(self.cpu_gpu[0][i]) + forward_cpu_gpu.append(self.cpu_gpu[i]) if use_nn==True: - for i in range(len(self.conv)): + for i in range(self.hidden_layers): if type(self.use_cpu_gpu)==str: - self.forward_cpu_gpu.append(self.use_cpu_gpu) + forward_cpu_gpu.append(self.use_cpu_gpu) else: - self.forward_cpu_gpu.append(self.use_cpu_gpu[0][i]) + forward_cpu_gpu.append(self.use_cpu_gpu[i]) if use_nn==False: - weight_conv=self.weight_conv - bias_conv=self.bias_conv + weight=self.weight + bias=self.bias else: - weight_conv=[] - bias_conv=[] - for i in range(len(self.last_weight_conv)): - weight_conv.append(tf.constant(self.last_weight_conv[i])) - bias_conv.append(tf.constant(self.last_bias_conv[i])) - self.activation=[x for x in range(len(self.conv))] + 
weight=[] + bias=[] + for i in range(len(self.last_weight)): + weight.append(tf.constant(self.last_weight[i])) + bias.append(tf.constant(self.last_bias[i])) + self.activation=[x for x in range(self.hidden_layers)] + if type(dropout)==list: + data=tf.nn.dropout(data,dropout[0]) with tf.name_scope('forward_propagation'): - for i in range(len(self.conv)): - with tf.device(self.forward_cpu_gpu[0][i]): + for i in range(self.hidden_layers): + with tf.device(forward_cpu_gpu[i]): if type(self.function)==list: if self.function[i]=='sigmoid': if i==0: - self.activation[i]=tf.nn.sigmoid(self.conv_f(data,weight_conv[i],i)+bias_conv[i]) - if type(self.max_pool)==list and self.max_pool[i][0]!=0: - self.activation[i]=self.max_pool_f(self.activation[i],i) - if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: - self.activation[i]=self.avg_pool_f(self.activation[i],i) + self.activation[i]=tf.nn.sigmoid(tf.matmul(data,weight[i])+bias[i]) + if type(dropout)==list: + self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) else: - self.activation[i]=tf.nn.sigmoid(self.conv_f(self.activation[i-1],weight_conv[i],i)+bias_conv[i]) - if type(self.max_pool)==list and self.max_pool[i][0]!=0: - self.activation[i]=self.max_pool_f(self.activation[i],i) - if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: - self.activation[i]=self.avg_pool_f(self.activation[i],i) + self.activation[i]=tf.nn.sigmoid(tf.matmul(self.activation[i-1],weight[i])+bias[i]) + if type(dropout)==list: + self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) if self.function[i]=='tanh': if i==0: - self.activation[i]=tf.nn.tanh(self.conv_f(data,weight_conv[i],i)+bias_conv[i]) - if type(self.max_pool)==list and self.max_pool[i][0]!=0: - self.activation[i]=self.max_pool_f(self.activation[i],i) - if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: - self.activation[i]=self.avg_pool_f(self.activation[i],i) + self.activation[i]=tf.nn.tanh(tf.matmul(data,weight[i])+bias[i]) + if type(dropout)==list: + self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) else: - self.activation[i]=tf.nn.tanh(self.conv_f(self.activation[i-1],weight_conv[i],i)+bias_conv[i]) - if type(self.max_pool)==list and self.max_pool[i][0]!=0: - self.activation[i]=self.max_pool_f(self.activation[i],i) - if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: - self.activation[i]=self.avg_pool_f(self.activation[i],i) + self.activation[i]=tf.nn.tanh(tf.matmul(self.activation[i-1],weight[i])+bias[i]) + if type(dropout)==list: + self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) if self.function[i]=='relu': if i==0: - self.activation[i]=tf.nn.relu(self.conv_f(data,weight_conv[i],i)+bias_conv[i]) - if type(self.max_pool)==list and self.max_pool[i][0]!=0: - self.activation[i]=self.max_pool_f(self.activation[i],i) - if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: - self.activation[i]=self.avg_pool_f(self.activation[i],i) + self.activation[i]=tf.nn.relu(tf.matmul(data,weight[i])+bias[i]) + if type(dropout)==list: + self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) else: - self.activation[i]=tf.nn.relu(self.conv_f(self.activation[i-1],weight_conv[i],i)+bias_conv[i]) - if type(self.max_pool)==list and self.max_pool[i][0]!=0: - self.activation[i]=self.max_pool_f(self.activation[i],i) - if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: - self.activation[i]=self.avg_pool_f(self.activation[i],i) + self.activation[i]=tf.nn.relu(tf.matmul(self.activation[i-1],weight[i])+bias[i]) + if type(dropout)==list: + 
self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) if self.function[i]=='elu': if i==0: - self.activation[i]=tf.nn.elu(self.conv_f(data,weight_conv[i],i)+bias_conv[i]) - if type(self.max_pool)==list and self.max_pool[i][0]!=0: - self.activation[i]=self.max_pool_f(self.activation[i],i) - if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: - self.activation[i]=self.avg_pool_f(self.activation[i],i) + self.activation[i]=tf.nn.elu(tf.matmul(data,weight[i])+bias[i]) + if type(dropout)==list: + self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) else: - self.activation[i]=tf.nn.elu(self.conv_f(self.activation[i-1],weight_conv[i],i)+bias_conv[i]) - if type(self.max_pool)==list and self.max_pool[i][0]!=0: - self.activation[i]=self.max_pool_f(self.activation[i],i) - if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: - self.activation[i]=self.avg_pool_f(self.activation[i],i) + self.activation[i]=tf.nn.elu(tf.matmul(self.activation[i-1],weight[i])+bias[i]) + if type(dropout)==list: + self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) elif type(self.function)==str: if self.function=='sigmoid': if i==0: - self.activation[i]=tf.nn.sigmoid(self.conv_f(data,weight_conv[i],i)+bias_conv[i]) - if type(self.max_pool)==list and self.max_pool[i][0]!=0: - self.activation[i]=self.max_pool_f(self.activation[i],i) - if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: - self.activation[i]=self.avg_pool_f(self.activation[i],i) + self.activation[i]=tf.nn.sigmoid(tf.matmul(data,weight[i])+bias[i]) + if type(dropout)==list: + self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) else: - self.activation[i]=tf.nn.sigmoid(self.conv_f(self.activation[i-1],weight_conv[i],i)+bias_conv[i]) - if type(self.max_pool)==list and self.max_pool[i][0]!=0: - self.activation[i]=self.max_pool_f(self.activation[i],i) - if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: - self.activation[i]=self.avg_pool_f(self.activation[i],i) + self.activation[i]=tf.nn.sigmoid(tf.matmul(self.activation[i-1],weight[i])+bias[i]) + if type(dropout)==list: + self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) if self.function=='tanh': if i==0: - self.activation[i]=tf.nn.tanh(self.conv_f(data,weight_conv[i],i)+bias_conv[i]) - if type(self.max_pool)==list and self.max_pool[i][0]!=0: - self.activation[i]=self.max_pool_f(self.activation[i],i) - if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: - self.activation[i]=self.avg_pool_f(self.activation[i],i) + self.activation[i]=tf.nn.tanh(tf.matmul(data,weight[i])+bias[i]) + if type(dropout)==list: + self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) else: - self.activation[i]=tf.nn.tanh(self.conv_f(self.activation[i-1],weight_conv[i],i)+bias_conv[i]) - if type(self.max_pool)==list and self.max_pool[i][0]!=0: - self.activation[i]=self.max_pool_f(self.activation[i],i) - if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: - self.activation[i]=self.avg_pool_f(self.activation[i],i) + self.activation[i]=tf.nn.tanh(tf.matmul(self.activation[i-1],weight[i])+bias[i]) + if type(dropout)==list: + self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) if self.function=='relu': if i==0: - self.activation[i]=tf.nn.relu(self.conv_f(data,weight_conv[i],i)+bias_conv[i]) - if type(self.max_pool)==list and self.max_pool[i][0]!=0: - self.activation[i]=self.max_pool_f(self.activation[i],i) - if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: - self.activation[i]=self.avg_pool_f(self.activation[i],i) + 
self.activation[i]=tf.nn.relu(tf.matmul(data,weight[i])+bias[i]) + if type(dropout)==list: + self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) else: - self.activation[i]=tf.nn.relu(self.conv_f(self.activation[i-1],weight_conv[i],i)+bias_conv[i]) - if type(self.max_pool)==list and self.max_pool[i][0]!=0: - self.activation[i]=self.max_pool_f(self.activation[i],i) - if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: - self.activation[i]=self.avg_pool_f(self.activation[i],i) + self.activation[i]=tf.nn.relu(tf.matmul(self.activation[i-1],weight[i])+bias[i]) + if type(dropout)==list: + self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) if self.function=='elu': if i==0: - self.activation[i]=tf.nn.elu(self.conv_f(data,weight_conv[i],i)+bias_conv[i]) - if type(self.max_pool)==list and self.max_pool[i][0]!=0: - self.activation[i]=self.max_pool_f(self.activation[i],i) - if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: - self.activation[i]=self.avg_pool_f(self.activation[i],i) + self.activation[i]=tf.nn.elu(tf.matmul(data,weight[i])+bias[i]) + if type(dropout)==list: + self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) else: - self.activation[i]=tf.nn.elu(self.conv_f(self.activation[i-1],weight_conv[i],i)+bias_conv[i]) - if type(self.max_pool)==list and self.max_pool[i][0]!=0: - self.activation[i]=self.max_pool_f(self.activation[i],i) - if type(self.avg_pool)==list and self.avg_pool[i][0]!=0: - self.activation[i]=self.avg_pool_f(self.activation[i],i) - flattened_layer=tf.reshape(self.activation[-1],[-1,self.activation[-1].shape[1]*self.activation[-1].shape[2]*self.activation[-1].shape[3]]) - shape=flattened_layer.shape - shape=np.array(shape[1]) - shape=shape.astype(np.int) - self.flattened_len=shape - weight_fc,bias_fc=self.forward_propagation_fc(flattened_layer,dropout,shape,use_nn) - output=tf.matmul(self.activation_fc[-1],weight_fc[-1])+bias_fc[-1] + self.activation[i]=tf.nn.elu(tf.matmul(self.activation[i-1],weight[i])+bias[i]) + if type(dropout)==list: + self.activation[i]=tf.nn.dropout(self.activation[i],dropout[i+1]) + if dropout!=None and type(dropout)!=list: + self.activation[-1]=tf.nn.dropout(self.activation[-1],dropout) + output=tf.matmul(self.activation[-1],weight[-1])+bias[-1] return output @@ -415,41 +282,57 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.continue_train=True if cpu_gpu!=None: self.cpu_gpu=cpu_gpu - if type(self.cpu_gpu)==list and len(self.cpu_gpu)!=3: - train_cpu_gpu='/gpu:0' + if type(self.cpu_gpu)==list and (len(self.cpu_gpu)!=self.hidden_layers+1 or len(self.cpu_gpu)==1): + self.cpu_gpu.append('/gpu:0') if type(self.cpu_gpu)==str: train_cpu_gpu=self.cpu_gpu + else: + train_cpu_gpu=self.cpu_gpu[-1] with tf.device(train_cpu_gpu): if continue_train==True and self.end_flag==True: self.end_flag=False - self.weight_conv=[x for x in range(len(self.conv))] - self.bias_conv=[x for x in range(len(self.conv))] - for i in range(len(self.conv)): - self.weight_conv[i]=tf.Variable(self.last_weight_conv[i],name='conv_{0}_weight'.format(i+1)) - self.bias_conv[i]=tf.Variable(self.last_bias_conv[i],name='conv_{0}_bias'.format(i+1)) + self.weight=[x for x in range(self.hidden_layers+1)] + self.bias=[x for x in range(self.hidden_layers+1)] + for i in range(self.hidden_layers+1): + if i==self.hidden_layers: + self.weight[i]=tf.Variable(self.last_weight[i],name='weight_output') + self.bias[i]=tf.Variable(self.last_bias[i],name='bias_output') + else: + 
self.weight[i]=tf.Variable(self.last_weight[i],name='weight_{0}'.format(i+1)) + self.bias[i]=tf.Variable(self.last_bias[i],name='bias_{0}'.format(i+1)) if continue_train==True and self.flag==1: - self.weight_conv=[x for x in range(len(self.conv))] - self.bias_conv=[x for x in range(len(self.conv))] - for i in range(len(self.conv)): - self.weight_conv[i]=tf.Variable(self.last_weight_conv[i],name='conv_{0}_weight'.format(i+1)) - self.bias_conv[i]=tf.Variable(self.last_bias_conv[i],name='conv_{0}_bias'.format(i+1)) + self.weight=[x for x in range(self.hidden_layers+1)] + self.bias=[x for x in range(self.hidden_layers+1)] + for i in range(self.hidden_layers+1): + if i==self.hidden_layers: + self.weight[i]=tf.Variable(self.last_weight[i],name='weight_output') + self.bias[i]=tf.Variable(self.last_bias[i],name='bias_output') + else: + self.weight[i]=tf.Variable(self.last_weight[i],name='weight_{0}'.format(i+1)) + self.bias[i]=tf.Variable(self.last_bias[i],name='bias_{0}'.format(i+1)) self.flag=0 # ---------------forward propagation--------------- train_output=self.forward_propagation(self.data,self.dropout) # ---------------------------------------- with tf.name_scope('train_loss'): - if self.labels_shape[1]==1: + if len(self.labels_shape)==1: + if l2==None: + train_loss=tf.reduce_mean(tf.square(train_output-tf.expand_dims(self.labels,axis=1))) + else: + train_loss=tf.square(train_output-tf.expand_dims(self.labels,axis=1)) + train_loss=tf.reduce_mean(train_loss+l2/2*sum([tf.reduce_sum(x**2) for x in self.weight])) + elif self.labels_shape[1]==1: if l2==None: train_loss=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=train_output,labels=self.labels)) else: train_loss=tf.nn.sigmoid_cross_entropy_with_logits(logits=train_output,labels=self.labels) - train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.weight_conv])+sum([tf.reduce_sum(x**2) for x in self.weight_fc]))) + train_loss=tf.reduce_mean(train_loss+l2/2*sum([tf.reduce_sum(x**2) for x in self.weight])) else: if l2==None: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=train_output,labels=self.labels)) else: - train_loss=tf.nn.softmax_cross_entropy_with_logits_v2(logits=train_output,labels=self.labels) - train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.weight_conv])+sum([tf.reduce_sum(x**2) for x in self.weight_fc]))) + train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=train_output,labels=self.labels)) + train_loss=train_loss+l2*sum([tf.reduce_sum(x**2) for x in self.weight]) if self.optimizer=='Gradient': opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) if self.optimizer=='RMSprop': @@ -461,8 +344,11 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N train_loss_scalar=tf.summary.scalar('train_loss',train_loss) if acc==True: with tf.name_scope('train_accuracy'): - equal=tf.equal(tf.argmax(train_output,1),tf.argmax(self.labels,1)) - train_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) + if len(self.labels_shape)==1: + train_accuracy=tf.reduce_mean(tf.abs(train_output-self.labels)) + else: + equal=tf.equal(tf.argmax(train_output,1),tf.argmax(self.labels,1)) + train_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) if train_summary_path!=None: train_merging=tf.summary.merge([train_loss_scalar,train_accuracy_scalar]) @@ -565,28 +451,24 @@ def 
train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N print() print('last loss:{0:.6f}'.format(self.train_loss)) if acc==True: - print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + if len(self.labels_shape)==2: + print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('accuracy:{0:.6f}'.format(self.train_accuracy)) if train_summary_path!=None: train_writer.close() if continue_train==True: - self.last_weight_conv=sess.run(self.weight_conv) - self.last_bias_conv=sess.run(self.bias_conv) - self.last_weight_fc=sess.run(self.weight_fc) - self.last_bias_fc=sess.run(self.bias_fc) - for i in range(len(self.conv)): - self.weight_conv[i]=tf.Variable(self.last_weight_conv[i],name='conv_{0}_weight'.format(i+1)) - self.bias_conv[i]=tf.Variable(self.last_bias_conv[i],name='conv_{0}_bias'.format(i+1)) - for i in range(len(self.fc)+1): - if i==len(self.fc): - self.weight_fc[i]=tf.Variable(self.last_weight_fc[i],name='output_weight') - self.bias_fc[i]=tf.Variable(self.last_bias_fc[i],name='output_bias') + self.last_weight=sess.run(self.weight) + self.last_bias=sess.run(self.bias) + for i in range(self.hidden_layers+1): + if i==self.hidden_layers: + self.weight[i]=tf.Variable(self.last_weight[i],name='weight_output') + self.bias[i]=tf.Variable(self.last_bias[i],name='bias_output') else: - self.weight_fc[i]=tf.Variable(self.last_weight_fc[i],name='fc_{0}_weight'.format(i+1)) - self.bias_fc[i]=tf.Variable(self.last_bias_fc[i],name='fc_{0}_weight'.format(i+1)) - self.last_weight_conv.clear() - self.last_bias_conv.clear() - self.last_weight_fc.clear() - self.last_bias_fc.clear() + self.weight[i]=tf.Variable(self.last_weight[i],name='weight_{0}'.format(i+1)) + self.bias[i]=tf.Variable(self.last_bias[i],name='bias_{0}'.format(i+1)) + self.last_weight.clear() + self.last_bias.clear() sess.run(tf.global_variables_initializer()) if continue_train==True: if self.epoch!=None: @@ -609,14 +491,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N def end(self): with self.graph.as_default(): self.end_flag=True - self.last_weight_conv=self.sess.run(self.weight_conv) - self.last_bias_conv=self.sess.run(self.bias_conv) - self.last_weight_fc=self.sess.run(self.weight_fc) - self.last_bias_fc=self.sess.run(self.bias_fc) - self.weight_conv.clear() - self.bias_conv.clear() - self.weight_fc.clear() - self.bias_fc.clear() + self.last_weight=self.sess.run(self.weight) + self.last_bias=self.sess.run(self.bias) + self.weight.clear() + self.bias.clear() self.total_epoch=self.epoch self.sess.close() return @@ -630,19 +508,28 @@ def test(self,test_data,test_labels,batch=None): use_nn=True self.test_flag=True shape=test_labels.shape - test_data_placeholder=tf.placeholder(dtype=test_data.dtype,shape=[None,test_data.shape[1],test_data.shape[2],test_data.shape[3]]) - test_labels_placeholder=tf.placeholder(dtype=test_labels.dtype,shape=[None,shape[1]]) + test_data_placeholder=tf.placeholder(dtype=test_data.dtype,shape=[None,test_data.shape[1]]) + if len(shape)==2: + test_labels_placeholder=tf.placeholder(dtype=test_labels.dtype,shape=[None,shape[1]]) + else: + test_labels_placeholder=tf.placeholder(dtype=test_labels.dtype,shape=[None]) test_output=self.forward_propagation(test_data_placeholder,use_nn=use_nn) - if shape[1]==1: + if len(shape)==1: + test_loss=tf.reduce_mean(tf.square(test_output-test_labels_placeholder)) + elif shape[1]==1: test_loss=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=test_output,labels=test_labels_placeholder)) else: 
test_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=test_output,labels=test_labels_placeholder)) - equal=tf.equal(tf.argmax(test_output,1),tf.argmax(test_labels_placeholder,1)) - test_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) + if len(shape)==1: + test_accuracy=tf.reduce_mean(tf.abs(test_output-test_labels_placeholder)) + else: + equal=tf.equal(tf.argmax(test_output,1),tf.argmax(test_labels_placeholder,1)) + test_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) config=tf.ConfigProto() config.gpu_options.allow_growth=True config.allow_soft_placement=True sess=tf.Session(config=config) + sess.run(tf.global_variables_initializer()) if batch!=None: total_test_loss=0 total_test_acc=0 @@ -674,11 +561,14 @@ def test(self,test_data,test_labels,batch=None): self.test_loss=self.test_loss.astype(np.float32) self.test_accuracy=self.test_accuracy.astype(np.float32) print('test loss:{0:.6f}'.format(self.test_loss)) - print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + if len(self.labels_shape)==2: + print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + else: + print('test accuracy:{0:.6f}'.format(self.test_accuracy)) sess.close() return - + def train_info(self): print() print('batch:{0}'.format(self.batch)) @@ -700,7 +590,10 @@ def train_info(self): print('train loss:{0:.6f}'.format(self.train_loss)) if self.acc==True: print() - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + if len(self.labels_shape)==2: + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('train accuracy:{0:.6f}'.format(self.train_accuracy)) return @@ -708,7 +601,10 @@ def test_info(self): print() print('test loss:{0:.6f}'.format(self.test_loss)) print() - print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + if len(self.labels_shape)==2: + print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + else: + print('test accuracy:{0:.6f}'.format(self.test_accuracy)) return @@ -728,192 +624,136 @@ def train_visual(self): plt.title('train loss') plt.xlabel('epoch') plt.ylabel('loss') - plt.figure(2) - plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) - plt.title('train accuracy') - plt.xlabel('epoch') - plt.ylabel('accuracy') + if self.acc==True: + plt.figure(2) + plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) + plt.title('train accuracy') + plt.xlabel('epoch') + plt.ylabel('accuracy') print('train loss:{0:.6f}'.format(self.train_loss)) if self.acc==True: print() - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + if len(self.labels_shape)==2: + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('train accuracy:{0:.6f}'.format(self.train_accuracy)) return - + def comparison(self): print() print('train loss:{0:.6f}'.format(self.train_loss)) - print() - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - print() - print('-------------------------------------') - print() - print('test loss:{0:.6f}'.format(self.test_loss)) - print() - print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + if self.acc==True: + print() + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + if self.test_flag: + print() + print('-------------------------------------') + print() + print('test loss:{0:.6f}'.format(self.test_loss)) + print() + if len(self.labels_shape)==2: + print('test accuracy:{0:.3f}%'.format(self.test_accuracy*100)) + else: + print('test accuracy:{0:.6f}'.format(self.test_accuracy)) return def network(self): print() 
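# Side note (a sketch, not part of the patch) of the per-layer count that the
# network() method below accumulates into total_params: a dense layer holds
# fan_in * fan_out weights plus one bias per output unit.
def dense_params(fan_in, fan_out):
    return fan_in * fan_out + fan_out

print(dense_params(784, 128))  # 100480
print(dense_params(128, 10))   # 1290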
total_params=0 - for i in range(len(self.conv)+1): - if i==0: - print('input layer\t{0}\t{1}'.format(self.data_shape[1],self.data_shape[3])) - print() - if type(self.function)==list: - if i==1: - if self.conv[i-1][3]=='SAME': - print('conv_layer_{0}\t{1}\t{2}\t{3}\t{4}\t{5}'.format(i,self.conv[i-1][0],self.data_shape[1],self.conv[i-1][1],np.prod(self.weight_conv[i-1].shape)+self.weight_conv[i-1].shape[3],self.function[i-1])) - total_params+=np.prod(self.weight_conv[i-1].shape)+self.weight_conv[i-1].shape[3] + if type(self.hidden)==list: + for i in range(len(self.hidden)+2): + if i==0: + print('input layer\t{0}'.format(self.data_shape[1])) + print() + if i==len(self.hidden)+1: + if self.labels_shape[1]==1: + print('output layer\t{0}\t{1}\t{2}'.format(self.labels_shape[1],self.labels_shape[1]*self.hidden[i-2]+1,'sigmoid')) + total_params+=self.labels_shape[1]*self.hidden[i-2]+1 print() else: - conv_output_shape=int((self.data_shape[1]-self.conv[i-1][0])/self.conv[i-1][2]+1) - print('conv_layer_{0}\t{1}\t{2}\t{3}\t{4}\t{5}'.format(i,self.conv[i-1][0],conv_output_shape,self.conv[i-1][1],np.prod(self.weight_conv[i-1].shape)+self.weight_conv[i-1].shape[3],self.function[i-1])) - total_params+=np.prod(self.weight_conv[i-1].shape)+self.weight_conv[i-1].shape[3] + print('output layer\t{0}\t{1}\t{2}'.format(self.labels_shape[1],self.labels_shape[1]*self.hidden[i-2]+self.labels_shape[1],'softmax')) + total_params+=self.labels_shape[1]*self.hidden[i-2]+self.labels_shape[1] print() - if type(self.max_pool)==list and self.max_pool[i-1][0]!=0: - if self.max_pool[i-1][2]=='SAME': - print('max_pool_layer_{0}\t{1}\t{2}\t{3}'.format(i,self.max_pool[i-1][0],conv_output_shape,self.conv[i-1][1])) - print() - else: - max_pool_output_shape=int((conv_output_shape-self.max_pool[i-1][0])/self.max_pool[i-1][1]+1) - print('max_pool_layer_{0}\t{1}\t{2}\t{3}'.format(i,self.max_pool[i-1][0],max_pool_output_shape,self.conv[i-1][1])) - print() - if type(self.avg_pool)==list and self.avg_pool[i-1][0]!=0: - if self.avg_pool[i-1][2]=='SAME': - print('avg_pool_layer_{0}\t{1}\t{2}\t{3}'.format(i,self.avg_pool[i-1][0],conv_output_shape,self.conv[i-1][1])) + if i>0 and i0 and i0 and i0 and i Date: Thu, 16 Jul 2020 15:25:08 +0800 Subject: [PATCH 31/72] Update CNN.py --- Note/nn/CNN/CNN.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Note/nn/CNN/CNN.py b/Note/nn/CNN/CNN.py index fc4831838..c78aa7ac9 100644 --- a/Note/nn/CNN/CNN.py +++ b/Note/nn/CNN/CNN.py @@ -910,10 +910,10 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.function,output_file) pickle.dump(self.batch,output_file) pickle.dump(self.epoch,output_file) - pickle.dump(self.l2,output_file) - pickle.dump(self.dropout,output_file) pickle.dump(self.optimizer,output_file) pickle.dump(self.lr,output_file) + pickle.dump(self.l2,output_file) + pickle.dump(self.dropout,output_file) pickle.dump(self.acc,output_file) pickle.dump(self.train_loss,output_file) pickle.dump(self.train_accuracy,output_file) @@ -954,10 +954,10 @@ def restore(self,model_path): self.function=pickle.load(input_file) self.batch=pickle.load(input_file) self.epoch=pickle.load(input_file) - self.l2=pickle.load(input_file) - self.dropout=pickle.load(input_file) self.optimizer=pickle.load(input_file) self.lr=pickle.load(input_file) + self.l2=pickle.load(input_file) + self.dropout=pickle.load(input_file) self.acc=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_accuracy=pickle.load(input_file) From a989cde1786282f163416302807c5dbf456af5b8 Mon Sep 17 
00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 16 Jul 2020 15:25:27 +0800 Subject: [PATCH 32/72] Update GRU.py --- Note/nn/RNN/GRU.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Note/nn/RNN/GRU.py b/Note/nn/RNN/GRU.py index 8dd217d56..ca77d1c9b 100644 --- a/Note/nn/RNN/GRU.py +++ b/Note/nn/RNN/GRU.py @@ -1055,9 +1055,9 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.predicate,output_file) pickle.dump(self.batch,output_file) pickle.dump(self.epoch,output_file) - pickle.dump(self.l2,output_file) pickle.dump(self.optimizer,output_file) pickle.dump(self.lr,output_file) + pickle.dump(self.l2,output_file) pickle.dump(self.acc,output_file) pickle.dump(self.train_loss,output_file) pickle.dump(self.train_accuracy,output_file) @@ -1121,9 +1121,9 @@ def restore(self,model_path): self.predicate=pickle.load(input_file) self.batch=pickle.load(input_file) self.epoch=pickle.load(input_file) - self.l2=pickle.load(input_file) self.optimizer=pickle.load(input_file) self.lr=pickle.load(input_file) + self.l2=pickle.load(input_file) self.acc=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_accuracy=pickle.load(input_file) From 587035ff5e6e0b3f5b7744d556cc4f84195c9261 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 16 Jul 2020 15:25:43 +0800 Subject: [PATCH 33/72] Update LSTM.py --- Note/nn/RNN/LSTM.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Note/nn/RNN/LSTM.py b/Note/nn/RNN/LSTM.py index 12daa7209..b1025383a 100644 --- a/Note/nn/RNN/LSTM.py +++ b/Note/nn/RNN/LSTM.py @@ -1172,10 +1172,10 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.predicate,output_file) pickle.dump(self.batch,output_file) pickle.dump(self.epoch,output_file) - pickle.dump(self.l2,output_file) pickle.dump(self.optimizer,output_file) pickle.dump(self.lr,output_file) - pickle.dump(self.acc,output_file) + pickle.dump(self.l2,output_file) + pickle.dump(self.acc,output_file) pickle.dump(self.train_loss,output_file) pickle.dump(self.train_accuracy,output_file) pickle.dump(self.test_flag,output_file) @@ -1244,9 +1244,9 @@ def restore(self,model_path): self.predicate=pickle.load(input_file) self.batch=pickle.load(input_file) self.epoch=pickle.load(input_file) - self.l2=pickle.load(input_file) self.optimizer=pickle.load(input_file) self.lr=pickle.load(input_file) + self.l2=pickle.load(input_file) self.acc=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_accuracy=pickle.load(input_file) From 0d5df37c3aa1c951a7b47c494aa5f414b01b9c93 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 16 Jul 2020 15:26:21 +0800 Subject: [PATCH 34/72] Update M_reluGRU.py --- Note/nn/RNN/M_reluGRU.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Note/nn/RNN/M_reluGRU.py b/Note/nn/RNN/M_reluGRU.py index bd9c360ab..ecd702cd2 100644 --- a/Note/nn/RNN/M_reluGRU.py +++ b/Note/nn/RNN/M_reluGRU.py @@ -989,9 +989,9 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.predicate,output_file) pickle.dump(self.batch,output_file) pickle.dump(self.epoch,output_file) - pickle.dump(self.l2,output_file) pickle.dump(self.optimizer,output_file) pickle.dump(self.lr,output_file) + pickle.dump(self.l2,output_file) pickle.dump(self.acc,output_file) pickle.dump(self.train_loss,output_file) pickle.dump(self.train_accuracy,output_file) 
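# Side note on patches 31-35 (a sketch, not part of the patches): they move the
# l2 (and dropout) dumps after optimizer/lr in both save() and restore().
# Pickle streams carry no field names, so load order must mirror dump order
# exactly, and checkpoints written with the old order cannot be read back with
# the new one. The invariant in miniature:
import pickle, io

buf = io.BytesIO()
for value in ('Adam', 0.001, None, True):  # optimizer, lr, l2, acc
    pickle.dump(value, buf)
buf.seek(0)
optimizer, lr, l2, acc = (pickle.load(buf) for _ in range(4))
print(optimizer, lr, l2, acc)  # Adam 0.001 None True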
@@ -1049,9 +1049,9 @@ def restore(self,model_path): self.predicate=pickle.load(input_file) self.batch=pickle.load(input_file) self.epoch=pickle.load(input_file) - self.l2=pickle.load(input_file) self.optimizer=pickle.load(input_file) self.lr=pickle.load(input_file) + self.l2=pickle.load(input_file) self.acc=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_accuracy=pickle.load(input_file) From 5fc635d8d36f36bb70d2308da881f6dedd397ad7 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 16 Jul 2020 15:26:36 +0800 Subject: [PATCH 35/72] Update RNN.py --- Note/nn/RNN/RNN.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Note/nn/RNN/RNN.py b/Note/nn/RNN/RNN.py index 7cad8356e..5a5ccaf89 100644 --- a/Note/nn/RNN/RNN.py +++ b/Note/nn/RNN/RNN.py @@ -661,9 +661,9 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.hidden,output_file) pickle.dump(self.batch,output_file) pickle.dump(self.epoch,output_file) - pickle.dump(self.l2,output_file) pickle.dump(self.optimizer,output_file) pickle.dump(self.lr,output_file) + pickle.dump(self.l2,output_file) pickle.dump(self.acc,output_file) pickle.dump(float(self.train_loss),output_file) pickle.dump(float(self.train_accuracy*100),output_file) @@ -707,9 +707,9 @@ def restore(self,model_path): self.hidden=pickle.load(input_file) self.batch=pickle.load(input_file) self.epoch=pickle.load(input_file) - self.l2=pickle.load(input_file) self.optimizer=pickle.load(input_file) self.lr=pickle.load(input_file) + self.l2=pickle.load(input_file) self.acc=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_accuracy=pickle.load(input_file) From 633df6f02e930e3f3d953b4bf47c527370cf7919 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 16 Jul 2020 20:00:24 +0800 Subject: [PATCH 36/72] Update Note Architecture.py --- Note/create/Note Architecture.py | 254 +++++++++++++++---------------- 1 file changed, 127 insertions(+), 127 deletions(-) diff --git a/Note/create/Note Architecture.py b/Note/create/Note Architecture.py index dfaecadb2..ee8053e0f 100644 --- a/Note/create/Note Architecture.py +++ b/Note/create/Note Architecture.py @@ -134,144 +134,144 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,acc=True,train_su with tf.name_scope('train_loss'): - if self.optimizer=='Gradient': - opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) - if self.optimizer=='RMSprop': - opt=tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(train_loss) - if self.optimizer=='Momentum': - opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) - if self.optimizer=='Adam': - opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) - train_loss_scalar=tf.summary.scalar('train_loss',train_loss) - if acc==True: - with tf.name_scope('train_accuracy'): + if self.optimizer=='Gradient': + opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) + if self.optimizer=='RMSprop': + opt=tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(train_loss) + if self.optimizer=='Momentum': + opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) + if self.optimizer=='Adam': + opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) + train_loss_scalar=tf.summary.scalar('train_loss',train_loss) + if acc==True: + with 
tf.name_scope('train_accuracy'): + + + train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) + if train_summary_path!=None: + train_merging=tf.summary.merge([train_loss_scalar,train_accuracy_scalar]) + train_writer=tf.summary.FileWriter(train_summary_path) + config=tf.ConfigProto() + config.gpu_options.allow_growth=True + config.allow_soft_placement=True + sess=tf.Session(config=config) + sess.run(tf.global_variables_initializer()) + self.sess=sess + if self.total_epoch==0: + epoch=epoch+1 + for i in range(epoch): + if self.batch!=None: + batches=int((self.shape0-self.shape0%self.batch)/self.batch) + total_loss=0 + total_acc=0 + random=np.arange(self.shape0) + np.random.shuffle(random) + + + for j in range(batches): + index1=j*self.batch + index2=(j+1)*self.batch - train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) - if train_summary_path!=None: - train_merging=tf.summary.merge([train_loss_scalar,train_accuracy_scalar]) - train_writer=tf.summary.FileWriter(train_summary_path) - config=tf.ConfigProto() - config.gpu_options.allow_growth=True - config.allow_soft_placement=True - sess=tf.Session(config=config) - sess.run(tf.global_variables_initializer()) - self.sess=sess - if self.total_epoch==0: - epoch=epoch+1 - for i in range(epoch): - if self.batch!=None: - batches=int((self.shape0-self.shape0%self.batch)/self.batch) - total_loss=0 - total_acc=0 - random=np.arange(self.shape0) - np.random.shuffle(random) + if i==0 and self.total_epoch==0: + batch_loss=sess.run(train_loss,feed_dict=feed_dict) + else: + batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) + total_loss+=batch_loss + if acc==True: + batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) + total_acc+=batch_acc + if self.shape0%self.batch!=0: + batches+=1 + index1=batches*self.batch + index2=self.batch-(self.shape0-batches*self.batch) - for j in range(batches): - index1=j*self.batch - index2=(j+1)*self.batch - - - if i==0 and self.total_epoch==0: - batch_loss=sess.run(train_loss,feed_dict=feed_dict) - else: - batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) - total_loss+=batch_loss - if acc==True: - batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) - total_acc+=batch_acc - if self.shape0%self.batch!=0: - batches+=1 - index1=batches*self.batch - index2=self.batch-(self.shape0-batches*self.batch) - - - if i==0 and self.total_epoch==0: - batch_loss=sess.run(train_loss,feed_dict=feed_dict) - else: - batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) - total_loss+=batch_loss - if acc==True: - batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) - total_acc+=batch_acc - loss=total_loss/batches - train_acc=total_acc/batches - self.train_loss_list.append(loss.astype(np.float32)) - self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float32) - if acc==True: - self.train_accuracy_list.append(train_acc.astype(np.float32)) - self.train_accuracy=train_acc - self.train_accuracy=self.train_accuracy.astype(np.float32) - else: - random=np.arange(self.shape0) - np.random.shuffle(random) - - if i==0 and self.total_epoch==0: - loss=sess.run(train_loss,feed_dict=feed_dict) + batch_loss=sess.run(train_loss,feed_dict=feed_dict) else: - loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) - self.train_loss_list.append(loss.astype(np.float32)) - self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float32) + batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) + total_loss+=batch_loss if acc==True: - 
accuracy=sess.run(train_accuracy,feed_dict=feed_dict) - self.train_accuracy_list.append(accuracy.astype(np.float32)) - self.train_accuracy=accuracy - self.train_accuracy=self.train_accuracy.astype(np.float32) - if epoch%10!=0: - temp_epoch=epoch-epoch%10 - temp_epoch=int(temp_epoch/10) + batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) + total_acc+=batch_acc + loss=total_loss/batches + train_acc=total_acc/batches + self.train_loss_list.append(loss.astype(np.float32)) + self.train_loss=loss + self.train_loss=self.train_loss.astype(np.float32) + if acc==True: + self.train_accuracy_list.append(train_acc.astype(np.float32)) + self.train_accuracy=train_acc + self.train_accuracy=self.train_accuracy.astype(np.float32) + else: + random=np.arange(self.shape0) + np.random.shuffle(random) + + + if i==0 and self.total_epoch==0: + loss=sess.run(train_loss,feed_dict=feed_dict) else: - temp_epoch=epoch/10 - if temp_epoch==0: - temp_epoch=1 - if i%temp_epoch==0: - if continue_train==True: - if self.epoch!=None: - self.total_epoch=self.epoch+i+1 - else: - self.total_epoch=i - if continue_train==True: - print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch,self.train_loss)) + loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) + self.train_loss_list.append(loss.astype(np.float32)) + self.train_loss=loss + self.train_loss=self.train_loss.astype(np.float32) + if acc==True: + accuracy=sess.run(train_accuracy,feed_dict=feed_dict) + self.train_accuracy_list.append(accuracy.astype(np.float32)) + self.train_accuracy=accuracy + self.train_accuracy=self.train_accuracy.astype(np.float32) + if epoch%10!=0: + temp_epoch=epoch-epoch%10 + temp_epoch=int(temp_epoch/10) + else: + temp_epoch=epoch/10 + if temp_epoch==0: + temp_epoch=1 + if i%temp_epoch==0: + if continue_train==True: + if self.epoch!=None: + self.total_epoch=self.epoch+i+1 else: - print('epoch:{0} loss:{1:.6f}'.format(i,self.train_loss)) - if model_path!=None and i%epoch*2==0: - self.save(model_path,i,one) - if train_summary_path!=None: - train_summary=sess.run(train_merging,feed_dict=feed_dict) - train_writer.add_summary(train_summary,i) - print() - print('last loss:{0:.6f}'.format(self.train_loss)) - if acc==True: - if len(self.labels_shape)==2: - print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - else: - print('accuracy:{0:.3f}'.format(self.train_accuracy)) - if train_summary_path!=None: - train_writer.close() - if continue_train==True: - - - sess.run(tf.global_variables_initializer()) - if continue_train==True: - if self.epoch!=None: - self.total_epoch=self.epoch+epoch + self.total_epoch=i + if continue_train==True: + print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch,self.train_loss)) else: - self.total_epoch=epoch-1 - self.epoch=self.total_epoch - if continue_train!=True: - self.epoch=epoch-1 - t2=time.time() - _time=t2-t1 - if continue_train!=True or self.time==None: - self.total_time=_time + print('epoch:{0} loss:{1:.6f}'.format(i,self.train_loss)) + if model_path!=None and i%epoch*2==0: + self.save(model_path,i,one) + if train_summary_path!=None: + train_summary=sess.run(train_merging,feed_dict=feed_dict) + train_writer.add_summary(train_summary,i) + print() + print('last loss:{0:.6f}'.format(self.train_loss)) + if acc==True: + if len(self.labels_shape)==2: + print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('accuracy:{0:.3f}'.format(self.train_accuracy)) + if train_summary_path!=None: + train_writer.close() + if continue_train==True: + + + sess.run(tf.global_variables_initializer()) + if 
continue_train==True: + if self.epoch!=None: + self.total_epoch=self.epoch+epoch else: - self.total_time+=_time - print('time:{0:.3f}s'.format(self.time)) - return + self.total_epoch=epoch-1 + self.epoch=self.total_epoch + if continue_train!=True: + self.epoch=epoch-1 + t2=time.time() + _time=t2-t1 + if continue_train!=True or self.time==None: + self.total_time=_time + else: + self.total_time+=_time + print('time:{0:.3f}s'.format(self.time)) + return def end(self): From 9aacaa6f328391326964bd1e6adc58a2bc9d089e Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 16 Jul 2020 20:09:11 +0800 Subject: [PATCH 37/72] Update FNN.py --- Note/nn/FNN/FNN.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/Note/nn/FNN/FNN.py b/Note/nn/FNN/FNN.py index cbc10b5a5..883d87f4a 100644 --- a/Note/nn/FNN/FNN.py +++ b/Note/nn/FNN/FNN.py @@ -333,15 +333,15 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N else: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=train_output,labels=self.labels)) train_loss=train_loss+l2*sum([tf.reduce_sum(x**2) for x in self.weight]) - if self.optimizer=='Gradient': - opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) - if self.optimizer=='RMSprop': - opt=tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(train_loss) - if self.optimizer=='Momentum': - opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) - if self.optimizer=='Adam': - opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) - train_loss_scalar=tf.summary.scalar('train_loss',train_loss) + if self.optimizer=='Gradient': + opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) + if self.optimizer=='RMSprop': + opt=tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(train_loss) + if self.optimizer=='Momentum': + opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) + if self.optimizer=='Adam': + opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) + train_loss_scalar=tf.summary.scalar('train_loss',train_loss) if acc==True: with tf.name_scope('train_accuracy'): if len(self.labels_shape)==1: From d26f249f4848d76f6d39145d86da818a45150706 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 16 Jul 2020 20:09:29 +0800 Subject: [PATCH 38/72] Update CNN.py --- Note/nn/CNN/CNN.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/Note/nn/CNN/CNN.py b/Note/nn/CNN/CNN.py index c78aa7ac9..14c22b8ae 100644 --- a/Note/nn/CNN/CNN.py +++ b/Note/nn/CNN/CNN.py @@ -450,15 +450,15 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N else: train_loss=tf.nn.softmax_cross_entropy_with_logits_v2(logits=train_output,labels=self.labels) train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.weight_conv])+sum([tf.reduce_sum(x**2) for x in self.weight_fc]))) - if self.optimizer=='Gradient': - opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) - if self.optimizer=='RMSprop': - opt=tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(train_loss) - if self.optimizer=='Momentum': - opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) - if self.optimizer=='Adam': - 
opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) - train_loss_scalar=tf.summary.scalar('train_loss',train_loss) + if self.optimizer=='Gradient': + opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) + if self.optimizer=='RMSprop': + opt=tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(train_loss) + if self.optimizer=='Momentum': + opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) + if self.optimizer=='Adam': + opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) + train_loss_scalar=tf.summary.scalar('train_loss',train_loss) if acc==True: with tf.name_scope('train_accuracy'): equal=tf.equal(tf.argmax(train_output,1),tf.argmax(self.labels,1)) From 7110c9bbb37c8ad91e0e82e763d0747f0e92b13b Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 16 Jul 2020 20:10:33 +0800 Subject: [PATCH 39/72] Update GRU.py --- Note/nn/RNN/GRU.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/Note/nn/RNN/GRU.py b/Note/nn/RNN/GRU.py index ca77d1c9b..d194b22a0 100644 --- a/Note/nn/RNN/GRU.py +++ b/Note/nn/RNN/GRU.py @@ -575,15 +575,15 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.rg_weight_x**2)+tf.reduce_sum(self.rg_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) - if self.optimizer=='Gradient': - opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) - if self.optimizer=='RMSprop': - opt=tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(train_loss) - if self.optimizer=='Momentum': - opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) - if self.optimizer=='Adam': - opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) - train_loss_scalar=tf.summary.scalar('train_loss',train_loss) + if self.optimizer=='Gradient': + opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) + if self.optimizer=='RMSprop': + opt=tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(train_loss) + if self.optimizer=='Momentum': + opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) + if self.optimizer=='Adam': + opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) + train_loss_scalar=tf.summary.scalar('train_loss',train_loss) if acc==True: with tf.name_scope('train_accuracy'): if self.pattern=='1n': From d369662df692bf1756141a0195d3ba1fc140b81f Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 16 Jul 2020 20:10:48 +0800 Subject: [PATCH 40/72] Update LSTM.py --- Note/nn/RNN/LSTM.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/Note/nn/RNN/LSTM.py b/Note/nn/RNN/LSTM.py index b1025383a..38bd1a304 
100644 --- a/Note/nn/RNN/LSTM.py +++ b/Note/nn/RNN/LSTM.py @@ -663,15 +663,15 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.fg_weight_x**2)+tf.reduce_sum(self.fg_weight_h**2)+tf.reduce_sum(self.ig_weight_x**2)+tf.reduce_sum(self.ig_weight_h**2)+tf.reduce_sum(self.og_weight_x**2)+tf.reduce_sum(self.og_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.fg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.fg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_h])+sum([tf.reduce_sum(x**2) for x in self.og_weight_x])+sum([tf.reduce_sum(x**2) for x in self.og_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) - if self.optimizer=='Gradient': - opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) - if self.optimizer=='RMSprop': - opt=tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(train_loss) - if self.optimizer=='Momentum': - opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) - if self.optimizer=='Adam': - opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) - train_loss_scalar=tf.summary.scalar('train_loss',train_loss) + if self.optimizer=='Gradient': + opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) + if self.optimizer=='RMSprop': + opt=tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(train_loss) + if self.optimizer=='Momentum': + opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) + if self.optimizer=='Adam': + opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) + train_loss_scalar=tf.summary.scalar('train_loss',train_loss) if acc==True: with tf.name_scope('train_accuracy'): if self.pattern=='1n': From 4abdd25270f672676d304e67096eba72f1ec3760 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 16 Jul 2020 20:11:29 +0800 Subject: [PATCH 41/72] Update M_reluGRU.py --- Note/nn/RNN/M_reluGRU.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/Note/nn/RNN/M_reluGRU.py b/Note/nn/RNN/M_reluGRU.py index ecd702cd2..59dc286f6 100644 --- a/Note/nn/RNN/M_reluGRU.py +++ b/Note/nn/RNN/M_reluGRU.py @@ -542,15 +542,15 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) - if self.optimizer=='Gradient': - opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) - if self.optimizer=='RMSprop': - opt=tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(train_loss) - if self.optimizer=='Momentum': - 
opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) - if self.optimizer=='Adam': - opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) - train_loss_scalar=tf.summary.scalar('train_loss',train_loss) + if self.optimizer=='Gradient': + opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) + if self.optimizer=='RMSprop': + opt=tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(train_loss) + if self.optimizer=='Momentum': + opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) + if self.optimizer=='Adam': + opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) + train_loss_scalar=tf.summary.scalar('train_loss',train_loss) if acc==True: with tf.name_scope('train_accuracy'): if self.pattern=='1n': From 337a78e84ba1ed6c152d64b7c7f2259627bd0470 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 16 Jul 2020 20:11:44 +0800 Subject: [PATCH 42/72] Update RNN.py --- Note/nn/RNN/RNN.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/Note/nn/RNN/RNN.py b/Note/nn/RNN/RNN.py index 5a5ccaf89..00494be98 100644 --- a/Note/nn/RNN/RNN.py +++ b/Note/nn/RNN/RNN.py @@ -266,15 +266,15 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, else: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.o,labels=self.labels,axis=2),axis=1) train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.weight_x**2)+tf.reduce_sum(self.weight_h**2)+tf.reduce_sum(self.weight_o**2))) - if self.optimizer=='Gradient': - opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) - if self.optimizer=='RMSprop': - opt=tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(train_loss) - if self.optimizer=='Momentum': - opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) - if self.optimizer=='Adam': - opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) - train_loss_scalar=tf.summary.scalar('train_loss',train_loss) + if self.optimizer=='Gradient': + opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) + if self.optimizer=='RMSprop': + opt=tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(train_loss) + if self.optimizer=='Momentum': + opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) + if self.optimizer=='Adam': + opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) + train_loss_scalar=tf.summary.scalar('train_loss',train_loss) if acc==True: with tf.name_scope('train_accuracy'): if self.pattern=='1n': From 012c51aceb4dbc9333a3bbd40bff35fcdd54d763 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 17 Jul 2020 09:48:04 +0800 Subject: [PATCH 43/72] Update Note Architecture.py --- Note/create/Note Architecture.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/Note/create/Note Architecture.py b/Note/create/Note Architecture.py index ee8053e0f..25a81a932 100644 --- a/Note/create/Note Architecture.py +++ b/Note/create/Note Architecture.py @@ -51,6 +51,8 @@ def __init__(): self.epoch=None self.optimizer=None self.lr=None + + self.train_loss=None self.train_accuracy=None self.train_loss_list=[] @@ -142,14 +144,16 @@ def 
train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,acc=True,train_su opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) if self.optimizer=='Adam': opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) - train_loss_scalar=tf.summary.scalar('train_loss',train_loss) - if acc==True: + if self.acc==True: with tf.name_scope('train_accuracy'): - train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) if train_summary_path!=None: - train_merging=tf.summary.merge([train_loss_scalar,train_accuracy_scalar]) + train_loss_scalar=tf.summary.scalar('train_loss',train_loss) + train_merging=tf.summary.merge([train_loss_scalar]) + if self.acc==True: + train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) + train_merging=tf.summary.merge([train_accuracy_scalar]) train_writer=tf.summary.FileWriter(train_summary_path) config=tf.ConfigProto() config.gpu_options.allow_growth=True @@ -178,7 +182,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,acc=True,train_su else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if acc==True: + if self.acc==True: batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) total_acc+=batch_acc if self.shape0%self.batch!=0: @@ -192,7 +196,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,acc=True,train_su else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if acc==True: + if self.acc==True: batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) total_acc+=batch_acc loss=total_loss/batches @@ -200,7 +204,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,acc=True,train_su self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if acc==True: + if self.acc==True: self.train_accuracy_list.append(train_acc.astype(np.float32)) self.train_accuracy=train_acc self.train_accuracy=self.train_accuracy.astype(np.float32) @@ -216,7 +220,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,acc=True,train_su self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if acc==True: + if self.acc==True: accuracy=sess.run(train_accuracy,feed_dict=feed_dict) self.train_accuracy_list.append(accuracy.astype(np.float32)) self.train_accuracy=accuracy @@ -245,7 +249,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,acc=True,train_su train_writer.add_summary(train_summary,i) print() print('last loss:{0:.6f}'.format(self.train_loss)) - if acc==True: + if self.acc==True: if len(self.labels_shape)==2: print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) else: From d4afd3771bf418a2070cb335c44998a7bc0c6a38 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 17 Jul 2020 09:48:34 +0800 Subject: [PATCH 44/72] Update FNN.py --- Note/nn/FNN/FNN.py | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/Note/nn/FNN/FNN.py b/Note/nn/FNN/FNN.py index 883d87f4a..020c0c59c 100644 --- a/Note/nn/FNN/FNN.py +++ b/Note/nn/FNN/FNN.py @@ -316,23 +316,23 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N # ---------------------------------------- with tf.name_scope('train_loss'): if len(self.labels_shape)==1: - if l2==None: + if self.l2==None: 
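A note on the L2 term that patches 44 through 49 rewrite in every network: each variant adds l2/2 times the summed squares of the weight tensors to the loss. A minimal standalone sketch of that penalty follows; the names `weights` and `l2` are illustrative, not taken from the repo, and since tf.nn.l2_loss(w) already computes sum(w**2)/2 the two forms are equivalent:

    import tensorflow as tf

    def l2_penalty(weights, l2):
        # explicit form used in these diffs: l2/2 * sum of squared entries
        return l2 / 2 * sum(tf.reduce_sum(w ** 2) for w in weights)

    def l2_penalty_builtin(weights, l2):
        # same value via the built-in helper, which folds in the 1/2 factor
        return l2 * sum(tf.nn.l2_loss(w) for w in weights)
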
train_loss=tf.reduce_mean(tf.square(train_output-tf.expand_dims(self.labels,axis=1))) else: train_loss=tf.square(train_output-tf.expand_dims(self.labels,axis=1)) - train_loss=tf.reduce_mean(train_loss+l2/2*sum([tf.reduce_sum(x**2) for x in self.weight])) + train_loss=tf.reduce_mean(train_loss+self.l2/2*sum([tf.reduce_sum(x**2) for x in self.weight])) elif self.labels_shape[1]==1: - if l2==None: + if self.l2==None: train_loss=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=train_output,labels=self.labels)) else: train_loss=tf.nn.sigmoid_cross_entropy_with_logits(logits=train_output,labels=self.labels) - train_loss=tf.reduce_mean(train_loss+l2/2*sum([tf.reduce_sum(x**2) for x in self.weight])) + train_loss=tf.reduce_mean(train_loss+self.l2/2*sum([tf.reduce_sum(x**2) for x in self.weight])) else: - if l2==None: + if self.l2==None: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=train_output,labels=self.labels)) else: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=train_output,labels=self.labels)) - train_loss=train_loss+l2*sum([tf.reduce_sum(x**2) for x in self.weight]) + train_loss=train_loss+self.l2*sum([tf.reduce_sum(x**2) for x in self.weight]) if self.optimizer=='Gradient': opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) if self.optimizer=='RMSprop': @@ -341,17 +341,19 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) if self.optimizer=='Adam': opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) - train_loss_scalar=tf.summary.scalar('train_loss',train_loss) - if acc==True: + if self.acc==True: with tf.name_scope('train_accuracy'): if len(self.labels_shape)==1: train_accuracy=tf.reduce_mean(tf.abs(train_output-self.labels)) else: equal=tf.equal(tf.argmax(train_output,1),tf.argmax(self.labels,1)) train_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) - train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) if train_summary_path!=None: - train_merging=tf.summary.merge([train_loss_scalar,train_accuracy_scalar]) + train_loss_scalar=tf.summary.scalar('train_loss',train_loss) + train_merging=tf.summary.merge([train_loss_scalar]) + if self.acc==True: + train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) + train_merging=tf.summary.merge([train_accuracy_scalar]) train_writer=tf.summary.FileWriter(train_summary_path) config=tf.ConfigProto() config.gpu_options.allow_growth=True @@ -381,7 +383,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if acc==True: + if self.acc==True: batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) total_acc+=batch_acc if self.shape0%self.batch!=0: @@ -396,7 +398,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if acc==True: + if self.acc==True: batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) total_acc+=batch_acc loss=total_loss/batches @@ -404,7 +406,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if acc==True: + if self.acc==True: 
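A pattern worth naming in patches 43 onward: the train() argument acc is stored on the instance (see the self.acc=acc lines added to LSTM.py, M_reluGRU.py and RNN.py below), and every later branch tests the attribute rather than the local, so the flag survives into continued-training runs and other methods. A minimal sketch of the idiom, with illustrative class and method names:

    class Trainer:
        def train(self, acc=True):
            self.acc = acc          # persist the flag on the instance
            if self.acc == True:    # later branches read the stored copy
                pass                # accuracy bookkeeping goes here
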
self.train_accuracy_list.append(train_acc.astype(np.float32)) self.train_accuracy=train_acc self.train_accuracy=self.train_accuracy.astype(np.float32) @@ -421,7 +423,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if acc==True: + if self.acc==True: accuracy=sess.run(train_accuracy,feed_dict=feed_dict) self.train_accuracy_list.append(accuracy.astype(np.float32)) self.train_accuracy=accuracy @@ -450,7 +452,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N train_writer.add_summary(train_summary,i) print() print('last loss:{0:.6f}'.format(self.train_loss)) - if acc==True: + if self.acc==True: if len(self.labels_shape)==2: print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) else: From bb92b0d2d00684f235a757ec7c7cc0cceac86aec Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 17 Jul 2020 09:49:19 +0800 Subject: [PATCH 45/72] Update CNN.py --- Note/nn/CNN/CNN.py | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/Note/nn/CNN/CNN.py b/Note/nn/CNN/CNN.py index 14c22b8ae..63da93789 100644 --- a/Note/nn/CNN/CNN.py +++ b/Note/nn/CNN/CNN.py @@ -439,17 +439,17 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N # ---------------------------------------- with tf.name_scope('train_loss'): if self.labels_shape[1]==1: - if l2==None: + if self.l2==None: train_loss=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=train_output,labels=self.labels)) else: train_loss=tf.nn.sigmoid_cross_entropy_with_logits(logits=train_output,labels=self.labels) - train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.weight_conv])+sum([tf.reduce_sum(x**2) for x in self.weight_fc]))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.weight_conv])+sum([tf.reduce_sum(x**2) for x in self.weight_fc]))) else: - if l2==None: + if self.l2==None: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=train_output,labels=self.labels)) else: train_loss=tf.nn.softmax_cross_entropy_with_logits_v2(logits=train_output,labels=self.labels) - train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.weight_conv])+sum([tf.reduce_sum(x**2) for x in self.weight_fc]))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.weight_conv])+sum([tf.reduce_sum(x**2) for x in self.weight_fc]))) if self.optimizer=='Gradient': opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) if self.optimizer=='RMSprop': @@ -458,14 +458,16 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) if self.optimizer=='Adam': opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) - train_loss_scalar=tf.summary.scalar('train_loss',train_loss) - if acc==True: + if self.acc==True: with tf.name_scope('train_accuracy'): equal=tf.equal(tf.argmax(train_output,1),tf.argmax(self.labels,1)) train_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) - train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) if train_summary_path!=None: - train_merging=tf.summary.merge([train_loss_scalar,train_accuracy_scalar]) + 
train_loss_scalar=tf.summary.scalar('train_loss',train_loss) + train_merging=tf.summary.merge([train_loss_scalar]) + if self.acc==True: + train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) + train_merging=tf.summary.merge([ train_accuracy_scalar]) train_writer=tf.summary.FileWriter(train_summary_path) config=tf.ConfigProto() config.gpu_options.allow_growth=True @@ -495,7 +497,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if acc==True: + if self.acc==True: batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) total_acc+=batch_acc if self.shape0%self.batch!=0: @@ -510,7 +512,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if acc==True: + if self.acc==True: batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) total_acc+=batch_acc loss=total_loss/batches @@ -518,7 +520,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if acc==True: + if self.acc==True: self.train_accuracy_list.append(train_acc.astype(np.float32)) self.train_accuracy=train_acc self.train_accuracy=self.train_accuracy.astype(np.float32) @@ -535,7 +537,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if acc==True: + if self.acc==True: accuracy=sess.run(train_accuracy,feed_dict=feed_dict) self.train_accuracy_list.append(accuracy.astype(np.float32)) self.train_accuracy=accuracy @@ -564,7 +566,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N train_writer.add_summary(train_summary,i) print() print('last loss:{0:.6f}'.format(self.train_loss)) - if acc==True: + if self.acc==True: print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) if train_summary_path!=None: train_writer.close() From da4935aa2a20b266fb9030c49cbd3e428c3af5fe Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 17 Jul 2020 09:49:44 +0800 Subject: [PATCH 46/72] Update GRU.py --- Note/nn/RNN/GRU.py | 44 +++++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 21 deletions(-) diff --git a/Note/nn/RNN/GRU.py b/Note/nn/RNN/GRU.py index d194b22a0..12a8afda3 100644 --- a/Note/nn/RNN/GRU.py +++ b/Note/nn/RNN/GRU.py @@ -539,42 +539,42 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, # ---------------------------------------- with tf.name_scope('train_loss'): if self.pattern=='1n': - if l2==None: + if self.l2==None: train_loss=tf.reduce_mean(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1)) else: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.rg_weight_x**2)+tf.reduce_sum(self.rg_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + 
train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.rg_weight_x**2)+tf.reduce_sum(self.rg_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) elif self.pattern=='n1' or self.predicate==True: if self.pattern=='n1': - if l2==None: + if self.l2==None: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output[-1],labels=self.labels)) else: train_loss=tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output[-1],labels=self.labels) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.rg_weight_x**2)+tf.reduce_sum(self.rg_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.rg_weight_x**2)+tf.reduce_sum(self.rg_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) else: - if l2==None: + if self.l2==None: train_loss=tf.reduce_mean(tf.square(self.output[-1]-tf.expand_dims(self.labels,axis=1))) else: train_loss=tf.square(self.output[-1]-tf.expand_dims(self.labels,axis=1)) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.rg_weight_x**2)+tf.reduce_sum(self.rg_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + 
train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.rg_weight_x**2)+tf.reduce_sum(self.rg_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) elif self.pattern=='nn': - if l2==None: + if self.l2==None: train_loss=tf.reduce_mean(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1)) else: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.rg_weight_x**2)+tf.reduce_sum(self.rg_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.rg_weight_x**2)+tf.reduce_sum(self.rg_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) if self.optimizer=='Gradient': opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) if self.optimizer=='RMSprop': @@ -583,8 +583,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) if self.optimizer=='Adam': opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) - train_loss_scalar=tf.summary.scalar('train_loss',train_loss) - if acc==True: + if self.acc==True: with tf.name_scope('train_accuracy'): if self.pattern=='1n': 
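The accuracy graphs built in these patches all reduce to the same comparison: argmax over the class axis of the logits against argmax of the one-hot labels, averaged as a float. A standalone sketch of that metric (axis 2 corresponds to the '1n'/'nn' sequence patterns, axis 1 to 'n1'; the names are illustrative, not the repo's):

    import tensorflow as tf

    def onehot_accuracy(logits, labels, axis):
        # fraction of positions where the predicted class matches the label
        equal = tf.equal(tf.argmax(logits, axis), tf.argmax(labels, axis))
        return tf.reduce_mean(tf.cast(equal, tf.float32))
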
train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output,2),tf.argmax(self.labels,2)),tf.float32)) @@ -596,9 +595,12 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, train_accuracy=tf.reduce_mean(tf.abs(self.output[-1]-self.labels)) elif self.pattern=='nn': train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output,2),tf.argmax(self.labels,2)),tf.float32)) - train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) if train_summary_path!=None: - train_merging=tf.summary.merge([train_loss_scalar,train_accuracy_scalar]) + train_loss_scalar=tf.summary.scalar('train_loss',train_loss) + train_merging=tf.summary.merge([train_loss_scalar]) + if self.acc==True: + train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) + train_merging=tf.summary.merge([train_accuracy_scalar]) train_writer=tf.summary.FileWriter(train_summary_path) config=tf.ConfigProto() config.gpu_options.allow_growth=True @@ -628,7 +630,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if acc==True: + if self.acc==True: batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) total_acc+=batch_acc if self.shape0%self.batch!=0: @@ -643,7 +645,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if acc==True: + if self.acc==True: batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) total_acc+=batch_acc loss=total_loss/batches @@ -651,7 +653,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if acc==True: + if self.acc==True: self.train_accuracy_list.append(train_acc.astype(np.float32)) self.train_accuracy=train_acc self.train_accuracy=self.train_accuracy.astype(np.float32) @@ -668,7 +670,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if acc==True: + if self.acc==True: accuracy=sess.run(train_accuracy,feed_dict=feed_dict) self.train_accuracy_list.append(accuracy.astype(np.float32)) self.train_accuracy=accuracy @@ -697,7 +699,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, train_writer.add_summary(train_summary,i) print() print('last loss:{0:.6f}'.format(self.train_loss)) - if acc==True: + if self.acc==True: if self.predicate==False: print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) else: From 94eb7381f67304d233fafc092bc3fef99f081ac3 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 17 Jul 2020 09:50:27 +0800 Subject: [PATCH 47/72] Update LSTM.py --- Note/nn/RNN/LSTM.py | 45 ++++++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/Note/nn/RNN/LSTM.py b/Note/nn/RNN/LSTM.py index 38bd1a304..943f08ac6 100644 --- a/Note/nn/RNN/LSTM.py +++ b/Note/nn/RNN/LSTM.py @@ -507,6 +507,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.l2=l2 self.optimizer=optimizer self.lr=lr + self.acc=acc if continue_train!=True: if self.continue_train==True: continue_train=True @@ -627,42 +628,42 @@ def 
train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, # ---------------------------------------- with tf.name_scope('train_loss'): if self.pattern=='1n': - if l2==None: + if self.l2==None: train_loss=tf.reduce_mean(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1)) else: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.fg_weight_x**2)+tf.reduce_sum(self.fg_weight_h**2)+tf.reduce_sum(self.ig_weight_x**2)+tf.reduce_sum(self.ig_weight_h**2)+tf.reduce_sum(self.og_weight_x**2)+tf.reduce_sum(self.og_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.fg_weight_x**2)+tf.reduce_sum(self.fg_weight_h**2)+tf.reduce_sum(self.ig_weight_x**2)+tf.reduce_sum(self.ig_weight_h**2)+tf.reduce_sum(self.og_weight_x**2)+tf.reduce_sum(self.og_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.fg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.fg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_h])+sum([tf.reduce_sum(x**2) for x in self.og_weight_x])+sum([tf.reduce_sum(x**2) for x in self.og_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.fg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.fg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_h])+sum([tf.reduce_sum(x**2) for x in self.og_weight_x])+sum([tf.reduce_sum(x**2) for x in self.og_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) elif self.pattern=='n1' or self.predicate==True: if self.pattern=='n1': - if l2==None: + if self.l2==None: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output[-1],labels=self.labels)) else: train_loss=tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output[-1],labels=self.labels) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.fg_weight_x**2)+tf.reduce_sum(self.fg_weight_h**2)+tf.reduce_sum(self.ig_weight_x**2)+tf.reduce_sum(self.ig_weight_h**2)+tf.reduce_sum(self.og_weight_x**2)+tf.reduce_sum(self.og_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.fg_weight_x**2)+tf.reduce_sum(self.fg_weight_h**2)+tf.reduce_sum(self.ig_weight_x**2)+tf.reduce_sum(self.ig_weight_h**2)+tf.reduce_sum(self.og_weight_x**2)+tf.reduce_sum(self.og_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.fg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.fg_weight_h])+sum([tf.reduce_sum(x**2) 
for x in self.ig_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_h])+sum([tf.reduce_sum(x**2) for x in self.og_weight_x])+sum([tf.reduce_sum(x**2) for x in self.og_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.fg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.fg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_h])+sum([tf.reduce_sum(x**2) for x in self.og_weight_x])+sum([tf.reduce_sum(x**2) for x in self.og_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) else: - if l2==None: + if self.l2==None: train_loss=tf.reduce_mean(tf.square(self.output[-1]-tf.expand_dims(self.labels,axis=1))) else: train_loss=tf.square(self.output[-1]-tf.expand_dims(self.labels,axis=1)) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.fg_weight_x**2)+tf.reduce_sum(self.fg_weight_h**2)+tf.reduce_sum(self.ig_weight_x**2)+tf.reduce_sum(self.ig_weight_h**2)+tf.reduce_sum(self.og_weight_x**2)+tf.reduce_sum(self.og_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.fg_weight_x**2)+tf.reduce_sum(self.fg_weight_h**2)+tf.reduce_sum(self.ig_weight_x**2)+tf.reduce_sum(self.ig_weight_h**2)+tf.reduce_sum(self.og_weight_x**2)+tf.reduce_sum(self.og_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.fg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.fg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_h])+sum([tf.reduce_sum(x**2) for x in self.og_weight_x])+sum([tf.reduce_sum(x**2) for x in self.og_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.fg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.fg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_h])+sum([tf.reduce_sum(x**2) for x in self.og_weight_x])+sum([tf.reduce_sum(x**2) for x in self.og_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) elif self.pattern=='nn': - if l2==None: + if self.l2==None: train_loss=tf.reduce_mean(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1)) else: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1) if self.layers==None: - 
train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.fg_weight_x**2)+tf.reduce_sum(self.fg_weight_h**2)+tf.reduce_sum(self.ig_weight_x**2)+tf.reduce_sum(self.ig_weight_h**2)+tf.reduce_sum(self.og_weight_x**2)+tf.reduce_sum(self.og_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.fg_weight_x**2)+tf.reduce_sum(self.fg_weight_h**2)+tf.reduce_sum(self.ig_weight_x**2)+tf.reduce_sum(self.ig_weight_h**2)+tf.reduce_sum(self.og_weight_x**2)+tf.reduce_sum(self.og_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.fg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.fg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_h])+sum([tf.reduce_sum(x**2) for x in self.og_weight_x])+sum([tf.reduce_sum(x**2) for x in self.og_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.fg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.fg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_h])+sum([tf.reduce_sum(x**2) for x in self.og_weight_x])+sum([tf.reduce_sum(x**2) for x in self.og_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) if self.optimizer=='Gradient': opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) if self.optimizer=='RMSprop': @@ -671,8 +672,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) if self.optimizer=='Adam': opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) - train_loss_scalar=tf.summary.scalar('train_loss',train_loss) - if acc==True: + if self.acc==True: with tf.name_scope('train_accuracy'): if self.pattern=='1n': train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output,2),tf.argmax(self.labels,2)),tf.float32)) @@ -684,9 +684,12 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, train_accuracy=tf.reduce_mean(tf.abs(self.output[-1]-self.labels)) elif self.pattern=='nn': train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output,2),tf.argmax(self.labels,2)),tf.float32)) - train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) if train_summary_path!=None: - train_merging=tf.summary.merge([train_loss_scalar,train_accuracy_scalar]) + train_loss_scalar=tf.summary.scalar('train_loss',train_loss) + train_merging=tf.summary.merge([train_loss_scalar]) + if self.acc==True: + train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) + train_merging=tf.summary.merge([train_accuracy_scalar]) train_writer=tf.summary.FileWriter(train_summary_path) config=tf.ConfigProto() config.gpu_options.allow_growth=True @@ -716,7 +719,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss 
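The batch loop repeated in every train() splits shape0 samples into int((shape0-shape0%batch)/batch) full batches and, when shape0%batch!=0, runs one extra step on the remainder. A plain-Python sketch of the index arithmetic; note it treats the leftover as a short final batch, a simplification of the apparent wrap-around indexing (index2=self.batch-(self.shape0-batches*self.batch)) used in the diffs:

    def batch_bounds(shape0, batch):
        batches = shape0 // batch                      # full batches
        bounds = [(j * batch, (j + 1) * batch) for j in range(batches)]
        if shape0 % batch != 0:
            bounds.append((batches * batch, shape0))   # trailing partial batch
        return bounds

    # e.g. batch_bounds(10, 4) -> [(0, 4), (4, 8), (8, 10)]
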
- if acc==True: + if self.acc==True: batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) total_acc+=batch_acc if self.shape0%self.batch!=0: @@ -731,7 +734,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if acc==True: + if self.acc==True: batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) total_acc+=batch_acc loss=total_loss/batches @@ -739,7 +742,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if acc==True: + if self.acc==True: self.train_accuracy_list.append(train_acc.astype(np.float32)) self.train_accuracy=train_acc self.train_accuracy=self.train_accuracy.astype(np.float32) @@ -756,7 +759,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if acc==True: + if self.acc==True: accuracy=sess.run(train_accuracy,feed_dict=feed_dict) self.train_accuracy_list.append(accuracy.astype(np.float32)) self.train_accuracy=accuracy @@ -785,7 +788,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, train_writer.add_summary(train_summary,i) print() print('last loss:{0:.6f}'.format(self.train_loss)) - if acc==True: + if self.acc==True: if self.predicate==False: print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) else: From aaed3e3d5c517867fd5a4a94d236079e572e963f Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 17 Jul 2020 09:50:41 +0800 Subject: [PATCH 48/72] Update M_reluGRU.py --- Note/nn/RNN/M_reluGRU.py | 45 +++++++++++++++++++++------------------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/Note/nn/RNN/M_reluGRU.py b/Note/nn/RNN/M_reluGRU.py index 59dc286f6..671c4c276 100644 --- a/Note/nn/RNN/M_reluGRU.py +++ b/Note/nn/RNN/M_reluGRU.py @@ -422,6 +422,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.l2=l2 self.optimizer=optimizer self.lr=lr + self.acc=acc if continue_train!=True: if self.continue_train==True: continue_train=True @@ -506,42 +507,42 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, # ---------------------------------------- with tf.name_scope('train_loss'): if self.pattern=='1n': - if l2==None: + if self.l2==None: train_loss=tf.reduce_mean(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1)) else: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in 
self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) elif self.pattern=='n1' or self.predicate==True: if self.pattern=='n1': - if l2==None: + if self.l2==None: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output[-1],labels=self.labels)) else: train_loss=tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output[-1],labels=self.labels) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) else: - if l2==None: + if self.l2==None: train_loss=tf.reduce_mean(tf.square(self.output[-1]-tf.expand_dims(self.labels,axis=1))) else: train_loss=tf.square(self.output[-1]-tf.expand_dims(self.labels,axis=1)) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) elif self.pattern=='nn': - if l2==None: + if self.l2==None: train_loss=tf.reduce_mean(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1)) else: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1) if self.layers==None: - 
train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) if self.optimizer=='Gradient': opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) if self.optimizer=='RMSprop': @@ -550,8 +551,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) if self.optimizer=='Adam': opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) - train_loss_scalar=tf.summary.scalar('train_loss',train_loss) - if acc==True: + if self.acc==True: with tf.name_scope('train_accuracy'): if self.pattern=='1n': train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output,2),tf.argmax(self.labels,2)),tf.float32)) @@ -563,9 +563,12 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, train_accuracy=tf.reduce_mean(tf.abs(self.output[-1]-self.labels)) elif self.pattern=='nn': train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output,2),tf.argmax(self.labels,2)),tf.float32)) - train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) if train_summary_path!=None: - train_merging=tf.summary.merge([train_loss_scalar,train_accuracy_scalar]) + train_loss_scalar=tf.summary.scalar('train_loss',train_loss) + train_merging=tf.summary.merge([train_loss_scalar]) + if self.acc==True: + train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) + train_merging=tf.summary.merge([train_accuracy_scalar]) train_writer=tf.summary.FileWriter(train_summary_path) config=tf.ConfigProto() config.gpu_options.allow_growth=True @@ -595,7 +598,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if acc==True: + if self.acc==True: batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) total_acc+=batch_acc if self.shape0%self.batch!=0: @@ -610,7 +613,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if acc==True: + if self.acc==True: batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) total_acc+=batch_acc loss=total_loss/batches @@ -618,7 +621,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if 
acc==True: + if self.acc==True: self.train_accuracy_list.append(train_acc.astype(np.float32)) self.train_accuracy=train_acc self.train_accuracy=self.train_accuracy.astype(np.float32) @@ -635,7 +638,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if acc==True: + if self.acc==True: accuracy=sess.run(train_accuracy,feed_dict={self.data:self.train_data,self.labels:self.train_labels}) self.train_accuracy_list.append(accuracy.astype(np.float32)) self.train_accuracy=accuracy @@ -664,7 +667,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, train_writer.add_summary(train_summary,i) print() print('last loss:{0:.6f}'.format(self.train_loss)) - if acc==True: + if self.acc==True: print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) if train_summary_path!=None: train_writer.close() From 891d8cae277a6c4bcc96033a93aac955d196f254 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 17 Jul 2020 09:51:19 +0800 Subject: [PATCH 49/72] Update RNN.py --- Note/nn/RNN/RNN.py | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/Note/nn/RNN/RNN.py b/Note/nn/RNN/RNN.py index 00494be98..d77baf840 100644 --- a/Note/nn/RNN/RNN.py +++ b/Note/nn/RNN/RNN.py @@ -199,6 +199,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.l2=l2 self.optimizer=optimizer self.lr=lr + self.acc=acc if continue_train!=True: if self.continue_train==True: continue_train=True @@ -242,30 +243,30 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, # ---------------------------------------- with tf.name_scope('train_loss'): if self.pattern=='1n': - if l2==None: + if self.l2==None: train_loss=tf.reduce_mean(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.o,labels=self.labels,axis=2),axis=1)) else: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.o,labels=self.labels,axis=2),axis=1) - train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.weight_x**2)+tf.reduce_sum(self.weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.weight_x**2)+tf.reduce_sum(self.weight_h**2)+tf.reduce_sum(self.weight_o**2))) elif self.pattern=='n1' or self.predicate==True: if self.pattern=='n1': - if l2==None: + if self.l2==None: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.o[-1],labels=self.labels)) else: train_loss=tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.o[-1],labels=self.labels) - train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.weight_x**2)+tf.reduce_sum(self.weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.weight_x**2)+tf.reduce_sum(self.weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - if l2==None: + if self.l2==None: train_loss=tf.reduce_mean(tf.square(self.o[-1]-tf.expand_dims(self.labels,axis=1))) else: train_loss=tf.square(self.o[-1]-tf.expand_dims(self.labels,axis=1)) - train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.weight_x**2)+tf.reduce_sum(self.weight_h**2)+tf.reduce_sum(self.weight_o**2))) + 
train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.weight_x**2)+tf.reduce_sum(self.weight_h**2)+tf.reduce_sum(self.weight_o**2))) elif self.pattern=='nn': - if l2==None: + if self.l2==None: train_loss=tf.reduce_mean(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.o,labels=self.labels,axis=2),axis=1)) else: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.o,labels=self.labels,axis=2),axis=1) - train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.weight_x**2)+tf.reduce_sum(self.weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.weight_x**2)+tf.reduce_sum(self.weight_h**2)+tf.reduce_sum(self.weight_o**2))) if self.optimizer=='Gradient': opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) if self.optimizer=='RMSprop': @@ -275,7 +276,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, if self.optimizer=='Adam': opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) train_loss_scalar=tf.summary.scalar('train_loss',train_loss) - if acc==True: + if self.acc==True: with tf.name_scope('train_accuracy'): if self.pattern=='1n': train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.o,2),tf.argmax(self.labels,2)),tf.float32)) @@ -289,7 +290,11 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.o,2),tf.argmax(self.labels,2)),tf.float32)) train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) if train_summary_path!=None: - train_merging=tf.summary.merge([train_loss_scalar,train_accuracy_scalar]) + train_loss_scalar=tf.summary.scalar('train_loss',train_loss) + train_merging=tf.summary.merge([train_loss_scalar]) + if self.acc==True: + train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) + train_merging=tf.summary.merge([train_accuracy_scalar]) train_writer=tf.summary.FileWriter(train_summary_path) config=tf.ConfigProto() config.gpu_options.allow_growth=True @@ -319,7 +324,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if acc==True: + if self.acc==True: batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) total_acc+=batch_acc if self.shape0%self.batch!=0: @@ -334,7 +339,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if acc==True: + if self.acc==True: batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) total_acc+=batch_acc loss=total_loss/batches @@ -342,7 +347,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if acc==True: + if self.acc==True: self.train_accuracy_list.append(train_acc.astype(np.float32)) self.train_accuracy=train_acc self.train_accuracy=self.train_accuracy.astype(np.float32) @@ -359,7 +364,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if acc==True: + if self.acc==True: accuracy=sess.run(train_accuracy,feed_dict=feed_dict) 
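
NOTE: in the summary rewiring above, when self.acc==True the second tf.summary.merge call rebinds train_merging to the accuracy scalar alone, so the merged summary written to TensorBoard no longer carries train_loss (and both scalar ops are now created twice, once outside and once inside the train_summary_path block). If both values are meant to be logged, a single merge over the collected scalars does it. A minimal sketch only, not code from this series, assuming the same TF1 tf.summary API and meant for the same spot in train():

    summaries=[tf.summary.scalar('train_loss',train_loss)]
    if self.acc==True:
        summaries.append(tf.summary.scalar('train_accuracy',train_accuracy))
    train_merging=tf.summary.merge(summaries)  # one op that emits every collected scalar
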
 self.train_accuracy_list.append(accuracy.astype(np.float32))
 self.train_accuracy=accuracy
@@ -388,7 +393,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True,
 train_writer.add_summary(train_summary,i)
 print()
 print('last loss:{0:.6f}'.format(self.train_loss))
- if acc==True:
+ if self.acc==True:
 print('accuracy:{0:.3f}%'.format(self.train_accuracy*100))
 if train_summary_path!=None:
 train_writer.close()

From 2c7491709a6bc2992174089477e4c307f333026f Mon Sep 17 00:00:00 2001
From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com>
Date: Fri, 17 Jul 2020 12:55:01 +0800
Subject: [PATCH 50/72] Update FNN.py

---
 Note/nn/FNN/FNN.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/Note/nn/FNN/FNN.py b/Note/nn/FNN/FNN.py
index 020c0c59c..a9ccf54d4 100644
--- a/Note/nn/FNN/FNN.py
+++ b/Note/nn/FNN/FNN.py
@@ -300,6 +300,8 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N
 else:
 self.weight[i]=tf.Variable(self.last_weight[i],name='weight_{0}'.format(i+1))
 self.bias[i]=tf.Variable(self.last_bias[i],name='bias_{0}'.format(i+1))
+ self.last_weight.clear()
+ self.last_bias.clear()
 if continue_train==True and self.flag==1:
 self.weight=[x for x in range(self.hidden_layers+1)]
 self.bias=[x for x in range(self.hidden_layers+1)]
@@ -310,6 +312,8 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N
 else:
 self.weight[i]=tf.Variable(self.last_weight[i],name='weight_{0}'.format(i+1))
 self.bias[i]=tf.Variable(self.last_bias[i],name='bias_{0}'.format(i+1))
+ self.last_weight.clear()
+ self.last_bias.clear()
 self.flag=0
 # ---------------forward propagation---------------
 train_output=self.forward_propagation(self.data,self.dropout)

From 878127feaefb13cd94c345de4861cd975be143ea Mon Sep 17 00:00:00 2001
From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com>
Date: Fri, 17 Jul 2020 12:55:26 +0800
Subject: [PATCH 51/72] Update CNN.py

---
 Note/nn/CNN/CNN.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/Note/nn/CNN/CNN.py b/Note/nn/CNN/CNN.py
index 63da93789..88bb46e91 100644
--- a/Note/nn/CNN/CNN.py
+++ b/Note/nn/CNN/CNN.py
@@ -427,12 +427,20 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N
 for i in range(len(self.conv)):
 self.weight_conv[i]=tf.Variable(self.last_weight_conv[i],name='conv_{0}_weight'.format(i+1))
 self.bias_conv[i]=tf.Variable(self.last_bias_conv[i],name='conv_{0}_bias'.format(i+1))
+ self.last_weight_conv.clear()
+ self.last_bias_conv.clear()
+ self.last_weight_fc.clear()
+ self.last_bias_fc.clear()
 if continue_train==True and self.flag==1:
 self.weight_conv=[x for x in range(len(self.conv))]
 self.bias_conv=[x for x in range(len(self.conv))]
 for i in range(len(self.conv)):
 self.weight_conv[i]=tf.Variable(self.last_weight_conv[i],name='conv_{0}_weight'.format(i+1))
 self.bias_conv[i]=tf.Variable(self.last_bias_conv[i],name='conv_{0}_bias'.format(i+1))
+ self.last_weight_conv.clear()
+ self.last_bias_conv.clear()
+ self.last_weight_fc.clear()
+ self.last_bias_fc.clear()
 self.flag=0
 # ---------------forward propagation---------------
 train_output=self.forward_propagation(self.data,self.dropout)

From 8d7ba612caf7c23793df8ce5f6aa0e85029efe56 Mon Sep 17 00:00:00 2001
From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com>
Date: Fri, 17 Jul 2020 12:56:04 +0800
Subject: [PATCH 52/72] Update GRU.py

---
 Note/nn/RNN/GRU.py | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git
a/Note/nn/RNN/GRU.py b/Note/nn/RNN/GRU.py index 12a8afda3..258c70a43 100644 --- a/Note/nn/RNN/GRU.py +++ b/Note/nn/RNN/GRU.py @@ -496,6 +496,19 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.cltm_bias=tf.Variable(self.last_cltm_bias,name='cltm_bias') self.weight_o=tf.Variable(self.last_weight_o,name='weight_o') self.bias_o=tf.Variable(self.last_bias_o,name='bias_o') + self.last_embedding_w=None + self.last_embedding_b=None + self.last_ug_weight_x=None + self.last_ug_weight_h=None + self.last_rg_weight_x=None + self.last_rg_weight_h=None + self.last_cltm_weight_x=None + self.last_cltm_weight_h=None + self.last_ug_bias=None + self.last_rg_bias=None + self.last_cltm_bias=None + self.last_weight_o=None + self.last_bias_o=None if continue_train==True and self.flag==1: self.embedding_w=tf.Variable(self.last_embedding_w,name='embedding_w') self.embedding_b=tf.Variable(self.last_embedding_b,name='embedding_b') @@ -533,6 +546,19 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.cltm_bias=tf.Variable(self.last_cltm_bias,name='cltm_bias') self.weight_o=tf.Variable(self.last_weight_o,name='weight_o') self.bias_o=tf.Variable(self.last_bias_o,name='bias_o') + self.last_embedding_w=None + self.last_embedding_b=None + self.last_ug_weight_x=None + self.last_ug_weight_h=None + self.last_rg_weight_x=None + self.last_rg_weight_h=None + self.last_cltm_weight_x=None + self.last_cltm_weight_h=None + self.last_ug_bias=None + self.last_rg_bias=None + self.last_cltm_bias=None + self.last_weight_o=None + self.last_bias_o=None self.flag=0 # ---------------forward propagation--------------- self.forward_propagation(self.data) From c83b08c2fdaee396c8b24dac888134cc0b6e52ba Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 17 Jul 2020 12:56:20 +0800 Subject: [PATCH 53/72] Update LSTM.py --- Note/nn/RNN/LSTM.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/Note/nn/RNN/LSTM.py b/Note/nn/RNN/LSTM.py index 943f08ac6..e248dbfe8 100644 --- a/Note/nn/RNN/LSTM.py +++ b/Note/nn/RNN/LSTM.py @@ -576,6 +576,22 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.cltm_bias=tf.Variable(self.last_cltm_bias,name='cltm_bias') self.weight_o=tf.Variable(self.last_weight_o,name='weight_o') self.bias_o=tf.Variable(self.last_bias_o,name='bias_o') + self.last_embedding_w=None + self.last_embedding_b=None + self.last_fg_weight_x=None + self.last_fg_weight_h=None + self.last_ig_weight_x=None + self.last_ig_weight_h=None + self.last_og_weight_x=None + self.last_og_weight_h=None + self.last_cltm_weight_x=None + self.last_cltm_weight_h=None + self.last_fg_bias=None + self.last_ig_bias=None + self.last_og_bias=None + self.last_cltm_bias=None + self.last_weight_o=None + self.last_bias_o=None if continue_train==True and self.flag==1: self.embedding_w=tf.Variable(self.last_embedding_w,name='embedding_w') self.embedding_b=tf.Variable(self.last_embedding_b,name='embedding_b') @@ -622,6 +638,22 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.cltm_bias=tf.Variable(self.last_cltm_bias,name='cltm_bias') self.weight_o=tf.Variable(self.last_weight_o,name='weight_o') self.bias_o=tf.Variable(self.last_bias_o,name='bias_o') + self.last_embedding_w=None + self.last_embedding_b=None + self.last_fg_weight_x=None + self.last_fg_weight_h=None + self.last_ig_weight_x=None + self.last_ig_weight_h=None + 
self.last_og_weight_x=None + self.last_og_weight_h=None + self.last_cltm_weight_x=None + self.last_cltm_weight_h=None + self.last_fg_bias=None + self.last_ig_bias=None + self.last_og_bias=None + self.last_cltm_bias=None + self.last_weight_o=None + self.last_bias_o=None self.flag=0 # ---------------forward propagation--------------- self.forward_propagation(self.data) From 62c173aeae933fba52b929d96cfd76ff88e22c6b Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 17 Jul 2020 12:56:59 +0800 Subject: [PATCH 54/72] Update M_reluGRU.py --- Note/nn/RNN/M_reluGRU.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/Note/nn/RNN/M_reluGRU.py b/Note/nn/RNN/M_reluGRU.py index 671c4c276..f0ee90680 100644 --- a/Note/nn/RNN/M_reluGRU.py +++ b/Note/nn/RNN/M_reluGRU.py @@ -473,6 +473,16 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.ug_bias=tf.Variable(self.last_ug_bias,name='ug_bias') self.cltm_bias=tf.Variable(self.last_cltm_bias,name='cltm_bias') self.bias_o=tf.Variable(self.last_bias_o,name='bias_o') + self.last_embedding_w=None + self.last_embedding_b=None + self.last_ug_weight_x=None + self.last_ug_weight_h=None + self.last_cltm_weight_x=None + self.last_cltm_weight_h=None + self.last_ug_bias=None + self.last_cltm_bias=None + self.last_weight_o=None + self.last_bias_o=None if continue_train==True and self.flag==1: self.embedding_w=tf.Variable(self.last_embedding_w,name='embedding_w') self.embedding_b=tf.Variable(self.last_embedding_b,name='embedding_b') @@ -501,6 +511,16 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.ug_bias=tf.Variable(self.last_ug_bias,name='ug_bias') self.cltm_bias=tf.Variable(self.last_cltm_bias,name='cltm_bias') self.bias_o=tf.Variable(self.last_bias_o,name='bias_o') + self.last_embedding_w=None + self.last_embedding_b=None + self.last_ug_weight_x=None + self.last_ug_weight_h=None + self.last_cltm_weight_x=None + self.last_cltm_weight_h=None + self.last_ug_bias=None + self.last_cltm_bias=None + self.last_weight_o=None + self.last_bias_o=None self.flag=0 # ---------------forward propagation--------------- self.forward_propagation(self.data) From 06a19b5973c789c9d16b279d7825b9e56ecfac73 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 17 Jul 2020 12:57:16 +0800 Subject: [PATCH 55/72] Update RNN.py --- Note/nn/RNN/RNN.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/Note/nn/RNN/RNN.py b/Note/nn/RNN/RNN.py index d77baf840..f30e07bbb 100644 --- a/Note/nn/RNN/RNN.py +++ b/Note/nn/RNN/RNN.py @@ -228,6 +228,14 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.bias_x=tf.Variable(self.last_bias_x,name='bias_x') self.bias_h=tf.Variable(self.last_bias_h,name='bias_h') self.bias_o=tf.Variable(self.last_bias_o,name='bias_o') + self.last_embedding_w=None + self.last_embedding_b=None + self.last_weight_x=None + self.last_weight_h=None + self.last_weight_o=None + self.last_bias_x=None + self.last_bias_h=None + self.last_bias_o=None if continue_train==True and self.flag==1: self.embedding_w=tf.Variable(self.last_embedding_w,name='embedding_w') self.embedding_b=tf.Variable(self.last_embedding_b,name='embedding_b') @@ -237,6 +245,14 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.bias_x=tf.Variable(self.last_bias_x,name='bias_x') 
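
NOTE: the self.last_*=None assignments introduced in patches 52 through 55 all apply one pattern: once a saved snapshot has been copied into a fresh tf.Variable, the Python reference to the snapshot is dropped so only one copy of each weight array stays alive. A self-contained sketch of the idea (the names are illustrative, not from the repository):

    import numpy as np
    import tensorflow as tf

    snapshot=np.zeros((128,128),dtype=np.float32)  # stands in for a weight array kept from the last run
    weight=tf.Variable(snapshot,name='weight_x')   # the Variable keeps its own copy of the data
    snapshot=None                                  # drop the duplicate so it can be garbage-collected
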
self.bias_h=tf.Variable(self.last_bias_h,name='bias_h') self.bias_o=tf.Variable(self.last_bias_o,name='bias_o') + self.last_embedding_w=None + self.last_embedding_b=None + self.last_weight_x=None + self.last_weight_h=None + self.last_weight_o=None + self.last_bias_x=None + self.last_bias_h=None + self.last_bias_o=None self.flag=0 # ---------------forward propagation--------------- self.forward_propagation(self.data) From 24f80d45f89fa82ef29968b701738a8d5bf8e89d Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 17 Jul 2020 20:08:36 +0800 Subject: [PATCH 56/72] Update version.py From 3cbd077e86ea91b1affd8ea20e76f354f58821ff Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sun, 26 Jul 2020 22:56:14 +0800 Subject: [PATCH 57/72] Update FNN.py --- Note/nn/FNN/FNN.py | 96 ++++++++++++++++++++-------------------------- 1 file changed, 41 insertions(+), 55 deletions(-) diff --git a/Note/nn/FNN/FNN.py b/Note/nn/FNN/FNN.py index a9ccf54d4..e25034678 100644 --- a/Note/nn/FNN/FNN.py +++ b/Note/nn/FNN/FNN.py @@ -259,7 +259,7 @@ def forward_propagation(self,data,dropout=None,use_nn=False): return output - def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=None,acc=True,train_summary_path=None,model_path=None,one=True,continue_train=False,cpu_gpu=None): + def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=None,train_summary_path=None,model_path=None,one=True,continue_train=False,cpu_gpu=None): t1=time.time() with self.graph.as_default(): self.batch=batch @@ -267,7 +267,6 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.dropout=dropout self.optimizer=optimizer self.lr=lr - self.acc=acc if continue_train!=True: if self.continue_train==True: continue_train=True @@ -345,19 +344,17 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) if self.optimizer=='Adam': opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) - if self.acc==True: - with tf.name_scope('train_accuracy'): - if len(self.labels_shape)==1: - train_accuracy=tf.reduce_mean(tf.abs(train_output-self.labels)) - else: - equal=tf.equal(tf.argmax(train_output,1),tf.argmax(self.labels,1)) - train_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) + with tf.name_scope('train_accuracy'): + if len(self.labels_shape)==1: + train_accuracy=tf.reduce_mean(tf.abs(train_output-self.labels)) + else: + equal=tf.equal(tf.argmax(train_output,1),tf.argmax(self.labels,1)) + train_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) if train_summary_path!=None: train_loss_scalar=tf.summary.scalar('train_loss',train_loss) train_merging=tf.summary.merge([train_loss_scalar]) - if self.acc==True: - train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) - train_merging=tf.summary.merge([train_accuracy_scalar]) + train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) + train_merging=tf.summary.merge([train_accuracy_scalar]) train_writer=tf.summary.FileWriter(train_summary_path) config=tf.ConfigProto() config.gpu_options.allow_growth=True @@ -387,9 +384,8 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if self.acc==True: - 
batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) - total_acc+=batch_acc + batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) + total_acc+=batch_acc if self.shape0%self.batch!=0: batches+=1 index1=batches*self.batch @@ -402,18 +398,16 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if self.acc==True: - batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) - total_acc+=batch_acc + batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) + total_acc+=batch_acc loss=total_loss/batches train_acc=total_acc/batches self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if self.acc==True: - self.train_accuracy_list.append(train_acc.astype(np.float32)) - self.train_accuracy=train_acc - self.train_accuracy=self.train_accuracy.astype(np.float32) + self.train_accuracy_list.append(train_acc.astype(np.float32)) + self.train_accuracy=train_acc + self.train_accuracy=self.train_accuracy.astype(np.float32) else: random=np.arange(self.shape0) np.random.shuffle(random) @@ -427,11 +421,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if self.acc==True: - accuracy=sess.run(train_accuracy,feed_dict=feed_dict) - self.train_accuracy_list.append(accuracy.astype(np.float32)) - self.train_accuracy=accuracy - self.train_accuracy=self.train_accuracy.astype(np.float32) + accuracy=sess.run(train_accuracy,feed_dict=feed_dict) + self.train_accuracy_list.append(accuracy.astype(np.float32)) + self.train_accuracy=accuracy + self.train_accuracy=self.train_accuracy.astype(np.float32) if epoch%10!=0: temp_epoch=epoch-epoch%10 temp_epoch=int(temp_epoch/10) @@ -456,11 +449,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N train_writer.add_summary(train_summary,i) print() print('last loss:{0:.6f}'.format(self.train_loss)) - if self.acc==True: - if len(self.labels_shape)==2: - print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - else: - print('accuracy:{0:.6f}'.format(self.train_accuracy)) + if len(self.labels_shape)==2: + print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('accuracy:{0:.6f}'.format(self.train_accuracy)) if train_summary_path!=None: train_writer.close() if continue_train==True: @@ -594,12 +586,11 @@ def train_info(self): print('-------------------------------------') print() print('train loss:{0:.6f}'.format(self.train_loss)) - if self.acc==True: - print() - if len(self.labels_shape)==2: - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - else: - print('train accuracy:{0:.6f}'.format(self.train_accuracy)) + print() + if len(self.labels_shape)==2: + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('train accuracy:{0:.6f}'.format(self.train_accuracy)) return @@ -630,28 +621,25 @@ def train_visual(self): plt.title('train loss') plt.xlabel('epoch') plt.ylabel('loss') - if self.acc==True: - plt.figure(2) - plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) - plt.title('train accuracy') - plt.xlabel('epoch') - plt.ylabel('accuracy') + plt.figure(2) + plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) + plt.title('train accuracy') + plt.xlabel('epoch') + plt.ylabel('accuracy') print('train 
loss:{0:.6f}'.format(self.train_loss)) - if self.acc==True: - print() - if len(self.labels_shape)==2: - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - else: - print('train accuracy:{0:.6f}'.format(self.train_accuracy)) + print() + if len(self.labels_shape)==2: + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('train accuracy:{0:.6f}'.format(self.train_accuracy)) return def comparison(self): print() print('train loss:{0:.6f}'.format(self.train_loss)) - if self.acc==True: - print() - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + print() + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) if self.test_flag: print() print('-------------------------------------') @@ -760,7 +748,6 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.lr,output_file) pickle.dump(self.dropout,output_file) pickle.dump(self.optimizer,output_file) - pickle.dump(self.acc,output_file) pickle.dump(self.train_loss,output_file) pickle.dump(self.train_accuracy,output_file) pickle.dump(self.test_flag,output_file) @@ -802,7 +789,6 @@ def restore(self,model_path): self.lr=pickle.load(input_file) self.dropout=pickle.load(input_file) self.optimizer=pickle.load(input_file) - self.acc==pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_accuracy=pickle.load(input_file) self.test_flag=pickle.load(input_file) From 6ebed5f4352d80fec63942d5850c70a7a64ef4ad Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sun, 26 Jul 2020 22:57:13 +0800 Subject: [PATCH 58/72] Update CNN.py --- Note/nn/CNN/CNN.py | 56 ++++++++++++++++++---------------------------- 1 file changed, 22 insertions(+), 34 deletions(-) diff --git a/Note/nn/CNN/CNN.py b/Note/nn/CNN/CNN.py index 88bb46e91..baeebbea9 100644 --- a/Note/nn/CNN/CNN.py +++ b/Note/nn/CNN/CNN.py @@ -392,7 +392,7 @@ def forward_propagation(self,data,dropout=None,use_nn=False): return output - def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=None,acc=True,train_summary_path=None,model_path=None,one=True,continue_train=False,cpu_gpu=None): + def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=None,train_summary_path=None,model_path=None,one=True,continue_train=False,cpu_gpu=None): t1=time.time() with self.graph.as_default(): self.batch=batch @@ -400,7 +400,6 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.dropout=dropout self.optimizer=optimizer self.lr=lr - self.acc=acc if continue_train!=True: if self.continue_train==True: continue_train=True @@ -466,16 +465,14 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) if self.optimizer=='Adam': opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) - if self.acc==True: - with tf.name_scope('train_accuracy'): - equal=tf.equal(tf.argmax(train_output,1),tf.argmax(self.labels,1)) - train_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) + with tf.name_scope('train_accuracy'): + equal=tf.equal(tf.argmax(train_output,1),tf.argmax(self.labels,1)) + train_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) if train_summary_path!=None: train_loss_scalar=tf.summary.scalar('train_loss',train_loss) train_merging=tf.summary.merge([train_loss_scalar]) - if self.acc==True: - train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) - 
train_merging=tf.summary.merge([ train_accuracy_scalar]) + train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) + train_merging=tf.summary.merge([ train_accuracy_scalar]) train_writer=tf.summary.FileWriter(train_summary_path) config=tf.ConfigProto() config.gpu_options.allow_growth=True @@ -505,9 +502,8 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if self.acc==True: - batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) - total_acc+=batch_acc + batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) + total_acc+=batch_acc if self.shape0%self.batch!=0: batches+=1 index1=batches*self.batch @@ -520,18 +516,16 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if self.acc==True: - batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) - total_acc+=batch_acc + batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) + total_acc+=batch_acc loss=total_loss/batches train_acc=total_acc/batches self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if self.acc==True: - self.train_accuracy_list.append(train_acc.astype(np.float32)) - self.train_accuracy=train_acc - self.train_accuracy=self.train_accuracy.astype(np.float32) + self.train_accuracy_list.append(train_acc.astype(np.float32)) + self.train_accuracy=train_acc + self.train_accuracy=self.train_accuracy.astype(np.float32) else: random=np.arange(self.shape0) np.random.shuffle(random) @@ -545,11 +539,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if self.acc==True: - accuracy=sess.run(train_accuracy,feed_dict=feed_dict) - self.train_accuracy_list.append(accuracy.astype(np.float32)) - self.train_accuracy=accuracy - self.train_accuracy=self.train_accuracy.astype(np.float32) + accuracy=sess.run(train_accuracy,feed_dict=feed_dict) + self.train_accuracy_list.append(accuracy.astype(np.float32)) + self.train_accuracy=accuracy + self.train_accuracy=self.train_accuracy.astype(np.float32) if epoch%10!=0: temp_epoch=epoch-epoch%10 temp_epoch=int(temp_epoch/10) @@ -574,8 +567,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N train_writer.add_summary(train_summary,i) print() print('last loss:{0:.6f}'.format(self.train_loss)) - if self.acc==True: - print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) if train_summary_path!=None: train_writer.close() if continue_train==True: @@ -708,9 +700,8 @@ def train_info(self): print('-------------------------------------') print() print('train loss:{0:.6f}'.format(self.train_loss)) - if self.acc==True: - print() - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + print() + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) return @@ -744,9 +735,8 @@ def train_visual(self): plt.xlabel('epoch') plt.ylabel('accuracy') print('train loss:{0:.6f}'.format(self.train_loss)) - if self.acc==True: - print() - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + print() + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) return @@ -924,7 +914,6 @@ def 
save(self,model_path,i=None,one=True): pickle.dump(self.lr,output_file) pickle.dump(self.l2,output_file) pickle.dump(self.dropout,output_file) - pickle.dump(self.acc,output_file) pickle.dump(self.train_loss,output_file) pickle.dump(self.train_accuracy,output_file) pickle.dump(self.test_flag,output_file) @@ -968,7 +957,6 @@ def restore(self,model_path): self.lr=pickle.load(input_file) self.l2=pickle.load(input_file) self.dropout=pickle.load(input_file) - self.acc=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_accuracy=pickle.load(input_file) self.test_flag=pickle.load(input_file) From 70b47323e818c8b1a2bbc2bba0b55d7d4b044fb8 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sun, 26 Jul 2020 22:58:03 +0800 Subject: [PATCH 59/72] Update GRU.py --- Note/nn/RNN/GRU.py | 96 ++++++++++++++++++++-------------------------- 1 file changed, 42 insertions(+), 54 deletions(-) diff --git a/Note/nn/RNN/GRU.py b/Note/nn/RNN/GRU.py index 258c70a43..cc8f23723 100644 --- a/Note/nn/RNN/GRU.py +++ b/Note/nn/RNN/GRU.py @@ -427,7 +427,7 @@ def forward_propagation(self,data,labels=None,use_nn=False): return - def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True,train_summary_path=None,model_path=None,one=True,continue_train=False,cpu_gpu=None): + def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_summary_path=None,model_path=None,one=True,continue_train=False,cpu_gpu=None): t1=time.time() with self.graph.as_default(): self.C.clear() @@ -436,7 +436,6 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.l2=l2 self.optimizer=optimizer self.lr=lr - self.acc=acc if continue_train!=True: if self.continue_train==True: continue_train=True @@ -609,18 +608,17 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) if self.optimizer=='Adam': opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) - if self.acc==True: - with tf.name_scope('train_accuracy'): - if self.pattern=='1n': - train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output,2),tf.argmax(self.labels,2)),tf.float32)) - elif self.pattern=='n1' or self.predicate==True: - if self.pattern=='n1': - equal=tf.equal(tf.argmax(self.output[-1],1),tf.argmax(self.labels,1)) - train_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) - else: - train_accuracy=tf.reduce_mean(tf.abs(self.output[-1]-self.labels)) - elif self.pattern=='nn': - train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output,2),tf.argmax(self.labels,2)),tf.float32)) + with tf.name_scope('train_accuracy'): + if self.pattern=='1n': + train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output,2),tf.argmax(self.labels,2)),tf.float32)) + elif self.pattern=='n1' or self.predicate==True: + if self.pattern=='n1': + equal=tf.equal(tf.argmax(self.output[-1],1),tf.argmax(self.labels,1)) + train_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) + else: + train_accuracy=tf.reduce_mean(tf.abs(self.output[-1]-self.labels)) + elif self.pattern=='nn': + train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output,2),tf.argmax(self.labels,2)),tf.float32)) if train_summary_path!=None: train_loss_scalar=tf.summary.scalar('train_loss',train_loss) train_merging=tf.summary.merge([train_loss_scalar]) @@ -656,9 +654,8 @@ def 
train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if self.acc==True: - batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) - total_acc+=batch_acc + batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) + total_acc+=batch_acc if self.shape0%self.batch!=0: batches+=1 index1=batches*self.batch @@ -671,18 +668,16 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if self.acc==True: - batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) - total_acc+=batch_acc + batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) + total_acc+=batch_acc loss=total_loss/batches train_acc=total_acc/batches self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if self.acc==True: - self.train_accuracy_list.append(train_acc.astype(np.float32)) - self.train_accuracy=train_acc - self.train_accuracy=self.train_accuracy.astype(np.float32) + self.train_accuracy_list.append(train_acc.astype(np.float32)) + self.train_accuracy=train_acc + self.train_accuracy=self.train_accuracy.astype(np.float32) else: random=np.arange(self.shape0) np.random.shuffle(random) @@ -696,11 +691,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if self.acc==True: - accuracy=sess.run(train_accuracy,feed_dict=feed_dict) - self.train_accuracy_list.append(accuracy.astype(np.float32)) - self.train_accuracy=accuracy - self.train_accuracy=self.train_accuracy.astype(np.float32) + accuracy=sess.run(train_accuracy,feed_dict=feed_dict) + self.train_accuracy_list.append(accuracy.astype(np.float32)) + self.train_accuracy=accuracy + self.train_accuracy=self.train_accuracy.astype(np.float32) if epoch%10!=0: temp_epoch=epoch-epoch%10 temp_epoch=int(temp_epoch/10) @@ -725,11 +719,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, train_writer.add_summary(train_summary,i) print() print('last loss:{0:.6f}'.format(self.train_loss)) - if self.acc==True: - if self.predicate==False: - print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - else: - print('accuracy:{0:.6f}'.format(self.train_accuracy)) + if self.predicate==False: + print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('accuracy:{0:.6f}'.format(self.train_accuracy)) if train_summary_path!=None: train_writer.close() if continue_train==True: @@ -937,12 +930,11 @@ def train_info(self): print('-------------------------------------') print() print('train loss:{0:.6f}'.format(self.train_loss)) - if self.acc==True: - print() - if self.predicate==False: - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - else: - print('train accuracy:{0:.6f}'.format(self.train_accuracy)) + print() + if self.predicate==False: + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('train accuracy:{0:.6f}'.format(self.train_accuracy)) return @@ -973,19 +965,17 @@ def train_visual(self): plt.title('train loss') plt.xlabel('epoch') plt.ylabel('loss') - if self.acc==True: - plt.figure(2) - plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) - plt.title('train accuracy') - plt.xlabel('epoch') - plt.ylabel('accuracy') + plt.figure(2) + 
plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) + plt.title('train accuracy') + plt.xlabel('epoch') + plt.ylabel('accuracy') print('train loss:{0:.6f}'.format(self.train_loss)) - if self.acc==True: - print() - if self.predicate==False: - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - else: - print('train accuracy:{0:.6f}'.format(self.train_accuracy)) + print() + if self.predicate==False: + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('train accuracy:{0:.6f}'.format(self.train_accuracy)) return @@ -1086,7 +1076,6 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.optimizer,output_file) pickle.dump(self.lr,output_file) pickle.dump(self.l2,output_file) - pickle.dump(self.acc,output_file) pickle.dump(self.train_loss,output_file) pickle.dump(self.train_accuracy,output_file) pickle.dump(self.test_flag,output_file) @@ -1152,7 +1141,6 @@ def restore(self,model_path): self.optimizer=pickle.load(input_file) self.lr=pickle.load(input_file) self.l2=pickle.load(input_file) - self.acc=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_accuracy=pickle.load(input_file) self.test_flag=pickle.load(input_file) From e8a000c6497a8ce6178a8410a10a6bc1fba76071 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sun, 26 Jul 2020 22:58:46 +0800 Subject: [PATCH 60/72] Update LSTM.py --- Note/nn/RNN/LSTM.py | 101 +++++++++++++++++++------------------------- 1 file changed, 44 insertions(+), 57 deletions(-) diff --git a/Note/nn/RNN/LSTM.py b/Note/nn/RNN/LSTM.py index e248dbfe8..b9e941f93 100644 --- a/Note/nn/RNN/LSTM.py +++ b/Note/nn/RNN/LSTM.py @@ -498,7 +498,7 @@ def forward_propagation(self,data,labels=None,use_nn=False): return - def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True,train_summary_path=None,model_path=None,one=True,continue_train=False,cpu_gpu=None): + def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_summary_path=None,model_path=None,one=True,continue_train=False,cpu_gpu=None): t1=time.time() with self.graph.as_default(): self.C.clear() @@ -507,7 +507,6 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.l2=l2 self.optimizer=optimizer self.lr=lr - self.acc=acc if continue_train!=True: if self.continue_train==True: continue_train=True @@ -704,24 +703,22 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) if self.optimizer=='Adam': opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) - if self.acc==True: - with tf.name_scope('train_accuracy'): - if self.pattern=='1n': - train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output,2),tf.argmax(self.labels,2)),tf.float32)) - elif self.pattern=='n1' or self.predicate==True: - if self.pattern=='n1': - equal=tf.equal(tf.argmax(self.output[-1],1),tf.argmax(self.labels,1)) - train_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) - else: - train_accuracy=tf.reduce_mean(tf.abs(self.output[-1]-self.labels)) - elif self.pattern=='nn': - train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output,2),tf.argmax(self.labels,2)),tf.float32)) + with tf.name_scope('train_accuracy'): + if self.pattern=='1n': + train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output,2),tf.argmax(self.labels,2)),tf.float32)) + elif self.pattern=='n1' or 
self.predicate==True: + if self.pattern=='n1': + equal=tf.equal(tf.argmax(self.output[-1],1),tf.argmax(self.labels,1)) + train_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) + else: + train_accuracy=tf.reduce_mean(tf.abs(self.output[-1]-self.labels)) + elif self.pattern=='nn': + train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output,2),tf.argmax(self.labels,2)),tf.float32)) if train_summary_path!=None: train_loss_scalar=tf.summary.scalar('train_loss',train_loss) train_merging=tf.summary.merge([train_loss_scalar]) - if self.acc==True: - train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) - train_merging=tf.summary.merge([train_accuracy_scalar]) + train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) + train_merging=tf.summary.merge([train_accuracy_scalar]) train_writer=tf.summary.FileWriter(train_summary_path) config=tf.ConfigProto() config.gpu_options.allow_growth=True @@ -751,9 +748,8 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if self.acc==True: - batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) - total_acc+=batch_acc + batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) + total_acc+=batch_acc if self.shape0%self.batch!=0: batches+=1 index1=batches*self.batch @@ -766,18 +762,16 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if self.acc==True: - batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) - total_acc+=batch_acc + batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) + total_acc+=batch_acc loss=total_loss/batches train_acc=total_acc/batches self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if self.acc==True: - self.train_accuracy_list.append(train_acc.astype(np.float32)) - self.train_accuracy=train_acc - self.train_accuracy=self.train_accuracy.astype(np.float32) + self.train_accuracy_list.append(train_acc.astype(np.float32)) + self.train_accuracy=train_acc + self.train_accuracy=self.train_accuracy.astype(np.float32) else: random=np.arange(self.shape0) np.random.shuffle(random) @@ -791,11 +785,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if self.acc==True: - accuracy=sess.run(train_accuracy,feed_dict=feed_dict) - self.train_accuracy_list.append(accuracy.astype(np.float32)) - self.train_accuracy=accuracy - self.train_accuracy=self.train_accuracy.astype(np.float32) + accuracy=sess.run(train_accuracy,feed_dict=feed_dict) + self.train_accuracy_list.append(accuracy.astype(np.float32)) + self.train_accuracy=accuracy + self.train_accuracy=self.train_accuracy.astype(np.float32) if epoch%10!=0: temp_epoch=epoch-epoch%10 temp_epoch=int(temp_epoch/10) @@ -820,11 +813,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, train_writer.add_summary(train_summary,i) print() print('last loss:{0:.6f}'.format(self.train_loss)) - if self.acc==True: - if self.predicate==False: - print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - else: - print('accuracy:{0:.6f}'.format(self.train_accuracy)) + if self.predicate==False: + print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + 
print('accuracy:{0:.6f}'.format(self.train_accuracy)) if train_summary_path!=None: train_writer.close() if continue_train==True: @@ -1053,12 +1045,11 @@ def train_info(self): print('-------------------------------------') print() print('train loss:{0:.6f}'.format(self.train_loss)) - if self.acc==True: - print() - if self.predicate==False: - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - else: - print('train accuracy:{0:.6f}'.format(self.train_accuracy)) + print() + if self.predicate==False: + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('train accuracy:{0:.6f}'.format(self.train_accuracy)) return @@ -1089,19 +1080,17 @@ def train_visual(self): plt.title('train loss') plt.xlabel('epoch') plt.ylabel('loss') - if self.acc==True: - plt.figure(2) - plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) - plt.title('train accuracy') - plt.xlabel('epoch') - plt.ylabel('accuracy') + plt.figure(2) + plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) + plt.title('train accuracy') + plt.xlabel('epoch') + plt.ylabel('accuracy') print('train loss:{0:.6f}'.format(self.train_loss)) - if self.acc==True: - print() - if self.predicate==False: - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - else: - print('train accuracy:{0:.6f}'.format(self.train_accuracy)) + print() + if self.predicate==False: + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('train accuracy:{0:.6f}'.format(self.train_accuracy)) return @@ -1210,7 +1199,6 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.optimizer,output_file) pickle.dump(self.lr,output_file) pickle.dump(self.l2,output_file) - pickle.dump(self.acc,output_file) pickle.dump(self.train_loss,output_file) pickle.dump(self.train_accuracy,output_file) pickle.dump(self.test_flag,output_file) @@ -1282,7 +1270,6 @@ def restore(self,model_path): self.optimizer=pickle.load(input_file) self.lr=pickle.load(input_file) self.l2=pickle.load(input_file) - self.acc=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_accuracy=pickle.load(input_file) self.test_flag=pickle.load(input_file) From 75890a6ee1df0d1241ac4eeeeab50ff14d9d7071 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sun, 26 Jul 2020 22:59:30 +0800 Subject: [PATCH 61/72] Update M_reluGRU.py --- Note/nn/RNN/M_reluGRU.py | 95 +++++++++++++++++----------------------- 1 file changed, 41 insertions(+), 54 deletions(-) diff --git a/Note/nn/RNN/M_reluGRU.py b/Note/nn/RNN/M_reluGRU.py index f0ee90680..5e3015ccc 100644 --- a/Note/nn/RNN/M_reluGRU.py +++ b/Note/nn/RNN/M_reluGRU.py @@ -413,7 +413,7 @@ def forward_propagation(self,data,labels=None,use_nn=False): return - def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True,train_summary_path=None,model_path=None,one=True,continue_train=False,cpu_gpu=None): + def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_summary_path=None,model_path=None,one=True,continue_train=False,cpu_gpu=None): t1=time.time() with self.graph.as_default(): self.C.clear() @@ -422,7 +422,6 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.l2=l2 self.optimizer=optimizer self.lr=lr - self.acc=acc if continue_train!=True: if self.continue_train==True: continue_train=True @@ -571,24 +570,22 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, 
opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) if self.optimizer=='Adam': opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) - if self.acc==True: - with tf.name_scope('train_accuracy'): - if self.pattern=='1n': - train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output,2),tf.argmax(self.labels,2)),tf.float32)) - elif self.pattern=='n1' or self.predicate==True: - if self.pattern=='n1': - equal=tf.equal(tf.argmax(self.output[-1],1),tf.argmax(self.labels,1)) - train_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) - else: - train_accuracy=tf.reduce_mean(tf.abs(self.output[-1]-self.labels)) - elif self.pattern=='nn': - train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output,2),tf.argmax(self.labels,2)),tf.float32)) + with tf.name_scope('train_accuracy'): + if self.pattern=='1n': + train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output,2),tf.argmax(self.labels,2)),tf.float32)) + elif self.pattern=='n1' or self.predicate==True: + if self.pattern=='n1': + equal=tf.equal(tf.argmax(self.output[-1],1),tf.argmax(self.labels,1)) + train_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) + else: + train_accuracy=tf.reduce_mean(tf.abs(self.output[-1]-self.labels)) + elif self.pattern=='nn': + train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output,2),tf.argmax(self.labels,2)),tf.float32)) if train_summary_path!=None: train_loss_scalar=tf.summary.scalar('train_loss',train_loss) train_merging=tf.summary.merge([train_loss_scalar]) - if self.acc==True: - train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) - train_merging=tf.summary.merge([train_accuracy_scalar]) + train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) + train_merging=tf.summary.merge([train_accuracy_scalar]) train_writer=tf.summary.FileWriter(train_summary_path) config=tf.ConfigProto() config.gpu_options.allow_growth=True @@ -618,9 +615,8 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if self.acc==True: - batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) - total_acc+=batch_acc + batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) + total_acc+=batch_acc if self.shape0%self.batch!=0: batches+=1 index1=batches*self.batch @@ -633,18 +629,16 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if self.acc==True: - batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) - total_acc+=batch_acc + batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) + total_acc+=batch_acc loss=total_loss/batches train_acc=total_acc/batches self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if self.acc==True: - self.train_accuracy_list.append(train_acc.astype(np.float32)) - self.train_accuracy=train_acc - self.train_accuracy=self.train_accuracy.astype(np.float32) + self.train_accuracy_list.append(train_acc.astype(np.float32)) + self.train_accuracy=train_acc + self.train_accuracy=self.train_accuracy.astype(np.float32) else: random=np.arange(self.shape0) np.random.shuffle(random) @@ -658,11 +652,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss 
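
NOTE: throughout these train loops the loss/optimizer step and the accuracy read are issued as two separate sess.run calls, so TF1 executes the batch's forward pass twice. Because train_accuracy lives in the same graph, one fetch list covers both; a sketch under that assumption, not code from the series:

    # fetch the train op, the loss and the accuracy in a single run,
    # so the batch's forward pass is computed once
    batch_loss,batch_acc,_=sess.run([train_loss,train_accuracy,opt],feed_dict=feed_dict)
    total_loss+=batch_loss
    total_acc+=batch_acc
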
self.train_loss=self.train_loss.astype(np.float32) - if self.acc==True: - accuracy=sess.run(train_accuracy,feed_dict={self.data:self.train_data,self.labels:self.train_labels}) - self.train_accuracy_list.append(accuracy.astype(np.float32)) - self.train_accuracy=accuracy - self.train_accuracy=self.train_accuracy.astype(np.float32) + accuracy=sess.run(train_accuracy,feed_dict={self.data:self.train_data,self.labels:self.train_labels}) + self.train_accuracy_list.append(accuracy.astype(np.float32)) + self.train_accuracy=accuracy + self.train_accuracy=self.train_accuracy.astype(np.float32) if epoch%10!=0: temp_epoch=epoch-epoch%10 temp_epoch=int(temp_epoch/10) @@ -687,8 +680,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, train_writer.add_summary(train_summary,i) print() print('last loss:{0:.6f}'.format(self.train_loss)) - if self.acc==True: - print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) if train_summary_path!=None: train_writer.close() if continue_train==True: @@ -874,12 +866,11 @@ def train_info(self): print('-------------------------------------') print() print('train loss:{0}'.format(self.train_loss)) - if self.acc==True: - print() - if self.predicate==False: - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - else: - print('train accuracy:{0:.6f}'.format(self.train_accuracy)) + print() + if self.predicate==False: + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('train accuracy:{0:.6f}'.format(self.train_accuracy)) return @@ -910,19 +901,17 @@ def train_visual(self): plt.title('train loss') plt.xlabel('epoch') plt.ylabel('loss') - if self.acc==True: - plt.figure(2) - plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) - plt.title('train accuracy') - plt.xlabel('epoch') - plt.ylabel('accuracy') + plt.figure(2) + plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) + plt.title('train accuracy') + plt.xlabel('epoch') + plt.ylabel('accuracy') print('train loss:{0}'.format(self.train_loss)) - if self.acc==True: - print() - if self.predicate==False: - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - else: - print('train accuracy:{0:.6f}'.format(self.train_accuracy)) + print() + if self.predicate==False: + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('train accuracy:{0:.6f}'.format(self.train_accuracy)) return @@ -1015,7 +1004,6 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.optimizer,output_file) pickle.dump(self.lr,output_file) pickle.dump(self.l2,output_file) - pickle.dump(self.acc,output_file) pickle.dump(self.train_loss,output_file) pickle.dump(self.train_accuracy,output_file) pickle.dump(self.test_flag,output_file) @@ -1075,7 +1063,6 @@ def restore(self,model_path): self.optimizer=pickle.load(input_file) self.lr=pickle.load(input_file) self.l2=pickle.load(input_file) - self.acc=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_accuracy=pickle.load(input_file) self.test_flag=pickle.load(input_file) From 7cd279e51519281f1b757adeecf88e70d3a9a6c5 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sun, 26 Jul 2020 23:00:12 +0800 Subject: [PATCH 62/72] Update RNN.py --- Note/nn/RNN/RNN.py | 97 ++++++++++++++++++++-------------------------- 1 file changed, 42 insertions(+), 55 deletions(-) diff --git a/Note/nn/RNN/RNN.py b/Note/nn/RNN/RNN.py index f30e07bbb..c1712935c 
100644 --- a/Note/nn/RNN/RNN.py +++ b/Note/nn/RNN/RNN.py @@ -190,7 +190,7 @@ def forward_propagation(self,data,use_nn=False): return - def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True,train_summary_path=None,model_path=None,one=True,continue_train=False,cpu_gpu=None): + def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_summary_path=None,model_path=None,one=True,continue_train=False,cpu_gpu=None): t1=time.time() with self.graph.as_default(): self.h.clear() @@ -199,7 +199,6 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.l2=l2 self.optimizer=optimizer self.lr=lr - self.acc=acc if continue_train!=True: if self.continue_train==True: continue_train=True @@ -292,25 +291,23 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, if self.optimizer=='Adam': opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) train_loss_scalar=tf.summary.scalar('train_loss',train_loss) - if self.acc==True: - with tf.name_scope('train_accuracy'): - if self.pattern=='1n': - train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.o,2),tf.argmax(self.labels,2)),tf.float32)) - elif self.pattern=='n1' or self.predicate==True: - if self.pattern=='n1': - equal=tf.equal(tf.argmax(self.o[-1],1),tf.argmax(self.labels,1)) - train_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) - else: - train_accuracy=tf.reduce_mean(tf.abs(self.o[-1]-self.labels)) - elif self.pattern=='nn': - train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.o,2),tf.argmax(self.labels,2)),tf.float32)) - train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) + with tf.name_scope('train_accuracy'): + if self.pattern=='1n': + train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.o,2),tf.argmax(self.labels,2)),tf.float32)) + elif self.pattern=='n1' or self.predicate==True: + if self.pattern=='n1': + equal=tf.equal(tf.argmax(self.o[-1],1),tf.argmax(self.labels,1)) + train_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) + else: + train_accuracy=tf.reduce_mean(tf.abs(self.o[-1]-self.labels)) + elif self.pattern=='nn': + train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.o,2),tf.argmax(self.labels,2)),tf.float32)) + train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) if train_summary_path!=None: train_loss_scalar=tf.summary.scalar('train_loss',train_loss) train_merging=tf.summary.merge([train_loss_scalar]) - if self.acc==True: - train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) - train_merging=tf.summary.merge([train_accuracy_scalar]) + train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) + train_merging=tf.summary.merge([train_accuracy_scalar]) train_writer=tf.summary.FileWriter(train_summary_path) config=tf.ConfigProto() config.gpu_options.allow_growth=True @@ -340,9 +337,8 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if self.acc==True: - batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) - total_acc+=batch_acc + batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) + total_acc+=batch_acc if self.shape0%self.batch!=0: batches+=1 index1=batches*self.batch @@ -355,18 +351,16 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if self.acc==True: - 
batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) - total_acc+=batch_acc + batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) + total_acc+=batch_acc loss=total_loss/batches train_acc=total_acc/batches self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if self.acc==True: - self.train_accuracy_list.append(train_acc.astype(np.float32)) - self.train_accuracy=train_acc - self.train_accuracy=self.train_accuracy.astype(np.float32) + self.train_accuracy_list.append(train_acc.astype(np.float32)) + self.train_accuracy=train_acc + self.train_accuracy=self.train_accuracy.astype(np.float32) else: random=np.arange(self.shape0) np.random.shuffle(random) @@ -380,11 +374,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if self.acc==True: - accuracy=sess.run(train_accuracy,feed_dict=feed_dict) - self.train_accuracy_list.append(accuracy.astype(np.float32)) - self.train_accuracy=accuracy - self.train_accuracy=self.train_accuracy.astype(np.float32) + accuracy=sess.run(train_accuracy,feed_dict=feed_dict) + self.train_accuracy_list.append(accuracy.astype(np.float32)) + self.train_accuracy=accuracy + self.train_accuracy=self.train_accuracy.astype(np.float32) if epoch%10!=0: temp_epoch=epoch-epoch%10 temp_epoch=int(temp_epoch/10) @@ -409,8 +402,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,acc=True, train_writer.add_summary(train_summary,i) print() print('last loss:{0:.6f}'.format(self.train_loss)) - if self.acc==True: - print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) if train_summary_path!=None: train_writer.close() if continue_train==True: @@ -572,12 +564,11 @@ def train_info(self): print('-------------------------------------') print() print('train loss:{0}'.format(self.train_loss)) - if self.acc==True: - print() - if self.predicate==False: - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - else: - print('train accuracy:{0:.6f}'.format(self.train_accuracy)) + print() + if self.predicate==False: + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('train accuracy:{0:.6f}'.format(self.train_accuracy)) return @@ -608,19 +599,17 @@ def train_visual(self): plt.title('train loss') plt.xlabel('epoch') plt.ylabel('loss') - if self.acc==True: - plt.figure(2) - plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) - plt.title('train accuracy') - plt.xlabel('epoch') - plt.ylabel('accuracy') + plt.figure(2) + plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) + plt.title('train accuracy') + plt.xlabel('epoch') + plt.ylabel('accuracy') print('train loss:{0}'.format(self.train_loss)) - if self.acc==True: - print() - if self.predicate==False: - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - else: - print('train accuracy:{0:.6f}'.format(self.train_accuracy)) + print() + if self.predicate==False: + print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + else: + print('train accuracy:{0:.6f}'.format(self.train_accuracy)) return @@ -685,7 +674,6 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.optimizer,output_file) pickle.dump(self.lr,output_file) pickle.dump(self.l2,output_file) - pickle.dump(self.acc,output_file) pickle.dump(float(self.train_loss),output_file) 
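# Sketch (TF1 API, as used throughout these patches): logging loss and accuracy to
# TensorBoard through a single merged summary op. Note that tf.summary.merge only
# includes the summaries it is given, so a train_merging op reassigned with just the
# accuracy scalar no longer carries the loss scalar; putting both in one list keeps
# both curves. Placeholder shapes and the summary path are illustrative only.
import numpy as np
import tensorflow as tf
labels=tf.placeholder(tf.float32,shape=[None,2],name='labels')
logits=tf.placeholder(tf.float32,shape=[None,2],name='logits')
train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,labels=labels))
train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits,1),tf.argmax(labels,1)),tf.float32))
train_loss_scalar=tf.summary.scalar('train_loss',train_loss)
train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy)
train_merging=tf.summary.merge([train_loss_scalar,train_accuracy_scalar])
with tf.Session() as sess:
    train_writer=tf.summary.FileWriter('/tmp/train_summary',sess.graph)
    feed_dict={logits:np.array([[2.,0.],[0.,2.]],np.float32),labels:np.array([[1.,0.],[0.,1.]],np.float32)}
    train_summary=sess.run(train_merging,feed_dict=feed_dict)
    train_writer.add_summary(train_summary,0)
    train_writer.close()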
pickle.dump(float(self.train_accuracy*100),output_file) pickle.dump(self.test_flag,output_file) @@ -731,7 +719,6 @@ def restore(self,model_path): self.optimizer=pickle.load(input_file) self.lr=pickle.load(input_file) self.l2=pickle.load(input_file) - self.acc=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_accuracy=pickle.load(input_file) self.test_flag=pickle.load(input_file) From b1d5295f92beb96da97c1dc8c44d70ecfe6d8705 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sun, 26 Jul 2020 23:11:18 +0800 Subject: [PATCH 63/72] Update Note Architecture.py --- Note/create/Note Architecture.py | 93 ++++++++++++++------------------ 1 file changed, 41 insertions(+), 52 deletions(-) diff --git a/Note/create/Note Architecture.py b/Note/create/Note Architecture.py index 25a81a932..34cc521fd 100644 --- a/Note/create/Note Architecture.py +++ b/Note/create/Note Architecture.py @@ -54,11 +54,11 @@ def __init__(): self.train_loss=None - self.train_accuracy=None + self.train_acc=None self.train_loss_list=[] - self.train_accuracy_list=[] + self.train_acc_list=[] self.test_loss=None - self.test_accuracy=None + self.test_acc=None self.continue_train=False self.flag=None self.end_flag=False @@ -84,7 +84,7 @@ def structure(): self.end_flag=False self.test_flag=False self.train_loss_list.clear() - self.train_accuracy_list.clear() + self.train_acc_list.clear() self.dtype=dtype @@ -97,24 +97,23 @@ def forward_propagation(): - def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,acc=True,train_summary_path=None,model_path=None,one=True,continue_train=False,cpu_gpu=None): + def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,train_summary_path=None,model_path=None,one=True,continue_train=False,cpu_gpu=None): t1=time.time() with self.graph.as_default(): self.batch=batch self.optimizer=optimizer self.lr=lr - self.acc=acc if continue_train!=True: if self.continue_train==True: continue_train=True else: self.train_loss_list.clear() - self.train_accuracy_list.clear() + self.train_acc_list.clear() if self.continue_train==False and continue_train==True: if self.end_flag==False and self.flag==0: self.epoch=None self.train_loss_list.clear() - self.train_accuracy_list.clear() + self.train_acc_list.clear() self.continue_train=True if cpu_gpu!=None: self.cpu_gpu=cpu_gpu @@ -144,16 +143,14 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,acc=True,train_su opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) if self.optimizer=='Adam': opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) - if self.acc==True: - with tf.name_scope('train_accuracy'): + with tf.name_scope('train_accuracy'): if train_summary_path!=None: train_loss_scalar=tf.summary.scalar('train_loss',train_loss) train_merging=tf.summary.merge([train_loss_scalar]) - if self.acc==True: - train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) - train_merging=tf.summary.merge([train_accuracy_scalar]) + train_acc_scalar=tf.summary.scalar('train_accuracy',train_acc) + train_merging=tf.summary.merge([train_acc_scalar]) train_writer=tf.summary.FileWriter(train_summary_path) config=tf.ConfigProto() config.gpu_options.allow_growth=True @@ -182,9 +179,8 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,acc=True,train_su else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if self.acc==True: - 
batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) - total_acc+=batch_acc + batch_acc=sess.run(train_acc,feed_dict=feed_dict) + total_acc+=batch_acc if self.shape0%self.batch!=0: batches+=1 index1=batches*self.batch @@ -196,18 +192,16 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,acc=True,train_su else: batch_loss,_=sess.run([train_loss,opt],feed_dict=feed_dict) total_loss+=batch_loss - if self.acc==True: - batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) - total_acc+=batch_acc + batch_acc=sess.run(train_acc,feed_dict=feed_dict) + total_acc+=batch_acc loss=total_loss/batches train_acc=total_acc/batches self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if self.acc==True: - self.train_accuracy_list.append(train_acc.astype(np.float32)) - self.train_accuracy=train_acc - self.train_accuracy=self.train_accuracy.astype(np.float32) + self.train_acc_list.append(train_acc.astype(np.float32)) + self.train_acc=train_acc + self.train_acc=self.train_acc.astype(np.float32) else: random=np.arange(self.shape0) np.random.shuffle(random) @@ -220,11 +214,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,acc=True,train_su self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss self.train_loss=self.train_loss.astype(np.float32) - if self.acc==True: - accuracy=sess.run(train_accuracy,feed_dict=feed_dict) - self.train_accuracy_list.append(accuracy.astype(np.float32)) - self.train_accuracy=accuracy - self.train_accuracy=self.train_accuracy.astype(np.float32) + acc=sess.run(train_acc,feed_dict=feed_dict) + self.train_acc_list.append(acc.astype(np.float32)) + self.train_acc=acc + self.train_acc=self.train_acc.astype(np.float32) if epoch%10!=0: temp_epoch=epoch-epoch%10 temp_epoch=int(temp_epoch/10) @@ -249,11 +242,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,acc=True,train_su train_writer.add_summary(train_summary,i) print() print('last loss:{0:.6f}'.format(self.train_loss)) - if self.acc==True: - if len(self.labels_shape)==2: - print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) - else: - print('accuracy:{0:.3f}'.format(self.train_accuracy)) + if len(self.labels_shape)==2: + print('accuracy:{0:.3f}%'.format(self.train_acc*100)) + else: + print('accuracy:{0:.3f}'.format(self.train_acc)) if train_summary_path!=None: train_writer.close() if continue_train==True: @@ -311,7 +303,7 @@ def test(self,test_data,test_labels,batch=None): test_labels_batch=test_labels[j*batch:(j+1)*batch] batch_test_loss=sess.run(test_loss,feed_dict={test_data_placeholder:test_data_batch,test_labels_placeholder:test_labels_batch}) total_test_loss+=batch_test_loss - batch_test_acc=sess.run(test_accuracy,feed_dict={test_data_placeholder:test_data_batch,test_labels_placeholder:test_labels_batch}) + batch_test_acc=sess.run(test_acc,feed_dict={test_data_placeholder:test_data_batch,test_labels_placeholder:test_labels_batch}) total_test_acc+=batch_test_acc if test_data.shape[0]%batch!=0: test_batches+=1 @@ -319,19 +311,19 @@ def test(self,test_data,test_labels,batch=None): test_labels_batch=np.concatenate([test_labels[batches*batch:],test_labels[:batch-(test_labels.shape[0]-batches*batch)]]) batch_test_loss=sess.run(test_loss,feed_dict={test_data_placeholder:test_data_batch,test_labels_placeholder:test_labels_batch}) total_test_loss+=batch_test_loss - 
batch_test_acc=sess.run(test_accuracy,feed_dict={test_data_placeholder:test_data_batch,test_labels_placeholder:test_labels_batch}) + batch_test_acc=sess.run(test_acc,feed_dict={test_data_placeholder:test_data_batch,test_labels_placeholder:test_labels_batch}) total_test_acc+=batch_test_acc test_loss=total_test_loss/test_batches test_acc=total_test_acc/test_batches self.test_loss=test_loss - self.test_accuracy=test_acc + self.test_acc=test_acc self.test_loss=self.test_loss.astype(np.float32) - self.test_accuracy=self.test_accuracy.astype(np.float32) + self.test_acc=self.test_acc.astype(np.float32) else: self.test_loss=sess.run(test_loss,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) - self.test_accuracy=sess.run(test_accuracy,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) + self.test_acc=sess.run(test_acc,feed_dict={test_data_placeholder:test_data,test_labels_placeholder:test_labels}) self.test_loss=self.test_loss.astype(np.float32) - self.test_accuracy=self.test_accuracy.astype(np.float32) + self.test_acc=self.test_acc.astype(np.float32) print('test loss:{0:.6f}'.format(self.test_loss)) @@ -383,12 +375,11 @@ def train_visual(self): plt.title('train loss') plt.xlabel('epoch') plt.ylabel('loss') - if self.acc==True: - plt.figure(2) - plt.plot(np.arange(self.epoch+1),self.train_accuracy_list) - plt.title('train accuracy') - plt.xlabel('epoch') - plt.ylabel('accuracy') + plt.figure(2) + plt.plot(np.arange(self.epoch+1),self.train_acc_list) + plt.title('train accuracy') + plt.xlabel('epoch') + plt.ylabel('accuracy') print('train loss:{0}'.format(self.train_loss)) @@ -399,7 +390,7 @@ def comparison(self): print() print('train loss:{0}'.format(self.train_loss)) print() - print('train accuracy:{0:.3f}%'.format(self.train_accuracy*100)) + print('train accuracy:{0:.3f}%'.format(self.train_acc*100)) print() print('-------------------------------------') print() @@ -422,13 +413,12 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.lr,output_file) - pickle.dump(self.acc,output_file) pickle.dump(self.train_loss,output_file) - pickle.dump(self.train_accuracy,output_file) + pickle.dump(self.train_acc,output_file) pickle.dump(self.test_flag,output_file) if self.test_flag==True: pickle.dump(self.test_loss,output_file) - pickle.dump(self.test_accuracy,output_file) + pickle.dump(self.test_acc,output_file) pickle.dump(self.train_loss_list,output_file) pickle.dump(self.train_accuracy_list,output_file) pickle.dump(self.epoch,output_file) @@ -454,16 +444,15 @@ def restore(self,model_path): self.lr=pickle.load(input_file) - self.acc=pickle.load(input_file) self.total_time=pickle.load(input_file) self.train_loss=pickle.load(input_file) - self.train_accuracy=pickle.load(input_file) + self.train_acc=pickle.load(input_file) self.test_flag=pickle.load(input_file) if self.test_flag==True: self.test_loss=pickle.load(input_file) self.test_accuracy=pickle.load(input_file) self.train_loss_list=pickle.load(input_file) - self.train_accuracy_list=pickle.load(input_file) + self.train_acc_list=pickle.load(input_file) self.epoch=pickle.load(input_file) self.total_epoch=pickle.load(input_file) self.time=pickle.load(input_file) From acdff278170e5610a0822812d0a650475f4ec524 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Mon, 27 Jul 2020 22:10:55 +0800 Subject: [PATCH 64/72] Update Note Architecture.py --- Note/create/Note Architecture.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 
deletions(-) diff --git a/Note/create/Note Architecture.py b/Note/create/Note Architecture.py index 34cc521fd..bd0757988 100644 --- a/Note/create/Note Architecture.py +++ b/Note/create/Note Architecture.py @@ -243,9 +243,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,train_summary_pat print() print('last loss:{0:.6f}'.format(self.train_loss)) if len(self.labels_shape)==2: - print('accuracy:{0:.3f}%'.format(self.train_acc*100)) + print('acc:{0:.3f}%'.format(self.train_acc*100)) else: - print('accuracy:{0:.3f}'.format(self.train_acc)) + print('acc:{0:.3f}'.format(self.train_acc)) if train_summary_path!=None: train_writer.close() if continue_train==True: @@ -377,9 +377,9 @@ def train_visual(self): plt.ylabel('loss') plt.figure(2) plt.plot(np.arange(self.epoch+1),self.train_acc_list) - plt.title('train accuracy') + plt.title('train acc') plt.xlabel('epoch') - plt.ylabel('accuracy') + plt.ylabel('acc') print('train loss:{0}'.format(self.train_loss)) @@ -390,7 +390,7 @@ def comparison(self): print() print('train loss:{0}'.format(self.train_loss)) print() - print('train accuracy:{0:.3f}%'.format(self.train_acc*100)) + print('train acc:{0:.3f}%'.format(self.train_acc*100)) print() print('-------------------------------------') print() From a57745af61ba329405e516c8928d30d9e248755e Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Tue, 28 Jul 2020 15:33:09 +0800 Subject: [PATCH 65/72] Update FNN.py --- Note/nn/FNN/FNN.py | 146 ++++++++++++++++++++++----------------------- 1 file changed, 71 insertions(+), 75 deletions(-) diff --git a/Note/nn/FNN/FNN.py b/Note/nn/FNN/FNN.py index e25034678..11679225c 100644 --- a/Note/nn/FNN/FNN.py +++ b/Note/nn/FNN/FNN.py @@ -66,11 +66,11 @@ def __init__(self,train_data=None,train_labels=None): self.last_bias=[] self.activation=[] self.batch=None - self.epoch=None + self.epoch=0 self.l2=None self.dropout=None - self.optimizer=None self.lr=None + self.optimizer=None self.train_loss=None self.train_accuracy=None self.train_loss_list=[] @@ -81,9 +81,10 @@ def __init__(self,train_data=None,train_labels=None): self.flag=None self.end_flag=False self.test_flag=None - self.time=None - self.cpu_gpu='/gpu:0' - self.use_cpu_gpu='/gpu:0' + self.total_epoch=0 + self.time=0 + self.total_time=0 + self.processor='/gpu:0' def weight_init(self,shape,mean,stddev,name): @@ -97,7 +98,6 @@ def bias_init(self,shape,mean,stddev,name): def structure(self,hidden,function,layers=None,mean=0,stddev=0.07,dtype=np.float32): with self.graph.as_default(): self.continue_train=False - self.total_epoch=0 self.flag=None self.end_flag=False self.test_flag=False @@ -110,8 +110,11 @@ def structure(self,hidden,function,layers=None,mean=0,stddev=0.07,dtype=np.float self.hidden=hidden self.function=function self.layers=layers + self.epoch=0 self.dtype=dtype - self.time=None + self.total_epoch=0 + self.time=0 + self.total_time=0 with tf.name_scope('parameter_initialization'): if self.layers!=None: self.hidden_layers=self.layers-2 @@ -259,8 +262,7 @@ def forward_propagation(self,data,dropout=None,use_nn=False): return output - def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=None,train_summary_path=None,model_path=None,one=True,continue_train=False,cpu_gpu=None): - t1=time.time() + def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=None,train_summary_path=None,model_path=None,one=True,continue_train=False,processor=None): with self.graph.as_default(): self.batch=batch 
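# Sketch of the epoch/time bookkeeping these patches converge on: the counters
# start at integer zero instead of None, the most recent run's figures live in
# epoch/time, and running totals accumulate across resumed train() calls. This is
# a minimal stand-alone illustration, not the repo's full resume logic.
import time

class Bookkeeping:
    def __init__(self):
        self.epoch=0        # epochs in the most recent train() call
        self.total_epoch=0  # epochs across all calls
        self.time=0         # seconds in the most recent call
        self.total_time=0   # seconds across all calls
    def train(self,epoch):
        t1=time.time()
        for i in range(epoch):
            pass            # one epoch of work goes here
        self.time=int(time.time()-t1)
        self.total_time+=self.time
        self.epoch=epoch
        self.total_epoch+=epoch

b=Bookkeeping()
b.train(5)
b.train(3)                  # resumed run: total_epoch==8 while epoch==3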
self.l2=l2 @@ -274,20 +276,18 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.train_loss_list.clear() self.train_accuracy_list.clear() if self.continue_train==False and continue_train==True: - if self.end_flag==False and self.flag==0: - self.epoch=None self.train_loss_list.clear() self.train_accuracy_list.clear() self.continue_train=True - if cpu_gpu!=None: - self.cpu_gpu=cpu_gpu - if type(self.cpu_gpu)==list and (len(self.cpu_gpu)!=self.hidden_layers+1 or len(self.cpu_gpu)==1): - self.cpu_gpu.append('/gpu:0') - if type(self.cpu_gpu)==str: - train_cpu_gpu=self.cpu_gpu + if processor!=None: + self.processor=processor + if type(self.processor)==list and (len(self.processor)!=self.hidden_layers+1 or len(self.processor)==1): + self.processor.append('/gpu:0') + if type(self.processor)==str: + train_processor=self.processor else: - train_cpu_gpu=self.cpu_gpu[-1] - with tf.device(train_cpu_gpu): + train_processor=self.processor[-1] + with tf.device(train_processor): if continue_train==True and self.end_flag==True: self.end_flag=False self.weight=[x for x in range(self.hidden_layers+1)] @@ -302,6 +302,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.last_weight.clear() self.last_bias.clear() if continue_train==True and self.flag==1: + self.flag=0 self.weight=[x for x in range(self.hidden_layers+1)] self.bias=[x for x in range(self.hidden_layers+1)] for i in range(self.hidden_layers+1): @@ -313,37 +314,36 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.bias[i]=tf.Variable(self.last_bias[i],name='bias_{0}'.format(i+1)) self.last_weight.clear() self.last_bias.clear() - self.flag=0 # ---------------forward propagation--------------- train_output=self.forward_propagation(self.data,self.dropout) # ---------------------------------------- with tf.name_scope('train_loss'): if len(self.labels_shape)==1: - if self.l2==None: + if l2==None: train_loss=tf.reduce_mean(tf.square(train_output-tf.expand_dims(self.labels,axis=1))) else: train_loss=tf.square(train_output-tf.expand_dims(self.labels,axis=1)) - train_loss=tf.reduce_mean(train_loss+self.l2/2*sum([tf.reduce_sum(x**2) for x in self.weight])) + train_loss=tf.reduce_mean(train_loss+l2/2*sum([tf.reduce_sum(x**2) for x in self.weight])) elif self.labels_shape[1]==1: - if self.l2==None: + if l2==None: train_loss=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=train_output,labels=self.labels)) else: train_loss=tf.nn.sigmoid_cross_entropy_with_logits(logits=train_output,labels=self.labels) - train_loss=tf.reduce_mean(train_loss+self.l2/2*sum([tf.reduce_sum(x**2) for x in self.weight])) + train_loss=tf.reduce_mean(train_loss+l2/2*sum([tf.reduce_sum(x**2) for x in self.weight])) else: - if self.l2==None: + if l2==None: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=train_output,labels=self.labels)) else: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=train_output,labels=self.labels)) - train_loss=train_loss+self.l2*sum([tf.reduce_sum(x**2) for x in self.weight]) + train_loss=train_loss+l2*sum([tf.reduce_sum(x**2) for x in self.weight]) if self.optimizer=='Gradient': - opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) + opt=tf.train.GradientDescentOptimizer(learning_rate=lr).minimize(train_loss) if self.optimizer=='RMSprop': - opt=tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(train_loss) + 
opt=tf.train.RMSPropOptimizer(learning_rate=lr).minimize(train_loss) if self.optimizer=='Momentum': - opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) + opt=tf.train.MomentumOptimizer(learning_rate=lr,momentum=0.99).minimize(train_loss) if self.optimizer=='Adam': - opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) + opt=tf.train.AdamOptimizer(learning_rate=lr).minimize(train_loss) with tf.name_scope('train_accuracy'): if len(self.labels_shape)==1: train_accuracy=tf.reduce_mean(tf.abs(train_output-self.labels)) @@ -364,9 +364,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.sess=sess if self.total_epoch==0: epoch=epoch+1 + t1=time.time() for i in range(epoch): - if self.batch!=None: - batches=int((self.shape0-self.shape0%self.batch)/self.batch) + if batch!=None: + batches=int((self.shape0-self.shape0%self.batch)/batch) total_loss=0 total_acc=0 random=np.arange(self.shape0) @@ -374,8 +375,8 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N train_data=self.train_data[random] train_labels=self.train_labels[random] for j in range(batches): - index1=j*self.batch - index2=(j+1)*self.batch + index1=j*batch + index2=(j+1)*batch train_data_batch=train_data[index1:index2] train_labels_batch=train_labels[index1:index2] feed_dict={self.data:train_data_batch,self.labels:train_labels_batch} @@ -386,10 +387,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N total_loss+=batch_loss batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) total_acc+=batch_acc - if self.shape0%self.batch!=0: + if self.shape0%batch!=0: batches+=1 - index1=batches*self.batch - index2=self.batch-(self.shape0-batches*self.batch) + index1=batches*batch + index2=batch-(self.shape0-batches*batch) train_data_batch=np.concatenate([train_data[index1:],train_data[:index2]]) train_labels_batch=np.concatenate([train_labels[index1:],train_labels[:index2]]) feed_dict={self.data:train_data_batch,self.labels:train_labels_batch} @@ -434,12 +435,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N temp_epoch=1 if i%temp_epoch==0: if continue_train==True: - if self.epoch!=None: - self.total_epoch=self.epoch+i+1 - else: - self.total_epoch=i - if continue_train==True: - print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch,self.train_loss)) + print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch+i+1,self.train_loss)) else: print('epoch:{0} loss:{1:.6f}'.format(i,self.train_loss)) if model_path!=None and i%epoch*2==0: @@ -447,6 +443,13 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N if train_summary_path!=None: train_summary=sess.run(train_merging,feed_dict=feed_dict) train_writer.add_summary(train_summary,i) + t2=time.time() + _time=int(t2-t1) + if continue_train!=True or self.time==0: + self.total_time=_time + else: + self.total_time+=_time + self.time=_time print() print('last loss:{0:.6f}'.format(self.train_loss)) if len(self.labels_shape)==2: @@ -469,31 +472,26 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.last_bias.clear() sess.run(tf.global_variables_initializer()) if continue_train==True: - if self.epoch!=None: - self.total_epoch=self.epoch+epoch - else: + if self.total_epoch==0: self.total_epoch=epoch-1 - self.epoch=self.total_epoch + self.epoch=epoch-1 + else: + self.total_epoch=self.total_epoch+epoch + self.epoch=epoch if continue_train!=True: 
self.epoch=epoch-1 - t2=time.time() - _time=t2-t1 - if continue_train!=True or self.time==None: - self.time=_time - else: - self.time+=_time - print('time:{0:.3f}s'.format(self.time)) + print('time:{0}s'.format(self.time)) return def end(self): with self.graph.as_default(): self.end_flag=True + self.continue_train=False self.last_weight=self.sess.run(self.weight) self.last_bias=self.sess.run(self.bias) self.weight.clear() self.bias.clear() - self.total_epoch=self.epoch self.sess.close() return @@ -756,11 +754,10 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.test_accuracy,output_file) pickle.dump(self.train_loss_list,output_file) pickle.dump(self.train_accuracy_list,output_file) - pickle.dump(self.epoch,output_file) pickle.dump(self.total_epoch,output_file) pickle.dump(self.time,output_file) - pickle.dump(self.cpu_gpu,output_file) - pickle.dump(self.use_cpu_gpu,output_file) + pickle.dump(self.total_time,output_file) + pickle.dump(self.processor,output_file) output_file.close() return @@ -797,25 +794,24 @@ def restore(self,model_path): self.test_accuracy=pickle.load(input_file) self.train_loss_list=pickle.load(input_file) self.train_accuracy_list=pickle.load(input_file) - self.epoch=pickle.load(input_file) self.total_epoch=pickle.load(input_file) self.time=pickle.load(input_file) - self.cpu_gpu=pickle.load(input_file) - self.use_cpu_gpu=pickle.load(input_file) + self.total_time=pickle.load(input_file) + self.processor=pickle.load(input_file) self.flag=1 input_file.close() return - def classify(self,data,one_hot=False,save_path=None,save_csv=None,cpu_gpu=None): + def classify(self,data,one_hot=False,save_path=None,save_csv=None,processor=None): with self.graph.as_default(): - if cpu_gpu!=None: - self.use_cpu_gpu=cpu_gpu - if type(self.use_cpu_gpu)==str: - use_cpu_gpu=self.use_cpu_gpu + if processor!=None: + self.processor=processor + if type(self.processor)==str: + processor=self.processor else: - use_cpu_gpu=self.use_cpu_gpu[-1] - with tf.device(use_cpu_gpu): + processor=self.processor[-1] + with tf.device(processor): data=tf.constant(data) output=self.forward_propagation(data,use_nn=True) config=tf.ConfigProto() @@ -852,15 +848,15 @@ def classify(self,data,one_hot=False,save_path=None,save_csv=None,cpu_gpu=None): return output - def predicate(self,data,save_path=None,save_csv=None,cpu_gpu=None): + def predicate(self,data,save_path=None,save_csv=None,processor=None): with self.graph.as_default(): - if cpu_gpu!=None: - self.use_cpu_gpu=cpu_gpu - if type(self.use_cpu_gpu)==str: - use_cpu_gpu=self.use_cpu_gpu + if processor!=None: + self.processor=processor + if type(processor)==str: + _processor=processor else: - use_cpu_gpu=self.use_cpu_gpu[-1] - with tf.device(use_cpu_gpu): + _processor=processor[-1] + with tf.device(_processor): data=tf.constant(data) output=self.forward_propagation(data,use_nn=True) config=tf.ConfigProto() From 465cd71fbb227bbdc8da237544af1e9bd29667f2 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Tue, 28 Jul 2020 15:34:29 +0800 Subject: [PATCH 66/72] Update CNN.py --- Note/nn/CNN/CNN.py | 130 ++++++++++++++++++++++----------------------- 1 file changed, 63 insertions(+), 67 deletions(-) diff --git a/Note/nn/CNN/CNN.py b/Note/nn/CNN/CNN.py index baeebbea9..56ba464f0 100644 --- a/Note/nn/CNN/CNN.py +++ b/Note/nn/CNN/CNN.py @@ -71,11 +71,11 @@ def __init__(self,train_data=None,train_labels=None): self.activation_fc=[] self.flattened_len=None self.batch=None - self.epoch=None + self.epoch=0 
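# Sketch of the processor argument introduced above: a TF1 device string such as
# '/gpu:0' or '/cpu:0' is handed to tf.device so the ops built inside the block are
# pinned to that device. allow_soft_placement is an extra safeguard added here for
# illustration; the patches themselves only set allow_growth.
import tensorflow as tf

processor='/cpu:0'                        # '/gpu:0' when a GPU is available
with tf.device(processor):
    a=tf.constant([1.0,2.0])
    b=tf.constant([3.0,4.0])
    c=a+b
config=tf.ConfigProto()
config.gpu_options.allow_growth=True
config.allow_soft_placement=True          # fall back if the device is absent
with tf.Session(config=config) as sess:
    print(sess.run(c))                    # [4. 6.]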
self.l2=None self.dropout=None - self.optimizer=None self.lr=None + self.lr=None + self.optimizer=None self.train_loss=None self.train_accuracy=None self.train_loss_list=[] @@ -87,9 +87,10 @@ def __init__(self,train_data=None,train_labels=None): self.flag=None self.end_flag=False self.test_flag=None - self.time=None - self.cpu_gpu='/gpu:0' - self.use_cpu_gpu='/gpu:0' + self.total_epoch=0 + self.time=0 + self.total_time=0 + self.processor='/gpu:0' def data_enhance(self,rotation_range=40,width_shift_range=0.2,height_shift_range=0.2, @@ -125,7 +126,6 @@ def avg_pool_f(self,data,i): def structure(self,conv=None,max_pool=None,avg_pool=None,fc=None,function=None,mean=0,stddev=0.07,dtype=tf.float32): with self.graph.as_default(): self.continue_train=False - self.total_epoch=0 self.flag=None self.end_flag=False self.test_flag=False @@ -142,8 +142,11 @@ def structure(self,conv=None,max_pool=None,avg_pool=None,fc=None,function=None,m self.function=function self.mean=mean self.stddev=stddev + self.epoch=0 self.dtype=dtype - self.time=None + self.total_epoch=0 + self.time=0 + self.total_time=0 with tf.name_scope('parameter_initialization'): for i in range(len(self.conv)): if i==0: @@ -392,8 +395,7 @@ def forward_propagation(self,data,dropout=None,use_nn=False): return output - def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=None,train_summary_path=None,model_path=None,one=True,continue_train=False,cpu_gpu=None): - t1=time.time() + def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=None,train_summary_path=None,model_path=None,one=True,continue_train=False,processor=None): with self.graph.as_default(): self.batch=batch self.l2=l2 @@ -407,18 +409,16 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.train_loss_list.clear() self.train_accuracy_list.clear() if self.continue_train==False and continue_train==True: - if self.end_flag==False and self.flag==0: - self.epoch=None self.train_loss_list.clear() self.train_accuracy_list.clear() self.continue_train=True - if cpu_gpu!=None: - self.cpu_gpu=cpu_gpu - if type(self.cpu_gpu)==list and len(self.cpu_gpu)!=3: - train_cpu_gpu='/gpu:0' - if type(self.cpu_gpu)==str: - train_cpu_gpu=self.cpu_gpu - with tf.device(train_cpu_gpu): + if processor!=None: + self.processor=processor + if type(self.processor)==list and len(self.processor)!=3: + train_processor='/gpu:0' + if type(self.processor)==str: + train_processor=self.processor + with tf.device(train_processor): if continue_train==True and self.end_flag==True: self.end_flag=False self.weight_conv=[x for x in range(len(self.conv))] @@ -431,6 +431,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.last_weight_fc.clear() self.last_bias_fc.clear() if continue_train==True and self.flag==1: + self.flag=0 self.weight_conv=[x for x in range(len(self.conv))] self.bias_conv=[x for x in range(len(self.conv))] for i in range(len(self.conv)): @@ -440,31 +441,30 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.last_bias_conv.clear() self.last_weight_fc.clear() self.last_bias_fc.clear() - self.flag=0 # ---------------forward propagation--------------- train_output=self.forward_propagation(self.data,self.dropout) # ---------------------------------------- with tf.name_scope('train_loss'): if self.labels_shape[1]==1: - if self.l2==None: + if l2==None: train_loss=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=train_output,labels=self.labels)) else:
train_loss=tf.nn.sigmoid_cross_entropy_with_logits(logits=train_output,labels=self.labels) - train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.weight_conv])+sum([tf.reduce_sum(x**2) for x in self.weight_fc]))) + train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.weight_conv])+sum([tf.reduce_sum(x**2) for x in self.weight_fc]))) else: - if self.l2==None: + if l2==None: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=train_output,labels=self.labels)) else: train_loss=tf.nn.softmax_cross_entropy_with_logits_v2(logits=train_output,labels=self.labels) - train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.weight_conv])+sum([tf.reduce_sum(x**2) for x in self.weight_fc]))) + train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.weight_conv])+sum([tf.reduce_sum(x**2) for x in self.weight_fc]))) if self.optimizer=='Gradient': - opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) + opt=tf.train.GradientDescentOptimizer(learning_rate=lr).minimize(train_loss) if self.optimizer=='RMSprop': - opt=tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(train_loss) + opt=tf.train.RMSPropOptimizer(learning_rate=lr).minimize(train_loss) if self.optimizer=='Momentum': - opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) + opt=tf.train.MomentumOptimizer(learning_rate=lr,momentum=0.99).minimize(train_loss) if self.optimizer=='Adam': - opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) + opt=tf.train.AdamOptimizer(learning_rate=lr).minimize(train_loss) with tf.name_scope('train_accuracy'): equal=tf.equal(tf.argmax(train_output,1),tf.argmax(self.labels,1)) train_accuracy=tf.reduce_mean(tf.cast(equal,tf.float32)) @@ -482,9 +482,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.sess=sess if self.total_epoch==0: epoch=epoch+1 + t1=time.time() for i in range(epoch): - if self.batch!=None: - batches=int((self.shape0-self.shape0%self.batch)/self.batch) + if batch!=None: + batches=int((self.shape0-self.shape0%batch)/batch) total_loss=0 total_acc=0 random=np.arange(self.shape0) @@ -492,8 +493,8 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N train_data=self.train_data[random] train_labels=self.train_labels[random] for j in range(batches): - index1=j*self.batch - index2=(j+1)*self.batch + index1=j*batch + index2=(j+1)*batch train_data_batch=train_data[index1:index2] train_labels_batch=train_labels[index1:index2] feed_dict={self.data:train_data_batch,self.labels:train_labels_batch} @@ -504,10 +505,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N total_loss+=batch_loss batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) total_acc+=batch_acc - if self.shape0%self.batch!=0: + if self.shape0%batch!=0: batches+=1 - index1=batches*self.batch - index2=self.batch-(self.shape0-batches*self.batch) + index1=batches*batch + index2=batch-(self.shape0-batches*batch) train_data_batch=np.concatenate([train_data[index1:],train_data[:index2]]) train_labels_batch=np.concatenate([train_labels[index1:],train_labels[:index2]]) feed_dict={self.data:train_data_batch,self.labels:train_labels_batch} @@ -552,12 +553,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N temp_epoch=1 if i%temp_epoch==0: if continue_train==True: - if self.epoch!=None: - 
self.total_epoch=self.epoch+i+1 - else: - self.total_epoch=i - if continue_train==True: - print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch,self.train_loss)) + print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch+i+1,self.train_loss)) else: print('epoch:{0} loss:{1:.6f}'.format(i,self.train_loss)) if model_path!=None and i%epoch*2==0: @@ -565,6 +561,13 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N if train_summary_path!=None: train_summary=sess.run(train_merging,feed_dict=feed_dict) train_writer.add_summary(train_summary,i) + t2=time.time() + _time=int(t2-t1) + if continue_train!=True or self.time==0: + self.total_time=_time + else: + self.total_time+=_time + self.time=_time print() print('last loss:{0:.6f}'.format(self.train_loss)) print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) @@ -591,26 +594,22 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,dropout=N self.last_bias_fc.clear() sess.run(tf.global_variables_initializer()) if continue_train==True: - if self.epoch!=None: - self.total_epoch=self.epoch+epoch - else: + if self.total_epoch==0: self.total_epoch=epoch-1 - self.epoch=self.total_epoch + self.epoch=epoch-1 + else: + self.total_epoch=self.total_epoch+epoch + self.epoch=epoch if continue_train!=True: self.epoch=epoch-1 - t2=time.time() - _time=t2-t1 - if continue_train!=True or self.time==None: - self.time=_time - else: - self.time+=_time - print('time:{0:.3f}s'.format(self.time)) + print('time:{0}s'.format(self.time)) return def end(self): with self.graph.as_default(): self.end_flag=True + self.continue_train=False self.last_weight_conv=self.sess.run(self.weight_conv) self.last_bias_conv=self.sess.run(self.bias_conv) self.last_weight_fc=self.sess.run(self.weight_fc) @@ -619,7 +618,6 @@ def end(self): self.bias_conv.clear() self.weight_fc.clear() self.bias_fc.clear() - self.total_epoch=self.epoch self.sess.close() return @@ -910,10 +908,10 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.function,output_file) pickle.dump(self.batch,output_file) pickle.dump(self.epoch,output_file) - pickle.dump(self.optimizer,output_file) pickle.dump(self.lr,output_file) pickle.dump(self.l2,output_file) pickle.dump(self.dropout,output_file) + pickle.dump(self.optimizer,output_file) pickle.dump(self.train_loss,output_file) pickle.dump(self.train_accuracy,output_file) pickle.dump(self.test_flag,output_file) @@ -922,11 +920,10 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.test_accuracy,output_file) pickle.dump(self.train_loss_list,output_file) pickle.dump(self.train_accuracy_list,output_file) - pickle.dump(self.epoch,output_file) pickle.dump(self.total_epoch,output_file) pickle.dump(self.time,output_file) - pickle.dump(self.cpu_gpu,output_file) - pickle.dump(self.use_cpu_gpu,output_file) + pickle.dump(self.total_time,output_file) + pickle.dump(self.processor,output_file) output_file.close() return @@ -953,10 +950,10 @@ def restore(self,model_path): self.function=pickle.load(input_file) self.batch=pickle.load(input_file) self.epoch=pickle.load(input_file) - self.optimizer=pickle.load(input_file) self.lr=pickle.load(input_file) self.l2=pickle.load(input_file) self.dropout=pickle.load(input_file) + self.optimizer=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_accuracy=pickle.load(input_file) self.test_flag=pickle.load(input_file) @@ -965,25 +962,24 @@ def restore(self,model_path): self.test_accuracy=pickle.load(input_file) self.train_loss_list=pickle.load(input_file) 
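# Sketch of the pickle protocol save() and restore() rely on: values are streamed
# one dump at a time, so restore() must issue its loads in exactly the dump order
# and under the current attribute names (e.g. train_acc vs the older
# train_accuracy). Field names and the path here are illustrative.
import pickle

def save(path,state):
    with open(path,'wb') as f:
        pickle.dump(state['lr'],f)
        pickle.dump(state['train_loss'],f)
        pickle.dump(state['train_acc'],f)

def restore(path):
    state={}
    with open(path,'rb') as f:
        state['lr']=pickle.load(f)          # must mirror the first dump
        state['train_loss']=pickle.load(f)  # then the second
        state['train_acc']=pickle.load(f)   # then the third
    return state

save('/tmp/model.dat',{'lr':0.001,'train_loss':0.25,'train_acc':0.9})
print(restore('/tmp/model.dat'))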
self.train_accuracy_list=pickle.load(input_file) - self.epoch=pickle.load(input_file) self.total_epoch=pickle.load(input_file) self.time=pickle.load(input_file) - self.cpu_gpu=pickle.load(input_file) - self.use_cpu_gpu=pickle.load(input_file) + self.total_time=pickle.load(input_file) + self.processor=pickle.load(input_file) self.flag=1 input_file.close() return - def classify(self,data,one_hot=False,save_path=None,save_csv=None,cpu_gpu=None): + def classify(self,data,one_hot=False,save_path=None,save_csv=None,processor=None): with self.graph.as_default(): - if cpu_gpu!=None: - self.use_cpu_gpu=cpu_gpu - if type(self.use_cpu_gpu)==str: - use_cpu_gpu=self.use_cpu_gpu + if processor!=None: + self.processor=processor + if type(processor)==str: + _processor=processor else: - use_cpu_gpu=self.use_cpu_gpu[-1] - with tf.device(use_cpu_gpu): + _processor=processor[-1] + with tf.device(_processor): data=tf.constant(data) output=self.forward_propagation(data,use_nn=True) config=tf.ConfigProto() From 9895da0fd0f820f61864e1692f390b4a2fc03ed7 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Tue, 28 Jul 2020 15:35:12 +0800 Subject: [PATCH 67/72] Update GRU.py --- Note/nn/RNN/GRU.py | 167 ++++++++++++++++++++++----------------------- 1 file changed, 81 insertions(+), 86 deletions(-) diff --git a/Note/nn/RNN/GRU.py b/Note/nn/RNN/GRU.py index cc8f23723..a38a439c4 100644 --- a/Note/nn/RNN/GRU.py +++ b/Note/nn/RNN/GRU.py @@ -88,10 +88,10 @@ def __init__(self,train_data=None,train_labels=None): self.last_cltm_bias=None self.last_bias_o=None self.batch=None - self.epoch=None + self.epoch=0 self.dropout=None - self.optimizer=None self.lr=None + self.optimizer=None self.train_loss=None self.train_accuracy=None self.train_loss_list=[] @@ -102,9 +102,10 @@ def __init__(self,train_data=None,train_labels=None): self.flag=None self.end_flag=False self.test_flag=None - self.time=None - self.cpu_gpu='/gpu:0' - self.use_cpu_gpu='/gpu:0' + self.total_epoch=0 + self.time=0 + self.total_time=0 + self.processor='/gpu:0' def embedding(self,d,mean=0.07,stddev=0.07,dtype=tf.float32): @@ -124,7 +125,6 @@ def bias_init(self,shape,mean,stddev,name): def structure(self,hidden,pattern,layers=None,predicate=False,mean=0,stddev=0.07,dtype=tf.float32): with self.graph.as_default(): self.continue_train=False - self.total_epoch=0 self.flag=None self.end_flag=False self.test_flag=False @@ -134,8 +134,11 @@ def structure(self,hidden,pattern,layers=None,predicate=False,mean=0,stddev=0.07 self.pattern=pattern self.layers=layers self.predicate=predicate + self.epoch=0 self.dtype=dtype - self.time=None + self.total_epoch=0 + self.time=0 + self.total_time=0 with tf.name_scope('parameter_initialization'): if self.layers!=None: self.ug_weight_x=[] @@ -427,8 +430,7 @@ def forward_propagation(self,data,labels=None,use_nn=False): return - def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_summary_path=None,model_path=None,one=True,continue_train=False,cpu_gpu=None): - t1=time.time() + def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_summary_path=None,model_path=None,one=True,continue_train=False,processor=None): with self.graph.as_default(): self.C.clear() self.h.clear() @@ -443,20 +445,18 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum self.train_loss_list.clear() self.train_accuracy_list.clear() if self.continue_train==False and continue_train==True: - if self.end_flag==False and self.flag==0: - 
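# Sketch of what the ug_/rg_/cltm_ parameters above implement: the update gate,
# reset gate and candidate state of a GRU cell. The equations below are the
# standard GRU formulation, assumed here for illustration; the repo's actual step
# lives in forward_propagation().
import numpy as np

def sigmoid(x):
    return 1.0/(1.0+np.exp(-x))

def gru_step(x,h,ug_wx,ug_wh,ug_b,rg_wx,rg_wh,rg_b,cltm_wx,cltm_wh,cltm_b):
    u=sigmoid(x@ug_wx+h@ug_wh+ug_b)            # update gate
    r=sigmoid(x@rg_wx+h@rg_wh+rg_b)            # reset gate
    c=np.tanh(x@cltm_wx+(r*h)@cltm_wh+cltm_b)  # candidate state
    return u*h+(1.0-u)*c                       # new hidden state

d,hidden=4,3
rng=np.random.RandomState(0)
params=[rng.normal(0,0.07,s) for s in [(d,hidden),(hidden,hidden),(hidden,)]*3]
h=gru_step(rng.normal(size=(1,d)),np.zeros((1,hidden)),*params)
print(h.shape)                                 # (1, 3)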
self.epoch=None self.train_loss_list.clear() self.train_accuracy_list.clear() self.continue_train=True - if cpu_gpu!=None: - self.cpu_gpu=cpu_gpu - if type(self.cpu_gpu)==list and (len(self.cpu_gpu)!=self.layers+1 or len(self.cpu_gpu)==1): - self.cpu_gpu.append('/gpu:0') - if type(self.cpu_gpu)==str: - train_cpu_gpu=self.cpu_gpu + if processor!=None: + self.processor=processor + if type(self.processor)==list and (len(self.processor)!=self.layers+1 or len(self.processor)==1): + self.processor.append('/gpu:0') + if type(self.processor)==str: + train_processor=self.processor else: - train_cpu_gpu=self.cpu_gpu[-1] - with tf.device(train_cpu_gpu): + train_processor=self.processor[-1] + with tf.device(train_processor): if continue_train==True and self.end_flag==True: self.end_flag=False self.embedding_w=tf.Variable(self.last_embedding_w,name='embedding_w') @@ -509,6 +509,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum self.last_weight_o=None self.last_bias_o=None if continue_train==True and self.flag==1: + self.flag=0 self.embedding_w=tf.Variable(self.last_embedding_w,name='embedding_w') self.embedding_b=tf.Variable(self.last_embedding_b,name='embedding_b') if self.layers!=None: @@ -558,56 +559,55 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum self.last_cltm_bias=None self.last_weight_o=None self.last_bias_o=None - self.flag=0 # ---------------forward propagation--------------- self.forward_propagation(self.data) # ---------------------------------------- with tf.name_scope('train_loss'): if self.pattern=='1n': - if self.l2==None: + if l2==None: train_loss=tf.reduce_mean(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1)) else: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.rg_weight_x**2)+tf.reduce_sum(self.rg_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.rg_weight_x**2)+tf.reduce_sum(self.rg_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) elif self.pattern=='n1' or self.predicate==True: if self.pattern=='n1': - if self.l2==None: + if l2==None: 
train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output[-1],labels=self.labels)) else: train_loss=tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output[-1],labels=self.labels) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.rg_weight_x**2)+tf.reduce_sum(self.rg_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.rg_weight_x**2)+tf.reduce_sum(self.rg_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) else: - if self.l2==None: + if l2==None: train_loss=tf.reduce_mean(tf.square(self.output[-1]-tf.expand_dims(self.labels,axis=1))) else: train_loss=tf.square(self.output[-1]-tf.expand_dims(self.labels,axis=1)) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.rg_weight_x**2)+tf.reduce_sum(self.rg_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.rg_weight_x**2)+tf.reduce_sum(self.rg_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) elif self.pattern=='nn': - if self.l2==None: + if l2==None: train_loss=tf.reduce_mean(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1)) else: 
train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.rg_weight_x**2)+tf.reduce_sum(self.rg_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.rg_weight_x**2)+tf.reduce_sum(self.rg_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.rg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) if self.optimizer=='Gradient': - opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) + opt=tf.train.GradientDescentOptimizer(learning_rate=lr).minimize(train_loss) if self.optimizer=='RMSprop': - opt=tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(train_loss) + opt=tf.train.RMSPropOptimizer(learning_rate=lr).minimize(train_loss) if self.optimizer=='Momentum': - opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) + opt=tf.train.MomentumOptimizer(learning_rate=lr,momentum=0.99).minimize(train_loss) if self.optimizer=='Adam': - opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) + opt=tf.train.AdamOptimizer(learning_rate=lr).minimize(train_loss) with tf.name_scope('train_accuracy'): if self.pattern=='1n': train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output,2),tf.argmax(self.labels,2)),tf.float32)) @@ -622,9 +622,8 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum if train_summary_path!=None: train_loss_scalar=tf.summary.scalar('train_loss',train_loss) train_merging=tf.summary.merge([train_loss_scalar]) - if self.acc==True: - train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) - train_merging=tf.summary.merge([train_accuracy_scalar]) + train_accuracy_scalar=tf.summary.scalar('train_accuracy',train_accuracy) + train_merging=tf.summary.merge([train_accuracy_scalar]) train_writer=tf.summary.FileWriter(train_summary_path) config=tf.ConfigProto() config.gpu_options.allow_growth=True @@ -634,9 +633,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum self.sess=sess if self.total_epoch==0: epoch=epoch+1 + t1=time.time() for i in range(epoch): - if self.batch!=None: - batches=int((self.shape0-self.shape0%self.batch)/self.batch) + if batch!=None: + batches=int((self.shape0-self.shape0%batch)/batch) total_loss=0 total_acc=0 
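# Sketch of the mini-batch scheme used in every train() above: shuffle with a
# permutation, take floor(shape0/batch) full batches, and when shape0 is not a
# multiple of batch, build one last batch by wrapping around to the start of the
# shuffled data (the np.concatenate trick), so every sample is visited each epoch.
import numpy as np

data=np.arange(10)
batch=3
shape0=data.shape[0]
random=np.arange(shape0)
np.random.shuffle(random)
data=data[random]
batches=int((shape0-shape0%batch)/batch)
for j in range(batches):
    print(data[j*batch:(j+1)*batch])          # three full batches
if shape0%batch!=0:
    index1=batches*batch
    index2=batch-(shape0-batches*batch)
    print(np.concatenate([data[index1:],data[:index2]]))  # wrap-around batch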
random=np.arange(self.shape0) @@ -644,8 +644,8 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum train_data=self.train_data[random] train_labels=self.train_labels[random] for j in range(batches): - index1=j*self.batch - index2=(j+1)*self.batch + index1=j*batch + index2=(j+1)*batch train_data_batch=train_data[index1:index2] train_labels_batch=train_labels[index1:index2] feed_dict={self.data:train_data_batch,self.labels:train_labels_batch} @@ -656,10 +656,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum total_loss+=batch_loss batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) total_acc+=batch_acc - if self.shape0%self.batch!=0: + if self.shape0%batch!=0: batches+=1 - index1=batches*self.batch - index2=self.batch-(self.shape0-batches*self.batch) + index1=batches*batch + index2=batch-(self.shape0-batches*batch) train_data_batch=np.concatenate([train_data[index1:],train_data[:index2]]) train_labels_batch=np.concatenate([train_labels[index1:],train_labels[:index2]]) feed_dict={self.data:train_data_batch,self.labels:train_labels_batch} @@ -704,12 +704,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum temp_epoch=1 if i%temp_epoch==0: if continue_train==True: - if self.epoch!=None: - self.total_epoch=self.epoch+i+1 - else: - self.total_epoch=i - if continue_train==True: - print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch,self.train_loss)) + print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch+i+1,self.train_loss)) else: print('epoch:{0} loss:{1:.6f}'.format(i,self.train_loss)) if model_path!=None and i%epoch*2==0: @@ -717,6 +712,13 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum if train_summary_path!=None: train_summary=sess.run(train_merging,feed_dict=feed_dict) train_writer.add_summary(train_summary,i) + t2=time.time() + _time=int(t2-t1) + if continue_train!=True or self.time==0: + self.total_time=_time + else: + self.total_time+=_time + self.time=_time print() print('last loss:{0:.6f}'.format(self.train_loss)) if self.predicate==False: @@ -788,26 +790,22 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum self.last_bias_o=None sess.run(tf.global_variables_initializer()) if continue_train==True: - if self.epoch!=None: - self.total_epoch=self.epoch+epoch - else: + if self.total_epoch==0: self.total_epoch=epoch-1 - self.epoch=self.total_epoch + self.epoch=epoch-1 + else: + self.total_epoch=self.total_epoch+epoch + self.epoch=epoch if continue_train!=True: self.epoch=epoch-1 - t2=time.time() - _time=t2-t1 - if continue_train!=True or self.time==None: - self.time=_time - else: - self.time+=_time - print('time:{0:.3f}s'.format(self.time)) + print('time:{0}s'.format(self.time)) return def end(self): with self.graph.as_default(): self.end_flag=True + self.continue_train=False self.last_embedding_w=self.sess.run(self.embedding_w) self.last_embedding_b=self.sess.run(self.embedding_b) self.last_ug_weight_x=self.sess.run(self.ug_weight_x) @@ -832,7 +830,6 @@ def end(self): self.cltm_bias=None self.weight_o=None self.bias_o=None - self.total_epoch=self.epoch self.sess.close() return @@ -1073,9 +1070,9 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.predicate,output_file) pickle.dump(self.batch,output_file) pickle.dump(self.epoch,output_file) - pickle.dump(self.optimizer,output_file) pickle.dump(self.lr,output_file) pickle.dump(self.l2,output_file) + pickle.dump(self.optimizer,output_file) 
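# Sketch of the end()/resume cycle used by these classes: end() snapshots every
# variable into plain numpy arrays (the last_* fields) and closes the session; a
# later train(continue_train=True) rewraps the arrays in fresh tf.Variable objects
# so optimization picks up from the saved values. A minimal TF1 version:
import tensorflow as tf

w=tf.Variable([1.0,2.0],name='w')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    last_w=sess.run(w)                  # numpy snapshot outlives the session

tf.reset_default_graph()
w=tf.Variable(last_w,name='w')          # rewrap to continue in a new graph
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w))                  # [1. 2.]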
pickle.dump(self.train_loss,output_file) pickle.dump(self.train_accuracy,output_file) pickle.dump(self.test_flag,output_file) @@ -1084,11 +1081,10 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.test_accuracy,output_file) pickle.dump(self.train_loss_list,output_file) pickle.dump(self.train_accuracy_list,output_file) - pickle.dump(self.epoch,output_file) pickle.dump(self.total_epoch,output_file) pickle.dump(self.time,output_file) - pickle.dump(self.cpu_gpu,output_file) - pickle.dump(self.use_cpu_gpu,output_file) + pickle.dump(self.total_time,output_file) + pickle.dump(self.processor,output_file) output_file.close() return @@ -1138,9 +1134,9 @@ def restore(self,model_path): self.predicate=pickle.load(input_file) self.batch=pickle.load(input_file) self.epoch=pickle.load(input_file) - self.optimizer=pickle.load(input_file) self.lr=pickle.load(input_file) self.l2=pickle.load(input_file) + self.optimizer=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_accuracy=pickle.load(input_file) self.test_flag=pickle.load(input_file) @@ -1149,27 +1145,26 @@ def restore(self,model_path): self.test_accuracy=pickle.load(input_file) self.train_loss_list=pickle.load(input_file) self.train_accuracy_list=pickle.load(input_file) - self.epoch=pickle.load(input_file) self.total_epoch=pickle.load(input_file) self.time=pickle.load(input_file) - self.cpu_gpu=pickle.load(input_file) - self.use_cpu_gpu=pickle.load(input_file) + self.total_time=pickle.load(input_file) + self.processor=pickle.load(input_file) self.flag=1 input_file.close() return - def classify(self,data,one_hot=False,save_path=None,save_csv=None,cpu_gpu=None): + def classify(self,data,one_hot=False,save_path=None,save_csv=None,processor=None): with self.graph.as_default(): - if cpu_gpu!=None: - self.use_cpu_gpu=cpu_gpu - if type(self.use_cpu_gpu)==str: - use_cpu_gpu=self.use_cpu_gpu + if processor!=None: + self.processor=processor + if type(processor)==str: + _processor=processor else: - use_cpu_gpu=self.use_cpu_gpu[-1] + _processor=processor[-1] self.C.clear() self.h.clear() - with tf.device(use_cpu_gpu): + with tf.device(_processor): data=tf.constant(data) self.forward_propagation(data,use_nn=True) config=tf.ConfigProto() @@ -1218,17 +1213,17 @@ def classify(self,data,one_hot=False,save_path=None,save_csv=None,cpu_gpu=None): return output - def predicate(self,data,save_path=None,save_csv=None,cpu_gpu=None): + def predicate(self,data,save_path=None,save_csv=None,processor=None): with self.graph.as_default(): - if cpu_gpu!=None: - self.use_cpu_gpu=cpu_gpu - if type(self.use_cpu_gpu)==str: - use_cpu_gpu=self.use_cpu_gpu + if processor!=None: + self.processor=processor + if type(processor)==str: + _processor=processor else: - use_cpu_gpu=self.use_cpu_gpu[-1] + _processor=processor[-1] self.C.clear() self.h.clear() - with tf.device(use_cpu_gpu): + with tf.device(_processor): data=tf.constant(data) self.forward_propagation(data,use_nn=True) config=tf.ConfigProto() From 615295820e29c51cf81dc85bcc8d44ba5cdf3446 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Tue, 28 Jul 2020 15:35:25 +0800 Subject: [PATCH 68/72] Update LSTM.py --- Note/nn/RNN/LSTM.py | 160 +++++++++++++++++++++----------------------- 1 file changed, 78 insertions(+), 82 deletions(-) diff --git a/Note/nn/RNN/LSTM.py b/Note/nn/RNN/LSTM.py index b9e941f93..35904da8f 100644 --- a/Note/nn/RNN/LSTM.py +++ b/Note/nn/RNN/LSTM.py @@ -97,8 +97,8 @@ def 
__init__(self,train_data=None,train_labels=None): self.batch=None self.epoch=None self.l2=None - self.optimizer=None self.lr=None + self.optimizer=None self.train_loss=None self.train_accuracy=None self.train_loss_list=[] @@ -109,9 +109,10 @@ def __init__(self,train_data=None,train_labels=None): self.flag=None self.end_flag=False self.test_flag=None - self.time=None - self.cpu_gpu='/gpu:0' - self.use_cpu_gpu='/gpu:0' + self.total_epoch=0 + self.time=0 + self.total_time=0 + self.processor='/gpu:0' def embedding(self,d,mean=0.07,stddev=0.07,dtype=tf.float32): @@ -131,7 +132,6 @@ def bias_init(self,shape,mean,stddev,name): def structure(self,hidden,pattern,layers=None,predicate=False,mean=0,stddev=0.07,dtype=tf.float32): with self.graph.as_default(): self.continue_train=False - self.total_epoch=0 self.flag=None self.end_flag=False self.test_flag=False @@ -141,8 +141,11 @@ def structure(self,hidden,pattern,layers=None,predicate=False,mean=0,stddev=0.07 self.pattern=pattern self.layers=layers self.predicate=predicate + self.epoch=0 self.dtype=dtype - self.time=None + self.total_epoch=0 + self.time=0 + self.total_time=0 with tf.name_scope('parameter_initialization'): if self.layers!=None: self.fg_weight_x=[] @@ -498,8 +501,7 @@ def forward_propagation(self,data,labels=None,use_nn=False): return - def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_summary_path=None,model_path=None,one=True,continue_train=False,cpu_gpu=None): - t1=time.time() + def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_summary_path=None,model_path=None,one=True,continue_train=False,processor=None): with self.graph.as_default(): self.C.clear() self.h.clear() @@ -514,20 +516,18 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum self.train_loss_list.clear() self.train_accuracy_list.clear() if self.continue_train==False and continue_train==True: - if self.end_flag==False and self.flag==0: - self.epoch=None self.train_loss_list.clear() self.train_accuracy_list.clear() self.continue_train=True - if cpu_gpu!=None: - self.cpu_gpu=cpu_gpu - if type(self.cpu_gpu)==list and (len(self.cpu_gpu)!=self.layers+1 or len(self.cpu_gpu)==1): - self.cpu_gpu.append('/gpu:0') - if type(self.cpu_gpu)==str: - train_cpu_gpu=self.cpu_gpu + if processor!=None: + self.processor=processor + if type(self.processor)==list and (len(self.processor)!=self.layers+1 or len(self.processor)==1): + self.processor.append('/gpu:0') + if type(self.processor)==str: + train_processor=self.processor else: - train_cpu_gpu=self.cpu_gpu[-1] - with tf.device(train_cpu_gpu): + train_processor=self.processor[-1] + with tf.device(train_processor): if continue_train==True and self.end_flag==True: self.end_flag=False self.embedding_w=tf.Variable(self.last_embedding_w,name='embedding_w') @@ -592,6 +592,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum self.last_weight_o=None self.last_bias_o=None if continue_train==True and self.flag==1: + self.flag=0 self.embedding_w=tf.Variable(self.last_embedding_w,name='embedding_w') self.embedding_b=tf.Variable(self.last_embedding_b,name='embedding_b') if self.layers!=None: @@ -653,56 +654,55 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum self.last_cltm_bias=None self.last_weight_o=None self.last_bias_o=None - self.flag=0 # ---------------forward propagation--------------- self.forward_propagation(self.data) # ---------------------------------------- with tf.name_scope('train_loss'): 
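# loss construction: softmax cross-entropy over all time steps for the '1n'
# and 'nn' patterns, over the last step for 'n1', and mean squared error when
# predicate==True; the optional L2 penalty and learning rate now read the l2
# and lr arguments directly instead of going through the self.* copies.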
if self.pattern=='1n': - if self.l2==None: + if l2==None: train_loss=tf.reduce_mean(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1)) else: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.fg_weight_x**2)+tf.reduce_sum(self.fg_weight_h**2)+tf.reduce_sum(self.ig_weight_x**2)+tf.reduce_sum(self.ig_weight_h**2)+tf.reduce_sum(self.og_weight_x**2)+tf.reduce_sum(self.og_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.fg_weight_x**2)+tf.reduce_sum(self.fg_weight_h**2)+tf.reduce_sum(self.ig_weight_x**2)+tf.reduce_sum(self.ig_weight_h**2)+tf.reduce_sum(self.og_weight_x**2)+tf.reduce_sum(self.og_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.fg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.fg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_h])+sum([tf.reduce_sum(x**2) for x in self.og_weight_x])+sum([tf.reduce_sum(x**2) for x in self.og_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.fg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.fg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_h])+sum([tf.reduce_sum(x**2) for x in self.og_weight_x])+sum([tf.reduce_sum(x**2) for x in self.og_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) elif self.pattern=='n1' or self.predicate==True: if self.pattern=='n1': - if self.l2==None: + if l2==None: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output[-1],labels=self.labels)) else: train_loss=tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output[-1],labels=self.labels) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.fg_weight_x**2)+tf.reduce_sum(self.fg_weight_h**2)+tf.reduce_sum(self.ig_weight_x**2)+tf.reduce_sum(self.ig_weight_h**2)+tf.reduce_sum(self.og_weight_x**2)+tf.reduce_sum(self.og_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.fg_weight_x**2)+tf.reduce_sum(self.fg_weight_h**2)+tf.reduce_sum(self.ig_weight_x**2)+tf.reduce_sum(self.ig_weight_h**2)+tf.reduce_sum(self.og_weight_x**2)+tf.reduce_sum(self.og_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.fg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.fg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_h])+sum([tf.reduce_sum(x**2) for x in 
self.og_weight_x])+sum([tf.reduce_sum(x**2) for x in self.og_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.fg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.fg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_h])+sum([tf.reduce_sum(x**2) for x in self.og_weight_x])+sum([tf.reduce_sum(x**2) for x in self.og_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) else: - if self.l2==None: + if l2==None: train_loss=tf.reduce_mean(tf.square(self.output[-1]-tf.expand_dims(self.labels,axis=1))) else: train_loss=tf.square(self.output[-1]-tf.expand_dims(self.labels,axis=1)) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.fg_weight_x**2)+tf.reduce_sum(self.fg_weight_h**2)+tf.reduce_sum(self.ig_weight_x**2)+tf.reduce_sum(self.ig_weight_h**2)+tf.reduce_sum(self.og_weight_x**2)+tf.reduce_sum(self.og_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.fg_weight_x**2)+tf.reduce_sum(self.fg_weight_h**2)+tf.reduce_sum(self.ig_weight_x**2)+tf.reduce_sum(self.ig_weight_h**2)+tf.reduce_sum(self.og_weight_x**2)+tf.reduce_sum(self.og_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.fg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.fg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_h])+sum([tf.reduce_sum(x**2) for x in self.og_weight_x])+sum([tf.reduce_sum(x**2) for x in self.og_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.fg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.fg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_h])+sum([tf.reduce_sum(x**2) for x in self.og_weight_x])+sum([tf.reduce_sum(x**2) for x in self.og_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) elif self.pattern=='nn': - if self.l2==None: + if l2==None: train_loss=tf.reduce_mean(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1)) else: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.fg_weight_x**2)+tf.reduce_sum(self.fg_weight_h**2)+tf.reduce_sum(self.ig_weight_x**2)+tf.reduce_sum(self.ig_weight_h**2)+tf.reduce_sum(self.og_weight_x**2)+tf.reduce_sum(self.og_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + 
train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.fg_weight_x**2)+tf.reduce_sum(self.fg_weight_h**2)+tf.reduce_sum(self.ig_weight_x**2)+tf.reduce_sum(self.ig_weight_h**2)+tf.reduce_sum(self.og_weight_x**2)+tf.reduce_sum(self.og_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.fg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.fg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_h])+sum([tf.reduce_sum(x**2) for x in self.og_weight_x])+sum([tf.reduce_sum(x**2) for x in self.og_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.fg_weight_x])+sum([tf.reduce_sum(x**2) for x in self.fg_weight_h])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ig_weight_h])+sum([tf.reduce_sum(x**2) for x in self.og_weight_x])+sum([tf.reduce_sum(x**2) for x in self.og_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) if self.optimizer=='Gradient': - opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) + opt=tf.train.GradientDescentOptimizer(learning_rate=lr).minimize(train_loss) if self.optimizer=='RMSprop': - opt=tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(train_loss) + opt=tf.train.RMSPropOptimizer(learning_rate=lr).minimize(train_loss) if self.optimizer=='Momentum': - opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) + opt=tf.train.MomentumOptimizer(learning_rate=lr,momentum=0.99).minimize(train_loss) if self.optimizer=='Adam': - opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) + opt=tf.train.AdamOptimizer(learning_rate=lr).minimize(train_loss) with tf.name_scope('train_accuracy'): if self.pattern=='1n': train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output,2),tf.argmax(self.labels,2)),tf.float32)) @@ -728,9 +728,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum self.sess=sess if self.total_epoch==0: epoch=epoch+1 + t1=time.time() for i in range(epoch): - if self.batch!=None: - batches=int((self.shape0-self.shape0%self.batch)/self.batch) + if batch!=None: + batches=int((self.shape0-self.shape0%batch)/batch) total_loss=0 total_acc=0 random=np.arange(self.shape0) @@ -738,8 +739,8 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum train_data=self.train_data[random] train_labels=self.train_labels[random] for j in range(batches): - index1=j*self.batch - index2=(j+1)*self.batch + index1=j*batch + index2=(j+1)*batch train_data_batch=train_data[index1:index2] train_labels_batch=train_labels[index1:index2] feed_dict={self.data:train_data_batch,self.labels:train_labels_batch} @@ -750,10 +751,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum total_loss+=batch_loss batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) total_acc+=batch_acc - if self.shape0%self.batch!=0: + if self.shape0%batch!=0: batches+=1 - index1=batches*self.batch - index2=self.batch-(self.shape0-batches*self.batch) + 
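# remainder handling: batch need not divide shape0; one extra batch is
# assembled below by concatenating train_data[index1:] with a
# train_data[:index2] slice wrapped around from the front of the shuffled set.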
index1=batches*batch + index2=batch-(self.shape0-batches*batch) train_data_batch=np.concatenate([train_data[index1:],train_data[:index2]]) train_labels_batch=np.concatenate([train_labels[index1:],train_labels[:index2]]) feed_dict={self.data:train_data_batch,self.labels:train_labels_batch} @@ -798,12 +799,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum temp_epoch=1 if i%temp_epoch==0: if continue_train==True: - if self.epoch!=None: - self.total_epoch=self.epoch+i+1 - else: - self.total_epoch=i - if continue_train==True: - print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch,self.train_loss)) + print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch+i+1,self.train_loss)) else: print('epoch:{0} loss:{1:.6f}'.format(i,self.train_loss)) if model_path!=None and i%epoch*2==0: @@ -811,6 +807,13 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum if train_summary_path!=None: train_summary=sess.run(train_merging,feed_dict=feed_dict) train_writer.add_summary(train_summary,i) + t2=time.time() + _time=int(t2-t1) + if continue_train!=True or self.time==0: + self.total_time=_time + else: + self.total_time+=_time + self.time=_time print() print('last loss:{0:.6f}'.format(self.train_loss)) if self.predicate==False: @@ -897,26 +900,22 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum self.last_bias_o=None sess.run(tf.global_variables_initializer()) if continue_train==True: - if self.epoch!=None: - self.total_epoch=self.epoch+epoch - else: + if self.total_epoch==0: self.total_epoch=epoch-1 - self.epoch=self.total_epoch + self.epoch=epoch-1 + else: + self.total_epoch=self.total_epoch+epoch + self.epoch=epoch if continue_train!=True: self.epoch=epoch-1 - t2=time.time() - _time=t2-t1 - if continue_train!=True or self.time==None: - self.time=_time - else: - self.time+=_time - print('time:{0:.3f}s'.format(self.time)) + print('time:{0}s'.format(self.time)) return def end(self): with self.graph.as_default(): self.end_flag=True + self.continue_train=False self.last_embedding_w=self.sess.run(self.embedding_w) self.last_embedding_b=self.sess.run(self.embedding_b) self.last_fg_weight_x=self.sess.run(self.fg_weight_x) @@ -947,7 +946,6 @@ def end(self): self.cltm_bias=None self.weight_o=None self.bias_o=None - self.total_epoch=self.epoch self.sess.close() return @@ -1196,9 +1194,9 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.predicate,output_file) pickle.dump(self.batch,output_file) pickle.dump(self.epoch,output_file) - pickle.dump(self.optimizer,output_file) pickle.dump(self.lr,output_file) pickle.dump(self.l2,output_file) + pickle.dump(self.optimizer,output_file) pickle.dump(self.train_loss,output_file) pickle.dump(self.train_accuracy,output_file) pickle.dump(self.test_flag,output_file) @@ -1207,11 +1205,10 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.test_accuracy,output_file) pickle.dump(self.train_loss_list,output_file) pickle.dump(self.train_accuracy_list,output_file) - pickle.dump(self.epoch,output_file) pickle.dump(self.total_epoch,output_file) pickle.dump(self.time,output_file) - pickle.dump(self.cpu_gpu,output_file) - pickle.dump(self.use_cpu_gpu,output_file) + pickle.dump(self.total_time,output_file) + pickle.dump(self.processor,output_file) output_file.close() return @@ -1267,9 +1264,9 @@ def restore(self,model_path): self.predicate=pickle.load(input_file) self.batch=pickle.load(input_file) self.epoch=pickle.load(input_file) - self.optimizer=pickle.load(input_file) 
self.lr=pickle.load(input_file) self.l2=pickle.load(input_file) + self.optimizer=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_accuracy=pickle.load(input_file) self.test_flag=pickle.load(input_file) @@ -1278,27 +1275,26 @@ def restore(self,model_path): self.test_accuracy=pickle.load(input_file) self.train_loss_list=pickle.load(input_file) self.train_accuracy_list=pickle.load(input_file) - self.epoch=pickle.load(input_file) self.total_epoch=pickle.load(input_file) self.time=pickle.load(input_file) - self.cpu_gpu=pickle.load(input_file) - self.use_cpu_gpu=pickle.load(input_file) + self.total_time=pickle.load(input_file) + self.processor=pickle.load(input_file) self.flag=1 input_file.close() return - def classify(self,data,one_hot=False,save_path=None,save_csv=None,cpu_gpu=None): + def classify(self,data,one_hot=False,save_path=None,save_csv=None,processor=None): with self.graph.as_default(): - if cpu_gpu!=None: - self.use_cpu_gpu=cpu_gpu - if type(self.use_cpu_gpu)==str: - use_cpu_gpu=self.use_cpu_gpu + if processor!=None: + self.processor=processor + if type(processor)==str: + _processor=processor else: - use_cpu_gpu=self.use_cpu_gpu[-1] + _processor=processor[-1] self.C.clear() self.h.clear() - with tf.device(use_cpu_gpu): + with tf.device(_processor): data=tf.constant(data) self.forward_propagation(data,use_nn=True) config=tf.ConfigProto() @@ -1347,17 +1343,17 @@ def classify(self,data,one_hot=False,save_path=None,save_csv=None,cpu_gpu=None): return output - def predicate(self,data,save_path=None,save_csv=None,cpu_gpu=None): + def predicate(self,data,save_path=None,save_csv=None,processor=None): with self.graph.as_default(): - if cpu_gpu!=None: - self.use_cpu_gpu=cpu_gpu - if type(self.use_cpu_gpu)==str: - use_cpu_gpu=self.use_cpu_gpu + if processor!=None: + self.processor=processor + if type(processor)==str: + _processor=processor else: - use_cpu_gpu=self.use_cpu_gpu[-1] + _processor=processor[-1] self.C.clear() self.h.clear() - with tf.device(use_cpu_gpu): + with tf.device(_processor): data=tf.constant(data) self.forward_propagation(data,use_nn=True) config=tf.ConfigProto() From ec3cf6f6552b723bab11dc0739258634ffa29f90 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Tue, 28 Jul 2020 15:36:07 +0800 Subject: [PATCH 69/72] Update M_reluGRU.py --- Note/nn/RNN/M_reluGRU.py | 160 +++++++++++++++++++-------------------- 1 file changed, 78 insertions(+), 82 deletions(-) diff --git a/Note/nn/RNN/M_reluGRU.py b/Note/nn/RNN/M_reluGRU.py index 5e3015ccc..51d019d99 100644 --- a/Note/nn/RNN/M_reluGRU.py +++ b/Note/nn/RNN/M_reluGRU.py @@ -82,10 +82,10 @@ def __init__(self,train_data=None,train_labels=None): self.last_cltm_bias=None self.last_bias_o=None self.batch=None - self.epoch=None + self.epoch=0 self.dropout=None - self.optimizer=None self.lr=None + self.optimizer=None self.train_loss=None self.train_accuracy=None self.train_loss_list=[] @@ -96,9 +96,10 @@ def __init__(self,train_data=None,train_labels=None): self.flag=None self.end_flag=False self.test_flag=None - self.time=None - self.cpu_gpu='/gpu:0' - self.use_cpu_gpu='/gpu:0' + self.total_epoch=0 + self.time=0 + self.total_time=0 + self.processor='/gpu:0' def embedding(self,d,mean=0.07,stddev=0.07,dtype=tf.float32): @@ -118,7 +119,6 @@ def bias_init(self,shape,mean,stddev,name): def structure(self,hidden,pattern,layers=None,predicate=False,mean=0,stddev=0.07,dtype=tf.float32): with self.graph.as_default(): self.continue_train=False - 
self.total_epoch=0 self.flag=None self.end_flag=False self.test_flag=False @@ -128,8 +128,11 @@ def structure(self,hidden,pattern,layers=None,predicate=False,mean=0,stddev=0.07 self.pattern=pattern self.layers=layers self.predicate=predicate + self.epoch=0 self.dtype=dtype - self.time=None + self.total_epoch=0 + self.time=0 + self.total_time=0 with tf.name_scope('parameter_initialization'): if self.layers!=None: self.ug_weight_x=[] @@ -413,8 +416,7 @@ def forward_propagation(self,data,labels=None,use_nn=False): return - def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_summary_path=None,model_path=None,one=True,continue_train=False,cpu_gpu=None): - t1=time.time() + def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_summary_path=None,model_path=None,one=True,continue_train=False,processor=None): with self.graph.as_default(): self.C.clear() self.h.clear() @@ -429,20 +431,18 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum self.train_loss_list.clear() self.train_accuracy_list.clear() if self.continue_train==False and continue_train==True: - if self.end_flag==False and self.flag==0: - self.epoch=None self.train_loss_list.clear() self.train_accuracy_list.clear() self.continue_train=True - if cpu_gpu!=None: - self.cpu_gpu=cpu_gpu - if type(self.cpu_gpu)==list and (len(self.cpu_gpu)!=self.layers+1 or len(self.cpu_gpu)==1): - self.cpu_gpu.append('/gpu:0') - if type(self.cpu_gpu)==str: - train_cpu_gpu=self.cpu_gpu + if processor!=None: + self.processor=processor + if type(self.processor)==list and (len(self.processor)!=self.layers+1 or len(self.processor)==1): + self.processor.append('/gpu:0') + if type(self.processor)==str: + train_processor=self.processor else: - train_cpu_gpu=self.cpu_gpu[-1] - with tf.device(train_cpu_gpu): + train_processor=self.processor[-1] + with tf.device(train_processor): if continue_train==True and self.end_flag==True: self.end_flag=False self.embedding_w=tf.Variable(self.last_embedding_w,name='embedding_w') @@ -483,6 +483,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum self.last_weight_o=None self.last_bias_o=None if continue_train==True and self.flag==1: + self.flag=0 self.embedding_w=tf.Variable(self.last_embedding_w,name='embedding_w') self.embedding_b=tf.Variable(self.last_embedding_b,name='embedding_b') if self.layers!=None: @@ -520,56 +521,55 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum self.last_cltm_bias=None self.last_weight_o=None self.last_bias_o=None - self.flag=0 # ---------------forward propagation--------------- self.forward_propagation(self.data) # ---------------------------------------- with tf.name_scope('train_loss'): if self.pattern=='1n': - if self.l2==None: + if l2==None: train_loss=tf.reduce_mean(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1)) else: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + 
train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) elif self.pattern=='n1' or self.predicate==True: if self.pattern=='n1': - if self.l2==None: + if l2==None: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output[-1],labels=self.labels)) else: train_loss=tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output[-1],labels=self.labels) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) else: - if self.l2==None: + if l2==None: train_loss=tf.reduce_mean(tf.square(self.output[-1]-tf.expand_dims(self.labels,axis=1))) else: train_loss=tf.square(self.output[-1]-tf.expand_dims(self.labels,axis=1)) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in 
self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) elif self.pattern=='nn': - if self.l2==None: + if l2==None: train_loss=tf.reduce_mean(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1)) else: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output,labels=self.labels,axis=2),axis=1) if self.layers==None: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.ug_weight_x**2)+tf.reduce_sum(self.ug_weight_h**2)+tf.reduce_sum(self.cltm_weight_x**2)+tf.reduce_sum(self.cltm_weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - train_loss=tf.reduce_mean(train_loss+self.l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) + train_loss=tf.reduce_mean(train_loss+l2/2*(sum([tf.reduce_sum(x**2) for x in self.ug_weight_x])+sum([tf.reduce_sum(x**2) for x in self.ug_weight_h])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_x])+sum([tf.reduce_sum(x**2) for x in self.cltm_weight_h])+sum([tf.reduce_sum(x**2) for x in self.weight_o]))) if self.optimizer=='Gradient': - opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) + opt=tf.train.GradientDescentOptimizer(learning_rate=lr).minimize(train_loss) if self.optimizer=='RMSprop': - opt=tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(train_loss) + opt=tf.train.RMSPropOptimizer(learning_rate=lr).minimize(train_loss) if self.optimizer=='Momentum': - opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) + opt=tf.train.MomentumOptimizer(learning_rate=lr,momentum=0.99).minimize(train_loss) if self.optimizer=='Adam': - opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) + opt=tf.train.AdamOptimizer(learning_rate=lr).minimize(train_loss) with tf.name_scope('train_accuracy'): if self.pattern=='1n': train_accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output,2),tf.argmax(self.labels,2)),tf.float32)) @@ -595,9 +595,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum self.sess=sess if self.total_epoch==0: epoch=epoch+1 + t1=time.time() for i in range(epoch): - if self.batch!=None: - batches=int((self.shape0-self.shape0%self.batch)/self.batch) + if batch!=None: + batches=int((self.shape0-self.shape0%batch)/batch) total_loss=0 total_acc=0 random=np.arange(self.shape0) @@ -605,8 +606,8 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum train_data=self.train_data[random] train_labels=self.train_labels[random] for j in range(batches): - index1=j*self.batch - index2=(j+1)*self.batch + index1=j*batch + index2=(j+1)*batch train_data_batch=train_data[index1:index2] train_labels_batch=train_labels[index1:index2] feed_dict={self.data:train_data_batch,self.labels:train_labels_batch} @@ -617,10 +618,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum total_loss+=batch_loss batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) total_acc+=batch_acc - if self.shape0%self.batch!=0: + if self.shape0%batch!=0: batches+=1 
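# same wrap-around remainder handling as in LSTM.py above: an extra batch is
# built with np.concatenate and fed through the normal feed_dict path.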
- index1=batches*self.batch - index2=self.batch-(self.shape0-batches*self.batch) + index1=batches*batch + index2=batch-(self.shape0-batches*batch) train_data_batch=np.concatenate([train_data[index1:],train_data[:index2]]) train_labels_batch=np.concatenate([train_labels[index1:],train_labels[:index2]]) feed_dict={self.data:train_data_batch,self.labels:train_labels_batch} @@ -665,12 +666,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum temp_epoch=1 if i%temp_epoch==0: if continue_train==True: - if self.epoch!=None: - self.total_epoch=self.epoch+i+1 - else: - self.total_epoch=i - if continue_train==True: - print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch,self.train_loss)) + print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch+i+1,self.train_loss)) else: print('epoch:{0} loss:{1:.6f}'.format(i,self.train_loss)) if model_path!=None and i%epoch*2==0: @@ -678,6 +674,13 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum if train_summary_path!=None: train_summary=sess.run(train_merging,feed_dict=feed_dict) train_writer.add_summary(train_summary,i) + t2=time.time() + _time=int(t2-t1) + if continue_train!=True or self.time==0: + self.total_time=_time + else: + self.total_time+=_time + self.time=_time print() print('last loss:{0:.6f}'.format(self.train_loss)) print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) @@ -731,20 +734,15 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum self.last_bias_o=None sess.run(tf.global_variables_initializer()) if continue_train==True: - if self.epoch!=None: - self.total_epoch=self.epoch+epoch - else: + if self.total_epoch==0: self.total_epoch=epoch-1 - self.epoch=self.total_epoch + self.epoch=epoch-1 + else: + self.total_epoch=self.total_epoch+epoch + self.epoch=epoch if continue_train!=True: self.epoch=epoch-1 - t2=time.time() - _time=t2-t1 - if continue_train!=True or self.time==None: - self.time=_time - else: - self.time+=_time - print('time:{0:.3f}s'.format(self.time)) + print('time:{0}s'.format(self.time)) return @@ -1001,9 +999,9 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.predicate,output_file) pickle.dump(self.batch,output_file) pickle.dump(self.epoch,output_file) - pickle.dump(self.optimizer,output_file) pickle.dump(self.lr,output_file) pickle.dump(self.l2,output_file) + pickle.dump(self.optimizer,output_file) pickle.dump(self.train_loss,output_file) pickle.dump(self.train_accuracy,output_file) pickle.dump(self.test_flag,output_file) @@ -1012,11 +1010,10 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.test_accuracy,output_file) pickle.dump(self.train_loss_list,output_file) pickle.dump(self.train_accuracy_list,output_file) - pickle.dump(self.epoch,output_file) pickle.dump(self.total_epoch,output_file) pickle.dump(self.time,output_file) - pickle.dump(self.cpu_gpu,output_file) - pickle.dump(self.use_cpu_gpu,output_file) + pickle.dump(self.total_time,output_file) + pickle.dump(self.processor,output_file) output_file.close() return @@ -1060,9 +1057,9 @@ def restore(self,model_path): self.predicate=pickle.load(input_file) self.batch=pickle.load(input_file) self.epoch=pickle.load(input_file) - self.optimizer=pickle.load(input_file) self.lr=pickle.load(input_file) self.l2=pickle.load(input_file) + self.optimizer=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_accuracy=pickle.load(input_file) self.test_flag=pickle.load(input_file) @@ -1071,27 +1068,26 @@ def restore(self,model_path): 
self.test_accuracy=pickle.load(input_file) self.train_loss_list=pickle.load(input_file) self.train_accuracy_list=pickle.load(input_file) - self.epoch=pickle.load(input_file) self.total_epoch=pickle.load(input_file) self.time=pickle.load(input_file) - self.cpu_gpu=pickle.load(input_file) - self.use_cpu_gpu=pickle.load(input_file) + self.total_time=pickle.load(input_file) + self.processor=pickle.load(input_file) self.flag=1 input_file.close() return - def classify(self,data,one_hot=False,save_path=None,save_csv=None,cpu_gpu=None): + def classify(self,data,one_hot=False,save_path=None,save_csv=None,processor=None): with self.graph.as_default(): - if cpu_gpu!=None: - self.use_cpu_gpu=cpu_gpu - if type(self.use_cpu_gpu)==str: - use_cpu_gpu=self.use_cpu_gpu + if processor!=None: + self.processor=processor + if type(processor)==str: + _processor=processor else: - use_cpu_gpu=self.use_cpu_gpu[-1] + _processor=processor[-1] self.C.clear() self.h.clear() - with tf.device(use_cpu_gpu): + with tf.device(_processor): if self.normalize==True: if self.maximun==True: data/=np.max(data,axis=0) @@ -1146,17 +1142,17 @@ def classify(self,data,one_hot=False,save_path=None,save_csv=None,cpu_gpu=None): return output - def predicate(self,data,save_path=None,save_csv=None,cpu_gpu=None): + def predicate(self,data,save_path=None,save_csv=None,processor=None): with self.graph.as_default(): - if cpu_gpu!=None: - self.use_cpu_gpu=cpu_gpu - if type(self.use_cpu_gpu)==str: - use_cpu_gpu=self.use_cpu_gpu + if processor!=None: + self.processor=processor + if type(processor)==str: + _processor=processor else: - use_cpu_gpu=self.use_cpu_gpu[-1] + _processor=processor[-1] self.C.clear() self.h.clear() - with tf.device(use_cpu_gpu): + with tf.device(_processor): if self.normalize==True: if self.maximun==True: data/=np.max(data,axis=0) From 7f1b2dfc21fd191dcccfbe919df1b2b6db746841 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Tue, 28 Jul 2020 15:36:23 +0800 Subject: [PATCH 70/72] Update RNN.py --- Note/nn/RNN/RNN.py | 147 ++++++++++++++++++++++----------------------- 1 file changed, 72 insertions(+), 75 deletions(-) diff --git a/Note/nn/RNN/RNN.py b/Note/nn/RNN/RNN.py index c1712935c..c909a6415 100644 --- a/Note/nn/RNN/RNN.py +++ b/Note/nn/RNN/RNN.py @@ -77,10 +77,10 @@ def __init__(self,train_data=None,train_labels=None): self.last_bias_h=None self.last_bias_o=None self.batch=None - self.epoch=None + self.epoch=0 self.l2=None - self.optimizer=None self.lr=None + self.optimizer=None self.train_loss=None self.train_accuracy=None self.train_loss_list=[] @@ -91,7 +91,9 @@ def __init__(self,train_data=None,train_labels=None): self.flag=None self.end_flag=False self.test_flag=None - self.time=None + self.total_epoch=0 + self.time=0 + self.total_time=0 self.cpu_gpu='/gpu:0' self.use_cpu_gpu='/gpu:0' @@ -113,7 +115,6 @@ def bias_init(self,shape,mean,stddev,name): def structure(self,hidden,pattern,ed=None,predicate=False,mean=0,stddev=0.07,dtype=tf.float32): with self.graph.as_default(): self.continue_train=False - self.total_epoch=0 self.flag=None self.end_flag=False self.test_flag=False @@ -123,8 +124,11 @@ def structure(self,hidden,pattern,ed=None,predicate=False,mean=0,stddev=0.07,dty self.hidden=hidden self.pattern=pattern self.predicate=predicate + self.epoch=0 self.dtype=dtype - self.time=None + self.total_epoch=0 + self.time=0 + self.total_time=0 with tf.name_scope('parameter_initialization'): 
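# vanilla-RNN parameters: input projection weight_x/bias_x, recurrent
# weight_h/bias_h, and output projection weight_o/bias_o, all drawn from
# tf.random.normal via weight_init/bias_init.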
self.weight_x=self.weight_init([self.data_shape[2],self.hidden],mean=mean,stddev=stddev,name='weight_x') self.bias_x=self.bias_init([self.hidden],mean=mean,stddev=stddev,name='bias_x') @@ -190,8 +194,7 @@ def forward_propagation(self,data,use_nn=False): return - def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_summary_path=None,model_path=None,one=True,continue_train=False,cpu_gpu=None): - t1=time.time() + def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_summary_path=None,model_path=None,one=True,continue_train=False,processor=None): with self.graph.as_default(): self.h.clear() self.h.append(tf.zeros([1,self.hidden],name='h0')) @@ -206,18 +209,16 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum self.train_loss_list.clear() self.train_accuracy_list.clear() if self.continue_train==False and continue_train==True: - if self.end_flag==False and self.flag==0: - self.epoch=None self.train_loss_list.clear() self.train_accuracy_list.clear() self.continue_train=True - if cpu_gpu!=None: - self.cpu_gpu=cpu_gpu - if type(self.cpu_gpu)==str: - train_cpu_gpu=self.cpu_gpu + if processor!=None: + self.processor=processor + if type(self.processor)==str: + train_processor=self.processor else: - train_cpu_gpu=self.cpu_gpu[1] - with tf.device(train_cpu_gpu): + train_processor=self.processor[1] + with tf.device(train_processor): if continue_train==True and self.end_flag==True: self.embedding_w=tf.Variable(self.last_embedding_w,name='embedding_w') self.embedding_b=tf.Variable(self.last_embedding_b,name='embedding_b') @@ -236,6 +237,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum self.last_bias_h=None self.last_bias_o=None if continue_train==True and self.flag==1: + self.flag=0 self.embedding_w=tf.Variable(self.last_embedding_w,name='embedding_w') self.embedding_b=tf.Variable(self.last_embedding_b,name='embedding_b') self.weight_x=tf.Variable(self.last_weight_x,name='weight_x') @@ -252,44 +254,43 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum self.last_bias_x=None self.last_bias_h=None self.last_bias_o=None - self.flag=0 # ---------------forward propagation--------------- self.forward_propagation(self.data) # ---------------------------------------- with tf.name_scope('train_loss'): if self.pattern=='1n': - if self.l2==None: + if l2==None: train_loss=tf.reduce_mean(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.o,labels=self.labels,axis=2),axis=1)) else: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.o,labels=self.labels,axis=2),axis=1) - train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.weight_x**2)+tf.reduce_sum(self.weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.weight_x**2)+tf.reduce_sum(self.weight_h**2)+tf.reduce_sum(self.weight_o**2))) elif self.pattern=='n1' or self.predicate==True: if self.pattern=='n1': - if self.l2==None: + if l2==None: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.o[-1],labels=self.labels)) else: train_loss=tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.o[-1],labels=self.labels) - train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.weight_x**2)+tf.reduce_sum(self.weight_h**2)+tf.reduce_sum(self.weight_o**2))) + 
train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.weight_x**2)+tf.reduce_sum(self.weight_h**2)+tf.reduce_sum(self.weight_o**2))) else: - if self.l2==None: + if l2==None: train_loss=tf.reduce_mean(tf.square(self.o[-1]-tf.expand_dims(self.labels,axis=1))) else: train_loss=tf.square(self.o[-1]-tf.expand_dims(self.labels,axis=1)) - train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.weight_x**2)+tf.reduce_sum(self.weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.weight_x**2)+tf.reduce_sum(self.weight_h**2)+tf.reduce_sum(self.weight_o**2))) elif self.pattern=='nn': - if self.l2==None: + if l2==None: train_loss=tf.reduce_mean(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.o,labels=self.labels,axis=2),axis=1)) else: train_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.o,labels=self.labels,axis=2),axis=1) - train_loss=tf.reduce_mean(train_loss+self.l2/2*(tf.reduce_sum(self.weight_x**2)+tf.reduce_sum(self.weight_h**2)+tf.reduce_sum(self.weight_o**2))) + train_loss=tf.reduce_mean(train_loss+l2/2*(tf.reduce_sum(self.weight_x**2)+tf.reduce_sum(self.weight_h**2)+tf.reduce_sum(self.weight_o**2))) if self.optimizer=='Gradient': - opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) + opt=tf.train.GradientDescentOptimizer(learning_rate=lr).minimize(train_loss) if self.optimizer=='RMSprop': - opt=tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(train_loss) + opt=tf.train.RMSPropOptimizer(learning_rate=lr).minimize(train_loss) if self.optimizer=='Momentum': - opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) + opt=tf.train.MomentumOptimizer(learning_rate=lr,momentum=0.99).minimize(train_loss) if self.optimizer=='Adam': - opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) + opt=tf.train.AdamOptimizer(learning_rate=lr).minimize(train_loss) train_loss_scalar=tf.summary.scalar('train_loss',train_loss) with tf.name_scope('train_accuracy'): if self.pattern=='1n': @@ -317,9 +318,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum self.sess=sess if self.total_epoch==0: epoch=epoch+1 + t1=time.time() for i in range(epoch): - if self.batch!=None: - batches=int((self.shape0-self.shape0%self.batch)/self.batch) + if batch!=None: + batches=int((self.shape0-self.shape0%batch)/batch) total_loss=0 total_acc=0 random=np.arange(self.shape0) @@ -327,8 +329,8 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum train_data=self.train_data[random] train_labels=self.train_labels[random] for j in range(batches): - index1=j*self.batch - index2=(j+1)*self.batch + index1=j*batch + index2=(j+1)*batch train_data_batch=train_data[index1:index2] train_labels_batch=train_labels[index1:index2] feed_dict={self.data:train_data_batch,self.labels:train_labels_batch} @@ -339,10 +341,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum total_loss+=batch_loss batch_acc=sess.run(train_accuracy,feed_dict=feed_dict) total_acc+=batch_acc - if self.shape0%self.batch!=0: + if self.shape0%batch!=0: batches+=1 - index1=batches*self.batch - index2=self.batch-(self.shape0-batches*self.batch) + index1=batches*batch + index2=batch-(self.shape0-batches*batch) train_data_batch=np.concatenate([train_data[index1:],train_data[:index2]]) train_labels_batch=np.concatenate([train_labels[index1:],train_labels[:index2]]) 
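# the wrapped remainder batch flows through the same feed_dict/sess.run path
# as the full batches; its loss and accuracy join the running
# total_loss/total_acc sums.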
feed_dict={self.data:train_data_batch,self.labels:train_labels_batch} @@ -387,12 +389,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum temp_epoch=1 if i%temp_epoch==0: if continue_train==True: - if self.epoch!=None: - self.total_epoch=self.epoch+i+1 - else: - self.total_epoch=i - if continue_train==True: - print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch,self.train_loss)) + print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch+i+1,self.train_loss)) else: print('epoch:{0} loss:{1:.6f}'.format(i,self.train_loss)) if model_path!=None and i%epoch*2==0: @@ -400,6 +397,13 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum if train_summary_path!=None: train_summary=sess.run(train_merging,feed_dict=feed_dict) train_writer.add_summary(train_summary,i) + t2=time.time() + _time=int(t2-t1) + if continue_train!=True or self.time==0: + self.total_time=_time + else: + self.total_time+=_time + self.time=_time print() print('last loss:{0:.6f}'.format(self.train_loss)) print('accuracy:{0:.3f}%'.format(self.train_accuracy*100)) @@ -430,26 +434,22 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,l2=None,train_sum self.last_bias_o=None sess.run(tf.global_variables_initializer()) if continue_train==True: - if self.epoch!=None: - self.total_epoch=self.epoch+epoch - else: + if self.total_epoch==0: self.total_epoch=epoch-1 - self.epoch=self.total_epoch + self.epoch=epoch-1 + else: + self.total_epoch=self.total_epoch+epoch + self.epoch=epoch if continue_train!=True: self.epoch=epoch-1 - t2=time.time() - _time=t2-t1 - if continue_train!=True or self.time==None: - self.time=_time - else: - self.time+=_time - print('time:{0:.3f}s'.format(self.time)) + print('time:{0}s'.format(self.time)) return def end(self): with self.graph.as_default(): self.end_flag=True + self.continue_train=False self.last_embedding_w=self.sess.run(self.embedding_w) self.last_embedding_b=self.sess.run(self.embedding_b) self.last_weight_x=self.sess.run(self.weight_x) @@ -466,7 +466,6 @@ def end(self): self.bias_x=None self.bias_h=None self.bias_o=None - self.total_epoch=self.epoch self.sess.close() return @@ -671,9 +670,9 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.hidden,output_file) pickle.dump(self.batch,output_file) pickle.dump(self.epoch,output_file) - pickle.dump(self.optimizer,output_file) pickle.dump(self.lr,output_file) pickle.dump(self.l2,output_file) + pickle.dump(self.optimizer,output_file) pickle.dump(float(self.train_loss),output_file) pickle.dump(float(self.train_accuracy*100),output_file) pickle.dump(self.test_flag,output_file) @@ -682,11 +681,10 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.test_accuracy,output_file) pickle.dump(self.train_loss_list,output_file) pickle.dump(self.train_accuracy_list,output_file) - pickle.dump(self.epoch,output_file) pickle.dump(self.total_epoch,output_file) pickle.dump(self.time,output_file) - pickle.dump(self.cpu_gpu,output_file) - pickle.dump(self.use_cpu_gpu,output_file) + pickle.dump(self.total_time,output_file) + pickle.dump(self.processor,output_file) output_file.close() return @@ -716,9 +714,9 @@ def restore(self,model_path): self.hidden=pickle.load(input_file) self.batch=pickle.load(input_file) self.epoch=pickle.load(input_file) - self.optimizer=pickle.load(input_file) self.lr=pickle.load(input_file) self.l2=pickle.load(input_file) + self.optimizer=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_accuracy=pickle.load(input_file) 
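# the remaining loads keep mirroring save(): the duplicated epoch entry is
# dropped, and total_time/processor replace the old cpu_gpu pair.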
self.test_flag=pickle.load(input_file) @@ -727,26 +725,25 @@ def restore(self,model_path): self.test_accuracy=pickle.load(input_file) self.train_loss_list=pickle.load(input_file) self.train_accuracy_list=pickle.load(input_file) - self.epoch=pickle.load(input_file) self.total_epoch=pickle.load(input_file) self.time=pickle.load(input_file) - self.cpu_gpu=pickle.load(input_file) - self.use_cpu_gpu=pickle.load(input_file) + self.total_time=pickle.load(input_file) + self.processor=pickle.load(input_file) self.flag=1 input_file.close() return - def classify(self,data,one_hot=False,save_path=None,save_csv=None,cpu_gpu=None): + def classify(self,data,one_hot=False,save_path=None,save_csv=None,processor=None): with self.graph.as_default(): - if cpu_gpu!=None: - self.use_cpu_gpu=cpu_gpu - if type(self.use_cpu_gpu)==str: - use_cpu_gpu=self.use_cpu_gpu + if processor!=None: + self.processor=processor + if type(processor)==str: + _processor=processor else: - use_cpu_gpu=self.use_cpu_gpu[-1] + _processor=processor[-1] self.h.clear() - with tf.device(use_cpu_gpu): + with tf.device(_processor): data=tf.constant(data) self.forward_propagation(data,use_nn=True) config=tf.ConfigProto() @@ -795,16 +792,16 @@ def classify(self,data,one_hot=False,save_path=None,save_csv=None,cpu_gpu=None): return output - def predicate(self,data,save_path=None,save_csv=None,cpu_gpu=None): + def predicate(self,data,save_path=None,save_csv=None,processor=None): with self.graph.as_default(): - if cpu_gpu!=None: - self.use_cpu_gpu=cpu_gpu - if type(self.use_cpu_gpu)==str: - use_cpu_gpu=self.use_cpu_gpu + if processor!=None: + self.processor=processor + if type(processor)==str: + _processor=processor else: - use_cpu_gpu=self.use_cpu_gpu[-1] + _processor=self.processor[-1] self.h.clear() - with tf.device(use_cpu_gpu): + with tf.device(_processor): data=tf.constant(data) self.forward_propagation(data,use_nn=True) config=tf.ConfigProto() From 3c71cdcb562fd79011c6030d2ca90694ae76c867 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Tue, 28 Jul 2020 15:37:41 +0800 Subject: [PATCH 71/72] Update Note Architecture.py --- Note/create/Note Architecture.py | 95 +++++++++++++++----------------- 1 file changed, 45 insertions(+), 50 deletions(-) diff --git a/Note/create/Note Architecture.py b/Note/create/Note Architecture.py index bd0757988..f080777b6 100644 --- a/Note/create/Note Architecture.py +++ b/Note/create/Note Architecture.py @@ -48,11 +48,11 @@ def __init__(): self.batch=None - self.epoch=None - self.optimizer=None + self.epoch=0 self.lr=None + self.optimizer=None self.train_loss=None self.train_acc=None self.train_loss_list=[] @@ -63,9 +63,10 @@ def __init__(): self.flag=None self.end_flag=False self.test_flag=None - self.time=None - self.cpu_gpu='/gpu:0' - self.use_cpu_gpu='/gpu:0' + self.total_epoch=0 + self.time=0 + self.total_time=0 + self.processor='/gpu:0' def weight_init(self,shape,mean,stddev,name=None): @@ -79,7 +80,6 @@ def bias_init(self,shape,mean,stddev,name=None): def structure(): with self.graph.as_default(): self.continue_train=False - self.total_epoch=0 self.flag=None self.end_flag=False self.test_flag=False @@ -87,8 +87,11 @@ def structure(): self.train_acc_list.clear() + self.epoch=0 self.dtype=dtype - self.time=None + self.total_epoch=0 + self.time=0 + self.total_time=0 @@ -97,8 +100,7 @@ def forward_propagation(): - def 
train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,train_summary_path=None,model_path=None,one=True,continue_train=False,cpu_gpu=None): - t1=time.time() + def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,train_summary_path=None,model_path=None,one=True,continue_train=False,processor=None): with self.graph.as_default(): self.batch=batch self.optimizer=optimizer @@ -110,8 +112,6 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,train_summary_pat self.train_loss_list.clear() self.train_acc_list.clear() if self.continue_train==False and continue_train==True: - if self.end_flag==False and self.flag==0: - self.epoch=None self.train_loss_list.clear() self.train_acc_list.clear() self.continue_train=True @@ -125,9 +125,9 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,train_summary_pat if continue_train==True and self.flag==1: + self.flag=0 - self.flag=0 # ---------------forward propagation--------------- @@ -136,13 +136,13 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,train_summary_pat if self.optimizer=='Gradient': - opt=tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(train_loss) + opt=tf.train.GradientDescentOptimizer(learning_rate=lr).minimize(train_loss) if self.optimizer=='RMSprop': - opt=tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(train_loss) + opt=tf.train.RMSPropOptimizer(learning_rate=lr).minimize(train_loss) if self.optimizer=='Momentum': - opt=tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.99).minimize(train_loss) + opt=tf.train.MomentumOptimizer(learning_rate=lr,momentum=0.99).minimize(train_loss) if self.optimizer=='Adam': - opt=tf.train.AdamOptimizer(learning_rate=self.lr).minimize(train_loss) + opt=tf.train.AdamOptimizer(learning_rate=lr).minimize(train_loss) with tf.name_scope('train_accuracy'): @@ -160,9 +160,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,train_summary_pat self.sess=sess if self.total_epoch==0: epoch=epoch+1 + t1=time.time() for i in range(epoch): - if self.batch!=None: - batches=int((self.shape0-self.shape0%self.batch)/self.batch) + if batch!=None: + batches=int((self.shape0-self.shape0%batch)/batch) total_loss=0 total_acc=0 random=np.arange(self.shape0) @@ -170,8 +171,8 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,train_summary_pat for j in range(batches): - index1=j*self.batch - index2=(j+1)*self.batch + index1=j*batch + index2=(j+1)*batch if i==0 and self.total_epoch==0: @@ -181,10 +182,10 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,train_summary_pat total_loss+=batch_loss batch_acc=sess.run(train_acc,feed_dict=feed_dict) total_acc+=batch_acc - if self.shape0%self.batch!=0: + if self.shape0%batch!=0: batches+=1 - index1=batches*self.batch - index2=self.batch-(self.shape0-batches*self.batch) + index1=batches*batch + index2=batch-(self.shape0-batches*batch) if i==0 and self.total_epoch==0: @@ -227,12 +228,7 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,train_summary_pat temp_epoch=1 if i%temp_epoch==0: if continue_train==True: - if self.epoch!=None: - self.total_epoch=self.epoch+i+1 - else: - self.total_epoch=i - if continue_train==True: - print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch,self.train_loss)) + print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch+i+1,self.train_loss)) else: print('epoch:{0} loss:{1:.6f}'.format(i,self.train_loss)) if model_path!=None and i%epoch*2==0: @@ -240,6 +236,13 @@ def 
train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,train_summary_pat if train_summary_path!=None: train_summary=sess.run(train_merging,feed_dict=feed_dict) train_writer.add_summary(train_summary,i) + t2=time.time() + _time=int(t2-t1) + if continue_train!=True or self.time==0: + self.total_time=_time + else: + self.total_time+=_time + self.time=_time print() print('last loss:{0:.6f}'.format(self.train_loss)) if len(self.labels_shape)==2: @@ -253,29 +256,24 @@ def train(self,batch=None,epoch=None,optimizer='Adam',lr=0.001,train_summary_pat sess.run(tf.global_variables_initializer()) if continue_train==True: - if self.epoch!=None: - self.total_epoch=self.epoch+epoch - else: + if self.total_epoch==0: self.total_epoch=epoch-1 - self.epoch=self.total_epoch + self.epoch=epoch-1 + else: + self.total_epoch=self.total_epoch+epoch + self.epoch=epoch if continue_train!=True: self.epoch=epoch-1 - t2=time.time() - _time=t2-t1 - if continue_train!=True or self.time==None: - self.total_time=_time - else: - self.total_time+=_time - print('time:{0:.3f}s'.format(self.time)) + print('time:{0}s'.format(self.time)) return def end(self): with self.graph.as_default(): self.end_flag=True + self.continue_train=False - self.total_epoch=self.epoch self.sess.close() return @@ -409,10 +407,10 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.batch,output_file) pickle.dump(self.epoch,output_file) - pickle.dump(self.optimizer,output_file) pickle.dump(self.lr,output_file) + pickle.dump(self.optimizer,output_file) pickle.dump(self.train_loss,output_file) pickle.dump(self.train_acc,output_file) pickle.dump(self.test_flag,output_file) @@ -421,11 +419,10 @@ def save(self,model_path,i=None,one=True): pickle.dump(self.test_acc,output_file) pickle.dump(self.train_loss_list,output_file) pickle.dump(self.train_accuracy_list,output_file) - pickle.dump(self.epoch,output_file) pickle.dump(self.total_epoch,output_file) pickle.dump(self.time,output_file) - pickle.dump(self.cpu_gpu,output_file) - pickle.dump(self.use_cpu_gpu,output_file) + pickle.dump(self.total_time,output_file) + pickle.dump(self.processor,output_file) output_file.close() return @@ -440,11 +437,10 @@ def restore(self,model_path): self.batch=pickle.load(input_file) self.epoch=pickle.load(input_file) - self.optimizer=pickle.load(input_file) self.lr=pickle.load(input_file) - self.total_time=pickle.load(input_file) + self.optimizer=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_acc=pickle.load(input_file) self.test_flag=pickle.load(input_file) @@ -453,11 +449,10 @@ def restore(self,model_path): self.test_accuracy=pickle.load(input_file) self.train_loss_list=pickle.load(input_file) self.train_acc_list=pickle.load(input_file) - self.epoch=pickle.load(input_file) self.total_epoch=pickle.load(input_file) self.time=pickle.load(input_file) - self.cpu_gpu=pickle.load(input_file) - self.use_cpu_gpu=pickle.load(input_file) + self.total_time=pickle.load(input_file) + self.processor=pickle.load(input_file) self.flag=1 input_file.close() return From 11c08561b4e4e3f5d3aefa8e0c5228d748dbc2d3 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Tue, 28 Jul 2020 16:05:00 +0800 Subject: [PATCH 72/72] Update RNN.py --- Note/nn/RNN/RNN.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Note/nn/RNN/RNN.py b/Note/nn/RNN/RNN.py index c909a6415..d23bb7720 100644 --- a/Note/nn/RNN/RNN.py +++ b/Note/nn/RNN/RNN.py @@ -94,8 +94,7 @@ def 
__init__(self,train_data=None,train_labels=None): self.total_epoch=0 self.time=0 self.total_time=0 - self.cpu_gpu='/gpu:0' - self.use_cpu_gpu='/gpu:0' + self.processor='/gpu:0' def embedding(self,d,mean=0.07,stddev=0.07,dtype=tf.float32):
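Taken together, patches 68-72 apply the same refactor to LSTM.py, M_reluGRU.py, RNN.py and Note Architecture.py: train() reads its batch/lr/l2 arguments directly rather than through the self.* copies, the cpu_gpu/use_cpu_gpu pair collapses into a single processor attribute, and epoch and wall-clock accounting move to integer total_epoch, time and total_time fields. Below is a minimal standalone sketch of the shared device-selection and timing logic; the helper names (pick_device, update_timing) are illustrative, not part of the library:

import time

def pick_device(processor):
    # str -> use the device string as given; list -> the last entry hosts
    # the training/inference ops, mirroring the processor[-1] convention
    # these patches use in train(), classify() and predicate()
    return processor if isinstance(processor, str) else processor[-1]

def update_timing(elapsed, prev_time, prev_total, continue_train):
    # integer-second bookkeeping as in the patched train(): a fresh run
    # (or a first run, prev_time == 0) resets total_time, a continued run
    # accumulates it, and time always holds the latest run's duration
    _time = int(elapsed)
    if not continue_train or prev_time == 0:
        total = _time
    else:
        total = prev_total + _time
    return _time, total

# usage sketch
device = pick_device(['/gpu:0', '/gpu:1'])   # -> '/gpu:1'
t1 = time.time()
# ... one call to train() would run its epoch loop here ...
t2 = time.time()
run_time, total_time = update_timing(t2 - t1, prev_time=0, prev_total=0,
                                     continue_train=False)
print('time:{0}s'.format(run_time))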